hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f732974670da14d5adf61de60ba71cb11ddc2b88 | 1,814 | py | Python | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
from model.blocks.shared_blocks import SELayer
class InvertedResidual(nn.Module):
    """Modified MobileNetV2-style inverted residual block.

    Structure: 1x1 pointwise expansion -> 3x3 depthwise conv -> 1x1 linear
    projection, each followed by BatchNorm (PReLU after the first two).
    A residual connection is added when the block preserves both spatial
    resolution (stride == 1) and channel count.

    ``outp_size`` is kept for the optional SELayer variant (disabled below).
    """

    def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        # Skip connection is only valid when input and output shapes match.
        self.use_res_connect = stride == 1 and in_channels == out_channels

        expanded_channels = in_channels * expand_ratio
        layers = [
            # pointwise expansion
            nn.Conv2d(in_channels, expanded_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(expanded_channels),
            nn.PReLU(),
            # depthwise 3x3 conv; stride controls downsampling
            nn.Conv2d(expanded_channels, expanded_channels, 3, stride, 1,
                      groups=expanded_channels, bias=False),
            nn.BatchNorm2d(expanded_channels),
            nn.PReLU(),
            # linear pointwise projection (no activation)
            nn.Conv2d(expanded_channels, out_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(out_channels),
            # SELayer(out_channels, 8, nn.PReLU, outp_size)
        ]
        self.inv_block = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block; add the input back when the residual link is valid."""
        out = self.inv_block(x)
        if self.use_res_connect:
            out = x + out
        return out
| 37.791667 | 91 | 0.677508 |
import torch.nn as nn
from model.blocks.shared_blocks import SELayer
class InvertedResidual(nn.Module):
    """Modified inverted residual block (MobileNetV2 style).

    1x1 expansion conv -> 3x3 depthwise conv -> 1x1 linear projection,
    each followed by BatchNorm (PReLU after the first two).  A residual
    connection is used when stride == 1 and the channel count is unchanged.
    """

    def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):
        # NOTE(review): outp_size is unused here -- presumably consumed by an
        # optional SE-layer variant; confirm before removing.
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        # Residual connection only when the block preserves shape.
        self.use_res_connect = self.stride == 1 and in_channels == out_channels
        self.inv_block = nn.Sequential(
            # 1x1 pointwise expansion: in_channels -> in_channels * expand_ratio
            nn.Conv2d(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False),
            nn.BatchNorm2d(in_channels * expand_ratio),
            nn.PReLU(),
            # 3x3 depthwise conv (groups == channels); stride controls downsampling
            nn.Conv2d(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride, 1,
                      groups=in_channels * expand_ratio, bias=False),
            nn.BatchNorm2d(in_channels * expand_ratio),
            nn.PReLU(),
            # 1x1 linear projection back to out_channels (no activation)
            nn.Conv2d(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        """Apply the block; add the input back when a residual link is valid."""
        if self.use_res_connect:
            return x + self.inv_block(x)
        return self.inv_block(x)
| true | true |
f7329a426ce773a89f30ada3b70a80dfca316c23 | 809 | py | Python | setup.py | psawa/gecko-api | e1342b931cb49ce0135d9fd5a77aca6cb087f398 | [
"Apache-2.0"
] | 1 | 2021-08-12T09:13:51.000Z | 2021-08-12T09:13:51.000Z | setup.py | psawa/gecko | e1342b931cb49ce0135d9fd5a77aca6cb087f398 | [
"Apache-2.0"
] | null | null | null | setup.py | psawa/gecko | e1342b931cb49ce0135d9fd5a77aca6cb087f398 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(
    # Package metadata for the gecko GEC library.
    name='gecko',
    version='0.1',
    description='Gecko, a library implementing multiple GEC systems.',
    url='http://github.com/psawa/gecko',
    author='thibo rosemplatt',
    author_email='thibo.rosemplatt@gmail.com',
    license='apache 2.0',
    packages=['gecko'],
    zip_safe=False,
    # TODO : Loosen the requirements
    # NOTE(review): every dependency below is hard-pinned (==); consider
    # version ranges once compatibility has been verified.
    install_requires = [
        "torch==1.3.0",
        "allennlp==0.8.4",
        "python-Levenshtein==0.12.0",
        "transformers==2.2.2",
        "sentencepiece==0.1.91",
        "overrides==4.1.2",
        "scikit-learn==0.22.0",
        # spaCy model installed from a direct URL (PEP 508 syntax)
        "en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.1.0/en_core_web_sm-2.1.0.tar.gz",
    ]
) | 33.708333 | 140 | 0.578492 | from setuptools import setup
setup(
    # Package metadata for the gecko GEC library.
    name='gecko',
    version='0.1',
    description='Gecko, a library implementing multiple GEC systems.',
    url='http://github.com/psawa/gecko',
    author='thibo rosemplatt',
    author_email='thibo.rosemplatt@gmail.com',
    license='apache 2.0',
    packages=['gecko'],
    zip_safe=False,
    # NOTE(review): all dependencies are hard-pinned (==); consider version
    # ranges once compatibility has been verified.
    install_requires = [
        "torch==1.3.0",
        "allennlp==0.8.4",
        "python-Levenshtein==0.12.0",
        "transformers==2.2.2",
        "sentencepiece==0.1.91",
        "overrides==4.1.2",
        "scikit-learn==0.22.0",
        # spaCy model installed from a direct URL (PEP 508 syntax)
        "en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.1.0/en_core_web_sm-2.1.0.tar.gz",
    ]
) | true | true |
f7329aca67e5680b7316ea2075ecde20e8a7be1e | 14,127 | py | Python | hsp/booking.py | JulianFlesch/hsp | 17abe5a63a15cc4dbf753f8fe3d1808814363f6f | [
"MIT"
] | 5 | 2019-10-25T18:20:53.000Z | 2021-10-13T22:14:18.000Z | hsp/booking.py | JulianFlesch/hsp | 17abe5a63a15cc4dbf753f8fe3d1808814363f6f | [
"MIT"
] | 3 | 2019-10-07T18:03:26.000Z | 2020-12-15T15:19:32.000Z | hsp/booking.py | JulianFlesch/hsp | 17abe5a63a15cc4dbf753f8fe3d1808814363f6f | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (NoSuchElementException,
TimeoutException,
WebDriverException)
from .errors import (CourseIdNotListed, CourseIdAmbiguous,
CourseNotBookable, InvalidCredentials, LoadingFailed)
from .conditions import submit_successful, element_inner_html_has_changed
def start_firefox():
    """Launch a visible Firefox WebDriver session."""
    return webdriver.Firefox()
def start_headless_firefox():
    """Launch a Firefox WebDriver session without a visible window."""
    options = FirefoxOptions()
    options.headless = True
    return webdriver.Firefox(options=options)
def start_chrome():
    """Launch a visible Chrome WebDriver session."""
    return webdriver.Chrome()
def start_headless_chrome():
    """Launch a Chrome WebDriver session without a visible window."""
    options = ChromeOptions()
    options.add_argument("--headless")
    return webdriver.Chrome(options=options)
class HSPCourse:
    """Scrape and book a course on the Tuebingen Hochschulsport portal.

    On construction the course list is scraped for the given ``course_id``
    and the course detail page is visited to determine the booking status.
    Method prefixes: ``_cl_*`` operate on the course-list page, ``_cp_*`` on
    the course detail page and ``_bp_*`` on the booking form page.

    NOTE(review): the ``find_element_by_*`` helpers used throughout were
    removed in Selenium 4 -- this class assumes Selenium 3; confirm the
    pinned selenium version.
    """

    BASE_URL = "https://buchung.hsp.uni-tuebingen.de/angebote/aktueller_zeitraum/"
    COURSE_LIST_URL = BASE_URL + "kurssuche.html"

    def __init__(self, course_id, driver=None):
        """
        :param course_id: course number as listed on the portal
        :param driver: optional pre-configured selenium webdriver; a headless
            one is started when omitted
        """
        self.timeout = 20  # waiting time for site to load in seconds
        self.driver = driver or self._init_driver()
        self.course_id = str(course_id)

        # populated by _scrape_course_detail()
        self.course_page_url = None
        self.time = None
        self.weekday = None
        self.location = None
        self.level = None
        self._scrape_course_detail()

        # populated by _scrape_course_status()
        self.course_name = None
        self.booking_possible = None
        self.waitinglist_exists = None
        self.course_status = None
        self._scrape_course_status()

        self._booking_page = None

    def _accept_cookies_if_shown(self):
        """Dismiss the cookie-consent modal if it appears.

        BUGFIX: previously a missing banner made the 2s wait raise
        TimeoutException, which aborted the whole scrape even though the
        banner is optional; that case is now swallowed.
        """
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        xpath = "//h1[@class='in2-modal-heading']"
        try:
            if WebDriverWait(self.driver, 2).until(EC.presence_of_element_located((By.XPATH, xpath))):
                self.driver.find_element_by_xpath("//button[@data-in2-modal-save-button]").click()
        except TimeoutException:
            pass  # no cookie banner shown -- nothing to accept

    def _cl_filter_by_id(self, course_id):
        """Type ``course_id`` into the course-list filter and wait for the
        list to update."""
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        # wait until filter bar is loaded
        filter_bar_id = "bs_schlagwort"
        filter_bar_loaded = EC.visibility_of_element_located(
            (By.ID, filter_bar_id))
        WebDriverWait(self.driver, self.timeout).until(filter_bar_loaded)
        # "bs_verlauf" displays the number of courses in the course list;
        # its inner HTML changing tells us the filtering is complete
        xpath = "//div[@id='bs_verlauf']"
        filter_result_locator = (By.XPATH, xpath)
        course_list_changed = element_inner_html_has_changed(
            filter_result_locator,
            self.driver.find_element(*filter_result_locator).get_attribute("innerHTML")
        )
        filter_bar = self.driver.find_element_by_id(filter_bar_id)
        filter_bar.send_keys(course_id)
        WebDriverWait(self.driver, self.timeout).until(course_list_changed)

    def _get_el_from_courselist(self, xpath):
        """Find an element on the course-list page."""
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        return self.driver.find_element_by_xpath(xpath)

    def _get_el_from_coursepage(self, xpath):
        """Find an element on the course detail page."""
        assert(self.driver.current_url == self.course_page_url)
        return self.driver.find_element_by_xpath(xpath)

    def _cl_get_time(self, course_row_xpath):
        """Course time cell of the course-list row."""
        time_xpath = course_row_xpath + '/td[@class="bs_szeit"]'
        return self._get_el_from_courselist(time_xpath).text

    def _cl_get_weekday(self, course_row_xpath):
        """Weekday cell of the course-list row."""
        weekday_xpath = course_row_xpath + '/td[@class="bs_stag"]'
        return self._get_el_from_courselist(weekday_xpath).text

    def _cl_get_location(self, course_row_xpath):
        """Location cell of the course-list row."""
        location_xpath = course_row_xpath + '/td[@class="bs_sort"]'
        return self._get_el_from_courselist(location_xpath).text

    def _cl_get_level(self, course_row_xpath):
        """Level/detail cell of the course-list row."""
        location_xpath = course_row_xpath + '/td[@class="bs_sdet"]'
        return self._get_el_from_courselist(location_xpath).text

    def _cl_get_course_link(self, course_row_xpath):
        """URL of the course detail page, taken from the row's booking cell."""
        a_xpath = course_row_xpath + '/td[@class="bs_sbuch"]//a'
        a = self._get_el_from_courselist(a_xpath)
        return a.get_property("href")

    def _cp_get_course_name(self):
        """Course title from the detail page header."""
        title_xp = "//div[@class='bs_head']"
        course_name_div = self._get_el_from_coursepage(title_xp)
        return course_name_div.text

    def _cp_get_bookingbtn_or_status_element(self):
        """Element right after the course anchor: either the booking button
        or a status <span>."""
        course_code = "K" + self.course_id
        xpath = "//a[@id='{}']/following::*".format(course_code)
        return self._get_el_from_coursepage(xpath)

    def _scrape_course_detail(self):
        """Load the course list, filter by id and extract the row details.

        :raises LoadingFailed: when the list page does not load in time
        :raises CourseIdNotListed: when no row matches ``course_id``
        """
        self.driver.get(self.COURSE_LIST_URL)
        try:
            self._accept_cookies_if_shown()
            self._cl_filter_by_id(self.course_id)
            # course site features a table:
            # extract the row that starts with the course id
            xpath = '//td[text()="{}"]/parent::tr'
            course_row_xpath = xpath.format(self.course_id)
            self.time = self._cl_get_time(course_row_xpath)
            self.weekday = self._cl_get_weekday(course_row_xpath)
            self.location = self._cl_get_location(course_row_xpath)
            self.level = self._cl_get_level(course_row_xpath)
            self.course_page_url = self._cl_get_course_link(course_row_xpath)
        except TimeoutException as e:
            print(e)
            raise LoadingFailed("Timeout while loading course list page")
        except NoSuchElementException as e:
            print(e)
            raise CourseIdNotListed(self.course_id)

    def _scrape_course_status(self):
        """Visit the course page and classify the booking state."""
        self.driver.get(self.course_page_url)
        self.course_name = self._cp_get_course_name()
        bookbtn_or_status = self._cp_get_bookingbtn_or_status_element()
        # A <span> means the course is not bookable; it carries a
        # no-booking-possible status text.
        if bookbtn_or_status.tag_name == "span":
            self.course_status = bookbtn_or_status.text
            self.booking_possible = False
            self.waitinglist_exists = False
        elif "bs_btn_warteliste" in bookbtn_or_status.get_attribute("class"):
            self.course_status = "queue signup"
            self.booking_possible = False
            self.waitinglist_exists = True
        elif "bs_btn_buchen" in bookbtn_or_status.get_attribute("class"):
            self.course_status = "booking possible"
            self.booking_possible = True
            self.waitinglist_exists = False
        else:
            self.course_status = "unknown"
            self.booking_possible = False
            self.waitinglist_exists = False

    def _init_driver(self):
        """Start a headless Chrome driver, falling back to headless Firefox.

        BUGFIX: the fallback previously retried start_headless_chrome(),
        contradicting the log message below and defeating the fallback.
        """
        try:
            driver = start_headless_chrome()
        except WebDriverException as e:
            print(e)
            print("[!] Loading Chrome webdriver failed")
            print("... Attempting to use Firefox webdriver")
            driver = start_headless_firefox()
        return driver

    def info(self):
        """One-line human readable course summary."""
        infostr = "#{}: {} {}, {} {}".format(self.course_id or "",
                                             self.course_name or "",
                                             self.level or "",
                                             self.weekday or "",
                                             self.time or "")
        return infostr

    def status(self):
        """Human readable booking status."""
        return "Status: {}".format(self.course_status)

    def is_bookable(self):
        """True when the course can be booked directly."""
        return self.booking_possible

    def has_waitinglist(self):
        """True when only waiting-list signup is offered."""
        return self.waitinglist_exists

    def _switch_to_booking_page(self):
        """Open the booking form (new tab) and switch the driver to it.

        :raises CourseNotBookable: when the course cannot be booked directly
        """
        if self.has_waitinglist() or not self.is_bookable():
            raise CourseNotBookable(self.course_id, self.status())
        self.driver.get(self.course_page_url)
        # at this point, the course is bookable
        booking_btn = self._cp_get_bookingbtn_or_status_element()
        # snapshot of open windows / tabs
        old_windows = self.driver.window_handles
        # press the booking button, which opens a new tab
        booking_btn.click()
        # find the new tab
        new_tab = (set(self.driver.window_handles) - set(old_windows)).pop()
        # switch to new tab
        self.driver.switch_to.window(new_tab)
        # make the window larger, so no fields are being hidden
        self.driver.set_window_size(height=1500, width=2000)
        self._booking_page = self.driver.current_url

    def _bp_enter_personal_details(self, credentials):
        """Fill the personal-details form from ``credentials``.

        :raises InvalidCredentials: when credentials are missing or invalid
        """
        assert (self.driver.current_url == self._booking_page)
        if not credentials or not credentials.is_valid:
            raise InvalidCredentials("Credentials are invalid")
        # gender radio select
        gender_xpath = '//input[@name="sex"][@value="{}"]'.format(
            credentials.gender)
        self.driver.find_element_by_xpath(gender_xpath).click()
        # name field
        name_xpath = '//input[@id="BS_F1100"][@name="vorname"]'
        self.driver.find_element_by_xpath(name_xpath).send_keys(
            credentials.name)
        # surname field
        surname_xpath = '//input[@id="BS_F1200"][@name="name"]'
        self.driver.find_element_by_xpath(surname_xpath).send_keys(
            credentials.surname)
        # street+no field
        street_xpath = '//input[@id="BS_F1300"][@name="strasse"]'
        self.driver.find_element_by_xpath(street_xpath).send_keys(
            credentials.street + " " + credentials.number)
        # zip+city field
        city_xpath = '//input[@id="BS_F1400"][@name="ort"]'
        self.driver.find_element_by_xpath(city_xpath).send_keys(
            credentials.zip_code + " " + credentials.city)
        # status dropdown and matriculation number / employee phone
        status_xpath_template = '//select[@id="BS_F1600"]//option[@value="{}"]'
        status_xpath = status_xpath_template.format(credentials.status)
        # student status
        if credentials.status in ("S-UNIT", "S-aH"):
            self.driver.find_element_by_xpath(status_xpath).click()
            pid_xpath = '//input[@id="BS_F1700"][@name="matnr"]'
            self.driver.find_element_by_xpath(pid_xpath).send_keys(
                credentials.pid)
        # employee status
        elif credentials.status in ("B-UNIT", "B-UKT", "B-aH"):
            self.driver.find_element_by_xpath(status_xpath).click()
            pid_xpath = '//input[@id="BS_F1700"][@name="mitnr"]'
            self.driver.find_element_by_xpath(pid_xpath).send_keys(
                credentials.pid)
        elif credentials.status == "Extern":
            self.driver.find_element_by_xpath(status_xpath).click()
        # email field
        email_xpath = '//input[@id="BS_F2000"][@name="email"]'
        self.driver.find_element_by_xpath(email_xpath).send_keys(
            credentials.email)
        # agree to EULA
        eula_xpath = '//input[@name="tnbed"]'
        self.driver.find_element_by_xpath(eula_xpath).click()

    def _bp_enter_confirm_email(self, email):
        """Fill the confirm-email field when the form shows one."""
        assert(self.driver.current_url == self._booking_page)
        xpath = "//input[@class='bs_form_field'][contains(@name, 'email_check_')]"
        locator = (By.XPATH, xpath)
        try:
            wait = WebDriverWait(self.driver, 5)
            email_input = wait.until(EC.visibility_of_element_located(locator))
            email_input.send_keys(email)
        except TimeoutException:
            pass  # field not present on this form

    def _retry_submit(self, submit_loc, control_loc):
        """Retry clicking ``submit_loc`` until ``control_loc`` disappears."""
        assert(self.driver.current_url == self._booking_page)
        wait = WebDriverWait(self.driver, self.timeout)
        wait.until(submit_successful(submit_loc, control_loc))

    def _bp_wait_until_submit(self):
        """Submit the form until the confirmation page is loaded.

        The page change is detected by observing a checkbox field that
        disappears.
        """
        xpath = "//input[@type='submit'][@value='weiter zur Buchung']"
        submit_locator = (By.XPATH, xpath)
        observed_xpath = "//input[@type='checkbox'][@name='tnbed']"
        control_locator = (By.XPATH, observed_xpath)
        self._retry_submit(submit_locator, control_locator)

    def _bp_wait_until_confirm(self):
        """Confirm the form until the booking ticket is loaded."""
        xpath = "//input[@type='submit'][contains(@value, 'buchen')]"
        submit_locator = (By.XPATH, xpath)
        observed_xpath = "//div[contains(@class, 'bs_text_red') and contains(@class, 'bs_text_big')]"
        control_locator = (By.XPATH, observed_xpath)
        self._retry_submit(submit_locator, control_locator)

    def _save_screenshot(self, outfile):
        """Save the confirmation page as a PNG (default name per course id)."""
        if outfile is None:
            tmpl = "booking_confirmation_{}.png"
            outfile = tmpl.format(self.course_id)
        # save the final page as a screenshot
        self.driver.save_screenshot(outfile)
        print("[*] Booking ticket saved to {}".format(outfile))

    def booking(self, credentials, confirmation_file=None):
        """Book the course for ``credentials`` and save the ticket screenshot.

        :param credentials: object exposing the personal data fields used by
            ``_bp_enter_personal_details``
        :param confirmation_file: optional output path for the screenshot
        """
        self._switch_to_booking_page()
        # verify and fill in the personal data
        self._bp_enter_personal_details(credentials)
        # wait until inputs are submited and page changes
        self._bp_wait_until_submit()
        # fill in confirm email field, if it exists
        self._bp_enter_confirm_email(credentials.email)
        # wait until confirm button is pressed and page changes
        self._bp_wait_until_confirm()
        self._save_screenshot(confirmation_file)
        # close the driver
        # self.driver.quit()
| 35.3175 | 101 | 0.649819 | from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (NoSuchElementException,
TimeoutException,
WebDriverException)
from .errors import (CourseIdNotListed, CourseIdAmbiguous,
CourseNotBookable, InvalidCredentials, LoadingFailed)
from .conditions import submit_successful, element_inner_html_has_changed
def start_firefox():
    """Start a visible Firefox WebDriver session."""
    driver = webdriver.Firefox()
    return driver
def start_headless_firefox():
    """Start a Firefox WebDriver session without a visible window."""
    ff_options = FirefoxOptions()
    ff_options.headless = True
    driver = webdriver.Firefox(options=ff_options)
    return driver
def start_chrome():
    """Start a visible Chrome WebDriver session."""
    driver = webdriver.Chrome()
    return driver
def start_headless_chrome():
    """Start a Chrome WebDriver session without a visible window."""
    chrome_options = ChromeOptions()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome(options=chrome_options)
    return driver
class HSPCourse:
    """Scrape and book a course on the Tuebingen Hochschulsport portal.

    Method prefixes: ``_cl_*`` operate on the course-list page, ``_cp_*`` on
    the course detail page and ``_bp_*`` on the booking form page.
    NOTE(review): the ``find_element_by_*`` helpers were removed in
    Selenium 4 -- this class appears to assume Selenium 3; confirm.
    """

    BASE_URL = "https://buchung.hsp.uni-tuebingen.de/angebote/aktueller_zeitraum/"
    COURSE_LIST_URL = BASE_URL + "kurssuche.html"

    def __init__(self, course_id, driver=None):
        """Scrape detail and status for ``course_id`` on construction."""
        self.timeout = 20  # seconds to wait for pages to load
        self.driver = driver or self._init_driver()
        self.course_id = str(course_id)
        # populated by _scrape_course_detail()
        self.course_page_url = None
        self.time = None
        self.weekday = None
        self.location = None
        self.level = None
        self._scrape_course_detail()
        # populated by _scrape_course_status()
        self.course_name = None
        self.booking_possible = None
        self.waitinglist_exists = None
        self.course_status = None
        self._scrape_course_status()
        self._booking_page = None

    def _accept_cookies_if_shown(self):
        """Dismiss the cookie-consent modal.

        NOTE(review): if the banner never appears, the 2s wait raises
        TimeoutException and aborts the scrape -- likely should be swallowed
        here given the method name; confirm.
        """
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        xpath = "//h1[@class='in2-modal-heading']"
        if WebDriverWait(self.driver, 2).until(EC.presence_of_element_located((By.XPATH, xpath))):
            self.driver.find_element_by_xpath("//button[@data-in2-modal-save-button]").click()

    def _cl_filter_by_id(self, course_id):
        """Type ``course_id`` into the list filter and wait for the update."""
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        filter_bar_id = "bs_schlagwort"
        filter_bar_loaded = EC.visibility_of_element_located(
            (By.ID, filter_bar_id))
        WebDriverWait(self.driver, self.timeout).until(filter_bar_loaded)
        # "bs_verlauf" shows the course count; a change in its inner HTML
        # signals that filtering has completed
        xpath = "//div[@id='bs_verlauf']"
        filter_result_locator = (By.XPATH, xpath)
        course_list_changed = element_inner_html_has_changed(
            filter_result_locator,
            self.driver.find_element(*filter_result_locator).get_attribute("innerHTML")
        )
        filter_bar = self.driver.find_element_by_id(filter_bar_id)
        filter_bar.send_keys(course_id)
        WebDriverWait(self.driver, self.timeout).until(course_list_changed)

    def _get_el_from_courselist(self, xpath):
        """Find an element on the course-list page."""
        assert(self.driver.current_url == self.COURSE_LIST_URL)
        return self.driver.find_element_by_xpath(xpath)

    def _get_el_from_coursepage(self, xpath):
        """Find an element on the course detail page."""
        assert(self.driver.current_url == self.course_page_url)
        return self.driver.find_element_by_xpath(xpath)

    def _cl_get_time(self, course_row_xpath):
        """Course time cell of the course-list row."""
        time_xpath = course_row_xpath + '/td[@class="bs_szeit"]'
        return self._get_el_from_courselist(time_xpath).text

    def _cl_get_weekday(self, course_row_xpath):
        """Weekday cell of the course-list row."""
        weekday_xpath = course_row_xpath + '/td[@class="bs_stag"]'
        return self._get_el_from_courselist(weekday_xpath).text

    def _cl_get_location(self, course_row_xpath):
        """Location cell of the course-list row."""
        location_xpath = course_row_xpath + '/td[@class="bs_sort"]'
        return self._get_el_from_courselist(location_xpath).text

    def _cl_get_level(self, course_row_xpath):
        """Level/detail cell of the course-list row."""
        location_xpath = course_row_xpath + '/td[@class="bs_sdet"]'
        return self._get_el_from_courselist(location_xpath).text

    def _cl_get_course_link(self, course_row_xpath):
        """URL of the course detail page from the row's booking cell."""
        a_xpath = course_row_xpath + '/td[@class="bs_sbuch"]//a'
        a = self._get_el_from_courselist(a_xpath)
        return a.get_property("href")

    def _cp_get_course_name(self):
        """Course title from the detail page header."""
        title_xp = "//div[@class='bs_head']"
        course_name_div = self._get_el_from_coursepage(title_xp)
        return course_name_div.text

    def _cp_get_bookingbtn_or_status_element(self):
        """Element after the course anchor: booking button or status span."""
        course_code = "K" + self.course_id
        xpath = "//a[@id='{}']/following::*".format(course_code)
        return self._get_el_from_coursepage(xpath)

    def _scrape_course_detail(self):
        """Load the course list, filter by id and extract row details."""
        self.driver.get(self.COURSE_LIST_URL)
        try:
            self._accept_cookies_if_shown()
            self._cl_filter_by_id(self.course_id)
            # extract the table row that starts with the course id
            xpath = '//td[text()="{}"]/parent::tr'
            course_row_xpath = xpath.format(self.course_id)
            self.time = self._cl_get_time(course_row_xpath)
            self.weekday = self._cl_get_weekday(course_row_xpath)
            self.location = self._cl_get_location(course_row_xpath)
            self.level = self._cl_get_level(course_row_xpath)
            self.course_page_url = self._cl_get_course_link(course_row_xpath)
        except TimeoutException as e:
            print(e)
            raise LoadingFailed("Timeout while loading course list page")
        except NoSuchElementException as e:
            print(e)
            raise CourseIdNotListed(self.course_id)

    def _scrape_course_status(self):
        """Visit the course page and classify the booking state."""
        self.driver.get(self.course_page_url)
        self.course_name = self._cp_get_course_name()
        bookbtn_or_status = self._cp_get_bookingbtn_or_status_element()
        # a <span> means not bookable; it carries the status text
        if bookbtn_or_status.tag_name == "span":
            self.course_status = bookbtn_or_status.text
            self.booking_possible = False
            self.waitinglist_exists = False
        elif "bs_btn_warteliste" in bookbtn_or_status.get_attribute("class"):
            self.course_status = "queue signup"
            self.booking_possible = False
            self.waitinglist_exists = True
        elif "bs_btn_buchen" in bookbtn_or_status.get_attribute("class"):
            self.course_status = "booking possible"
            self.booking_possible = True
            self.waitinglist_exists = False
        else:
            self.course_status = "unknown"
            self.booking_possible = False
            self.waitinglist_exists = False

    def _init_driver(self):
        """Start a headless driver.

        NOTE(review): the fallback message promises Firefox but
        start_headless_chrome() is retried -- likely should call
        start_headless_firefox(); confirm.
        """
        try:
            driver = start_headless_chrome()
        except WebDriverException as e:
            print(e)
            print("[!] Loading Chrome webdriver failed")
            print("... Attempting to use Firefox webdriver")
            driver = start_headless_chrome()
        return driver

    def info(self):
        """One-line human readable course summary."""
        infostr = "#{}: {} {}, {} {}".format(self.course_id or "",
                                             self.course_name or "",
                                             self.level or "",
                                             self.weekday or "",
                                             self.time or "")
        return infostr

    def status(self):
        """Human readable booking status."""
        return "Status: {}".format(self.course_status)

    def is_bookable(self):
        """True when the course can be booked directly."""
        return self.booking_possible

    def has_waitinglist(self):
        """True when only waiting-list signup is offered."""
        return self.waitinglist_exists

    def _switch_to_booking_page(self):
        """Open the booking form (new tab) and switch the driver to it."""
        if self.has_waitinglist() or not self.is_bookable():
            raise CourseNotBookable(self.course_id, self.status())
        self.driver.get(self.course_page_url)
        booking_btn = self._cp_get_bookingbtn_or_status_element()
        # snapshot open tabs so the newly opened one can be identified
        old_windows = self.driver.window_handles
        booking_btn.click()
        new_tab = (set(self.driver.window_handles) - set(old_windows)).pop()
        self.driver.switch_to.window(new_tab)
        # enlarge the window so no form fields are hidden
        self.driver.set_window_size(height=1500, width=2000)
        self._booking_page = self.driver.current_url

    def _bp_enter_personal_details(self, credentials):
        """Fill the personal-details form from ``credentials``."""
        assert (self.driver.current_url == self._booking_page)
        if not credentials or not credentials.is_valid:
            raise InvalidCredentials("Credentials are invalid")
        # gender radio select
        gender_xpath = '//input[@name="sex"][@value="{}"]'.format(
            credentials.gender)
        self.driver.find_element_by_xpath(gender_xpath).click()
        # first name
        name_xpath = '//input[@id="BS_F1100"][@name="vorname"]'
        self.driver.find_element_by_xpath(name_xpath).send_keys(
            credentials.name)
        # surname
        surname_xpath = '//input[@id="BS_F1200"][@name="name"]'
        self.driver.find_element_by_xpath(surname_xpath).send_keys(
            credentials.surname)
        # street + house number
        street_xpath = '//input[@id="BS_F1300"][@name="strasse"]'
        self.driver.find_element_by_xpath(street_xpath).send_keys(
            credentials.street + " " + credentials.number)
        # zip + city
        city_xpath = '//input[@id="BS_F1400"][@name="ort"]'
        self.driver.find_element_by_xpath(city_xpath).send_keys(
            credentials.zip_code + " " + credentials.city)
        # status dropdown; the pid field differs per status kind
        status_xpath_template = '//select[@id="BS_F1600"]//option[@value="{}"]'
        status_xpath = status_xpath_template.format(credentials.status)
        if credentials.status in ("S-UNIT", "S-aH"):
            # student: matriculation number
            self.driver.find_element_by_xpath(status_xpath).click()
            pid_xpath = '//input[@id="BS_F1700"][@name="matnr"]'
            self.driver.find_element_by_xpath(pid_xpath).send_keys(
                credentials.pid)
        elif credentials.status in ("B-UNIT", "B-UKT", "B-aH"):
            # employee: staff number
            self.driver.find_element_by_xpath(status_xpath).click()
            pid_xpath = '//input[@id="BS_F1700"][@name="mitnr"]'
            self.driver.find_element_by_xpath(pid_xpath).send_keys(
                credentials.pid)
        elif credentials.status == "Extern":
            self.driver.find_element_by_xpath(status_xpath).click()
        # email
        email_xpath = '//input[@id="BS_F2000"][@name="email"]'
        self.driver.find_element_by_xpath(email_xpath).send_keys(
            credentials.email)
        # agree to the terms checkbox
        eula_xpath = '//input[@name="tnbed"]'
        self.driver.find_element_by_xpath(eula_xpath).click()

    def _bp_enter_confirm_email(self, email):
        """Fill the confirm-email field when the form shows one."""
        assert(self.driver.current_url == self._booking_page)
        xpath = "//input[@class='bs_form_field'][contains(@name, 'email_check_')]"
        locator = (By.XPATH, xpath)
        try:
            wait = WebDriverWait(self.driver, 5)
            email_input = wait.until(EC.visibility_of_element_located(locator))
            email_input.send_keys(email)
        except TimeoutException:
            pass  # field not present on this form

    def _retry_submit(self, submit_loc, control_loc):
        """Retry clicking ``submit_loc`` until ``control_loc`` disappears."""
        assert(self.driver.current_url == self._booking_page)
        wait = WebDriverWait(self.driver, self.timeout)
        wait.until(submit_successful(submit_loc, control_loc))

    def _bp_wait_until_submit(self):
        """Submit the form until the confirmation page replaces it."""
        xpath = "//input[@type='submit'][@value='weiter zur Buchung']"
        submit_locator = (By.XPATH, xpath)
        observed_xpath = "//input[@type='checkbox'][@name='tnbed']"
        control_locator = (By.XPATH, observed_xpath)
        self._retry_submit(submit_locator, control_locator)

    def _bp_wait_until_confirm(self):
        """Confirm the form until the booking ticket is loaded."""
        xpath = "//input[@type='submit'][contains(@value, 'buchen')]"
        submit_locator = (By.XPATH, xpath)
        observed_xpath = "//div[contains(@class, 'bs_text_red') and contains(@class, 'bs_text_big')]"
        control_locator = (By.XPATH, observed_xpath)
        self._retry_submit(submit_locator, control_locator)

    def _save_screenshot(self, outfile):
        """Save the confirmation page as a PNG (default name per course id)."""
        if outfile is None:
            tmpl = "booking_confirmation_{}.png"
            outfile = tmpl.format(self.course_id)
        self.driver.save_screenshot(outfile)
        print("[*] Booking ticket saved to {}".format(outfile))

    def booking(self, credentials, confirmation_file=None):
        """Book the course for ``credentials`` and save a ticket screenshot."""
        self._switch_to_booking_page()
        self._bp_enter_personal_details(credentials)
        self._bp_wait_until_submit()
        self._bp_enter_confirm_email(credentials.email)
        self._bp_wait_until_confirm()
        self._save_screenshot(confirmation_file)
| true | true |
f7329afae635d026dd93b95f99e82bc6d87bbe1b | 1,481 | py | Python | setup.py | psass-edfsf/centralized-pre-commit-conf | 49ae2cf524dc90f55dfffc2c38ece3e1a2365c5f | [
"MIT"
] | null | null | null | setup.py | psass-edfsf/centralized-pre-commit-conf | 49ae2cf524dc90f55dfffc2c38ece3e1a2365c5f | [
"MIT"
] | null | null | null | setup.py | psass-edfsf/centralized-pre-commit-conf | 49ae2cf524dc90f55dfffc2c38ece3e1a2365c5f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import find_namespace_packages, setup

# The long description shown on PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as readme_file:
    README = readme_file.read()

# Dependencies needed only for running the test suite.
TEST_REQUIRES = ["pytest-cov", "pytest-vcr", "python-coveralls"]

setup(
    name="centralized-pre-commit-conf",
    version="0.3.5",
    description="Easily install and update centralized pre-commit hooks and their configuration"
    " files in decentralized repositories",
    long_description=README,
    long_description_content_type="text/markdown",
    author="Pierre Sassoulas",
    author_email="pierre.sassoulas@gmail.com",
    url="https://github.com/Pierre-Sassoulas/centralized-pre-commit-conf",
    packages=find_namespace_packages(),
    package_dir={},
    package_data={"centralized_pre_commit_conf": ["*.cfg", "*.yaml", "*.pylintrc", "*.flake8"]},
    entry_points={"console_scripts": ["pre-commit-conf=centralized_pre_commit_conf.main:run"]},
    classifiers=[
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Development Status :: 4 - Beta",
    ],
    install_requires=["setuptools>=45.1", "wheel>=0.34", "colorama", "confuse", "pre-commit>=1.16", "requests"],
    tests_require=TEST_REQUIRES,
    extras_require={"test": TEST_REQUIRES},
    zip_safe=True,
)
| 37.974359 | 119 | 0.667792 |
from setuptools import find_namespace_packages, setup

# Read the README once; it becomes the PyPI long description below.
with open("README.md", "r", encoding="utf-8") as r:
    README = r.read()

# Dependencies needed only for running the test suite.
TEST_REQUIRES = ["pytest-cov", "pytest-vcr", "python-coveralls"]

setup(
    author="Pierre Sassoulas",
    author_email="pierre.sassoulas@gmail.com",
    long_description=README,
    long_description_content_type="text/markdown",
    name="centralized-pre-commit-conf",
    version="0.3.5",
    description="Easily install and update centralized pre-commit hooks and their configuration files in decentralized"
    " repositories",
    packages=find_namespace_packages(),
    entry_points={"console_scripts": ["pre-commit-conf=centralized_pre_commit_conf.main:run"]},
    package_dir={},
    classifiers=[
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Development Status :: 4 - Beta",
    ],
    # Bundled configuration templates shipped inside the package.
    package_data={"centralized_pre_commit_conf": ["*.cfg", "*.yaml", "*.pylintrc", "*.flake8"]},
    install_requires=["setuptools>=45.1", "wheel>=0.34", "colorama", "confuse", "pre-commit>=1.16", "requests"],
    tests_require=TEST_REQUIRES,
    extras_require={"test": TEST_REQUIRES},
    url="https://github.com/Pierre-Sassoulas/centralized-pre-commit-conf",
    zip_safe=True,
)
| true | true |
f7329b61ce18e3c7386bd29579f14e0a410cad5c | 8,856 | py | Python | tests/unit/test_amazon_estimator.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | [
"Apache-2.0"
] | 3 | 2020-04-18T15:25:28.000Z | 2020-04-21T08:30:59.000Z | tests/unit/test_amazon_estimator.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | [
"Apache-2.0"
] | 4 | 2019-11-02T16:19:14.000Z | 2019-11-02T21:31:30.000Z | tests/unit/test_amazon_estimator.py | jaipradeesh/sagemaker-python-sdk | ef842108ccaa324d2be15978aa678926dd1c21ea | [
"Apache-2.0"
] | 2 | 2019-05-30T08:47:34.000Z | 2020-04-08T09:42:01.000Z | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import numpy as np
import pytest
from mock import Mock, patch, call
# Use PCA as a test implementation of AmazonAlgorithmEstimator
from sagemaker.amazon.pca import PCA
from sagemaker.amazon.amazon_estimator import upload_numpy_to_s3_shards, _build_shards, registry, get_image_uri
# Estimator constructor arguments shared by every test below.
COMMON_ARGS = {'role': 'myrole', 'train_instance_count': 1, 'train_instance_type': 'ml.c4.xlarge'}
REGION = "us-west-2"
BUCKET_NAME = "Some-Bucket"
# Fixed timestamp patched into time.strftime so generated S3 keys are deterministic.
TIMESTAMP = '2017-11-06-14:14:15.671'
@pytest.fixture()
def sagemaker_session():
    """Mocked SageMaker session pinned to us-west-2 with a canned PCA training-job description."""
    boto_session = Mock(name='boto_session', region_name=REGION)
    session = Mock(name='sagemaker_session', boto_session=boto_session,
                   region_name=REGION, config=None, local_mode=False)
    session.boto_region_name = REGION
    session.default_bucket = Mock(name='default_bucket', return_value=BUCKET_NAME)
    # Canned response for describe_training_job, mirroring a completed PCA job.
    job_description = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': registry("us-west-2") + "/pca:1",
        },
        'ModelArtifacts': {'S3ModelArtifacts': "s3://some-bucket/model.tar.gz"},
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'checkpoint_path': '"s3://other/1508872349"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'sagemaker_job_name': '"neo"',
            'training_steps': '100',
        },
        'RoleArn': 'arn:aws:iam::366:role/IMRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge',
        },
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '',
                             'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'},
    }
    session.sagemaker_client.describe_training_job = Mock(
        name='describe_training_job', return_value=job_description)
    return session
def test_gov_ecr_uri():
    """GovCloud / ISO partitions use their own ECR account ids and DNS suffixes."""
    expected = {
        'us-gov-west-1': '226302683700.dkr.ecr.us-gov-west-1.amazonaws.com/kmeans:latest',
        'us-iso-east-1': '490574956308.dkr.ecr.us-iso-east-1.c2s.ic.gov/kmeans:latest',
    }
    for region, uri in expected.items():
        assert get_image_uri(region, 'kmeans', 'latest') == uri
def test_init(sagemaker_session):
    """num_components given at construction is stored on the estimator."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    assert estimator.num_components == 55
def test_init_all_pca_hyperparameters(sagemaker_session):
    """Every PCA-specific hyperparameter is stored as given."""
    estimator = PCA(num_components=55, algorithm_mode='randomized', subtract_mean=True,
                    extra_components=33, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    assert estimator.num_components == 55
    assert estimator.algorithm_mode == 'randomized'
    assert estimator.extra_components == 33
def test_init_estimator_args(sagemaker_session):
    """Generic estimator arguments are forwarded to the base class unchanged."""
    estimator = PCA(num_components=1, train_max_run=1234, sagemaker_session=sagemaker_session,
                    data_location='s3://some-bucket/some-key/', **COMMON_ARGS)
    for attr in ('train_instance_type', 'train_instance_count', 'role'):
        assert getattr(estimator, attr) == COMMON_ARGS[attr]
    assert estimator.train_max_run == 1234
    assert estimator.data_location == 's3://some-bucket/some-key/'
def test_data_location_validation(sagemaker_session):
    """Assigning a non-s3 URI to data_location is rejected with ValueError."""
    estimator = PCA(num_components=2, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    with pytest.raises(ValueError):
        estimator.data_location = "nots3://abcd/efgh"
def test_data_location_does_not_call_default_bucket(sagemaker_session):
    """An explicit data_location must not trigger a default-bucket lookup."""
    location = "s3://my-bucket/path/"
    estimator = PCA(num_components=2, sagemaker_session=sagemaker_session,
                    data_location=location, **COMMON_ARGS)
    assert estimator.data_location == location
    assert not sagemaker_session.default_bucket.called
def test_prepare_for_training(sagemaker_session):
    """_prepare_for_training infers feature_dim from the record set and stores mini_batch_size."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]])
    labels = np.array([99, 85, 87, 2])
    record = estimator.record_set(features, labels)
    estimator._prepare_for_training(record, mini_batch_size=1)
    assert estimator.feature_dim == 3
    assert estimator.mini_batch_size == 1
def test_prepare_for_training_list(sagemaker_session):
    """A list containing a single record set is accepted just like a bare record set."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]])
    labels = np.array([99, 85, 87, 2])
    records = [estimator.record_set(features, labels)]
    estimator._prepare_for_training(records, mini_batch_size=1)
    assert estimator.feature_dim == 3
    assert estimator.mini_batch_size == 1
def test_prepare_for_training_list_no_train_channel(sagemaker_session):
    """A record-set list without a 'train' channel is rejected with ValueError."""
    pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]
    labels = [99, 85, 87, 2]
    records = [pca.record_set(np.array(train), np.array(labels), 'test')]
    with pytest.raises(ValueError) as ex:
        pca._prepare_for_training(records, mini_batch_size=1)
    # Check the raised exception itself, not the ExceptionInfo wrapper:
    # str(ex) stringifies the wrapper (file/line context) and can match
    # or miss unintentionally; ex.value is the actual exception.
    assert 'Must provide train channel.' in str(ex.value)
@patch('time.strftime', return_value=TIMESTAMP)
def test_fit_ndarray(time, sagemaker_session):
    """fit() shards the ndarray once per training instance and uploads the shards plus a manifest."""
    mock_s3 = Mock()
    mock_object = Mock()
    mock_s3.Object = Mock(return_value=mock_object)
    sagemaker_session.boto_session.resource = Mock(return_value=mock_s3)
    kwargs = dict(COMMON_ARGS)
    kwargs['train_instance_count'] = 3
    pca = PCA(num_components=55, sagemaker_session=sagemaker_session,
              data_location='s3://{}/key-prefix/'.format(BUCKET_NAME), **kwargs)
    train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]
    labels = [99, 85, 87, 2]
    pca.fit(pca.record_set(np.array(train), np.array(labels)))
    # One shard per training instance, plus the manifest. The original asserts
    # hard-coded the timestamp into the key and then called .format(TIMESTAMP)
    # on strings with no placeholders (a no-op); use a real placeholder so the
    # expectation follows TIMESTAMP.
    for name in ('matrix_0.pbr', 'matrix_1.pbr', 'matrix_2.pbr', '.amazon.manifest'):
        mock_s3.Object.assert_any_call(
            BUCKET_NAME, 'key-prefix/PCA-{}/{}'.format(TIMESTAMP, name))
    assert mock_object.put.call_count == 4
def test_build_shards():
    """_build_shards splits evenly, gives the remainder to the last shard, and
    rejects requesting more shards than elements."""
    array = np.array([1, 2, 3, 4])
    # Even split. The original compared a list of ndarrays with '==', which only
    # works by accident (single-element array truthiness); use np.array_equal.
    shards = _build_shards(4, array)
    assert len(shards) == 4
    for out, expected in zip(shards, map(np.array, [[1], [2], [3], [4]])):
        assert np.array_equal(out, expected)
    # Uneven split: the last shard absorbs the remainder.
    shards = _build_shards(3, array)
    for out, expected in zip(shards, map(np.array, [[1], [2], [3, 4]])):
        assert np.array_equal(out, expected)
    with pytest.raises(ValueError):
        _build_shards(5, array)
def test_upload_numpy_to_s3_shards():
    """Uploading in 3 shards writes matrix_0..matrix_2 under the key prefix."""
    mock_object = Mock()
    mock_s3 = Mock()
    mock_s3.Object = Mock(return_value=mock_object)
    array = np.array([list(range(10))] * 10)
    labels = np.arange(10)
    upload_numpy_to_s3_shards(3, mock_s3, BUCKET_NAME, "key-prefix", array, labels)
    for shard in range(3):
        mock_s3.Object.assert_has_calls(
            [call(BUCKET_NAME, 'key-prefix/matrix_{}.pbr'.format(shard))])
| 45.649485 | 112 | 0.6486 |
from __future__ import absolute_import
import numpy as np
import pytest
from mock import Mock, patch, call
from sagemaker.amazon.pca import PCA
from sagemaker.amazon.amazon_estimator import upload_numpy_to_s3_shards, _build_shards, registry, get_image_uri
# Estimator constructor arguments shared by every test below.
COMMON_ARGS = {'role': 'myrole', 'train_instance_count': 1, 'train_instance_type': 'ml.c4.xlarge'}
REGION = "us-west-2"
BUCKET_NAME = "Some-Bucket"
# Fixed timestamp patched into time.strftime so generated S3 keys are deterministic.
TIMESTAMP = '2017-11-06-14:14:15.671'
@pytest.fixture()
def sagemaker_session():
    """Mocked SageMaker session pinned to us-west-2 with a canned PCA training-job description."""
    boto_session = Mock(name='boto_session', region_name=REGION)
    session = Mock(name='sagemaker_session', boto_session=boto_session,
                   region_name=REGION, config=None, local_mode=False)
    session.boto_region_name = REGION
    session.default_bucket = Mock(name='default_bucket', return_value=BUCKET_NAME)
    # Canned response for describe_training_job, mirroring a completed PCA job.
    job_description = {
        'AlgorithmSpecification': {
            'TrainingInputMode': 'File',
            'TrainingImage': registry("us-west-2") + "/pca:1",
        },
        'ModelArtifacts': {'S3ModelArtifacts': "s3://some-bucket/model.tar.gz"},
        'HyperParameters': {
            'sagemaker_submit_directory': '"s3://some/sourcedir.tar.gz"',
            'checkpoint_path': '"s3://other/1508872349"',
            'sagemaker_program': '"iris-dnn-classifier.py"',
            'sagemaker_enable_cloudwatch_metrics': 'false',
            'sagemaker_container_log_level': '"logging.INFO"',
            'sagemaker_job_name': '"neo"',
            'training_steps': '100',
        },
        'RoleArn': 'arn:aws:iam::366:role/IMRole',
        'ResourceConfig': {
            'VolumeSizeInGB': 30,
            'InstanceCount': 1,
            'InstanceType': 'ml.c4.xlarge',
        },
        'StoppingCondition': {'MaxRuntimeInSeconds': 24 * 60 * 60},
        'TrainingJobName': 'neo',
        'TrainingJobStatus': 'Completed',
        'OutputDataConfig': {'KmsKeyId': '',
                             'S3OutputPath': 's3://place/output/neo'},
        'TrainingJobOutput': {'S3TrainingJobOutput': 's3://here/output.tar.gz'},
    }
    session.sagemaker_client.describe_training_job = Mock(
        name='describe_training_job', return_value=job_description)
    return session
def test_gov_ecr_uri():
    """GovCloud / ISO partitions use their own ECR account ids and DNS suffixes."""
    expected = {
        'us-gov-west-1': '226302683700.dkr.ecr.us-gov-west-1.amazonaws.com/kmeans:latest',
        'us-iso-east-1': '490574956308.dkr.ecr.us-iso-east-1.c2s.ic.gov/kmeans:latest',
    }
    for region, uri in expected.items():
        assert get_image_uri(region, 'kmeans', 'latest') == uri
def test_init(sagemaker_session):
    """num_components given at construction is stored on the estimator."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    assert estimator.num_components == 55
def test_init_all_pca_hyperparameters(sagemaker_session):
    """Every PCA-specific hyperparameter is stored as given."""
    estimator = PCA(num_components=55, algorithm_mode='randomized', subtract_mean=True,
                    extra_components=33, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    assert estimator.num_components == 55
    assert estimator.algorithm_mode == 'randomized'
    assert estimator.extra_components == 33
def test_init_estimator_args(sagemaker_session):
    """Generic estimator arguments are forwarded to the base class unchanged."""
    estimator = PCA(num_components=1, train_max_run=1234, sagemaker_session=sagemaker_session,
                    data_location='s3://some-bucket/some-key/', **COMMON_ARGS)
    for attr in ('train_instance_type', 'train_instance_count', 'role'):
        assert getattr(estimator, attr) == COMMON_ARGS[attr]
    assert estimator.train_max_run == 1234
    assert estimator.data_location == 's3://some-bucket/some-key/'
def test_data_location_validation(sagemaker_session):
    """Assigning a non-s3 URI to data_location is rejected with ValueError."""
    estimator = PCA(num_components=2, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    with pytest.raises(ValueError):
        estimator.data_location = "nots3://abcd/efgh"
def test_data_location_does_not_call_default_bucket(sagemaker_session):
    """An explicit data_location must not trigger a default-bucket lookup."""
    location = "s3://my-bucket/path/"
    estimator = PCA(num_components=2, sagemaker_session=sagemaker_session,
                    data_location=location, **COMMON_ARGS)
    assert estimator.data_location == location
    assert not sagemaker_session.default_bucket.called
def test_prepare_for_training(sagemaker_session):
    """_prepare_for_training infers feature_dim from the record set and stores mini_batch_size."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]])
    labels = np.array([99, 85, 87, 2])
    record = estimator.record_set(features, labels)
    estimator._prepare_for_training(record, mini_batch_size=1)
    assert estimator.feature_dim == 3
    assert estimator.mini_batch_size == 1
def test_prepare_for_training_list(sagemaker_session):
    """A list containing a single record set is accepted just like a bare record set."""
    estimator = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]])
    labels = np.array([99, 85, 87, 2])
    records = [estimator.record_set(features, labels)]
    estimator._prepare_for_training(records, mini_batch_size=1)
    assert estimator.feature_dim == 3
    assert estimator.mini_batch_size == 1
def test_prepare_for_training_list_no_train_channel(sagemaker_session):
    """A record-set list without a 'train' channel is rejected with ValueError."""
    pca = PCA(num_components=55, sagemaker_session=sagemaker_session, **COMMON_ARGS)
    train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]
    labels = [99, 85, 87, 2]
    records = [pca.record_set(np.array(train), np.array(labels), 'test')]
    with pytest.raises(ValueError) as ex:
        pca._prepare_for_training(records, mini_batch_size=1)
    # Check the raised exception itself, not the ExceptionInfo wrapper:
    # str(ex) stringifies the wrapper (file/line context) and can match
    # or miss unintentionally; ex.value is the actual exception.
    assert 'Must provide train channel.' in str(ex.value)
@patch('time.strftime', return_value=TIMESTAMP)
def test_fit_ndarray(time, sagemaker_session):
    """fit() shards the ndarray once per training instance and uploads the shards plus a manifest."""
    mock_s3 = Mock()
    mock_object = Mock()
    mock_s3.Object = Mock(return_value=mock_object)
    sagemaker_session.boto_session.resource = Mock(return_value=mock_s3)
    kwargs = dict(COMMON_ARGS)
    kwargs['train_instance_count'] = 3
    pca = PCA(num_components=55, sagemaker_session=sagemaker_session,
              data_location='s3://{}/key-prefix/'.format(BUCKET_NAME), **kwargs)
    train = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 8.0], [44.0, 55.0, 66.0]]
    labels = [99, 85, 87, 2]
    pca.fit(pca.record_set(np.array(train), np.array(labels)))
    # One shard per training instance, plus the manifest. The original asserts
    # hard-coded the timestamp into the key and then called .format(TIMESTAMP)
    # on strings with no placeholders (a no-op); use a real placeholder so the
    # expectation follows TIMESTAMP.
    for name in ('matrix_0.pbr', 'matrix_1.pbr', 'matrix_2.pbr', '.amazon.manifest'):
        mock_s3.Object.assert_any_call(
            BUCKET_NAME, 'key-prefix/PCA-{}/{}'.format(TIMESTAMP, name))
    assert mock_object.put.call_count == 4
def test_build_shards():
    """_build_shards splits evenly, gives the remainder to the last shard, and
    rejects requesting more shards than elements."""
    array = np.array([1, 2, 3, 4])
    # Even split. The original compared a list of ndarrays with '==', which only
    # works by accident (single-element array truthiness); use np.array_equal.
    shards = _build_shards(4, array)
    assert len(shards) == 4
    for out, expected in zip(shards, map(np.array, [[1], [2], [3], [4]])):
        assert np.array_equal(out, expected)
    # Uneven split: the last shard absorbs the remainder.
    shards = _build_shards(3, array)
    for out, expected in zip(shards, map(np.array, [[1], [2], [3, 4]])):
        assert np.array_equal(out, expected)
    with pytest.raises(ValueError):
        _build_shards(5, array)
def test_upload_numpy_to_s3_shards():
    """Uploading in 3 shards writes matrix_0..matrix_2 under the key prefix."""
    mock_object = Mock()
    mock_s3 = Mock()
    mock_s3.Object = Mock(return_value=mock_object)
    array = np.array([list(range(10))] * 10)
    labels = np.arange(10)
    upload_numpy_to_s3_shards(3, mock_s3, BUCKET_NAME, "key-prefix", array, labels)
    for shard in range(3):
        mock_s3.Object.assert_has_calls(
            [call(BUCKET_NAME, 'key-prefix/matrix_{}.pbr'.format(shard))])
| true | true |
f7329b72c380683c5a8d5148e83479335f69c65b | 3,889 | py | Python | starry/compat.py | rodluger/starry | da7fee48c5ef94278f0047be0579e2f13492cdd5 | [
"MIT"
] | 116 | 2018-02-23T19:47:15.000Z | 2022-02-21T04:43:46.000Z | starry/compat.py | rodluger/starry | da7fee48c5ef94278f0047be0579e2f13492cdd5 | [
"MIT"
] | 224 | 2018-02-26T00:41:51.000Z | 2022-03-29T10:38:16.000Z | starry/compat.py | rodluger/starry | da7fee48c5ef94278f0047be0579e2f13492cdd5 | [
"MIT"
] | 25 | 2018-02-26T18:14:36.000Z | 2021-11-30T01:00:56.000Z | # -*- coding: utf-8 -*-
import warnings
import aesara_theano_fallback
from aesara_theano_fallback import aesara as theano
import aesara_theano_fallback.tensor as tt
from aesara_theano_fallback import sparse as ts
from aesara_theano_fallback import change_flags, ifelse, USE_AESARA
from aesara_theano_fallback.tensor import slinalg
from aesara_theano_fallback.graph import basic, op, params_type, fg
from inspect import getmro
if USE_AESARA:
from aesara.scan.utils import until as scan_until
else:
try:
from theano.scan.utils import until as scan_until
except ModuleNotFoundError:
from theano.scan_module.scan_utils import until as scan_until
# Public re-export surface of this compatibility shim.
__all__ = [
    "theano",
    "tt",
    "ts",
    "slinalg",
    "ifelse",
    "Apply",
    "COp",
    "Op",
    "Params",
    "ParamsType",
    "Node",
    "change_flags",
    "floatX",
    "evaluator",
    "scan_until",
    "USE_AESARA",
]
# Suppress deprecation warnings emitted by the pymc3/theano/aesara stack so
# they do not drown out warnings from user code.
warnings.filterwarnings("ignore", category=DeprecationWarning, module="pymc3")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="theano")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="aesara")
# Set double precision for all tensors created through this module.
floatX = "float64"
# Compatibility aliases: expose the graph/op classes under their historical
# (pre-aesara) names so downstream code works with either backend.
Node = basic.Node
Apply = basic.Apply
Op = op.Op
COp = op.ExternalCOp
Params = params_type.Params
ParamsType = params_type.ParamsType
MissingInputError = fg.MissingInputError
theano.config.floatX = floatX
# This helps prevent defaulting to float32 when Python scalars mix with
# tensors (numpy promotion rules combined with floatX).
theano.config.cast_policy = "numpy+floatX"
def is_tensor(*objs):
    """Return ``True`` if any of ``objs`` is a ``Theano``/``Aesara`` graph object."""
    return any(
        any(base is Node for base in getmro(type(candidate)))
        for candidate in objs
    )
def evaluator(**kwargs):
    """
    Return a function to evaluate theano tensors.
    Works inside a `pymc3` model if a `point` is provided.
    Lazily imports `pymc3` to minimize overhead.
    """
    # Accepted kwargs: `point` (parameter values to evaluate at) and `model`
    # (a pymc3 model; falls back to the active model context when omitted).
    kwargs_point = kwargs.get("point", None)
    kwargs_model = kwargs.get("model", None)
    if kwargs_point is not None:
        # User provided a point: always evaluate in a pymc3 model context.
        import pymc3 as pm
        import pymc3_ext as pmx
        point = kwargs_point
        model = kwargs_model
        if model is None:
            model = pm.Model.get_context()
        def get_val(x):
            # Non-tensor inputs pass through unchanged.
            if is_tensor(x):
                return pmx.eval_in_model(x, model=model, point=point)
            else:
                return x
    else:
        # No point provided
        def get_val(x):
            if is_tensor(x):
                try:
                    # Try to directly evaluate it
                    return x.eval()
                except MissingInputError as e:
                    # That didn't work. Perhaps we are in a pymc3 model
                    # context, but the user didn't provide a point?
                    import pymc3 as pm
                    import pymc3_ext as pmx
                    try:
                        model = kwargs_model
                        if model is None:
                            model = pm.Model.get_context()
                    except TypeError:
                        # pm.Model.get_context raises TypeError when there is
                        # no active model context.
                        raise ValueError(
                            "Missing input for variable {}, and no pymc3 model found.".format(
                                x
                            )
                        )
                    # Warn the user that we're using the test point
                    warnings.warn(
                        "Detected pymc3 model context, but no point provided. "
                        "Evaluating at test_point."
                    )
                    return pmx.eval_in_model(
                        x, model=model, point=model.test_point
                    )
            else:
                return x
    return get_val
| 25.926667 | 94 | 0.578812 |
import warnings
import aesara_theano_fallback
from aesara_theano_fallback import aesara as theano
import aesara_theano_fallback.tensor as tt
from aesara_theano_fallback import sparse as ts
from aesara_theano_fallback import change_flags, ifelse, USE_AESARA
from aesara_theano_fallback.tensor import slinalg
from aesara_theano_fallback.graph import basic, op, params_type, fg
from inspect import getmro
if USE_AESARA:
from aesara.scan.utils import until as scan_until
else:
try:
from theano.scan.utils import until as scan_until
except ModuleNotFoundError:
from theano.scan_module.scan_utils import until as scan_until
# Public re-export surface of this compatibility shim.
__all__ = [
    "theano",
    "tt",
    "ts",
    "slinalg",
    "ifelse",
    "Apply",
    "COp",
    "Op",
    "Params",
    "ParamsType",
    "Node",
    "change_flags",
    "floatX",
    "evaluator",
    "scan_until",
    "USE_AESARA",
]
# Suppress deprecation warnings emitted by the pymc3/theano/aesara stack so
# they do not drown out warnings from user code.
warnings.filterwarnings("ignore", category=DeprecationWarning, module="pymc3")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="theano")
warnings.filterwarnings("ignore", category=DeprecationWarning, module="aesara")
# Set double precision for all tensors created through this module.
floatX = "float64"
# Compatibility aliases: expose the graph/op classes under their historical
# (pre-aesara) names so downstream code works with either backend.
Node = basic.Node
Apply = basic.Apply
Op = op.Op
COp = op.ExternalCOp
Params = params_type.Params
ParamsType = params_type.ParamsType
MissingInputError = fg.MissingInputError
theano.config.floatX = floatX
# This helps prevent defaulting to float32 when Python scalars mix with
# tensors (numpy promotion rules combined with floatX).
theano.config.cast_policy = "numpy+floatX"
def is_tensor(*objs):
    """Return ``True`` if any of ``objs`` is a ``Theano``/``Aesara`` graph object."""
    return any(
        any(base is Node for base in getmro(type(candidate)))
        for candidate in objs
    )
def evaluator(**kwargs):
    """
    Return a function to evaluate theano tensors.
    Works inside a `pymc3` model if a `point` is provided.
    Lazily imports `pymc3` to minimize overhead.
    """
    # Accepted kwargs: `point` (parameter values to evaluate at) and `model`
    # (a pymc3 model; falls back to the active model context when omitted).
    kwargs_point = kwargs.get("point", None)
    kwargs_model = kwargs.get("model", None)
    if kwargs_point is not None:
        # User provided a point: always evaluate in a pymc3 model context.
        import pymc3 as pm
        import pymc3_ext as pmx
        point = kwargs_point
        model = kwargs_model
        if model is None:
            model = pm.Model.get_context()
        def get_val(x):
            # Non-tensor inputs pass through unchanged.
            if is_tensor(x):
                return pmx.eval_in_model(x, model=model, point=point)
            else:
                return x
    else:
        # No point provided
        def get_val(x):
            if is_tensor(x):
                try:
                    # Try to directly evaluate it
                    return x.eval()
                except MissingInputError as e:
                    # That didn't work. Perhaps we are in a pymc3 model
                    # context, but the user didn't provide a point?
                    import pymc3 as pm
                    import pymc3_ext as pmx
                    try:
                        model = kwargs_model
                        if model is None:
                            model = pm.Model.get_context()
                    except TypeError:
                        # pm.Model.get_context raises TypeError when there is
                        # no active model context.
                        raise ValueError(
                            "Missing input for variable {}, and no pymc3 model found.".format(
                                x
                            )
                        )
                    # Warn the user that we're using the test point
                    warnings.warn(
                        "Detected pymc3 model context, but no point provided. "
                        "Evaluating at test_point."
                    )
                    return pmx.eval_in_model(
                        x, model=model, point=model.test_point
                    )
            else:
                return x
    return get_val
| true | true |
f7329c71f34366ec3044004d9d5ac67f51cb5dd3 | 9,231 | py | Python | paoding/utility/simulated_propagation.py | mark-h-meng/nnprune | 544a56a19382bde984c0e52d164eab278e0cd9ae | [
"MIT"
] | null | null | null | paoding/utility/simulated_propagation.py | mark-h-meng/nnprune | 544a56a19382bde984c0e52d164eab278e0cd9ae | [
"MIT"
] | null | null | null | paoding/utility/simulated_propagation.py | mark-h-meng/nnprune | 544a56a19382bde984c0e52d164eab278e0cd9ae | [
"MIT"
] | null | null | null | #!/usr/bin/python3
__author__ = "Mark H. Meng"
__copyright__ = "Copyright 2021, National University of S'pore and A*STAR"
__credits__ = ["G. Bai", "H. Guo", "S. G. Teo", "J. S. Dong"]
__license__ = "MIT"
import paoding.utility.interval_arithmetic as ia
import paoding.utility.utils as utils
import math
def calculate_bounds_of_output(model, intervals, loc):
    """Propagate per-neuron value intervals from layer ``loc`` to the output layer.

    ``intervals`` is a list of (lo, hi) tuples, one per neuron at layer ``loc``.
    Activation is applied at every intermediate layer (ReLU or, per the layer
    config, another activation), but not at the final output layer.
    Returns the propagated intervals at the output layer.
    Removed unused locals of the original (`total_pruned_count`,
    `num_next_neurons`); behavior is unchanged.
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    num_layers = len(model.layers)
    # Already at the last layer: nothing to propagate.
    if loc == num_layers - 1:
        return intervals
    propagated_next_layer_interval = None
    while loc < num_layers - 1:
        num_curr_neurons = len(w[loc + 1][0])
        relu_activation = g[loc]['activation'] == 'relu'
        if len(intervals) != num_curr_neurons:
            raise Exception("Error: input intervals are not in expected shape -",
                            num_curr_neurons, "expected, not", len(intervals))
        # No activation at the output layer
        if loc + 1 == num_layers - 1:
            propagated_next_layer_interval = ia.forward_propogation(intervals,
                                                                    w[loc + 1][0],
                                                                    w[loc + 1][1],
                                                                    activation=False)
        else:
            propagated_next_layer_interval = ia.forward_propogation(intervals,
                                                                    w[loc + 1][0],
                                                                    w[loc + 1][1],
                                                                    activation=True,
                                                                    relu_activation=relu_activation)
        intervals = propagated_next_layer_interval
        loc += 1
    return propagated_next_layer_interval
# Return the evaluation of the impact in a pair of real numbers as interval
def calculate_impact_of_pruning_next_layer(model, big_map, pruning_pairs, loc, cumulative_next_layer_intervals=None,
                                           kaggle_credit=False):
    """Accumulate, per next-layer neuron, the interval of the error ("Delta")
    introduced by applying ``pruning_pairs`` at layer ``loc``.

    Each pair (a, b) prunes unit ``a`` and merges it into unit ``b``; a single
    unit pruning is encoded as b == -1, giving Delta = -(a * w_a). The deltas
    surface at layer ``loc``+1; ``cumulative_next_layer_intervals`` (created
    zero-filled when None) is updated in place and returned.
    ``kaggle_credit`` is kept for interface compatibility; it is unused here.
    NOTE(review): the original header comment states Delta = (b - a) * w_a,
    but the code computes (a - b) -- confirm the intended sign.
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    next_layer_size = len(w[loc + 1][0][0])
    if cumulative_next_layer_intervals is None:
        cumulative_next_layer_intervals = [(0, 0) for _ in range(next_layer_size)]
    for (a, b) in pruning_pairs:
        # Definition interval of the pruned unit, precomputed in big_map.
        (a_lo, a_hi) = big_map[loc][a]
        w_a = w[loc + 1][0][a]
        # Bug fix: the original compared sizes with 'is not', which tests
        # object identity; for ints > 256 it raised even when sizes matched.
        if len(w_a) != next_layer_size:
            raise Exception("Inconsistent size of parameters")
        if b != -1:
            # Pair pruning: Delta interval is (a - b) scaled by each outgoing weight.
            (b_lo, b_hi) = big_map[loc][b]
            (a_minus_b_lo, a_minus_b_hi) = ia.interval_minus((a_lo, a_hi), (b_lo, b_hi))
            impact_to_next_layer = [ia.interval_scale((a_minus_b_lo, a_minus_b_hi), k) for k in w_a]
        else:
            # Single-unit pruning: Delta = -(a * w_a).
            impact_to_next_layer = [ia.interval_scale((a_lo, a_hi), -1 * k) for k in w_a]
        for index, interval in enumerate(cumulative_next_layer_intervals):
            cumulative_next_layer_intervals[index] = ia.interval_add(interval, impact_to_next_layer[index])
    return cumulative_next_layer_intervals
def get_definition_map(model, definition_dict=None, input_interval=(0, 1)):
    """Build a per-layer map of definition (reachable-value) intervals for all
    fully-connected neurons via naive interval propagation from the input.

    Returns a dict of dicts: definition_dict[layer][unit] -> (lo, hi).
    NOTE(review): a caller-supplied entry for the first FC layer is
    overwritten below -- confirm that is intended.
    """
    # First locate the dense (FC) layers, starting from the input layer/flatten layer until the second last layer
    ## Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    num_layers = len(model.layers)
    layer_idx = 0
    starting_layer_index = -1
    ending_layer_index = -1
    while layer_idx < num_layers - 1:
        if "dense" in model.layers[layer_idx].name:
            if starting_layer_index < 0:
                starting_layer_index = layer_idx - 1
            if ending_layer_index < layer_idx:
                ending_layer_index = layer_idx
        layer_idx += 1
    if (starting_layer_index < 0) or (ending_layer_index < 0):
        raise Exception("Fully connected layers not identified")
    # Now let's create a hash table as dictionary to store all definition intervals of FC neurons
    if definition_dict is None:
        definition_dict = {}
    # Inputs of the first FC layer are bounded by the (normalized) input interval.
    definition_dict[starting_layer_index] = {}
    for i in range(0, len(w[starting_layer_index + 1][0])):
        definition_dict[starting_layer_index][i] = input_interval
    for i in range(starting_layer_index + 1, ending_layer_index + 1):
        num_prev_neurons = len(w[i][0])
        num_curr_neurons = len(w[i][0][0])
        if i not in definition_dict.keys():
            definition_dict[i] = {}
        curr_activation = g[i]['activation']
        for m in range(0, num_curr_neurons):
            # Affine combination of the previous layer's intervals:
            # sum_n w[n][m] * I_n + bias.
            (sum_lo, sum_hi) = (0, 0)
            for n in range(0, num_prev_neurons):
                affine_w_x = ia.interval_scale(definition_dict[i-1][n], w[i][0][n][m])
                (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), affine_w_x)
            bias = (w[i][1][m], w[i][1][m])
            (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), bias)
            if curr_activation == 'relu':
                # ReLU: lower bound clamped to zero (conservative even if sum_lo > 0).
                definition_dict[i][m] = (0, sum_hi)
            else:  # Assume it is sigmoid
                sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
                sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
                definition_dict[i][m] = (sum_lo, sum_hi)
    return definition_dict
# DEPRECATED - Replaced by get_definition_map
def get_definition_interval(unit_index, layer_index, parameters, relu_activation=True, kaggle_credit=False):
    """Estimate the output interval of one hidden unit by scaling the input
    interval through the unit's incoming weights and bias (deprecated).

    ``kaggle_credit`` selects a wider input interval of (-5, 5) instead of
    the default normalized (0, 1).
    The original duplicated identical code in two branches (first FC layer
    vs. deeper layers) and ended with an unreachable ``return None``; the
    branches are merged here with behavior unchanged.
    """
    if kaggle_credit:
        input_definition_interval = (-5, 5)
    else:
        input_definition_interval = (0, 1)
    # Incoming weights of this unit and its bias.
    weights = [parameters[layer_index][0][j][unit_index]
               for j in range(0, len(parameters[layer_index][0]))]
    bias = parameters[layer_index][1][unit_index]
    (sum_lo, sum_hi) = ia.interval_sum([ia.interval_scale(input_definition_interval, w) for w in weights])
    (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), (bias, bias))
    if relu_activation:
        # ReLU clamps negative bounds at zero.
        sum_hi = max(sum_hi, 0)
        sum_lo = max(sum_lo, 0)
    else:
        # Otherwise assume sigmoid activation.
        sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
        sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
    return (sum_lo, sum_hi)
| 43.748815 | 129 | 0.591702 |
__author__ = "Mark H. Meng"
__copyright__ = "Copyright 2021, National University of S'pore and A*STAR"
__credits__ = ["G. Bai", "H. Guo", "S. G. Teo", "J. S. Dong"]
__license__ = "MIT"
import paoding.utility.interval_arithmetic as ia
import paoding.utility.utils as utils
import math
def calculate_bounds_of_output(model, intervals, loc):
    """Propagate per-neuron value intervals from layer ``loc`` to the output layer.

    ``intervals`` is a list of (lo, hi) tuples, one per neuron at layer ``loc``.
    Activation is applied at every intermediate layer (ReLU or, per the layer
    config, another activation), but not at the final output layer.
    Returns the propagated intervals at the output layer.
    Removed unused locals of the original (`total_pruned_count`,
    `num_next_neurons`); behavior is unchanged.
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    num_layers = len(model.layers)
    # Already at the last layer: nothing to propagate.
    if loc == num_layers - 1:
        return intervals
    propagated_next_layer_interval = None
    while loc < num_layers - 1:
        num_curr_neurons = len(w[loc + 1][0])
        relu_activation = g[loc]['activation'] == 'relu'
        if len(intervals) != num_curr_neurons:
            raise Exception("Error: input intervals are not in expected shape -",
                            num_curr_neurons, "expected, not", len(intervals))
        # No activation at the output layer
        if loc + 1 == num_layers - 1:
            propagated_next_layer_interval = ia.forward_propogation(intervals,
                                                                    w[loc + 1][0],
                                                                    w[loc + 1][1],
                                                                    activation=False)
        else:
            propagated_next_layer_interval = ia.forward_propogation(intervals,
                                                                    w[loc + 1][0],
                                                                    w[loc + 1][1],
                                                                    activation=True,
                                                                    relu_activation=relu_activation)
        intervals = propagated_next_layer_interval
        loc += 1
    return propagated_next_layer_interval
# Return the evaluation of the impact in a pair of real numbers as interval
def calculate_impact_of_pruning_next_layer(model, big_map, pruning_pairs, loc, cumulative_next_layer_intervals=None,
                                           kaggle_credit=False):
    """Accumulate, per next-layer neuron, the interval of the error ("Delta")
    introduced by applying ``pruning_pairs`` at layer ``loc``.

    Each pair (a, b) prunes unit ``a`` and merges it into unit ``b``; a single
    unit pruning is encoded as b == -1, giving Delta = -(a * w_a). The deltas
    surface at layer ``loc``+1; ``cumulative_next_layer_intervals`` (created
    zero-filled when None) is updated in place and returned.
    ``kaggle_credit`` is kept for interface compatibility; it is unused here.
    NOTE(review): the original header comment states Delta = (b - a) * w_a,
    but the code computes (a - b) -- confirm the intended sign.
    """
    # Load the parameters and configuration of the input model
    (w, g) = utils.load_param_and_config(model)
    next_layer_size = len(w[loc + 1][0][0])
    if cumulative_next_layer_intervals is None:
        cumulative_next_layer_intervals = [(0, 0) for _ in range(next_layer_size)]
    for (a, b) in pruning_pairs:
        # Definition interval of the pruned unit, precomputed in big_map.
        (a_lo, a_hi) = big_map[loc][a]
        w_a = w[loc + 1][0][a]
        # Bug fix: the original compared sizes with 'is not', which tests
        # object identity; for ints > 256 it raised even when sizes matched.
        if len(w_a) != next_layer_size:
            raise Exception("Inconsistent size of parameters")
        if b != -1:
            # Pair pruning: Delta interval is (a - b) scaled by each outgoing weight.
            (b_lo, b_hi) = big_map[loc][b]
            (a_minus_b_lo, a_minus_b_hi) = ia.interval_minus((a_lo, a_hi), (b_lo, b_hi))
            impact_to_next_layer = [ia.interval_scale((a_minus_b_lo, a_minus_b_hi), k) for k in w_a]
        else:
            # Single-unit pruning: Delta = -(a * w_a).
            impact_to_next_layer = [ia.interval_scale((a_lo, a_hi), -1 * k) for k in w_a]
        for index, interval in enumerate(cumulative_next_layer_intervals):
            cumulative_next_layer_intervals[index] = ia.interval_add(interval, impact_to_next_layer[index])
    return cumulative_next_layer_intervals
def get_definition_map(model, definition_dict=None, input_interval=(0, 1)):
    """Compute definition (reachable-value) intervals for every neuron in the
    fully connected part of ``model`` using interval arithmetic.

    Args:
        model: Keras-style model whose dense (FC) layers are analysed.
        definition_dict: Optional dict to populate; a fresh one is created when
            ``None`` (avoids the mutable-default pitfall).
        input_interval: Interval assumed for every unit feeding the first FC
            layer.

    Returns:
        Nested dict mapping ``layer_index -> unit_index -> (lo, hi)``.

    Raises:
        Exception: If no dense layers can be located in the model.
    """
    # Load the parameters (w) and per-layer configuration (g) of the model.
    (w, g) = utils.load_param_and_config(model)

    # Locate the span of dense (FC) layers: from the layer right before the
    # first dense layer up to the last dense layer before the output layer.
    num_layers = len(model.layers)
    layer_idx = 0
    starting_layer_index = -1
    ending_layer_index = -1
    while layer_idx < num_layers - 1:
        if "dense" in model.layers[layer_idx].name:
            if starting_layer_index < 0:
                starting_layer_index = layer_idx - 1
            if ending_layer_index < layer_idx:
                ending_layer_index = layer_idx
        layer_idx += 1
    if (starting_layer_index < 0) or (ending_layer_index < 0):
        raise Exception("Fully connected layers not identified")

    # Seed the map: every unit feeding the first FC layer is defined on the
    # raw input interval.
    if definition_dict is None:
        definition_dict = {}
    definition_dict[starting_layer_index] = {}
    for i in range(0, len(w[starting_layer_index + 1][0])):
        definition_dict[starting_layer_index][i] = input_interval

    # Propagate intervals layer by layer: affine map followed by activation.
    for i in range(starting_layer_index + 1, ending_layer_index + 1):
        num_prev_neurons = len(w[i][0])
        num_curr_neurons = len(w[i][0][0])
        if i not in definition_dict:
            definition_dict[i] = {}
        curr_activation = g[i]['activation']
        for m in range(0, num_curr_neurons):
            (sum_lo, sum_hi) = (0, 0)
            for n in range(0, num_prev_neurons):
                affine_w_x = ia.interval_scale(definition_dict[i-1][n], w[i][0][n][m])
                (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), affine_w_x)
            bias = (w[i][1][m], w[i][1][m])
            (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), bias)
            if curr_activation == 'relu':
                # Exact ReLU image of [lo, hi] is [max(0, lo), max(0, hi)].
                # The previous code stored (0, sum_hi), which is an invalid
                # interval whenever sum_hi < 0; clamping both ends also makes
                # this consistent with get_definition_interval below.
                definition_dict[i][m] = (max(0, sum_lo), max(0, sum_hi))
            else:
                # Sigmoid is monotonically increasing, so mapping the two
                # endpoints yields the exact image interval.
                sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
                sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
                definition_dict[i][m] = (sum_lo, sum_hi)
    return definition_dict
def get_definition_interval(unit_index, layer_index, parameters, relu_activation=True, kaggle_credit=False):
    """Return the definition interval ``(lo, hi)`` of one unit of one layer.

    Args:
        unit_index: Index of the unit within layer ``layer_index``.
        layer_index: Index of the layer holding the unit.
        parameters: Per-layer parameter list; ``parameters[i][0]`` is the
            weight matrix into layer ``i`` and ``parameters[i][1]`` its biases.
        relu_activation: When True the unit uses ReLU, otherwise sigmoid.
        kaggle_credit: When True, inputs are assumed standardised to [-5, 5]
            (credit-scoring data) instead of normalised to [0, 1].

    Returns:
        Tuple ``(lo, hi)`` bounding the unit's post-activation output.
    """
    if kaggle_credit:
        input_definition_interval = (-5, 5)
    else:
        input_definition_interval = (0, 1)
    # NOTE(review): the original code branched on
    # ``layer_index == 1 or (layer_index > 1 and not parameters[layer_index-1])``
    # but both branches were byte-for-byte identical, so the test (and the
    # unreachable trailing ``return None``) has been removed.
    weights = [parameters[layer_index][0][j][unit_index] for j in range(0, len(parameters[layer_index][0]))]
    bias = parameters[layer_index][1][unit_index]
    (sum_lo, sum_hi) = ia.interval_sum([ia.interval_scale(input_definition_interval, w) for w in weights])
    (sum_lo, sum_hi) = ia.interval_add((sum_lo, sum_hi), (bias, bias))
    if relu_activation:
        # Clamp both ends into the ReLU range [0, +inf).
        if sum_hi < 0:
            sum_hi = 0
        if sum_lo < 0:
            sum_lo = 0
    else:
        # Sigmoid is monotone, so mapping the endpoints gives the exact image.
        sum_hi = 1 / (1 + math.exp(-1 * sum_hi))
        sum_lo = 1 / (1 + math.exp(-1 * sum_lo))
    return (sum_lo, sum_hi)
| true | true |
f7329cbecdc258302fcfe226d9b98c2ca57d946c | 1,709 | py | Python | app.py | stefanorosss/pytorch-CycleGAN-and-pix2pix | 88c3f3f729ebef6fac5ddf8c60a21cf51e6402f4 | [
"BSD-3-Clause"
] | null | null | null | app.py | stefanorosss/pytorch-CycleGAN-and-pix2pix | 88c3f3f729ebef6fac5ddf8c60a21cf51e6402f4 | [
"BSD-3-Clause"
] | null | null | null | app.py | stefanorosss/pytorch-CycleGAN-and-pix2pix | 88c3f3f729ebef6fac5ddf8c60a21cf51e6402f4 | [
"BSD-3-Clause"
] | null | null | null | import json
import sagemaker
import os
from s3_conx import *
from sagemaker.pytorch import PyTorch
def iterate_to_s3(path):
    """Recursively walk ``path`` and upload every regular file to S3.

    Args:
        path: File path (uploaded directly) or directory path (recursed into).
    """
    if os.path.isdir(path):
        for entry in os.listdir(path):
            # os.path.join replaces the original ``path + entry`` string
            # concatenation, which produced wrong paths (``dirsub`` instead of
            # ``dir/sub``) unless the caller passed a trailing separator —
            # and the recursive call never re-added one.
            iterate_to_s3(os.path.join(path, entry))
    else:
        s3.upload_file_to_s3(path)
if __name__ == '__main__':
    # Initializes a SageMaker session, which holds context data
    # (region, credentials, default bucket) for the training job.
    sagemaker_session = sagemaker.Session()
    # IAM role SageMaker assumes to access S3/ECR on our behalf.
    role = sagemaker_session.get_caller_identity_arn()
    local_path = 'checkpoints'
    estimator = PyTorch(
        # name of the runnable script containing __main__ function (entrypoint)
        entry_point='train.py',
        # path of the folder containing training code. It could also contain a
        # requirements.txt file with all the dependencies that needs
        # to be installed before running
        source_dir='.',
        framework_version='1.5.0',
        train_instance_count=1,
        train_instance_type='ml.p2.xlarge',
        #train_instance_type='ml.m4.xlarge',
        role=role,
        # directory inside the training container where checkpoints are written
        checkpoint_local_path = local_path+'/',
        # these hyperparameters are passed to the main script as arguments and
        # can be overridden when fine tuning the algorithm
        hyperparameters={
            'n_epochs': 200 ,
            'n_epochs_decay': 1000,
            'lr':0.0002,
            'dataroot':'datasets/olracle/train',
            'checkpoints_dir':'checkpoints',
            'name':'olracle-pix2pix',
            'model':'pix2pix',
            'print_freq':480,
            'display_freq':480,
            'input_nc':1,
            'output_nc':1,
            'num_threads':0,
            'dataset_mode':'aligned',
            'save_epoch_freq':50,
            'batch_size': 4,
        })
estimator.fit() | 32.245283 | 77 | 0.634874 | import json
import sagemaker
import os
from s3_conx import *
from sagemaker.pytorch import PyTorch
def iterate_to_s3(path):
    """Recursively walk ``path`` and upload every regular file to S3.

    Args:
        path: File path (uploaded directly) or directory path (recursed into).
    """
    if os.path.isdir(path):
        for entry in os.listdir(path):
            # os.path.join replaces the original ``path + entry`` string
            # concatenation, which dropped the path separator and broke
            # recursion into subdirectories.
            iterate_to_s3(os.path.join(path, entry))
    else:
        s3.upload_file_to_s3(path)
if __name__ == '__main__':
    # SageMaker session holds context (region, credentials, default bucket).
    sagemaker_session = sagemaker.Session()
    # IAM role SageMaker assumes to access S3/ECR on our behalf.
    role = sagemaker_session.get_caller_identity_arn()
    local_path = 'checkpoints'
    estimator = PyTorch(
        # entrypoint script; source_dir is shipped to the training container
        entry_point='train.py',
        source_dir='.',
        framework_version='1.5.0',
        train_instance_count=1,
        train_instance_type='ml.p2.xlarge',
        role=role,
        # directory inside the training container where checkpoints are written
        checkpoint_local_path = local_path+'/',
        # forwarded to train.py as command-line arguments
        hyperparameters={
            'n_epochs': 200 ,
            'n_epochs_decay': 1000,
            'lr':0.0002,
            'dataroot':'datasets/olracle/train',
            'checkpoints_dir':'checkpoints',
            'name':'olracle-pix2pix',
            'model':'pix2pix',
            'print_freq':480,
            'display_freq':480,
            'input_nc':1,
            'output_nc':1,
            'num_threads':0,
            'dataset_mode':'aligned',
            'save_epoch_freq':50,
            'batch_size': 4,
        })
estimator.fit() | true | true |
f7329cdf3349fca6215dae51ac706c10f91549e5 | 11,991 | py | Python | fattureincloud_python_sdk/model/client_type.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | 2 | 2022-02-17T08:33:17.000Z | 2022-03-22T09:27:00.000Z | fattureincloud_python_sdk/model/client_type.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | null | null | null | fattureincloud_python_sdk/model/client_type.py | fattureincloud/fattureincloud-python-sdk | f3a40fac345751014ea389680efdaef90f03bac1 | [
"MIT"
] | null | null | null | """
Fatture in Cloud API v2 - API Reference
Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST, and makes possible to interact with the user related data prior authorization via OAuth2 protocol. # noqa: E501
The version of the OpenAPI document: 2.0.15
Contact: info@fattureincloud.it
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fattureincloud_python_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
class ClientType(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Accepted wire values for ``value``; 'None' appears because the OpenAPI
    # schema declares this enum as nullable.
    allowed_values = {
        ('value',): {
            'None': None,
            'COMPANY': "company",
            'PERSON': "person",
            'PA': "pa",
            'CONDO': "condo",
        },
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = True

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),
        }

    @cached_property
    def discriminator():
        return None


    attribute_map = {}

    read_only_vars = set()

    _composed_schemas = None

    # Internal bookkeeping attributes used by the base model machinery; these
    # are never treated as schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """ClientType - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): Client type.., must be one of ["company", "person", "pa", "condo", ]  # noqa: E501

        Keyword Args:
            value (str): Client type.., must be one of ["company", "person", "pa", "condo", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # The single enum value may arrive positionally or as ``value=``.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        # Standard ModelSimple bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """ClientType - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): Client type.., must be one of ["company", "person", "pa", "condo", ]  # noqa: E501

        Keyword Args:
            value (str): Client type.., must be one of ["company", "person", "pa", "condo", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # Bypass __init__ so deserialization can configure the instance itself.
        self = super(OpenApiModel, cls).__new__(cls)

        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        # Standard ModelSimple bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        return self
| 41.780488 | 278 | 0.558836 |
import re
import sys
from fattureincloud_python_sdk.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
class ClientType(ModelSimple):
    """Auto-generated (OpenAPI Generator) enum-style model for a client's
    ``type`` field. Holds a single ``value`` attribute restricted to the
    allowed enum values below. Do not edit manually."""

    # Accepted wire values for ``value``; 'None' appears because the OpenAPI
    # schema declares this enum as nullable.
    allowed_values = {
        ('value',): {
            'None': None,
            'COMPANY': "company",
            'PERSON': "person",
            'PA': "pa",
            'CONDO': "condo",
        },
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = True

    @cached_property
    def openapi_types():
        """Return the {attribute name: accepted types} map (computed lazily so
        self-referencing property types resolve after class creation)."""
        return {
            'value': (str,),
        }

    @cached_property
    def discriminator():
        """No discriminator is defined for this model."""
        return None

    attribute_map = {}

    read_only_vars = set()

    _composed_schemas = None

    # Internal bookkeeping attributes used by the base model machinery; these
    # are never treated as schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Build a ClientType; the enum value may be passed positionally or as
        ``value=``, alongside the standard ``_``-prefixed model options."""
        _path_to_item = kwargs.pop('_path_to_item', ())
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Standard ModelSimple bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialization constructor: same contract as ``__init__`` but
        bypasses it via ``__new__`` and returns the configured instance."""
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        # Standard ModelSimple bookkeeping state.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self
| true | true |
f7329d67eaf90a21108de20975c0953c9d7739a7 | 451 | py | Python | yt_dlp/__main__.py | RobinD42/yt-dlc | aae273ded871caac1995381033a5b7ecaf4a526b | [
"Unlicense"
] | 1 | 2021-02-24T00:07:32.000Z | 2021-02-24T00:07:32.000Z | yt_dlp/__main__.py | RobinD42/yt-dlc | aae273ded871caac1995381033a5b7ecaf4a526b | [
"Unlicense"
] | null | null | null | yt_dlp/__main__.py | RobinD42/yt-dlc | aae273ded871caac1995381033a5b7ecaf4a526b | [
"Unlicense"
] | 1 | 2021-09-10T18:22:00.000Z | 2021-09-10T18:22:00.000Z | #!/usr/bin/env python
from __future__ import unicode_literals
# Execute with
# $ python yt_dlp/__main__.py (2.6+)
# $ python -m yt_dlp (2.7+)
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
    # direct call of __main__.py: make the package importable by prepending
    # the repository root (two directories up) to sys.path
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import yt_dlp
if __name__ == '__main__':
    yt_dlp.main()
| 22.55 | 62 | 0.682927 |
from __future__ import unicode_literals
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
    # direct call of __main__.py: make the package importable by prepending
    # the repository root (two directories up) to sys.path
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import yt_dlp
if __name__ == '__main__':
    yt_dlp.main()
| true | true |
f7329fb55bc1abb42bae44f1178eb576c6e018e7 | 1,686 | py | Python | adventofcode2017/08.py | matslindh/codingchallenges | b6792808b03ea07304fda7e74c874c2c4d200dac | [
"MIT"
] | 2 | 2016-12-28T09:40:07.000Z | 2020-12-08T13:58:15.000Z | adventofcode2017/08.py | matslindh/codingchallenges | b6792808b03ea07304fda7e74c874c2c4d200dac | [
"MIT"
] | null | null | null | adventofcode2017/08.py | matslindh/codingchallenges | b6792808b03ea07304fda7e74c874c2c4d200dac | [
"MIT"
] | null | null | null | import operator
def execute(program):
    """Run a register-instruction *program* (list of instruction dicts).

    Each instruction conditionally applies ``op(reg, val)`` to its target
    register when ``op_check(reg_check, op_cmp)`` holds; registers default
    to 0 on first mention.

    Returns:
        Tuple of (final register map, highest value any register held at any
        point during execution — 0 if no update ever exceeded zero).
    """
    regs = {}
    highest = 0
    for step in program:
        guard_reg = step['reg_check']
        target = step['reg']
        regs.setdefault(guard_reg, 0)
        regs.setdefault(target, 0)
        if step['op_check'](regs[guard_reg], step['op_cmp']):
            updated = step['op'](regs[target], step['val'])
            regs[target] = updated
            highest = max(highest, updated)
    return regs, highest
def parse_program(f):
    """Parse an instruction file into a list of instruction dicts.

    Each line has the form ``<reg> <inc|dec> <val> if <reg> <cmp> <val>``;
    operation and comparison words are resolved to ``operator`` callables.

    Args:
        f: Path of the instruction file.

    Returns:
        List of dicts with keys reg, op, val, reg_check, op_check, op_cmp.
    """
    operators = {
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
    }
    instructions = {
        'inc': operator.add,
        'dec': operator.sub,
    }
    program = []
    # ``with`` closes the file deterministically; the original
    # ``open(f).readlines()`` leaked the file handle.
    with open(f) as source:
        for line in source:
            reg, op, val, _, reg_check, op_check, op_cmp = line.strip().split(' ')
            program.append({
                'reg': reg,
                'op': instructions[op],
                'val': int(val),
                'reg_check': reg_check,
                'op_check': operators[op_check],
                'op_cmp': int(op_cmp)
            })
    return program
def execute_program_file(f):
    """Parse the instruction file *f* and execute it.

    Returns:
        Tuple of (final register map, highest value seen at any time).
    """
    return execute(parse_program(f))
def execute_and_get_largest_values(f):
    """Run the program in file *f*.

    Returns:
        Tuple of (largest final register value, largest value any register
        held at any point during execution).
    """
    final_registers, peak = execute_program_file(f)
    largest_final = max(final_registers.values())
    return largest_final, peak
def test_execute_and_get_largest_values():
    """Sample input yields largest final value 1 and all-time peak 10.

    The original ``assert 1, 10 == execute...`` asserted the constant ``1``
    (always true) and used ``10 == ...`` as the assertion *message*, so the
    test could never fail; this version actually compares the result.
    """
    assert execute_and_get_largest_values('input/dec08_test') == (1, 10)
if __name__ == "__main__":
print(execute_and_get_largest_values('input/dec08')) | 23.746479 | 88 | 0.570581 | import operator
def execute(program):
    """Run a register-instruction *program* (list of instruction dicts).

    Returns:
        Tuple of (final register map, highest value any register held at any
        point during execution — 0 if no update ever exceeded zero).
    """
    regs = {}
    highest = 0
    for step in program:
        guard_reg = step['reg_check']
        target = step['reg']
        regs.setdefault(guard_reg, 0)
        regs.setdefault(target, 0)
        if step['op_check'](regs[guard_reg], step['op_cmp']):
            updated = step['op'](regs[target], step['val'])
            regs[target] = updated
            highest = max(highest, updated)
    return regs, highest
def parse_program(f):
    """Parse an instruction file into a list of instruction dicts.

    Each line has the form ``<reg> <inc|dec> <val> if <reg> <cmp> <val>``.

    Args:
        f: Path of the instruction file.

    Returns:
        List of dicts with keys reg, op, val, reg_check, op_check, op_cmp.
    """
    operators = {
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
    }
    instructions = {
        'inc': operator.add,
        'dec': operator.sub,
    }
    program = []
    # ``with`` closes the file deterministically; the original
    # ``open(f).readlines()`` leaked the file handle.
    with open(f) as source:
        for line in source:
            reg, op, val, _, reg_check, op_check, op_cmp = line.strip().split(' ')
            program.append({
                'reg': reg,
                'op': instructions[op],
                'val': int(val),
                'reg_check': reg_check,
                'op_check': operators[op_check],
                'op_cmp': int(op_cmp)
            })
    return program
def execute_program_file(f):
    """Parse the instruction file *f* and execute it, returning the
    (final register map, all-time peak) pair from ``execute``."""
    return execute(parse_program(f))
def execute_and_get_largest_values(f):
    """Run the program in file *f* and return (largest final register value,
    largest value any register held at any point)."""
    final_registers, peak = execute_program_file(f)
    largest_final = max(final_registers.values())
    return largest_final, peak
def test_execute_and_get_largest_values():
    """Sample input yields largest final value 1 and all-time peak 10.

    Fixes the original ``assert 1, 10 == ...``, which asserted the constant
    ``1`` with ``10 == ...`` as the message and therefore never failed.
    """
    assert execute_and_get_largest_values('input/dec08_test') == (1, 10)
if __name__ == "__main__":
print(execute_and_get_largest_values('input/dec08')) | true | true |
f732a1266d5c853c67006f483f36e4ebce514789 | 3,923 | py | Python | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 4 | 2021-08-21T06:21:45.000Z | 2021-12-27T05:27:43.000Z | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 51 | 2021-07-28T14:48:04.000Z | 2022-03-25T02:35:40.000Z | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 2 | 2021-06-23T19:53:17.000Z | 2022-03-27T20:10:27.000Z | import nidmm
import pytest
from nitsm.codemoduleapi import SemiconductorModuleContext
from nitsm.pinquerycontexts import PinQueryContext
@pytest.fixture
def simulated_nidmm_sessions(standalone_tsm_context):
    """Open one simulated NI-DMM session per pin-map instrument, register each
    with the TSM context, yield the sessions, and close them on teardown."""
    instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
    sessions = [
        nidmm.Session(instrument_name, options={"Simulate": True})
        for instrument_name in instrument_names
    ]
    # Register each simulated session so pin queries can resolve to it.
    for instrument_name, session in zip(instrument_names, sessions):
        standalone_tsm_context.set_nidmm_session(instrument_name, session)
    yield sessions
    # Teardown: close every simulated session.
    for session in sessions:
        session.close()
@pytest.mark.pin_map("nidmm.pinmap")
class TestNIDMM:
    """Exercises the TSM NI-DMM session-management API against nidmm.pinmap."""
    # Names declared in the nidmm.pinmap file selected by the class marker.
    pin_map_instruments = ["DMM1", "DMM2", "DMM3"]
    pin_map_dut_pins = ["DUTPin1"]
    pin_map_system_pins = ["SystemPin1"]
    def test_get_all_nidmm_instrument_names(
        self, standalone_tsm_context: SemiconductorModuleContext
    ):
        """Every pin-map instrument name is reported, each as a str."""
        instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
        assert isinstance(instrument_names, tuple)
        assert len(instrument_names) == len(self.pin_map_instruments)
        for instrument_name in instrument_names:
            assert isinstance(instrument_name, str)
            assert instrument_name in self.pin_map_instruments
    def test_set_nidmm_session(self, standalone_tsm_context: SemiconductorModuleContext):
        """Registering a session stores it in the module-level session table."""
        instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
        for instrument_name in instrument_names:
            with nidmm.Session(instrument_name, options={"Simulate": True}) as session:
                standalone_tsm_context.set_nidmm_session(instrument_name, session)
                assert SemiconductorModuleContext._sessions[id(session)] is session
    def test_get_all_nidmm_sessions(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """All sessions registered by the fixture are returned."""
        queried_sessions = standalone_tsm_context.get_all_nidmm_sessions()
        assert isinstance(queried_sessions, tuple)
        assert len(queried_sessions) == len(simulated_nidmm_sessions)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
    def test_pin_to_nidmm_session(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A single system pin resolves to one registered nidmm session."""
        pin_query_context, queried_session = standalone_tsm_context.pin_to_nidmm_session(
            "SystemPin1"
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_session, nidmm.Session)
        assert queried_session in simulated_nidmm_sessions
    def test_pins_to_nidmm_sessions_single_pin(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A pin group resolves to a tuple of registered nidmm sessions."""
        pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
            "PinGroup1"
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_sessions, tuple)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
    def test_pins_to_nidmm_sessions_multiple_pins(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A mixed list of DUT and system pins resolves to registered sessions."""
        all_pins = self.pin_map_dut_pins + self.pin_map_system_pins
        pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
            all_pins
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_sessions, tuple)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
| 44.579545 | 92 | 0.748917 | import nidmm
import pytest
from nitsm.codemoduleapi import SemiconductorModuleContext
from nitsm.pinquerycontexts import PinQueryContext
@pytest.fixture
def simulated_nidmm_sessions(standalone_tsm_context):
    """Open one simulated NI-DMM session per pin-map instrument, register each
    with the TSM context, yield the sessions, and close them on teardown."""
    instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
    sessions = [
        nidmm.Session(instrument_name, options={"Simulate": True})
        for instrument_name in instrument_names
    ]
    # Register each simulated session so pin queries can resolve to it.
    for instrument_name, session in zip(instrument_names, sessions):
        standalone_tsm_context.set_nidmm_session(instrument_name, session)
    yield sessions
    # Teardown: close every simulated session.
    for session in sessions:
        session.close()
@pytest.mark.pin_map("nidmm.pinmap")
class TestNIDMM:
    """Exercises the TSM NI-DMM session-management API against nidmm.pinmap."""
    # Names declared in the nidmm.pinmap file selected by the class marker.
    pin_map_instruments = ["DMM1", "DMM2", "DMM3"]
    pin_map_dut_pins = ["DUTPin1"]
    pin_map_system_pins = ["SystemPin1"]
    def test_get_all_nidmm_instrument_names(
        self, standalone_tsm_context: SemiconductorModuleContext
    ):
        """Every pin-map instrument name is reported, each as a str."""
        instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
        assert isinstance(instrument_names, tuple)
        assert len(instrument_names) == len(self.pin_map_instruments)
        for instrument_name in instrument_names:
            assert isinstance(instrument_name, str)
            assert instrument_name in self.pin_map_instruments
    def test_set_nidmm_session(self, standalone_tsm_context: SemiconductorModuleContext):
        """Registering a session stores it in the module-level session table."""
        instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
        for instrument_name in instrument_names:
            with nidmm.Session(instrument_name, options={"Simulate": True}) as session:
                standalone_tsm_context.set_nidmm_session(instrument_name, session)
                assert SemiconductorModuleContext._sessions[id(session)] is session
    def test_get_all_nidmm_sessions(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """All sessions registered by the fixture are returned."""
        queried_sessions = standalone_tsm_context.get_all_nidmm_sessions()
        assert isinstance(queried_sessions, tuple)
        assert len(queried_sessions) == len(simulated_nidmm_sessions)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
    def test_pin_to_nidmm_session(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A single system pin resolves to one registered nidmm session."""
        pin_query_context, queried_session = standalone_tsm_context.pin_to_nidmm_session(
            "SystemPin1"
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_session, nidmm.Session)
        assert queried_session in simulated_nidmm_sessions
    def test_pins_to_nidmm_sessions_single_pin(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A pin group resolves to a tuple of registered nidmm sessions."""
        pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
            "PinGroup1"
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_sessions, tuple)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
    def test_pins_to_nidmm_sessions_multiple_pins(
        self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
    ):
        """A mixed list of DUT and system pins resolves to registered sessions."""
        all_pins = self.pin_map_dut_pins + self.pin_map_system_pins
        pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
            all_pins
        )
        assert isinstance(pin_query_context, PinQueryContext)
        assert isinstance(queried_sessions, tuple)
        for queried_session in queried_sessions:
            assert isinstance(queried_session, nidmm.Session)
            assert queried_session in simulated_nidmm_sessions
| true | true |
f732a1540ce9c9c39b99ed7b642be2f0ab715075 | 3,774 | py | Python | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/flink.py | tracehh/dolphinscheduler | d6fe1ccacf79d9ade3a371a0f520d7e224b40c84 | [
"Apache-2.0"
] | 2,086 | 2021-04-15T20:28:24.000Z | 2022-03-31T22:30:49.000Z | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/flink.py | tracehh/dolphinscheduler | d6fe1ccacf79d9ade3a371a0f520d7e224b40c84 | [
"Apache-2.0"
] | 3,789 | 2021-04-15T16:00:32.000Z | 2022-03-31T13:38:53.000Z | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/tasks/flink.py | tracehh/dolphinscheduler | d6fe1ccacf79d9ade3a371a0f520d7e224b40c84 | [
"Apache-2.0"
] | 1,170 | 2021-04-16T06:40:24.000Z | 2022-03-31T22:30:51.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Flink."""
from typing import Dict, Optional
from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task
from pydolphinscheduler.java_gateway import launch_gateway
class ProgramType(str):
    """Kind of program a Flink task executes.

    Currently limited to ``JAVA``, ``SCALA`` and ``PYTHON``.
    """

    JAVA = "JAVA"
    SCALA = "SCALA"
    PYTHON = "PYTHON"
class FlinkVersion(str):
    """Flink version selector: ``LOW_VERSION`` (<1.10) or ``HIGHT_VERSION`` (>=1.10)."""

    LOW_VERSION = "<1.10"
    HIGHT_VERSION = ">=1.10"
class DeployMode(str):
    """Flink deploy mode: either ``LOCAL`` or ``CLUSTER``."""

    LOCAL = "local"
    CLUSTER = "cluster"
class Flink(Task):
    """Flink task: describes how a Flink job is submitted to dolphinscheduler."""

    # Attribute names serialized into the task-definition payload on top of
    # the common ones handled by the Task base class.
    _task_custom_attr = {
        "main_class",
        "main_jar",
        "deploy_mode",
        "flink_version",
        "slot",
        "task_manager",
        "job_manager_memory",
        "task_manager_memory",
        "app_name",
        "program_type",
        "parallelism",
        "main_args",
        "others",
    }

    def __init__(
        self,
        name: str,
        main_class: str,
        main_package: str,
        program_type: Optional[ProgramType] = ProgramType.SCALA,
        deploy_mode: Optional[DeployMode] = DeployMode.CLUSTER,
        flink_version: Optional[FlinkVersion] = FlinkVersion.LOW_VERSION,
        app_name: Optional[str] = None,
        job_manager_memory: Optional[str] = "1G",
        task_manager_memory: Optional[str] = "2G",
        slot: Optional[int] = 1,
        task_manager: Optional[int] = 2,
        parallelism: Optional[int] = 1,
        main_args: Optional[str] = None,
        others: Optional[str] = None,
        *args,
        **kwargs
    ):
        super().__init__(name, TaskType.FLINK, *args, **kwargs)
        # Program identity.
        self.main_class = main_class
        self.main_package = main_package
        self.program_type = program_type
        # Runtime / cluster configuration.
        self.deploy_mode = deploy_mode
        self.flink_version = flink_version
        self.app_name = app_name
        self.job_manager_memory = job_manager_memory
        self.task_manager_memory = task_manager_memory
        self.slot = slot
        self.task_manager = task_manager
        self.parallelism = parallelism
        # Extra command-line arguments and options.
        self.main_args = main_args
        self.others = others
        # Lazily-populated cache for the gateway resource lookup.
        self._resource = {}

    @property
    def main_jar(self) -> Dict:
        """Return the main package resource as a ``{"id": ...}`` mapping."""
        info = self.get_resource_info(self.program_type, self.main_package)
        return {"id": info.get("id")}

    def get_resource_info(self, program_type, main_package) -> Dict:
        """Fetch resource info (id, name) from the java gateway, memoized per task."""
        if not self._resource:
            gateway = launch_gateway()
            self._resource = gateway.entry_point.getResourcesFileInfo(
                program_type,
                main_package,
            )
        return self._resource
| 31.983051 | 91 | 0.654213 |
from typing import Dict, Optional
from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task
from pydolphinscheduler.java_gateway import launch_gateway
class ProgramType(str):
JAVA = "JAVA"
SCALA = "SCALA"
PYTHON = "PYTHON"
class FlinkVersion(str):
LOW_VERSION = "<1.10"
HIGHT_VERSION = ">=1.10"
class DeployMode(str):
LOCAL = "local"
CLUSTER = "cluster"
class Flink(Task):
_task_custom_attr = {
"main_class",
"main_jar",
"deploy_mode",
"flink_version",
"slot",
"task_manager",
"job_manager_memory",
"task_manager_memory",
"app_name",
"program_type",
"parallelism",
"main_args",
"others",
}
def __init__(
self,
name: str,
main_class: str,
main_package: str,
program_type: Optional[ProgramType] = ProgramType.SCALA,
deploy_mode: Optional[DeployMode] = DeployMode.CLUSTER,
flink_version: Optional[FlinkVersion] = FlinkVersion.LOW_VERSION,
app_name: Optional[str] = None,
job_manager_memory: Optional[str] = "1G",
task_manager_memory: Optional[str] = "2G",
slot: Optional[int] = 1,
task_manager: Optional[int] = 2,
parallelism: Optional[int] = 1,
main_args: Optional[str] = None,
others: Optional[str] = None,
*args,
**kwargs
):
super().__init__(name, TaskType.FLINK, *args, **kwargs)
self.main_class = main_class
self.main_package = main_package
self.program_type = program_type
self.deploy_mode = deploy_mode
self.flink_version = flink_version
self.app_name = app_name
self.job_manager_memory = job_manager_memory
self.task_manager_memory = task_manager_memory
self.slot = slot
self.task_manager = task_manager
self.parallelism = parallelism
self.main_args = main_args
self.others = others
self._resource = {}
@property
def main_jar(self) -> Dict:
resource_info = self.get_resource_info(self.program_type, self.main_package)
return {"id": resource_info.get("id")}
def get_resource_info(self, program_type, main_package) -> Dict:
if not self._resource:
self._resource = launch_gateway().entry_point.getResourcesFileInfo(
program_type,
main_package,
)
return self._resource
| true | true |
f732a1fd5f6bba4f5da1d7a6c7fabe8eac638bb7 | 476 | py | Python | ex100.py | arthurfas123/Curso-De-Python | c4a15d92811bd101a8562d2c3a90fe2d5a3c360d | [
"MIT"
] | null | null | null | ex100.py | arthurfas123/Curso-De-Python | c4a15d92811bd101a8562d2c3a90fe2d5a3c360d | [
"MIT"
] | null | null | null | ex100.py | arthurfas123/Curso-De-Python | c4a15d92811bd101a8562d2c3a90fe2d5a3c360d | [
"MIT"
] | null | null | null | from random import randint
from time import sleep
def sorteia(lista):
    """Append five random values in 1..10 to *lista* and display them one by one."""
    print('=-=' * 15)
    for _ in range(5):
        lista.append(randint(1, 10))
    print('Sorteando 5 valores da lista: ', end=' ')
    for valor in lista:
        # flush + sleep give the "drawing" animation effect
        print(f'{valor}', end=' ', flush=True)
        sleep(0.3)
    print()
def somapar(lista):
    """Print the sum of the even values contained in *lista*.

    Bug fix: the loop previously iterated the module-level global ``n``
    instead of the ``lista`` parameter, so the argument was silently
    ignored (and the function crashed when ``n`` was not defined).
    """
    soma = 0
    for valor in lista:
        if valor % 2 == 0:
            soma += valor
    print(f'Soma dos valores pares: {soma}')
n = []
sorteia(n)
somapar(n)
| 17.62963 | 52 | 0.535714 | from random import randint
from time import sleep
def sorteia(lista):
print('=-=' * 15)
for c in range(0, 5):
lista.append(randint(1, 10))
print('Sorteando 5 valores da lista: ', end=' ')
for c in lista:
print(f'{c}', end=' ', flush=True)
sleep(0.3)
print()
def somapar(lista):
    """Print the sum of the even values contained in *lista*.

    Bug fix: the loop previously iterated the module-level global ``n``
    instead of the ``lista`` parameter, so the argument was silently
    ignored (and the function crashed when ``n`` was not defined).
    """
    soma = 0
    for valor in lista:
        if valor % 2 == 0:
            soma += valor
    print(f'Soma dos valores pares: {soma}')
n = []
sorteia(n)
somapar(n)
| true | true |
f732a2a5bc2f99320a9eaba98bf163ca6ad86b20 | 3,257 | py | Python | tests/data.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | tests/data.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | tests/data.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | import enum
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
from serde import serde
from . import imported
@serde
@dataclass(unsafe_hash=True)
class Int:
"""
Integer.
"""
i: int
@serde
@dataclass(unsafe_hash=True)
class Str:
"""
String.
"""
s: str
@serde
@dataclass(unsafe_hash=True)
class Float:
"""
Float.
"""
f: float
@serde
@dataclass(unsafe_hash=True)
class Bool:
"""
Boolean.
"""
b: bool
@serde
@dataclass(unsafe_hash=True)
class Pri:
"""
Primitives.
"""
i: int
s: str
f: float
b: bool
@serde
class PriOpt:
"""
Optional Primitives.
"""
i: Optional[int]
s: Optional[str]
f: Optional[float]
b: Optional[bool]
@serde
class PriList:
"""
List containing primitives.
"""
i: List[int]
s: List[str]
f: List[float]
b: List[bool]
@serde
class PriDict:
"""
Dict containing primitives.
"""
i: Dict[int, int]
s: Dict[str, str]
f: Dict[float, float]
b: Dict[bool, bool]
@serde
class PriTuple:
"""
Tuple containing primitives.
"""
i: Tuple[int, int, int]
s: Tuple[str, str, str, str]
f: Tuple[float, float, float, float, float]
b: Tuple[bool, bool, bool, bool, bool, bool]
@serde
@dataclass(unsafe_hash=True)
class NestedInt:
"""
Nested integer.
"""
i: Int
@serde
@dataclass(unsafe_hash=True)
class NestedPri:
"""
Nested primitives.
"""
i: Int
s: Str
f: Float
b: Bool
@serde
class NestedPriOpt:
"""
Optional Primitives.
"""
i: Optional[Int]
s: Optional[Str]
f: Optional[Float]
b: Optional[Bool]
@serde
class NestedPriList:
"""
List containing nested primitives.
"""
i: List[Int]
s: List[Str]
f: List[Float]
b: List[Bool]
@serde
class NestedPriDict:
"""
Dict containing nested primitives.
"""
i: Dict[Str, Int]
s: Dict[Str, Str]
f: Dict[Str, Float]
b: Dict[Str, Bool]
@serde
class NestedPriTuple:
"""
Tuple containing nested primitives.
"""
i: Tuple[Int, Int, Int]
s: Tuple[Str, Str, Str, Str]
f: Tuple[Float, Float, Float, Float, Float]
b: Tuple[Bool, Bool, Bool, Bool, Bool, Bool]
@serde
@dataclass(unsafe_hash=True)
class PriDefault:
"""
Primitives.
"""
i: int = 10
s: str = 'foo'
f: float = 100.0
b: bool = True
@serde
class OptDefault:
"""
Optionals.
"""
n: Optional[int] = None
i: Optional[int] = 10
class E(enum.Enum):
    """Enum whose members carry mixed value types (str, float, bool)."""
    S = 'foo'
    F = 10.0
    B = True
class IE(enum.IntEnum):
    """IntEnum mixing auto-assigned members (V0=1, V1=2) with explicit values."""
    V0 = enum.auto()
    V1 = enum.auto()
    V2 = 10
    V3 = 100
@serde
class EnumInClass:
"""
Class having enum fields.
"""
e: IE = IE.V2
o: Optional[E] = E.S
i: imported.IE = imported.IE.V1
ListPri = List[Pri]
DictPri = Dict[str, Pri]
INT = Int(10)
STR = Str('foo')
FLOAT = Float(100.0)
BOOL = Bool(True)
PRI = Pri(10, 'foo', 100.0, True)
PRI_TUPLE = (10, 'foo', 100.0, True)
PRILIST = ([10], ['foo'], [100.0], [True])
NESTED_PRILIST = ([INT], [STR], [FLOAT], [BOOL])
NESTED_PRILIST_TUPLE = ([(10,)], [('foo',)], [(100.0,)], [(True,)])
| 13.028 | 67 | 0.562174 | import enum
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
from serde import serde
from . import imported
@serde
@dataclass(unsafe_hash=True)
class Int:
i: int
@serde
@dataclass(unsafe_hash=True)
class Str:
s: str
@serde
@dataclass(unsafe_hash=True)
class Float:
f: float
@serde
@dataclass(unsafe_hash=True)
class Bool:
b: bool
@serde
@dataclass(unsafe_hash=True)
class Pri:
i: int
s: str
f: float
b: bool
@serde
class PriOpt:
i: Optional[int]
s: Optional[str]
f: Optional[float]
b: Optional[bool]
@serde
class PriList:
i: List[int]
s: List[str]
f: List[float]
b: List[bool]
@serde
class PriDict:
i: Dict[int, int]
s: Dict[str, str]
f: Dict[float, float]
b: Dict[bool, bool]
@serde
class PriTuple:
i: Tuple[int, int, int]
s: Tuple[str, str, str, str]
f: Tuple[float, float, float, float, float]
b: Tuple[bool, bool, bool, bool, bool, bool]
@serde
@dataclass(unsafe_hash=True)
class NestedInt:
i: Int
@serde
@dataclass(unsafe_hash=True)
class NestedPri:
i: Int
s: Str
f: Float
b: Bool
@serde
class NestedPriOpt:
i: Optional[Int]
s: Optional[Str]
f: Optional[Float]
b: Optional[Bool]
@serde
class NestedPriList:
i: List[Int]
s: List[Str]
f: List[Float]
b: List[Bool]
@serde
class NestedPriDict:
i: Dict[Str, Int]
s: Dict[Str, Str]
f: Dict[Str, Float]
b: Dict[Str, Bool]
@serde
class NestedPriTuple:
i: Tuple[Int, Int, Int]
s: Tuple[Str, Str, Str, Str]
f: Tuple[Float, Float, Float, Float, Float]
b: Tuple[Bool, Bool, Bool, Bool, Bool, Bool]
@serde
@dataclass(unsafe_hash=True)
class PriDefault:
i: int = 10
s: str = 'foo'
f: float = 100.0
b: bool = True
@serde
class OptDefault:
n: Optional[int] = None
i: Optional[int] = 10
class E(enum.Enum):
S = 'foo'
F = 10.0
B = True
class IE(enum.IntEnum):
V0 = enum.auto()
V1 = enum.auto()
V2 = 10
V3 = 100
@serde
class EnumInClass:
e: IE = IE.V2
o: Optional[E] = E.S
i: imported.IE = imported.IE.V1
ListPri = List[Pri]
DictPri = Dict[str, Pri]
INT = Int(10)
STR = Str('foo')
FLOAT = Float(100.0)
BOOL = Bool(True)
PRI = Pri(10, 'foo', 100.0, True)
PRI_TUPLE = (10, 'foo', 100.0, True)
PRILIST = ([10], ['foo'], [100.0], [True])
NESTED_PRILIST = ([INT], [STR], [FLOAT], [BOOL])
NESTED_PRILIST_TUPLE = ([(10,)], [('foo',)], [(100.0,)], [(True,)])
| true | true |
f732a2b88e85016c41399879af5345c63525edb3 | 46 | py | Python | count_loop.py | cascroydon/Flowcharts | b9a84ae4dc6c70fff907a19171a3a19bae3d1335 | [
"MIT"
] | null | null | null | count_loop.py | cascroydon/Flowcharts | b9a84ae4dc6c70fff907a19171a3a19bae3d1335 | [
"MIT"
] | null | null | null | count_loop.py | cascroydon/Flowcharts | b9a84ae4dc6c70fff907a19171a3a19bae3d1335 | [
"MIT"
] | null | null | null | for count in range(10):
print (count + 1)
| 15.333333 | 23 | 0.608696 | for count in range(10):
print (count + 1)
| true | true |
f732a2c5e0fd2b6e98955ccea0377372f3d5c887 | 14,062 | py | Python | torchMoji/torchmoji/model_def.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 163 | 2019-06-23T14:07:57.000Z | 2022-02-25T23:06:07.000Z | torchMoji/torchmoji/model_def.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 8 | 2019-07-24T12:41:31.000Z | 2022-02-10T00:17:20.000Z | torchMoji/torchmoji/model_def.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 31 | 2019-06-26T01:21:07.000Z | 2021-09-06T17:23:24.000Z | # -*- coding: utf-8 -*-
""" Model definition functions and weight loading.
"""
from __future__ import print_function, division, unicode_literals
from os.path import exists
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from torchMoji.torchmoji.lstm import LSTMHardSigmoid
from torchMoji.torchmoji.attlayer import Attention
from torchMoji.torchmoji.global_variables import NB_TOKENS, NB_EMOJI_CLASSES
def torchmoji_feature_encoding(weight_path, return_attention=False):
    """Load pretrained torchMoji as a feature encoder.

    The softmax head is not loaded (``feature_output=True`` and the
    ``output_layer`` weights are excluded), so the returned model maps
    text to the penultimate feature vector — its "emotional encoding".

    # Arguments:
        weight_path: Path to model weights to be loaded.
        return_attention: If True, output also includes the attention
            weight of each input token used for the prediction.

    # Returns:
        Pretrained model for encoding text into feature vectors.
    """
    model = TorchMoji(
        nb_classes=None,
        nb_tokens=NB_TOKENS,
        feature_output=True,
        return_attention=return_attention,
    )
    load_specific_weights(model, weight_path, exclude_names=['output_layer'])
    return model
def torchmoji_emojis(weight_path, return_attention=False):
    """ Loads the pretrained torchMoji model including its classification
    head over NB_EMOJI_CLASSES emoji classes (unlike
    torchmoji_feature_encoding, which strips the output layer).

    # Arguments:
        weight_path: Path to model weights to be loaded.
        return_attention: If true, output will include weight of each input token
            used for the prediction

    # Returns:
        Pretrained model for predicting emoji class probabilities from text.
    """
    model = TorchMoji(nb_classes=NB_EMOJI_CLASSES,
                      nb_tokens=NB_TOKENS,
                      return_attention=return_attention)
    model.load_state_dict(torch.load(weight_path))
    return model
def torchmoji_transfer(nb_classes, weight_path=None, extend_embedding=0,
                       embed_dropout_rate=0.1, final_dropout_rate=0.5):
    """Build a torchMoji model set up for finetuning / transfer learning.

    Weights for the softmax layer are never loaded. Note that if class
    average F1 is used for evaluation, ``nb_classes`` should be 2 rather
    than the true class count, since binary classification is then done
    per class. For the 'new' training method, leave ``weight_path`` as
    None.

    # Arguments:
        nb_classes: Number of classes in the dataset.
        weight_path: Path to model weights to be loaded.
        extend_embedding: Number of tokens added to the vocabulary on top
            of NB_TOKENS. If > 0, the embedding layer is enlarged
            accordingly, with the extra rows randomly initialized.
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.

    # Returns:
        Model with the given parameters.
    """
    model = TorchMoji(
        nb_classes=nb_classes,
        nb_tokens=NB_TOKENS + extend_embedding,
        embed_dropout_rate=embed_dropout_rate,
        final_dropout_rate=final_dropout_rate,
        output_logits=True,
    )
    # The softmax head is task-specific, so its weights are always skipped.
    if weight_path is not None:
        load_specific_weights(
            model,
            weight_path,
            exclude_names=['output_layer'],
            extend_embedding=extend_embedding,
        )
    return model
class TorchMoji(nn.Module):
    """torchMoji: embedding + 2 bidirectional LSTM layers + attention head."""
    def __init__(self, nb_classes, nb_tokens, feature_output=False, output_logits=False,
                 embed_dropout_rate=0, final_dropout_rate=0, return_attention=False):
        """
        torchMoji model.
        IMPORTANT: The model is loaded in evaluation mode by default (self.eval())
        # Arguments:
            nb_classes: Number of classes in the dataset.
            nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
            feature_output: If True the model returns the penultimate
                            feature vector rather than Softmax probabilities
                            (defaults to False).
            output_logits: If True the model returns logits rather than probabilities
                            (defaults to False).
            embed_dropout_rate: Dropout rate for the embedding layer.
            final_dropout_rate: Dropout rate for the final Softmax layer.
            return_attention: If True the model also returns attention weights over the sentence
                            (defaults to False).
        """
        super(TorchMoji, self).__init__()
        embedding_dim = 256
        hidden_size = 512
        # Attention input is the concat of both bi-LSTM outputs (2 layers x
        # 2 directions x hidden_size) plus the raw embedding skip-connection.
        attention_size = 4 * hidden_size + embedding_dim
        self.feature_output = feature_output
        self.embed_dropout_rate = embed_dropout_rate
        self.final_dropout_rate = final_dropout_rate
        self.return_attention = return_attention
        self.hidden_size = hidden_size
        self.output_logits = output_logits
        self.nb_classes = nb_classes
        self.add_module('embed', nn.Embedding(nb_tokens, embedding_dim))
        # dropout2D: whole embedding channels are dropped instead of words.
        # Many dataset examples contain only a few words, so dropping one or
        # more whole words could alter the expressed emotion completely.
        self.add_module('embed_dropout', nn.Dropout2d(embed_dropout_rate))
        self.add_module('lstm_0', LSTMHardSigmoid(embedding_dim, hidden_size, batch_first=True, bidirectional=True))
        self.add_module('lstm_1', LSTMHardSigmoid(hidden_size*2, hidden_size, batch_first=True, bidirectional=True))
        self.add_module('attention_layer', Attention(attention_size=attention_size, return_attention=return_attention))
        # The head is omitted entirely in feature-encoding mode.
        if not feature_output:
            self.add_module('final_dropout', nn.Dropout(final_dropout_rate))
            if output_logits:
                self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1)))
            else:
                # Multi-class -> Softmax over nb_classes; binary -> single Sigmoid unit.
                self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1),
                                                              nn.Softmax() if self.nb_classes > 2 else nn.Sigmoid()))
        self.init_weights()
        # Put model in evaluation mode by default
        self.eval()
    def init_weights(self):
        """
        Reproduce Keras default initialization weights for consistency with the Keras version:
        uniform embeddings, Xavier input-hidden, orthogonal hidden-hidden, zero biases.
        """
        ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
        hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
        b = (param.data for name, param in self.named_parameters() if 'bias' in name)
        nn.init.uniform(self.embed.weight.data, a=-0.5, b=0.5)
        for t in ih:
            nn.init.xavier_uniform(t)
        for t in hh:
            nn.init.orthogonal(t)
        for t in b:
            nn.init.constant(t, 0)
        if not self.feature_output:
            nn.init.xavier_uniform(self.output_layer[0].weight.data)
    def forward(self, input_seqs):
        """ Forward pass.
        # Arguments:
            input_seqs: Can be one of Numpy array, Torch.LongTensor, Torch.Variable, Torch.PackedSequence.
        # Return:
            Same format as input format (except for PackedSequence returned as Variable).
        """
        # Check if we have Torch.LongTensor inputs or not Torch.Variable (assume Numpy array in this case), take note to return same format
        return_numpy = False
        return_tensor = False
        if isinstance(input_seqs, (torch.LongTensor, torch.cuda.LongTensor)):
            input_seqs = Variable(input_seqs)
            return_tensor = True
        elif not isinstance(input_seqs, Variable):
            input_seqs = Variable(torch.from_numpy(input_seqs.astype('int64')).long())
            return_numpy = True
        # If we don't have a packed input, pack it ourselves (requires sorting
        # the batch by sequence length, so remember to undo that at the end).
        reorder_output = False
        if not isinstance(input_seqs, PackedSequence):
            # Zero initial hidden/cell states on the same device/dtype as the weights.
            ho = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
            co = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
            # Reorder batch by sequence length (length = index of last nonzero token + 1)
            input_lengths = torch.LongTensor([torch.max(input_seqs[i, :].data.nonzero()) + 1 for i in range(input_seqs.size()[0])])
            input_lengths, perm_idx = input_lengths.sort(0, descending=True)
            input_seqs = input_seqs[perm_idx][:, :input_lengths.max()]
            # Pack sequence and work on data tensor to reduce embeddings/dropout computations
            packed_input = pack_padded_sequence(input_seqs, input_lengths.cpu().numpy(), batch_first=True)
            reorder_output = True
        else:
            ho = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
            co = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
            input_lengths = input_seqs.batch_sizes
            packed_input = input_seqs
        hidden = (Variable(ho, requires_grad=False), Variable(co, requires_grad=False))
        # Embed with an activation function to bound the values of the embeddings
        x = self.embed(packed_input.data)
        x = nn.Tanh()(x)
        # pyTorch Dropout2d operates on axis 1 (channels), which is what we want here
        x = self.embed_dropout(x)
        # Update packed sequence data for RNN
        packed_input = PackedSequence(x, packed_input.batch_sizes)
        # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
        # ordering of the way the merge is done is important for consistency with the pretrained model
        lstm_0_output, _ = self.lstm_0(packed_input, hidden)
        lstm_1_output, _ = self.lstm_1(lstm_0_output, hidden)
        # Update packed sequence data for attention layer
        packed_input = PackedSequence(torch.cat((lstm_1_output.data,
                                                 lstm_0_output.data,
                                                 packed_input.data), dim=1),
                                      packed_input.batch_sizes)
        input_seqs, _ = pad_packed_sequence(packed_input, batch_first=True)
        x, att_weights = self.attention_layer(input_seqs, input_lengths)
        # output class probabilities or penultimate feature vector
        if not self.feature_output:
            x = self.final_dropout(x)
            outputs = self.output_layer(x)
        else:
            outputs = x
        # Undo the length-sorted batch reordering if we packed the input ourselves
        if reorder_output:
            reorered = Variable(outputs.data.new(outputs.size()))
            reorered[perm_idx] = outputs
            outputs = reorered
        # Adapt return format if needed
        if return_tensor:
            outputs = outputs.data
        if return_numpy:
            outputs = outputs.data.numpy()
        if self.return_attention:
            return outputs, att_weights
        else:
            return outputs
def load_specific_weights(model, weight_path, exclude_names=None, extend_embedding=0, verbose=True):
    """ Loads model weights from the given file path, excluding any
    given layers.

    # Arguments:
        model: Model whose weights should be loaded.
        weight_path: Path to file containing model weights.
        exclude_names: List of layer names whose weights should not be loaded.
        extend_embedding: Number of new words being added to vocabulary.
        verbose: Verbosity flag.

    # Raises:
        ValueError if the file at weight_path does not exist.
    """
    # Avoid the mutable-default-argument pitfall (was ``exclude_names=[]``).
    if exclude_names is None:
        exclude_names = []
    if not exists(weight_path):
        raise ValueError('ERROR (load_weights): The weights file at {} does '
                         'not exist. Refer to the README for instructions.'
                         .format(weight_path))
    if extend_embedding and 'embed' in exclude_names:
        raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
                         'without loading the embedding weights.')
    # Copy only weights from the checkpoint that are wanted for the specific
    # task (e.g. the Softmax is often ignored).
    weights = torch.load(weight_path)
    # Build the state dict once instead of on every loop iteration; its
    # tensors are references, so in-place copy_() updates the model.
    model_state = model.state_dict()
    for key, weight in weights.items():
        if any(excluded in key for excluded in exclude_names):
            if verbose:
                print('Ignoring weights for {}'.format(key))
            continue
        try:
            model_w = model_state[key]
        except KeyError:
            raise KeyError("Weights had parameters {},".format(key)
                           + " but could not find this parameters in model.")
        if verbose:
            print('Loading weights for {}'.format(key))
        # extend embedding layer to allow new randomly initialized words
        # if requested. Otherwise, just load the weights for the layer.
        if 'embed' in key and extend_embedding > 0:
            weight = torch.cat((weight, model_w[NB_TOKENS:, :]), dim=0)
            if verbose:
                print('Extended vocabulary for embedding layer ' +
                      'from {} to {} tokens.'.format(
                        NB_TOKENS, NB_TOKENS + extend_embedding))
        try:
            model_w.copy_(weight)
        except Exception:  # was a bare except; print the diagnostic, then re-raise
            print('While copying the weigths named {}, whose dimensions in the model are'
                  ' {} and whose dimensions in the saved file are {}, ...'.format(
                      key, model_w.size(), weight.size()))
            raise
| 44.5 | 139 | 0.645499 |
from __future__ import print_function, division, unicode_literals
from os.path import exists
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from torchMoji.torchmoji.lstm import LSTMHardSigmoid
from torchMoji.torchmoji.attlayer import Attention
from torchMoji.torchmoji.global_variables import NB_TOKENS, NB_EMOJI_CLASSES
def torchmoji_feature_encoding(weight_path, return_attention=False):
model = TorchMoji(nb_classes=None,
nb_tokens=NB_TOKENS,
feature_output=True,
return_attention=return_attention)
load_specific_weights(model, weight_path, exclude_names=['output_layer'])
return model
def torchmoji_emojis(weight_path, return_attention=False):
model = TorchMoji(nb_classes=NB_EMOJI_CLASSES,
nb_tokens=NB_TOKENS,
return_attention=return_attention)
model.load_state_dict(torch.load(weight_path))
return model
def torchmoji_transfer(nb_classes, weight_path=None, extend_embedding=0,
embed_dropout_rate=0.1, final_dropout_rate=0.5):
model = TorchMoji(nb_classes=nb_classes,
nb_tokens=NB_TOKENS + extend_embedding,
embed_dropout_rate=embed_dropout_rate,
final_dropout_rate=final_dropout_rate,
output_logits=True)
if weight_path is not None:
load_specific_weights(model, weight_path,
exclude_names=['output_layer'],
extend_embedding=extend_embedding)
return model
class TorchMoji(nn.Module):
def __init__(self, nb_classes, nb_tokens, feature_output=False, output_logits=False,
embed_dropout_rate=0, final_dropout_rate=0, return_attention=False):
super(TorchMoji, self).__init__()
embedding_dim = 256
hidden_size = 512
attention_size = 4 * hidden_size + embedding_dim
self.feature_output = feature_output
self.embed_dropout_rate = embed_dropout_rate
self.final_dropout_rate = final_dropout_rate
self.return_attention = return_attention
self.hidden_size = hidden_size
self.output_logits = output_logits
self.nb_classes = nb_classes
self.add_module('embed', nn.Embedding(nb_tokens, embedding_dim))
self.add_module('embed_dropout', nn.Dropout2d(embed_dropout_rate))
self.add_module('lstm_0', LSTMHardSigmoid(embedding_dim, hidden_size, batch_first=True, bidirectional=True))
self.add_module('lstm_1', LSTMHardSigmoid(hidden_size*2, hidden_size, batch_first=True, bidirectional=True))
self.add_module('attention_layer', Attention(attention_size=attention_size, return_attention=return_attention))
if not feature_output:
self.add_module('final_dropout', nn.Dropout(final_dropout_rate))
if output_logits:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1)))
else:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1),
nn.Softmax() if self.nb_classes > 2 else nn.Sigmoid()))
self.init_weights()
self.eval()
def init_weights(self):
ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
nn.init.uniform(self.embed.weight.data, a=-0.5, b=0.5)
for t in ih:
nn.init.xavier_uniform(t)
for t in hh:
nn.init.orthogonal(t)
for t in b:
nn.init.constant(t, 0)
if not self.feature_output:
nn.init.xavier_uniform(self.output_layer[0].weight.data)
def forward(self, input_seqs):
return_numpy = False
return_tensor = False
if isinstance(input_seqs, (torch.LongTensor, torch.cuda.LongTensor)):
input_seqs = Variable(input_seqs)
return_tensor = True
elif not isinstance(input_seqs, Variable):
input_seqs = Variable(torch.from_numpy(input_seqs.astype('int64')).long())
return_numpy = True
reorder_output = False
if not isinstance(input_seqs, PackedSequence):
ho = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
input_lengths = torch.LongTensor([torch.max(input_seqs[i, :].data.nonzero()) + 1 for i in range(input_seqs.size()[0])])
input_lengths, perm_idx = input_lengths.sort(0, descending=True)
input_seqs = input_seqs[perm_idx][:, :input_lengths.max()]
packed_input = pack_padded_sequence(input_seqs, input_lengths.cpu().numpy(), batch_first=True)
reorder_output = True
else:
ho = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
input_lengths = input_seqs.batch_sizes
packed_input = input_seqs
hidden = (Variable(ho, requires_grad=False), Variable(co, requires_grad=False))
x = self.embed(packed_input.data)
x = nn.Tanh()(x)
x = self.embed_dropout(x)
packed_input = PackedSequence(x, packed_input.batch_sizes)
lstm_0_output, _ = self.lstm_0(packed_input, hidden)
lstm_1_output, _ = self.lstm_1(lstm_0_output, hidden)
packed_input = PackedSequence(torch.cat((lstm_1_output.data,
lstm_0_output.data,
packed_input.data), dim=1),
packed_input.batch_sizes)
input_seqs, _ = pad_packed_sequence(packed_input, batch_first=True)
x, att_weights = self.attention_layer(input_seqs, input_lengths)
if not self.feature_output:
x = self.final_dropout(x)
outputs = self.output_layer(x)
else:
outputs = x
if reorder_output:
reorered = Variable(outputs.data.new(outputs.size()))
reorered[perm_idx] = outputs
outputs = reorered
if return_tensor:
outputs = outputs.data
if return_numpy:
outputs = outputs.data.numpy()
if self.return_attention:
return outputs, att_weights
else:
return outputs
def load_specific_weights(model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):
if not exists(weight_path):
raise ValueError('ERROR (load_weights): The weights file at {} does '
'not exist. Refer to the README for instructions.'
.format(weight_path))
if extend_embedding and 'embed' in exclude_names:
raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
'without loading the embedding weights.')
weights = torch.load(weight_path)
for key, weight in weights.items():
if any(excluded in key for excluded in exclude_names):
if verbose:
print('Ignoring weights for {}'.format(key))
continue
try:
model_w = model.state_dict()[key]
except KeyError:
raise KeyError("Weights had parameters {},".format(key)
+ " but could not find this parameters in model.")
if verbose:
print('Loading weights for {}'.format(key))
if 'embed' in key and extend_embedding > 0:
weight = torch.cat((weight, model_w[NB_TOKENS:, :]), dim=0)
if verbose:
print('Extended vocabulary for embedding layer ' +
'from {} to {} tokens.'.format(
NB_TOKENS, NB_TOKENS + extend_embedding))
try:
model_w.copy_(weight)
except:
print('While copying the weigths named {}, whose dimensions in the model are'
' {} and whose dimensions in the saved file are {}, ...'.format(
key, model_w.size(), weight.size()))
raise
| true | true |
f732a342632d05f40c130c080066d38b5818a226 | 987 | py | Python | Codewars_Python/memoized_log_cutting.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | Codewars_Python/memoized_log_cutting.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | Codewars_Python/memoized_log_cutting.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | # nlantau, 2021-01-17
INT_MIN=-32422
def cut_log(p, n):
    """Return the maximum revenue for cutting a log of length ``n``.

    Classic bottom-up rod-cutting DP where ``p[i]`` is the price of a piece
    of length ``i`` (``p[0]`` is unused). The module-level ``INT_MIN``
    sentinel and inner accumulator loop are replaced with an idiomatic
    ``max`` over all first-cut choices, removing the magic constant.
    """
    best = [0] * (n + 1)
    for length in range(1, n + 1):
        # Try every size for the first cut; the remainder is already optimal.
        best[length] = max(p[cut] + best[length - cut] for cut in range(1, length + 1))
    return best[n]
# Clever solutions
def cl(p, n):
    """Rod-cutting DP that grows the best-revenue table one length at a time."""
    best = [0]
    for _ in range(n):
        # Pair each first-piece price with the optimal revenue of the remainder.
        best.append(max(price + tail for price, tail in zip(p[1:], reversed(best))))
    return best[n]
p = [ 0, 1, 5, 8, 9, 10, 17, 17, 20, 24, # 0X's
30, 32, 35, 39, 43, 43, 45, 49, 50, 54, # 1X's
57, 60, 65, 68, 70, 74, 80, 81, 84, 85, # 2X's
87, 91, 95, 99, 101, 104, 107, 112, 115, 116, # 3X's
119] # 40th element
print(cut_log(p, 8), "should equal 22")
print(cl(p, 8), "should equal 22")
print(cut_log(p, 10), "should equal 30")
print(cl(p, 10), "should equal 30")
print(cut_log(p, 22), "should equal 65")
print(cl(p, 22), "should equal 65")
print(cut_log(p, 35), "should equal 105")
print(cl(p, 35), "should equal 105")
| 23.5 | 62 | 0.506586 |
INT_MIN=-32422
def cut_log(p,n):
r = [0 for _ in range(n+1)]
r[0] = 0
for j in range(1,n+1):
q = INT_MIN
for i in range(1,j+1):
q = max(q, p[i] + r[j-i])
r[j] = q
return r[n]
def cl(p,n):
l = [0]
for _ in range(n):
l.append(max(pi+li for pi, li in zip(p[1:], l[::-1])))
return l[n]
p = [ 0, 1, 5, 8, 9, 10, 17, 17, 20, 24,
30, 32, 35, 39, 43, 43, 45, 49, 50, 54, # 1X's
57, 60, 65, 68, 70, 74, 80, 81, 84, 85,
87, 91, 95, 99, 101, 104, 107, 112, 115, 116, # 3X's
119]
print(cut_log(p, 8), "should equal 22")
print(cl(p, 8), "should equal 22")
print(cut_log(p, 10), "should equal 30")
print(cl(p, 10), "should equal 30")
print(cut_log(p, 22), "should equal 65")
print(cl(p, 22), "should equal 65")
print(cut_log(p, 35), "should equal 105")
print(cl(p, 35), "should equal 105")
| true | true |
f732a3a89841c1d3b41f3f0c82246c1472d76f09 | 6,229 | py | Python | myems-api/core/wechatmessage.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | 2 | 2021-02-19T10:22:36.000Z | 2021-02-19T10:23:22.000Z | myems-api/core/wechatmessage.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | null | null | null | myems-api/core/wechatmessage.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | 1 | 2022-01-29T14:18:47.000Z | 2022-01-29T14:18:47.000Z | import falcon
import json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
class WechatMessageCollection(object):
@staticmethod
def on_options(req, resp, startdate, enddate):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, startdate, enddate):
try:
start_datetime_local = datetime.strptime(startdate, '%Y-%m-%d')
except Exception:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_START_DATE_FORMAT')
try:
end_datetime_local = datetime.strptime(enddate, '%Y-%m-%d')
except Exception:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_END_DATE_FORMAT')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
start_datetime_utc = start_datetime_local.replace(tzinfo=timezone.utc)
start_datetime_utc -= timedelta(minutes=timezone_offset)
end_datetime_utc = end_datetime_local.replace(tzinfo=timezone.utc)
end_datetime_utc -= timedelta(minutes=timezone_offset)
end_datetime_utc += timedelta(days=1)
if start_datetime_utc >= end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.START_DATETIME_SHOULD_BE_EARLY_THAN_END_DATETIME')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, recipient_name, recipient_openid, message_template_id, "
" message_data, created_datetime_utc, scheduled_datetime_utc, "
" acknowledge_code, status "
" FROM tbl_wechat_messages_outbox "
" WHERE created_datetime_utc >= %s AND created_datetime_utc < %s "
" ORDER BY id DESC ")
cursor.execute(query, (start_datetime_utc, end_datetime_utc))
rows = cursor.fetchall()
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
meta_result = {"id": row[0],
"recipient_name": row[1],
"recipient_openid": row[2],
"message_template_id": row[3],
"message_data": row[4],
"created_datetime_utc": row[5].timestamp() * 1000 if row[5] else None,
"scheduled_datetime_utc": row[6].timestamp() * 1000 if row[6] else None,
"acknowledge_code": row[7],
"status": row[8]}
result.append(meta_result)
resp.body = json.dumps(result)
class WechatMessageItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_WECHAT_MESSAGE_ID')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, recipient_name, recipient_openid, message_template_id, "
" message_data, created_datetime_utc, scheduled_datetime_utc, "
" acknowledge_code, status "
" FROM tbl_wechat_messages_outbox "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.WECHAT_MESSAGE_NOT_FOUND')
result = {"id": row[0],
"recipient_name": row[1],
"recipient_openid": row[2],
"recipient_template_id": row[3],
"message_data": row[4],
"created_datetime_utc": row[5].timestamp() * 1000 if row[5] else None,
"scheduled_datetime_utc": row[6].timestamp() * 1000 if row[6] else None,
"acknowledge_code": row[7],
"status": row[8]}
resp.body = json.dumps(result)
@staticmethod
def on_delete(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_WECHAT_MESSAGE_ID')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
cursor.execute(" SELECT id "
" FROM tbl_wechat_messages_outbox "
" WHERE id = %s ", (id_,))
row = cursor.fetchone()
if row is None:
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.WECHAT_MESSAGE_NOT_FOUND')
try:
cursor.execute(" DELETE FROM tbl_wechat_messages_outbox WHERE id = %s ", (id_,))
cnx.commit()
except Exception as e:
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_500, title='API.ERROR',
description='API.DATABASE_ERROR')
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
resp.status = falcon.HTTP_204
| 37.981707 | 103 | 0.541981 | import falcon
import json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
class WechatMessageCollection(object):
@staticmethod
def on_options(req, resp, startdate, enddate):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, startdate, enddate):
try:
start_datetime_local = datetime.strptime(startdate, '%Y-%m-%d')
except Exception:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_START_DATE_FORMAT')
try:
end_datetime_local = datetime.strptime(enddate, '%Y-%m-%d')
except Exception:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_END_DATE_FORMAT')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
start_datetime_utc = start_datetime_local.replace(tzinfo=timezone.utc)
start_datetime_utc -= timedelta(minutes=timezone_offset)
end_datetime_utc = end_datetime_local.replace(tzinfo=timezone.utc)
end_datetime_utc -= timedelta(minutes=timezone_offset)
end_datetime_utc += timedelta(days=1)
if start_datetime_utc >= end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.START_DATETIME_SHOULD_BE_EARLY_THAN_END_DATETIME')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, recipient_name, recipient_openid, message_template_id, "
" message_data, created_datetime_utc, scheduled_datetime_utc, "
" acknowledge_code, status "
" FROM tbl_wechat_messages_outbox "
" WHERE created_datetime_utc >= %s AND created_datetime_utc < %s "
" ORDER BY id DESC ")
cursor.execute(query, (start_datetime_utc, end_datetime_utc))
rows = cursor.fetchall()
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
meta_result = {"id": row[0],
"recipient_name": row[1],
"recipient_openid": row[2],
"message_template_id": row[3],
"message_data": row[4],
"created_datetime_utc": row[5].timestamp() * 1000 if row[5] else None,
"scheduled_datetime_utc": row[6].timestamp() * 1000 if row[6] else None,
"acknowledge_code": row[7],
"status": row[8]}
result.append(meta_result)
resp.body = json.dumps(result)
class WechatMessageItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_WECHAT_MESSAGE_ID')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, recipient_name, recipient_openid, message_template_id, "
" message_data, created_datetime_utc, scheduled_datetime_utc, "
" acknowledge_code, status "
" FROM tbl_wechat_messages_outbox "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.WECHAT_MESSAGE_NOT_FOUND')
result = {"id": row[0],
"recipient_name": row[1],
"recipient_openid": row[2],
"recipient_template_id": row[3],
"message_data": row[4],
"created_datetime_utc": row[5].timestamp() * 1000 if row[5] else None,
"scheduled_datetime_utc": row[6].timestamp() * 1000 if row[6] else None,
"acknowledge_code": row[7],
"status": row[8]}
resp.body = json.dumps(result)
@staticmethod
def on_delete(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_WECHAT_MESSAGE_ID')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
cursor.execute(" SELECT id "
" FROM tbl_wechat_messages_outbox "
" WHERE id = %s ", (id_,))
row = cursor.fetchone()
if row is None:
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.WECHAT_MESSAGE_NOT_FOUND')
try:
cursor.execute(" DELETE FROM tbl_wechat_messages_outbox WHERE id = %s ", (id_,))
cnx.commit()
except Exception as e:
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_500, title='API.ERROR',
description='API.DATABASE_ERROR')
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
resp.status = falcon.HTTP_204
| true | true |
f732a4710f4e48af1d36e16ade6633b7916317d3 | 438 | py | Python | password-maker.py | JG-Mike/password-generator | 5bf5c4f8fc9387b9048ae96d7f329f4bca8028b3 | [
"MIT"
] | null | null | null | password-maker.py | JG-Mike/password-generator | 5bf5c4f8fc9387b9048ae96d7f329f4bca8028b3 | [
"MIT"
] | null | null | null | password-maker.py | JG-Mike/password-generator | 5bf5c4f8fc9387b9048ae96d7f329f4bca8028b3 | [
"MIT"
] | null | null | null | import random
import string
# length of password
length = int(input('\nEnter the length of password: '))
# define characters for making password
lower = string.ascii_lowercase
upper = string.ascii_uppercase
num = string.digits
symbols = string.punctuation
all = lower + upper + num + symbols
# use random
temp = random.sample(all,length)
# create the password
password = "".join(temp)
# print the password madetf
print(password)
| 18.25 | 56 | 0.748858 | import random
import string
length = int(input('\nEnter the length of password: '))
lower = string.ascii_lowercase
upper = string.ascii_uppercase
num = string.digits
symbols = string.punctuation
all = lower + upper + num + symbols
temp = random.sample(all,length)
password = "".join(temp)
print(password)
| true | true |
f732a4cc35b1d21135b3bf3c9292b0a16c1ea6c5 | 2,842 | py | Python | fastapi_module/controller.py | lqmanh/fastapi-alt-controller | b7370850f9f95e5fdebaf9c211f5dee1dc729a96 | [
"MIT"
] | 1 | 2021-06-10T08:32:32.000Z | 2021-06-10T08:32:32.000Z | fastapi_module/controller.py | lqmanh/fastapi-alt-controller | b7370850f9f95e5fdebaf9c211f5dee1dc729a96 | [
"MIT"
] | 13 | 2021-08-28T08:02:52.000Z | 2022-03-01T01:07:13.000Z | fastapi_module/controller.py | lqmanh/fastapi-module | b7370850f9f95e5fdebaf9c211f5dee1dc729a96 | [
"MIT"
] | null | null | null | import inspect
from collections.abc import Callable
from inspect import Parameter
from typing import Optional, TypeVar, Union
from fastapi import APIRouter, Depends
from starlette.routing import Route, WebSocketRoute
from .types import InitializedError
from .utils import make_cls_accept_cls_annotated_deps
T = TypeVar("T")
def controller(
router: APIRouter, *, version: Optional[float] = None
) -> Callable[[type[T]], type[T]]:
"""
Factory function that returns a decorator converting the decorated class into a controller class.
The first positional argument (typically `self`) to all methods decorated as endpoints using the provided router
will be populated with a controller instance via FastAPI's dependency-injection system.
"""
def decorator(cls: type[T]) -> type[T]:
return _controller(cls, router, version=version)
return decorator
def _controller(
cls: type[T], router: APIRouter, *, version: Optional[float] = None
) -> type[T]:
"""
Decorator that converts the decorated class into a controller class.
Replace all methods of class `cls` decorated as endpoints of router `router` with
function calls that will properly inject an instance of class `cls`.
"""
if getattr(cls, "__fastapi_controller__", False):
raise InitializedError(cls)
setattr(cls, "__fastapi_controller__", cls.__name__)
setattr(cls, "__version__", version)
setattr(cls, "router", router)
cls = make_cls_accept_cls_annotated_deps(cls)
internal_router = APIRouter()
function_members = inspect.getmembers(cls, inspect.isfunction)
function_set = set(func for _, func in function_members)
routes = [
route
for route in router.routes
if isinstance(route, (Route, WebSocketRoute)) and route.endpoint in function_set
]
for route in routes:
router.routes.remove(route)
_update_controller_route_endpoint_signature(cls, route)
route.path = route.path.removeprefix(router.prefix)
internal_router.routes.append(route)
router.include_router(internal_router)
return cls
def _update_controller_route_endpoint_signature(
cls: type[T], route: Union[Route, WebSocketRoute]
) -> None:
"""
Fix a controller route endpoint signature to ensure FastAPI injects dependencies properly.
"""
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_params = list(old_signature.parameters.values())
old_1st_param = old_params[0]
new_1st_param = old_1st_param.replace(default=Depends(cls))
new_params = [new_1st_param] + [
param.replace(kind=Parameter.KEYWORD_ONLY) for param in old_params[1:]
]
new_signature = old_signature.replace(parameters=new_params)
setattr(route.endpoint, "__signature__", new_signature)
| 35.525 | 116 | 0.728712 | import inspect
from collections.abc import Callable
from inspect import Parameter
from typing import Optional, TypeVar, Union
from fastapi import APIRouter, Depends
from starlette.routing import Route, WebSocketRoute
from .types import InitializedError
from .utils import make_cls_accept_cls_annotated_deps
T = TypeVar("T")
def controller(
router: APIRouter, *, version: Optional[float] = None
) -> Callable[[type[T]], type[T]]:
def decorator(cls: type[T]) -> type[T]:
return _controller(cls, router, version=version)
return decorator
def _controller(
cls: type[T], router: APIRouter, *, version: Optional[float] = None
) -> type[T]:
if getattr(cls, "__fastapi_controller__", False):
raise InitializedError(cls)
setattr(cls, "__fastapi_controller__", cls.__name__)
setattr(cls, "__version__", version)
setattr(cls, "router", router)
cls = make_cls_accept_cls_annotated_deps(cls)
internal_router = APIRouter()
function_members = inspect.getmembers(cls, inspect.isfunction)
function_set = set(func for _, func in function_members)
routes = [
route
for route in router.routes
if isinstance(route, (Route, WebSocketRoute)) and route.endpoint in function_set
]
for route in routes:
router.routes.remove(route)
_update_controller_route_endpoint_signature(cls, route)
route.path = route.path.removeprefix(router.prefix)
internal_router.routes.append(route)
router.include_router(internal_router)
return cls
def _update_controller_route_endpoint_signature(
cls: type[T], route: Union[Route, WebSocketRoute]
) -> None:
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_params = list(old_signature.parameters.values())
old_1st_param = old_params[0]
new_1st_param = old_1st_param.replace(default=Depends(cls))
new_params = [new_1st_param] + [
param.replace(kind=Parameter.KEYWORD_ONLY) for param in old_params[1:]
]
new_signature = old_signature.replace(parameters=new_params)
setattr(route.endpoint, "__signature__", new_signature)
| true | true |
f732a59e7fc268316b9070494224003bd1b40ac2 | 503 | py | Python | pal/transform/make_write_only.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 26 | 2020-01-06T23:53:17.000Z | 2022-02-01T08:58:21.000Z | pal/transform/make_write_only.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 30 | 2019-11-13T00:55:22.000Z | 2022-01-06T08:09:35.000Z | pal/transform/make_write_only.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 14 | 2019-11-15T16:56:22.000Z | 2021-12-22T10:14:17.000Z | from pal.transform.abstract_transform import AbstractTransform
class MakeWriteOnly(AbstractTransform):
@property
def description(self):
d = "removing readable access mechanisms"
return d
def do_transform(self, reg):
readable = [
"mrs_register",
"mrs_banked",
"mrc",
"mrrc",
"vmrs",
"ldr"
]
for key in readable:
reg.access_mechanisms[key] = []
return reg
| 20.958333 | 62 | 0.540755 | from pal.transform.abstract_transform import AbstractTransform
class MakeWriteOnly(AbstractTransform):
@property
def description(self):
d = "removing readable access mechanisms"
return d
def do_transform(self, reg):
readable = [
"mrs_register",
"mrs_banked",
"mrc",
"mrrc",
"vmrs",
"ldr"
]
for key in readable:
reg.access_mechanisms[key] = []
return reg
| true | true |
f732a62465afae0e6e49ac1c46c00ec0307e13a3 | 493 | py | Python | pyjobs/core/migrations/0042_job_receive_emails.py | Mdslino/PyJobs | d2496d58067503c3304a6c59052238b1f097472b | [
"BSD-3-Clause"
] | 132 | 2017-10-27T23:54:47.000Z | 2022-03-15T12:10:10.000Z | pyjobs/core/migrations/0042_job_receive_emails.py | Mdslino/PyJobs | d2496d58067503c3304a6c59052238b1f097472b | [
"BSD-3-Clause"
] | 129 | 2017-09-05T04:22:50.000Z | 2022-03-12T01:06:49.000Z | pyjobs/core/migrations/0042_job_receive_emails.py | Mdslino/PyJobs | d2496d58067503c3304a6c59052238b1f097472b | [
"BSD-3-Clause"
] | 82 | 2017-10-28T00:14:04.000Z | 2021-07-27T20:00:40.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-29 17:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0041_jobapplication_output_sent"),
]
operations = [
migrations.AddField(
model_name="job",
name="receive_emails",
field=models.BooleanField(default=True, verbose_name="Enviar emails?"),
),
]
| 23.47619 | 83 | 0.634888 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0041_jobapplication_output_sent"),
]
operations = [
migrations.AddField(
model_name="job",
name="receive_emails",
field=models.BooleanField(default=True, verbose_name="Enviar emails?"),
),
]
| true | true |
f732a7f0fd236efe6a53656d4ab41520905e53e9 | 20,847 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc2889broadcastrate_1f8e1c7f7f9e4d711149db4a572058fb.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc2889broadcastrate_1f8e1c7f7f9e4d711149db4a572058fb.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc2889broadcastrate_1f8e1c7f7f9e4d711149db4a572058fb.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Rfc2889broadcastRate(Base):
"""The RFC 2889 Broadcast Rate test determines the maximum rate at which the DUT receives and forwards broadcast frames without any frame loss. The test uses a binary search algorithm to obtain a rate at which the DUT does not lose frames within an acceptable rate window. The latency is also calculated in this test.
The Rfc2889broadcastRate class encapsulates a list of rfc2889broadcastRate resources that are managed by the user.
A list of resources can be retrieved from the server using the Rfc2889broadcastRate.find() method.
The list can be managed by using the Rfc2889broadcastRate.add() and Rfc2889broadcastRate.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'rfc2889broadcastRate'
_SDM_ATT_MAP = {
'ForceApplyQTConfig': 'forceApplyQTConfig',
'InputParameters': 'inputParameters',
'Mode': 'mode',
'Name': 'name',
}
_SDM_ENUM_MAP = {
'mode': ['existingMode', 'newMode'],
}
def __init__(self, parent, list_op=False):
super(Rfc2889broadcastRate, self).__init__(parent, list_op)
@property
def LearnFrames(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_f6015f7ddc2fc53013cae8906d006afd.LearnFrames): An instance of the LearnFrames class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_f6015f7ddc2fc53013cae8906d006afd import LearnFrames
if self._properties.get('LearnFrames', None) is not None:
return self._properties.get('LearnFrames')
else:
return LearnFrames(self)._select()
@property
def PassCriteria(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_1568efcb71d423db7b9caee1463792cd.PassCriteria): An instance of the PassCriteria class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_1568efcb71d423db7b9caee1463792cd import PassCriteria
if self._properties.get('PassCriteria', None) is not None:
return self._properties.get('PassCriteria')
else:
return PassCriteria(self)._select()
@property
def Results(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_f711c71e6809173dc065c2cc804decec.Results): An instance of the Results class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_f711c71e6809173dc065c2cc804decec import Results
if self._properties.get('Results', None) is not None:
return self._properties.get('Results')
else:
return Results(self)._select()
@property
def TestConfig(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_27bc595dc7175d4d0737241a72260e04.TestConfig): An instance of the TestConfig class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_27bc595dc7175d4d0737241a72260e04 import TestConfig
if self._properties.get('TestConfig', None) is not None:
return self._properties.get('TestConfig')
else:
return TestConfig(self)._select()
@property
def TrafficSelection(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_f387831939c28a776a58c26afacaf51c.TrafficSelection): An instance of the TrafficSelection class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_f387831939c28a776a58c26afacaf51c import TrafficSelection
if self._properties.get('TrafficSelection', None) is not None:
return self._properties.get('TrafficSelection')
else:
return TrafficSelection(self)
@property
def ForceApplyQTConfig(self):
# type: () -> bool
"""
Returns
-------
- bool: Apply QT config
"""
return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
@ForceApplyQTConfig.setter
def ForceApplyQTConfig(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
@property
def InputParameters(self):
# type: () -> str
"""
Returns
-------
- str: Input Parameters
"""
return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
@InputParameters.setter
def InputParameters(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(existingMode | newMode): Test mode
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Test name
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Rfc2889broadcastRate
"""Updates rfc2889broadcastRate resource on the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Rfc2889broadcastRate
"""Adds a new rfc2889broadcastRate resource on the server and adds it to the container.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with all currently retrieved rfc2889broadcastRate resources using find and the newly added rfc2889broadcastRate resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained rfc2889broadcastRate resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
# type: (bool, str, str, str) -> Rfc2889broadcastRate
"""Finds and retrieves rfc2889broadcastRate resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve rfc2889broadcastRate resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all rfc2889broadcastRate resources from the server.
Args
----
- ForceApplyQTConfig (bool): Apply QT config
- InputParameters (str): Input Parameters
- Mode (str(existingMode | newMode)): Test mode
- Name (str): Test name
Returns
-------
- self: This instance with matching rfc2889broadcastRate resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of rfc2889broadcastRate data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the rfc2889broadcastRate resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
Generate a PDF report for the last succesfull test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
| 43.888421 | 321 | 0.648822 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Rfc2889broadcastRate(Base):
    """Generated IxNetwork REST wrapper for the rfc2889broadcastRate Quick Test resource.

    Exposes lazily-imported child resources (LearnFrames, PassCriteria, Results,
    TestConfig, TrafficSelection), the test's attributes, CRUD helpers
    (update/add/remove/find/read), and the server-side test operations
    (Apply/Run/Start/Stop/...), all delegating to the Base REST plumbing.
    """
    __slots__ = ()
    _SDM_NAME = 'rfc2889broadcastRate'
    # Maps the python attribute names to the server-side (SDM) attribute names.
    _SDM_ATT_MAP = {
        'ForceApplyQTConfig': 'forceApplyQTConfig',
        'InputParameters': 'inputParameters',
        'Mode': 'mode',
        'Name': 'name',
    }
    # Allowed enum values for enum-typed SDM attributes.
    _SDM_ENUM_MAP = {
        'mode': ['existingMode', 'newMode'],
    }
    def __init__(self, parent, list_op=False):
        super(Rfc2889broadcastRate, self).__init__(parent, list_op)
    @property
    def LearnFrames(self):
        # Child resource accessor; imported lazily to keep module import cost low.
        # Returns a cached instance when available, otherwise selects from the server.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_f6015f7ddc2fc53013cae8906d006afd import LearnFrames
        if self._properties.get('LearnFrames', None) is not None:
            return self._properties.get('LearnFrames')
        else:
            return LearnFrames(self)._select()
    @property
    def PassCriteria(self):
        # Child resource accessor (see LearnFrames for the caching pattern).
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_1568efcb71d423db7b9caee1463792cd import PassCriteria
        if self._properties.get('PassCriteria', None) is not None:
            return self._properties.get('PassCriteria')
        else:
            return PassCriteria(self)._select()
    @property
    def Results(self):
        # Child resource accessor (see LearnFrames for the caching pattern).
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_f711c71e6809173dc065c2cc804decec import Results
        if self._properties.get('Results', None) is not None:
            return self._properties.get('Results')
        else:
            return Results(self)._select()
    @property
    def TestConfig(self):
        # Child resource accessor (see LearnFrames for the caching pattern).
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_27bc595dc7175d4d0737241a72260e04 import TestConfig
        if self._properties.get('TestConfig', None) is not None:
            return self._properties.get('TestConfig')
        else:
            return TestConfig(self)._select()
    @property
    def TrafficSelection(self):
        # NOTE: unlike the other child accessors this returns the wrapper without
        # calling _select() — presumably TrafficSelection is a list resource.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_f387831939c28a776a58c26afacaf51c import TrafficSelection
        if self._properties.get('TrafficSelection', None) is not None:
            return self._properties.get('TrafficSelection')
        else:
            return TrafficSelection(self)
    @property
    def ForceApplyQTConfig(self):
        # bool-like SDM attribute 'forceApplyQTConfig'.
        return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
    @ForceApplyQTConfig.setter
    def ForceApplyQTConfig(self, value):
        self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
    @property
    def InputParameters(self):
        # SDM attribute 'inputParameters': input arguments of the test.
        return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
    @InputParameters.setter
    def InputParameters(self, value):
        self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
    @property
    def Mode(self):
        # Enum SDM attribute 'mode'; valid values listed in _SDM_ENUM_MAP.
        return self._get_attribute(self._SDM_ATT_MAP['Mode'])
    @Mode.setter
    def Mode(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
    @property
    def Name(self):
        # SDM attribute 'name'.
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # Update the resource on the server with the non-None locals.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # Create a new resource instance on the server.
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        # Delete this resource instance on the server.
        self._delete()
    def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # Select matching resource instances from the server.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        # Fetch a resource instance directly by its href.
        return self._read(href)
    def Apply(self, *args, **kwargs):
        # Server-side 'apply' operation; positional args map to Arg2, Arg3, ...
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('apply', payload=payload, response_object=None)
    def ApplyAsync(self, *args, **kwargs):
        # Server-side 'applyAsync' operation.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyAsync', payload=payload, response_object=None)
    def ApplyAsyncResult(self, *args, **kwargs):
        # Server-side 'applyAsyncResult' operation.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyAsyncResult', payload=payload, response_object=None)
    def ApplyITWizardConfiguration(self, *args, **kwargs):
        # Server-side 'applyITWizardConfiguration' operation: applies the Quick Test.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
    def GenerateReport(self, *args, **kwargs):
        # Server-side 'generateReport' operation: PDF report of the last run.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('generateReport', payload=payload, response_object=None)
    def Run(self, *args, **kwargs):
        # Server-side 'run' operation: starts the test and waits for completion.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('run', payload=payload, response_object=None)
    def Start(self, *args, **kwargs):
        # Server-side 'start' operation.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('start', payload=payload, response_object=None)
    def Stop(self, *args, **kwargs):
        # Server-side 'stop' operation.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('stop', payload=payload, response_object=None)
    def WaitForTest(self, *args, **kwargs):
        # Server-side 'waitForTest' operation: waits for the test result.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('waitForTest', payload=payload, response_object=None)
| true | true |
f732a7f1402ea43dd9abb61ba68ef9d3a1892d30 | 1,392 | py | Python | cube_model.py | jasonrute/puzzle_cube_code | cf0238bc333d55e3637a4a6a4f408d16d4e14418 | [
"MIT"
] | 2 | 2020-11-12T06:41:44.000Z | 2022-02-27T13:50:38.000Z | cube_model.py | jasonrute/puzzle_cube_code | cf0238bc333d55e3637a4a6a4f408d16d4e14418 | [
"MIT"
] | null | null | null | cube_model.py | jasonrute/puzzle_cube_code | cf0238bc333d55e3637a4a6a4f408d16d4e14418 | [
"MIT"
] | 2 | 2018-05-22T02:40:23.000Z | 2018-07-28T11:14:41.000Z | """
A user-facing wrapper around the neural network models for solving the cube.
"""
import models
from typing import Optional
class CubeModel:
    """User-facing wrapper around the neural network models for solving the cube."""

    _model = None  # type: Optional[models.BaseModel]

    def __init__(self):
        pass

    def load_from_config(self, filepath: Optional[str] = None) -> None:
        """
        Build a model from the config file settings.

        :param filepath: Optional string to filepath of model weights.
                         If None (default) then it will load based on the config file.
        :raises NotImplementedError: if filepath is None (this branch is not implemented yet).
        """
        import config

        if filepath is None:
            # Placeholder: deriving the weights path from config is not wired up yet.
            # Raise (rather than assert) so the failure survives `python -O`.
            raise NotImplementedError("Fill in this branch")
        self.load(config.model_type, filepath, **config.model_kwargs)

    def load(self, model_type: str, filepath: str, **kwargs) -> None:
        """
        Build a model.

        :param model_type: The name of the model class in models.py
        :param filepath: The path to the model weights.
        :param kwargs: Keyword arguments for initializing the model class (the one given by model_type).
        """
        model_constructor = models.__dict__[model_type]  # get model class by name
        self._model = model_constructor(**kwargs)
        self._model.build()
        self._model.load_from_file(filepath)

    def _function(self):
        """Return the loaded model's prediction function; asserts a model is loaded."""
        assert self._model is not None, "No model loaded"
        return self._model.function
| 31.636364 | 106 | 0.637213 |
import models
from typing import Optional
class CubeModel:
    """Thin wrapper hiding the underlying neural-network model behind a small API."""

    _model = None  # holds a models.BaseModel instance once load() succeeds

    def __init__(self):
        pass

    def load_from_config(self, filepath: Optional[str] = None) -> ():
        """Build a model using the config module's settings; weights path may be given explicitly."""
        import config

        if filepath is None:
            assert False, "Fill in this branch"
        self.load(config.model_type, filepath, **config.model_kwargs)

    def load(self, model_type: str, filepath: str, **kwargs) -> ():
        """Instantiate the named model class from models.py, build it, and load its weights."""
        cls_obj = models.__dict__[model_type]  # resolve the model class by its name
        self._model = cls_obj(**kwargs)
        self._model.build()
        self._model.load_from_file(filepath)

    def _function(self):
        """Return the model's prediction function; a model must already be loaded."""
        assert self._model is not None, "No model loaded"
        return self._model.function
| true | true |
f732a8a11f50492a52b576b04ef8f79e805f1093 | 51,156 | py | Python | flax/timelord/timelord.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 174 | 2021-06-16T17:49:22.000Z | 2022-03-17T03:03:17.000Z | flax/timelord/timelord.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 49 | 2021-06-17T14:10:53.000Z | 2022-01-31T11:04:21.000Z | flax/timelord/timelord.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 80 | 2021-06-17T14:23:31.000Z | 2022-02-24T05:52:47.000Z | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from flax.consensus.constants import ConsensusConstants
from flax.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from flax.protocols import timelord_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.outbound_message import NodeType, make_msg
from flax.server.server import FlaxServer
from flax.timelord.iters_from_block import iters_from_block
from flax.timelord.timelord_state import LastState
from flax.timelord.types import Chain, IterationType, StateType
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class Timelord:
    def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
        """Initialize all timelord bookkeeping; no I/O happens until _start()."""
        self.config = config
        self.root_path = root_path
        self.constants = constants
        self._shut_down = False
        # Connected vdf_client processes not yet assigned to a chain.
        self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
        # (ip, timestamp) pairs of clients asked to stop that should reconnect shortly.
        self.potential_free_clients: List = []
        self.ip_whitelist = self.config["vdf_clients"]["ip"]
        self.server: Optional[FlaxServer] = None
        # Which vdf_client (ip, reader, writer) currently drives each chain.
        self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
        self.chain_start_time: Dict = {}
        # Chains that currently don't have a vdf_client.
        self.unspawned_chains: List[Chain] = [
            Chain.CHALLENGE_CHAIN,
            Chain.REWARD_CHAIN,
            Chain.INFUSED_CHALLENGE_CHAIN,
        ]
        # Chains that currently accept iterations.
        self.allows_iters: List[Chain] = []
        # Last peak received, None if it's already processed.
        self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
        # Last end of subslot bundle, None if we built a peak on top of it.
        self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
        # Last state received. Can either be a new peak or a new EndOfSubslotBundle.
        # Unfinished block info, iters adjusted to the last peak.
        self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
        # Signage points iters, adjusted to the last peak.
        self.signage_point_iters: List[Tuple[uint64, uint8]] = []
        # For each chain, send those info when the process spawns.
        self.iters_to_submit: Dict[Chain, List[uint64]] = {}
        self.iters_submitted: Dict[Chain, List[uint64]] = {}
        self.iters_finished: Set = set()
        # For each iteration submitted, know if it's a signage point, an infusion point or an end of slot.
        self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
        # List of proofs finished.
        self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
        # Data to send at vdf_client initialization.
        self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
        # Incremented each time `reset_chains` has been called.
        # Used to label proofs in `finished_proofs` and to only filter proofs corresponding to the most recent state.
        self.num_resets: int = 0
        self.process_communication_tasks: List[asyncio.Task] = []
        self.main_loop = None
        self.vdf_server = None
        self._shut_down = False
        # Crash bookkeeping for vdf_client processes.
        self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
        self.vdf_failures_count: int = 0
        self.vdf_failure_time: float = 0
        # Counters used to log the infusion rate in _handle_new_peak.
        self.total_unfinished: int = 0
        self.total_infused: int = 0
        self.state_changed_callback: Optional[Callable] = None
        # Sanitizer (bluebox) timelords only compact existing proofs of time.
        self.sanitizer_mode = self.config["sanitizer_mode"]
        self.pending_bluebox_info: List[Tuple[float, timelord_protocol.RequestCompactProofOfTime]] = []
        self.last_active_time = time.time()
    async def _start(self):
        """Start the timelord: open the local server for vdf_clients and launch the main loop."""
        self.lock: asyncio.Lock = asyncio.Lock()
        # TCP server that vdf_client processes connect to (handled by _handle_client).
        self.vdf_server = await asyncio.start_server(
            self._handle_client,
            self.config["vdf_server"]["host"],
            self.config["vdf_server"]["port"],
        )
        self.last_state: LastState = LastState(self.constants)
        if not self.sanitizer_mode:
            self.main_loop = asyncio.create_task(self._manage_chains())
        else:
            # Sanitizer (bluebox) mode only processes compact-proof requests.
            self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
        log.info("Started timelord.")
def _close(self):
self._shut_down = True
for task in self.process_communication_tasks:
task.cancel()
if self.main_loop is not None:
self.main_loop.cancel()
    async def _await_closed(self):
        # No asynchronous cleanup is required here; _close() already cancels all tasks.
        pass
    def set_server(self, server: FlaxServer):
        # Store the server reference used later to broadcast VDFs to full nodes.
        self.server = server
async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
async with self.lock:
client_ip = writer.get_extra_info("peername")[0]
log.debug(f"New timelord connection from client: {client_ip}.")
if client_ip in self.ip_whitelist:
self.free_clients.append((client_ip, reader, writer))
log.debug(f"Added new VDF client {client_ip}.")
for ip, end_time in list(self.potential_free_clients):
if ip == client_ip:
self.potential_free_clients.remove((ip, end_time))
break
    async def _stop_chain(self, chain: Chain):
        """Tell the vdf_client driving `chain` to stop and return it to the free pool.

        Assumes self.lock is held by the caller (it is released/re-acquired while
        waiting for the chain to finish initialization — confirm at call sites).
        """
        try:
            # Wait until the chain's client has initialized (appears in allows_iters);
            # release the lock while sleeping so the communication task can progress.
            while chain not in self.allows_iters:
                self.lock.release()
                await asyncio.sleep(0.05)
                log.error(f"Trying to stop {chain} before its initialization.")
                await self.lock.acquire()
            if chain not in self.chain_type_to_stream:
                log.warning(f"Trying to stop a crashed chain: {chain}.")
                return None
            stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
            # Expect this client to reconnect soon; remember it with a timestamp.
            self.potential_free_clients.append((stop_ip, time.time()))
            # b"010" — presumably the stop command of the vdf_client wire protocol; verify against vdf_client.
            stop_writer.write(b"010")
            await stop_writer.drain()
            if chain in self.allows_iters:
                self.allows_iters.remove(chain)
            if chain not in self.unspawned_chains:
                self.unspawned_chains.append(chain)
            if chain in self.chain_type_to_stream:
                del self.chain_type_to_stream[chain]
        except ConnectionResetError as e:
            log.error(f"{e}")
    def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
        """Return the number of iterations past the current peak at which `block` can
        be infused, or None if it cannot be infused on top of the current state."""
        assert self.last_state is not None
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        difficulty = self.last_state.get_difficulty()
        ip_iters = self.last_state.get_last_ip()
        rc_block = block.reward_chain_block
        try:
            block_sp_iters, block_ip_iters = iters_from_block(
                self.constants,
                rc_block,
                sub_slot_iters,
                difficulty,
            )
        except Exception as e:
            log.warning(f"Received invalid unfinished block: {e}.")
            return None
        # Absolute (total) iterations of the block's signage point.
        block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
        if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
            # Overflow blocks have their signage point in the previous sub-slot.
            block_sp_total_iters -= self.last_state.get_sub_slot_iters()
        # Locate the block's previous reward-chain challenge in our recent-chain cache.
        found_index = -1
        for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
            if rc == block.rc_prev:
                found_index = index
                break
        if found_index == -1:
            log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
            return None
        if ip_iters > block_ip_iters:
            log.warning("Too late to infuse block")
            return None
        new_block_iters = uint64(block_ip_iters - ip_iters)
        if len(self.last_state.reward_challenge_cache) > found_index + 1:
            # There is at least one infusion after the block's prev challenge.
            if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
                log.warning(
                    f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
                    f"because there is another infusion before its SP"
                )
                return None
            if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
                # Overflow blocks legitimately have an SP before their prev infusion.
                if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
                    log.error(
                        f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
                        f"because its iters are too low"
                    )
                return None
        if new_block_iters > 0:
            return new_block_iters
        return None
    async def _reset_chains(self, first_run=False, only_eos=False):
        """Stop all chains and recompute every iteration target (signage points,
        infusion points, end-of-subslot) relative to the current peak."""
        # First, stop all chains.
        self.last_active_time = time.time()
        log.debug("Resetting chains")
        ip_iters = self.last_state.get_last_ip()
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        if not first_run:
            for chain in list(self.chain_type_to_stream.keys()):
                await self._stop_chain(chain)
        # Adjust all signage points iterations to the peak.
        iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
        self.signage_point_iters = [
            (k * iters_per_signage - ip_iters, k)
            for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
            if k * iters_per_signage - ip_iters > 0
        ]
        for sp, k in self.signage_point_iters:
            assert k * iters_per_signage > 0
            assert k * iters_per_signage < sub_slot_iters
        # Adjust all unfinished blocks iterations to the peak.
        new_unfinished_blocks = []
        self.iters_finished = set()
        self.proofs_finished = []
        # Bump the label so stale proofs from the previous state are ignored.
        self.num_resets += 1
        for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
            self.iters_to_submit[chain] = []
            self.iters_submitted[chain] = []
        self.iteration_to_proof_type = {}
        if not only_eos:
            for block in self.unfinished_blocks + self.overflow_blocks:
                new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
                # Does not add duplicates, or blocks that we cannot infuse
                if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
                    if block not in self.unfinished_blocks:
                        self.total_unfinished += 1
                    new_unfinished_blocks.append(block)
                    for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
                        self.iters_to_submit[chain].append(new_block_iters)
                    if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
                    self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
        # Remove all unfinished blocks that have already passed.
        self.unfinished_blocks = new_unfinished_blocks
        # Signage points.
        if not only_eos and len(self.signage_point_iters) > 0:
            count_signage = 0
            for signage, k in self.signage_point_iters:
                for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                    self.iters_to_submit[chain].append(signage)
                self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
                count_signage += 1
                # Only submit the next 3 signage points up-front.
                if count_signage == 3:
                    break
        left_subslot_iters = sub_slot_iters - ip_iters
        assert left_subslot_iters > 0
        if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
            self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
        self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
        for chain, iters in self.iters_to_submit.items():
            for iteration in iters:
                assert iteration > 0
    async def _handle_new_peak(self):
        """Apply a newly received peak: update state, retire the now-infused
        unfinished block, log the infusion rate, and reset the chains."""
        assert self.new_peak is not None
        self.last_state.set_state(self.new_peak)
        if self.total_unfinished > 0:
            remove_unfinished = []
            for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
                if (
                    unf_block_timelord.reward_chain_block.get_hash()
                    == self.new_peak.reward_chain_block.get_unfinished().get_hash()
                ):
                    if unf_block_timelord not in self.unfinished_blocks:
                        # We never got the EOS for this, but we have the block in overflow list
                        self.total_unfinished += 1
                    remove_unfinished.append(unf_block_timelord)
            if len(remove_unfinished) > 0:
                self.total_infused += 1
            for block in remove_unfinished:
                if block in self.unfinished_blocks:
                    self.unfinished_blocks.remove(block)
                if block in self.overflow_blocks:
                    self.overflow_blocks.remove(block)
            infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
            log.info(
                f"Total unfinished blocks: {self.total_unfinished}. "
                f"Total infused blocks: {self.total_infused}. "
                f"Infusion rate: {infusion_rate}%."
            )
        self.new_peak = None
        await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
    async def _map_chains_with_vdf_clients(self):
        """Assign free vdf_clients to unspawned chains and spawn a communication task each.

        Loops until there are no free clients or no startable chain (one whose
        challenge and initial form are both known).
        """
        while not self._shut_down:
            picked_chain = None
            async with self.lock:
                if len(self.free_clients) == 0:
                    break
                ip, reader, writer = self.free_clients[0]
                for chain_type in self.unspawned_chains:
                    challenge = self.last_state.get_challenge(chain_type)
                    initial_form = self.last_state.get_initial_form(chain_type)
                    if challenge is not None and initial_form is not None:
                        picked_chain = chain_type
                        break
                if picked_chain is None:
                    break
                # NOTE(review): this overwrites the chain found above with the first
                # unspawned chain; `challenge`/`initial_form` still belong to the loop's
                # last match — presumably the two coincide in practice. Confirm.
                picked_chain = self.unspawned_chains[0]
                self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
                self.free_clients = self.free_clients[1:]
                self.unspawned_chains = self.unspawned_chains[1:]
                self.chain_start_time[picked_chain] = time.time()
            log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
            # `challenge`, `initial_form`, ip/reader/writer leak out of the locked block above.
            self.process_communication_tasks.append(
                asyncio.create_task(
                    self._do_process_communication(
                        picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
                    )
                )
            )
async def _submit_iterations(self):
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
if chain in self.allows_iters:
_, _, writer = self.chain_type_to_stream[chain]
for iteration in self.iters_to_submit[chain]:
if iteration in self.iters_submitted[chain]:
continue
log.debug(f"Submitting iterations to {chain}: {iteration}")
assert iteration > 0
prefix = str(len(str(iteration)))
if len(str(iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(iteration)
writer.write(iter_str.encode())
await writer.drain()
self.iters_submitted[chain].append(iteration)
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
    async def _check_for_new_sp(self, iter_to_look_for: uint64):
        """If the signage point at `iter_to_look_for` has both its CC and RC proofs
        finished, broadcast it to all full nodes and queue the next signage points."""
        signage_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
        ]
        if len(signage_iters) == 0:
            return None
        to_remove = []
        for potential_sp_iters, signage_point_index in self.signage_point_iters:
            if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
                continue
            signage_iter = potential_sp_iters
            # Only consider proofs produced since the most recent reset (matching label).
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == signage_iter and label == self.num_resets
            ]
            # Wait for both cc and rc to have the signage point.
            if len(proofs_with_iter) == 2:
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient signage point data {signage_iter}")
                    continue
                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
                    # This proof is on an outdated challenge, so don't use it
                    continue
                # VDF iterations are counted from the peak; the protocol wants them
                # from the sub-slot start.
                iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
                response = timelord_protocol.NewSignagePointVDF(
                    signage_point_index,
                    dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                    cc_proof,
                    rc_info,
                    rc_proof,
                )
                if self.server is not None:
                    msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                # Cleanup the signage point from memory.
                to_remove.append((signage_iter, signage_point_index))
                self.proofs_finished = self._clear_proof_list(signage_iter)
                # Send the next 3 signage point to the chains.
                next_iters_count = 0
                for next_sp, k in self.signage_point_iters:
                    for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                        if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                            self.iters_to_submit[chain].append(next_sp)
                    self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                    next_iters_count += 1
                    if next_iters_count == 3:
                        break
                # Break so we alternate between checking SP and IP
                break
        for r in to_remove:
            self.signage_point_iters.remove(r)
    async def _check_for_new_ip(self, iter_to_look_for: uint64) -> None:
        """Broadcast a new infusion point VDF once all chains reach `iter_to_look_for`.

        Matches the finished proofs (CC, RC and, when an infused challenge chain
        exists, ICC) against the unfinished block whose infusion iteration equals
        `iter_to_look_for`, sends a NewInfusionPointVDF to all full nodes, then
        builds the next NewPeakTimelord (new reward chain block, deficit,
        sub-epoch summary) and applies it via `_handle_new_peak()`.
        """
        if len(self.unfinished_blocks) == 0:
            return None
        # Iterations that were registered as infusion points.
        infusion_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
        ]
        for iteration in infusion_iters:
            if iteration != iter_to_look_for:
                continue
            # Only proofs produced after the most recent reset (matching label) count.
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == iteration and label == self.num_resets
            ]
            # Expect one proof per running chain: CC + RC, plus ICC when present.
            if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
                chain_count = 3
            else:
                chain_count = 2
            if len(proofs_with_iter) == chain_count:
                # Find the unfinished block whose infusion point lands exactly here.
                block = None
                ip_iters = None
                for unfinished_block in self.unfinished_blocks:
                    try:
                        _, ip_iters = iters_from_block(
                            self.constants,
                            unfinished_block.reward_chain_block,
                            self.last_state.get_sub_slot_iters(),
                            self.last_state.get_difficulty(),
                        )
                    except Exception as e:
                        log.error(f"Error {e}")
                        continue
                    if ip_iters - self.last_state.get_last_ip() == iteration:
                        block = unfinished_block
                        break
                assert ip_iters is not None
                if block is not None:
                    ip_total_iters = self.last_state.get_total_iters() + iteration
                    challenge = block.reward_chain_block.get_hash()
                    # Sort the finished proofs by chain.
                    icc_info: Optional[VDFInfo] = None
                    icc_proof: Optional[VDFProof] = None
                    cc_info: Optional[VDFInfo] = None
                    cc_proof: Optional[VDFProof] = None
                    rc_info: Optional[VDFInfo] = None
                    rc_proof: Optional[VDFProof] = None
                    for chain, info, proof in proofs_with_iter:
                        if chain == Chain.CHALLENGE_CHAIN:
                            cc_info = info
                            cc_proof = proof
                        if chain == Chain.REWARD_CHAIN:
                            rc_info = info
                            rc_proof = proof
                        if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                            icc_info = info
                            icc_proof = proof
                    if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                        log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                        return None
                    rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                    if rc_info.challenge != rc_challenge:
                        assert rc_challenge is not None
                        log.warning(
                            f"Do not have correct challenge {rc_challenge.hex()} "
                            f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                        )
                        # This proof is on an outdated challenge, so don't use it
                        continue
                    self.iters_finished.add(iter_to_look_for)
                    self.last_active_time = time.time()
                    log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                    overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                    if not self.last_state.can_infuse_block(overflow):
                        log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                        return None
                    # Rewrite the CC VDF to carry the block's ip_iters
                    # (presumably iterations from sub-slot start, as computed by
                    # iters_from_block — TODO confirm).
                    cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                    response = timelord_protocol.NewInfusionPointVDF(
                        challenge,
                        cc_info,
                        cc_proof,
                        rc_info,
                        rc_proof,
                        icc_info,
                        icc_proof,
                    )
                    msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                    if self.server is not None:
                        await self.server.send_to_all([msg], NodeType.FULL_NODE)
                    # Drop all proofs at this iteration; they have been consumed.
                    self.proofs_finished = self._clear_proof_list(iteration)
                    if (
                        self.last_state.get_last_block_total_iters() is None
                        and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                    ):
                        # We don't know when the last block was, so we can't make peaks
                        return None
                    sp_total_iters = (
                        ip_total_iters
                        - ip_iters
                        + calculate_sp_iters(
                            self.constants,
                            block.sub_slot_iters,
                            block.reward_chain_block.signage_point_index,
                        )
                        - (block.sub_slot_iters if overflow else 0)
                    )
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        is_transaction_block = True
                        height: uint32 = uint32(0)
                    else:
                        last_block_ti = self.last_state.get_last_block_total_iters()
                        assert last_block_ti is not None
                        is_transaction_block = last_block_ti < sp_total_iters
                        height = uint32(self.last_state.get_height() + 1)
                    if height < 5:
                        # Don't directly update our state for the first few blocks, because we cannot validate
                        # whether the pre-farm is correct
                        return None
                    new_reward_chain_block = RewardChainBlock(
                        uint128(self.last_state.get_weight() + block.difficulty),
                        height,
                        uint128(ip_total_iters),
                        block.reward_chain_block.signage_point_index,
                        block.reward_chain_block.pos_ss_cc_challenge_hash,
                        block.reward_chain_block.proof_of_space,
                        block.reward_chain_block.challenge_chain_sp_vdf,
                        block.reward_chain_block.challenge_chain_sp_signature,
                        cc_info,
                        block.reward_chain_block.reward_chain_sp_vdf,
                        block.reward_chain_block.reward_chain_sp_signature,
                        rc_info,
                        icc_info,
                        is_transaction_block,
                    )
                    # Compute the deficit carried by the new peak.
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        # Genesis
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                    elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        if self.last_state.peak is not None:
                            assert self.last_state.subslot_end is None
                            # This means the previous block is also an overflow block, and did not manage
                            # to lower the deficit, therefore we cannot lower it either. (new slot)
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                        else:
                            # This means we are the first infusion in this sub-slot. This may be a new slot or not.
                            assert self.last_state.subslot_end is not None
                            if self.last_state.subslot_end.infused_challenge_chain is None:
                                # There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                            else:
                                # There is an ICC, which means we are finishing a slot. Different slot, so can't change
                                # the deficit
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        new_deficit = max(self.last_state.deficit - 1, 0)
                    if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                        last_csb_or_eos = ip_total_iters
                    else:
                        last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
                    if self.last_state.just_infused_sub_epoch_summary():
                        new_sub_epoch_summary = None
                        passed_ses_height_but_not_yet_included = False
                    else:
                        new_sub_epoch_summary = block.sub_epoch_summary
                        if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                            passed_ses_height_but_not_yet_included = True
                        else:
                            passed_ses_height_but_not_yet_included = (
                                self.last_state.get_passed_ses_height_but_not_yet_included()
                            )
                    self.new_peak = timelord_protocol.NewPeakTimelord(
                        new_reward_chain_block,
                        block.difficulty,
                        uint8(new_deficit),
                        block.sub_slot_iters,
                        new_sub_epoch_summary,
                        self.last_state.reward_challenge_cache,
                        uint128(last_csb_or_eos),
                        passed_ses_height_but_not_yet_included,
                    )
                    await self._handle_new_peak()
            # Break so we alternate between checking SP and IP
            break
    async def _check_for_end_of_subslot(self, iter_to_look_for: uint64) -> None:
        """Build and broadcast the EndOfSubSlotBundle once the end-of-subslot
        proofs for `iter_to_look_for` have all finished.

        Collects the CC, RC and (when an infused challenge chain exists) ICC
        proofs, re-bases their iteration counts, assembles the challenge /
        infused-challenge / reward sub-slot structures plus an optional
        sub-epoch summary, sends the bundle to all full nodes, and applies the
        new sub-slot state via `_handle_subslot_end()`.
        """
        left_subslot_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
        ]
        if len(left_subslot_iters) == 0:
            return None
        if left_subslot_iters[0] != iter_to_look_for:
            return None
        # Only proofs produced after the most recent reset (matching label) count.
        chains_finished = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
        ]
        # Expect one proof per running chain: CC + RC, plus ICC when present.
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(chains_finished) == chain_count:
            # Sort the finished proofs by chain.
            icc_ip_vdf: Optional[VDFInfo] = None
            icc_ip_proof: Optional[VDFProof] = None
            cc_vdf: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_vdf: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in chains_finished:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_vdf = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_vdf = info
                    rc_proof = proof
                if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                    icc_ip_vdf = info
                    icc_ip_proof = proof
            assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
            rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
            if rc_vdf.challenge != rc_challenge:
                assert rc_challenge is not None
                log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
                # This proof is on an outdated challenge, so don't use it
                return None
            log.debug("Collected end of subslot vdfs.")
            self.iters_finished.add(iter_to_look_for)
            self.last_active_time = time.time()
            # Re-base the CC VDF to count from the start of the sub-slot.
            iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
            cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
            if icc_ip_vdf is not None:
                # Re-base the ICC VDF to count from the last challenge block / EOS.
                if self.last_state.peak is not None:
                    total_iters = (
                        self.last_state.get_total_iters()
                        - self.last_state.get_last_ip()
                        + self.last_state.get_sub_slot_iters()
                    )
                else:
                    total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
                iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
                if iters_from_cb > self.last_state.sub_slot_iters:
                    log.error(f"{self.last_state.peak}")
                    log.error(f"{self.last_state.subslot_end}")
                    assert False
                assert iters_from_cb <= self.last_state.sub_slot_iters
                icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
            icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
                None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
            )
            if self.last_state.get_deficit() == 0:
                assert icc_sub_slot is not None
                icc_sub_slot_hash = icc_sub_slot.get_hash()
            else:
                icc_sub_slot_hash = None
            # A pending sub-epoch summary may carry new difficulty / sub-slot iters.
            next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
            if next_ses is not None:
                log.info(f"Including sub epoch summary{next_ses}")
                ses_hash = next_ses.get_hash()
                new_sub_slot_iters = next_ses.new_sub_slot_iters
                new_difficulty = next_ses.new_difficulty
            else:
                ses_hash = None
                new_sub_slot_iters = None
                new_difficulty = None
            cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
            eos_deficit: uint8 = (
                self.last_state.get_deficit()
                if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
                else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
            )
            rc_sub_slot = RewardChainSubSlot(
                rc_vdf,
                cc_sub_slot.get_hash(),
                icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
                eos_deficit,
            )
            eos_bundle = EndOfSubSlotBundle(
                cc_sub_slot,
                icc_sub_slot,
                rc_sub_slot,
                SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
            )
            if self.server is not None:
                msg = make_msg(
                    ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                    timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            log.info(
                f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
                f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
            )
            if next_ses is None or next_ses.new_difficulty is None:
                self.unfinished_blocks = self.overflow_blocks.copy()
            else:
                # No overflow blocks in a new epoch
                self.unfinished_blocks = []
            self.overflow_blocks = []
            self.new_subslot_end = eos_bundle
            await self._handle_subslot_end()
async def _handle_failures(self):
if len(self.vdf_failures) > 0:
# This can happen if one of the VDF processes has an issue. In this case, we abort all other
# infusion points and signage points, and go straight to the end of slot, so we avoid potential
# issues with the number of iterations that failed.
failed_chain, proof_label = self.vdf_failures[0]
log.error(
f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
f"label {proof_label}, current: {self.num_resets}"
)
if proof_label == self.num_resets:
await self._reset_chains(only_eos=True)
self.vdf_failure_time = time.time()
self.vdf_failures = []
# If something goes wrong in the VDF client due to a failed thread, we might get stuck in a situation where we
# are waiting for that client to finish. Usually other peers will finish the VDFs and reset us. In the case that
# there are no other timelords, this reset should bring the timelord back to a running state.
if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
# If we have recently had a failure, allow some more time to finish the slot (we can be up to 3x slower)
active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
else:
# If there were no failures recently trigger a reset after 60 seconds of no activity.
# Signage points should be every 9 seconds
active_time_threshold = 60
if time.time() - self.last_active_time > active_time_threshold:
log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
await self._reset_chains()
async def _manage_chains(self):
async with self.lock:
await asyncio.sleep(5)
await self._reset_chains(True)
while not self._shut_down:
try:
await asyncio.sleep(0.1)
async with self.lock:
await self._handle_failures()
# We've got a new peak, process it.
if self.new_peak is not None:
await self._handle_new_peak()
# Map free vdf_clients to unspawned chains.
await self._map_chains_with_vdf_clients()
async with self.lock:
# Submit pending iterations.
await self._submit_iterations()
not_finished_iters = [
it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
]
if len(not_finished_iters) == 0:
await asyncio.sleep(0.1)
continue
selected_iter = min(not_finished_iters)
# Check for new infusion point and broadcast it if present.
await self._check_for_new_ip(selected_iter)
# Check for new signage point and broadcast it if present.
await self._check_for_new_sp(selected_iter)
# Check for end of subslot, respawn chains and build EndOfSubslotBundle.
await self._check_for_end_of_subslot(selected_iter)
except Exception:
tb = traceback.format_exc()
log.error(f"Error while handling message: {tb}")
async def _do_process_communication(
self,
chain: Chain,
challenge: bytes32,
initial_form: ClassgroupElement,
ip: str,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
# Data specific only when running in bluebox mode.
bluebox_iteration: Optional[uint64] = None,
header_hash: Optional[bytes32] = None,
height: Optional[uint32] = None,
field_vdf: Optional[uint8] = None,
# Labels a proof to the current state only
proof_label: Optional[int] = None,
):
disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
try:
# Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
# the timelord tells the vdf_client what to execute.
async with self.lock:
if self.sanitizer_mode:
writer.write(b"S")
else:
if self.config["fast_algorithm"]:
# Run n-wesolowski (fast) algorithm.
writer.write(b"N")
else:
# Run two-wesolowski (slow) algorithm.
writer.write(b"T")
await writer.drain()
prefix = str(len(str(disc)))
if len(prefix) == 1:
prefix = "00" + prefix
if len(prefix) == 2:
prefix = "0" + prefix
async with self.lock:
writer.write((prefix + str(disc)).encode())
await writer.drain()
# Send initial_form prefixed with its length.
async with self.lock:
writer.write(bytes([len(initial_form.data)]) + initial_form.data)
await writer.drain()
try:
ok = await reader.readexactly(2)
except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
return None
if ok.decode() != "OK":
return None
log.debug("Got handshake with VDF client.")
if not self.sanitizer_mode:
async with self.lock:
self.allows_iters.append(chain)
else:
async with self.lock:
assert chain is Chain.BLUEBOX
assert bluebox_iteration is not None
prefix = str(len(str(bluebox_iteration)))
if len(str(bluebox_iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(bluebox_iteration)
writer.write(iter_str.encode())
await writer.drain()
# Listen to the client until "STOP" is received.
while True:
try:
data = await reader.readexactly(4)
except (
asyncio.IncompleteReadError,
ConnectionResetError,
Exception,
) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
break
msg = ""
try:
msg = data.decode()
except Exception:
pass
if msg == "STOP":
log.debug(f"Stopped client running on ip {ip}.")
async with self.lock:
writer.write(b"ACK")
await writer.drain()
break
else:
try:
# This must be a proof, 4 bytes is length prefix
length = int.from_bytes(data, "big")
proof = await reader.readexactly(length)
stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
except (
asyncio.IncompleteReadError,
ConnectionResetError,
Exception,
) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
break
iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
y_size_bytes = stdout_bytes_io.read(8)
y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
y_bytes = stdout_bytes_io.read(y_size)
witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
proof_bytes: bytes = stdout_bytes_io.read()
# Verifies our own proof just in case
form_size = ClassgroupElement.get_size(self.constants)
output = ClassgroupElement.from_bytes(y_bytes[:form_size])
if not self.sanitizer_mode:
time_taken = time.time() - self.chain_start_time[chain]
ips = int(iterations_needed / time_taken * 10) / 10
log.info(
f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
f" iters, "
f"Estimated IPS: {ips}, Chain: {chain}"
)
vdf_info: VDFInfo = VDFInfo(
challenge,
iterations_needed,
output,
)
vdf_proof: VDFProof = VDFProof(
witness_type,
proof_bytes,
self.sanitizer_mode,
)
if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
log.error("Invalid proof of time!")
if not self.sanitizer_mode:
async with self.lock:
assert proof_label is not None
self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
else:
async with self.lock:
writer.write(b"010")
await writer.drain()
assert header_hash is not None
assert field_vdf is not None
assert height is not None
response = timelord_protocol.RespondCompactProofOfTime(
vdf_info, vdf_proof, header_hash, height, field_vdf
)
if self.server is not None:
message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
await self.server.send_to_all([message], NodeType.FULL_NODE)
except ConnectionResetError as e:
log.debug(f"Connection reset with VDF client {e}")
async def _manage_discriminant_queue_sanitizer(self):
while not self._shut_down:
async with self.lock:
try:
while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
# Select randomly the field_vdf we're creating a compact vdf for.
# This is done because CC_SP and CC_IP are more frequent than
# CC_EOS and ICC_EOS. This guarantees everything is picked uniformly.
target_field_vdf = random.randint(1, 4)
info = next(
(info for info in self.pending_bluebox_info if info[1].field_vdf == target_field_vdf),
None,
)
if info is None:
# Nothing found with target_field_vdf, just pick the first VDFInfo.
info = self.pending_bluebox_info[0]
ip, reader, writer = self.free_clients[0]
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
Chain.BLUEBOX,
info[1].new_proof_of_time.challenge,
ClassgroupElement.get_default_element(),
ip,
reader,
writer,
info[1].new_proof_of_time.number_of_iterations,
info[1].header_hash,
info[1].height,
info[1].field_vdf,
)
)
)
self.pending_bluebox_info.remove(info)
self.free_clients = self.free_clients[1:]
except Exception as e:
log.error(f"Exception manage discriminant queue: {e}")
await asyncio.sleep(0.1)
| 49.378378 | 120 | 0.562221 | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from flax.consensus.constants import ConsensusConstants
from flax.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from flax.protocols import timelord_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.outbound_message import NodeType, make_msg
from flax.server.server import FlaxServer
from flax.timelord.iters_from_block import iters_from_block
from flax.timelord.timelord_state import LastState
from flax.timelord.types import Chain, IterationType, StateType
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class Timelord:
def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
self.config = config
self.root_path = root_path
self.constants = constants
self._shut_down = False
self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
self.potential_free_clients: List = []
self.ip_whitelist = self.config["vdf_clients"]["ip"]
self.server: Optional[FlaxServer] = None
self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
self.chain_start_time: Dict = {}
self.unspawned_chains: List[Chain] = [
Chain.CHALLENGE_CHAIN,
Chain.REWARD_CHAIN,
Chain.INFUSED_CHALLENGE_CHAIN,
]
# Chains that currently accept iterations.
self.allows_iters: List[Chain] = []
# Last peak received, None if it's already processed.
self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
self.signage_point_iters: List[Tuple[uint64, uint8]] = []
self.iters_to_submit: Dict[Chain, List[uint64]] = {}
self.iters_submitted: Dict[Chain, List[uint64]] = {}
self.iters_finished: Set = set()
self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
# List of proofs finished.
self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
# Data to send at vdf_client initialization.
self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Incremented each time `reset_chains` has been called.
# Used to label proofs in `finished_proofs` and to only filter proofs corresponding to the most recent state.
self.num_resets: int = 0
self.process_communication_tasks: List[asyncio.Task] = []
self.main_loop = None
self.vdf_server = None
self._shut_down = False
self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
self.vdf_failures_count: int = 0
self.vdf_failure_time: float = 0
self.total_unfinished: int = 0
self.total_infused: int = 0
self.state_changed_callback: Optional[Callable] = None
self.sanitizer_mode = self.config["sanitizer_mode"]
self.pending_bluebox_info: List[Tuple[float, timelord_protocol.RequestCompactProofOfTime]] = []
self.last_active_time = time.time()
async def _start(self):
self.lock: asyncio.Lock = asyncio.Lock()
self.vdf_server = await asyncio.start_server(
self._handle_client,
self.config["vdf_server"]["host"],
self.config["vdf_server"]["port"],
)
self.last_state: LastState = LastState(self.constants)
if not self.sanitizer_mode:
self.main_loop = asyncio.create_task(self._manage_chains())
else:
self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
log.info("Started timelord.")
def _close(self):
self._shut_down = True
for task in self.process_communication_tasks:
task.cancel()
if self.main_loop is not None:
self.main_loop.cancel()
async def _await_closed(self):
pass
def set_server(self, server: FlaxServer):
self.server = server
async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
async with self.lock:
client_ip = writer.get_extra_info("peername")[0]
log.debug(f"New timelord connection from client: {client_ip}.")
if client_ip in self.ip_whitelist:
self.free_clients.append((client_ip, reader, writer))
log.debug(f"Added new VDF client {client_ip}.")
for ip, end_time in list(self.potential_free_clients):
if ip == client_ip:
self.potential_free_clients.remove((ip, end_time))
break
async def _stop_chain(self, chain: Chain):
try:
while chain not in self.allows_iters:
self.lock.release()
await asyncio.sleep(0.05)
log.error(f"Trying to stop {chain} before its initialization.")
await self.lock.acquire()
if chain not in self.chain_type_to_stream:
log.warning(f"Trying to stop a crashed chain: {chain}.")
return None
stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
self.potential_free_clients.append((stop_ip, time.time()))
stop_writer.write(b"010")
await stop_writer.drain()
if chain in self.allows_iters:
self.allows_iters.remove(chain)
if chain not in self.unspawned_chains:
self.unspawned_chains.append(chain)
if chain in self.chain_type_to_stream:
del self.chain_type_to_stream[chain]
except ConnectionResetError as e:
log.error(f"{e}")
def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
assert self.last_state is not None
sub_slot_iters = self.last_state.get_sub_slot_iters()
difficulty = self.last_state.get_difficulty()
ip_iters = self.last_state.get_last_ip()
rc_block = block.reward_chain_block
try:
block_sp_iters, block_ip_iters = iters_from_block(
self.constants,
rc_block,
sub_slot_iters,
difficulty,
)
except Exception as e:
log.warning(f"Received invalid unfinished block: {e}.")
return None
block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
block_sp_total_iters -= self.last_state.get_sub_slot_iters()
found_index = -1
for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
if rc == block.rc_prev:
found_index = index
break
if found_index == -1:
log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
return None
if ip_iters > block_ip_iters:
log.warning("Too late to infuse block")
return None
new_block_iters = uint64(block_ip_iters - ip_iters)
if len(self.last_state.reward_challenge_cache) > found_index + 1:
if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
log.warning(
f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
f"because there is another infusion before its SP"
)
return None
if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
log.error(
f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
f"because its iters are too low"
)
return None
if new_block_iters > 0:
return new_block_iters
return None
async def _reset_chains(self, first_run=False, only_eos=False):
# First, stop all chains.
self.last_active_time = time.time()
log.debug("Resetting chains")
ip_iters = self.last_state.get_last_ip()
sub_slot_iters = self.last_state.get_sub_slot_iters()
if not first_run:
for chain in list(self.chain_type_to_stream.keys()):
await self._stop_chain(chain)
# Adjust all signage points iterations to the peak.
iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
self.signage_point_iters = [
(k * iters_per_signage - ip_iters, k)
for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
if k * iters_per_signage - ip_iters > 0
]
for sp, k in self.signage_point_iters:
assert k * iters_per_signage > 0
assert k * iters_per_signage < sub_slot_iters
# Adjust all unfinished blocks iterations to the peak.
new_unfinished_blocks = []
self.iters_finished = set()
self.proofs_finished = []
self.num_resets += 1
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
self.iters_to_submit[chain] = []
self.iters_submitted[chain] = []
self.iteration_to_proof_type = {}
if not only_eos:
for block in self.unfinished_blocks + self.overflow_blocks:
new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
# Does not add duplicates, or blocks that we cannot infuse
if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
if block not in self.unfinished_blocks:
self.total_unfinished += 1
new_unfinished_blocks.append(block)
for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
self.iters_to_submit[chain].append(new_block_iters)
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
# Remove all unfinished blocks that have already passed.
self.unfinished_blocks = new_unfinished_blocks
# Signage points.
if not only_eos and len(self.signage_point_iters) > 0:
count_signage = 0
for signage, k in self.signage_point_iters:
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
self.iters_to_submit[chain].append(signage)
self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
count_signage += 1
if count_signage == 3:
break
left_subslot_iters = sub_slot_iters - ip_iters
assert left_subslot_iters > 0
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
for chain, iters in self.iters_to_submit.items():
for iteration in iters:
assert iteration > 0
async def _handle_new_peak(self):
assert self.new_peak is not None
self.last_state.set_state(self.new_peak)
if self.total_unfinished > 0:
remove_unfinished = []
for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
if (
unf_block_timelord.reward_chain_block.get_hash()
== self.new_peak.reward_chain_block.get_unfinished().get_hash()
):
if unf_block_timelord not in self.unfinished_blocks:
# We never got the EOS for this, but we have the block in overflow list
self.total_unfinished += 1
remove_unfinished.append(unf_block_timelord)
if len(remove_unfinished) > 0:
self.total_infused += 1
for block in remove_unfinished:
if block in self.unfinished_blocks:
self.unfinished_blocks.remove(block)
if block in self.overflow_blocks:
self.overflow_blocks.remove(block)
infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
log.info(
f"Total unfinished blocks: {self.total_unfinished}. "
f"Total infused blocks: {self.total_infused}. "
f"Infusion rate: {infusion_rate}%."
)
self.new_peak = None
await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
async def _map_chains_with_vdf_clients(self):
while not self._shut_down:
picked_chain = None
async with self.lock:
if len(self.free_clients) == 0:
break
ip, reader, writer = self.free_clients[0]
for chain_type in self.unspawned_chains:
challenge = self.last_state.get_challenge(chain_type)
initial_form = self.last_state.get_initial_form(chain_type)
if challenge is not None and initial_form is not None:
picked_chain = chain_type
break
if picked_chain is None:
break
picked_chain = self.unspawned_chains[0]
self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
self.free_clients = self.free_clients[1:]
self.unspawned_chains = self.unspawned_chains[1:]
self.chain_start_time[picked_chain] = time.time()
log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
)
)
)
    async def _submit_iterations(self):
        """Send not-yet-submitted iteration targets to the mapped vdf_clients.

        For each running chain that has completed its handshake (present in
        ``allows_iters``), every pending iteration is encoded as a
        length-prefixed decimal string and written to that chain's socket.
        """
        for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
            if chain in self.allows_iters:
                _, _, writer = self.chain_type_to_stream[chain]
                for iteration in self.iters_to_submit[chain]:
                    if iteration in self.iters_submitted[chain]:
                        continue
                    log.debug(f"Submitting iterations to {chain}: {iteration}")
                    assert iteration > 0
                    # Wire format: two-digit length prefix followed by the
                    # decimal iteration count (e.g. "07" + "1234567").
                    prefix = str(len(str(iteration)))
                    if len(str(iteration)) < 10:
                        prefix = "0" + prefix
                    iter_str = prefix + str(iteration)
                    writer.write(iter_str.encode())
                    await writer.drain()
                    self.iters_submitted[chain].append(iteration)
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
    async def _check_for_new_sp(self, iter_to_look_for: uint64):
        """Broadcast a signage point once both of its VDF proofs are finished.

        Looks for ``iter_to_look_for`` among the pending signage-point
        iterations; when both the challenge-chain and reward-chain proofs for
        it exist (and match the current reward-chain challenge), builds a
        NewSignagePointVDF, sends it to all full nodes, and queues up to three
        follow-up signage-point iterations for submission.
        """
        signage_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
        ]
        if len(signage_iters) == 0:
            return None
        to_remove = []
        for potential_sp_iters, signage_point_index in self.signage_point_iters:
            if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
                continue
            signage_iter = potential_sp_iters
            # Proofs from the current reset epoch with exactly this iteration count.
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == signage_iter and label == self.num_resets
            ]
            # Wait for both cc and rc to have the signage point.
            if len(proofs_with_iter) == 2:
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient signage point data {signage_iter}")
                    continue
                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
                    # This proof is on an outdated challenge, so don't use it
                    continue
                # Re-express the cc VDF relative to the sub-slot start.
                iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
                response = timelord_protocol.NewSignagePointVDF(
                    signage_point_index,
                    dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                    cc_proof,
                    rc_info,
                    rc_proof,
                )
                if self.server is not None:
                    msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                to_remove.append((signage_iter, signage_point_index))
                self.proofs_finished = self._clear_proof_list(signage_iter)
                # Schedule up to the next 3 signage-point iterations on both chains.
                next_iters_count = 0
                for next_sp, k in self.signage_point_iters:
                    for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                        if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                            self.iters_to_submit[chain].append(next_sp)
                    self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                    next_iters_count += 1
                    if next_iters_count == 3:
                        break
                break
        for r in to_remove:
            self.signage_point_iters.remove(r)
    async def _check_for_new_ip(self, iter_to_look_for: uint64):
        """Broadcast an infusion point and form the next peak when its VDFs finish.

        When all required chains (cc, rc, and icc if active) have a finished
        proof at ``iter_to_look_for``, matches the iteration back to one of the
        pending unfinished blocks, broadcasts a NewInfusionPointVDF, then
        derives the new peak (reward chain block, deficit, sub-epoch summary
        bookkeeping) and hands it to ``_handle_new_peak``.
        """
        if len(self.unfinished_blocks) == 0:
            return None
        infusion_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
        ]
        for iteration in infusion_iters:
            if iteration != iter_to_look_for:
                continue
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == iteration and label == self.num_resets
            ]
            # An active infused challenge chain means three proofs are needed.
            if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
                chain_count = 3
            else:
                chain_count = 2
            if len(proofs_with_iter) == chain_count:
                block = None
                ip_iters = None
                # Find the unfinished block whose infusion iteration (relative
                # to the last infusion point) equals this iteration.
                for unfinished_block in self.unfinished_blocks:
                    try:
                        _, ip_iters = iters_from_block(
                            self.constants,
                            unfinished_block.reward_chain_block,
                            self.last_state.get_sub_slot_iters(),
                            self.last_state.get_difficulty(),
                        )
                    except Exception as e:
                        log.error(f"Error {e}")
                        continue
                    if ip_iters - self.last_state.get_last_ip() == iteration:
                        block = unfinished_block
                        break
                assert ip_iters is not None
                if block is not None:
                    ip_total_iters = self.last_state.get_total_iters() + iteration
                    challenge = block.reward_chain_block.get_hash()
                    icc_info: Optional[VDFInfo] = None
                    icc_proof: Optional[VDFProof] = None
                    cc_info: Optional[VDFInfo] = None
                    cc_proof: Optional[VDFProof] = None
                    rc_info: Optional[VDFInfo] = None
                    rc_proof: Optional[VDFProof] = None
                    for chain, info, proof in proofs_with_iter:
                        if chain == Chain.CHALLENGE_CHAIN:
                            cc_info = info
                            cc_proof = proof
                        if chain == Chain.REWARD_CHAIN:
                            rc_info = info
                            rc_proof = proof
                        if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                            icc_info = info
                            icc_proof = proof
                    if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                        log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                        return None
                    rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                    if rc_info.challenge != rc_challenge:
                        assert rc_challenge is not None
                        log.warning(
                            f"Do not have correct challenge {rc_challenge.hex()} "
                            f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                        )
                        # This proof is on an outdated challenge, so don't use it
                        continue
                    self.iters_finished.add(iter_to_look_for)
                    self.last_active_time = time.time()
                    log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                    overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                    if not self.last_state.can_infuse_block(overflow):
                        log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                        return None
                    # Re-express the cc VDF relative to the sub-slot start.
                    cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                    response = timelord_protocol.NewInfusionPointVDF(
                        challenge,
                        cc_info,
                        cc_proof,
                        rc_info,
                        rc_proof,
                        icc_info,
                        icc_proof,
                    )
                    msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                    if self.server is not None:
                        await self.server.send_to_all([msg], NodeType.FULL_NODE)
                    self.proofs_finished = self._clear_proof_list(iteration)
                    if (
                        self.last_state.get_last_block_total_iters() is None
                        and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                    ):
                        # We don't know when the last block was, so we can't make peaks
                        return None
                    sp_total_iters = (
                        ip_total_iters
                        - ip_iters
                        + calculate_sp_iters(
                            self.constants,
                            block.sub_slot_iters,
                            block.reward_chain_block.signage_point_index,
                        )
                        - (block.sub_slot_iters if overflow else 0)
                    )
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        is_transaction_block = True
                        height: uint32 = uint32(0)
                    else:
                        last_block_ti = self.last_state.get_last_block_total_iters()
                        assert last_block_ti is not None
                        is_transaction_block = last_block_ti < sp_total_iters
                        height = uint32(self.last_state.get_height() + 1)
                    if height < 5:
                        # Don't directly update our state for the first few blocks, because we cannot validate
                        return None
                    new_reward_chain_block = RewardChainBlock(
                        uint128(self.last_state.get_weight() + block.difficulty),
                        height,
                        uint128(ip_total_iters),
                        block.reward_chain_block.signage_point_index,
                        block.reward_chain_block.pos_ss_cc_challenge_hash,
                        block.reward_chain_block.proof_of_space,
                        block.reward_chain_block.challenge_chain_sp_vdf,
                        block.reward_chain_block.challenge_chain_sp_signature,
                        cc_info,
                        block.reward_chain_block.reward_chain_sp_vdf,
                        block.reward_chain_block.reward_chain_sp_signature,
                        rc_info,
                        icc_info,
                        is_transaction_block,
                    )
                    # Compute the new deficit for the challenge chain.
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                    elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        if self.last_state.peak is not None:
                            assert self.last_state.subslot_end is None
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                        else:
                            assert self.last_state.subslot_end is not None
                            if self.last_state.subslot_end.infused_challenge_chain is None:
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                            else:
                                # The finished sub-slot carries an infused
                                # challenge chain, so the deficit stays maxed.
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        new_deficit = max(self.last_state.deficit - 1, 0)
                    if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                        last_csb_or_eos = ip_total_iters
                    else:
                        last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
                    if self.last_state.just_infused_sub_epoch_summary():
                        new_sub_epoch_summary = None
                        passed_ses_height_but_not_yet_included = False
                    else:
                        new_sub_epoch_summary = block.sub_epoch_summary
                        if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                            passed_ses_height_but_not_yet_included = True
                        else:
                            passed_ses_height_but_not_yet_included = (
                                self.last_state.get_passed_ses_height_but_not_yet_included()
                            )
                    self.new_peak = timelord_protocol.NewPeakTimelord(
                        new_reward_chain_block,
                        block.difficulty,
                        uint8(new_deficit),
                        block.sub_slot_iters,
                        new_sub_epoch_summary,
                        self.last_state.reward_challenge_cache,
                        uint128(last_csb_or_eos),
                        passed_ses_height_but_not_yet_included,
                    )
                    await self._handle_new_peak()
                    # Break so we alternate between checking SP and IP
                    break
    async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
        """Build and broadcast the EndOfSubSlotBundle once the sub-slot VDFs finish.

        When all required chains (cc, rc, and icc if active) have a finished
        proof at the end-of-sub-slot iteration, assembles the challenge-chain,
        infused-challenge-chain and reward-chain sub-slot objects (including a
        sub-epoch summary when due), broadcasts the bundle to full nodes, and
        transitions local state via ``_handle_subslot_end``.
        """
        left_subslot_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
        ]
        if len(left_subslot_iters) == 0:
            return None
        if left_subslot_iters[0] != iter_to_look_for:
            return None
        chains_finished = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
        ]
        # An active infused challenge chain means three proofs are needed.
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(chains_finished) == chain_count:
            icc_ip_vdf: Optional[VDFInfo] = None
            icc_ip_proof: Optional[VDFProof] = None
            cc_vdf: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_vdf: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in chains_finished:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_vdf = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_vdf = info
                    rc_proof = proof
                if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                    icc_ip_vdf = info
                    icc_ip_proof = proof
            assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
            rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
            if rc_vdf.challenge != rc_challenge:
                assert rc_challenge is not None
                log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
                # This proof is on an outdated challenge, so don't use it
                return None
            log.debug("Collected end of subslot vdfs.")
            self.iters_finished.add(iter_to_look_for)
            self.last_active_time = time.time()
            # Re-express the cc VDF relative to the sub-slot start.
            iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
            cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
            if icc_ip_vdf is not None:
                # Re-express the icc VDF relative to the last challenge block
                # (or previous end-of-sub-slot).
                if self.last_state.peak is not None:
                    total_iters = (
                        self.last_state.get_total_iters()
                        - self.last_state.get_last_ip()
                        + self.last_state.get_sub_slot_iters()
                    )
                else:
                    total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
                iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
                if iters_from_cb > self.last_state.sub_slot_iters:
                    log.error(f"{self.last_state.peak}")
                    log.error(f"{self.last_state.subslot_end}")
                    assert False
                assert iters_from_cb <= self.last_state.sub_slot_iters
                icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
            icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
                None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
            )
            if self.last_state.get_deficit() == 0:
                assert icc_sub_slot is not None
                icc_sub_slot_hash = icc_sub_slot.get_hash()
            else:
                icc_sub_slot_hash = None
            next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
            if next_ses is not None:
                log.info(f"Including sub epoch summary{next_ses}")
                ses_hash = next_ses.get_hash()
                new_sub_slot_iters = next_ses.new_sub_slot_iters
                new_difficulty = next_ses.new_difficulty
            else:
                ses_hash = None
                new_sub_slot_iters = None
                new_difficulty = None
            cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
            # Deficit carried into the next sub-slot: keep a partial deficit,
            # otherwise reset to the maximum.
            eos_deficit: uint8 = (
                self.last_state.get_deficit()
                if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
                else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
            )
            rc_sub_slot = RewardChainSubSlot(
                rc_vdf,
                cc_sub_slot.get_hash(),
                icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
                eos_deficit,
            )
            eos_bundle = EndOfSubSlotBundle(
                cc_sub_slot,
                icc_sub_slot,
                rc_sub_slot,
                SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
            )
            if self.server is not None:
                msg = make_msg(
                    ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                    timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            log.info(
                f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
                f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
            )
            # On a difficulty change the overflow blocks are dropped instead
            # of carried over.
            if next_ses is None or next_ses.new_difficulty is None:
                self.unfinished_blocks = self.overflow_blocks.copy()
            else:
                self.unfinished_blocks = []
            self.overflow_blocks = []
            self.new_subslot_end = eos_bundle
            await self._handle_subslot_end()
    async def _handle_failures(self):
        """Recover from vdf_client failures and from prolonged inactivity.

        A failure recorded for the current reset epoch triggers an
        end-of-sub-slot chain reset; a timelord that produced nothing for
        longer than the active-time threshold gets all chains restarted.
        """
        if len(self.vdf_failures) > 0:
            failed_chain, proof_label = self.vdf_failures[0]
            log.error(
                f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
                f"label {proof_label}, current: {self.num_resets}"
            )
            # Only react if the failure belongs to the current reset epoch.
            if proof_label == self.num_resets:
                await self._reset_chains(only_eos=True)
            self.vdf_failure_time = time.time()
            self.vdf_failures = []
        # Use a more lenient inactivity threshold shortly after a failure.
        if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
            active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
        else:
            active_time_threshold = 60
        if time.time() - self.last_active_time > active_time_threshold:
            log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
            await self._reset_chains()
    async def _manage_chains(self):
        """Main timelord loop: drive clients, chains and proof processing.

        After an initial reset, repeatedly handles failures and new peaks,
        maps free vdf_clients to chains, submits pending iterations, and for
        the smallest unfinished reward-chain iteration checks for a new
        infusion point, signage point, and end of sub-slot. Exceptions are
        logged and the loop continues.
        """
        async with self.lock:
            await asyncio.sleep(5)
            await self._reset_chains(True)
        while not self._shut_down:
            try:
                await asyncio.sleep(0.1)
                async with self.lock:
                    await self._handle_failures()
                    if self.new_peak is not None:
                        await self._handle_new_peak()
                # Map free vdf_clients to unspawned chains.
                await self._map_chains_with_vdf_clients()
                async with self.lock:
                    # Submit pending iterations.
                    await self._submit_iterations()
                    not_finished_iters = [
                        it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
                    ]
                    if len(not_finished_iters) == 0:
                        await asyncio.sleep(0.1)
                        continue
                    selected_iter = min(not_finished_iters)
                    # Check for new infusion point and broadcast it if present.
                    await self._check_for_new_ip(selected_iter)
                    # Check for new signage point and broadcast it if present.
                    await self._check_for_new_sp(selected_iter)
                    # Check for end of subslot, respawn chains and build EndOfSubslotBundle.
                    await self._check_for_end_of_subslot(selected_iter)
            except Exception:
                tb = traceback.format_exc()
                log.error(f"Error while handling message: {tb}")
    async def _do_process_communication(
        self,
        chain: Chain,
        challenge: bytes32,
        initial_form: ClassgroupElement,
        ip: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        # Data specific only when running in bluebox mode.
        bluebox_iteration: Optional[uint64] = None,
        header_hash: Optional[bytes32] = None,
        height: Optional[uint32] = None,
        field_vdf: Optional[uint8] = None,
        # Labels a proof to the current state only
        proof_label: Optional[int] = None,
    ):
        """Drive one vdf_client connection for a single chain.

        Performs the handshake (algorithm selector, discriminant, initial
        form), then reads proofs until the client sends "STOP". Normal proofs
        are verified and appended to ``proofs_finished``; in sanitizer
        (bluebox) mode the compact proof is sent straight to full nodes.
        Client/stream failures are recorded in ``vdf_failures``.
        """
        disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
        try:
            # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
            # the timelord tells the vdf_client what to execute.
            async with self.lock:
                if self.sanitizer_mode:
                    writer.write(b"S")
                else:
                    if self.config["fast_algorithm"]:
                        # Run n-wesolowski (fast) algorithm.
                        writer.write(b"N")
                    else:
                        # Run two-wesolowski (slow) algorithm.
                        writer.write(b"T")
                await writer.drain()
            # Discriminant is sent as a 3-digit length prefix + decimal digits.
            prefix = str(len(str(disc)))
            if len(prefix) == 1:
                prefix = "00" + prefix
            if len(prefix) == 2:
                prefix = "0" + prefix
            async with self.lock:
                writer.write((prefix + str(disc)).encode())
                await writer.drain()
            # Send initial_form prefixed with its length.
            async with self.lock:
                writer.write(bytes([len(initial_form.data)]) + initial_form.data)
                await writer.drain()
            try:
                ok = await reader.readexactly(2)
            except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append((chain, proof_label))
                    self.vdf_failures_count += 1
                return None
            if ok.decode() != "OK":
                return None
            log.debug("Got handshake with VDF client.")
            if not self.sanitizer_mode:
                async with self.lock:
                    self.allows_iters.append(chain)
            else:
                # Bluebox mode: submit the single target iteration directly.
                async with self.lock:
                    assert chain is Chain.BLUEBOX
                    assert bluebox_iteration is not None
                    prefix = str(len(str(bluebox_iteration)))
                    if len(str(bluebox_iteration)) < 10:
                        prefix = "0" + prefix
                    iter_str = prefix + str(bluebox_iteration)
                    writer.write(iter_str.encode())
                    await writer.drain()
            # Listen to the client until "STOP" is received.
            while True:
                try:
                    data = await reader.readexactly(4)
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append((chain, proof_label))
                        self.vdf_failures_count += 1
                    break
                msg = ""
                try:
                    msg = data.decode()
                except Exception:
                    pass
                if msg == "STOP":
                    log.debug(f"Stopped client running on ip {ip}.")
                    async with self.lock:
                        writer.write(b"ACK")
                        await writer.drain()
                    break
                else:
                    try:
                        # This must be a proof, 4 bytes is length prefix
                        length = int.from_bytes(data, "big")
                        proof = await reader.readexactly(length)
                        stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                    except (
                        asyncio.IncompleteReadError,
                        ConnectionResetError,
                        Exception,
                    ) as e:
                        log.warning(f"{type(e)} {e}")
                        async with self.lock:
                            self.vdf_failures.append((chain, proof_label))
                            self.vdf_failures_count += 1
                        break
                    # Payload layout: 8-byte iterations, 8-byte y size, y
                    # bytes, 1-byte witness type, remaining witness bytes.
                    iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
                    y_size_bytes = stdout_bytes_io.read(8)
                    y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
                    y_bytes = stdout_bytes_io.read(y_size)
                    witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                    proof_bytes: bytes = stdout_bytes_io.read()
                    # Verifies our own proof just in case
                    form_size = ClassgroupElement.get_size(self.constants)
                    output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                    if not self.sanitizer_mode:
                        time_taken = time.time() - self.chain_start_time[chain]
                        ips = int(iterations_needed / time_taken * 10) / 10
                        log.info(
                            f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                            f" iters, "
                            f"Estimated IPS: {ips}, Chain: {chain}"
                        )
                    vdf_info: VDFInfo = VDFInfo(
                        challenge,
                        iterations_needed,
                        output,
                    )
                    vdf_proof: VDFProof = VDFProof(
                        witness_type,
                        proof_bytes,
                        self.sanitizer_mode,
                    )
                    if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                        log.error("Invalid proof of time!")
                    if not self.sanitizer_mode:
                        async with self.lock:
                            assert proof_label is not None
                            self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
                    else:
                        async with self.lock:
                            writer.write(b"010")
                            await writer.drain()
                        assert header_hash is not None
                        assert field_vdf is not None
                        assert height is not None
                        response = timelord_protocol.RespondCompactProofOfTime(
                            vdf_info, vdf_proof, header_hash, height, field_vdf
                        )
                        if self.server is not None:
                            message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
                            await self.server.send_to_all([message], NodeType.FULL_NODE)
        except ConnectionResetError as e:
            log.debug(f"Connection reset with VDF client {e}")
    async def _manage_discriminant_queue_sanitizer(self):
        """Bluebox dispatcher: pair pending compact-proof jobs with free clients.

        While there is both pending bluebox work and a free vdf_client, picks
        a job (preferring a randomly chosen field_vdf type, falling back to
        the queue head), spawns a BLUEBOX communication task for it, and
        consumes the client. Errors are logged and the loop keeps polling.
        """
        while not self._shut_down:
            async with self.lock:
                try:
                    while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
                        # Select randomly the field_vdf we're creating a compact vdf for.
                        target_field_vdf = random.randint(1, 4)
                        info = next(
                            (info for info in self.pending_bluebox_info if info[1].field_vdf == target_field_vdf),
                            None,
                        )
                        if info is None:
                            # No job of the chosen type; take the oldest job.
                            info = self.pending_bluebox_info[0]
                        ip, reader, writer = self.free_clients[0]
                        self.process_communication_tasks.append(
                            asyncio.create_task(
                                self._do_process_communication(
                                    Chain.BLUEBOX,
                                    info[1].new_proof_of_time.challenge,
                                    ClassgroupElement.get_default_element(),
                                    ip,
                                    reader,
                                    writer,
                                    info[1].new_proof_of_time.number_of_iterations,
                                    info[1].header_hash,
                                    info[1].height,
                                    info[1].field_vdf,
                                )
                            )
                        )
                        self.pending_bluebox_info.remove(info)
                        self.free_clients = self.free_clients[1:]
                except Exception as e:
                    log.error(f"Exception manage discriminant queue: {e}")
            await asyncio.sleep(0.1)
| true | true |
f732a92f6a56d1d3f0f151db59875c79d7b14b14 | 6,624 | py | Python | files/RMFE_24.py | akiratk0355/RMFE | 2cce1eb3daebc0594e0b70f60fc78d1979b507a5 | [
"MIT"
] | 1 | 2021-05-13T08:31:57.000Z | 2021-05-13T08:31:57.000Z | files/RMFE_24.py | akiratk0355/RMFE | 2cce1eb3daebc0594e0b70f60fc78d1979b507a5 | [
"MIT"
] | null | null | null | files/RMFE_24.py | akiratk0355/RMFE | 2cce1eb3daebc0594e0b70f60fc78d1979b507a5 | [
"MIT"
] | null | null | null |
# This file was *autogenerated* from the file RMFE_24.sage
from FFTa import *
from field_iso import *
from FFTpreproc import *
from sage.all_cmdline import * # import sage library
# Preparsed Sage integer constants.
_sage_const_2 = Integer(2)
_sage_const_1 = Integer(1)
_sage_const_0 = Integer(0)
_sage_const_4 = Integer(4)
_sage_const_128 = Integer(128)
_sage_const_16 = Integer(16)
_sage_const_32 = Integer(32) # shebang comment displaced here by sage preparsing
# Implementation of a (32,128)_2 RMFE, as a concatenation of a (2,4)_2 RMFE and a (8,32)_16 RMFE. Functionality is extended to deal with (k,128)_2 RMFEs meaning that: if k<32, phi can be applied to an input (F_2)^k by first padding with zeros to get a vector in (F_2)^32; and if k>32, then the input vector to phi is splitted in subvectors of length 32 (and a remainder block of length k') and the map phi is applied to each vector. In the case of the map psi, it receives a vector of elements in the larger field H=F_(2^128) and applies psi:H->F_2^k to each coordinate, where the k's are specified by the user (they need to be k<=32) and can be different.
# Functions:
# map24: map phi of the (2,4)_2-RMFE: takes as input a vector of (F_2)^2, outputs a single element in the field F=F_16.
# invmap24: map psi of (2,4)_2-RMFE: takes as input a single element in the field F=F_16, outputs a vector in (F_2)^2.
# phi_RMFE24: map phi of (k,128)_2-RMFE with "extended functionality": takes as input a vector in F_2^k, outputs a vector in H^m, where H=F_{2^128} (field of 2^128 elements) and m=k/32+1.
# psi_RMFE24: map psi of (k,128)_2-RMFE with "extended functionality": takes as input a vector in H^m, where H=F_{2^128} (field of 2^128 elements), and a vector of integers (k_1,...,k_m) with k_i<=32, and outputs a vector in F_2^(k_1+k_2+...+k_m), where each block is the result of applying psi to the i-th component of the input H-vector.
# H = F_{2^128}: the large target field of the RMFE.
H = GF(_sage_const_2 ** _sage_const_128, modulus="primitive", names=('c',))
(c,) = H._first_ngens(1)
h = H.modulus()
# F = F_16: the intermediate field of the inner (2,4)_2 RMFE.
F = GF(_sage_const_2 ** _sage_const_4, names=('a',))
(a,) = F._first_ngens(1)
f = F.modulus()
# P = F_2[X], R = F_16[Y]; g is an F_16-irreducible factor of h used for the
# field isomorphism F_16[Y]/(g) = F_(16^32) <-> F_(2^128).
P = PolynomialRing(GF(_sage_const_2), names=('X',))
(X,) = P._first_ngens(1)
R = PolynomialRing(F, names=('Y',))
(Y,) = R._first_ngens(1)
g = R(h).factor()[_sage_const_0][_sage_const_0]
# map24: phi-map of (2,4)_2-RMFE
def map24(v):
    """Phi-map of the (2,4)_2-RMFE: embed a length-2 binary vector into F_16."""
    low = v[_sage_const_0]
    high = v[_sage_const_1]
    return low + (low + high) * a
# invmap24: psi-map of (2,4)_2-RMFE
def invmap24(d):
    """Psi-map of the (2,4)_2-RMFE: recover a vector in (F_2)^2 from an F_16 element.

    Evaluates the polynomial representation of ``d`` at 0 and 1; the zero
    element maps to [0, 0].
    """
    if d != _sage_const_0:
        p = d.polynomial(X)
        return [p(_sage_const_0), p(_sage_const_1)]
    else:
        return [_sage_const_0, _sage_const_0]
    # Removed unreachable "return D": both branches above already return,
    # and the name D was never defined.
# phi_RMFE24: If given a binary vector of length k<=32, computes phi-map of (k,128)_2-RMFE. Else, split in blocks of length 32 (and a remainder block of length k') and compute phi-map of (32,128)_2-RMFE on each block (and (k',128)_2-RMFE on the last one). Outputs a list of elements in GF(2^128)
def phi_RMFE24(v):
    """Phi-map of the (k,128)_2-RMFE: encode a binary vector into a list of F_(2^128) elements.

    Vectors longer than 32 bits are split into 32-bit blocks (plus a
    remainder) and encoded recursively, one output element per block.
    NOTE(review): when len(v) is an exact multiple of 32 the remainder block
    is empty and still produces one extra (all-zero) output element — confirm
    this is intended.
    """
    if len(v) > _sage_const_32:
        w = []
        number_blocks = len(v)//_sage_const_32 + _sage_const_1
        for i in range(number_blocks-_sage_const_1):
            w.append(v[_sage_const_32 * i:_sage_const_32 * i+_sage_const_32])
        w.append(v[_sage_const_32 * (number_blocks-_sage_const_1):])
        res = []
        for j in range(len(w)):
            res = res+phi_RMFE24(w[j])
        return res
    # First step, split the binary vector in blocks of two cordinates (fill in left-over block with zeros), apply (2,4)_2-RMFE to each block.
    else:
        k = len(v)
        odd = k % _sage_const_2
        if odd:
            v.append(_sage_const_0)
            l = _sage_const_1
        else:
            l = _sage_const_0
        v1 = []
        for i in range((k+l)//_sage_const_2):
            t = map24(v[_sage_const_2 * i:_sage_const_2 * i+_sage_const_2])
            v1.append(t)
        # Second step, apply (k',32)_16-RMFE to result v1. Here k'=length(v1)<=16. Apply inverse FFT to v1, obtaining an interpolating polynomial of degree <=15. Map the <=15-degree polynomial into an element of F_(16^32) and represent it as an element of F_(2^128) via field_iso_desc
        while len(v1) < _sage_const_16:
            v1.append(_sage_const_0)
        # We generate the preprocessing data for the FFT (TODO: having this as a preprocessing would only be useful if the precomputation would be used for more than one evaluation of phi, not the case currently).
        B = [a**i for i in range(_sage_const_4)]
        data = FFTpreproc(_sage_const_4, B)
        # Apply inverse FFT
        v2 = invbinaryFFT(v1, _sage_const_4, B, data)
        # Represent result as polynomial
        m = _sage_const_0
        for i in range(len(v2)):
            m += v2[i]*Y**i
        # Map the <=15-degree polynomial from F_16[X] into an element of F_(2^128) via field_iso_desc (by implicitely first mapping into an element of F_(16^32) and then changing to a representation in F_2^(128)).
        r = field_iso_desc(m, _sage_const_4, g, h, F, H, P, R)
        return [r]
# psi_RMFE24: Given a list of elements w of F_(2^128), and a vector of values k<=32, computes psi-map of (k,128)_2-RMFE on each element and outputs the concatenation of the resulting vectors
def psi_RMFE24(w, k):
    """Psi-map of the (k,128)_2-RMFE applied element-wise.

    For each w[j] in F_(2^128), recovers a binary vector of length k[j]
    (each k[j] <= 32) and returns the concatenation of all recovered vectors.

    Raises:
        Exception: if ``w`` and ``k`` differ in length, or any k[j] > 32.
    """
    if len(w) != len(k):
        raise Exception("inputs to psi_RMFE24 must be of same length")
    for i in range(len(k)):
        if k[i] > _sage_const_32:
            raise Exception(
                "every coordinate on second input of psi_RMFE24 needs to be at most 32")
    B = [a**i for i in range(_sage_const_4)]
    data = FFTpreproc(_sage_const_4, B)
    res = []
    for j in range(len(w)):
        # First change field representation to represent input as element of F_(16^32) and hence as a polynomial in F_16[Y] of degree at most 31.
        m = field_iso_asc(w[j], _sage_const_4, g, R)
        m = list(m)
        # Before applying the FFT we need a polynomial of degree <=15. For this we take modulo X^16+X, as this does not modify evaluation in points of F_16:
        hred = listsum(m[_sage_const_0:_sage_const_16], [
                       _sage_const_0]+m[_sage_const_16:])
        # Apply FFT
        w1 = binaryFFT(hred, _sage_const_4, B, data)
        # Based on value of k, we adjust size of the output.
        upper = (k[j]+_sage_const_1)//_sage_const_2
        del w1[upper:]
        # Apply psi from (2,4)_2-RMFE to each element of resulting vector.
        r = []
        for i in range(len(w1)):
            r = r+invmap24(w1[i])
        # Adjust size of output.
        del r[k[j]:]
        # Concatenate this to global vector.
        res = res+r
    return res
| 43.012987 | 656 | 0.658816 |
from FFTa import *
from field_iso import *
from FFTpreproc import *
from sage.all_cmdline import *
# Preparsed Sage integer constants.
_sage_const_2 = Integer(2)
_sage_const_1 = Integer(1)
_sage_const_0 = Integer(0)
_sage_const_4 = Integer(4)
_sage_const_128 = Integer(128)
_sage_const_16 = Integer(16)
_sage_const_32 = Integer(32)
# H = F_{2^128}: the large target field; F = F_16: the intermediate field.
H = GF(_sage_const_2 ** _sage_const_128, modulus="primitive", names=('c',))
(c,) = H._first_ngens(1)
h = H.modulus()
F = GF(_sage_const_2 ** _sage_const_4, names=('a',))
(a,) = F._first_ngens(1)
f = F.modulus()
# P = F_2[X], R = F_16[Y]; g is an F_16-irreducible factor of h used for the
# field isomorphism between F_(16^32) and F_(2^128).
P = PolynomialRing(GF(_sage_const_2), names=('X',))
(X,) = P._first_ngens(1)
R = PolynomialRing(F, names=('Y',))
(Y,) = R._first_ngens(1)
g = R(h).factor()[_sage_const_0][_sage_const_0]
def map24(v):
    """Phi-map of the (2,4)_2-RMFE: embed a length-2 binary vector into F_16."""
    v0 = v[_sage_const_0]
    v1 = v[_sage_const_1]
    return v0 + (v0 + v1) * a
def invmap24(d):
    """Psi-map of the (2,4)_2-RMFE: recover a vector in (F_2)^2 from an F_16 element.

    Evaluates the polynomial representation of ``d`` at 0 and 1; the zero
    element maps to [0, 0].
    """
    if d != _sage_const_0:
        p = d.polynomial(X)
        return [p(_sage_const_0), p(_sage_const_1)]
    else:
        return [_sage_const_0, _sage_const_0]
    # Removed unreachable "return D": both branches above already return,
    # and the name D was never defined.
def phi_RMFE24(v):
    """Phi-map of the (k,128)_2-RMFE: encode a binary vector into a list of F_(2^128) elements.

    Vectors longer than 32 bits are split into 32-bit blocks (plus a
    remainder) and encoded recursively, one output element per block.
    """
    if len(v) > _sage_const_32:
        w = []
        number_blocks = len(v)//_sage_const_32 + _sage_const_1
        for i in range(number_blocks-_sage_const_1):
            w.append(v[_sage_const_32 * i:_sage_const_32 * i+_sage_const_32])
        w.append(v[_sage_const_32 * (number_blocks-_sage_const_1):])
        res = []
        for j in range(len(w)):
            res = res+phi_RMFE24(w[j])
        return res
    else:
        # Pad an odd-length vector, then apply the (2,4)_2-RMFE pairwise.
        k = len(v)
        odd = k % _sage_const_2
        if odd:
            v.append(_sage_const_0)
            l = _sage_const_1
        else:
            l = _sage_const_0
        v1 = []
        for i in range((k+l)//_sage_const_2):
            t = map24(v[_sage_const_2 * i:_sage_const_2 * i+_sage_const_2])
            v1.append(t)
        # Pad to 16 F_16 coordinates, interpolate via inverse FFT, and map the
        # resulting degree-<=15 polynomial into F_(2^128).
        while len(v1) < _sage_const_16:
            v1.append(_sage_const_0)
        B = [a**i for i in range(_sage_const_4)]
        data = FFTpreproc(_sage_const_4, B)
        v2 = invbinaryFFT(v1, _sage_const_4, B, data)
        m = _sage_const_0
        for i in range(len(v2)):
            m += v2[i]*Y**i
        r = field_iso_desc(m, _sage_const_4, g, h, F, H, P, R)
        return [r]
def psi_RMFE24(w, k):
    """Psi-map of the (k,128)_2-RMFE applied element-wise.

    For each w[j] in F_(2^128), recovers a binary vector of length k[j]
    (each k[j] <= 32) and returns the concatenation of all recovered vectors.

    Raises:
        Exception: if ``w`` and ``k`` differ in length, or any k[j] > 32.
    """
    if len(w) != len(k):
        raise Exception("inputs to psi_RMFE24 must be of same length")
    for i in range(len(k)):
        if k[i] > _sage_const_32:
            raise Exception(
                "every coordinate on second input of psi_RMFE24 needs to be at most 32")
    B = [a**i for i in range(_sage_const_4)]
    data = FFTpreproc(_sage_const_4, B)
    res = []
    for j in range(len(w)):
        # Change field representation, reduce mod X^16+X, and evaluate via FFT.
        m = field_iso_asc(w[j], _sage_const_4, g, R)
        m = list(m)
        hred = listsum(m[_sage_const_0:_sage_const_16], [
                       _sage_const_0]+m[_sage_const_16:])
        w1 = binaryFFT(hred, _sage_const_4, B, data)
        # Keep only the evaluations needed for k[j] output bits, then invert
        # the inner (2,4)_2-RMFE pairwise and trim to exactly k[j] bits.
        upper = (k[j]+_sage_const_1)//_sage_const_2
        del w1[upper:]
        r = []
        for i in range(len(w1)):
            r = r+invmap24(w1[i])
        del r[k[j]:]
        res = res+r
    return res
| true | true |
f732aa6c84cffecfc28ee6c1d6e55c5c92e84145 | 9,150 | py | Python | cctpy6_r100_wn_change/run.py | madokast/cctpy | b02c64220ea533a4fc9cad0b882d1be6edadf1c0 | [
"MIT"
] | 1 | 2021-12-27T13:20:43.000Z | 2021-12-27T13:20:43.000Z | cctpy6_r100_wn_change/run.py | madokast/cctpy | b02c64220ea533a4fc9cad0b882d1be6edadf1c0 | [
"MIT"
] | null | null | null | cctpy6_r100_wn_change/run.py | madokast/cctpy | b02c64220ea533a4fc9cad0b882d1be6edadf1c0 | [
"MIT"
] | null | null | null | # from visdom import Visdom
from cctpy import *
from ccpty_cuda import *
import time
import numpy as np
VIZ_PORT = 8098
ga32 = GPU_ACCELERATOR()
momentum_dispersions = [-0.05, -0.025, 0.0, 0.025, 0.05]
particle_number_per_plane_per_dp = 12
particle_number_per_gantry = len(momentum_dispersions) * particle_number_per_plane_per_dp * 2
default_gantry = HUST_SC_GANTRY(
DL1=0.9007765,
GAP1=0.4301517,
GAP2=0.370816,
qs1_length=0.2340128,
qs1_aperture_radius=60 * MM,
qs1_gradient=0.0,
qs1_second_gradient=0.0,
qs2_length=0.200139,
qs2_aperture_radius=60 * MM,
qs2_gradient=0.0,
qs2_second_gradient=0.0,
DL2=2.35011,
GAP3=0.43188,
qs3_length=0.24379,
)
default_beamline = default_gantry.create_beamline()
first_bending_length = default_gantry.first_bending_part_length()
run_distance = default_beamline.get_length() - first_bending_length
second_bending_part_start_point = default_beamline.trajectory.point_at(first_bending_length)
second_bending_part_start_direct = default_beamline.trajectory.direct_at(first_bending_length)
ip = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=first_bending_length,
kinetic_MeV=215
)
ip_ran = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=default_beamline.get_length(),
kinetic_MeV=215
)
pps = []
for dp in momentum_dispersions:
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5 * MM, xpMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
yMax=3.5 * MM, ypMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
times = 1
params_and_objs = []
def run(params: np.ndarray) -> np.ndarray:
    """Evaluate a population of gantry parameter vectors on the GPU.

    params -- array of shape (gantry_number, 13); each row is one candidate
        (see create_beamline for the column layout).

    Returns an array of shape (gantry_number, 11): for each gantry, the
    |beam_size - average| of each (plane, dp) particle group, followed by
    the average beam size itself.  Side effects: appends every
    (param, objective) row to module-level ``params_and_objs`` and saves
    the accumulated table to ``./record/<times>.txt``.
    """
    global times
    start_time = time.time()
    gantry_number = params.shape[0]
    print(f"机架数目{gantry_number}")
    beamlines = create_beamlines(gantry_number, params)
    print(f"制作机架用时{time.time() - start_time}")
    ps = ParticleFactory.create_from_phase_space_particles(
        ip, ip.get_natural_coordinate_system(), pps
    )
    print(f"粒子总数{len(ps) * gantry_number}")
    # Track all particles through every beamline in one GPU batch.
    ps_ran_list = ga32.track_multi_particle_beamlime_for_magnet_with_single_qs(
        bls=beamlines,
        ps=ps,
        distance=run_distance,
        footstep=20 * MM
    )
    statistic_x = BaseUtils.Statistic()
    statistic_y = BaseUtils.Statistic()
    statistic_beam_sizes = BaseUtils.Statistic()
    objs: List[List[float]] = []
    for gid in range(gantry_number):  # ~120 gantries per call in practice
        ps_ran = ps_ran_list[gid]
        pps_ran = PhaseSpaceParticle.create_from_running_particles(
            ip_ran, ip_ran.get_natural_coordinate_system(), ps_ran
        )
        obj: List[float] = []
        # Walk the particles in groups of particle_number_per_plane_per_dp
        # (one group = one phase-space ellipse at one momentum spread).
        for pid in range(0, len(pps_ran), particle_number_per_plane_per_dp):
            for pp in pps_ran[pid:pid + particle_number_per_plane_per_dp]:
                # accumulate x and y positions
                statistic_x.add(pp.x / MM)
                statistic_y.add(pp.y / MM)  # mm
            # beam-spot half width per plane
            beam_size_x = (statistic_x.max() - statistic_x.min()) / 2
            beam_size_y = (statistic_y.max() - statistic_y.min()) / 2
            statistic_x.clear()
            statistic_y.clear()
            # only the larger of the two planes matters
            beam_size = max(beam_size_x, beam_size_y)
            statistic_beam_sizes.add(beam_size)  # for the average
            obj.append(beam_size)  # record each group's beam spot
        # average beam spot over all groups of this gantry
        beam_size_avg = statistic_beam_sizes.average()
        statistic_beam_sizes.clear()
        objs.append([abs(bs - beam_size_avg) for bs in obj] + [beam_size_avg])
    objs_np = np.array(objs)
    for gid in range(gantry_number):
        param = params[gid]
        obj = objs_np[gid]
        params_and_objs.append(np.concatenate((param, obj)))
    np.savetxt(fname='./record/' + str(times) + '.txt', X=params_and_objs)
    try:
        # draw_viz(params_and_objs)  # live plotting intentionally disabled
        pass
    except Exception as e:
        print(e)
        pass
    times += 1
    print(f"用时{time.time() - start_time} s")
    return objs_np
def create_beamlines(gantry_number, params):
    """Build one beamline per gantry candidate in parallel worker processes."""
    # One argument pack per gantry: its parameter vector plus the shared
    # start point/direction of the second bending section.
    arg_packs = []
    for idx in range(gantry_number):
        arg_packs.append(
            [params[idx], second_bending_part_start_point, second_bending_part_start_direct]
        )
    return BaseUtils.submit_process_task(task=create_beamline, param_list=arg_packs)
def create_beamline(param, second_bending_part_start_point, second_bending_part_start_direct) -> Beamline:
    """Build the second bending part of a HUST_SC_GANTRY from one candidate.

    param -- 13-element optimizer vector:
        [0] QS3 gradient, [1] QS3 second gradient,
        [2:5] DICCT tilt angles, [5:8] AGCCT tilt angles,
        [8] DICCT current, [9] AGCCT current,
        [10:13] winding numbers of AGCCT sections 3/4/5 (truncated to int).
    """
    # Unpack the continuous part of the parameter vector.
    (qs3_gradient, qs3_second_gradient,
     dicct_tilt_1, dicct_tilt_2, dicct_tilt_3,
     agcct_tilt_0, agcct_tilt_2, agcct_tilt_3,
     dicct_current, agcct_current) = param[:10]
    winding_3 = int(param[10])
    winding_4 = int(param[11])
    winding_5 = int(param[12])
    total_windings = winding_3 + winding_4 + winding_5
    return HUST_SC_GANTRY(
        qs3_gradient=qs3_gradient,
        qs3_second_gradient=qs3_second_gradient,
        # the first DICCT tilt angle and the second AGCCT tilt angle are
        # fixed at 30 degrees
        dicct345_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3],
        agcct345_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3],
        dicct345_current=dicct_current,
        agcct345_current=agcct_current,
        agcct3_winding_number=winding_3,
        agcct4_winding_number=winding_4,
        agcct5_winding_number=winding_5,
        # share the total -67.5 degree bend among the three AGCCT sections
        # proportionally to their winding numbers
        agcct3_bending_angle=-67.5 * (winding_3 / total_windings),
        agcct4_bending_angle=-67.5 * (winding_4 / total_windings),
        agcct5_bending_angle=-67.5 * (winding_5 / total_windings),
        DL1=0.9007765,
        GAP1=0.4301517,
        GAP2=0.370816,
        qs1_length=0.2340128,
        qs1_aperture_radius=60 * MM,
        qs1_gradient=0.0,
        qs1_second_gradient=0.0,
        qs2_length=0.200139,
        qs2_aperture_radius=60 * MM,
        qs2_gradient=0.0,
        qs2_second_gradient=0.0,
        DL2=2.35011,
        GAP3=0.43188,
        qs3_length=0.24379,
        # CCT aperture radii, enlarged by 17.1 mm over the baseline design
        agcct345_inner_small_r=92.5 * MM + 17.1 * MM,
        agcct345_outer_small_r=108.5 * MM + 17.1 * MM,
        dicct345_inner_small_r=124.5 * MM + 17.1 * MM,
        dicct345_outer_small_r=140.5 * MM + 17.1 * MM,
    ).create_second_bending_part(
        start_point=second_bending_part_start_point,
        start_driect=second_bending_part_start_direct  # (sic) API spells "driect"
    )
wins = []  # Visdom window handles, reused across draw_viz calls
def draw_viz(params_and_objs):
    """Scatter-plot every parameter and objective column against the
    individual index on a local Visdom server, creating the windows on
    the first call and updating them in place afterwards.

    NOTE(review): ``Visdom`` is never imported (the import at the top of
    the file is commented out), so calling this raises NameError.  Also
    the server scheme is spelled 'Http://' — confirm visdom accepts the
    non-standard casing.
    """
    viz = Visdom(server='Http://127.0.0.1', port=VIZ_PORT)
    assert viz.check_connection()
    data = np.array(params_and_objs)
    x = np.array(list(range(data.shape[0])))
    xd = np.concatenate((x.reshape((-1, 1)), data), axis=1)
    # Meaning of each column of xd:
    # 0        individual index (0-34265)
    # 1 2      QS magnet parameters
    # 3 4 5 / 6 7 8  CCT tilt-angle parameters
    # 9 10     currents
    # 11 12 13 winding numbers
    # 14..18 and
    # 19..23   |beam size - average| per particle group
    # 24       average beam size
    lables = ['qs-q', 'qs-s',
              'dicct-t4', 'dicct-t6', 'dicct-t8',
              'agcct-t2', 'agcct-t6', 'agcct-t8',
              'dicct-I', 'agcct-I',
              'agcct-wn0', 'agcct-wn1', 'agcct-wn2',
              'diff_size1', 'diff_size2', 'diff_size3', 'diff_size4', 'diff_size5',
              'diff_size6', 'diff_size7', 'diff_size8', 'diff_size9', 'diff_size0',
              'beam_avg', 'max_diff_size']
    for i in range(len(lables)):
        # First call: windows not created yet, append new ones.
        if len(wins) != len(lables):
            if i == len(lables) - 1:  # last label: worst per-group deviation
                wins.append(viz.scatter(
                    X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,
                    opts={
                        'title': lables[i] + ' vs individual',
                        'xlabel': 'individual',
                        'ylabel': lables[i],
                        'markersize': 2
                    }
                ))
            else:
                wins.append(viz.scatter(
                    X=np.vstack((xd[:, 0], xd[:, i + 1])).T,
                    opts={
                        'title': lables[i] + ' vs individual',
                        'xlabel': 'individual',
                        'ylabel': lables[i],
                        'markersize': 2
                    }
                ))
        # Subsequent calls: redraw into the existing windows.
        else:
            if i == len(lables) - 1:  # last label: worst per-group deviation
                wins[i] = viz.scatter(
                    X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,
                    win=wins[i],
                    opts={
                        'title': lables[i] + ' vs individual',
                        'xlabel': 'individual',
                        'ylabel': lables[i],
                        'markersize': 2
                    }
                )
            else:
                viz.scatter(
                    X=np.vstack((xd[:, 0], xd[:, i + 1])).T,
                    win=wins[i],
                    opts={
                        'title': lables[i] + ' vs individual',
                        'xlabel': 'individual',
                        'ylabel': lables[i],
                        'markersize': 2
                    }
                )
| 31.551724 | 120 | 0.590055 |
from cctpy import *
from ccpty_cuda import *
import time
import numpy as np
VIZ_PORT = 8098
ga32 = GPU_ACCELERATOR()
momentum_dispersions = [-0.05, -0.025, 0.0, 0.025, 0.05]
particle_number_per_plane_per_dp = 12
particle_number_per_gantry = len(momentum_dispersions) * particle_number_per_plane_per_dp * 2
default_gantry = HUST_SC_GANTRY(
DL1=0.9007765,
GAP1=0.4301517,
GAP2=0.370816,
qs1_length=0.2340128,
qs1_aperture_radius=60 * MM,
qs1_gradient=0.0,
qs1_second_gradient=0.0,
qs2_length=0.200139,
qs2_aperture_radius=60 * MM,
qs2_gradient=0.0,
qs2_second_gradient=0.0,
DL2=2.35011,
GAP3=0.43188,
qs3_length=0.24379,
)
default_beamline = default_gantry.create_beamline()
first_bending_length = default_gantry.first_bending_part_length()
run_distance = default_beamline.get_length() - first_bending_length
second_bending_part_start_point = default_beamline.trajectory.point_at(first_bending_length)
second_bending_part_start_direct = default_beamline.trajectory.direct_at(first_bending_length)
ip = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=first_bending_length,
kinetic_MeV=215
)
ip_ran = ParticleFactory.create_proton_along(
trajectory=default_beamline.trajectory,
s=default_beamline.get_length(),
kinetic_MeV=215
)
pps = []
for dp in momentum_dispersions:
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(
xMax=3.5 * MM, xpMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
pps.extend(PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(
yMax=3.5 * MM, ypMax=7.5 * MM, delta=dp, number=particle_number_per_plane_per_dp
))
times = 1
params_and_objs = []
def run(params: np.ndarray):
global times
start_time = time.time()
gantry_number = params.shape[0]
print(f"机架数目{gantry_number}")
beamlines = create_beamlines(gantry_number, params)
print(f"制作机架用时{time.time() - start_time}")
ps = ParticleFactory.create_from_phase_space_particles(
ip, ip.get_natural_coordinate_system(), pps
)
print(f"粒子总数{len(ps) * gantry_number}")
ps_ran_list = ga32.track_multi_particle_beamlime_for_magnet_with_single_qs(
bls=beamlines,
ps=ps,
distance=run_distance,
footstep=20 * MM
)
statistic_x = BaseUtils.Statistic()
statistic_y = BaseUtils.Statistic()
statistic_beam_sizes = BaseUtils.Statistic()
objs: List[List[float]] = []
for gid in range(gantry_number):
ps_ran = ps_ran_list[gid]
pps_ran = PhaseSpaceParticle.create_from_running_particles(
ip_ran, ip_ran.get_natural_coordinate_system(), ps_ran
)
obj: List[float] = []
for pid in range(0, len(pps_ran), particle_number_per_plane_per_dp):
for pp in pps_ran[pid:pid + particle_number_per_plane_per_dp]:
statistic_x.add(pp.x / MM)
statistic_y.add(pp.y / MM)
beam_size_x = (statistic_x.max() - statistic_x.min()) / 2
beam_size_y = (statistic_y.max() - statistic_y.min()) / 2
statistic_x.clear()
statistic_y.clear()
beam_size = max(beam_size_x, beam_size_y)
statistic_beam_sizes.add(beam_size)
obj.append(beam_size)
beam_size_avg = statistic_beam_sizes.average()
statistic_beam_sizes.clear()
objs.append([abs(bs - beam_size_avg) for bs in obj] + [beam_size_avg])
objs_np = np.array(objs)
for gid in range(gantry_number):
param = params[gid]
obj = objs_np[gid]
params_and_objs.append(np.concatenate((param, obj)))
np.savetxt(fname='./record/' + str(times) + '.txt', X=params_and_objs)
try:
pass
except Exception as e:
print(e)
pass
times += 1
print(f"用时{time.time() - start_time} s")
return objs_np
def create_beamlines(gantry_number, params):
return BaseUtils.submit_process_task(
task=create_beamline,
param_list=[
[params[i], second_bending_part_start_point, second_bending_part_start_direct] for i in range(gantry_number)
]
)
def create_beamline(param, second_bending_part_start_point, second_bending_part_start_direct) -> Beamline:
qs3_g = param[0]
qs3_sg = param[1]
dicct_tilt_1 = param[2]
dicct_tilt_2 = param[3]
dicct_tilt_3 = param[4]
agcct_tilt_0 = param[5]
agcct_tilt_2 = param[6]
agcct_tilt_3 = param[7]
dicct_current = param[8]
agcct_current = param[9]
agcct3_wn = int(param[10])
agcct4_wn = int(param[11])
agcct5_wn = int(param[12])
return HUST_SC_GANTRY(
qs3_gradient=qs3_g,
qs3_second_gradient=qs3_sg,
dicct345_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3],
agcct345_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3],
dicct345_current=dicct_current,
agcct345_current=agcct_current,
agcct3_winding_number=agcct3_wn,
agcct4_winding_number=agcct4_wn,
agcct5_winding_number=agcct5_wn,
agcct3_bending_angle=-67.5 * (agcct3_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),
agcct4_bending_angle=-67.5 * (agcct4_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),
agcct5_bending_angle=-67.5 * (agcct5_wn / (agcct3_wn + agcct4_wn + agcct5_wn)),
DL1=0.9007765,
GAP1=0.4301517,
GAP2=0.370816,
qs1_length=0.2340128,
qs1_aperture_radius=60 * MM,
qs1_gradient=0.0,
qs1_second_gradient=0.0,
qs2_length=0.200139,
qs2_aperture_radius=60 * MM,
qs2_gradient=0.0,
qs2_second_gradient=0.0,
DL2=2.35011,
GAP3=0.43188,
qs3_length=0.24379,
agcct345_inner_small_r=92.5 * MM + 17.1 * MM,
agcct345_outer_small_r=108.5 * MM + 17.1 * MM,
dicct345_inner_small_r=124.5 * MM + 17.1 * MM,
dicct345_outer_small_r=140.5 * MM + 17.1 * MM,
).create_second_bending_part(
start_point=second_bending_part_start_point,
start_driect=second_bending_part_start_direct
)
wins = []
def draw_viz(params_and_objs):
viz = Visdom(server='Http://127.0.0.1', port=VIZ_PORT)
assert viz.check_connection()
data = np.array(params_and_objs)
x = np.array(list(range(data.shape[0])))
xd = np.concatenate((x.reshape((-1, 1)), data), axis=1)
lables = ['qs-q', 'qs-s',
'dicct-t4', 'dicct-t6', 'dicct-t8',
'agcct-t2', 'agcct-t6', 'agcct-t8',
'dicct-I', 'agcct-I',
'agcct-wn0', 'agcct-wn1', 'agcct-wn2',
'diff_size1', 'diff_size2', 'diff_size3', 'diff_size4', 'diff_size5',
'diff_size6', 'diff_size7', 'diff_size8', 'diff_size9', 'diff_size0',
'beam_avg', 'max_diff_size']
for i in range(len(lables)):
if len(wins) != len(lables):
if i == len(lables) - 1:
wins.append(viz.scatter(
X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,
opts={
'title': lables[i] + ' vs individual',
'xlabel': 'individual',
'ylabel': lables[i],
'markersize': 2
}
))
else:
wins.append(viz.scatter(
X=np.vstack((xd[:, 0], xd[:, i + 1])).T,
opts={
'title': lables[i] + ' vs individual',
'xlabel': 'individual',
'ylabel': lables[i],
'markersize': 2
}
))
else:
if i == len(lables) - 1:
wins[i] = viz.scatter(
X=np.vstack((xd[:, 0], np.max(xd[:, 14:24], axis=1))).T,
win=wins[i],
opts={
'title': lables[i] + ' vs individual',
'xlabel': 'individual',
'ylabel': lables[i],
'markersize': 2
}
)
else:
viz.scatter(
X=np.vstack((xd[:, 0], xd[:, i + 1])).T,
win=wins[i],
opts={
'title': lables[i] + ' vs individual',
'xlabel': 'individual',
'ylabel': lables[i],
'markersize': 2
}
)
| true | true |
f732ab774f7555bf80fc97f6f5a4ed2a63925d48 | 3,440 | py | Python | subsamplers/cldnn.py | dl4amc/dds | 2d53c74ea1f1452beb2c1c52d3048e4260f22948 | [
"MIT"
] | 4 | 2020-11-05T01:36:52.000Z | 2022-03-10T13:04:12.000Z | subsamplers/cldnn.py | dl4amc/dds | 2d53c74ea1f1452beb2c1c52d3048e4260f22948 | [
"MIT"
] | null | null | null | subsamplers/cldnn.py | dl4amc/dds | 2d53c74ea1f1452beb2c1c52d3048e4260f22948 | [
"MIT"
] | 4 | 2019-08-26T08:23:13.000Z | 2021-09-06T03:32:14.000Z | # coding: utf-8
# Import all the things we need ---
#get_ipython().magic(u'matplotlib inline')
import os,random
#os.environ["KERAS_BACKEND"] = "theano"
os.environ["KERAS_BACKEND"] = "tensorflow"
#os.environ["THEANO_FLAGS"] = "device=gpu%d"%(1) #disabled because we do not have a hardware GPU
import numpy as np
from copy import deepcopy
#import theano as th
#import theano.tensor as T
from keras.utils import np_utils
from keras.models import load_model
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.regularizers import *
from keras.optimizers import adam
from keras.optimizers import adagrad
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import seaborn as sns
import cPickle, random, sys, keras
from keras.utils import multi_gpu_model
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
import tensorflow as tf
# Dataset setup
# Load RML2016.10b: dict keyed by (modulation, snr) -> ndarray of examples.
Xd = cPickle.load(open("../data/RML2016.10b_dict.dat", 'rb'))
# Sorted unique SNRs (key index 1) and modulations (key index 0).
snrs, mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1, 0])
X = []
Y_snr = []
lbl = []
for snr in snrs:
    for mod in mods:
        X.append(Xd[(mod, snr)])
        # one (mod, snr) label tuple per example, aligned with stacking order
        for i in range(Xd[(mod, snr)].shape[0]): lbl.append((mod, snr))
        # NOTE(review): hard-codes 6000 examples per (mod, snr); must match
        # Xd[(mod, snr)].shape[0] above or Y_snr falls out of alignment.
        Y_snr = Y_snr + [mod]*6000
X = np.vstack(X)
Y_snr = np.vstack(Y_snr)
def to_onehot(yy, num_classes=None):
    """One-hot encode a sequence of integer class labels.

    Parameters
    ----------
    yy : iterable of int
        Class indices.  Any iterable is accepted: it is materialized
        internally, so generators and Python-3 ``map`` objects (which have
        no ``len``) also work.
    num_classes : int, optional
        Width of the encoding.  Defaults to ``max(yy) + 1``, which matches
        the original behavior but silently shrinks the matrix when the
        highest class is absent from ``yy`` — pass it explicitly when the
        label set is known.

    Returns
    -------
    np.ndarray of shape (len(yy), num_classes) with exactly one 1 per row.
    """
    yy = list(yy)  # materialize so len()/max() work on lazy iterables
    if num_classes is None:
        num_classes = max(yy) + 1
    yy1 = np.zeros([len(yy), num_classes])
    yy1[np.arange(len(yy)), yy] = 1
    return yy1
# Use only the train split (same seeded selection as the training script).
np.random.seed(2016)
n_examples = X.shape[0]
n_train_valid = n_examples // 2
train_valid_idx = np.random.choice(range(0, n_examples), size=n_train_valid, replace=False)
X_train_valid = X[train_valid_idx]
n_train = 3 * n_train_valid // 4
train_idx = np.random.choice(range(0, n_train_valid), size=n_train, replace=False)
X = X_train_valid[train_idx]
valid_idx = list(set(range(0, n_train_valid))-set(train_idx))
X_valid = X_train_valid[valid_idx]
# Rebuild one-hot labels for the training examples.
# NOTE(review): lbl is indexed by position in the *original* stacking order,
# but X has just been shuffled via train_valid_idx/train_idx — lbl[x] may
# not correspond to example x of the shuffled X; confirm against the
# training script before trusting these labels.
Y_snr = to_onehot(map(lambda x: mods.index(lbl[x][0]), range(X.shape[0])))
print("shape of X", np.shape(X))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
num_samples = 64  # number of time-steps (last axis of X) to keep per example
new_X = []
orig_model = load_model('../models/cldnn_ranker.h5')
# For every 60000-example SNR chunk: rank each time-step by how much zeroing
# it degrades the ranker model's accuracy, then keep the num_samples most
# damaging (= most informative) steps, preserving their original order.
for eva_iter in range(X.shape[0]//60000):
    snr_data = X[eva_iter*60000:(eva_iter+1)*60000]
    snr_out = Y_snr[eva_iter*60000:(eva_iter+1)*60000]
    snr_acc_list = []
    snr_data_copy = deepcopy(snr_data)
    for idx in range(X.shape[2]):
        snr_data = deepcopy(snr_data_copy)
        # zero out time-step idx (transpose so the step axis comes first)
        snr_data = snr_data.transpose((2, 1, 0))
        new_snr_data = np.append(snr_data[:idx], np.zeros((1, snr_data.shape[1], snr_data.shape[2])), axis=0)
        snr_data = np.append(new_snr_data, snr_data[idx+1:], axis=0)
        snr_data = snr_data.transpose((2, 1, 0))
        score = orig_model.evaluate(snr_data, snr_out, batch_size=60000, verbose=0)
        snr_acc_list.append((idx, score[1]))  # (time-step index, accuracy)
    # lowest accuracy first = most important steps; then restore step order
    snr_acc_list.sort(key=lambda x: x[1])
    snr_acc_list = snr_acc_list[:num_samples]
    snr_acc_list.sort(key=lambda x: x[0])
    snr_idxs = [ele[0] for ele in snr_acc_list]
    # NOTE(review): at this point snr_data is still the copy from the FINAL
    # loop iteration, i.e. with the last time-step zeroed; if that step is
    # among snr_idxs its data is all zeros.  Selecting from snr_data_copy
    # looks like the intended behavior — confirm before reuse.
    snr_data = snr_data.transpose((2, 1, 0))
    snr_data = snr_data[snr_idxs]
    snr_data = snr_data.transpose((2, 1, 0))
    new_X = new_X + [snr_data]
    print(eva_iter)
X = np.vstack(new_X)
np.save('../ranker_samples/cldnn/cldnn_'+str(num_samples)+'.npy', X)
| 36.210526 | 109 | 0.684012 |
import os,random
os.environ["KERAS_BACKEND"] = "tensorflow"
from keras.utils import np_utils
from keras.models import load_model
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.regularizers import *
from keras.optimizers import adam
from keras.optimizers import adagrad
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cPickle, random, sys, keras
from keras.utils import multi_gpu_model
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
import tensorflow as tf
Xd = cPickle.load(open("../data/RML2016.10b_dict.dat", 'rb'))
snrs, mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1, 0])
X = []
Y_snr = []
lbl = []
for snr in snrs:
for mod in mods:
X.append(Xd[(mod, snr)])
for i in range(Xd[(mod, snr)].shape[0]): lbl.append((mod, snr))
Y_snr = Y_snr + [mod]*6000
X = np.vstack(X)
Y_snr = np.vstack(Y_snr)
def to_onehot(yy):
yy1 = np.zeros([len(yy), max(yy) + 1])
yy1[np.arange(len(yy)), yy] = 1
return yy1
np.random.seed(2016)
n_examples = X.shape[0]
n_train_valid = n_examples // 2
train_valid_idx = np.random.choice(range(0, n_examples), size=n_train_valid, replace=False)
X_train_valid = X[train_valid_idx]
n_train = 3 * n_train_valid // 4
train_idx = np.random.choice(range(0, n_train_valid), size=n_train, replace=False)
X = X_train_valid[train_idx]
valid_idx = list(set(range(0, n_train_valid))-set(train_idx))
X_valid = X_train_valid[valid_idx]
Y_snr = to_onehot(map(lambda x: mods.index(lbl[x][0]), range(X.shape[0])))
print("shape of X", np.shape(X))
num_samples = 64
new_X = []
orig_model = load_model('../models/cldnn_ranker.h5')
for eva_iter in range(X.shape[0]//60000):
snr_data = X[eva_iter*60000:(eva_iter+1)*60000]
snr_out = Y_snr[eva_iter*60000:(eva_iter+1)*60000]
snr_acc_list = []
snr_data_copy = deepcopy(snr_data)
for idx in range(X.shape[2]):
snr_data = deepcopy(snr_data_copy)
snr_data = snr_data.transpose((2, 1, 0))
new_snr_data = np.append(snr_data[:idx], np.zeros((1, snr_data.shape[1], snr_data.shape[2])), axis=0)
snr_data = np.append(new_snr_data, snr_data[idx+1:], axis=0)
snr_data = snr_data.transpose((2, 1, 0))
score = orig_model.evaluate(snr_data, snr_out, batch_size=60000, verbose=0)
snr_acc_list.append((idx, score[1]))
snr_acc_list.sort(key=lambda x: x[1])
snr_acc_list = snr_acc_list[:num_samples]
snr_acc_list.sort(key=lambda x: x[0])
snr_idxs = [ele[0] for ele in snr_acc_list]
snr_data = snr_data.transpose((2, 1, 0))
snr_data = snr_data[snr_idxs]
snr_data = snr_data.transpose((2, 1, 0))
new_X = new_X + [snr_data]
print(eva_iter)
X = np.vstack(new_X)
np.save('../ranker_samples/cldnn/cldnn_'+str(num_samples)+'.npy', X)
| true | true |
f732ad2fb5d7e7d62c89efc5f6454bc26525c05a | 10,402 | py | Python | homeassistant/components/axis/device.py | amatas/home-assistant-core | bdbb4f939f34682b2eca993bb041cfb21214015c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/axis/device.py | amatas/home-assistant-core | bdbb4f939f34682b2eca993bb041cfb21214015c | [
"Apache-2.0"
] | 30 | 2021-04-19T09:52:11.000Z | 2022-03-31T06:09:38.000Z | homeassistant/components/axis/device.py | amatas/home-assistant-core | bdbb4f939f34682b2eca993bb041cfb21214015c | [
"Apache-2.0"
] | null | null | null | """Axis network device abstraction."""
import asyncio
import async_timeout
import axis
from axis.configuration import Configuration
from axis.errors import Unauthorized
from axis.event_stream import OPERATION_INITIALIZED
from axis.mqtt import mqtt_json_to_event
from axis.streammanager import SIGNAL_PLAYING, STATE_STOPPED
from homeassistant.components import mqtt
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mqtt.models import Message
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_TRIGGER_TIME,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.httpx_client import get_async_client
from homeassistant.setup import async_when_setup
from .const import (
ATTR_MANUFACTURER,
CONF_EVENTS,
CONF_MODEL,
CONF_STREAM_PROFILE,
CONF_VIDEO_SOURCE,
DEFAULT_EVENTS,
DEFAULT_STREAM_PROFILE,
DEFAULT_TRIGGER_TIME,
DEFAULT_VIDEO_SOURCE,
DOMAIN as AXIS_DOMAIN,
LOGGER,
PLATFORMS,
)
from .errors import AuthenticationRequired, CannotConnect
class AxisNetworkDevice:
    """Manages a Axis device."""
    def __init__(self, hass, config_entry):
        """Initialize the device."""
        self.hass = hass
        self.config_entry = config_entry
        # Reachability flag; flipped by async_connection_status_callback
        # based on the RTSP keep-alive state.
        self.available = True
        self.api = None
        self.fw_version = None
        self.product_type = None
        # MQTT unsubscribe callables, released in async_reset.
        self.listeners = []
    @property
    def host(self):
        """Return the host address of this device."""
        return self.config_entry.data[CONF_HOST]
    @property
    def port(self):
        """Return the HTTP port of this device."""
        return self.config_entry.data[CONF_PORT]
    @property
    def username(self):
        """Return the username of this device."""
        return self.config_entry.data[CONF_USERNAME]
    @property
    def password(self):
        """Return the password of this device."""
        return self.config_entry.data[CONF_PASSWORD]
    @property
    def model(self):
        """Return the model of this device."""
        return self.config_entry.data[CONF_MODEL]
    @property
    def name(self):
        """Return the name of this device."""
        return self.config_entry.data[CONF_NAME]
    @property
    def unique_id(self):
        """Return the unique ID (serial number) of this device."""
        return self.config_entry.unique_id
    # Options
    @property
    def option_events(self):
        """Config entry option defining if platforms based on events should be created."""
        return self.config_entry.options.get(CONF_EVENTS, DEFAULT_EVENTS)
    @property
    def option_stream_profile(self):
        """Config entry option defining what stream profile camera platform should use."""
        return self.config_entry.options.get(
            CONF_STREAM_PROFILE, DEFAULT_STREAM_PROFILE
        )
    @property
    def option_trigger_time(self):
        """Config entry option defining minimum number of seconds to keep trigger high."""
        return self.config_entry.options.get(CONF_TRIGGER_TIME, DEFAULT_TRIGGER_TIME)
    @property
    def option_video_source(self):
        """Config entry option defining what video source camera platform should use."""
        return self.config_entry.options.get(CONF_VIDEO_SOURCE, DEFAULT_VIDEO_SOURCE)
    # Signals
    @property
    def signal_reachable(self):
        """Device specific event to signal a change in connection status."""
        return f"axis_reachable_{self.unique_id}"
    @property
    def signal_new_event(self):
        """Device specific event to signal new device event available."""
        return f"axis_new_event_{self.unique_id}"
    @property
    def signal_new_address(self):
        """Device specific event to signal a change in device address."""
        return f"axis_new_address_{self.unique_id}"
    # Callbacks
    @callback
    def async_connection_status_callback(self, status):
        """Handle signals of device connection status.

        This is called on every RTSP keep-alive message.
        Only signal state change if state change is true.
        """
        if self.available != (status == SIGNAL_PLAYING):
            self.available = not self.available
            async_dispatcher_send(self.hass, self.signal_reachable, True)
    @callback
    def async_event_callback(self, action, event_id):
        """Call to configure events when initialized on event stream."""
        # Only newly discovered events trigger entity creation.
        if action == OPERATION_INITIALIZED:
            async_dispatcher_send(self.hass, self.signal_new_event, event_id)
    @staticmethod
    async def async_new_address_callback(hass, entry):
        """Handle signals of device getting new address.

        Called when config entry is updated.
        This is a static method because a class method (bound method),
        can not be used with weak references.
        """
        device = hass.data[AXIS_DOMAIN][entry.unique_id]
        device.api.config.host = device.host
        async_dispatcher_send(hass, device.signal_new_address)
    async def async_update_device_registry(self):
        """Update device registry."""
        device_registry = await self.hass.helpers.device_registry.async_get_registry()
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id,
            connections={(CONNECTION_NETWORK_MAC, self.unique_id)},
            identifiers={(AXIS_DOMAIN, self.unique_id)},
            manufacturer=ATTR_MANUFACTURER,
            model=f"{self.model} {self.product_type}",
            name=self.name,
            sw_version=self.fw_version,
        )
    async def use_mqtt(self, hass: HomeAssistant, component: str) -> None:
        """Set up to use MQTT."""
        try:
            status = await self.api.vapix.mqtt.get_client_status()
        except Unauthorized:
            # This means the user has too low privileges
            status = {}
        # Only subscribe when the device's own MQTT client is active.
        if status.get("data", {}).get("status", {}).get("state") == "active":
            self.listeners.append(
                await mqtt.async_subscribe(
                    hass, f"{self.api.vapix.serial_number}/#", self.mqtt_message
                )
            )
    @callback
    def mqtt_message(self, message: Message) -> None:
        """Receive Axis MQTT message."""
        # Events now arrive over MQTT, so stop the RTSP event stream —
        # presumably to avoid processing the same event twice (TODO confirm).
        self.disconnect_from_stream()
        event = mqtt_json_to_event(message.payload)
        self.api.event.update([event])
    # Setup and teardown methods
    async def async_setup(self):
        """Set up the device."""
        try:
            self.api = await get_device(
                self.hass,
                host=self.host,
                port=self.port,
                username=self.username,
                password=self.password,
            )
        except CannotConnect as err:
            # ConfigEntryNotReady lets Home Assistant retry setup later.
            raise ConfigEntryNotReady from err
        except AuthenticationRequired:
            # Credentials rejected: start a reauthentication flow.
            self.hass.async_create_task(
                self.hass.config_entries.flow.async_init(
                    AXIS_DOMAIN,
                    context={"source": SOURCE_REAUTH},
                    data=self.config_entry.data,
                )
            )
            return False
        self.fw_version = self.api.vapix.firmware_version
        self.product_type = self.api.vapix.product_type
        async def start_platforms():
            # Forward setup to all entity platforms before starting streams,
            # so entities exist when events begin to flow.
            await asyncio.gather(
                *[
                    self.hass.config_entries.async_forward_entry_setup(
                        self.config_entry, platform
                    )
                    for platform in PLATFORMS
                ]
            )
            if self.option_events:
                self.api.stream.connection_status_callback.append(
                    self.async_connection_status_callback
                )
                self.api.enable_events(event_callback=self.async_event_callback)
                self.api.stream.start()
            if self.api.vapix.mqtt:
                async_when_setup(self.hass, MQTT_DOMAIN, self.use_mqtt)
        self.hass.async_create_task(start_platforms())
        self.config_entry.add_update_listener(self.async_new_address_callback)
        return True
    @callback
    def disconnect_from_stream(self):
        """Stop stream."""
        # Remove the status callback first so stopping does not dispatch a
        # spurious unreachable signal.
        if self.api.stream.state != STATE_STOPPED:
            self.api.stream.connection_status_callback.remove(
                self.async_connection_status_callback
            )
            self.api.stream.stop()
    async def shutdown(self, event):
        """Stop the event stream."""
        self.disconnect_from_stream()
    async def async_reset(self):
        """Reset this device to default state."""
        self.disconnect_from_stream()
        unload_ok = all(
            await asyncio.gather(
                *[
                    self.hass.config_entries.async_forward_entry_unload(
                        self.config_entry, platform
                    )
                    for platform in PLATFORMS
                ]
            )
        )
        if not unload_ok:
            return False
        # Release MQTT subscriptions.
        for unsubscribe_listener in self.listeners:
            unsubscribe_listener()
        return True
async def get_device(hass, host, port, username, password):
    """Create a Axis device.

    Connects with TLS verification disabled (presumably because the
    devices commonly use self-signed certificates — TODO confirm) and
    initializes the Vapix API within a 30 second timeout.

    Raises:
        AuthenticationRequired: credentials rejected, or an unexpected
            axis library error occurred.
        CannotConnect: connection timed out or the request failed.
    """
    session = get_async_client(hass, verify_ssl=False)
    device = axis.AxisDevice(
        Configuration(session, host, port=port, username=username, password=password)
    )
    try:
        with async_timeout.timeout(30):
            await device.vapix.initialize()
        return device
    except axis.Unauthorized as err:
        LOGGER.warning("Connected to device at %s but not registered", host)
        raise AuthenticationRequired from err
    except (asyncio.TimeoutError, axis.RequestError) as err:
        LOGGER.error("Error connecting to the Axis device at %s", host)
        raise CannotConnect from err
    except axis.AxisException as err:
        # NOTE(review): unknown axis errors are surfaced as
        # AuthenticationRequired, which triggers a reauth flow in
        # async_setup — confirm this mapping is intended.
        LOGGER.exception("Unknown Axis communication error occurred")
        raise AuthenticationRequired from err
| 32.204334 | 90 | 0.644972 |
import asyncio
import async_timeout
import axis
from axis.configuration import Configuration
from axis.errors import Unauthorized
from axis.event_stream import OPERATION_INITIALIZED
from axis.mqtt import mqtt_json_to_event
from axis.streammanager import SIGNAL_PLAYING, STATE_STOPPED
from homeassistant.components import mqtt
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mqtt.models import Message
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_TRIGGER_TIME,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.httpx_client import get_async_client
from homeassistant.setup import async_when_setup
from .const import (
ATTR_MANUFACTURER,
CONF_EVENTS,
CONF_MODEL,
CONF_STREAM_PROFILE,
CONF_VIDEO_SOURCE,
DEFAULT_EVENTS,
DEFAULT_STREAM_PROFILE,
DEFAULT_TRIGGER_TIME,
DEFAULT_VIDEO_SOURCE,
DOMAIN as AXIS_DOMAIN,
LOGGER,
PLATFORMS,
)
from .errors import AuthenticationRequired, CannotConnect
class AxisNetworkDevice:
def __init__(self, hass, config_entry):
self.hass = hass
self.config_entry = config_entry
self.available = True
self.api = None
self.fw_version = None
self.product_type = None
self.listeners = []
@property
def host(self):
return self.config_entry.data[CONF_HOST]
@property
def port(self):
return self.config_entry.data[CONF_PORT]
@property
def username(self):
return self.config_entry.data[CONF_USERNAME]
@property
def password(self):
return self.config_entry.data[CONF_PASSWORD]
@property
def model(self):
return self.config_entry.data[CONF_MODEL]
@property
def name(self):
return self.config_entry.data[CONF_NAME]
@property
def unique_id(self):
return self.config_entry.unique_id
@property
def option_events(self):
return self.config_entry.options.get(CONF_EVENTS, DEFAULT_EVENTS)
@property
def option_stream_profile(self):
return self.config_entry.options.get(
CONF_STREAM_PROFILE, DEFAULT_STREAM_PROFILE
)
@property
def option_trigger_time(self):
return self.config_entry.options.get(CONF_TRIGGER_TIME, DEFAULT_TRIGGER_TIME)
@property
def option_video_source(self):
return self.config_entry.options.get(CONF_VIDEO_SOURCE, DEFAULT_VIDEO_SOURCE)
@property
def signal_reachable(self):
return f"axis_reachable_{self.unique_id}"
@property
def signal_new_event(self):
return f"axis_new_event_{self.unique_id}"
@property
def signal_new_address(self):
return f"axis_new_address_{self.unique_id}"
@callback
def async_connection_status_callback(self, status):
if self.available != (status == SIGNAL_PLAYING):
self.available = not self.available
async_dispatcher_send(self.hass, self.signal_reachable, True)
@callback
def async_event_callback(self, action, event_id):
if action == OPERATION_INITIALIZED:
async_dispatcher_send(self.hass, self.signal_new_event, event_id)
@staticmethod
async def async_new_address_callback(hass, entry):
device = hass.data[AXIS_DOMAIN][entry.unique_id]
device.api.config.host = device.host
async_dispatcher_send(hass, device.signal_new_address)
async def async_update_device_registry(self):
device_registry = await self.hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, self.unique_id)},
identifiers={(AXIS_DOMAIN, self.unique_id)},
manufacturer=ATTR_MANUFACTURER,
model=f"{self.model} {self.product_type}",
name=self.name,
sw_version=self.fw_version,
)
async def use_mqtt(self, hass: HomeAssistant, component: str) -> None:
try:
status = await self.api.vapix.mqtt.get_client_status()
except Unauthorized:
status = {}
if status.get("data", {}).get("status", {}).get("state") == "active":
self.listeners.append(
await mqtt.async_subscribe(
hass, f"{self.api.vapix.serial_number}/#", self.mqtt_message
)
)
@callback
def mqtt_message(self, message: Message) -> None:
self.disconnect_from_stream()
event = mqtt_json_to_event(message.payload)
self.api.event.update([event])
async def async_setup(self):
    """Connect to the device and wire up platforms and event handling.

    Returns True on success, False when re-authentication is required
    (a reauth flow is started), and raises ConfigEntryNotReady when the
    device cannot be reached.
    """
    try:
        self.api = await get_device(
            self.hass,
            host=self.host,
            port=self.port,
            username=self.username,
            password=self.password,
        )
    except CannotConnect as err:
        # Let Home Assistant retry the setup later.
        raise ConfigEntryNotReady from err
    except AuthenticationRequired:
        # Start a reauth flow so the user can supply new credentials.
        self.hass.async_create_task(
            self.hass.config_entries.flow.async_init(
                AXIS_DOMAIN,
                context={"source": SOURCE_REAUTH},
                data=self.config_entry.data,
            )
        )
        return False
    self.fw_version = self.api.vapix.firmware_version
    self.product_type = self.api.vapix.product_type
    async def start_platforms():
        # Platforms must be loaded before events start flowing.
        await asyncio.gather(
            *[
                self.hass.config_entries.async_forward_entry_setup(
                    self.config_entry, platform
                )
                for platform in PLATFORMS
            ]
        )
        if self.option_events:
            self.api.stream.connection_status_callback.append(
                self.async_connection_status_callback
            )
            self.api.enable_events(event_callback=self.async_event_callback)
            self.api.stream.start()
        if self.api.vapix.mqtt:
            # Prefer MQTT events once the MQTT integration is available.
            async_when_setup(self.hass, MQTT_DOMAIN, self.use_mqtt)
    self.hass.async_create_task(start_platforms())
    self.config_entry.add_update_listener(self.async_new_address_callback)
    return True
@callback
def disconnect_from_stream(self):
    """Stop the streaming client and detach its status callback (idempotent)."""
    if self.api.stream.state == STATE_STOPPED:
        return
    self.api.stream.connection_status_callback.remove(
        self.async_connection_status_callback
    )
    self.api.stream.stop()
async def shutdown(self, event):
    """Stop the event stream; `event` is accepted for the handler signature and unused."""
    self.disconnect_from_stream()
async def async_reset(self):
    """Unload all platforms and release event subscriptions.

    Returns False if any platform failed to unload, True otherwise.
    Listeners are only unsubscribed after a fully successful unload,
    matching the previous behaviour.
    """
    self.disconnect_from_stream()
    results = await asyncio.gather(
        *(
            self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, platform
            )
            for platform in PLATFORMS
        )
    )
    if not all(results):
        return False
    for unsubscribe_listener in self.listeners:
        unsubscribe_listener()
    return True
async def get_device(hass, host, port, username, password):
    """Create an Axis device API client and verify it is reachable.

    Raises AuthenticationRequired on credential problems (and, deliberately
    or not, on unknown errors — see note below) and CannotConnect when the
    device does not answer within 30 seconds.
    """
    session = get_async_client(hass, verify_ssl=False)
    device = axis.AxisDevice(
        Configuration(session, host, port=port, username=username, password=password)
    )
    try:
        # NOTE(review): plain `with` on async_timeout only works on older
        # async_timeout releases (newer ones require `async with`) — confirm
        # against the pinned dependency version.
        with async_timeout.timeout(30):
            await device.vapix.initialize()
        return device
    except axis.Unauthorized as err:
        LOGGER.warning("Connected to device at %s but not registered", host)
        raise AuthenticationRequired from err
    except (asyncio.TimeoutError, axis.RequestError) as err:
        LOGGER.error("Error connecting to the Axis device at %s", host)
        raise CannotConnect from err
    except axis.AxisException as err:
        # NOTE(review): unknown communication errors surface as an auth
        # problem, triggering a reauth flow — confirm this is intended.
        LOGGER.exception("Unknown Axis communication error occurred")
        raise AuthenticationRequired from err
| true | true |
f732ae4687670e15c3de1e4e5c21bee291e33b6a | 4,500 | py | Python | webform.py | rubind/travelreform | 073c5b5ab17feb495d8c3a0e55997f2f1ffd15fa | [
"BSD-3-Clause"
] | null | null | null | webform.py | rubind/travelreform | 073c5b5ab17feb495d8c3a0e55997f2f1ffd15fa | [
"BSD-3-Clause"
] | null | null | null | webform.py | rubind/travelreform | 073c5b5ab17feb495d8c3a0e55997f2f1ffd15fa | [
"BSD-3-Clause"
] | null | null | null | from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField
from wtforms import validators, StringField, SubmitField, DateField
# App config.
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
# SECURITY NOTE(review): the secret key is hard-coded in source; load it from
# the environment or instance config before deploying anywhere real.
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
class TravelAdvance(Form):
    """Travel-advance / personal-travel section of the request form.

    Uses StringField and validators.DataRequired because wtforms'
    TextField and validators.required are deprecated aliases (removed
    in WTForms 3); behaviour is identical on WTForms 2.
    """

    need_advance = StringField('Do you need a travel advance?', validators=[validators.DataRequired()])
    how_much_advance = StringField('How much?')
    any_part_personal = StringField('Is any part of this trip personal?', validators=[validators.DataRequired()])
    personal_travel_dates = DateField('If so, specify dates:')
    personal_travel_destination = StringField('If so, specify destination:')
class BasicInfoForm(Form):
    """Main travel-request form: traveler identity, travel dates, event
    details and billing information.

    Uses StringField and validators.DataRequired because wtforms'
    TextField and validators.required are deprecated aliases (removed
    in WTForms 3); behaviour is identical on WTForms 2.
    """

    name = StringField('Name of Traveler:', validators=[validators.DataRequired()])
    title = StringField('Traveler Title:', validators=[validators.DataRequired()])
    phone = StringField('Phone:', validators=[validators.DataRequired()])
    email = StringField('Email:', validators=[validators.DataRequired()])
    date_start = StringField('Start Date of Travel:',
                             validators=[validators.DataRequired()])
    date_stop = StringField('End Date of Travel:',
                            validators=[validators.DataRequired()])
    event_name = StringField('Event Name:', validators=[validators.DataRequired()])
    event_start = StringField('Event Start:', validators=[validators.DataRequired()])
    event_stop = StringField('Event Stop:', validators=[validators.DataRequired()])
    event_attendeetype = StringField('Attendee Type:',
                                     validators=[validators.DataRequired()])
    stsci_employee = StringField('STScI Employee (Y/N):',
                                 validators=[validators.DataRequired()])
    external_organization = StringField('External Org.:',
                                        validators=[validators.DataRequired()])
    destination = StringField('Destination:',
                              validators=[validators.DataRequired()])
    wbs = StringField('WBS:', validators=[validators.DataRequired()])
    purpose_of_travel = StringField('Purpose of Travel:',
                                    validators=[validators.DataRequired()])
    empl_org_num = StringField('Employee Org #:',
                               validators=[validators.DataRequired()])
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Render the travel request form; on a valid POST, echo the traveler's
    basic info back via flash messages.

    FIX: the original pulled all 16 fields out of request.form into unused
    locals, which both cluttered the function and raised KeyError (HTTP 400)
    when any key was absent from the POST body. Only the fields actually
    used are read now, via the already-bound form object.
    """
    form = BasicInfoForm(request.form)
    empl_nums = ["1.1.01.00.76.03, FACO Facilities Operat",
                 "DO Director's Office",
                 "1.1.01.20.10.40, ACS"]
    print(form.errors)  # kept: original debug output
    if request.method == 'POST':
        if form.validate():
            flash('Hello ' + form.name.data)
            flash('Your title is ' + form.title.data)
            flash('Your phone number is ' + form.phone.data)
        else:
            flash('All the form fields are required. ')
    return render_template('webform.html', form=form, empl_nums=empl_nums)
"""
@app.route("/", methods=['GET', 'POST'])
def advance():
form = TravelAdvance(request.form)
print(form.errors)
if request.method == 'POST':
need_advance = request.form['need_advance']
how_much_advance = request.form['how_much_advance']
any_part_personal = request.form['any_part_personal']
personal_travel_dates = request.form['personal_travel_dates']
personal_travel_destination = request.form['personal_travel_destination']
if form.validate():
pass
else:
flash('All the form fields are required. ')
return render_template('advance.html', form=form)
"""
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
| 40.178571 | 107 | 0.637556 | from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField
from wtforms import validators, StringField, SubmitField, DateField
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
class TravelAdvance(Form):
need_advance = TextField('Do you need a travel advance?', validators=[validators.required()])
how_much_advance = TextField('How much?')
any_part_personal = TextField('Is any part of this trip personal?', validators=[validators.required()])
personal_travel_dates = DateField('If so, specify dates:')
personal_travel_destination = TextField('If so, specify destination:')
class BasicInfoForm(Form):
name = TextField('Name of Traveler:', validators=[validators.required()])
title = TextField('Traveler Title:', validators=[validators.required()])
phone = TextField('Phone:', validators=[validators.required()])
email = TextField('Email:', validators=[validators.required()])
date_start = TextField('Start Date of Travel:',
validators=[validators.required()])
date_stop = TextField('End Date of Travel:',
validators=[validators.required()])
event_name = TextField('Event Name:', validators=[validators.required()])
event_start = TextField('Event Start:', validators=[validators.required()])
event_stop = TextField('Event Stop:', validators=[validators.required()])
event_attendeetype = TextField('Attendee Type:',
validators=[validators.required()])
stsci_employee = TextField('STScI Employee (Y/N):',
validators=[validators.required()])
external_organization = TextField('External Org.:',
validators=[validators.required()])
destination = TextField('Destination:',
validators=[validators.required()])
wbs = TextField('WBS:', validators=[validators.required()])
purpose_of_travel = TextField('Purpose of Travel:',
validators=[validators.required()])
empl_org_num = TextField('Employee Org #:',
validators=[validators.required()])
@app.route("/", methods=['GET', 'POST'])
def hello():
form = BasicInfoForm(request.form)
empl_nums = ["1.1.01.00.76.03, FACO Facilities Operat",
"DO Director's Office",
"1.1.01.20.10.40, ACS"]
print(form.errors)
if request.method == 'POST':
name = request.form['name']
title = request.form['title']
phone = request.form['phone']
email = request.form['email']
date_start = request.form['date_start']
date_stop = request.form['date_stop']
event_name = request.form['event_name']
event_start = request.form['event_start']
event_stop = request.form['event_stop']
event_attendeetype = request.form['event_attendeetype']
stsci_employee = request.form['stsci_employee']
external_organization = request.form['external_organization']
destination = request.form['destination']
wbs = request.form['wbs']
purpose_of_travel = request.form['purpose_of_travel']
empl_org_num = request.form['empl_org_num']
if form.validate():
# Save the comment here.
flash('Hello ' + name)
flash('Your title is ' + title)
flash('Your phone number is ' + phone)
else:
flash('All the form fields are required. ')
return render_template('webform.html', form=form, empl_nums=empl_nums)
if __name__ == "__main__":
app.run()
| true | true |
f732afab2b623525ae2d42767da48fdf8cb24eb0 | 8,986 | py | Python | api/open_general_licences/views.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/open_general_licences/views.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/open_general_licences/views.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | from django.contrib.contenttypes.models import ContentType
from django.db.models import F
from django.http import JsonResponse
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateAPIView
from rest_framework.views import APIView
from api.audit_trail import service as audit_trail_service
from api.audit_trail.enums import AuditType
from api.audit_trail.serializers import AuditSerializer
from api.core import constants
from api.core.authentication import SharedAuthentication, GovAuthentication
from api.core.helpers import str_to_bool
from api.core.permissions import assert_user_has_permission
from lite_content.lite_api.strings import OpenGeneralLicences
from api.open_general_licences.models import OpenGeneralLicence, OpenGeneralLicenceCase
from api.open_general_licences.serializers import OpenGeneralLicenceSerializer
from api.organisations.libraries.get_organisation import get_request_user_organisation
from api.organisations.models import Site
from api.staticdata.statuses.enums import CaseStatusEnum
from api.users.enums import UserType
from api.users.models import GovUser, GovNotification
class OpenGeneralLicenceList(ListCreateAPIView):
    """List open general licences and create new ones.

    Exporters may list (optionally scoped to their organisation's sites);
    internal users must hold the MAINTAIN_OGL permission for both listing
    and creating.
    """
    authentication_classes = (SharedAuthentication,)
    serializer_class = OpenGeneralLicenceSerializer
    queryset = (
        OpenGeneralLicence.objects.all()
        .select_related("case_type")
        .prefetch_related("countries", "control_list_entries")
    )
    def get_serializer_context(self):
        """Build the serializer context.

        BUG FIX: the previous implementation implicitly returned None for
        non-exporter users, breaking serializers that expect a dict
        context. Always return a dict based on DRF's default context.
        """
        context = super().get_serializer_context()
        user = self.request.user
        context["user"] = user
        if hasattr(user, "exporteruser"):
            organisation = get_request_user_organisation(self.request)
            sites = Site.objects.get_by_user_and_organisation(user.exporteruser, organisation)
            cases = (
                OpenGeneralLicenceCase.objects.filter(site__in=sites)
                .select_related("status", "site", "site__address")
                .annotate(records_located_at_name=F("site__site_records_located_at__name"))
            )
            if str_to_bool(self.request.GET.get("active_only")):
                # Restrict to statuses that represent a live registration.
                cases = cases.filter(
                    status__status__in=[
                        CaseStatusEnum.FINALISED,
                        CaseStatusEnum.REGISTERED,
                        CaseStatusEnum.UNDER_ECJU_REVIEW,
                    ]
                )
            context.update({"organisation": organisation, "cases": cases})
        return context
    def filter_queryset(self, queryset):
        """Apply query-string filters; internal users need MAINTAIN_OGL."""
        filter_data = self.request.GET
        if self.request.user.type == UserType.INTERNAL:
            assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
        elif self.request.user.type == UserType.EXPORTER:
            if filter_data.get("site"):
                queryset = queryset.filter(cases__site_id=filter_data.get("site"))
            if str_to_bool(filter_data.get("active_only")):
                queryset = queryset.filter(
                    cases__status__status__in=[
                        CaseStatusEnum.FINALISED,
                        CaseStatusEnum.REGISTERED,
                        CaseStatusEnum.UNDER_ECJU_REVIEW,
                    ]
                )
            if str_to_bool(filter_data.get("registered")):
                # Only licences registered against one of the exporter's sites.
                organisation = get_request_user_organisation(self.request)
                sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
                queryset = queryset.filter(cases__site__in=sites).distinct()
        if filter_data.get("name"):
            queryset = queryset.filter(name__icontains=filter_data.get("name"))
        if filter_data.get("case_type"):
            queryset = queryset.filter(case_type_id=filter_data.get("case_type"))
        if filter_data.get("control_list_entry"):
            queryset = queryset.filter(control_list_entries__rating=filter_data.get("control_list_entry"))
        if filter_data.get("country"):
            queryset = queryset.filter(countries__id__contains=filter_data.get("country"))
        if filter_data.get("status"):
            queryset = queryset.filter(status=filter_data.get("status"))
        return queryset
    def perform_create(self, serializer):
        """Create a licence (internal users only) and audit the creation."""
        assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
        # validate_only requests must not persist anything.
        if not self.request.data.get("validate_only", False):
            instance = serializer.save()
            audit_trail_service.create(
                actor=self.request.user, verb=AuditType.OGL_CREATED, action_object=instance,
            )
class OpenGeneralLicenceDetail(RetrieveUpdateAPIView):
    """Retrieve or update a single open general licence.

    Updates require an internal user with the MAINTAIN_OGL permission and
    every changed field is recorded in the audit trail.
    """
    authentication_classes = (SharedAuthentication,)
    serializer_class = OpenGeneralLicenceSerializer
    queryset = (
        OpenGeneralLicence.objects.all()
        .select_related("case_type")
        .prefetch_related("countries", "control_list_entries")
    )
    def get_serializer_context(self):
        """Build the serializer context.

        BUG FIX: the previous implementation implicitly returned None for
        internal (non-exporter) users, breaking serializers that expect a
        dict context. Always return a dict based on DRF's default context.
        """
        context = super().get_serializer_context()
        user = self.request.user
        context["user"] = user
        if user.type == UserType.EXPORTER:
            organisation = get_request_user_organisation(self.request)
            sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
            cases = (
                OpenGeneralLicenceCase.objects.filter(site__in=sites)
                .select_related("status", "site", "site__address")
                .annotate(records_located_at_name=F("site__site_records_located_at__name"))
            )
            context.update({"organisation": organisation, "cases": cases})
        return context
    def perform_update(self, serializer):
        """Save the update and audit every scalar and m2m field that changed."""
        assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
        # Don't update the data during validate_only requests
        if not self.request.data.get("validate_only", False):
            fields = [
                ("name", OpenGeneralLicences.ActivityFieldDisplay.NAME),
                ("description", OpenGeneralLicences.ActivityFieldDisplay.DESCRIPTION),
                ("url", OpenGeneralLicences.ActivityFieldDisplay.URL),
                ("case_type", OpenGeneralLicences.ActivityFieldDisplay.CASE_TYPE),
                ("registration_required", OpenGeneralLicences.ActivityFieldDisplay.REGISTRATION_REQUIRED),
                ("status", OpenGeneralLicences.ActivityFieldDisplay.STATUS),
            ]
            m2m_fields = [
                ("countries", OpenGeneralLicences.ActivityFieldDisplay.COUNTRIES),
                ("control_list_entries", OpenGeneralLicences.ActivityFieldDisplay.CONTROL_LIST_ENTRIES),
            ]
            # Snapshot m2m values before saving so we can diff afterwards.
            original_instance = self.get_object()
            original_m2m_sets = {}
            for field, display in m2m_fields:
                original_m2m_sets[field] = set(getattr(original_instance, field).all())
            # save model
            updated_instance = serializer.save()
            for field, display in fields:
                if getattr(original_instance, field) != getattr(updated_instance, field):
                    audit_trail_service.create(
                        actor=self.request.user,
                        verb=AuditType.OGL_FIELD_EDITED,
                        action_object=updated_instance,
                        payload={
                            "key": display,
                            "old": getattr(original_instance, field),
                            "new": getattr(updated_instance, field),
                        },
                    )
            for field, display in m2m_fields:
                if original_m2m_sets[field] != set(getattr(updated_instance, field).all()):
                    audit_trail_service.create(
                        actor=self.request.user,
                        verb=AuditType.OGL_MULTI_FIELD_EDITED,
                        action_object=updated_instance,
                        payload={"key": display},
                    )
class OpenGeneralLicenceActivityView(APIView):
    """Audit-trail activity for one open general licence (internal users only)."""
    authentication_classes = (GovAuthentication,)
    def get(self, request, pk):
        """Return audit entries and available activity filters for licence `pk`."""
        assert_user_has_permission(request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
        filter_data = audit_trail_service.get_filters(request.GET)
        content_type = ContentType.objects.get_for_model(OpenGeneralLicence)
        audit_trail_qs = audit_trail_service.filter_object_activity(
            object_id=pk, object_content_type=content_type, **filter_data
        )
        data = AuditSerializer(audit_trail_qs, many=True).data
        if isinstance(request.user, GovUser):
            # Delete notifications related to audits
            GovNotification.objects.filter(user_id=request.user.pk, object_id__in=[obj["id"] for obj in data]).delete()
        filters = audit_trail_service.get_objects_activity_filters(pk, content_type)
        return JsonResponse(data={"activity": data, "filters": filters}, status=status.HTTP_200_OK)
| 45.383838 | 119 | 0.662586 | from django.contrib.contenttypes.models import ContentType
from django.db.models import F
from django.http import JsonResponse
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateAPIView
from rest_framework.views import APIView
from api.audit_trail import service as audit_trail_service
from api.audit_trail.enums import AuditType
from api.audit_trail.serializers import AuditSerializer
from api.core import constants
from api.core.authentication import SharedAuthentication, GovAuthentication
from api.core.helpers import str_to_bool
from api.core.permissions import assert_user_has_permission
from lite_content.lite_api.strings import OpenGeneralLicences
from api.open_general_licences.models import OpenGeneralLicence, OpenGeneralLicenceCase
from api.open_general_licences.serializers import OpenGeneralLicenceSerializer
from api.organisations.libraries.get_organisation import get_request_user_organisation
from api.organisations.models import Site
from api.staticdata.statuses.enums import CaseStatusEnum
from api.users.enums import UserType
from api.users.models import GovUser, GovNotification
class OpenGeneralLicenceList(ListCreateAPIView):
authentication_classes = (SharedAuthentication,)
serializer_class = OpenGeneralLicenceSerializer
queryset = (
OpenGeneralLicence.objects.all()
.select_related("case_type")
.prefetch_related("countries", "control_list_entries")
)
def get_serializer_context(self):
user = self.request.user
if hasattr(user, "exporteruser"):
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
cases = (
OpenGeneralLicenceCase.objects.filter(site__in=sites)
.select_related("status", "site", "site__address")
.annotate(records_located_at_name=F("site__site_records_located_at__name"))
)
if str_to_bool(self.request.GET.get("active_only")):
cases = cases.filter(
status__status__in=[
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
]
)
return {"user": user, "organisation": organisation, "cases": cases}
def filter_queryset(self, queryset):
filter_data = self.request.GET
if self.request.user.type == UserType.INTERNAL:
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
elif self.request.user.type == UserType.EXPORTER:
if filter_data.get("site"):
queryset = queryset.filter(cases__site_id=filter_data.get("site"))
if str_to_bool(filter_data.get("active_only")):
queryset = queryset.filter(
cases__status__status__in=[
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
]
)
if str_to_bool(filter_data.get("registered")):
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
queryset = queryset.filter(cases__site__in=sites).distinct()
if filter_data.get("name"):
queryset = queryset.filter(name__icontains=filter_data.get("name"))
if filter_data.get("case_type"):
queryset = queryset.filter(case_type_id=filter_data.get("case_type"))
if filter_data.get("control_list_entry"):
queryset = queryset.filter(control_list_entries__rating=filter_data.get("control_list_entry"))
if filter_data.get("country"):
queryset = queryset.filter(countries__id__contains=filter_data.get("country"))
if filter_data.get("status"):
queryset = queryset.filter(status=filter_data.get("status"))
return queryset
def perform_create(self, serializer):
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
if not self.request.data.get("validate_only", False):
instance = serializer.save()
audit_trail_service.create(
actor=self.request.user, verb=AuditType.OGL_CREATED, action_object=instance,
)
class OpenGeneralLicenceDetail(RetrieveUpdateAPIView):
authentication_classes = (SharedAuthentication,)
serializer_class = OpenGeneralLicenceSerializer
queryset = (
OpenGeneralLicence.objects.all()
.select_related("case_type")
.prefetch_related("countries", "control_list_entries")
)
def get_serializer_context(self):
user = self.request.user
if user.type == UserType.EXPORTER:
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
cases = (
OpenGeneralLicenceCase.objects.filter(site__in=sites)
.select_related("status", "site", "site__address")
.annotate(records_located_at_name=F("site__site_records_located_at__name"))
)
return {"user": user, "organisation": organisation, "cases": cases}
def perform_update(self, serializer):
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
if not self.request.data.get("validate_only", False):
fields = [
("name", OpenGeneralLicences.ActivityFieldDisplay.NAME),
("description", OpenGeneralLicences.ActivityFieldDisplay.DESCRIPTION),
("url", OpenGeneralLicences.ActivityFieldDisplay.URL),
("case_type", OpenGeneralLicences.ActivityFieldDisplay.CASE_TYPE),
("registration_required", OpenGeneralLicences.ActivityFieldDisplay.REGISTRATION_REQUIRED),
("status", OpenGeneralLicences.ActivityFieldDisplay.STATUS),
]
m2m_fields = [
("countries", OpenGeneralLicences.ActivityFieldDisplay.COUNTRIES),
("control_list_entries", OpenGeneralLicences.ActivityFieldDisplay.CONTROL_LIST_ENTRIES),
]
# data setup for audit checks
original_instance = self.get_object()
original_m2m_sets = {}
for field, display in m2m_fields:
original_m2m_sets[field] = set(getattr(original_instance, field).all())
# save model
updated_instance = serializer.save()
for field, display in fields:
if getattr(original_instance, field) != getattr(updated_instance, field):
audit_trail_service.create(
actor=self.request.user,
verb=AuditType.OGL_FIELD_EDITED,
action_object=updated_instance,
payload={
"key": display,
"old": getattr(original_instance, field),
"new": getattr(updated_instance, field),
},
)
for field, display in m2m_fields:
if original_m2m_sets[field] != set(getattr(updated_instance, field).all()):
audit_trail_service.create(
actor=self.request.user,
verb=AuditType.OGL_MULTI_FIELD_EDITED,
action_object=updated_instance,
payload={"key": display},
)
class OpenGeneralLicenceActivityView(APIView):
authentication_classes = (GovAuthentication,)
def get(self, request, pk):
assert_user_has_permission(request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
filter_data = audit_trail_service.get_filters(request.GET)
content_type = ContentType.objects.get_for_model(OpenGeneralLicence)
audit_trail_qs = audit_trail_service.filter_object_activity(
object_id=pk, object_content_type=content_type, **filter_data
)
data = AuditSerializer(audit_trail_qs, many=True).data
if isinstance(request.user, GovUser):
# Delete notifications related to audits
GovNotification.objects.filter(user_id=request.user.pk, object_id__in=[obj["id"] for obj in data]).delete()
filters = audit_trail_service.get_objects_activity_filters(pk, content_type)
return JsonResponse(data={"activity": data, "filters": filters}, status=status.HTTP_200_OK)
| true | true |
f732afd0af1724a0a5302530af48ae4acd5a254a | 1,482 | py | Python | Lesson08/elevatorEx.py | PacktPublishing/Python-Fundamentals | f24569826b1b7f97e3d54630a34ae61110ca12da | [
"MIT"
] | 1 | 2021-04-23T14:01:56.000Z | 2021-04-23T14:01:56.000Z | Lesson08/elevatorEx.py | PacktPublishing/Python-Fundamentals | f24569826b1b7f97e3d54630a34ae61110ca12da | [
"MIT"
] | null | null | null | Lesson08/elevatorEx.py | PacktPublishing/Python-Fundamentals | f24569826b1b7f97e3d54630a34ae61110ca12da | [
"MIT"
] | 4 | 2021-06-29T05:57:44.000Z | 2021-09-02T10:14:55.000Z | class Elevator:
occupancy_limit = 8
def __init__(self, occupants=0):
self.floor = 0
if occupants <= Elevator.occupancy_limit:
self.occupants = occupants
else:
self.occupants = Elevator.occupancy_limit
print('too many occupants', occupants - Elevator.occupancy_limit, 'left outside')
def add_occupants(self,num):
self.occupants += num
if self.occupants > Elevator.occupancy_limit:
print('too many occupants', self.occupants - Elevator.occupancy_limit, 'left outside')
self.occupants = Elevator.occupancy_limit
def remove_occupants(self,num):
if self.occupants - num > 0:
self.occupants -= num
else:
print('elevator empty')
self.occupants = 0
def goto_floor(self,floor):
if floor < -5 or floor > 50:
print('floor',floor,'does not exist')
else:
self.floor = floor
# Demo: exercise the Elevator class from the command line.
elevator1 = Elevator(6)
elevator1.add_occupants(7)  # 13 total -> exceeds the limit of 8, warning printed
elevator2 = Elevator(11)  # over the limit already at construction
elevator1.goto_floor(20)
elevator1.remove_occupants(99)  # more than aboard -> 'elevator empty'
elevator2.goto_floor(99)  # outside -5..50 -> warning printed
print(elevator1.__dict__)
print(elevator2.__dict__)
"""
ATTRIBUTES
Occupants attribute which is 0 by default
floor attribute which is 0 by default
METHODS:
Add_occupants
Go to floor
PROPERTIES:
Occupants can only be added if the occupants limit (8) has not been exceeded
Only floors from -5 to 50 exist
"""
| 27.962264 | 98 | 0.647099 | class Elevator:
occupancy_limit = 8
def __init__(self, occupants=0):
self.floor = 0
if occupants <= Elevator.occupancy_limit:
self.occupants = occupants
else:
self.occupants = Elevator.occupancy_limit
print('too many occupants', occupants - Elevator.occupancy_limit, 'left outside')
def add_occupants(self,num):
self.occupants += num
if self.occupants > Elevator.occupancy_limit:
print('too many occupants', self.occupants - Elevator.occupancy_limit, 'left outside')
self.occupants = Elevator.occupancy_limit
def remove_occupants(self,num):
if self.occupants - num > 0:
self.occupants -= num
else:
print('elevator empty')
self.occupants = 0
def goto_floor(self,floor):
if floor < -5 or floor > 50:
print('floor',floor,'does not exist')
else:
self.floor = floor
elevator1 = Elevator(6)
elevator1.add_occupants(7)
elevator2 = Elevator(11)
elevator1.goto_floor(20)
elevator1.remove_occupants(99)
elevator2.goto_floor(99)
print(elevator1.__dict__)
print(elevator2.__dict__)
| true | true |
f732b2e9fd2f67c62643b7cae833d2d028d7a955 | 1,179 | py | Python | piwars/core/config.py | westpark/robotics | 62546d0b2235b9ab73ec7968e2167f516a664c58 | [
"MIT"
] | null | null | null | piwars/core/config.py | westpark/robotics | 62546d0b2235b9ab73ec7968e2167f516a664c58 | [
"MIT"
] | null | null | null | piwars/core/config.py | westpark/robotics | 62546d0b2235b9ab73ec7968e2167f516a664c58 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os, sys
import configparser
import warnings
#
# Look for a global .ini in the current directory. If none is
# there, issue a warning. Look for a local .ini
# in the same directory. If that isn't present, also issue a warning
# but carry on and use the global values.
#
# Resolve the expected global and local ini paths, warning (not failing)
# when either file is missing.
global_filepath = os.path.abspath("piwars.ini")
if not os.path.isfile(global_filepath):
    # BUG FIX: `warnings(...)` called the warnings *module* object, which
    # raises TypeError at runtime; use warnings.warn like the branch below.
    warnings.warn("No global ini found at %s" % global_filepath)
local_filepath = os.path.join(os.path.dirname(global_filepath), "piwars.local.ini")
if not os.path.isfile(local_filepath):
    warnings.warn("No local ini found at %s" % local_filepath)
ini = configparser.ConfigParser()
# Later files win: values in the local ini override the global ones.
ini.read([global_filepath, local_filepath])
#
# Since we already have code which expects to find a set of simple
# module constants, keep that approach alive. This does however preclude
# the easy possibility of a reload-while-running
#
LISTEN_ON_IP = ini['network']['listen_on_ip']
LISTEN_ON_PORT = ini['network']['listen_on_port']
PUBSUB_LISTEN_ON_IP = ini['pubsub']['listen_on_ip']
PUBSUB_LISTEN_ON_PORT = ini['pubsub']['listen_on_port']
CODEC = ini['i18n']['codec']
| 36.84375 | 84 | 0.729432 |
import os, sys
import configparser
import warnings
# but carry on and use the global values
#
global_filepath = os.path.abspath("piwars.ini")
if not os.path.isfile(global_filepath):
warnings("No global ini found at %s" % global_filepath)
local_filepath = os.path.join(os.path.dirname(global_filepath), "piwars.local.ini")
if not os.path.isfile(local_filepath):
warnings.warn("No local ini found at %s" % local_filepath)
ini = configparser.ConfigParser()
ini.read([global_filepath, local_filepath])
#
# Since we already have code which expects to find a set of simple
# module constants, keep that approach alive. This does however preclude
# the easy possibility of a reload-while-running
#
LISTEN_ON_IP = ini['network']['listen_on_ip']
LISTEN_ON_PORT = ini['network']['listen_on_port']
PUBSUB_LISTEN_ON_IP = ini['pubsub']['listen_on_ip']
PUBSUB_LISTEN_ON_PORT = ini['pubsub']['listen_on_port']
CODEC = ini['i18n']['codec']
| true | true |
f732b2ec2b02ec6fa26f1db75dfbaa8d9869b735 | 290 | py | Python | nutshell_api/conftest.py | garguelles/nutshell-api | df6e68ca1919b9c0fc63db89f528800aadbab596 | [
"MIT"
] | null | null | null | nutshell_api/conftest.py | garguelles/nutshell-api | df6e68ca1919b9c0fc63db89f528800aadbab596 | [
"MIT"
] | null | null | null | nutshell_api/conftest.py | garguelles/nutshell-api | df6e68ca1919b9c0fc63db89f528800aadbab596 | [
"MIT"
] | null | null | null | import pytest
from nutshell_api.users.models import User
from nutshell_api.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    """Point Django's MEDIA_ROOT at a per-test temp dir (applied to every test)."""
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """Return a freshly created User built by its factory."""
    return UserFactory()
| 19.333333 | 58 | 0.782759 | import pytest
from nutshell_api.users.models import User
from nutshell_api.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
| true | true |
f732b4f7255e2997817c6d6f96024eef94c92523 | 398 | py | Python | examples/prototype_build_all.py | zorbathut/vespid | a2a66a21118a572570557aa50386f3e80de94f08 | [
"MIT"
] | null | null | null | examples/prototype_build_all.py | zorbathut/vespid | a2a66a21118a572570557aa50386f3e80de94f08 | [
"MIT"
] | null | null | null | examples/prototype_build_all.py | zorbathut/vespid | a2a66a21118a572570557aa50386f3e80de94f08 | [
"MIT"
] | null | null | null | import vespidlib
from util import log
log("Creating task . . .")
task = vespidlib.task_create(
executable_pyscript = open("prototype_build_all_start.py", "r").read(),
name = "fullbuild_project_main_dev",
requirements = {"memory": 0, "cores": 0},
repositories = {"env": {"request": "project_main_dev", "local": True}},
)
log("Waiting for task . . .")
task.wait()
log("Task complete!")
| 20.947368 | 73 | 0.673367 | import vespidlib
from util import log
log("Creating task . . .")
task = vespidlib.task_create(
executable_pyscript = open("prototype_build_all_start.py", "r").read(),
name = "fullbuild_project_main_dev",
requirements = {"memory": 0, "cores": 0},
repositories = {"env": {"request": "project_main_dev", "local": True}},
)
log("Waiting for task . . .")
task.wait()
log("Task complete!")
| true | true |
f732b5ecf89394ec2482495f04a428476df90f10 | 6,455 | py | Python | src/nox_alarm/noxGateway.py | cheperboy/home_alarm | 27a8f68f32be054ee61f5a53fdb9026c026b592c | [
"MIT"
] | null | null | null | src/nox_alarm/noxGateway.py | cheperboy/home_alarm | 27a8f68f32be054ee61f5a53fdb9026c026b592c | [
"MIT"
] | null | null | null | src/nox_alarm/noxGateway.py | cheperboy/home_alarm | 27a8f68f32be054ee61f5a53fdb9026c026b592c | [
"MIT"
] | null | null | null | import sys
import prctl # Used to set thread name (visible in htop)
import zmq
from time import sleep
from threading import Thread, Event, current_thread
from datetime import datetime
from flask import current_app as app
from . import zmq_socket_config
context = zmq.Context()
""" Configure logger """
import logging
logger = logging.getLogger('alarm.thread')
class ThreadNoxAlarmGateway(Thread):
    """ Thread used as a "gateway" between the Flask app and the Alarm process.
    Forwards Alarm status from Alarm Process to Flask app.
    Forwards commands (start/stop alarm) from Flask app to Alarm Process.
    Uses the zmq PUB/SUB pattern to communicate with the Alarm process.
    Uses the socketio instance (parameter given at init) to communicate with the Flask app.
    Thread started when a first client connects to the web socket.
    Any new client will use the existing thread.
    Why using a thread:
    - Need a while loop to receive status continuously from Alarm Process
    - Only one thread needed whatever how many web clients.
    - Commands could be received directly from web server socketio handlers but
    it is cleaner to centralize all inter-process communication here, commands and status
    (moreover, this thread is initialized with an instance of flask socketio allowing
    to communicate easily with the web app).
    """
    def __init__(self, socketio):
        """Store the web-socket handle and open both zmq sockets."""
        self.socketio = socketio # Instance of socketio so that the thread interacts with the Flask websocket
        self.cycle_delay = 1 # delay (seconds) between iterations of the thread's while loop
        self.command_alarm = None # Flag to receive commands from websocket to thread (to alarm machine)
        # Flag to receive "status update" request from web app to thread (to alarm machine)
        # self.event_request_status = Event()
        self.event_request_status = None
        # Create a zmq PUB server to send messages to the Alarm Process zmq client
        # using socket PUB_COMMAND to send commands start/stop to the Alarm Process
        self.PUB_COMMAND = context.socket(zmq.PUB)
        self.PUB_COMMAND.bind("tcp://*:%s" % zmq_socket_config.port_socket_noxalarm_command)
        # Connect a zmq SUB client to the Alarm Process zmq server
        # using the socket SUB_STATE to receive status/event from Alarm Process
        self.SUB_STATE = context.socket(zmq.SUB)
        self.SUB_STATE.connect ("tcp://localhost:%s" % zmq_socket_config.port_socket_noxalarm_state)
        self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_EVENT)
        self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_STATE)
        # Call the super class __init__ method (the super class is Thread)
        super(ThreadNoxAlarmGateway, self).__init__()
    def run(self):
        """ Start the Gateway thread and run the infinite loop.
        Forwards Alarm status from Alarm Process to Flask app.
        Forwards commands (start/stop alarm) from Flask app to Alarm Process.
        """
        prctl.set_name("NoxGateway") # set thread name visible in htop
        logger.info('Init thread (delay %ss) %s' %(self.cycle_delay, str(current_thread().ident)))
        while (True):
            self.forward_command_from_web_to_alarm()
            self.forward_status_from_alarm_to_web()
            self.forward_request_status_from_web_to_alarm()
            sleep(self.cycle_delay)
    def forward_status_from_alarm_to_web(self):
        """ Forward to web app the status received from Alarm Process.
        Receives status using the zmq SUB socket (non-blocking).
        Forwards it to web clients using the socketio instance.
        """
        try:
            payload = self.SUB_STATE.recv_string(flags=zmq.NOBLOCK)
            topic, message = payload.split()
            if (topic == zmq_socket_config.TOPIC_STATE):
                logger.debug('Noxalarm gateway forwading state %s' %(message))
                self.socketio.emit('noxalarmstate', {'state': message}, namespace='/noxalarm')
            elif (topic == zmq_socket_config.TOPIC_EVENT):
                logger.debug('Noxalarm gateway forwading state %s' %(message))
                date = datetime.now().strftime("%d/%m %H:%M")
                self.socketio.emit('noxalarmevent', {'alarm_event': message, 'scope': 'nox', 'date': date, 'user': '-'},
                                   namespace='/noxalarm')
        # No message pending on the SUB socket this cycle, do nothing
        except zmq.error.Again:
            pass
    def forward_command_from_web_to_alarm(self):
        """ Forward to Alarm Process the commands received from web app.
        If a command is triggered from the web app, the command_alarm flag is set.
        If the flag is set, this method forwards the command to Alarm Process, then resets the flag to None.
        Command forwarded using the zmq PUB socket.
        The Alarm process will call its private methods to start/stop alarm (set Unipi IO)
        """
        if self.command_alarm is not None:
            if self.command_alarm is True:
                self.command_alarm = None
                self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_START)
                logger.debug('Noxalarm gateway forwad command Start')
            if self.command_alarm is False:
                self.command_alarm = None
                self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_STOP)
                logger.debug('Noxalarm gateway forwad command Stop')
    def forward_request_status_from_web_to_alarm(self):
        """ Forward to Alarm Process a request to update the display.
        If a new web client connects, the event_request_status flag is set.
        If the flag is set, this method forwards the "status update" request to Alarm Process,
        then resets the flag to None.
        The request is forwarded using the zmq PUB socket.
        The Alarm process will call its private methods to send the status.
        """
        # if self.event_request_status.is_set():
        #     self.event_request_status.clear()
        if self.event_request_status is True:
            self.event_request_status = None
            self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.STATUS_UPDATE)
            logger.debug('Noxalarm gateway forward request status update')
| 51.64 | 121 | 0.670798 | import sys
import prctl
import zmq
from time import sleep
from threading import Thread, Event, current_thread
from datetime import datetime
from flask import current_app as app
from . import zmq_socket_config
context = zmq.Context()
import logging
logger = logging.getLogger('alarm.thread')
class ThreadNoxAlarmGateway(Thread):
def __init__(self, socketio):
self.socketio = socketio
self.cycle_delay = 1
self.command_alarm = None
self.event_request_status = None
self.PUB_COMMAND = context.socket(zmq.PUB)
self.PUB_COMMAND.bind("tcp://*:%s" % zmq_socket_config.port_socket_noxalarm_command)
self.SUB_STATE = context.socket(zmq.SUB)
self.SUB_STATE.connect ("tcp://localhost:%s" % zmq_socket_config.port_socket_noxalarm_state)
self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_EVENT)
self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_STATE)
super(ThreadNoxAlarmGateway, self).__init__()
def run(self):
prctl.set_name("NoxGateway")
logger.info('Init thread (delay %ss) %s' %(self.cycle_delay, str(current_thread().ident)))
while (True):
self.forward_command_from_web_to_alarm()
self.forward_status_from_alarm_to_web()
self.forward_request_status_from_web_to_alarm()
sleep(self.cycle_delay)
def forward_status_from_alarm_to_web(self):
try:
payload = self.SUB_STATE.recv_string(flags=zmq.NOBLOCK)
topic, message = payload.split()
if (topic == zmq_socket_config.TOPIC_STATE):
logger.debug('Noxalarm gateway forwading state %s' %(message))
self.socketio.emit('noxalarmstate', {'state': message}, namespace='/noxalarm')
elif (topic == zmq_socket_config.TOPIC_EVENT):
logger.debug('Noxalarm gateway forwading state %s' %(message))
date = datetime.now().strftime("%d/%m %H:%M")
self.socketio.emit('noxalarmevent', {'alarm_event': message, 'scope': 'nox', 'date': date, 'user': '-'},
namespace='/noxalarm')
except zmq.error.Again:
pass
def forward_command_from_web_to_alarm(self):
if self.command_alarm is not None:
if self.command_alarm is True:
self.command_alarm = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_START)
logger.debug('Noxalarm gateway forwad command Start')
if self.command_alarm is False:
self.command_alarm = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_STOP)
logger.debug('Noxalarm gateway forwad command Stop')
def forward_request_status_from_web_to_alarm(self):
if self.event_request_status is True:
self.event_request_status = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.STATUS_UPDATE)
logger.debug('Noxalarm gateway forward request status update')
| true | true |
f732b677283ef4032ae1d98e9bc65f18beaa27f8 | 564 | py | Python | ap_server/common/schemas.py | phelipealves/CreateAPServer | c6775631ff075b0c29ce8a81ea55ffe76327d924 | [
"MIT"
] | 1 | 2019-06-04T15:27:40.000Z | 2019-06-04T15:27:40.000Z | ap_server/common/schemas.py | phelipealves/CreateAPServer | c6775631ff075b0c29ce8a81ea55ffe76327d924 | [
"MIT"
] | null | null | null | ap_server/common/schemas.py | phelipealves/CreateAPServer | c6775631ff075b0c29ce8a81ea55ffe76327d924 | [
"MIT"
] | null | null | null | from marshmallow import Schema, fields, post_load
from ap_server.common.models import CreateApModel
class CreateApSchema(Schema):
    """Marshmallow schema validating an access-point creation request.

    After loading, ``make_slice`` converts the validated payload into a
    ``CreateApModel`` instance.
    """
    wiface = fields.Str(required=True)  # wireless interface to run the AP on
    bridge = fields.Str(required=True)  # bridge interface name
    ssid = fields.Str(required=True)
    virt_prefix = fields.Str(required=True)  # presumably a virtual-interface name prefix -- TODO confirm
    password = fields.Str()  # optional -- presumably an open network when omitted; TODO confirm
    freq_band = fields.Str(default="2.4")
    channel = fields.Str(default=1)  # NOTE(review): int default on a Str field; probably should be "1"
    wpa_version = fields.Str(default="1+2")
    timeout = fields.Int(default=20)
    @post_load
    def make_slice(self, data):
        """Build a ``CreateApModel`` from the validated payload."""
        return CreateApModel(**data)
| 28.2 | 49 | 0.703901 | from marshmallow import Schema, fields, post_load
from ap_server.common.models import CreateApModel
class CreateApSchema(Schema):
wiface = fields.Str(required=True)
bridge = fields.Str(required=True)
ssid = fields.Str(required=True)
virt_prefix = fields.Str(required=True)
password = fields.Str()
freq_band = fields.Str(default="2.4")
channel = fields.Str(default=1)
wpa_version = fields.Str(default="1+2")
timeout = fields.Int(default=20)
@post_load
def make_slice(self, data):
return CreateApModel(**data)
| true | true |
f732b7368ff49758dfdfd0112cbb2153c70ab6af | 4,957 | py | Python | harness/tests/storage/test_gcs.py | johnkim-det/determined | 7af3cfe48d26a23702a260f73ca5090b13625cb7 | [
"Apache-2.0"
] | null | null | null | harness/tests/storage/test_gcs.py | johnkim-det/determined | 7af3cfe48d26a23702a260f73ca5090b13625cb7 | [
"Apache-2.0"
] | null | null | null | harness/tests/storage/test_gcs.py | johnkim-det/determined | 7af3cfe48d26a23702a260f73ca5090b13625cb7 | [
"Apache-2.0"
] | null | null | null | import os
import uuid
from pathlib import Path
from typing import Dict, Iterator, List, Optional
import google.auth.exceptions
import google.cloud.storage
import pytest
from determined.common import storage
from determined.tensorboard.fetchers.gcs import GCSFetcher
from tests.storage import util
BUCKET_NAME = "storage-unit-tests"
CHECK_ACCESS_KEY = "check-access"
CHECK_KEY_CONTENT = b"yo, you have access"
@pytest.fixture
def prep_gcs_test_creds(tmp_path: Path) -> Iterator[None]:
    """
    Check for the environment variable we pass as part of circleci's "storage-unit-tests" context.
    Note that the gcs credentials in the "storage-unit-tests" context are the keyid=c07eed131 key
    to the storage-unit-tests@determined-ai.iam.gserviceaccount.com service account.  The contents
    of the key are at github.com/determined-ai/secrets/gcp/service-accounts/storage-unit-tests.json.
    The service account should only have permission to view the "storage-unit-tests" bucket.
    """
    # Without the env var there is nothing to set up; still yield so the
    # fixture protocol is satisfied.
    if "DET_GCS_TEST_CREDS" not in os.environ:
        yield
        return
    # Save the text in a temporary file and set GOOGLE_APPLICATION_CREDENTIALS to be the path.
    creds_path = tmp_path.joinpath("gcs-test-creds.json")
    with creds_path.open("w") as f:
        f.write(os.environ["DET_GCS_TEST_CREDS"])
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(creds_path)
    try:
        yield
    finally:
        # Make sure the credential override does not leak into later tests.
        del os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
# @pytest.fixture
def get_live_gcs_manager(
    tmp_path: Path,
    prefix: Optional[str],
    prep_gcs_test_creds: None,
    require_secrets: bool,
) -> storage.GCSStorageManager:
    """
    Skip when we have no gcs access, unless --require-secrets was set, in which case fail.
    Note that if you normally have GCS access to the bucket in question and you have done the usual
    login with the gcloud cli tool, no environment variables are necessary to run this test locally.
    """
    # Instantiating a google.cloud.storage.Client() takes a few seconds, so we speed up test by
    # reusing the one created for the storage manager.
    try:
        manager = storage.GCSStorageManager(
            bucket=BUCKET_NAME,
            prefix=prefix,
            temp_dir=str(tmp_path),
        )
        # Probe the canary object to confirm we really have read access.
        blob = manager.bucket.blob(CHECK_ACCESS_KEY)
        assert blob.download_as_string() == CHECK_KEY_CONTENT
    except google.auth.exceptions.DefaultCredentialsError:
        # No access detected.
        if require_secrets:
            raise
        pytest.skip("No GCS access")
    # Reached only on success: pytest.skip() raises, so it never falls through.
    return manager
@pytest.mark.cloud
@pytest.mark.parametrize("prefix", [None, "test/prefix/"])
def test_gcs_lifecycle(
    require_secrets: bool,
    tmp_path: Path,
    prefix: Optional[str],
) -> None:
    """Run the shared checkpoint-lifecycle test against the live GCS bucket."""
    live_gcs_manager = get_live_gcs_manager(tmp_path, prefix, None, require_secrets)
    def post_delete_cb(storage_id: str) -> None:
        """Search gcs directly to ensure that a checkpoint is actually deleted."""
        storage_prefix = live_gcs_manager.get_storage_prefix(storage_id)
        found = [blob.name for blob in live_gcs_manager.bucket.list_blobs(prefix=storage_prefix)]
        if found:
            file_list = "  " + "\n  ".join(found)
            raise ValueError(f"found {len(found)} files in bucket after delete:\n{file_list}")
    util.run_storage_lifecycle_test(live_gcs_manager, post_delete_cb)
def get_tensorboard_fetcher_gcs(
    require_secrets: bool, local_sync_dir: str, paths_to_sync: List[str]
) -> GCSFetcher:
    """Build a GCSFetcher for the test bucket, verifying access first.

    Skips the calling test when GCS credentials are unavailable, unless
    ``require_secrets`` is set, in which case the credential error is raised.
    """
    storage_config = {"bucket": BUCKET_NAME}
    try:
        fetcher = GCSFetcher(storage_config, paths_to_sync, local_sync_dir)
        # Probe the canary object to confirm we really have read access.
        # (Use CHECK_ACCESS_KEY, matching the other access checks in this
        # file, instead of repeating the "check-access" literal.)
        blob = fetcher.client.bucket(BUCKET_NAME).blob(CHECK_ACCESS_KEY)
        assert blob.download_as_string() == CHECK_KEY_CONTENT
        return fetcher
    except google.auth.exceptions.DefaultCredentialsError:
        # No access detected.
        if require_secrets:
            raise
        pytest.skip("No GCS access")
@pytest.mark.cloud
def test_tensorboard_fetcher_gcs(
    require_secrets: bool, tmp_path: Path, prep_gcs_test_creds: None
) -> None:
    """Run the shared tensorboard-fetcher test against the live GCS bucket."""
    local_sync_dir = os.path.join(tmp_path, "sync_dir")
    storage_relpath = os.path.join(local_sync_dir, BUCKET_NAME)
    # Create two paths as multi-trial sync could happen.
    paths_to_sync = [os.path.join("test_dir", str(uuid.uuid4()), "subdir") for _ in range(2)]
    fetcher = get_tensorboard_fetcher_gcs(require_secrets, local_sync_dir, paths_to_sync)
    def put_files(filepath_content: Dict[str, bytes]) -> None:
        # Upload callback handed to the shared test harness.
        for filepath, content in filepath_content.items():
            fetcher.client.bucket(BUCKET_NAME).blob(filepath).upload_from_string(content)
    def rm_files(filepaths: List[str]) -> None:
        # Deletion callback handed to the shared test harness.
        for filepath in filepaths:
            fetcher.client.bucket(BUCKET_NAME).blob(filepath).delete()
    util.run_tensorboard_fetcher_test(local_sync_dir, fetcher, storage_relpath, put_files, rm_files)
| 34.908451 | 100 | 0.713536 | import os
import uuid
from pathlib import Path
from typing import Dict, Iterator, List, Optional
import google.auth.exceptions
import google.cloud.storage
import pytest
from determined.common import storage
from determined.tensorboard.fetchers.gcs import GCSFetcher
from tests.storage import util
BUCKET_NAME = "storage-unit-tests"
CHECK_ACCESS_KEY = "check-access"
CHECK_KEY_CONTENT = b"yo, you have access"
@pytest.fixture
def prep_gcs_test_creds(tmp_path: Path) -> Iterator[None]:
if "DET_GCS_TEST_CREDS" not in os.environ:
yield
return
creds_path = tmp_path.joinpath("gcs-test-creds.json")
with creds_path.open("w") as f:
f.write(os.environ["DET_GCS_TEST_CREDS"])
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(creds_path)
try:
yield
finally:
del os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
def get_live_gcs_manager(
tmp_path: Path,
prefix: Optional[str],
prep_gcs_test_creds: None,
require_secrets: bool,
) -> storage.GCSStorageManager:
try:
manager = storage.GCSStorageManager(
bucket=BUCKET_NAME,
prefix=prefix,
temp_dir=str(tmp_path),
)
blob = manager.bucket.blob(CHECK_ACCESS_KEY)
assert blob.download_as_string() == CHECK_KEY_CONTENT
except google.auth.exceptions.DefaultCredentialsError:
if require_secrets:
raise
pytest.skip("No GCS access")
return manager
@pytest.mark.cloud
@pytest.mark.parametrize("prefix", [None, "test/prefix/"])
def test_gcs_lifecycle(
require_secrets: bool,
tmp_path: Path,
prefix: Optional[str],
) -> None:
live_gcs_manager = get_live_gcs_manager(tmp_path, prefix, None, require_secrets)
def post_delete_cb(storage_id: str) -> None:
storage_prefix = live_gcs_manager.get_storage_prefix(storage_id)
found = [blob.name for blob in live_gcs_manager.bucket.list_blobs(prefix=storage_prefix)]
if found:
file_list = " " + "\n ".join(found)
raise ValueError(f"found {len(found)} files in bucket after delete:\n{file_list}")
util.run_storage_lifecycle_test(live_gcs_manager, post_delete_cb)
def get_tensorboard_fetcher_gcs(
require_secrets: bool, local_sync_dir: str, paths_to_sync: List[str]
) -> GCSFetcher:
storage_config = {"bucket": BUCKET_NAME}
try:
fetcher = GCSFetcher(storage_config, paths_to_sync, local_sync_dir)
blob = fetcher.client.bucket(BUCKET_NAME).blob("check-access")
assert blob.download_as_string() == CHECK_KEY_CONTENT
return fetcher
except google.auth.exceptions.DefaultCredentialsError:
if require_secrets:
raise
pytest.skip("No GCS access")
@pytest.mark.cloud
def test_tensorboard_fetcher_gcs(
require_secrets: bool, tmp_path: Path, prep_gcs_test_creds: None
) -> None:
local_sync_dir = os.path.join(tmp_path, "sync_dir")
storage_relpath = os.path.join(local_sync_dir, BUCKET_NAME)
paths_to_sync = [os.path.join("test_dir", str(uuid.uuid4()), "subdir") for _ in range(2)]
fetcher = get_tensorboard_fetcher_gcs(require_secrets, local_sync_dir, paths_to_sync)
def put_files(filepath_content: Dict[str, bytes]) -> None:
for filepath, content in filepath_content.items():
fetcher.client.bucket(BUCKET_NAME).blob(filepath).upload_from_string(content)
def rm_files(filepaths: List[str]) -> None:
for filepath in filepaths:
fetcher.client.bucket(BUCKET_NAME).blob(filepath).delete()
util.run_tensorboard_fetcher_test(local_sync_dir, fetcher, storage_relpath, put_files, rm_files)
| true | true |
f732b7817804b70eb53719b94f151119e3902335 | 23,548 | py | Python | rlax/_src/mpo_ops_test.py | ofantomas/rlax | 7bf3bf13d4496f1b708f4ccb5865215a16c618d6 | [
"Apache-2.0"
] | 1 | 2022-01-13T22:29:15.000Z | 2022-01-13T22:29:15.000Z | rlax/_src/mpo_ops_test.py | shinriny0ku/rlax | 58b3672b2f7ac1a400b3934ae9888c677f39b9e2 | [
"Apache-2.0"
] | null | null | null | rlax/_src/mpo_ops_test.py | shinriny0ku/rlax | 58b3672b2f7ac1a400b3934ae9888c677f39b9e2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mpo_ops.py."""
import functools
import math
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from rlax._src import distributions
from rlax._src import mpo_ops
# Shapes of the synthetic data used throughout the tests.
NUM_SAMPLES = 10   # actions drawn per state for the MPO E-step
ACTION_DIM = 3
TIME_DIM = 8
BATCH_DIM = 100
# NOTE: These are not typical values used for MPO. In the test case, we know the
# Q function perfectly so we loosen the bound on the mean to zone in to the
# optimal policy very quickly. Similarly, we maintain a high variance to sample
# distinct actions to explore and learn from.
_INIT_TEMPERATURE = 0.2
_INIT_ALPHA_MEAN = 0.001
_INIT_ALPHA_COVARIANCE = float(1e6)
_EPSILON_BOUND = 0.01  # bound on the temperature (E-step) KL
_EPSILON_MEAN_BOUND = 10.0  # loose bound on the policy-mean KL
_EPSILON_COVARIANCE_BOUND = 1e-12  # very tight bound on the covariance KL
_NUM_ITERATIONS = 5000
_TARGET_UPDATE_PERIOD = 100  # iterations between target-network copies
_RANDOM_SEED = 42
# The offset to ensure initially the policy is not close to 0
_MEAN_OFFSET = 2.0
# The final action should optimize down to be close to 0.0
_MAX_ACTION_ERROR = 0.2
_MAX_KL_ERROR = 1e-6
_DIAGONAL_GAUSSIAN_DIST = distributions.gaussian_diagonal()
# Keeps Lagrange multipliers/temperatures strictly positive.
_PROJECTION_OPERATOR = functools.partial(jnp.clip, a_min=1e-10)
def _hk_mock_policy_params(s_tm1):
  """Builds mock diagonal-Gaussian policy params from a batch of states."""
  # A single MLP head emits the concatenated [mu, sigma] of width
  # 2 * ACTION_DIM; both halves are shaped [B, ACTION_DIM] after the split.
  mlp = hk.nets.MLP(
      output_sizes=[2 * ACTION_DIM],
      w_init=hk.initializers.VarianceScaling(1e-3),
      activation=jnp.tanh,
      activate_final=False,
      name='online_policy')
  raw_mean, raw_cov = jnp.split(mlp(s_tm1), 2, axis=-1)
  # Softplus keeps the scale positive; the offset pushes the initial mean
  # away from the optimum at zero so the test has something to learn.
  return {'mean': raw_mean + _MEAN_OFFSET, 'stddev': jax.nn.softplus(raw_cov)}
def _init_params(key):
  """Initialises online/target policy params plus the MPO Lagrange params."""
  init_fn, _ = hk.transform(_hk_mock_policy_params)
  keys = hk.PRNGSequence(key)
  # Dummy state batch, only used so the initialiser can trace shapes.
  dummy_states = jax.random.normal(
      next(keys), (TIME_DIM, BATCH_DIM, ACTION_DIM), jnp.float32)
  initial_policy = init_fn(next(keys), dummy_states)
  lagrange_params = dict(
      temperature=_INIT_TEMPERATURE,
      alpha_mean=_INIT_ALPHA_MEAN,
      alpha_covariance=_INIT_ALPHA_COVARIANCE)
  # The target network starts out as an exact copy of the online network.
  return dict(online=initial_policy, target=initial_policy, mpo=lagrange_params)
def _mock_outputs(online_params, target_params, key, target_name):
  """Runs the mock policy networks and fabricates Q-values and advantages.

  The synthetic critic is Q(s, a) = -||s + a||^2, a convex objective whose
  optimum drives the policy mean towards zero, so the optimisation test can
  converge quickly.
  """
  _, policy_params_fn = hk.transform(_hk_mock_policy_params)
  keys = hk.PRNGSequence(key)
  # States: [TIME_DIM, BATCH_DIM, ACTION_DIM] (state size == action size here).
  s_tm1 = jax.random.normal(
      next(keys), (TIME_DIM, BATCH_DIM, ACTION_DIM), jnp.float32)
  pi_params = policy_params_fn(online_params, None, s_tm1)
  target_pi_params = policy_params_fn(target_params, None, s_tm1)
  # Tile the target distribution over a leading sample axis and draw actions:
  # [NUM_SAMPLES, TIME_DIM, BATCH_DIM, ACTION_DIM].
  tiled_mean = jnp.repeat(
      target_pi_params['mean'][None], NUM_SAMPLES, axis=0)
  tiled_stddev = jnp.repeat(
      target_pi_params['stddev'][None], NUM_SAMPLES, axis=0)
  target_actions = _DIAGONAL_GAUSSIAN_DIST.sample(
      next(keys), tiled_mean, tiled_stddev)
  if target_name == 'advantages':
    # The advantage target consumes a single action per state, not a set.
    target_actions = target_actions[0, ...]
  # Quadratic critic: with a_t = s_t + action, Q = -a_t . a_t.
  # Q-values: [NUM_SAMPLES, TIME_DIM, BATCH_DIM].
  shifted = target_actions + jnp.expand_dims(s_tm1, 0)
  sample_q_values = -jnp.sum(shifted ** 2, axis=-1)
  # Advantages mirror the first Q sample: [TIME_DIM, BATCH_DIM].
  advantages = sample_q_values[0, :, :]
  return dict(
      pi_params=pi_params,
      target_pi_params=target_pi_params,
      sample_q_values=sample_q_values,
      advantages=advantages,
      target_actions=target_actions,
  )
def get_common_loss_fn_inputs(params, key, target_name):
  """Assembles the kwargs shared by the MPO and V-MPO loss functions."""
  out = _mock_outputs(params['online'], params['target'], key, target_name)
  log_probs = _DIAGONAL_GAUSSIAN_DIST.logprob(
      out['target_actions'], out['pi_params']['mean'],
      out['pi_params']['stddev'])
  temperature_penalty = mpo_ops.LagrangePenalty(
      params['mpo']['temperature'], _EPSILON_BOUND)
  loss_inputs = {
      'sample_log_probs': log_probs,
      target_name: out[target_name],
      'temperature_constraint': temperature_penalty,
  }
  return out, loss_inputs
def get_decoupled_kl_constraints(out, params, per_dimension):
  """Builds separate mean/covariance KL constraints for a diagonal Gaussian."""
  kl_mean, kl_cov = (
      distributions.decoupled_multivariate_normal_kl_divergence(
          out['target_pi_params']['mean'], out['target_pi_params']['stddev'],
          out['pi_params']['mean'], out['pi_params']['stddev'],
          per_dimension=per_dimension))
  # Each KL term gets its own multiplier, broadcast to the term's shape.
  mean_penalty = mpo_ops.LagrangePenalty(
      alpha=params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean),
      epsilon=_EPSILON_MEAN_BOUND,
      per_dimension=per_dimension)
  cov_penalty = mpo_ops.LagrangePenalty(
      alpha=params['mpo']['alpha_covariance'] * jnp.ones_like(kl_cov),
      epsilon=_EPSILON_COVARIANCE_BOUND,
      per_dimension=per_dimension)
  return [(kl_mean, mean_penalty), (kl_cov, cov_penalty)]
def get_coupled_kl_constraints(out, params, per_dimension):
  """Builds one KL constraint covering mean and covariance jointly."""
  kl_mean, kl_cov = (
      distributions.decoupled_multivariate_normal_kl_divergence(
          out['target_pi_params']['mean'], out['target_pi_params']['stddev'],
          out['pi_params']['mean'], out['pi_params']['stddev'],
          per_dimension=per_dimension))
  # The two KL terms share one multiplier; the bound is the sum of the
  # individual bounds.
  joint_penalty = mpo_ops.LagrangePenalty(
      alpha=params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean),
      epsilon=_EPSILON_MEAN_BOUND + _EPSILON_COVARIANCE_BOUND,
      per_dimension=per_dimension)
  return [(kl_mean + kl_cov, joint_penalty)]
def vmpo_e_step_without_restarting_or_importance_weights(advantages, **kwargs):
  """Runs the V-MPO E-step with restarting/importance weights fixed to one."""
  unit_weights = jnp.ones_like(advantages)
  return mpo_ops.vmpo_compute_weights_and_temperature_loss(
      advantages=advantages, restarting_weights=unit_weights,
      importance_weights=unit_weights, **kwargs)
class MPOTest(parameterized.TestCase):
"""Tests for the MPO losses."""
  @parameterized.parameters(
      {'target_name': 'sample_q_values',
       'loss_fn': mpo_ops.mpo_loss,
       'get_kl_constraints': get_decoupled_kl_constraints,
       'per_dimension': False},
      {'target_name': 'advantages',
       'loss_fn': mpo_ops.vmpo_loss,
       'get_kl_constraints': get_decoupled_kl_constraints,
       'per_dimension': False},
      {'target_name': 'sample_q_values',
       'loss_fn': mpo_ops.mpo_loss,
       'get_kl_constraints': get_coupled_kl_constraints,
       'per_dimension': False},
      {'target_name': 'advantages',
       'loss_fn': mpo_ops.vmpo_loss,
       'get_kl_constraints': get_coupled_kl_constraints,
       'per_dimension': False},
      {'target_name': 'sample_q_values',
       'loss_fn': mpo_ops.mpo_loss,
       'get_kl_constraints': get_decoupled_kl_constraints,
       'per_dimension': True},
      {'target_name': 'advantages',
       'loss_fn': mpo_ops.vmpo_loss,
       'get_kl_constraints': get_decoupled_kl_constraints,
       'per_dimension': True},
      {'target_name': 'sample_q_values',
       'loss_fn': mpo_ops.mpo_loss,
       'get_kl_constraints': get_coupled_kl_constraints,
       'per_dimension': True},
      {'target_name': 'advantages',
       'loss_fn': mpo_ops.vmpo_loss,
       'get_kl_constraints': get_coupled_kl_constraints,
       'per_dimension': True},
  )
  def test_optimization(
      self, target_name, loss_fn, get_kl_constraints, per_dimension):
    """Tests that the policy optimization works correctly."""
    def _loss(params, key):
      # Build the loss inputs from freshly sampled mock data, then attach the
      # parameterized KL constraints before calling the loss under test.
      out, loss_fn_inputs = get_common_loss_fn_inputs(params, key, target_name)
      kl_constraints = get_kl_constraints(out, params, per_dimension)
      loss_fn_inputs.update({'kl_constraints': kl_constraints})
      loss, mpo_stats = loss_fn(**loss_fn_inputs)
      loss = jnp.mean(loss)
      # Empirical KL implied by the sample weights; compared against the
      # temperature epsilon at the end of the test.
      temperature_bound = jnp.mean(mpo_stats.normalized_weights * jnp.log(
          mpo_stats.num_samples * mpo_stats.normalized_weights + 1e-8))
      return loss, {'outputs': out, 'temperature_bound': temperature_bound}
    key = jax.random.PRNGKey(_RANDOM_SEED)
    grad_fn = jax.jit(jax.grad(_loss, has_aux=True))
    optimizer = optax.adam(1e-3)
    key, new_key = jax.random.split(key)
    params = _init_params(new_key)
    # Only the online policy and the Lagrange params are optimised; the
    # target params are refreshed by periodic copying below.
    opt_state = optimizer.init((params['online'], params['mpo']))
    @jax.jit
    def _update(params_, opt_state_, key_):
      # One gradient step on both the policy and the Lagrange multipliers.
      next_key, key_ = jax.random.split(key_)
      grad, stats = grad_fn(params_, key_)
      updates, opt_state_ = optimizer.update(
          (grad['online'], grad['mpo']), opt_state_)
      online_params, mpo_params = optax.apply_updates(
          (params_['online'], params_['mpo']), updates)
      params_['online'] = online_params
      params_['mpo'] = mpo_params
      return params_, opt_state_, stats, next_key
    for iter_idx in range(_NUM_ITERATIONS):
      params, opt_state, extra, key = _update(params, opt_state, key)
      if iter_idx % _TARGET_UPDATE_PERIOD == 0:
        params['target'] = params['online']
    # Test the bounds are within tolerance.
    key, new_key = jax.random.split(key)
    _, extra = _loss(params, new_key)
    action_mean = jnp.mean(extra['outputs']['pi_params']['mean'])
    # Check action mean is close to 0.
    self.assertBetween(action_mean, -_MAX_ACTION_ERROR, _MAX_ACTION_ERROR)
    # Check the temperature are within the bounds.
    self.assertLess(extra['temperature_bound'], _EPSILON_BOUND)
  @parameterized.parameters(
      {'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,
       'additional_inputs': {},
       # dL/dq == 1 and dL/dt == epsilon (for one sample)
       'expected_deriv_of_target': [[[1]]],
       'sample_dimension': True},
      {'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,
       'additional_inputs': {'top_k_fraction': 1.0},
       'expected_deriv_of_target': [[1]],
       'sample_dimension': False},
  )
  def test_e_step_gradient_computation(
      self, e_step_fn, additional_inputs, expected_deriv_of_target,
      sample_dimension):
    """Tests the gradients from the E-step against the analytic ones."""
    # Target has shape [NUM_SAMPLES, T, B] => [1, 1, 1]
    target = jnp.array([[3]], jnp.float32)
    if sample_dimension:
      # MPO expects a leading sample axis; V-MPO does not.
      target = jnp.expand_dims(target, axis=0)
    temperature = jnp.array(0.1, jnp.float32)
    def fn(target_, temperature_):
      # Mean temperature loss as a function of (target, temperature), so
      # jax.grad yields the two derivatives checked below.
      temperature_constraint = mpo_ops.LagrangePenalty(
          temperature_, _EPSILON_BOUND)
      temperature_loss, _, _ = e_step_fn(
          target_, temperature_constraint=temperature_constraint,
          projection_operator=_PROJECTION_OPERATOR,
          **additional_inputs)
      return jnp.mean(temperature_loss)
    grad = jax.grad(fn, argnums=(0, 1))(target, temperature)
    np.testing.assert_almost_equal(np.array(grad[0]), np.array(
        expected_deriv_of_target, np.float32), decimal=4)
    self.assertAlmostEqual(grad[1], _EPSILON_BOUND, places=4)
  @parameterized.parameters(
      {'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,
       'additional_inputs': {},
       'sample_dimension': True},
      {'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,
       'additional_inputs': {'top_k_fraction': 1.0},
       'sample_dimension': False},
  )
  def test_e_step_stop_gradient(
      self, e_step_fn, additional_inputs, sample_dimension):
    """Tests no gradients flow through `weights` in the E-Step."""
    # Target has shape [NUM_SAMPLES, T, B] => [1, 1, 1]
    target = jnp.array([[3]], jnp.float32)
    if sample_dimension:
      # MPO expects a leading sample axis; V-MPO does not.
      target = jnp.expand_dims(target, axis=0)
    temperature = 0.1
    # pylint: disable=g-long-lambda
    def mean_weights_fn(target_, temperature_):
      # Differentiating the mean of the weights; both gradients should be
      # zero since the weights are stop-gradiented inside the E-step.
      temperature_constraint = mpo_ops.LagrangePenalty(
          temperature_, _EPSILON_BOUND)
      _, weights, _ = e_step_fn(
          target_, temperature_constraint=temperature_constraint,
          projection_operator=_PROJECTION_OPERATOR,
          **additional_inputs)
      return jnp.mean(weights)
    grad = jax.grad(mean_weights_fn, argnums=(0, 1))(target, temperature)
    np.testing.assert_almost_equal(
        np.array(grad[0]), np.zeros_like(grad[0]), decimal=4)
    self.assertAlmostEqual(grad[1], 0., places=4)
  def test_kl_constraint_loss_gradients(self):
    """Tests the gradients in the `_kl_constraint_loss` method."""
    kl = jnp.array(1., jnp.float32)
    alpha = jnp.array(1., jnp.float32)
    # Project alpha once, as the optimisation loop would, before
    # differentiating.
    _, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(
        alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),
        _PROJECTION_OPERATOR)
    def alpha_loss_fn(alpha_):
      penalty = mpo_ops.LagrangePenalty(
          alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
      _, alpha_loss, _ = mpo_ops.kl_constraint_loss(
          kl, penalty, _PROJECTION_OPERATOR)
      return alpha_loss
    alpha_gradients = jax.grad(alpha_loss_fn)(alpha)
    # Analytically, d(alpha_loss)/d(alpha) = epsilon - kl.
    actual_alpha_gradients = _EPSILON_MEAN_BOUND - kl
    def kl_loss_fn(kl_):
      penalty = mpo_ops.LagrangePenalty(
          alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
      kl_loss, _, _ = mpo_ops.kl_constraint_loss(
          kl_, penalty, _PROJECTION_OPERATOR)
      return kl_loss
    kl_gradients = jax.grad(kl_loss_fn)(kl)
    # Analytically, d(kl_loss)/d(kl) = alpha.
    actual_kl_gradients = alpha
    self.assertAlmostEqual(kl_gradients, actual_kl_gradients)
    self.assertAlmostEqual(alpha_gradients, actual_alpha_gradients)
  def test_kl_constraint_loss_stop_gradients(self):
    """Tests the stop gradients in the `kl_constraint_loss` function.

    The `alpha_loss` term should not affect the KL and the `kl` term should
    not affect `alpha`.
    """
    kl = jnp.array(1., jnp.float32)
    alpha = jnp.array(1., jnp.float32)
    # Run the loss once so `alpha` becomes the projected Lagrange multiplier.
    _, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(
        alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),
        _PROJECTION_OPERATOR)
    # Differentiate the KL term w.r.t. alpha: should be blocked.
    def kl_loss_fn(alpha_):
      penalty = mpo_ops.LagrangePenalty(
          alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
      kl_loss, _, _ = mpo_ops.kl_constraint_loss(
          kl, penalty, _PROJECTION_OPERATOR)
      return kl_loss
    kl_gradients = jax.grad(kl_loss_fn)(alpha)
    # Differentiate the alpha term w.r.t. kl: should also be blocked.
    def alpha_loss_fn(kl_):
      penalty = mpo_ops.LagrangePenalty(
          alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
      _, alpha_loss, _ = mpo_ops.kl_constraint_loss(
          kl_, penalty, _PROJECTION_OPERATOR)
      return alpha_loss
    alpha_gradients = jax.grad(alpha_loss_fn)(kl)
    # Test that there are no gradients of KL w.r.t alpha
    self.assertEqual(kl_gradients, 0.)
    # Test that there are no gradients of alpha w.r.t kl
    self.assertEqual(alpha_gradients, 0.)
@parameterized.parameters(
# With restarting weights of 1 (and temperature of 1) the weights should
# be e^-1, 1, max advantage is 2 and num samples is 2 so temperature loss
# is log(1 + e^-1) + 2 - log(2) + temperature epsilon
{'advantages': np.array([[1.0, 2.0]]),
'restarting_weights': np.array([[1.0, 1.0]]),
'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
# With the second restarting weight set to 0 the weights become 1, 0
# max advantage is 1 and num samples is 1 so temperature loss is
# log(1) + 1 - log(1) + temperature epsilon
{'advantages': np.array([[1.0, 2.0]]),
'restarting_weights': np.array([[1.0, 0.0]]),
'expected_temperature_loss': 1.0 + _EPSILON_BOUND},
)
def test_restarting_weights(
self, advantages, restarting_weights, expected_temperature_loss):
"""Test that calculation is correct if restarting weight is set to 0."""
temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(
advantages, restarting_weights, np.ones_like(restarting_weights),
mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),
functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)
self.assertAlmostEqual(
temperature_loss, expected_temperature_loss, places=4)
@parameterized.parameters(
# When the top k fraction is 1.0 all of the weights should be 1
{'top_k_fraction': 1.0,
'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
'expected_top_k_weights': np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])},
# When the top k fraction is 0.5 it will take the bottom row as these are
# the highest.
{'top_k_fraction': 0.5,
'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
'expected_top_k_weights': np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}
)
def test_top_k_fraction(
self, top_k_fraction, scaled_advantages, expected_top_k_weights):
"""Test that only the top k fraction are used."""
top_k_weights = mpo_ops.get_top_k_weights(
top_k_fraction, jnp.ones_like(scaled_advantages), scaled_advantages)
np.testing.assert_allclose(top_k_weights, expected_top_k_weights)
def test_top_k_fraction_too_low(self):
"""Test if the top k fraction returns 0 advantages we raise an error."""
with self.assertRaises(ValueError):
mpo_ops.get_top_k_weights(0.01, jnp.ones((3, 2)), jnp.ones((3, 2)))
@parameterized.parameters(
# With importance weights of 1 (and temperature of 1) the weights should
# be e^-1, 1, max advantage is 2 and num samples is 2 so temperature loss
# is log(1 + e^-1) + 2 - log(2) + temperature epsilon
{'advantages': np.array([[1.0, 2.0]]),
'importance_weights': np.array([[1.0, 1.0]]),
'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
# If the second importance weight is 0.5 temperature loss becomes
# log(0.5 + e^-1) + 2 - log(2) + temperature epsilon
{'advantages': np.array([[1.0, 2.0]]),
'importance_weights': np.array([[1.0, 0.5]]),
'expected_temperature_loss': (math.log(0.5 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
)
def test_importance_weights(
self, advantages, importance_weights, expected_temperature_loss):
"""Test that importance weights have the correct effect."""
temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(
advantages, np.ones_like(importance_weights), importance_weights,
mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),
functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)
self.assertAlmostEqual(
temperature_loss, expected_temperature_loss, places=4)
  @parameterized.parameters({'per_dimension': True}, {'per_dimension': False})
  def test_mpo_input_axis_order_equivalence(self, per_dimension):
    """Test loss functions are equivalent regardless of axis order."""
    key = jax.random.PRNGKey(_RANDOM_SEED)
    key, new_key = jax.random.split(key)
    params = _init_params(new_key)
    out, mpo_inputs = get_common_loss_fn_inputs(params, key, 'sample_q_values')
    kl_constraints = get_coupled_kl_constraints(out, params,
                                                per_dimension=per_dimension)
    mpo_inputs.update({'kl_constraints': kl_constraints})
    # Original loss fn inputs are [S T B],
    stb_loss, stb_outputs = mpo_ops.mpo_loss(**mpo_inputs)
    mean_stb_loss = jnp.mean(stb_loss)
    # Swap axes and try [S B T]. NOTE: `mpo_inputs` is mutated in place, so
    # each swap below composes with the previous layout.
    mpo_inputs.update({
        'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 1, 2),
        'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 1, 2),
        'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(
            alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,
            per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],
    })
    sbt_loss, sbt_outputs = mpo_ops.mpo_loss(**mpo_inputs)
    mean_sbt_loss = jnp.mean(sbt_loss)
    # Try [T B S] denoting sample_axis at 2 instead of 0.
    # (swapping axes 0 and 2 of the current [S B T] layout yields [T B S].)
    mpo_inputs.update({
        'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 0, 2),
        'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 0, 2),
        'kl_constraints': kl_constraints,  # T B
        'sample_axis': 2
    })
    tbs_loss, tbs_outputs = mpo_ops.mpo_loss(**mpo_inputs)
    mean_tbs_loss = jnp.mean(tbs_loss)
    # All three layouts must give the same mean loss and sample count.
    self.assertAlmostEqual(mean_stb_loss, mean_sbt_loss, places=4)
    self.assertAlmostEqual(mean_tbs_loss, mean_sbt_loss, places=4)
    self.assertEqual(tbs_outputs.num_samples, sbt_outputs.num_samples)
    self.assertEqual(tbs_outputs.num_samples, stb_outputs.num_samples)
  @parameterized.parameters({'per_dimension': True}, {'per_dimension': False})
  def test_vmpo_input_axis_order_equivalence(self, per_dimension):
    """Test loss functions are equivalent regardless of axis order."""
    key = jax.random.PRNGKey(_RANDOM_SEED)
    key, new_key = jax.random.split(key)
    params = _init_params(new_key)
    out, vmpo_inputs = get_common_loss_fn_inputs(params, key, 'advantages')
    kl_constraints = get_coupled_kl_constraints(out, params,
                                                per_dimension=per_dimension)
    vmpo_inputs.update({'kl_constraints': kl_constraints})
    # Original loss fn inputs are [T B],
    tb_loss, tb_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)
    mean_tb_loss = jnp.mean(tb_loss)
    # Swap axes and try [B T]. NOTE: `vmpo_inputs` is mutated in place.
    vmpo_inputs.update({
        'sample_log_probs': jnp.swapaxes(vmpo_inputs['sample_log_probs'], 0, 1),
        'advantages': jnp.swapaxes(vmpo_inputs['advantages'], 0, 1),
        'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(
            alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,
            per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],
    })
    bt_loss, bt_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)
    mean_bt_loss = jnp.mean(bt_loss)
    # Both layouts must give the same mean loss and sample count.
    self.assertAlmostEqual(mean_tb_loss, mean_bt_loss, places=4)
    self.assertEqual(tb_outputs.num_samples, bt_outputs.num_samples)
if __name__ == '__main__':
absltest.main()
| 42.200717 | 80 | 0.691439 |
import functools
import math
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from rlax._src import distributions
from rlax._src import mpo_ops
NUM_SAMPLES = 10
ACTION_DIM = 3
TIME_DIM = 8
BATCH_DIM = 100
_INIT_TEMPERATURE = 0.2
_INIT_ALPHA_MEAN = 0.001
_INIT_ALPHA_COVARIANCE = float(1e6)
_EPSILON_BOUND = 0.01
_EPSILON_MEAN_BOUND = 10.0
_EPSILON_COVARIANCE_BOUND = 1e-12
_NUM_ITERATIONS = 5000
_TARGET_UPDATE_PERIOD = 100
_RANDOM_SEED = 42
_MEAN_OFFSET = 2.0
_MAX_ACTION_ERROR = 0.2
_MAX_KL_ERROR = 1e-6
_DIAGONAL_GAUSSIAN_DIST = distributions.gaussian_diagonal()
_PROJECTION_OPERATOR = functools.partial(jnp.clip, a_min=1e-10)
def _hk_mock_policy_params(s_tm1):
  """Returns mock diagonal-Gaussian policy parameters for states `s_tm1`.

  A small MLP maps each state to a concatenated [mean, stddev] vector which
  is split along the last axis; `softplus` keeps the stddev positive and
  `_MEAN_OFFSET` shifts the initial mean away from zero.
  """
  pi_out = hk.nets.MLP(
      output_sizes=[2 * ACTION_DIM],
      w_init=hk.initializers.VarianceScaling(1e-3),
      activation=jnp.tanh,
      activate_final=False,
      name='online_policy')(s_tm1)
  pi_mean, pi_cov = jnp.split(pi_out, 2, axis=-1)
  pi_cov = jax.nn.softplus(pi_cov)  # ensure a strictly positive scale
  pi_mean = pi_mean + _MEAN_OFFSET
  return {'mean': pi_mean, 'stddev': pi_cov}
def _init_params(key):
  """Initialises online/target policy parameters and MPO Lagrange state."""
  init_fn, _ = hk.transform(_hk_mock_policy_params)
  key_seq = hk.PRNGSequence(key)
  s_tm1 = jax.random.normal(
      next(key_seq), (TIME_DIM, BATCH_DIM, ACTION_DIM), jnp.float32)
  online_params = init_fn(next(key_seq), s_tm1)
  return dict(
      online=online_params,
      # The target network starts as a copy of the online network.
      target=online_params,
      mpo=dict(
          temperature=_INIT_TEMPERATURE,
          alpha_mean=_INIT_ALPHA_MEAN,
          alpha_covariance=_INIT_ALPHA_COVARIANCE),
  )
def _mock_outputs(online_params, target_params, key, target_name):
  """Builds mock policy outputs, sampled actions and synthetic Q/advantages.

  Actions are sampled from the *target* policy; the synthetic Q-value is the
  negative squared norm of (action + state), so it is maximised when the
  policy mean approaches -s_tm1 (here shifted by the mean offset).

  Args:
    online_params: Haiku parameters of the online policy.
    target_params: Haiku parameters of the target policy.
    key: PRNG key used for states and action sampling.
    target_name: 'sample_q_values' (MPO) or 'advantages' (V-MPO); for
      'advantages' the leading samples axis is dropped.
  """
  _, policy_params_fn = hk.transform(_hk_mock_policy_params)
  key_seq = hk.PRNGSequence(key)
  state_size = ACTION_DIM
  s_tm1 = jax.random.normal(
      next(key_seq), (TIME_DIM, BATCH_DIM, state_size), jnp.float32)
  policy_params = policy_params_fn(online_params, None, s_tm1)
  target_policy_params = policy_params_fn(target_params, None, s_tm1)
  # Tile the target distribution NUM_SAMPLES times along a new leading axis
  # and draw one action per replica.
  mean, stddev = target_policy_params['mean'], target_policy_params['stddev']
  mean_repeated = jnp.repeat(
      mean.reshape((1,) + mean.shape), NUM_SAMPLES, axis=0)
  stddev_repeated = jnp.repeat(
      stddev.reshape((1,) + stddev.shape), NUM_SAMPLES, axis=0)
  target_actions = _DIAGONAL_GAUSSIAN_DIST.sample(
      next(key_seq), mean_repeated, stddev_repeated)
  if target_name == 'advantages':
    # V-MPO uses a single action per state, not a set of samples.
    target_actions = target_actions[0, ...]
  a_t = target_actions + jnp.expand_dims(s_tm1, 0)
  sample_q_values = -jnp.sum(a_t ** 2, axis=-1)
  # Advantages are taken from the first sample's Q-values ([T, B]).
  advantages = sample_q_values[0, :, :]
  return dict(
      pi_params=policy_params,
      target_pi_params=target_policy_params,
      sample_q_values=sample_q_values,
      advantages=advantages,
      target_actions=target_actions,
  )
def get_common_loss_fn_inputs(params, key, target_name):
  """Builds the loss-fn keyword arguments shared by the MPO and V-MPO tests.

  Returns a pair of (mock outputs dict, loss-fn kwargs) where the kwargs
  contain the log-probs of the sampled actions under the online policy, the
  requested target tensor and the temperature Lagrange penalty.
  """
  out = _mock_outputs(params['online'], params['target'], key, target_name)
  pi_sample_log_probs = _DIAGONAL_GAUSSIAN_DIST.logprob(
      out['target_actions'], out['pi_params']['mean'],
      out['pi_params']['stddev'])
  return out, {
      'sample_log_probs': pi_sample_log_probs,
      target_name: out[target_name],
      'temperature_constraint': mpo_ops.LagrangePenalty(
          params['mpo']['temperature'], _EPSILON_BOUND)}
def get_decoupled_kl_constraints(out, params, per_dimension):
  """Returns separate (KL, penalty) constraints for the mean and covariance."""
  target_pi, online_pi = out['target_pi_params'], out['pi_params']
  kl_mean, kl_cov = distributions.decoupled_multivariate_normal_kl_divergence(
      target_pi['mean'], target_pi['stddev'],
      online_pi['mean'], online_pi['stddev'],
      per_dimension=per_dimension)
  mean_penalty = mpo_ops.LagrangePenalty(
      alpha=params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean),
      epsilon=_EPSILON_MEAN_BOUND, per_dimension=per_dimension)
  cov_penalty = mpo_ops.LagrangePenalty(
      alpha=params['mpo']['alpha_covariance'] * jnp.ones_like(kl_cov),
      epsilon=_EPSILON_COVARIANCE_BOUND, per_dimension=per_dimension)
  return [(kl_mean, mean_penalty), (kl_cov, cov_penalty)]
def get_coupled_kl_constraints(out, params, per_dimension):
  """Returns a single coupled (KL, penalty) constraint.

  The mean and covariance KLs (and their epsilon bounds) are summed into one
  constraint governed by the `alpha_mean` multiplier alone.
  """
  kl_mean, kl_covariance = (
      distributions.decoupled_multivariate_normal_kl_divergence(
          out['target_pi_params']['mean'], out['target_pi_params']['stddev'],
          out['pi_params']['mean'], out['pi_params']['stddev'],
          per_dimension=per_dimension))
  alpha_mean = params['mpo']['alpha_mean'] * jnp.ones_like(kl_mean)
  return [
      (kl_mean + kl_covariance, mpo_ops.LagrangePenalty(
          alpha=alpha_mean,
          epsilon=_EPSILON_MEAN_BOUND + _EPSILON_COVARIANCE_BOUND,
          per_dimension=per_dimension))
  ]
def vmpo_e_step_without_restarting_or_importance_weights(advantages, **kwargs):
  """Runs the V-MPO E-step with unit restarting and importance weights."""
  unit_weights = jnp.ones_like(advantages)
  return mpo_ops.vmpo_compute_weights_and_temperature_loss(
      advantages=advantages, restarting_weights=unit_weights,
      importance_weights=unit_weights, **kwargs)
class MPOTest(parameterized.TestCase):
@parameterized.parameters(
{'target_name': 'sample_q_values',
'loss_fn': mpo_ops.mpo_loss,
'get_kl_constraints': get_decoupled_kl_constraints,
'per_dimension': False},
{'target_name': 'advantages',
'loss_fn': mpo_ops.vmpo_loss,
'get_kl_constraints': get_decoupled_kl_constraints,
'per_dimension': False},
{'target_name': 'sample_q_values',
'loss_fn': mpo_ops.mpo_loss,
'get_kl_constraints': get_coupled_kl_constraints,
'per_dimension': False},
{'target_name': 'advantages',
'loss_fn': mpo_ops.vmpo_loss,
'get_kl_constraints': get_coupled_kl_constraints,
'per_dimension': False},
{'target_name': 'sample_q_values',
'loss_fn': mpo_ops.mpo_loss,
'get_kl_constraints': get_decoupled_kl_constraints,
'per_dimension': True},
{'target_name': 'advantages',
'loss_fn': mpo_ops.vmpo_loss,
'get_kl_constraints': get_decoupled_kl_constraints,
'per_dimension': True},
{'target_name': 'sample_q_values',
'loss_fn': mpo_ops.mpo_loss,
'get_kl_constraints': get_coupled_kl_constraints,
'per_dimension': True},
{'target_name': 'advantages',
'loss_fn': mpo_ops.vmpo_loss,
'get_kl_constraints': get_coupled_kl_constraints,
'per_dimension': True},
)
def test_optimization(
self, target_name, loss_fn, get_kl_constraints, per_dimension):
def _loss(params, key):
out, loss_fn_inputs = get_common_loss_fn_inputs(params, key, target_name)
kl_constraints = get_kl_constraints(out, params, per_dimension)
loss_fn_inputs.update({'kl_constraints': kl_constraints})
loss, mpo_stats = loss_fn(**loss_fn_inputs)
loss = jnp.mean(loss)
temperature_bound = jnp.mean(mpo_stats.normalized_weights * jnp.log(
mpo_stats.num_samples * mpo_stats.normalized_weights + 1e-8))
return loss, {'outputs': out, 'temperature_bound': temperature_bound}
key = jax.random.PRNGKey(_RANDOM_SEED)
grad_fn = jax.jit(jax.grad(_loss, has_aux=True))
optimizer = optax.adam(1e-3)
key, new_key = jax.random.split(key)
params = _init_params(new_key)
opt_state = optimizer.init((params['online'], params['mpo']))
@jax.jit
def _update(params_, opt_state_, key_):
next_key, key_ = jax.random.split(key_)
grad, stats = grad_fn(params_, key_)
updates, opt_state_ = optimizer.update(
(grad['online'], grad['mpo']), opt_state_)
online_params, mpo_params = optax.apply_updates(
(params_['online'], params_['mpo']), updates)
params_['online'] = online_params
params_['mpo'] = mpo_params
return params_, opt_state_, stats, next_key
for iter_idx in range(_NUM_ITERATIONS):
params, opt_state, extra, key = _update(params, opt_state, key)
if iter_idx % _TARGET_UPDATE_PERIOD == 0:
params['target'] = params['online']
key, new_key = jax.random.split(key)
_, extra = _loss(params, new_key)
action_mean = jnp.mean(extra['outputs']['pi_params']['mean'])
self.assertBetween(action_mean, -_MAX_ACTION_ERROR, _MAX_ACTION_ERROR)
self.assertLess(extra['temperature_bound'], _EPSILON_BOUND)
@parameterized.parameters(
{'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,
'additional_inputs': {},
'expected_deriv_of_target': [[[1]]],
'sample_dimension': True},
{'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,
'additional_inputs': {'top_k_fraction': 1.0},
'expected_deriv_of_target': [[1]],
'sample_dimension': False},
)
def test_e_step_gradient_computation(
self, e_step_fn, additional_inputs, expected_deriv_of_target,
sample_dimension):
target = jnp.array([[3]], jnp.float32)
if sample_dimension:
target = jnp.expand_dims(target, axis=0)
temperature = jnp.array(0.1, jnp.float32)
def fn(target_, temperature_):
temperature_constraint = mpo_ops.LagrangePenalty(
temperature_, _EPSILON_BOUND)
temperature_loss, _, _ = e_step_fn(
target_, temperature_constraint=temperature_constraint,
projection_operator=_PROJECTION_OPERATOR,
**additional_inputs)
return jnp.mean(temperature_loss)
grad = jax.grad(fn, argnums=(0, 1))(target, temperature)
np.testing.assert_almost_equal(np.array(grad[0]), np.array(
expected_deriv_of_target, np.float32), decimal=4)
self.assertAlmostEqual(grad[1], _EPSILON_BOUND, places=4)
@parameterized.parameters(
{'e_step_fn': mpo_ops.mpo_compute_weights_and_temperature_loss,
'additional_inputs': {},
'sample_dimension': True},
{'e_step_fn': vmpo_e_step_without_restarting_or_importance_weights,
'additional_inputs': {'top_k_fraction': 1.0},
'sample_dimension': False},
)
def test_e_step_stop_gradient(
self, e_step_fn, additional_inputs, sample_dimension):
target = jnp.array([[3]], jnp.float32)
if sample_dimension:
target = jnp.expand_dims(target, axis=0)
temperature = 0.1
def mean_weights_fn(target_, temperature_):
temperature_constraint = mpo_ops.LagrangePenalty(
temperature_, _EPSILON_BOUND)
_, weights, _ = e_step_fn(
target_, temperature_constraint=temperature_constraint,
projection_operator=_PROJECTION_OPERATOR,
**additional_inputs)
return jnp.mean(weights)
grad = jax.grad(mean_weights_fn, argnums=(0, 1))(target, temperature)
np.testing.assert_almost_equal(
np.array(grad[0]), np.zeros_like(grad[0]), decimal=4)
self.assertAlmostEqual(grad[1], 0., places=4)
def test_kl_constraint_loss_gradients(self):
kl = jnp.array(1., jnp.float32)
alpha = jnp.array(1., jnp.float32)
_, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(
alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),
_PROJECTION_OPERATOR)
def alpha_loss_fn(alpha_):
penalty = mpo_ops.LagrangePenalty(
alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
_, alpha_loss, _ = mpo_ops.kl_constraint_loss(
kl, penalty, _PROJECTION_OPERATOR)
return alpha_loss
alpha_gradients = jax.grad(alpha_loss_fn)(alpha)
actual_alpha_gradients = _EPSILON_MEAN_BOUND - kl
def kl_loss_fn(kl_):
penalty = mpo_ops.LagrangePenalty(
alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
kl_loss, _, _ = mpo_ops.kl_constraint_loss(
kl_, penalty, _PROJECTION_OPERATOR)
return kl_loss
kl_gradients = jax.grad(kl_loss_fn)(kl)
actual_kl_gradients = alpha
self.assertAlmostEqual(kl_gradients, actual_kl_gradients)
self.assertAlmostEqual(alpha_gradients, actual_alpha_gradients)
def test_kl_constraint_loss_stop_gradients(self):
kl = jnp.array(1., jnp.float32)
alpha = jnp.array(1., jnp.float32)
_, _, alpha = mpo_ops.kl_constraint_loss(kl, mpo_ops.LagrangePenalty(
alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False),
_PROJECTION_OPERATOR)
def kl_loss_fn(alpha_):
penalty = mpo_ops.LagrangePenalty(
alpha=alpha_, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
kl_loss, _, _ = mpo_ops.kl_constraint_loss(
kl, penalty, _PROJECTION_OPERATOR)
return kl_loss
kl_gradients = jax.grad(kl_loss_fn)(alpha)
def alpha_loss_fn(kl_):
penalty = mpo_ops.LagrangePenalty(
alpha=alpha, epsilon=_EPSILON_MEAN_BOUND, per_dimension=False)
_, alpha_loss, _ = mpo_ops.kl_constraint_loss(
kl_, penalty, _PROJECTION_OPERATOR)
return alpha_loss
alpha_gradients = jax.grad(alpha_loss_fn)(kl)
self.assertEqual(kl_gradients, 0.)
self.assertEqual(alpha_gradients, 0.)
@parameterized.parameters(
{'advantages': np.array([[1.0, 2.0]]),
'restarting_weights': np.array([[1.0, 1.0]]),
'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
{'advantages': np.array([[1.0, 2.0]]),
'restarting_weights': np.array([[1.0, 0.0]]),
'expected_temperature_loss': 1.0 + _EPSILON_BOUND},
)
def test_restarting_weights(
self, advantages, restarting_weights, expected_temperature_loss):
temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(
advantages, restarting_weights, np.ones_like(restarting_weights),
mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),
functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)
self.assertAlmostEqual(
temperature_loss, expected_temperature_loss, places=4)
@parameterized.parameters(
{'top_k_fraction': 1.0,
'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
'expected_top_k_weights': np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])},
{'top_k_fraction': 0.5,
'scaled_advantages': np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
'expected_top_k_weights': np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}
)
def test_top_k_fraction(
self, top_k_fraction, scaled_advantages, expected_top_k_weights):
top_k_weights = mpo_ops.get_top_k_weights(
top_k_fraction, jnp.ones_like(scaled_advantages), scaled_advantages)
np.testing.assert_allclose(top_k_weights, expected_top_k_weights)
def test_top_k_fraction_too_low(self):
with self.assertRaises(ValueError):
mpo_ops.get_top_k_weights(0.01, jnp.ones((3, 2)), jnp.ones((3, 2)))
@parameterized.parameters(
{'advantages': np.array([[1.0, 2.0]]),
'importance_weights': np.array([[1.0, 1.0]]),
'expected_temperature_loss': (math.log(1.0 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
{'advantages': np.array([[1.0, 2.0]]),
'importance_weights': np.array([[1.0, 0.5]]),
'expected_temperature_loss': (math.log(0.5 + math.exp(-1.0)) + 2.0 -
math.log(2.0) + _EPSILON_BOUND)},
)
def test_importance_weights(
self, advantages, importance_weights, expected_temperature_loss):
temperature_loss, _, _ = mpo_ops.vmpo_compute_weights_and_temperature_loss(
advantages, np.ones_like(importance_weights), importance_weights,
mpo_ops.LagrangePenalty(1.0, _EPSILON_BOUND),
functools.partial(np.clip, a_min=1e-8, a_max=None), 1.0)
self.assertAlmostEqual(
temperature_loss, expected_temperature_loss, places=4)
@parameterized.parameters({'per_dimension': True}, {'per_dimension': False})
def test_mpo_input_axis_order_equivalence(self, per_dimension):
key = jax.random.PRNGKey(_RANDOM_SEED)
key, new_key = jax.random.split(key)
params = _init_params(new_key)
out, mpo_inputs = get_common_loss_fn_inputs(params, key, 'sample_q_values')
kl_constraints = get_coupled_kl_constraints(out, params,
per_dimension=per_dimension)
mpo_inputs.update({'kl_constraints': kl_constraints})
stb_loss, stb_outputs = mpo_ops.mpo_loss(**mpo_inputs)
mean_stb_loss = jnp.mean(stb_loss)
mpo_inputs.update({
'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 1, 2),
'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 1, 2),
'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(
alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,
per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],
})
sbt_loss, sbt_outputs = mpo_ops.mpo_loss(**mpo_inputs)
mean_sbt_loss = jnp.mean(sbt_loss)
mpo_inputs.update({
'sample_log_probs': jnp.swapaxes(mpo_inputs['sample_log_probs'], 0, 2),
'sample_q_values': jnp.swapaxes(mpo_inputs['sample_q_values'], 0, 2),
'kl_constraints': kl_constraints,
'sample_axis': 2
})
tbs_loss, tbs_outputs = mpo_ops.mpo_loss(**mpo_inputs)
mean_tbs_loss = jnp.mean(tbs_loss)
self.assertAlmostEqual(mean_stb_loss, mean_sbt_loss, places=4)
self.assertAlmostEqual(mean_tbs_loss, mean_sbt_loss, places=4)
self.assertEqual(tbs_outputs.num_samples, sbt_outputs.num_samples)
self.assertEqual(tbs_outputs.num_samples, stb_outputs.num_samples)
@parameterized.parameters({'per_dimension': True}, {'per_dimension': False})
def test_vmpo_input_axis_order_equivalence(self, per_dimension):
key = jax.random.PRNGKey(_RANDOM_SEED)
key, new_key = jax.random.split(key)
params = _init_params(new_key)
out, vmpo_inputs = get_common_loss_fn_inputs(params, key, 'advantages')
kl_constraints = get_coupled_kl_constraints(out, params,
per_dimension=per_dimension)
vmpo_inputs.update({'kl_constraints': kl_constraints})
tb_loss, tb_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)
mean_tb_loss = jnp.mean(tb_loss)
vmpo_inputs.update({
'sample_log_probs': jnp.swapaxes(vmpo_inputs['sample_log_probs'], 0, 1),
'advantages': jnp.swapaxes(vmpo_inputs['advantages'], 0, 1),
'kl_constraints': [(jnp.swapaxes(kl, 0, 1), mpo_ops.LagrangePenalty(
alpha=jnp.swapaxes(pen.alpha, 0, 1), epsilon=pen.epsilon,
per_dimension=pen.per_dimension)) for (kl, pen) in kl_constraints],
})
bt_loss, bt_outputs = mpo_ops.vmpo_loss(**vmpo_inputs)
mean_bt_loss = jnp.mean(bt_loss)
self.assertAlmostEqual(mean_tb_loss, mean_bt_loss, places=4)
self.assertEqual(tb_outputs.num_samples, bt_outputs.num_samples)
if __name__ == '__main__':
absltest.main()
| true | true |
f732b7be8051fddbc0b2bdf37076cfe3133c14cd | 708 | py | Python | setup.py | cirlabs/django-boundaryservice | 28eb9d4ee29b207eaff99edf88d3474feee44575 | [
"MIT"
] | null | null | null | setup.py | cirlabs/django-boundaryservice | 28eb9d4ee29b207eaff99edf88d3474feee44575 | [
"MIT"
] | null | null | null | setup.py | cirlabs/django-boundaryservice | 28eb9d4ee29b207eaff99edf88d3474feee44575 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
# Package metadata for the django-boundaryservice distribution.
setup(
    name="django-boundaryservice",
    version="0.2.2",
    description="A reusable system for aggregating and providing API access to regional boundary data.",
    long_description='See `django-boundaryservice <https://github.com/newsapps/django-boundaryservice>`_ on Github for more information.',
    author='Christopher Groskopf',
    author_email='staringmonkey@gmail.com',
    url='http://blog.apps.chicagotribune.com/',
    license="MIT",
    packages=[
        'boundaryservice',
        'boundaryservice.management',
        'boundaryservice.management.commands'
    ],
    install_requires=[
        # NOTE(review): exact pin -- presumably newer tastypie releases are
        # incompatible; confirm before relaxing the version.
        'django-tastypie==0.9.12'
    ]
)
| 30.782609 | 138 | 0.693503 |
from distutils.core import setup
setup(
name="django-boundaryservice",
version="0.2.2",
description="A reusable system for aggregating and providing API access to regional boundary data.",
long_description='See `django-boundaryservice <https://github.com/newsapps/django-boundaryservice>`_ on Github for more information.',
author='Christopher Groskopf',
author_email='staringmonkey@gmail.com',
url='http://blog.apps.chicagotribune.com/',
license="MIT",
packages=[
'boundaryservice',
'boundaryservice.management',
'boundaryservice.management.commands'
],
install_requires=[
'django-tastypie==0.9.12'
]
)
| true | true |
f732b878e84f37a96691cbacccaafc907bef9926 | 1,449 | py | Python | mytube/posts/migrations/0007_auto_20200220_1822.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | mytube/posts/migrations/0007_auto_20200220_1822.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | mytube/posts/migrations/0007_auto_20200220_1822.py | ashowlsky/mytube_c | 122d75d7dcd23ed0240448e5db5ca130266d26a2 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-02-20 15:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0006_auto_20200220_1709'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='likes',
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Dislike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislikes', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislikes', to=settings.AUTH_USER_MODEL)),
],
),
]
| 39.162162 | 143 | 0.625259 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0006_auto_20200220_1709'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='likes',
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Dislike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislikes', to='posts.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dislikes', to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f732b9bf73ac7cd7a54b1d584223565e68a99e91 | 3,889 | py | Python | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 5 | 2022-01-13T15:06:45.000Z | 2022-01-28T19:39:54.000Z | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | null | null | null | configs/top_down/resnetv1d/coco/resnetv1d152_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 1 | 2021-06-17T13:56:23.000Z | 2021-06-17T13:56:23.000Z | log_level = 'INFO'
# Top-down COCO keypoint config: ResNetV1d-152 backbone, 256x192 input.
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
# Evaluate every 10 epochs; model selection tracks COCO AP.
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# 17 COCO keypoints; every channel is used for both training and inference.
channel_cfg = dict(
    num_output_channels=17,
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])
# model settings
model = dict(
    type='TopDown',
    pretrained='mmcls://resnet152_v1d',
    backbone=dict(type='ResNetV1d', depth=152),
    keypoint_head=dict(
        type='TopDownSimpleHead',
        in_channels=2048,
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
# Data settings: test-time boxes come from a person detector, not GT.
data_cfg = dict(
    image_size=[192, 256],
    heatmap_size=[48, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
# Training pipeline: flip / half-body / scale-rotation augmentation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]
# Validation pipeline: deterministic (no augmentation, no target heatmaps).
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
    samples_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| 27.778571 | 79 | 0.628182 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='mmcls://resnet152_v1d',
backbone=dict(type='ResNetV1d', depth=152),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true | true |
f732ba71e63e92ed77053f94229fdb7e2b543af1 | 425 | py | Python | examples/puma_fdyn.py | tavo-robotas/robotics-toolbox-python | 6b822df875c58f5e3c80288442796172321cab5b | [
"MIT"
] | 1 | 2021-05-14T09:58:10.000Z | 2021-05-14T09:58:10.000Z | examples/puma_fdyn.py | tavo-robotas/robotics-toolbox-python | 6b822df875c58f5e3c80288442796172321cab5b | [
"MIT"
] | null | null | null | examples/puma_fdyn.py | tavo-robotas/robotics-toolbox-python | 6b822df875c58f5e3c80288442796172321cab5b | [
"MIT"
] | null | null | null | import roboticstoolbox as rtb
# Load a Puma 560 model that includes inertial parameters.
p560 = rtb.models.DH.Puma560()
# Remove Coulomb friction (non-smooth friction slows/destabilises integration).
p560 = p560.nofriction()
# Print the kinematic & dynamic parameters.
p560.printdyn()
# Forward-dynamics simulation over 5 s with zero applied torque, starting
# from the "ready" configuration qr; dt is the integration time step.
d = p560.fdyn(5, p560.qr, dt=0.05)
# Plot the resulting joint-angle trajectory.
rtb.tools.trajectory.qplot(d.q)
# Animate the arm falling under gravity.
p560.plot(d.q.T) # movie='falling_puma.gif')
| 21.25 | 48 | 0.745882 | import roboticstoolbox as rtb
p560 = rtb.models.DH.Puma560()
p560 = p560.nofriction()
p560.printdyn()
d = p560.fdyn(5, p560.qr, dt=0.05)
rtb.tools.trajectory.qplot(d.q)
p560.plot(d.q.T)
| true | true |
f732bac4f94c8eb49d9d1b3cde6dce862da152ea | 2,625 | py | Python | property/forms.py | dmitryro/zrealtycorp.com | 22cb52187b3787676c0ad4ca278189323cec8f24 | [
"MIT"
] | 1 | 2018-02-21T21:25:40.000Z | 2018-02-21T21:25:40.000Z | property/forms.py | dmitryro/zrealtycorp.com | 22cb52187b3787676c0ad4ca278189323cec8f24 | [
"MIT"
] | null | null | null | property/forms.py | dmitryro/zrealtycorp.com | 22cb52187b3787676c0ad4ca278189323cec8f24 | [
"MIT"
] | null | null | null | from django import forms
from django import template
from django.forms import ModelForm
from django.template import loader, Context
from django.core.context_processors import media as media_processor
from djangular.forms import NgFormValidationMixin, NgModelFormMixin
from property.models import Property, Borough, Neighborhood
from smart_selects.db_fields import GroupedForeignKey, ChainedForeignKey
register = template.Library()
#from uni_form.helper import FormHelper
class SearchErrorList(list):
    # NOTE(review): subclassing ``list`` suggests this was intended as a
    # custom form error-list (cf. django.forms.utils.ErrorList); both members
    # below are currently no-ops, so the class has no observable behavior.
    def get(self, request):
        # No-op; presumably a view-style hook — TODO confirm intended use.
        pass
    class Meta:
        # NOTE(review): an inner Meta with an __init__ is unusual and is never
        # invoked by Django's form machinery; currently has no effect.
        def __init__(self, *args, **kwargs):
            pass
class SearchForm(NgFormValidationMixin, NgModelFormMixin, ModelForm):
    """Angular-validated property search form backed by the Property model.

    Adds decimal min/max price bounds on top of the model fields and tags the
    select widgets with the ``search-panel-field`` CSS class.
    """
    # Name under which djangular exposes the form's Angular scope model.
    form_name = 'property_form'
    # max_price = forms.IntegerField(min_value=0, required=False, initial=0)
    # min_price = forms.IntegerField(min_value=0, required=False, initial=0)
    min_price = forms.DecimalField(min_value=0, max_value=1000000000,required=False, initial=0)
    max_price = forms.DecimalField(min_value=0, max_value=1000000000,required=False, initial=0)
    class Meta:
        model = Property
        fields = ['rooms', 'type', 'category', 'borough','neighborhood','min_price','max_price','pets_allowed']
    def __init__(self, *args, **kwargs):
        super(SearchForm, self).__init__(*args, **kwargs)
        # Style the search-panel dropdowns uniformly.
        self.fields['rooms'].widget.attrs.update({'class' : 'search-panel-field'})
        self.fields['type'].widget.attrs.update({'class' : 'search-panel-field'})
        self.fields['category'].widget.attrs.update({'class' : 'search-panel-field'})
    def clean(self):
        # No extra cross-field validation; delegates entirely to the parents.
        cleaned_data = super(SearchForm, self).clean()
        # raise forms.ValidationError("This error was added to show the non field errors styling.")
        return cleaned_data
    def form_invalid(self, form):
        # NOTE(review): form_invalid/form_valid/get_context_data are view-mixin
        # methods, not Form methods — forms have no ``self.request``, and
        # ``HttpResponse``/``json`` are not imported in this module's visible
        # imports. These look copied from a FormView; verify they are ever called.
        if self.request.is_ajax():
            to_json_responce = dict()
            to_json_responce['status'] = 0
            to_json_responce['form_errors'] = form.errors
            return HttpResponse(json.dumps(to_json_responce), content_type='application/json')
    def form_valid(self, form):
        form.save()
        if self.request.is_ajax():
            to_json_responce = dict()
            to_json_responce['status'] = 1
            return HttpResponse(json.dumps(to_json_responce), content_type='application/json')
        # NOTE(review): non-AJAX requests fall through and return None.
    def get_context_data(self, **kwargs):
        context = super(SearchForm, self).get_context_data(**kwargs)
        # Expose a fresh, unbound copy of the form to the template.
        context.update(contact_form=SearchForm())
        context['search_call']='yes'
        return context
property_form = SearchForm()
| 36.971831 | 111 | 0.692571 | from django import forms
from django import template
from django.forms import ModelForm
from django.template import loader, Context
from django.core.context_processors import media as media_processor
from djangular.forms import NgFormValidationMixin, NgModelFormMixin
from property.models import Property, Borough, Neighborhood
from smart_selects.db_fields import GroupedForeignKey, ChainedForeignKey
register = template.Library()
class SearchErrorList(list):
def get(self, request):
pass
class Meta:
def __init__(self, *args, **kwargs):
pass
class SearchForm(NgFormValidationMixin, NgModelFormMixin, ModelForm):
form_name = 'property_form'
min_price = forms.DecimalField(min_value=0, max_value=1000000000,required=False, initial=0)
max_price = forms.DecimalField(min_value=0, max_value=1000000000,required=False, initial=0)
class Meta:
model = Property
fields = ['rooms', 'type', 'category', 'borough','neighborhood','min_price','max_price','pets_allowed']
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.fields['rooms'].widget.attrs.update({'class' : 'search-panel-field'})
self.fields['type'].widget.attrs.update({'class' : 'search-panel-field'})
self.fields['category'].widget.attrs.update({'class' : 'search-panel-field'})
def clean(self):
cleaned_data = super(SearchForm, self).clean()
return cleaned_data
def form_invalid(self, form):
if self.request.is_ajax():
to_json_responce = dict()
to_json_responce['status'] = 0
to_json_responce['form_errors'] = form.errors
return HttpResponse(json.dumps(to_json_responce), content_type='application/json')
def form_valid(self, form):
form.save()
if self.request.is_ajax():
to_json_responce = dict()
to_json_responce['status'] = 1
return HttpResponse(json.dumps(to_json_responce), content_type='application/json')
def get_context_data(self, **kwargs):
context = super(SearchForm, self).get_context_data(**kwargs)
context.update(contact_form=SearchForm())
context['search_call']='yes'
return context
property_form = SearchForm()
| true | true |
f732baf10d5767e849b93e2c2732e51ee1fccc79 | 307 | py | Python | src/employer/admin.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | null | null | null | src/employer/admin.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | 18 | 2020-02-12T00:41:40.000Z | 2022-02-10T12:00:03.000Z | src/employer/admin.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Employer, ContactPerson, Language, Expenses, Vacancy
from django.contrib.auth.models import Permission
admin.site.register(Permission)  # expose auth permissions in the admin UI
# Register all domain models under one shared (default) ModelAdmin.
@admin.register(Employer, ContactPerson, Language, Expenses, Vacancy)
class AuthorAdmin(admin.ModelAdmin):
    pass
| 27.909091 | 72 | 0.814332 | from django.contrib import admin
from .models import Employer, ContactPerson, Language, Expenses, Vacancy
from django.contrib.auth.models import Permission
admin.site.register(Permission)
@admin.register(Employer, ContactPerson, Language, Expenses, Vacancy)
class AuthorAdmin(admin.ModelAdmin):
pass
| true | true |
f732bb13283a50ad6c9ec3907e83c55333398612 | 1,405 | py | Python | nf_core/pipeline-template/{{cookiecutter.name_noslash}}/bin/scrape_software_versions.py | matq007/tools | 54e233a4c167f515b6f616b3c5a8a9bd660861c0 | [
"MIT"
] | null | null | null | nf_core/pipeline-template/{{cookiecutter.name_noslash}}/bin/scrape_software_versions.py | matq007/tools | 54e233a4c167f515b6f616b3c5a8a9bd660861c0 | [
"MIT"
] | null | null | null | nf_core/pipeline-template/{{cookiecutter.name_noslash}}/bin/scrape_software_versions.py | matq007/tools | 54e233a4c167f515b6f616b3c5a8a9bd660861c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from collections import OrderedDict
import re

# TODO nf-core: Add additional regexes for new tools in process get_software_versions
regexes = {
    '{{ cookiecutter.name }}': ['v_pipeline.txt', r"(\S+)"],
    'Nextflow': ['v_nextflow.txt', r"(\S+)"],
    'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"],
    'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
}

# Placeholder shown for any tool whose version file cannot be parsed.
_NOT_AVAILABLE = '<span style="color:#999999;\">N/A</span>'
results = OrderedDict()
for tool in ('{{ cookiecutter.name }}', 'Nextflow', 'FastQC', 'MultiQC'):
    results[tool] = _NOT_AVAILABLE

# Scan each version file with its tool-specific regex and keep the capture.
for tool, (fname, pattern) in regexes.items():
    with open(fname) as handle:
        match = re.search(pattern, handle.read())
    if match:
        results[tool] = "v{}".format(match.group(1))

# Dump as a MultiQC custom-content YAML section.
print ('''
id: '{{ cookiecutter.name.lower().replace(' ', '-') }}-software-versions'
section_name: '{{ cookiecutter.name }} Software Versions'
section_href: 'https://github.com/{{ cookiecutter.name }}'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
    <dl class="dl-horizontal">
''')
for tool, version in results.items():
    print("    <dt>{}</dt><dd>{}</dd>".format(tool, version))
print ("    </dl>")
| 35.125 | 85 | 0.626335 |
from __future__ import print_function
from collections import OrderedDict
import re
regexes = {
'{{ cookiecutter.name }}': ['v_pipeline.txt', r"(\S+)"],
'Nextflow': ['v_nextflow.txt', r"(\S+)"],
'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"],
'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
}
results = OrderedDict()
results['{{ cookiecutter.name }}'] = '<span style="color:#999999;\">N/A</span>'
results['Nextflow'] = '<span style="color:#999999;\">N/A</span>'
results['FastQC'] = '<span style="color:#999999;\">N/A</span>'
results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
for k, v in regexes.items():
with open(v[0]) as x:
versions = x.read()
match = re.search(v[1], versions)
if match:
results[k] = "v{}".format(match.group(1))
print ('''
id: '{{ cookiecutter.name.lower().replace(' ', '-') }}-software-versions'
section_name: '{{ cookiecutter.name }} Software Versions'
section_href: 'https://github.com/{{ cookiecutter.name }}'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
<dl class="dl-horizontal">
''')
for k,v in results.items():
print(" <dt>{}</dt><dd>{}</dd>".format(k,v))
print (" </dl>")
| true | true |
f732bba51315ed9d0c5f332bedef6621c95e71a9 | 87 | py | Python | awardz/apps.py | chelseaayoo/Awards | 229764c16f80b0c0a5573d7ff9f6b4b655fe4a91 | [
"Unlicense"
] | null | null | null | awardz/apps.py | chelseaayoo/Awards | 229764c16f80b0c0a5573d7ff9f6b4b655fe4a91 | [
"Unlicense"
] | null | null | null | awardz/apps.py | chelseaayoo/Awards | 229764c16f80b0c0a5573d7ff9f6b4b655fe4a91 | [
"Unlicense"
] | null | null | null | from django.apps import AppConfig
class AwardzConfig(AppConfig):
    """Django application configuration for the ``awardz`` app."""
    name = 'awardz'
| 14.5 | 33 | 0.747126 | from django.apps import AppConfig
class AwardzConfig(AppConfig):
name = 'awardz'
| true | true |
f732bbb506a036e4c216f8930233cbf27232b515 | 1,464 | py | Python | azure-mgmt-web/azure/mgmt/web/models/stack_minor_version.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | null | null | null | azure-mgmt-web/azure/mgmt/web/models/stack_minor_version.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-web/azure/mgmt/web/models/stack_minor_version.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StackMinorVersion(Model):
    """Application stack minor version.

    :param display_version: Application stack minor version (display only).
    :type display_version: str
    :param runtime_version: Application stack minor version (runtime only).
    :type runtime_version: str
    :param is_default: <code>true</code> if this is the default minor version;
     otherwise, <code>false</code>.
    :type is_default: bool
    """

    # Serialization map used by msrest; wire names are camelCase.
    _attribute_map = {
        'display_version': {'key': 'displayVersion', 'type': 'str'},
        'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
        'is_default': {'key': 'isDefault', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(StackMinorVersion, self).__init__(**kwargs)
        # Pull every known attribute out of kwargs, defaulting to None.
        for attr in ('display_version', 'runtime_version', 'is_default'):
            setattr(self, attr, kwargs.get(attr))
| 38.526316 | 78 | 0.620219 |
from msrest.serialization import Model
class StackMinorVersion(Model):
_attribute_map = {
'display_version': {'key': 'displayVersion', 'type': 'str'},
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(StackMinorVersion, self).__init__(**kwargs)
self.display_version = kwargs.get('display_version', None)
self.runtime_version = kwargs.get('runtime_version', None)
self.is_default = kwargs.get('is_default', None)
| true | true |
f732bbb92f282cd482c93861370a133e8d34af91 | 1,609 | py | Python | modules/ws_dual_camera.py | wesmith/CSI-Camera | 8bcb7c58f3546dbe8c1c81054185d347056b4ff6 | [
"BSD-3-Clause"
] | null | null | null | modules/ws_dual_camera.py | wesmith/CSI-Camera | 8bcb7c58f3546dbe8c1c81054185d347056b4ff6 | [
"BSD-3-Clause"
] | null | null | null | modules/ws_dual_camera.py | wesmith/CSI-Camera | 8bcb7c58f3546dbe8c1c81054185d347056b4ff6 | [
"BSD-3-Clause"
] | null | null | null | # ws_dual_camera.py
# WSmith 12/23/20
# utilize modified module ws_csi_camera for the camera class
import cv2
import numpy as np
import ws_csi_camera as ws
from importlib import reload
reload(ws) # ws is under development
def display(sensor_mode=ws.S_MODE_3_1280_720_60,
            dispW=ws.DISP_W_M3_M4_one_half,
            dispH=ws.DISP_H_M3_M4_one_half,
            display_fps=True):
    """Show the CSI picam (left) and a USB webcam (right) side by side.

    Press 'q' in the display window to quit.  Cameras and the window are
    always stopped/released, even if grabbing or drawing raises.
    """
    # At present, display the picam and a webcam; in the future, two picams.
    picam = ws.CSI_Camera(display_fps=display_fps)
    webcam = ws.CSI_Camera(display_fps=display_fps)

    # Only the picam needs a gstreamer pipeline; the webcam opens by index.
    picam.create_gstreamer_pipeline(sensor_id=0, sensor_mode=sensor_mode, flip_method=0,
                                    display_height=dispH, display_width=dispW)
    picam.open(picam.gstreamer_pipeline)
    webcam.open(1)
    picam.start()
    webcam.start()

    txt = "Picam on left: Sensor Mode {}, Display {} x {}".format(sensor_mode, dispW, dispH)
    cv2.namedWindow(txt, cv2.WINDOW_AUTOSIZE)
    try:
        while True:
            grabbed_l, imgL = picam.read()
            grabbed_r, imgR = webcam.read()
            # A failed grab (camera unplugged/stopped) yields no frame; bail
            # out instead of crashing inside cv2.resize on None.
            if not grabbed_l or not grabbed_r or imgL is None or imgR is None:
                break
            # Match the webcam frame to the picam frame size before stacking.
            imgR = cv2.resize(imgR, (imgL.shape[1], imgL.shape[0]))
            cv2.imshow(txt, np.hstack((imgL, imgR)))
            if cv2.waitKey(5) & 0xFF == ord('q'):
                break
    finally:
        # Guarantee shutdown on error or KeyboardInterrupt, not just on 'q':
        # the original skipped all cleanup whenever the loop raised.
        picam.stop()
        webcam.stop()
        picam.release()
        webcam.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
display(sensor_mode=ws.S_MODE_2_1920_1080_30,
dispW=ws.DISP_W_M2_one_quarter, dispH=ws.DISP_H_M2_one_quarter)
| 25.539683 | 92 | 0.655687 |
import cv2
import numpy as np
import ws_csi_camera as ws
from importlib import reload
reload(ws)
def display(sensor_mode=ws.S_MODE_3_1280_720_60,
dispW=ws.DISP_W_M3_M4_one_half,
dispH=ws.DISP_H_M3_M4_one_half,
display_fps=True):
picam = ws.CSI_Camera(display_fps=display_fps)
webcam = ws.CSI_Camera(display_fps=display_fps)
picam.create_gstreamer_pipeline(sensor_id=0, sensor_mode=sensor_mode, flip_method=0,
display_height=dispH, display_width=dispW)
picam.open(picam.gstreamer_pipeline)
webcam.open(1)
picam.start()
webcam.start()
txt = "Picam on left: Sensor Mode {}, Display {} x {}".format(sensor_mode, dispW, dispH)
cv2.namedWindow(txt, cv2.WINDOW_AUTOSIZE)
while True:
_, imgL = picam.read()
_, imgR = webcam.read()
imgR = cv2.resize(imgR, (imgL.shape[1], imgL.shape[0]))
img = np.hstack((imgL, imgR))
cv2.imshow(txt, img)
keyCode = cv2.waitKey(5) & 0xFF
if keyCode == ord('q'):
break
picam.stop()
webcam.stop()
picam.release()
webcam.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
display(sensor_mode=ws.S_MODE_2_1920_1080_30,
dispW=ws.DISP_W_M2_one_quarter, dispH=ws.DISP_H_M2_one_quarter)
| true | true |
f732bbc00351376db5365fb58a5cf2d923279112 | 2,816 | py | Python | mosqito/sq_metrics/tonality/tone_to_noise_ecma/_spectrum_smoothing.py | MitchellAcoustics/MoSQITo | 15e45888d08b2932909f50fd6af0ef9d5595a588 | [
"Apache-2.0"
] | null | null | null | mosqito/sq_metrics/tonality/tone_to_noise_ecma/_spectrum_smoothing.py | MitchellAcoustics/MoSQITo | 15e45888d08b2932909f50fd6af0ef9d5595a588 | [
"Apache-2.0"
] | null | null | null | mosqito/sq_metrics/tonality/tone_to_noise_ecma/_spectrum_smoothing.py | MitchellAcoustics/MoSQITo | 15e45888d08b2932909f50fd6af0ef9d5595a588 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 16:44:36 2020
@author: wantysal
"""
# Standard library import
import numpy as np
# Local import
from mosqito.sound_level_meter.noct_spectrum._getFrequencies import _getFrequencies
def _spectrum_smoothing(freqs_in, spec, noct, low_freq, high_freq, freqs_out):
    """
    Compute smoothed spectrum according to the n-th octave band chosen

    Parameters
    ----------
    freqs_in : numpy.array
        frequency axis of the input spectrum
    spec : numpy.array
        spectrum in dB
    noct : integer
        n-th octave-band according to which smooth the spectrum
    low_freq : float
        lowest frequency of the n-th octave bands
    high_freq : float
        highest frequency of the n-th octave bands
    freqs_out : numpy.array
        frequency axis along which the smoothed spectrum is given

    Returns
    -------
    smooth_spec : numpy.array
        smoothed spectrum along the given frequency axis
    """
    # n-th octave band filter; each row holds (low, center, high) edges in Hz.
    filter_freqs = _getFrequencies(
        low_freq, high_freq, noct, G=10, fr=1000)["f"]
    filter_freqs[-1, 2] = high_freq
    filter_freqs[0, 0] = low_freq

    # Energy-average the spectral components falling inside every band.
    # Empty bands are simply dropped (this replaces the original
    # delete-while-iterating loop, which rebuilt the arrays with np.delete).
    kept_edges = []   # (low, high) edges of bands that contain data
    band_levels = []  # energy-averaged level in dB for each kept band
    for f_low, _, f_high in filter_freqs:
        bin_index = np.where((freqs_in >= f_low) & (freqs_in <= f_high))[0]
        if bin_index.size == 0:
            continue
        # Average on an energy basis (10^(L/10)), then convert back to dB.
        band_levels.append(10 * np.log10(np.mean(10 ** (spec[bin_index] / 10))))
        kept_edges.append((f_low, f_high))

    # Pose each band level on the output frequency axis: fill the output
    # samples between the nearest points to the band's low and high edges.
    smooth_spec = np.zeros(spec.shape)
    for (f_low, f_high), level in zip(kept_edges, band_levels):
        low = np.argmin(np.abs(freqs_out - f_low))
        high = np.argmin(np.abs(freqs_out - f_high))
        smooth_spec[low:high] = level
    return smooth_spec
| 32.744186 | 98 | 0.638494 |
import numpy as np
from mosqito.sound_level_meter.noct_spectrum._getFrequencies import _getFrequencies
def _spectrum_smoothing(freqs_in, spec, noct, low_freq, high_freq, freqs_out):
filter_freqs = _getFrequencies(
low_freq, high_freq, noct, G=10, fr=1000)["f"]
filter_freqs[len(filter_freqs) - 1, 2] = high_freq
filter_freqs[0, 0] = low_freq
nb_bands = filter_freqs.shape[0]
smoothed_spectrum = np.zeros((nb_bands))
i = 0
while nb_bands > 0:
bin_index = np.where(
(freqs_in >= filter_freqs[i, 0]) & (freqs_in <= filter_freqs[i, 2])
)[0]
if len(bin_index) == 0:
smoothed_spectrum = np.delete(smoothed_spectrum, i, axis=0)
filter_freqs = np.delete(filter_freqs, i, axis=0)
nb_bands -= 1
else:
spec_sum = 0
for j in bin_index:
spec_sum += 10 ** (spec[j] / 10)
smoothed_spectrum[i] = 10 * np.log10(spec_sum / len(bin_index))
nb_bands -= 1
i += 1
cor = []
low = []
high = []
for i in range(len(filter_freqs)):
cor.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 1])))
low.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 0])))
high.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 2])))
smooth_spec = np.zeros((spec.shape))
for i in range(filter_freqs.shape[0]):
smooth_spec[low[i]: high[i]] = smoothed_spectrum[i]
return smooth_spec
| true | true |
f732bc1b7ca91f3afc2f231955f65cca8899c654 | 4,498 | py | Python | predictor.py | maltius/tf_blazeface_training | c4c73590f5084fcac56fa1625d227acf45a918ae | [
"Apache-2.0"
] | null | null | null | predictor.py | maltius/tf_blazeface_training | c4c73590f5084fcac56fa1625d227acf45a918ae | [
"Apache-2.0"
] | null | null | null | predictor.py | maltius/tf_blazeface_training | c4c73590f5084fcac56fa1625d227acf45a918ae | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from utils import bbox_utils, data_utils, drawing_utils, io_utils, train_utils, landmark_utils
import blazeface
args = io_utils.handle_args()
if args.handle_gpu:
io_utils.handle_gpu_compatibility()
batch_size = 1
use_custom_images = False
custom_image_path = "data/images/"
hyper_params = train_utils.get_hyper_params()
img_size = hyper_params["img_size"]
data_types = data_utils.get_data_types()
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()
if use_custom_images:
img_paths = data_utils.get_custom_imgs(custom_image_path)
total_items = len(img_paths)
test_data = tf.data.Dataset.from_generator(lambda: data_utils.custom_data_generator(
img_paths, img_size, img_size), data_types, data_shapes)
else:
test_split = "train[80%:]"
test_data, info = data_utils.get_dataset("the300w_lp", test_split)
total_items = data_utils.get_total_item_size(info, test_split)
test_data = test_data.map(lambda x: data_utils.preprocessing(x, img_size, img_size))
# train_split = "train[:80%]"
# val_split = "train[80%:]"
# train_data, info = data_utils.get_dataset("the300w_lp", train_split)
# val_data, _ = data_utils.get_dataset("the300w_lp", val_split)
# train_total_items = data_utils.get_total_item_size(info, train_split)
# val_total_items = data_utils.get_total_item_size(info, val_split)
# #
# img_size = hyper_params["img_size"]
# train_data = train_data.map(lambda x : data_utils.preprocessing(x, img_size, img_size, augmentation.apply))
# val_data = val_data.map(lambda x : data_utils.preprocessing(x, img_size, img_size))
#
# NOTE(review): ``ds_val`` is never defined in this script — the dataset
# construction above is commented out — so this line raises NameError as-is.
# Restore the validation-dataset pipeline or drop this override.
test_data=ds_val
test_data = test_data.padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)
model = blazeface.get_model(hyper_params)
model_path = io_utils.get_model_path()
# NOTE(review): hard-coded absolute Windows path shadows ``model_path`` above;
# prefer loading from ``model_path``.
model.load_weights('D:/Downloads/tf-blazeface-master/trained/blazeface_model_weights_85.h5')
# model.load_weights('C:/Users/altius/Downloads/blazeface80_epochs15_any139.h5')
prior_boxes = bbox_utils.generate_prior_boxes(hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
variances = hyper_params["variances"]
total_landmarks = hyper_params["total_landmarks"]
landmark_variances = total_landmarks * variances[0:2]
variances += landmark_variances
for image_data in test_data:
img, lands, coords = image_data
print(img.shape)
pass
# ind=0
# pred_deltas, pred_scores = model.predict_on_batch(img)
# pred_deltas *= variances
# #
# pred_bboxes_and_landmarks = bbox_utils.get_bboxes_and_landmarks_from_deltas(prior_boxes, pred_deltas)
# pred_bboxes_and_landmarks = tf.clip_by_value(pred_bboxes_and_landmarks, 0, 1)
# #
# pred_scores = tf.cast(pred_scores, tf.float32)
# #
# weighted_suppressed_data = bbox_utils.weighted_suppression(pred_scores[ind], pred_bboxes_and_landmarks[ind])
# #
# weighted_bboxes = weighted_suppressed_data[..., 0:4]
# weighted_landmarks = weighted_suppressed_data[..., 4:]
# #
# denormalized_bboxes = bbox_utils.denormalize_bboxes(weighted_bboxes, img_size, img_size)
# weighted_landmarks = tf.reshape(weighted_landmarks, (-1, total_landmarks, 2))
# denormalized_landmarks = landmark_utils.denormalize_landmarks(weighted_landmarks, img_size, img_size)
# drawing_utils.draw_bboxes_with_landmarks(img[ind], denormalized_bboxes, denormalized_landmarks)
ind=0
pred_deltas, pred_scores = model.predict_on_batch(img)
pred_deltas *= variances
#
pred_bboxes_and_landmarks = bbox_utils.get_bboxes_and_landmarks_from_deltas(prior_boxes, pred_deltas)
pred_bboxes_and_landmarks = tf.clip_by_value(pred_bboxes_and_landmarks, 0, 1)
#
pred_scores = tf.cast(pred_scores, tf.float32)
#
weighted_suppressed_data = bbox_utils.weighted_suppression(pred_scores[ind]*10, pred_bboxes_and_landmarks[ind])
#
weighted_bboxes = weighted_suppressed_data[..., 0:4]
weighted_landmarks = weighted_suppressed_data[..., 4:]
#
denormalized_bboxes = bbox_utils.denormalize_bboxes(weighted_bboxes, img_size, img_size)
weighted_landmarks = tf.reshape(weighted_landmarks, (-1, total_landmarks, 2))
denormalized_landmarks = landmark_utils.denormalize_landmarks(weighted_landmarks, img_size, img_size)
drawing_utils.draw_bboxes_with_landmarks(img[ind], denormalized_bboxes, denormalized_landmarks)
# for item in weighted_landmarks:
# print(item) | 42.037383 | 115 | 0.769898 | import tensorflow as tf
from utils import bbox_utils, data_utils, drawing_utils, io_utils, train_utils, landmark_utils
import blazeface
args = io_utils.handle_args()
if args.handle_gpu:
io_utils.handle_gpu_compatibility()
batch_size = 1
use_custom_images = False
custom_image_path = "data/images/"
hyper_params = train_utils.get_hyper_params()
img_size = hyper_params["img_size"]
data_types = data_utils.get_data_types()
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()
if use_custom_images:
img_paths = data_utils.get_custom_imgs(custom_image_path)
total_items = len(img_paths)
test_data = tf.data.Dataset.from_generator(lambda: data_utils.custom_data_generator(
img_paths, img_size, img_size), data_types, data_shapes)
else:
test_split = "train[80%:]"
test_data, info = data_utils.get_dataset("the300w_lp", test_split)
total_items = data_utils.get_total_item_size(info, test_split)
test_data = test_data.map(lambda x: data_utils.preprocessing(x, img_size, img_size))
test_data=ds_val
test_data = test_data.padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)
model = blazeface.get_model(hyper_params)
model_path = io_utils.get_model_path()
model.load_weights('D:/Downloads/tf-blazeface-master/trained/blazeface_model_weights_85.h5')
prior_boxes = bbox_utils.generate_prior_boxes(hyper_params["feature_map_shapes"], hyper_params["aspect_ratios"])
variances = hyper_params["variances"]
total_landmarks = hyper_params["total_landmarks"]
landmark_variances = total_landmarks * variances[0:2]
variances += landmark_variances
for image_data in test_data:
img, lands, coords = image_data
print(img.shape)
pass
ind=0
pred_deltas, pred_scores = model.predict_on_batch(img)
pred_deltas *= variances
pred_bboxes_and_landmarks = bbox_utils.get_bboxes_and_landmarks_from_deltas(prior_boxes, pred_deltas)
pred_bboxes_and_landmarks = tf.clip_by_value(pred_bboxes_and_landmarks, 0, 1)
pred_scores = tf.cast(pred_scores, tf.float32)
weighted_suppressed_data = bbox_utils.weighted_suppression(pred_scores[ind]*10, pred_bboxes_and_landmarks[ind])
weighted_bboxes = weighted_suppressed_data[..., 0:4]
weighted_landmarks = weighted_suppressed_data[..., 4:]
denormalized_bboxes = bbox_utils.denormalize_bboxes(weighted_bboxes, img_size, img_size)
weighted_landmarks = tf.reshape(weighted_landmarks, (-1, total_landmarks, 2))
denormalized_landmarks = landmark_utils.denormalize_landmarks(weighted_landmarks, img_size, img_size)
drawing_utils.draw_bboxes_with_landmarks(img[ind], denormalized_bboxes, denormalized_landmarks)
| true | true |
f732bcc0d4b85468209c6e2f8da783ae8aaaf116 | 33,473 | py | Python | glycan_profiling/piped_deconvolve.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | glycan_profiling/piped_deconvolve.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | glycan_profiling/piped_deconvolve.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | null | null | null | '''Implements a multiprocessing deconvolution algorithm
'''
import os
import multiprocessing
from collections import deque
import ms_peak_picker
import ms_deisotope
import traceback
from ms_deisotope.processor import (
ScanProcessor, MSFileLoader,
NoIsotopicClustersError, EmptyScanError)
from ms_deisotope.feature_map.quick_index import index as build_scan_index
from ms_deisotope.data_source.common import ProcessedScan
import logging
from glycan_profiling.task import (
TaskBase,
log_handle,
CallInterval)
from glycan_profiling.config import get_configuration
from multiprocessing import Process, JoinableQueue
try:
from Queue import Empty as QueueEmpty
except ImportError:
from queue import Empty as QueueEmpty
logger = logging.getLogger("glycan_profiler.preprocessor")
DONE = b"--NO-MORE--"
SCAN_STATUS_GOOD = b"good"
SCAN_STATUS_SKIP = b"skip"
user_config = get_configuration()
huge_tree = user_config.get("xml_huge_tree", False)
savgol = ms_peak_picker.scan_filter.SavitskyGolayFilter()
denoise = ms_peak_picker.scan_filter.FTICRBaselineRemoval(window_length=2.)
class ScanIDYieldingProcess(Process):
    """Worker process that walks an MS data file and feeds batches of
    scan ids onto a shared queue for downstream transformation workers.

    Each queue payload is a list of ``(precursor_scan_id, [product_scan_ids],
    process_msn)`` tuples. Only ids are sent; the consumer re-reads the scan
    data from its own file handle.

    Attributes
    ----------
    ms_file_path : str
        Path to the spectral data file on disk
    queue : multiprocessing.JoinableQueue
        Destination queue for scan id batches
    start_scan : str or None
        Scan id to start iterating from, if any
    end_scan : str or None
        Scan id at which to stop iterating, if any
    max_scans : int or None
        Maximum number of scans to enumerate before stopping
    no_more_event : multiprocessing.Event or None
        Set when iteration is complete, signalling consumers that an
        empty queue means "done" rather than "wait"
    ignore_tandem_scans : bool
        When True, batches are flagged so MSn scans are not processed
    batch_size : int
        Number of scan bunches per queue payload
    """

    def __init__(self, ms_file_path, queue, start_scan=None, max_scans=None, end_scan=None,
                 no_more_event=None, ignore_tandem_scans=False, batch_size=1):
        Process.__init__(self)
        self.daemon = True
        self.ms_file_path = ms_file_path
        self.queue = queue
        self.loader = None

        self.start_scan = start_scan
        self.max_scans = max_scans
        self.end_scan = end_scan
        self.ignore_tandem_scans = ignore_tandem_scans
        self.batch_size = batch_size

        self.no_more_event = no_more_event

    def _make_scan_batch(self):
        """Pull up to :attr:`batch_size` scan bunches from the loader.

        Returns
        -------
        batch : list
            ``(scan_id, product_scan_ids, process_msn)`` tuples
        scan_ids : list
            The precursor scan ids seen, used for end-scan detection
        """
        batch = []
        scan_ids = []
        for _i in range(self.batch_size):
            try:
                bunch = next(self.loader)
                scan, products = bunch
                if scan is not None:
                    scan_id = scan.id
                else:
                    scan_id = None
                product_scan_ids = [p.id for p in products]
            except StopIteration:
                break
            except Exception as e:
                log_handle.error("An error occurred in _make_scan_batch", e)
                break
            # The third element tells the consumer whether MSn scans in this
            # bunch should be processed at all.
            batch.append((scan_id, product_scan_ids, not self.ignore_tandem_scans))
            scan_ids.append(scan_id)
        return batch, scan_ids

    def run(self):
        """Enumerate scan ids from the file and enqueue them in batches
        until the end scan, the scan budget, or the file is exhausted."""
        self.loader = MSFileLoader(
            self.ms_file_path, huge_tree=huge_tree, decode_binary=False)

        if self.start_scan is not None:
            try:
                self.loader.start_from_scan(
                    self.start_scan, require_ms1=self.loader.has_ms1_scans(), grouped=True)
            except IndexError as e:
                log_handle.error("An error occurred while locating start scan", e)
                self.loader.reset()
                self.loader.make_iterator(grouped=True)
            except AttributeError as e:
                # Fix: `e` was previously referenced without being bound here,
                # raising a NameError inside the handler.
                log_handle.error("The reader does not support random access, start time will be ignored", e)
                self.loader.reset()
                self.loader.make_iterator(grouped=True)
        else:
            self.loader.make_iterator(grouped=True)

        count = 0
        last = 0
        if self.max_scans is None:
            max_scans = float('inf')
        else:
            max_scans = self.max_scans

        end_scan = self.end_scan
        while count < max_scans:
            try:
                batch, ids = self._make_scan_batch()
                if len(batch) > 0:
                    self.queue.put(batch)
                count += len(ids)
                if (count - last) > 1000:
                    # Periodically wait for the consumers to catch up so the
                    # queue does not grow without bound.
                    last = count
                    self.queue.join()
                if (end_scan in ids and end_scan is not None) or len(ids) == 0:
                    log_handle.log("End Scan Found")
                    break
            except StopIteration:
                break
            except Exception as e:
                log_handle.error("An error occurred while fetching scans", e)
                break

        if self.no_more_event is not None:
            self.no_more_event.set()
            log_handle.log("All Scan IDs have been dealt. %d scan bunches." % (count,))
        else:
            # Without a completion event, fall back to an in-band sentinel.
            self.queue.put(DONE)
class ScanBunchLoader(object):
    """A FIFO staging area that turns queued scan ids back into scan objects.

    Ids are enqueued with :meth:`put` and resolved in order by :meth:`get`
    using the wrapped loader's random access interface.
    """

    def __init__(self, mzml_loader):
        self.loader = mzml_loader
        self.queue = deque()

    def put(self, scan_id, product_scan_ids):
        """Queue a precursor id together with its product scan ids."""
        self.queue.append((scan_id, product_scan_ids))

    def get(self):
        """Resolve the oldest queued entry into ``(precursor, products)``.

        A ``None`` precursor id yields a ``None`` precursor; ``None`` product
        ids are dropped. When a precursor is present, its ``product_scans``
        attribute is populated with the resolved products.
        """
        precursor_id, product_ids = self.queue.popleft()
        precursor = None
        if precursor_id is not None:
            precursor = self.loader.get_scan_by_id(precursor_id)
        products = []
        for product_id in product_ids:
            if product_id is not None:
                products.append(self.loader.get_scan_by_id(product_id))
        if precursor:
            precursor.product_scans = products
        return (precursor, products)
class ScanTransformMixin(object):
    """Shared plumbing for scan-transforming worker processes: batched input
    consumption, result emission, and logging helpers.

    Expects the host class to provide ``input_queue``, ``output_queue``,
    ``log_handler`` and ``_work_complete`` attributes.
    """

    def log_error(self, error, scan_id, scan, product_scan_ids):
        """Report an exception raised while processing a scan, with traceback."""
        formatted_tb = traceback.format_exc()
        message = "An %r occurred for %s (index %r) in Process %r\n%s" % (
            error, scan_id, scan.index, multiprocessing.current_process(),
            formatted_tb)
        self.log_handler(message)

    def _init_batch_store(self):
        self._batch_store = deque()

    def get_work(self, block=True, timeout=30):
        """Return the next work item, refilling the local batch buffer from
        :attr:`input_queue` when it runs dry."""
        if not self._batch_store:
            self._batch_store.extend(self.input_queue.get(block, timeout))
        return self._batch_store.popleft()

    def log_message(self, message):
        self.log_handler("%s, %r" % (message, multiprocessing.current_process()))

    def skip_entry(self, index, ms_level):
        """Emit a skip marker for a scan identified only by index/level."""
        self.output_queue.put((SCAN_STATUS_SKIP, index, ms_level))

    def skip_scan(self, scan):
        """Emit a skip marker for a scan object."""
        self.skip_entry(scan.index, scan.ms_level)

    def send_scan(self, scan):
        """Pack a processed scan and place it on :attr:`output_queue`."""
        packed = scan.pack()
        # Drop the product scan references before pickling: for MS1 scans
        # they may include not-yet-packed scans, forming reference cycles and
        # inflating the IPC message.
        packed.product_scans = []
        self.output_queue.put((packed, packed.index, packed.ms_level))

    def all_work_done(self):
        return self._work_complete.is_set()

    def make_scan_transformer(self, loader=None):
        raise NotImplementedError()
class ScanTransformingProcess(Process, ScanTransformMixin):
    """ScanTransformingProcess describes a child process that consumes scan id bunches
    from a shared input queue, retrieves the relevant scans, and preprocesses them using an
    instance of :class:`ms_deisotope.processor.ScanProcessor`, sending the reduced result
    to a shared output queue.

    Attributes
    ----------
    input_queue : multiprocessing.JoinableQueue
        A shared input queue which contains payloads of bunches of
        scan ids
    ms1_deconvolution_args : dict
        Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
    ms1_peak_picking_args : dict
        Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
    msn_deconvolution_args : dict
        Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
    msn_peak_picking_args : dict
        Parameters passed to :class:`ms_deisotope.processor.ScanProcessor`
    mzml_path : str
        Path to the spectral data file on disk
    no_more_event : multiprocessing.Event
        An event which will be set when the process feeding the input
        queue has run out of items to add, indicating that any QueueEmptyException
        should be treated as a signal to finish rather than to wait for
        new input
    output_queue : multiprocessing.JoinableQueue
        A shared output queue which this object will put
        :class:`ms_deisotope.data_source.common.ProcessedScan` bunches onto.
    """

    def __init__(self, mzml_path, input_queue, output_queue,
                 no_more_event=None, ms1_peak_picking_args=None,
                 msn_peak_picking_args=None,
                 ms1_deconvolution_args=None, msn_deconvolution_args=None,
                 envelope_selector=None, ms1_averaging=0, log_handler=None,
                 deconvolute=True, verbose=False):
        if log_handler is None:
            def print_message(msg):
                print(msg)
            log_handler = print_message
        # Defaults mirror the standard glycopeptide processing configuration.
        if ms1_peak_picking_args is None:
            ms1_peak_picking_args = {
                "transforms": [denoise, savgol],
                "start_mz": 250
            }
        if msn_peak_picking_args is None:
            msn_peak_picking_args = {
                "transforms": []
            }
        if ms1_deconvolution_args is None:
            ms1_deconvolution_args = {
                "scorer": ms_deisotope.scoring.PenalizedMSDeconVFitter(35., 2),
                "charge_range": (1, 8),
                "averagine": ms_deisotope.glycopeptide
            }
        if msn_deconvolution_args is None:
            msn_deconvolution_args = {
                "scorer": ms_deisotope.scoring.MSDeconVFitter(10.),
                "charge_range": (1, 8),
                "averagine": ms_deisotope.glycopeptide
            }

        Process.__init__(self)
        self.verbose = verbose
        self._init_batch_store()
        self.daemon = True
        self.mzml_path = mzml_path
        self.input_queue = input_queue
        self.output_queue = output_queue

        self.ms1_peak_picking_args = ms1_peak_picking_args
        self.msn_peak_picking_args = msn_peak_picking_args
        self.ms1_deconvolution_args = ms1_deconvolution_args
        self.msn_deconvolution_args = msn_deconvolution_args
        self.envelope_selector = envelope_selector
        self.ms1_averaging = ms1_averaging
        self.deconvolute = deconvolute

        self.transformer = None

        self.no_more_event = no_more_event
        self._work_complete = multiprocessing.Event()
        self.log_handler = log_handler

    def make_scan_transformer(self, loader=None):
        """Build the :class:`ScanProcessor` this worker uses, bound to `loader`."""
        transformer = ScanProcessor(
            loader,
            ms1_peak_picking_args=self.ms1_peak_picking_args,
            msn_peak_picking_args=self.msn_peak_picking_args,
            ms1_deconvolution_args=self.ms1_deconvolution_args,
            msn_deconvolution_args=self.msn_deconvolution_args,
            loader_type=lambda x: x,
            envelope_selector=self.envelope_selector,
            ms1_averaging=self.ms1_averaging)
        return transformer

    def handle_scan_bunch(self, scan, product_scans, scan_id, product_scan_ids, process_msn=True):
        """Pick peaks (and optionally deconvolute) a precursor scan and its
        products, emitting each result or a skip marker on the output queue.
        """
        transformer = self.transformer
        # handle the MS1 scan if it is present
        if scan is not None:
            if len(scan.arrays[0]) == 0:
                self.skip_scan(scan)
            else:
                try:
                    scan, priorities, product_scans = transformer.process_scan_group(
                        scan, product_scans)
                    if scan is None:
                        # no way to report skip
                        pass
                    else:
                        if self.verbose:
                            self.log_message("Handling Precursor Scan %r with %d peaks" % (scan.id, len(scan.peak_set)))
                        if self.deconvolute:
                            transformer.deconvolute_precursor_scan(scan, priorities, product_scans)
                        self.send_scan(scan)
                except NoIsotopicClustersError as e:
                    self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
                        e.scan_id, len(scan.peak_set)))
                    self.skip_scan(scan)
                except EmptyScanError as e:
                    self.skip_scan(scan)
                except Exception as e:
                    self.skip_scan(scan)
                    self.log_error(e, scan_id, scan, (product_scan_ids))

        for product_scan in product_scans:
            # no way to report skip
            if product_scan is None:
                continue
            if len(product_scan.arrays[0]) == 0 or (not process_msn):
                self.skip_scan(product_scan)
                continue
            try:
                transformer.pick_product_scan_peaks(product_scan)
                if self.verbose:
                    self.log_message("Handling Product Scan %r with %d peaks (%0.3f/%0.3f, %r)" % (
                        product_scan.id, len(product_scan.peak_set), product_scan.precursor_information.mz,
                        product_scan.precursor_information.extracted_mz,
                        product_scan.precursor_information.defaulted))
                if self.deconvolute:
                    transformer.deconvolute_product_scan(product_scan)
                    if scan is None:
                        # Orphan product: no precursor survived processing, so
                        # fall back to the instrument-reported precursor values.
                        product_scan.precursor_information.default(orphan=True)
                self.send_scan(product_scan)
            except NoIsotopicClustersError as e:
                self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
                    e.scan_id, len(product_scan.peak_set)))
                self.skip_scan(product_scan)
            except EmptyScanError as e:
                self.skip_scan(product_scan)
            except Exception as e:
                self.skip_scan(product_scan)
                self.log_error(e, product_scan.id,
                               product_scan, (product_scan_ids))

    def run(self):
        """Main worker loop: pull scan id bunches, load the scans, transform
        them, and emit results until the input is exhausted."""
        loader = MSFileLoader(
            self.mzml_path, huge_tree=huge_tree, decode_binary=False)
        queued_loader = ScanBunchLoader(loader)

        has_input = True
        transformer = self.make_scan_transformer(loader)
        self.transformer = transformer

        # Silence (or redirect, in debug mode) the chatty deconvolution loggers.
        nologs = ["deconvolution_scan_processor"]
        if not self.deconvolute:
            nologs.append("deconvolution")

        debug_mode = os.getenv("GLYCRESOFTDEBUG")
        if debug_mode:
            handler = logging.FileHandler("piped-deconvolution-debug-%s.log" % (os.getpid()), 'w')
            fmt = logging.Formatter(
                "%(asctime)s - %(name)s:%(filename)s:%(lineno)-4d - %(levelname)s - %(message)s",
                "%H:%M:%S")
            handler.setFormatter(fmt)
        for logname in nologs:
            logger_to_silence = logging.getLogger(logname)
            if debug_mode:
                logger_to_silence.setLevel("DEBUG")
                logger_to_silence.addHandler(handler)
            else:
                logger_to_silence.propagate = False
                logger_to_silence.setLevel("CRITICAL")
                logger_to_silence.addHandler(logging.NullHandler())

        i = 0
        last = 0
        while has_input:
            try:
                scan_id, product_scan_ids, process_msn = self.get_work(True, 10)
                self.input_queue.task_done()
            except QueueEmpty:
                if self.no_more_event is not None and self.no_more_event.is_set():
                    has_input = False
                continue

            i += 1 + len(product_scan_ids)
            if scan_id == DONE:
                has_input = False
                break

            try:
                queued_loader.put(scan_id, product_scan_ids)
                scan, product_scans = queued_loader.get()
            except Exception as e:
                self.log_message("Something went wrong when loading bunch (%s): %r.\nRecovery is not possible." % (
                    (scan_id, product_scan_ids), e))
                # Fix: previously execution fell through here, so
                # `handle_scan_bunch` ran with `scan`/`product_scans` either
                # unbound (NameError) or stale from the previous iteration
                # (duplicate output). Skip this unrecoverable bunch instead.
                continue

            self.handle_scan_bunch(scan, product_scans, scan_id, product_scan_ids, process_msn)
            if (i - last) > 1000:
                # Periodically wait for consumers to drain the output queue.
                last = i
                self.output_queue.join()

        self.log_message("Done (%d scans)" % i)

        if self.no_more_event is None:
            self.output_queue.put((DONE, DONE, DONE))

        self._work_complete.set()
class ScanCollator(TaskBase):
    """Collates incoming scan bunches from multiple
    ScanTransformingProcesses, passing them along in
    the correct order.

    Attributes
    ----------
    count_jobs_done : int
        The number of scan bunches taken from :attr:`queue`
    count_since_last : int
        The number of work-cycles since the last scan bunch
        has been yielded
    done_event : multiprocessing.Event
        An IPC Event to indicate that all scan ids have been
        sent to the worker processes
    helper_producers : list
        A list of ScanTransformingProcesses
    include_fitted : bool
        Whether or not to save the raw fitted peaks for each
        scan produced. When this is `False`, they will be
        discarded and memory will be saved
    last_index : int
        The index of the last scan yielded through the iterator
        loop. This controls the next scan to be yielded and any
        waiting conditions
    primary_worker : ScanTransformingProcess
        The first worker to start consuming scans which will dictate
        the first handled index. Is required to run in isolation
        from other worker processes to insure that the first scan
        arrives in order
    queue : multiprocessing.Queue
        The IPC queue that all workers place their results on
        to be consumed and yielded in order
    started_helpers : bool
        Whether or not the additional workers in :attr:`helper_producers`
        have been started or not
    waiting : dict
        A mapping from scan index to `Scan` object. Used to serve
        scans through the iterator when their index is called for
    """
    # When True, every received (index, item) pair is logged; debug aid only.
    _log_received_scans = False

    def __init__(self, queue, done_event, helper_producers=None, primary_worker=None,
                 include_fitted=False, input_queue=None):
        if helper_producers is None:
            helper_producers = []
        self.queue = queue
        self.last_index = None
        self.count_jobs_done = 0
        self.count_since_last = 0
        self.waiting = {}
        self.done_event = done_event
        self.helper_producers = helper_producers
        self.started_helpers = False
        self.primary_worker = primary_worker
        self.include_fitted = include_fitted
        self.input_queue = input_queue

    def all_workers_done(self):
        """Return True only when the producer has dealt all scan ids and
        every worker process has signalled completion."""
        if self.done_event.is_set():
            if self.primary_worker.all_work_done():
                for helper in self.helper_producers:
                    if not helper.all_work_done():
                        return False
                return True
            else:
                return False
        return False

    def store_item(self, item, index):
        """Stores an incoming work-item for easy
        access by its `index` value. If configuration
        requires it, this will also reduce the number
        of peaks in `item`.

        Parameters
        ----------
        item : str or ProcessedScan
            Either a processed scan or a status stub (e.g. a skip
            marker) explaining why no scan is available for `index`
        index : int
            Scan index to store
        """
        if self._log_received_scans:
            self.log("-- received %d: %s" % (index, item))
        self.waiting[index] = item
        if not self.include_fitted and isinstance(item, ProcessedScan):
            # Drop the raw fitted peak list to save memory.
            item.peak_set = []

    def consume(self, timeout=10):
        """Fetches the next work item from the input
        queue :attr:`queue`, blocking for at most `timeout` seconds.

        Parameters
        ----------
        timeout : int, optional
            The duration to allow the process to block
            for while awaiting new work items.

        Returns
        -------
        bool
            Whether or not a new work item was found waiting
            on the :attr:`queue`
        """
        blocking = timeout != 0
        try:
            item, index, _ms_level = self.queue.get(blocking, timeout)
            self.queue.task_done()
            # DONE message may be sent many times.
            while item == DONE:
                item, index, _ms_level = self.queue.get(blocking, timeout)
                self.queue.task_done()
            self.store_item(item, index)
            return True
        except QueueEmpty:
            return False

    def start_helper_producers(self):
        """Starts the additional :class:`ScanTransformingProcess` workers
        in :attr:`helper_producers` if they have not been started already.

        Should only be invoked once
        """
        if self.started_helpers:
            return
        self.started_helpers = True
        for helper in self.helper_producers:
            if helper.is_alive():
                continue
            helper.start()

    def produce(self, scan):
        """Performs any final quality controls on the outgoing
        :class:`ProcessedScan` object and takes care of any internal
        details.

        Resets :attr:`count_since_last` to `0`.

        Parameters
        ----------
        scan : ProcessedScan
            The scan object being finalized for hand-off
            to client code

        Returns
        -------
        ProcessedScan
            The version of `scan` ready to be used by other
            parts of the program
        """
        self.count_since_last = 0
        return scan

    def count_pending_items(self):
        """Return the number of out-of-order scans currently held back."""
        return len(self.waiting)

    def drain_queue(self):
        """Greedily consume items from :attr:`queue` until a backlog cap is
        reached, keeping the cap low once the next in-order scan is held.

        Returns
        -------
        int
            The number of items consumed
        """
        i = 0
        has_next = self.last_index + 1 not in self.waiting
        while (self.count_pending_items() < (1000 if has_next else 10)
               and self.consume(.1)):
            self.count_jobs_done += 1
            has_next = self.last_index + 1 not in self.waiting
            i += 1
        if i > 15:
            self.log("Drained Output Queue of %d Items" % (i, ))
        return i

    def print_state(self):
        """Log a diagnostic snapshot of the collator and check worker
        processes for abnormal exit codes."""
        try:
            if self.queue.qsize() > 0:
                self.log("%d since last work item" % (self.count_since_last,))
                keys = sorted(self.waiting.keys())
                if len(keys) > 5:
                    self.log("Waiting Keys: %r..." % (keys[:5],))
                else:
                    self.log("Waiting Keys: %r" % (keys,))
                self.log("%d Keys Total" % (len(self.waiting),))
                self.log("The last index handled: %r" % (self.last_index,))
                self.log("Number of items waiting in the queue: %d" %
                         (self.queue.qsize(),))
        except NotImplementedError:
            # Some platforms do not support qsize
            pass
        for worker in ([self.primary_worker] + list(self.helper_producers)):
            code = worker.exitcode
            if code is not None and code != 0:
                self.log("%r has exit code %r" % (worker, code))
                worker.join(5)

    def __iter__(self):
        """Yield processed scans strictly in index order, buffering
        out-of-order arrivals in :attr:`waiting` and skipping entries
        flagged with the skip sentinel."""
        has_more = True
        # Log the state of the collator every 3 minutes
        status_monitor = CallInterval(60 * 3, self.print_state)
        status_monitor.start()
        while has_more:
            if self.consume(1):
                self.count_jobs_done += 1
                try:
                    if self.queue.qsize() > 500:
                        self.drain_queue()
                except NotImplementedError:
                    # Some platforms do not support qsize. On these, always drain the queue.
                    self.drain_queue()
            if self.last_index is None:
                # No scan yielded yet: find the first non-skipped index so
                # ordering has an anchor point.
                keys = sorted(self.waiting)
                if keys:
                    i = 0
                    n = len(keys)
                    found_content = False
                    while i < n:
                        scan = self.waiting.pop(keys[i])
                        if scan == SCAN_STATUS_SKIP:
                            self.last_index = keys[i]
                            i += 1
                            continue
                        else:
                            found_content = True
                            break
                    if found_content:
                        self.last_index = scan.index
                        yield self.produce(scan)
                    if self.last_index is not None:
                        # Only start the extra workers once the first in-order
                        # scan has been pinned down.
                        self.start_helper_producers()
            elif self.last_index + 1 in self.waiting:
                # Fast path: emit every consecutively-indexed scan we hold.
                while self.last_index + 1 in self.waiting:
                    scan = self.waiting.pop(self.last_index + 1)
                    if scan == SCAN_STATUS_SKIP:
                        self.last_index += 1
                        continue
                    else:
                        self.last_index = scan.index
                        yield self.produce(scan)
            elif len(self.waiting) == 0:
                if self.all_workers_done():
                    self.log("All Workers Claim Done.")
                    has_something = self.consume()
                    self.log("Checked Queue For Work: %r" % has_something)
                    if not has_something and len(self.waiting) == 0 and self.queue.empty():
                        has_more = False
            else:
                # Stalled: waiting on an index that has not arrived yet.
                self.count_since_last += 1
                if self.count_since_last % 1000 == 0:
                    self.print_state()

        status_monitor.stop()
class ScanGeneratorBase(object):
    """Abstract iterator interface over processed scans.

    Subclasses implement :meth:`make_iterator`; this base supplies the
    iterator protocol and simple configuration flags backed by
    class-level defaults.
    """

    # Class-level defaults for the configuration properties below.
    _deconvoluting = False
    _ms1_averaging = 0
    _ignore_tandem_scans = False
    _extract_only_tandem_envelopes = False

    def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
        raise NotImplementedError()

    def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
        raise NotImplementedError()

    def __iter__(self):
        return self

    def __next__(self):
        # Lazily construct the iterator on first use.
        if self._iterator is None:  # pylint: disable=access-member-before-definition
            self._iterator = self.make_iterator()
        return next(self._iterator)

    def next(self):
        # Python 2 compatibility alias.
        return self.__next__()

    def close(self):
        pass

    @property
    def scan_source(self):
        """The data source scans originate from; None at this level."""
        return None

    @property
    def deconvoluting(self):
        """Whether deconvolution is performed on picked peaks."""
        return self._deconvoluting

    @deconvoluting.setter
    def deconvoluting(self, value):
        self._deconvoluting = value

    @property
    def ms1_averaging(self):
        """Number of neighboring MS1 scans to average together."""
        return self._ms1_averaging

    @ms1_averaging.setter
    def ms1_averaging(self, value):
        self._ms1_averaging = value

    @property
    def ignore_tandem_scans(self):
        """Whether MSn scans are skipped during processing."""
        return self._ignore_tandem_scans

    @ignore_tandem_scans.setter
    def ignore_tandem_scans(self, value):
        self._ignore_tandem_scans = value

    @property
    def extract_only_tandem_envelopes(self):
        """Whether MS1 processing is restricted to precursor envelopes."""
        return self._extract_only_tandem_envelopes

    @extract_only_tandem_envelopes.setter
    def extract_only_tandem_envelopes(self, value):
        self._extract_only_tandem_envelopes = value
class ScanGenerator(TaskBase, ScanGeneratorBase):
    """Drives the multiprocess deconvolution pipeline for a single MS file.

    Spawns a :class:`ScanIDYieldingProcess` producer, one primary plus
    ``number_of_helpers`` :class:`ScanTransformingProcess` workers, and a
    :class:`ScanCollator` that re-orders their output into a single
    in-order scan iterator.
    """

    def __init__(self, ms_file, number_of_helpers=4,
                 ms1_peak_picking_args=None, msn_peak_picking_args=None,
                 ms1_deconvolution_args=None, msn_deconvolution_args=None,
                 extract_only_tandem_envelopes=False, ignore_tandem_scans=False,
                 ms1_averaging=0, deconvolute=True):
        self.ms_file = ms_file
        # Maps scan id -> scan time for scans that have been yielded.
        self.time_cache = {}
        self.ignore_tandem_scans = ignore_tandem_scans

        self.scan_ids_exhausted_event = multiprocessing.Event()

        self._iterator = None

        self._scan_yielder_process = None
        self._deconv_process = None

        self._input_queue = None
        self._output_queue = None
        self._deconv_helpers = None
        self._order_manager = None

        self.number_of_helpers = number_of_helpers

        self.ms1_peak_picking_args = ms1_peak_picking_args
        self.msn_peak_picking_args = msn_peak_picking_args
        self.ms1_averaging = ms1_averaging

        self.deconvoluting = deconvolute
        self.ms1_deconvolution_args = ms1_deconvolution_args
        self.msn_deconvolution_args = msn_deconvolution_args
        self.extract_only_tandem_envelopes = extract_only_tandem_envelopes
        self._scan_interval_tree = None
        self.log_controller = self.ipc_logger()

    @property
    def scan_source(self):
        return self.ms_file

    def join(self):
        """Wait for all child processes that were started to finish."""
        if self._scan_yielder_process is not None:
            self._scan_yielder_process.join()
        if self._deconv_process is not None:
            self._deconv_process.join()
        if self._deconv_helpers is not None:
            for helper in self._deconv_helpers:
                helper.join()

    def _terminate(self):
        # Forcefully stop any running child processes.
        if self._scan_yielder_process is not None:
            self._scan_yielder_process.terminate()
        if self._deconv_process is not None:
            self._deconv_process.terminate()
        if self._deconv_helpers is not None:
            for helper in self._deconv_helpers:
                helper.terminate()

    def _preindex_file(self):
        # Build the byte-offset index up front so each worker can use
        # fast random access instead of re-indexing the file itself.
        reader = MSFileLoader(self.ms_file, use_index=False, huge_tree=huge_tree)
        try:
            reader.prebuild_byte_offset_file(self.ms_file)
        except AttributeError:
            # the type does not support this type of indexing
            pass
        except IOError:
            # the file could not be written
            pass
        except Exception as e:
            # something else went wrong
            self.error("An error occurred while pre-indexing.", e)

    def _make_interval_tree(self, start_scan, end_scan):
        """Build the retention-time interval tree used to restrict MS1
        processing to precursor envelopes over the [start, end] scan range."""
        reader = MSFileLoader(self.ms_file, decode_binary=False)
        if start_scan is not None:
            start_ix = reader.get_scan_by_id(start_scan).index
        else:
            start_ix = 0
        if end_scan is not None:
            end_ix = reader.get_scan_by_id(end_scan).index
        else:
            end_ix = len(reader)
        reader.reset()
        _index, interval_tree = build_scan_index(
            reader, self.number_of_helpers + 1, (start_ix, end_ix))
        self._scan_interval_tree = interval_tree
        self.log("RT Tree: %r" % (self._scan_interval_tree.rt_tree))

    def _make_transforming_process(self):
        # Each worker gets its own file handle; only ids pass over IPC.
        return ScanTransformingProcess(
            self.ms_file,
            self._input_queue,
            self._output_queue,
            self.scan_ids_exhausted_event,
            ms1_peak_picking_args=self.ms1_peak_picking_args,
            msn_peak_picking_args=self.msn_peak_picking_args,
            ms1_deconvolution_args=self.ms1_deconvolution_args,
            msn_deconvolution_args=self.msn_deconvolution_args,
            envelope_selector=self._scan_interval_tree,
            log_handler=self.log_controller.sender(),
            ms1_averaging=self.ms1_averaging,
            deconvolute=self.deconvoluting)

    def _make_collator(self):
        return ScanCollator(
            self._output_queue, self.scan_ids_exhausted_event, self._deconv_helpers,
            self._deconv_process, input_queue=self._input_queue,
            include_fitted=not self.deconvoluting)

    def _initialize_workers(self, start_scan=None, end_scan=None, max_scans=None):
        """Create the IPC queues and spawn the producer and worker
        processes; helpers are created here but started later by the
        collator once the first scan index is known."""
        try:
            self._input_queue = JoinableQueue(int(1e6))
            self._output_queue = JoinableQueue(int(1e6))
        except OSError:
            # Not all platforms permit limiting the size of queues
            self._input_queue = JoinableQueue()
            self._output_queue = JoinableQueue()

        self._preindex_file()

        if self.extract_only_tandem_envelopes:
            self.log("Constructing Scan Interval Tree")
            self._make_interval_tree(start_scan, end_scan)

        self._terminate()
        self._scan_yielder_process = ScanIDYieldingProcess(
            self.ms_file, self._input_queue, start_scan=start_scan, end_scan=end_scan,
            max_scans=max_scans, no_more_event=self.scan_ids_exhausted_event,
            ignore_tandem_scans=self.ignore_tandem_scans, batch_size=1)
        self._scan_yielder_process.start()

        self._deconv_process = self._make_transforming_process()

        self._deconv_helpers = []

        for _i in range(self.number_of_helpers):
            self._deconv_helpers.append(self._make_transforming_process())
        self._deconv_process.start()

        self._order_manager = self._make_collator()

    def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
        """Start the pipeline and yield processed scans in order, recording
        each scan's retention time in :attr:`time_cache`."""
        self._initialize_workers(start_scan, end_scan, max_scans)

        for scan in self._order_manager:
            self.time_cache[scan.id] = scan.scan_time
            yield scan
        self.log_controller.stop()
        self.join()
        self._terminate()

    def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
        self._iterator = self.make_iterator(start_scan, end_scan, max_scans)

    def convert_scan_id_to_retention_time(self, scan_id):
        # Only valid for scans that have already been yielded.
        return self.time_cache[scan_id]

    def close(self):
        self._terminate()
| 36.662651 | 120 | 0.603561 |
import os
import multiprocessing
from collections import deque
import ms_peak_picker
import ms_deisotope
import traceback
from ms_deisotope.processor import (
ScanProcessor, MSFileLoader,
NoIsotopicClustersError, EmptyScanError)
from ms_deisotope.feature_map.quick_index import index as build_scan_index
from ms_deisotope.data_source.common import ProcessedScan
import logging
from glycan_profiling.task import (
TaskBase,
log_handle,
CallInterval)
from glycan_profiling.config import get_configuration
from multiprocessing import Process, JoinableQueue
try:
from Queue import Empty as QueueEmpty
except ImportError:
from queue import Empty as QueueEmpty
logger = logging.getLogger("glycan_profiler.preprocessor")
DONE = b"--NO-MORE--"
SCAN_STATUS_GOOD = b"good"
SCAN_STATUS_SKIP = b"skip"
user_config = get_configuration()
huge_tree = user_config.get("xml_huge_tree", False)
savgol = ms_peak_picker.scan_filter.SavitskyGolayFilter()
denoise = ms_peak_picker.scan_filter.FTICRBaselineRemoval(window_length=2.)
class ScanIDYieldingProcess(Process):
def __init__(self, ms_file_path, queue, start_scan=None, max_scans=None, end_scan=None,
no_more_event=None, ignore_tandem_scans=False, batch_size=1):
Process.__init__(self)
self.daemon = True
self.ms_file_path = ms_file_path
self.queue = queue
self.loader = None
self.start_scan = start_scan
self.max_scans = max_scans
self.end_scan = end_scan
self.ignore_tandem_scans = ignore_tandem_scans
self.batch_size = batch_size
self.no_more_event = no_more_event
def _make_scan_batch(self):
batch = []
scan_ids = []
for _i in range(self.batch_size):
try:
bunch = next(self.loader)
scan, products = bunch
if scan is not None:
scan_id = scan.id
else:
scan_id = None
product_scan_ids = [p.id for p in products]
except StopIteration:
break
except Exception as e:
log_handle.error("An error occurred in _make_scan_batch", e)
break
if not self.ignore_tandem_scans:
batch.append((scan_id, product_scan_ids, True))
else:
batch.append((scan_id, product_scan_ids, False))
scan_ids.append(scan_id)
return batch, scan_ids
def run(self):
self.loader = MSFileLoader(
self.ms_file_path, huge_tree=huge_tree, decode_binary=False)
if self.start_scan is not None:
try:
self.loader.start_from_scan(
self.start_scan, require_ms1=self.loader.has_ms1_scans(), grouped=True)
except IndexError as e:
log_handle.error("An error occurred while locating start scan", e)
self.loader.reset()
self.loader.make_iterator(grouped=True)
except AttributeError:
log_handle.error("The reader does not support random access, start time will be ignored", e)
self.loader.reset()
self.loader.make_iterator(grouped=True)
else:
self.loader.make_iterator(grouped=True)
count = 0
last = 0
if self.max_scans is None:
max_scans = float('inf')
else:
max_scans = self.max_scans
end_scan = self.end_scan
while count < max_scans:
try:
batch, ids = self._make_scan_batch()
if len(batch) > 0:
self.queue.put(batch)
count += len(ids)
if (count - last) > 1000:
last = count
self.queue.join()
if (end_scan in ids and end_scan is not None) or len(ids) == 0:
log_handle.log("End Scan Found")
break
except StopIteration:
break
except Exception as e:
log_handle.error("An error occurred while fetching scans", e)
break
if self.no_more_event is not None:
self.no_more_event.set()
log_handle.log("All Scan IDs have been dealt. %d scan bunches." % (count,))
else:
self.queue.put(DONE)
class ScanBunchLoader(object):
    """Resolve queued ``(precursor id, product ids)`` pairs into scan bunches.

    IDs are enqueued with :meth:`put` and resolved lazily by :meth:`get`
    through the wrapped random-access reader.
    """

    def __init__(self, mzml_loader):
        self.loader = mzml_loader
        self.queue = deque()

    def put(self, scan_id, product_scan_ids):
        """Queue one precursor scan ID together with its product scan IDs."""
        self.queue.append((scan_id, product_scan_ids))

    def get(self):
        """Pop the oldest queued entry and load its scans.

        Returns a ``(precursor, products)`` pair; ``precursor`` is ``None``
        when no precursor ID was recorded for the bunch.
        """
        precursor_id, product_ids = self.queue.popleft()
        precursor = (self.loader.get_scan_by_id(precursor_id)
                     if precursor_id is not None else None)
        products = [self.loader.get_scan_by_id(pid)
                    for pid in product_ids if pid is not None]
        if precursor:
            precursor.product_scans = products
        return (precursor, products)
class ScanTransformMixin(object):
    """Shared helpers for worker processes that stream scans to an output queue."""

    def log_error(self, error, scan_id, scan, product_scan_ids):
        """Report an exception raised while handling ``scan`` via the log handler."""
        formatted_tb = traceback.format_exc()
        message = "An %r occurred for %s (index %r) in Process %r\n%s" % (
            error, scan_id, scan.index, multiprocessing.current_process(),
            formatted_tb)
        self.log_handler(message)

    def _init_batch_store(self):
        # Local buffer of work items taken off the input queue a batch at a time.
        self._batch_store = deque()

    def get_work(self, block=True, timeout=30):
        """Return the next work item, refilling the buffer from the input queue."""
        if not self._batch_store:
            self._batch_store.extend(self.input_queue.get(block, timeout))
        return self._batch_store.popleft()

    def log_message(self, message):
        """Log ``message`` tagged with the current process identity."""
        self.log_handler(message + ", %r" %
                         (multiprocessing.current_process()))

    def skip_entry(self, index, ms_level):
        """Emit a skip marker for a scan known only by index and MS level."""
        self.output_queue.put((SCAN_STATUS_SKIP, index, ms_level))

    def skip_scan(self, scan):
        """Emit a skip marker for ``scan``."""
        self.output_queue.put((SCAN_STATUS_SKIP, scan.index, scan.ms_level))

    def send_scan(self, scan):
        """Pack ``scan`` and place it on the output queue."""
        packed = scan.pack()
        # The packed copy is sent without product scan references.
        packed.product_scans = []
        self.output_queue.put((packed, packed.index, packed.ms_level))

    def all_work_done(self):
        """Whether this worker has signalled completion."""
        return self._work_complete.is_set()

    def make_scan_transformer(self, loader=None):
        raise NotImplementedError()
class ScanTransformingProcess(Process, ScanTransformMixin):
    """Worker process that picks peaks in, and optionally deconvolutes, scans.

    Work items are ``(scan_id, product_scan_ids, process_msn)`` triples read
    from ``input_queue``. Each bunch is loaded from the worker's own reader
    over ``mzml_path``, transformed by a :class:`ScanProcessor`, and the
    results are written to ``output_queue`` as ``(scan, index, ms_level)``
    triples (or skip markers). ``no_more_event`` tells the worker the producer
    will not send any more scan IDs.
    """

    def __init__(self, mzml_path, input_queue, output_queue,
                 no_more_event=None, ms1_peak_picking_args=None,
                 msn_peak_picking_args=None,
                 ms1_deconvolution_args=None, msn_deconvolution_args=None,
                 envelope_selector=None, ms1_averaging=0, log_handler=None,
                 deconvolute=True, verbose=False):
        if log_handler is None:
            # Fall back to plain stdout logging when no IPC logger is supplied.
            def print_message(msg):
                print(msg)

            log_handler = print_message

        if ms1_peak_picking_args is None:
            ms1_peak_picking_args = {
                "transforms": [denoise, savgol],
                "start_mz": 250
            }
        if msn_peak_picking_args is None:
            msn_peak_picking_args = {
                "transforms": []
            }
        if ms1_deconvolution_args is None:
            ms1_deconvolution_args = {
                "scorer": ms_deisotope.scoring.PenalizedMSDeconVFitter(35., 2),
                "charge_range": (1, 8),
                "averagine": ms_deisotope.glycopeptide
            }
        if msn_deconvolution_args is None:
            msn_deconvolution_args = {
                "scorer": ms_deisotope.scoring.MSDeconVFitter(10.),
                "charge_range": (1, 8),
                "averagine": ms_deisotope.glycopeptide
            }

        Process.__init__(self)
        self.verbose = verbose
        self._init_batch_store()
        self.daemon = True
        self.mzml_path = mzml_path
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.ms1_peak_picking_args = ms1_peak_picking_args
        self.msn_peak_picking_args = msn_peak_picking_args
        self.ms1_deconvolution_args = ms1_deconvolution_args
        self.msn_deconvolution_args = msn_deconvolution_args
        self.envelope_selector = envelope_selector
        self.ms1_averaging = ms1_averaging
        self.deconvolute = deconvolute
        self.transformer = None

        self.no_more_event = no_more_event
        self._work_complete = multiprocessing.Event()
        self.log_handler = log_handler

    def make_scan_transformer(self, loader=None):
        """Construct the :class:`ScanProcessor` this worker uses on its scans."""
        transformer = ScanProcessor(
            loader,
            ms1_peak_picking_args=self.ms1_peak_picking_args,
            msn_peak_picking_args=self.msn_peak_picking_args,
            ms1_deconvolution_args=self.ms1_deconvolution_args,
            msn_deconvolution_args=self.msn_deconvolution_args,
            loader_type=lambda x: x,
            envelope_selector=self.envelope_selector,
            ms1_averaging=self.ms1_averaging)
        return transformer

    def handle_scan_bunch(self, scan, product_scans, scan_id, product_scan_ids, process_msn=True):
        """Transform one precursor scan and its product scans, emitting results.

        Scans that are empty or fail to process are reported with skip markers
        so the collator's index sequence stays contiguous.
        """
        transformer = self.transformer
        # Precursor scan may be None when the bunch has no MS1 scan.
        if scan is not None:
            if len(scan.arrays[0]) == 0:
                self.skip_scan(scan)
            else:
                try:
                    scan, priorities, product_scans = transformer.process_scan_group(
                        scan, product_scans)
                    if scan is None:
                        # The transformer consumed the scan without producing one;
                        # nothing to emit for this index.
                        pass
                    else:
                        if self.verbose:
                            self.log_message("Handling Precursor Scan %r with %d peaks" % (scan.id, len(scan.peak_set)))
                        if self.deconvolute:
                            transformer.deconvolute_precursor_scan(scan, priorities, product_scans)
                        self.send_scan(scan)
                except NoIsotopicClustersError as e:
                    self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
                        e.scan_id, len(scan.peak_set)))
                    self.skip_scan(scan)
                except EmptyScanError as e:
                    self.skip_scan(scan)
                except Exception as e:
                    self.skip_scan(scan)
                    self.log_error(e, scan_id, scan, (product_scan_ids))

        for product_scan in product_scans:
            if product_scan is None:
                continue
            if len(product_scan.arrays[0]) == 0 or (not process_msn):
                self.skip_scan(product_scan)
                continue
            try:
                transformer.pick_product_scan_peaks(product_scan)
                if self.verbose:
                    self.log_message("Handling Product Scan %r with %d peaks (%0.3f/%0.3f, %r)" % (
                        product_scan.id, len(product_scan.peak_set), product_scan.precursor_information.mz,
                        product_scan.precursor_information.extracted_mz,
                        product_scan.precursor_information.defaulted))
                if self.deconvolute:
                    transformer.deconvolute_product_scan(product_scan)
                if scan is None:
                    # No precursor was available; mark the precursor information
                    # as defaulted/orphaned.
                    product_scan.precursor_information.default(orphan=True)
                self.send_scan(product_scan)
            except NoIsotopicClustersError as e:
                self.log_message("No isotopic clusters were extracted from scan %s (%r)" % (
                    e.scan_id, len(product_scan.peak_set)))
                self.skip_scan(product_scan)
            except EmptyScanError as e:
                self.skip_scan(product_scan)
            except Exception as e:
                self.skip_scan(product_scan)
                self.log_error(e, product_scan.id,
                               product_scan, (product_scan_ids))

    def run(self):
        """Consume scan ID bunches until the producer signals exhaustion."""
        loader = MSFileLoader(
            self.mzml_path, huge_tree=huge_tree, decode_binary=False)
        queued_loader = ScanBunchLoader(loader)

        has_input = True
        transformer = self.make_scan_transformer(loader)
        self.transformer = transformer

        # Silence the chatty deconvolution loggers inside the worker, or, when
        # GLYCRESOFTDEBUG is set, redirect them to a per-process debug file.
        nologs = ["deconvolution_scan_processor"]
        if not self.deconvolute:
            nologs.append("deconvolution")
        debug_mode = os.getenv("GLYCRESOFTDEBUG")
        if debug_mode:
            handler = logging.FileHandler("piped-deconvolution-debug-%s.log" % (os.getpid()), 'w')
            fmt = logging.Formatter(
                "%(asctime)s - %(name)s:%(filename)s:%(lineno)-4d - %(levelname)s - %(message)s",
                "%H:%M:%S")
            handler.setFormatter(fmt)
        for logname in nologs:
            logger_to_silence = logging.getLogger(logname)
            if debug_mode:
                logger_to_silence.setLevel("DEBUG")
                logger_to_silence.addHandler(handler)
            else:
                logger_to_silence.propagate = False
                logger_to_silence.setLevel("CRITICAL")
                logger_to_silence.addHandler(logging.NullHandler())

        i = 0
        last = 0
        while has_input:
            try:
                scan_id, product_scan_ids, process_msn = self.get_work(True, 10)
                self.input_queue.task_done()
            except QueueEmpty:
                if self.no_more_event is not None and self.no_more_event.is_set():
                    has_input = False
                continue

            i += 1 + len(product_scan_ids)
            if scan_id == DONE:
                has_input = False
                break

            try:
                queued_loader.put(scan_id, product_scan_ids)
                scan, product_scans = queued_loader.get()
            except Exception as e:
                self.log_message("Something went wrong when loading bunch (%s): %r.\nRecovery is not possible." % (
                    (scan_id, product_scan_ids), e))
                # BUG FIX: previously execution fell through to handle_scan_bunch
                # with ``scan``/``product_scans`` unbound, raising a NameError;
                # skip the unloadable bunch instead.
                continue

            self.handle_scan_bunch(scan, product_scans, scan_id, product_scan_ids, process_msn)
            # Periodically wait for the consumer to drain the output queue.
            if (i - last) > 1000:
                last = i
                self.output_queue.join()

        self.log_message("Done (%d scans)" % i)
        if self.no_more_event is None:
            self.output_queue.put((DONE, DONE, DONE))

        self._work_complete.set()
class ScanCollator(TaskBase):
    """Re-orders processed scans arriving out-of-order from worker processes.

    Workers place ``(scan, index, ms_level)`` triples on ``queue`` as they
    finish; this object buffers them in ``waiting`` and yields them strictly
    by scan index, starting the helper workers once the first index has been
    established.
    """
    # When True, every received (index, item) pair is logged for debugging.
    _log_received_scans = False
    def __init__(self, queue, done_event, helper_producers=None, primary_worker=None,
                 include_fitted=False, input_queue=None):
        if helper_producers is None:
            helper_producers = []
        self.queue = queue
        # Index of the most recently yielded (or skipped) scan; None until the
        # first item has been produced.
        self.last_index = None
        self.count_jobs_done = 0
        self.count_since_last = 0
        # Out-of-order buffer mapping scan index -> received item.
        self.waiting = {}
        self.done_event = done_event
        self.helper_producers = helper_producers
        self.started_helpers = False
        self.primary_worker = primary_worker
        self.include_fitted = include_fitted
        self.input_queue = input_queue
    def all_workers_done(self):
        """True only once the producer signalled done and every worker is done."""
        if self.done_event.is_set():
            if self.primary_worker.all_work_done():
                for helper in self.helper_producers:
                    if not helper.all_work_done():
                        return False
                return True
            else:
                return False
        return False
    def store_item(self, item, index):
        """Buffer ``item`` under ``index`` until it can be yielded in order."""
        if self._log_received_scans:
            self.log("-- received %d: %s" % (index, item))
        self.waiting[index] = item
        if not self.include_fitted and isinstance(item, ProcessedScan):
            # Drop the fitted peak set when it is not requested downstream.
            item.peak_set = []
    def consume(self, timeout=10):
        """Take one item from the queue (skipping DONE markers).

        Returns False when the queue was empty within ``timeout``; a timeout of
        0 makes the read non-blocking.
        """
        blocking = timeout != 0
        try:
            item, index, _ms_level = self.queue.get(blocking, timeout)
            self.queue.task_done()
            while item == DONE:
                item, index, _ms_level = self.queue.get(blocking, timeout)
                self.queue.task_done()
            self.store_item(item, index)
            return True
        except QueueEmpty:
            return False
    def start_helper_producers(self):
        """Start all helper worker processes (idempotent)."""
        if self.started_helpers:
            return
        self.started_helpers = True
        for helper in self.helper_producers:
            if helper.is_alive():
                continue
            helper.start()
    def produce(self, scan):
        """Hook applied to every in-order scan just before it is yielded."""
        self.count_since_last = 0
        return scan
    def count_pending_items(self):
        # Number of scans buffered out of order.
        return len(self.waiting)
    def drain_queue(self):
        """Greedily pull items off the queue to keep workers from blocking.

        NOTE(review): this reads ``self.last_index + 1`` and would raise a
        TypeError if invoked while ``last_index`` is still None -- confirm
        callers only reach it after the first scan has been produced.
        """
        i = 0
        has_next = self.last_index + 1 not in self.waiting
        while (self.count_pending_items() < (1000 if has_next else 10)
                and self.consume(.1)):
            self.count_jobs_done += 1
            has_next = self.last_index + 1 not in self.waiting
            i += 1
        if i > 15:
            self.log("Drained Output Queue of %d Items" % (i, ))
        return i
    def print_state(self):
        """Log a diagnostic summary of collation state and worker health."""
        try:
            if self.queue.qsize() > 0:
                self.log("%d since last work item" % (self.count_since_last,))
            keys = sorted(self.waiting.keys())
            if len(keys) > 5:
                self.log("Waiting Keys: %r..." % (keys[:5],))
            else:
                self.log("Waiting Keys: %r" % (keys,))
            self.log("%d Keys Total" % (len(self.waiting),))
            self.log("The last index handled: %r" % (self.last_index,))
            self.log("Number of items waiting in the queue: %d" %
                     (self.queue.qsize(),))
        except NotImplementedError:
            # queue.qsize() is not implemented on every platform.
            pass
        for worker in ([self.primary_worker] + list(self.helper_producers)):
            code = worker.exitcode
            if code is not None and code != 0:
                self.log("%r has exit code %r" % (worker, code))
                worker.join(5)
    def __iter__(self):
        """Yield scans strictly ordered by index as workers complete them."""
        has_more = True
        # Periodically log progress in the background while collation runs.
        status_monitor = CallInterval(60 * 3, self.print_state)
        status_monitor.start()
        while has_more:
            if self.consume(1):
                self.count_jobs_done += 1
                try:
                    if self.queue.qsize() > 500:
                        self.drain_queue()
                except NotImplementedError:
                    # qsize() unavailable here; drain unconditionally.
                    self.drain_queue()
            if self.last_index is None:
                # Establish the starting index from the lowest buffered key,
                # skipping over any leading skip markers.
                keys = sorted(self.waiting)
                if keys:
                    i = 0
                    n = len(keys)
                    found_content = False
                    while i < n:
                        scan = self.waiting.pop(keys[i])
                        if scan == SCAN_STATUS_SKIP:
                            self.last_index = keys[i]
                            i += 1
                            continue
                        else:
                            found_content = True
                            break
                    if found_content:
                        self.last_index = scan.index
                        yield self.produce(scan)
                    if self.last_index is not None:
                        self.start_helper_producers()
            elif self.last_index + 1 in self.waiting:
                # Emit every consecutively-indexed scan already buffered.
                while self.last_index + 1 in self.waiting:
                    scan = self.waiting.pop(self.last_index + 1)
                    if scan == SCAN_STATUS_SKIP:
                        self.last_index += 1
                        continue
                    else:
                        self.last_index = scan.index
                        yield self.produce(scan)
            elif len(self.waiting) == 0:
                # Nothing buffered; terminate once all workers are finished
                # and one final check of the queue turns up nothing.
                if self.all_workers_done():
                    self.log("All Workers Claim Done.")
                    has_something = self.consume()
                    self.log("Checked Queue For Work: %r" % has_something)
                    if not has_something and len(self.waiting) == 0 and self.queue.empty():
                        has_more = False
            else:
                # Waiting on an out-of-order gap to fill in.
                self.count_since_last += 1
                if self.count_since_last % 1000 == 0:
                    self.print_state()
        status_monitor.stop()
class ScanGeneratorBase(object):
    """Common interface for objects that yield processed scans as an iterator."""

    # Class-level defaults backing the configuration properties below.
    _deconvoluting = False
    _ms1_averaging = 0
    _ignore_tandem_scans = False
    _extract_only_tandem_envelopes = False

    def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
        raise NotImplementedError()

    def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
        raise NotImplementedError()

    def __iter__(self):
        return self

    def __next__(self):
        # Lazily build the default iterator on first use.
        if self._iterator is None:
            self._iterator = self.make_iterator()
        return next(self._iterator)

    def next(self):
        # Python 2-style alias for __next__.
        return self.__next__()

    def close(self):
        pass

    @property
    def scan_source(self):
        """The underlying source of scan data; ``None`` for the base class."""
        return None

    @property
    def deconvoluting(self):
        """Whether deconvolution is enabled."""
        return self._deconvoluting

    @deconvoluting.setter
    def deconvoluting(self, value):
        self._deconvoluting = value

    @property
    def ms1_averaging(self):
        """MS1 averaging parameter passed through to the scan processor."""
        return self._ms1_averaging

    @ms1_averaging.setter
    def ms1_averaging(self, value):
        self._ms1_averaging = value

    @property
    def ignore_tandem_scans(self):
        """Whether MSn scans are skipped entirely."""
        return self._ignore_tandem_scans

    @ignore_tandem_scans.setter
    def ignore_tandem_scans(self, value):
        self._ignore_tandem_scans = value

    @property
    def extract_only_tandem_envelopes(self):
        """Whether to restrict extraction to precursor envelope regions."""
        return self._extract_only_tandem_envelopes

    @extract_only_tandem_envelopes.setter
    def extract_only_tandem_envelopes(self, value):
        self._extract_only_tandem_envelopes = value
class ScanGenerator(TaskBase, ScanGeneratorBase):
    """Produces processed scans from ``ms_file`` using a pool of worker processes.

    A :class:`ScanIDYieldingProcess` feeds scan IDs to a primary
    :class:`ScanTransformingProcess` plus ``number_of_helpers`` helpers, and a
    :class:`ScanCollator` restores the original scan-index order.
    """
    def __init__(self, ms_file, number_of_helpers=4,
                 ms1_peak_picking_args=None, msn_peak_picking_args=None,
                 ms1_deconvolution_args=None, msn_deconvolution_args=None,
                 extract_only_tandem_envelopes=False, ignore_tandem_scans=False,
                 ms1_averaging=0, deconvolute=True):
        self.ms_file = ms_file
        # Maps scan id -> scan time, filled while iterating; used by
        # convert_scan_id_to_retention_time.
        self.time_cache = {}
        self.ignore_tandem_scans = ignore_tandem_scans
        self.scan_ids_exhausted_event = multiprocessing.Event()
        self._iterator = None
        self._scan_yielder_process = None
        self._deconv_process = None
        self._input_queue = None
        self._output_queue = None
        self._deconv_helpers = None
        self._order_manager = None
        self.number_of_helpers = number_of_helpers
        self.ms1_peak_picking_args = ms1_peak_picking_args
        self.msn_peak_picking_args = msn_peak_picking_args
        self.ms1_averaging = ms1_averaging
        self.deconvoluting = deconvolute
        self.ms1_deconvolution_args = ms1_deconvolution_args
        self.msn_deconvolution_args = msn_deconvolution_args
        self.extract_only_tandem_envelopes = extract_only_tandem_envelopes
        self._scan_interval_tree = None
        self.log_controller = self.ipc_logger()
    @property
    def scan_source(self):
        """The file path scans are drawn from."""
        return self.ms_file
    def join(self):
        """Wait for all child processes to exit."""
        if self._scan_yielder_process is not None:
            self._scan_yielder_process.join()
        if self._deconv_process is not None:
            self._deconv_process.join()
        if self._deconv_helpers is not None:
            for helper in self._deconv_helpers:
                helper.join()
    def _terminate(self):
        """Forcibly stop every child process that has been created."""
        if self._scan_yielder_process is not None:
            self._scan_yielder_process.terminate()
        if self._deconv_process is not None:
            self._deconv_process.terminate()
        if self._deconv_helpers is not None:
            for helper in self._deconv_helpers:
                helper.terminate()
    def _preindex_file(self):
        """Build the byte-offset index up front so each worker can reuse it."""
        reader = MSFileLoader(self.ms_file, use_index=False, huge_tree=huge_tree)
        try:
            reader.prebuild_byte_offset_file(self.ms_file)
        except AttributeError:
            # This reader type does not support pre-built offset indices.
            pass
        except IOError:
            # The index could not be written; workers will index on their own.
            pass
        except Exception as e:
            self.error("An error occurred while pre-indexing.", e)
    def _make_interval_tree(self, start_scan, end_scan):
        """Build the scan interval tree used as the workers' envelope selector."""
        reader = MSFileLoader(self.ms_file, decode_binary=False)
        if start_scan is not None:
            start_ix = reader.get_scan_by_id(start_scan).index
        else:
            start_ix = 0
        if end_scan is not None:
            end_ix = reader.get_scan_by_id(end_scan).index
        else:
            end_ix = len(reader)
        reader.reset()
        _index, interval_tree = build_scan_index(
            reader, self.number_of_helpers + 1, (start_ix, end_ix))
        self._scan_interval_tree = interval_tree
        self.log("RT Tree: %r" % (self._scan_interval_tree.rt_tree))
    def _make_transforming_process(self):
        """Construct one worker configured with this generator's parameters."""
        return ScanTransformingProcess(
            self.ms_file,
            self._input_queue,
            self._output_queue,
            self.scan_ids_exhausted_event,
            ms1_peak_picking_args=self.ms1_peak_picking_args,
            msn_peak_picking_args=self.msn_peak_picking_args,
            ms1_deconvolution_args=self.ms1_deconvolution_args,
            msn_deconvolution_args=self.msn_deconvolution_args,
            envelope_selector=self._scan_interval_tree,
            log_handler=self.log_controller.sender(),
            ms1_averaging=self.ms1_averaging,
            deconvolute=self.deconvoluting)
    def _make_collator(self):
        """Construct the collator that restores scan order from worker output."""
        return ScanCollator(
            self._output_queue, self.scan_ids_exhausted_event, self._deconv_helpers,
            self._deconv_process, input_queue=self._input_queue,
            include_fitted=not self.deconvoluting)
    def _initialize_workers(self, start_scan=None, end_scan=None, max_scans=None):
        """Spin up the producer, the workers, and the collator for one run."""
        try:
            # NOTE(review): the OSError fallback presumably handles platforms
            # that reject a queue maxsize this large -- confirm.
            self._input_queue = JoinableQueue(int(1e6))
            self._output_queue = JoinableQueue(int(1e6))
        except OSError:
            self._input_queue = JoinableQueue()
            self._output_queue = JoinableQueue()
        self._preindex_file()
        if self.extract_only_tandem_envelopes:
            self.log("Constructing Scan Interval Tree")
            self._make_interval_tree(start_scan, end_scan)
        # Stop any processes left over from a previous iteration run.
        self._terminate()
        self._scan_yielder_process = ScanIDYieldingProcess(
            self.ms_file, self._input_queue, start_scan=start_scan, end_scan=end_scan,
            max_scans=max_scans, no_more_event=self.scan_ids_exhausted_event,
            ignore_tandem_scans=self.ignore_tandem_scans, batch_size=1)
        self._scan_yielder_process.start()
        self._deconv_process = self._make_transforming_process()
        # Helpers are created now but started lazily by the collator.
        self._deconv_helpers = []
        for _i in range(self.number_of_helpers):
            self._deconv_helpers.append(self._make_transforming_process())
        self._deconv_process.start()
        self._order_manager = self._make_collator()
    def make_iterator(self, start_scan=None, end_scan=None, max_scans=None):
        """Yield processed scans in order, recording scan times along the way."""
        self._initialize_workers(start_scan, end_scan, max_scans)
        for scan in self._order_manager:
            self.time_cache[scan.id] = scan.scan_time
            yield scan
        self.log_controller.stop()
        self.join()
        self._terminate()
    def configure_iteration(self, start_scan=None, end_scan=None, max_scans=None):
        """Prepare the default iterator consumed by ``__next__``."""
        self._iterator = self.make_iterator(start_scan, end_scan, max_scans)
    def convert_scan_id_to_retention_time(self, scan_id):
        """Look up the scan time recorded when ``scan_id`` was yielded."""
        return self.time_cache[scan_id]
    def close(self):
        self._terminate()
| true | true |
f732bd25316f39b9733cdf670d1130073262f447 | 10,310 | py | Python | py/src/ram.py | canh/rosettaboy | c5b8afd91d5c9f58bdd414e5fbd88f67acfbdc30 | [
"MIT"
] | null | null | null | py/src/ram.py | canh/rosettaboy | c5b8afd91d5c9f58bdd414e5fbd88f67acfbdc30 | [
"MIT"
] | null | null | null | py/src/ram.py | canh/rosettaboy | c5b8afd91d5c9f58bdd414e5fbd88f67acfbdc30 | [
"MIT"
] | null | null | null | from typing import List
from .cart import Cart
from .consts import *
ROM_BANK_SIZE = 0x4000  # 16 KB per switchable cartridge ROM bank
RAM_BANK_SIZE = 0x2000  # 8 KB per switchable cartridge RAM bank


class RAM:
    """Game Boy address space with MBC1-style ROM/RAM banking.

    All CPU memory accesses go through ``__getitem__``/``__setitem__`` so that
    banked cartridge ROM/RAM, the echo region, and the banking control
    registers behave like the hardware memory map.
    """

    def __init__(self, cart: "Cart", debug: bool = False) -> None:
        """Initialise the memory map from ``cart`` with post-boot register values."""
        self.cart = cart
        self.boot = self.get_boot()
        self.data = [0] * (0xFFFF + 1)
        self.debug = debug

        # MBC banking state.
        self.ram_enable = True
        self.ram_bank_mode = False
        self.rom_bank_low = 1
        self.rom_bank_high = 0
        self.rom_bank = 1
        self.ram_bank = 0

        # 16KB ROM bank 0
        for x in range(0x0000, 0x4000):
            self.data[x] = self.cart.data[x]

        # 16KB Switchable ROM bank
        for x in range(0x4000, 0x8000):
            self.data[x] = self.cart.data[x]

        # 8KB VRAM (0x8000-0xA000) is left zeroed.
        # 8KB Switchable RAM bank (0xA000-0xC000) lives on the cartridge.
        # 8KB Internal RAM (0xC000-0xE000), echo RAM (0xE000-0xFE00),
        # OAM (0xFE00-0xFEA0), unusable (0xFEA0-0xFF00).

        # IO register power-on values (0xFF00-0xFF4C)
        self.data[0xFF00] = 0x00  # BUTTONS
        self.data[0xFF01] = 0x00  # SB (Serial Data)
        self.data[0xFF02] = 0x00  # SC (Serial Control)
        self.data[0xFF04] = 0x00  # DIV
        self.data[0xFF05] = 0x00  # TIMA
        self.data[0xFF06] = 0x00  # TMA
        self.data[0xFF07] = 0x00  # TAC
        self.data[0xFF0F] = 0x00  # IF
        self.data[0xFF10] = 0x80  # NR10
        self.data[0xFF11] = 0xBF  # NR11
        self.data[0xFF12] = 0xF3  # NR12
        self.data[0xFF14] = 0xBF  # NR14
        self.data[0xFF16] = 0x3F  # NR21
        self.data[0xFF17] = 0x00  # NR22
        self.data[0xFF19] = 0xBF  # NR24
        self.data[0xFF1A] = 0x7F  # NR30
        self.data[0xFF1B] = 0xFF  # NR31
        self.data[0xFF1C] = 0x9F  # NR32
        self.data[0xFF1E] = 0xBF  # NR33
        self.data[0xFF20] = 0xFF  # NR41
        self.data[0xFF21] = 0x00  # NR42
        self.data[0xFF22] = 0x00  # NR43
        self.data[0xFF23] = 0xBF  # NR30
        self.data[0xFF24] = 0x77  # NR50
        self.data[0xFF25] = 0xF3  # NR51
        self.data[0xFF26] = 0xF1  # NR52 # 0xF0 on SGB
        self.data[0xFF40] = 0x00  # LCDC - official boot rom inits this to 0x91
        self.data[0xFF41] = 0x00  # STAT
        self.data[0xFF42] = 0x00  # SCX aka SCROLL_Y
        self.data[0xFF43] = 0x00  # SCY aka SCROLL_X
        self.data[0xFF44] = 144   # LY aka currently drawn line, 0-153, >144 = vblank
        self.data[0xFF45] = 0x00  # LYC
        self.data[0xFF46] = 0x00  # DMA
        self.data[0xFF47] = 0xFC  # BGP
        self.data[0xFF48] = 0xFF  # OBP0
        self.data[0xFF49] = 0xFF  # OBP1
        self.data[0xFF4A] = 0x00  # WY
        self.data[0xFF4B] = 0x00  # WX

        # Interrupt Enabled Register
        self.data[0xFFFF] = 0x00  # IE

        # TODO: ram[E000-FE00] mirrors ram[C000-DE00]

    def get_boot(self) -> List[int]:
        """Return the 256-byte boot ROM, synthesising one when boot.gb is absent."""
        try:
            # boot with the logo scroll if we have a boot rom
            with open("boot.gb", "rb") as fp:
                BOOT = list(fp.read(0x100))
                # NOP the DRM
                BOOT[0xE9] = 0x00
                BOOT[0xEA] = 0x00
                BOOT[0xFA] = 0x00
                BOOT[0xFB] = 0x00
        except IOError:
            # fmt: off
            # Directly set CPU registers as
            # if the logo had been scrolled
            BOOT = [
                # prod memory
                0x31, 0xFE, 0xFF,  # LD SP,$FFFE

                # enable LCD
                0x3E, 0x91,  # LD A,$91
                0xE0, 0x40,  # LDH [Mem.:LCDC], A

                # set flags
                0x3E, 0x01,  # LD A,$00
                0xCB, 0x7F,  # BIT 7,A (sets Z,n,H)
                0x37,        # SCF (sets C)

                # set registers
                0x3E, 0x01,  # LD A,$01
                0x06, 0x00,  # LD B,$00
                0x0E, 0x13,  # LD C,$13
                0x16, 0x00,  # LD D,$00
                0x1E, 0xD8,  # LD E,$D8
                0x26, 0x01,  # LD H,$01
                0x2E, 0x4D,  # LD L,$4D

                # skip to the end of the bootloader
                0xC3, 0xFD, 0x00,  # JP 0x00FD
            ]
            # fmt: on

        # these 5 instructions must be the final 2 --
        # after these finish executing, PC needs to be 0x100
        BOOT += [0x00] * (0xFE - len(BOOT))
        BOOT += [0xE0, 0x50]  # LDH 50,A (disable boot rom)

        assert len(BOOT) == 0x100, f"Bootloader must be 256 bytes ({len(BOOT)})"
        return BOOT

    def __getitem__(self, addr: int) -> int:
        """Read one byte from the memory map at ``addr``."""
        if addr < 0x4000:
            # ROM bank 0 (served from the boot ROM until it is disabled)
            if self.data[Mem.BOOT] == 0 and addr < 0x100:
                return self.boot[addr]
            return self.data[addr]
        elif addr < 0x8000:
            # Switchable ROM bank
            # TODO: array bounds check
            offset = addr - 0x4000
            bank = self.rom_bank * ROM_BANK_SIZE
            return self.cart.data[bank + offset]
        elif addr < 0xA000:
            # VRAM
            pass
        elif addr < 0xC000:
            # 8KB Switchable RAM bank
            if not self.ram_enable:
                # BUG FIX: the message is now actually interpolated; previously the
                # format string and the address were passed as separate Exception args.
                raise Exception(
                    "Reading from external ram while disabled: {:04X}".format(addr)
                )
            bank = self.ram_bank * RAM_BANK_SIZE
            offset = addr - 0xA000
            if bank + offset > self.cart.ram_size:
                # this should never happen because we die on ram_bank being
                # set to a too-large value
                raise Exception(
                    "Reading from external ram beyond limit: {:04x} ({:02x}:{:04x})".format(
                        bank + offset,
                        self.ram_bank,
                        (addr - 0xA000),
                    )
                )
            return self.cart.ram[bank + offset]
        elif addr < 0xD000:
            # work RAM, bank 0
            pass
        elif addr < 0xE000:
            # work RAM, bankable in CGB
            pass
        elif addr < 0xFE00:
            # ram[E000-FE00] mirrors ram[C000-DE00]
            return self.data[addr - 0x2000]
        elif addr < 0xFEA0:
            # Sprite attribute table
            pass
        elif addr < 0xFF00:
            # Unusable
            return 0xFF
        elif addr < 0xFF80:
            # IO Registers
            pass
        elif addr < 0xFFFF:
            # High RAM
            pass
        else:
            # IE Register
            pass

        return self.data[addr]

    def __setitem__(self, addr: int, val: int) -> None:
        """Write one byte to the memory map; low addresses drive MBC banking."""
        if addr < 0x2000:
            # RAM enable register
            self.ram_enable = val != 0
        elif addr < 0x4000:
            # ROM bank select (low 5 bits)
            self.rom_bank_low = val
            self.rom_bank = (self.rom_bank_high << 5) | self.rom_bank_low
            if self.debug:
                # BUG FIX (here and below): debug prints now interpolate their
                # placeholders instead of printing the raw format string.
                print(
                    "rom_bank set to {}/{}".format(self.rom_bank, self.cart.rom_size / ROM_BANK_SIZE)
                )
            if self.rom_bank * ROM_BANK_SIZE > self.cart.rom_size:
                raise Exception("Set rom_bank beyond the size of ROM")
        elif addr < 0x6000:
            # RAM bank select, or ROM bank high bits, depending on mode
            if self.ram_bank_mode:
                self.ram_bank = val
                if self.debug:
                    print(
                        "ram_bank set to {}/{}".format(
                            self.ram_bank,
                            self.cart.ram_size / RAM_BANK_SIZE,
                        )
                    )
                if self.ram_bank * RAM_BANK_SIZE > self.cart.ram_size:
                    raise Exception("Set ram_bank beyond the size of RAM")
            else:
                self.rom_bank_high = val
                self.rom_bank = (self.rom_bank_high << 5) | self.rom_bank_low
                if self.debug:
                    print(
                        "rom_bank set to {}/{}".format(
                            self.rom_bank,
                            self.cart.rom_size / ROM_BANK_SIZE,
                        )
                    )
                if self.rom_bank * ROM_BANK_SIZE > self.cart.rom_size:
                    raise Exception("Set rom_bank beyond the size of ROM")
        elif addr < 0x8000:
            # Banking mode select
            self.ram_bank_mode = val != 0
            if self.debug:
                print("ram_bank_mode set to {}".format(self.ram_bank_mode))
        elif addr < 0xA000:
            # VRAM
            # TODO: if writing to tile RAM, update tiles in Mem.class?
            pass
        elif addr < 0xC000:
            # external RAM, bankable
            if not self.ram_enable:
                raise Exception(
                    "Writing to external ram while disabled: {:04x}={:02x}".format(addr, val)
                )
            bank = self.ram_bank * RAM_BANK_SIZE
            offset = addr - 0xA000
            if self.debug:
                print(
                    "Writing external RAM: {:04x}={:02x} ({:02x}:{:04x})".format(
                        bank + offset,
                        val,
                        self.ram_bank,
                        (addr - 0xA000),
                    )
                )
            if bank + offset >= self.cart.ram_size:
                # Out-of-range external RAM writes are silently ignored.
                return
            self.cart.ram[bank + offset] = val
        elif addr < 0xD000:
            # work RAM, bank 0
            pass
        elif addr < 0xE000:
            # work RAM, bankable in CGB
            pass
        elif addr < 0xFE00:
            # ram[E000-FE00] mirrors ram[C000-DE00]
            self.data[addr - 0x2000] = val
        elif addr < 0xFEA0:
            # Sprite attribute table
            pass
        elif addr < 0xFF00:
            # Unusable
            if self.debug:
                print("Writing to invalid ram: {:04x} = {:02x}".format(addr, val))
        elif addr < 0xFF80:
            # IO Registers
            pass
        elif addr < 0xFFFF:
            # High RAM
            pass
        else:
            # IE Register
            pass

        self.data[addr] = val
| 33.365696 | 94 | 0.476819 | from typing import List
from .cart import Cart
from .consts import *
ROM_BANK_SIZE = 0x4000
RAM_BANK_SIZE = 0x2000
class RAM:
def __init__(self, cart: Cart, debug: bool = False) -> None:
self.cart = cart
self.boot = self.get_boot()
self.data = [0] * (0xFFFF + 1)
self.debug = debug
self.ram_enable = True
self.ram_bank_mode = False
self.rom_bank_low = 1
self.rom_bank_high = 0
self.rom_bank = 1
self.ram_bank = 0
for x in range(0x0000, 0x4000):
self.data[x] = self.cart.data[x]
for x in range(0x4000, 0x8000):
self.data[x] = self.cart.data[x]
self.data[0xFF00] = 0x00
self.data[0xFF01] = 0x00
self.data[0xFF02] = 0x00
self.data[0xFF04] = 0x00
self.data[0xFF05] = 0x00
self.data[0xFF06] = 0x00
self.data[0xFF07] = 0x00
self.data[0xFF0F] = 0x00
self.data[0xFF10] = 0x80
self.data[0xFF11] = 0xBF
self.data[0xFF12] = 0xF3
self.data[0xFF14] = 0xBF
self.data[0xFF16] = 0x3F
self.data[0xFF17] = 0x00
self.data[0xFF19] = 0xBF
self.data[0xFF1A] = 0x7F
self.data[0xFF1B] = 0xFF
self.data[0xFF1C] = 0x9F
self.data[0xFF1E] = 0xBF
self.data[0xFF20] = 0xFF
self.data[0xFF21] = 0x00
self.data[0xFF22] = 0x00
self.data[0xFF23] = 0xBF
self.data[0xFF24] = 0x77
self.data[0xFF25] = 0xF3
self.data[0xFF26] = 0xF1 f.data[0xFF40] = 0x00
self.data[0xFF41] = 0x00
self.data[0xFF42] = 0x00
self.data[0xFF43] = 0x00
self.data[0xFF44] = 144
self.data[0xFF45] = 0x00
self.data[0xFF46] = 0x00
self.data[0xFF47] = 0xFC
self.data[0xFF48] = 0xFF
self.data[0xFF49] = 0xFF
self.data[0xFF4A] = 0x00
self.data[0xFF4B] = 0x00
self.data[0xFFFF] = 0x00
def get_boot(self) -> List[int]:
try:
with open("boot.gb", "rb") as fp:
BOOT = list(fp.read(0x100))
BOOT[0xE9] = 0x00
BOOT[0xEA] = 0x00
BOOT[0xFA] = 0x00
BOOT[0xFB] = 0x00
except IOError:
BOOT = [
0x31, 0xFE, 0xFF,
0x3E, 0x91,
0xE0, 0x40,
0x3E, 0x01,
0xCB, 0x7F,
0x37,
0x3E, 0x01,
0x06, 0x00,
0x0E, 0x13,
0x16, 0x00,
0x1E, 0xD8,
0x26, 0x01,
0x2E, 0x4D,
0xC3, 0xFD, 0x00,
]
BOOT += [0x00] * (0xFE - len(BOOT))
BOOT += [0xE0, 0x50]
assert len(BOOT) == 0x100, f"Bootloader must be 256 bytes ({len(BOOT)})"
return BOOT
def __getitem__(self, addr: int) -> int:
if addr < 0x4000:
if self.data[Mem.BOOT] == 0 and addr < 0x100:
return self.boot[addr]
return self.data[addr]
elif addr < 0x8000:
offset = addr - 0x4000
bank = self.rom_bank * ROM_BANK_SIZE
return self.cart.data[bank + offset]
elif addr < 0xA000:
pass
elif addr < 0xC000:
if not self.ram_enable:
raise Exception(
"Reading from external ram while disabled: {:04X}", addr
)
bank = self.ram_bank * RAM_BANK_SIZE
offset = addr - 0xA000
if bank + offset > self.cart.ram_size:
raise Exception(
"Reading from external ram beyond limit: {:04x} ({:02x}:{:04x})",
bank + offset,
self.ram_bank,
(addr - 0xA000),
)
return self.cart.ram[bank + offset]
elif addr < 0xD000:
pass
elif addr < 0xE000:
pass
elif addr < 0xFE00:
return self.data[addr - 0x2000]
elif addr < 0xFEA0:
pass
elif addr < 0xFF00:
return 0xFF
elif addr < 0xFF80:
pass
elif addr < 0xFFFF:
pass
else:
pass
return self.data[addr]
def __setitem__(self, addr: int, val: int) -> None:
if addr < 0x2000:
self.ram_enable = val != 0
elif addr < 0x4000:
self.rom_bank_low = val
self.rom_bank = (self.rom_bank_high << 5) | self.rom_bank_low
if self.debug:
print(
"rom_bank set to {}/{}", self.rom_bank, self.cart.rom_size / ROM_BANK_SIZE
)
if self.rom_bank * ROM_BANK_SIZE > self.cart.rom_size:
raise Exception("Set rom_bank beyond the size of ROM")
elif addr < 0x6000:
if self.ram_bank_mode:
self.ram_bank = val
if self.debug:
print(
"ram_bank set to {}/{}",
self.ram_bank,
self.cart.ram_size / RAM_BANK_SIZE,
)
if self.ram_bank * RAM_BANK_SIZE > self.cart.ram_size:
raise Exception("Set ram_bank beyond the size of RAM")
else:
self.rom_bank_high = val
self.rom_bank = (self.rom_bank_high << 5) | self.rom_bank_low
if self.debug:
print(
"rom_bank set to {}/{}",
self.rom_bank,
self.cart.rom_size / ROM_BANK_SIZE,
)
if self.rom_bank * ROM_BANK_SIZE > self.cart.rom_size:
raise Exception("Set rom_bank beyond the size of ROM")
elif addr < 0x8000:
self.ram_bank_mode = val != 0
if self.debug:
print("ram_bank_mode set to {}", self.ram_bank_mode)
elif addr < 0xA000:
pass
elif addr < 0xC000:
if not self.ram_enable:
raise Exception(
"Writing to external ram while disabled: {:04x}={:02x}", addr, val
)
bank = self.ram_bank * RAM_BANK_SIZE
offset = addr - 0xA000
if self.debug:
print(
"Writing external RAM: {:04x}={:02x} ({:02x}:{:04x})",
bank + offset,
val,
self.ram_bank,
(addr - 0xA000),
)
if bank + offset >= self.cart.ram_size:
return
self.cart.ram[bank + offset] = val
elif addr < 0xD000:
pass
elif addr < 0xE000:
pass
elif addr < 0xFE00:
self.data[addr - 0x2000] = val
elif addr < 0xFEA0:
pass
elif addr < 0xFF00:
if self.debug:
print("Writing to invalid ram: {:04x} = {:02x}", addr, val)
elif addr < 0xFF80:
pass
elif addr < 0xFFFF:
pass
else:
pass
self.data[addr] = val
| true | true |
f732bd351d8106b5bcfd1e2148709586288dfb53 | 604 | py | Python | ftd_auth/serializers/userSerializer.py | Shanaka11/ftd_auth | 5e73f6f909235a5f7ec932b7e78a15544ba31731 | [
"MIT"
] | null | null | null | ftd_auth/serializers/userSerializer.py | Shanaka11/ftd_auth | 5e73f6f909235a5f7ec932b7e78a15544ba31731 | [
"MIT"
] | null | null | null | ftd_auth/serializers/userSerializer.py | Shanaka11/ftd_auth | 5e73f6f909235a5f7ec932b7e78a15544ba31731 | [
"MIT"
] | null | null | null | # Python
# Django
# Rest Framework
from django.contrib.auth.models import User
from rest_framework.serializers import ModelSerializer
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
# Local
class LoginSerializer(TokenObtainPairSerializer):
    """JWT login serializer that embeds extra user claims in the issued token."""

    @classmethod
    def get_token(cls, user):
        """Return the base token augmented with username, first name and email."""
        token = super().get_token(user)
        extra_claims = {
            'username': user.username,
            'firstname': user.first_name,
            'email': user.email,
        }
        for claim, value in extra_claims.items():
            token[claim] = value
        return token
class UserSerializer(ModelSerializer):
    """Expose every field of Django's built-in ``User`` model."""

    class Meta:
        model = User
        fields = '__all__'
from django.contrib.auth.models import User
from rest_framework.serializers import ModelSerializer
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class LoginSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
token = super().get_token(user)
token['username'] = user.username
token['firstname'] = user.first_name
token['email'] = user.email
return token
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = '__all__' | true | true |
f732bda9d958fb505e55bb197410b6a4a1479d11 | 4,093 | py | Python | stubs.min/System/Windows/__init___parts/PresentationSource.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/__init___parts/PresentationSource.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/__init___parts/PresentationSource.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class PresentationSource(DispatcherObject):
""" Provides an abstract base for classes that present content from another technology as part of an interoperation scenario. In addition,this class provides static methods for working with these sources,as well as the basic visual-layer presentation architecture. """
def AddSource(self,*args):
"""
AddSource(self: PresentationSource)
Adds a System.Windows.PresentationSource derived class instance to the list of
known presentation sources.
"""
pass
@staticmethod
def AddSourceChangedHandler(element,handler):
"""
AddSourceChangedHandler(element: IInputElement,handler: SourceChangedEventHandler)
Adds a handler for the SourceChanged event to the provided element.
element: The element to add the handler to.
handler: The hander implementation to add.
"""
pass
def ClearContentRenderedListeners(self,*args):
"""
ClearContentRenderedListeners(self: PresentationSource)
Sets the list of listeners for the
System.Windows.PresentationSource.ContentRendered event to null.
"""
pass
@staticmethod
def FromDependencyObject(dependencyObject):
"""
FromDependencyObject(dependencyObject: DependencyObject) -> PresentationSource
Returns the source in which a provided System.Windows.DependencyObject is
presented.
dependencyObject: The System.Windows.DependencyObject to find the source for.
Returns: The System.Windows.PresentationSource in which the dependency object is being
presented.
"""
pass
@staticmethod
def FromVisual(visual):
"""
FromVisual(visual: Visual) -> PresentationSource
Returns the source in which a provided System.Windows.Media.Visual is presented.
visual: The System.Windows.Media.Visual to find the source for.
Returns: The System.Windows.PresentationSource in which the visual is being presented,
or null if visual is disposed.
"""
pass
def GetCompositionTargetCore(self,*args):
"""
GetCompositionTargetCore(self: PresentationSource) -> CompositionTarget
When overridden in a derived class,returns a visual target for the given
source.
Returns: Returns a System.Windows.Media.CompositionTarget that is target for rendering
the visual.
"""
pass
def RemoveSource(self,*args):
"""
RemoveSource(self: PresentationSource)
Removes a System.Windows.PresentationSource derived class instance from the
list of known presentation sources.
"""
pass
@staticmethod
def RemoveSourceChangedHandler(e,handler):
"""
RemoveSourceChangedHandler(e: IInputElement,handler: SourceChangedEventHandler)
Removes a handler for the SourceChanged event from the provided element.
e: The element to remove the handler from.
handler: The handler implementation to remove.
"""
pass
def RootChanged(self,*args):
"""
RootChanged(self: PresentationSource,oldRoot: Visual,newRoot: Visual)
Provides notification that the root System.Windows.Media.Visual has changed.
oldRoot: The old root System.Windows.Media.Visual.
newRoot: The new root System.Windows.Media.Visual.
"""
pass
CompositionTarget=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the visual target for the visuals being presented in the source.
Get: CompositionTarget(self: PresentationSource) -> CompositionTarget
"""
IsDisposed=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets a value that declares whether the object is disposed.
Get: IsDisposed(self: PresentationSource) -> bool
"""
RootVisual=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets or sets the root visual being presented in the source.
Get: RootVisual(self: PresentationSource) -> Visual
Set: RootVisual(self: PresentationSource)=value
"""
ContentRendered=None
CurrentSources=None
| 35.284483 | 270 | 0.73247 | class PresentationSource(DispatcherObject):
def AddSource(self,*args):
def AddSourceChangedHandler(element,handler):
"""
AddSourceChangedHandler(element: IInputElement,handler: SourceChangedEventHandler)
"""
pass
dependencyObject: The System.Windows.DependencyObject to find the source for.
When overridden in a derived class,returns a visual target for the given
source.
RemoveSourceChangedHandler(e: IInputElement,handler: SourceChangedEventHandler)
Removes a handler for the SourceChanged event from the provided element.
"""Gets the visual target for the visuals being presented in the source.
Get: IsDisposed(self: PresentationSource) -> bool
"""
CurrentSources=None
| true | true |
f732bec74dcddfb3566db113df4dbc1bbceb9f8b | 920 | py | Python | src/SortedList/SortedList.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | 2 | 2020-01-26T09:42:03.000Z | 2020-05-26T13:57:02.000Z | src/SortedList/SortedList.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | null | null | null | src/SortedList/SortedList.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | null | null | null | class SortedList:
"""
This is a list object which is sorted. Actually
this is not sorted now. Because this is a parent
class.
"""
_list = list()
def __init__(self, arg: list or tuple) -> None:
try:
if type(arg) == list:
self._list = arg
elif type(arg) == tuple:
self._list = self.tuple_to_list(arg)
except:
raise TypeError("It is not a list or tuple.")
self.sort()
@staticmethod
def tuple_to_list(argtuple: tuple) -> list:
return [i for i in argtuple]
def __str__(self) -> str:
return str(self._list)
def sort(self) -> None:
if not (self.__class__.__name__) == "SortedList":
raise NotImplementedError("Please implement this method.")
else:
pass
if __name__ == "__main__":
obj = SortedList((2, 3, 4))
print(obj)
| 23.589744 | 70 | 0.553261 | class SortedList:
_list = list()
def __init__(self, arg: list or tuple) -> None:
try:
if type(arg) == list:
self._list = arg
elif type(arg) == tuple:
self._list = self.tuple_to_list(arg)
except:
raise TypeError("It is not a list or tuple.")
self.sort()
@staticmethod
def tuple_to_list(argtuple: tuple) -> list:
return [i for i in argtuple]
def __str__(self) -> str:
return str(self._list)
def sort(self) -> None:
if not (self.__class__.__name__) == "SortedList":
raise NotImplementedError("Please implement this method.")
else:
pass
if __name__ == "__main__":
obj = SortedList((2, 3, 4))
print(obj)
| true | true |
f732bedfa6b474867fbed972f4231446ce6fd48e | 1,899 | py | Python | astropy/io/misc/asdf/tags/unit/tests/test_quantity.py | rkiman/astropy | 99de28bc0dbfe2ee0bef95b67f5619e03d22cc06 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | astropy/io/misc/asdf/tags/unit/tests/test_quantity.py | rkiman/astropy | 99de28bc0dbfe2ee0bef95b67f5619e03d22cc06 | [
"BSD-3-Clause"
] | null | null | null | astropy/io/misc/asdf/tags/unit/tests/test_quantity.py | rkiman/astropy | 99de28bc0dbfe2ee0bef95b67f5619e03d22cc06 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
| 24.986842 | 63 | 0.651922 |
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
| true | true |
f732bee2606f2745c1d111d3ebc4dd6ef6abd1c1 | 2,097 | py | Python | packages/py-ab-testing/ABTesting/controller.py | ramon-villain/ab-testing | e8e449db3083a5c147f32c47b8f24a7dc2d6eda3 | [
"MIT"
] | null | null | null | packages/py-ab-testing/ABTesting/controller.py | ramon-villain/ab-testing | e8e449db3083a5c147f32c47b8f24a7dc2d6eda3 | [
"MIT"
] | null | null | null | packages/py-ab-testing/ABTesting/controller.py | ramon-villain/ab-testing | e8e449db3083a5c147f32c47b8f24a7dc2d6eda3 | [
"MIT"
] | null | null | null | import logging
from typing import Union, Dict
from crc32c import crc32
logger = logging.getLogger(__name__)
def get_modulo_value(experiment, user_id):
# type: (str, Union[str, int]) -> int
return crc32(str(user_id).encode(), crc32(experiment.encode())) % 100
def match_user_cohort(
experiment_config,
user_id,
user_profile
):
# type: (Dict, Union[str, int], Dict[str, str]) -> str
user_segment_num = get_modulo_value(experiment_config['name'], user_id)
allocated_cohort = 'control'
for cohort in experiment_config['cohorts']:
for force_include_key, force_include_val in cohort.get('force_include', {}).items():
if force_include_key in user_profile and user_profile[force_include_key] in force_include_val:
return cohort['name']
if allocated_cohort == 'control':
for allocation in cohort.get('allocation', []):
if allocation[0] <= user_segment_num < allocation[1]:
allocated_cohort = cohort['name']
break
return allocated_cohort
class ABTestingController(object):
def __init__(self, config, user_id, user_profile):
self.experiment_configs = {
experiment_config['name']: experiment_config
for experiment_config in config['experiments']
}
self.user_id = user_id
self.user_profile = user_profile
self.matched_cohorts = {}
def get_cohort(self, experiment_name):
# type: (str) -> str
if experiment_name not in self.matched_cohorts:
if experiment_name in self.experiment_configs:
self.matched_cohorts[experiment_name] = match_user_cohort(
self.experiment_configs[experiment_name],
self.user_id,
self.user_profile
)
else:
logger.info('unrecognized ab testing experiment name: {}'.format(experiment_name))
self.matched_cohorts[experiment_name] = 'control'
return self.matched_cohorts[experiment_name]
| 36.789474 | 106 | 0.641392 | import logging
from typing import Union, Dict
from crc32c import crc32
logger = logging.getLogger(__name__)
def get_modulo_value(experiment, user_id):
return crc32(str(user_id).encode(), crc32(experiment.encode())) % 100
def match_user_cohort(
experiment_config,
user_id,
user_profile
):
user_segment_num = get_modulo_value(experiment_config['name'], user_id)
allocated_cohort = 'control'
for cohort in experiment_config['cohorts']:
for force_include_key, force_include_val in cohort.get('force_include', {}).items():
if force_include_key in user_profile and user_profile[force_include_key] in force_include_val:
return cohort['name']
if allocated_cohort == 'control':
for allocation in cohort.get('allocation', []):
if allocation[0] <= user_segment_num < allocation[1]:
allocated_cohort = cohort['name']
break
return allocated_cohort
class ABTestingController(object):
def __init__(self, config, user_id, user_profile):
self.experiment_configs = {
experiment_config['name']: experiment_config
for experiment_config in config['experiments']
}
self.user_id = user_id
self.user_profile = user_profile
self.matched_cohorts = {}
def get_cohort(self, experiment_name):
if experiment_name not in self.matched_cohorts:
if experiment_name in self.experiment_configs:
self.matched_cohorts[experiment_name] = match_user_cohort(
self.experiment_configs[experiment_name],
self.user_id,
self.user_profile
)
else:
logger.info('unrecognized ab testing experiment name: {}'.format(experiment_name))
self.matched_cohorts[experiment_name] = 'control'
return self.matched_cohorts[experiment_name]
| true | true |
f732bf6018c59a91c6096b9f3ad73ef8d80a1a09 | 1,301 | py | Python | external_apps/docutils-snapshot/test/test_parsers/test_rst/test_directives/test_date.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | 2 | 2016-05-09T04:57:34.000Z | 2017-03-03T14:22:24.000Z | external_apps/docutils-snapshot/test/test_parsers/test_rst/test_directives/test_date.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | null | null | null | external_apps/docutils-snapshot/test/test_parsers/test_rst/test_directives/test_date.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# $Id: test_date.py 4667 2006-07-12 21:40:56Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for the misc.py "date" directive.
"""
from __init__ import DocutilsTestSupport
import time
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['date'] = [
["""\
.. |date| date::
Today's date is |date|.
""",
"""\
<document source="test data">
<substitution_definition names="date">
%s
<paragraph>
Today's date is \n\
<substitution_reference refname="date">
date
.
""" % time.strftime('%Y-%m-%d')],
["""\
.. |date| date:: %a, %d %b %Y
""",
"""\
<document source="test data">
<substitution_definition names="date">
%s
""" % time.strftime('%a, %d %b %Y')],
["""\
.. date::
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Invalid context: the "date" directive can only be used within a substitution definition.
<literal_block xml:space="preserve">
.. date::
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| 20.650794 | 100 | 0.602613 |
from __init__ import DocutilsTestSupport
import time
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['date'] = [
["""\
.. |date| date::
Today's date is |date|.
""",
"""\
<document source="test data">
<substitution_definition names="date">
%s
<paragraph>
Today's date is \n\
<substitution_reference refname="date">
date
.
""" % time.strftime('%Y-%m-%d')],
["""\
.. |date| date:: %a, %d %b %Y
""",
"""\
<document source="test data">
<substitution_definition names="date">
%s
""" % time.strftime('%a, %d %b %Y')],
["""\
.. date::
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Invalid context: the "date" directive can only be used within a substitution definition.
<literal_block xml:space="preserve">
.. date::
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| true | true |
f732bffb35f6dd8dc2be50226e4bb1b83ba91bd4 | 4,471 | py | Python | fn_sentinelone/fn_sentinelone/lib/jinja_common.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | null | null | null | fn_sentinelone/fn_sentinelone/lib/jinja_common.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | null | null | null | fn_sentinelone/fn_sentinelone/lib/jinja_common.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2022. All Rights Reserved.
import calendar
import logging
import json
import os
import time
from resilient_circuits.template_functions import render_json, environment
LOG = logging.getLogger(__name__)
class JinjaEnvironment():
def __init__(self):
# Add the timestamp-parse function to the global JINJA environment
env = environment()
env.globals.update({
"resilient_datetimeformat": jinja_resilient_datetimeformat,
"resilient_substitute": jinja_resilient_substitute,
"resilient_splitpart": jinja_resilient_splitpart
})
env.filters.update({
"resilient_datetimeformat": jinja_resilient_datetimeformat,
"resilient_substitute": jinja_resilient_substitute,
"resilient_splitpart": jinja_resilient_splitpart
})
def make_payload_from_template(self, template_override, default_template, payload):
"""convert a payload into a newformat based on a specified template
Args:
template_override ([str]): [/path/to/template.jinja]
default_template ([str]): [/path/to/template.jinja]
payload ([dict]): [data to convert]
Returns:
[dict]: [converted payload]
"""
template_data = self.get_template(template_override, default_template)
# Render the template.
rendered_payload = render_json(template_data, payload)
LOG.debug(rendered_payload)
return rendered_payload
def get_template(self, specified_template, default_template):
"""return the contents of a jinja template, either from the default or a customer specified
custom path
Args:
specified_template ([str]): [customer specified template path]
default_template ([str]): [default template location]
Returns:
[str]: [contents of template]
"""
template_file_path = specified_template
if template_file_path:
if not (os.path.exists(template_file_path) and os.path.isfile(template_file_path)):
LOG.error(u"Template file: %s doesn't exist, using default template",
template_file_path)
template_file_path = None
if not template_file_path:
# using default template
template_file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
default_template
)
LOG.debug(u"Incident template file: %s", template_file_path)
with open(template_file_path, "r") as definition:
return definition.read()
def jinja_resilient_datetimeformat(value, date_format="%Y-%m-%dT%H:%M:%S"):
"""custom jinja filter to convert UTC dates to epoch format
Args:
value ([str]): [jinja provided field value]
date_format (str, optional): [conversion format]. Defaults to "%Y-%m-%dT%H:%M:%S".
Returns:
[int]: [epoch value of datetime, in milliseconds]
"""
if not value:
return value
utc_time = time.strptime(value[:value.rfind('.')], date_format)
return calendar.timegm(utc_time)*1000
def jinja_resilient_substitute(value, json_str):
"""jinja custom filter to replace values based on a lookup dictionary
Args:
value ([str]): [original value]
json_str ([str]): [string encoded json lookup values]
Returns:
[str]: [replacement value or original value if no replacement found]
"""
replace_dict = json.loads(json_str)
if value in replace_dict:
return replace_dict[value]
# use a default value if specific match is missing
if 'DEFAULT' in replace_dict:
return replace_dict['DEFAULT']
return value
def jinja_resilient_splitpart (value, index, split_chars=' - '):
"""[split a string and return the index]
Args:
value ([str]): [string to split]
index ([int]): [index to return]
split_chars (str, optional): [split characters]. Defaults to ' - '.
Returns:
[str]: [index of string. if index is out of bounds, the original string is returned]
"""
splits = value.split(split_chars)
if len(splits) > index:
return splits[index]
else:
return value
| 34.929688 | 99 | 0.638336 |
import calendar
import logging
import json
import os
import time
from resilient_circuits.template_functions import render_json, environment
LOG = logging.getLogger(__name__)
class JinjaEnvironment():
def __init__(self):
env = environment()
env.globals.update({
"resilient_datetimeformat": jinja_resilient_datetimeformat,
"resilient_substitute": jinja_resilient_substitute,
"resilient_splitpart": jinja_resilient_splitpart
})
env.filters.update({
"resilient_datetimeformat": jinja_resilient_datetimeformat,
"resilient_substitute": jinja_resilient_substitute,
"resilient_splitpart": jinja_resilient_splitpart
})
def make_payload_from_template(self, template_override, default_template, payload):
template_data = self.get_template(template_override, default_template)
rendered_payload = render_json(template_data, payload)
LOG.debug(rendered_payload)
return rendered_payload
def get_template(self, specified_template, default_template):
template_file_path = specified_template
if template_file_path:
if not (os.path.exists(template_file_path) and os.path.isfile(template_file_path)):
LOG.error(u"Template file: %s doesn't exist, using default template",
template_file_path)
template_file_path = None
if not template_file_path:
# using default template
template_file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
default_template
)
LOG.debug(u"Incident template file: %s", template_file_path)
with open(template_file_path, "r") as definition:
return definition.read()
def jinja_resilient_datetimeformat(value, date_format="%Y-%m-%dT%H:%M:%S"):
if not value:
return value
utc_time = time.strptime(value[:value.rfind('.')], date_format)
return calendar.timegm(utc_time)*1000
def jinja_resilient_substitute(value, json_str):
replace_dict = json.loads(json_str)
if value in replace_dict:
return replace_dict[value]
# use a default value if specific match is missing
if 'DEFAULT' in replace_dict:
return replace_dict['DEFAULT']
return value
def jinja_resilient_splitpart (value, index, split_chars=' - '):
splits = value.split(split_chars)
if len(splits) > index:
return splits[index]
else:
return value
| true | true |
f732c1edd35a9e7a115cc899a6c993664cd84b79 | 2,096 | py | Python | tests/tests_geomstats/test_estimators.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 10 | 2018-01-28T17:16:44.000Z | 2022-02-27T02:42:41.000Z | tests/tests_geomstats/test_estimators.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 67 | 2018-01-05T17:15:32.000Z | 2018-05-11T18:50:30.000Z | tests/tests_geomstats/test_estimators.py | YannCabanes/geomstats | ce3f4bab6cd59c2f071371a46e336086771d0493 | [
"MIT"
] | 3 | 2021-11-12T23:57:46.000Z | 2021-12-04T10:05:42.000Z | """Template unit tests for scikit-learn estimators."""
import pytest
from sklearn.datasets import load_iris
import geomstats.backend as gs
import geomstats.tests
from geomstats.learning._template import (
TemplateClassifier,
TemplateEstimator,
TemplateTransformer,
)
ESTIMATORS = (TemplateClassifier, TemplateEstimator, TemplateTransformer)
class TestEstimators(geomstats.tests.TestCase):
_multiprocess_can_split_ = True
def setup_method(self):
self.data = load_iris(return_X_y=True)
@geomstats.tests.np_and_autograd_only
def test_template_estimator(self):
est = TemplateEstimator()
self.assertEqual(est.demo_param, "demo_param")
X, y = self.data
est.fit(X, y)
self.assertTrue(hasattr(est, "is_fitted_"))
y_pred = est.predict(X)
self.assertAllClose(y_pred, gs.ones(gs.shape(X)[0]))
@geomstats.tests.np_and_autograd_only
def test_template_transformer_error(self):
X, _ = self.data
n_samples = gs.shape(X)[0]
trans = TemplateTransformer()
trans.fit(X)
X_diff_size = gs.ones((n_samples, gs.shape(X)[1] + 1))
with pytest.raises(ValueError):
trans.transform(X_diff_size)
def test_template_transformer(self):
X, _ = self.data
trans = TemplateTransformer()
self.assertTrue(trans.demo_param == "demo")
trans.fit(X)
self.assertTrue(trans.n_features_ == X.shape[1])
X_trans = trans.transform(X)
self.assertAllClose(X_trans, gs.sqrt(X))
X_trans = trans.fit_transform(X)
self.assertAllClose(X_trans, gs.sqrt(X))
@geomstats.tests.np_autograd_and_tf_only
def test_template_classifier(self):
X, y = self.data
clf = TemplateClassifier()
self.assertTrue(clf.demo_param == "demo")
clf.fit(X, y)
self.assertTrue(hasattr(clf, "classes_"))
self.assertTrue(hasattr(clf, "X_"))
self.assertTrue(hasattr(clf, "y_"))
y_pred = clf.predict(X)
self.assertTrue(y_pred.shape == (X.shape[0],))
| 28.712329 | 73 | 0.660305 |
import pytest
from sklearn.datasets import load_iris
import geomstats.backend as gs
import geomstats.tests
from geomstats.learning._template import (
TemplateClassifier,
TemplateEstimator,
TemplateTransformer,
)
ESTIMATORS = (TemplateClassifier, TemplateEstimator, TemplateTransformer)
class TestEstimators(geomstats.tests.TestCase):
_multiprocess_can_split_ = True
def setup_method(self):
self.data = load_iris(return_X_y=True)
@geomstats.tests.np_and_autograd_only
def test_template_estimator(self):
est = TemplateEstimator()
self.assertEqual(est.demo_param, "demo_param")
X, y = self.data
est.fit(X, y)
self.assertTrue(hasattr(est, "is_fitted_"))
y_pred = est.predict(X)
self.assertAllClose(y_pred, gs.ones(gs.shape(X)[0]))
@geomstats.tests.np_and_autograd_only
def test_template_transformer_error(self):
X, _ = self.data
n_samples = gs.shape(X)[0]
trans = TemplateTransformer()
trans.fit(X)
X_diff_size = gs.ones((n_samples, gs.shape(X)[1] + 1))
with pytest.raises(ValueError):
trans.transform(X_diff_size)
def test_template_transformer(self):
X, _ = self.data
trans = TemplateTransformer()
self.assertTrue(trans.demo_param == "demo")
trans.fit(X)
self.assertTrue(trans.n_features_ == X.shape[1])
X_trans = trans.transform(X)
self.assertAllClose(X_trans, gs.sqrt(X))
X_trans = trans.fit_transform(X)
self.assertAllClose(X_trans, gs.sqrt(X))
@geomstats.tests.np_autograd_and_tf_only
def test_template_classifier(self):
X, y = self.data
clf = TemplateClassifier()
self.assertTrue(clf.demo_param == "demo")
clf.fit(X, y)
self.assertTrue(hasattr(clf, "classes_"))
self.assertTrue(hasattr(clf, "X_"))
self.assertTrue(hasattr(clf, "y_"))
y_pred = clf.predict(X)
self.assertTrue(y_pred.shape == (X.shape[0],))
| true | true |
f732c2114718cf313a270ef92c60532874848e61 | 1,001 | py | Python | src/wta/nupic/functions/__init__.py | kaist-irnlp/SparseColBERT | f0f0ed4acff5dc3c747f13315de0fe7ea50b5b70 | [
"MIT"
] | null | null | null | src/wta/nupic/functions/__init__.py | kaist-irnlp/SparseColBERT | f0f0ed4acff5dc3c747f13315de0fe7ea50b5b70 | [
"MIT"
] | null | null | null | src/wta/nupic/functions/__init__.py | kaist-irnlp/SparseColBERT | f0f0ed4acff5dc3c747f13315de0fe7ea50b5b70 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .k_winners import *
| 45.5 | 72 | 0.654346 |
from .k_winners import *
| true | true |
f732c3adce2518a59f9b1dbfa47490c062436e61 | 2,509 | py | Python | IPProxyPool/db/MongoHelper.py | zyhibook/igotolibrary | b35ff3b9b3c5c938dfa0a1b62f6d94faef47925a | [
"MIT"
] | 171 | 2018-08-01T15:05:06.000Z | 2022-03-28T04:14:54.000Z | IPProxyPool/db/MongoHelper.py | zyhibook/igotolibrary | b35ff3b9b3c5c938dfa0a1b62f6d94faef47925a | [
"MIT"
] | 19 | 2018-09-11T13:29:57.000Z | 2021-12-13T20:31:38.000Z | IPProxyPool/db/MongoHelper.py | zyhibook/igotolibrary | b35ff3b9b3c5c938dfa0a1b62f6d94faef47925a | [
"MIT"
] | 55 | 2018-08-23T01:11:37.000Z | 2022-03-26T11:31:38.000Z | import pymongo
from config import DB_CONFIG, DEFAULT_SCORE
from db.ISqlHelper import ISqlHelper
class MongoHelper(ISqlHelper):
def __init__(self):
self.client = pymongo.MongoClient(DB_CONFIG['DB_CONNECT_STRING'], connect=False)
def init_db(self):
self.db = self.client.proxy
self.proxys = self.db.proxys
def drop_db(self):
self.client.drop_database(self.db)
def insert(self, value=None):
if value:
proxy = dict(ip=value['ip'], port=value['port'], types=value['types'], protocol=value['protocol'],
country=value['country'],
area=value['area'], speed=value['speed'], score=DEFAULT_SCORE)
self.proxys.insert(proxy)
def delete(self, conditions=None):
if conditions:
self.proxys.remove(conditions)
return ('deleteNum', 'ok')
else:
return ('deleteNum', 'None')
def update(self, conditions=None, value=None):
# update({"UserName":"libing"},{"$set":{"Email":"libing@126.com","Password":"123"}})
if conditions and value:
self.proxys.update(conditions, {"$set": value})
return {'updateNum': 'ok'}
else:
return {'updateNum': 'fail'}
def select(self, count=None, conditions=None):
if count:
count = int(count)
else:
count = 0
if conditions:
conditions = dict(conditions)
if 'count' in conditions:
del conditions['count']
conditions_name = ['types', 'protocol']
for condition_name in conditions_name:
value = conditions.get(condition_name, None)
if value:
conditions[condition_name] = int(value)
else:
conditions = {}
items = self.proxys.find(conditions, limit=count).sort(
[("speed", pymongo.ASCENDING), ("score", pymongo.DESCENDING)])
results = []
for item in items:
result = (item['ip'], item['port'], item['score'])
results.append(result)
return results
if __name__ == '__main__':
# from db.MongoHelper import MongoHelper as SqlHelper
# sqlhelper = SqlHelper()
# sqlhelper.init_db()
# # print sqlhelper.select(None,{'types':u'1'})
# items= sqlhelper.proxys.find({'types':0})
# for item in items:
# print item
# # # print sqlhelper.select(None,{'types':u'0'})
pass
| 33.453333 | 110 | 0.570745 | import pymongo
from config import DB_CONFIG, DEFAULT_SCORE
from db.ISqlHelper import ISqlHelper
class MongoHelper(ISqlHelper):
    """MongoDB-backed proxy store implementing the ISqlHelper interface."""
    def __init__(self):
        # connect=False: pymongo defers the actual connection until first use.
        self.client = pymongo.MongoClient(DB_CONFIG['DB_CONNECT_STRING'], connect=False)
    def init_db(self):
        """Bind the 'proxy' database and its 'proxys' collection."""
        self.db = self.client.proxy
        self.proxys = self.db.proxys
    def drop_db(self):
        """Drop the whole proxy database."""
        self.client.drop_database(self.db)
    def insert(self, value=None):
        """Insert one proxy document; new entries start at DEFAULT_SCORE."""
        if value:
            proxy = dict(ip=value['ip'], port=value['port'], types=value['types'], protocol=value['protocol'],
                         country=value['country'],
                         area=value['area'], speed=value['speed'], score=DEFAULT_SCORE)
            self.proxys.insert(proxy)
    def delete(self, conditions=None):
        """Remove documents matching ``conditions``; returns a status tuple."""
        if conditions:
            self.proxys.remove(conditions)
            return ('deleteNum', 'ok')
        else:
            return ('deleteNum', 'None')
    def update(self, conditions=None, value=None):
        """Apply ``{"$set": value}`` to documents matching ``conditions``."""
        if conditions and value:
            self.proxys.update(conditions, {"$set": value})
            return {'updateNum': 'ok'}
        else:
            return {'updateNum': 'fail'}
    def select(self, count=None, conditions=None):
        """Return up to ``count`` (ip, port, score) tuples, fastest/best first.

        A 'count' key inside ``conditions`` is treated as a paging hint and
        stripped; 'types'/'protocol' values are coerced to int for the query.
        """
        if count:
            count = int(count)
        else:
            count = 0  # pymongo: limit=0 means no limit
        if conditions:
            conditions = dict(conditions)
            if 'count' in conditions:
                del conditions['count']
            conditions_name = ['types', 'protocol']
            for condition_name in conditions_name:
                value = conditions.get(condition_name, None)
                if value:
                    conditions[condition_name] = int(value)
        else:
            conditions = {}
        items = self.proxys.find(conditions, limit=count).sort(
            [("speed", pymongo.ASCENDING), ("score", pymongo.DESCENDING)])
        results = []
        for item in items:
            result = (item['ip'], item['port'], item['score'])
            results.append(result)
        return results
if __name__ == '__main__':
| true | true |
f732c465ee2c93c9054318c972450ddfd9dabd88 | 140 | py | Python | HanderCode/aidaiwangApp/aidaiwangApp/RealName_Auth_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/RealName_Auth_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/RealName_Auth_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'aidai_TEC_QA'
# -*- date:'2017/8/1 0001' -*-
def start_to_realnameauth():
    """Entry point for the real-name authentication flow (placeholder)."""
    message = u'realname auth'
    print(message)
__author__ = 'aidai_TEC_QA'
def start_to_realnameauth():
    """Placeholder entry point for the real-name authentication flow."""
    print(u'realname auth')
f732c46e706e04c5e83092616d66d56c27e1cdc3 | 1,027 | py | Python | test/testing.py | MuhammadEzzatHBK/CyclopeptideSequencing | cd07045169758478b4845a54d5710bd329a836ca | [
"CC0-1.0"
] | null | null | null | test/testing.py | MuhammadEzzatHBK/CyclopeptideSequencing | cd07045169758478b4845a54d5710bd329a836ca | [
"CC0-1.0"
] | null | null | null | test/testing.py | MuhammadEzzatHBK/CyclopeptideSequencing | cd07045169758478b4845a54d5710bd329a836ca | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun May 16 09:31:53 2021
@author: Muhammad Ayman Ezzat
Youmna Magdy Abdullah
"""
from algorithms import branch_and_bound
import timeit
import pandas as pd
''' Accuracy Testing '''
# Reference spectrum from the lab hand-out (subpeptide masses of a cyclic
# pentapeptide).
LabSpectrum = [97, 97, 99, 101, 103, 196, 198, 198, 200, 202, 295,
               297, 299, 299, 301, 394, 396, 398, 400, 400, 497]
# The ten rotations/reflections the lab sheet lists as correct answers,
# sorted so they compare directly against our (sorted) output.
LabResults = sorted(['PVCPT', 'PTPVC', 'PTPCV', 'PCVPT', 'VPTPC',
                     'VCPTP', 'TPVCP', 'TPCVP', 'CPTPV', 'CVPTP'])
AssignmentResults = branch_and_bound(LabSpectrum)
print('Input: ', LabSpectrum)
print('Provided Lab Results: ', *LabResults)
print('Our Assignment Results: ', *AssignmentResults)
print('Are they identical? ', LabResults == AssignmentResults)
''' Performance Testing '''
# Time 500 runs of branch_and_bound and dump per-run durations to CSV.
# BUG FIX: timeit.timeit() with no arguments times a dummy 'pass' statement
# and returns meaningless values (which is why abs() was needed);
# timeit.default_timer() is a real monotonic clock.
time_taken = []
for i in range(500):
    start = timeit.default_timer()
    branch_and_bound(LabSpectrum)
    end = timeit.default_timer()
    time_taken.append(end - start)
data = {'duration': time_taken}
DataFrame = pd.DataFrame(data)
DataFrame.to_csv('test_data.csv')
from algorithms import branch_and_bound
import timeit
import pandas as pd
LabSpectrum = [97, 97, 99, 101, 103, 196, 198, 198, 200, 202, 295,
297, 299, 299, 301, 394, 396, 398, 400, 400, 497]
LabResults = sorted(['PVCPT', 'PTPVC', 'PTPCV', 'PCVPT', 'VPTPC',
'VCPTP', 'TPVCP', 'TPCVP', 'CPTPV', 'CVPTP'])
AssignmentResults = branch_and_bound(LabSpectrum)
print('Input: ', LabSpectrum)
print('Provided Lab Results: ', *LabResults)
print('Our Assignment Results: ', *AssignmentResults)
print('Are they identical? ', LabResults == AssignmentResults)
# Performance testing: timeit.timeit() would time a dummy statement, not the
# call under test; default_timer() gives a real monotonic timestamp, so the
# abs() workaround is no longer needed.
time_taken = []
for i in range(500):
    start = timeit.default_timer()
    branch_and_bound(LabSpectrum)
    end = timeit.default_timer()
    time_taken.append(end - start)
data = {'duration': time_taken}
DataFrame = pd.DataFrame(data)
DataFrame.to_csv('test_data.csv')
f732c4f61e4dffe5db7fa1534fa658cd243adfea | 11,214 | py | Python | utils/metrics.py | Vizards8/pytorch-spine-segmentation | 588b7e7b09c5a370e337e2f12614df69d177ccaa | [
"MIT"
] | null | null | null | utils/metrics.py | Vizards8/pytorch-spine-segmentation | 588b7e7b09c5a370e337e2f12614df69d177ccaa | [
"MIT"
] | null | null | null | utils/metrics.py | Vizards8/pytorch-spine-segmentation | 588b7e7b09c5a370e337e2f12614df69d177ccaa | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import math
import scipy.spatial
import scipy.ndimage.morphology
"""
True Positive (真正, TP)预测为正的正样本
True Negative(真负 , TN)预测为负的负样本
False Positive (假正, FP)预测为正的负样本
False Negative(假负 , FN)预测为负的正样本
"""
def metrics(predict, label, out_class):
    """Compute segmentation metrics over one-hot batch tensors.

    Both ``predict`` and ``label`` are [BS, class_num, H, W]; class 0
    (background) is skipped.

    Returns:
        (mean IoU, per-class Dice list, mean pixel accuracy,
         mean false-positive rate, mean false-negative rate)
    """
    IOU_list = []
    Dice_list = []
    false_positive_rate_list = []
    false_negative_rate_list = []
    acc = []
    for i in range(1, out_class):
        # Per-class binary masks: slice channel i out of both tensors.
        Dice_list.append(diceCoeffv2(predict[:, i, :, :], label[:, i, :, :]))
        IOU_list.append(IOU(predict[:, i, :, :], label[:, i, :, :]))
        FP_FN_rate_list = FP_FN_rate(predict[:, i, :, :], label[:, i, :, :])
        false_positive_rate_list.append(FP_FN_rate_list[0])
        false_negative_rate_list.append(FP_FN_rate_list[1])
        acc.append(pixel_accuracy(predict[:, i, :, :], label[:, i, :, :]))
    # Dice is deliberately returned per-class (not averaged) for reporting.
    return mean(IOU_list), Dice_list, mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)
def mean(list):
    """Return the arithmetic mean of a sequence, or 0 if it is empty."""
    n = len(list)
    if n == 0:
        return 0
    return sum(list) / n
def mean_class(list):
    """Average each per-class sub-list of tensors; 0 (with a warning) for empty ones."""
    averages = []
    for per_class in list:
        if len(per_class):
            averages.append(mean(per_class).item())
        else:
            print('Warning class missing!')
            averages.append(0)
    return averages
def batch_pix_accuracy(predict, target):
    """Batch pixel-accuracy counts.

    Args:
        predict: 4D score tensor [BS, C, H, W]
        target: 3D label tensor [BS, H, W]

    Returns:
        (correct_pixel_count, labeled_pixel_count)
    """
    pred_classes = torch.max(predict, 1)[1]
    # Shift labels by one so every valid pixel compares as > 0.
    pred_np = pred_classes.cpu().numpy() + 1
    target_np = target.cpu().numpy() + 1
    labeled_mask = target_np > 0
    pixel_labeled = np.sum(labeled_mask)
    pixel_correct = np.sum((pred_np == target_np) * labeled_mask)
    assert pixel_correct <= pixel_labeled, \
        "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
    """Per-class intersection/union histograms for a batch.

    Args:
        predict: 4D score tensor [BS, C, H, W]
        target: 3D label tensor [BS, H, W]
        nclass: number of categories (int)

    Returns:
        (area_intersection, area_union) as length-nclass numpy arrays
    """
    pred_classes = torch.max(predict, 1)[1]
    # Shift everything by one so label 0 can serve as the "ignore" value.
    pred_np = pred_classes.cpu().numpy() + 1
    target_np = target.cpu().numpy() + 1
    pred_np = pred_np * (target_np > 0).astype(pred_np.dtype)
    inter = pred_np * (pred_np == target_np)
    hist_range = (1, nclass)
    area_inter, _ = np.histogram(inter, bins=nclass, range=hist_range)
    area_pred, _ = np.histogram(pred_np, bins=nclass, range=hist_range)
    area_lab, _ = np.histogram(target_np, bins=nclass, range=hist_range)
    area_union = area_pred + area_lab - area_inter
    assert (area_inter <= area_union).all(), \
        "Intersection area should be smaller than Union area"
    return area_inter, area_union
def intersection_and_union(im_pred, im_lab, num_class):
    """Intersection/union histograms for one prediction/label pair.

    Class 0 is treated as unlabeled and excluded from the histograms.
    """
    im_pred = np.asarray(im_pred)
    im_lab = np.asarray(im_lab)
    # Zero out predictions at unlabeled (gt == 0) pixels.
    im_pred = im_pred * (im_lab > 0)
    inter = im_pred * (im_pred == im_lab)
    bins = num_class - 1
    hist_range = (1, num_class - 1)
    area_inter, _ = np.histogram(inter, bins=bins, range=hist_range)
    area_pred, _ = np.histogram(im_pred, bins=bins, range=hist_range)
    area_lab, _ = np.histogram(im_lab, bins=bins, range=hist_range)
    return area_inter, area_pred + area_lab - area_inter
def diceCoeff(pred, gt, smooth=1e-5, ):
    r"""Soft Dice: 2 * |pred ∩ gt| / (|pred| + |gt|), averaged over the batch."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    overlap = 2 * (p * g).sum(1)
    total = p.sum(1) + g.sum(1)
    per_sample = (overlap + smooth) / (total + smooth)
    return per_sample.sum() / batch
def diceFlat(pred, gt, smooth=1e-5):
    """Dice score for a single (un-batched) mask pair, as a Python float."""
    inter = ((pred * gt).sum()).item()
    total = (pred.sum() + gt.sum()).item()
    return (2 * inter + smooth) / (total + smooth)
def diceCoeffv2(pred, gt, eps=1e-5):
    r"""Dice via confusion counts: 2*tp / (2*tp + fp + fn), batch-averaged."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    tp = torch.sum(g * p, dim=1)
    fp = torch.sum(p, dim=1) - tp
    fn = torch.sum(g, dim=1) - tp
    per_sample = (2 * tp + eps) / (2 * tp + fp + fn + eps)
    return per_sample.sum() / batch
def IOU(pred, gt, eps=1e-5):
    r"""IoU on hard (non-zero) masks: tp / (tp + fp + fn), batch-averaged."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos, dim=1)
    fp = torch.sum(pred_pos * ~gt_pos, dim=1)
    fn = torch.sum(~pred_pos * gt_pos, dim=1)
    per_sample = (tp + eps) / (tp + fp + fn + eps)
    return per_sample.sum() / batch
def FP_FN_rate(pred, gt, eps=1e-5):
    r"""Batch-averaged FPR = fp/(fp+tn) and FNR = fn/(fn+tp) for binary masks."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos, dim=1)
    fp = torch.sum(pred_pos * ~gt_pos, dim=1)
    tn = torch.sum(~pred_pos * ~gt_pos, dim=1)
    fn = torch.sum(~pred_pos * gt_pos, dim=1)
    fpr = fp / (fp + tn + eps)
    fnr = fn / (fn + tp + eps)
    return fpr.sum() / batch, fnr.sum() / batch
def pixel_accuracy(pred, gt, eps=1e-5):
    """Recall-style pixel accuracy: TP / (TP + FN), batch-averaged.

    (Commented-out per-slice debug printing removed.)
    """
    N = gt.size(0)
    pred_flat = pred.view(N, -1)
    gt_flat = gt.view(N, -1)
    tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
    fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
    # Cast to float so integer division cannot truncate the score.
    score = (tp.float() + eps) / ((tp + fn).float() + eps)
    return score.sum() / N
def diceCoeffv3(pred, gt, eps=1e-5):
    r"""Dice on hard (non-zero) masks: 2*tp / (2*tp + fp + fn), batch-averaged."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos, dim=1)
    fp = torch.sum(pred_pos * ~gt_pos, dim=1)
    fn = torch.sum(~pred_pos * gt_pos, dim=1)
    # Explicit float cast so long/long division cannot truncate to 0.
    per_sample = (2 * tp + eps).float() / (2 * tp + fp + fn + eps).float()
    return per_sample.sum() / batch
def jaccard(pred, gt, eps=1e-5):
    """Jaccard index pooled over the whole batch: TP / (TP + FP + FN).

    Note: counts are summed over ALL samples, then the scalar score is still
    divided by the batch size (kept for parity with the original).
    """
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos)
    fp = torch.sum(pred_pos * ~gt_pos)
    fn = torch.sum(~pred_pos * gt_pos)
    score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)
    return score.sum() / batch
def jaccardFlat(pred, gt, eps=1e-5):
    """Jaccard index for a single (un-batched) mask pair."""
    p = pred.squeeze()
    g = gt.squeeze()
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos)
    fp = torch.sum(pred_pos * ~gt_pos)
    fn = torch.sum(~pred_pos * gt_pos)
    return (tp.float() + eps) / ((tp + fp + fn).float() + eps)
def jaccardv2(pred, gt, eps=1e-5):
    """Soft Jaccard from products: TP / (TP + FP + FN), batch-averaged."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    tp = torch.sum(g * p, dim=1)
    fp = torch.sum(p, dim=1) - tp
    fn = torch.sum(g, dim=1) - tp
    per_sample = (tp + eps).float() / (tp + fp + fn + eps).float()
    return per_sample.sum() / batch
def tversky(pred, gt, eps=1e-5, alpha=0.7):
    """Tversky index: TP / (TP + (1-alpha)*FP + alpha*FN), batch-averaged.

    alpha > 0.5 penalizes false negatives more than false positives.
    """
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    tp = torch.sum(g * p, dim=1)
    fp = torch.sum(p, dim=1) - tp
    fn = torch.sum(g, dim=1) - tp
    per_sample = (tp + eps) / (tp + (1 - alpha) * fp + alpha * fn + eps)
    return per_sample.sum() / batch
def accuracy(pred, gt, eps=1e-5):
    """Overall accuracy: (TP + TN) / (TP + FP + FN + TN), batch-averaged."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos, dim=1)
    fp = torch.sum(pred_pos * ~gt_pos, dim=1)
    tn = torch.sum(~pred_pos * ~gt_pos, dim=1)
    fn = torch.sum(~pred_pos * gt_pos, dim=1)
    per_sample = ((tp + tn).float() + eps) / ((tp + fp + tn + fn).float() + eps)
    return per_sample.sum() / batch
def precision(pred, gt, eps=1e-5):
    """Precision pooled over the batch: TP / (TP + FP)."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    tp = torch.sum(pred_pos * gt_pos)
    fp = torch.sum(pred_pos * ~gt_pos)
    score = (tp.float() + eps) / ((tp + fp).float() + eps)
    return score.sum() / batch
def specificity(pred, gt, eps=1e-5):
    """Specificity pooled over the batch: TN / (TN + FP)."""
    batch = gt.size(0)
    p = pred.view(batch, -1)
    g = gt.view(batch, -1)
    pred_pos = p != 0
    gt_pos = g != 0
    fp = torch.sum(pred_pos * ~gt_pos)
    tn = torch.sum(~pred_pos * ~gt_pos)
    return (tn.float() + eps) / ((fp + tn).float() + eps)
if __name__ == '__main__':
    # Manual smoke test: two hand-built 3-class one-hot masks.
    # shape = torch.Size([2, 3, 4, 4])
    # Simulates batch_size = 2 (only one sample is filled in below).
    '''
    1 0 0= bladder
    0 1 0 = tumor
    0 0 1= background
    '''
    pred = torch.Tensor([[
        [[0, 1, 0, 0],
         [1, 0, 0, 1],
         [1, 0, 0, 1],
         [0, 1, 1, 0]],
        [[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 1, 1, 0],
         [0, 0, 0, 0]],
        [[1, 0, 1, 1],
         [0, 1, 1, 0],
         [0, 0, 0, 0],
         [1, 0, 0, 1]]]
    ])
    gt = torch.Tensor([[
        [[0, 1, 1, 0],
         [1, 0, 0, 1],
         [1, 0, 0, 1],
         [0, 1, 1, 0]],
        [[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 1, 1, 0],
         [0, 0, 0, 0]],
        [[1, 0, 0, 1],
         [0, 1, 1, 0],
         [0, 0, 0, 0],
         [1, 0, 0, 1]]]
    ])
    # Compare three metric variants on the first (bladder) channel.
    dice1 = diceCoeff(pred[:, 0:1, :], gt[:, 0:1, :])
    dice2 = jaccard(pred[:, 0:1, :], gt[:, 0:1, :])
    dice3 = diceCoeffv3(pred[:, 0:1, :], gt[:, 0:1, :])
    print(dice1, dice2, dice3)
| 29.588391 | 152 | 0.537364 | import torch
import torch.nn as nn
import numpy as np
import math
import scipy.spatial
import scipy.ndimage.morphology
def metrics(predict, label, out_class):
    """Compute per-class segmentation metrics for one-hot [BS, C, H, W] tensors.

    Class 0 (background) is skipped. Returns (mean IoU, per-class Dice list,
    mean pixel accuracy, mean FP rate, mean FN rate).
    """
    IOU_list = []
    Dice_list = []
    false_positive_rate_list = []
    false_negative_rate_list = []
    acc = []
    for i in range(1, out_class):
        N = label.size(0)  # NOTE(review): unused
        Dice_list.append(diceCoeffv2(predict[:, i, :, :], label[:, i, :, :]))
        IOU_list.append(IOU(predict[:, i, :, :], label[:, i, :, :]))
        FP_FN_rate_list = FP_FN_rate(predict[:, i, :, :], label[:, i, :, :])
        false_positive_rate_list.append(FP_FN_rate_list[0])
        false_negative_rate_list.append(FP_FN_rate_list[1])
        acc.append(pixel_accuracy(predict[:, i, :, :], label[:, i, :, :]))
    return mean(IOU_list), Dice_list, mean(acc), mean(false_positive_rate_list), mean(false_negative_rate_list)
def mean(list):
    """Arithmetic mean of a sequence; returns 0 for an empty sequence."""
    if not len(list):
        return 0
    return sum(list) / len(list)
def mean_class(list):
res = []
for i in list:
if not len(i):
print('Warning class missing!')
res.append(0)
else:
res.append(mean(i).item())
return res
def batch_pix_accuracy(predict, target):
_, predict = torch.max(predict, 1)
predict = predict.cpu().numpy() + 1
target = target.cpu().numpy() + 1
pixel_labeled = np.sum(target > 0)
pixel_correct = np.sum((predict == target) * (target > 0))
assert pixel_correct <= pixel_labeled, \
"Correct area should be smaller than Labeled"
return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
_, predict = torch.max(predict, 1)
mini = 1
maxi = nclass
nbins = nclass
predict = predict.cpu().numpy() + 1
target = target.cpu().numpy() + 1
predict = predict * (target > 0).astype(predict.dtype)
intersection = predict * (predict == target)
area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))
area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))
area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))
area_union = area_pred + area_lab - area_inter
assert (area_inter <= area_union).all(), \
"Intersection area should be smaller than Union area"
return area_inter, area_union
def intersection_and_union(im_pred, im_lab, num_class):
im_pred = np.asarray(im_pred)
im_lab = np.asarray(im_lab)
im_pred = im_pred * (im_lab > 0)
intersection = im_pred * (im_pred == im_lab)
area_inter, _ = np.histogram(intersection, bins=num_class - 1,
range=(1, num_class - 1))
area_pred, _ = np.histogram(im_pred, bins=num_class - 1,
range=(1, num_class - 1))
area_lab, _ = np.histogram(im_lab, bins=num_class - 1,
range=(1, num_class - 1))
area_union = area_pred + area_lab - area_inter
return area_inter, area_union
def diceCoeff(pred, gt, smooth=1e-5, ):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
intersection = (pred_flat * gt_flat).sum(1)
unionset = pred_flat.sum(1) + gt_flat.sum(1)
score = (2 * intersection + smooth) / (unionset + smooth)
return score.sum() / N
def diceFlat(pred, gt, smooth=1e-5):
intersection = ((pred * gt).sum()).item()
unionset = (pred.sum() + gt.sum()).item()
score = (2 * intersection + smooth) / (unionset + smooth)
return score
def diceCoeffv2(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (2 * tp + eps) / (2 * tp + fp + fn + eps)
return score.sum() / N
def IOU(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = (tp + eps) / (tp + fp + fn + eps)
return score.sum() / N
def FP_FN_rate(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
false_positive_rate = fp / (fp + tn + eps)
false_negtive_rate = fn / (fn + tp + eps)
return false_positive_rate.sum() / N, false_negtive_rate.sum() / N
def pixel_accuracy(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = (tp.float() + eps) / ((tp + fn).float() + eps)
return score.sum() / N
def diceCoeffv3(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = (2 * tp + eps).float() / (2 * tp + fp + fn + eps).float()
return score.sum() / N
def jaccard(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
fn = torch.sum((pred_flat == 0) * (gt_flat != 0))
score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)
return score.sum() / N
def jaccardFlat(pred, gt, eps=1e-5):
pred_flat = pred.squeeze()
gt_flat = gt.squeeze()
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
fn = torch.sum((pred_flat == 0) * (gt_flat != 0))
score = (tp.float() + eps) / ((tp + fp + fn).float() + eps)
return score
def jaccardv2(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (tp + eps).float() / (tp + fp + fn + eps).float()
return score.sum() / N
def tversky(pred, gt, eps=1e-5, alpha=0.7):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum(gt_flat * pred_flat, dim=1)
fp = torch.sum(pred_flat, dim=1) - tp
fn = torch.sum(gt_flat, dim=1) - tp
score = (tp + eps) / (tp + (1 - alpha) * fp + alpha * fn + eps)
return score.sum() / N
def accuracy(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0), dim=1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0), dim=1)
tn = torch.sum((pred_flat == 0) * (gt_flat == 0), dim=1)
fn = torch.sum((pred_flat == 0) * (gt_flat != 0), dim=1)
score = ((tp + tn).float() + eps) / ((tp + fp + tn + fn).float() + eps)
return score.sum() / N
def precision(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
tp = torch.sum((pred_flat != 0) * (gt_flat != 0))
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
score = (tp.float() + eps) / ((tp + fp).float() + eps)
return score.sum() / N
def specificity(pred, gt, eps=1e-5):
N = gt.size(0)
pred_flat = pred.view(N, -1)
gt_flat = gt.view(N, -1)
fp = torch.sum((pred_flat != 0) * (gt_flat == 0))
tn = torch.sum((pred_flat == 0) * (gt_flat == 0))
score = (tn.float() + eps) / ((fp + tn).float() + eps)
return score.sum() / N
if __name__ == '__main__':
pred = torch.Tensor([[
[[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]]]
])
gt = torch.Tensor([[
[[0, 1, 1, 0],
[1, 0, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
[[1, 0, 0, 1],
[0, 1, 1, 0],
[0, 0, 0, 0],
[1, 0, 0, 1]]]
])
dice1 = diceCoeff(pred[:, 0:1, :], gt[:, 0:1, :])
dice2 = jaccard(pred[:, 0:1, :], gt[:, 0:1, :])
dice3 = diceCoeffv3(pred[:, 0:1, :], gt[:, 0:1, :])
print(dice1, dice2, dice3)
| true | true |
f732c55636971d95bc30b141fcff95a039ed75ef | 11,896 | py | Python | src/MML-CG/train.py | AIM3-RUC/VideoIC | ea324938e839a679324f42161d195f5bef3db26f | [
"MIT"
] | 4 | 2021-03-24T12:30:46.000Z | 2021-12-26T02:57:37.000Z | src/MML-CG/train.py | AIM3-RUC/VideoIC | ea324938e839a679324f42161d195f5bef3db26f | [
"MIT"
] | 2 | 2020-10-19T02:53:32.000Z | 2021-05-10T15:03:42.000Z | src/MML-CG/train.py | AIM3-RUC/VideoIC | ea324938e839a679324f42161d195f5bef3db26f | [
"MIT"
] | 1 | 2021-03-06T06:38:34.000Z | 2021-03-06T06:38:34.000Z | '''
Re-organize the MMIG model
2021-09-20
'''
import os
import sys
import time
import json
import logging
import argparse
import torch
import torch.optim as Optim
from torch.autograd import Variable
import utils
import modules
import dataset
import metrics
# set gpu
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
parser = argparse.ArgumentParser(description='train.py')
# set model parameters
parser.add_argument('-n_emb', type=int, default=512, help='Embedding size')
parser.add_argument('-n_hidden', type=int, default=512, help='Hidden size')
parser.add_argument('-n_head', type=int, default=8, help='Number of head')
parser.add_argument('-n_block', type=int, default=6, help="Number of block")
parser.add_argument('-max_len', type=int, default=20, help="Limited length for text")
parser.add_argument('-time_range', type=int, default=5, help='Time range')
parser.add_argument('-max_cnum', type=int, default=15, help="Max comments each second")
# FIX: help text typo "Bean size" -> "Beam size".
parser.add_argument('-beam_size', type=int, default=1, help="Beam size")  # 1 means greedy search, which is the same with our paper implement
# training setting
parser.add_argument('-batch_size', type=int, default=32, help='Batch size')
parser.add_argument('-epoch', type=int, default=100, help='Number of epoch')
parser.add_argument('-dropout', type=float, default=0.2, help='Dropout rate')
parser.add_argument('-lr', type=float, default=1e-3, help="Learning rate")
# FIX: help string was a copy-paste of "Learning rate".
parser.add_argument('-weight_decay', type=float, default=0.001, help="Weight decay coefficient")
parser.add_argument('-early_stop', type=float, default=20, help="Early Stop")
# data path
parser.add_argument('-data_path', type=str, default=None, help='dict and image path')
parser.add_argument('-out_path', type=str, default=None, help='out path')
parser.add_argument('-outfile', type=str, default='out.json', help='outfile for generation')
parser.add_argument('-restore', type=str, default=None, help="Restoring model path")
parser.add_argument('-mode', type=str, default=None, help="train | generate | ranking")
args = parser.parse_args()
# set random seed (fixed so runs are reproducible)
torch.manual_seed(116)
torch.cuda.manual_seed(116)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# log file: training runs write a fresh 'log' file under the output directory
if args.mode == 'train':
    if not os.path.exists(args.out_path):
        os.mkdir(args.out_path)
    logger.addHandler(logging.FileHandler(os.path.join(args.out_path, 'log'), "w"))
# load img — pre-extracted visual features; exact layout is defined by utils.load_images
images = utils.load_images(args.data_path)
# load vocabs — token -> id map and its reverse
vocabs, rev_vocabs = utils.load_vocabs(args.data_path)
#logger.info('Load vocabs file ' + str(len(vocabs)))
def get_dataset(data_path, images, is_train, set_name=None):
    """Build a dataset.Dataset wired to the module-level vocabs/args.

    FIX: ``set_name`` now defaults to None. train() calls this helper without
    a set_name argument, which previously raised TypeError; test code passes
    'test' explicitly and is unaffected.
    """
    return dataset.Dataset(data_path = data_path,
                           vocabs = vocabs,
                           rev_vocabs=rev_vocabs,
                           images = images,
                           left_time_range = args.time_range,
                           right_time_range = args.time_range,
                           max_len = args.max_len,
                           max_cnum = args.max_cnum,
                           is_train = is_train,
                           set_name = set_name)
def get_dataloader(dataset, batch_size, is_train):
    """Wrap a dataset in a DataLoader; shuffle only during training."""
    loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=batch_size, shuffle=is_train)
    return loader
def save_model(path, model):
    """Serialize the model's state_dict to ``path``."""
    torch.save(model.state_dict(), path)
def train():
    """Full training loop: fit the model, evaluate each epoch, checkpoint the
    best model, and early-stop after ``args.early_stop`` non-improving epochs.
    """
    # load dataset
    # NOTE(review): get_dataset is declared with a required set_name parameter
    # but none is passed here — verify the helper provides a default.
    train_set = get_dataset(data_path = os.path.join(args.data_path, 'train.json'),
                            images = images,
                            is_train = True)
    valid_set = get_dataset(data_path = os.path.join(args.data_path, 'dev.json'),
                            images = images,
                            is_train = False)
    train_batch = get_dataloader(dataset = train_set,
                                 batch_size = args.batch_size,
                                 is_train = True)
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    # Optionally resume from a saved state_dict.
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        model.load_state_dict(model_dict)
    model.cuda()
    # Only optimize trainable parameters (frozen ones are filtered out).
    optim = Optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr, weight_decay=args.weight_decay)
    best_score = -100000
    early_stop_count = 0
    for i in range(args.epoch):
        model.train()
        report_loss, start_time, n_batches = 0, time.time(), 0
        for batch in train_batch:
            model.zero_grad()
            V, S, Y = batch
            # V: video feature
            V = Variable(V).cuda()
            # S: Surrounding comments
            S = Variable(S).cuda()
            # Y: Ground truth
            Y = Variable(Y).cuda()
            # Model returns per-GPU losses; sum for backward, mean for reporting.
            multi_gpu_loss = model(V, S, Y)
            loss = torch.sum(multi_gpu_loss)
            loss.backward()
            optim.step()
            report_loss += torch.mean(multi_gpu_loss).item()
            n_batches += 1
        # report loss
        print('\nEpoch: %d, report_loss: %.3f, time: %.2f'
              % (i+1, report_loss / n_batches, time.time() - start_time))
        logger.info('\nEpoch '+str(i) + ', report_loss: '+str(report_loss/n_batches) + ' , time: ' + str(time.time() - start_time))
        # eval — returns negative validation loss, so higher is better.
        score = eval(model, valid_set)
        if score > best_score:
            best_score = score
            print('Best score ', best_score)
            save_model(os.path.join(args.out_path, 'best_checkpoint.pt'), model)
            logger.info('Evaluation score ' + str(score) + ', Best score ' + str(best_score))
            early_stop_count = 0
        else:
            early_stop_count += 1
            save_model(os.path.join(args.out_path, 'checkpoint.pt'), model)
            print('Evaluation score ', score, '. Best score ', best_score, '. Early stop count ', early_stop_count)
            if early_stop_count == args.early_stop:
                sys.exit()
    return 0
def eval(model, valid_set):
    """Average the model loss over the validation set.

    Returns the NEGATED mean loss so that a larger value means a better
    model (train() maximizes the score).
    NOTE(review): this shadows the builtin ``eval`` within the module.
    """
    print('Start Evaluation ... ')
    start_time = time.time()
    model.eval()
    valid_batch = get_dataloader(valid_set, args.batch_size, is_train=False)
    loss = 0
    total_batch = 0
    with torch.no_grad():
        for batch in valid_batch:
            V, S, Y = batch
            V = Variable(V).cuda()
            S = Variable(S).cuda()
            Y = Variable(Y).cuda()
            loss += torch.mean(model(V, S, Y)).item()
            total_batch += 1
    loss = loss / total_batch
    print('Loss: ', loss)
    print("evaluting time:", time.time() - start_time)
    return -loss
def test_generation():
    """Generate one comment per test sample and dump them to ``args.outfile``
    as JSON lines. Requires ``-restore`` to point at a trained checkpoint.
    """
    # build model
    test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),
                           images = images,
                           is_train = False,
                           set_name = 'test')
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        # Strip DataParallel's 'module.' prefix so the keys match.
        model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})
    else:
        print('Error! Fail to load model for test mode')
        sys.exit()
    model.cuda()
    model.eval()
    with torch.no_grad():
        with open(args.outfile, 'w') as fout:
            for i in range(len(test_set)):
                data = test_set.get_data(i)
                V = data['video_feature']
                S = data['context_feature']
                V = Variable(V).cuda()
                S = Variable(S).cuda()
                comment_ids = model.generate(V, S, BOS_token=vocabs['<BOS>'], EOS_token=vocabs['<EOS>'], beam_size=args.beam_size).data.tolist()
                # Only the top beam is kept.
                comment = transform(comment_ids[0])
                # FIX: removed leftover debug loop that printed every dict key
                # of every sample ("for key in data: print(key)").
                sample = {'video_time': data['video_time'],
                          'context': data['context'],
                          'comment': data['comment'],
                          'candidate': data['candidate'],
                          'generation': comment}
                term = json.dumps(sample, ensure_ascii=False)
                fout.write(str(term)+'\n')
def transform(ids):
    """Map token ids to words, skipping <BOS> and stopping at the first <EOS>."""
    bos = vocabs['<BOS>']
    eos = vocabs['<EOS>']
    words = []
    for token_id in ids:
        if token_id == bos:
            continue
        if token_id == eos:
            break
        words.append(rev_vocabs[token_id])
    return words
def test_ranking():
    """Rank the candidate comments for every test sample and report
    Recall@{1,5,10}, mean rank, and mean reciprocal rank.
    Requires ``-restore`` to point at a trained checkpoint.
    """
    # build model
    test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),
                           images = images,
                           is_train = False,
                           set_name = 'test')
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        # Strip DataParallel's 'module.' prefix so the keys match.
        model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})
    else:
        print('Error! Fail to load model for test mode')
        sys.exit()
    model.cuda()
    model.eval()
    predictions, references = [], []
    with torch.no_grad():
        for i in range(len(test_set)):
            data = test_set.get_data(i)
            V = Variable(data['video_feature']).cuda()
            S = Variable(data['context_feature']).cuda()
            C = Variable(torch.stack(data['candidate_feature'])).cuda()
            # model.ranking returns candidate indices ordered best-first.
            comment_ids = model.ranking(V, S, C).data
            candidate = []
            comments = list(data['candidate'].keys())
            for id in comment_ids:  # NOTE(review): 'id' shadows the builtin
                candidate.append(comments[id])
            predictions.append(candidate)
            references.append(data['candidate'])
    recall_1 = metrics.recall(predictions, references, 1)
    recall_5 = metrics.recall(predictions, references, 5)
    recall_10 = metrics.recall(predictions, references, 10)
    mr = metrics.mean_rank(predictions, references)
    mrr = metrics.mean_reciprocal_rank(predictions, references)
    print('Report ranking result')
    print('Recall 1: ', recall_1)
    print('Recall 5: ', recall_5)
    print('Recall 10: ', recall_10)
    print('MR: ', mr)
    print('MRR: ', mrr)
if __name__ == '__main__':
    # Dispatch on the requested run mode.
    dispatch = {
        'train': ('-----------Train Mode-----------', train),
        'generate': ('-----------Generation Mode-----------', test_generation),
        'ranking': ('-----------Ranking Mode-----------', test_ranking),
    }
    if args.mode in dispatch:
        banner, runner = dispatch[args.mode]
        print(banner)
        runner()
    else:
        print('Wrong Mode')
import os
import sys
import time
import json
import logging
import argparse
import torch
import torch.optim as Optim
from torch.autograd import Variable
import utils
import modules
import dataset
import metrics
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
# Command-line interface: model hyper-parameters, training settings and paths.
parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('-n_emb', type=int, default=512, help='Embedding size')
parser.add_argument('-n_hidden', type=int, default=512, help='Hidden size')
parser.add_argument('-n_head', type=int, default=8, help='Number of head')
parser.add_argument('-n_block', type=int, default=6, help="Number of block")
parser.add_argument('-max_len', type=int, default=20, help="Limited length for text")
parser.add_argument('-time_range', type=int, default=5, help='Time range')
parser.add_argument('-max_cnum', type=int, default=15, help="Max comments each second")
# BUG FIX: help text said "Bean size".
parser.add_argument('-beam_size', type=int, default=1, help="Beam size")
parser.add_argument('-batch_size', type=int, default=32, help='Batch size')
parser.add_argument('-epoch', type=int, default=100, help='Number of epoch')
parser.add_argument('-dropout', type=float, default=0.2, help='Dropout rate')
parser.add_argument('-lr', type=float, default=1e-3, help="Learning rate")
# BUG FIX: help text said "Learning rate" (copy/paste from -lr).
parser.add_argument('-weight_decay', type=float, default=0.001, help="Weight decay")
# NOTE(review): type=float but compared with an int counter via == in train();
# works for whole numbers, confirm float values are ever intended.
parser.add_argument('-early_stop', type=float, default=20, help="Early Stop")
parser.add_argument('-data_path', type=str, default=None, help='dict and image path')
parser.add_argument('-out_path', type=str, default=None, help='out path')
parser.add_argument('-outfile', type=str, default='out.json', help='outfile for generation')
parser.add_argument('-restore', type=str, default=None, help="Restoring model path")
parser.add_argument('-mode', type=str, default=None)
args = parser.parse_args()
# Fixed seeds for reproducibility.
torch.manual_seed(116)
torch.cuda.manual_seed(116)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if args.mode == 'train':
    if not os.path.exists(args.out_path):
        os.mkdir(args.out_path)
    logger.addHandler(logging.FileHandler(os.path.join(args.out_path, 'log'), "w"))
# Image features and vocabulary are shared by all modes.
images = utils.load_images(args.data_path)
vocabs, rev_vocabs = utils.load_vocabs(args.data_path)
def get_dataset(data_path, images, is_train, set_name=None):
    """Build a dataset.Dataset over one JSON split file.

    :param data_path: path to the split's JSON file.
    :param images: preloaded image features (from utils.load_images).
    :param is_train: whether the dataset is used for training.
    :param set_name: split label forwarded to Dataset.  BUG FIX: this now
        defaults to None because train() calls this helper without it, which
        previously raised TypeError before training could even start.
        NOTE(review): confirm dataset.Dataset tolerates set_name=None.
    """
    return dataset.Dataset(data_path = data_path,
                           vocabs = vocabs,
                           rev_vocabs=rev_vocabs,
                           images = images,
                           left_time_range = args.time_range,
                           right_time_range = args.time_range,
                           max_len = args.max_len,
                           max_cnum = args.max_cnum,
                           is_train = is_train,
                           set_name = set_name)
def get_dataloader(dataset, batch_size, is_train):
    """Wrap *dataset* in a DataLoader; samples are shuffled only for training."""
    loader_kwargs = {"dataset": dataset, "batch_size": batch_size, "shuffle": is_train}
    return torch.utils.data.DataLoader(**loader_kwargs)
def save_model(path, model):
    """Serialize *model*'s parameters (its state dict) to *path*."""
    torch.save(model.state_dict(), path)
def train():
    """Train the comment model with early stopping on the dev split.

    Uses the module-level globals: args, images, vocabs, logger.
    Saves 'best_checkpoint.pt' on improvement and 'checkpoint.pt' otherwise.
    """
    # NOTE(review): get_dataset requires a set_name argument but none is
    # passed here — confirm this call works against the current signature.
    train_set = get_dataset(data_path = os.path.join(args.data_path, 'train.json'),
                            images = images,
                            is_train = True)
    valid_set = get_dataset(data_path = os.path.join(args.data_path, 'dev.json'),
                            images = images,
                            is_train = False)
    train_batch = get_dataloader(dataset = train_set,
                                 batch_size = args.batch_size,
                                 is_train = True)
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    # Optionally resume from a checkpoint.
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        model.load_state_dict(model_dict)
    model.cuda()
    optim = Optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = args.lr, weight_decay=args.weight_decay)
    best_score = -100000
    early_stop_count = 0
    for i in range(args.epoch):
        model.train()
        report_loss, start_time, n_batches = 0, time.time(), 0
        for batch in train_batch:
            model.zero_grad()
            V, S, Y = batch
            V = Variable(V).cuda()
            S = Variable(S).cuda()
            Y = Variable(Y).cuda()
            # Model returns per-GPU losses; sum for backward, mean for reporting.
            multi_gpu_loss = model(V, S, Y)
            loss = torch.sum(multi_gpu_loss)
            loss.backward()
            optim.step()
            report_loss += torch.mean(multi_gpu_loss).item()
            n_batches += 1
        # NOTE(review): console prints epoch i+1 but the log records epoch i.
        print('\nEpoch: %d, report_loss: %.3f, time: %.2f'
              % (i+1, report_loss / n_batches, time.time() - start_time))
        logger.info('\nEpoch '+str(i) + ', report_loss: '+str(report_loss/n_batches) + ' , time: ' + str(time.time() - start_time))
        # eval() returns the negated validation loss, so higher is better.
        score = eval(model, valid_set)
        if score > best_score:
            best_score = score
            print('Best score ', best_score)
            save_model(os.path.join(args.out_path, 'best_checkpoint.pt'), model)
            logger.info('Evaluation score ' + str(score) + ', Best score ' + str(best_score))
            early_stop_count = 0
        else:
            early_stop_count += 1
            save_model(os.path.join(args.out_path, 'checkpoint.pt'), model)
            print('Evaluation score ', score, '. Best score ', best_score, '. Early stop count ', early_stop_count)
            if early_stop_count == args.early_stop:
                sys.exit()
    return 0
def eval(model, valid_set):
    """Compute the mean validation loss and return its negation as a score.

    NOTE(review): this shadows the builtin eval(); renaming would require
    updating the call in train().

    :param model: the model to evaluate (left in eval() mode on return).
    :param valid_set: the validation dataset.
    :return: -mean_loss, so that a larger return value means a better model.
    """
    print('Start Evaluation ... ')
    start_time = time.time()
    model.eval()
    valid_batch = get_dataloader(valid_set, args.batch_size, is_train=False)
    loss = 0
    total_batch = 0
    with torch.no_grad():
        for batch in valid_batch:
            V, S, Y = batch
            V = Variable(V).cuda()
            S = Variable(S).cuda()
            Y = Variable(Y).cuda()
            loss += torch.mean(model(V, S, Y)).item()
            total_batch += 1
    # NOTE(review): raises ZeroDivisionError if the validation set is empty.
    loss = loss / total_batch
    print('Loss: ', loss)
    print("evaluting time:", time.time() - start_time)
    return -loss
def test_generation():
    """Generate one comment per test sample and dump results to args.outfile.

    Requires -restore (exits otherwise).  Each output line is a JSON object
    with the video time, context, reference comment, candidates and the
    generated comment.
    """
    test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),
                           images = images,
                           is_train = False,
                           set_name = 'test')
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        # Strip the 'module.' prefix left by DataParallel checkpoints.
        model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})
    else:
        print('Error! Fail to load model for test mode')
        sys.exit()
    model.cuda()
    model.eval()
    with torch.no_grad():
        with open(args.outfile, 'w') as fout:
            for i in range(len(test_set)):
                data = test_set.get_data(i)
                V = data['video_feature']
                S = data['context_feature']
                V = Variable(V).cuda()
                S = Variable(S).cuda()
                comment_ids = model.generate(V, S, BOS_token=vocabs['<BOS>'], EOS_token=vocabs['<EOS>'], beam_size=args.beam_size).data.tolist()
                # Only the top beam hypothesis is kept.
                comment = transform(comment_ids[0])
                # NOTE(review): this loop looks like leftover debug output —
                # it prints every key of each sample to stdout.
                for key in data:
                    print(key)
                sample = {'video_time': data['video_time'],
                          'context': data['context'],
                          'comment': data['comment'],
                          'candidate': data['candidate'],
                          'generation': comment}
                term = json.dumps(sample, ensure_ascii=False)
                fout.write(str(term)+'\n')
def transform(ids):
    """Map token ids back to words, skipping <BOS> and truncating at <EOS>."""
    bos = vocabs['<BOS>']
    eos = vocabs['<EOS>']
    words = []
    for token in ids:
        if token == bos:
            continue
        if token == eos:
            break
        words.append(rev_vocabs[token])
    return words
def test_ranking():
    """Rank candidate comments for every test sample and report metrics.

    Requires -restore (exits otherwise).  Prints Recall@{1,5,10}, mean rank
    and mean reciprocal rank over the whole test split.
    """
    test_set = get_dataset(data_path = os.path.join(args.data_path, 'test.json'),
                           images = images,
                           is_train = False,
                           set_name = 'test')
    model = modules.Model(n_embs = args.n_emb,
                          n_hidden = args.n_hidden,
                          n_head = args.n_head,
                          n_block = args.n_block,
                          max_len = args.max_len,
                          dropout = args.dropout,
                          vocab_size = len(vocabs),
                          left_range = args.time_range,
                          right_range = args.time_range)
    if args.restore is not None:
        model_dict = torch.load(args.restore)
        # Strip the 'module.' prefix left by DataParallel checkpoints.
        model.load_state_dict({k.replace('module.', ''):v for k,v in model_dict.items()})
    else:
        print('Error! Fail to load model for test mode')
        sys.exit()
    model.cuda()
    model.eval()
    predictions, references = [], []
    with torch.no_grad():
        for i in range(len(test_set)):
            data = test_set.get_data(i)
            V = Variable(data['video_feature']).cuda()
            S = Variable(data['context_feature']).cuda()
            C = Variable(torch.stack(data['candidate_feature'])).cuda()
            # ranking() returns candidate indices ordered by model score.
            comment_ids = model.ranking(V, S, C).data
            candidate = []
            comments = list(data['candidate'].keys())
            # NOTE: `id` shadows the builtin; harmless here but worth renaming.
            for id in comment_ids:
                candidate.append(comments[id])
            predictions.append(candidate)
            references.append(data['candidate'])
    recall_1 = metrics.recall(predictions, references, 1)
    recall_5 = metrics.recall(predictions, references, 5)
    recall_10 = metrics.recall(predictions, references, 10)
    mr = metrics.mean_rank(predictions, references)
    mrr = metrics.mean_reciprocal_rank(predictions, references)
    print('Report ranking result')
    print('Recall 1: ', recall_1)
    print('Recall 5: ', recall_5)
    print('Recall 10: ', recall_10)
    print('MR: ', mr)
    print('MRR: ', mrr)
# Entry point: dispatch to the requested mode via a lookup table.
if __name__ == '__main__':
    _modes = {
        'train': ('-----------Train Mode-----------', train),
        'generate': ('-----------Generation Mode-----------', test_generation),
        'ranking': ('-----------Ranking Mode-----------', test_ranking),
    }
    if args.mode in _modes:
        _banner, _runner = _modes[args.mode]
        print(_banner)
        _runner()
    else:
        print('Wrong Mode')
f732c7f175da6d44c51ab5467b59b053313e1112 | 3,670 | py | Python | sandworks/generators/splines.py | Caged/splineworks | 0fad1e98ba6928f6ffeef0018a4d52696a38cce2 | [
"MIT"
] | 2 | 2017-11-10T18:32:31.000Z | 2017-11-12T10:12:03.000Z | sandworks/generators/splines.py | Caged/sandworks | 0fad1e98ba6928f6ffeef0018a4d52696a38cce2 | [
"MIT"
] | null | null | null | sandworks/generators/splines.py | Caged/sandworks | 0fad1e98ba6928f6ffeef0018a4d52696a38cce2 | [
"MIT"
] | null | null | null | from numpy import pi
from numpy import array
from numpy import linspace
from numpy import arange
from numpy import zeros
from numpy import column_stack
from numpy import array
from time import time
from math import radians
import cairocffi as cairo
from sand import Sand
from ..lib.sand_spline import SandSpline
from ..lib.helpers import hex_to_rgb_decimal, SimpleLinearScale
def guide_iterator(x, y):
    """Endlessly yield the fixed guide point (x, y) as a fresh 1x2 ndarray."""
    anchor = (x, y)
    while True:
        yield array([anchor])
def make_vertical_surface(sand, gamma, canvas_width, canvas_height, flipped_height):
    """Render the sand image rotated 90 degrees for portrait-oriented output.

    The landscape sand surface is painted onto a new canvas_width x
    canvas_height surface after a 90-degree rotation and a translation that
    brings the rotated image back into view.
    """
    sand.write_to_surface(gamma)
    rotated = cairo.ImageSurface(cairo.FORMAT_ARGB32, canvas_width, canvas_height)
    ctx = cairo.Context(rotated)
    ctx.rotate(radians(90))
    ctx.translate(0, -flipped_height)
    ctx.scale(1.0, 1.0)
    ctx.set_source_surface(sand.sur, 0, 0)
    ctx.paint()
    return rotated
def generate(args):
    """Draw sand splines forever, saving a PNG every args.save_every frames.

    :param args: namespace with lines, width, height, dir, margin, save_every,
        color, bg_color and out_dir attributes.
    """
    # Number of horizontal splines to draw.
    line_count = args.lines
    width = args.width
    height = args.height
    # Vertical output is rendered landscape and rotated at save time.
    if args.dir == 'vertical':
        width = args.height
        height = args.width
    xscale = SimpleLinearScale(domain=array([0, width]), range=array([0, 1]))
    yscale = SimpleLinearScale(domain=array([0, height]), range=array([0, 1]))
    # Margin as a pixel value, converted to the 0..1 unit space.
    margin = args.margin
    margin_x = xscale(margin)
    margin_y = yscale(margin)
    # Output PNG gamma.
    gamma = 1.5
    # Save a frame once every `save_frame` passes over all splines.
    save_frame = args.save_every
    # Jitter multiplier for points along the spline; later points drift more,
    # giving the sand a "windswept" look.
    step = 0.0000003 * 0.15
    # Points along each spline; more points means a denser-looking spline.
    point_count = 1000
    # Convert colors to RGB decimal and append alpha.
    sand_color = hex_to_rgb_decimal(args.color)
    bg_color = hex_to_rgb_decimal(args.bg_color)
    sand_color.append(0.001)
    bg_color.append(1)
    sand = Sand(width, height)
    sand.set_rgba(sand_color)
    sand.set_bg(bg_color)
    splines = []
    # One spline per y column; later splines get more control points.
    for index, ypos in enumerate(linspace(margin_y, 1.0 - margin_y, line_count)):
        pnum = 4 + index
        guide = guide_iterator(0.5, ypos)
        x = linspace(-1, 1.0, pnum) * (1.0 - 2 * margin_x) * 0.5
        y = zeros(pnum, 'float')
        path = column_stack([x, y])
        scale = arange(pnum).astype('float') * step
        spline = SandSpline(guide, path, point_count, scale)
        splines.append(spline)
    j = 0
    while True:
        for s in splines:
            start = time()
            xy = next(s)
            sand.paint_dots(xy)
            # BUG FIX: was `j is not 0`, which compares identity with an int
            # literal (implementation-defined, SyntaxWarning on Python 3.8+).
            if j != 0 and j % (save_frame * line_count) == 0:
                frame_number = int(j / save_frame)
                file_name = '{}/{}-{}.png'.format(
                    args.out_dir,
                    int(time()),
                    frame_number)
                if args.dir == 'vertical':
                    surface = make_vertical_surface(sand, gamma, args.width, args.height, height)
                    surface.write_to_png(file_name)
                else:
                    sand.write_to_png(file_name, gamma)
                print('Saved frame {} in {}'.format(frame_number, time() - start))
            j += 1
| 29.837398 | 97 | 0.631608 | from numpy import pi
from numpy import array
from numpy import linspace
from numpy import arange
from numpy import zeros
from numpy import column_stack
from numpy import array
from time import time
from math import radians
import cairocffi as cairo
from sand import Sand
from ..lib.sand_spline import SandSpline
from ..lib.helpers import hex_to_rgb_decimal, SimpleLinearScale
def guide_iterator(x, y):
    """Infinite generator producing the guide coordinate (x, y) as a 1x2 array."""
    coords = [[x, y]]
    while True:
        yield array(coords)
def make_vertical_surface(sand, gamma, canvas_width, canvas_height, flipped_height):
    """Render the sand image rotated 90 degrees onto a portrait surface."""
    sand.write_to_surface(gamma)
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, canvas_width, canvas_height)
    context = cairo.Context(surface)
    # Rotate the landscape rendering into portrait orientation, then shift it
    # back into the visible canvas area.
    context.rotate(radians(90))
    context.translate(0, -flipped_height)
    context.scale(1.0, 1.0)
    context.set_source_surface(sand.sur, 0, 0)
    context.paint()
    return surface
def generate(args):
    """Draw sand splines forever, saving a PNG every args.save_every frames.

    :param args: namespace with lines, width, height, dir, margin, save_every,
        color, bg_color and out_dir attributes.
    """
    line_count = args.lines
    width = args.width
    height = args.height
    # Vertical output is rendered landscape and rotated at save time.
    if args.dir == 'vertical':
        width = args.height
        height = args.width
    xscale = SimpleLinearScale(domain=array([0, width]), range=array([0, 1]))
    yscale = SimpleLinearScale(domain=array([0, height]), range=array([0, 1]))
    # Margin in pixels, converted to the 0..1 unit space.
    margin = args.margin
    margin_x = xscale(margin)
    margin_y = yscale(margin)
    # Output PNG gamma.
    gamma = 1.5
    # Save a frame once every `save_frame` passes over all splines.
    save_frame = args.save_every
    # Jitter multiplier: later points along a spline drift more.
    step = 0.0000003 * 0.15
    # Points along each spline; more points means a denser-looking spline.
    point_count = 1000
    sand_color = hex_to_rgb_decimal(args.color)
    bg_color = hex_to_rgb_decimal(args.bg_color)
    sand_color.append(0.001)
    bg_color.append(1)
    sand = Sand(width, height)
    sand.set_rgba(sand_color)
    sand.set_bg(bg_color)
    splines = []
    # One spline per y column; later splines get more control points.
    for index, ypos in enumerate(linspace(margin_y, 1.0 - margin_y, line_count)):
        pnum = 4 + index
        guide = guide_iterator(0.5, ypos)
        x = linspace(-1, 1.0, pnum) * (1.0 - 2 * margin_x) * 0.5
        y = zeros(pnum, 'float')
        path = column_stack([x, y])
        scale = arange(pnum).astype('float') * step
        spline = SandSpline(guide, path, point_count, scale)
        splines.append(spline)
    j = 0
    while True:
        for s in splines:
            start = time()
            xy = next(s)
            sand.paint_dots(xy)
            # BUG FIX: was `j is not 0`, which compares identity with an int
            # literal (implementation-defined, SyntaxWarning on Python 3.8+).
            if j != 0 and j % (save_frame * line_count) == 0:
                frame_number = int(j / save_frame)
                file_name = '{}/{}-{}.png'.format(
                    args.out_dir,
                    int(time()),
                    frame_number)
                if args.dir == 'vertical':
                    surface = make_vertical_surface(sand, gamma, args.width, args.height, height)
                    surface.write_to_png(file_name)
                else:
                    sand.write_to_png(file_name, gamma)
                print('Saved frame {} in {}'.format(frame_number, time() - start))
            j += 1
| true | true |
f732c923533d29817b7676a497807aaef900c5c1 | 24,341 | py | Python | src/ramstk/views/gtk3/pof/panel.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 26 | 2019-05-15T02:03:47.000Z | 2022-02-21T07:28:11.000Z | src/ramstk/views/gtk3/pof/panel.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 815 | 2019-05-10T12:31:52.000Z | 2022-03-31T12:56:26.000Z | src/ramstk/views/gtk3/pof/panel.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 9 | 2019-04-20T23:06:29.000Z | 2022-01-24T21:21:04.000Z | # -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.pof.panel.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""GTK3 PoF Panels."""
# Standard Library Imports
from typing import Any, Dict, List
# Third Party Imports
import treelib
from pubsub import pub
# RAMSTK Package Imports
from ramstk.views.gtk3 import GdkPixbuf, Gtk, _
from ramstk.views.gtk3.widgets import RAMSTKTreePanel
class PoFTreePanel(RAMSTKTreePanel):
    """Panel to display the Physics of Failure analysis worksheet."""
    # Define private dictionary class attributes.
    # Define private list class attributes.
    # Define private scalar class attributes.
    # PyPubSub message that triggers (re)loading the tree.
    _select_msg = "succeed_retrieve_modes"
    _tag = "pof"
    _title = _("Physics of Failure (PoF) Analysis")
    # Define public dictionary class attributes.
    # Define public list class attributes.
    # Define public scalar class attributes.
    def __init__(self) -> None:
        """Initialize an instance of the PoF analysis worksheet."""
        super().__init__()
        # Initialize private dictionary instance attributes.
        # Maps a node tag to the method that loads that record type into the tree.
        self._dic_row_loader = {
            "mode": self.__do_load_mode,
            "mechanism": self.__do_load_mechanism,
            "opload": self.__do_load_opload,
            "opstress": self.__do_load_opstress,
            "method": self.__do_load_test_method,
        }
        # Per-level column visibility masks, one flag per displayed column.
        self._dic_visible_mask: Dict[str, List[str]] = {
            "mode": [
                "True",
                "True",
                "True",
                "True",
                "True",
                "False",
                "False",
                "False",
                "False",
                "False",
                "False",
                "False",
            ],
            "mechanism": [
                "True",
                "True",
                "True",
                "True",
                "True",
                "False",
                "False",
                "False",
                "False",
                "False",
                "False",
                "False",
            ],
            "opload": [
                "True",
                "True",
                "True",
                "True",
                "True",
                "False",
                "False",
                "False",
                "False",
                "True",
                "False",
                "False",
            ],
            "opstress": [
                "True",
                "True",
                "True",
                "True",
                "True",
                "False",
                "True",
                "True",
                "False",
                "False",
                "True",
                "False",
            ],
            "testmethod": [
                "True",
                "True",
                "True",
                "True",
                "True",
                "False",
                "False",
                "False",
                "True",
                "False",
                "True",
                "False",
            ],
        }
        # Initialize private list instance attributes.
        # Initialize private scalar instance attributes.
        self._on_edit_message: str = f"wvw_editing_{self._tag}"
        # Initialize public dictionary instance attributes.
        # Column definition: position, renderer, signal, handler, edit message,
        # default value, cell properties, heading and GObject type.
        self.dic_attribute_widget_map: Dict[str, List[Any]] = {
            "mode_id": [
                0,
                Gtk.CellRendererText(),
                "edited",
                None,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": False,
                    "fg_color": "#000000",
                    "visible": False,
                },
                _("Mode ID"),
                "gint",
            ],
            "mechanism_id": [
                1,
                Gtk.CellRendererText(),
                "edited",
                None,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": False,
                    "fg_color": "#000000",
                    "visible": False,
                },
                _("Mechanism ID"),
                "gint",
            ],
            "load_id": [
                2,
                Gtk.CellRendererText(),
                "edited",
                None,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": False,
                    "fg_color": "#000000",
                    "visible": False,
                },
                _("Load ID"),
                "gint",
            ],
            "stress_id": [
                3,
                Gtk.CellRendererText(),
                "edited",
                None,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": False,
                    "fg_color": "#000000",
                    "visible": False,
                },
                _("Stress ID"),
                "gint",
            ],
            "test_id": [
                4,
                Gtk.CellRendererText(),
                "edited",
                None,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": False,
                    "fg_color": "#000000",
                    "visible": False,
                },
                _("Test ID"),
                "gint",
            ],
            "description": [
                5,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Description"),
                "gchararray",
            ],
            "effect_end": [
                6,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("End Effect"),
                "gchararray",
            ],
            "severity_class": [
                7,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Severity"),
                "gchararray",
            ],
            "mode_probability": [
                8,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                0.0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Mode Probability"),
                "gfloat",
            ],
            # NOTE(review): this combo wires on_cell_toggled while the other
            # combo columns use on_cell_edit — confirm this is intentional.
            "damage_model": [
                9,
                Gtk.CellRendererCombo(),
                "edited",
                super().on_cell_toggled,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Damage Model"),
                "gchararray",
            ],
            "measurable_parameter": [
                10,
                Gtk.CellRendererCombo(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Measurable Parameter"),
                "gchararray",
            ],
            "load_history": [
                11,
                Gtk.CellRendererCombo(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Load History Method"),
                "gchararray",
            ],
            "boundary_conditions": [
                12,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Boundary Conditions"),
                "gchararray",
            ],
            "priority_id": [
                13,
                Gtk.CellRendererSpin(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                0,
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Priority"),
                "gint",
            ],
            "remarks": [
                14,
                Gtk.CellRendererText(),
                "edited",
                super().on_cell_edit,
                self._on_edit_message,
                "",
                {
                    "bg_color": "#FFFFFF",
                    "editable": True,
                    "fg_color": "#000000",
                    "visible": True,
                },
                _("Remarks"),
                "gchararray",
            ],
        }
        # Icon file paths keyed by node level ("mode", "mechanism", "opload",
        # "opstress", "testmethod"); presumably populated by the owning view.
        self.dic_icons: Dict[str, str] = {}
        # Initialize public list instance attributes.
        # NOTE(review): the row loaders also read self.dic_damage_models,
        # self.dic_load_history and self.dic_measurable_parameters, which are
        # not initialized here — confirm the owning view assigns them.
        self.lst_damage_models: List[str] = []
        self.lst_load_history: List[str] = []
        self.lst_measurable_parameters: List[str] = []
        # Initialize public scalar instance attributes.
        super().do_set_properties()
        super().do_make_panel()
        super().do_set_callbacks()
        self.tvwTreeView.set_tooltip_text(
            _(
                "Displays the Physics of Failure (PoF) Analysis for the currently "
                "selected hardware item."
            )
        )
        # Subscribe to PyPubSub messages.
        pub.subscribe(super().do_load_panel, "succeed_delete_test_method")
        pub.subscribe(super().do_load_panel, "succeed_delete_opstress")
        pub.subscribe(super().do_load_panel, "succeed_delete_opload")
        pub.subscribe(super().do_load_panel, "succeed_delete_mechanism")
        pub.subscribe(super().do_load_panel, "succeed_delete_mode")
        pub.subscribe(self._on_insert, "succeed_insert_test_method")
        pub.subscribe(self._on_insert, "succeed_insert_opstress")
        pub.subscribe(self._on_insert, "succeed_insert_opload")
        pub.subscribe(self._on_insert, "succeed_insert_mechanism")
        pub.subscribe(self._on_insert, "succeed_insert_mode")
    def do_load_comboboxes(self) -> None:
        """Load the RAMSTKComboBox() widgets.

        Populates the damage-model, measurable-parameter and load-history
        combo cells and configures the priority spin cell.

        :return: None
        :rtype: None
        """
        self.__do_load_damage_models()
        self.__do_load_measureable_parameters()
        self.__do_load_load_history()
        # Set the priority Gtk.CellRendererSpin()'s adjustment limits and
        # step increments.
        _cell = self.tvwTreeView.get_column(
            self.tvwTreeView.position["priority_id"]
        ).get_cells()[0]
        _adjustment = _cell.get_property("adjustment")
        # configure(value, lower, upper, step_increment, page_increment,
        # page_size); NOTE(review): step_increment is -1 — confirm intended.
        _adjustment.configure(5, 1, 5, -1, 0, 0)
    # noinspection PyUnusedLocal
    def _on_insert(
        self, node_id: int, tree: treelib.Tree  # pylint: disable=unused-argument
    ) -> None:
        """Reload the panel after a successful insert; wraps do_load_panel().

        The PyPubSub insert messages carry both the new node ID and the
        updated tree as data packages, but the metaclass' do_load_panel()
        only wants the tree, so node_id is accepted and ignored.

        :param node_id: ID of the newly inserted node (unused).
        :param tree: the updated analysis tree to (re)load.
        :return: None
        :rtype:
        """
        return super().do_load_panel(tree)
def _on_row_change(self, selection: Gtk.TreeSelection) -> None:
"""Handle events for the PoF Work View RAMSTKTreeView().
This method is called whenever a RAMSTKTreeView() row is activated.
:param selection: the TreeSelection() of the currently
selected row in the PoF RAMSTKTreeView().
:return: None
"""
_model, _row = selection.get_selected()
if _row is not None:
if _model.get_value(_row, 0) == 0:
_level = "mode"
elif _model.get_value(_row, 1) == 0:
_level = "mechanism"
elif _model.get_value(_row, 2) == 0:
_level = "load"
elif _model.get_value(_row, 3) == 0:
_level = "stress"
else:
_level = "test"
self.tvwTreeView.visible = self._dic_visible_mask[_level]
self.tvwTreeView.do_set_visible_columns()
def __do_load_damage_models(self) -> None:
"""Load the RAMSTKTreeView() damage model CellRendererCombo().
:return: None
"""
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["damage_model"], self.lst_damage_models
)
def __do_load_load_history(self) -> None:
"""Load the operating load history CellRendererCombo().
:return: None
:rtype: None
"""
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["load_history"], self.lst_load_history
)
def __do_load_measureable_parameters(self) -> None:
"""Load the measureable parameters CellRendererCombo().
:return: None
"""
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["measurable_parameter"],
self.lst_measurable_parameters,
)
    def __do_load_mechanism(
        self, node: treelib.Node, row: Gtk.TreeIter
    ) -> Gtk.TreeIter:
        """Load a failure mechanism record into the RAMSTKTreeView().

        :param node: the treelib Node() with the mechanism data to load.
        :param row: the parent row of the mechanism to load into the PoF form.
        :return: _new_row; the row that was just populated with mechanism
            data, or None if the data package was malformed.
        """
        _new_row = None
        # The node's data dict holds a single {table_name: record} pair.
        [[__, _entity]] = node.data.items()  # pylint: disable=unused-variable
        _model = self.tvwTreeView.get_model()
        # noinspection PyArgumentList
        _icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
            self.dic_icons["mechanism"], 22, 22
        )
        # Columns: mode/mechanism/load/stress/test IDs, then the display
        # fields; IDs below the mechanism level are zero-filled.
        _attributes = [
            _entity.mode_id,
            _entity.mechanism_id,
            0,
            0,
            0,
            _entity.description,
            "",
            "",
            0.0,
            "",
            "",
            "",
            "",
            0,
            "",
            _icon,
        ]
        try:
            _new_row = _model.append(row, _attributes)
        except (AttributeError, TypeError, ValueError):
            # A malformed data package leaves the row unloaded; log and go on.
            _new_row = None
            _message = _(
                "An error occurred when loading failure mechanism {0:s} in "
                "the "
                "physics of failure analysis. This might indicate it was "
                "missing it's data package, some of the data in the package "
                "was missing, or some of the data was the wrong type. Row "
                "data was: {1}"
            ).format(str(node.identifier), _attributes)
            pub.sendMessage(
                "do_log_warning_msg", logger_name="WARNING", message=_message
            )
        return _new_row
    def __do_load_mode(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
        """Load a failure mode record into the RAMSTKTreeView().

        :param node: the treelib Node() with the mode data to load.
        :param row: the parent row of the mode to load into the PoF form.
        :return: _new_row; the row that was just populated with mode data,
            or None if the data package was malformed.
        """
        _new_row = None
        # The node's data dict holds a single {table_name: record} pair.
        [[__, _entity]] = node.data.items()  # pylint: disable=unused-variable
        _model = self.tvwTreeView.get_model()
        # noinspection PyArgumentList
        _icon = GdkPixbuf.Pixbuf.new_from_file_at_size(self.dic_icons["mode"], 22, 22)
        # Columns: the mode ID, zero-filled child IDs, then the display
        # fields (mode_ratio feeds the "Mode Probability" column).
        _attributes = [
            _entity.mode_id,
            0,
            0,
            0,
            0,
            _entity.description,
            _entity.effect_end,
            _entity.severity_class,
            _entity.mode_ratio,
            "",
            "",
            "",
            "",
            0,
            "",
            _icon,
        ]
        try:
            _new_row = _model.append(row, _attributes)
        except (AttributeError, TypeError, ValueError):
            # A malformed data package leaves the row unloaded; log and go on.
            _new_row = None
            _message = _(
                "An error occurred when loading failure mode {0:s} in the "
                "physics of failure analysis. This might indicate it was "
                "missing it's data package, some of the data in the package "
                "was missing, or some of the data was the wrong type. Row "
                "data was: {1}"
            ).format(str(node.identifier), _attributes)
            pub.sendMessage(
                "do_log_warning_msg", logger_name="WARNING", message=_message
            )
        return _new_row
    def __do_load_opload(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
        """Load an operating load record into the RAMSTKTreeView().

        :param node: the treelib Node() with the operating load data to load.
        :param row: the parent row of the load to load into the PoF form.
        :return: _new_row; the row that was just populated with load data,
            or None if the data package was malformed.
        """
        _new_row = None
        # The node's data dict holds a single {table_name: record} pair.
        [[__, _entity]] = node.data.items()  # pylint: disable=unused-variable
        _model = self.tvwTreeView.get_model()
        # noinspection PyArgumentList
        _icon = GdkPixbuf.Pixbuf.new_from_file_at_size(self.dic_icons["opload"], 22, 22)
        # NOTE(review): self.dic_damage_models is not initialized in
        # __init__ (which defines lst_damage_models); presumably the owning
        # view assigns it — confirm.
        _damage_model = self.dic_damage_models[_entity.damage_model]
        _attributes = [
            _entity.mode_id,
            _entity.mechanism_id,
            _entity.load_id,
            0,
            0,
            _entity.description,
            "",
            "",
            0.0,
            _damage_model,
            "",
            "",
            "",
            _entity.priority_id,
            "",
            _icon,
        ]
        try:
            _new_row = _model.append(row, _attributes)
        except (AttributeError, TypeError, ValueError):
            # A malformed data package leaves the row unloaded; log and go on.
            _new_row = None
            _message = _(
                "An error occurred when loading operating load {0:s} in the "
                "physics of failure analysis. This might indicate it was "
                "missing it's data package, some of the data in the package "
                "was missing, or some of the data was the wrong type. Row "
                "data was: {1}"
            ).format(str(node.identifier), _attributes)
            pub.sendMessage(
                "do_log_warning_msg", logger_name="WARNING", message=_message
            )
        return _new_row
    def __do_load_opstress(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
        """Load an operating stress record into the RAMSTKTreeView().

        :param node: the treelib Node() with the operating stress data to load.
        :param row: the parent row of the stress to load into the PoF form.
        :return: _new_row; the row that was just populated with stress data,
            or None if the data package was malformed.
        """
        _new_row = None
        # The node's data dict holds a single {table_name: record} pair.
        [[__, _entity]] = node.data.items()  # pylint: disable=unused-variable
        _model = self.tvwTreeView.get_model()
        # noinspection PyArgumentList
        _icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
            self.dic_icons["opstress"], 22, 22
        )
        # NOTE(review): self.dic_load_history and
        # self.dic_measurable_parameters are not initialized in __init__;
        # presumably the owning view assigns them — confirm.
        _load_history = self.dic_load_history[_entity.load_history]
        _measurable_parameter = self.dic_measurable_parameters[
            _entity.measurable_parameter
        ]
        _attributes = [
            _entity.mode_id,
            _entity.mechanism_id,
            _entity.load_id,
            _entity.stress_id,
            0,
            _entity.description,
            "",
            "",
            0.0,
            "",
            _measurable_parameter,
            _load_history,
            "",
            0,
            _entity.remarks,
            _icon,
        ]
        try:
            _new_row = _model.append(row, _attributes)
        except (AttributeError, TypeError, ValueError):
            # A malformed data package leaves the row unloaded; log and go on.
            _new_row = None
            _message = _(
                "An error occurred when loading operating stress {0:s} in the "
                "physics of failure analysis. This might indicate it was "
                "missing it's data package, some of the data in the package "
                "was missing, or some of the data was the wrong type. Row "
                "data was: {1}"
            ).format(str(node.identifier), _attributes)
            pub.sendMessage(
                "do_log_warning_msg", logger_name="WARNING", message=_message
            )
        return _new_row
    def __do_load_test_method(
        self, node: treelib.Node, row: Gtk.TreeIter
    ) -> Gtk.TreeIter:
        """Load a test method record into the RAMSTKTreeView().

        :param node: the treelib Node() with the test method data to load.
        :param row: the parent row of the test method to load into the PoF form.
        :return: _new_row; the row that was just populated with test method
            data, or None if the data package was malformed.
        """
        _new_row = None
        # The node's data dict holds a single {table_name: record} pair.
        [[__, _entity]] = node.data.items()  # pylint: disable=unused-variable
        _model = self.tvwTreeView.get_model()
        # noinspection PyArgumentList
        _icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
            self.dic_icons["testmethod"], 22, 22
        )
        # Test methods are the deepest level, so all five IDs are real.
        _attributes = [
            _entity.mode_id,
            _entity.mechanism_id,
            _entity.load_id,
            _entity.stress_id,
            _entity.test_id,
            _entity.description,
            "",
            "",
            0.0,
            "",
            "",
            "",
            _entity.boundary_conditions,
            0,
            _entity.remarks,
            _icon,
        ]
        try:
            _new_row = _model.append(row, _attributes)
        except (AttributeError, TypeError, ValueError):
            # A malformed data package leaves the row unloaded; log and go on.
            _new_row = None
            _message = _(
                "An error occurred when loading test method {0:s} in the "
                "physics of failure analysis. This might indicate it was "
                "missing it's data package, some of the data in the package "
                "was missing, or some of the data was the wrong type. Row "
                "data was: {1}"
            ).format(str(node.identifier), _attributes)
            pub.sendMessage(
                "do_log_warning_msg", logger_name="WARNING", message=_message
            )
        return _new_row
| 31.652796 | 88 | 0.467976 |
from typing import Any, Dict, List
import treelib
from pubsub import pub
from ramstk.views.gtk3 import GdkPixbuf, Gtk, _
from ramstk.views.gtk3.widgets import RAMSTKTreePanel
class PoFTreePanel(RAMSTKTreePanel):
_select_msg = "succeed_retrieve_modes"
_tag = "pof"
_title = _("Physics of Failure (PoF) Analysis")
def __init__(self) -> None:
super().__init__()
self._dic_row_loader = {
"mode": self.__do_load_mode,
"mechanism": self.__do_load_mechanism,
"opload": self.__do_load_opload,
"opstress": self.__do_load_opstress,
"method": self.__do_load_test_method,
}
self._dic_visible_mask: Dict[str, List[str]] = {
"mode": [
"True",
"True",
"True",
"True",
"True",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
],
"mechanism": [
"True",
"True",
"True",
"True",
"True",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
],
"opload": [
"True",
"True",
"True",
"True",
"True",
"False",
"False",
"False",
"False",
"True",
"False",
"False",
],
"opstress": [
"True",
"True",
"True",
"True",
"True",
"False",
"True",
"True",
"False",
"False",
"True",
"False",
],
"testmethod": [
"True",
"True",
"True",
"True",
"True",
"False",
"False",
"False",
"True",
"False",
"True",
"False",
],
}
self._on_edit_message: str = f"wvw_editing_{self._tag}"
self.dic_attribute_widget_map: Dict[str, List[Any]] = {
"mode_id": [
0,
Gtk.CellRendererText(),
"edited",
None,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Mode ID"),
"gint",
],
"mechanism_id": [
1,
Gtk.CellRendererText(),
"edited",
None,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Mechanism ID"),
"gint",
],
"load_id": [
2,
Gtk.CellRendererText(),
"edited",
None,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Load ID"),
"gint",
],
"stress_id": [
3,
Gtk.CellRendererText(),
"edited",
None,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Stress ID"),
"gint",
],
"test_id": [
4,
Gtk.CellRendererText(),
"edited",
None,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Test ID"),
"gint",
],
"description": [
5,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Description"),
"gchararray",
],
"effect_end": [
6,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("End Effect"),
"gchararray",
],
"severity_class": [
7,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Severity"),
"gchararray",
],
"mode_probability": [
8,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
0.0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Mode Probability"),
"gfloat",
],
"damage_model": [
9,
Gtk.CellRendererCombo(),
"edited",
super().on_cell_toggled,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Damage Model"),
"gchararray",
],
"measurable_parameter": [
10,
Gtk.CellRendererCombo(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Measurable Parameter"),
"gchararray",
],
"load_history": [
11,
Gtk.CellRendererCombo(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Load History Method"),
"gchararray",
],
"boundary_conditions": [
12,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Boundary Conditions"),
"gchararray",
],
"priority_id": [
13,
Gtk.CellRendererSpin(),
"edited",
super().on_cell_edit,
self._on_edit_message,
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Priority"),
"gint",
],
"remarks": [
14,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
self._on_edit_message,
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Remarks"),
"gchararray",
],
}
self.dic_icons: Dict[str, str] = {}
self.lst_damage_models: List[str] = []
self.lst_load_history: List[str] = []
self.lst_measurable_parameters: List[str] = []
super().do_set_properties()
super().do_make_panel()
super().do_set_callbacks()
self.tvwTreeView.set_tooltip_text(
_(
"Displays the Physics of Failure (PoF) Analysis for the currently "
"selected hardware item."
)
)
pub.subscribe(super().do_load_panel, "succeed_delete_test_method")
pub.subscribe(super().do_load_panel, "succeed_delete_opstress")
pub.subscribe(super().do_load_panel, "succeed_delete_opload")
pub.subscribe(super().do_load_panel, "succeed_delete_mechanism")
pub.subscribe(super().do_load_panel, "succeed_delete_mode")
pub.subscribe(self._on_insert, "succeed_insert_test_method")
pub.subscribe(self._on_insert, "succeed_insert_opstress")
pub.subscribe(self._on_insert, "succeed_insert_opload")
pub.subscribe(self._on_insert, "succeed_insert_mechanism")
pub.subscribe(self._on_insert, "succeed_insert_mode")
def do_load_comboboxes(self) -> None:
self.__do_load_damage_models()
self.__do_load_measureable_parameters()
self.__do_load_load_history()
# step increments.
_cell = self.tvwTreeView.get_column(
self.tvwTreeView.position["priority_id"]
).get_cells()[0]
_adjustment = _cell.get_property("adjustment")
_adjustment.configure(5, 1, 5, -1, 0, 0)
# noinspection PyUnusedLocal
def _on_insert(
self, node_id: int, tree: treelib.Tree # pylint: disable=unused-argument
) -> None:
return super().do_load_panel(tree)
def _on_row_change(self, selection: Gtk.TreeSelection) -> None:
_model, _row = selection.get_selected()
if _row is not None:
if _model.get_value(_row, 0) == 0:
_level = "mode"
elif _model.get_value(_row, 1) == 0:
_level = "mechanism"
elif _model.get_value(_row, 2) == 0:
_level = "load"
elif _model.get_value(_row, 3) == 0:
_level = "stress"
else:
_level = "test"
self.tvwTreeView.visible = self._dic_visible_mask[_level]
self.tvwTreeView.do_set_visible_columns()
def __do_load_damage_models(self) -> None:
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["damage_model"], self.lst_damage_models
)
def __do_load_load_history(self) -> None:
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["load_history"], self.lst_load_history
)
def __do_load_measureable_parameters(self) -> None:
self.tvwTreeView.do_load_combo_cell(
self.tvwTreeView.position["measurable_parameter"],
self.lst_measurable_parameters,
)
def __do_load_mechanism(
self, node: treelib.Node, row: Gtk.TreeIter
) -> Gtk.TreeIter:
_new_row = None
[[__, _entity]] = node.data.items() # pylint: disable=unused-variable
_model = self.tvwTreeView.get_model()
# noinspection PyArgumentList
_icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
self.dic_icons["mechanism"], 22, 22
)
_attributes = [
_entity.mode_id,
_entity.mechanism_id,
0,
0,
0,
_entity.description,
"",
"",
0.0,
"",
"",
"",
"",
0,
"",
_icon,
]
try:
_new_row = _model.append(row, _attributes)
except (AttributeError, TypeError, ValueError):
_new_row = None
_message = _(
"An error occurred when loading failure mechanism {0:s} in "
"the "
"physics of failure analysis. This might indicate it was "
"missing it's data package, some of the data in the package "
"was missing, or some of the data was the wrong type. Row "
"data was: {1}"
).format(str(node.identifier), _attributes)
pub.sendMessage(
"do_log_warning_msg", logger_name="WARNING", message=_message
)
return _new_row
def __do_load_mode(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
_new_row = None
[[__, _entity]] = node.data.items()
_model = self.tvwTreeView.get_model()
_icon = GdkPixbuf.Pixbuf.new_from_file_at_size(self.dic_icons["mode"], 22, 22)
_attributes = [
_entity.mode_id,
0,
0,
0,
0,
_entity.description,
_entity.effect_end,
_entity.severity_class,
_entity.mode_ratio,
"",
"",
"",
"",
0,
"",
_icon,
]
try:
_new_row = _model.append(row, _attributes)
except (AttributeError, TypeError, ValueError):
_new_row = None
_message = _(
"An error occurred when loading failure mode {0:s} in the "
"physics of failure analysis. This might indicate it was "
"missing it's data package, some of the data in the package "
"was missing, or some of the data was the wrong type. Row "
"data was: {1}"
).format(str(node.identifier), _attributes)
pub.sendMessage(
"do_log_warning_msg", logger_name="WARNING", message=_message
)
return _new_row
def __do_load_opload(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
_new_row = None
[[__, _entity]] = node.data.items() # pylint: disable=unused-variable
_model = self.tvwTreeView.get_model()
# noinspection PyArgumentList
_icon = GdkPixbuf.Pixbuf.new_from_file_at_size(self.dic_icons["opload"], 22, 22)
_damage_model = self.dic_damage_models[_entity.damage_model]
_attributes = [
_entity.mode_id,
_entity.mechanism_id,
_entity.load_id,
0,
0,
_entity.description,
"",
"",
0.0,
_damage_model,
"",
"",
"",
_entity.priority_id,
"",
_icon,
]
try:
_new_row = _model.append(row, _attributes)
except (AttributeError, TypeError, ValueError):
_new_row = None
_message = _(
"An error occurred when loading operating load {0:s} in the "
"physics of failure analysis. This might indicate it was "
"missing it's data package, some of the data in the package "
"was missing, or some of the data was the wrong type. Row "
"data was: {1}"
).format(str(node.identifier), _attributes)
pub.sendMessage(
"do_log_warning_msg", logger_name="WARNING", message=_message
)
return _new_row
def __do_load_opstress(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter:
_new_row = None
[[__, _entity]] = node.data.items()
_model = self.tvwTreeView.get_model()
_icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
self.dic_icons["opstress"], 22, 22
)
_load_history = self.dic_load_history[_entity.load_history]
_measurable_parameter = self.dic_measurable_parameters[
_entity.measurable_parameter
]
_attributes = [
_entity.mode_id,
_entity.mechanism_id,
_entity.load_id,
_entity.stress_id,
0,
_entity.description,
"",
"",
0.0,
"",
_measurable_parameter,
_load_history,
"",
0,
_entity.remarks,
_icon,
]
try:
_new_row = _model.append(row, _attributes)
except (AttributeError, TypeError, ValueError):
_new_row = None
_message = _(
"An error occurred when loading operating stress {0:s} in the "
"physics of failure analysis. This might indicate it was "
"missing it's data package, some of the data in the package "
"was missing, or some of the data was the wrong type. Row "
"data was: {1}"
).format(str(node.identifier), _attributes)
pub.sendMessage(
"do_log_warning_msg", logger_name="WARNING", message=_message
)
return _new_row
def __do_load_test_method(
self, node: treelib.Node, row: Gtk.TreeIter
) -> Gtk.TreeIter:
_new_row = None
[[__, _entity]] = node.data.items() # pylint: disable=unused-variable
_model = self.tvwTreeView.get_model()
# noinspection PyArgumentList
_icon = GdkPixbuf.Pixbuf.new_from_file_at_size(
self.dic_icons["testmethod"], 22, 22
)
_attributes = [
_entity.mode_id,
_entity.mechanism_id,
_entity.load_id,
_entity.stress_id,
_entity.test_id,
_entity.description,
"",
"",
0.0,
"",
"",
"",
_entity.boundary_conditions,
0,
_entity.remarks,
_icon,
]
try:
_new_row = _model.append(row, _attributes)
except (AttributeError, TypeError, ValueError):
_new_row = None
_message = _(
"An error occurred when loading test method {0:s} in the "
"physics of failure analysis. This might indicate it was "
"missing it's data package, some of the data in the package "
"was missing, or some of the data was the wrong type. Row "
"data was: {1}"
).format(str(node.identifier), _attributes)
pub.sendMessage(
"do_log_warning_msg", logger_name="WARNING", message=_message
)
return _new_row
| true | true |
f732ca4155b6b2c7b793f1b680b41826f03b7a9e | 547 | py | Python | main.py | ray2060/mathquiz | ebe0952f1768f382d0c4ae50c470a045a3446e0c | [
"MIT"
] | null | null | null | main.py | ray2060/mathquiz | ebe0952f1768f382d0c4ae50c470a045a3446e0c | [
"MIT"
] | null | null | null | main.py | ray2060/mathquiz | ebe0952f1768f382d0c4ae50c470a045a3446e0c | [
"MIT"
] | null | null | null | import logging
from flask import Flask
import google.cloud.logging
from settings import DEBUG
from views import *
app = Flask(__name__)
app.add_url_rule('/', \
view_func=IndexView.as_view('index'))
app.add_url_rule('/a_plus_b', \
view_func=APlusBView.as_view('a_plus_b'))
if __name__ == '__main__':
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
client = google.cloud.logging.Client()
client.get_default_handler()
client.setup_logging()
app.run(host='127.0.0.1', port=8080, debug=DEBUG)
| 18.233333 | 53 | 0.696527 | import logging
from flask import Flask
import google.cloud.logging
from settings import DEBUG
from views import *
app = Flask(__name__)
app.add_url_rule('/', \
view_func=IndexView.as_view('index'))
app.add_url_rule('/a_plus_b', \
view_func=APlusBView.as_view('a_plus_b'))
if __name__ == '__main__':
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
client = google.cloud.logging.Client()
client.get_default_handler()
client.setup_logging()
app.run(host='127.0.0.1', port=8080, debug=DEBUG)
| true | true |
f732ca70a09568775aaad2535f0cab6bc3141d59 | 365 | py | Python | skstan/model/estimator.py | stenoritama/scikit-stan | bcc641689b7f795d3ffd4b9c8e0b0d3c315d3032 | [
"MIT"
] | 29 | 2017-04-13T00:06:47.000Z | 2022-01-11T04:56:26.000Z | skstan/model/estimator.py | stenoritama/scikit-stan | bcc641689b7f795d3ffd4b9c8e0b0d3c315d3032 | [
"MIT"
] | 51 | 2017-04-12T01:12:34.000Z | 2022-02-10T00:33:06.000Z | skstan/model/estimator.py | stenoritama/scikit-stan | bcc641689b7f795d3ffd4b9c8e0b0d3c315d3032 | [
"MIT"
] | 3 | 2017-04-10T02:33:37.000Z | 2019-01-08T18:13:33.000Z | from abc import ABCMeta
class BaseEstimator(metaclass=ABCMeta):
"""
Abstract base class for all estimators in scikit-stan.
"""
def get_params(self, deep=True):
"""
Parameters
----------
deep
Returns
-------
"""
pass
@classmethod
def _get_param_names(cls):
pass
| 14.038462 | 58 | 0.509589 | from abc import ABCMeta
class BaseEstimator(metaclass=ABCMeta):
def get_params(self, deep=True):
pass
@classmethod
def _get_param_names(cls):
pass
| true | true |
f732caff4bd4df01236d7fd78294a1a3d97ea44b | 17,394 | py | Python | readthedocs/search/tests/test_views.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | null | null | null | readthedocs/search/tests/test_views.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | null | null | null | readthedocs/search/tests/test_views.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | null | null | null | import re
import pytest
from django.contrib.auth.models import User
from django.test import override_settings
from django.urls import reverse
from django_dynamic_fixture import get
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.search.tests.utils import (
DATA_TYPES_VALUES,
get_search_query_from_project_file,
)
@pytest.mark.django_db
@pytest.mark.search
class TestProjectSearch:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def test_search_by_project_name(self, client, project, all_projects):
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
assert len(results) == 1
assert project.name == results[0]['name']
for proj in all_projects[1:]:
assert proj.name != results[0]['name']
def test_search_project_have_correct_language_facets(self, client, project):
"""Test that searching project should have correct language facets in the results"""
# Create a project in bn and add it as a translation
get(Project, language='bn', name=project.name)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
for facet in lang_facets:
assert facet[2] == False # because none of the facets are applied
def test_search_project_filter_language(self, client, project):
"""Test that searching project filtered according to language."""
# Create a project in bn and add it as a translation
translate = get(Project, language='bn', name=project.name)
search_params = { 'q': project.name, 'language': 'bn' }
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
# There should be only 1 result
assert len(results) == 1
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
# There should be 2 languages because both `en` and `bn` should show there
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug='docs')
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) > 0
other_projects = [
project.slug
for project in all_projects
if project.slug != 'docs'
]
for result in results:
assert result['name'] == 'docs'
assert result['name'] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
# Search for all projects.
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) == 0
@pytest.mark.django_db
@pytest.mark.search
@pytest.mark.usefixtures("all_projects")
class TestPageSearch:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def _get_highlight(self, result, field, type=None):
# if query is from page title,
# highlighted title is present in 'result.meta.highlight.title'
if not type and field == 'title':
highlight = result['highlights']['title']
# if result is not from page title,
# then results and highlighted results are present inside 'blocks'
else:
blocks = result['blocks']
assert len(blocks) >= 1
# checking first inner_hit
inner_hit_0 = blocks[0]
assert inner_hit_0['type'] == type
highlight = inner_hit_0['highlights'][field]
return highlight
def _get_highlighted_words(self, string):
highlighted_words = re.findall(
'<span>(.*?)</span>',
string
)
return highlighted_words
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('page_num', [0, 1])
def test_file_search(self, client, project, data_type, page_num):
data_type = data_type.split('.')
type, field = None, None
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query = get_search_query_from_project_file(
project_slug=project.slug,
page_num=page_num,
type=type,
field=field,
)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
# checking first result
result_0 = results[0]
highlight = self._get_highlight(result_0, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
# Make it lower because our search is case insensitive
assert word.lower() in query.lower()
def test_file_search_have_correct_role_name_facets(self, client):
"""Test that searching files should result all role_names."""
# searching for 'celery' to test that
# correct role_names are displayed
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': 'celery', 'type': 'file' }
)
assert len(results) >= 1
role_name_facets = facets['role_name']
role_name_facets_str = [facet[0] for facet in role_name_facets]
expected_role_names = ['py:class', 'py:function', 'py:method']
assert sorted(expected_role_names) == sorted(role_name_facets_str)
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
def test_file_search_filter_role_name(self, client):
"""Test that searching files filtered according to role_names."""
search_params = { 'q': 'celery', 'type': 'file' }
# searching without the filter
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
assert len(results) >= 2 # there are > 1 results without the filter
role_name_facets = facets['role_name']
for facet in role_name_facets:
assert facet[2] == False # because none of the facets are applied
confval_facet = 'py:class'
# checking if 'py:class' facet is present in results
assert confval_facet in [facet[0] for facet in role_name_facets]
# filtering with role_name=py:class
search_params['role_name'] = confval_facet
new_results, new_facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params
)
new_role_names_facets = new_facets['role_name']
# there is only one result with role_name='py:class'
# in `signals` page
assert len(new_results) == 1
first_result = new_results[0] # first result
blocks = first_result['blocks'] # blocks of first results
assert len(blocks) >= 1
inner_hit_0 = blocks[0] # first inner_hit
assert inner_hit_0['type'] == 'domain'
assert inner_hit_0['role'] == confval_facet
for facet in new_role_names_facets:
if facet[0] == confval_facet:
assert facet[2] == True # because 'std:confval' filter is active
else:
assert facet[2] == False
@pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
@pytest.mark.parametrize('case', ['upper', 'lower', 'title'])
def test_file_search_case_insensitive(self, client, project, case, data_type):
"""
Check File search is case insensitive.
It tests with uppercase, lowercase and camelcase.
"""
type, field = None, None
data_type = data_type.split('.')
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query_text = get_search_query_from_project_file(
project_slug=project.slug,
type=type,
field=field,
)
cased_query = getattr(query_text, case)
query = cased_query()
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' }
)
assert len(results) >= 1
first_result = results[0]
highlight = self._get_highlight(first_result, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_exact_match(self, client, project):
"""
Check quoted query match exact phrase.
Making a query with quoted text like ``"foo bar"`` should match exactly
``foo bar`` phrase.
"""
# `Sphinx` word is present both in `kuma` and `docs` files
# But the phrase `Sphinx uses` is present only in `kuma` docs.
# So search with this phrase to check
query = r'"Sphinx uses"'
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' })
# there must be only 1 result
# because the phrase is present in
# only one project
assert len(results) == 1
assert results[0]['project'] == 'kuma'
assert results[0]['domain'] == 'http://readthedocs.org'
assert results[0]['path'] == '/docs/kuma/en/latest/documentation.html'
blocks = results[0]['blocks']
assert len(blocks) == 1
assert blocks[0]['type'] == 'section'
highlight = self._get_highlight(results[0], 'content', 'section')
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_have_correct_project_facets(self, client, all_projects):
"""Test that file search have correct project facets in results"""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase
query = 'environment'
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
# There should be 2 search result
assert len(results) == 2
project_facets = facets['project']
project_facets_str = [facet[0] for facet in project_facets]
assert len(project_facets_str) == 2
# kuma and pipeline should be there
assert sorted(project_facets_str) == sorted(['kuma', 'docs'])
def test_file_search_filter_by_project(self, client):
"""Test that search result are filtered according to project."""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase but filter through `kuma` project
search_params = {
'q': 'environment',
'type': 'file',
'project': 'kuma'
}
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
project_facets = facets['project']
resulted_project_facets = [facet[0] for facet in project_facets]
# There should be 1 search result as we have filtered
assert len(results) == 1
# kuma should should be there only
assert 'kuma' == results[0]['project']
# But there should be 2 projects in the project facets
# as the query is present in both projects
assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])
@pytest.mark.xfail(reason='Versions are not showing correctly! Fixme while rewrite!')
def test_file_search_show_versions(self, client, all_projects, es_index, settings):
# override the settings to index all versions
settings.INDEX_ONLY_LATEST = False
project = all_projects[0]
# Create some versions of the project
versions = [get(Version, project=project) for _ in range(3)]
query = get_search_query_from_project_file(project_slug=project.slug)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': query, 'type': 'file' },
)
# Results can be from other projects also
assert len(results) >= 1
version_facets = facets['version']
version_facets_str = [facet[0] for facet in version_facets]
# There should be total 4 versions
# one is latest, and other 3 that we created above
assert len(version_facets) == 4
project_versions = [v.slug for v in versions] + [LATEST]
assert sorted(project_versions) == sorted(version_facets_str)
def test_file_search_subprojects(self, client, all_projects, es_index):
"""
TODO: File search should return results from subprojects also.
This is currently disabled because the UX around it is weird.
You filter by a project, and get results for multiple.
"""
project = all_projects[0]
subproject = all_projects[1]
# Add another project as subproject of the project
project.add_subproject(subproject)
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {
'q': query,
'type': 'file',
'project': project.slug,
}
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
assert len(results) == 0
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug='docs')
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={'q': 'the', 'type': 'file'},
)
assert len(results) > 0
other_projects = [
project.slug
for project in all_projects
if project.slug != 'docs'
]
for result in results:
assert result['project'] == 'docs'
assert result['project'] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={'q': 'the', 'type': 'file'},
)
assert len(results) == 0
| 36.389121 | 92 | 0.61044 | import re
import pytest
from django.contrib.auth.models import User
from django.test import override_settings
from django.urls import reverse
from django_dynamic_fixture import get
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.search.tests.utils import (
DATA_TYPES_VALUES,
get_search_query_from_project_file,
)
@pytest.mark.django_db
@pytest.mark.search
class TestProjectSearch:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse('search')
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context['results']
facets = resp.context['facets']
return results, facets
def test_search_by_project_name(self, client, project, all_projects):
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
assert len(results) == 1
assert project.name == results[0]['name']
for proj in all_projects[1:]:
assert proj.name != results[0]['name']
def test_search_project_have_correct_language_facets(self, client, project):
get(Project, language='bn', name=project.name)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={ 'q': project.name },
)
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
for facet in lang_facets:
assert facet[2] == False
def test_search_project_filter_language(self, client, project):
translate = get(Project, language='bn', name=project.name)
search_params = { 'q': project.name, 'language': 'bn' }
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
assert len(results) == 1
lang_facets = facets['language']
lang_facets_str = [facet[0] for facet in lang_facets]
assert len(lang_facets) == 2
assert sorted(lang_facets_str) == sorted(['en', 'bn'])
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug='docs')
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) > 0
other_projects = [
project.slug
for project in all_projects
if project.slug != 'docs'
]
for result in results:
assert result['name'] == 'docs'
assert result['name'] not in other_projects
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params={
'q': ' '.join(project.slug for project in all_projects),
'type': 'project',
},
)
assert len(results) == 0
@pytest.mark.django_db
@pytest.mark.search
@pytest.mark.usefixtures("all_projects")
class TestPageSearch:
    """Tests for file ("page") search: querying, highlighting, facets,
    filtering, and per-user visibility of results."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Resolve the search view URL once per test.
        self.url = reverse('search')

    def _get_search_result(self, url, client, search_params):
        """GET the search view and return its (results, facets) context."""
        resp = client.get(url, search_params)
        assert resp.status_code == 200
        results = resp.context['results']
        facets = resp.context['facets']
        return results, facets

    def _get_highlight(self, result, field, type=None):
        """Return the highlight list for *field*.

        Title highlights live directly on the result; any other field is
        read from the first inner-hit block, whose 'type' must match.
        """
        if not type and field == 'title':
            highlight = result['highlights']['title']
        else:
            blocks = result['blocks']
            assert len(blocks) >= 1
            inner_hit_0 = blocks[0]
            assert inner_hit_0['type'] == type
            highlight = inner_hit_0['highlights'][field]
        return highlight

    def _get_highlighted_words(self, string):
        """Extract all words wrapped in ``<span>`` highlight markup."""
        highlighted_words = re.findall(
            '<span>(.*?)</span>',
            string
        )
        return highlighted_words

    @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
    @pytest.mark.parametrize('page_num', [0, 1])
    def test_file_search(self, client, project, data_type, page_num):
        """Searching a query built from a project file highlights words from
        that query in the matching field."""
        # data_type is either 'field' or 'type.field'.
        data_type = data_type.split('.')
        type, field = None, None
        if len(data_type) < 2:
            field = data_type[0]
        else:
            type, field = data_type
        query = get_search_query_from_project_file(
            project_slug=project.slug,
            page_num=page_num,
            type=type,
            field=field,
        )
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': query, 'type': 'file' }
        )
        assert len(results) >= 1
        result_0 = results[0]
        highlight = self._get_highlight(result_0, field, type)
        assert len(highlight) == 1
        highlighted_words = self._get_highlighted_words(highlight[0])
        assert len(highlighted_words) >= 1
        for word in highlighted_words:
            # Every highlighted word must come from the query itself.
            assert word.lower() in query.lower()

    def test_file_search_have_correct_role_name_facets(self, client):
        """Results expose sphinx role-name facets, all initially unselected."""
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': 'celery', 'type': 'file' }
        )
        assert len(results) >= 1
        role_name_facets = facets['role_name']
        role_name_facets_str = [facet[0] for facet in role_name_facets]
        expected_role_names = ['py:class', 'py:function', 'py:method']
        assert sorted(expected_role_names) == sorted(role_name_facets_str)
        for facet in role_name_facets:
            # facet[2] is the "selected" flag.
            assert facet[2] == False

    def test_file_search_filter_role_name(self, client):
        """Filtering by a role_name narrows results to one and marks only
        that facet as selected."""
        search_params = { 'q': 'celery', 'type': 'file' }
        # Unfiltered search first: several results, nothing selected.
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params=search_params
        )
        assert len(results) >= 2
        role_name_facets = facets['role_name']
        for facet in role_name_facets:
            assert facet[2] == False
        confval_facet = 'py:class'
        assert confval_facet in [facet[0] for facet in role_name_facets]
        # Now repeat the search with the role_name filter applied.
        search_params['role_name'] = confval_facet
        new_results, new_facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params=search_params
        )
        new_role_names_facets = new_facets['role_name']
        assert len(new_results) == 1
        first_result = new_results[0]
        blocks = first_result['blocks']
        assert len(blocks) >= 1
        inner_hit_0 = blocks[0]
        assert inner_hit_0['type'] == 'domain'
        assert inner_hit_0['role'] == confval_facet
        for facet in new_role_names_facets:
            # Only the filtered facet is flagged as selected.
            if facet[0] == confval_facet:
                assert facet[2] == True
            else:
                assert facet[2] == False

    @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)
    @pytest.mark.parametrize('case', ['upper', 'lower', 'title'])
    def test_file_search_case_insensitive(self, client, project, case, data_type):
        """Queries match regardless of casing; highlighted words still come
        from the re-cased query."""
        type, field = None, None
        data_type = data_type.split('.')
        if len(data_type) < 2:
            field = data_type[0]
        else:
            type, field = data_type
        query_text = get_search_query_from_project_file(
            project_slug=project.slug,
            type=type,
            field=field,
        )
        # Re-case the query via str.upper / str.lower / str.title.
        cased_query = getattr(query_text, case)
        query = cased_query()
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': query, 'type': 'file' }
        )
        assert len(results) >= 1
        first_result = results[0]
        highlight = self._get_highlight(first_result, field, type)
        assert len(highlight) == 1
        highlighted_words = self._get_highlighted_words(highlight[0])
        assert len(highlighted_words) >= 1
        for word in highlighted_words:
            assert word.lower() in query.lower()

    def test_file_search_exact_match(self, client, project):
        """A quoted phrase query matches exactly one known section."""
        query = r'"Sphinx uses"'
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': query, 'type': 'file' })
        assert len(results) == 1
        assert results[0]['project'] == 'kuma'
        assert results[0]['domain'] == 'http://readthedocs.org'
        assert results[0]['path'] == '/docs/kuma/en/latest/documentation.html'
        blocks = results[0]['blocks']
        assert len(blocks) == 1
        assert blocks[0]['type'] == 'section'
        highlight = self._get_highlight(results[0], 'content', 'section')
        assert len(highlight) == 1
        highlighted_words = self._get_highlighted_words(highlight[0])
        assert len(highlighted_words) >= 1
        for word in highlighted_words:
            assert word.lower() in query.lower()

    def test_file_search_have_correct_project_facets(self, client, all_projects):
        """A query matching files in two projects yields both project facets."""
        query = 'environment'
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': query, 'type': 'file' },
        )
        assert len(results) == 2
        project_facets = facets['project']
        project_facets_str = [facet[0] for facet in project_facets]
        assert len(project_facets_str) == 2
        assert sorted(project_facets_str) == sorted(['kuma', 'docs'])

    def test_file_search_filter_by_project(self, client):
        """Filtering by project narrows results but keeps all project facets."""
        search_params = {
            'q': 'environment',
            'type': 'file',
            'project': 'kuma'
        }
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params=search_params,
        )
        project_facets = facets['project']
        resulted_project_facets = [facet[0] for facet in project_facets]
        assert len(results) == 1
        assert 'kuma' == results[0]['project']
        assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])

    @pytest.mark.xfail(reason='Versions are not showing correctly! Fixme while rewrite!')
    def test_file_search_show_versions(self, client, all_projects, es_index, settings):
        """With INDEX_ONLY_LATEST disabled, every version shows as a facet."""
        settings.INDEX_ONLY_LATEST = False
        project = all_projects[0]
        versions = [get(Version, project=project) for _ in range(3)]
        query = get_search_query_from_project_file(project_slug=project.slug)
        results, facets = self._get_search_result(
            url=self.url,
            client=client,
            search_params={ 'q': query, 'type': 'file' },
        )
        assert len(results) >= 1
        version_facets = facets['version']
        version_facets_str = [facet[0] for facet in version_facets]
        # The 3 versions created above plus LATEST.
        assert len(version_facets) == 4
        project_versions = [v.slug for v in versions] + [LATEST]
        assert sorted(project_versions) == sorted(version_facets_str)

    def test_file_search_subprojects(self, client, all_projects, es_index):
        """Searching with a parent-project filter does not surface subproject
        files (asserted by the zero-result expectation below)."""
        project = all_projects[0]
        subproject = all_projects[1]
        project.add_subproject(subproject)
        # Query text comes from the subproject's files.
        query = get_search_query_from_project_file(project_slug=subproject.slug)
        search_params = {
            'q': query,
            'type': 'file',
            'project': project.slug,
        }
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params=search_params,
        )
        assert len(results) == 0

    @override_settings(ALLOW_PRIVATE_REPOS=True)
    def test_search_only_projects_owned_by_the_user(self, client, all_projects):
        """With private repos enabled, file results come only from projects
        the logged-in user owns."""
        # The user owns only the "docs" project.
        project = Project.objects.get(slug='docs')
        user = get(User)
        user.projects.add(project)
        client.force_login(user)
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={'q': 'the', 'type': 'file'},
        )
        assert len(results) > 0
        other_projects = [
            project.slug
            for project in all_projects
            if project.slug != 'docs'
        ]
        for result in results:
            assert result['project'] == 'docs'
            assert result['project'] not in other_projects

    @override_settings(ALLOW_PRIVATE_REPOS=True)
    def test_search_no_owned_projects(self, client, all_projects):
        """A user who owns no projects sees no file results when private
        repos are enabled."""
        user = get(User)
        assert user.projects.all().count() == 0
        client.force_login(user)
        results, _ = self._get_search_result(
            url=self.url,
            client=client,
            search_params={'q': 'the', 'type': 'file'},
        )
        assert len(results) == 0
| true | true |
f732cb73a738f5a5f977c299883d8a8ebefb1984 | 25,080 | py | Python | tpapi/tests.py | ash30/tpapi | b3758b609c58487052db5830ffe42ab6888187b1 | [
"MIT"
] | null | null | null | tpapi/tests.py | ash30/tpapi | b3758b609c58487052db5830ffe42ab6888187b1 | [
"MIT"
] | null | null | null | tpapi/tests.py | ash30/tpapi | b3758b609c58487052db5830ffe42ab6888187b1 | [
"MIT"
] | null | null | null | import unittest
import re
import json
import collections
from collections import namedtuple
import client,api,entities
# TODO:
# Test multiple get_entities calls
# so that the second one uses the cached value
# Really - the class factory needs a delegate to call inorder to get
# the meta data. THE CLIENT SHOULDN"T NEED TO TEST FOR CLASS EXISTANCE
# MOCKS
class MockCallable(object):
    """Callable test double that records its most recent invocation.

    ``response`` may be a plain value (returned as-is) or a callable, in
    which case it is invoked with the same arguments as the mock.
    """
    fcall = namedtuple('fcall',['args','kwargs'])

    def __init__(self,response=None):
        self.last_call = None
        self.response = response

    def __call__(self,*args,**kwargs):
        # Remember the call so tests can assert on args/kwargs afterwards.
        self.last_call = self.fcall(args,kwargs)
        if callable(self.response):
            return self.response(*args,**kwargs)
        return self.response
class MockObject(object):
    """Bare object whose attributes are taken from the keyword arguments."""
    def __init__(self,**kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
# UNIT TESTS
# == client.py Tests == #
class HTTPRequestDispatcherTests(unittest.TestCase):
    """Unit tests for HTTPRequestDispatcher.encode_params."""

    def setUp(self):
        self.test_instance = client.HTTPRequestDispatcher()

    def test_encode_params_list(self):
        "Lists are joined with commas (used for e.g. ids=1,2,3 context params)"
        encoded = self.test_instance.encode_params({'test': [1, 2, 3]})
        self.assertEqual(encoded, "test=1,2,3")

    def test_encode_params_str(self):
        "Plain strings pass through as key=value"
        encoded = self.test_instance.encode_params({'test': "foobar"})
        self.assertEqual(encoded, "test=foobar")

    def test_encode_params_unicode(self):
        "Unicode keys/values encode the same way as plain strings"
        encoded = self.test_instance.encode_params({u'test': u"foobar"})
        self.assertEqual(encoded, "test=foobar")

    def test_encode_params_int(self):
        "Integers are stringified into key=value"
        encoded = self.test_instance.encode_params({'test': 123})
        self.assertEqual(encoded, "test=123")
class TPBasicClientTests(unittest.TestCase):
    """BasicClient adapts HTTPRequestDispatcher; verify each action method
    delegates correctly and massages the response for callers.
    """
    TEST_BASE_URL = 'testurl'

    def setUp(self):
        "Wire the client to a mock dispatcher so responses can be injected"
        self.request_response = [[1, 2, 3]]
        def fake_get(url, params):
            return self.request_response
        def fake_post(url, params, msg, response_format):
            return self.request_response
        self.mock_dispatcher = MockObject(
            paginated_get_request=MockCallable(response=fake_get),
            post_request=MockCallable(response=fake_post),
        )
        self.test_client = client.BasicClient(
            self.TEST_BASE_URL, self.mock_dispatcher,
        )

    # Method call tests
    def test_get_entities_http_request(self):
        "get_entities issues a paginated GET and yields the items"
        returned = list(self.test_client.get_entities('test_entity'))
        self.assertEqual(returned, [1, 2, 3])

    def test_create_entity_http_request(self):
        "create_entity POSTs and hands the raw response straight back"
        self.request_response = "client just returns response"
        returned = self.test_client.create_entity('test_entity', {})
        self.assertEqual(returned, self.request_response)

    # Client functionality
    def test_get_entities_chains_multi_iterable(self):
        """Paginated responses (a list of lists) are flattened into one
        iterable so callers never see page boundaries."""
        self.request_response = [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
        returned = list(self.test_client.get_entities('test_entity'))
        self.assertEqual(returned, range(10))

    def test_request_call_includes_baseurl(self):
        "Every URL handed to the dispatcher is prefixed with the base url"
        list(self.test_client.get_entities('test_entity'))
        self.assertEqual(
            self.mock_dispatcher.paginated_get_request.last_call.args[0],
            "/".join([self.TEST_BASE_URL, "test_entity"]),
        )
class TPClientEntityLimitTests(unittest.TestCase):
    """get_entities accepts a return_limit safety valve so we never fan out
    into an unbounded number of 25-item requests."""

    def setUp(self):
        "Client backed by a mock dispatcher whose response we control"
        self.request_response = [[1, 2, 3, 4, 5]]
        def fake_get(url, params):
            return self.request_response
        self.mock_dispatcher = MockObject(
            paginated_get_request=MockCallable(response=fake_get),
        )
        self.test_client = client.BasicClient("test", self.mock_dispatcher)

    def test_limit_more_than_response_length(self):
        "With the default limit (50) every available item is returned"
        items = list(self.test_client.get_entities('test_entity'))
        self.assertTrue(len(items) == 5)

    def test_limit_less_than_response_length(self):
        "A limit below the response size truncates the result"
        items = list(self.test_client.get_entities('test_entity', return_limit=3))
        self.assertTrue(len(items) == 3)

    def test_limit_spans_multiple_requests(self):
        "Limits apply across page boundaries, not per request"
        self.request_response = [range(10), range(10, 20)]
        items = list(self.test_client.get_entities('test_entity', return_limit=15))
        self.assertEqual(items, range(15))

    def test_limit_is_unsupported(self):
        """Negative ints, floats, non-numbers and zero are all rejected with
        an AssertionError (zero would otherwise silently return nothing)."""
        for bad_limit in (-1, 0.1, "s", 0):
            with self.assertRaises(AssertionError):
                list(self.test_client.get_entities(
                    'test_entity', return_limit=bad_limit,
                ))
class ObjectMappingClientTests(unittest.TestCase):
    """ The conversion of entity data to entity instances is done in
    a specific subclass. These tests confirm the right instance
    is created for a given entity endpoint as data retrieval is already covered.
    """
    def setUp(self):
        "Setup client with mock requester so we can feed in request responses"
        # Setup mock client
        self.request_response = [[1,2,3,4,5]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
            post_request = MockCallable(
                response = lambda url,params,data,response_format:self.request_response
            )
        )
        # setup mock class factory
        class MockEntity(object):
            # Minimal entity: keeps the raw data it was built from.
            def __init__(self,data):
                self.d = data
            @classmethod
            def create_from_data(cls,d):
                return cls(d)
            def toDict(self):
                return self.d
        # Mock factory will return a new subclass of MockEntity tagged with
        # the endpoint name and immutability flag it was asked for, so tests
        # can verify what the client requested.
        self.mock_factory = MockObject(
            get = MockCallable(
                response = lambda entity,immutable: type('MockEntitySubclass',(MockEntity,),{
                    'name':entity,'immutable':immutable
                })
            )
        )
        self.test_client = client.ObjectMappingClient(
            "test",self.mock_dispatcher,MockCallable(response=self.mock_factory)
        )
    def test_get_entities_return_class(self):
        "Entity data is instantiated by entity classes based on entity_endpoint"
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        # Test mock 'get' method of factory was passed entity endpoint
        # also test response data was passed to init
        for i in test_inst:
            self.assertEqual(i.name,'test_entity')
            self.assertIn(i.d,range(1,6))
    def test_create_entity_return_class(self):
        "Test we return an immutable entity and passed the post data to init"
        self.request_response = {'foo':'bar'}
        test_inst = self.test_client.create_entity('test_entity',{'foo':'bar'})
        self.assertTrue(test_inst.immutable)
        self.assertEqual(test_inst.d['foo'],'bar')
        self.assertEqual(test_inst.name,'test_entity')
    def test_get_entities_empty_response(self):
        """ If the query result has no items, get entities shouldn't fail
        aka instanciate stuff without data
        """
        self.request_response = [[]]
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,[])
# == Api.py Tests == #
class QueryTests(unittest.TestCase):
    """ Querys form the basis of the public api. They mainly wrap the client
    but have some new functionality in how they accept and transform input
    and output args.
    """
    def setUp(self):
        # The mock client echoes (entity_endpoint, params) back so tests
        # can inspect exactly what the query forwarded.
        self.mock_client = MockObject(
            get_entities=MockCallable(
                response=lambda entity_endpoint,params,return_limit:(entity_endpoint,params)
            )
        )

    # Default args
    # BUG FIX: both of the following tests used to be named
    # 'test_default_args', so the first definition was shadowed by the
    # second and never ran. They now have distinct names.
    def test_single_default_arg(self):
        "We can pass a key val pair at init time that will always be a part of params"
        test_query = api.Query(self.mock_client,acid='helloWorld')
        test_inst = test_query.get('Bugs')
        self.assertEqual(test_inst[1].get('acid'),'helloWorld')

    def test_multiple_default_args(self):
        "We can pass multiple default kwargs for inclusion into params"
        test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar")
        test_inst = test_query.get('Bugs')
        self.assertEqual(test_inst[1].get('acid'),'helloWorld')
        self.assertEqual(test_inst[1].get('foo'),'bar')

    def test_get_id_return(self):
        "When specifying an Entity Id, we expect a single entity to be returned"
        # redefine mock client to return an iterator so the query has to
        # pick out the single element itself
        self.mock_client = MockObject(
            get_entities=MockCallable(
                response=lambda entity_endpoint,params,return_limit:iter([entity_endpoint,1])
            )
        )
        test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar")
        test_inst = test_query.get('Bugs',Id=1)
        # Test that we didn't get back a list, instead the 1st element
        self.assertTrue(isinstance(test_inst,str))
        self.assertEqual(test_inst,'Bugs/1')

    def test_check_endpoint_exists(self):
        "We guard against non existant endpoints to save on the network request"
        with self.assertRaises(AssertionError):
            test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar")
            test_inst = test_query.get('foobar')
# == entities.py Tests == #
class EntityBaseTests(unittest.TestCase):
    """EntityBase behaviour: attribute access over the TP data cache,
    immutability, Id-based equality, and hashing."""

    class mock_object(object):
        # Ad-hoc attribute bag available to tests in this suite.
        def __init__(self,**kwargs):
            self.__dict__.update(kwargs)

    # Data Access Tests
    def test_getattr_Tpdata(self):
        'I can retrieve value from TP data cache via attribute lookup'
        i = entities.EntityBase(data={
            'data1':'a',
            'data2':1,
            'data3':[1,2]
        })
        self.assertEqual(i.data1,'a')
        self.assertEqual(i.data2,1)
        self.assertEqual(i.data3,[1,2])

    def test_setattr_Tpdata(self):
        "I cannot edit tpdata cache ref aka entity instance is immutable"
        i = entities.EntityBase(data={'data1':'a'})
        with self.assertRaises(AssertionError):
            i.data1 = 'b'

    def testEntitySubclass_setattr(self):
        "Entity subclasses are still immutable"
        class test(entities.EntityBase):
            pass
        i = test(data={})
        with self.assertRaises(AssertionError):
            i.data1 = 'arbitrary string'

    # Comparison Tests
    def test_entityComparisonTrue(self):
        "Entities with same id should be equal"
        i = entities.EntityBase(data={'Id':1})
        j = entities.EntityBase(data={'Id':1,'onlyIdsMatter':2})
        self.assertEqual(i,j)

    def test_entityComparisonFalse(self):
        "Entities with different Ids should not be equal"
        i = entities.EntityBase(data={'Id':100})
        j = entities.EntityBase(data={'Id':1,'onlyIdsMatter':100})
        self.assertNotEqual(i,j)

    def test_entityComparisonNoId(self):
        "An entity without id can never be equal"
        i = entities.EntityBase(data={'noId':1})
        self.assertNotEqual(i,i)

    # Hashable Tests
    def test_entityHashingTrue(self):
        "An entity with an Id can be used as a dict key"
        i = entities.EntityBase(data={'Id':100})
        try:
            d = {i:"isHashable"}
        except:
            raise Exception("Entity isn't hashable")

    def test_entityHashingNoId(self):
        "An entity without an Id should not be usable as a dict key"
        # BUG FIX: this test previously built an entity that *did* have an
        # Id and passed a dict to assertRaises without a callable, which
        # asserted nothing. Use the context-manager form on an Id-less
        # entity instead.
        i = entities.EntityBase(data={'noId':100})
        with self.assertRaises(Exception):
            d = {i:"isn't Hashable"}
class MutableEntityTests(unittest.TestCase):
    # Placeholder suite: MutableEntity setter behaviour is not yet covered.
    def test_setProperty(self):
        "on a mutable entity, setattr will forward to property objects setter"
        # TODO: implement; currently a no-op placeholder.
        pass
class EntityFactoryTests(unittest.TestCase):
    """EntityClassFactory should turn a metadata response into a usable
    entity class, falling back to GenericEntity on bad metadata."""
    _TESTDATA = './testdata.json'

    def setUp(self):
        "Load canned metadata and point the factory at a mock client"
        with open(self._TESTDATA) as f:
            self.test_data = json.load(f)
        self.test_client = MockObject(
            raw_request=MockCallable(response=lambda url: self.test_data),
        )
        self.test_class_factory = entities.EntityClassFactory(
            self.test_client
        )

    def _make_bug(self):
        # Convenience: build an (empty) instance of the generated Bugs class.
        return self.test_class_factory.get('Bugs')({})

    def test_metadataFailsToParse(self):
        "If error occurs reading metadata we should get a Generic Entity"
        self.test_data = {}
        self.assertIsInstance(self._make_bug(), entities.GenericEntity)

    def test_classCreation_value_attribute(self):
        "Parse meta data and assign value properties"
        instance = self._make_bug()
        cls_dict = instance.__class__.__dict__
        self.assertIn("Name", cls_dict)
        self.assertIsInstance(cls_dict['Name'], entities.ValueAttribute)

    def test_classCreation_resource_attribute(self):
        "Parse meta data and assign resource properties"
        instance = self._make_bug()
        cls_dict = instance.__class__.__dict__
        self.assertIn("Release", cls_dict)
        self.assertIsInstance(cls_dict['Release'], entities.ResourceAttribute)

    def test_classCreation_collection_attribute(self):
        "Parse meta data and assign Collection properties"
        instance = self._make_bug()
        cls_dict = instance.__class__.__dict__
        self.assertIn("Comments", cls_dict)
        self.assertIsInstance(cls_dict["Comments"], entities.CollectionAttribute)

    def test_get_mutable_entity_class(self):
        "Factory should be able to supply a mutable version of an entity"
        mutable_cls = self.test_class_factory.get('Bugs', immutable=False)
        self.assertTrue(issubclass(mutable_cls, entities.MutableEntity))

    def test_get_all_property_info(self):
        "User should be able to reflect over all class properties"
        instance = self._make_bug()
        # All three kinds of property should be present in the reflection dict.
        for prop in ('Comments', 'Release', 'Name'):
            self.assertIn(prop, instance.entity_properties)
# Entity Property Tests #
class BasePropertyTests(unittest.TestCase):
    """EntityProperty mostly exposes the metadata it was built from; the
    interesting behaviour lives in its subclasses."""

    def setUp(self):
        self.test_property = entities.EntityProperty('name','uri/meta',{'meta1':'foo'})

    def test_get_meta_return(self):
        "A property hands back the metadata it was initialised with"
        self.assertEqual(self.test_property.get_meta()['meta1'], 'foo')

    def test_meta_contains_relURI(self):
        "Metadata includes a derived 'RelUri' entity-endpoint reference"
        self.assertEqual(self.test_property.get_meta()['RelUri'], 'uri')

    def test_meta_data_is_copy(self):
        "get_meta returns a copy, so callers cannot mutate the metadata"
        mutated = self.test_property.get_meta()
        mutated['new_attr'] = 1
        self.assertNotIn('new_attr', self.test_property.get_meta())
class ValuePropertiesTests(unittest.TestCase):
    """ValueAttribute descriptors read and write straight through _tpdata."""

    def setUp(self):
        class Host(object):
            test_property = entities.ValueAttribute(
                name='test_property', uri="",
            )
            test_error_property = entities.ValueAttribute(
                name='not there', uri="",
            )
            def __init__(self, value):
                self._tpdata = {'test_property': value}
        self.test_class = Host

    def test_valueDescriptorGet(self):
        "Reading the descriptor returns the value held in _tpdata"
        self.assertEqual(self.test_class(99).test_property, 99)

    def test_valueDescriptorSet(self):
        "Writing the descriptor updates the value in _tpdata"
        host = self.test_class(99)
        host.test_property = 1
        self.assertEqual(host._tpdata['test_property'], 1)

    def test_valueDescriptorSet_missing_attr(self):
        "Setting a property absent from _tpdata just creates it, no error"
        host = self.test_class(99)
        host.test_error_property = 1
        self.assertEqual(host._tpdata['not there'], 1)

    def test_valueDescriptorGetNoValue(self):
        "A stored None comes back as None"
        self.assertEqual(self.test_class(None).test_property, None)

    def test_valueDescriptorGetDataNotPresent(self):
        "A property missing from the initial TP data reads as None"
        self.assertEqual(self.test_class(None).test_error_property, None)
class ResourcePropertiesTests(unittest.TestCase):
    """ResourceAttribute descriptors lazily fetch full resource data via the
    client, since _tpdata only holds a sparse reference."""

    def setUp(self):
        self.test_client = MockObject(
            get_entities=MockCallable(response=iter([{"Name": "helloWorld"}])),
        )
        mock_client = self.test_client
        class Host(object):
            TP = mock_client
            test_property = entities.ResourceAttribute(
                name='test_property', uri='spam/meta', metadata={},
            )
            test_error_property = entities.ResourceAttribute(
                name='not there', uri="",
            )
            def __init__(self, value):
                self._tpdata = {
                    'test_property': value
                }
        self.test_class = Host

    def test_ResourcePropertyWithoutAnyData(self):
        "No stored reference means no resource is assigned: return None"
        self.assertEqual(self.test_class(None).test_property, None)

    def test_ResourcePropertyCallsClientCorrectly(self):
        "The descriptor fetches full data, building the url from uri + Id"
        host = self.test_class({'Name': 'foobar', "ResourceType": 'chips', 'Id': 1})
        self.assertEqual(host.test_property['Name'], 'helloWorld')
        # The url comes from the property's uri ('spam') plus the Id; the
        # ResourceType held in the initial data is ignored.
        self.assertEqual(self.test_client.get_entities.last_call.args[0], 'spam/1')

    def test_ResourcePropertyCanSetToOtherEntity(self):
        "Assigning an entity stores a sparse {'Id': ...} reference"
        host = self.test_class(None)
        host.test_property = MockObject(Id=999)
        self.assertEqual(host._tpdata['test_property'], {'Id': 999})
class CollectionPropertiesTests(unittest.TestCase):
    """CollectionAttribute descriptors: return inline data when present,
    otherwise fetch the collection through the client."""

    def setUp(self):
        self.test_client = MockObject(
            get_entities=MockCallable(
                response=iter([{"Name": "helloWorld"}, {"Name": "Goodbye"}])
            ),
        )
        mock_client = self.test_client
        class Host(object):
            TP = mock_client
            _api_endpoint = "foo"
            test_property = entities.CollectionAttribute(
                name='test_property', uri='spam/meta',
            )
            def __init__(self, value):
                self._tpdata = {
                    'test_property': value,
                    'Id': 1,
                }
            def __getattr__(self, name):
                # Mimic GenericEntity's lookup into _tpdata.
                return self._tpdata[name]
        self.test_class = Host

    def test_trivialCollectionInData(self):
        "Inline collection data is wrapped in GenericEntity and returned"
        host = self.test_class([
            {'Name': 'foobar'},
            {'Name': 'HelloWorld'},
        ])
        collection = host.test_property
        self.assertEqual(len(collection), 2)
        self.assertEqual(collection[0].Name, 'foobar')
        self.assertIsInstance(collection[0], entities.GenericEntity)

    def test_CollectionCallsClientCorrectly(self):
        "Without inline data the client is queried at <endpoint>/<id>/<uri>"
        host = self.test_class(None)
        self.assertNotEqual(host.test_property, None)
        self.assertEqual(
            self.test_client.get_entities.last_call.args[0], 'foo/1/spam'
        )
# Integration Tests
class IntegrationTests(unittest.TestCase):
    """ Here we setup a full object graph and see if a request from the
    api layer can make its way all the way through and back again returning
    entity instances. We mock out the very lowest level, the requests
    module handle in HTTPRequestDispatcher, and supply our own data to the
    requests.
    """
    def setUp(self):
        self.TESTACID='TESTACIDSTR'
        # Mock responses are keyed by url regex; keys need to run from root
        # to specific in order to be matched correctly,
        # e.g { "test":1,"test/123/":2, ... }
        self.mock_responses = {
            r"Test/Context/\?ids=111":{
                'Items':[{'Acid':'foo'}]
            },
            r"Test/Context/meta": {
                'This will error to a generic Entity':1
            },
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1'},{'Id':2,'Name':'Item2'}
                ]
            },
            "Test/Bugs/meta":{
                'Name':"Bug",
                'ResourceMetadataPropertiesDescription':{
                    "ResourceMetadataProperties"\
                    "ResourceValuesDescription":{"Items":[
                        {"Name":"Id"},{"Name":"ValueAttrExample"}]},
                    "ResourceMetadataProperties"\
                    "ResourceReferencesDescription":{"Items":[{"Name":"ResourceAttrExample"}]},
                },
            },
        }
        def mock_request(method,url,auth,**kwargs):
            # Stand-in for requests.request: find the first mock_responses
            # entry whose regex matches the url and wrap it in an object
            # exposing .json() / .raise_for_status() like a real response.
            try:
                return MockObject(
                    json = MockCallable(response = [
                        v for k,v in self.mock_responses.iteritems()
                        if re.match(r"^("+ k +r")/?\??(&?format=json)?(?!.)",url)][0]
                    ),
                    raise_for_status = MockCallable(response=None)
                )
            except IndexError:
                raise Exception("Mock Request couldn't match {}".format(url or "None"))
        # Mock out requests.py for test client
        self.test_requester = client.HTTPRequestDispatcher()
        self.test_requester._requests = MockObject(
            request = mock_request
        )
        self.test_client = client.TPEntityClient(
            url = 'Test',
            requester = self.test_requester,
        )
        self.test_project = api.ProjectProxy(self.test_client,MockObject(Id=111))
    def test_simple_query_request(self):
        "Project attributes should return iter of Generic Entities"
        # Bad meta should fail and return generic entities
        self.mock_responses.update({
            r"Test/Bugs/meta":{
                'ResourceMetadataPropertiesDescription':{
                },
            },
        })
        items = [x for x in self.test_project.get("Bugs")]
        # We should get back 2 generic entities
        self.assertTrue(len(items) == 2 )
        self.assertTrue(items[0].Name == 'Item1')
        self.assertTrue(items[0].Id == 1)
        self.assertIsInstance(items[0],entities.GenericEntity)
    def test_EntityClass_from_request(self):
        "This tests to make sure the class factory instanciates dynamic classes"
        # Response items carry ValueAttrExample, which the Bugs metadata
        # declares, so a dynamic (non-generic) class should be produced.
        self.mock_responses.update({
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1','ValueAttrExample':1},
                    {'Id':2,'Name':'Item2','ValueAttrExample':2},
                ]
            },
        })
        items = [ x for x in self.test_project.get('Bugs')]
        self.assertTrue(len(items) == 2 )
        self.assertNotIsInstance(items[0],entities.GenericEntity)
        self.assertEqual(items[0].ValueAttrExample, 1)
    def test_queryEntityWithoutID(self):
        "I can create a query for entities (like Contexts) that don't have an ID"
        self.mock_responses.update({
            r"Test/Context/\?acid=foo":{
                "Items":[{'ItemWithoutId':1,'Name':'Context'}]
            }
        })
        # Get bugs from project
        items = [x for x in self.test_project.get('Context')]
        # Make sure Returned Entity is Correct and with ID
        self.assertEqual(len(items),1)
        self.assertEqual(items[0].Name,'Context')
        self.assertIsInstance(items[0],entities.GenericEntity)
        with self.assertRaises(AttributeError) as e:
            items[0].Id
    def test_createEntity(self):
        "I can create a query to create an entity within a TP Project"
        # Try creating a test bug with value and resource based attrs
        bug_data = {
            'Id': 0,
            'ValueAttrExample':'NewBug',
            'ResourceAttrExample':MockObject(Id=1)
        }
        returned_bug_data = bug_data.copy()
        returned_bug_data['Id']=123
        self.mock_responses.update({
            r"Test/Bugs":returned_bug_data
        })
        # Assert returned bug has same data as input data
        # plus now has an ID
        new_bug = self.test_project.create('Bugs',bug_data)
        self.assertEqual(new_bug.ValueAttrExample,'NewBug')
        self.assertEqual(new_bug.Id,123)
if __name__ == "__main__":
    # Run the full suite when executed directly.
    # FIX: dropped the un-Pythonic trailing semicolon.
    unittest.main()
| 35.879828 | 95 | 0.697648 | import unittest
import re
import json
import collections
from collections import namedtuple
import client,api,entities
# MOCKS
class MockCallable(object):
    """Callable test double that records its most recent invocation.

    ``response`` is returned as-is, or invoked with the same arguments
    when it is itself callable.
    """
    fcall = namedtuple('fcall',['args','kwargs'])
    def __init__(self,response=None):
        self.last_call = None
        self.response = response
    def __call__(self,*args,**kwargs):
        # Remember the call so tests can assert on args/kwargs afterwards.
        self.last_call = self.fcall(args,kwargs)
        return self.response(*args,**kwargs) if callable(self.response) else self.response
class MockObject(object):
    """Bare object whose attributes come from the given keyword arguments."""
    def __init__(self,**kwargs):
        self.__dict__.update(kwargs)
# UNIT TESTS
# == client.py Tests == #
class HTTPRequestDispatcherTests(unittest.TestCase):
    """Unit tests for HTTPRequestDispatcher.encode_params."""
    def setUp(self):
        self.test_instance = client.HTTPRequestDispatcher()
    def test_encode_params_list(self):
        "Lists are encoded as comma-joined values"
        # The only time I can think this is called
        # is when using ids=123,1234 for context
        n = self.test_instance.encode_params({'test':[1,2,3]})
        self.assertEqual(n,"test=1,2,3")
    def test_encode_params_str(self):
        "Plain strings pass through as key=value"
        n = self.test_instance.encode_params({'test':"foobar"})
        self.assertEqual(n,"test=foobar")
    def test_encode_params_unicode(self):
        "Unicode input encodes the same as plain strings"
        n = self.test_instance.encode_params({u'test':u"foobar"})
        self.assertEqual(n,"test=foobar")
    def test_encode_params_int(self):
        "Integers are stringified into key=value"
        n = self.test_instance.encode_params({'test':123})
        self.assertEqual(n,"test=123")
class TPBasicClientTests(unittest.TestCase):
    """BasicClient adapts HTTPRequestDispatcher; these tests prove each
    action method delegates correctly."""
    TEST_BASE_URL = 'testurl'
    def setUp(self):
        "Setup client with mock requester so we can feed in request responses"
        self.request_response = [[1,2,3]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
            post_request = MockCallable(
                response = lambda url,params,msg,response_format:self.request_response
            ),
        )
        self.test_client = client.BasicClient(
            self.TEST_BASE_URL,self.mock_dispatcher
        )
    # Method call tests
    def test_get_entities_http_request(self):
        "get_entities should send a paginated GET request and yield the items"
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,[1,2,3])
    def test_create_entity_http_request(self):
        "create_entity should send a POST request and return the response"
        self.request_response = "client just returns response"
        test_inst = self.test_client.create_entity('test_entity',{})
        self.assertEqual(test_inst,self.request_response)
    # Client functionality
    def test_get_entities_chains_multi_iterable(self):
        "A list of result pages is presented as one flat iterable"
        self.request_response = [[0,1,2,3],[4,5,6],[7,8,9]]
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,range(10))
    def test_request_call_includes_baseurl(self):
        "The client always passes full urls (base + endpoint) to the requester"
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(
            self.mock_dispatcher.paginated_get_request.last_call.args[0],
            "/".join([self.TEST_BASE_URL,"test_entity"])
        )
class TPClientEntityLimitTests(unittest.TestCase):
    """Tests for the return_limit argument of BasicClient.get_entities."""

    def setUp(self):
        self.request_response = [[1, 2, 3, 4, 5]]
        self.mock_dispatcher = MockObject(
            paginated_get_request=MockCallable(
                response=lambda url, params: self.request_response
            ),
        )
        self.test_client = client.BasicClient(
            "test", self.mock_dispatcher
        )

    def _fetch(self, **kwargs):
        # Realise the generator so length/content can be asserted.
        return list(self.test_client.get_entities('test_entity', **kwargs))

    def test_limit_more_than_response_length(self):
        # default limit = 50, larger than the five available items
        self.assertTrue(len(self._fetch()) == 5)

    def test_limit_less_than_response_length(self):
        self.assertTrue(len(self._fetch(return_limit=3)) == 3)

    def test_limit_spans_multiple_requests(self):
        self.request_response = [range(10), range(10, 20)]
        self.assertEqual(self._fetch(return_limit=15), range(15))

    def test_limit_is_unsupported(self):
        # all invalid limits raise AssertionError
        for bad_limit in (-1, 0.1, "s", 0):
            with self.assertRaises(AssertionError):
                self._fetch(return_limit=bad_limit)
class ObjectMappingClientTests(unittest.TestCase):
    """Tests that ObjectMappingClient wraps raw response data in the classes
    produced by its entity-class factory."""
    def setUp(self):
        # Setup mock client
        self.request_response = [[1,2,3,4,5]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
            post_request = MockCallable(
                response = lambda url,params,data,response_format:self.request_response
            )
        )
        # setup mock class factory
        class MockEntity(object):
            def __init__(self,data):
                self.d = data
            @classmethod
            def create_from_data(cls,d):
                return cls(d)
            def toDict(self):
                return self.d
        # Mock factory will return new subclass of mock; the subclass records
        # which endpoint / immutability flag the factory was asked for.
        self.mock_factory = MockObject(
            get = MockCallable(
                response = lambda entity,immutable: type('MockEntitySubclass',(MockEntity,),{
                    'name':entity,'immutable':immutable
                })
            )
        )
        self.test_client = client.ObjectMappingClient(
            "test",self.mock_dispatcher,MockCallable(response=self.mock_factory)
        )
    def test_get_entities_return_class(self):
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        # Test mock 'get' method of factory was passed entity endpoint
        # also test response data was passed to init
        for i in test_inst:
            self.assertEqual(i.name,'test_entity')
            self.assertIn(i.d,range(1,6))
    def test_create_entity_return_class(self):
        # Created entities come back wrapped in an *immutable* entity class.
        self.request_response = {'foo':'bar'}
        test_inst = self.test_client.create_entity('test_entity',{'foo':'bar'})
        self.assertTrue(test_inst.immutable)
        self.assertEqual(test_inst.d['foo'],'bar')
        self.assertEqual(test_inst.name,'test_entity')
    def test_get_entities_empty_response(self):
        # An empty page must yield an empty result, not an error.
        self.request_response = [[]]
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,[])
# == Api.py Tests == #
class QueryTests(unittest.TestCase):
    """Tests for api.Query default-argument handling and id-based lookups."""

    def setUp(self):
        # Mock client echoes back (endpoint, params) so tests can inspect
        # what Query passed down.
        self.mock_client = MockObject(
            get_entities=MockCallable(
                response=lambda entity_endpoint, params, return_limit: (entity_endpoint, params)
            )
        )

    # Default args
    def test_default_args(self):
        test_query = api.Query(self.mock_client, acid='helloWorld')
        test_inst = test_query.get('Bugs')
        self.assertEqual(test_inst[1].get('acid'), 'helloWorld')

    def test_multiple_default_args(self):
        # BUG FIX: this method was also named test_default_args, which
        # shadowed the previous test so the single-default case never ran.
        test_query = api.Query(self.mock_client, acid='helloWorld', foo="bar")
        test_inst = test_query.get('Bugs')
        self.assertEqual(test_inst[1].get('acid'), 'helloWorld')
        self.assertEqual(test_inst[1].get('foo'), 'bar')

    def test_get_id_return(self):
        # redefine mock client to return an iterator, like the real client
        self.mock_client = MockObject(
            get_entities=MockCallable(
                response=lambda entity_endpoint, params, return_limit: iter([entity_endpoint, 1])
            )
        )
        test_query = api.Query(self.mock_client, acid='helloWorld', foo="bar")
        test_inst = test_query.get('Bugs', Id=1)
        # Test that we didn't get back a list, instead the 1st element
        self.assertTrue(isinstance(test_inst, str))
        self.assertEqual(test_inst, 'Bugs/1')

    def test_check_endpoint_exists(self):
        # Unknown endpoints are rejected with an AssertionError.
        with self.assertRaises(AssertionError):
            test_query = api.Query(self.mock_client, acid='helloWorld', foo="bar")
            test_inst = test_query.get('foobar')
# == entities.py Tests == #
class EntityBaseTests(unittest.TestCase):
    """Tests for entities.EntityBase data access, comparison and hashing."""

    class mock_object(object):
        # NOTE(review): appears unused within this class; kept in case
        # external code references EntityBaseTests.mock_object.
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    # Data Access Tests
    def test_getattr_Tpdata(self):
        i = entities.EntityBase(data={
            'data1': 'a',
            'data2': 1,
            'data3': [1, 2]
        })
        self.assertEqual(i.data1, 'a')
        self.assertEqual(i.data2, 1)
        self.assertEqual(i.data3, [1, 2])

    def test_setattr_Tpdata(self):
        # Entities are immutable: assigning to a data field must raise.
        i = entities.EntityBase(data={'data1': 'a'})
        with self.assertRaises(AssertionError):
            i.data1 = 'b'

    def testEntitySubclass_setattr(self):
        # Immutability must hold for subclasses too.
        class test(entities.EntityBase):
            pass
        i = test(data={})
        with self.assertRaises(AssertionError):
            i.data1 = 'arbitrary string'

    # Comparison Tests
    def test_entityComparisonTrue(self):
        # Equality is decided by Id alone.
        i = entities.EntityBase(data={'Id': 1})
        j = entities.EntityBase(data={'Id': 1, 'onlyIdsMatter': 2})
        self.assertEqual(i, j)

    def test_entityComparisonFalse(self):
        i = entities.EntityBase(data={'Id': 100})
        j = entities.EntityBase(data={'Id': 1, 'onlyIdsMatter': 100})
        self.assertNotEqual(i, j)

    def test_entityComparisonNoId(self):
        # Without an Id an entity never compares equal, even to itself.
        i = entities.EntityBase(data={'noId': 1})
        self.assertNotEqual(i, i)

    # Hashable Tests
    def test_entityHashingTrue(self):
        i = entities.EntityBase(data={'Id': 100})
        try:
            d = {i: "isHashable"}
        except:
            raise Exception("Entity isn't hashable")

    def test_entityHashingNoId(self):
        # BUG FIX: the original created the entity *with* an Id and called
        # self.assertRaises({...}) with a dict literal instead of an
        # exception class, so nothing was actually verified.
        i = entities.EntityBase(data={'noId': 100})
        with self.assertRaises(Exception):
            {i: "isn't Hashable"}
class MutableEntityTests(unittest.TestCase):
    # TODO: placeholder suite - MutableEntity.setProperty is not yet covered.
    def test_setProperty(self):
        # Intentionally empty: passes vacuously until assertions are written.
        pass
class EntityFactoryTests(unittest.TestCase):
    """Tests for entities.EntityClassFactory class generation from metadata.

    Requires a ./testdata.json fixture next to the test runner; its schema
    presumably mirrors the TargetProcess /meta response (TODO confirm).
    """
    _TESTDATA = './testdata.json'
    def setUp(self):
        with open(self._TESTDATA) as f:
            self.test_data = json.load(f)
        # Mock client returns the fixture for any raw request (lambda closes
        # over self so tests can replace test_data).
        self.test_client = MockObject(
            raw_request = MockCallable(
                response = lambda url:self.test_data
            )
        )
        self.test_class_factory = entities.EntityClassFactory(
            self.test_client
        )
    def test_metadataFailsToParse(self):
        # Unparsable metadata must fall back to GenericEntity.
        self.test_data = {}
        test_instance = self.test_class_factory.get('Bugs')({})
        self.assertIsInstance(test_instance,entities.GenericEntity)
    def test_classCreation_value_attribute(self):
        test_instance = self.test_class_factory.get('Bugs')({})
        self.assertIn("Name",test_instance.__class__.__dict__)
        self.assertIsInstance(
            test_instance.__class__.__dict__['Name'],
            entities.ValueAttribute
        )
    def test_classCreation_resource_attribute(self):
        test_instance = self.test_class_factory.get('Bugs')({})
        self.assertIn("Release",test_instance.__class__.__dict__)
        self.assertIsInstance(
            test_instance.__class__.__dict__['Release'],
            entities.ResourceAttribute
        )
    def test_classCreation_collection_attribute(self):
        test_instance = self.test_class_factory.get("Bugs")({})
        self.assertIn("Comments",test_instance.__class__.__dict__)
        self.assertIsInstance(
            test_instance.__class__.__dict__["Comments"],
            entities.CollectionAttribute
        )
    def test_get_mutable_entity_class(self):
        # immutable=False must yield a MutableEntity subclass.
        test_cls = self.test_class_factory.get('Bugs',immutable=False)
        self.assertTrue(issubclass(test_cls,entities.MutableEntity))
    def test_get_all_property_info(self):
        test_instance = self.test_class_factory.get('Bugs')({})
        # Assert all types of properties are present in dict
        self.assertIn('Comments',test_instance.entity_properties)
        self.assertIn('Release',test_instance.entity_properties)
        self.assertIn('Name',test_instance.entity_properties)
# Entity Property Tests #
class BasePropertyTests(unittest.TestCase):
    """Tests for entities.EntityProperty metadata access."""

    def setUp(self):
        self.test_property = entities.EntityProperty('name', 'uri/meta', {'meta1': 'foo'})

    def test_get_meta_return(self):
        meta = self.test_property.get_meta()
        self.assertEqual(meta['meta1'], 'foo')

    def test_meta_contains_relURI(self):
        # The trailing '/meta' segment is stripped to produce RelUri.
        meta = self.test_property.get_meta()
        self.assertEqual(meta['RelUri'], 'uri')

    def test_meta_data_is_copy(self):
        # Mutating the returned dict must not leak back into the property.
        first = self.test_property.get_meta()
        first['new_attr'] = 1
        self.assertTrue('new_attr' not in self.test_property.get_meta())
class ValuePropertiesTests(unittest.TestCase):
    """Tests for the entities.ValueAttribute descriptor."""

    def setUp(self):
        # Host class exposing one descriptor whose key exists in _tpdata
        # and one whose key does not.
        class test_class(object):
            test_property = entities.ValueAttribute(
                name='test_property', uri=""
            )
            test_error_property = entities.ValueAttribute(
                name='not there', uri=""
            )

            def __init__(self, test_variable):
                self._tpdata = {'test_property': test_variable}

        self.test_class = test_class

    def test_valueDescriptorGet(self):
        instance = self.test_class(99)
        self.assertEqual(instance.test_property, 99)

    def test_valueDescriptorSet(self):
        instance = self.test_class(99)
        instance.test_property = 1
        self.assertEqual(instance._tpdata['test_property'], 1)

    def test_valueDescriptorSet_missing_attr(self):
        # Setting a property whose key is absent creates the key.
        instance = self.test_class(99)
        instance.test_error_property = 1
        self.assertEqual(instance._tpdata['not there'], 1)

    def test_valueDescriptorGetNoValue(self):
        instance = self.test_class(None)
        self.assertEqual(instance.test_property, None)

    def test_valueDescriptorGetDataNotPresent(self):
        # Missing keys read back as None rather than raising.
        instance = self.test_class(None)
        self.assertEqual(instance.test_error_property, None)
class ResourcePropertiesTests(unittest.TestCase):
    """Tests for the entities.ResourceAttribute descriptor (lazy lookup of a
    referenced entity via the class-level TP client)."""
    def setUp(self):
        # NOTE: the mock response is a single-use iterator; each test gets a
        # fresh one because setUp runs per test.
        self.test_client = MockObject(
            get_entities = MockCallable(response = iter([{"Name":"helloWorld"}]))
        )
        test_client = self.test_client
        class test_class(object):
            TP = test_client
            test_property = entities.ResourceAttribute(
                name = 'test_property', uri = 'spam/meta', metadata = {}
            )
            test_error_property = entities.ResourceAttribute(
                name = 'not there', uri = ""
            )
            def __init__(self,test_variable):
                self._tpdata = {
                    'test_property':test_variable
                }
        self.test_class = test_class
    def test_ResourcePropertyWithoutAnyData(self):
        # No reference stored -> the descriptor resolves to None.
        test_instance = self.test_class(None)
        self.assertEqual(test_instance.test_property,None)
    def test_ResourcePropertyCallsClientCorrectly(self):
        test_instance = self.test_class({'Name':'foobar',"ResourceType":'chips','Id':1})
        self.assertEqual(test_instance.test_property['Name'],'helloWorld')
        # Make sure url is working
        # Interesting, seems we ignore resource type in initial data
        # and prefer uri ? Good / bad ?
        self.assertEqual(self.test_client.get_entities.last_call.args[0], 'spam/1')
    def test_ResourcePropertyCanSetToOtherEntity(self):
        # Assigning an entity stores only its Id reference in _tpdata.
        test_instance = self.test_class(None)
        test_instance.test_property = MockObject(Id=999)
        self.assertEqual(test_instance._tpdata['test_property'],{'Id':999})
class CollectionPropertiesTests(unittest.TestCase):
    """Tests for the entities.CollectionAttribute descriptor (inline data vs
    lazy fetch through the class-level TP client)."""
    def setUp(self):
        # NOTE: single-use iterator response; fresh per test via setUp.
        self.test_client = MockObject(
            get_entities = MockCallable(
                response = iter([{"Name":"helloWorld"},{"Name":"Goodbye"}])
            )
        )
        test_client = self.test_client
        class test_class(object):
            TP = test_client
            _api_endpoint = "foo"
            test_property = entities.CollectionAttribute(
                name = 'test_property', uri = 'spam/meta'
            )
            def __init__(self,test_variable):
                self._tpdata = {
                    'test_property':test_variable,
                    'Id':1,
                }
            def __getattr__(self,name):
                # Mimic GenericEntity lookup
                return self._tpdata[name]
        self.test_class = test_class
    def test_trivialCollectionInData(self):
        # Inline collection data is wrapped in GenericEntity instances.
        test_instance = self.test_class([
            {'Name':'foobar'},
            {'Name':'HelloWorld'},
        ])
        self.assertEqual(len(test_instance.test_property),2)
        self.assertEqual(test_instance.test_property[0].Name,'foobar')
        self.assertIsInstance(
            test_instance.test_property[0],entities.GenericEntity
        )
    def test_CollectionCallsClientCorrectly(self):
        test_instance = self.test_class(None)
        self.assertNotEqual(test_instance.test_property,None)
        # Make sure url is correct ie
        # <current entity endpoint>/<current entity id>/<collection endpoint>
        self.assertEqual(
            self.test_client.get_entities.last_call.args[0], 'foo/1/spam'
        )
# Integration Tests
class IntegrationTests(unittest.TestCase):
    """End-to-end tests: real client/api/entities wiring with only the HTTP
    layer mocked out.  Responses are selected by regex-matching the request
    URL against the keys of ``mock_responses``.

    NOTE(review): uses dict.iteritems() - this module is Python 2 only.
    """
    def setUp(self):
        self.TESTACID='TESTACIDSTR'
        # Mock responses need to be ordered from root to specific
        # in order to be matched correctly
        # e.g { "test":1,"test/123/":2,
        self.mock_responses = {
            r"Test/Context/\?ids=111":{
                'Items':[{'Acid':'foo'}]
            },
            r"Test/Context/meta": {
                'This will error to a generic Entity':1
            },
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1'},{'Id':2,'Name':'Item2'}
                ]
            },
            "Test/Bugs/meta":{
                'Name':"Bug",
                'ResourceMetadataPropertiesDescription':{
                    "ResourceMetadataProperties"\
                    "ResourceValuesDescription":{"Items":[
                    {"Name":"Id"},{"Name":"ValueAttrExample"}]},
                    "ResourceMetadataProperties"\
                    "ResourceReferencesDescription":{"Items":[{"Name":"ResourceAttrExample"}]},
                },
            },
        }
        def mock_request(method,url,auth,**kwargs):
            # Stand-in for requests.request: picks the first mock_responses
            # entry whose key (as a regex) matches the whole url, optionally
            # followed by '/', '?', and a format=json query argument.
            try:
                return MockObject(
                    json = MockCallable(response = [
                        v for k,v in self.mock_responses.iteritems()
                        if re.match(r"^("+ k +r")/?\??(&?format=json)?(?!.)",url)][0]
                    ),
                    raise_for_status = MockCallable(response=None)
                )
            except IndexError:
                raise Exception("Mock Request couldn't match {}".format(url or "None"))
        # Mock out requests.py for test client
        self.test_requester = client.HTTPRequestDispatcher()
        self.test_requester._requests = MockObject(
            request = mock_request
        )
        self.test_client = client.TPEntityClient(
            url = 'Test',
            requester = self.test_requester,
        )
        self.test_project = api.ProjectProxy(self.test_client,MockObject(Id=111))
    def test_simple_query_request(self):
        # Bad meta should fail and return generic entities
        self.mock_responses.update({
            r"Test/Bugs/meta":{
                'ResourceMetadataPropertiesDescription':{
                },
            },
        })
        items = [x for x in self.test_project.get("Bugs")]
        # We should get back 2 generic entities
        self.assertTrue(len(items) == 2 )
        self.assertTrue(items[0].Name == 'Item1')
        self.assertTrue(items[0].Id == 1)
        self.assertIsInstance(items[0],entities.GenericEntity)
    def test_EntityClass_from_request(self):
        # With valid meta the items come back as a generated entity class.
        self.mock_responses.update({
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1','ValueAttrExample':1},
                    {'Id':2,'Name':'Item2','ValueAttrExample':2},
                ]
            },
        })
        items = [ x for x in self.test_project.get('Bugs')]
        self.assertTrue(len(items) == 2 )
        self.assertNotIsInstance(items[0],entities.GenericEntity)
        self.assertEqual(items[0].ValueAttrExample, 1)
    def test_queryEntityWithoutID(self):
        self.mock_responses.update({
            r"Test/Context/\?acid=foo":{
                "Items":[{'ItemWithoutId':1,'Name':'Context'}]
            }
        })
        # Get bugs from project
        items = [x for x in self.test_project.get('Context')]
        # Make sure Returned Entity is Correct and with ID
        self.assertEqual(len(items),1)
        self.assertEqual(items[0].Name,'Context')
        self.assertIsInstance(items[0],entities.GenericEntity)
        with self.assertRaises(AttributeError) as e:
            items[0].Id
    def test_createEntity(self):
        # Try creating a test bug with value and resource based attrs
        bug_data = {
            'Id': 0,
            'ValueAttrExample':'NewBug',
            'ResourceAttrExample':MockObject(Id=1)
        }
        returned_bug_data = bug_data.copy()
        returned_bug_data['Id']=123
        self.mock_responses.update({
            r"Test/Bugs":returned_bug_data
        })
        # Assert returned bug has same data as input data
        # plus now has an ID
        new_bug = self.test_project.create('Bugs',bug_data)
        self.assertEqual(new_bug.ValueAttrExample,'NewBug')
        self.assertEqual(new_bug.Id,123)
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    # (Dropped the stray trailing semicolon - a C-ism, not Python.)
    unittest.main()
| true | true |
f732cd273d3829dde453019955cd5cbce3682a02 | 185 | py | Python | locale/pot/api/core/_autosummary/pyvista-MultiBlock-copy-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/core/_autosummary/pyvista-MultiBlock-copy-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/core/_autosummary/pyvista-MultiBlock-copy-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | import pyvista as pv
data = [pv.Sphere(center=(2, 0, 0)), pv.Cube(center=(0, 2, 0)), pv.Cone()]
blocks = pv.MultiBlock(data)
new_blocks = blocks.copy()
len(new_blocks)
# Expected:
## 3
import pyvista as pv
# Copying a MultiBlock preserves its three constituent meshes.
data = [pv.Sphere(center=(2, 0, 0)), pv.Cube(center=(0, 2, 0)), pv.Cone()]
blocks = pv.MultiBlock(data)
new_blocks = blocks.copy()
len(new_blocks)
| true | true |
f732cd50a300c1c0e734cd8f4825eebe37114460 | 3,665 | py | Python | lite/tests/unittest_py/op/test_layer_norm_op.py | devchai123/Paddle-Lite | 442d6996a59c3498eae27610d49a0d5b2c320f24 | [
"Apache-2.0"
] | 3 | 2021-06-17T11:00:13.000Z | 2021-08-10T10:28:59.000Z | lite/tests/unittest_py/op/test_layer_norm_op.py | devchai123/Paddle-Lite | 442d6996a59c3498eae27610d49a0d5b2c320f24 | [
"Apache-2.0"
] | 1 | 2021-01-06T10:21:22.000Z | 2021-01-06T10:21:22.000Z | lite/tests/unittest_py/op/test_layer_norm_op.py | yingshengBD/Paddle-Lite | eea59b66f61bb2acad471010c9526eeec43a15ca | [
"Apache-2.0"
] | 1 | 2021-12-03T10:07:54.000Z | 2021-12-03T10:07:54.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLayerNormOp(AutoScanTest):
    """AutoScan test for the `layer_norm` operator on X86 and ARM targets."""
    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        # FP32 precision, NCHW layout, several thread counts per target.
        self.enable_testing_on_place(
            TargetType.X86,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2, 4])
    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        # Every sampled program is valid for this op.
        return True
    def sample_program_configs(self, draw):
        """Draw a random 4-D shape plus op attributes and build the program."""
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=64), min_size=4, max_size=4))
        epsilon = draw(st.floats(min_value=0.0001, max_value=0.0005))
        begin_norm_axis = draw(st.sampled_from([1, 2]))
        def generate_input(*args, **kwargs):
            return np.random.random(in_shape).astype(np.float32)
        # Scale/Bias length = product of the normalized trailing dimensions.
        channel_dim = 1
        for dim in range(begin_norm_axis, 4):
            channel_dim = channel_dim * in_shape[dim]
        def generate_scale(*args, **kwargs):
            return np.random.random([channel_dim]).astype(np.float32)
        def generate_bias(*args, **kwargs):
            return np.random.random([channel_dim]).astype(np.float32)
        run_op = OpConfig(
            type="layer_norm",
            inputs={
                "X": ["input_data"],
                "Scale": ["scale_data"],
                "Bias": ["bias_data"]
            },
            outputs={
                "Y": ["output_data"],
                "Mean": ["mean_data"],
                "Variance": ["var_data"],
            },
            attrs={"epsilon": epsilon,
                   "begin_norm_axis": begin_norm_axis})
        program_config = ProgramConfig(
            ops=[run_op],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
                "scale_data": TensorConfig(data_gen=partial(generate_scale)),
                "bias_data": TensorConfig(data_gen=partial(generate_bias)),
            },
            outputs=["output_data", "mean_data", "var_data"])
        return program_config
    def sample_predictor_configs(self):
        # (configs, op names to check, (atol, rtol)) for result comparison.
        return self.get_predictor_configs(), ["layer_norm"], (5e-5, 5e-5)
    def add_ignore_pass_case(self):
        pass
    def test(self, *args, **kwargs):
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
    # argv=[''] keeps unittest from trying to parse the real CLI arguments.
    unittest.main(argv=[''])
| 34.252336 | 125 | 0.616644 |
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLayerNormOp(AutoScanTest):
    """AutoScan test for the `layer_norm` operator on X86 and ARM targets."""

    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        # FP32 precision, NCHW layout, several thread counts per target.
        self.enable_testing_on_place(
            TargetType.X86,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])
        self.enable_testing_on_place(
            TargetType.ARM,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2, 4])

    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        # Every sampled program is valid for this op.
        return True

    def sample_program_configs(self, draw):
        """Draw a random 4-D shape plus op attributes and build the program.

        Draw order (shape, epsilon, begin_norm_axis) is kept identical to the
        original so hypothesis example generation is unchanged.
        """
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=64), min_size=4, max_size=4))
        epsilon = draw(st.floats(min_value=0.0001, max_value=0.0005))
        begin_norm_axis = draw(st.sampled_from([1, 2]))

        def generate_input(*args, **kwargs):
            return np.random.random(in_shape).astype(np.float32)

        # Scale/Bias length = product of the normalized trailing dimensions.
        channel_dim = 1
        for axis in range(begin_norm_axis, 4):
            channel_dim *= in_shape[axis]

        def generate_scale(*args, **kwargs):
            return np.random.random([channel_dim]).astype(np.float32)

        def generate_bias(*args, **kwargs):
            return np.random.random([channel_dim]).astype(np.float32)

        layer_norm_op = OpConfig(
            type="layer_norm",
            inputs={
                "X": ["input_data"],
                "Scale": ["scale_data"],
                "Bias": ["bias_data"]
            },
            outputs={
                "Y": ["output_data"],
                "Mean": ["mean_data"],
                "Variance": ["var_data"],
            },
            attrs={"epsilon": epsilon,
                   "begin_norm_axis": begin_norm_axis})
        return ProgramConfig(
            ops=[layer_norm_op],
            weights={},
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
                "scale_data": TensorConfig(data_gen=partial(generate_scale)),
                "bias_data": TensorConfig(data_gen=partial(generate_bias)),
            },
            outputs=["output_data", "mean_data", "var_data"])

    def sample_predictor_configs(self):
        # (configs, op names to check, (atol, rtol)) for result comparison.
        return self.get_predictor_configs(), ["layer_norm"], (5e-5, 5e-5)

    def add_ignore_pass_case(self):
        pass

    def test(self, *args, **kwargs):
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
    # argv=[''] keeps unittest from trying to parse the real CLI arguments.
    unittest.main(argv=[''])
| true | true |
f732cdb437936108c6fb25425952cbf403c695aa | 17,619 | py | Python | tabular/src/autogluon/tabular/trainer/model_presets/presets.py | daobook/autogluon | 7309118f2ab1c9519f25acf61a283a95af95842b | [
"Apache-2.0"
] | 1 | 2020-09-02T01:10:25.000Z | 2020-09-02T01:10:25.000Z | tabular/src/autogluon/tabular/trainer/model_presets/presets.py | daobook/autogluon | 7309118f2ab1c9519f25acf61a283a95af95842b | [
"Apache-2.0"
] | null | null | null | tabular/src/autogluon/tabular/trainer/model_presets/presets.py | daobook/autogluon | 7309118f2ab1c9519f25acf61a283a95af95842b | [
"Apache-2.0"
] | null | null | null | import copy
import inspect
import logging
from collections import defaultdict
from autogluon.core.constants import AG_ARGS, AG_ARGS_FIT, AG_ARGS_ENSEMBLE, BINARY, MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE
from autogluon.core.models import AbstractModel, GreedyWeightedEnsembleModel, StackerEnsembleModel, SimpleWeightedEnsembleModel
from autogluon.core.trainer.utils import process_hyperparameters
from .presets_custom import get_preset_custom
from ...models import LGBModel, CatBoostModel, XGBoostModel, RFModel, XTModel, KNNModel, LinearModel,\
TabularNeuralNetModel, TabularNeuralQuantileModel, NNFastAiTabularModel, FastTextModel, TextPredictorModel, ImagePredictorModel
from ...models.tab_transformer.tab_transformer_model import TabTransformerModel
logger = logging.getLogger(__name__)
# Higher values indicate higher priority, priority dictates the order models are trained for a given level.
DEFAULT_MODEL_PRIORITY = dict(
    KNN=100,
    GBM=90,
    RF=80,
    CAT=70,
    XT=60,
    FASTAI=50,
    XGB=40,
    LR=30,
    NN=20,
    FASTTEXT=0,
    AG_TEXT_NN=0,
    AG_IMAGE_NN=0,
    TRANSF=0,
    custom=0,
)
# Problem type specific model priority overrides (will update default values in DEFAULT_MODEL_PRIORITY)
PROBLEM_TYPE_MODEL_PRIORITY = {
    MULTICLASS: dict(
        FASTAI=95,
    ),
}
# Priorities used instead of the defaults for SOFTCLASS problems.
DEFAULT_SOFTCLASS_PRIORITY = dict(
    GBM=100,
    NN=90,
    RF=80,
    CAT=60,
    custom=0,
)
# Fallback priority for model types not listed in a priority dict.
DEFAULT_CUSTOM_MODEL_PRIORITY = 0
# Model type keys allowed for QUANTILE problems ('NN' is remapped to 'QNN').
DEFAULT_QUANTILE_MODEL = ['RF', 'XT', 'FASTAI', 'QNN', 'ENS_WEIGHTED']  # TODO: OTHERS will be added
# Shorthand model-type key -> implementing model class.
MODEL_TYPES = dict(
    RF=RFModel,
    XT=XTModel,
    KNN=KNNModel,
    GBM=LGBModel,
    CAT=CatBoostModel,
    XGB=XGBoostModel,
    NN=TabularNeuralNetModel,
    QNN=TabularNeuralQuantileModel,
    LR=LinearModel,
    FASTAI=NNFastAiTabularModel,
    TRANSF=TabTransformerModel,
    AG_TEXT_NN=TextPredictorModel,
    AG_IMAGE_NN=ImagePredictorModel,
    FASTTEXT=FastTextModel,
    ENS_WEIGHTED=GreedyWeightedEnsembleModel,
    SIMPLE_ENS_WEIGHTED=SimpleWeightedEnsembleModel,
)
# Human-readable base names used when constructing model display names.
DEFAULT_MODEL_NAMES = {
    RFModel: 'RandomForest',
    XTModel: 'ExtraTrees',
    KNNModel: 'KNeighbors',
    LGBModel: 'LightGBM',
    CatBoostModel: 'CatBoost',
    XGBoostModel: 'XGBoost',
    TabularNeuralNetModel: 'NeuralNetMXNet',
    TabularNeuralQuantileModel: 'QuantileNeuralNet',
    LinearModel: 'LinearModel',
    NNFastAiTabularModel: 'NeuralNetFastAI',
    TabTransformerModel: 'Transformer',
    TextPredictorModel: 'TextPredictor',
    ImagePredictorModel: 'ImagePredictor',
    FastTextModel: 'FastText',
    GreedyWeightedEnsembleModel: 'WeightedEnsemble',
    SimpleWeightedEnsembleModel: 'WeightedEnsemble',
}
# Keys recognized inside the AG_ARGS dict of a model config.
VALID_AG_ARGS_KEYS = {
    'name',
    'name_main',
    'name_prefix',
    'name_suffix',
    'name_bag_suffix',
    'model_type',
    'priority',
    'problem_types',
    'disable_in_hpo',
    'valid_stacker',
    'valid_base',
    'hyperparameter_tune_kwargs',
}
# DONE: Add levels, including 'default'
# DONE: Add lists
# DONE: Add custom which can append to lists
# DONE: Add special optional AG args for things like name prefix, name suffix, name, etc.
# DONE: Move creation of stack ensemble internally into this function? Requires passing base models in as well.
# DONE: Add special optional AG args for training order
# DONE: Add special optional AG args for base models
# TODO: Consider making hyperparameters arg in fit() accept lists, concatenate hyperparameter sets together.
# TODO: Consider adding special optional AG args for #cores,#gpus,num_early_stopping_iterations,etc.
# DONE: Consider adding special optional AG args for max train time, max memory size, etc.
# TODO: Consider adding special optional AG args for use_original_features,features_to_use,etc.
# TODO: Consider adding optional AG args to dynamically disable models such as valid_num_classes_range, valid_row_count_range, valid_feature_count_range, etc.
# TODO: Args such as max_repeats, num_folds
# DONE: Add banned_model_types arg
# TODO: Add option to update hyperparameters with only added keys, so disabling CatBoost would just be {'CAT': []}, which keeps the other models as is.
# TODO: special optional AG arg for only training model if eval_metric in list / not in list. Useful for F1 and 'is_unbalanced' arg in LGBM.
def get_preset_models(path, problem_type, eval_metric, hyperparameters,
                      level: int = 1, ensemble_type=StackerEnsembleModel, ensemble_kwargs: dict = None, ag_args_fit=None, ag_args=None, ag_args_ensemble=None,
                      name_suffix: str = None, default_priorities=None, invalid_model_names: list = None, excluded_model_types: list = None,
                      hyperparameter_preprocess_func=None, hyperparameter_preprocess_kwargs=None, silent=True):
    """Convert the `hyperparameters` dict into instantiated (unfitted) models.

    Configs are cleaned via `clean_model_cfg`, filtered (excluded types,
    invalid configs), grouped and ordered by descending priority, then
    instantiated through `model_factory`.

    Returns:
        (models, model_args_fit): `models` is the list of model objects in
        intended training order; `model_args_fit` maps model name to extra
        fit-time arguments (currently only 'hyperparameter_tune_kwargs').
    """
    hyperparameters = process_hyperparameters(hyperparameters)
    if hyperparameter_preprocess_func is not None:
        if hyperparameter_preprocess_kwargs is None:
            hyperparameter_preprocess_kwargs = dict()
        hyperparameters = hyperparameter_preprocess_func(hyperparameters, **hyperparameter_preprocess_kwargs)
    if problem_type not in [BINARY, MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE]:
        raise NotImplementedError
    invalid_name_set = set()
    if invalid_model_names is not None:
        invalid_name_set.update(invalid_model_names)
    invalid_type_set = set()
    if excluded_model_types is not None:
        logger.log(20, f'Excluded Model Types: {excluded_model_types}')
        invalid_type_set.update(excluded_model_types)
    if default_priorities is None:
        default_priorities = copy.deepcopy(DEFAULT_MODEL_PRIORITY)
        if problem_type in PROBLEM_TYPE_MODEL_PRIORITY:
            default_priorities.update(PROBLEM_TYPE_MODEL_PRIORITY[problem_type])
    # Hyperparameters may be keyed by stack level; fall back to 'default',
    # wrapping a flat dict as {'default': ...} when no level keys exist.
    level_key = level if level in hyperparameters.keys() else 'default'
    if level_key not in hyperparameters.keys() and level_key == 'default':
        hyperparameters = {'default': hyperparameters}
    hp_level = hyperparameters[level_key]
    model_cfg_priority_dict = defaultdict(list)
    model_type_list = list(hp_level.keys())
    for model_type in model_type_list:
        if problem_type == QUANTILE and model_type not in DEFAULT_QUANTILE_MODEL:
            # For quantile problems, substitute the quantile NN ('QNN') for
            # 'NN' and drop every other unsupported model type.
            # NOTE(review): this pops 'NN' from hp_level in place - relies on
            # process_hyperparameters having copied the caller's dict; verify.
            if model_type == 'NN' and 'QNN' in DEFAULT_QUANTILE_MODEL:
                model_type = 'QNN'
                hp_level['QNN'] = hp_level.pop('NN')
            else:
                continue
        models_of_type = hp_level[model_type]
        if not isinstance(models_of_type, list):
            models_of_type = [models_of_type]
        model_cfgs_to_process = []
        for model_cfg in models_of_type:
            if model_type in invalid_type_set:
                logger.log(20, f"\tFound '{model_type}' model in hyperparameters, but '{model_type}' is present in `excluded_model_types` and will be removed.")
                continue  # Don't include excluded models
            if isinstance(model_cfg, str):
                # A string config names a preset; text presets come from the
                # optional autogluon.text package, others from presets_custom.
                if model_type == 'AG_TEXT_NN':
                    AG_TEXT_IMPORT_ERROR = 'autogluon.text has not been installed. ' \
                                           'You may try to install "autogluon.text" ' \
                                           'first by running. ' \
                                           '`python3 -m pip install autogluon.text`'
                    try:
                        from autogluon.text import ag_text_presets
                    except ImportError:
                        raise ImportError(AG_TEXT_IMPORT_ERROR)
                    model_cfgs_to_process.append(ag_text_presets.create(model_cfg))
                else:
                    model_cfgs_to_process += get_preset_custom(name=model_cfg, problem_type=problem_type)
            else:
                model_cfgs_to_process.append(model_cfg)
        for model_cfg in model_cfgs_to_process:
            model_cfg = clean_model_cfg(model_cfg=model_cfg, model_type=model_type, ag_args=ag_args, ag_args_ensemble=ag_args_ensemble, ag_args_fit=ag_args_fit, problem_type=problem_type)
            model_cfg[AG_ARGS]['priority'] = model_cfg[AG_ARGS].get('priority', default_priorities.get(model_type, DEFAULT_CUSTOM_MODEL_PRIORITY))
            model_priority = model_cfg[AG_ARGS]['priority']
            # Check if model_cfg is valid
            is_valid = is_model_cfg_valid(model_cfg, level=level, problem_type=problem_type)
            if AG_ARGS_FIT in model_cfg and not model_cfg[AG_ARGS_FIT]:
                model_cfg.pop(AG_ARGS_FIT)
            if is_valid:
                model_cfg_priority_dict[model_priority].append(model_cfg)
    # Flatten configs grouped by priority into one list, highest priority first.
    model_cfg_priority_list = [model for priority in sorted(model_cfg_priority_dict.keys(), reverse=True) for model in model_cfg_priority_dict[priority]]
    if not silent:
        logger.log(20, 'Model configs that will be trained (in order):')
    models = []
    model_args_fit = {}
    for model_cfg in model_cfg_priority_list:
        model = model_factory(model_cfg, path=path, problem_type=problem_type, eval_metric=eval_metric,
                              name_suffix=name_suffix, ensemble_type=ensemble_type, ensemble_kwargs=ensemble_kwargs,
                              invalid_name_set=invalid_name_set, level=level)
        # Reserve the chosen name so later models cannot collide with it.
        invalid_name_set.add(model.name)
        if 'hyperparameter_tune_kwargs' in model_cfg[AG_ARGS]:
            model_args_fit[model.name] = {'hyperparameter_tune_kwargs': model_cfg[AG_ARGS]['hyperparameter_tune_kwargs']}
        if 'ag_args_ensemble' in model_cfg and not model_cfg['ag_args_ensemble']:
            model_cfg.pop('ag_args_ensemble')
        if not silent:
            logger.log(20, f'\t{model.name}: \t{model_cfg}')
        models.append(model)
    return models, model_args_fit
def clean_model_cfg(model_cfg: dict, model_type=None, ag_args=None, ag_args_ensemble=None, ag_args_fit=None, problem_type=None):
    """Normalize a single model config dict.

    Ensures the AG_ARGS section exists, resolves `model_type` (string alias or class)
    to a concrete model class, and layers the override dicts onto the config.
    Precedence within each section (highest first): values already present in
    `model_cfg`, then the user-level `ag_args`/`ag_args_ensemble`/`ag_args_fit`
    passed here, then the model type's own defaults.
    Returns a deep-copied, cleaned config; the caller's dict is not mutated.
    Raises AssertionError if no model_type can be determined.
    """
    model_cfg = copy.deepcopy(model_cfg)
    if AG_ARGS not in model_cfg:
        model_cfg[AG_ARGS] = dict()
    if 'model_type' not in model_cfg[AG_ARGS]:
        model_cfg[AG_ARGS]['model_type'] = model_type
        if model_cfg[AG_ARGS]['model_type'] is None:
            raise AssertionError(f'model_type was not specified for model! Model: {model_cfg}')
    model_type = model_cfg[AG_ARGS]['model_type']
    if not inspect.isclass(model_type):
        # String alias (e.g. 'GBM') -> registered model class
        model_type = MODEL_TYPES[model_type]
    elif not issubclass(model_type, AbstractModel):
        logger.warning(f'Warning: Custom model type {model_type} does not inherit from {AbstractModel}. This may lead to instability. Consider wrapping {model_type} with an implementation of {AbstractModel}!')
    else:
        logger.log(20, f'Custom Model Type Detected: {model_type}')
    model_cfg[AG_ARGS]['model_type'] = model_type
    model_type_real = model_cfg[AG_ARGS]['model_type']
    if not inspect.isclass(model_type_real):
        model_type_real = MODEL_TYPES[model_type_real]
    default_ag_args = model_type_real._get_default_ag_args()
    if ag_args is not None:
        # User-level ag_args act as a base; values already in the model config win
        model_extra_ag_args = ag_args.copy()
        model_extra_ag_args.update(model_cfg[AG_ARGS])
        model_cfg[AG_ARGS] = model_extra_ag_args
    default_ag_args_ensemble = model_type_real._get_default_ag_args_ensemble(problem_type=problem_type)
    if ag_args_ensemble is not None:
        model_extra_ag_args_ensemble = ag_args_ensemble.copy()
        model_extra_ag_args_ensemble.update(model_cfg.get(AG_ARGS_ENSEMBLE, dict()))
        model_cfg[AG_ARGS_ENSEMBLE] = model_extra_ag_args_ensemble
    if ag_args_fit is not None:
        if AG_ARGS_FIT not in model_cfg:
            model_cfg[AG_ARGS_FIT] = dict()
        model_extra_ag_args_fit = ag_args_fit.copy()
        model_extra_ag_args_fit.update(model_cfg[AG_ARGS_FIT])
        model_cfg[AG_ARGS_FIT] = model_extra_ag_args_fit
    if default_ag_args is not None:
        # Model-type defaults are the lowest-precedence layer
        default_ag_args.update(model_cfg[AG_ARGS])
        model_cfg[AG_ARGS] = default_ag_args
    if default_ag_args_ensemble is not None:
        default_ag_args_ensemble.update(model_cfg.get(AG_ARGS_ENSEMBLE, dict()))
        model_cfg[AG_ARGS_ENSEMBLE] = default_ag_args_ensemble
    return model_cfg
# Check if model is valid
def is_model_cfg_valid(model_cfg, level=1, problem_type=None):
    """Return True if `model_cfg` is usable at stack `level` for `problem_type`.

    Unknown ag_args keys are only warned about; they do not invalidate the config.
    """
    for ag_key in model_cfg.get(AG_ARGS, {}):
        if ag_key not in VALID_AG_ARGS_KEYS:
            logger.warning(f'WARNING: Unknown ag_args key: {ag_key}')
    if AG_ARGS not in model_cfg:
        return False  # AG_ARGS is required
    ag_args = model_cfg[AG_ARGS]
    if ag_args.get('model_type', None) is None:
        return False  # model_type is required
    if ag_args.get('hyperparameter_tune_kwargs', None) and ag_args.get('disable_in_hpo', False):
        return False  # explicitly disabled when hyperparameter tuning
    if not ag_args.get('valid_stacker', True) and level > 1:
        return False  # not valid as a stacker model
    if not ag_args.get('valid_base', True) and level == 1:
        return False  # not valid as a base model
    if problem_type is not None and problem_type not in ag_args.get('problem_types', [problem_type]):
        return False  # not valid for this problem_type
    return True
def model_factory(
        model, path, problem_type, eval_metric,
        name_suffix=None, ensemble_type=StackerEnsembleModel, ensemble_kwargs=None,
        invalid_name_set=None, level=1,
):
    """Instantiate a model (and optionally its bagged-ensemble wrapper) from a cleaned config dict.

    `model` is a config produced by `clean_model_cfg`. Builds a unique display name
    (appending `_2`, `_3`, ... until it avoids `invalid_name_set`), strips the AG_ARGS
    sections from the hyperparameters, and constructs the model. When `ensemble_kwargs`
    is given, the model is additionally wrapped in `ensemble_type` under a
    `..._BAG_L{level}` style name.
    Returns the model instance (or the ensemble wrapper around it).
    """
    if invalid_name_set is None:
        invalid_name_set = set()
    model_type = model[AG_ARGS]['model_type']
    if not inspect.isclass(model_type):
        model_type = MODEL_TYPES[model_type]
    name_orig = model[AG_ARGS].get('name', None)
    if name_orig is None:
        # Assemble the name from its optional prefix/main/suffix pieces
        name_main = model[AG_ARGS].get('name_main', DEFAULT_MODEL_NAMES.get(model_type, model_type.__name__))
        name_prefix = model[AG_ARGS].get('name_prefix', '')
        name_suff = model[AG_ARGS].get('name_suffix', '')
        name_orig = name_prefix + name_main + name_suff
    name_stacker = None
    num_increment = 2
    if name_suffix is None:
        name_suffix = ''
    if ensemble_kwargs is None:
        name = f'{name_orig}{name_suffix}'
        while name in invalid_name_set:  # Ensure name is unique
            name = f'{name_orig}_{num_increment}{name_suffix}'
            num_increment += 1
    else:
        # Bagged mode: uniqueness is checked on the stacker name, not the base name
        name = name_orig
        name_bag_suffix = model[AG_ARGS].get('name_bag_suffix', '_BAG')
        name_stacker = f'{name}{name_bag_suffix}_L{level}{name_suffix}'
        while name_stacker in invalid_name_set:  # Ensure name is unique
            name = f'{name_orig}_{num_increment}'
            name_stacker = f'{name}{name_bag_suffix}_L{level}{name_suffix}'
            num_increment += 1
    # AG_ARGS sections are bookkeeping, not model hyperparameters
    model_params = copy.deepcopy(model)
    model_params.pop(AG_ARGS, None)
    model_params.pop(AG_ARGS_ENSEMBLE, None)
    model_init = model_type(path=path, name=name, problem_type=problem_type, eval_metric=eval_metric,
                            hyperparameters=model_params)
    if ensemble_kwargs is not None:
        # Merge per-model ensemble hyperparameters into a copy of the shared ensemble kwargs
        ensemble_kwargs_model = copy.deepcopy(ensemble_kwargs)
        extra_ensemble_hyperparameters = copy.deepcopy(model.get(AG_ARGS_ENSEMBLE, dict()))
        ensemble_kwargs_model['hyperparameters'] = ensemble_kwargs_model.get('hyperparameters', {})
        if ensemble_kwargs_model['hyperparameters'] is None:
            ensemble_kwargs_model['hyperparameters'] = {}
        ensemble_kwargs_model['hyperparameters'].update(extra_ensemble_hyperparameters)
        model_init = ensemble_type(path=path, name=name_stacker, model_base=model_init,
                                   **ensemble_kwargs_model)
    return model_init
# TODO: v0.1 cleanup and avoid hardcoded logic with model names
def get_preset_models_softclass(hyperparameters, invalid_model_names: list = None, **kwargs):
    """Build the model list for SOFTCLASS (soft-label / distillation) problems.

    Only GBM, NN, CAT, ENS_WEIGHTED and RF are supported; RF configs are rewritten
    to use the MSE criterion (soft labels are regressed, not classified).
    Returns the same `(models, model_args_fit)` pair as `get_preset_models`.
    Raises ValueError if no supported model type is present in `hyperparameters`.
    """
    # TODO v0.1: This import depends on mxnet, consider refactoring to avoid mxnet
    from autogluon.core.metrics.softclass_metrics import soft_log_loss
    model_types_standard = ['GBM', 'NN', 'CAT', 'ENS_WEIGHTED']
    hyperparameters = copy.deepcopy(hyperparameters)
    hyperparameters_standard = {key: hyperparameters[key] for key in hyperparameters if key in model_types_standard}
    hyperparameters_rf = {key: hyperparameters[key] for key in hyperparameters if key == 'RF'}
    # Swap RF criterion for MSE:
    if 'RF' in hyperparameters_rf:
        rf_params = hyperparameters_rf['RF']
        # NOTE(review): sklearn renamed criterion 'mse' to 'squared_error' (alias removed in sklearn 1.2) — confirm RFModel maps this value
        rf_newparams = {'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE'}}
        for i in range(len(rf_params)):
            rf_params[i].update(rf_newparams)
        rf_params = [j for n, j in enumerate(rf_params) if j not in rf_params[(n+1):]]  # Remove duplicates which may arise after overwriting criterion
        hyperparameters_standard['RF'] = rf_params
    models, model_args_fit = get_preset_models(problem_type=SOFTCLASS, eval_metric=soft_log_loss,
                                               hyperparameters=hyperparameters_standard,
                                               default_priorities=DEFAULT_SOFTCLASS_PRIORITY, invalid_model_names=invalid_model_names, **kwargs)
    if len(models) == 0:
        raise ValueError("At least one of the following model-types must be present in hyperparameters: ['GBM','CAT','NN','RF'], "
                         "These are the only supported models for softclass prediction problems. "
                         "Softclass problems are also not yet supported for fit() with per-stack level hyperparameters.")
    for model in models:
        model.normalize_pred_probas = True  # FIXME: Do we need to do this for child models too?
    return models, model_args_fit
| 48.671271 | 209 | 0.704296 | import copy
import inspect
import logging
from collections import defaultdict
from autogluon.core.constants import AG_ARGS, AG_ARGS_FIT, AG_ARGS_ENSEMBLE, BINARY, MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE
from autogluon.core.models import AbstractModel, GreedyWeightedEnsembleModel, StackerEnsembleModel, SimpleWeightedEnsembleModel
from autogluon.core.trainer.utils import process_hyperparameters
from .presets_custom import get_preset_custom
from ...models import LGBModel, CatBoostModel, XGBoostModel, RFModel, XTModel, KNNModel, LinearModel,\
TabularNeuralNetModel, TabularNeuralQuantileModel, NNFastAiTabularModel, FastTextModel, TextPredictorModel, ImagePredictorModel
from ...models.tab_transformer.tab_transformer_model import TabTransformerModel
logger = logging.getLogger(__name__)
DEFAULT_MODEL_PRIORITY = dict(
KNN=100,
GBM=90,
RF=80,
CAT=70,
XT=60,
FASTAI=50,
XGB=40,
LR=30,
NN=20,
FASTTEXT=0,
AG_TEXT_NN=0,
AG_IMAGE_NN=0,
TRANSF=0,
custom=0,
)
PROBLEM_TYPE_MODEL_PRIORITY = {
MULTICLASS: dict(
FASTAI=95,
),
}
DEFAULT_SOFTCLASS_PRIORITY = dict(
GBM=100,
NN=90,
RF=80,
CAT=60,
custom=0,
)
DEFAULT_CUSTOM_MODEL_PRIORITY = 0
DEFAULT_QUANTILE_MODEL = ['RF', 'XT', 'FASTAI', 'QNN', 'ENS_WEIGHTED']
MODEL_TYPES = dict(
RF=RFModel,
XT=XTModel,
KNN=KNNModel,
GBM=LGBModel,
CAT=CatBoostModel,
XGB=XGBoostModel,
NN=TabularNeuralNetModel,
QNN=TabularNeuralQuantileModel,
LR=LinearModel,
FASTAI=NNFastAiTabularModel,
TRANSF=TabTransformerModel,
AG_TEXT_NN=TextPredictorModel,
AG_IMAGE_NN=ImagePredictorModel,
FASTTEXT=FastTextModel,
ENS_WEIGHTED=GreedyWeightedEnsembleModel,
SIMPLE_ENS_WEIGHTED=SimpleWeightedEnsembleModel,
)
DEFAULT_MODEL_NAMES = {
RFModel: 'RandomForest',
XTModel: 'ExtraTrees',
KNNModel: 'KNeighbors',
LGBModel: 'LightGBM',
CatBoostModel: 'CatBoost',
XGBoostModel: 'XGBoost',
TabularNeuralNetModel: 'NeuralNetMXNet',
TabularNeuralQuantileModel: 'QuantileNeuralNet',
LinearModel: 'LinearModel',
NNFastAiTabularModel: 'NeuralNetFastAI',
TabTransformerModel: 'Transformer',
TextPredictorModel: 'TextPredictor',
ImagePredictorModel: 'ImagePredictor',
FastTextModel: 'FastText',
GreedyWeightedEnsembleModel: 'WeightedEnsemble',
SimpleWeightedEnsembleModel: 'WeightedEnsemble',
}
VALID_AG_ARGS_KEYS = {
'name',
'name_main',
'name_prefix',
'name_suffix',
'name_bag_suffix',
'model_type',
'priority',
'problem_types',
'disable_in_hpo',
'valid_stacker',
'valid_base',
'hyperparameter_tune_kwargs',
}
level: int = 1, ensemble_type=StackerEnsembleModel, ensemble_kwargs: dict = None, ag_args_fit=None, ag_args=None, ag_args_ensemble=None,
name_suffix: str = None, default_priorities=None, invalid_model_names: list = None, excluded_model_types: list = None,
hyperparameter_preprocess_func=None, hyperparameter_preprocess_kwargs=None, silent=True):
hyperparameters = process_hyperparameters(hyperparameters)
if hyperparameter_preprocess_func is not None:
if hyperparameter_preprocess_kwargs is None:
hyperparameter_preprocess_kwargs = dict()
hyperparameters = hyperparameter_preprocess_func(hyperparameters, **hyperparameter_preprocess_kwargs)
if problem_type not in [BINARY, MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE]:
raise NotImplementedError
invalid_name_set = set()
if invalid_model_names is not None:
invalid_name_set.update(invalid_model_names)
invalid_type_set = set()
if excluded_model_types is not None:
logger.log(20, f'Excluded Model Types: {excluded_model_types}')
invalid_type_set.update(excluded_model_types)
if default_priorities is None:
default_priorities = copy.deepcopy(DEFAULT_MODEL_PRIORITY)
if problem_type in PROBLEM_TYPE_MODEL_PRIORITY:
default_priorities.update(PROBLEM_TYPE_MODEL_PRIORITY[problem_type])
level_key = level if level in hyperparameters.keys() else 'default'
if level_key not in hyperparameters.keys() and level_key == 'default':
hyperparameters = {'default': hyperparameters}
hp_level = hyperparameters[level_key]
model_cfg_priority_dict = defaultdict(list)
model_type_list = list(hp_level.keys())
for model_type in model_type_list:
if problem_type == QUANTILE and model_type not in DEFAULT_QUANTILE_MODEL:
if model_type == 'NN' and 'QNN' in DEFAULT_QUANTILE_MODEL:
model_type = 'QNN'
hp_level['QNN'] = hp_level.pop('NN')
else:
continue
models_of_type = hp_level[model_type]
if not isinstance(models_of_type, list):
models_of_type = [models_of_type]
model_cfgs_to_process = []
for model_cfg in models_of_type:
if model_type in invalid_type_set:
logger.log(20, f"\tFound '{model_type}' model in hyperparameters, but '{model_type}' is present in `excluded_model_types` and will be removed.")
continue
if isinstance(model_cfg, str):
if model_type == 'AG_TEXT_NN':
AG_TEXT_IMPORT_ERROR = 'autogluon.text has not been installed. ' \
'You may try to install "autogluon.text" ' \
'first by running. ' \
'`python3 -m pip install autogluon.text`'
try:
from autogluon.text import ag_text_presets
except ImportError:
raise ImportError(AG_TEXT_IMPORT_ERROR)
model_cfgs_to_process.append(ag_text_presets.create(model_cfg))
else:
model_cfgs_to_process += get_preset_custom(name=model_cfg, problem_type=problem_type)
else:
model_cfgs_to_process.append(model_cfg)
for model_cfg in model_cfgs_to_process:
model_cfg = clean_model_cfg(model_cfg=model_cfg, model_type=model_type, ag_args=ag_args, ag_args_ensemble=ag_args_ensemble, ag_args_fit=ag_args_fit, problem_type=problem_type)
model_cfg[AG_ARGS]['priority'] = model_cfg[AG_ARGS].get('priority', default_priorities.get(model_type, DEFAULT_CUSTOM_MODEL_PRIORITY))
model_priority = model_cfg[AG_ARGS]['priority']
# Check if model_cfg is valid
is_valid = is_model_cfg_valid(model_cfg, level=level, problem_type=problem_type)
if AG_ARGS_FIT in model_cfg and not model_cfg[AG_ARGS_FIT]:
model_cfg.pop(AG_ARGS_FIT)
if is_valid:
model_cfg_priority_dict[model_priority].append(model_cfg)
model_cfg_priority_list = [model for priority in sorted(model_cfg_priority_dict.keys(), reverse=True) for model in model_cfg_priority_dict[priority]]
if not silent:
logger.log(20, 'Model configs that will be trained (in order):')
models = []
model_args_fit = {}
for model_cfg in model_cfg_priority_list:
model = model_factory(model_cfg, path=path, problem_type=problem_type, eval_metric=eval_metric,
name_suffix=name_suffix, ensemble_type=ensemble_type, ensemble_kwargs=ensemble_kwargs,
invalid_name_set=invalid_name_set, level=level)
invalid_name_set.add(model.name)
if 'hyperparameter_tune_kwargs' in model_cfg[AG_ARGS]:
model_args_fit[model.name] = {'hyperparameter_tune_kwargs': model_cfg[AG_ARGS]['hyperparameter_tune_kwargs']}
if 'ag_args_ensemble' in model_cfg and not model_cfg['ag_args_ensemble']:
model_cfg.pop('ag_args_ensemble')
if not silent:
logger.log(20, f'\t{model.name}: \t{model_cfg}')
models.append(model)
return models, model_args_fit
def clean_model_cfg(model_cfg: dict, model_type=None, ag_args=None, ag_args_ensemble=None, ag_args_fit=None, problem_type=None):
model_cfg = copy.deepcopy(model_cfg)
if AG_ARGS not in model_cfg:
model_cfg[AG_ARGS] = dict()
if 'model_type' not in model_cfg[AG_ARGS]:
model_cfg[AG_ARGS]['model_type'] = model_type
if model_cfg[AG_ARGS]['model_type'] is None:
raise AssertionError(f'model_type was not specified for model! Model: {model_cfg}')
model_type = model_cfg[AG_ARGS]['model_type']
if not inspect.isclass(model_type):
model_type = MODEL_TYPES[model_type]
elif not issubclass(model_type, AbstractModel):
logger.warning(f'Warning: Custom model type {model_type} does not inherit from {AbstractModel}. This may lead to instability. Consider wrapping {model_type} with an implementation of {AbstractModel}!')
else:
logger.log(20, f'Custom Model Type Detected: {model_type}')
model_cfg[AG_ARGS]['model_type'] = model_type
model_type_real = model_cfg[AG_ARGS]['model_type']
if not inspect.isclass(model_type_real):
model_type_real = MODEL_TYPES[model_type_real]
default_ag_args = model_type_real._get_default_ag_args()
if ag_args is not None:
model_extra_ag_args = ag_args.copy()
model_extra_ag_args.update(model_cfg[AG_ARGS])
model_cfg[AG_ARGS] = model_extra_ag_args
default_ag_args_ensemble = model_type_real._get_default_ag_args_ensemble(problem_type=problem_type)
if ag_args_ensemble is not None:
model_extra_ag_args_ensemble = ag_args_ensemble.copy()
model_extra_ag_args_ensemble.update(model_cfg.get(AG_ARGS_ENSEMBLE, dict()))
model_cfg[AG_ARGS_ENSEMBLE] = model_extra_ag_args_ensemble
if ag_args_fit is not None:
if AG_ARGS_FIT not in model_cfg:
model_cfg[AG_ARGS_FIT] = dict()
model_extra_ag_args_fit = ag_args_fit.copy()
model_extra_ag_args_fit.update(model_cfg[AG_ARGS_FIT])
model_cfg[AG_ARGS_FIT] = model_extra_ag_args_fit
if default_ag_args is not None:
default_ag_args.update(model_cfg[AG_ARGS])
model_cfg[AG_ARGS] = default_ag_args
if default_ag_args_ensemble is not None:
default_ag_args_ensemble.update(model_cfg.get(AG_ARGS_ENSEMBLE, dict()))
model_cfg[AG_ARGS_ENSEMBLE] = default_ag_args_ensemble
return model_cfg
# Check if model is valid
def is_model_cfg_valid(model_cfg, level=1, problem_type=None):
is_valid = True
for key in model_cfg.get(AG_ARGS, {}):
if key not in VALID_AG_ARGS_KEYS:
logger.warning(f'WARNING: Unknown ag_args key: {key}')
if AG_ARGS not in model_cfg:
is_valid = False # AG_ARGS is required
elif model_cfg[AG_ARGS].get('model_type', None) is None:
is_valid = False # model_type is required
elif model_cfg[AG_ARGS].get('hyperparameter_tune_kwargs', None) and model_cfg[AG_ARGS].get('disable_in_hpo', False):
is_valid = False
elif not model_cfg[AG_ARGS].get('valid_stacker', True) and level > 1:
is_valid = False # Not valid as a stacker model
elif not model_cfg[AG_ARGS].get('valid_base', True) and level == 1:
is_valid = False # Not valid as a base model
elif problem_type is not None and problem_type not in model_cfg[AG_ARGS].get('problem_types', [problem_type]):
is_valid = False # Not valid for this problem_type
return is_valid
def model_factory(
model, path, problem_type, eval_metric,
name_suffix=None, ensemble_type=StackerEnsembleModel, ensemble_kwargs=None,
invalid_name_set=None, level=1,
):
if invalid_name_set is None:
invalid_name_set = set()
model_type = model[AG_ARGS]['model_type']
if not inspect.isclass(model_type):
model_type = MODEL_TYPES[model_type]
name_orig = model[AG_ARGS].get('name', None)
if name_orig is None:
name_main = model[AG_ARGS].get('name_main', DEFAULT_MODEL_NAMES.get(model_type, model_type.__name__))
name_prefix = model[AG_ARGS].get('name_prefix', '')
name_suff = model[AG_ARGS].get('name_suffix', '')
name_orig = name_prefix + name_main + name_suff
name_stacker = None
num_increment = 2
if name_suffix is None:
name_suffix = ''
if ensemble_kwargs is None:
name = f'{name_orig}{name_suffix}'
while name in invalid_name_set: # Ensure name is unique
name = f'{name_orig}_{num_increment}{name_suffix}'
num_increment += 1
else:
name = name_orig
name_bag_suffix = model[AG_ARGS].get('name_bag_suffix', '_BAG')
name_stacker = f'{name}{name_bag_suffix}_L{level}{name_suffix}'
while name_stacker in invalid_name_set: # Ensure name is unique
name = f'{name_orig}_{num_increment}'
name_stacker = f'{name}{name_bag_suffix}_L{level}{name_suffix}'
num_increment += 1
model_params = copy.deepcopy(model)
model_params.pop(AG_ARGS, None)
model_params.pop(AG_ARGS_ENSEMBLE, None)
model_init = model_type(path=path, name=name, problem_type=problem_type, eval_metric=eval_metric,
hyperparameters=model_params)
if ensemble_kwargs is not None:
ensemble_kwargs_model = copy.deepcopy(ensemble_kwargs)
extra_ensemble_hyperparameters = copy.deepcopy(model.get(AG_ARGS_ENSEMBLE, dict()))
ensemble_kwargs_model['hyperparameters'] = ensemble_kwargs_model.get('hyperparameters', {})
if ensemble_kwargs_model['hyperparameters'] is None:
ensemble_kwargs_model['hyperparameters'] = {}
ensemble_kwargs_model['hyperparameters'].update(extra_ensemble_hyperparameters)
model_init = ensemble_type(path=path, name=name_stacker, model_base=model_init,
**ensemble_kwargs_model)
return model_init
# TODO: v0.1 cleanup and avoid hardcoded logic with model names
def get_preset_models_softclass(hyperparameters, invalid_model_names: list = None, **kwargs):
# TODO v0.1: This import depends on mxnet, consider refactoring to avoid mxnet
from autogluon.core.metrics.softclass_metrics import soft_log_loss
model_types_standard = ['GBM', 'NN', 'CAT', 'ENS_WEIGHTED']
hyperparameters = copy.deepcopy(hyperparameters)
hyperparameters_standard = {key: hyperparameters[key] for key in hyperparameters if key in model_types_standard}
hyperparameters_rf = {key: hyperparameters[key] for key in hyperparameters if key == 'RF'}
# Swap RF criterion for MSE:
if 'RF' in hyperparameters_rf:
rf_params = hyperparameters_rf['RF']
rf_newparams = {'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE'}}
for i in range(len(rf_params)):
rf_params[i].update(rf_newparams)
rf_params = [j for n, j in enumerate(rf_params) if j not in rf_params[(n+1):]] # Remove duplicates which may arise after overwriting criterion
hyperparameters_standard['RF'] = rf_params
models, model_args_fit = get_preset_models(problem_type=SOFTCLASS, eval_metric=soft_log_loss,
hyperparameters=hyperparameters_standard,
default_priorities=DEFAULT_SOFTCLASS_PRIORITY, invalid_model_names=invalid_model_names, **kwargs)
if len(models) == 0:
raise ValueError("At least one of the following model-types must be present in hyperparameters: ['GBM','CAT','NN','RF'], "
"These are the only supported models for softclass prediction problems. "
"Softclass problems are also not yet supported for fit() with per-stack level hyperparameters.")
for model in models:
model.normalize_pred_probas = True # FIXME: Do we need to do this for child models too?
return models, model_args_fit
| true | true |
f732cdbc39f357c77e6db21d1ffa502e1539509b | 11,384 | py | Python | fastai2/text/data.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/data.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/data.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/31_text.data.ipynb (unless otherwise specified).
__all__ = ['make_vocab', 'TensorText', 'LMTensorText', 'Numericalize', 'LMDataLoader', 'pad_input', 'pad_input_chunk',
'SortedDL', 'TextBlock', 'TextDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
def make_vocab(count, min_freq=3, max_vocab=60000):
    "Create a vocab of `max_vocab` size from `Counter` `count` with items present more than `min_freq`"
    tokens = [tok for tok, freq in count.most_common(max_vocab) if freq >= min_freq]
    # Force every special token to the front of the vocab (last one inserted ends up first)
    for special in reversed(defaults.text_spec_tok):
        if special in tokens: tokens.remove(special)
        tokens.insert(0, special)
    tokens = tokens[:max_vocab]
    # Pad with fake tokens so the vocab length is a multiple of 8 (adds 8 when already aligned, as before)
    n_fake = 8 - len(tokens) % 8
    return tokens + ['xxfake'] * n_fake
# Cell
# Marker tensor types: carry no extra behavior, but let fastai's type-dispatch
# (e.g. `show_batch`) pick the right rendering for numericalized text.
class TensorText(TensorBase): pass
class LMTensorText(TensorText): pass  # language-model input variant
# Cell
class Numericalize(Transform):
    "Reversible transform of tokenized texts to numericalized ids"
    def __init__(self, vocab=None, min_freq=3, max_vocab=60000):
        store_attr(self, 'vocab,min_freq,max_vocab')
        if vocab is None: self.o2i = None
        else: self.o2i = defaultdict(int, {tok: idx for idx, tok in enumerate(vocab)})
    def setups(self, dsets):
        "Build the vocab (when not supplied) and the token->id map from `dsets`."
        if dsets is None: return
        if self.vocab is None:
            if hasattr(dsets, 'counter'): counts = dsets.counter
            else: counts = Counter(tok for sample in dsets for tok in sample)
            self.vocab = make_vocab(counts, min_freq=self.min_freq, max_vocab=self.max_vocab)
        # Unknown tokens map to id 0; padding fakes are excluded from the lookup
        self.o2i = defaultdict(int, {tok: idx for idx, tok in enumerate(self.vocab) if tok != 'xxfake'})
    def encodes(self, o):
        return TensorText(tensor([self.o2i[tok] for tok in o]))
    def decodes(self, o):
        return L(self.vocab[idx] for idx in o if self.vocab[idx] != PAD)
# Cell
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
# Cell
def _get_tokenizer(ds):
    "Find a `Tokenizer` attached to `ds` (directly or inside a list of transforms), or None."
    tok = getattr(ds, 'tokenizer', None)
    if isinstance(tok, Tokenizer): return tok
    if isinstance(tok, (list, L)):
        return next((t for t in tok if isinstance(t, Tokenizer)), None)
# Cell
def _get_lengths(ds):
    "Return the cached token lengths of `ds.items`, or None when no tokenizer is attached."
    tok = _get_tokenizer(ds)
    return None if tok is None else tok.get_lengths(ds.items)
# Cell
#TODO: add backward
@delegates()
class LMDataLoader(TfmdDL):
    """DataLoader for language modeling.

    Concatenates all texts of `dataset` into one continuous token stream, splits it
    into `bs` equal-length slices, and yields `(input, target)` pairs of `seq_len`
    tokens where the target is the input shifted by one token.
    `lens` may provide precomputed text lengths to avoid a full pass over the data;
    `cache` controls how many decoded items `ReindexCollection` keeps in memory.
    """
    def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
        self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
        self.seq_len = seq_len
        if lens is None: lens = _get_lengths(dataset)
        if lens is None: lens = [len(o) for o in self.items]  # fallback: measure each item
        self.lens = ReindexCollection(lens, idxs=self.items.idxs)
        # The "-1" is to allow for final label, we throw away the end that's less than bs
        corpus = round_multiple(sum(lens)-1, bs, round_down=True)
        self.bl = corpus//bs #bl stands for batch length
        self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
        self.last_len = self.bl - (self.n_batches-1)*seq_len  # last batch may be shorter than seq_len
        self.make_chunks()
        super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
        self.n = self.n_batches*bs
    def make_chunks(self): self.chunks = Chunks(self.items, self.lens)
    def shuffle_fn(self,idxs):
        # Shuffle the underlying texts (not the sequence indices) and rebuild the stream
        self.items.shuffle()
        self.make_chunks()
        return idxs
    def create_item(self, seq):
        "Slice sequence number `seq` out of the continuous token stream."
        if seq>=self.n: raise IndexError
        sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
        # Row (seq % bs) of the batch grid starts at its own `bl`-sized slice of the corpus
        st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
        txt = self.chunks[st : st+sl+1]  # one extra token for the shifted target
        return LMTensorText(txt[:-1]),txt[1:]
    @delegates(TfmdDL.new)
    def new(self, dataset=None, seq_len=72, **kwargs):
        # Reuse cached lengths only when the dataset is unchanged
        lens = self.lens.coll if dataset is None else None
        return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)
# Cell
@patch
def truncate(self:TitledStr, n):
    "Keep only the first `n` space-separated words of this string."
    return TitledStr(' '.join(self.split(' ')[:n]))
# Cell
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Render up to `max_n` text samples as a dataframe, truncating each text to `trunc_at` words."
    if ctxs is None:
        ctxs = get_empty_df(min(len(samples), max_n))
    if trunc_at is not None:
        samples = L((s[0].truncate(trunc_at),) + tuple(s[1:]) for s in samples)
    ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
    display_df(pd.DataFrame(ctxs))
    return ctxs
# Cell
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    "Show LM samples: truncate both input and target texts, then reuse the `TensorText` renderer."
    shortened = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
    return show_batch[TensorText](x, None, shortened, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
# Cell
def pad_input(samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
    """Collect `samples` and pad the tensors in `pad_fields` to a common length.

    Each field listed in `pad_fields` is padded with `pad_idx` up to the longest
    sample for that field. `pad_first` puts the padding before the tokens;
    `backwards` flips the token order (and therefore also flips which side is padded).
    Returns a new list of tuples; original tensor subtypes are preserved via `retain_type`.
    """
    pad_fields = L(pad_fields)
    max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
    if backwards: pad_first = not pad_first
    def _f(field_idx, x):
        if field_idx not in pad_fields: return x
        idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
        # NOTE: removed the unused `sl = slice(...)` local present in the original — it was dead code
        pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
        x1 = torch.cat([pad, x] if pad_first else [x, pad])
        if backwards: x1 = x1.flip(0)
        return retain_type(x1, x)
    return [tuple(_f(i, t) for i, t in enumerate(s)) for s in samples]
# Cell
def pad_input_chunk(samples, pad_idx=1, pad_first=True, seq_len=72):
    "Pad the first field of each sample to the batch max length, keeping padding split into `seq_len`-sized chunks."
    target_len = max(len(s[0]) for s in samples)
    def _pad_one(x):
        missing = target_len - x.shape[0]
        whole = x.new_zeros((missing // seq_len) * seq_len) + pad_idx  # full seq_len chunks of padding
        rest = x.new_zeros(missing % seq_len) + pad_idx                # remainder padding
        if pad_first: padded = torch.cat([whole, x, rest])
        else: padded = torch.cat([x, rest, whole])
        return retain_type(padded, x)
    return [(_pad_one(s[0]),) + tuple(s[1:]) for s in samples]
# Cell
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
    """A `TfmdDL` that iterates items sorted by `sort_func` (default: text length, longest first).

    Sorting groups similarly-sized texts into the same batch, minimizing padding.
    When shuffling, items are shuffled in chunks of `bs*50`, sorted within each
    chunk, and the resulting batches (except first and last) are permuted, so
    batches stay length-homogeneous while epoch order still varies.
    `res` may supply precomputed sort keys to skip the initial pass over the data.
    """
    def __init__(self, dataset, sort_func=None, res=None, **kwargs):
        super().__init__(dataset, **kwargs)
        self.sort_func = _default_sort if sort_func is None else sort_func
        if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
        # Cache the sort key of every item so it is never recomputed
        self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
        if len(self.res) > 0: self.idx_max = np.argmax(self.res)
    def get_idxs(self):
        idxs = super().get_idxs()
        if self.shuffle: return idxs
        return sorted(idxs, key=lambda i: self.res[i], reverse=True)
    def shuffle_fn(self,idxs):
        idxs = np.random.permutation(len(self.dataset))
        # Put the largest item first so peak memory is hit on the very first batch
        idx_max = np.where(idxs==self.idx_max)[0][0]
        idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
        sz = self.bs*50
        chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
        chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
        sort_idx = np.concatenate(chunks)
        sz = self.bs
        batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin `int` is the documented replacement
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=int)
        sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
        return iter(sort_idx)
    @delegates(TfmdDL.new)
    def new(self, dataset=None, **kwargs):
        # Reuse cached sort keys only when the dataset is unchanged
        res = self.res if dataset is None else None
        return super().new(dataset=dataset, res=res, **kwargs)
# Cell
class TextBlock(TransformBlock):
    "A `TransformBlock` for texts: tokenize with `tok_tfm` then numericalize, wiring up the LM or sorted dataloader."
    @delegates(Numericalize.__init__)
    def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, **kwargs):
        type_tfms = [tok_tfm, Numericalize(vocab, **kwargs)]
        if is_lm: dls_kwargs = {}
        else: dls_kwargs = {'before_batch': partial(pad_input_chunk, seq_len=seq_len)}
        super().__init__(type_tfms=type_tfms,
                         dl_type=LMDataLoader if is_lm else SortedDL,
                         dls_kwargs=dls_kwargs)
    @classmethod
    @delegates(Tokenizer.from_df, keep=True)
    def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` that reads its texts from columns `text_cols` of a dataframe."
        tok = Tokenizer.from_df(text_cols, **kwargs)
        return cls(tok, vocab=vocab, is_lm=is_lm, seq_len=seq_len, min_freq=min_freq, max_vocab=max_vocab)
    @classmethod
    @delegates(Tokenizer.from_folder, keep=True)
    def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, min_freq=3, max_vocab=60000, **kwargs):
        "Build a `TextBlock` that reads its texts from files under `path`."
        tok = Tokenizer.from_folder(path, **kwargs)
        return cls(tok, vocab=vocab, is_lm=is_lm, seq_len=seq_len, min_freq=min_freq, max_vocab=max_vocab)
# Cell
class TextDataLoaders(DataLoaders):
    """High-level factory for text `DataLoaders` (classification or language modeling)."""
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
                    tok_tfm=None, seq_len=72, **kwargs):
        "Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
        # Split by folder name unless a random split fraction was requested
        splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
        blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len)]
        if not is_lm: blocks.append(CategoryBlock(vocab=vocab))  # LM datasets have no label block
        get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
        dblock = DataBlock(blocks=blocks,
                           get_items=get_items,
                           splitter=splitter,
                           get_y=None if is_lm else parent_label)
        return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
    @classmethod
    @delegates(DataLoaders.from_dblock)
    def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
                text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, seq_len=72, **kwargs):
        "Create from `df` with texts in `text_col` and (unless `is_lm`) labels in `label_col`."
        blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len)]
        if y_block is None and not is_lm:
            # Multiple label columns imply a multi-label problem
            blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
        if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
        splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
        dblock = DataBlock(blocks=blocks,
                           get_x=ColReader(text_col),
                           get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
                           splitter=splitter)
        return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
    @classmethod
    def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
        "Create from a csv file `csv_fname` under `path`, delegating to `from_df`."
        df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
        return cls.from_df(df, path=path, **kwargs)
# Apply `delegates` after the class body so `from_csv` exposes `from_df`'s keyword arguments
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)
__all__ = ['make_vocab', 'TensorText', 'LMTensorText', 'Numericalize', 'LMDataLoader', 'pad_input', 'pad_input_chunk',
'SortedDL', 'TextBlock', 'TextDataLoaders']
from ..torch_basics import *
from ..data.all import *
from .core import *
def make_vocab(count, min_freq=3, max_vocab=60000):
vocab = [o for o,c in count.most_common(max_vocab) if c >= min_freq]
for o in reversed(defaults.text_spec_tok):
if o in vocab: vocab.remove(o)
vocab.insert(0, o)
vocab = vocab[:max_vocab]
return vocab + [f'xxfake' for i in range(0, 8-len(vocab)%8)]
class TensorText(TensorBase): pass
class LMTensorText(TensorText): pass
class Numericalize(Transform):
def __init__(self, vocab=None, min_freq=3, max_vocab=60000):
store_attr(self, 'vocab,min_freq,max_vocab')
self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})
def setups(self, dsets):
if dsets is None: return
if self.vocab is None:
count = dsets.counter if hasattr(dsets, 'counter') else Counter(p for o in dsets for p in o)
self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab)
self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})
def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))
def decodes(self, o): return L(self.vocab[o_] for o_ in o if self.vocab[o_] != PAD)
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
def _get_tokenizer(ds):
tok = getattr(ds, 'tokenizer', None)
if isinstance(tok, Tokenizer): return tok
if isinstance(tok, (list,L)):
for t in tok:
if isinstance(t, Tokenizer): return t
def _get_lengths(ds):
tok = _get_tokenizer(ds)
if tok is None: return
return tok.get_lengths(ds.items)
@delegates()
class LMDataLoader(TfmdDL):
def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
self.seq_len = seq_len
if lens is None: lens = _get_lengths(dataset)
if lens is None: lens = [len(o) for o in self.items]
self.lens = ReindexCollection(lens, idxs=self.items.idxs)
corpus = round_multiple(sum(lens)-1, bs, round_down=True)
self.bl = corpus//bs #bl stands for batch length
self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
self.last_len = self.bl - (self.n_batches-1)*seq_len
self.make_chunks()
super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
self.n = self.n_batches*bs
def make_chunks(self): self.chunks = Chunks(self.items, self.lens)
def shuffle_fn(self,idxs):
self.items.shuffle()
self.make_chunks()
return idxs
def create_item(self, seq):
if seq>=self.n: raise IndexError
sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
txt = self.chunks[st : st+sl+1]
return LMTensorText(txt[:-1]),txt[1:]
@delegates(TfmdDL.new)
def new(self, dataset=None, seq_len=72, **kwargs):
lens = self.lens.coll if dataset is None else None
return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)
# Cell
@patch
def truncate(self:TitledStr, n):
words = self.split(' ')[:n]
return TitledStr(' '.join(words))
# Cell
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
if trunc_at is not None: samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
# Cell
def pad_input(samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
pad_fields = L(pad_fields)
max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
if backwards: pad_first = not pad_first
def _f(field_idx, x):
if field_idx not in pad_fields: return x
idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
sl = slice(-len(x), sys.maxsize) if pad_first else slice(0, len(x))
pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
x1 = torch.cat([pad, x] if pad_first else [x, pad])
if backwards: x1 = x1.flip(0)
return retain_type(x1, x)
return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
# Cell
def pad_input_chunk(samples, pad_idx=1, pad_first=True, seq_len=72):
max_len = max([len(s[0]) for s in samples])
def _f(x):
l = max_len - x.shape[0]
pad_chunk = x.new_zeros((l//seq_len) * seq_len) + pad_idx
pad_res = x.new_zeros(l % seq_len) + pad_idx
x1 = torch.cat([pad_chunk, x, pad_res]) if pad_first else torch.cat([x, pad_res, pad_chunk])
return retain_type(x1, x)
return [(_f(s[0]), *s[1:]) for s in samples]
# Cell
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
def __init__(self, dataset, sort_func=None, res=None, **kwargs):
super().__init__(dataset, **kwargs)
self.sort_func = _default_sort if sort_func is None else sort_func
if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
if len(self.res) > 0: self.idx_max = np.argmax(self.res)
def get_idxs(self):
idxs = super().get_idxs()
if self.shuffle: return idxs
return sorted(idxs, key=lambda i: self.res[i], reverse=True)
def shuffle_fn(self,idxs):
idxs = np.random.permutation(len(self.dataset))
idx_max = np.where(idxs==self.idx_max)[0][0]
idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
sz = self.bs*50
chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
sort_idx = np.concatenate(chunks)
sz = self.bs
batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=np.int)
sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
return iter(sort_idx)
@delegates(TfmdDL.new)
def new(self, dataset=None, **kwargs):
res = self.res if dataset is None else None
return super().new(dataset=dataset, res=res, **kwargs)
# Cell
class TextBlock(TransformBlock):
@delegates(Numericalize.__init__)
def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, **kwargs):
return super().__init__(type_tfms=[tok_tfm, Numericalize(vocab, **kwargs)],
dl_type=LMDataLoader if is_lm else SortedDL,
dls_kwargs={} if is_lm else {'before_batch': partial(pad_input_chunk, seq_len=seq_len)})
@classmethod
@delegates(Tokenizer.from_df, keep=True)
def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, min_freq=3, max_vocab=60000, **kwargs):
return cls(Tokenizer.from_df(text_cols, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
min_freq=min_freq, max_vocab=max_vocab)
@classmethod
@delegates(Tokenizer.from_folder, keep=True)
def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, min_freq=3, max_vocab=60000, **kwargs):
return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
min_freq=min_freq, max_vocab=max_vocab)
# Cell
class TextDataLoaders(DataLoaders):
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
tok_tfm=None, seq_len=72, **kwargs):
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len)]
if not is_lm: blocks.append(CategoryBlock(vocab=vocab))
get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
dblock = DataBlock(blocks=blocks,
get_items=get_items,
splitter=splitter,
get_y=None if is_lm else parent_label)
return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, seq_len=72, **kwargs):
blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len)]
if y_block is None and not is_lm:
blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=blocks,
get_x=ColReader(text_col),
get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv) | true | true |
f732ce8ca51d97443ee8ac411a259cde1c44e316 | 17,344 | py | Python | tutorial/cytoscape/events_chapter.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | 1 | 2019-03-04T03:17:19.000Z | 2019-03-04T03:17:19.000Z | tutorial/cytoscape/events_chapter.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | null | null | null | tutorial/cytoscape/events_chapter.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | null | null | null | from textwrap import dedent
import dash_cytoscape as cyto
import dash_core_components as dcc
import dash_html_components as html
from .utils import CreateDisplay, PythonSnippet
from tutorial import tools, styles
examples = {
example: tools.load_example(
'tutorial/examples/cytoscape/{}'.format(example)
)
for example in [
'event_callbacks.py',
'event_callbacks_2.py',
'event_callbacks_3.py'
]
}
nodes = [
{
'data': {'id': short, 'label': label},
'position': {'x': 20 * lat, 'y': -20 * long}
}
for short, label, long, lat in (
('la', 'Los Angeles', 34.03, -118.25),
('nyc', 'New York', 40.71, -74),
('to', 'Toronto', 43.65, -79.38),
('mtl', 'Montreal', 45.50, -73.57),
('van', 'Vancouver', 49.28, -123.12),
('chi', 'Chicago', 41.88, -87.63),
('bos', 'Boston', 42.36, -71.06),
('hou', 'Houston', 29.76, -95.37)
)
]
edges = [
{'data': {'source': source, 'target': target}}
for source, target in (
('van', 'la'),
('la', 'chi'),
('hou', 'chi'),
('to', 'mtl'),
('mtl', 'bos'),
('nyc', 'boston'),
('to', 'hou'),
('to', 'nyc'),
('la', 'nyc'),
('nyc', 'bos')
)
]
default_stylesheet = [
{
'selector': 'node',
'style': {
'background-color': '#BFD7B5',
'label': 'data(label)'
}
},
{
'selector': 'edge',
'style': {
'line-color': '#A3C4BC'
}
}
]
Display = CreateDisplay({
'cyto': cyto,
'html': html,
'dcc': dcc,
'default_stylesheet': default_stylesheet,
'nodes': nodes,
'edges': edges
})
layout = html.Div([
dcc.Markdown(dedent('''
# Cytoscape Event Callbacks
In [part 4](/cytoscape/callbacks), we showed how to update Cytoscape with
other components by assigning callbacks that output to `'elements',
'stylesheet', 'layout'`. Moreover, it is also possible to use properties
of Cytoscape as an input to callbacks, which can be used to update other
components, or Cytoscape itself. Those properties are updated (which fires
the callbacks) when the user interact with elements in a certain way,
which justifies the name of event callbacks. You can find props such as
`tapNode`, which returns a complete description of the node object when
the user clicks or taps on a node, `mouseoverEdgeData`, which returns only
the data dictionary of the edge that was most recently hovered by the user.
The complete list can be found in the [Dash Cytoscape Reference](/cytoscape/reference).
## Simple callback construction
Let's look back at the same city example as the previous chapter:
''')),
Display('''
cyto.Cytoscape(
id='cytoscape-events',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
)
'''),
dcc.Markdown(dedent('''
This time, we will use the `tapNodeData` properties as input
to our callbacks, which will simply dump the content into an `html.Pre`:
''')),
dcc.SyntaxHighlighter(
examples['event_callbacks.py'][0],
language='python',
customStyle=styles.code_container
),
html.Div(
examples['event_callbacks.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
Notice that the `html.Div` is updated every time you click or tap a node,
and returns the data dictionary of the node. Alternatively, you can use
`tapNode` to obtain the entire element specification (given as a
dictionary), rather than just its `data`.
## Click, tap and hover
Let's now display the data generated whenever you click or hover over a node
or an edge. Simply replace the previous layout and callbacks by this:
''')),
PythonSnippet('''
app.layout = html.Div([
cyto.Cytoscape(
id='cytoscape-event-callbacks',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
),
html.P(id='cytoscape-tapNodeData-output'),
html.P(id='cytoscape-tapEdgeData-output'),
html.P(id='cytoscape-mouseoverNodeData-output'),
html.P(id='cytoscape-mouseoverEdgeData-output')
])
@app.callback(Output('cytoscape-tapNodeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'tapNodeData')])
def displayTapNodeData(data):
if data:
return "You recently clicked/tapped the city: " + data['label']
@app.callback(Output('cytoscape-tapEdgeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'tapEdgeData')])
def displayTapEdgeData(data):
if data:
return "You recently clicked/tapped the edge between " + data['source'].upper() + " and " + data['target'].upper()
@app.callback(Output('cytoscape-mouseoverNodeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'mouseoverNodeData')])
def displayTapNodeData(data):
if data:
return "You recently hovered over the city: " + data['label']
@app.callback(Output('cytoscape-mouseoverEdgeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'mouseoverEdgeData')])
def displayTapEdgeData(data):
if data:
return "You recently hovered over the edge between " + data['source'].upper() + " and " + data['target'].upper()
'''),
html.Div(
examples['event_callbacks_2.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
## Selecting multiple elements
Additionally, you can also display all the data currently selected, either
through a box selection (Shift+Click and drag) or by individually selecting
multiple elements while holding Shift:
''')),
PythonSnippet('''
app.layout = html.Div([
cyto.Cytoscape(
id='cytoscape-event-callbacks',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
),
dcc.Markdown(id='cytoscape-selectedNodeData-markdown')
])
@app.callback(Output('cytoscape-selectedNodeData-markdown', 'children'),
[Input('cytoscape-event-callbacks', 'selectedNodeData')])
def displaySelectedNodeData(data_list):
if not data_list:
return
cities_list = [data['label'] for data in data_list]
return "You selected the following cities:" + "\\n* ".join(cities_list)
'''),
html.Div(
examples['event_callbacks_3.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
## Advanced usage of callbacks
Those event callbacks enable more advanced interactions between components.
In fact, you can even use them to update other `Cytoscape` arguments. The
[`usage-stylesheet.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-stylesheet.py)
example (hosted on the `dash-cytoscape` Github repo) lets you click to change the
color of a node to purple, its targeted
nodes to red, and its incoming nodes to blue. All of this is done using a
single callback function, which takes as input the `tapNode` prop of the
`Cytoscape` component along with a few dropdowns, and outputs to the
`stylesheet` prop. You can try out this
[interactive stylesheet demo](https://dash-gallery.plotly.host/cytoscape-stylesheet)
hosted on the [Dash Deployment Servers](https://plot.ly/products/dash/).
''')),
html.Details(open=False, children=[
html.Summary('Expand to see how to interactively style your elements'),
PythonSnippet('''
@app.callback(Output('cytoscape', 'stylesheet'),
[Input('cytoscape', 'tapNode'),
Input('input-follower-color', 'value'),
Input('input-following-color', 'value'),
Input('dropdown-node-shape', 'value')])
def generate_stylesheet(node, follower_color, following_color, node_shape):
if not node:
return default_stylesheet
stylesheet = [{
"selector": 'node',
'style': {
'opacity': 0.3,
'shape': node_shape
}
}, {
'selector': 'edge',
'style': {
'opacity': 0.2,
"curve-style": "bezier",
}
}, {
"selector": 'node[id = "{}"]'.format(node['data']['id']),
"style": {
'background-color': '#B10DC9',
"border-color": "purple",
"border-width": 2,
"border-opacity": 1,
"opacity": 1,
"label": "data(label)",
"color": "#B10DC9",
"text-opacity": 1,
"font-size": 12,
'z-index': 9999
}
}]
for edge in node['edgesData']:
if edge['source'] == node['data']['id']:
stylesheet.append({
"selector": 'node[id = "{}"]'.format(edge['target']),
"style": {
'background-color': following_color,
'opacity': 0.9
}
})
stylesheet.append({
"selector": 'edge[id= "{}"]'.format(edge['id']),
"style": {
"mid-target-arrow-color": following_color,
"mid-target-arrow-shape": "vee",
"line-color": following_color,
'opacity': 0.9,
'z-index': 5000
}
})
if edge['target'] == node['data']['id']:
stylesheet.append({
"selector": 'node[id = "{}"]'.format(edge['source']),
"style": {
'background-color': follower_color,
'opacity': 0.9,
'z-index': 9999
}
})
stylesheet.append({
"selector": 'edge[id= "{}"]'.format(edge['id']),
"style": {
"mid-target-arrow-color": follower_color,
"mid-target-arrow-shape": "vee",
"line-color": follower_color,
'opacity': 1,
'z-index': 5000
}
})
return stylesheet
''')
]),
dcc.Markdown(dedent('''
Additionally, [`usage-elements.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-elements.py)
lets you progressively expand your graph
by using `tapNodeData` as the input and `elements` as the output.
The app initially pre-loads the entire dataset, but only loads the graph
with a single node. It then constructs four dictionaries that maps every
single node ID to its following nodes, following edges, followers nodes,
followers edges.
Then, it lets you expand the incoming or the outgoing
neighbors by clicking the node you want to expand. This
is done through a callback that retrieves the followers (outgoing) or following
(incoming) from the dictionaries, and add the to the `elements`.
[Click here for the online demo](https://dash-gallery.plotly.host/cytoscape-elements).
''')),
html.Details(open=False, children=[
html.Summary('Expand to see how to construct the dictionaries'),
PythonSnippet('''
with open('demos/data/sample_network.txt', 'r') as f:
data = f.read().split('\\n')
# We select the first 750 edges and associated nodes for an easier visualization
edges = data[:750]
nodes = set()
following_node_di = {} # user id -> list of users they are following
following_edges_di = {} # user id -> list of cy edges starting from user id
followers_node_di = {} # user id -> list of followers (cy_node format)
followers_edges_di = {} # user id -> list of cy edges ending at user id
cy_edges = []
cy_nodes = []
for edge in edges:
if " " not in edge:
continue
source, target = edge.split(" ")
cy_edge = {'data': {'id': source+target, 'source': source, 'target': target}}
cy_target = {"data": {"id": target, "label": "User #" + str(target[-5:])}}
cy_source = {"data": {"id": source, "label": "User #" + str(source[-5:])}}
if source not in nodes:
nodes.add(source)
cy_nodes.append(cy_source)
if target not in nodes:
nodes.add(target)
cy_nodes.append(cy_target)
# Process dictionary of following
if not following_node_di.get(source):
following_node_di[source] = []
if not following_edges_di.get(source):
following_edges_di[source] = []
following_node_di[source].append(cy_target)
following_edges_di[source].append(cy_edge)
# Process dictionary of followers
if not followers_node_di.get(target):
followers_node_di[target] = []
if not followers_edges_di.get(target):
followers_edges_di[target] = []
followers_node_di[target].append(cy_source)
followers_edges_di[target].append(cy_edge)
''')
]),
html.Details(open=False, children=[
html.Summary('Expand to see how to generate elements'),
PythonSnippet('''
@app.callback(Output('cytoscape', 'elements'),
[Input('cytoscape', 'tapNodeData')],
[State('cytoscape', 'elements'),
State('radio-expand', 'value')])
def generate_elements(nodeData, elements, expansion_mode):
if not nodeData:
return default_elements
# If the node has already been expanded, we don't expand it again
if nodeData.get('expanded'):
return elements
# This retrieves the currently selected element, and tag it as expanded
for element in elements:
if nodeData['id'] == element.get('data').get('id'):
element['data']['expanded'] = True
break
if expansion_mode == 'followers':
followers_nodes = followers_node_di.get(nodeData['id'])
followers_edges = followers_edges_di.get(nodeData['id'])
if followers_nodes:
for node in followers_nodes:
node['classes'] = 'followerNode'
elements.extend(followers_nodes)
if followers_edges:
for edge in followers_edges:
edge['classes'] = 'followerEdge'
elements.extend(followers_edges)
elif expansion_mode == 'following':
following_nodes = following_node_di.get(nodeData['id'])
following_edges = following_edges_di.get(nodeData['id'])
if following_nodes:
for node in following_nodes:
if node['data']['id'] != genesis_node['data']['id']:
node['classes'] = 'followingNode'
elements.append(node)
if following_edges:
for edge in following_edges:
edge['classes'] = 'followingEdge'
elements.extend(following_edges)
return elements
''')
]),
dcc.Markdown(dedent('''
To see more examples of events, check out the [event callbacks demo](https://dash-gallery.plotly.host/cytoscape-events)
(the source file is available as [`usage-events.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-events.py) on the project repo)
and the [Cytoscape references](/cytoscape/reference).
'''))
])
| 37.298925 | 148 | 0.531135 | from textwrap import dedent
import dash_cytoscape as cyto
import dash_core_components as dcc
import dash_html_components as html
from .utils import CreateDisplay, PythonSnippet
from tutorial import tools, styles
examples = {
example: tools.load_example(
'tutorial/examples/cytoscape/{}'.format(example)
)
for example in [
'event_callbacks.py',
'event_callbacks_2.py',
'event_callbacks_3.py'
]
}
nodes = [
{
'data': {'id': short, 'label': label},
'position': {'x': 20 * lat, 'y': -20 * long}
}
for short, label, long, lat in (
('la', 'Los Angeles', 34.03, -118.25),
('nyc', 'New York', 40.71, -74),
('to', 'Toronto', 43.65, -79.38),
('mtl', 'Montreal', 45.50, -73.57),
('van', 'Vancouver', 49.28, -123.12),
('chi', 'Chicago', 41.88, -87.63),
('bos', 'Boston', 42.36, -71.06),
('hou', 'Houston', 29.76, -95.37)
)
]
edges = [
{'data': {'source': source, 'target': target}}
for source, target in (
('van', 'la'),
('la', 'chi'),
('hou', 'chi'),
('to', 'mtl'),
('mtl', 'bos'),
('nyc', 'boston'),
('to', 'hou'),
('to', 'nyc'),
('la', 'nyc'),
('nyc', 'bos')
)
]
default_stylesheet = [
{
'selector': 'node',
'style': {
'background-color': '#BFD7B5',
'label': 'data(label)'
}
},
{
'selector': 'edge',
'style': {
'line-color': '#A3C4BC'
}
}
]
Display = CreateDisplay({
'cyto': cyto,
'html': html,
'dcc': dcc,
'default_stylesheet': default_stylesheet,
'nodes': nodes,
'edges': edges
})
layout = html.Div([
dcc.Markdown(dedent('''
# Cytoscape Event Callbacks
In [part 4](/cytoscape/callbacks), we showed how to update Cytoscape with
other components by assigning callbacks that output to `'elements',
'stylesheet', 'layout'`. Moreover, it is also possible to use properties
of Cytoscape as an input to callbacks, which can be used to update other
components, or Cytoscape itself. Those properties are updated (which fires
the callbacks) when the user interact with elements in a certain way,
which justifies the name of event callbacks. You can find props such as
`tapNode`, which returns a complete description of the node object when
the user clicks or taps on a node, `mouseoverEdgeData`, which returns only
the data dictionary of the edge that was most recently hovered by the user.
The complete list can be found in the [Dash Cytoscape Reference](/cytoscape/reference).
## Simple callback construction
Let's look back at the same city example as the previous chapter:
''')),
Display('''
cyto.Cytoscape(
id='cytoscape-events',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
)
'''),
dcc.Markdown(dedent('''
This time, we will use the `tapNodeData` properties as input
to our callbacks, which will simply dump the content into an `html.Pre`:
''')),
dcc.SyntaxHighlighter(
examples['event_callbacks.py'][0],
language='python',
customStyle=styles.code_container
),
html.Div(
examples['event_callbacks.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
Notice that the `html.Div` is updated every time you click or tap a node,
and returns the data dictionary of the node. Alternatively, you can use
`tapNode` to obtain the entire element specification (given as a
dictionary), rather than just its `data`.
## Click, tap and hover
Let's now display the data generated whenever you click or hover over a node
or an edge. Simply replace the previous layout and callbacks by this:
''')),
PythonSnippet('''
app.layout = html.Div([
cyto.Cytoscape(
id='cytoscape-event-callbacks',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
),
html.P(id='cytoscape-tapNodeData-output'),
html.P(id='cytoscape-tapEdgeData-output'),
html.P(id='cytoscape-mouseoverNodeData-output'),
html.P(id='cytoscape-mouseoverEdgeData-output')
])
@app.callback(Output('cytoscape-tapNodeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'tapNodeData')])
def displayTapNodeData(data):
if data:
return "You recently clicked/tapped the city: " + data['label']
@app.callback(Output('cytoscape-tapEdgeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'tapEdgeData')])
def displayTapEdgeData(data):
if data:
return "You recently clicked/tapped the edge between " + data['source'].upper() + " and " + data['target'].upper()
@app.callback(Output('cytoscape-mouseoverNodeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'mouseoverNodeData')])
def displayTapNodeData(data):
if data:
return "You recently hovered over the city: " + data['label']
@app.callback(Output('cytoscape-mouseoverEdgeData-output', 'children'),
[Input('cytoscape-event-callbacks', 'mouseoverEdgeData')])
def displayTapEdgeData(data):
if data:
return "You recently hovered over the edge between " + data['source'].upper() + " and " + data['target'].upper()
'''),
html.Div(
examples['event_callbacks_2.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
## Selecting multiple elements
Additionally, you can also display all the data currently selected, either
through a box selection (Shift+Click and drag) or by individually selecting
multiple elements while holding Shift:
''')),
PythonSnippet('''
app.layout = html.Div([
cyto.Cytoscape(
id='cytoscape-event-callbacks',
layout={'name': 'preset'},
elements=edges+nodes,
stylesheet=default_stylesheet,
style={'width': '100%', 'height': '450px'}
),
dcc.Markdown(id='cytoscape-selectedNodeData-markdown')
])
@app.callback(Output('cytoscape-selectedNodeData-markdown', 'children'),
[Input('cytoscape-event-callbacks', 'selectedNodeData')])
def displaySelectedNodeData(data_list):
if not data_list:
return
cities_list = [data['label'] for data in data_list]
return "You selected the following cities:" + "\\n* ".join(cities_list)
'''),
html.Div(
examples['event_callbacks_3.py'][1],
className='example-container'
),
dcc.Markdown(dedent('''
## Advanced usage of callbacks
Those event callbacks enable more advanced interactions between components.
In fact, you can even use them to update other `Cytoscape` arguments. The
[`usage-stylesheet.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-stylesheet.py)
example (hosted on the `dash-cytoscape` Github repo) lets you click to change the
color of a node to purple, its targeted
nodes to red, and its incoming nodes to blue. All of this is done using a
single callback function, which takes as input the `tapNode` prop of the
`Cytoscape` component along with a few dropdowns, and outputs to the
`stylesheet` prop. You can try out this
[interactive stylesheet demo](https://dash-gallery.plotly.host/cytoscape-stylesheet)
hosted on the [Dash Deployment Servers](https://plot.ly/products/dash/).
''')),
html.Details(open=False, children=[
html.Summary('Expand to see how to interactively style your elements'),
PythonSnippet('''
@app.callback(Output('cytoscape', 'stylesheet'),
[Input('cytoscape', 'tapNode'),
Input('input-follower-color', 'value'),
Input('input-following-color', 'value'),
Input('dropdown-node-shape', 'value')])
def generate_stylesheet(node, follower_color, following_color, node_shape):
if not node:
return default_stylesheet
stylesheet = [{
"selector": 'node',
'style': {
'opacity': 0.3,
'shape': node_shape
}
}, {
'selector': 'edge',
'style': {
'opacity': 0.2,
"curve-style": "bezier",
}
}, {
"selector": 'node[id = "{}"]'.format(node['data']['id']),
"style": {
'background-color': '#B10DC9',
"border-color": "purple",
"border-width": 2,
"border-opacity": 1,
"opacity": 1,
"label": "data(label)",
"color": "#B10DC9",
"text-opacity": 1,
"font-size": 12,
'z-index': 9999
}
}]
for edge in node['edgesData']:
if edge['source'] == node['data']['id']:
stylesheet.append({
"selector": 'node[id = "{}"]'.format(edge['target']),
"style": {
'background-color': following_color,
'opacity': 0.9
}
})
stylesheet.append({
"selector": 'edge[id= "{}"]'.format(edge['id']),
"style": {
"mid-target-arrow-color": following_color,
"mid-target-arrow-shape": "vee",
"line-color": following_color,
'opacity': 0.9,
'z-index': 5000
}
})
if edge['target'] == node['data']['id']:
stylesheet.append({
"selector": 'node[id = "{}"]'.format(edge['source']),
"style": {
'background-color': follower_color,
'opacity': 0.9,
'z-index': 9999
}
})
stylesheet.append({
"selector": 'edge[id= "{}"]'.format(edge['id']),
"style": {
"mid-target-arrow-color": follower_color,
"mid-target-arrow-shape": "vee",
"line-color": follower_color,
'opacity': 1,
'z-index': 5000
}
})
return stylesheet
''')
]),
dcc.Markdown(dedent('''
Additionally, [`usage-elements.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-elements.py)
lets you progressively expand your graph
by using `tapNodeData` as the input and `elements` as the output.
The app initially pre-loads the entire dataset, but only loads the graph
with a single node. It then constructs four dictionaries that maps every
single node ID to its following nodes, following edges, followers nodes,
followers edges.
Then, it lets you expand the incoming or the outgoing
neighbors by clicking the node you want to expand. This
is done through a callback that retrieves the followers (outgoing) or following
(incoming) from the dictionaries, and add the to the `elements`.
[Click here for the online demo](https://dash-gallery.plotly.host/cytoscape-elements).
''')),
html.Details(open=False, children=[
html.Summary('Expand to see how to construct the dictionaries'),
PythonSnippet('''
with open('demos/data/sample_network.txt', 'r') as f:
data = f.read().split('\\n')
# We select the first 750 edges and associated nodes for an easier visualization
edges = data[:750]
nodes = set()
following_node_di = {} # user id -> list of users they are following
following_edges_di = {} # user id -> list of cy edges starting from user id
followers_node_di = {} # user id -> list of followers (cy_node format)
followers_edges_di = {} # user id -> list of cy edges ending at user id
cy_edges = []
cy_nodes = []
for edge in edges:
if " " not in edge:
continue
source, target = edge.split(" ")
cy_edge = {'data': {'id': source+target, 'source': source, 'target': target}}
cy_target = {"data": {"id": target, "label": "User #" + str(target[-5:])}}
cy_source = {"data": {"id": source, "label": "User #" + str(source[-5:])}}
if source not in nodes:
nodes.add(source)
cy_nodes.append(cy_source)
if target not in nodes:
nodes.add(target)
cy_nodes.append(cy_target)
# Process dictionary of following
if not following_node_di.get(source):
following_node_di[source] = []
if not following_edges_di.get(source):
following_edges_di[source] = []
following_node_di[source].append(cy_target)
following_edges_di[source].append(cy_edge)
# Process dictionary of followers
if not followers_node_di.get(target):
followers_node_di[target] = []
if not followers_edges_di.get(target):
followers_edges_di[target] = []
followers_node_di[target].append(cy_source)
followers_edges_di[target].append(cy_edge)
''')
]),
html.Details(open=False, children=[
html.Summary('Expand to see how to generate elements'),
PythonSnippet('''
@app.callback(Output('cytoscape', 'elements'),
[Input('cytoscape', 'tapNodeData')],
[State('cytoscape', 'elements'),
State('radio-expand', 'value')])
def generate_elements(nodeData, elements, expansion_mode):
if not nodeData:
return default_elements
# If the node has already been expanded, we don't expand it again
if nodeData.get('expanded'):
return elements
# This retrieves the currently selected element, and tag it as expanded
for element in elements:
if nodeData['id'] == element.get('data').get('id'):
element['data']['expanded'] = True
break
if expansion_mode == 'followers':
followers_nodes = followers_node_di.get(nodeData['id'])
followers_edges = followers_edges_di.get(nodeData['id'])
if followers_nodes:
for node in followers_nodes:
node['classes'] = 'followerNode'
elements.extend(followers_nodes)
if followers_edges:
for edge in followers_edges:
edge['classes'] = 'followerEdge'
elements.extend(followers_edges)
elif expansion_mode == 'following':
following_nodes = following_node_di.get(nodeData['id'])
following_edges = following_edges_di.get(nodeData['id'])
if following_nodes:
for node in following_nodes:
if node['data']['id'] != genesis_node['data']['id']:
node['classes'] = 'followingNode'
elements.append(node)
if following_edges:
for edge in following_edges:
edge['classes'] = 'followingEdge'
elements.extend(following_edges)
return elements
''')
]),
dcc.Markdown(dedent('''
To see more examples of events, check out the [event callbacks demo](https://dash-gallery.plotly.host/cytoscape-events)
(the source file is available as [`usage-events.py`](https://github.com/plotly/dash-cytoscape/blob/master/usage-events.py) on the project repo)
and the [Cytoscape references](/cytoscape/reference).
'''))
])
| true | true |
f732cf174e123aeeea17cb7f4063e9983ee4077c | 538 | py | Python | onem2m/types.py | franjial/ghostm2m | 2e7898761237cb12f4fddd55665b3a15fb84dddc | [
"MIT"
] | null | null | null | onem2m/types.py | franjial/ghostm2m | 2e7898761237cb12f4fddd55665b3a15fb84dddc | [
"MIT"
] | null | null | null | onem2m/types.py | franjial/ghostm2m | 2e7898761237cb12f4fddd55665b3a15fb84dddc | [
"MIT"
] | null | null | null | from enum import Enum
class Operation(Enum):
    """Request operation codes (the CRUD+Notify primitives of the protocol)."""
    Create = 1
    Retrieve = 2
    Update = 3
    Delete = 4
    Notify = 5
class ResourceType(Enum):
    """Resource type identifiers.

    Numeric values are protocol-defined, so members are not in sequential
    order; do not renumber or reorder them.
    """
    container = 3
    contentInstance = 4
    AE = 1
    CSEBase = 5
class cseTypeID(Enum):
    """CSE (Common Services Entity) type identifiers.

    IN: infrastructure node, MN: middle node, ASN: application service node.
    (Name kept lowercase for backward compatibility with existing callers.)
    """
    IN_CSE = 1
    MN_CSE = 2
    ASN_CSE = 3
class ResponseStatusCode(Enum):
    """Response status codes.

    Ranges mirror HTTP semantics: 1xxx accepted/informational, 2xxx success,
    4xxx originator (client) errors, 5xxx receiver (server) errors.
    """
    ACCEPTED = 1000
    OK = 2000
    CREATED = 2001
    DELETED = 2002
    UPDATED = 2004
    BAD_REQUEST = 4000
    RELEASE_VERSION_NOT_SUPPORTED = 4001
    NOT_FOUND = 4004
    OPERATION_NOT_ALLOWED = 4005
    INTERNAL_SERVER_ERROR = 5000
    NOT_IMPLEMENTED = 5001
#todo all errors | 16.8125 | 37 | 0.728625 | from enum import Enum
class Operation(Enum):
Create = 1
Retrieve = 2
Update = 3
Delete = 4
Notify = 5
class ResourceType(Enum):
container = 3
contentInstance = 4
AE = 1
CSEBase = 5
class cseTypeID(Enum):
IN_CSE = 1
MN_CSE = 2
ASN_CSE = 3
class ResponseStatusCode(Enum):
ACCEPTED = 1000
OK = 2000
CREATED = 2001
DELETED = 2002
UPDATED = 2004
BAD_REQUEST = 4000
RELEASE_VERSION_NOT_SUPPORTED = 4001
NOT_FOUND = 4004
OPERATION_NOT_ALLOWED = 4005
INTERNAL_SERVER_ERROR = 5000
NOT_IMPLEMENTED = 5001
| true | true |
f732cf9a08983af0b4335a385f968d4495d7b53f | 646 | py | Python | shared/packet.py | Tookmund/hackerforce | d757910db1631e26e489a10a99fa67cd74292c4e | [
"Apache-2.0"
] | null | null | null | shared/packet.py | Tookmund/hackerforce | d757910db1631e26e489a10a99fa67cd74292c4e | [
"Apache-2.0"
] | null | null | null | shared/packet.py | Tookmund/hackerforce | d757910db1631e26e489a10a99fa67cd74292c4e | [
"Apache-2.0"
] | 1 | 2021-06-15T21:04:14.000Z | 2021-06-15T21:04:14.000Z | import os
from django.conf import settings
import requests
def get_packet_file_path():
    """Return the local path of the sponsorship packet under ``static/``.

    Returns ``None`` when ``SPONSORSHIP_PACKET_FILE`` is not configured.
    """
    if not settings.SPONSORSHIP_PACKET_FILE:
        return None
    return os.path.join(settings.PROJECT_ROOT, 'static',
                        settings.SPONSORSHIP_PACKET_FILE)
def fetch_packet():
    """Download the sponsorship packet to its local static path.

    Does nothing unless both ``SPONSORSHIP_PACKET_FILE`` and
    ``SPONSORSHIP_PACKET_URL`` are configured, and skips the download when
    the file already exists.  Non-200 responses are silently ignored,
    preserving the original best-effort behaviour.
    """
    if not (settings.SPONSORSHIP_PACKET_FILE and settings.SPONSORSHIP_PACKET_URL):
        return
    path = get_packet_file_path()
    if os.path.exists(path):
        return
    # Bug fix: with stream=True the response must be closed, otherwise the
    # underlying connection stays open; the context manager guarantees
    # release even if writing the file fails part-way.
    with requests.get(settings.SPONSORSHIP_PACKET_URL, stream=True) as r:
        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r.iter_content(1024):
                    f.write(chunk)
| 40.375 | 136 | 0.688854 | import os
from django.conf import settings
import requests
def get_packet_file_path():
return os.path.join(settings.PROJECT_ROOT, 'static', settings.SPONSORSHIP_PACKET_FILE) if settings.SPONSORSHIP_PACKET_FILE else None
def fetch_packet():
if settings.SPONSORSHIP_PACKET_FILE and settings.SPONSORSHIP_PACKET_URL:
if not os.path.exists(get_packet_file_path()):
r = requests.get(settings.SPONSORSHIP_PACKET_URL, stream=True)
if r.status_code == 200:
with open(get_packet_file_path(), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
| true | true |
f732d1d252b489d46ac5e35870d59d9c9c635d67 | 3,452 | py | Python | mapclientplugins/coordinateframeselectorstep/configuredialog.py | tsalemink/hoofcoordinateframeselector | aebdad1759de58a6888966e94b2771a0bea0e105 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/coordinateframeselectorstep/configuredialog.py | tsalemink/hoofcoordinateframeselector | aebdad1759de58a6888966e94b2771a0bea0e105 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/coordinateframeselectorstep/configuredialog.py | tsalemink/hoofcoordinateframeselector | aebdad1759de58a6888966e94b2771a0bea0e105 | [
"Apache-2.0"
] | null | null | null | from PySide2 import QtWidgets
from mapclientplugins.coordinateframeselectorstep.ui_configuredialog import Ui_ConfigureDialog
INVALID_STYLE_SHEET = 'background-color: rgba(239, 0, 0, 50)'
DEFAULT_STYLE_SHEET = ''
class ConfigureDialog(QtWidgets.QDialog):
    '''
    Dialog presenting the user with the options to configure this step.
    '''

    def __init__(self, parent=None):
        '''
        Constructor
        '''
        QtWidgets.QDialog.__init__(self, parent)

        self._ui = Ui_ConfigureDialog()
        self._ui.setupUi(self)

        # Identifier the dialog was last loaded/saved with; lets validate()
        # tell "unchanged identifier" apart from a clash with another step.
        self._previousIdentifier = ''
        # Callable injected by the step framework; reports how many steps in
        # the workflow currently use a given identifier.
        self.identifierOccursCount = None

        self._makeConnections()

    def _makeConnections(self):
        self._ui.lineEdit0.textChanged.connect(self.validate)

    def accept(self):
        '''
        Override accept so the user must confirm saving an invalid
        configuration.
        '''
        save = True
        if not self.validate():
            answer = QtWidgets.QMessageBox.warning(
                self, 'Invalid Configuration',
                'This configuration is invalid. Unpredictable behaviour may result if you choose \'Yes\', are you sure you want to save this configuration?)',
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
                QtWidgets.QMessageBox.No)
            save = answer == QtWidgets.QMessageBox.Yes

        if save:
            QtWidgets.QDialog.accept(self)

    def validate(self):
        '''
        Validate the configuration dialog fields, marking any invalid field
        with INVALID_STYLE_SHEET, and return the overall validity.
        '''
        # The identifier is acceptable when it occurs nowhere in the
        # workflow, or when its single occurrence is this step's own
        # (unchanged) identifier.  identifierOccursCount is provided by the
        # workflow framework.
        text = self._ui.lineEdit0.text()
        occurrences = self.identifierOccursCount(text)
        valid = occurrences == 0 or \
            (occurrences == 1 and self._previousIdentifier == text)
        sheet = DEFAULT_STYLE_SHEET if valid else INVALID_STYLE_SHEET
        self._ui.lineEdit0.setStyleSheet(sheet)

        return valid

    def getConfig(self):
        '''
        Return the dialog's current configuration as a dict, recording the
        identifier so later validation can detect uniqueness correctly.
        '''
        identifier = self._ui.lineEdit0.text()
        self._previousIdentifier = identifier
        return {'identifier': identifier}

    def setConfig(self, config):
        '''
        Load *config* into the dialog, recording the identifier so later
        validation can detect uniqueness correctly.
        '''
        self._previousIdentifier = config['identifier']
        self._ui.lineEdit0.setText(config['identifier'])
| 40.139535 | 194 | 0.643105 | from PySide2 import QtWidgets
from mapclientplugins.coordinateframeselectorstep.ui_configuredialog import Ui_ConfigureDialog
INVALID_STYLE_SHEET = 'background-color: rgba(239, 0, 0, 50)'
DEFAULT_STYLE_SHEET = ''
class ConfigureDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self._ui = Ui_ConfigureDialog()
self._ui.setupUi(self)
self._previousIdentifier = ''
self.identifierOccursCount = None
self._makeConnections()
def _makeConnections(self):
self._ui.lineEdit0.textChanged.connect(self.validate)
def accept(self):
result = QtWidgets.QMessageBox.Yes
if not self.validate():
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration',
'This configuration is invalid. Unpredictable behaviour may result if you choose \'Yes\', are you sure you want to save this configuration?)',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
QtWidgets.QDialog.accept(self)
def validate(self):
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = (value == 0) or (value == 1 and self._previousIdentifier == self._ui.lineEdit0.text())
if valid:
self._ui.lineEdit0.setStyleSheet(DEFAULT_STYLE_SHEET)
else:
self._ui.lineEdit0.setStyleSheet(INVALID_STYLE_SHEET)
return valid
def getConfig(self):
self._previousIdentifier = self._ui.lineEdit0.text()
config = {}
config['identifier'] = self._ui.lineEdit0.text()
return config
def setConfig(self, config):
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
| true | true |
f732d262a5c402980f88711bf712e94e3b49e08e | 7,685 | py | Python | URDF_Exporter/core/Joint.py | romzn/fusion2urdf | 006a97d498267d5209436eaad37a940326c911d5 | [
"MIT"
] | 9 | 2020-11-15T11:05:59.000Z | 2022-03-13T10:38:32.000Z | URDF_Exporter/core/Joint.py | nksas/fusion2urdf | 22df00ddef567ad87c6f4f3f2e391f8d461e6afa | [
"MIT"
] | null | null | null | URDF_Exporter/core/Joint.py | nksas/fusion2urdf | 22df00ddef567ad87c6f4f3f2e391f8d461e6afa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun May 12 20:17:17 2019
@author: syuntoku
"""
import adsk, re
from xml.etree.ElementTree import Element, SubElement
from ..utils import utils
class Joint:
    """URDF joint description plus its generated XML fragments."""

    def __init__(self, name, xyz, axis, parent, child, joint_type,
                 upper_limit, lower_limit):
        """
        Attributes
        ----------
        name: str
            name of the joint
        type: str
            joint type (e.g. 'revolute', 'fixed')
        xyz: [x, y, z]
            position of the joint
        axis: [x, y, z]
            joint axis (used for 'revolute' and 'continuous')
        parent: str
            name of the parent link
        child: str
            name of the child link
        upper_limit, lower_limit: float
            motion limits (used for 'revolute' and 'prismatic')
        joint_xml: str
            xml fragment for the joint (set by make_joint_xml)
        tran_xml: str
            xml fragment for the transmission (set by make_transmission_xml)
        """
        self.name = name
        self.type = joint_type
        self.xyz = xyz
        self.parent = parent
        self.child = child
        self.joint_xml = None
        self.tran_xml = None
        self.axis = axis
        self.upper_limit = upper_limit
        self.lower_limit = lower_limit

    def make_joint_xml(self):
        """Build the <joint> element and store its text in self.joint_xml."""
        root = Element('joint')
        root.attrib = {'name': self.name, 'type': self.type}

        origin = SubElement(root, 'origin')
        origin.attrib = {'xyz': ' '.join(str(c) for c in self.xyz),
                         'rpy': '0 0 0'}
        SubElement(root, 'parent').attrib = {'link': self.parent}
        SubElement(root, 'child').attrib = {'link': self.child}

        if self.type in ('revolute', 'continuous', 'prismatic'):
            SubElement(root, 'axis').attrib = \
                {'xyz': ' '.join(str(c) for c in self.axis)}
        if self.type in ('revolute', 'prismatic'):
            SubElement(root, 'limit').attrib = {
                'upper': str(self.upper_limit),
                'lower': str(self.lower_limit),
                'effort': '100', 'velocity': '100'}

        # Drop the XML declaration line that prettify() prepends.
        self.joint_xml = "\n".join(utils.prettify(root).split("\n")[1:])

    def make_transmission_xml(self):
        """Build the <transmission> element and store its text in self.tran_xml.

        The generated transmission uses transmission_interface/SimpleTransmission
        with hardware_interface/EffortJointInterface on both the joint and the
        actuator, and a mechanical reduction of 1.
        """
        root = Element('transmission')
        root.attrib = {'name': self.name + '_tran'}

        SubElement(root, 'type').text = \
            'transmission_interface/SimpleTransmission'

        joint_el = SubElement(root, 'joint')
        joint_el.attrib = {'name': self.name}
        SubElement(joint_el, 'hardwareInterface').text = \
            'hardware_interface/EffortJointInterface'

        actuator = SubElement(root, 'actuator')
        actuator.attrib = {'name': self.name + '_actr'}
        SubElement(actuator, 'hardwareInterface').text = \
            'hardware_interface/EffortJointInterface'
        SubElement(actuator, 'mechanicalReduction').text = '1'

        self.tran_xml = "\n".join(utils.prettify(root).split("\n")[1:])
def make_joints_dict(root, msg):
    """
    joints_dict holds type, axis, limits, parent, child and xyz information
    of the joints.

    Parameters
    ----------
    root: adsk.fusion.Design.cast(product)
        Root component
    msg: str
        Tell the status

    Returns
    ----------
    joints_dict:
        {name: {type, axis, upper_limit, lower_limit, parent, child, xyz}}
    msg: str
        Tell the status (error text when a joint is mis-configured)
    """
    # Index i of this list is the URDF name used for Fusion's JointTypes
    # enum value i; only the first three are actually handled below.
    joint_type_list = [
        'fixed', 'revolute', 'prismatic', 'Cylinderical',
        'PinSlot', 'Planner', 'Ball']

    joints_dict = {}

    for joint in root.joints:
        joint_dict = {}
        joint_type = joint_type_list[joint.jointMotion.jointType]
        joint_dict['type'] = joint_type

        # Defaults; only overwritten for the joint types handled below.
        joint_dict['axis'] = [0, 0, 0]
        joint_dict['upper_limit'] = 0.0
        joint_dict['lower_limit'] = 0.0

        # Supported Fusion joints: "Rigid", "Revolute" and "Slider".
        if joint_type == 'revolute':
            # In Fusion, the exported axis is already normalized.
            joint_dict['axis'] = [round(i, 6) for i in
                joint.jointMotion.rotationAxisVector.asArray()]

            max_enabled = joint.jointMotion.rotationLimits.isMaximumValueEnabled
            min_enabled = joint.jointMotion.rotationLimits.isMinimumValueEnabled
            if max_enabled and min_enabled:
                joint_dict['upper_limit'] = round(joint.jointMotion.rotationLimits.maximumValue, 6)
                joint_dict['lower_limit'] = round(joint.jointMotion.rotationLimits.minimumValue, 6)
            elif max_enabled and not min_enabled:
                msg = joint.name + 'is not set its lower limit. Please set it and try again.'
                break
            elif not max_enabled and min_enabled:
                msg = joint.name + 'is not set its upper limit. Please set it and try again.'
                break
            else:
                # No angle limits at all -> continuous joint in URDF terms.
                joint_dict['type'] = 'continuous'

        elif joint_type == 'prismatic':
            # Also normalized by Fusion.
            joint_dict['axis'] = [round(i, 6) for i in
                joint.jointMotion.slideDirectionVector.asArray()]

            max_enabled = joint.jointMotion.slideLimits.isMaximumValueEnabled
            min_enabled = joint.jointMotion.slideLimits.isMinimumValueEnabled
            if max_enabled and min_enabled:
                # Fusion works in cm; URDF wants meters.
                joint_dict['upper_limit'] = round(joint.jointMotion.slideLimits.maximumValue/100, 6)
                joint_dict['lower_limit'] = round(joint.jointMotion.slideLimits.minimumValue/100, 6)
            elif max_enabled and not min_enabled:
                msg = joint.name + 'is not set its lower limit. Please set it and try again.'
                break
            elif not max_enabled and min_enabled:
                msg = joint.name + 'is not set its upper limit. Please set it and try again.'
                break

        elif joint_type == 'fixed':
            pass

        # Sanitize occurrence names so they are legal URDF link names.
        if joint.occurrenceTwo.component.name == 'base_link':
            joint_dict['parent'] = 'base_link'
        else:
            joint_dict['parent'] = re.sub('[ :()]', '_', joint.occurrenceTwo.name)
        joint_dict['child'] = re.sub('[ :()]', '_', joint.occurrenceOne.name)

        try:
            joint_dict['xyz'] = [round(i / 100.0, 6) for i in
                joint.geometryOrOriginOne.origin.asArray()]  # converted to meter
        except Exception:
            # Some joints only carry usable geometry on their second entity.
            try:
                if type(joint.geometryOrOriginTwo) == adsk.fusion.JointOrigin:
                    data = joint.geometryOrOriginTwo.geometry.origin.asArray()
                else:
                    data = joint.geometryOrOriginTwo.origin.asArray()
                joint_dict['xyz'] = [round(i / 100.0, 6) for i in data]  # converted to meter
            except Exception:
                msg = joint.name + " doesn't have joint origin. Please set it and run again."
                break

        joints_dict[joint.name] = joint_dict
    return joints_dict, msg
import adsk, re
from xml.etree.ElementTree import Element, SubElement
from ..utils import utils
class Joint:
def __init__(self, name, xyz, axis, parent, child, joint_type, upper_limit, lower_limit):
self.name = name
self.type = joint_type
self.xyz = xyz
self.parent = parent
self.child = child
self.joint_xml = None
self.tran_xml = None
self.axis = axis
self.upper_limit = upper_limit
self.lower_limit = lower_limit
def make_joint_xml(self):
joint = Element('joint')
joint.attrib = {'name':self.name, 'type':self.type}
origin = SubElement(joint, 'origin')
origin.attrib = {'xyz':' '.join([str(_) for _ in self.xyz]), 'rpy':'0 0 0'}
parent = SubElement(joint, 'parent')
parent.attrib = {'link':self.parent}
child = SubElement(joint, 'child')
child.attrib = {'link':self.child}
if self.type == 'revolute' or self.type == 'continuous' or self.type == 'prismatic':
axis = SubElement(joint, 'axis')
axis.attrib = {'xyz':' '.join([str(_) for _ in self.axis])}
if self.type == 'revolute' or self.type == 'prismatic':
limit = SubElement(joint, 'limit')
limit.attrib = {'upper': str(self.upper_limit), 'lower': str(self.lower_limit),
'effort': '100', 'velocity': '100'}
self.joint_xml = "\n".join(utils.prettify(joint).split("\n")[1:])
def make_transmission_xml(self):
tran = Element('transmission')
tran.attrib = {'name':self.name + '_tran'}
joint_type = SubElement(tran, 'type')
joint_type.text = 'transmission_interface/SimpleTransmission'
joint = SubElement(tran, 'joint')
joint.attrib = {'name':self.name}
hardwareInterface_joint = SubElement(joint, 'hardwareInterface')
hardwareInterface_joint.text = 'hardware_interface/EffortJointInterface'
actuator = SubElement(tran, 'actuator')
actuator.attrib = {'name':self.name + '_actr'}
hardwareInterface_actr = SubElement(actuator, 'hardwareInterface')
hardwareInterface_actr.text = 'hardware_interface/EffortJointInterface'
mechanicalReduction = SubElement(actuator, 'mechanicalReduction')
mechanicalReduction.text = '1'
self.tran_xml = "\n".join(utils.prettify(tran).split("\n")[1:])
def make_joints_dict(root, msg):
joint_type_list = [
'fixed', 'revolute', 'prismatic', 'Cylinderical',
'PinSlot', 'Planner', 'Ball']
joints_dict = {}
for joint in root.joints:
joint_dict = {}
joint_type = joint_type_list[joint.jointMotion.jointType]
joint_dict['type'] = joint_type
joint_dict['axis'] = [0, 0, 0]
joint_dict['upper_limit'] = 0.0
joint_dict['lower_limit'] = 0.0
if joint_type == 'revolute':
joint_dict['axis'] = [round(i, 6) for i in \
joint.jointMotion.rotationAxisVector.asArray()] ion.rotationLimits.isMaximumValueEnabled
min_enabled = joint.jointMotion.rotationLimits.isMinimumValueEnabled
if max_enabled and min_enabled:
joint_dict['upper_limit'] = round(joint.jointMotion.rotationLimits.maximumValue, 6)
joint_dict['lower_limit'] = round(joint.jointMotion.rotationLimits.minimumValue, 6)
elif max_enabled and not min_enabled:
msg = joint.name + 'is not set its lower limit. Please set it and try again.'
break
elif not max_enabled and min_enabled:
msg = joint.name + 'is not set its upper limit. Please set it and try again.'
break
else:
joint_dict['type'] = 'continuous'
elif joint_type == 'prismatic':
joint_dict['axis'] = [round(i, 6) for i in \
joint.jointMotion.slideDirectionVector.asArray()]
max_enabled = joint.jointMotion.slideLimits.isMaximumValueEnabled
min_enabled = joint.jointMotion.slideLimits.isMinimumValueEnabled
if max_enabled and min_enabled:
joint_dict['upper_limit'] = round(joint.jointMotion.slideLimits.maximumValue/100, 6)
joint_dict['lower_limit'] = round(joint.jointMotion.slideLimits.minimumValue/100, 6)
elif max_enabled and not min_enabled:
msg = joint.name + 'is not set its lower limit. Please set it and try again.'
break
elif not max_enabled and min_enabled:
msg = joint.name + 'is not set its upper limit. Please set it and try again.'
break
elif joint_type == 'fixed':
pass
if joint.occurrenceTwo.component.name == 'base_link':
joint_dict['parent'] = 'base_link'
else:
joint_dict['parent'] = re.sub('[ :()]', '_', joint.occurrenceTwo.name)
joint_dict['child'] = re.sub('[ :()]', '_', joint.occurrenceOne.name)
try:
joint_dict['xyz'] = [round(i / 100.0, 6) for i in \
joint.geometryOrOriginOne.origin.asArray()]
except:
try:
if type(joint.geometryOrOriginTwo)==adsk.fusion.JointOrigin:
data = joint.geometryOrOriginTwo.geometry.origin.asArray()
else:
data = joint.geometryOrOriginTwo.origin.asArray()
joint_dict['xyz'] = [round(i / 100.0, 6) for i in data]
except:
msg = joint.name + " doesn't have joint origin. Please set it and run again."
break
joints_dict[joint.name] = joint_dict
return joints_dict, msg | true | true |
f732d2f81e76fb029eac6e1333e8799856fc9049 | 18,027 | py | Python | qutip/cy/br_codegen.py | camponogaraviera/qutip | 1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f | [
"BSD-3-Clause"
] | 1,205 | 2015-01-02T16:23:42.000Z | 2022-03-31T03:21:21.000Z | qutip/cy/br_codegen.py | camponogaraviera/qutip | 1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f | [
"BSD-3-Clause"
] | 1,361 | 2015-01-09T23:38:25.000Z | 2022-03-31T12:26:07.000Z | qutip/cy/br_codegen.py | camponogaraviera/qutip | 1b1f6dffcb3ab97f11b8c6114293e09f378d2e8f | [
"BSD-3-Clause"
] | 569 | 2015-01-19T06:15:33.000Z | 2022-03-28T20:43:39.000Z | import os
import numpy as np
import qutip.settings as qset
from qutip.interpolate import Cubic_Spline
_cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
_include_string = "'"+_cython_path+"/complex_math.pxi'"
__all__ = ['BR_Codegen']
class BR_Codegen(object):
"""
Class for generating Bloch-Redfield time-dependent code
at runtime.
"""
def __init__(self, h_terms=None, h_td_terms=None, h_obj=None,
c_terms=None, c_td_terms=None, c_obj=None,
a_terms=None, a_td_terms=None,
spline_count=[0,0],
coupled_ops=[],
coupled_lengths=[],
coupled_spectra=[],
config=None, sparse=False,
use_secular=None,
sec_cutoff=0.1,
args=None,
use_openmp=False,
omp_thresh=None,
omp_threads=None,
atol=None):
try:
import cython
except (ImportError, ModuleNotFoundError):
raise ModuleNotFoundError("Cython is needed for "
"time-depdendent brmesolve")
import sys
import os
sys.path.append(os.getcwd())
# Hamiltonian time-depdendent pieces
self.h_terms = h_terms # number of H pieces
self.h_td_terms = h_td_terms
self.h_obj = h_obj
# Collapse operator time-depdendent pieces
self.c_terms = c_terms # number of C pieces
self.c_td_terms = c_td_terms
self.c_obj = c_obj
# BR operator time-depdendent pieces
self.a_terms = a_terms # number of A pieces
self.a_td_terms = a_td_terms
self.spline_count = spline_count
self.use_secular = int(use_secular)
self.sec_cutoff = sec_cutoff
self.args = args
self.sparse = sparse
self.spline = 0
# Code generator properties
self.code = [] # strings to be written to file
self.level = 0 # indent level
self.config = config
if atol is None:
self.atol = qset.atol
else:
self.atol = atol
self.use_openmp = use_openmp
self.omp_thresh = omp_thresh
self.omp_threads = omp_threads
self.coupled_ops = coupled_ops
self.coupled_lengths = coupled_lengths
self.coupled_spectra = coupled_spectra
def write(self, string):
"""write lines of code to self.code"""
self.code.append(" " * self.level + string + "\n")
    def file(self, filename):
        """open file called filename for writing

        NOTE(review): this rebinds ``self.file`` from the bound method to
        the open file object, so it can only be used once per instance;
        ``generate`` relies on the rebound handle immediately afterwards.
        """
        self.file = open(filename, "w")
    def generate(self, filename="rhs.pyx"):
        """Assemble all code sections and write them to *filename*."""
        # Preamble and the cdef spectral functions go above the RHS function.
        for line in cython_preamble(self.use_openmp)+self.aop_td_funcs():
            self.write(line)

        # Write the RHS function header (there is always at least one
        # Hamiltonian term).
        for line in cython_checks() + self.ODE_func_header():
            self.write(line)
        self.indent()
        # Reset the spline counter: ODE_func_header consumed it while naming
        # the spline arguments; the body sections count through them again.
        self.spline = 0
        for line in self.func_vars()+self.ham_add_and_eigsolve()+ \
                    self.br_matvec_terms()+["\n"]:
            self.write(line)
        for line in self.func_end():
            self.write(line)
        self.dedent()
        # self.file() rebinds self.file to the open file object (see file()).
        self.file(filename)
        self.file.writelines(self.code)
        self.file.close()
        self.config.cgen_num += 1
def indent(self):
"""increase indention level by one"""
self.level += 1
def dedent(self):
"""decrease indention level by one"""
if self.level == 0:
raise SyntaxError("Error in code generator")
self.level -= 1
def _get_arg_str(self, args):
if len(args) == 0:
return ''
ret = ''
for name, value in self.args.items():
if isinstance(value, np.ndarray):
ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \
(value.dtype.name, name)
else:
if isinstance(value, (int, np.int32, np.int64)):
kind = 'int'
elif isinstance(value, (float, np.float32, np.float64)):
kind = 'float'
elif isinstance(value, (complex, np.complex128)):
kind = 'complex'
#kind = type(value).__name__
ret += ",\n " + kind + " " + name
return ret
    def ODE_func_header(self):
        """Creates function header for time-dependent ODE RHS.

        Returns a single-element list holding the full ``def cy_td_ode_rhs``
        signature: time, state vector, one matrix per H/C/A term, one spline
        array per Cubic_Spline time-dependence, ``nrows`` and the user args.
        Side effect: advances self.spline while naming the spline arguments
        (generate() resets it afterwards).
        """
        func_name = "def cy_td_ode_rhs("
        # strings for time and vector variables
        input_vars = ("\n        double t" +
                      ",\n        complex[::1] vec")
        for k in range(self.h_terms):
            input_vars += (",\n        " +
                           "complex[::1,:] H%d" % k)

        # Add one array argument for each Cubic_Spline H term
        for htd in self.h_td_terms:
            if isinstance(htd, Cubic_Spline):
                if not htd.is_complex:
                    input_vars += (",\n        " +
                                   "double[::1] spline%d" % self.spline)
                else:
                    input_vars += (",\n        " +
                                   "complex[::1] spline%d" % self.spline)
                self.spline += 1

        for k in range(self.c_terms):
            input_vars += (",\n        " +
                           "complex[::1,:] C%d" % k)

        # Add one array argument for each Cubic_Spline c_op term
        for ctd in self.c_td_terms:
            if isinstance(ctd, Cubic_Spline):
                if not ctd.is_complex:
                    input_vars += (",\n        " +
                                   "double[::1] spline%d" % self.spline)
                else:
                    input_vars += (",\n        " +
                                   "complex[::1] spline%d" % self.spline)
                self.spline += 1

        # Add coupled a_op spline terms
        for _a in self.a_td_terms:
            if isinstance(_a, Cubic_Spline):
                if not _a.is_complex:
                    input_vars += (",\n        " +
                                   "double[::1] spline%d" % self.spline)
                else:
                    input_vars += (",\n        " +
                                   "complex[::1] spline%d" % self.spline)
                self.spline += 1

        # Add a_op matrices
        for k in range(self.a_terms):
            input_vars += (",\n        " +
                           "complex[::1,:] A%d" % k)

        input_vars += (",\n        unsigned int nrows")
        input_vars += self._get_arg_str(self.args)

        func_end = "):"
        return [func_name + input_vars + func_end]
def func_vars(self):
"""Writes the variables and their types & spmv parts"""
func_vars = ["", "cdef double complex * " +
'out = <complex *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(complex))']
func_vars.append(" ")
return func_vars
    def aop_td_funcs(self):
        """Emit one cdef ``spectralK(w, t)`` function per (group of) a_ops.

        Uncoupled a_ops take their spectrum from self.a_td_terms[k] (a string
        expression, or a (freq-part, time-part) tuple whose halves may be
        strings or Cubic_Splines); coupled groups share one entry of
        self.coupled_spectra.  Cubic_Spline pieces additionally emit a
        module-level coefficient array named splineN.
        """
        aop_func_str=[]
        # Spline numbering for these module-level arrays starts after the
        # splines already used as RHS arguments (spline_count[0]).
        spline_val = self.spline_count[0]
        coupled_val = 0
        kk = 0
        while kk < self.a_terms:
            if kk not in self.coupled_ops:
                aa = self.a_td_terms[kk]
                if isinstance(aa, str):
                    aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
                elif isinstance(aa, tuple):
                    # First tuple element: frequency-dependent factor (in w).
                    if isinstance(aa[0],str):
                        str0 = aa[0]
                    elif isinstance(aa[0],Cubic_Spline):
                        if not aa[0].is_complex:
                            aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=float)"]
                            str0 = "interp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
                        else:
                            aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=complex)"]
                            str0 = "zinterp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
                        spline_val += 1
                    else:
                        raise Exception('Error parsing tuple.')

                    # Second tuple element: time-dependent factor (in t).
                    if isinstance(aa[1],str):
                        str1 = aa[1]
                    elif isinstance(aa[1],Cubic_Spline):
                        if not aa[1].is_complex:
                            aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"]
                            str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
                        else:
                            aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"]
                            str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
                        spline_val += 1
                    else:
                        raise Exception('Error parsing tuple.')

                    # Full spectrum is the product of both factors.
                    aop_func_str += ["cdef complex spectral{0}(double w, double t): return ({1})*({2})".format(kk, str0, str1)]
                else:
                    raise Exception('Invalid a_td_term.')
                kk += 1
            else:
                # Coupled group: one shared spectrum, one spectral function
                # named after the group's first index.
                aa = self.coupled_spectra[coupled_val]
                if isinstance(aa, str):
                    aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
                elif isinstance(aa, Cubic_Spline):
                    # NOTE(review): this branch indexes aa[1] although aa is a
                    # Cubic_Spline, mirroring the tuple branch above — looks
                    # like it should use aa directly; verify before relying on
                    # coupled Cubic_Spline spectra.
                    if not aa[1].is_complex:
                        aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"]
                        str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
                    else:
                        aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"]
                        str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
                    spline_val += 1
                    aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, str1)]
                kk += self.coupled_lengths[coupled_val]
                coupled_val += 1
        return aop_func_str
    def ham_add_and_eigsolve(self):
        """Emit code that sums the Hamiltonian terms at time t and
        diagonalizes the result, leaving ``eigvals``/``evecs`` defined."""
        ham_str = []
        # Allocate zeroed Hamiltonian and eigenvector arrays in Fortran order.
        ham_str += ['cdef complex[::1, :] H = farray_alloc(nrows)']
        ham_str += ['cdef complex[::1, :] evecs = farray_alloc(nrows)']
        # Allocate a double array for the eigenvalues.
        ham_str += ['cdef double * eigvals = <double *>PyDataMem_NEW_ZEROED(nrows,sizeof(double))']
        for kk in range(self.h_terms):
            if isinstance(self.h_td_terms[kk], Cubic_Spline):
                # Spline coefficient: evaluate the interpolator at t.
                S = self.h_td_terms[kk]
                if not S.is_complex:
                    td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                else:
                    td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,td_str)]
                self.spline += 1
            else:
                # String coefficient: inserted verbatim into the generated code.
                ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,self.h_td_terms[kk])]
        # Do the eigensolving.
        ham_str += ["ZHEEVR(H, eigvals, evecs, nrows)"]
        # Free H as it is no longer needed.
        ham_str += ["PyDataMem_FREE(&H[0,0])"]
        return ham_str
    def br_matvec_terms(self):
        """Emit the matvec section of the RHS: diagonal Liouvillian,
        collapse-operator terms, and the Bloch-Redfield a_op terms, all in
        the Hamiltonian eigenbasis."""
        br_str = []
        # Transform the state vector into the eigenbasis.
        br_str += ["cdef double complex * eig_vec = vec_to_eigbasis(vec, evecs, nrows)"]
        # Do the diagonal Liouvillian matvec.
        br_str += ["diag_liou_mult(eigvals, eig_vec, out, nrows)"]
        # Do the cop_term matvec for each c_term; Cubic_Spline coefficients
        # are evaluated at t, string coefficients are inserted verbatim.
        for kk in range(self.c_terms):
            if isinstance(self.c_td_terms[kk], Cubic_Spline):
                S = self.c_td_terms[kk]
                if not S.is_complex:
                    td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                else:
                    td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                if self.use_openmp:
                    br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
                            td_str, self.omp_thresh, self.omp_threads, self.atol)]
                else:
                    br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, td_str, self.atol)]
                self.spline += 1
            else:
                if self.use_openmp:
                    br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
                            self.c_td_terms[kk], self.omp_thresh, self.omp_threads, self.atol)]
                else:
                    br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, self.c_td_terms[kk], self.atol)]

        if self.a_terms != 0:
            # Calculate the skew-frequency matrix and dw_min, used by the
            # secular approximation inside br_term_mult.
            br_str += ["cdef double[:,::1] skew = <double[:nrows,:nrows]><double *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(double))"]
            br_str += ["cdef double dw_min = skew_and_dwmin(eigvals, skew, nrows)"]

        # Compute the BR term matvec for each a_op (or coupled group).
        kk = 0
        coupled_val = 0
        while kk < self.a_terms:
            if kk not in self.coupled_ops:
                if self.use_openmp:
                    br_str += ["br_term_mult_openmp(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
                                self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
                else:
                    br_str += ["br_term_mult(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
                kk += 1
            else:
                # Coupled group: accumulate the weighted member operators into
                # a temporary Ac matrix, apply one BR term with the group's
                # shared spectral function, then free the temporary.
                br_str += ['cdef complex[::1, :] Ac{0} = farray_alloc(nrows)'.format(kk)]
                for nn in range(self.coupled_lengths[coupled_val]):
                    if isinstance(self.a_td_terms[kk+nn], str):
                        br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,self.a_td_terms[kk+nn])]
                    elif isinstance(self.a_td_terms[kk+nn], Cubic_Spline):
                        S = self.a_td_terms[kk+nn]
                        if not S.is_complex:
                            td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                        else:
                            td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
                        br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,td_str)]
                    else:
                        raise Exception('Invalid time-dependence fot a_op.')
                if self.use_openmp:
                    br_str += ["br_term_mult_openmp(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
                                self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
                else:
                    br_str += ["br_term_mult(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
                br_str += ["PyDataMem_FREE(&Ac{0}[0,0])".format(kk)]
                kk += self.coupled_lengths[coupled_val]
                coupled_val += 1
        return br_str
def func_end(self):
end_str = []
#Transform out vector back to fock basis
end_str += ["cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = vec_to_fockbasis(out, evecs, nrows)"]
#Free everything at end
if self.a_terms != 0:
end_str += ["PyDataMem_FREE(&skew[0,0])"]
end_str += ["PyDataMem_FREE(&evecs[0,0])"]
end_str += ["PyDataMem_FREE(eigvals)"]
end_str += ["PyDataMem_FREE(eig_vec)"]
end_str += ["PyDataMem_FREE(out)"]
end_str += ["return arr_out"]
return end_str
def cython_preamble(use_omp=False):
    """
    Returns list of code segments for Cython preamble.
    """
    # Fix over original: this docstring previously appeared *after* the
    # if/else below, making it a dead string expression instead of __doc__.
    # Pull in the OpenMP variants of the matvec helpers when requested.
    if use_omp:
        call_str = "from qutip.cy.openmp.br_omp cimport (cop_super_mult_openmp, br_term_mult_openmp)"
    else:
        call_str = "from qutip.cy.brtools cimport (cop_super_mult, br_term_mult)"
    return ["""#!python
#cython: language_level=3
# This file is generated automatically by QuTiP.
# (C) 2011 and later, QuSTaR
import numpy as np
cimport numpy as np
cimport cython
np.import_array()
cdef extern from "numpy/arrayobject.h" nogil:
    void PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
    void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
    void PyDataMem_FREE(void * ptr)
from qutip.cy.interpolate cimport interp, zinterp
from qutip.cy.math cimport erf, zerf
cdef double pi = 3.14159265358979323
from qutip.cy.brtools cimport (dense_add_mult, ZHEEVR, dense_to_eigbasis,
                vec_to_eigbasis, vec_to_fockbasis, skew_and_dwmin,
                diag_liou_mult, spec_func, farray_alloc)
"""
+call_str+
"""
include """+_include_string+"""
"""]
def cython_checks():
    """
    List of strings that turn off Cython checks.
    """
    decorators = (
        "\n"
        "@cython.cdivision(True)\n"
        "@cython.boundscheck(False)\n"
        "@cython.wraparound(False)"
    )
    return [decorators]
| 42.718009 | 183 | 0.520885 | import os
import numpy as np
import qutip.settings as qset
from qutip.interpolate import Cubic_Spline
_cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
_include_string = "'"+_cython_path+"/complex_math.pxi'"
__all__ = ['BR_Codegen']
class BR_Codegen(object):
def __init__(self, h_terms=None, h_td_terms=None, h_obj=None,
c_terms=None, c_td_terms=None, c_obj=None,
a_terms=None, a_td_terms=None,
spline_count=[0,0],
coupled_ops=[],
coupled_lengths=[],
coupled_spectra=[],
config=None, sparse=False,
use_secular=None,
sec_cutoff=0.1,
args=None,
use_openmp=False,
omp_thresh=None,
omp_threads=None,
atol=None):
try:
import cython
except (ImportError, ModuleNotFoundError):
raise ModuleNotFoundError("Cython is needed for "
"time-depdendent brmesolve")
import sys
import os
sys.path.append(os.getcwd())
self.h_terms = h_terms
self.h_td_terms = h_td_terms
self.h_obj = h_obj
self.c_terms = c_terms
self.c_td_terms = c_td_terms
self.c_obj = c_obj
self.a_terms = a_terms
self.a_td_terms = a_td_terms
self.spline_count = spline_count
self.use_secular = int(use_secular)
self.sec_cutoff = sec_cutoff
self.args = args
self.sparse = sparse
self.spline = 0
self.code = []
self.level = 0
self.config = config
if atol is None:
self.atol = qset.atol
else:
self.atol = atol
self.use_openmp = use_openmp
self.omp_thresh = omp_thresh
self.omp_threads = omp_threads
self.coupled_ops = coupled_ops
self.coupled_lengths = coupled_lengths
self.coupled_spectra = coupled_spectra
def write(self, string):
self.code.append(" " * self.level + string + "\n")
def file(self, filename):
self.file = open(filename, "w")
def generate(self, filename="rhs.pyx"):
for line in cython_preamble(self.use_openmp)+self.aop_td_funcs():
self.write(line)
for line in cython_checks() + self.ODE_func_header():
self.write(line)
self.indent()
self.spline = 0
for line in self.func_vars()+self.ham_add_and_eigsolve()+ \
self.br_matvec_terms()+["\n"]:
self.write(line)
for line in self.func_end():
self.write(line)
self.dedent()
self.file(filename)
self.file.writelines(self.code)
self.file.close()
self.config.cgen_num += 1
def indent(self):
self.level += 1
def dedent(self):
if self.level == 0:
raise SyntaxError("Error in code generator")
self.level -= 1
def _get_arg_str(self, args):
if len(args) == 0:
return ''
ret = ''
for name, value in self.args.items():
if isinstance(value, np.ndarray):
ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \
(value.dtype.name, name)
else:
if isinstance(value, (int, np.int32, np.int64)):
kind = 'int'
elif isinstance(value, (float, np.float32, np.float64)):
kind = 'float'
elif isinstance(value, (complex, np.complex128)):
kind = 'complex'
ret += ",\n " + kind + " " + name
return ret
def ODE_func_header(self):
func_name = "def cy_td_ode_rhs("
input_vars = ("\n double t" +
",\n complex[::1] vec")
for k in range(self.h_terms):
input_vars += (",\n " +
"complex[::1,:] H%d" % k)
for htd in self.h_td_terms:
if isinstance(htd, Cubic_Spline):
if not htd.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
for k in range(self.c_terms):
input_vars += (",\n " +
"complex[::1,:] C%d" % k)
for ctd in self.c_td_terms:
if isinstance(ctd, Cubic_Spline):
if not ctd.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
for _a in self.a_td_terms:
if isinstance(_a, Cubic_Spline):
if not _a.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
for k in range(self.a_terms):
input_vars += (",\n " +
"complex[::1,:] A%d" % k)
input_vars += (",\n unsigned int nrows")
input_vars += self._get_arg_str(self.args)
func_end = "):"
return [func_name + input_vars + func_end]
def func_vars(self):
func_vars = ["", "cdef double complex * " +
'out = <complex *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(complex))']
func_vars.append(" ")
return func_vars
def aop_td_funcs(self):
aop_func_str=[]
spline_val = self.spline_count[0]
coupled_val = 0
kk = 0
while kk < self.a_terms:
if kk not in self.coupled_ops:
aa = self.a_td_terms[kk]
if isinstance(aa, str):
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
elif isinstance(aa, tuple):
if isinstance(aa[0],str):
str0 = aa[0]
elif isinstance(aa[0],Cubic_Spline):
if not aa[0].is_complex:
aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=float)"]
str0 = "interp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
else:
aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=complex)"]
str0 = "zinterp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
spline_val += 1
else:
raise Exception('Error parsing tuple.')
if isinstance(aa[1],str):
str1 = aa[1]
elif isinstance(aa[1],Cubic_Spline):
if not aa[1].is_complex:
aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"]
str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
else:
aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"]
str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
spline_val += 1
else:
raise Exception('Error parsing tuple.')
aop_func_str += ["cdef complex spectral{0}(double w, double t): return ({1})*({2})".format(kk, str0, str1)]
else:
raise Exception('Invalid a_td_term.')
kk += 1
else:
aa = self.coupled_spectra[coupled_val]
if isinstance(aa, str):
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
elif isinstance(aa, Cubic_Spline):
if not aa[1].is_complex:
aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"]
str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
else:
aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"]
str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
spline_val += 1
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, str1)]
kk += self.coupled_lengths[coupled_val]
coupled_val += 1
return aop_func_str
def ham_add_and_eigsolve(self):
ham_str = []
ham_str += ['cdef complex[::1, :] H = farray_alloc(nrows)']
ham_str += ['cdef complex[::1, :] evecs = farray_alloc(nrows)']
ham_str += ['cdef double * eigvals = <double *>PyDataMem_NEW_ZEROED(nrows,sizeof(double))']
for kk in range(self.h_terms):
if isinstance(self.h_td_terms[kk], Cubic_Spline):
S = self.h_td_terms[kk]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,td_str)]
self.spline += 1
else:
ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,self.h_td_terms[kk])]
ham_str += ["ZHEEVR(H, eigvals, evecs, nrows)"]
ham_str += ["PyDataMem_FREE(&H[0,0])"]
return ham_str
def br_matvec_terms(self):
br_str = []
br_str += ["cdef double complex * eig_vec = vec_to_eigbasis(vec, evecs, nrows)"]
br_str += ["diag_liou_mult(eigvals, eig_vec, out, nrows)"]
for kk in range(self.c_terms):
if isinstance(self.c_td_terms[kk], Cubic_Spline):
S = self.c_td_terms[kk]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
if self.use_openmp:
br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
td_str, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, td_str, self.atol)]
self.spline += 1
else:
if self.use_openmp:
br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
self.c_td_terms[kk], self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, self.c_td_terms[kk], self.atol)]
if self.a_terms != 0:
br_str += ["cdef double[:,::1] skew = <double[:nrows,:nrows]><double *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(double))"]
br_str += ["cdef double dw_min = skew_and_dwmin(eigvals, skew, nrows)"]
kk = 0
coupled_val = 0
while kk < self.a_terms:
if kk not in self.coupled_ops:
if self.use_openmp:
br_str += ["br_term_mult_openmp(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["br_term_mult(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
kk += 1
else:
br_str += ['cdef complex[::1, :] Ac{0} = farray_alloc(nrows)'.format(kk)]
for nn in range(self.coupled_lengths[coupled_val]):
if isinstance(self.a_td_terms[kk+nn], str):
br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,self.a_td_terms[kk+nn])]
elif isinstance(self.a_td_terms[kk+nn], Cubic_Spline):
S = self.a_td_terms[kk+nn]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,td_str)]
else:
raise Exception('Invalid time-dependence fot a_op.')
if self.use_openmp:
br_str += ["br_term_mult_openmp(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["br_term_mult(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
br_str += ["PyDataMem_FREE(&Ac{0}[0,0])".format(kk)]
kk += self.coupled_lengths[coupled_val]
coupled_val += 1
return br_str
def func_end(self):
end_str = []
end_str += ["cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = vec_to_fockbasis(out, evecs, nrows)"]
if self.a_terms != 0:
end_str += ["PyDataMem_FREE(&skew[0,0])"]
end_str += ["PyDataMem_FREE(&evecs[0,0])"]
end_str += ["PyDataMem_FREE(eigvals)"]
end_str += ["PyDataMem_FREE(eig_vec)"]
end_str += ["PyDataMem_FREE(out)"]
end_str += ["return arr_out"]
return end_str
def cython_preamble(use_omp=False):
if use_omp:
call_str = "from qutip.cy.openmp.br_omp cimport (cop_super_mult_openmp, br_term_mult_openmp)"
else:
call_str = "from qutip.cy.brtools cimport (cop_super_mult, br_term_mult)"
return ["""#!python
#cython: language_level=3
# This file is generated automatically by QuTiP.
# (C) 2011 and later, QuSTaR
import numpy as np
cimport numpy as np
cimport cython
np.import_array()
cdef extern from "numpy/arrayobject.h" nogil:
void PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
void PyDataMem_FREE(void * ptr)
from qutip.cy.interpolate cimport interp, zinterp
from qutip.cy.math cimport erf, zerf
cdef double pi = 3.14159265358979323
from qutip.cy.brtools cimport (dense_add_mult, ZHEEVR, dense_to_eigbasis,
vec_to_eigbasis, vec_to_fockbasis, skew_and_dwmin,
diag_liou_mult, spec_func, farray_alloc)
"""
+call_str+
"""
include """+_include_string+"""
"""]
def cython_checks():
return ["""
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)"""]
| true | true |
f732d31c68384f61be8ec811efa39446d1f8762e | 19,429 | py | Python | fitlins/interfaces/bids.py | yarikoptic/fitlins | ee7e06330b9cdd5a9b812d51eb545daa84b0d066 | [
"Apache-2.0"
] | null | null | null | fitlins/interfaces/bids.py | yarikoptic/fitlins | ee7e06330b9cdd5a9b812d51eb545daa84b0d066 | [
"Apache-2.0"
] | null | null | null | fitlins/interfaces/bids.py | yarikoptic/fitlins | ee7e06330b9cdd5a9b812d51eb545daa84b0d066 | [
"Apache-2.0"
] | null | null | null | import os
from functools import reduce
from pathlib import Path
from gzip import GzipFile
import json
import shutil
import numpy as np
import nibabel as nb
from collections import defaultdict
from nipype import logging
from nipype.utils.filemanip import makedirs, copyfile
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec, SimpleInterface,
InputMultiPath, OutputMultiPath, File, Directory,
traits, isdefined
)
from nipype.interfaces.io import IOBase
from ..utils import dict_intersection, snake_to_camel
# Module-level logger shared by all interfaces defined in this file
iflogger = logging.getLogger('nipype.interface')
def bids_split_filename(fname):
    """Split a filename into parts: path, base filename, and extension

    Respects multi-part file types used in BIDS standard and draft extensions

    Largely copied from nipype.utils.filemanip.split_filename

    Parameters
    ----------
    fname : str
        file or path name

    Returns
    -------
    pth : str
        path of fname
    fname : str
        basename of filename, without extension
    ext : str
        file extension of fname
    """
    # Multi-part extensions that os.path.splitext would cut in the middle
    special_extensions = [
        ".R.surf.gii", ".L.surf.gii",
        ".R.func.gii", ".L.func.gii",
        ".nii.gz", ".tsv.gz",
    ]
    pth, base = os.path.split(fname)
    lowered = base.lower()
    for special_ext in special_extensions:
        if lowered.endswith(special_ext.lower()):
            # Slice the original (case-preserved) name at the match point
            cut = len(special_ext)
            return pth, base[:-cut], base[-cut:]
    base, ext = os.path.splitext(base)
    return pth, base, ext
def _ensure_model(model):
model = getattr(model, 'filename', model)
if isinstance(model, str):
if os.path.exists(model):
with open(model) as fobj:
model = json.load(fobj)
else:
model = json.loads(model)
return model
class ModelSpecLoaderInputSpec(BaseInterfaceInputSpec):
    # Root of the BIDS dataset to search for model files
    bids_dir = Directory(exists=True,
                         mandatory=True,
                         desc='BIDS dataset root directory')
    # Either the literal 'default' (auto-generate) or a list of model files
    model = traits.Either('default', InputMultiPath(File(exists=True)),
                          desc='Model filename')
    # Optional entity filters applied against each model's 'input' section
    selectors = traits.Dict(desc='Limit models to those with matching inputs')
class ModelSpecLoaderOutputSpec(TraitedSpec):
    # One parsed model dictionary per matched model
    model_spec = OutputMultiPath(traits.Dict())
class ModelSpecLoader(SimpleInterface):
    """Load BIDS model specifications, or auto-generate them from the layout.

    Accepts explicit model files, the string ``'default'`` (delegating to
    ``bids.analysis.auto_model``), or, if undefined, any ``type='model'``
    files found in the dataset.
    """
    input_spec = ModelSpecLoaderInputSpec
    output_spec = ModelSpecLoaderOutputSpec
    def _run_interface(self, runtime):
        # Imported lazily so the module can be imported without pybids
        import bids
        from bids.analysis import auto_model
        models = self.inputs.model
        if not isinstance(models, list):
            layout = bids.BIDSLayout(self.inputs.bids_dir)
            if not isdefined(models):
                # No model given: fall back to model files in the dataset
                models = layout.get(type='model')
                if not models:
                    raise ValueError("No models found")
            elif models == 'default':
                models = auto_model(layout)
        # Normalize every entry (path / JSON string / dict) to a dict
        models = [_ensure_model(m) for m in models]
        if self.inputs.selectors:
            # This is almost certainly incorrect
            # NOTE(review): selector semantics look dubious — the default
            # [val] makes a missing key always match; verify intended logic.
            models = [model for model in models
                      if all(val in model['input'].get(key, [val])
                             for key, val in self.inputs.selectors.items())]
        self._results['model_spec'] = models
        return runtime
IMPUTATION_SNIPPET = """\
<div class="warning">
The following confounds had NaN values for the first volume: {}.
The mean of non-zero values for the remaining entries was imputed.
If another strategy is desired, it must be explicitly specified in
the model.
</div>
"""
class LoadBIDSModelInputSpec(BaseInterfaceInputSpec):
    # Root of the raw BIDS dataset
    bids_dir = Directory(exists=True,
                         mandatory=True,
                         desc='BIDS dataset root directory')
    # Optional derivatives directory (e.g. FMRIPREP output)
    preproc_dir = Directory(exists=True,
                            desc='Optional preprocessed files directory')
    model = traits.Dict(desc='Model specification', mandatory=True)
    selectors = traits.Dict(desc='Limit collected sessions', usedefault=True)
    # include/exclude are mutually exclusive directory filters
    include_pattern = InputMultiPath(
        traits.Str, xor=['exclude_pattern'],
        desc='Patterns to select sub-directories of BIDS root')
    exclude_pattern = InputMultiPath(
        traits.Str, xor=['include_pattern'],
        desc='Patterns to ignore sub-directories of BIDS root')
class LoadBIDSModelOutputSpec(TraitedSpec):
    # One dict per first-level unit: paths to events/confounds HDF5 + TR
    session_info = traits.List(traits.Dict())
    # Per analysis level: list of contrast HDF5 file paths
    contrast_info = traits.List(traits.List(File()))
    # Per analysis level: entity records indexing each contrast row
    contrast_indices = traits.List(traits.List(traits.List(traits.Dict)))
    # Per analysis level: entities describing each unit
    entities = traits.List(traits.List(traits.Dict()))
    # Per first-level unit: HTML warning fragment path
    warnings = traits.List(File)
class LoadBIDSModel(SimpleInterface):
    """Parse a BIDS model specification against a dataset.

    Runs a pybids ``Analysis`` over the layout, then serializes
    first-level events/confounds/contrasts and higher-level contrast
    matrices to HDF5 files under the working directory, exposing their
    paths and entities through the output spec.
    """
    input_spec = LoadBIDSModelInputSpec
    output_spec = LoadBIDSModelOutputSpec
    def _run_interface(self, runtime):
        """Build the pybids Analysis and delegate to per-level loaders."""
        import bids
        bids.config.set_options(loop_preproc=True)
        include = self.inputs.include_pattern
        exclude = self.inputs.exclude_pattern
        if not isdefined(include):
            include = None
        if not isdefined(exclude):
            exclude = None
        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths, include=include, exclude=exclude)
        selectors = self.inputs.selectors
        analysis = bids.Analysis(model=self.inputs.model, layout=layout)
        analysis.setup(drop_na=False, **selectors)
        self._load_level1(runtime, analysis)
        self._load_higher_level(runtime, analysis)
        # Debug - remove, eventually
        runtime.analysis = analysis
        return runtime
    def _load_level1(self, runtime, analysis):
        """Serialize first-level design information for each matched run.

        For every run selected by the first model block, writes
        ``<ents>_events.h5``, ``<ents>_confounds.h5`` (when confounds
        exist), ``<ents>_contrasts.h5`` and an imputation-warning HTML
        fragment, collecting paths/entities into ``self._results``.
        """
        block = analysis.blocks[0]
        block_subdir = Path(runtime.cwd) / block.level
        block_subdir.mkdir(parents=True, exist_ok=True)
        entities = []
        session_info = []
        contrast_indices = []
        contrast_info = []
        warnings = []
        for paradigm, _, ents in block.get_design_matrix(
                block.model['HRF_variables'], mode='sparse', force=True):
            info = {}
            space = analysis.layout.get_spaces(type='preproc',
                                               extensions=['.nii', '.nii.gz'])[0]
            preproc_files = analysis.layout.get(type='preproc',
                                                extensions=['.nii', '.nii.gz'],
                                                space=space,
                                                **ents)
            if len(preproc_files) != 1:
                # NOTE(review): message is misleading when *zero* files match
                raise ValueError('Too many BOLD files found')
            fname = preproc_files[0].filename
            # Required field in seconds
            TR = analysis.layout.get_metadata(fname, type='bold',
                                              full_search=True)['RepetitionTime']
            # Non-HRF variables are sampled densely at the scan TR
            dense_vars = set(block.model['variables']) - set(block.model['HRF_variables'])
            _, confounds, _ = block.get_design_matrix(dense_vars,
                                                      mode='dense',
                                                      force=True,
                                                      sampling_rate=1/TR,
                                                      **ents)[0]
            ent_string = '_'.join('{}-{}'.format(key, val)
                                  for key, val in ents.items())
            events_file = block_subdir / '{}_events.h5'.format(ent_string)
            paradigm.to_hdf(events_file, key='events')
            imputed = []
            if confounds is not None:
                # Note that FMRIPREP includes CosineXX columns to accompany
                # t/aCompCor
                # We may want to add criteria to include HPF columns that are not
                # explicitly listed in the model
                names = [col for col in confounds.columns
                         if col.startswith('NonSteadyStateOutlier') or
                         col in block.model['variables']]
                confounds = confounds[names]
                # These confounds are defined pairwise with the current volume
                # and its predecessor, and thus may be undefined (have value
                # NaN) at the first volume.
                # In these cases, we impute the mean non-zero value, for the
                # expected NaN only.
                # Any other NaNs must be handled by an explicit transform in
                # the BIDS model.
                for imputable in ('FramewiseDisplacement',
                                  'stdDVARS', 'non-stdDVARS',
                                  'vx-wisestdDVARS'):
                    if imputable in confounds.columns:
                        vals = confounds[imputable].values
                        if not np.isnan(vals[0]):
                            continue
                        # Impute the mean non-zero, non-NaN value
                        # NOTE(review): chained assignment; pandas may warn
                        # and/or write to a copy — verify with SettingWithCopy
                        confounds[imputable][0] = np.nanmean(vals[vals != 0])
                        imputed.append(imputable)
                if np.isnan(confounds.values).any():
                    iflogger.warning('Unexpected NaNs found in confounds; '
                                     'regression may fail.')
                confounds_file = block_subdir / '{}_confounds.h5'.format(ent_string)
                confounds.to_hdf(confounds_file, key='confounds')
            else:
                confounds_file = None
            info['events'] = str(events_file)
            info['confounds'] = str(confounds_file)
            info['repetition_time'] = TR
            # Transpose so each contrast gets a row of data instead of column
            contrasts, index, _ = block.get_contrasts(**ents)[0]
            # Contrasts with no declared type default to 'T'
            contrast_type_map = defaultdict(lambda: 'T')
            contrast_type_map.update({contrast['name']: contrast['type']
                                      for contrast in block.contrasts})
            contrast_type_list = [contrast_type_map[contrast]
                                  for contrast in contrasts.columns]
            contrasts = contrasts.T
            # Add test indicator column
            contrasts['type'] = contrast_type_list
            contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)
            contrasts_file.parent.mkdir(parents=True, exist_ok=True)
            contrasts.to_hdf(contrasts_file, key='contrasts')
            # Warning file is always created; it is empty when nothing was imputed
            warning_file = block_subdir / '{}_warning.html'.format(ent_string)
            with warning_file.open('w') as fobj:
                if imputed:
                    fobj.write(IMPUTATION_SNIPPET.format(', '.join(imputed)))
            entities.append(ents)
            session_info.append(info)
            contrast_indices.append(index.to_dict('records'))
            contrast_info.append(str(contrasts_file))
            warnings.append(str(warning_file))
        self._results['session_info'] = session_info
        self._results['warnings'] = warnings
        self._results.setdefault('entities', []).append(entities)
        self._results.setdefault('contrast_indices', []).append(contrast_indices)
        self._results.setdefault('contrast_info', []).append(contrast_info)
    def _load_higher_level(self, runtime, analysis):
        """Serialize contrast matrices for every block above the first level."""
        cwd = Path(runtime.cwd)
        for block in analysis.blocks[1:]:
            block_subdir = cwd / block.level
            block_subdir.mkdir(parents=True, exist_ok=True)
            entities = []
            contrast_indices = []
            contrast_info = []
            for contrasts, index, ents in block.get_contrasts():
                if contrasts.empty:
                    continue
                # The contrast index is the name of the input contrasts,
                # which will very frequently be non-unique
                # Hence, add the contrast to the index (table of entities)
                # and switch to a matching numeric index
                index['contrast'] = contrasts.index
                contrasts.index = index.index
                contrast_type_map = defaultdict(lambda: 'T')
                contrast_type_map.update({contrast['name']: contrast['type']
                                          for contrast in block.contrasts})
                contrast_type_list = [contrast_type_map[contrast]
                                      for contrast in contrasts.columns]
                indices = index.to_dict('records')
                # Entities for a given contrast matrix include the intersection of
                # entities of inputs, e.g., if this level is within-subject, the
                # subject should persist
                out_ents = reduce(dict_intersection, indices)
                # Explicit entities take precedence over derived
                out_ents.update(ents)
                # Input-level contrasts will be overridden by the current level
                out_ents.pop('contrast', None)
                ent_string = '_'.join('{}-{}'.format(key, val)
                                      for key, val in out_ents.items())
                # Transpose so each contrast gets a row of data instead of column
                contrasts = contrasts.T
                # Add test indicator column
                contrasts['type'] = contrast_type_list
                contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)
                contrasts_file.parent.mkdir(parents=True, exist_ok=True)
                contrasts.to_hdf(contrasts_file, key='contrasts')
                entities.append(out_ents)
                contrast_indices.append(indices)
                contrast_info.append(str(contrasts_file))
            self._results['entities'].append(entities)
            self._results['contrast_info'].append(contrast_info)
            self._results['contrast_indices'].append(contrast_indices)
class BIDSSelectInputSpec(BaseInterfaceInputSpec):
    bids_dir = Directory(exists=True,
                         mandatory=True,
                         desc='BIDS dataset root directories')
    preproc_dir = Directory(exists=True,
                            desc='Optional preprocessed files directory')
    # One entity dict per desired BOLD file
    entities = InputMultiPath(traits.Dict(), mandatory=True)
    selectors = traits.Dict(desc='Additional selectors to be applied',
                            usedefault=True)
class BIDSSelectOutputSpec(TraitedSpec):
    bold_files = OutputMultiPath(File)
    # Entry is None when no matching brainmask exists for a BOLD file
    mask_files = OutputMultiPath(traits.Either(File, None))
    entities = OutputMultiPath(traits.Dict)
class BIDSSelect(SimpleInterface):
    """Resolve entity dicts to unique BOLD files (and matching brainmasks).

    Raises ``FileNotFoundError`` when no BOLD file matches an entity set
    and ``ValueError`` when the match is ambiguous.
    """
    input_spec = BIDSSelectInputSpec
    output_spec = BIDSSelectOutputSpec
    def _run_interface(self, runtime):
        import bids
        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths)
        bold_files = []
        mask_files = []
        entities = []
        for ents in self.inputs.entities:
            # Per-file entities override the shared selectors
            selectors = {**self.inputs.selectors, **ents}
            bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors)
            if len(bold_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(bold_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(self.inputs.bids_dir, selectors,
                              "\n\t".join(
                                  '{} ({})'.format(
                                      f.filename,
                                      layout.files[f.filename].entities)
                                  for f in bold_file)))
            # Select exactly matching mask file (may be over-cautious)
            bold_ents = layout.parse_file_entities(
                bold_file[0].filename)
            bold_ents['type'] = 'brainmask'
            mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents)
            bold_ents.pop('type')
            bold_files.append(bold_file[0].filename)
            mask_files.append(mask_file[0].filename if mask_file else None)
            entities.append(bold_ents)
        self._results['bold_files'] = bold_files
        self._results['mask_files'] = mask_files
        self._results['entities'] = entities
        return runtime
def _copy_or_convert(in_file, out_file):
    """Copy ``in_file`` to ``out_file``, converting as the extensions demand.

    Same extension: hardlink/copy.  Differs only by ``.gz``: stream
    (de)compression.  Otherwise: let nibabel attempt a format conversion,
    raising ``RuntimeError`` if that fails too.
    """
    in_ext = bids_split_filename(in_file)[2]
    out_ext = bids_split_filename(out_file)[2]

    # Identical extension: plain copy suffices
    if in_ext == out_ext:
        copyfile(in_file, out_file, copy=True, use_hardlink=True)
        return

    # Extensions differ only by a .gz suffix: (de)compress while copying
    if in_ext == out_ext + '.gz' or in_ext + '.gz' == out_ext:
        reader = GzipFile if in_ext.endswith('.gz') else open
        writer = GzipFile if out_ext.endswith('.gz') else open
        with reader(in_file, mode='rb') as src, writer(out_file, mode='wb') as dst:
            shutil.copyfileobj(src, dst)
        return

    # Let nibabel take a shot
    try:
        nb.save(nb.load(in_file), out_file)
    except Exception:
        pass
    else:
        return
    raise RuntimeError("Cannot convert {} to {}".format(in_ext, out_ext))
class BIDSDataSinkInputSpec(BaseInterfaceInputSpec):
    base_directory = Directory(
        mandatory=True,
        desc='Path to BIDS (or derivatives) root directory')
    in_file = InputMultiPath(File(exists=True), mandatory=True)
    # Paired one-to-one with in_file
    entities = InputMultiPath(traits.Dict, usedefault=True,
                              desc='Per-file entities to include in filename')
    fixed_entities = traits.Dict(usedefault=True,
                                 desc='Entities to include in all filenames')
    path_patterns = InputMultiPath(
        traits.Str, desc='BIDS path patterns describing format of file names')
class BIDSDataSinkOutputSpec(TraitedSpec):
    out_file = OutputMultiPath(File, desc='output file')
class BIDSDataSink(IOBase):
    """Write files into a BIDS(-derivatives) tree using path patterns.

    Builds each destination name from fixed + per-file entities via
    ``BIDSLayout.build_path`` and copies/converts the input there.
    """
    input_spec = BIDSDataSinkInputSpec
    output_spec = BIDSDataSinkOutputSpec
    # Sinks must always run, even when nipype believes outputs are cached
    _always_run = True
    def _list_outputs(self):
        import bids
        base_dir = self.inputs.base_directory
        os.makedirs(base_dir, exist_ok=True)
        layout = bids.BIDSLayout(base_dir)
        path_patterns = self.inputs.path_patterns
        if not isdefined(path_patterns):
            path_patterns = None
        out_files = []
        for entities, in_file in zip(self.inputs.entities,
                                     self.inputs.in_file):
            # Per-file entities override the fixed ones
            ents = {**self.inputs.fixed_entities}
            ents.update(entities)
            # Entity values must be alphanumeric-ish for BIDS names
            ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}
            out_fname = os.path.join(
                base_dir, layout.build_path(ents, path_patterns))
            makedirs(os.path.dirname(out_fname), exist_ok=True)
            _copy_or_convert(in_file, out_fname)
            out_files.append(out_fname)
        return {'out_file': out_files}
| 37.799611 | 90 | 0.583664 | import os
from functools import reduce
from pathlib import Path
from gzip import GzipFile
import json
import shutil
import numpy as np
import nibabel as nb
from collections import defaultdict
from nipype import logging
from nipype.utils.filemanip import makedirs, copyfile
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec, SimpleInterface,
InputMultiPath, OutputMultiPath, File, Directory,
traits, isdefined
)
from nipype.interfaces.io import IOBase
from ..utils import dict_intersection, snake_to_camel
iflogger = logging.getLogger('nipype.interface')
def bids_split_filename(fname):
special_extensions = [
".R.surf.gii", ".L.surf.gii",
".R.func.gii", ".L.func.gii",
".nii.gz", ".tsv.gz",
]
pth = os.path.dirname(fname)
fname = os.path.basename(fname)
for special_ext in special_extensions:
if fname.lower().endswith(special_ext.lower()):
ext_len = len(special_ext)
ext = fname[-ext_len:]
fname = fname[:-ext_len]
break
else:
fname, ext = os.path.splitext(fname)
return pth, fname, ext
def _ensure_model(model):
model = getattr(model, 'filename', model)
if isinstance(model, str):
if os.path.exists(model):
with open(model) as fobj:
model = json.load(fobj)
else:
model = json.loads(model)
return model
class ModelSpecLoaderInputSpec(BaseInterfaceInputSpec):
bids_dir = Directory(exists=True,
mandatory=True,
desc='BIDS dataset root directory')
model = traits.Either('default', InputMultiPath(File(exists=True)),
desc='Model filename')
selectors = traits.Dict(desc='Limit models to those with matching inputs')
class ModelSpecLoaderOutputSpec(TraitedSpec):
model_spec = OutputMultiPath(traits.Dict())
class ModelSpecLoader(SimpleInterface):
    """Load or generate BIDS statistical-model specifications.

    If ``model`` is not a list, models are discovered in the BIDS layout;
    with ``model='default'`` a model is auto-generated via
    ``bids.analysis.auto_model``.  Optional ``selectors`` restrict the
    result to models whose ``input`` section matches each key/value pair.
    """
    input_spec = ModelSpecLoaderInputSpec
    output_spec = ModelSpecLoaderOutputSpec
    def _run_interface(self, runtime):
        import bids
        from bids.analysis import auto_model
        models = self.inputs.model
        if not isinstance(models, list):
            # Not an explicit list of model files: query (or auto-build)
            # from the dataset layout instead.
            layout = bids.BIDSLayout(self.inputs.bids_dir)
            if not isdefined(models):
                models = layout.get(type='model')
                if not models:
                    raise ValueError("No models found")
            elif models == 'default':
                models = auto_model(layout)
        # Normalize every entry (path / JSON string / dict) to a dict
        models = [_ensure_model(m) for m in models]
        if self.inputs.selectors:
            # Keep a model only if every selector value appears in the
            # model's corresponding 'input' entry (missing keys match).
            models = [model for model in models
                      if all(val in model['input'].get(key, [val])
                             for key, val in self.inputs.selectors.items())]
        self._results['model_spec'] = models
        return runtime
IMPUTATION_SNIPPET = """\
<div class="warning">
The following confounds had NaN values for the first volume: {}.
The mean of non-zero values for the remaining entries was imputed.
If another strategy is desired, it must be explicitly specified in
the model.
</div>
"""
class LoadBIDSModelInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`LoadBIDSModel`."""
    bids_dir = Directory(exists=True,
                         mandatory=True,
                         desc='BIDS dataset root directory')
    preproc_dir = Directory(exists=True,
                            desc='Optional preprocessed files directory')
    model = traits.Dict(desc='Model specification', mandatory=True)
    selectors = traits.Dict(desc='Limit collected sessions', usedefault=True)
    include_pattern = InputMultiPath(
        traits.Str, xor=['exclude_pattern'],
        desc='Patterns to select sub-directories of BIDS root')
    exclude_pattern = InputMultiPath(
        traits.Str, xor=['include_pattern'],
        desc='Patterns to ignore sub-directories of BIDS root')
class LoadBIDSModelOutputSpec(TraitedSpec):
    """Output specification for :class:`LoadBIDSModel`.

    The outer list dimension of ``contrast_info``/``contrast_indices``/
    ``entities`` indexes the analysis level (first level, then each
    higher level).
    """
    session_info = traits.List(traits.Dict())
    contrast_info = traits.List(traits.List(File()))
    contrast_indices = traits.List(traits.List(traits.List(traits.Dict)))
    entities = traits.List(traits.List(traits.Dict()))
    warnings = traits.List(File)
class LoadBIDSModel(SimpleInterface):
    """Set up a BIDS statistical analysis and cache per-level design data.

    Builds a ``bids.Analysis`` from the model specification, then writes
    first-level events/confounds/contrasts (and higher-level contrasts)
    to HDF5 files under the working directory, one sub-directory per
    analysis level.
    """
    input_spec = LoadBIDSModelInputSpec
    output_spec = LoadBIDSModelOutputSpec
    def _run_interface(self, runtime):
        import bids
        bids.config.set_options(loop_preproc=True)
        include = self.inputs.include_pattern
        exclude = self.inputs.exclude_pattern
        if not isdefined(include):
            include = None
        if not isdefined(exclude):
            exclude = None
        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            # Preprocessed derivatives live in a separate directory tree
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths, include=include, exclude=exclude)
        selectors = self.inputs.selectors
        analysis = bids.Analysis(model=self.inputs.model, layout=layout)
        analysis.setup(drop_na=False, **selectors)
        self._load_level1(runtime, analysis)
        self._load_higher_level(runtime, analysis)
        # Keep the analysis object for downstream introspection
        runtime.analysis = analysis
        return runtime
    def _load_level1(self, runtime, analysis):
        """Write per-run events, confounds, contrasts and warnings to disk.

        Populates ``session_info``/``warnings`` and seeds the per-level
        lists (``entities``, ``contrast_indices``, ``contrast_info``)
        that :meth:`_load_higher_level` appends to.
        """
        block = analysis.blocks[0]
        block_subdir = Path(runtime.cwd) / block.level
        block_subdir.mkdir(parents=True, exist_ok=True)
        entities = []
        session_info = []
        contrast_indices = []
        contrast_info = []
        warnings = []
        for paradigm, _, ents in block.get_design_matrix(
                block.model['HRF_variables'], mode='sparse', force=True):
            info = {}
            space = analysis.layout.get_spaces(type='preproc',
                                               extensions=['.nii', '.nii.gz'])[0]
            preproc_files = analysis.layout.get(type='preproc',
                                                extensions=['.nii', '.nii.gz'],
                                                space=space,
                                                **ents)
            # NOTE(review): message says "too many" but this also triggers
            # when zero files match the entities
            if len(preproc_files) != 1:
                raise ValueError('Too many BOLD files found')
            fname = preproc_files[0].filename
            TR = analysis.layout.get_metadata(fname, type='bold',
                                              full_search=True)['RepetitionTime']
            # Variables not convolved with the HRF form the dense regressors
            dense_vars = set(block.model['variables']) - set(block.model['HRF_variables'])
            _, confounds, _ = block.get_design_matrix(dense_vars,
                                                      mode='dense',
                                                      force=True,
                                                      sampling_rate=1/TR,
                                                      **ents)[0]
            ent_string = '_'.join('{}-{}'.format(key, val)
                                  for key, val in ents.items())
            events_file = block_subdir / '{}_events.h5'.format(ent_string)
            paradigm.to_hdf(events_file, key='events')
            imputed = []
            if confounds is not None:
                names = [col for col in confounds.columns
                         if col.startswith('NonSteadyStateOutlier') or
                         col in block.model['variables']]
                confounds = confounds[names]
                # These confounds are undefined for the first volume; impute
                # the mean of the remaining non-zero entries so regression
                # does not fail on a leading NaN.
                for imputable in ('FramewiseDisplacement',
                                  'stdDVARS', 'non-stdDVARS',
                                  'vx-wisestdDVARS'):
                    if imputable in confounds.columns:
                        vals = confounds[imputable].values
                        if not np.isnan(vals[0]):
                            continue
                        # NOTE(review): chained assignment; newer pandas may
                        # warn or fail to propagate — confirm pinned version
                        confounds[imputable][0] = np.nanmean(vals[vals != 0])
                        imputed.append(imputable)
                if np.isnan(confounds.values).any():
                    iflogger.warning('Unexpected NaNs found in confounds; '
                                     'regression may fail.')
                confounds_file = block_subdir / '{}_confounds.h5'.format(ent_string)
                confounds.to_hdf(confounds_file, key='confounds')
            else:
                confounds_file = None
            info['events'] = str(events_file)
            info['confounds'] = str(confounds_file)
            info['repetition_time'] = TR
            contrasts, index, _ = block.get_contrasts(**ents)[0]
            # Default any contrast without an explicit type to a T contrast
            contrast_type_map = defaultdict(lambda: 'T')
            contrast_type_map.update({contrast['name']: contrast['type']
                                      for contrast in block.contrasts})
            contrast_type_list = [contrast_type_map[contrast]
                                  for contrast in contrasts.columns]
            contrasts = contrasts.T
            contrasts['type'] = contrast_type_list
            contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)
            contrasts_file.parent.mkdir(parents=True, exist_ok=True)
            contrasts.to_hdf(contrasts_file, key='contrasts')
            # The warning file is written even when empty so reports can
            # include it unconditionally
            warning_file = block_subdir / '{}_warning.html'.format(ent_string)
            with warning_file.open('w') as fobj:
                if imputed:
                    fobj.write(IMPUTATION_SNIPPET.format(', '.join(imputed)))
            entities.append(ents)
            session_info.append(info)
            contrast_indices.append(index.to_dict('records'))
            contrast_info.append(str(contrasts_file))
            warnings.append(str(warning_file))
        self._results['session_info'] = session_info
        self._results['warnings'] = warnings
        self._results.setdefault('entities', []).append(entities)
        self._results.setdefault('contrast_indices', []).append(contrast_indices)
        self._results.setdefault('contrast_info', []).append(contrast_info)
    def _load_higher_level(self, runtime, analysis):
        """Write contrast matrices for each block above the first level.

        Assumes :meth:`_load_level1` already seeded the per-level result
        lists, and appends one entry per higher-level block.
        """
        cwd = Path(runtime.cwd)
        for block in analysis.blocks[1:]:
            block_subdir = cwd / block.level
            block_subdir.mkdir(parents=True, exist_ok=True)
            entities = []
            contrast_indices = []
            contrast_info = []
            for contrasts, index, ents in block.get_contrasts():
                if contrasts.empty:
                    continue
                index['contrast'] = contrasts.index
                contrasts.index = index.index
                contrast_type_map = defaultdict(lambda: 'T')
                contrast_type_map.update({contrast['name']: contrast['type']
                                          for contrast in block.contrasts})
                contrast_type_list = [contrast_type_map[contrast]
                                      for contrast in contrasts.columns]
                indices = index.to_dict('records')
                # Entities shared by every input, plus this block's own
                out_ents = reduce(dict_intersection, indices)
                out_ents.update(ents)
                out_ents.pop('contrast', None)
                ent_string = '_'.join('{}-{}'.format(key, val)
                                      for key, val in out_ents.items())
                contrasts = contrasts.T
                contrasts['type'] = contrast_type_list
                contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string)
                contrasts_file.parent.mkdir(parents=True, exist_ok=True)
                contrasts.to_hdf(contrasts_file, key='contrasts')
                entities.append(out_ents)
                contrast_indices.append(indices)
                contrast_info.append(str(contrasts_file))
            self._results['entities'].append(entities)
            self._results['contrast_info'].append(contrast_info)
            self._results['contrast_indices'].append(contrast_indices)
class BIDSSelectInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`BIDSSelect`."""
    bids_dir = Directory(exists=True,
                         mandatory=True,
                         desc='BIDS dataset root directories')
    preproc_dir = Directory(exists=True,
                            desc='Optional preprocessed files directory')
    entities = InputMultiPath(traits.Dict(), mandatory=True)
    selectors = traits.Dict(desc='Additional selectors to be applied',
                            usedefault=True)
class BIDSSelectOutputSpec(TraitedSpec):
    """Output specification for :class:`BIDSSelect`."""
    bold_files = OutputMultiPath(File)
    mask_files = OutputMultiPath(traits.Either(File, None))
    entities = OutputMultiPath(traits.Dict)
class BIDSSelect(SimpleInterface):
    """Select one BOLD file (and its brain mask, if any) per entity set.

    For each entity dict, exactly one BOLD image must match the combined
    selectors; zero matches raise ``FileNotFoundError`` and multiple
    matches raise ``ValueError`` listing the candidates.
    """
    input_spec = BIDSSelectInputSpec
    output_spec = BIDSSelectOutputSpec
    def _run_interface(self, runtime):
        import bids
        paths = [(self.inputs.bids_dir, 'bids')]
        if isdefined(self.inputs.preproc_dir):
            paths.append((self.inputs.preproc_dir, ['bids', 'derivatives']))
        layout = bids.BIDSLayout(paths)
        bold_files = []
        mask_files = []
        entities = []
        for ents in self.inputs.entities:
            # Per-file entities take precedence over the global selectors
            selectors = {**self.inputs.selectors, **ents}
            bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors)
            if len(bold_file) == 0:
                raise FileNotFoundError(
                    "Could not find BOLD file in {} with entities {}"
                    "".format(self.inputs.bids_dir, selectors))
            elif len(bold_file) > 1:
                raise ValueError(
                    "Non-unique BOLD file in {} with entities {}.\n"
                    "Matches:\n\t{}"
                    "".format(self.inputs.bids_dir, selectors,
                              "\n\t".join(
                                  '{} ({})'.format(
                                      f.filename,
                                      layout.files[f.filename].entities)
                                  for f in bold_file)))
            # Re-query with the BOLD file's own entities to find a matching
            # brain mask; missing masks are represented as None
            bold_ents = layout.parse_file_entities(
                bold_file[0].filename)
            bold_ents['type'] = 'brainmask'
            mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents)
            bold_ents.pop('type')
            bold_files.append(bold_file[0].filename)
            mask_files.append(mask_file[0].filename if mask_file else None)
            entities.append(bold_ents)
        self._results['bold_files'] = bold_files
        self._results['mask_files'] = mask_files
        self._results['entities'] = entities
        return runtime
def _copy_or_convert(in_file, out_file):
    """Copy ``in_file`` to ``out_file``, converting formats if needed.

    Three strategies are tried in order:

    1. identical extensions: plain copy (hardlink where possible)
    2. same format differing only by ``.gz``: (de)compress the stream
    3. anything else: let nibabel load/save between image formats

    Raises
    ------
    RuntimeError
        If none of the strategies applies or nibabel fails; the nibabel
        failure, if any, is chained as the cause.
    """
    in_ext = bids_split_filename(in_file)[2]
    out_ext = bids_split_filename(out_file)[2]
    if in_ext == out_ext:
        copyfile(in_file, out_file, copy=True, use_hardlink=True)
        return
    if in_ext == out_ext + '.gz' or in_ext + '.gz' == out_ext:
        # Same underlying format; just gzip or gunzip the byte stream
        read_open = GzipFile if in_ext.endswith('.gz') else open
        write_open = GzipFile if out_ext.endswith('.gz') else open
        with read_open(in_file, mode='rb') as in_fobj:
            with write_open(out_file, mode='wb') as out_fobj:
                shutil.copyfileobj(in_fobj, out_fobj)
        return
    # Let nibabel take a shot
    try:
        nb.save(nb.load(in_file), out_file)
    except Exception as err:
        # Chain the original failure instead of silently discarding it,
        # so the root cause survives in the traceback
        raise RuntimeError(
            "Cannot convert {} to {}".format(in_ext, out_ext)) from err
class BIDSDataSinkInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`BIDSDataSink`."""
    base_directory = Directory(
        mandatory=True,
        desc='Path to BIDS (or derivatives) root directory')
    in_file = InputMultiPath(File(exists=True), mandatory=True)
    entities = InputMultiPath(traits.Dict, usedefault=True,
                              desc='Per-file entities to include in filename')
    fixed_entities = traits.Dict(usedefault=True,
                                 desc='Entities to include in all filenames')
    path_patterns = InputMultiPath(
        traits.Str, desc='BIDS path patterns describing format of file names')
class BIDSDataSinkOutputSpec(TraitedSpec):
    """Output specification for :class:`BIDSDataSink`."""
    out_file = OutputMultiPath(File, desc='output file')
class BIDSDataSink(IOBase):
    """Write files into a BIDS(-derivatives) tree using path patterns.

    Output names are built by ``BIDSLayout.build_path`` from the merged
    fixed and per-file entities; inputs are copied or format-converted
    into place by :func:`_copy_or_convert`.
    """
    input_spec = BIDSDataSinkInputSpec
    output_spec = BIDSDataSinkOutputSpec
    # Always re-run: outputs depend on the state of the sink directory
    _always_run = True
    def _list_outputs(self):
        import bids
        base_dir = self.inputs.base_directory
        os.makedirs(base_dir, exist_ok=True)
        layout = bids.BIDSLayout(base_dir)
        path_patterns = self.inputs.path_patterns
        if not isdefined(path_patterns):
            path_patterns = None
        out_files = []
        for entities, in_file in zip(self.inputs.entities,
                                     self.inputs.in_file):
            # Per-file entities override the fixed ones
            ents = {**self.inputs.fixed_entities}
            ents.update(entities)
            # BIDS labels must be alphanumeric, so camel-case the values
            ents = {k: snake_to_camel(str(v)) for k, v in ents.items()}
            out_fname = os.path.join(
                base_dir, layout.build_path(ents, path_patterns))
            makedirs(os.path.dirname(out_fname), exist_ok=True)
            _copy_or_convert(in_file, out_fname)
            out_files.append(out_fname)
        return {'out_file': out_files}
| true | true |
f732d44d2f7a146365d6b31adb45ce306427680e | 3,780 | py | Python | setup.py | wkerzendorf/wsynphot | 1770ebe0d44a729753f9fd2e535803fcf2a4ad33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-06-25T17:39:08.000Z | 2022-02-11T08:41:06.000Z | setup.py | wkerzendorf/wsynphot | 1770ebe0d44a729753f9fd2e535803fcf2a4ad33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 23 | 2019-02-26T22:31:56.000Z | 2022-01-04T21:27:28.000Z | setup.py | wkerzendorf/wsynphot | 1770ebe0d44a729753f9fd2e535803fcf2a4ad33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 9 | 2018-10-18T19:02:40.000Z | 2021-01-28T08:42:58.000Z | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Astropy package-template setup script: all package metadata comes from
# setup.cfg; build behavior is delegated to astropy_helpers.
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
#A dirty hack to get around some early import/configurations ambiguities
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (
    register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
try:
    from configparser import ConfigParser
except ImportError:
    from ConfigParser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
metadata = {str(k): str(v) for k, v in metadata.items()} #Making sure parsed data is in string not unicode
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.0.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
    VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
package_info['package_data'][PACKAGENAME].append('data/*/*')
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# NOTE(review): use_2to3 and the __builtin__ fallback indicate Python 2
# support; setuptools>=58 removed use_2to3 entirely — confirm the pinned
# setuptools version still accepts this argument.
setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy'],
      install_requires=['astropy'],
      provides=[PACKAGENAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      **package_info
      )
| 32.586207 | 106 | 0.742328 |
# Astropy package-template setup script: metadata is read from setup.cfg
# and the build machinery is delegated to astropy_helpers.
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# Flag that we are inside setup before importing the package itself
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (
    register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# ConfigParser moved/renamed between Python 2 and 3
try:
    from configparser import ConfigParser
except ImportError:
    from ConfigParser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
metadata = {str(k): str(v) for k, v in metadata.items()}
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# The package docstring doubles as the long description
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
VERSION = '0.0.dev'
# Non-release versions get a git-derived dev suffix
RELEASE = 'dev' not in VERSION
if not RELEASE:
    VERSION += get_git_devstr(False)
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
package_info['package_data'][PACKAGENAME].append('data/*/*')
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy'],
      install_requires=['astropy'],
      provides=[PACKAGENAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      **package_info
      )
| true | true |
f732d4512c69c0e652073c1835ee841350b02bfd | 8,873 | py | Python | rhyme.py | qkhy/poetry-seq2seq | 6fa4959ac489c5615008156cbf88817bba8d98be | [
"MIT"
] | 186 | 2017-09-05T06:48:14.000Z | 2022-02-26T15:25:44.000Z | rhyme.py | ZhanHaolan316/poetry-seq2seq | 6fa4959ac489c5615008156cbf88817bba8d98be | [
"MIT"
] | 2 | 2018-06-28T13:41:45.000Z | 2021-03-17T02:51:58.000Z | rhyme.py | ZhanHaolan316/poetry-seq2seq | 6fa4959ac489c5615008156cbf88817bba8d98be | [
"MIT"
] | 67 | 2017-11-02T08:45:45.000Z | 2021-09-27T05:38:18.000Z | #! /usr/bin/env python
#-*- coding:utf-8 -*-
from utils import *
import pypinyin
py_raw = os.path.join(DATA_RAW_DIR, 'pinyin.txt')
_rhy_path = os.path.join(DATA_PROCESSED_DIR, 'rhy_dict.json')
'''
Tonal and rhyming reference from:
https://baike.baidu.com/item/绝句律诗格律
'''
'''
类型一
⊙平平仄仄,⊙仄仄平平。(韵)⊙仄平平仄,平平仄仄平。(韵)
例诗: 山中 王勃
长江悲已滞,万里念将归。况属高秋晚,山中黄叶飞。
'''
five_char_type_a = {
'tone': [
'*ppzz',
'*zzpp',
'*zppz',
'ppzzp'
],
'rhyme': [1, 3]
}
'''
类型二
平平仄仄平,(韵)⊙仄仄平平。(韵)⊙仄⊙平仄,平平仄仄平。(韵)
例诗:壬辰元日试笔呈诸师友 陈忠远(即阿袁)
龙光绚九天,虎幄定三边。一守凤城道:“新年胜旧年!”
'''
five_char_type_b = {
'tone': [
'ppzzp',
'*zzpp',
'*z*pz',
'ppzzp'
],
'rhyme': [0, 1, 3]
}
'''
类型三
⊙仄平平仄,平平仄仄平。(韵)⊙平平仄仄,⊙仄仄平平。(韵)
例诗:南行别第 韦承庆
万里人南去,三春雁北飞。不知何岁月,得与尔同归。
'''
five_char_type_c = {
'tone': [
'*zppz',
'ppzzp',
'*ppzz',
'*zzpp'
],
'rhyme': [1, 3]
}
'''
类型四
⊙仄仄平平,(韵)平平仄仄平。(韵)⊙平平仄仄,⊙仄仄平平。(韵)
例诗: 塞下曲 卢纶
林暗草惊风,将军夜引弓。平明寻白羽,没在石棱中。
'''
five_char_type_d = {
'tone': [
'*zzpp',
'ppzzp',
'*ppzz',
'*zzpp'
],
'rhyme': [0, 1, 3]
}
five_char_tones = [
five_char_type_a,
five_char_type_b,
five_char_type_c,
five_char_type_d
]
'''
类型一
平起、首句不押韵
⊙平⊙仄平平仄, ⊙仄平平仄仄平。(韵) ⊙仄⊙平平仄仄, ⊙平⊙仄仄平平。(韵)
例诗:南游感兴 窦巩
伤心欲问前朝事, 惟见江流去不回。 日暮东风春草绿, 鹧鸪飞上越王台。
'''
seven_char_type_a = {
'tone': [
'*p*zppz',
'*zppzzp',
'*z*ppzz',
'*p*zzpp'
],
'rhyme': [1, 3]
}
'''
类型二
平起、首句押韵
⊙平⊙仄仄平平,(韵) ⊙仄平平仄仄平。(韵) ⊙仄⊙平平仄仄, ⊙平⊙仄仄平平。(韵)
例诗:出塞 王昌龄
秦时明月汉时关, 万里长征人未还。 但使龙城飞将在, 不教胡马度阴山。
'''
seven_char_type_b = {
'tone': [
'*p*zzpp',
'*zppzzp',
'*z*ppzz',
'*p*zzpp'
],
'rhyme': [0, 1, 3]
}
'''
类型三
仄起、首句不押韵
⊙仄⊙平平仄仄, ⊙平⊙仄仄平平。(韵) ⊙平⊙仄平平仄, ⊙仄平平仄仄平。(韵)
例诗:九月九日忆山东兄弟王维
独在异乡为异客, 每逢佳节倍思亲。 遥知兄弟登高处, 遍插茱萸少一人。
'''
seven_char_type_c = {
'tone': [
'*z*ppzz',
'*p*zzpp',
'*p*zppz',
'*zppzzp'
],
'rhyme': [1, 3]
}
'''
类型四
仄起、首句押韵
⊙仄平平仄仄平,(韵) ⊙平⊙仄仄平平。(韵) ⊙平⊙仄平平仄, ⊙仄平平仄仄平。(韵)
例诗:从军行 王昌龄
青海长云暗雪山, 孤城遥望玉门关。 黄沙百战穿金甲, 不破楼兰终不还!
'''
seven_char_type_d = {
'tone': [
'*zppzzp',
'*p*zzpp',
'*p*zppz',
'*zppzzp'
],
'rhyme': [0, 1, 3]
}
seven_char_tones = [
seven_char_type_a,
seven_char_type_b,
seven_char_type_c,
seven_char_type_d
]
tone_rules = {
5: five_char_tones,
7: seven_char_tones
}
class RhymeUtil:
    """Pinyin-based tone and rhyme helpers for single Chinese characters."""

    def get_rhyme_category(self, vowel):
        """Map a pinyin final to its rhyme category.

        Args:
            vowel: A pinyin final (vowel part), case-insensitive

        Returns:
            int: The rhyme category (1-12, 14 or 15), or None if the
                final belongs to no category. Category 13 ('I' after
                non-retroflex initials, or 'V') is intentionally
                unassigned; a bare 'I' maps to 14 and 'U' to 15.
        """
        vowel = vowel.upper()
        if vowel in ['A', 'IA', 'UA']:
            return 1
        elif vowel in ['O', 'E', 'UO']:
            return 2
        elif vowel in ['IE', 'VE']:
            return 3
        elif vowel in ['AI', 'UAI']:
            return 4
        elif vowel in ['EI', 'UI']:
            return 5
        elif vowel in ['AO', 'IAO']:
            return 6
        elif vowel in ['OU', 'IU']:
            return 7
        elif vowel in ['AN', 'IAN', 'UAN', 'VAN']:
            return 8
        elif vowel in ['EN', 'IN', 'UN', 'VN']:
            return 9
        elif vowel in ['ANG', 'IANG', 'UANG']:
            return 10
        elif vowel in ['ENG', 'ING']:
            return 11
        elif vowel in ['ONG', 'IONG']:
            return 12
        elif vowel == 'I':
            return 14
        elif vowel == 'U':
            return 15
        else:
            return None

    def has_char(self, ch):
        """
        Args:
            ch: A unicode character
        Returns:
            bool: Whether rhyming information exists for this character
        """
        return True

    def get_possible_tones(self, ch):
        """
        Args:
            ch: A unicode character
        Returns:
            [int]: A list of possible tones, deduplicated, in pypinyin order
        """
        final_tones = pypinyin.pinyin(ch, style=pypinyin.FINALS_TONE3, heteronym=True, errors=u'default')[0]  # select results for first and only char
        tones = map(lambda final_tone: final_tone[-1], final_tones)
        # Keep only trailing tone digits (the neutral tone has none).
        # A lambda is used instead of `unicode.isdigit` so this also runs
        # on Python 3, where the `unicode` builtin does not exist.
        filtered_tones = filter(lambda tone: tone.isdigit(), tones)
        tones_int = map(int, filtered_tones)
        # deduplication, preserving first-seen order
        deduped_tones = []
        for tone in tones_int:
            if tone not in deduped_tones:
                deduped_tones.append(tone)
        return deduped_tones

    def get_possible_vowels(self, ch):
        """
        Args:
            ch: A unicode character
        Returns:
            [str]: A list of possible vowels (pinyin finals)
        """
        vowels = pypinyin.pinyin(ch, style=pypinyin.FINALS, heteronym=True, errors=u'default')[0]  # select results for first and only char
        return vowels

    def get_possible_tone_types(self, ch):
        """
        Args:
            ch: A unicode character
        Returns:
            str: 'p' (ping: tones 1/2), 'z' (ze: tones 3/4), or '*' if
                both are possible
        """
        tones = self.get_possible_tones(ch)
        pin_tones = {1, 2} & set(tones)
        ze_tones = {3, 4} & set(tones)
        if pin_tones and ze_tones:
            return '*'
        elif pin_tones:
            return 'p'
        elif ze_tones:
            return 'z'
        else:
            raise Exception('No tones associated with the character')

    def get_possible_rhyme_categories(self, ch):
        """
        Args:
            ch: A unicode character
        Returns:
            [int]: A list of possible rhyme categories (Nones filtered out)
        """
        vowels = self.get_possible_vowels(ch)
        rhyme_categories = map(self.get_rhyme_category, vowels)
        filtered_categories = filter(None, rhyme_categories)
        return filtered_categories

    def can_rhyme(self, ch_list):
        """
        Args:
            ch_list: A non-empty list of unicode characters
        Returns:
            bool: Whether the characters share at least one rhyme category
        """
        rhyme_categories_list = [set(self.get_possible_rhyme_categories(ch)) for ch in ch_list]
        common_categories = set.intersection(*rhyme_categories_list)
        result = True if common_categories else False
        return result
class RhymeEvaluator:
    """Scores 4-line (5- or 7-character) poems against classical tone/rhyme rules."""

    def __init__(self):
        self.rhyme_util = RhymeUtil()

    def score_tone(self, rule, sentences):
        """Fraction of characters whose tone type matches the rule's pattern."""
        tone_rule = rule['tone']
        score = 0.
        max_score = float(len(sentences) * len(sentences[0]))
        for line_index, line in enumerate(sentences):
            for ch_index, ch in enumerate(line):
                expected_tone_type = tone_rule[line_index][ch_index]
                possible_tone_type = self.rhyme_util.get_possible_tone_types(ch)
                tone_type_set = {expected_tone_type, possible_tone_type}
                # '*' is a wildcard; a singleton set means an exact match
                if '*' in tone_type_set or len(tone_type_set) == 1:
                    score += 1.
        percentage_score = score / max_score
        return percentage_score

    def score_rhyme(self, rule, sentences):
        """1.0 if the rule's rhyming line-ends can share a rhyme category, else 0.0."""
        rhyme_rule = rule['rhyme']
        rhyme_chars = [sentences[line_number][-1] for line_number in rhyme_rule]
        score = 1. if self.rhyme_util.can_rhyme(rhyme_chars) else 0.
        return score

    def score(self, rule, sentences, split=0.5, output_split=False):
        """Weighted combination of tone and rhyme scores.

        Args:
            split: Weight of the tone score; the rhyme score gets 1-split.
            output_split: Also return the individual tone/rhyme scores.
        """
        tone_score = self.score_tone(rule, sentences)
        rhyme_score = self.score_rhyme(rule, sentences)
        combined_score = tone_score * split + rhyme_score * (1. - split)
        if output_split:
            return combined_score, tone_score, rhyme_score
        else:
            return combined_score

    def eval(self, sentences, output_all_scores=False, output_split=False):
        """
        Args:
            sentences: A list of unicode strings
        Returns:
            float: A score from 0 to 1 (or tuples per the output flags)
        """
        # check 4 lines
        if len(sentences) != 4:
            return 0.
        # check all lines are either 5 or 7 characters and same number of characters
        sentence_lengths = set([len(sentence) for sentence in sentences])
        sentence_length = list(sentence_lengths)[0]
        if len(sentence_lengths) != 1 or sentence_length not in [5, 7]:
            return 0.
        rules = tone_rules[sentence_length]
        # Materialize as a list: a `map` iterator would be exhausted after
        # the first max() below on Python 3.
        scores = [self.score(rule, sentences, output_split=output_split)
                  for rule in rules]
        if output_split:
            max_combined = max([score[0] for score in scores])
            max_tone = max([score[1] for score in scores])
            max_rhyme = max([score[2] for score in scores])
            max_score = (max_combined, max_tone, max_rhyme)
        else:
            max_score = max(scores)
        if output_all_scores:
            return max_score, scores
        else:
            return max_score
| 22.577608 | 149 | 0.548518 |
from utils import *
import pypinyin
py_raw = os.path.join(DATA_RAW_DIR, 'pinyin.txt')
_rhy_path = os.path.join(DATA_PROCESSED_DIR, 'rhy_dict.json')
five_char_type_a = {
'tone': [
'*ppzz',
'*zzpp',
'*zppz',
'ppzzp'
],
'rhyme': [1, 3]
}
five_char_type_b = {
'tone': [
'ppzzp',
'*zzpp',
'*z*pz',
'ppzzp'
],
'rhyme': [0, 1, 3]
}
five_char_type_c = {
'tone': [
'*zppz',
'ppzzp',
'*ppzz',
'*zzpp'
],
'rhyme': [1, 3]
}
five_char_type_d = {
'tone': [
'*zzpp',
'ppzzp',
'*ppzz',
'*zzpp'
],
'rhyme': [0, 1, 3]
}
five_char_tones = [
five_char_type_a,
five_char_type_b,
five_char_type_c,
five_char_type_d
]
seven_char_type_a = {
'tone': [
'*p*zppz',
'*zppzzp',
'*z*ppzz',
'*p*zzpp'
],
'rhyme': [1, 3]
}
seven_char_type_b = {
'tone': [
'*p*zzpp',
'*zppzzp',
'*z*ppzz',
'*p*zzpp'
],
'rhyme': [0, 1, 3]
}
seven_char_type_c = {
'tone': [
'*z*ppzz',
'*p*zzpp',
'*p*zppz',
'*zppzzp'
],
'rhyme': [1, 3]
}
seven_char_type_d = {
'tone': [
'*zppzzp',
'*p*zzpp',
'*p*zppz',
'*zppzzp'
],
'rhyme': [0, 1, 3]
}
seven_char_tones = [
seven_char_type_a,
seven_char_type_b,
seven_char_type_c,
seven_char_type_d
]
tone_rules = {
5: five_char_tones,
7: seven_char_tones
}
class RhymeUtil:
    """Pinyin-based tone and rhyme helpers for single Chinese characters."""
    def get_rhyme_category(self, vowel):
        """Map a pinyin final to its rhyme category (1-12, 14, 15) or None.

        Category 13 is intentionally unassigned; a bare 'I' maps to 14
        and 'U' to 15. Matching is case-insensitive.
        """
        vowel = vowel.upper()
        if vowel in ['A', 'IA', 'UA']:
            return 1
        elif vowel in ['O', 'E', 'UO']:
            return 2
        elif vowel in ['IE', 'VE']:
            return 3
        elif vowel in ['AI', 'UAI']:
            return 4
        elif vowel in ['EI', 'UI']:
            return 5
        elif vowel in ['AO', 'IAO']:
            return 6
        elif vowel in ['OU', 'IU']:
            return 7
        elif vowel in ['AN', 'IAN', 'UAN', 'VAN']:
            return 8
        elif vowel in ['EN', 'IN', 'UN', 'VN']:
            return 9
        elif vowel in ['ANG', 'IANG', 'UANG']:
            return 10
        elif vowel in ['ENG', 'ING']:
            return 11
        elif vowel in ['ONG', 'IONG']:
            return 12
        elif vowel == 'I':
            return 14
        elif vowel == 'U':
            return 15
        else:
            return None
    def has_char(self, ch):
        """Whether rhyming information exists for this character (always True)."""
        return True
    def get_possible_tones(self, ch):
        """Return the deduplicated list of possible tones (ints) for *ch*."""
        final_tones = pypinyin.pinyin(ch, style=pypinyin.FINALS_TONE3, heteronym=True, errors=u'default')[0]
        tones = map(lambda final_tone: final_tone[-1], final_tones)
        # NOTE(review): `unicode` exists only on Python 2; this line raises
        # NameError on Python 3 — confirm the target interpreter
        filtered_tones = filter(unicode.isdigit, tones)
        tones_int = map(int, filtered_tones)
        deduped_tones = []
        for tone in tones_int:
            if tone not in deduped_tones:
                deduped_tones.append(tone)
        return deduped_tones
    def get_possible_vowels(self, ch):
        """Return the list of possible pinyin finals (vowels) for *ch*."""
        vowels = pypinyin.pinyin(ch, style=pypinyin.FINALS, heteronym=True, errors=u'default')[0]
        return vowels
    def get_possible_tone_types(self, ch):
        """Return 'p' (ping: tones 1/2), 'z' (ze: tones 3/4) or '*' (both)."""
        tones = self.get_possible_tones(ch)
        pin_tones = {1, 2} & set(tones)
        ze_tones = {3, 4} & set(tones)
        if pin_tones and ze_tones:
            return '*'
        elif pin_tones:
            return 'p'
        elif ze_tones:
            return 'z'
        else:
            raise Exception('No tones associated with the character')
    def get_possible_rhyme_categories(self, ch):
        """Return the possible rhyme categories for *ch* (Nones filtered out)."""
        vowels = self.get_possible_vowels(ch)
        rhyme_categories = map(self.get_rhyme_category, vowels)
        filtered_categories = filter(None, rhyme_categories)
        return filtered_categories
    def can_rhyme(self, ch_list):
        """Whether the (non-empty) characters share at least one rhyme category."""
        rhyme_categories_list = [set(self.get_possible_rhyme_categories(ch)) for ch in ch_list]
        common_categories = set.intersection(*rhyme_categories_list)
        result = True if common_categories else False
        return result
class RhymeEvaluator:
    """Scores 4-line (5- or 7-character) poems against classical tone/rhyme rules."""
    def __init__(self):
        self.rhyme_util = RhymeUtil()
    def score_tone(self, rule, sentences):
        """Fraction of characters whose tone type matches the rule's pattern."""
        tone_rule = rule['tone']
        score = 0.
        max_score = float(len(sentences) * len(sentences[0]))
        for line_index, line in enumerate(sentences):
            for ch_index, ch in enumerate(line):
                expected_tone_type = tone_rule[line_index][ch_index]
                possible_tone_type = self.rhyme_util.get_possible_tone_types(ch)
                tone_type_set = {expected_tone_type, possible_tone_type}
                # '*' is a wildcard; a singleton set means an exact match
                if '*' in tone_type_set or len(tone_type_set) == 1:
                    score += 1.
        percentage_score = score / max_score
        return percentage_score
    def score_rhyme(self, rule, sentences):
        """1.0 if the rule's rhyming line-ends can share a rhyme category, else 0.0."""
        rhyme_rule = rule['rhyme']
        rhyme_chars = [sentences[line_number][-1] for line_number in rhyme_rule]
        score = 1. if self.rhyme_util.can_rhyme(rhyme_chars) else 0.
        return score
    def score(self, rule, sentences, split=0.5, output_split=False):
        """Weighted tone/rhyme score; `split` weights the tone component."""
        tone_score = self.score_tone(rule, sentences)
        rhyme_score = self.score_rhyme(rule, sentences)
        tone_weight = split
        rhyme_weight = 1. - split
        combined_score = tone_score * tone_weight + rhyme_score * rhyme_weight
        if output_split:
            return combined_score, tone_score, rhyme_score
        else:
            return combined_score
    def eval(self, sentences, output_all_scores=False, output_split=False):
        """Best score of *sentences* over all rules; 0. for invalid shapes."""
        if len(sentences) != 4:
            return 0.
        sentence_lengths = set([len(sentence) for sentence in sentences])
        sentence_length = list(sentence_lengths)[0]
        if len(sentence_lengths) != 1 or sentence_length not in [5, 7]:
            return 0.
        rules = tone_rules[sentence_length]
        # NOTE(review): on Python 3 `map` is a one-shot iterator, so the
        # repeated comprehensions below would see it exhausted — confirm
        # this module targets Python 2
        scores = map(lambda rule: self.score(rule, sentences, output_split=output_split), rules)
        if output_split:
            max_combined = max([score[0] for score in scores])
            max_tone = max([score[1] for score in scores])
            max_rhyme = max([score[2] for score in scores])
            max_score = (max_combined, max_tone, max_rhyme)
        else:
            max_score = max(scores)
        if output_all_scores:
            return max_score, scores
        else:
            return max_score
| true | true |
f732d466d9a9cefc73ad200dd12017111ad59fb6 | 994 | py | Python | benchmarks/pcap_gen.py | Nic30/pclass-vectorized | 33bc92c66f717896fb48bd5c382729f8c76bc882 | [
"MIT"
] | 1 | 2020-07-14T17:24:33.000Z | 2020-07-14T17:24:33.000Z | benchmarks/pcap_gen.py | Nic30/pclass-vectorized | 33bc92c66f717896fb48bd5c382729f8c76bc882 | [
"MIT"
] | 14 | 2019-03-14T09:24:37.000Z | 2019-12-19T17:44:21.000Z | benchmarks/pcap_gen.py | Nic30/pclass-vectorized | 33bc92c66f717896fb48bd5c382729f8c76bc882 | [
"MIT"
] | null | null | null | from scapy.all import *
def basic_flows(flow_numbers=None, out_dir="test_data"):
    """Generate pcap files and matching OpenFlow rules for benchmark flows.

    For each flow count ``f_n`` this writes ``flows_{f_n}.pcap`` with
    ``f_n`` unique broadcast TCP packets and ``rules_{f_n}.txt`` with one
    exact-match rule per packet into *out_dir* (created if missing).

    :param flow_numbers: flow counts to generate; defaults to the
        standard benchmark sizes
    :param out_dir: output directory for the generated files
    """
    import os
    if flow_numbers is None:
        flow_numbers = [10000, 50000, 75000, 85000, 95000]
    # Ensure the output directory exists so wrpcap/open do not fail
    os.makedirs(out_dir, exist_ok=True)
    for f_n in flow_numbers:
        pkts = []
        rules = []
        for i in range(f_n):
            # Spread the flow index over the last three IPv4 octets
            a, b, c = ((i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff)
            src = f"2.{a}.{b}.{c}"
            dst = f"1.{a}.{b}.{c}"
            pkt = Ether(dst="FF:FF:FF:FF:FF:FF") / IP(dst=dst, src=src) / TCP(sport=1, dport=1) / "0000"
            pkts.append(pkt)
            r = f"in_port=1,ip,nw_dst={dst},nw_src={src},tcp,tp_src=1,tp_dst=1,actions=output:2"
            rules.append(r)
        wrpcap(os.path.join(out_dir, f'flows_{f_n}.pcap'), pkts)
        with open(os.path.join(out_dir, f"rules_{f_n}.txt"), "w") as f:
            for r in rules:
                f.write(r + "\n")
        print(f"done {f_n}")
def lpm_flows():
    """Unimplemented placeholder for longest-prefix-match flow generation.

    The loop bound (32+32+16+16 bits) suggests iterating over all prefix
    lengths of the src/dst IP and src/dst port fields, but no generation
    logic exists yet.
    """
    for i in range(1, 32 + 32 + 16 + 16 + 1):
        pass
basic_flows() | 23.666667 | 104 | 0.454728 | from scapy.all import *
def basic_flows():
flow_numbers = [
10000,
50000,
75000,
85000,
95000,
]
for f_n in flow_numbers:
pkts = []
rules = []
for i in range(f_n):
a, b, c = ((i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff)
src = f"2.{a}.{b}.{c}"
dst = f"1.{a}.{b}.{c}"
pkt = Ether(dst="FF:FF:FF:FF:FF:FF") / IP(dst=dst, src=src) / TCP(sport=1, dport=1) / "0000"
pkts.append(pkt)
r = f"in_port=1,ip,nw_dst={dst},nw_src={src},tcp,tp_src=1,tp_dst=1,actions=output:2"
rules.append(r)
wrpcap(f'test_data/flows_{f_n}.pcap', pkts)
with open(f"test_data/rules_{f_n}.txt", "w") as f:
for r in rules:
f.write(r + "\n")
print(f"done {f_n}")
def lpm_flows():
for i in range(1, 32 + 32 + 16 + 16 + 1):
pass
basic_flows() | true | true |
f732d525d6d359c0b4bcf626a94760f71d7e2b2a | 5,458 | py | Python | deephyper/search/nas/model/space/keras_search_space.py | jtchilders/deephyper | 06f9653599757a69fa5720820f4de3a1f154b081 | [
"BSD-3-Clause"
] | null | null | null | deephyper/search/nas/model/space/keras_search_space.py | jtchilders/deephyper | 06f9653599757a69fa5720820f4de3a1f154b081 | [
"BSD-3-Clause"
] | null | null | null | deephyper/search/nas/model/space/keras_search_space.py | jtchilders/deephyper | 06f9653599757a69fa5720820f4de3a1f154b081 | [
"BSD-3-Clause"
] | null | null | null | from collections.abc import Iterable
from functools import reduce
import networkx as nx
from tensorflow import keras
from tensorflow.python.keras.utils.vis_utils import model_to_dot
from deephyper.core.exceptions.nas.space import (InputShapeOfWrongType,
NodeAlreadyAdded,
StructureHasACycle,
WrongOutputShape,
WrongSequenceToSetOperations)
from deephyper.search.nas.model.space import NxSearchSpace
from deephyper.search.nas.model.space.node import (ConstantNode, Node,
VariableNode)
from deephyper.search.nas.model.space.op.basic import Tensor
from deephyper.search.nas.model.space.op.merge import Concatenate
from deephyper.search.nas.model.space.op.op1d import Identity
class KSearchSpace(NxSearchSpace):
"""A KSearchSpace represents a search space of neural networks.
>>> from tensorflow.keras.utils import plot_model
>>> from deephyper.search.nas.model.space import KSearchSpace
>>> from deephyper.search.nas.model.space.node import VariableNode, ConstantNode
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> struct = KSearchSpace((5, ), (1, ))
>>> vnode = VariableNode()
>>> struct.connect(struct.input_nodes[0], vnode)
>>> vnode.add_op(Dense(10))
>>> vnode.add_op(Dense(20))
>>> output_node = ConstantNode(op=Dense(1))
>>> struct.connect(vnode, output_node)
>>> struct.set_ops([0])
>>> model = struct.create_model()
Args:
input_shape (list(tuple(int))): list of shapes of all inputs.
output_shape (tuple(int)): shape of output.
Raises:
InputShapeOfWrongType: [description]
"""
def __init__(self, input_shape, output_shape, *args, **kwargs):
super().__init__()
if type(input_shape) is tuple:
# we have only one input tensor here
op = Tensor(keras.layers.Input(input_shape, name="input_0"))
self.input_nodes = [ConstantNode(op=op, name='Input_0')]
elif type(input_shape) is list and all(map(lambda x: type(x) is tuple, input_shape)):
# we have a list of input tensors here
self.input_nodes = list()
for i in range(len(input_shape)):
op = Tensor(keras.layers.Input(
input_shape[i], name=f"input_{i}"))
inode = ConstantNode(op=op, name=f'Input_{i}')
self.input_nodes.append(inode)
else:
raise InputShapeOfWrongType(input_shape)
for node in self.input_nodes:
self.graph.add_node(node)
self.output_shape = output_shape
self.output_node = None
self._model = None
@property
def depth(self):
if self._model is None:
raise RuntimeError(
"Can't compute depth of model without creating a model.")
return len(self.longest_path)
@property
def longest_path(self):
if self._model is None:
raise RuntimeError(
"Can't compute longest path of model without creating a model.")
nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))
return nx.algorithms.dag.dag_longest_path(nx_graph)
def set_ops(self, indexes):
"""Set the operations for each node of each cell of the search_space.
Args:
indexes (list): element of list can be float in [0, 1] or int.
Raises:
WrongSequenceToSetOperations: raised when 'indexes' is of a wrong length.
"""
if len(indexes) != len(list(self.variable_nodes)):
raise WrongSequenceToSetOperations(
indexes, list(self.variable_nodes))
for op_i, node in zip(indexes, self.variable_nodes):
node.set_op(op_i)
output_nodes = self.get_output_nodes()
self.output_node = self.set_output_node(self.graph, output_nodes)
def set_output_node(self, graph, output_nodes):
"""Set the output node of the search_space.
Args:
graph (nx.DiGraph): graph of the search_space.
output_nodes (Node): nodes of the current search_space without successors.
Returns:
Node: output node of the search_space.
"""
if len(output_nodes) == 1:
node = ConstantNode(op=Identity(), name='Structure_Output')
graph.add_node(node)
graph.add_edge(output_nodes[0], node)
else:
node = ConstantNode(name='Structure_Output')
op = Concatenate(self, output_nodes)
node.set_op(op=op)
return node
def create_model(self):
"""Create the tensors corresponding to the search_space.
Returns:
A keras.Model for the current search_space with the corresponding set of operations.
"""
output_tensor = self.create_tensor_aux(self.graph, self.output_node)
if output_tensor.get_shape()[1:] != self.output_shape:
raise WrongOutputShape(output_tensor, self.output_shape)
input_tensors = [inode._tensor for inode in self.input_nodes]
self._model = keras.Model(inputs=input_tensors, outputs=output_tensor)
return keras.Model(inputs=input_tensors, outputs=output_tensor)
| 37.383562 | 96 | 0.62697 | from collections.abc import Iterable
from functools import reduce
import networkx as nx
from tensorflow import keras
from tensorflow.python.keras.utils.vis_utils import model_to_dot
from deephyper.core.exceptions.nas.space import (InputShapeOfWrongType,
NodeAlreadyAdded,
StructureHasACycle,
WrongOutputShape,
WrongSequenceToSetOperations)
from deephyper.search.nas.model.space import NxSearchSpace
from deephyper.search.nas.model.space.node import (ConstantNode, Node,
VariableNode)
from deephyper.search.nas.model.space.op.basic import Tensor
from deephyper.search.nas.model.space.op.merge import Concatenate
from deephyper.search.nas.model.space.op.op1d import Identity
class KSearchSpace(NxSearchSpace):
def __init__(self, input_shape, output_shape, *args, **kwargs):
super().__init__()
if type(input_shape) is tuple:
op = Tensor(keras.layers.Input(input_shape, name="input_0"))
self.input_nodes = [ConstantNode(op=op, name='Input_0')]
elif type(input_shape) is list and all(map(lambda x: type(x) is tuple, input_shape)):
self.input_nodes = list()
for i in range(len(input_shape)):
op = Tensor(keras.layers.Input(
input_shape[i], name=f"input_{i}"))
inode = ConstantNode(op=op, name=f'Input_{i}')
self.input_nodes.append(inode)
else:
raise InputShapeOfWrongType(input_shape)
for node in self.input_nodes:
self.graph.add_node(node)
self.output_shape = output_shape
self.output_node = None
self._model = None
@property
def depth(self):
if self._model is None:
raise RuntimeError(
"Can't compute depth of model without creating a model.")
return len(self.longest_path)
@property
def longest_path(self):
if self._model is None:
raise RuntimeError(
"Can't compute longest path of model without creating a model.")
nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))
return nx.algorithms.dag.dag_longest_path(nx_graph)
def set_ops(self, indexes):
if len(indexes) != len(list(self.variable_nodes)):
raise WrongSequenceToSetOperations(
indexes, list(self.variable_nodes))
for op_i, node in zip(indexes, self.variable_nodes):
node.set_op(op_i)
output_nodes = self.get_output_nodes()
self.output_node = self.set_output_node(self.graph, output_nodes)
def set_output_node(self, graph, output_nodes):
if len(output_nodes) == 1:
node = ConstantNode(op=Identity(), name='Structure_Output')
graph.add_node(node)
graph.add_edge(output_nodes[0], node)
else:
node = ConstantNode(name='Structure_Output')
op = Concatenate(self, output_nodes)
node.set_op(op=op)
return node
def create_model(self):
output_tensor = self.create_tensor_aux(self.graph, self.output_node)
if output_tensor.get_shape()[1:] != self.output_shape:
raise WrongOutputShape(output_tensor, self.output_shape)
input_tensors = [inode._tensor for inode in self.input_nodes]
self._model = keras.Model(inputs=input_tensors, outputs=output_tensor)
return keras.Model(inputs=input_tensors, outputs=output_tensor)
| true | true |
f732d581aecc523acc849434530948fe5a2db09a | 622 | py | Python | config.py | YannickBezes/android_server | 08cd8de5d59e92c98ae476935f324a56e88216dc | [
"MIT"
] | null | null | null | config.py | YannickBezes/android_server | 08cd8de5d59e92c98ae476935f324a56e88216dc | [
"MIT"
] | null | null | null | config.py | YannickBezes/android_server | 08cd8de5d59e92c98ae476935f324a56e88216dc | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Absolute path of the directory containing this file; anchors the SQLite
# database location below so it works regardless of the working directory.
base_dir = os.path.abspath(os.path.dirname(__file__))
base_url = '' # Base url prefix for routes (empty = served from root)
app = Flask(__name__)
# Flask / SQLAlchemy configuration.
# NOTE(review): the secret key and third-party API keys below are hard-coded
# in source; they should be loaded from environment variables or a secrets
# store and rotated, since this file is committed to version control.
app.config['SECRET_KEY'] = '$tfx37h5kqv*!$4hMfHAvrfEZQFyz0e4r6$49$t3-i0(uN1uwSBQKh!y%6HVnw4n'
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:////" + os.path.join(base_dir, "database/data.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
OPENWEATHER_API_KEY = "01d414111208781957ed74b5cd09289c"
NEWS_API_KEY = "71487f82-d68e-44f2-b018-2a71aca2188e"
# Create the SQLAlchemy db instance bound to the Flask app.
db = SQLAlchemy(app) | 32.736842 | 98 | 0.779743 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
base_dir = os.path.abspath(os.path.dirname(__file__))
base_url = ''
app = Flask(__name__)
app.config['SECRET_KEY'] = '$tfx37h5kqv*!$4hMfHAvrfEZQFyz0e4r6$49$t3-i0(uN1uwSBQKh!y%6HVnw4n'
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:////" + os.path.join(base_dir, "database/data.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
OPENWEATHER_API_KEY = "01d414111208781957ed74b5cd09289c"
NEWS_API_KEY = "71487f82-d68e-44f2-b018-2a71aca2188e"
db = SQLAlchemy(app) | true | true |
f732d7d713d9fbb434e3c5215b1dcb388f829866 | 6,960 | py | Python | display3d/msic.py | leon-liangwu/PillarsRNN | b6e7d64af4e2819098ae9a87a9dd676ee8288874 | [
"MIT"
] | 1 | 2019-07-30T08:09:24.000Z | 2019-07-30T08:09:24.000Z | display3d/msic.py | leon-liangwu/second.pytorch | b6e7d64af4e2819098ae9a87a9dd676ee8288874 | [
"MIT"
] | null | null | null | display3d/msic.py | leon-liangwu/second.pytorch | b6e7d64af4e2819098ae9a87a9dd676ee8288874 | [
"MIT"
] | null | null | null | from __future__ import division, print_function
import numpy as np
from shapely.geometry import Polygon
import cv2
from collections import defaultdict
from kitti import Calibration
def camera_to_lidar(points, r_rect, velo2cam):
    """Map points from rectified-camera coordinates back to lidar coordinates.

    Args:
        points: (..., 3) or (..., 4) array of camera-frame points.
        r_rect: 4x4 rectification matrix.
        velo2cam: 4x4 lidar-to-camera extrinsic matrix.

    Returns:
        (..., 3) array of the same points in the lidar frame.
    """
    leading_shape = list(points.shape[0:-1])
    if points.shape[-1] == 3:
        # Promote to homogeneous coordinates.
        ones = np.ones(leading_shape + [1])
        points = np.concatenate([points, ones], axis=-1)
    cam_from_velo = np.dot(r_rect, velo2cam)
    lidar_points = np.dot(points, np.linalg.inv(cam_from_velo.T))
    return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
    """Map points from lidar coordinates into rectified-camera coordinates.

    Args:
        points: (..., 3) or (..., 4) array of lidar-frame points.
        r_rect: 4x4 rectification matrix.
        velo2cam: 4x4 lidar-to-camera extrinsic matrix.

    Returns:
        (..., 3) array of the same points in the rectified camera frame.
    """
    leading_shape = list(points.shape[:-1])
    if points.shape[-1] == 3:
        # Promote to homogeneous coordinates.
        ones = np.ones(leading_shape + [1])
        points = np.concatenate([points, ones], axis=-1)
    cam_from_velo = np.dot(r_rect, velo2cam)
    camera_points = np.dot(points, cam_from_velo.T)
    return camera_points[..., :3]
def box_lidar_to_camera(data, r_rect, velo2cam):
    """Convert (N, 7) lidar-frame boxes [x, y, z, w, l, h, r] into
    camera-frame boxes [x, y, z, l, h, w, r].

    Only the centre is transformed; extents are reordered to the camera
    convention and the yaw column is passed through unchanged.
    """
    centers_cam = lidar_to_camera(data[:, 0:3], r_rect, velo2cam)
    w = data[:, 3:4]
    l = data[:, 4:5]
    h = data[:, 5:6]
    yaw = data[:, 6:7]
    return np.concatenate([centers_cam, l, h, w, yaw], axis=1)
def box_camera_to_lidar(data, r_rect, velo2cam):
    """Convert (N, 7) camera-frame boxes [x, y, z, l, h, w, r] into
    lidar-frame boxes [x, y, z, w, l, h, r].

    Only the centre is transformed; extents are reordered to the lidar
    convention and the yaw column is passed through unchanged.
    """
    centers_lidar = camera_to_lidar(data[:, 0:3], r_rect, velo2cam)
    l = data[:, 3:4]
    h = data[:, 4:5]
    w = data[:, 5:6]
    yaw = data[:, 6:7]
    return np.concatenate([centers_lidar, w, l, h, yaw], axis=1)
def cuboid_to_corners(cuboid):
    """Project one labelled cuboid onto the BEV plane as 4 corner points.

    Args:
        cuboid: (cls_id, x, y, z, w, l, h, theta) tuple; theta is rotated by
            +pi/2 before use (label convention differs from the box frame).

    Returns:
        (4, 2) array of (x, y) corners ordered rear-left, rear-right,
        front-right, front-left.
    """
    (cls_id, x, y, z, w, l, h, theta) = cuboid
    yaw = theta + np.pi / 2
    cos_t, sin_t = np.cos(yaw), np.sin(yaw)
    half_l, half_w = l / 2, w / 2
    # (length, width) offsets in the box frame, one row per corner.
    offsets = ((-half_l, half_w), (-half_l, -half_w),
               (half_l, -half_w), (half_l, half_w))
    corners = np.array([
        [x + dl * cos_t - dw * sin_t, y + dl * sin_t + dw * cos_t]
        for dl, dw in offsets
    ])
    return corners
def get_corners_list(reg_list):
    """Convert decoded box regressions into BEV corner arrays.

    Args:
        reg_list: iterable of (prob, w, l, h, centre_x, centre_y, z, theta)
            tuples; theta is the yaw angle in radians.

    Returns:
        list of (4, 2) arrays with (x, y) corners ordered rear-left,
        rear-right, front-right, front-left.
    """
    corners_list = []
    for prob, w, l, h, centre_x, centre_y, z, theta in reg_list:
        cos_t, sin_t = np.cos(theta), np.sin(theta)
        half_l, half_w = l / 2, w / 2
        # (length, width) offsets in the box frame, one row per corner.
        offsets = ((-half_l, half_w), (-half_l, -half_w),
                   (half_l, -half_w), (half_l, half_w))
        corners = np.array([
            [centre_x + dl * cos_t - dw * sin_t,
             centre_y + dl * sin_t + dw * cos_t]
            for dl, dw in offsets
        ])
        corners_list.append(corners)
    return corners_list
def roty(t):
    """Return the 3x3 rotation matrix about the y-axis by angle ``t`` (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([
        [c, 0.0, s],
        [0.0, 1.0, 0.0],
        [-s, 0.0, c],
    ])
def rotz(t):
    """Return the 3x3 rotation matrix about the z-axis by angle ``t`` (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])
def get_corners_3d(reg_list):
    """Convert decoded box regressions into 3D corner arrays.

    Args:
        reg_list: iterable of (prob, w, l, h, centre_x, centre_y, z, theta)
            tuples; theta is the yaw angle in radians and z is the box base.

    Returns:
        (N, 8, 3) array; per box, the 4 base corners (z = base) followed by
        the 4 top corners (z = base + h).
    """
    all_corners = []
    for prob, w, l, h, centre_x, centre_y, z, theta in reg_list:
        # Rotation about z by (-theta - pi/2); the rotz helper is inlined.
        yaw = -theta - np.pi / 2
        c, s = np.cos(yaw), np.sin(yaw)
        rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
        half_l, half_w = l / 2, w / 2
        # Box-frame corner coordinates, columns are corners.
        local = np.array([
            [half_l, half_l, -half_l, -half_l, half_l, half_l, -half_l, -half_l],
            [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w],
            [0.0, 0.0, 0.0, 0.0, h, h, h, h],
        ])
        corners = np.dot(rot, local)
        corners[0, :] += centre_x
        corners[1, :] += centre_y
        corners[2, :] += z
        all_corners.append(corners.T)
    return np.array(all_corners)
def decode_output_box3d(prediction, rpn_mode=False, anchors=None):
    """Decode raw network output into 3D box corners.

    Returns a (corners_3d, reg_list, cls_list) triple where corners_3d is the
    (N, 8, 3) array produced by ``get_corners_3d``.

    NOTE(review): ``get_reg_list_rpn`` is not defined in this module —
    presumably imported elsewhere or missing; verify. The ``rpn_mode``
    parameter is accepted but never used here.
    """
    reg_list, cls_list = get_reg_list_rpn(prediction, anchors)
    corners_3d = get_corners_3d(reg_list)
    # corners_list = get_corners_list(reg_list)
    return corners_3d, reg_list, cls_list
def get_det_info(prediction, bev_data, img_path, rpn_mode=False, anchors=None):
    """Decode detections and project their centres into the camera frame.

    Returns (corners_list, reg_list, prob_list, cls_list).

    NOTE(review): ``get_reg_list`` / ``get_reg_list_rpn`` are not defined in
    this module — presumably imported elsewhere; verify.
    """
    if not rpn_mode:
        reg_list, cls_list = get_reg_list(prediction)
    else:
        reg_list, cls_list = get_reg_list_rpn(prediction, anchors)
    # Derive the KITTI calibration file path from the velodyne scan path.
    calib_path = img_path.replace('velodyne', 'calib')
    calib_path = calib_path.replace('.bin', '.txt')
    calib = Calibration(calib_path)
    # Project lidar-frame centres (columns 4:7) into rectified camera
    # coordinates, scattered back into columns [5, 6, 4] (axis reordering).
    reg_list[:, [5, 6, 4]] = calib.project_velo_to_rect(reg_list[:, 4:7])
    # Flip the sign of the projected y-like column — presumably to match the
    # downstream BEV convention; confirm against get_corners_list usage.
    reg_list[:, 5] *= -1
    corners_list = get_corners_list(reg_list)
    prob_list = []
    for i in range(len(reg_list)):
        prob_list.append(reg_list[i][0])
    return corners_list, reg_list, prob_list, cls_list
def convert_format(boxes_array):
    """Turn an array of 4-corner BEV boxes into shapely Polygons.

    Args:
        boxes_array: array of shape [num_boxes, 4, 2] with (x, y) corners.

    Returns:
        numpy object array of shapely.geometry.Polygon instances.
    """
    polygons = []
    for box in boxes_array:
        ring = [(box[i, 0], box[i, 1]) for i in range(4)]
        polygons.append(Polygon(ring))
    return np.array(polygons)
def compute_iou(box1, box2):
    """Return the intersection-over-union of two shapely polygons.

    Args:
        box1, box2: polygon objects exposing ``intersection``/``union``
            returning geometries with an ``area`` attribute.
    """
    inter_area = box1.intersection(box2).area
    union_area = box1.union(box2).area
    return inter_area / union_area
def merge_mini_batch(batch_list, _unused=False):
    """Collate a list of per-sample example dicts into one batched dict.

    Args:
        batch_list: list of dicts mapping key -> numpy array; all dicts are
            expected to share the same keys.
        _unused: ignored; kept for collate-fn signature compatibility.

    Returns:
        dict with the same keys where:
        - 'pillar' arrays are concatenated along axis 0,
        - 'coords' arrays get the sample index prepended as an extra leading
          column, then are concatenated along axis 0,
        - every other key is stacked along a new leading batch axis.
    """
    # Removed leftover debug ``print`` calls and the unused ``batch_size``
    # local from the original implementation.
    example_merged = defaultdict(list)
    for example in batch_list:
        for k, v in example.items():
            example_merged[k].append(v)
    ret = {}
    for key, elems in example_merged.items():
        if key in ['pillar']:
            ret[key] = np.concatenate(elems, axis=0)
        elif key == 'coords':
            # Prepend the sample index so concatenated coordinates remain
            # attributable to their originating sample.
            coors = []
            for i, coor in enumerate(elems):
                coor_pad = np.pad(
                    coor, ((0, 0), (1, 0)),
                    mode='constant',
                    constant_values=i)
                coors.append(coor_pad)
            ret[key] = np.concatenate(coors, axis=0)
        else:
            ret[key] = np.stack(elems, axis=0)
    return ret
| 32.372093 | 102 | 0.590086 | from __future__ import division, print_function
import numpy as np
from shapely.geometry import Polygon
import cv2
from collections import defaultdict
from kitti import Calibration
def camera_to_lidar(points, r_rect, velo2cam):
points_shape = list(points.shape[0:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
lidar_points = np.dot(points, np.linalg.inv(np.dot(r_rect, velo2cam).T))
return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
points_shape = list(points.shape[:-1])
if points.shape[-1] == 3:
points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
camera_points = np.dot(points, np.dot(r_rect, velo2cam).T)
return camera_points[..., :3]
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def cuboid_to_corners(cuboid):
(cls_id, x, y, z, w, l, h, theta) = cuboid
theta = (theta + np.pi / 2)
cos_t = np.cos(theta)
sin_t = np.sin(theta)
centre_x = x
centre_y = y
rear_left_x = centre_x - l / 2 * cos_t - w / 2 * sin_t
rear_left_y = centre_y - l / 2 * sin_t + w / 2 * cos_t
rear_right_x = centre_x - l / 2 * cos_t + w / 2 * sin_t
rear_right_y = centre_y - l / 2 * sin_t - w / 2 * cos_t
front_right_x = centre_x + l / 2 * cos_t + w / 2 * sin_t
front_right_y = centre_y + l / 2 * sin_t - w / 2 * cos_t
front_left_x = centre_x + l / 2 * cos_t - w / 2 * sin_t
front_left_y = centre_y + l / 2 * sin_t + w / 2 * cos_t
corners = np.array([rear_left_x, rear_left_y, rear_right_x, rear_right_y,
front_right_x, front_right_y, front_left_x, front_left_y]).reshape((4, 2))
return corners
def get_corners_list(reg_list):
corners_list = []
for reg in reg_list:
(prob, w, l, h, centre_x, centre_y, z, theta) = reg
cos_t = np.cos(theta)
sin_t = np.sin(theta)
rear_left_x = centre_x - l / 2 * cos_t - w / 2 * sin_t
rear_left_y = centre_y - l / 2 * sin_t + w / 2 * cos_t
rear_right_x = centre_x - l / 2 * cos_t + w / 2 * sin_t
rear_right_y = centre_y - l / 2 * sin_t - w / 2 * cos_t
front_right_x = centre_x + l / 2 * cos_t + w / 2 * sin_t
front_right_y = centre_y + l / 2 * sin_t - w / 2 * cos_t
front_left_x = centre_x + l / 2 * cos_t - w / 2 * sin_t
front_left_y = centre_y + l / 2 * sin_t + w / 2 * cos_t
corners = np.array([rear_left_x, rear_left_y, rear_right_x, rear_right_y,
front_right_x, front_right_y, front_left_x, front_left_y]).reshape((4, 2))
corners_list.append(corners)
return corners_list
def roty(t):
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotz(t):
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def get_corners_3d(reg_list):
corners_list = []
for reg in reg_list:
(prob, w, l, h, centre_x, centre_y, z, theta) = reg
R = rotz(-theta-np.pi/2)
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
z_corners = [0, 0, 0, 0, h, h, h, h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + centre_x
corners_3d[1, :] = corners_3d[1, :] + centre_y
corners_3d[2, :] = corners_3d[2, :] + z
corners_3d = corners_3d.transpose(1, 0)
corners_list.append(corners_3d)
corners_list = np.array(corners_list)
return corners_list
def decode_output_box3d(prediction, rpn_mode=False, anchors=None):
reg_list, cls_list = get_reg_list_rpn(prediction, anchors)
corners_3d = get_corners_3d(reg_list)
return corners_3d, reg_list, cls_list
def get_det_info(prediction, bev_data, img_path, rpn_mode=False, anchors=None):
if not rpn_mode:
reg_list, cls_list = get_reg_list(prediction)
else:
reg_list, cls_list = get_reg_list_rpn(prediction, anchors)
calib_path = img_path.replace('velodyne', 'calib')
calib_path = calib_path.replace('.bin', '.txt')
calib = Calibration(calib_path)
reg_list[:, [5, 6, 4]] = calib.project_velo_to_rect(reg_list[:, 4:7])
reg_list[:, 5] *= -1
corners_list = get_corners_list(reg_list)
prob_list = []
for i in range(len(reg_list)):
prob_list.append(reg_list[i][0])
return corners_list, reg_list, prob_list, cls_list
def convert_format(boxes_array):
polygons = [Polygon([(box[i, 0], box[i, 1]) for i in range(4)]) for box in boxes_array]
return np.array(polygons)
def compute_iou(box1, box2):
iou = box1.intersection(box2).area / box1.union(box2).area
return iou
def merge_mini_batch(batch_list, _unused=False):
batch_size = len(batch_list)
example_merged = defaultdict(list)
for example in batch_list:
for k, v in example.items():
example_merged[k].append(v)
ret = {}
for key, elems in example_merged.items():
if key in ['pillar']:
print('pillar shape', elems[0].shape)
ret[key] = np.concatenate(elems, axis=0)
elif key == 'coords':
coors = []
for i, coor in enumerate(elems):
print('coor shape', coor.shape)
coor_pad = np.pad(
coor, ((0, 0), (1, 0)),
mode='constant',
constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
else:
ret[key] = np.stack(elems, axis=0)
return ret
| true | true |
f732d7f3a8fb5a1b8081d2dda04b1b24da73078f | 12,981 | py | Python | farm/file_utils.py | cregouby/FARM | 552bc07acffbce4f1f84d926c040fdd17b4ddeb3 | [
"Apache-2.0"
] | null | null | null | farm/file_utils.py | cregouby/FARM | 552bc07acffbce4f1f84d926c040fdd17b4ddeb3 | [
"Apache-2.0"
] | null | null | null | farm/file_utils.py | cregouby/FARM | 552bc07acffbce4f1f84d926c040fdd17b4ddeb3 | [
"Apache-2.0"
] | null | null | null | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import numpy as np
import requests
from botocore.exceptions import ClientError
from dotmap import DotMap
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "farm")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
FARM_CACHE = Path(os.getenv("FARM_CACHE", default_cache_path))
except (AttributeError, ImportError):
FARM_CACHE = os.getenv("FARM_CACHE", default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename for ``url``.

    The name is the SHA-256 hex digest of the URL; when ``etag`` is given
    its digest is appended after a period so different resource versions
    cache under distinct names.
    """
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename = "{}.{}".format(filename, sha256(etag.encode("utf-8")).hexdigest())
    return filename
def filename_to_url(filename, cache_dir=None):
    """Recover the (url, etag) pair recorded for a cached ``filename``.

    The etag may be ``None``. Raises ``EnvironmentError`` when either the
    cached file or its ``<filename>.json`` metadata sidecar is missing.
    Defaults to the module-level FARM_CACHE directory.
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))
    meta_path = cache_path + ".json"
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path(url_or_filename, cache_dir=None):
    """Resolve ``url_or_filename`` to a local file path.

    If it is an http(s)/s3 URL, the resource is downloaded into the cache
    (if not already present) and the cached path is returned. If it is an
    existing local path it is returned unchanged.

    Args:
        url_or_filename: URL or local path (str or pathlib.Path).
        cache_dir: cache directory; defaults to the module-level FARM_CACHE.

    Raises:
        EnvironmentError: a plain path was given but the file does not exist.
        ValueError: the string is neither a supported URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ("http", "https", "s3"):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    elif parsed.scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError(
            "unable to parse {} as a URL or as a local path".format(url_or_filename)
        )
def split_s3_path(url):
    """Split an ``s3://bucket/key`` URL into (bucket_name, key).

    Raises ValueError when the URL has no bucket or no key part.
    """
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # Drop exactly one leading '/' left over from the URL path.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func):
    """Decorator for S3 helper functions that improves error messages.

    Translates a boto3 ``ClientError`` with HTTP status 404 into an
    ``EnvironmentError`` naming the missing URL; all other client errors
    are re-raised unchanged.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            else:
                raise
    return wrapper
@s3_request
def s3_etag(url):
    """Return the ETag of the S3 object at ``url`` (``s3://bucket/key``)."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Download the S3 object at ``url`` directly into the open ``temp_file``."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
    """Stream an HTTP(S) URL into ``temp_file`` with a tqdm progress bar.

    The total is taken from the Content-Length header when present;
    otherwise the bar is indeterminate.
    """
    response = requests.get(url, stream=True, proxies=proxies)
    content_length = response.headers.get("Content-Length")
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:
            # Skip keep-alive chunks.
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """Return a local path for ``url``, downloading into the cache if needed.

    The cache filename is derived from the URL and (when reachable) the
    server's ETag, so a changed remote resource is re-downloaded. When the
    server cannot be reached (etag is None), the most recent previously
    cached copy for this URL is reused if one exists. Alongside the cached
    file a ``<name>.json`` sidecar records the URL and ETag.

    Args:
        url: http(s) or s3 URL of the resource.
        cache_dir: cache directory; defaults to the module-level FARM_CACHE.

    Returns:
        Path to the cached file.
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            response = requests.head(url, allow_redirects=True)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            etag = None
    if sys.version_info[0] == 2 and etag is not None:
        etag = etag.decode("utf-8")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
        matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {"url": url, "etag": etag}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(
                        output_string, "utf-8"
                    )  # The beauty of python 2
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    """Read a file with one item per line into a de-duplicated set.

    Trailing whitespace (including the newline) is stripped from each line.
    """
    with open(filename, "r", encoding="utf-8") as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path, dot=True, lower=True):
    """Return the file extension of ``path``.

    Args:
        path: file path or name.
        dot: keep the leading dot when True.
        lower: lowercase the extension when True.
    """
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
def read_config(path, flattend=False):
    """Load a two-level JSON config and resolve each leaf's value/default.

    Each leaf is expected to be a dict with ``value`` and ``default`` keys;
    ``value`` wins unless it is null. The groups 'general', 'task',
    'parameter' and 'logging' are merged when ``flattend`` is True.

    NOTE(review): the parameter name ``flattend`` (sic) is part of the
    public interface and is kept as-is. Deeper-than-2 configs are only
    logged as errors, not rejected — the subsequent ``arg["value"]`` access
    would raise KeyError; confirm intended behavior.

    Returns:
        DotMap over the flattened dict (flattend=True) or the grouped dict.
    """
    if path:
        with open(path) as json_data_file:
            conf_args = json.load(json_data_file)
    else:
        raise ValueError("No config provided for classifier")
    def getArgValue(arg):
        # Resolve a leaf {value, default} pair; 'value' wins unless null.
        if "value" not in arg:
            logger.error(
                "Only depth 2 config files supported. Failed to convert: %s" % str(arg)
            )
        return arg["value"] if (arg["value"] is not None) else arg["default"]
    # flatten last part of config, take either value or default as value
    for gk, gv in conf_args.items():
        for k, v in gv.items():
            if isinstance(getArgValue(v), dict):
                logger.error("Config is too deeply nested, at %s" % str(v))
            conf_args[gk][k] = getArgValue(v)
    # DotMap for making nested dictionary accessible through dot notation
    flat_args = dict(
        conf_args["general"],
        **conf_args["task"],
        **conf_args["parameter"],
        **conf_args["logging"],
    )
    if flattend:
        args = DotMap(flat_args, _dynamic=False)
    else:
        args = DotMap(conf_args, _dynamic=False)
    return args
def unnestConfig(config, flattened=False):
    """
    Expand a config whose parameters may hold lists of values into a list
    of configs holding scalar values (one config per combination).

    If a config parameter is of type list this list is iterated over and
    config objects without lists are returned. Can handle lists inside any
    number of parameters, for shallow (``flattened=True``) or one-level
    nested configs.

    :param config: dict-like config (DotMap or plain dict).
    :param flattened: True when *config* has a single level of keys.
    :return: list of configs free of list-valued parameters.
    """
    nestedKeys = []
    nestedVals = []
    if flattened:
        for k, v in config.items():
            # 'layer_dims' legitimately is a list: do not expand it.
            if isinstance(v, list) and k != "layer_dims":
                nestedKeys.append(k)
                nestedVals.append(v)
    else:
        for gk, gv in config.items():
            if gk != "task":
                for k, v in gv.items():
                    # Fixed: the original checked isinstance(v, list) twice.
                    if isinstance(v, list):
                        # 'layer_dims' legitimately is a list: keep as is.
                        if k != "layer_dims":
                            nestedKeys.append([gk, k])
                            nestedVals.append(v)
                    elif isinstance(v, dict):
                        logger.error("Config too deep!")
    if len(nestedKeys) == 0:
        return [config]
    if flattened:
        logger.info("Nested config at parameters: %s" % (", ".join(nestedKeys)))
    else:
        logger.info(
            "Nested config at parameters: %s"
            % (", ".join(".".join(x) for x in nestedKeys))
        )
    unnestedConfig = []
    # meshgrid yields every combination; one dimension per list parameter.
    mesh = np.meshgrid(*nestedVals)
    # Flatten into shape [num_parameters, num_combinations] so we can
    # iterate in 2d over any parameter combination.
    mesh = [x.flatten() for x in mesh]
    # loop over all combinations
    for i in range(len(mesh[0])):
        tempconfig = config.copy()
        for j, k in enumerate(nestedKeys):
            if isinstance(k, str):
                # Flattened key: overwrite with the i-th combination value.
                tempconfig[k] = mesh[j][i]
            elif len(k) == 2:
                tempconfig[k[0]][k[1]] = mesh[j][i]  # nested dictionary keys
            else:
                logger.error("Config too deep!")
        unnestedConfig.append(tempconfig)
    return unnestedConfig
| 33.716883 | 124 | 0.616748 | from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import numpy as np
import requests
from botocore.exceptions import ClientError
from dotmap import DotMap
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "farm")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
FARM_CACHE = Path(os.getenv("FARM_CACHE", default_cache_path))
except (AttributeError, ImportError):
FARM_CACHE = os.getenv("FARM_CACHE", default_cache_path)
logger = logging.getLogger(__name__)
def url_to_filename(url, etag=None):
    """
    Convert *url* into a repeatable, filesystem-safe filename.

    The name is the SHA-256 hex digest of the URL; when *etag* is given,
    a period and the digest of the etag are appended.
    """
    digest = sha256(url.encode("utf-8")).hexdigest()
    if not etag:
        return digest
    return "{}.{}".format(digest, sha256(etag.encode("utf-8")).hexdigest())
def filename_to_url(filename, cache_dir=None):
    """
    Return the (url, etag) recorded for a cached *filename*.

    Looks for ``<filename>.json`` next to the cached file and reads the
    metadata stored at download time. Raises EnvironmentError when either
    the file or its metadata sidecar is missing.
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + ".json"
    # Check the data file first, then its sidecar (same order as before).
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError("file {} not found".format(required))
    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path(url_or_filename, cache_dir=None):
    """
    Resolve *url_or_filename* to a usable local path.

    Remote http(s)/s3 URLs are downloaded (or served from *cache_dir*)
    via get_from_cache(); existing local paths are returned untouched.
    Raises EnvironmentError for a missing local file and ValueError for
    anything that is neither a URL nor a path.
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3:
        # Path objects are not handled below; normalize them to str.
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ("http", "https", "s3"):
        # Remote resource: hand over to the download/cache machinery.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if scheme == "":
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError(
        "unable to parse {} as a URL or as a local path".format(url_or_filename)
    )
def split_s3_path(url):
    """Split an ``s3://bucket/key`` URL into (bucket_name, s3_path)."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # Drop the single slash urlparse leaves at the front of the key.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func):
    """
    Decorator translating boto3 404 ClientErrors into EnvironmentError,
    so that S3 misses look like missing local files to callers.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Anything but a 404 is a genuine S3 failure: re-raise as-is.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url):
    """Return the ETag ("e_tag") of the object at the given S3 *url*."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Download the object at the S3 *url* into the open *temp_file*."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
    """
    Stream *url* over HTTP into the open *temp_file*, showing a byte
    progress bar (total is unknown when Content-Length is absent).
    """
    response = requests.get(url, stream=True, proxies=proxies)
    declared = response.headers.get("Content-Length")
    progress = tqdm(unit="B", total=None if declared is None else int(declared))
    for chunk in response.iter_content(chunk_size=1024):
        # Keep-alive chunks are empty; skip them.
        if not chunk:
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Download *url* into *cache_dir* (default FARM_CACHE) unless a copy is
    already cached, and return the path to the cached file.

    The cache key is a hash of the URL plus, when the server provides
    one, its ETag. When no ETag can be obtained (e.g. offline) the last
    matching cache entry is reused. A ``<file>.json`` sidecar records the
    url/etag metadata for filename_to_url().
    """
    if cache_dir is None:
        cache_dir = FARM_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            response = requests.head(url, allow_redirects=True)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            # No connection: fall back to any previously cached copy below.
            etag = None
    if sys.version_info[0] == 2 and etag is not None:
        etag = etag.decode("utf-8")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
        matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
        if matching_files:
            # NOTE(review): picks the last entry of os.listdir(), which is
            # not guaranteed to be the newest file -- confirm acceptable.
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {"url": url, "etag": etag}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                if sys.version_info[0] == 2 and isinstance(output_string, str):
                    output_string = unicode(
                        output_string, "utf-8"
                    )  # The beauty of python 2
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    """
    Read *filename* and return the set of its lines.

    Each line is stripped of trailing whitespace; duplicates collapse
    because the result is a set. Expected file format is one item per line.
    """
    with open(filename, "r", encoding="utf-8") as handle:
        return {entry.rstrip() for entry in handle}
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of *path*.

    :param dot: keep the leading dot when True.
    :param lower: fold the extension to lower case when True.
    """
    _, extension = os.path.splitext(path)
    if not dot:
        extension = extension[1:]
    if lower:
        extension = extension.lower()
    return extension
def read_config(path, flattend=False):
    """
    Read a two-level JSON config file and return it as a DotMap.

    Each leaf is expected to be a dict holding a "value" and a "default"
    entry; the leaf is replaced by value (when not None) or default.
    With ``flattend=True`` the "general", "task", "parameter" and
    "logging" groups are merged into one flat namespace.

    :param path: path to the JSON config file (required).
    :param flattend: return a flat DotMap instead of the nested one.
    :raises ValueError: when *path* is empty.
    """
    if path:
        with open(path) as json_data_file:
            conf_args = json.load(json_data_file)
    else:
        raise ValueError("No config provided for classifier")

    def getArgValue(arg):
        # A leaf without "value" means the file is nested deeper than the
        # supported two levels; the KeyError below will then surface it.
        if "value" not in arg:
            logger.error(
                "Only depth 2 config files supported. Failed to convert: %s" % str(arg)
            )
        return arg["value"] if (arg["value"] is not None) else arg["default"]

    # flatten last part of config, take either value or default as value
    for gk, gv in conf_args.items():
        for k, v in gv.items():
            if isinstance(getArgValue(v), dict):
                logger.error("Config is too deeply nested, at %s" % str(v))
            conf_args[gk][k] = getArgValue(v)
    # DotMap for making nested dictionary accessible through dot notation
    flat_args = dict(
        conf_args["general"],
        **conf_args["task"],
        **conf_args["parameter"],
        **conf_args["logging"],
    )
    if flattend:
        args = DotMap(flat_args, _dynamic=False)
    else:
        args = DotMap(conf_args, _dynamic=False)
    return args
def unnestConfig(config, flattened=False):
    """
    Expand a config whose parameters may hold lists of values into a list
    of configs holding scalar values (one config per combination).

    If a config parameter is of type list this list is iterated over and
    config objects without lists are returned. Can handle lists inside any
    number of parameters, for shallow (``flattened=True``) or one-level
    nested configs.

    :param config: dict-like config (DotMap or plain dict).
    :param flattened: True when *config* has a single level of keys.
    :return: list of configs free of list-valued parameters.
    """
    nestedKeys = []
    nestedVals = []
    if flattened:
        for k, v in config.items():
            # 'layer_dims' legitimately is a list: do not expand it.
            if isinstance(v, list) and k != "layer_dims":
                nestedKeys.append(k)
                nestedVals.append(v)
    else:
        for gk, gv in config.items():
            if gk != "task":
                for k, v in gv.items():
                    # Fixed: the original checked isinstance(v, list) twice.
                    if isinstance(v, list):
                        # 'layer_dims' legitimately is a list: keep as is.
                        if k != "layer_dims":
                            nestedKeys.append([gk, k])
                            nestedVals.append(v)
                    elif isinstance(v, dict):
                        logger.error("Config too deep!")
    if len(nestedKeys) == 0:
        return [config]
    if flattened:
        logger.info("Nested config at parameters: %s" % (", ".join(nestedKeys)))
    else:
        logger.info(
            "Nested config at parameters: %s"
            % (", ".join(".".join(x) for x in nestedKeys))
        )
    unnestedConfig = []
    # meshgrid yields every combination; one dimension per list parameter.
    mesh = np.meshgrid(*nestedVals)
    # Flatten into shape [num_parameters, num_combinations] so we can
    # iterate in 2d over any parameter combination.
    mesh = [x.flatten() for x in mesh]
    # loop over all combinations
    for i in range(len(mesh[0])):
        tempconfig = config.copy()
        for j, k in enumerate(nestedKeys):
            if isinstance(k, str):
                # Flattened key: overwrite with the i-th combination value.
                tempconfig[k] = mesh[j][i]
            elif len(k) == 2:
                tempconfig[k[0]][k[1]] = mesh[j][i]  # nested dictionary keys
            else:
                logger.error("Config too deep!")
        unnestedConfig.append(tempconfig)
    return unnestedConfig
| true | true |
f732d8e088970f5fe7578bbc230db3fe4c52c08e | 34,023 | py | Python | swift/proxy/controllers/oio/obj.py | open-io/swift | 267940e6d581ab689c575b4dfaa422eed93bec49 | [
"Apache-2.0"
] | 1 | 2021-09-30T14:00:22.000Z | 2021-09-30T14:00:22.000Z | swift/proxy/controllers/oio/obj.py | open-io/swift | 267940e6d581ab689c575b4dfaa422eed93bec49 | [
"Apache-2.0"
] | 2 | 2020-10-09T13:20:33.000Z | 2020-10-28T16:02:16.000Z | swift/proxy/controllers/oio/obj.py | open-io/swift | 267940e6d581ab689c575b4dfaa422eed93bec49 | [
"Apache-2.0"
] | 2 | 2020-09-21T14:24:56.000Z | 2020-10-01T10:08:46.000Z | # Copyright (c) 2010-2020 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mimetypes
import time
import math
from swift import gettext_ as _
from swift.common.utils import (
clean_content_type, config_true_value, Timestamp, public,
close_if_possible, closing_if_possible)
from swift.common.constraints import check_metadata, check_object_creation
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware.versioned_writes.legacy \
import DELETE_MARKER_CONTENT_TYPE
from swift.common.oio_utils import check_if_none_match, \
handle_not_allowed, handle_oio_timeout, handle_service_busy, \
REQID_HEADER, BUCKET_NAME_PROP, MULTIUPLOAD_SUFFIX, \
obj_version_from_env
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPConflict, HTTPPreconditionFailed, HTTPRequestTimeout, \
HTTPUnprocessableEntity, HTTPClientDisconnect, HTTPCreated, \
HTTPNoContent, Response, HTTPInternalServerError, multi_range_iterator, \
HTTPServiceUnavailable, HTTPException
from swift.common.request_helpers import is_sys_or_user_meta, \
is_object_transient_sysmeta, resolve_etag_is_at_header
from swift.common.wsgi import make_subrequest
from swift.proxy.controllers.base import set_object_info_cache, \
delay_denial, cors_validation, get_object_info
from swift.proxy.controllers.obj import check_content_type
from swift.proxy.controllers.obj import BaseObjectController as \
BaseObjectController
from oio.common import exceptions
from oio.common.constants import FORCEVERSIONING_HEADER
from oio.common.http import ranges_from_http_header
from oio.common.storage_method import STORAGE_METHODS
from oio.api.object_storage import _sort_chunks
from oio.common.exceptions import SourceReadTimeout
# Sysmeta header used to carry the S3 bucket name on objects.
BUCKET_NAME_HEADER = 'X-Object-Sysmeta-Oio-Bucket-Name'
# Property key marking an object as a static large object manifest.
SLO = 'x-static-large-object'
# FIXME(FVE): we do support versioning now
SUPPORT_VERSIONING = True
class ObjectControllerRouter(object):
    """Policy-to-controller router: every policy maps to ObjectController."""

    def __getitem__(self, policy):
        # The oio backend uses a single controller class for all policies.
        return ObjectController
class StreamRangeIterator(object):
    """
    Data stream wrapper that handles range requests and deals with exceptions.
    """

    def __init__(self, request, stream):
        self.req = request
        self._stream = stream

    def app_iter_range(self, _start, _stop):
        """Serve a single-range request (bounds already applied upstream)."""
        # This will be called when there is only one range,
        # no need to check the number of bytes
        return self.stream()

    def _chunked_app_iter_range(self, start, stop):
        """Yield the chunk(s) backing one range, then stop."""
        # The stream generator gives us one "chunk" per range, and as we
        # are called once for each range, we just end the generator after
        # forwarding it. Fixed: the original raised StopIteration here,
        # which PEP 479 turns into RuntimeError on Python 3.7+.
        for dat in self.stream():
            yield dat
        return

    def app_iter_ranges(self, ranges, content_type,
                        boundary, content_size,
                        *_args, **_kwargs):
        """Serve a multi-range request as a multipart byte stream."""
        for chunk in multi_range_iterator(
                ranges, content_type, boundary, content_size,
                self._chunked_app_iter_range):
            yield chunk

    def stream(self, *args, **kwargs):
        """
        Get the wrapped data stream.
        """
        try:
            for dat in self._stream:
                yield dat
        except (exceptions.ServiceBusy, exceptions.ServiceUnavailable) as err:
            # We cannot use the handle_service_busy() decorator
            # because it returns the exception object instead of raising it.
            headers = dict()
            headers['Retry-After'] = '1'
            raise HTTPServiceUnavailable(request=self.req, headers=headers,
                                         body=str(err))

    def __iter__(self):
        return self.stream()
class ExpectedSizeReader(object):
    """Only accept as a valid EOF an exact number of bytes received."""

    def __init__(self, source, expected):
        self.source = source
        self.expected = expected
        self.consumed = 0

    def _account(self, data):
        """Track consumed bytes; reject an EOF seen before *expected* bytes."""
        if data:
            self.consumed += len(data)
        elif self.consumed != self.expected:
            raise exceptions.SourceReadError("Truncated input")
        return data

    def read(self, *args, **kwargs):
        return self._account(self.source.read(*args, **kwargs))

    def readline(self, *args, **kwargs):
        return self._account(self.source.readline(*args, **kwargs))

    def close(self):
        return close_if_possible(self.source)
class ObjectController(BaseObjectController):
    """Proxy object controller backed by the oio-sds storage API."""

    # Well-known, non-metadata headers that are persisted with the object.
    allowed_headers = {'content-disposition', 'content-encoding',
                       'x-delete-at', 'x-object-manifest',
                       'x-static-large-object'}
    @public
    @cors_validation
    @delay_denial
    def HEAD(self, req):
        """Handle HEAD requests: object metadata only, via GETorHEAD."""
        return self.GETorHEAD(req)
    @public
    @cors_validation
    @delay_denial
    def GET(self, req):
        """Handle GET requests: metadata plus data stream, via GETorHEAD."""
        return self.GETorHEAD(req)
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def GETorHEAD(self, req):
        """
        Handle HTTP GET or HEAD requests.

        Resolves container info (read ACL, storage policy), runs the
        authorization callback if present, then delegates to the
        HEAD/GET specific helpers and caches the resulting object info.
        """
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['read_acl']
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                # Authorization middleware denied the request.
                return aresp
        if req.method == 'HEAD':
            resp = self.get_object_head_resp(req)
        else:
            resp = self.get_object_fetch_resp(req)
        set_object_info_cache(self.app, req.environ, self.account_name,
                              self.container_name, self.object_name, resp)
        if ';' in resp.headers.get('content-type', ''):
            # Normalize content-type values that carry parameters.
            resp.content_type = clean_content_type(
                resp.headers['content-type'])
        return resp
# TODO(FVE): get rid of this
# This is not needed if we rely on swift's object versioning.
    def enforce_versioning(self, req):
        """
        Enforce the versioning mode of a container just before executing
        an object operation. This is useful when the current object is not
        stored in the "main" container but in a shard, where the versioning
        mode may not have been set yet.
        """
        if not SUPPORT_VERSIONING:
            return None
        # There is no reason to save several versions of segments:
        # a new version of a multipart object manifest will point to a
        # completely different set of segments, with another uploadId.
        bucket_name = req.environ.get('s3api.bucket')
        if not bucket_name \
                or self.container_name == bucket_name \
                or self.container_name.endswith(MULTIUPLOAD_SUFFIX):
            return None
        # We can't use _get_info_from_caches as it would use local worker cache
        # first and an update of versioning mode may not be detected.
        memcache = getattr(self.app, 'memcache', None) or \
            req.environ.get('swift.cache')
        if memcache is None:
            return None
        key = "/".join(("versioning", self.account_name, bucket_name))
        val = memcache.get(key)
        if val is not None:
            # Cache hit: '' means "no versioning policy set".
            if val != '':
                req.headers[FORCEVERSIONING_HEADER] = val
            return
        # Cache miss: read the versioning policy from the backend,
        # then populate the cache (possibly with '').
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        try:
            meta = self.app.storage.container_get_properties(
                self.account_name, bucket_name, headers=oio_headers,
                cache=oio_cache, perfdata=perfdata)
        except exceptions.NoSuchContainer:
            raise HTTPNotFound(request=req)
        val = meta['system'].get('sys.m2.policy.version', '')
        memcache.set(key, val)
        if val:
            req.headers[FORCEVERSIONING_HEADER] = val
    def get_object_head_resp(self, req):
        """
        Build the response for a HEAD request: load object metadata and,
        when ``app.check_state`` is set, also verify chunk availability.
        """
        storage = self.app.storage
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        version = obj_version_from_env(req.environ)
        force_master = False
        while True:
            try:
                if self.app.check_state:
                    # Also locate chunks so their presence can be checked.
                    metadata, chunks = storage.object_locate(
                        self.account_name, self.container_name,
                        self.object_name, version=version,
                        headers=oio_headers, force_master=force_master,
                        cache=oio_cache, perfdata=perfdata)
                else:
                    metadata = storage.object_get_properties(
                        self.account_name, self.container_name,
                        self.object_name, version=version,
                        headers=oio_headers, force_master=force_master,
                        cache=oio_cache, perfdata=perfdata)
                break
            except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
                if force_master or not \
                        self.container_name.endswith(MULTIUPLOAD_SUFFIX):
                    # Either the request failed with the master,
                    # or it is not an MPU
                    return HTTPNotFound(request=req)
                # This part appears in the manifest, so it should be there.
                # To be sure, we must go check the master
                # in case of desynchronization.
                force_master = True
        if self.app.check_state:
            storage_method = STORAGE_METHODS.load(metadata['chunk_method'])
            # TODO(mbo): use new property of STORAGE_METHODS
            min_chunks = storage_method.ec_nb_data if storage_method.ec else 1
            chunks_by_pos = _sort_chunks(chunks, storage_method.ec)
            for idx, entries in enumerate(chunks_by_pos.items()):
                if idx != entries[0]:
                    # A metachunk position is missing: object is incomplete.
                    return HTTPBadRequest(request=req)
                nb_chunks_ok = 0
                for entry in entries[1]:
                    try:
                        storage.blob_client.chunk_head(
                            entry['url'], headers=oio_headers)
                        nb_chunks_ok += 1
                    except exceptions.OioException:
                        # Unreachable chunk: keep probing the others.
                        pass
                    if nb_chunks_ok >= min_chunks:
                        break
                else:
                    # Not enough chunks reachable for this metachunk.
                    return HTTPBadRequest(request=req)
        resp = self.make_object_response(req, metadata)
        return resp
    def get_object_fetch_resp(self, req):
        """
        Build the response for a GET request: fetch object metadata and a
        data stream (honoring any Range header) from the oio backend.
        """
        storage = self.app.storage
        if req.headers.get('Range'):
            ranges = ranges_from_http_header(req.headers.get('Range'))
        else:
            ranges = None
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        force_master = False
        while True:
            try:
                metadata, stream = storage.object_fetch(
                    self.account_name, self.container_name, self.object_name,
                    ranges=ranges, headers=oio_headers,
                    version=obj_version_from_env(req.environ),
                    force_master=force_master, cache=oio_cache,
                    perfdata=perfdata)
                break
            except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
                if force_master or not \
                        self.container_name.endswith(MULTIUPLOAD_SUFFIX):
                    # Either the request failed with the master,
                    # or it is not an MPU
                    return HTTPNotFound(request=req)
                # This part appears in the manifest, so it should be there.
                # To be sure, we must go check the master
                # in case of desynchronization.
                force_master = True
        resp = self.make_object_response(req, metadata, stream)
        return resp
    def make_object_response(self, req, metadata, stream=None):
        """
        Translate oio object *metadata* (and optional data *stream*) into
        a swob Response, mapping object properties to response headers.
        """
        conditional_etag = resolve_etag_is_at_header(
            req, metadata.get('properties'))
        resp = Response(request=req, conditional_response=True,
                        conditional_etag=conditional_etag)
        if config_true_value(metadata['deleted']):
            # Versioning delete marker: expose it through its content-type.
            resp.headers['Content-Type'] = DELETE_MARKER_CONTENT_TYPE
        else:
            resp.headers['Content-Type'] = metadata.get(
                'mime_type', 'application/octet-stream')
        properties = metadata.get('properties')
        if properties:
            # Only expose metadata and explicitly allowed headers.
            for k, v in properties.items():
                if is_sys_or_user_meta('object', k) or \
                        is_object_transient_sysmeta(k) or \
                        k.lower() in self.allowed_headers:
                    resp.headers[str(k)] = v
        hash_ = metadata.get('hash')
        if hash_ is not None:
            hash_ = hash_.lower()
        resp.headers['etag'] = hash_
        resp.headers['x-object-sysmeta-version-id'] = metadata['version']
        resp.last_modified = int(metadata['mtime'])
        if stream:
            # Whether we are bothered with ranges or not, we wrap the
            # stream in order to handle exceptions.
            resp.app_iter = StreamRangeIterator(req, stream)
        length_ = metadata.get('length')
        if length_ is not None:
            length_ = int(length_)
        resp.content_length = length_
        resp.content_encoding = metadata.get('encoding')
        resp.accept_ranges = 'bytes'
        return resp
def load_object_metadata(self, headers):
"""
Load object metadata from response headers.
Also load some well-known headers like x-static-large-object.
"""
metadata = {
k.lower(): v for k, v in headers.items()
if is_sys_or_user_meta('object', k) or
is_object_transient_sysmeta(k)
}
for header_key in self.allowed_headers:
if header_key in headers:
headers_lower = header_key.lower()
metadata[headers_lower] = headers[header_key]
return metadata
    @public
    @cors_validation
    @delay_denial
    @handle_not_allowed
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def POST(self, req):
        """HTTP POST request handler: update object metadata in place."""
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['write_acl']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                # Authorization middleware denied the request.
                return aresp
        error_response = check_metadata(req, 'object')
        if error_response:
            return error_response
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        stgpol = self._stgpol_from_policy_index(policy_index)
        headers = self._prepare_headers(req)
        return self._post_object(req, headers, stgpol)
    def _stgpol_from_policy_index(self, policy_index):
        """Map a swift storage policy index to an oio storage policy name."""
        # TODO actually convert policy_index to oio stgpol
        return 'SINGLE'
    def _post_object(self, req, headers, stgpol):
        """
        Replace (or, for S3-originated requests, merge) object properties.

        :param stgpol: storage policy name (currently unused, see TODO).
        """
        # TODO do something with stgpol
        metadata = self.load_object_metadata(headers)
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        try:
            # Genuine Swift clears all properties on POST requests.
            # But for convenience, keep them when the request originates
            # from swift3.
            clear = req.environ.get('swift.source') != 'S3'
            self.app.storage.object_set_properties(
                self.account_name, self.container_name, self.object_name,
                metadata, clear=clear, headers=oio_headers,
                version=obj_version_from_env(req.environ),
                cache=oio_cache, perfdata=perfdata)
        except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
            return HTTPNotFound(request=req)
        resp = HTTPAccepted(request=req)
        return resp
def _delete_slo_parts(self, req, manifest):
"""Delete parts of an obsolete SLO."""
# We cannot use bulk-delete here,
# because we are at the end of the pipeline, after 'bulk'.
for part in manifest:
path = '/'.join(('', 'v1', self.account_name)) + part['name']
try:
del_req = make_subrequest(req.environ, 'DELETE', path=path)
del_req.get_response(self.app)
except Exception as exc:
self.app.logger.warn('Failed to delete SLO part %s: %s',
path, exc)
    @public
    @cors_validation
    @delay_denial
    @handle_not_allowed
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def PUT(self, req):
        """
        HTTP PUT request handler: create or overwrite an object.

        Handles authorization, versioning enforcement, detection of an
        overwritten SLO (whose old parts are deleted afterwards), and
        dispatches to fast-copy (_link_object) or upload (_store_object).
        """
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        # is request authorized
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        self.enforce_versioning(req)
        old_slo_manifest = None
        old_slo_manifest_etag = None
        # If versioning is disabled, we must check if the object exists.
        # If it's a NEW SLO (we must check it is not the same manifest),
        # we will have to delete the parts if the current
        # operation is a success.
        if (self.app.delete_slo_parts and
                not config_true_value(container_info.get(
                    'sysmeta', {}).get('versions-enabled', False))):
            try:
                dest_info = get_object_info(req.environ, self.app)
                if 'slo-size' in dest_info['sysmeta']:
                    # Destination is an SLO: fetch its manifest so the old
                    # parts can be deleted after a successful overwrite.
                    manifest_env = req.environ.copy()
                    manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
                    manifest_req = make_subrequest(manifest_env, 'GET')
                    manifest_resp = manifest_req.get_response(self.app)
                    old_slo_manifest = json.loads(manifest_resp.body)
                    old_slo_manifest_etag = dest_info.get('etag')
            except Exception as exc:
                self.app.logger.warn(('Failed to check existence of %s. If '
                                      'overwriting a SLO, old parts may '
                                      'remain. Error was: %s') %
                                     (req.path, exc))
        self._update_content_type(req)
        req.ensure_x_timestamp()
        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if req.headers.get('Oio-Copy-From'):
            # Server-side copy without any data transfer.
            return self._link_object(req)
        data_source = req.environ['wsgi.input']
        if req.content_length:
            # Reject uploads ending before Content-Length bytes arrived.
            data_source = ExpectedSizeReader(data_source, req.content_length)
        headers = self._prepare_headers(req)
        with closing_if_possible(data_source):
            resp = self._store_object(req, data_source, headers)
        if (resp.is_success and
                old_slo_manifest and resp.etag != old_slo_manifest_etag):
            # The new object replaced a different SLO: its parts are now
            # orphans and can be removed.
            self.app.logger.debug(
                'Previous object %s was a different SLO, deleting parts',
                req.path)
            self._delete_slo_parts(req, old_slo_manifest)
        return resp
    def _prepare_headers(self, req):
        """Set X-Timestamp on *req* and build the backend request headers."""
        req.headers['X-Timestamp'] = Timestamp(time.time()).internal
        headers = self.generate_request_headers(req, additional=req.headers)
        return headers
def _get_auto_policy_from_size(self, content_length):
# The default storage policy has an offset of -1
# so should always be chosen
policy = None
for (name, offset) in self.app.oio_stgpol:
if offset > content_length:
break
policy = name
return policy
def _link_object(self, req):
_, container, obj = req.headers['Oio-Copy-From'].split('/', 2)
from_account = req.headers.get('X-Copy-From-Account',
self.account_name)
self.app.logger.info("Creating link from %s/%s/%s to %s/%s/%s",
# Existing
from_account, container, obj,
# New
self.account_name, self.container_name,
self.object_name)
storage = self.app.storage
if req.headers.get('Range'):
raise Exception("Fast Copy with Range is unsupported")
ranges = ranges_from_http_header(req.headers.get('Range'))
if len(ranges) != 1:
raise HTTPInternalServerError(
request=req, body="mutiple ranges unsupported")
ranges = ranges[0]
else:
ranges = None
headers = self._prepare_headers(req)
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# FIXME(FVE): use object_show, cache in req.environ
version = obj_version_from_env(req.environ)
props = storage.object_get_properties(from_account, container, obj,
headers=oio_headers,
version=version,
cache=oio_cache,
perfdata=perfdata)
if props['properties'].get(SLO, None):
raise Exception("Fast Copy with SLO is unsupported")
else:
if ranges:
raise HTTPInternalServerError(
request=req, body="no range supported with single object")
try:
# TODO check return code (values ?)
link_meta = storage.object_link(
from_account, container, obj,
self.account_name, self.container_name, self.object_name,
headers=oio_headers, properties=metadata,
properties_directive='REPLACE', target_version=version,
cache=oio_cache, perfdata=perfdata)
# TODO(FVE): this exception catching block has to be refactored
# TODO check which ones are ok or make non sense
except exceptions.Conflict:
raise HTTPConflict(request=req)
except exceptions.PreconditionFailed:
raise HTTPPreconditionFailed(request=req)
except exceptions.SourceReadError:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except exceptions.EtagMismatch:
return HTTPUnprocessableEntity(request=req)
except (exceptions.ServiceBusy, exceptions.OioTimeout,
exceptions.DeadlineReached):
raise
except (exceptions.NoSuchContainer, exceptions.NotFound):
raise HTTPNotFound(request=req)
except exceptions.ClientException as err:
# 481 = CODE_POLICY_NOT_SATISFIABLE
if err.status == 481:
raise exceptions.ServiceBusy()
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
except Exception:
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
resp = HTTPCreated(request=req, etag=link_meta['hash'])
return resp
def _get_footers(self, req):
"""
Get extra metadata that may be generated during upload by some
middlewares (e.g. checksum of cyphered data).
"""
footers = HeaderKeyDict()
footer_callback = req.environ.get(
'swift.callback.update_footers', lambda _footer: None)
footer_callback(footers)
return footers
    def _object_create(self, account, container, **kwargs):
        """
        Create an object through the oio storage API.

        Prefers object_create_ext() (which also returns object metadata)
        when the SDK provides it, otherwise falls back to object_create()
        and returns an empty metadata dict.

        :returns: (chunks, size, checksum, metadata) tuple.
        """
        storage = self.app.storage
        if hasattr(storage, 'object_create_ext'):
            return storage.object_create_ext(account, container, **kwargs)
        _chunks, _size, checksum = storage.object_create(account, container,
                                                         **kwargs)
        return _chunks, _size, checksum, {}
def _store_object(self, req, data_source, headers):
kwargs = req.environ.get('oio.query', {})
content_type = req.headers.get('content-type', 'octet/stream')
policy = None
container_info = self.container_info(self.account_name,
self.container_name, req)
if 'X-Oio-Storage-Policy' in req.headers:
policy = req.headers.get('X-Oio-Storage-Policy')
if not self.app.POLICIES.get_by_name(policy):
raise HTTPBadRequest(
"invalid policy '%s', must be in %s" %
(policy, self.app.POLICIES.by_name.keys()))
else:
try:
policy_index = int(
req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy']))
except TypeError:
policy_index = 0
if policy_index != 0:
policy = self.app.POLICIES.get_by_index(policy_index).name
else:
content_length = int(req.headers.get('content-length', -1))
policy = self._get_auto_policy_from_size(content_length)
ct_props = {'properties': {}, 'system': {}}
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# only send headers if needed
if SUPPORT_VERSIONING and headers.get(FORCEVERSIONING_HEADER):
oio_headers[FORCEVERSIONING_HEADER] = \
headers.get(FORCEVERSIONING_HEADER)
if req.environ.get('oio.force-version'):
# In a case of MPU, it contains version of the UploadId
# to be able to include version-id of MPU in S3 reponse
kwargs['version'] = req.environ.get('oio.force-version')
bucket_name = req.environ.get('s3api.bucket')
if bucket_name:
# In case a shard is being created, save the name of the S3 bucket
# in a container property. This will be used when aggregating
# container statistics to make bucket statistics.
ct_props['system'][BUCKET_NAME_PROP] = bucket_name
try:
_chunks, _size, checksum, _meta = self._object_create(
self.account_name, self.container_name,
obj_name=self.object_name, file_or_path=data_source,
mime_type=content_type, policy=policy, headers=oio_headers,
etag=req.headers.get('etag', '').strip('"'),
properties=metadata, container_properties=ct_props,
properties_callback=(
lambda: self.load_object_metadata(self._get_footers(req))),
cache=oio_cache, perfdata=perfdata,
**kwargs)
except exceptions.Conflict:
raise HTTPConflict(request=req)
except exceptions.PreconditionFailed:
raise HTTPPreconditionFailed(request=req)
except SourceReadTimeout as err:
self.app.logger.warning(
_('ERROR Client read timeout (%s)'), err)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except exceptions.SourceReadError:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except exceptions.EtagMismatch:
return HTTPUnprocessableEntity(request=req)
except (exceptions.ServiceBusy, exceptions.OioTimeout,
exceptions.DeadlineReached):
raise
except exceptions.NoSuchContainer:
raise HTTPNotFound(request=req)
except exceptions.ClientException as err:
# 481 = CODE_POLICY_NOT_SATISFIABLE
if err.status == 481:
raise exceptions.ServiceBusy()
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
except HTTPException:
# This can happen when the data source raises an exception
raise
except Exception:
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
last_modified = int(_meta.get('mtime', math.ceil(time.time())))
# FIXME(FVE): if \x10 character in object name, decode version
# number and set it in the response headers, instead of the oio
# version number.
version_id = _meta.get('version', 'null')
resp = HTTPCreated(
request=req, etag=checksum,
last_modified=last_modified,
headers={
'x-object-sysmeta-version-id': version_id
})
return resp
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
req.content_type_manually_set = False
@public
@cors_validation
@delay_denial
@handle_not_allowed
@handle_oio_timeout
@handle_service_busy
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
req.ensure_x_timestamp()
self.enforce_versioning(req)
return self._delete_object(req)
def _delete_object(self, req):
storage = self.app.storage
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# only send headers if needed
if SUPPORT_VERSIONING and req.headers.get(FORCEVERSIONING_HEADER):
oio_headers[FORCEVERSIONING_HEADER] = \
req.headers.get(FORCEVERSIONING_HEADER)
try:
storage.object_delete(
self.account_name, self.container_name, self.object_name,
version=obj_version_from_env(req.environ),
headers=oio_headers, cache=oio_cache, perfdata=perfdata)
except exceptions.NoSuchContainer:
return HTTPNotFound(request=req)
except exceptions.NoSuchObject:
# Swift doesn't consider this case as an error
pass
resp = HTTPNoContent(request=req)
return resp
| 41.390511 | 79 | 0.604473 |
import json
import mimetypes
import time
import math
from swift import gettext_ as _
from swift.common.utils import (
clean_content_type, config_true_value, Timestamp, public,
close_if_possible, closing_if_possible)
from swift.common.constraints import check_metadata, check_object_creation
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware.versioned_writes.legacy \
import DELETE_MARKER_CONTENT_TYPE
from swift.common.oio_utils import check_if_none_match, \
handle_not_allowed, handle_oio_timeout, handle_service_busy, \
REQID_HEADER, BUCKET_NAME_PROP, MULTIUPLOAD_SUFFIX, \
obj_version_from_env
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPConflict, HTTPPreconditionFailed, HTTPRequestTimeout, \
HTTPUnprocessableEntity, HTTPClientDisconnect, HTTPCreated, \
HTTPNoContent, Response, HTTPInternalServerError, multi_range_iterator, \
HTTPServiceUnavailable, HTTPException
from swift.common.request_helpers import is_sys_or_user_meta, \
is_object_transient_sysmeta, resolve_etag_is_at_header
from swift.common.wsgi import make_subrequest
from swift.proxy.controllers.base import set_object_info_cache, \
delay_denial, cors_validation, get_object_info
from swift.proxy.controllers.obj import check_content_type
from swift.proxy.controllers.obj import BaseObjectController as \
BaseObjectController
from oio.common import exceptions
from oio.common.constants import FORCEVERSIONING_HEADER
from oio.common.http import ranges_from_http_header
from oio.common.storage_method import STORAGE_METHODS
from oio.api.object_storage import _sort_chunks
from oio.common.exceptions import SourceReadTimeout
# Sysmeta header carrying the S3 bucket name an object belongs to.
BUCKET_NAME_HEADER = 'X-Object-Sysmeta-Oio-Bucket-Name'
# Header marking an object as a Static Large Object (SLO) manifest.
SLO = 'x-static-large-object'
# Feature flag: honour object-versioning headers (FORCEVERSIONING_HEADER).
SUPPORT_VERSIONING = True
class ObjectControllerRouter(object):
    """Trivial router: every storage policy maps to the same controller."""

    def __getitem__(self, policy):
        # The oio backend does not need per-policy controller classes,
        # so the policy argument is deliberately ignored.
        return ObjectController
class StreamRangeIterator(object):
    """Wrap an oio object stream so swob can serve (ranged) responses.

    Exposes the ``app_iter_range``/``app_iter_ranges`` interface that
    ``swob.Response`` expects from its ``app_iter``.
    """

    def __init__(self, request, stream):
        self.req = request
        self._stream = stream

    def app_iter_range(self, _start, _stop):
        # Range slicing is already done by the backend, hence the
        # start/stop parameters are ignored here.
        return self.stream()

    def _chunked_app_iter_range(self, start, stop):
        for dat in self.stream():
            yield dat
        # BUGFIX: this generator used to end with 'raise StopIteration',
        # which PEP 479 turns into a RuntimeError on Python 3.7+.
        # A plain return terminates the generator correctly.

    def app_iter_ranges(self, ranges, content_type,
                        boundary, content_size,
                        *_args, **_kwargs):
        for chunk in multi_range_iterator(
                ranges, content_type, boundary, content_size,
                self._chunked_app_iter_range):
            yield chunk

    def stream(self, *args, **kwargs):
        """Yield the data from the underlying stream.

        Temporary backend errors are translated to 503 with a
        'Retry-After' header.
        """
        try:
            for dat in self._stream:
                yield dat
        except (exceptions.ServiceBusy, exceptions.ServiceUnavailable) as err:
            headers = dict()
            headers['Retry-After'] = '1'
            raise HTTPServiceUnavailable(request=self.req, headers=headers,
                                         body=str(err))

    def __iter__(self):
        return self.stream()
class ExpectedSizeReader(object):
    """File-like wrapper checking that the announced number of bytes
    has actually been read when the source reaches EOF.

    Raises ``exceptions.SourceReadError`` on a truncated input.
    """

    def __init__(self, source, expected):
        self.source = source
        self.expected = expected
        self.consumed = 0

    def _account(self, data):
        # Shared EOF/consumption accounting for read() and readline():
        # an empty result means EOF, at which point the byte count must
        # match what the caller announced.
        if len(data) == 0:
            if self.consumed != self.expected:
                raise exceptions.SourceReadError("Truncated input")
        else:
            self.consumed = self.consumed + len(data)
        return data

    def read(self, *args, **kwargs):
        return self._account(self.source.read(*args, **kwargs))

    def readline(self, *args, **kwargs):
        return self._account(self.source.readline(*args, **kwargs))

    def close(self):
        return close_if_possible(self.source)
class ObjectController(BaseObjectController):
allowed_headers = {'content-disposition', 'content-encoding',
'x-delete-at', 'x-object-manifest',
'x-static-large-object'}
    @public
    @cors_validation
    @delay_denial
    def HEAD(self, req):
        """HTTP HEAD request handler (delegates to GETorHEAD)."""
        return self.GETorHEAD(req)
    @public
    @cors_validation
    @delay_denial
    def GET(self, req):
        """HTTP GET request handler (delegates to GETorHEAD)."""
        return self.GETorHEAD(req)
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def GETorHEAD(self, req):
        """Common implementation for GET and HEAD requests on an object.

        Checks the read ACL, runs the authorization callback, fetches the
        object (head or full body) and caches the resulting object info.
        """
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['read_acl']
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if req.method == 'HEAD':
            resp = self.get_object_head_resp(req)
        else:
            resp = self.get_object_fetch_resp(req)
        set_object_info_cache(self.app, req.environ, self.account_name,
                              self.container_name, self.object_name, resp)
        # Strip content-type parameters (e.g. "; swift_bytes=...")
        # from the response.
        if ';' in resp.headers.get('content-type', ''):
            resp.content_type = clean_content_type(
                resp.headers['content-type'])
        return resp
    def enforce_versioning(self, req):
        """Propagate the bucket's versioning policy onto the request.

        Looks up the 'sys.m2.policy.version' property of the root bucket
        (memcache-backed) and sets FORCEVERSIONING_HEADER accordingly.
        Does nothing for segment containers or when versioning support
        is disabled.
        """
        if not SUPPORT_VERSIONING:
            return None
        # There is no reason to save several versions of segments:
        # a new version of a multipart object manifest will point to a
        # completely different set of segments, with another uploadId.
        bucket_name = req.environ.get('s3api.bucket')
        if not bucket_name \
                or self.container_name == bucket_name \
                or self.container_name.endswith(MULTIUPLOAD_SUFFIX):
            return None
        # We can't use _get_info_from_caches as it would use local worker cache
        memcache = getattr(self.app, 'memcache', None) or \
            req.environ.get('swift.cache')
        if memcache is None:
            return None
        key = "/".join(("versioning", self.account_name, bucket_name))
        val = memcache.get(key)
        if val is not None:
            # Cache hit; an empty string means "no versioning policy".
            if val != '':
                req.headers[FORCEVERSIONING_HEADER] = val
            return
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        try:
            meta = self.app.storage.container_get_properties(
                self.account_name, bucket_name, headers=oio_headers,
                cache=oio_cache, perfdata=perfdata)
        except exceptions.NoSuchContainer:
            raise HTTPNotFound(request=req)
        val = meta['system'].get('sys.m2.policy.version', '')
        memcache.set(key, val)
        if val:
            req.headers[FORCEVERSIONING_HEADER] = val
    def get_object_head_resp(self, req):
        """Build the response for a HEAD request.

        On NoSuchObject/NoSuchContainer for a segment container, one
        retry is done against the master. When the app is configured
        with check_state, chunk availability is verified too.
        """
        storage = self.app.storage
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        version = obj_version_from_env(req.environ)
        force_master = False
        while True:
            try:
                if self.app.check_state:
                    metadata, chunks = storage.object_locate(
                        self.account_name, self.container_name,
                        self.object_name, version=version,
                        headers=oio_headers, force_master=force_master,
                        cache=oio_cache, perfdata=perfdata)
                else:
                    metadata = storage.object_get_properties(
                        self.account_name, self.container_name,
                        self.object_name, version=version,
                        headers=oio_headers, force_master=force_master,
                        cache=oio_cache, perfdata=perfdata)
                break
            except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
                # Retry once on the master, but only for segment
                # containers (replication lag is likely there).
                if force_master or not \
                        self.container_name.endswith(MULTIUPLOAD_SUFFIX):
                    return HTTPNotFound(request=req)
                force_master = True
        if self.app.check_state:
            storage_method = STORAGE_METHODS.load(metadata['chunk_method'])
            # We cannot reach the chunks before getting ec_nb_data chunks
            # (or one chunk for replication).
            min_chunks = storage_method.ec_nb_data if storage_method.ec else 1
            chunks_by_pos = _sort_chunks(chunks, storage_method.ec)
            for idx, entries in enumerate(chunks_by_pos.items()):
                # Positions must be contiguous, starting at 0.
                if idx != entries[0]:
                    return HTTPBadRequest(request=req)
                nb_chunks_ok = 0
                for entry in entries[1]:
                    try:
                        storage.blob_client.chunk_head(
                            entry['url'], headers=oio_headers)
                        nb_chunks_ok += 1
                    except exceptions.OioException:
                        pass
                    if nb_chunks_ok >= min_chunks:
                        break
                else:
                    # for/else: not enough chunks reachable at this
                    # position -> object is unreadable.
                    return HTTPBadRequest(request=req)
        resp = self.make_object_response(req, metadata)
        return resp
    def get_object_fetch_resp(self, req):
        """Build the response for a GET request (metadata + data stream).

        Retries once against the master when the object is not found in
        a segment container (likely replication lag).
        """
        storage = self.app.storage
        if req.headers.get('Range'):
            ranges = ranges_from_http_header(req.headers.get('Range'))
        else:
            ranges = None
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        force_master = False
        while True:
            try:
                metadata, stream = storage.object_fetch(
                    self.account_name, self.container_name, self.object_name,
                    ranges=ranges, headers=oio_headers,
                    version=obj_version_from_env(req.environ),
                    force_master=force_master, cache=oio_cache,
                    perfdata=perfdata)
                break
            except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
                if force_master or not \
                        self.container_name.endswith(MULTIUPLOAD_SUFFIX):
                    return HTTPNotFound(request=req)
                force_master = True
        resp = self.make_object_response(req, metadata, stream)
        return resp
    def make_object_response(self, req, metadata, stream=None):
        """Translate oio object metadata (and optional data stream)
        into a swob Response.
        """
        conditional_etag = resolve_etag_is_at_header(
            req, metadata.get('properties'))
        resp = Response(request=req, conditional_response=True,
                        conditional_etag=conditional_etag)
        if config_true_value(metadata['deleted']):
            resp.headers['Content-Type'] = DELETE_MARKER_CONTENT_TYPE
        else:
            resp.headers['Content-Type'] = metadata.get(
                'mime_type', 'application/octet-stream')
        properties = metadata.get('properties')
        if properties:
            # Forward only object metadata, transient sysmeta and the
            # explicitly whitelisted extra headers.
            for k, v in properties.items():
                if is_sys_or_user_meta('object', k) or \
                        is_object_transient_sysmeta(k) or \
                        k.lower() in self.allowed_headers:
                    resp.headers[str(k)] = v
        hash_ = metadata.get('hash')
        if hash_ is not None:
            hash_ = hash_.lower()
        resp.headers['etag'] = hash_
        resp.headers['x-object-sysmeta-version-id'] = metadata['version']
        resp.last_modified = int(metadata['mtime'])
        if stream:
            # Whether we are bothered with ranges or not, we wrap the
            # stream in order to handle exceptions.
            resp.app_iter = StreamRangeIterator(req, stream)
        length_ = metadata.get('length')
        if length_ is not None:
            length_ = int(length_)
        resp.content_length = length_
        resp.content_encoding = metadata.get('encoding')
        resp.accept_ranges = 'bytes'
        return resp
def load_object_metadata(self, headers):
metadata = {
k.lower(): v for k, v in headers.items()
if is_sys_or_user_meta('object', k) or
is_object_transient_sysmeta(k)
}
for header_key in self.allowed_headers:
if header_key in headers:
headers_lower = header_key.lower()
metadata[headers_lower] = headers[header_key]
return metadata
    @public
    @cors_validation
    @delay_denial
    @handle_not_allowed
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def POST(self, req):
        """HTTP POST request handler: update object metadata."""
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['write_acl']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        error_response = check_metadata(req, 'object')
        if error_response:
            return error_response
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        stgpol = self._stgpol_from_policy_index(policy_index)
        headers = self._prepare_headers(req)
        return self._post_object(req, headers, stgpol)
    def _stgpol_from_policy_index(self, policy_index):
        # Policy indexes are not mapped yet: every Swift storage-policy
        # index resolves to the 'SINGLE' oio storage policy.
        return 'SINGLE'
    def _post_object(self, req, headers, stgpol):
        """Apply a metadata update on the object.

        Swift's POST semantics replace all properties; the S3 layer
        expects a merge, hence the 'clear' flag depends on the source.
        """
        metadata = self.load_object_metadata(headers)
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        try:
            # Genuine Swift POST replaces all properties; S3 does not.
            clear = req.environ.get('swift.source') != 'S3'
            self.app.storage.object_set_properties(
                self.account_name, self.container_name, self.object_name,
                metadata, clear=clear, headers=oio_headers,
                version=obj_version_from_env(req.environ),
                cache=oio_cache, perfdata=perfdata)
        except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
            return HTTPNotFound(request=req)
        resp = HTTPAccepted(request=req)
        return resp
    def _delete_slo_parts(self, req, manifest):
        """Best-effort deletion of the segments listed in an obsolete
        SLO manifest; failures are only logged.
        """
        for part in manifest:
            path = '/'.join(('', 'v1', self.account_name)) + part['name']
            try:
                del_req = make_subrequest(req.environ, 'DELETE', path=path)
                del_req.get_response(self.app)
            except Exception as exc:
                self.app.logger.warn('Failed to delete SLO part %s: %s',
                                     path, exc)
    @public
    @cors_validation
    @delay_denial
    @handle_not_allowed
    @handle_oio_timeout
    @handle_service_busy
    @check_if_none_match
    def PUT(self, req):
        """HTTP PUT request handler: create or overwrite an object.

        Also handles server-side copies ('Oio-Copy-From') and, when a
        non-versioned SLO is overwritten, deletes its old segments.
        """
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        self.enforce_versioning(req)
        old_slo_manifest = None
        old_slo_manifest_etag = None
        # we will have to delete the parts if the current
        # operation is a success.
        if (self.app.delete_slo_parts and
                not config_true_value(container_info.get(
                    'sysmeta', {}).get('versions-enabled', False))):
            try:
                dest_info = get_object_info(req.environ, self.app)
                if 'slo-size' in dest_info['sysmeta']:
                    manifest_env = req.environ.copy()
                    manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
                    manifest_req = make_subrequest(manifest_env, 'GET')
                    manifest_resp = manifest_req.get_response(self.app)
                    old_slo_manifest = json.loads(manifest_resp.body)
                    old_slo_manifest_etag = dest_info.get('etag')
            except Exception as exc:
                self.app.logger.warn(('Failed to check existence of %s. If '
                                      'overwriting a SLO, old parts may '
                                      'remain. Error was: %s') %
                                     (req.path, exc))
        self._update_content_type(req)
        req.ensure_x_timestamp()
        # check constraints on object name and request headers
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if req.headers.get('Oio-Copy-From'):
            return self._link_object(req)
        data_source = req.environ['wsgi.input']
        if req.content_length:
            # Check the announced content length is actually delivered.
            data_source = ExpectedSizeReader(data_source, req.content_length)
        headers = self._prepare_headers(req)
        with closing_if_possible(data_source):
            resp = self._store_object(req, data_source, headers)
        if (resp.is_success and
                old_slo_manifest and resp.etag != old_slo_manifest_etag):
            self.app.logger.debug(
                'Previous object %s was a different SLO, deleting parts',
                req.path)
            self._delete_slo_parts(req, old_slo_manifest)
        return resp
def _prepare_headers(self, req):
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
headers = self.generate_request_headers(req, additional=req.headers)
return headers
def _get_auto_policy_from_size(self, content_length):
# The default storage policy has an offset of -1
# so should always be chosen
policy = None
for (name, offset) in self.app.oio_stgpol:
if offset > content_length:
break
policy = name
return policy
def _link_object(self, req):
_, container, obj = req.headers['Oio-Copy-From'].split('/', 2)
from_account = req.headers.get('X-Copy-From-Account',
self.account_name)
self.app.logger.info("Creating link from %s/%s/%s to %s/%s/%s",
# Existing
from_account, container, obj,
# New
self.account_name, self.container_name,
self.object_name)
storage = self.app.storage
if req.headers.get('Range'):
raise Exception("Fast Copy with Range is unsupported")
ranges = ranges_from_http_header(req.headers.get('Range'))
if len(ranges) != 1:
raise HTTPInternalServerError(
request=req, body="mutiple ranges unsupported")
ranges = ranges[0]
else:
ranges = None
headers = self._prepare_headers(req)
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# FIXME(FVE): use object_show, cache in req.environ
version = obj_version_from_env(req.environ)
props = storage.object_get_properties(from_account, container, obj,
headers=oio_headers,
version=version,
cache=oio_cache,
perfdata=perfdata)
if props['properties'].get(SLO, None):
raise Exception("Fast Copy with SLO is unsupported")
else:
if ranges:
raise HTTPInternalServerError(
request=req, body="no range supported with single object")
try:
# TODO check return code (values ?)
link_meta = storage.object_link(
from_account, container, obj,
self.account_name, self.container_name, self.object_name,
headers=oio_headers, properties=metadata,
properties_directive='REPLACE', target_version=version,
cache=oio_cache, perfdata=perfdata)
# TODO(FVE): this exception catching block has to be refactored
# TODO check which ones are ok or make non sense
except exceptions.Conflict:
raise HTTPConflict(request=req)
except exceptions.PreconditionFailed:
raise HTTPPreconditionFailed(request=req)
except exceptions.SourceReadError:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except exceptions.EtagMismatch:
return HTTPUnprocessableEntity(request=req)
except (exceptions.ServiceBusy, exceptions.OioTimeout,
exceptions.DeadlineReached):
raise
except (exceptions.NoSuchContainer, exceptions.NotFound):
raise HTTPNotFound(request=req)
except exceptions.ClientException as err:
# 481 = CODE_POLICY_NOT_SATISFIABLE
if err.status == 481:
raise exceptions.ServiceBusy()
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
except Exception:
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
resp = HTTPCreated(request=req, etag=link_meta['hash'])
return resp
def _get_footers(self, req):
footers = HeaderKeyDict()
footer_callback = req.environ.get(
'swift.callback.update_footers', lambda _footer: None)
footer_callback(footers)
return footers
def _object_create(self, account, container, **kwargs):
storage = self.app.storage
if hasattr(storage, 'object_create_ext'):
return storage.object_create_ext(account, container, **kwargs)
_chunks, _size, checksum = storage.object_create(account, container,
**kwargs)
return _chunks, _size, checksum, {}
    def _store_object(self, req, data_source, headers):
        """Upload the object data and map backend errors to HTTP errors.

        The storage policy comes from (in order): the explicit
        'X-Oio-Storage-Policy' header, the Swift policy index, or an
        automatic choice based on the content length.
        """
        kwargs = req.environ.get('oio.query', {})
        content_type = req.headers.get('content-type', 'octet/stream')
        policy = None
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        if 'X-Oio-Storage-Policy' in req.headers:
            policy = req.headers.get('X-Oio-Storage-Policy')
            if not self.app.POLICIES.get_by_name(policy):
                raise HTTPBadRequest(
                    "invalid policy '%s', must be in %s" %
                    (policy, self.app.POLICIES.by_name.keys()))
        else:
            try:
                policy_index = int(
                    req.headers.get('X-Backend-Storage-Policy-Index',
                                    container_info['storage_policy']))
            except TypeError:
                policy_index = 0
            if policy_index != 0:
                policy = self.app.POLICIES.get_by_index(policy_index).name
            else:
                content_length = int(req.headers.get('content-length', -1))
                policy = self._get_auto_policy_from_size(content_length)
        ct_props = {'properties': {}, 'system': {}}
        metadata = self.load_object_metadata(headers)
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        # only send headers if needed
        if SUPPORT_VERSIONING and headers.get(FORCEVERSIONING_HEADER):
            oio_headers[FORCEVERSIONING_HEADER] = \
                headers.get(FORCEVERSIONING_HEADER)
        if req.environ.get('oio.force-version'):
            # In a case of MPU, it contains version of the UploadId
            # to be able to include version-id of MPU in S3 reponse
            kwargs['version'] = req.environ.get('oio.force-version')
        bucket_name = req.environ.get('s3api.bucket')
        if bucket_name:
            # In case a shard is being created, save the name of the S3 bucket
            # in a container property. This will be used when aggregating
            # container statistics to make bucket statistics.
            ct_props['system'][BUCKET_NAME_PROP] = bucket_name
        try:
            _chunks, _size, checksum, _meta = self._object_create(
                self.account_name, self.container_name,
                obj_name=self.object_name, file_or_path=data_source,
                mime_type=content_type, policy=policy, headers=oio_headers,
                etag=req.headers.get('etag', '').strip('"'),
                properties=metadata, container_properties=ct_props,
                properties_callback=(
                    lambda: self.load_object_metadata(self._get_footers(req))),
                cache=oio_cache, perfdata=perfdata,
                **kwargs)
        except exceptions.Conflict:
            raise HTTPConflict(request=req)
        except exceptions.PreconditionFailed:
            raise HTTPPreconditionFailed(request=req)
        except SourceReadTimeout as err:
            self.app.logger.warning(
                _('ERROR Client read timeout (%s)'), err)
            self.app.logger.increment('client_timeouts')
            raise HTTPRequestTimeout(request=req)
        except exceptions.SourceReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except exceptions.EtagMismatch:
            return HTTPUnprocessableEntity(request=req)
        except (exceptions.ServiceBusy, exceptions.OioTimeout,
                exceptions.DeadlineReached):
            raise
        except exceptions.NoSuchContainer:
            raise HTTPNotFound(request=req)
        except exceptions.ClientException as err:
            # 481 = CODE_POLICY_NOT_SATISFIABLE
            if err.status == 481:
                raise exceptions.ServiceBusy()
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'),
                {'path': req.path})
            raise HTTPInternalServerError(request=req)
        except HTTPException:
            # This can happen when the data source raises an exception
            raise
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data %s'),
                {'path': req.path})
            raise HTTPInternalServerError(request=req)
        last_modified = int(_meta.get('mtime', math.ceil(time.time())))
        # FIXME(FVE): if \x10 character in object name, decode version
        # number and set it in the response headers, instead of the oio
        # version number.
        version_id = _meta.get('version', 'null')
        resp = HTTPCreated(
            request=req, etag=checksum,
            last_modified=last_modified,
            headers={
                'x-object-sysmeta-version-id': version_id
            })
        return resp
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
req.content_type_manually_set = False
    @public
    @cors_validation
    @delay_denial
    @handle_not_allowed
    @handle_oio_timeout
    @handle_service_busy
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        req.ensure_x_timestamp()
        self.enforce_versioning(req)
        return self._delete_object(req)
    def _delete_object(self, req):
        """Delete the object (or create a delete marker when versioning
        is forced). A missing object is not an error, per Swift
        semantics; a missing container is a 404.
        """
        storage = self.app.storage
        oio_headers = {REQID_HEADER: self.trans_id}
        oio_cache = req.environ.get('oio.cache')
        perfdata = req.environ.get('swift.perfdata')
        # only send headers if needed
        if SUPPORT_VERSIONING and req.headers.get(FORCEVERSIONING_HEADER):
            oio_headers[FORCEVERSIONING_HEADER] = \
                req.headers.get(FORCEVERSIONING_HEADER)
        try:
            storage.object_delete(
                self.account_name, self.container_name, self.object_name,
                version=obj_version_from_env(req.environ),
                headers=oio_headers, cache=oio_cache, perfdata=perfdata)
        except exceptions.NoSuchContainer:
            return HTTPNotFound(request=req)
        except exceptions.NoSuchObject:
            # Swift doesn't consider this case as an error
            pass
        resp = HTTPNoContent(request=req)
        return resp
| true | true |
f732d8f8a85b16ceadc7a8193c88c59ad20ada7e | 4,204 | py | Python | airflow/operators/__init__.py | bertrand-caron/incubator-airflow | 56bae60c139036ab506af595bd44b31eb21967df | [
"Apache-2.0"
] | 1 | 2019-05-16T02:21:21.000Z | 2019-05-16T02:21:21.000Z | airflow/operators/__init__.py | bertrand-caron/incubator-airflow | 56bae60c139036ab506af595bd44b31eb21967df | [
"Apache-2.0"
] | 6 | 2018-02-10T20:25:16.000Z | 2019-11-20T03:01:03.000Z | airflow/operators/__init__.py | bertrand-caron/incubator-airflow | 56bae60c139036ab506af595bd44b31eb21967df | [
"Apache-2.0"
] | 1 | 2018-12-05T06:59:07.000Z | 2018-12-05T06:59:07.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
from airflow.models import BaseOperator # noqa: F401
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinary below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Imports operators dynamically while keeping the package API clean,
# abstracting the underlying modules
_operators = {
'bash_operator': ['BashOperator'],
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'pig_operator': ['PigOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dagrun_operator': ['TriggerDagRunOperator'],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'latest_only_operator': ['LatestOnlyOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
'presto_to_mysql': ['PrestoToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
'oracle_operator': ['OracleOperator']
}
# Legacy dynamic importer: lets names listed in _operators be imported
# lazily from "airflow.operators". Opt out by setting the
# AIRFLOW_USE_NEW_IMPORTS environment variable.
if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
    from airflow.utils.helpers import AirflowImporter
    airflow_importer = AirflowImporter(sys.modules[__name__], _operators)
def _integrate_plugins():
    """Integrate plugins to the context.

    Registers every plugin-provided operators module in ``sys.modules``
    under its real name and exposes it as an attribute of this package so
    ``airflow.operators.<plugin_module>`` imports resolve.
    """
    from airflow.plugins_manager import operators_modules
    for operators_module in operators_modules:
        sys.modules[operators_module.__name__] = operators_module
        # Make the module reachable as airflow.operators.<_name>.
        globals()[operators_module._name] = operators_module
        ##########################################################
        # TODO FIXME Remove in Airflow 2.0
        if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
            from zope.deprecation import deprecated
            # Legacy path: also surface each operator class at package
            # level, with a deprecation warning emitted on access.
            for _operator in operators_module._objects:
                operator_name = _operator.__name__
                globals()[operator_name] = _operator
                deprecated(
                    operator_name,
                    "Importing plugin operator '{i}' directly from "
                    "'airflow.operators' has been deprecated. Please "
                    "import from 'airflow.operators.[plugin_module]' "
                    "instead. Support for direct imports will be dropped "
                    "entirely in Airflow 2.0.".format(i=operator_name))
| 38.568807 | 74 | 0.653901 |
import sys
import os
from airflow.models import BaseOperator
tor': ['BashOperator'],
'check_operator': [
'CheckOperator',
'ValueCheckOperator',
'IntervalCheckOperator',
],
'python_operator': [
'PythonOperator',
'BranchPythonOperator',
'ShortCircuitOperator',
],
'hive_operator': ['HiveOperator'],
'pig_operator': ['PigOperator'],
'presto_check_operator': [
'PrestoCheckOperator',
'PrestoValueCheckOperator',
'PrestoIntervalCheckOperator',
],
'dagrun_operator': ['TriggerDagRunOperator'],
'dummy_operator': ['DummyOperator'],
'email_operator': ['EmailOperator'],
'hive_to_samba_operator': ['Hive2SambaOperator'],
'latest_only_operator': ['LatestOnlyOperator'],
'mysql_operator': ['MySqlOperator'],
'sqlite_operator': ['SqliteOperator'],
'mysql_to_hive': ['MySqlToHiveTransfer'],
'postgres_operator': ['PostgresOperator'],
'subdag_operator': ['SubDagOperator'],
'hive_stats_operator': ['HiveStatsCollectionOperator'],
's3_to_hive_operator': ['S3ToHiveTransfer'],
'hive_to_mysql': ['HiveToMySqlTransfer'],
'presto_to_mysql': ['PrestoToMySqlTransfer'],
's3_file_transform_operator': ['S3FileTransformOperator'],
'http_operator': ['SimpleHttpOperator'],
'hive_to_druid': ['HiveToDruidTransfer'],
'jdbc_operator': ['JdbcOperator'],
'mssql_operator': ['MsSqlOperator'],
'mssql_to_hive': ['MsSqlToHiveTransfer'],
'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'],
'generic_transfer': ['GenericTransfer'],
'oracle_operator': ['OracleOperator']
}
if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from airflow.utils.helpers import AirflowImporter
airflow_importer = AirflowImporter(sys.modules[__name__], _operators)
def _integrate_plugins():
from airflow.plugins_manager import operators_modules
for operators_module in operators_modules:
sys.modules[operators_module.__name__] = operators_module
globals()[operators_module._name] = operators_module
| true | true |
f732d938c1d2aa8152d53902e46983e5dea7784e | 3,749 | py | Python | nodes/views.py | Indigo-Uliv/indigo-web | 49674b531830d7f85763c40bac5fe2a50d32690c | [
"Apache-2.0"
] | null | null | null | nodes/views.py | Indigo-Uliv/indigo-web | 49674b531830d7f85763c40bac5fe2a50d32690c | [
"Apache-2.0"
] | 2 | 2020-06-05T16:51:50.000Z | 2021-06-10T17:30:26.000Z | nodes/views.py | Indigo-Uliv/indigo-web | 49674b531830d7f85763c40bac5fe2a50d32690c | [
"Apache-2.0"
] | null | null | null | """Node views
Copyright 2015 Archive Analytics Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import datetime
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import NodeForm
from .client import NodeClient
from indigo.models import Node
from indigo.models.errors import NodeConflictError
import logging
logger = logging.getLogger("indigo")
@login_required
def home(request):
    """Render the node dashboard listing every known node."""
    node_dicts = [node.to_dict() for node in Node.list()]
    return render(request, 'nodes/index.html', {"nodes": node_dicts})
@login_required
def new(request):
    """Render the new-node form; create the node on a valid POST."""
    form = NodeForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        try:
            Node.create(name=form.cleaned_data["name"],
                        address=form.cleaned_data["address"])
            messages.add_message(request, messages.INFO, 'New node was added')
        except NodeConflictError:
            # Node names are unique; report the clash instead of raising.
            messages.add_message(request, messages.ERROR, 'That name is already in use')
        return HttpResponseRedirect(reverse('nodes:home'))
    # GET, or invalid POST (the bound form carries the validation errors).
    return render(request, 'nodes/new.html', {'form': form})
@login_required
def edit(request, id):
    """Display and process the edit form for a single node."""
    node = Node.find_by_id(id)
    initial_data = node.to_dict()
    if request.method == 'POST':
        form = NodeForm(request.POST)
        if form.is_valid():
            new_name = form.cleaned_data['name']
            node.update(name=new_name, address=form.cleaned_data['address'])
            messages.add_message(request, messages.INFO,
                                 "Node information for '{}' has been changed".format(new_name))
            return HttpResponseRedirect(reverse('nodes:home'))
    else:
        # GET: pre-populate the form from the node's current values.
        form = NodeForm(initial=initial_data)
    return render(request, 'nodes/edit.html', {'form': form})
@login_required
def check(request, id):
    """Ping the node's agent on port 9000 and record its UP/DOWN status."""
    node = Node.find_by_id(id)
    client = NodeClient(node.address + ":9000")
    reachable, _state = client.get_state()
    if reachable:
        node.update(status="UP", last_update=datetime.datetime.now())
        messages.add_message(request, messages.INFO, 'The node was reachable')
    else:
        messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
        node.update(status="DOWN", last_update=datetime.datetime.now())
    return HttpResponseRedirect(reverse("nodes:home"))
@login_required
def metrics(request, id):
    """Show live metrics for one node; administrators only."""
    node = Node.find_by_id(id)
    if not node or not request.user.administrator:
        raise PermissionDenied()
    client = NodeClient(node.address + ":9000")
    reachable, node_metrics = client.get_state()
    if not reachable:
        messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
    return render(request, 'nodes/metrics.html', {"node": node, "metrics": node_metrics})
@login_required
def logview(request, id):
    """Render the log-viewer page for one node."""
    node = Node.find_by_id(id)
    return render(request, 'nodes/logs.html', {"node": node})
| 33.473214 | 112 | 0.697786 |
import uuid
import datetime
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import NodeForm
from .client import NodeClient
from indigo.models import Node
from indigo.models.errors import NodeConflictError
import logging
logger = logging.getLogger("indigo")
@login_required
def home(request):
nodes = [n.to_dict() for n in Node.list()]
return render(request, 'nodes/index.html', {"nodes": nodes})
@login_required
def new(request):
form = NodeForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
try:
Node.create(name=form.cleaned_data["name"],
address=form.cleaned_data["address"])
messages.add_message(request, messages.INFO, 'New node was added')
except NodeConflictError:
messages.add_message(request, messages.ERROR, 'That name is already in use')
return HttpResponseRedirect(reverse('nodes:home'))
return render(request, 'nodes/new.html', {'form': form})
@login_required
def edit(request, id):
node = Node.find_by_id(id)
initial_data = node.to_dict()
if request.method == 'POST':
form = NodeForm(request.POST)
if form.is_valid():
node.update(name=form.cleaned_data['name'], address=form.cleaned_data['address'])
messages.add_message(request, messages.INFO,
"Node information for '{}' has been changed".format(form.cleaned_data['name']))
return HttpResponseRedirect(reverse('nodes:home'))
else:
form = NodeForm(initial=initial_data)
return render(request, 'nodes/edit.html', {'form': form})
@login_required
def check(request, id):
node = Node.find_by_id(id)
client = NodeClient(node.address + ":9000")
ok, metrics = client.get_state()
if ok:
node.update(status="UP", last_update=datetime.datetime.now())
messages.add_message(request, messages.INFO, 'The node was reachable')
else:
messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
node.update(status="DOWN", last_update=datetime.datetime.now())
return HttpResponseRedirect(reverse("nodes:home"))
@login_required
def metrics(request, id):
node = Node.find_by_id(id)
if not node or not request.user.administrator:
raise PermissionDenied()
client = NodeClient(node.address + ":9000")
ok, metrics = client.get_state()
if not ok:
messages.add_message(request, messages.WARNING, 'The node at {} was unreachable'.format(node.address))
return render(request, 'nodes/metrics.html', { "node": node, "metrics": metrics})
@login_required
def logview(request, id):
node = Node.find_by_id(id)
return render(request, 'nodes/logs.html', { "node": node})
| true | true |
f732d957829b1f9689fdb5846bd224af1e8a3a25 | 1,040 | py | Python | scriptures/canons/base.py | beatitud/bible-ref-py | 506a634b6ed7b6ac503eda5bea02be0fb801ed63 | [
"MIT"
] | 6 | 2019-10-11T14:53:16.000Z | 2021-02-06T14:17:57.000Z | scriptures/canons/base.py | beatitud/bible-ref-parser-py | 506a634b6ed7b6ac503eda5bea02be0fb801ed63 | [
"MIT"
] | null | null | null | scriptures/canons/base.py | beatitud/bible-ref-parser-py | 506a634b6ed7b6ac503eda5bea02be0fb801ed63 | [
"MIT"
] | 1 | 2020-12-27T01:14:01.000Z | 2020-12-27T01:14:01.000Z | from __future__ import unicode_literals
import re
class CanonBase:
single_verse_re = {
'en': 'v[.]*',
'fr': '[v]{1,2}[.]?\s{0,2}',
}
def __init__(self, language='en'):
self.language = language
# We check for books
if hasattr(self, 'books'):
# We it is not a dictionary, we raise an error
if not isinstance(self.books, dict):
raise Exception('"books" should be a dictionary, who\'s values are four valued tuples (Book Name, '
'Abbreviation, Regex, [ch1_verse_count, ch2_verse_count, ...])')
# We set the regex instance variables
self.book_re_string = '|'.join(b.get(self.language)[2] for b in self.books.values())
self.book_re = re.compile(self.book_re_string, re.IGNORECASE | re.UNICODE)
self.single_verse_re_string = self.single_verse_re.get(self.language)
# Otherwise we raise an error
else:
raise Exception('Text has no "books"')
| 34.666667 | 115 | 0.586538 | from __future__ import unicode_literals
import re
class CanonBase:
single_verse_re = {
'en': 'v[.]*',
'fr': '[v]{1,2}[.]?\s{0,2}',
}
def __init__(self, language='en'):
self.language = language
if hasattr(self, 'books'):
if not isinstance(self.books, dict):
raise Exception('"books" should be a dictionary, who\'s values are four valued tuples (Book Name, '
'Abbreviation, Regex, [ch1_verse_count, ch2_verse_count, ...])')
# We set the regex instance variables
self.book_re_string = '|'.join(b.get(self.language)[2] for b in self.books.values())
self.book_re = re.compile(self.book_re_string, re.IGNORECASE | re.UNICODE)
self.single_verse_re_string = self.single_verse_re.get(self.language)
# Otherwise we raise an error
else:
raise Exception('Text has no "books"')
| true | true |
f732da0eb421d7b008a1c22b5d7e08c26fd66fe9 | 153 | py | Python | test/run_all_tests.py | sgowris2/sigfig | 299806b548be1ae282077a7b2d8faf2c6ca57f52 | [
"MIT"
] | null | null | null | test/run_all_tests.py | sgowris2/sigfig | 299806b548be1ae282077a7b2d8faf2c6ca57f52 | [
"MIT"
] | 4 | 2021-03-30T15:54:47.000Z | 2021-03-30T16:10:13.000Z | test/run_all_tests.py | sgowris2/sigfig | 299806b548be1ae282077a7b2d8faf2c6ca57f52 | [
"MIT"
] | null | null | null | import unittest
loader = unittest.TestLoader()
start_dir = '.'
suite = loader.discover(start_dir)
runner = unittest.TextTestRunner()
runner.run(suite)
| 17 | 34 | 0.764706 | import unittest
loader = unittest.TestLoader()
start_dir = '.'
suite = loader.discover(start_dir)
runner = unittest.TextTestRunner()
runner.run(suite)
| true | true |
f732db32f65923c4102d7721ea10a815b6d8c226 | 6,957 | py | Python | tornado/utils/commonUtil.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | tornado/utils/commonUtil.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | tornado/utils/commonUtil.py | maqg/wcrobot | 7d026c1a34362c5434105c27c5bd25f08c6fabe2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import base64
import json
import os
import socket
import struct
import uuid
import time
from hashlib import md5 as MD5
from binascii import crc32
from random import Random
from core.err_code import err_desc_en, err_desc_ch
from utils.timeUtil import get_current_time
DEBIAN_VERSION_FILE = "/etc/debian_version"
CENTOS_VERSION_FILE = "/etc/centos-release"
REDHAT_VERSION_FILE = "/etc/redhat-release"
PLATFORM_DEBIAN = "debian7"
PLATFORM_REDCENT6 = "redcent6"
PLATFORM_REDCENT7 = "redcent7"
def random_str(randomlength=8):
    """Return a random string of *randomlength* ASCII letters and digits.

    Fixes: the old loop shadowed the builtin ``str`` and grew the result
    with repeated concatenation; a join over random.choice is the
    idiomatic (and linear-time) equivalent with the same distribution.
    """
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    rng = Random()
    return ''.join(rng.choice(chars) for _ in range(randomlength))
def CRC32(crcStr):
    """Unsigned 32-bit CRC checksum of the UTF-8 bytes of *crcStr*."""
    return crc32(crcStr.encode()) % (1 << 32)
def listFiles(fileDir, keyword=None):
    """Return the names of plain files directly inside *fileDir*.

    If *keyword* is given, only names containing it are returned.
    Fix: the directory test now joins the entry with *fileDir*; the old
    code called os.path.isdir() on the bare name, which was resolved
    against the current working directory instead of *fileDir*.
    """
    fileList = []
    for entry in os.listdir(fileDir):
        fullPath = os.path.join(fileDir, entry)
        if not os.path.isdir(fullPath) and (not keyword or keyword in entry):
            fileList.append(entry)
    return fileList
def getPlatform():
    """Detect the host distribution: 'debianN', or 'redcentN' for CentOS/RedHat.

    Fixes: the old code returned the prefix 'readcent', which could never
    match the PLATFORM_REDCENT6/PLATFORM_REDCENT7 constants ('redcent...')
    defined above; file handles are now closed via ``with``.
    """
    if os.path.exists(DEBIAN_VERSION_FILE):
        with open(DEBIAN_VERSION_FILE, "r") as fd:
            # e.g. "7.8" -> major version "7"
            version = fd.readline().split(".")[0]
        return "debian" + version
    if os.path.exists(CENTOS_VERSION_FILE):
        filePath = CENTOS_VERSION_FILE
    else:
        filePath = REDHAT_VERSION_FILE
    with open(filePath, "r") as fd:
        # e.g. "CentOS Linux release 7.6.1810 (Core)" -> "7"
        version = fd.readline().split(".")[0].split(" ")[-1]
    return "redcent" + version
def isPlatformDebian():
    """True when the detected platform string is the supported Debian release."""
    platform_name = getPlatform()
    return platform_name == PLATFORM_DEBIAN
def ip2long(ip):
    """Convert a dotted-quad IPv4 string to its unsigned 32-bit integer."""
    return int.from_bytes(socket.inet_aton(ip), 'big')
def removeFile(filepath):
    """Delete *filepath*; a no-op when it is None or does not exist."""
    if filepath is None or not os.path.exists(filepath):
        return
    os.remove(filepath)
def buildRetMsg(errorCode, data=None, errorLog=None):
    """Pack (code, data[, errorLog]); the log is appended only when truthy."""
    if errorLog:
        return (errorCode, data, errorLog)
    return (errorCode, data)
def buildRetObj(errorCode, data=None, errorLog=""):
    """Wrap a result as the standard response dictionary."""
    result = {}
    result["RetCode"] = errorCode
    result["RetObj"] = data
    result["ErrorLog"] = errorLog
    return result
def toString(src, encoding="utf-8"):
    """Encode a str to bytes using *encoding*; everything else passes through.

    NOTE(review): despite the name, str input yields *bytes* on Python 3
    (a Python-2 leftover) -- kept as-is for existing callers.
    """
    if type(src) != str:
        return src
    try:
        return src.encode(encoding)
    except:
        return octUnicode(src).encode(encoding)
def transToObj(string):
    """Parse JSON text (str or bytes) into a Python object.

    Returns None for None or inputs shorter than 2 characters, and {}
    when the input is not valid JSON (matching the old fallback).

    Fixes: json.loads() lost its ``encoding`` keyword in Python 3.9, so
    the old call always raised TypeError and returned {}; bytes input is
    now decoded (the old code called .encode() on it, which bytes lacks).
    """
    if string is None:
        return None
    if isinstance(string, bytes):
        string = string.decode("utf-8")
    if len(string) < 2:
        return None
    try:
        return json.loads(string)
    except ValueError:
        return {}
def tryToDump(string):
    """Pretty-print JSON *string* (str or bytes) with sorted keys, indent 4.

    Returns {} for None or too-short input; non-JSON input is dumped as
    the raw string. Fix (consistent with transToObj): bytes input is now
    decoded -- the old code called .encode() on it, which bytes does not
    have, raising AttributeError.
    """
    if string is None:
        return {}
    if isinstance(string, bytes):
        string = string.decode("utf-8")
    if len(string) < 2:
        return {}
    try:
        obj = json.loads(string)
    except ValueError:
        # Not JSON: serialize the raw text itself.
        obj = string
    return json.dumps(obj, sort_keys=True, indent=4)
def getStrTime(milisecs):
    """Format an epoch timestamp in milliseconds as local 'YYYY-MM-DD HH:MM:SS'."""
    epoch_seconds = int(milisecs) / 1000
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(epoch_seconds))
def isSystemWindows():
    """True when the interpreter is running on Windows."""
    import platform
    return platform.system() == "Windows"
def isSystemMac():
    """True when the interpreter is running on macOS (Darwin)."""
    import platform
    return platform.system() == "Darwin"
def transToStr(obj, indent=False):
    """Serialize *obj* to JSON text; a non-False *indent* pretty-prints.

    The comparison is deliberately ``!= False`` rather than truthiness:
    indent=0 compares equal to False and therefore means "no indent",
    exactly as before.
    """
    options = {'ensure_ascii': False}
    if indent != False:
        options['indent'] = indent
    return json.dumps(obj, **options)
def oct_trim(inStr):
    """Collapse runs of spaces in *inStr* and drop trailing whitespace."""
    words = [piece for piece in inStr.split(' ') if piece]
    return ' '.join(words).rstrip()
def OCT_SYSTEM(formatStr, arg=None):
    """Run a shell command, capturing stdout via a uniquely-named temp file.

    Returns (exit_code, output): exit_code is (status >> 8) & 0xFF and
    output is the captured stdout without its trailing newline, or None
    when empty.
    """
    tmpPath = "/tmp/OCTTEMP_FILE_%ld%s" % (get_current_time(), getUuid())
    command = formatStr % arg if arg else formatStr
    command += " > %s" % (tmpPath)
    status = os.system(command)
    code = status >> 8 & 0XFF
    fp = open(tmpPath, 'r')
    if fp == None:
        return (code, None)
    data = fp.read()
    fp.close()
    os.remove(tmpPath)
    if len(data) == 0:
        return (code, None)
    if data[-1] == '\n':
        # Drop the trailing newline the shell redirection leaves behind.
        data = data[:-1]
    if len(data) == 0:
        data = None
    return (code, data)
def OCT_PIPERUN(cmd):
    """Run *cmd* (whitespace-split, no shell); return (returncode, stdout_bytes).

    Returns (0, None) when *cmd* is None.
    """
    import subprocess
    if cmd is None:
        return (0, None)
    proc = subprocess.Popen(cmd.split(), close_fds=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=False)
    proc.wait()
    output = proc.stdout.read()
    return (proc.returncode, output)
def getUuid(spilt=None):
    """Random UUID4: dashed string when *spilt* is truthy, else bare hex."""
    generated = uuid.uuid4()
    if spilt:
        return str(generated)
    return generated.hex
def allocVmMac(vmId, nicId):
    """Derive a deterministic MAC (prefix 52:54) for a vm/nic pair via MD5."""
    digest = MD5(("%s/%s" % (vmId, nicId)).encode()).hexdigest()
    tail = ":".join(digest[i:i + 2] for i in (0, 4, 8, 12))
    return "52:54:" + tail
def trimUuid(uuid):
    """Collapse a dashed 8-4-4-4-12 UUID string into its 32 bare characters.

    Input without exactly four dashes is returned unchanged.
    """
    if uuid.count("-") != 4:
        return uuid
    return (uuid[0:8] + uuid[9:13] + uuid[14:18]
            + uuid[19:23] + uuid[24:36])
def expandUuid(uuid):
    """Insert dashes into a 32-char UUID (8-4-4-4-12); dashed input passes through."""
    if uuid[8] == '-':
        return uuid
    parts = (uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
    return "-".join(parts)
def jsonStringFormat(objString):
    """Pretty-print *objString* (a JSON string or a plain object) with
    sorted keys and indent 2; fall back to the string form on failure."""
    if type(objString) == str:
        obj = transToObj(objString)
        fallback = objString
    else:
        obj = objString
        fallback = transToStr(objString)
    try:
        return json.dumps(obj, sort_keys=True, indent=2)
    except:
        return fallback
def octUnicode(src):
    """Decode bytes to str via utf-8; str or undecodable input passes through."""
    if isinstance(src, str) and type(src) == str:
        return src
    try:
        return str(src, "utf-8")
    except:
        return src
def fileToObj(filePath):
    """Load a UTF-8 JSON file and return the parsed object.

    Prints a message and returns None when the file is missing.
    """
    if not os.path.exists(filePath):
        print(("file %s not exist" % (filePath)))
        return None
    fd = open(filePath, "r", encoding="utf-8")
    if not fd:
        print(("open file %s error" % (filePath)))
        return None
    content = fd.read()
    fd.close()
    return transToObj(content)
def getErrorMsgCN(error):
    """Chinese description for an error code; empty string when unmapped."""
    desc = err_desc_ch.get(error)
    return desc if desc else ""
def getErrorMsg(error):
    """English description for an error code; empty string when unmapped."""
    desc = err_desc_en.get(error)
    return desc if desc else ""
def isValidJson(string):
    """Return True when *string* parses as JSON.

    Fix: the old implementation ran eval() on the input, executing
    arbitrary expressions from untrusted data (and accepting Python
    literals that are not JSON). json.loads performs the check safely.
    """
    if string is None:
        return False
    try:
        json.loads(string)
    except (ValueError, TypeError):
        return False
    return True
def format_path_net(path):
    """Normalize a 'host:/path' string.

    Spaces are stripped, the portion after the first ':' is wrapped in
    single leading/trailing slashes, and runs of '/' collapse to one.
    Returns None for None; input without ':' raises IndexError, as before.
    """
    if path is None:
        return None
    segs = path.replace(' ', '').split(':')
    rebuilt = segs[0] + ':' + '/' + segs[1] + '/'
    out = []
    for ch in rebuilt:
        # Drop a '/' that immediately follows another '/'.
        if ch == '/' and out and out[-1] == '/':
            continue
        out.append(ch)
    return ''.join(out)
def get_pid_by_process_name(name):
    """First PID whose command word-matches *name* (via `ps | grep -w`).

    Returns None when the command fails.
    """
    ret, data = OCT_SYSTEM('ps -ae | grep -w %s' % name)
    if ret != 0:
        return None
    return data.split()[0]
def b64_decode(src):
    """Decode base64 text to a str; falsy input yields ''."""
    if not src:
        return ""
    raw = base64.b64decode(src.encode())
    return raw.decode()
def b64_encode(src):
    """Encode text as base64, returned as a str; falsy input yields ''."""
    if not src:
        return ""
    encoded_bytes = base64.b64encode(src.encode())
    return encoded_bytes.decode()
| 18.07013 | 80 | 0.633319 |
import base64
import json
import os
import socket
import struct
import uuid
import time
from hashlib import md5 as MD5
from binascii import crc32
from random import Random
from core.err_code import err_desc_en, err_desc_ch
from utils.timeUtil import get_current_time
DEBIAN_VERSION_FILE = "/etc/debian_version"
CENTOS_VERSION_FILE = "/etc/centos-release"
REDHAT_VERSION_FILE = "/etc/redhat-release"
PLATFORM_DEBIAN = "debian7"
PLATFORM_REDCENT6 = "redcent6"
PLATFORM_REDCENT7 = "redcent7"
def random_str(randomlength=8):
str = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
length = len(chars) - 1
random = Random()
for i in range(randomlength):
str += chars[random.randint(0, length)]
return str
def CRC32(crcStr):
return crc32(crcStr.encode()) & 0xFFFFFFFF
def listFiles(fileDir, keyword=None):
fileList = []
for file in os.listdir(fileDir):
if (not os.path.isdir(file) and (not keyword or file.find(keyword) != -1)):
fileList.append(file)
return fileList
def getPlatform():
if (os.path.exists(DEBIAN_VERSION_FILE)):
fd = open(DEBIAN_VERSION_FILE, "r")
line = fd.readline()
version = line.split(".")[0]
fd.close()
return "debian" + version
elif (os.path.exists(CENTOS_VERSION_FILE)):
filePath = CENTOS_VERSION_FILE
else:
filePath = REDHAT_VERSION_FILE
fd = open(filePath, "r")
line = fd.readline()
version = line.split(".")[0].split(" ")[-1]
fd.close()
return "readcent" + version
def isPlatformDebian():
return getPlatform() == PLATFORM_DEBIAN
def ip2long(ip):
packedIP = socket.inet_aton(ip)
return struct.unpack("!L", packedIP)[0]
def removeFile(filepath):
if (filepath == None or os.path.exists(filepath) == False):
return
os.remove(filepath)
def buildRetMsg(errorCode, data=None, errorLog=None):
if (not errorLog):
return (errorCode, data)
else:
return (errorCode, data, errorLog)
def buildRetObj(errorCode, data=None, errorLog=""):
return {
"RetCode": errorCode,
"RetObj": data,
"ErrorLog": errorLog
}
def toString(src, encoding="utf-8"):
if (type(src) == str):
try:
return src.encode(encoding)
except:
return octUnicode(src).encode(encoding)
else:
return src
def transToObj(string):
if (string == None):
return None
if (type(string) != type("a") and type(string) != type('a')):
string = string.encode()
if (len(string) < 2):
return None
try:
obj = json.loads(string, encoding="utf-8")
except:
obj = {}
return obj
def tryToDump(string):
if (string == None):
return {}
if (type(string) != type("a")):
string = string.encode()
if (len(string) < 2):
return {}
try:
obj = json.loads(string)
except:
obj = string
return json.dumps(obj, sort_keys=True, indent=4)
def getStrTime(milisecs):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(milisecs) / 1000))
def isSystemWindows():
import platform
if (platform.system() == "Windows"):
return True
else:
return False
def isSystemMac():
import platform
if (platform.system() == "Darwin"):
return True
else:
return False
def transToStr(obj, indent=False):
if (indent != False):
return json.dumps(obj, ensure_ascii=False, indent=indent)
else:
return json.dumps(obj, ensure_ascii=False)
def oct_trim(inStr):
segs = inStr.split(" ")
result = ""
for seg in segs:
if (seg == ''):
continue
result += seg
result += " "
return result.rstrip()
def OCT_SYSTEM(formatStr, arg=None):
TEMPFILE_NAME = "/tmp/OCTTEMP_FILE_%ld%s" % (get_current_time(), getUuid())
if (arg):
CMD = formatStr % arg
else:
CMD = formatStr
CMD += " > %s" % (TEMPFILE_NAME)
ret = os.system(CMD)
fp = open(TEMPFILE_NAME, 'r')
if (fp == None):
return (ret >> 8 & 0XFF, None)
data = fp.read()
fp.close()
os.remove(TEMPFILE_NAME)
if (len(data) == 0):
return (ret >> 8 & 0XFF, None)
if (data[-1] == '\n'):
data = data[:-1]
if (len(data) == 0):
data = None
return (ret >> 8 & 0XFF, data)
def OCT_PIPERUN(cmd):
import subprocess
if (cmd == None):
return (0, None)
args = cmd.split()
p = subprocess.Popen(args, close_fds=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
p.wait()
ret = p.returncode
msg = p.stdout.read()
return (ret, msg)
def getUuid(spilt=None):
if (spilt):
return str(uuid.uuid4())
else:
x = uuid.uuid4().hex
return x
def allocVmMac(vmId, nicId):
m = MD5()
string = "%s/%s" % (vmId, nicId)
m.update(string.encode())
v = m.hexdigest()
return "52:54:%s:%s:%s:%s" % (v[0:2], v[4:6], v[8:10], v[12:14])
def trimUuid(uuid):
segs = uuid.split("-")
if (len(segs) != 5):
return uuid
return "%s%s%s%s%s" % (uuid[0:8],
uuid[9:13],
uuid[14:18],
uuid[19:23],
uuid[24:36])
def expandUuid(uuid):
if (uuid[8] == '-'):
return uuid
return "%s-%s-%s-%s-%s" % (uuid[0:8],
uuid[8:12],
uuid[12:16],
uuid[16:20],
uuid[20:32])
def jsonStringFormat(objString):
if (type(objString) == str):
obj = transToObj(objString)
toString = objString
else:
obj = objString
toString = transToStr(objString)
try:
result = json.dumps(obj, sort_keys=True, indent=2)
except:
result = toString
return result
def octUnicode(src):
if (type(src) == str):
return src
else:
try:
return str(src, "utf-8")
except:
return src
def fileToObj(filePath):
if (not os.path.exists(filePath)):
print(("file %s not exist" % (filePath)))
return None
fd = open(filePath, "r", encoding="utf-8")
if (not fd):
print(("open file %s error" % (filePath)))
return None
obj = transToObj(fd.read())
fd.close()
return obj
def getErrorMsgCN(error):
return err_desc_ch.get(error) or ""
def getErrorMsg(error):
return err_desc_en.get(error) or ""
def isValidJson(string):
if (string == None):
return False
try:
eval(string)
except Exception as e:
return False
return True
def format_path_net(path):
flag = 0
if path == None:
return None
path = path.replace(' ', '')
path_temp = path.split(':')
path_t = '/' + path_temp[1] + '/'
path = path_temp[0] + ':' + path_t
path_str = ''
for s_temp in path:
if flag == 1 and s_temp == '/':
continue
if s_temp == '/':
flag = 1
else:
flag = 0
path_str = path_str + s_temp
return path_str
def get_pid_by_process_name(name):
cmd = 'ps -ae | grep -w %s' % name
ret, data = OCT_SYSTEM(cmd)
if ret != 0:
return None
return data.split()[0]
def b64_decode(src):
if not src:
return ""
return base64.b64decode(src.encode()).decode()
def b64_encode(src):
if not src:
return ""
return base64.b64encode(src.encode()).decode()
| true | true |
f732ddbc9b17eacc53c342a4a9303bc33ce1d7ad | 4,891 | py | Python | alpha-zero-general_one_step/MCTS_Bleu.py | rubenrtorrado/NLP | 2ba6f153e428227fcf6f27080bdd0183d395ef64 | [
"Apache-2.0"
] | null | null | null | alpha-zero-general_one_step/MCTS_Bleu.py | rubenrtorrado/NLP | 2ba6f153e428227fcf6f27080bdd0183d395ef64 | [
"Apache-2.0"
] | null | null | null | alpha-zero-general_one_step/MCTS_Bleu.py | rubenrtorrado/NLP | 2ba6f153e428227fcf6f27080bdd0183d395ef64 | [
"Apache-2.0"
] | 1 | 2021-09-22T17:43:26.000Z | 2021-09-22T17:43:26.000Z | import math
import numpy as np
EPS = 1e-8
class MCTS():
    """
    This class handles the MCTS tree.
    """
    def __init__(self, game, nnet, args):
        # game : environment adapter (moves, validity, terminal test)
        # nnet : policy/value network exposing predict(board) -> (P, v)
        # args : hyperparameters; numMCTSSims and cpuct are used here
        self.game = game
        self.nnet = nnet
        self.args = args
        self.Qsa = {} # stores Q values for s,a (as defined in the paper)
        self.Nsa = {} # stores #times edge s,a was visited
        self.Ns = {} # stores #times board s was visited
        self.Ps = {} # stores initial policy (returned by neural net)
        self.Es = {} # stores game.getGameEnded ended for board s
        self.Vs = {} # stores game.getValidMoves for board s
    def getActionProb(self, canonicalBoard, temp=1):
        """
        This function performs numMCTSSims simulations of MCTS starting from
        canonicalBoard.
        Returns:
            probs: a policy vector where the probability of the ith action is
                   proportional to Nsa[(s,a)]**(1./temp)
        """
        for i in range(self.args.numMCTSSims):
            self.search(canonicalBoard)
        s = self.game.stringRepresentation(canonicalBoard)
        counts = [self.Nsa[(s,a)] if (s,a) in self.Nsa else 0 for a in range(self.game.getActionSize())]
        if temp==0:
            # Greedy: all probability mass on the most-visited action.
            bestA = np.argmax(counts)
            probs = [0]*len(counts)
            probs[bestA]=1
            return probs
        # Temperature-scaled visit counts, normalized into a distribution.
        counts = [x**(1./temp) for x in counts]
        probs = [x/float(sum(counts)) for x in counts]
        return probs
    def search(self, canonicalBoard):
        """
        This function performs one iteration of MCTS. It is recursively called
        till a leaf node is found. The action chosen at each node is one that
        has the maximum upper confidence bound as in the paper.
        Once a leaf node is found, the neural network is called to return an
        initial policy P and a value v for the state. This value is propogated
        up the search path. In case the leaf node is a terminal state, the
        outcome is propogated up the search path. The values of Ns, Nsa, Qsa are
        updated.
        NOTE: the return values are the negative of the value of the current
        state. This is done since v is in [-1,1] and if v is the value of a
        state for the current player, then its value is -v for the other player.
        Returns:
            v: the negative of the value of the current canonicalBoard
        """
        # NOTE(review): despite the docstring above (inherited from the
        # two-player original), this single-player BLEU variant returns v
        # unnegated -- see the retained "#-v" markers at the returns.
        s = self.game.stringRepresentation(canonicalBoard)
        if s not in self.Es:
            # Cache the terminal status (BLEU-based) of this state.
            self.Es[s] = self.game.getGameEnded_BLEU(canonicalBoard, 1)
        if self.Es[s]!=0:
            # terminal node
            #test=self.Es[s]
            return self.Es[s]
        if s not in self.Ps:
            # leaf node
            self.Ps[s], v = self.nnet.predict(canonicalBoard)
            valids = self.game.getValidMoves(canonicalBoard, 1)
            self.Ps[s] = self.Ps[s]*valids # masking invalid moves
            #Ruben
            self.Ps[s]=self.Ps[s].T
            sum_Ps_s = np.sum(self.Ps[s])
            if sum_Ps_s > 0:
                self.Ps[s] /= sum_Ps_s # renormalize
            else:
                # if all valid moves were masked make all valid moves equally probable
                # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've get overfitting or something else.
                # If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.
                print("All valid moves were masked, do workaround.")
                self.Ps[s] = self.Ps[s] + valids
                self.Ps[s] /= np.sum(self.Ps[s])
            self.Vs[s] = valids
            self.Ns[s] = 0
            return v#-v
        valids = self.Vs[s]
        cur_best = -float('inf')
        best_act = -1
        # pick the action with the highest upper confidence bound
        for a in range(self.game.getActionSize()):
            if valids[a]:
                if (s,a) in self.Qsa:
                    # UCB: exploit Q plus an exploration bonus scaled by cpuct.
                    u = self.Qsa[(s,a)] + self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s])/(1+self.Nsa[(s,a)])
                else:
                    u = self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s] + EPS) # Q = 0 ?
                if u > cur_best:
                    cur_best = u
                    best_act = a
        a = best_act
        next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)
        next_s = self.game.getCanonicalForm(next_s, next_player)
        v = self.search(next_s)
        # Back up the value along the visited edge: running-average update.
        if (s,a) in self.Qsa:
            self.Qsa[(s,a)] = (self.Nsa[(s,a)]*self.Qsa[(s,a)] + v)/(self.Nsa[(s,a)]+1)
            self.Nsa[(s,a)] += 1
        else:
            self.Qsa[(s,a)] = v
            self.Nsa[(s,a)] = 1
        self.Ns[s] += 1
        return v#-v
| 35.70073 | 145 | 0.553261 | import math
import numpy as np
EPS = 1e-8
class MCTS():
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.args = args
self.Qsa = {}
self.Nsa = {}
self.Es = {}
self.Vs = {}
def getActionProb(self, canonicalBoard, temp=1):
for i in range(self.args.numMCTSSims):
self.search(canonicalBoard)
s = self.game.stringRepresentation(canonicalBoard)
counts = [self.Nsa[(s,a)] if (s,a) in self.Nsa else 0 for a in range(self.game.getActionSize())]
if temp==0:
bestA = np.argmax(counts)
probs = [0]*len(counts)
probs[bestA]=1
return probs
counts = [x**(1./temp) for x in counts]
probs = [x/float(sum(counts)) for x in counts]
return probs
def search(self, canonicalBoard):
s = self.game.stringRepresentation(canonicalBoard)
if s not in self.Es:
self.Es[s] = self.game.getGameEnded_BLEU(canonicalBoard, 1)
if self.Es[s]!=0:
return self.Es[s]
if s not in self.Ps:
self.Ps[s], v = self.nnet.predict(canonicalBoard)
valids = self.game.getValidMoves(canonicalBoard, 1)
self.Ps[s] = self.Ps[s]*valids
self.Ps[s]=self.Ps[s].T
sum_Ps_s = np.sum(self.Ps[s])
if sum_Ps_s > 0:
self.Ps[s] /= sum_Ps_s
else:
# If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.
print("All valid moves were masked, do workaround.")
self.Ps[s] = self.Ps[s] + valids
self.Ps[s] /= np.sum(self.Ps[s])
self.Vs[s] = valids
self.Ns[s] = 0
return v#-v
valids = self.Vs[s]
cur_best = -float('inf')
best_act = -1
# pick the action with the highest upper confidence bound
for a in range(self.game.getActionSize()):
if valids[a]:
if (s,a) in self.Qsa:
u = self.Qsa[(s,a)] + self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s])/(1+self.Nsa[(s,a)])
else:
u = self.args.cpuct*self.Ps[s][a]*math.sqrt(self.Ns[s] + EPS) # Q = 0 ?
if u > cur_best:
cur_best = u
best_act = a
a = best_act
next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)
next_s = self.game.getCanonicalForm(next_s, next_player)
v = self.search(next_s)
if (s,a) in self.Qsa:
self.Qsa[(s,a)] = (self.Nsa[(s,a)]*self.Qsa[(s,a)] + v)/(self.Nsa[(s,a)]+1)
self.Nsa[(s,a)] += 1
else:
self.Qsa[(s,a)] = v
self.Nsa[(s,a)] = 1
self.Ns[s] += 1
return v#-v
| true | true |
f732df450eec8a2ee95c3a675afe0a7ccec9eb4f | 6,604 | py | Python | bentoml/adapters/multi_file_input.py | HenryDashwood/BentoML | 49709c72dd8f3f45659e860ff751b1d191fa1fb4 | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/multi_file_input.py | HenryDashwood/BentoML | 49709c72dd8f3f45659e860ff751b1d191fa1fb4 | [
"Apache-2.0"
] | null | null | null | bentoml/adapters/multi_file_input.py | HenryDashwood/BentoML | 49709c72dd8f3f45659e860ff751b1d191fa1fb4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, Sequence, Tuple
from bentoml.adapters.base_input import BaseInputAdapter, parse_cli_inputs
from bentoml.adapters.utils import decompress_gzip_request
from bentoml.types import AwsLambdaEvent, FileLike, HTTPRequest, InferenceTask
ApiFuncArgs = Tuple[Sequence[FileLike], ...]
MultiFileTask = InferenceTask[Tuple[FileLike, ...]]
class MultiFileInput(BaseInputAdapter):
    """ Low level input adapter that transforms incoming files data from http request,
    CLI or AWS lambda event into binary stream objects, then passes them down to user
    defined API functions.

    Parameters
    ----------
    input_names : List[str]
        list of input names. For HTTP they are form input names. For CLI
        they are CLI args --input-<name1> or --input-file-<name1>
    allow_none : bool
        accept HTTP requests or AWS Lambda events without all files
        provided. Does not take effect on CLI. At least one file is still
        required even when this flag is set.

    Examples
    ----------

    Service using MultiFileInput:

    .. code-block:: python

        from typing import List
        from PIL import Image
        import numpy as np
        import bentoml
        from bentoml.types import FileLike
        from bentoml.framework.pytorch import PytorchModelArtifact
        from bentoml.adapters import MultiFileInput

        @bentoml.env(pip_packages=['torch', 'pillow', 'numpy'])
        @bentoml.artifacts([PytorchModelArtifact('classifier')])
        class PyTorchFashionClassifier(bentoml.BentoService):
            @bentoml.api(
                input=MultiFileInput(input_names=['image', 'json']), batch=True)
            def predict(self, image_list: List[FileLike], json_list: List[FileLike]):
                inputs = []
                for img_io, json_io in zip(image_list, json_list):
                    img = Image.open(img_io)
                    json_obj = json.load(json_io)
                    inputs.append([img, json_obj])
                outputs = self.artifacts.classifier(inputs)
                return outputs

    Query with HTTP request performed by cURL::

        curl -i \\
          -F image=@test.jpg \\
          -F json=@test.json \\
          localhost:5000/predict

    OR by an HTML form that sends multipart data:

    .. code-block:: html

        <form action="http://localhost:8000" method="POST"
              enctype="multipart/form-data">
            <input name="image" type="file">
            <input name="json" type="file">
            <input type="submit">
        </form>

    Query with CLI command::

        bentoml run PyTorchFashionClassifier:latest predict \\
          --input-file-image test.jpg \\
          --input-file-json test.json

    OR infer all file pairs under a folder with ten pairs each batch::

        bentoml run PyTorchFashionClassifier:latest predict --max-batch-size 10 \\
          --input-file-image folder/*.jpg \\
          --input-file-json folder/*.json

    Note: jpg files and json files should be in same prefix like this::

        folder:
            - apple.jpg
            - apple.json
            - banana.jpg
            - banana.json
            ...
    """

    HTTP_METHODS = ["POST"]
    BATCH_MODE_SUPPORTED = True

    def __init__(
        self, input_names: Sequence[str], allow_none: bool = False, **base_kwargs,
    ):
        super().__init__(**base_kwargs)
        self.input_names = input_names
        self.allow_none = allow_none

    @property
    def config(self):
        """Serializable adapter configuration."""
        return {
            # Converting to list, google.protobuf.Struct does not work with tuple type
            "input_names": list(self.input_names)
        }

    @property
    def request_schema(self):
        """OpenAPI request schema: one binary form field per configured input name."""
        return {
            "multipart/form-data": {
                "schema": {
                    "type": "object",
                    "properties": {
                        k: {"type": "string", "format": "binary"}
                        for k in self.input_names
                    },
                }
            },
        }

    @decompress_gzip_request
    def from_http_request(self, req: HTTPRequest) -> MultiFileTask:
        """Parse a multipart/form-data request into an inference task.

        The task is discarded with HTTP 400 when the Content-Type is not
        multipart/form-data, when none of the expected file fields are present,
        or when some fields are missing and ``allow_none`` is False.
        """
        if req.headers.content_type != 'multipart/form-data':
            task = InferenceTask(data=None)
            task.discard(
                http_status=400,
                err_msg=f"BentoML#{self.__class__.__name__} only accepts requests "
                "with Content-Type: multipart/form-data",
            )
            return task

        _, _, files = HTTPRequest.parse_form_data(req)
        files = tuple(files.get(k) for k in self.input_names)
        # Merged the two previously duplicated discard branches:
        # - no file fields at all -> always reject
        # - some fields missing  -> reject unless allow_none is set
        if not any(files) or (not all(files) and not self.allow_none):
            task = InferenceTask(data=None)
            task.discard(
                http_status=400,
                err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
                f"fields {self.input_names}",
            )
            return task

        return InferenceTask(http_headers=req.headers, data=files)

    def from_aws_lambda_event(self, event: AwsLambdaEvent) -> MultiFileTask:
        """Convert an AWS Lambda proxy event into an HTTPRequest and reuse the
        HTTP parsing path.

        NOTE(review): the event body is passed through as-is; Lambda proxy
        integrations may deliver a base64-encoded body (isBase64Encoded flag)
        — confirm upstream decoding before relying on this path.
        """
        request = HTTPRequest(
            headers=tuple((k, v) for k, v in event.get('headers', {}).items()),
            body=event['body'],
        )
        return self.from_http_request(request)

    def from_cli(self, cli_args: Sequence[str]) -> Iterator[MultiFileTask]:
        """Yield one inference task per group of input files parsed from CLI args."""
        for inputs in parse_cli_inputs(cli_args, self.input_names):
            yield InferenceTask(cli_args=cli_args, data=inputs)

    def extract_user_func_args(self, tasks: Sequence[MultiFileTask]) -> ApiFuncArgs:
        """Transpose per-task file tuples into per-field sequences for the API func.

        Given N tasks each carrying ``len(input_names)`` files, returns one
        tuple per input name containing the N files for that field.  With no
        tasks, returns an empty tuple per input name.
        """
        args = tuple(map(tuple, zip(*map(lambda t: t.data, tasks))))
        if not args:
            args = (tuple(),) * len(self.input_names)
        return args
| 35.12766 | 86 | 0.600545 |
from typing import Iterator, Sequence, Tuple
from bentoml.adapters.base_input import BaseInputAdapter, parse_cli_inputs
from bentoml.adapters.utils import decompress_gzip_request
from bentoml.types import AwsLambdaEvent, FileLike, HTTPRequest, InferenceTask
ApiFuncArgs = Tuple[Sequence[FileLike], ...]
MultiFileTask = InferenceTask[Tuple[FileLike, ...]]
class MultiFileInput(BaseInputAdapter):
    """Input adapter that extracts multiple named file streams from an HTTP
    multipart/form-data request, an AWS Lambda event, or CLI arguments, and
    hands them to the user-defined API function as tuples of file-like objects.
    """
    HTTP_METHODS = ["POST"]
    BATCH_MODE_SUPPORTED = True
    def __init__(
        self, input_names: Sequence[str], allow_none: bool = False, **base_kwargs,
    ):
        """
        :param input_names: form field / CLI argument names, one per expected file
        :param allow_none: if True, accept requests where only some of the named
            files are present (at least one file is still required)
        """
        super().__init__(**base_kwargs)
        self.input_names = input_names
        self.allow_none = allow_none
    @property
    def config(self):
        """Serializable adapter configuration."""
        # Converted to list: protobuf Struct serialization rejects tuples
        return {
            "input_names": list(self.input_names)
        }
    @property
    def request_schema(self):
        """OpenAPI request schema: one binary form field per configured input name."""
        return {
            "multipart/form-data": {
                "schema": {
                    "type": "object",
                    "properties": {
                        k: {"type": "string", "format": "binary"}
                        for k in self.input_names
                    },
                }
            },
        }
    @decompress_gzip_request
    def from_http_request(self, req: HTTPRequest) -> MultiFileTask:
        """Parse a multipart/form-data request into an inference task.

        Discards the task with HTTP 400 when the Content-Type is wrong, when
        none of the expected file fields are present, or when some fields are
        missing and ``allow_none`` is False.
        """
        if req.headers.content_type != 'multipart/form-data':
            task = InferenceTask(data=None)
            task.discard(
                http_status=400,
                err_msg=f"BentoML#{self.__class__.__name__} only accepts requests "
                "with Content-Type: multipart/form-data",
            )
        else:
            _, _, files = HTTPRequest.parse_form_data(req)
            # One entry per configured input name; missing fields become None
            files = tuple(files.get(k) for k in self.input_names)
            if not any(files):
                # No file fields at all -> always reject
                task = InferenceTask(data=None)
                task.discard(
                    http_status=400,
                    err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
                    f"fields {self.input_names}",
                )
            elif not all(files) and not self.allow_none:
                # Some fields missing -> reject unless allow_none is set
                task = InferenceTask(data=None)
                task.discard(
                    http_status=400,
                    err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
                    f"fields {self.input_names}",
                )
            else:
                task = InferenceTask(http_headers=req.headers, data=files,)
        return task
    def from_aws_lambda_event(self, event: AwsLambdaEvent) -> MultiFileTask:
        """Convert an AWS Lambda proxy event into an HTTPRequest and reuse the
        HTTP parsing path.

        NOTE(review): the event body is forwarded as-is; Lambda proxy
        integrations may base64-encode it (isBase64Encoded) — confirm upstream
        decoding before relying on this path.
        """
        request = HTTPRequest(
            headers=tuple((k, v) for k, v in event.get('headers', {}).items()),
            body=event['body'],
        )
        return self.from_http_request(request)
    def from_cli(self, cli_args: Sequence[str]) -> Iterator[MultiFileTask]:
        """Yield one inference task per group of input files parsed from CLI args."""
        for inputs in parse_cli_inputs(cli_args, self.input_names):
            yield InferenceTask(cli_args=cli_args, data=inputs)
    def extract_user_func_args(self, tasks: Sequence[MultiFileTask]) -> ApiFuncArgs:
        """Transpose per-task file tuples into per-field sequences for the API func.

        With no tasks, returns one empty tuple per input name so the user
        function still receives the expected number of arguments.
        """
        args = tuple(map(tuple, zip(*map(lambda t: t.data, tasks))))
        if not args:
            args = (tuple(),) * len(self.input_names)
        return args
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.