max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
Language Proficiency/Python/Write a function.py
|
RishabhArya/HackerRank-Solutions
| 3
|
6627351
|
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century
    years, which must also be divisible by 400.

    :param year: calendar year as an int
    :return: bool
    """
    # Equivalent to the 400 / 100 / 4 cascade, collapsed to one expression.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    year = int(input())
    print(is_leap(year))
|
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century
    years, which must also be divisible by 400.

    :param year: calendar year as an int
    :return: bool
    """
    # Equivalent to the 400 / 100 / 4 cascade, collapsed to one expression.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    year = int(input())
    print(is_leap(year))
|
none
| 1
| 4.093542
| 4
|
|
flask_monitoringdashboard/views/__init__.py
|
bokal2/Flask-MonitoringDashboard
| 3
|
6627352
|
"""
Main class for adding all route-functions to user_app.
Setup requires only to import this file. All other imports are done in this file
"""
from flask import render_template
from flask.helpers import send_from_directory
from flask_monitoringdashboard import loc, blueprint
from flask_monitoringdashboard.core.auth import secure
@blueprint.route('/static/<path:filename>')
def static(filename):
    """Serve a file from the dashboard's bundled static directory.

    :param filename: path of the file relative to the /static folder
    :return: the file's contents as a Flask response
    """
    static_root = loc() + 'static'
    return send_from_directory(static_root, filename)
@blueprint.route('/', defaults={'path': ''})
@blueprint.route('/<path:path>')  # Catch-All URL: http://flask.pocoo.org/snippets/57/
@secure
def index(path):
    """Serve the dashboard's base page for every (authenticated) URL."""
    page = render_template('fmd_base.html')
    return page
|
"""
Main class for adding all route-functions to user_app.
Setup requires only to import this file. All other imports are done in this file
"""
from flask import render_template
from flask.helpers import send_from_directory
from flask_monitoringdashboard import loc, blueprint
from flask_monitoringdashboard.core.auth import secure
@blueprint.route('/static/<path:filename>')
def static(filename):
    """Serve a file from the dashboard's bundled static directory.

    :param filename: path of the file relative to the /static folder
    :return: the file's contents as a Flask response
    """
    static_root = loc() + 'static'
    return send_from_directory(static_root, filename)
@blueprint.route('/', defaults={'path': ''})
@blueprint.route('/<path:path>')  # Catch-All URL: http://flask.pocoo.org/snippets/57/
@secure
def index(path):
    """Serve the dashboard's base page for every (authenticated) URL."""
    page = render_template('fmd_base.html')
    return page
|
en
| 0.761097
|
Main class for adding all route-functions to user_app. Setup requires only to import this file. All other imports are done in this file Serve static files :param filename: filename in the /static file :return: content of the file # Catch-All URL: http://flask.pocoo.org/snippets/57/
| 2.221347
| 2
|
dit_helpdesk/iee_contact/views.py
|
uktrade/dit-helpdesk
| 3
|
6627353
|
<gh_stars>1-10
import logging
from directory_forms_api_client import helpers
from django.conf import settings
from django.core.mail import EmailMessage
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import get_template
from formtools.wizard.views import SessionWizardView
from iee_contact.forms import (
IEEContactFormStepOne,
IEEContactFormStepTwo,
IEEContactFormStepThree,
IEE_LOCATION_CHOICES,
IEE_TOPIC_CHOICES,
IEEZendeskForm,
)
logger = logging.getLogger(__name__)

# Wizard steps as (step name, form class) pairs, in display order.
FORMS = [
    ("step_one", IEEContactFormStepOne),
    ("step_two", IEEContactFormStepTwo),
    ("step_three", IEEContactFormStepThree),
]

# Template rendered for each wizard step, keyed by step name.
TEMPLATES = {
    "step_one": "iee_contact/step_one.html",
    "step_two": "iee_contact/step_two.html",
    "step_three": "iee_contact/step_three.html",
}

# Lookup tables mapping the form choice keys to their display labels.
LOCATIONS, TOPICS = (dict(IEE_LOCATION_CHOICES), dict(IEE_TOPIC_CHOICES))
class IEEContactFormWizardView(SessionWizardView):
    """Multi-step wizard for IEE contact enquiries.

    Collects location, topic and contact details over three steps, then
    routes the completed enquiry either to Zendesk or to a recipient
    mailbox depending on the chosen topic.
    """

    form_list = FORMS

    def get_template_names(self):
        """Return the template used for the wizard step currently shown."""
        return [TEMPLATES[self.steps.current]]

    def done(self, form_list, **kwargs):
        """Dispatch the completed enquiry and render the confirmation page.

        :param form_list: the validated forms, one per wizard step
        :return: rendered ``done`` page
        """
        context = self.process_form_data(form_list)
        # if the chosen topic is in the last four options then go to zendesk
        # otherwise send an email
        if context["type"] == "Zendesk":
            IEEContactFormWizardView.send_to_zenddesk(context)
        else:
            IEEContactFormWizardView.send_mail(context)
        # Fix: render() requires the request as its first positional
        # argument; passing only the template name raised at runtime.
        return render(self.request, "iee_contact/done.html", {"context": context})

    def render_next_step(self, form, **kwargs):
        """
        Override the next step: redirect to the HMRC tax form when the
        next step would be step three and enquiry_topic is "1".

        :param form: submitted form
        :param kwargs: passed keyword arguments
        :return: render to response (or redirect)
        """
        if (
            self.steps.next == "step_three"
            and form.cleaned_data["enquiry_topic"] == "1"
        ):
            return HttpResponseRedirect(settings.HMRC_TAX_FORM_URL)
        else:
            return super().render_next_step(form, **kwargs)

    @staticmethod
    def process_form_data(form_list):
        """Flatten per-step cleaned data into a single context dict.

        Decides the delivery route ("email" vs "Zendesk") and recipient
        from the selected topic, then renders the message body template.

        :param form_list: the validated wizard forms
        :return: context dict with subject, recipient and rendered content
        """
        form_data = [form.cleaned_data for form in form_list]
        context = {"subject": "New IEE Enquiry", "service_name": "UK IEE"}
        for form in form_data:
            if "location" in form:
                context["location"] = LOCATIONS[int(form["location"])]
            if "enquiry_topic" in form:
                context["topic"] = TOPICS[int(form["enquiry_topic"])]
            if "email_address" in form:
                context["email_address"] = form["email_address"]
            if "name" in form:
                context["name"] = form["name"]
            if "message" in form:
                context["message"] = form["message"]
        # NOTE(review): topics with no matching branch below leave "type"
        # unset, which makes done() raise KeyError — confirm every topic
        # choice is covered upstream.
        if context["topic"] == TOPICS[2]:
            context["type"] = "email"
            context["recipient_email"] = "<EMAIL>"
            context["recipient_full_name"] = "DEFRA"
        elif context["topic"] == TOPICS[3]:
            context["type"] = "email"
            context["recipient_email"] = "TBC"
            context["recipient_full_name"] = "DCMS"
        elif context["topic"] == TOPICS[4]:
            context["type"] = "Zendesk"
            context["recipient_email"] = "<EMAIL>"
            context["recipient_full_name"] = "euexit"
        elif context["topic"] == TOPICS[5]:
            context["type"] = "Zendesk"
            context["destination"] = "<EMAIL>"
            context["recipient_full_name"] = "<NAME>"
        template = get_template("iee_contact/contact_message_tmpl.txt")
        context["content"] = template.render(context)
        return context

    @staticmethod
    def send_mail(context):
        """Email the enquiry to the chosen recipient (best effort).

        :param context: context produced by :meth:`process_form_data`
        """
        headers = {"Reply-To": context["email_address"]}
        email = EmailMessage(
            context["subject"],
            context["content"],
            context["email_address"],
            [context["recipient_email"]],
            headers=headers,
        )
        try:
            email.send()
        except Exception:
            # Log instead of print so failures reach the configured logging
            # backend; sending remains best-effort as before.
            logger.exception("Failed to send IEE enquiry email")

    @staticmethod
    def send_to_zenddesk(context):
        """Forward the enquiry to Zendesk via the directory forms API.

        (The method name keeps the historical "zenddesk" spelling because
        callers reference it.)

        :param context: context produced by :meth:`process_form_data`
        :raises ValueError: if the Zendesk form data fails validation
        """
        zendesk_form = IEEZendeskForm(
            data={
                "message": context["message"],
                "email_address": context["email_address"],
                "name": context["name"],
            }
        )
        spam_control = helpers.SpamControl(contents=context["content"])
        sender = helpers.Sender(email_address=context["email_address"])
        # An assert would be stripped under ``python -O``; validate
        # explicitly so invalid payloads always fail loudly.
        if not zendesk_form.is_valid():
            raise ValueError(zendesk_form.errors)
        if settings.DIRECTORY_FORMS_API_BASE_URL:
            zendesk_form.save(
                email_address=context["recipient_email"],
                form_url="/iee_contact/",
                service_name=context["service_name"],
                spam_control=spam_control,
                sender=sender,
            )
|
import logging
from directory_forms_api_client import helpers
from django.conf import settings
from django.core.mail import EmailMessage
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import get_template
from formtools.wizard.views import SessionWizardView
from iee_contact.forms import (
IEEContactFormStepOne,
IEEContactFormStepTwo,
IEEContactFormStepThree,
IEE_LOCATION_CHOICES,
IEE_TOPIC_CHOICES,
IEEZendeskForm,
)
logger = logging.getLogger(__name__)

# Wizard steps as (step name, form class) pairs, in display order.
FORMS = [
    ("step_one", IEEContactFormStepOne),
    ("step_two", IEEContactFormStepTwo),
    ("step_three", IEEContactFormStepThree),
]

# Template rendered for each wizard step, keyed by step name.
TEMPLATES = {
    "step_one": "iee_contact/step_one.html",
    "step_two": "iee_contact/step_two.html",
    "step_three": "iee_contact/step_three.html",
}

# Lookup tables mapping the form choice keys to their display labels.
LOCATIONS, TOPICS = (dict(IEE_LOCATION_CHOICES), dict(IEE_TOPIC_CHOICES))
class IEEContactFormWizardView(SessionWizardView):
    """Multi-step wizard for IEE contact enquiries.

    Collects location, topic and contact details over three steps, then
    routes the completed enquiry either to Zendesk or to a recipient
    mailbox depending on the chosen topic.
    """

    form_list = FORMS

    def get_template_names(self):
        """Return the template used for the wizard step currently shown."""
        return [TEMPLATES[self.steps.current]]

    def done(self, form_list, **kwargs):
        """Dispatch the completed enquiry and render the confirmation page.

        :param form_list: the validated forms, one per wizard step
        :return: rendered ``done`` page
        """
        context = self.process_form_data(form_list)
        # if the chosen topic is in the last four options then go to zendesk
        # otherwise send an email
        if context["type"] == "Zendesk":
            IEEContactFormWizardView.send_to_zenddesk(context)
        else:
            IEEContactFormWizardView.send_mail(context)
        # Fix: render() requires the request as its first positional
        # argument; passing only the template name raised at runtime.
        return render(self.request, "iee_contact/done.html", {"context": context})

    def render_next_step(self, form, **kwargs):
        """
        Override the next step: redirect to the HMRC tax form when the
        next step would be step three and enquiry_topic is "1".

        :param form: submitted form
        :param kwargs: passed keyword arguments
        :return: render to response (or redirect)
        """
        if (
            self.steps.next == "step_three"
            and form.cleaned_data["enquiry_topic"] == "1"
        ):
            return HttpResponseRedirect(settings.HMRC_TAX_FORM_URL)
        else:
            return super().render_next_step(form, **kwargs)

    @staticmethod
    def process_form_data(form_list):
        """Flatten per-step cleaned data into a single context dict.

        Decides the delivery route ("email" vs "Zendesk") and recipient
        from the selected topic, then renders the message body template.

        :param form_list: the validated wizard forms
        :return: context dict with subject, recipient and rendered content
        """
        form_data = [form.cleaned_data for form in form_list]
        context = {"subject": "New IEE Enquiry", "service_name": "UK IEE"}
        for form in form_data:
            if "location" in form:
                context["location"] = LOCATIONS[int(form["location"])]
            if "enquiry_topic" in form:
                context["topic"] = TOPICS[int(form["enquiry_topic"])]
            if "email_address" in form:
                context["email_address"] = form["email_address"]
            if "name" in form:
                context["name"] = form["name"]
            if "message" in form:
                context["message"] = form["message"]
        # NOTE(review): topics with no matching branch below leave "type"
        # unset, which makes done() raise KeyError — confirm every topic
        # choice is covered upstream.
        if context["topic"] == TOPICS[2]:
            context["type"] = "email"
            context["recipient_email"] = "<EMAIL>"
            context["recipient_full_name"] = "DEFRA"
        elif context["topic"] == TOPICS[3]:
            context["type"] = "email"
            context["recipient_email"] = "TBC"
            context["recipient_full_name"] = "DCMS"
        elif context["topic"] == TOPICS[4]:
            context["type"] = "Zendesk"
            context["recipient_email"] = "<EMAIL>"
            context["recipient_full_name"] = "euexit"
        elif context["topic"] == TOPICS[5]:
            context["type"] = "Zendesk"
            context["destination"] = "<EMAIL>"
            context["recipient_full_name"] = "<NAME>"
        template = get_template("iee_contact/contact_message_tmpl.txt")
        context["content"] = template.render(context)
        return context

    @staticmethod
    def send_mail(context):
        """Email the enquiry to the chosen recipient (best effort).

        :param context: context produced by :meth:`process_form_data`
        """
        headers = {"Reply-To": context["email_address"]}
        email = EmailMessage(
            context["subject"],
            context["content"],
            context["email_address"],
            [context["recipient_email"]],
            headers=headers,
        )
        try:
            email.send()
        except Exception:
            # Log instead of print so failures reach the configured logging
            # backend; sending remains best-effort as before.
            logger.exception("Failed to send IEE enquiry email")

    @staticmethod
    def send_to_zenddesk(context):
        """Forward the enquiry to Zendesk via the directory forms API.

        (The method name keeps the historical "zenddesk" spelling because
        callers reference it.)

        :param context: context produced by :meth:`process_form_data`
        :raises ValueError: if the Zendesk form data fails validation
        """
        zendesk_form = IEEZendeskForm(
            data={
                "message": context["message"],
                "email_address": context["email_address"],
                "name": context["name"],
            }
        )
        spam_control = helpers.SpamControl(contents=context["content"])
        sender = helpers.Sender(email_address=context["email_address"])
        # An assert would be stripped under ``python -O``; validate
        # explicitly so invalid payloads always fail loudly.
        if not zendesk_form.is_valid():
            raise ValueError(zendesk_form.errors)
        if settings.DIRECTORY_FORMS_API_BASE_URL:
            zendesk_form.save(
                email_address=context["recipient_email"],
                form_url="/iee_contact/",
                service_name=context["service_name"],
                spam_control=spam_control,
                sender=sender,
            )
|
en
| 0.686131
|
# if the chosen topic is in the last four options then go to zendesk # otherwise send an email override next steps for step five if enquiry_topic is Commodity codes, tariffs and measures, import procedures :param form: submitted form :param kwargs: passed keyword arguments :return: render to response
| 1.895047
| 2
|
model/tests/conftest.py
|
logan-connolly/imdb
| 0
|
6627354
|
from pathlib import Path
import pytest
from src.data import paths
@pytest.fixture
def mock_data_dir(monkeypatch):
    """Point ``paths.DATAPATH`` at the test-data directory and return it."""
    test_data = Path(__file__).parent / "data"
    monkeypatch.setattr(paths, "DATAPATH", test_data)
    return test_data
|
from pathlib import Path
import pytest
from src.data import paths
@pytest.fixture
def mock_data_dir(monkeypatch):
    """Point ``paths.DATAPATH`` at the test-data directory and return it."""
    test_data = Path(__file__).parent / "data"
    monkeypatch.setattr(paths, "DATAPATH", test_data)
    return test_data
|
en
| 0.734947
|
Patch DATAPATH constant to point to test data directory
| 2.187303
| 2
|
python/obsolete/sketcher/sketch_4578.py
|
geometer/sandbox
| 6
|
6627355
|
# "Romantics of Geometry" group on Facebook, problem 4578
# https://www.facebook.com/groups/parmenides52/permalink/2779763428804012/
from sandbox import Scene
from sketcher import sketch
scene = Scene()
# Triangle ABC with orthocentre D, constrained to lie inside the triangle.
triangle = scene.nondegenerate_triangle(labels=('A', 'B', 'C'))
D = scene.orthocentre_point(triangle, label='D')
D.inside_triangle_constraint(triangle)
A, B, C = triangle.points
# H = intersection of line AD with line BC; G = intersection of line CD
# with line AB (both placed on the auxiliary layer).
H = A.line_through(D).intersection_point(B.line_through(C), layer='auxiliary')
G = C.line_through(D).intersection_point(A.line_through(B), layer='auxiliary')
# Problem hypothesis: |AB| = |CD|.
A.segment(B).congruent_constraint(C.segment(D), comment='Given: |AB| = |CD|')
# Draw the sketch, including the auxiliary points H and G.
sketch(scene, extra_points=(H, G))
|
# "Romantics of Geometry" group on Facebook, problem 4578
# https://www.facebook.com/groups/parmenides52/permalink/2779763428804012/
from sandbox import Scene
from sketcher import sketch
scene = Scene()
# Triangle ABC with orthocentre D, constrained to lie inside the triangle.
triangle = scene.nondegenerate_triangle(labels=('A', 'B', 'C'))
D = scene.orthocentre_point(triangle, label='D')
D.inside_triangle_constraint(triangle)
A, B, C = triangle.points
# H = intersection of line AD with line BC; G = intersection of line CD
# with line AB (both placed on the auxiliary layer).
H = A.line_through(D).intersection_point(B.line_through(C), layer='auxiliary')
G = C.line_through(D).intersection_point(A.line_through(B), layer='auxiliary')
# Problem hypothesis: |AB| = |CD|.
A.segment(B).congruent_constraint(C.segment(D), comment='Given: |AB| = |CD|')
# Draw the sketch, including the auxiliary points H and G.
sketch(scene, extra_points=(H, G))
|
en
| 0.583898
|
# "Romantics of Geometry" group on Facebook, problem 4578 # https://www.facebook.com/groups/parmenides52/permalink/2779763428804012/
| 2.701976
| 3
|
tests/selenium/alarms_test/Alarms_Menu_test.py
|
sivaanil/laravel
| 1
|
6627356
|
<reponame>sivaanil/laravel<filename>tests/selenium/alarms_test/Alarms_Menu_test.py<gh_stars>1-10
__author__ = 'andrew.bascom'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import c2_test_case
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import TimeoutException
import unittest, time
class AlarmsMenuTest(c2_test_case.C2TestCase):
    def test_uncheck_auto_resize_ensure_it_stays_checked_C10826(self):
        """Uncheck the alarm grid's Auto Resize box, drag the splitter, then
        reopen the alarm menu and verify the checkbox state persisted
        (i.e. it stayed unchecked)."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the auto resize checkbox to be displayed then store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGrid_resizeCB"))
            )
        except TimeoutException:
            self.fail("Alarm grid Auto Resize checkbox could not be found within the allotted " + str(self.config.mid_timeout) + " seconds")
        auto_resize_button = driver.find_element_by_id("alarmsGrid_resizeCB")
        # If the auto resize checkbox is selected click it to deselect it
        if (auto_resize_button.is_selected() == True):
            auto_resize_button.click()
        # Wait till the divider is available and store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
            )
        except TimeoutException:
            self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
        divider_element = driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
        # Emulate moving the mouse to the divider and move it off to the dragable part
        actions = ActionChains(driver)
        actions.move_to_element(divider_element)
        actions.move_by_offset(0, 120)
        actions.perform()
        # Emulate clicking down on the divider
        actions = ActionChains(driver)
        actions.click_and_hold()
        actions.perform()
        # Emulate moving the mouse to the right 150 pixels (one pixel per chain)
        for index in range(0, 150):
            actions = ActionChains(driver)
            actions.move_by_offset(1, 0)
            actions.perform()
        # Emulate the mouse releasing the divider
        actions = ActionChains(driver)
        actions.release()
        actions.perform()
        # Wait for the collapsed options menu button to display and click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmMenu"))
            )
        except TimeoutException:
            self.fail("alarm menu button not displaying in the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("alarmMenu").click()
        # Wait for the auto resize button to be displayed and store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGrid_resizeCB"))
            )
        except TimeoutException:
            self.fail("Alarm grid auto resize checkbox did not display within the allotted " + str(self.config.mid_timeout) + " seconds")
        auto_resize_button = driver.find_element_by_id("alarmsGrid_resizeCB")
        # Check that the auto resize button is not selected; if it is selected fail the test
        self.assertEqual(auto_resize_button.is_selected(), False, "Auto Resize button state did not remain set as expected")
    def test_select_columns_displays_dialog_C10218(self):
        """Open the alarm grid's Select Columns dialog and close it again."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs, and click the select column button
        AlarmsMenuTest.change_panel_widths(self, driver)
        driver.find_element_by_id("alarmsGridColumnButton").click()
        # Wait for the select column dialog to display and then click its close button
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
    def test_clicking_excel_export_generates_excel(self):
        """Click the Excel export button and verify a new window opens
        (a window opens briefly when a download link is clicked)."""
        # Get the driver
        driver = self.config.driver
        # Get the number of windows currently open
        num_windows_open = len(driver.window_handles)
        # Move the divider to ensure best access to all buttons, tabs, and columns; Wait for the export excel button to display then click it
        AlarmsMenuTest.change_panel_widths(self, driver)
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridExcelExportButton"))
            )
        except TimeoutException:
            self.fail("Export button didn't unhide")
        driver.find_element_by_id("alarmsGridExcelExportButton").click()
        # Check every second for the number of windows open to become greater than the original number of windows open. After the max amount
        # of time if the number of windows open didn't increase fail the test. (A window opens briefly when a download link is clicked)
        for sec in range(0, self.config.long_timeout):
            window_list = driver.window_handles
            if (len(window_list) >= num_windows_open + 1):
                break
            elif (sec >= self.config.long_timeout - 1):
                self.fail("Excel download page did not open within the allotted " + str(self.config.long_timeout) + " seconds")
            time.sleep(1)
    def test_select_active_alarms_tab(self):
        """Switch to the Active Alarms tab, add the Cleared Time column,
        sort by it, and verify the first row has no cleared date."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the active tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-active-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-active-alarms"))
            )
        except TimeoutException:
            self.fail("Active Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-active-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Find the select columns button and click it
        driver.find_element_by_id("alarmsGridColumnButton").click()
        # Wait for the select columns dialog to display
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        # Get the list of available columns and loop through it to find the Cleared Time column then click it
        available_columns = driver.find_element_by_id("alarmsGrid_availableColumnsList").find_elements_by_tag_name("li")
        for column in available_columns:
            if (column.text == "Cleared Time"):
                column.click()
                break
        # Find the add column button and click it then close the select column dialog. Wait for the alarm grid to update.
        driver.find_element_by_id("alarmsGrid_selectButtonAdd").click()
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm grid columns then loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            # Re-fetch the columns each pass — clicking/sorting rebuilds the header elements
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # find the Cleared Time column then loop 3 times, clicking the column header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                # Ensure the first alarm has no clear date; if it does fail the test
                self.assertEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                 "Found alarm that is not Active, test failed!")
                break
    def test_select_alarm_history_tab_C10231(self):
        """Switch to the Alarm History (cleared) tab, sort by Cleared Time,
        and verify the first row has a cleared date."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the cleared tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-cleared-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-cleared-alarms"))
            )
        except TimeoutException:
            self.fail("Alarm History tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-cleared-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            # Re-fetch the columns each pass — sorting rebuilds the header elements
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the Cleared Time column then loop 3 times clicking the header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                # Check that the first alarm has a cleared date; if not fail the test
                self.assertNotEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                    "Found alarm that is not cleared, test failed!")
                break
    def test_select_all_alarms_tab_C10229(self):
        """Switch to the All Alarms tab, sort by Cleared Time, and
        spot-check the first row's cleared date after sorting."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the all alarms tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-all-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-all-alarms"))
            )
        except TimeoutException:
            self.fail("All Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-all-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            # Re-fetch the columns each pass — sorting rebuilds the header elements
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the Cleared Time column then loop 3 times clicking the header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                # If the sort is ascending then the first alarm should be without a cleared date, check this if failed fail the test. If
                # the sort is descending then the first alarm should have a cleared date, check this if failed then fail the test.
                # NOTE(review): these branches test `index` (the column's position),
                # not the click count `num` — confirm this is the intended check.
                if (index == 0):
                    self.assertEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                     "Found alarm that is not cleared, test failed!")
                elif (index == 1):
                    self.assertNotEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                        "Found alarm that is not cleared, test failed!")
                break
    # This test case is unfinished since there is no way to distingish an ignored alarm from a regular one
    def test_select_ignored_alarms_tab_C10232(self):
        """Switch to the Ignored Alarms tab and locate the Severity column.

        Unfinished: there is currently no way to distinguish an ignored
        alarm from a regular one, so no assertion is made.
        """
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the ignored tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-ignored-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-ignored-alarms"))
            )
        except TimeoutException:
            self.fail("Ignored Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-ignored-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the columns from the alarm grid and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the severity column then break
            if (column.text == "Severity"):
                break
    def test_select_custom_filters_tab_C10233(self):
        """Open the Custom Filters tab's dialog and close it again."""
        # Get the driver
        driver = self.config.driver
        # Wait for the custom tab to load and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-custom-filters"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-custom-filters"))
            )
        except TimeoutException:
            self.fail("Custom filters tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-custom-filters").click()
        # Wait for the custom filters dialog to display and then click its close button
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsFilter"))
            )
        except TimeoutException:
            self.fail("Custom filters dialog didn't load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_xpath("//div[@id='alarmsFilter']/div/div/div[2]/div").click()
    def test_reset_columns_button_C135244(self):
        """Add the Cleared Time column, click Reset Columns, and verify the
        added column was removed from the alarm grid."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Find the select column button and click it then wait for the select column dialog to display
        driver.find_element_by_id("alarmsGridColumnButton").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        # Get the available columns and find the Cleared Time column click it and then click the add button. Close the select columns dialog
        # and then wait for the alarm grid to update
        available_columns = driver.find_element_by_id("alarmsGrid_availableColumnsList").find_elements_by_tag_name("li")
        for column in available_columns:
            if (column.text == "Cleared Time"):
                column.click()
                break
        driver.find_element_by_id("alarmsGrid_selectButtonAdd").click()
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Find and click the reset button then wait for the alarm grid to update
        driver.find_element_by_id("alarmsGridResetColumnsButton").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them, check that there is no cleared time column, if there is fail the test.
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for column in alarm_columns:
            self.assertNotEqual(column.text, "Cleared Time", "The Alarm grid columns did not reset.")
## helper methods ##
def change_panel_widths(self, web_driver):
    """Drag the canvas splitter left until the alarms panel divider sits at
    most 309px from the left edge, giving the grid's buttons, columns, and
    tabs enough room to be visible.

    web_driver: the active Selenium WebDriver instance.
    """
    # Wait for the splitter to be available and then store it.
    try:
        WebDriverWait(web_driver, self.config.mid_timeout).until(
            expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
        )
    except TimeoutException:
        self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
    divider_element = web_driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
    # Find the location of the divider horizontally, check that it isn't more than the max chosen to allow best viewing of the grid (309).
    left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
    if (left_pos > 309):
        # Set up an action chain to emulate moving the mouse to the divider and offsetting it a bit.
        actions = ActionChains(web_driver)
        actions.move_to_element(divider_element)
        actions.move_by_offset(0, 120)
        actions.perform()
        # Set up an action chain to emulate holding down on the mouse's location
        actions = ActionChains(web_driver)
        actions.click_and_hold()
        actions.perform()
        # loop through the necessary amount of pixels to get the divider to the intended location. On each iteration set up an action
        # chain to emulate moving the mouse by -1 pixel. (I'm not sure why you can't just emulate the whole movement at once, but I
        # tried and it wouldn't work, for some reason this does so I go with what works)
        for index in range(0, left_pos - 309):
            actions = ActionChains(web_driver)
            actions.move_by_offset(-1, 0)
            actions.perform()
        # Set up an action chain to emulate releasing the mouse.
        actions = ActionChains(web_driver)
        actions.release()
        actions.perform()
    # Lastly check the position of the divider every second just to make sure it is in the right location before leaving the function.
    for sec in range(0, self.config.mid_timeout):
        left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
        if (left_pos <= 309):
            break
        time.sleep(1)
def get_alarm_columns(self, web_driver):
    """Return the list of column-header elements of the alarms grid.

    web_driver: the active Selenium WebDriver instance.
    """
    timeout = self.config.mid_timeout
    # Block until the header container exists in the DOM.
    try:
        WebDriverWait(web_driver, timeout).until(
            expected_conditions.presence_of_element_located((By.ID, "columntablealarmsGrid"))
        )
    except TimeoutException:
        self.fail("column headers did not load within " + str(timeout) + " seconds")
    container = web_driver.find_element_by_id("columntablealarmsGrid")
    # Each header cell is marked with the ARIA "columnheader" role.
    return container.find_elements_by_css_selector('[role="columnheader"]')
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
__author__ = 'andrew.bascom'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import c2_test_case
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import TimeoutException
import unittest, time
class AlarmsMenuTest(c2_test_case.C2TestCase):
    """Selenium UI tests for the Alarms panel menu.

    Covers tab switching (Active / History / All / Ignored / Custom Filters),
    column selection and reset, Excel export, and persistence of the
    Auto Resize setting. Helper methods at the bottom manage the canvas
    splitter position and fetch the grid's column headers.
    """

    def test_uncheck_auto_resize_ensure_it_stays_checked_C10826(self):
        """Deselect Auto Resize, shrink the panel so the menu collapses,
        reopen the menu, and verify the checkbox stayed deselected."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the auto resize checkbox to be displayed then store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGrid_resizeCB"))
            )
        except TimeoutException:
            self.fail("Alarm grid Auto Resize checkbox could not be found within the allotted " + str(self.config.mid_timeout) + " seconds")
        auto_resize_button = driver.find_element_by_id("alarmsGrid_resizeCB")
        # If the auto resize checkbox is selected click it to deselect it
        if (auto_resize_button.is_selected() == True):
            auto_resize_button.click()
        # Wait till the divider is available and store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
            )
        except TimeoutException:
            self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
        divider_element = driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
        # Emulate moving the mouse to the divider and move it off to the draggable part
        actions = ActionChains(driver)
        actions.move_to_element(divider_element)
        actions.move_by_offset(0, 120)
        actions.perform()
        # Emulate clicking down on the divider
        actions = ActionChains(driver)
        actions.click_and_hold()
        actions.perform()
        # Emulate moving the mouse to the right 150 pixels
        for index in range(0, 150):
            actions = ActionChains(driver)
            actions.move_by_offset(1, 0)
            actions.perform()
        # Emulate the mouse releasing the divider
        actions = ActionChains(driver)
        actions.release()
        actions.perform()
        # Wait for the collapsed options menu button to display and click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmMenu"))
            )
        except TimeoutException:
            self.fail("alarm menu button not displaying in the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("alarmMenu").click()
        # Wait for the auto resize button to be displayed and store it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGrid_resizeCB"))
            )
        except TimeoutException:
            self.fail("Alarm grid auto resize checkbox did not display within the allotted " + str(self.config.mid_timeout) + " seconds")
        auto_resize_button = driver.find_element_by_id("alarmsGrid_resizeCB")
        # Check that the auto resize button is not selected if it is selected fail the test
        self.assertEqual(auto_resize_button.is_selected(), False, "Auto Resize button state did not remain set as expected")

    def test_select_columns_displays_dialog_C10218(self):
        """Open the Select Columns dialog from the alarms grid, then close it."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs, and click the select column button
        AlarmsMenuTest.change_panel_widths(self, driver)
        driver.find_element_by_id("alarmsGridColumnButton").click()
        # Wait for the select column dialog to display and then click its close button
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()

    def test_clicking_excel_export_generates_excel(self):
        """Click the Excel export button and verify a download window opens."""
        # Get the driver
        driver = self.config.driver
        # Get the number of windows currently open
        num_windows_open = len(driver.window_handles)
        # Move the divider to ensure best access to all buttons, tabs, and columns; Wait for the export excel button to display then click it
        AlarmsMenuTest.change_panel_widths(self, driver)
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridExcelExportButton"))
            )
        except TimeoutException:
            self.fail("Export button didn't unhide")
        driver.find_element_by_id("alarmsGridExcelExportButton").click()
        # Check every second for the number of windows open to become greater than the original number of windows open. After the max amount
        # of time if the number of windows open didn't increase fail the test. (A window opens briefly when a download link is clicked)
        for sec in range(0, self.config.long_timeout):
            window_list = driver.window_handles
            if (len(window_list) >= num_windows_open + 1):
                break
            elif (sec >= self.config.long_timeout - 1):
                self.fail("Excel download page did not open within the allotted " + str(self.config.long_timeout) + " seconds")
            time.sleep(1)

    def test_select_active_alarms_tab(self):
        """Switch to the Active tab, add the Cleared Time column, sort by it,
        and verify the first alarm row has no cleared date."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the active tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-active-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-active-alarms"))
            )
        except TimeoutException:
            self.fail("Active Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-active-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Find the select columns button and click it
        driver.find_element_by_id("alarmsGridColumnButton").click()
        # Wait for the select columns dialog to display
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        # Get the list of available columns and loop through it to find the Cleared Time column then click it
        available_columns = driver.find_element_by_id("alarmsGrid_availableColumnsList").find_elements_by_tag_name("li")
        for column in available_columns:
            if (column.text == "Cleared Time"):
                column.click()
                break
        # Find the add column button and click it then close the select column dialog. Wait for the alarm grid to update.
        driver.find_element_by_id("alarmsGrid_selectButtonAdd").click()
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm grid columns then loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            # Re-fetch the headers each pass: sorting re-renders the grid and stales the elements.
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # find the Cleared Time column then loop 3 times, clicking the column header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update then ensure the alarm has no clear date, if it does fail the test
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                    self.assertEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                     "Found alarm that is not Active, test failed!")
                break

    def test_select_alarm_history_tab_C10231(self):
        """Switch to the Alarm History (cleared) tab, sort by Cleared Time,
        and verify the first alarm row has a cleared date."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the cleared tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-cleared-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-cleared-alarms"))
            )
        except TimeoutException:
            self.fail("Alarm History tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-cleared-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the Cleared Time column then loop 3 times clicking the header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update then check that there is a date if not then fail the test.
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                    self.assertNotEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                        "Found alarm that is not cleared, test failed!")
                break

    def test_select_all_alarms_tab_C10229(self):
        """Switch to the All Alarms tab, sort by Cleared Time repeatedly, and
        check the first row's cleared date after each sort."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the all alarms tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-all-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-all-alarms"))
            )
        except TimeoutException:
            self.fail("All Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-all-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the Cleared Time column then loop 3 times clicking the header to sort
            if (column.text == "Cleared Time"):
                for num in range(0, 3):
                    alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
                    column = alarm_columns[index]
                    column.click()
                    # Wait for the alarm grid to update
                    try:
                        WebDriverWait(driver, self.config.mid_timeout).until(
                            expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
                        )
                    except TimeoutException:
                        self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
                    # If the sort is ascending then the first alarm should be without a cleared date, check this if failed fail the test. If
                    # the sort is descending then the first alarm should have a cleared date, check this if failed then fail the test.
                    # NOTE(review): these branches compare `index` (the column's header position), but the
                    # ascending/descending wording above suggests the click counter `num` was intended — confirm.
                    if (index == 0):
                        self.assertEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                         "Found alarm that is not cleared, test failed!")
                    elif (index == 1):
                        self.assertNotEqual(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[6]/div").text, "",
                                            "Found alarm that is not cleared, test failed!")
                break

    # This test case is unfinished since there is no way to distinguish an ignored alarm from a regular one
    def test_select_ignored_alarms_tab_C10232(self):
        """Switch to the Ignored Alarms tab and locate the Severity column.
        (Unfinished — ignored alarms cannot yet be distinguished.)"""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Wait for the ignored tab to display and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-ignored-alarms"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-ignored-alarms"))
            )
        except TimeoutException:
            self.fail("Ignored Alarms tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-ignored-alarms").click()
        # Wait for the alarm grid to update
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the columns from the alarm grid and loop through them
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for index in range(0, len(alarm_columns)):
            alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
            column = alarm_columns[index]
            # Find the severity column then break
            if (column.text == "Severity"):
                break

    def test_select_custom_filters_tab_C10233(self):
        """Open the Custom Filters tab, wait for its dialog, then close it."""
        # Get the driver
        driver = self.config.driver
        # Wait for the custom tab to load and then click it
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "tab-custom-filters"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "tab-custom-filters"))
            )
        except TimeoutException:
            self.fail("Custom filters tab did not load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_id("tab-custom-filters").click()
        # Wait for the custom filters dialog to display and then click its close button
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsFilter"))
            )
        except TimeoutException:
            self.fail("Custom filters dialog didn't load within the allotted " + str(self.config.mid_timeout) + " seconds")
        driver.find_element_by_xpath("//div[@id='alarmsFilter']/div/div/div[2]/div").click()

    def test_reset_columns_button_C135244(self):
        """Add the Cleared Time column, click Reset Columns, and verify the
        column set reverted (Cleared Time no longer present)."""
        # Get the driver
        driver = self.config.driver
        # Move the divider to allow room for best viewing of the buttons, columns, and tabs
        AlarmsMenuTest.change_panel_widths(self, driver)
        # Find the select column button and click it then wait for the select column dialog to display
        driver.find_element_by_id("alarmsGridColumnButton").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
            )
        except TimeoutException:
            self.fail("The Select Columns dialog did not load within the allotted " + str(self.config.mid_timeout) + " seconds.")
        # Get the available columns and find the Cleared Time column click it and then click the add button. Close the select columns dialog
        # and then wait for the alarm grid to update
        available_columns = driver.find_element_by_id("alarmsGrid_availableColumnsList").find_elements_by_tag_name("li")
        for column in available_columns:
            if (column.text == "Cleared Time"):
                column.click()
                break
        driver.find_element_by_id("alarmsGrid_selectButtonAdd").click()
        driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Find and click the reset button then wait for the alarm grid to update
        driver.find_element_by_id("alarmsGridResetColumnsButton").click()
        try:
            WebDriverWait(driver, self.config.mid_timeout).until(
                expected_conditions.invisibility_of_element_located((By.XPATH, "//div[@id='alarmsGrid']/div/div"))
            )
        except TimeoutException:
            self.fail("Alarm row failed to load within alotted " + str(self.config.mid_timeout) + " seconds")
        # Get the alarm columns and loop through them, check that there is no cleared time column, if there is fail the test.
        alarm_columns = AlarmsMenuTest.get_alarm_columns(self, driver)
        for column in alarm_columns:
            self.assertNotEqual(column.text, "Cleared Time", "The Alarm grid columns did not reset.")

    ## helper methods ##
    def change_panel_widths(self, web_driver):
        """Drag the canvas splitter left until the alarms panel divider sits
        at most 309px from the left edge, so the grid's buttons, columns,
        and tabs are all visible.

        web_driver: the active Selenium WebDriver instance.
        """
        # Wait for the splitter to be available and then store it.
        try:
            WebDriverWait(web_driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
            )
        except TimeoutException:
            self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
        divider_element = web_driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
        # Find the location of the divider horizontally, check that it isn't more than the max chosen to allow best viewing of the grid (309).
        left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
        if (left_pos > 309):
            # Set up an action chain to emulate moving the mouse to the divider and offsetting it a bit.
            actions = ActionChains(web_driver)
            actions.move_to_element(divider_element)
            actions.move_by_offset(0, 120)
            actions.perform()
            # Set up an action chain to emulate holding down on the mouse's location
            actions = ActionChains(web_driver)
            actions.click_and_hold()
            actions.perform()
            # loop through the necessary amount of pixels to get the divider to the intended location. On each iteration set up an action
            # chain to emulate moving the mouse by -1 pixel. (I'm not sure why you can't just emulate the whole movement at once, but I
            # tried and it wouldn't work, for some reason this does so I go with what works)
            for index in range(0, left_pos - 309):
                actions = ActionChains(web_driver)
                actions.move_by_offset(-1, 0)
                actions.perform()
            # Set up an action chain to emulate releasing the mouse.
            actions = ActionChains(web_driver)
            actions.release()
            actions.perform()
        # Lastly check the position of the divider every second just to make sure it is in the right location before leaving the function.
        for sec in range(0, self.config.mid_timeout):
            left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
            if (left_pos <= 309):
                break
            time.sleep(1)

    def get_alarm_columns(self, web_driver):
        """Return the list of column-header elements of the alarms grid."""
        # Wait for the column headers to load then store em.
        try:
            WebDriverWait(web_driver, self.config.mid_timeout).until(
                expected_conditions.presence_of_element_located((By.ID, "columntablealarmsGrid"))
            )
        except TimeoutException:
            self.fail("column headers did not load within " + str(self.config.mid_timeout) + " seconds")
        column_header_container_element = web_driver.find_element_by_id("columntablealarmsGrid")
        # Return a list of each column header
        return column_header_container_element.find_elements_by_css_selector('[role="columnheader"]')
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
en
| 0.801883
|
# -*- coding: utf-8 -*- #Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Wait for the auto resize checkbox to be displayed then store it # If the auto resize chackbox is selected click it to deselect it # Wait till the divider is available and store it # Emulate moving the mouse to the divider and move it off to the dragable part # Emulate clicking down on the divider # Emulate moving the mouse to the right 150 pixels # Emulate the mouse releasing the divider # Wait for the collapsed options menu button to display and click it # Wait for the auto resize button to be displayed and store it # Check that the auto resize button is not selected if it is selected fail the test # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs, and click the select column button # Wait for the select column dialog to display and then click its close button # Get the driver # Get the number of windows currently open # Move the divider to ensure best access to all buttons, tabs, and columns; Wait for the export excel button to display then click it # Check every second for the number of windows open to become greater then the original number of windows open. After the max amount # of time if the number of windows open didn't increase fail the test. (A window opens briefly when a download link is clicked) # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Wait for the active tab to display and then click it # Wait for the alarm grid to update # Find the select columns button and click it # Wait for the select columns dialog to display # Get the list of available columns and loop through it to find the Cleared Time column then click it # Find the add column button and click it then close the select column dialog. Wait for the alarm grid to update. 
# Get the alarm grid columns then loop through them # find the Cleared Time column then loop 3 times, clicking the column header to sort # Wait for the alarm grid to update then ensure the alarm has no clear date, if it does fail the test # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Wait for the cleared tab to display and then click it # Wait for the alarm grid to update # Get the alarm columns and loop through them # Find the Cleared Time column then loop 3 times clicking the header to sort # Wait for the alarm grid to update then check that there is a date if not then fail the test. # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Wait for the all alarms tab to display and then click it # Wait for the alarm grid to update # Get the alarm columns and loop through them # Find the Cleared Time column then loop 3 times clicking the header to sort # Wait for the alarm grid to update # If the sort is ascending then the first alarm should be without a cleared date, check this if failed fail the test. If # the sort is descending then the first alarm should have a cleared date, check this if failed then fail the test. 
# This test case is unfinished since there is no way to distingish an ignored alarm from a regular one # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Wait for the ignored tab to display and then click it # Wait for the alarm grid to update # Get the columns from the alarm grid and loop through them # Find the severity column then break # Get the driver # Wait for the custom tab to load and then click it # Wait for the custom filters dialog to display and then click its close button # Get the driver # Move the divider to allow room for best viewing of the buttons, columns, and tabs # Find the select column button and click it then wait for the select column dialog to display # Get the available columns and find the Cleared Time column click it and then click the add button. Close the select columns dialog # and then wait for the alarm grid to update # Find and click the reset button then wait for the alarm grid to update # Get the alarm columns and loop through them, check that there is no cleared time column, if there is fail the test. ## helper methods ## # Wait for the splitter to be available and then store it. # Find the location of the divider horizontally, check that it isn't more then the max chosen to allow best viewing of the grid (309). # Set up an action chain to emulate moving the mouse to the divider and offsetting it a bit. # Set up an action chain to emulate holding down on the mouse's location # loop through the necessary amount of pixels to get the divider to the intended location. On each iteration set up an action # chain to emulate moving the mouse by -1 pixel. (I'm not sure why you can't just emulate the whole movement at once, but I # tried and it wouldn't work, for some reason this does so I go with what works) # Set up an action chain to emulate releasing the mouse. 
# Lastly check the position of the divider every second just to make sure it is in the right location before leaving the function. # Wait for the column headers to load then store em. # Return a list of each column header
| 2.247168
| 2
|
mac_face_detection.py
|
MinxZ/iqiyi_ai
| 0
|
6627357
|
<reponame>MinxZ/iqiyi_ai<gh_stars>0
import glob
import multiprocessing
import os
import pickle
from collections import defaultdict

import cv2 as cv2
import face_recognition
import numpy as np
from tqdm import tqdm

from model import resizeAndPad

# %matplotlib inline
# %%
# Build cropped-face training arrays from the IQIYI part-1 frame dumps.
data_path = '../data/IQIYI_VID_DATA_Part1'
png_list = glob.glob(f'{data_path}/png_train/*.png')

# train.txt lines look like "<video_file_path> <person_id>".
with open(data_path + '/train.txt') as f:
    contents = f.readlines()
id2file = defaultdict(list)
file2id = {}
for index, line in enumerate(contents):
    file_path, id = line.rstrip('\n').split(' ')
    id2file[int(id)].append(file_path)
    # Key on the trailing 10 characters of the path (the video file name stem).
    file2id[file_path[-10:]] = int(id)

# %%
# Detect the first face in every training frame, crop it, pad/resize to
# 224x224, and collect the RGB crops with their person ids.
faces = []
ids = []
for index1, img_path in tqdm(enumerate(png_list)):
    bgr_image = cv2.imread(img_path)
    image = face_recognition.load_image_file(img_path)
    face_locations = face_recognition.face_locations(image)
    if face_locations:
        # face_recognition returns (top, right, bottom, left) boxes.
        top, right, bottom, left = face_locations[0]
        bgr_image = bgr_image[top:bottom, left:right, :]
        bgr_image = resizeAndPad(bgr_image, (224, 224))
        cv2.imwrite(f'{data_path}/face_train/{img_path[-20:]}', bgr_image)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces.append(rgb_image)
        ids.append(file2id[img_path[-19:-9]])
faces = np.array(faces)
ids = np.array(ids)
np.save('../x_train', faces)

# One-hot encode the labels (person ids are 1-based).
num_class = np.max(ids)
num_sample = ids.shape[0]
y = np.zeros((num_sample, num_class), dtype=np.int8)
for i in range(num_sample):
    y[i, ids[i] - 1] = 1
np.save('../y_train', y)

# %%
# Same pipeline for the validation split.
# val.txt lines look like "<person_id> <file_path> [<file_path> ...]".
val_png_list = glob.glob(f'{data_path}/png_val/*.png')
with open(data_path + '/val.txt') as f:
    contents = f.readlines()
val_id2file = defaultdict(list)
val_file2id = {}
for index, line in enumerate(contents):
    id_file_path = line.rstrip('\n').split(' ')
    id = int(id_file_path[0])
    val_id2file[id].extend(id_file_path[1:])
    for index1, file_path in enumerate(id_file_path[1:]):
        val_file2id[file_path[-10:]] = id
with open("../data/val_file2id.p", "wb") as fp:
    pickle.dump(val_file2id, fp)

# Crop validation faces; val2face maps each video key to the row indices of
# its face crops inside the `faces` array.
faces = []
val2face = defaultdict(list)
i = -1
for index, img_path in tqdm(enumerate(val_png_list)):
    bgr_image = cv2.imread(img_path)
    image = face_recognition.load_image_file(img_path)
    face_locations = face_recognition.face_locations(image)
    if face_locations:
        top, right, bottom, left = face_locations[0]
        bgr_image = bgr_image[top:bottom, left:right, :]
        bgr_image = resizeAndPad(bgr_image, (224, 224))
        faces.append(bgr_image)
        i += 1
        val2face[img_path[-19:-9]].append(i)
with open("../data/val2face.p", "wb") as fp:
    pickle.dump(val2face, fp)

faces = np.array(faces)
np.save('../data/x_val', faces)

# BUG FIX: the original referenced the undefined names x_val, file2face, and
# file2id here (and an unrelated pickle.load of average_color_feature.p);
# build y_val from the arrays/mappings actually created above instead.
num_sample = faces.shape[0]
y_val = np.zeros((num_sample, 574), dtype=np.int8)
for file, face_indices in val2face.items():
    id_true = val_file2id[file]
    for j in face_indices:
        y_val[j][id_true - 1] = 1
np.save('../data/y_val', y_val)
|
"""Preprocess IQIYI VID frames: detect faces, crop/resize to 224x224 and build
one-hot labeled train/val arrays for face identification."""
import glob
import multiprocessing
import os
import pickle
from collections import defaultdict

import cv2
import face_recognition
import numpy as np
from tqdm import tqdm

from model import resizeAndPad

# %matplotlib inline
# %%
# ---- Training set: detect, crop, resize and label faces ----
data_path = '../data/IQIYI_VID_DATA_Part1'
png_list = glob.glob(f'{data_path}/png_train/*.png')
with open(data_path + '/train.txt') as f:
    contents = f.readlines()

# train.txt lines look like "<file_path> <person_id>".
id2file = defaultdict(list)
file2id = {}
for index, line in enumerate(contents):
    file_path, person_id = line.rstrip('\n').split(' ')
    id2file[int(person_id)].append(file_path)
    # Key on the trailing 10 characters of the path (video id + extension).
    file2id[file_path[-10:]] = int(person_id)

# %%
faces = []
ids = []
for index1, img_path in tqdm(enumerate(png_list)):
    bgr_image = cv2.imread(img_path)
    image = face_recognition.load_image_file(img_path)
    face_locations = face_recognition.face_locations(image)
    if not len(face_locations) == 0:
        # face_recognition returns boxes as (top, right, bottom, left).
        y1, x1, y2, x2 = face_locations[0]
        bgr_image = bgr_image[y1:y2, x2:x1, :]
        bgr_image = resizeAndPad(bgr_image, (224, 224))
        cv2.imwrite(f'{data_path}/face_train/{img_path[-20:]}', bgr_image)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces.append(rgb_image)
        ids.append(file2id[img_path[-19:-9]])

faces = np.array(faces)
ids = np.array(ids)
np.save('../x_train', faces)

# One-hot encode labels in one vectorized step; IDs are 1-based, columns 0-based.
num_class = int(np.max(ids))
num_sample = ids.shape[0]
y = np.zeros((num_sample, num_class), dtype=np.int8)
y[np.arange(num_sample), ids - 1] = 1
np.save('../y_train', y)

# %%
# ---- Validation set: detect, crop and resize faces ----
val_png_list = glob.glob(f'{data_path}/png_val/*.png')
with open(data_path + '/val.txt') as f:
    contents = f.readlines()

# val.txt lines look like "<person_id> <file> <file> ..." (one identity per line).
val_id2file = defaultdict(list)
val_file2id = {}
for index, line in enumerate(contents):
    id_file_path = line.rstrip('\n').split(' ')
    person_id = int(id_file_path[0])
    val_id2file[person_id].extend(id_file_path[1:])
    for index1, file_path in enumerate(id_file_path[1:]):
        val_file2id[file_path[-10:]] = person_id
pickle.dump(val_file2id, open("../data/val_file2id.p", "wb"))

faces = []
val2face = defaultdict(list)  # video file key -> indices of its crops in `faces`
i = -1
for index, img_path in tqdm(enumerate(val_png_list)):
    bgr_image = cv2.imread(img_path)
    image = face_recognition.load_image_file(img_path)
    face_locations = face_recognition.face_locations(image)
    if not len(face_locations) == 0:
        # face_recognition returns boxes as (top, right, bottom, left).
        y1, x1, y2, x2 = face_locations[0]
        bgr_image = bgr_image[y1:y2, x2:x1, :]
        bgr_image = resizeAndPad(bgr_image, (224, 224))
        faces.append(bgr_image)
        i += 1
        val2face[img_path[-19:-9]].append(i)
pickle.dump(val2face, open("../data/val2face.p", "wb"))

# NOTE: removed the unused `favorite_color = pickle.load(...)` line, which read
# an unrelated file (and could crash if missing) and discarded the result.
faces = np.array(faces)
np.save('../data/x_val', faces)

# BUGFIX: the original read `x_val.shape[0]`, iterated `file2face` and looked up
# `file2id`, none of which exist here; use the validation-side names instead.
num_sample = faces.shape[0]
y_val = np.zeros((num_sample, 574), dtype=np.int8)  # 574 identities — TODO confirm
for file, face_idxs in val2face.items():
    id_true = val_file2id[file]
    for j in face_idxs:
        y_val[j][id_true - 1] = 1
np.save('../data/y_val', y_val)
|
tr
| 0.120857
|
# %matplotlib inline # %% # %% # %%
| 2.473807
| 2
|
openks/models/pytorch/mmd_modules/ThreeDVG/scripts/ScanRefer_train.py
|
vivym/OpenKS
| 0
|
6627358
|
<filename>openks/models/pytorch/mmd_modules/ThreeDVG/scripts/ScanRefer_train.py<gh_stars>0
import os
import sys
import json
import h5py
import argparse
import importlib
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pickle
from torch.utils.data import DataLoader
from datetime import datetime
from copy import deepcopy
sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
from ..data.scannet.model_util_scannet import ScannetDatasetConfig
from ..lib.dataset import ScannetReferenceDataset
from ..lib.solver import Solver
from ..lib.config import CONF
from ..models.refnet import RefNet
from ..scripts.utils.AdamW import AdamW
from ..scripts.utils.script_utils import set_params_lr_dict
# Full ScanRefer language annotations per split, loaded once at import time.
# Each is a list of dicts carrying at least a "scene_id" key (see get_scanrefer).
SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json")))
SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json")))
# constants
# ScanNet dataset configuration (class count, heading bins, mean sizes, ...).
DC = ScannetDatasetConfig()
def get_dataloader(args, scanrefer, scanrefer_new, all_scene_list, split, config, augment, shuffle=True):
    """Create the reference dataset for one split plus its DataLoader.

    Returns:
        (ScannetReferenceDataset, DataLoader)
    """
    dataset_kwargs = {
        "scanrefer": scanrefer[split],
        "scanrefer_new": scanrefer_new[split],
        "scanrefer_all_scene": all_scene_list,
        "split": split,
        "num_points": args.num_points,
        "use_height": not args.no_height,
        "use_color": args.use_color,
        "use_normal": args.use_normal,
        "use_multiview": args.use_multiview,
        "lang_num_max": args.lang_num_max,
        "augment": augment,
        "shuffle": shuffle,
    }
    dataset = ScannetReferenceDataset(**dataset_kwargs)
    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=4)
    return dataset, loader
def get_model(args):
    """Build RefNet on GPU, optionally seeding it from a pretrained VoteNet.

    NOTE(review): reconstructed from a whitespace-stripped source; the final
    `.cuda()` is assumed to run unconditionally — confirm.
    """
    # Input feature width: multiview (128) + normal (3) + color (3) + height (1).
    input_channels = (
        int(args.use_multiview) * 128
        + int(args.use_normal) * 3
        + int(args.use_color) * 3
        + int(not args.no_height)
    )
    model = RefNet(
        num_class=DC.num_class,
        num_heading_bin=DC.num_heading_bin,
        num_size_cluster=DC.num_size_cluster,
        mean_size_arr=DC.mean_size_arr,
        input_feature_dim=input_channels,
        num_proposal=args.num_proposals,
        use_lang_classifier=(not args.no_lang_cls),
        use_bidir=args.use_bidir,
        no_reference=args.no_reference,
        dataset_config=DC,
    )

    if args.use_pretrained:
        print("loading pretrained VoteNet...")
        pretrained_model = RefNet(
            num_class=DC.num_class,
            num_heading_bin=DC.num_heading_bin,
            num_size_cluster=DC.num_size_cluster,
            mean_size_arr=DC.mean_size_arr,
            num_proposal=args.num_proposals,
            input_feature_dim=input_channels,
            use_bidir=args.use_bidir,
            no_reference=True,
            dataset_config=DC,
        )
        pretrained_path = os.path.join(CONF.PATH.OUTPUT, args.use_pretrained, "model_last.pth")
        pretrained_model.load_state_dict(torch.load(pretrained_path), strict=False)

        # Reuse the whole detection stack from the pretrained model.
        model.backbone_net = pretrained_model.backbone_net
        model.vgen = pretrained_model.vgen
        model.proposal = pretrained_model.proposal

        if args.no_detection:
            # Freeze the detection pipeline: backbone, voting, proposal modules.
            for module in (model.backbone_net, model.vgen, model.proposal):
                for param in module.parameters():
                    param.requires_grad = False

    model = model.cuda()
    return model
def get_num_params(model):
    """Count the trainable parameters of `model`.

    Args:
        model: any ``torch.nn.Module``.

    Returns:
        int: total number of elements across parameters with
        ``requires_grad=True``.
    """
    # Tensor.numel() avoids the numpy round-trip of np.prod(p.size()).
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_solver(args, dataloader):
    """Assemble the training Solver: model, AdamW optimizer, LR/BN schedules.

    Returns:
        (solver, num_params, root) — root is the output directory for this run.

    NOTE(review): reconstructed from a whitespace-stripped source; `os.makedirs`
    is assumed to belong to the fresh-run branch only — confirm.
    """
    model = get_model(args)

    # Per-module learning-rate overrides; everything else falls back to args.lr.  # TODO
    weight_dict = {
        'detr': {'lr': 0.0001},
        'lang': {'lr': 0.0005},
        'match': {'lr': 0.0005},
    }
    params = set_params_lr_dict(model, base_lr=args.lr, weight_decay=args.wd, weight_dict=weight_dict)
    optimizer = AdamW(params, lr=args.lr, weight_decay=args.wd, amsgrad=args.amsgrad)

    if args.use_checkpoint:
        # Resume: reuse the checkpoint's folder name as the run stamp.
        print("loading checkpoint {}...".format(args.use_checkpoint))
        stamp = args.use_checkpoint
        root = os.path.join(CONF.PATH.OUTPUT, stamp)
        checkpoint = torch.load(os.path.join(CONF.PATH.OUTPUT, args.use_checkpoint, "checkpoint.tar"))
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        # Fresh run: timestamped output folder, optionally suffixed by the tag.
        stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        if args.tag:
            stamp += "_" + args.tag.upper()
        root = os.path.join(CONF.PATH.OUTPUT, stamp)
        os.makedirs(root, exist_ok=True)

    # Scheduler parameters apply only when training the detection pipeline alone.
    LR_DECAY_STEP = [80, 120, 160] if args.no_reference else None
    if args.coslr:
        LR_DECAY_STEP = {
            'type': 'cosine',
            'T_max': args.epoch,
            'eta_min': 1e-5,
        }
    LR_DECAY_RATE = 0.1 if args.no_reference else None
    BN_DECAY_STEP = 20 if args.no_reference else None
    BN_DECAY_RATE = 0.5 if args.no_reference else None
    print('LR&BN_DECAY', LR_DECAY_STEP, LR_DECAY_RATE, BN_DECAY_STEP, BN_DECAY_RATE, flush=True)

    solver = Solver(
        model=model,
        config=DC,
        dataloader=dataloader,
        optimizer=optimizer,
        stamp=stamp,
        val_step=args.val_step,
        detection=not args.no_detection,
        reference=not args.no_reference,
        use_lang_classifier=not args.no_lang_cls,
        lr_decay_step=LR_DECAY_STEP,
        lr_decay_rate=LR_DECAY_RATE,
        bn_decay_step=BN_DECAY_STEP,
        bn_decay_rate=BN_DECAY_RATE,
    )
    return solver, get_num_params(model), root
def save_info(args, root, num_params, train_dataset, val_dataset):
    """Dump the run configuration plus dataset statistics to <root>/info.json."""
    info = dict(vars(args))  # snapshot of every CLI argument
    info.update({
        "num_train": len(train_dataset),
        "num_val": len(val_dataset),
        "num_train_scenes": len(train_dataset.scene_list),
        "num_val_scenes": len(val_dataset.scene_list),
        "num_params": num_params,
    })
    with open(os.path.join(root, "info.json"), "w") as f:
        json.dump(info, f, indent=4)
def get_scannet_scene_list(split):
    """Return the sorted ScanNet scene ids for `split` (e.g. "train", "val").

    BUGFIX: the original opened the meta file inside a list comprehension and
    never closed it; use a context manager so the handle is always released.
    """
    meta_file = os.path.join(CONF.PATH.SCANNET_META, "scannetv2_{}.txt".format(split))
    with open(meta_file) as f:
        scene_list = sorted(line.rstrip() for line in f)
    return scene_list
def _chunk_by_scene(entries, lang_num_max):
    """Group consecutive same-scene entries into chunks of at most `lang_num_max`.

    A new chunk starts whenever the scene id changes or the current chunk is
    full.  Mirrors the original inline loop, including its quirk of appending
    the final (possibly empty) chunk unconditionally.
    """
    chunks = []
    current = []
    scene_id = ""
    for data in entries:
        if scene_id != data["scene_id"]:
            scene_id = data["scene_id"]
            if len(current) > 0:
                chunks.append(current)
                current = []
        if len(current) >= lang_num_max:
            chunks.append(current)
            current = []
        current.append(data)
    chunks.append(current)
    return chunks


def get_scanrefer(scanrefer_train, scanrefer_val, num_scenes, lang_num_max):
    """Select training scenes and chunk the ScanRefer annotations per scene.

    Args:
        scanrefer_train / scanrefer_val: flat lists of annotation dicts, each
            carrying at least a "scene_id" key.
        num_scenes: number of training scenes to keep (-1 = all).
        lang_num_max: maximum annotations per chunk.

    Returns:
        (new_scanrefer_train, new_scanrefer_val, all_scene_list,
         scanrefer_train_new, scanrefer_val_new)

    NOTE(review): reads the module-global `args` for `no_reference` instead of
    taking it as a parameter — confirm before refactoring the signature.
    """
    if args.no_reference:
        # Detection-only training: synthesize one dummy annotation per scene.
        train_scene_list = get_scannet_scene_list("train")
        new_scanrefer_train = []
        for scene_id in train_scene_list:
            data = deepcopy(SCANREFER_TRAIN[0])
            data["scene_id"] = scene_id
            new_scanrefer_train.append(data)
        val_scene_list = get_scannet_scene_list("val")
        new_scanrefer_val = []
        for scene_id in val_scene_list:
            data = deepcopy(SCANREFER_VAL[0])
            data["scene_id"] = scene_id
            new_scanrefer_val.append(data)
        # BUGFIX: the original never defined the chunked lists on this branch,
        # so the return below raised NameError under --no_reference.
        scanrefer_train_new = _chunk_by_scene(new_scanrefer_train, lang_num_max)
        scanrefer_val_new = _chunk_by_scene(new_scanrefer_val, lang_num_max)
    else:
        # Initial scene lists, deduplicated and sorted for determinism.
        train_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_train])))
        val_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_val])))
        if num_scenes == -1:
            num_scenes = len(train_scene_list)
        else:
            assert len(train_scene_list) >= num_scenes
        # Slice down to the requested number of training scenes.
        train_scene_list = train_scene_list[:num_scenes]

        # Keep only annotations whose scene was selected, then chunk them.
        new_scanrefer_train = [data for data in scanrefer_train
                               if data["scene_id"] in train_scene_list]
        scanrefer_train_new = _chunk_by_scene(new_scanrefer_train, lang_num_max)

        new_scanrefer_val = scanrefer_val
        scanrefer_val_new = _chunk_by_scene(scanrefer_val, lang_num_max)

    print("scanrefer_train_new", len(scanrefer_train_new), len(scanrefer_val_new), len(scanrefer_train_new[0]))
    total = sum(len(chunk) for chunk in scanrefer_train_new)
    print("sum", total)  # 1418 363

    # all scanrefer scene
    all_scene_list = train_scene_list + val_scene_list
    print("train on {} samples and val on {} samples".format(len(new_scanrefer_train), len(new_scanrefer_val)))
    return new_scanrefer_train, new_scanrefer_val, all_scene_list, scanrefer_train_new, scanrefer_val_new
def train(args):
    """End-to-end training entry point: build data, solver, then fit."""
    # init training dataset
    print("preparing data...")
    (scanrefer_train, scanrefer_val, all_scene_list,
     scanrefer_train_new, scanrefer_val_new) = get_scanrefer(
        SCANREFER_TRAIN, SCANREFER_VAL, args.num_scenes, args.lang_num_max)
    scanrefer = {"train": scanrefer_train, "val": scanrefer_val}
    scanrefer_new = {"train": scanrefer_train_new, "val": scanrefer_val_new}

    # dataloaders: augmentation only on the training split
    train_dataset, train_loader = get_dataloader(
        args, scanrefer, scanrefer_new, all_scene_list, "train", DC, augment=True)
    val_dataset, val_loader = get_dataloader(
        args, scanrefer, scanrefer_new, all_scene_list, "val", DC, augment=False)
    dataloader = {"train": train_loader, "val": val_loader}

    print("initializing...")
    solver, num_params, root = get_solver(args, dataloader)

    print("Start training...\n")
    save_info(args, root, num_params, train_dataset, val_dataset)
    solver(args.epoch, args.verbose)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # run bookkeeping
    parser.add_argument("--tag", type=str, help="tag for the training, e.g. cuda_wl", default="")
    parser.add_argument("--gpu", type=str, help="gpu", default="0")
    # optimization schedule
    parser.add_argument("--batch_size", type=int, help="batch size", default=14)
    parser.add_argument("--epoch", type=int, help="number of epochs", default=50)
    parser.add_argument("--verbose", type=int, help="iterations of showing verbose", default=10)
    parser.add_argument("--val_step", type=int, help="iterations of validating", default=5000)
    parser.add_argument("--lr", type=float, help="learning rate", default=1e-3)
    parser.add_argument("--wd", type=float, help="weight decay", default=1e-5)
    parser.add_argument("--lang_num_max", type=int, help="lang num max", default=32)
    # model / data size
    parser.add_argument("--num_points", type=int, default=40000, help="Point Number [default: 40000]")
    parser.add_argument("--num_proposals", type=int, default=256, help="Proposal number [default: 256]")
    parser.add_argument("--num_scenes", type=int, default=-1, help="Number of scenes [default: -1]")
    parser.add_argument("--seed", type=int, default=42, help="random seed")
    parser.add_argument("--coslr", action='store_true', help="cosine learning rate")
    parser.add_argument("--amsgrad", action='store_true', help="optimizer with amsgrad")
    # feature / module switches
    parser.add_argument("--no_height", action="store_true", help="Do NOT use height signal in input.")
    parser.add_argument("--no_augment", action="store_true", help="Do NOT use augment on trainingset (not used)")
    parser.add_argument("--no_lang_cls", action="store_true", help="Do NOT use language classifier.")
    parser.add_argument("--no_detection", action="store_true", help="Do NOT train the detection module.")
    parser.add_argument("--no_reference", action="store_true", help="Do NOT train the localization module.")
    parser.add_argument("--use_color", action="store_true", help="Use RGB color in input.")
    parser.add_argument("--use_normal", action="store_true", help="Use RGB color in input.")
    parser.add_argument("--use_multiview", action="store_true", help="Use multiview images.")
    parser.add_argument("--use_bidir", action="store_true", help="Use bi-directional GRU.")
    parser.add_argument("--use_pretrained", type=str,
                        help="Specify the folder name containing the pretrained detection module.")
    parser.add_argument("--use_checkpoint", type=str, help="Specify the checkpoint root", default="")
    args = parser.parse_args()

    # setting
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

    # reproducibility
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(args.seed)

    train(args)
|
<filename>openks/models/pytorch/mmd_modules/ThreeDVG/scripts/ScanRefer_train.py<gh_stars>0
import os
import sys
import json
import h5py
import argparse
import importlib
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pickle
from torch.utils.data import DataLoader
from datetime import datetime
from copy import deepcopy
sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
from ..data.scannet.model_util_scannet import ScannetDatasetConfig
from ..lib.dataset import ScannetReferenceDataset
from ..lib.solver import Solver
from ..lib.config import CONF
from ..models.refnet import RefNet
from ..scripts.utils.AdamW import AdamW
from ..scripts.utils.script_utils import set_params_lr_dict
SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json")))
SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json")))
# constants
DC = ScannetDatasetConfig()
def get_dataloader(args, scanrefer, scanrefer_new, all_scene_list, split, config, augment, shuffle=True):
dataset = ScannetReferenceDataset(
scanrefer=scanrefer[split],
scanrefer_new=scanrefer_new[split],
scanrefer_all_scene=all_scene_list,
split=split,
num_points=args.num_points,
use_height=(not args.no_height),
use_color=args.use_color,
use_normal=args.use_normal,
use_multiview=args.use_multiview,
lang_num_max=args.lang_num_max,
augment=augment,
shuffle=shuffle
)
# dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=4)
return dataset, dataloader
def get_model(args):
# initiate model
input_channels = int(args.use_multiview) * 128 + int(args.use_normal) * 3 + int(args.use_color) * 3 + int(not args.no_height)
model = RefNet(
num_class=DC.num_class,
num_heading_bin=DC.num_heading_bin,
num_size_cluster=DC.num_size_cluster,
mean_size_arr=DC.mean_size_arr,
input_feature_dim=input_channels,
num_proposal=args.num_proposals,
use_lang_classifier=(not args.no_lang_cls),
use_bidir=args.use_bidir,
no_reference=args.no_reference,
dataset_config=DC
)
# trainable model
if args.use_pretrained:
# load model
print("loading pretrained VoteNet...")
pretrained_model = RefNet(
num_class=DC.num_class,
num_heading_bin=DC.num_heading_bin,
num_size_cluster=DC.num_size_cluster,
mean_size_arr=DC.mean_size_arr,
num_proposal=args.num_proposals,
input_feature_dim=input_channels,
use_bidir=args.use_bidir,
no_reference=True,
dataset_config=DC
)
pretrained_path = os.path.join(CONF.PATH.OUTPUT, args.use_pretrained, "model_last.pth")
pretrained_model.load_state_dict(torch.load(pretrained_path), strict=False)
# mount
model.backbone_net = pretrained_model.backbone_net
model.vgen = pretrained_model.vgen
model.proposal = pretrained_model.proposal
if args.no_detection:
# freeze pointnet++ backbone
for param in model.backbone_net.parameters():
param.requires_grad = False
# freeze voting
for param in model.vgen.parameters():
param.requires_grad = False
# freeze detector
for param in model.proposal.parameters():
param.requires_grad = False
# to CUDA
model = model.cuda()
return model
def get_num_params(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
num_params = int(sum([np.prod(p.size()) for p in model_parameters]))
return num_params
def get_solver(args, dataloader):
model = get_model(args)
# TODO
weight_dict = {
'detr': {'lr': 0.0001},
'lang': {'lr': 0.0005},
'match': {'lr': 0.0005},
}
params = set_params_lr_dict(model, base_lr=args.lr, weight_decay=args.wd, weight_dict=weight_dict)
# params = model.parameters()
optimizer = AdamW(params, lr=args.lr, weight_decay=args.wd, amsgrad=args.amsgrad)
if args.use_checkpoint:
print("loading checkpoint {}...".format(args.use_checkpoint))
stamp = args.use_checkpoint
root = os.path.join(CONF.PATH.OUTPUT, stamp)
checkpoint = torch.load(os.path.join(CONF.PATH.OUTPUT, args.use_checkpoint, "checkpoint.tar"))
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
else:
stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if args.tag: stamp += "_"+args.tag.upper()
root = os.path.join(CONF.PATH.OUTPUT, stamp)
os.makedirs(root, exist_ok=True)
# scheduler parameters for training solely the detection pipeline
LR_DECAY_STEP = [80, 120, 160] if args.no_reference else None
if args.coslr:
LR_DECAY_STEP = {
'type': 'cosine',
'T_max': args.epoch,
'eta_min': 1e-5,
}
LR_DECAY_RATE = 0.1 if args.no_reference else None
BN_DECAY_STEP = 20 if args.no_reference else None
BN_DECAY_RATE = 0.5 if args.no_reference else None
print('LR&BN_DECAY', LR_DECAY_STEP, LR_DECAY_RATE, BN_DECAY_STEP, BN_DECAY_RATE, flush=True)
solver = Solver(
model=model,
config=DC,
dataloader=dataloader,
optimizer=optimizer,
stamp=stamp,
val_step=args.val_step,
detection=not args.no_detection,
reference=not args.no_reference,
use_lang_classifier=not args.no_lang_cls,
lr_decay_step=LR_DECAY_STEP,
lr_decay_rate=LR_DECAY_RATE,
bn_decay_step=BN_DECAY_STEP,
bn_decay_rate=BN_DECAY_RATE
)
num_params = get_num_params(model)
return solver, num_params, root
def save_info(args, root, num_params, train_dataset, val_dataset):
info = {}
for key, value in vars(args).items():
info[key] = value
info["num_train"] = len(train_dataset)
info["num_val"] = len(val_dataset)
info["num_train_scenes"] = len(train_dataset.scene_list)
info["num_val_scenes"] = len(val_dataset.scene_list)
info["num_params"] = num_params
with open(os.path.join(root, "info.json"), "w") as f:
json.dump(info, f, indent=4)
def get_scannet_scene_list(split):
scene_list = sorted(
[line.rstrip() for line in open(os.path.join(CONF.PATH.SCANNET_META, "scannetv2_{}.txt".format(split)))])
return scene_list
def get_scanrefer(scanrefer_train, scanrefer_val, num_scenes, lang_num_max):
if args.no_reference:
train_scene_list = get_scannet_scene_list("train")
new_scanrefer_train = []
for scene_id in train_scene_list:
data = deepcopy(SCANREFER_TRAIN[0])
data["scene_id"] = scene_id
new_scanrefer_train.append(data)
val_scene_list = get_scannet_scene_list("val")
new_scanrefer_val = []
for scene_id in val_scene_list:
data = deepcopy(SCANREFER_VAL[0])
data["scene_id"] = scene_id
new_scanrefer_val.append(data)
else:
# get initial scene list
train_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_train])))
val_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_val])))
if num_scenes == -1:
num_scenes = len(train_scene_list)
else:
assert len(train_scene_list) >= num_scenes
# slice train_scene_list
train_scene_list = train_scene_list[:num_scenes]
# filter data in chosen scenes
new_scanrefer_train = []
scanrefer_train_new = []
scanrefer_train_new_scene = []
scene_id = ""
for data in scanrefer_train:
if data["scene_id"] in train_scene_list:
new_scanrefer_train.append(data)
if scene_id != data["scene_id"]:
scene_id = data["scene_id"]
if len(scanrefer_train_new_scene) > 0:
scanrefer_train_new.append(scanrefer_train_new_scene)
scanrefer_train_new_scene = []
if len(scanrefer_train_new_scene) >= lang_num_max:
scanrefer_train_new.append(scanrefer_train_new_scene)
scanrefer_train_new_scene = []
scanrefer_train_new_scene.append(data)
"""
if data["scene_id"] not in scanrefer_train_new:
scanrefer_train_new[data["scene_id"]] = []
scanrefer_train_new[data["scene_id"]].append(data)
"""
scanrefer_train_new.append(scanrefer_train_new_scene)
new_scanrefer_val = scanrefer_val
scanrefer_val_new = []
scanrefer_val_new_scene = []
scene_id = ""
for data in scanrefer_val:
# if data["scene_id"] not in scanrefer_val_new:
# scanrefer_val_new[data["scene_id"]] = []
# scanrefer_val_new[data["scene_id"]].append(data)
if scene_id != data["scene_id"]:
scene_id = data["scene_id"]
if len(scanrefer_val_new_scene) > 0:
scanrefer_val_new.append(scanrefer_val_new_scene)
scanrefer_val_new_scene = []
if len(scanrefer_val_new_scene) >= lang_num_max:
scanrefer_val_new.append(scanrefer_val_new_scene)
scanrefer_val_new_scene = []
scanrefer_val_new_scene.append(data)
scanrefer_val_new.append(scanrefer_val_new_scene)
print("scanrefer_train_new", len(scanrefer_train_new), len(scanrefer_val_new), len(scanrefer_train_new[0]))
sum = 0
for i in range(len(scanrefer_train_new)):
sum += len(scanrefer_train_new[i])
# print(len(scanrefer_train_new[i]))
# for i in range(len(scanrefer_val_new)):
# print(len(scanrefer_val_new[i]))
print("sum", sum) # 1418 363
# all scanrefer scene
all_scene_list = train_scene_list + val_scene_list
print("train on {} samples and val on {} samples".format(len(new_scanrefer_train), len(new_scanrefer_val)))
return new_scanrefer_train, new_scanrefer_val, all_scene_list, scanrefer_train_new, scanrefer_val_new
def train(args):
# init training dataset
print("preparing data...")
scanrefer_train, scanrefer_val, all_scene_list, scanrefer_train_new, scanrefer_val_new = get_scanrefer(
SCANREFER_TRAIN, SCANREFER_VAL, args.num_scenes, args.lang_num_max)
scanrefer = {
"train": scanrefer_train,
"val": scanrefer_val
}
scanrefer_new = {
"train": scanrefer_train_new,
"val": scanrefer_val_new
}
# dataloader
train_dataset, train_dataloader = get_dataloader(args, scanrefer, scanrefer_new, all_scene_list, "train", DC, augment=True)
val_dataset, val_dataloader = get_dataloader(args, scanrefer, scanrefer_new, all_scene_list, "val", DC, augment=False)
dataloader = {
"train": train_dataloader,
"val": val_dataloader
}
print("initializing...")
solver, num_params, root = get_solver(args, dataloader)
print("Start training...\n")
save_info(args, root, num_params, train_dataset, val_dataset)
solver(args.epoch, args.verbose)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tag", type=str, help="tag for the training, e.g. cuda_wl", default="")
parser.add_argument("--gpu", type=str, help="gpu", default="0")
parser.add_argument("--batch_size", type=int, help="batch size", default=14)
parser.add_argument("--epoch", type=int, help="number of epochs", default=50)
parser.add_argument("--verbose", type=int, help="iterations of showing verbose", default=10)
parser.add_argument("--val_step", type=int, help="iterations of validating", default=5000)
parser.add_argument("--lr", type=float, help="learning rate", default=1e-3)
parser.add_argument("--wd", type=float, help="weight decay", default=1e-5)
parser.add_argument("--lang_num_max", type=int, help="lang num max", default=32)
parser.add_argument("--num_points", type=int, default=40000, help="Point Number [default: 40000]")
parser.add_argument("--num_proposals", type=int, default=256, help="Proposal number [default: 256]")
parser.add_argument("--num_scenes", type=int, default=-1, help="Number of scenes [default: -1]")
parser.add_argument("--seed", type=int, default=42, help="random seed")
parser.add_argument("--coslr", action='store_true', help="cosine learning rate")
parser.add_argument("--amsgrad", action='store_true', help="optimizer with amsgrad")
parser.add_argument("--no_height", action="store_true", help="Do NOT use height signal in input.")
parser.add_argument("--no_augment", action="store_true", help="Do NOT use augment on trainingset (not used)")
parser.add_argument("--no_lang_cls", action="store_true", help="Do NOT use language classifier.")
parser.add_argument("--no_detection", action="store_true", help="Do NOT train the detection module.")
parser.add_argument("--no_reference", action="store_true", help="Do NOT train the localization module.")
parser.add_argument("--use_color", action="store_true", help="Use RGB color in input.")
parser.add_argument("--use_normal", action="store_true", help="Use RGB color in input.")
parser.add_argument("--use_multiview", action="store_true", help="Use multiview images.")
parser.add_argument("--use_bidir", action="store_true", help="Use bi-directional GRU.")
parser.add_argument("--use_pretrained", type=str,
help="Specify the folder name containing the pretrained detection module.")
parser.add_argument("--use_checkpoint", type=str, help="Specify the checkpoint root", default="")
args = parser.parse_args()
# setting
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
# reproducibility
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
train(args)
|
en
| 0.428367
|
# HACK add the root folder # constants # dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) # initiate model # trainable model # load model # mount # freeze pointnet++ backbone # freeze voting # freeze detector # to CUDA # TODO # params = model.parameters() # scheduler parameters for training solely the detection pipeline # get initial scene list # slice train_scene_list # filter data in chosen scenes if data["scene_id"] not in scanrefer_train_new: scanrefer_train_new[data["scene_id"]] = [] scanrefer_train_new[data["scene_id"]].append(data) # if data["scene_id"] not in scanrefer_val_new: # scanrefer_val_new[data["scene_id"]] = [] # scanrefer_val_new[data["scene_id"]].append(data) # print(len(scanrefer_train_new[i])) # for i in range(len(scanrefer_val_new)): # print(len(scanrefer_val_new[i])) # 1418 363 # all scanrefer scene # init training dataset # dataloader # setting # reproducibility
| 1.832826
| 2
|
src/api/bkuser_core/categories/constants.py
|
Canway-shiisa/bk-user
| 0
|
6627359
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from datetime import timedelta
from enum import auto
from django.utils.translation import ugettext_lazy as _
from bkuser_core.common.enum import AutoLowerEnum
# Presumably the cutoff after which a running sync task is treated as timed
# out — TODO(review): confirm against the task scheduler that consumes this.
TIMEOUT_THRESHOLD = timedelta(hours=1)
class CategoryStatus(AutoLowerEnum):
    """Lifecycle status of a user category (directory)."""

    NORMAL = auto()
    INACTIVE = auto()

    _choices_labels = (
        (NORMAL, _("正常")),
        (INACTIVE, _("停用")),
    )
class CategoryType(AutoLowerEnum):
    """Category (directory) type.

    TODO: once categories and data sources are fully decoupled, these types
    should be removed.  (Translated from the original Chinese docstring.)
    """

    LOCAL = auto()
    MAD = auto()
    LDAP = auto()
    CUSTOM = auto()
    # Special type that only bridges categories and data sources until the
    # decoupling is finished.  (Translated from the original Chinese comment.)
    PLUGGABLE = auto()

    # CONSISTENCY FIX: the CUSTOM / PLUGGABLE labels were plain strings while
    # every sibling label is wrapped in ugettext_lazy; wrap them too so all
    # labels are translatable.
    _choices_labels = (
        (LOCAL, _("本地目录")),
        (MAD, _("Microsoft Active Directory")),
        (LDAP, _("OpenLDAP")),
        (CUSTOM, _("自定义目录")),
        (PLUGGABLE, _("可插拔目录")),
    )

    @classmethod
    def get_description(cls, value: "CategoryType"):
        """Return the human-readable description of a category type.

        Accepts either the enum member or its raw string value — hence the
        deliberately duplicated keys below.
        """
        _map = {
            cls.LOCAL: _("本地支持用户的新增、删除、编辑、查询,以及用户的登录认证。"),
            cls.MAD: _("支持对接 Microsoft Active Directory,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.LDAP: _("支持对接 OpenLDAP,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.CUSTOM: _("支持对接任意符合自定义数据拉取协议的用户系统。"),
            cls.LOCAL.value: _("本地支持用户的新增、删除、编辑、查询,以及用户的登录认证。"),
            cls.MAD.value: _("支持对接 Microsoft Active Directory,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.LDAP.value: _("支持对接 OpenLDAP,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.CUSTOM.value: _("支持对接任意符合自定义数据拉取协议的用户系统。"),
        }
        return _map[value]
class SyncStep(AutoLowerEnum):
    """Stages of a category synchronization run."""

    USERS = auto()
    DEPARTMENTS = auto()
    USERS_RELATIONSHIP = auto()
    DEPT_USER_RELATIONSHIP = auto()

    _choices_labels = (
        (USERS, _("用户数据更新")),
        (DEPARTMENTS, _("组织数据更新")),
        (USERS_RELATIONSHIP, _("用户间关系数据更新")),
        (DEPT_USER_RELATIONSHIP, _("用户和组织关系数据更新")),
    )
class SyncTaskType(AutoLowerEnum):
    """How a synchronization task was triggered."""

    MANUAL = auto()
    AUTO = auto()

    _choices_labels = (
        (MANUAL, _("手动导入")),
        (AUTO, _("定时同步")),
    )
class SyncTaskStatus(AutoLowerEnum):
    """Runtime state of a synchronization task."""

    SUCCESSFUL = auto()
    FAILED = auto()
    RUNNING = auto()
    RETRYING = auto()

    _choices_labels = (
        (SUCCESSFUL, _("成功")),
        (FAILED, _("失败")),
        (RUNNING, _("同步中")),
        (RETRYING, _("失败重试中")),
    )
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from datetime import timedelta
from enum import auto
from django.utils.translation import ugettext_lazy as _
from bkuser_core.common.enum import AutoLowerEnum
TIMEOUT_THRESHOLD = timedelta(hours=1)
class CategoryStatus(AutoLowerEnum):
NORMAL = auto()
INACTIVE = auto()
_choices_labels = (
(NORMAL, _("正常")),
(INACTIVE, _("停用")),
)
class CategoryType(AutoLowerEnum):
    """Category (directory) type.

    TODO: once categories and data sources are fully decoupled, this type
    should actually be removed (original note: 当目录和数据源解耦完成后,
    这里的类型实际上就应该去除).
    """

    LOCAL = auto()
    MAD = auto()
    LDAP = auto()
    CUSTOM = auto()
    # Special type that only bridges the gap until the decoupling is done.
    PLUGGABLE = auto()

    _choices_labels = (
        (LOCAL, _("本地目录")),
        (MAD, _("Microsoft Active Directory")),
        (LDAP, _("OpenLDAP")),
        (CUSTOM, "自定义目录"),
        (PLUGGABLE, "可插拔目录"),
    )

    @classmethod
    def get_description(cls, value: "CategoryType"):
        """Return the human-readable description for *value*.

        ``value`` may be either an enum member or its raw string value;
        both resolve identically.  As in the original mapping, PLUGGABLE
        (and any unknown raw value) raises KeyError.
        """
        # Key the map by raw value once, instead of duplicating every entry
        # under both the member and its .value as the previous version did.
        _map = {
            cls.LOCAL.value: _("本地支持用户的新增、删除、编辑、查询,以及用户的登录认证。"),
            cls.MAD.value: _("支持对接 Microsoft Active Directory,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.LDAP.value: _("支持对接 OpenLDAP,将用户信息同步到本地或者直接通过接口完成用户登录验证。"),
            cls.CUSTOM.value: _("支持对接任意符合自定义数据拉取协议的用户系统。"),
        }
        # Normalize enum members to their raw value so unknown raw strings
        # still raise KeyError (not ValueError), matching prior behavior.
        key = value.value if isinstance(value, cls) else value
        return _map[key]
class SyncStep(AutoLowerEnum):
    """Individual stages of a category synchronization run."""

    USERS = auto()
    DEPARTMENTS = auto()
    USERS_RELATIONSHIP = auto()
    DEPT_USER_RELATIONSHIP = auto()

    # (member, label) pairs; presumably consumed by the project's enum
    # base to produce Django-style choices — TODO confirm in AutoLowerEnum.
    _choices_labels = (
        (USERS, _("用户数据更新")),
        (DEPARTMENTS, _("组织数据更新")),
        (USERS_RELATIONSHIP, _("用户间关系数据更新")),
        (DEPT_USER_RELATIONSHIP, _("用户和组织关系数据更新")),
    )
class SyncTaskType(AutoLowerEnum):
    """How a synchronization task was triggered."""

    MANUAL = auto()
    AUTO = auto()

    # (member, label) pairs; presumably consumed by the project's enum
    # base to produce Django-style choices — TODO confirm in AutoLowerEnum.
    _choices_labels = ((MANUAL, _("手动导入")), (AUTO, _("定时同步")))
class SyncTaskStatus(AutoLowerEnum):
    """Execution state of a synchronization task."""

    SUCCESSFUL = auto()
    FAILED = auto()
    RUNNING = auto()
    RETRYING = auto()

    # (member, label) pairs; presumably consumed by the project's enum
    # base to produce Django-style choices — TODO confirm in AutoLowerEnum.
    _choices_labels = ((SUCCESSFUL, _("成功")), (FAILED, _("失败")), (RUNNING, _("同步中")), (RETRYING, _("失败重试中")))
|
en
| 0.798374
|
# -*- coding: utf-8 -*- TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 目录类型 TODO: 当目录和数据源解耦完成吼,这里的类型实际上就应该去除 # 特殊的类型,仅在未解耦前桥接
| 1.69763
| 2
|
recipes/Python/577612_Seven_Bit_Colored_Analogue_Bar_Graph_Generator/recipe-577612.py
|
tdiprima/code
| 2,023
|
6627360
|
# SevenBitBargraph2x.py
#
# A DEMO 7 bit analogue bargraph generator in colour for STANDARD Python 2.6.x and Linux...
#
# (Original copyright, (C)2010, B.Walker, G0LCU.)
# A Python 3.x version can be found here:-
# http://www.linuxformat.com/forums/viewtopic.php?t=13443
#
# Saved as SevenBitBargraph2x.py wherever you like.
#
# This DEMO goes from safe green, to warning amber, to danger red, with a critical
# error beep above 120 on the horizontal scale...
#
# Two system commands are required, "clear" and "setterm", for this to work.
# I assume that these are available on all recent and current Linux distros.
# The device /dev/audio is used so this must be free also.
#
# It is useful for quick glance readings from say an 8 bit ADC used as a simple
# voltmeter, etc. Getting a digital readout is SO simple I left it out this time...
#
# To run use the following from inside a Python prompt...
# >>> exec(open("/full/path/to/code/SevenBitBargraph2x.py").read())
# OR...
# >>> execfile("/full/path/to/code/SevenBitBargraph2x.py").read()
# Tested on Debian 6.0.0 with Python 2.6.6.
# Add the required imports for this DEMO.
import os
import random
import time
def main():
    """Endlessly render a coloured 7-bit horizontal bargraph in a terminal.

    Each pass fakes reading a byte from a port, halves it to a 7-bit value,
    draws it as a green/amber/red bar using ANSI escape codes, and above 120
    sounds a ~1 kHz beep through /dev/audio.  Runs until Ctrl-C.
    Python 2 only (print statements, unichr).
    """
    # For this DEMO set up variables as global...
    global column
    global count
    global bargraph
    column=0
    count=2
    bargraph="(C)2010, B.Walker, G0CLU. Now Public Domain"
    # Disable the cursor as it looks much nicer... ;o)
    os.system("setterm -cursor off")
    while 1:
        # Do a full, clean, clear screen and start looping.
        print os.system("clear"),unichr(13)," ",unichr(13),
        # Set to terminal default colour(s).
        print "\033[0mSeven Bit Level Horizontal Analogue Bar Graph Display..."
        print
        print "Original copyright, (C)2010, B.Walker, G0LCU."
        print
        print "Issued to all as Public Domain."
        print
        print
        # Set the bargraph to light green for this DEMO.
        # This is equivalent to 0 for the column value.
        bargraph=" \033[1;32m|"
        # Generate a byte value as though grabbed from a serial, parallel or USB port.
        column=int(random.random()*256)
        # Now divide by 2 to simulate a 7 bit value.
        column=int(column/2)
        # Although this should never occur, don't allow any error.
        if column>=127: column=127
        if column<=0: column=0
        # Now to generate the bargraph...
        count=0
        while count<=column:
            # It is equivalent to BIOS character 222 for column value of 1 ONLY.
            if count==1: bargraph=" \033[1;32m"+unichr(0x2590)
            count=count+1
            if count>=2:
                while count<=column:
                    # Change bargraph colour on the fly when entering the YELLOW zone... :)
                    if count>=90: bargraph=bargraph+"\033[1;33m"
                    # Change bargraph colour on the fly when entering the RED zone... :)
                    if count>=100: bargraph=bargraph+"\033[1;31m"
                    if count%2==0:
                        # For every odd column value print this BIOS character 221.
                        bargraph=bargraph+unichr(0x258c)
                    if count%2==1:
                        # For every even column value OVERWRITE the above with BIOS character 219.
                        bargraph=bargraph+"\b"+unichr(0x2588)
                    count=count+1
        # Print the "scale" in the default colour(s)...
        print "\033[0m 0 10 20 30 40 50 60 70 80 90 100 110 120"
        # Now print the meter and bargraph in colours of your choice... :)
        print "\033[1;32m | | | | | | | | | \033[1;33m| \033[1;31m| | |"
        print "\033[1;32m +++++++++++++++++++++++++++++++++++++++++++++\033[1;33m+++++\033[1;31m+++++++++++++++"
        print bargraph
        print "\033[1;32m +++++++++++++++++++++++++++++++++++++++++++++\033[1;33m+++++\033[1;31m+++++++++++++++"
        print
        print " \033[1;34m Analogue resolution is half of one division, that is 1."
        print
        # Return back to the default colours and for this DEMO the column value...
        print "\033[0mColumn number",column,"\b... "
        print
        print "Press Ctrl-C to stop..."
        # Do a critical error beep, [sine wave(ish)] for about 1second.
        if column>=120:
            # Set up the binary code as a crude sinewave.
            waveform=b"\x0f\x2d\x3f\x2d\x0f\x03\x00\x03"
            # Set audio timing to zero, "0".
            count=0
            # Open up the audio device to write to.
            # This could be /dev/dsp also...
            audio=open("/dev/audio", "wb")
            # A "count" value of 1 = 1mS, so 1000 = 1S.
            while count<=1000:
                # Send 8 bytes of data to the audio device 1000 times.
                # This is VERY close to 1KHz and almost sinewave.
                audio.write(waveform)
                count=count+1
            # Close the audio device access.
            audio.close()
        # Add a DEMO delay to simulate a simple digital voltmeter speed...
        if column<=119: time.sleep(1)
    # Enable the cursor again if it ever gets here... ;oO
    # NOTE(review): unreachable after "while 1" — only runs if the loop is
    # ever given a break condition.
    os.system("setterm -cursor on")
main()
# DEMO end.
# Enjoy finding simple solutions to often very difficult problems...
|
# SevenBitBargraph2x.py
#
# A DEMO 7 bit analogue bargraph generator in colour for STANDARD Python 2.6.x and Linux...
#
# (Original copyright, (C)2010, B.Walker, G0LCU.)
# A Python 3.x version can be found here:-
# http://www.linuxformat.com/forums/viewtopic.php?t=13443
#
# Saved as SevenBitBargraph2x.py wherever you like.
#
# This DEMO goes from safe green, to warning amber, to danger red, with a critical
# error beep above 120 on the horizontal scale...
#
# Two system commands are required, "clear" and "setterm", for this to work.
# I assume that these are available on all recent and current Linux distros.
# The device /dev/audio is used so this must be free also.
#
# It is useful for quick glance readings from say an 8 bit ADC used as a simple
# voltmeter, etc. Getting a digital readout is SO simple I left it out this time...
#
# To run use the following from inside a Python prompt...
# >>> exec(open("/full/path/to/code/SevenBitBargraph2x.py").read())
# OR...
# >>> execfile("/full/path/to/code/SevenBitBargraph2x.py").read()
# Tested on Debian 6.0.0 with Python 2.6.6.
# Add the required imports for this DEMO.
import os
import random
import time
def main():
    """Endlessly render a coloured 7-bit horizontal bargraph in a terminal.

    Each pass fakes reading a byte from a port, halves it to a 7-bit value,
    draws it as a green/amber/red bar using ANSI escape codes, and above 120
    sounds a ~1 kHz beep through /dev/audio.  Runs until Ctrl-C.
    Python 2 only (print statements, unichr).
    """
    # For this DEMO set up variables as global...
    global column
    global count
    global bargraph
    column=0
    count=2
    bargraph="(C)2010, B.Walker, G0CLU. Now Public Domain"
    # Disable the cursor as it looks much nicer... ;o)
    os.system("setterm -cursor off")
    while 1:
        # Do a full, clean, clear screen and start looping.
        print os.system("clear"),unichr(13)," ",unichr(13),
        # Set to terminal default colour(s).
        print "\033[0mSeven Bit Level Horizontal Analogue Bar Graph Display..."
        print
        print "Original copyright, (C)2010, B.Walker, G0LCU."
        print
        print "Issued to all as Public Domain."
        print
        print
        # Set the bargraph to light green for this DEMO.
        # This is equivalent to 0 for the column value.
        bargraph=" \033[1;32m|"
        # Generate a byte value as though grabbed from a serial, parallel or USB port.
        column=int(random.random()*256)
        # Now divide by 2 to simulate a 7 bit value.
        column=int(column/2)
        # Although this should never occur, don't allow any error.
        if column>=127: column=127
        if column<=0: column=0
        # Now to generate the bargraph...
        count=0
        while count<=column:
            # It is equivalent to BIOS character 222 for column value of 1 ONLY.
            if count==1: bargraph=" \033[1;32m"+unichr(0x2590)
            count=count+1
            if count>=2:
                while count<=column:
                    # Change bargraph colour on the fly when entering the YELLOW zone... :)
                    if count>=90: bargraph=bargraph+"\033[1;33m"
                    # Change bargraph colour on the fly when entering the RED zone... :)
                    if count>=100: bargraph=bargraph+"\033[1;31m"
                    if count%2==0:
                        # For every odd column value print this BIOS character 221.
                        bargraph=bargraph+unichr(0x258c)
                    if count%2==1:
                        # For every even column value OVERWRITE the above with BIOS character 219.
                        bargraph=bargraph+"\b"+unichr(0x2588)
                    count=count+1
        # Print the "scale" in the default colour(s)...
        print "\033[0m 0 10 20 30 40 50 60 70 80 90 100 110 120"
        # Now print the meter and bargraph in colours of your choice... :)
        print "\033[1;32m | | | | | | | | | \033[1;33m| \033[1;31m| | |"
        print "\033[1;32m +++++++++++++++++++++++++++++++++++++++++++++\033[1;33m+++++\033[1;31m+++++++++++++++"
        print bargraph
        print "\033[1;32m +++++++++++++++++++++++++++++++++++++++++++++\033[1;33m+++++\033[1;31m+++++++++++++++"
        print
        print " \033[1;34m Analogue resolution is half of one division, that is 1."
        print
        # Return back to the default colours and for this DEMO the column value...
        print "\033[0mColumn number",column,"\b... "
        print
        print "Press Ctrl-C to stop..."
        # Do a critical error beep, [sine wave(ish)] for about 1second.
        if column>=120:
            # Set up the binary code as a crude sinewave.
            waveform=b"\x0f\x2d\x3f\x2d\x0f\x03\x00\x03"
            # Set audio timing to zero, "0".
            count=0
            # Open up the audio device to write to.
            # This could be /dev/dsp also...
            audio=open("/dev/audio", "wb")
            # A "count" value of 1 = 1mS, so 1000 = 1S.
            while count<=1000:
                # Send 8 bytes of data to the audio device 1000 times.
                # This is VERY close to 1KHz and almost sinewave.
                audio.write(waveform)
                count=count+1
            # Close the audio device access.
            audio.close()
        # Add a DEMO delay to simulate a simple digital voltmeter speed...
        if column<=119: time.sleep(1)
    # Enable the cursor again if it ever gets here... ;oO
    # NOTE(review): unreachable after "while 1" — only runs if the loop is
    # ever given a break condition.
    os.system("setterm -cursor on")
main()
# DEMO end.
# Enjoy finding simple solutions to often very difficult problems...
|
en
| 0.80158
|
# SevenBitBargraph2x.py # # A DEMO 7 bit analogue bargraph generator in colour for STANDARD Python 2.6.x and Linux... # # (Original copyright, (C)2010, B.Walker, G0LCU.) # A Python 3.x version can be found here:- # http://www.linuxformat.com/forums/viewtopic.php?t=13443 # # Saved as SevenBitBargraph2x.py wherever you like. # # This DEMO goes from safe green, to warning amber, to danger red, with a crirical # error beep above 120 on the horizontal scale... # # Two system commands are required, "clear" and "setterm", for this to work. # I assume that these are available on all recent and current Linux distros. # The device /dev/audio is used so this must be free also. # # It is useful for quick glance readings from say an 8 bit ADC used as a simple # voltmeter, etc. Getting a digital readout is SO simple I left it out this time... # # To run use the following from inside a Python prompt... # >>> exec(open("/full/path/to/code/SevenBitBargraph2x.py").read()) # OR... # >>> execfile("/full/path/to/code/SevenBitBargraph2x.py").read() # Tested on Debian 6.0.0 with Python 2.6.6. # Add the required imports for this DEMO. # For this DEMO set up variables as global... # Disable the cursor as it looks much nicer... ;o) # Do a full, clean, clear screen and start looping. # Set to terminal default colour(s). # Set the bargraph to light green for this DEMO. # This is equivalent to 0 for the column value. # Generate a byte value as though grabbed from a serial, parallel or USB port. # Now divide by 2 to simulate a 7 bit value. # Although this should never occur, don't allow any error. # Now to generate the bargraph... # It is equivalent to BIOS character 222 for column value of 1 ONLY. # Change bargraph colour on the fly when entering the YELLOW zone... :) # Change bargraph colour on the fly when entering the RED zone... :) # For every odd column value print this BIOS character 221. # For every even column value OVERWRITE the above with BIOS character 219. 
# Print the "scale" in the default colour(s)... # Now print the meter and bargraph in colours of your choice... :) # Return back to the default colours and for this DEMO the column value... # Do a critical error beep, [sine wave(ish)] for about 1second. # Set up the binary code as a crude sinewave. # Set audio timing to zero, "0". # Open up the audio device to write to. # This could be /dev/dsp also... # A "count" value of 1 = 1mS, so 1000 = 1S. # Send 8 bytes of data to the audio device 1000 times. # This is VERY close to 1KHz and almost sinewave. # Close the audio device access. # Add a DEMO delay to simulate a simple digital voltmeter speed... # Enable the cursor again if it ever gets here... ;oO # DEMO end. # Enjoy finding simple solutions to often very difficult problems...
| 3.137243
| 3
|
eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/ignore.py
|
bopopescu/phyG
| 0
|
6627361
|
<reponame>bopopescu/phyG
# ignore.py - ignored file handling for mercurial
#
# Copyright 2007 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import util, match
import re
_commentre = None
def ignorepats(lines):
    '''parse lines (iterable) of .hgignore text, returning a tuple of
    (patterns, parse errors). These patterns should be given to compile()
    to be validated and converted into a match function.'''
    # Map of user-facing syntax names to internal matcher prefixes.
    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
    # Default syntax until a "syntax:" directive changes it.
    syntax = 'relre:'
    patterns = []
    warnings = []
    for line in lines:
        if "#" in line:
            global _commentre
            if not _commentre:
                _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
            # remove comments prefixed by an even number of escapes
            line = _commentre.sub(r'\1', line)
            # fixup properly escaped comments that survived the above
            line = line.replace("\\#", "#")
        line = line.rstrip()
        if not line:
            continue
        if line.startswith('syntax:'):
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                # Unknown syntax names are reported but do not abort parsing.
                warnings.append(_("ignoring invalid syntax '%s'") % s)
            continue
        # Apply the current default syntax unless the line carries its own
        # explicit "name:" prefix, which overrides the default.
        pat = syntax + line
        for s, rels in syntaxes.iteritems():
            if line.startswith(rels):
                pat = line
                break
            elif line.startswith(s+':'):
                pat = rels + line[len(s)+1:]
                break
        patterns.append(pat)
    return patterns, warnings
def ignore(root, files, warn):
    '''return matcher covering patterns in 'files'.

    the files parsed for patterns include:
    .hgignore in the repository root
    any additional files specified in the [ui] section of ~/.hgrc

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob # defaults following lines to non-rooted globs
    re:pattern # non-rooted regular expression
    glob:pattern # non-rooted glob
    pattern # pattern of the current default type'''
    pats = {}
    for f in files:
        try:
            pats[f] = []
            fp = open(f)
            pats[f], warnings = ignorepats(fp)
            fp.close()
            for warning in warnings:
                warn("%s: %s\n" % (f, warning))
        except IOError, inst:
            # A missing/unreadable first file (the repo .hgignore) is silently
            # ignored; any other unreadable file produces a warning.
            if f != files[0]:
                warn(_("skipping unreadable ignore file '%s': %s\n") %
                     (f, inst.strerror))
    # Flatten all per-file pattern lists into one combined list.
    allpats = []
    for patlist in pats.values():
        allpats.extend(patlist)
    if not allpats:
        return util.never
    try:
        ignorefunc = match.match(root, '', [], allpats)
    except util.Abort:
        # Re-raise an exception where the src is the right file
        for f, patlist in pats.iteritems():
            try:
                match.match(root, '', [], patlist)
            except util.Abort, inst:
                raise util.Abort('%s: %s' % (f, inst[0]))
    return ignorefunc
|
# ignore.py - ignored file handling for mercurial
#
# Copyright 2007 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import util, match
import re
_commentre = None
def ignorepats(lines):
    '''parse lines (iterable) of .hgignore text, returning a tuple of
    (patterns, parse errors). These patterns should be given to compile()
    to be validated and converted into a match function.'''
    # Map of user-facing syntax names to internal matcher prefixes.
    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
    # Default syntax until a "syntax:" directive changes it.
    syntax = 'relre:'
    patterns = []
    warnings = []
    for line in lines:
        if "#" in line:
            global _commentre
            if not _commentre:
                _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
            # remove comments prefixed by an even number of escapes
            line = _commentre.sub(r'\1', line)
            # fixup properly escaped comments that survived the above
            line = line.replace("\\#", "#")
        line = line.rstrip()
        if not line:
            continue
        if line.startswith('syntax:'):
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                # Unknown syntax names are reported but do not abort parsing.
                warnings.append(_("ignoring invalid syntax '%s'") % s)
            continue
        # Apply the current default syntax unless the line carries its own
        # explicit "name:" prefix, which overrides the default.
        pat = syntax + line
        for s, rels in syntaxes.iteritems():
            if line.startswith(rels):
                pat = line
                break
            elif line.startswith(s+':'):
                pat = rels + line[len(s)+1:]
                break
        patterns.append(pat)
    return patterns, warnings
def ignore(root, files, warn):
    '''return matcher covering patterns in 'files'.

    the files parsed for patterns include:
    .hgignore in the repository root
    any additional files specified in the [ui] section of ~/.hgrc

    trailing white space is dropped.
    the escape character is backslash.
    comments start with #.
    empty lines are skipped.

    lines can be of the following formats:

    syntax: regexp # defaults following lines to non-rooted regexps
    syntax: glob # defaults following lines to non-rooted globs
    re:pattern # non-rooted regular expression
    glob:pattern # non-rooted glob
    pattern # pattern of the current default type'''
    pats = {}
    for f in files:
        try:
            pats[f] = []
            fp = open(f)
            pats[f], warnings = ignorepats(fp)
            fp.close()
            for warning in warnings:
                warn("%s: %s\n" % (f, warning))
        except IOError, inst:
            # A missing/unreadable first file (the repo .hgignore) is silently
            # ignored; any other unreadable file produces a warning.
            if f != files[0]:
                warn(_("skipping unreadable ignore file '%s': %s\n") %
                     (f, inst.strerror))
    # Flatten all per-file pattern lists into one combined list.
    allpats = []
    for patlist in pats.values():
        allpats.extend(patlist)
    if not allpats:
        return util.never
    try:
        ignorefunc = match.match(root, '', [], allpats)
    except util.Abort:
        # Re-raise an exception where the src is the right file
        for f, patlist in pats.iteritems():
            try:
                match.match(root, '', [], patlist)
            except util.Abort, inst:
                raise util.Abort('%s: %s' % (f, inst[0]))
    return ignorefunc
|
en
| 0.794381
|
# ignore.py - ignored file handling for mercurial # # Copyright 2007 <NAME> <<EMAIL>> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. parse lines (iterable) of .hgignore text, returning a tuple of (patterns, parse errors). These patterns should be given to compile() to be validated and converted into a match function. #.*') # remove comments prefixed by an even number of escapes # fixup properly escaped comments that survived the above #", "#") return matcher covering patterns in 'files'. the files parsed for patterns include: .hgignore in the repository root any additional files specified in the [ui] section of ~/.hgrc trailing white space is dropped. the escape character is backslash. comments start with #. empty lines are skipped. lines can be of the following formats: syntax: regexp # defaults following lines to non-rooted regexps syntax: glob # defaults following lines to non-rooted globs re:pattern # non-rooted regular expression glob:pattern # non-rooted glob pattern # pattern of the current default type # Re-raise an exception where the src is the right file
| 2.663407
| 3
|
netbox/utilities/testing/utils.py
|
esljaz/netbox
| 2
|
6627362
|
import logging
import re
from contextlib import contextmanager
from django.contrib.auth.models import Permission, User
def post_data(data):
    """
    Take a dictionary of test data (suitable for comparison to an instance) and return a dict suitable for POSTing.

    - None becomes '' (how HTML forms submit "no value")
    - model instances (anything with a ``pk`` attribute) become their PK
    - lists/tuples of instances become lists of PKs
    - anything else is stringified
    """
    ret = {}
    for key, value in data.items():
        if value is None:
            ret[key] = ''
        elif isinstance(value, (list, tuple)):
            # isinstance (rather than exact type comparison) also accepts
            # list/tuple subclasses such as Django QuerySet-derived lists.
            if value and hasattr(value[0], 'pk'):
                # Value is a list of instances
                ret[key] = [v.pk for v in value]
            else:
                ret[key] = value
        elif hasattr(value, 'pk'):
            # Value is an instance
            ret[key] = value.pk
        else:
            ret[key] = str(value)
    return ret
def create_test_user(username='testuser', permissions=None):
    """
    Create and return a User granted the named permissions.

    Each permission is given as "<app_label>.<codename>".
    """
    user = User.objects.create_user(username=username)
    for perm_name in (permissions or ()):
        app_label, codename = perm_name.split('.')
        user.user_permissions.add(
            Permission.objects.get(content_type__app_label=app_label, codename=codename)
        )
    return user
def extract_form_failures(content):
    """
    Given raw HTML content from an HTTP response, return a list of form errors.
    """
    # Error messages are embedded in the markup as "<!-- FORM-ERROR ... -->".
    pattern = re.compile(r'<!-- FORM-ERROR (.*) -->')
    return pattern.findall(str(content))
@contextmanager
def disable_warnings(logger_name):
    """
    Temporarily suppress expected warning messages to keep the test output clean.

    Raises the named logger's threshold to ERROR for the duration of the
    ``with`` block and restores the previous level afterwards, even if the
    body raises.
    """
    logger = logging.getLogger(logger_name)
    current_level = logger.level
    logger.setLevel(logging.ERROR)
    try:
        yield
    finally:
        # Restore the original level even when the block raises, so a failing
        # test does not leave the logger muted for subsequent tests.
        logger.setLevel(current_level)
|
import logging
import re
from contextlib import contextmanager
from django.contrib.auth.models import Permission, User
def post_data(data):
    """
    Take a dictionary of test data (suitable for comparison to an instance) and return a dict suitable for POSTing.

    - None becomes '' (how HTML forms submit "no value")
    - model instances (anything with a ``pk`` attribute) become their PK
    - lists/tuples of instances become lists of PKs
    - anything else is stringified
    """
    ret = {}
    for key, value in data.items():
        if value is None:
            ret[key] = ''
        elif isinstance(value, (list, tuple)):
            # isinstance (rather than exact type comparison) also accepts
            # list/tuple subclasses such as Django QuerySet-derived lists.
            if value and hasattr(value[0], 'pk'):
                # Value is a list of instances
                ret[key] = [v.pk for v in value]
            else:
                ret[key] = value
        elif hasattr(value, 'pk'):
            # Value is an instance
            ret[key] = value.pk
        else:
            ret[key] = str(value)
    return ret
def create_test_user(username='testuser', permissions=None):
    """
    Create and return a User granted the named permissions.

    Each permission is given as "<app_label>.<codename>".
    """
    user = User.objects.create_user(username=username)
    for perm_name in (permissions or ()):
        app_label, codename = perm_name.split('.')
        user.user_permissions.add(
            Permission.objects.get(content_type__app_label=app_label, codename=codename)
        )
    return user
def extract_form_failures(content):
    """
    Given raw HTML content from an HTTP response, return a list of form errors.
    """
    # Error messages are embedded in the markup as "<!-- FORM-ERROR ... -->".
    pattern = re.compile(r'<!-- FORM-ERROR (.*) -->')
    return pattern.findall(str(content))
@contextmanager
def disable_warnings(logger_name):
    """
    Temporarily suppress expected warning messages to keep the test output clean.

    Raises the named logger's threshold to ERROR for the duration of the
    ``with`` block and restores the previous level afterwards, even if the
    body raises.
    """
    logger = logging.getLogger(logger_name)
    current_level = logger.level
    logger.setLevel(logging.ERROR)
    try:
        yield
    finally:
        # Restore the original level even when the block raises, so a failing
        # test does not leave the logger muted for subsequent tests.
        logger.setLevel(current_level)
|
en
| 0.765246
|
Take a dictionary of test data (suitable for comparison to an instance) and return a dict suitable for POSTing. # Value is a list of instances # Value is an instance Create a User with the given permissions. Given raw HTML content from an HTTP response, return a list of form errors. Temporarily suppress expected warning messages to keep the test output clean.
| 2.5715
| 3
|
bot.py
|
superik032/xIGDLBot
| 0
|
6627363
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is dedicated to the public domain under the CC0 license.
# Coded with ❤️ by <NAME> (@NandiyaLive)
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, run_async
import requests
from bs4 import BeautifulSoup as bs
from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup
# from instaloader import Instaloader, Profile, Post
import sys
import shutil
import glob
import os
import zipfile
import pathlib
bot_token = os.environ.get("BOT_TOKEN", "<KEY>")
bot = Bot(token=bot_token)
help_keyboard = [[InlineKeyboardButton("Updates Channel", url="https://t.me/MBNUpdates"),
InlineKeyboardButton("Support Chat", url="https://t.me/MBNChat")]]
help_reply_markup = InlineKeyboardMarkup(help_keyboard)
def start(update, context):
    # Handler for /start: gates usage on membership of the updates channel
    # (chat id -1001225141087) before greeting the user.
    user = update.message.from_user
    chat_member = context.bot.get_chat_member(
        chat_id='-1001225141087', user_id=update.message.chat_id)
    status = chat_member["status"]
    if(status == 'left'):
        # Not a channel member: ask them to join, then /start again.
        context.bot.send_message(chat_id=update.message.chat_id,
                                 text=f"Hi {user.first_name}, to use me you have to be a member of the updates channel in order to stay updated with the latest developments.\nPlease click below button to join and /start the bot again.", reply_markup=help_reply_markup)
        return
    else:
        # Member: send the welcome message with help/chat buttons.
        context.bot.send_message(chat_id=update.message.chat_id,
                                 text=f"Hi {user.first_name}!\nI'm Instagram Media Downloader Bot. I can help you to download Stories and IGTV Videos from any public instagram account.\nPlease read the /help before using me.", parse_mode=telegram.ParseMode.HTML, reply_markup=help_reply_markup)
def help(update, context):
    # Handler for /help: sends usage instructions with channel/chat buttons.
    # NOTE(review): shadows the builtin help() at module level.
    keyboard = [[InlineKeyboardButton("Updates Channel", url="https://t.me/MBNUpdates"),
                 InlineKeyboardButton("Support Chat", url="https://t.me/MBNChat")]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_text('''<b>Usage:</b>\n/stories username - Download stories from the username’s profile.\n/igtv username - Download IGTV videos from the username’s profile.\n/feed username - Download all posts from the username’s profile as a zip file.\n\n<b>How to find the username?</b>\nOpen Instagram app & then go to a profile that you want to download items. Username must be on the top.\nIn case you are using a browser you can find it in the Address bar.\n<b>Example : </b>Username for instagram.com/rashmika_mandanna & @rashmika_mandanna is 'rashmika_mandanna' 😉''', parse_mode=telegram.ParseMode.HTML, reply_markup=reply_markup)
def about(update, context):
    # Handler for /about: short bot description plus a source-code button.
    keyboard = [[InlineKeyboardButton(
        "Source Code", url="https://github.com/NandiyaLive/xIGDLBot")]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    context.bot.send_message(chat_id=update.message.chat_id,
                             text='''I can help you to download media from any public instagram account without leaving Telegram.\n\nMade with ❤️ + python-telegram-bot by @NandiyaLive''', parse_mode=telegram.ParseMode.HTML, reply_markup=reply_markup)
def echo(update, context):
    """Catch-all text handler: nudge the user toward /help."""
    chat_id = update.message.chat_id
    context.bot.send_message(chat_id=chat_id, text='Please read /help')
def stories(update, context):
    # Handler for "/stories <username>": scrapes insta-stories.com and relays
    # any current story photos/videos back to the chat.
    user = context.bot.get_chat_member(
        chat_id='-1001225141087', user_id=update.message.chat_id)
    status = user["status"]
    if(status == 'left'):
        # Channel-membership gate, same as /start.
        context.bot.send_message(chat_id=update.message.chat_id,
                                 text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
        return
    else:
        # First check the scraping service's own status page.
        status_page = "https://www.insta-stories.com/en/status"
        req_status = requests.get(status_page).text
        status = bs(req_status, "lxml")
        if status.find("div", class_="status status--ok"):
            fullmsg = update.message.text
            if fullmsg == "/stories":
                # No username supplied.
                update.message.reply_text(
                    '/stories [instagram username]\nPlease read /help')
            else:
                # Strip the command and an optional leading "@".
                msg = fullmsg.replace("/stories ", "")
                if "@" in msg.lower():
                    query = msg.replace("@", "")
                else:
                    query = msg
                url = f"https://www.insta-stories.com/en/stories/{query}"
                r = requests.get(url).text
                soup = bs(r, "lxml")
                if soup.find("div", class_="msg msg-user-not-found"):
                    update.message.reply_text(
                        "This username doesn't exist. Please try with another one.")
                elif soup.find("div", class_="error"):
                    update.message.reply_text(
                        "API Error 🤒\nPlease try again later.")
                else:
                    if soup.find("div", class_="msg msg-no-stories"):
                        update.message.reply_text(
                            "No stories available. Please try again later.")
                    else:
                        try:
                            profile = soup.find("div", class_="user-name").text
                            update.message.reply_text(
                                f"Downloading stories of {profile}")
                            # Relay every scraped story video and image.
                            videos = soup.findAll(class_='story-video')
                            photos = soup.findAll(class_='story-image')
                            for video in videos:
                                src = video.find("source")['src']
                                context.bot.send_video(
                                    chat_id=update.message.chat_id, video=f"https://www.insta-stories.com{src}")
                            for photo in photos:
                                context.bot.send_photo(
                                    chat_id=update.message.chat_id, photo=f"https://www.insta-stories.com{photo['src']}")
                            bot.send_message(
                                text="Thanks for using @xIGDLBot\nPlease /donate to keep this service alive!", chat_id=update.message.chat_id)
                        except:
                            # NOTE(review): bare except hides the real failure
                            # (scrape-layout change, Telegram error, etc.);
                            # narrowing it would aid debugging.
                            context.bot.send_message(chat_id=update.message.chat_id,
                                                     text="Something went wrong. Please try again later.", parse_mode=telegram.ParseMode.HTML)
        else:
            update.message.reply_text(
                "API is not working. Please try again later.")
# def igtv(update, context):
# user = context.bot.get_chat_member(
# chat_id='-1001225141087', user_id=update.message.chat_id)
# status = user["status"]
# if(status == 'left'):
# context.bot.send_message(chat_id=update.message.chat_id,
# text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
# return
# else:
# fullmsg = update.message.text
# if fullmsg == "/igtv":
# update.message.reply_text(
# '/igtv [instagram username]\nPlease read /help')
# else:
# msg = fullmsg.replace("/igtv ", "")
# if "@" in msg.lower():
# query = msg.replace("@", "")
# else:
# query = msg
# L = Instaloader(dirname_pattern=query, download_comments=False,
# download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None)
# profile = Profile.from_username(L.context, query)
# igtv_count = profile.igtvcount
# posts = profile.get_igtv_posts()
# update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nIGTV Video Count : " + str(
# igtv_count) + "\nThis may take longer, take a nap I can handle this without you.")
# try:
# L.posts_download_loop(posts, query)
# except Exception as e:
# context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR</b>\n"+str(
# e), parse_mode=telegram.ParseMode.HTML)
# return
# src_dir = query
# for vidfile in glob.iglob(os.path.join(src_dir, "*.mp4")):
# context.bot.send_video(
# chat_id=update.message.chat_id, video=open(vidfile, 'rb'))
# bot.send_message(
# text="Thanks for using @xIGDLBot\nPlease /donate to keep this service alive!", chat_id=update.message.chat_id)
# try:
# shutil.rmtree(query)
# except Exception:
# pass
def feed(update, context):
    """Handler for /feed: the feature is disabled, so point users at the beta bot."""
    notice = ("This feature is still under development. "
              "Please use @MBNBetaBot if you like to beta test this feature.")
    bot.send_message(chat_id=update.message.chat_id, text=notice)
# user = context.bot.get_chat_member(chat_id='-1001225141087', user_id=update.message.chat_id)
# status = user["status"]
# if(status == 'left'):
# context.bot.send_message(chat_id=update.message.chat_id,text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
# return
# else :
# fullmsg = update.message.text
# if fullmsg == "/feed":
# update.message.reply_text(
# '/feed [instagram username]\nPlease read /help')
# else:
# msg = fullmsg.replace("/feed ", "")
# if "@" in msg.lower():
# query = msg.replace("@", "")
# else:
# query = msg
# L = Instaloader(dirname_pattern=query, download_comments=False,
# download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None)
# profile = Profile.from_username(L.context, query)
# media = profile.mediacount
# update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nMedia Count : " + str(media) +
# "\nThis may take longer, take a nap I can handle this without you.")
# posts = profile.get_posts()
# try:
# L.posts_download_loop(posts, query)
# except Exception as e:
# context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR\n"+str(
# e), parse_mode=telegram.ParseMode.HTML)
# return
# update.message.reply_text("Download Completed.\n🗄 Archiving files...")
# zf = zipfile.ZipFile(f"{query}.zip", "w")
# for dirname, subdirs, files in os.walk(query):
# zf.write(query)
# for filename in files:
# zf.write(os.path.join(dirname, filename))
# zf.close()
# update.message.reply_text("Uploading to Telegram...")
# for zip_file in glob.glob("*.zip"):
# context.bot.send_document(chat_id=update.message.chat_id,
# document=open(zip_file, 'rb'))
# try:
# shutil.rmtree(query)
# os.remove(f"{query}.zip")
# except Exception:
# pass
def donate(update, context):
    """Handler for /donate: reply with donation contact details."""
    sender = update.message.from_user
    reply = (
        f"Hey {sender.first_name}! \nThanks for showing interest in my works"
        "\nPlease contact @NandiyaLive for more info. "
        "You can send any amount you wish to donate me."
    )
    bot.send_message(chat_id=update.message.chat_id, text=reply)
def main():
    """Register all handlers and run the bot until interrupted."""
    updater = Updater(bot_token, use_context=True)
    dp = updater.dispatcher
    # Registration order is preserved from the original wiring; the catch-all
    # text handler goes last and explicitly excludes commands.
    command_table = [
        ("start", start),
        ("help", help),
        ("stories", stories),
        ("about", about),
        # ("igtv", igtv),  # disabled: Instaloader-based implementation is commented out
        ("feed", feed),
        ("donate", donate),
    ]
    for command, callback in command_table:
        dp.add_handler(CommandHandler(command, callback, run_async=True))
    dp.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is dedicated to the public domain under the CC0 license.
# Coded with ❤️ by <NAME> (@NandiyaLive)
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, run_async
import requests
from bs4 import BeautifulSoup as bs
from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup
# from instaloader import Instaloader, Profile, Post
import sys
import shutil
import glob
import os
import zipfile
import pathlib
# Bot token comes from the environment; the literal fallback is a redacted placeholder.
bot_token = os.environ.get("BOT_TOKEN", "<KEY>")
# Module-level Bot instance used by handlers that send without a context.
bot = Bot(token=bot_token)
# Inline keyboard (updates channel + support chat) attached to /start replies.
help_keyboard = [[InlineKeyboardButton("Updates Channel", url="https://t.me/MBNUpdates"),
InlineKeyboardButton("Support Chat", url="https://t.me/MBNChat")]]
help_reply_markup = InlineKeyboardMarkup(help_keyboard)
def start(update, context):
    """Handler for /start: greet the user, but require updates-channel membership."""
    sender = update.message.from_user
    membership = context.bot.get_chat_member(
        chat_id='-1001225141087', user_id=update.message.chat_id)
    if membership["status"] == 'left':
        # Not a member yet: ask them to join before using the bot.
        context.bot.send_message(
            chat_id=update.message.chat_id,
            text=f"Hi {sender.first_name}, to use me you have to be a member of the updates channel in order to stay updated with the latest developments.\nPlease click below button to join and /start the bot again.",
            reply_markup=help_reply_markup)
        return
    context.bot.send_message(
        chat_id=update.message.chat_id,
        text=f"Hi {sender.first_name}!\nI'm Instagram Media Downloader Bot. I can help you to download Stories and IGTV Videos from any public instagram account.\nPlease read the /help before using me.",
        parse_mode=telegram.ParseMode.HTML,
        reply_markup=help_reply_markup)
def help(update, context):
    """Handler for /help: send usage instructions with support-channel buttons.

    NOTE(review): the name shadows the builtin ``help``; kept because the
    dispatcher registers this exact name.
    """
    buttons = [[InlineKeyboardButton("Updates Channel", url="https://t.me/MBNUpdates"),
                InlineKeyboardButton("Support Chat", url="https://t.me/MBNChat")]]
    markup = InlineKeyboardMarkup(buttons)
    update.message.reply_text('''<b>Usage:</b>\n/stories username - Download stories from the username’s profile.\n/igtv username - Download IGTV videos from the username’s profile.\n/feed username - Download all posts from the username’s profile as a zip file.\n\n<b>How to find the username?</b>\nOpen Instagram app & then go to a profile that you want to download items. Username must be on the top.\nIn case you are using a browser you can find it in the Address bar.\n<b>Example : </b>Username for instagram.com/rashmika_mandanna & @rashmika_mandanna is 'rashmika_mandanna' 😉''', parse_mode=telegram.ParseMode.HTML, reply_markup=markup)
def about(update, context):
    """Handler for /about: short bot description plus a source-code button."""
    markup = InlineKeyboardMarkup(
        [[InlineKeyboardButton("Source Code",
                               url="https://github.com/NandiyaLive/xIGDLBot")]])
    context.bot.send_message(
        chat_id=update.message.chat_id,
        text='''I can help you to download media from any public instagram account without leaving Telegram.\n\nMade with ❤️ + python-telegram-bot by @NandiyaLive''',
        parse_mode=telegram.ParseMode.HTML,
        reply_markup=markup)
def echo(update, context):
    """Fallback for plain-text messages: point the user at /help."""
    context.bot.send_message(chat_id=update.message.chat_id,
                             text='''Please read /help''')
def stories(update, context):
    """Handler for /stories: relay a user's current Instagram stories to the chat.

    Flow: require updates-channel membership, check the scraper's health page,
    fetch the story page for the requested username from insta-stories.com, then
    send every story video and photo back to the chat.

    Fixes vs. original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception:``, the
    "To use to bot" message typo is corrected, and the deeply nested
    conditionals are flattened into guard clauses.
    """
    member = context.bot.get_chat_member(
        chat_id='-1001225141087', user_id=update.message.chat_id)
    if member["status"] == 'left':
        context.bot.send_message(
            chat_id=update.message.chat_id,
            text="To use the bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
        return
    # The scraper publishes its own health page; bail out early when it is down.
    status_page = "https://www.insta-stories.com/en/status"
    status = bs(requests.get(status_page).text, "lxml")
    if not status.find("div", class_="status status--ok"):
        update.message.reply_text("API is not working. Please try again later.")
        return
    fullmsg = update.message.text
    if fullmsg == "/stories":
        update.message.reply_text('/stories [instagram username]\nPlease read /help')
        return
    msg = fullmsg.replace("/stories ", "")
    # Accept both "@username" and "username".
    query = msg.replace("@", "") if "@" in msg.lower() else msg
    url = f"https://www.insta-stories.com/en/stories/{query}"
    soup = bs(requests.get(url).text, "lxml")
    if soup.find("div", class_="msg msg-user-not-found"):
        update.message.reply_text(
            "This username doesn't exist. Please try with another one.")
    elif soup.find("div", class_="error"):
        update.message.reply_text("API Error 🤒\nPlease try again later.")
    elif soup.find("div", class_="msg msg-no-stories"):
        update.message.reply_text("No stories available. Please try again later.")
    else:
        try:
            profile = soup.find("div", class_="user-name").text
            update.message.reply_text(f"Downloading stories of {profile}")
            for video in soup.findAll(class_='story-video'):
                src = video.find("source")['src']
                context.bot.send_video(
                    chat_id=update.message.chat_id,
                    video=f"https://www.insta-stories.com{src}")
            for photo in soup.findAll(class_='story-image'):
                context.bot.send_photo(
                    chat_id=update.message.chat_id,
                    photo=f"https://www.insta-stories.com{photo['src']}")
            bot.send_message(
                text="Thanks for using @xIGDLBot\nPlease /donate to keep this service alive!",
                chat_id=update.message.chat_id)
        except Exception:  # was a bare except: keep bot alive on scrape/send errors
            context.bot.send_message(
                chat_id=update.message.chat_id,
                text="Something went wrong. Please try again later.",
                parse_mode=telegram.ParseMode.HTML)
# def igtv(update, context):
# user = context.bot.get_chat_member(
# chat_id='-1001225141087', user_id=update.message.chat_id)
# status = user["status"]
# if(status == 'left'):
# context.bot.send_message(chat_id=update.message.chat_id,
# text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
# return
# else:
# fullmsg = update.message.text
# if fullmsg == "/igtv":
# update.message.reply_text(
# '/igtv [instagram username]\nPlease read /help')
# else:
# msg = fullmsg.replace("/igtv ", "")
# if "@" in msg.lower():
# query = msg.replace("@", "")
# else:
# query = msg
# L = Instaloader(dirname_pattern=query, download_comments=False,
# download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None)
# profile = Profile.from_username(L.context, query)
# igtv_count = profile.igtvcount
# posts = profile.get_igtv_posts()
# update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nIGTV Video Count : " + str(
# igtv_count) + "\nThis may take longer, take a nap I can handle this without you.")
# try:
# L.posts_download_loop(posts, query)
# except Exception as e:
# context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR</b>\n"+str(
# e), parse_mode=telegram.ParseMode.HTML)
# return
# src_dir = query
# for vidfile in glob.iglob(os.path.join(src_dir, "*.mp4")):
# context.bot.send_video(
# chat_id=update.message.chat_id, video=open(vidfile, 'rb'))
# bot.send_message(
# text="Thanks for using @xIGDLBot\nPlease /donate to keep this service alive!", chat_id=update.message.chat_id)
# try:
# shutil.rmtree(query)
# except Exception:
# pass
def feed(update, context):
    """Handler for /feed: the feature is disabled, so point users at the beta bot."""
    notice = ("This feature is still under development. "
              "Please use @MBNBetaBot if you like to beta test this feature.")
    bot.send_message(chat_id=update.message.chat_id, text=notice)
# user = context.bot.get_chat_member(chat_id='-1001225141087', user_id=update.message.chat_id)
# status = user["status"]
# if(status == 'left'):
# context.bot.send_message(chat_id=update.message.chat_id,text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.")
# return
# else :
# fullmsg = update.message.text
# if fullmsg == "/feed":
# update.message.reply_text(
# '/feed [instagram username]\nPlease read /help')
# else:
# msg = fullmsg.replace("/feed ", "")
# if "@" in msg.lower():
# query = msg.replace("@", "")
# else:
# query = msg
# L = Instaloader(dirname_pattern=query, download_comments=False,
# download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None)
# profile = Profile.from_username(L.context, query)
# media = profile.mediacount
# update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nMedia Count : " + str(media) +
# "\nThis may take longer, take a nap I can handle this without you.")
# posts = profile.get_posts()
# try:
# L.posts_download_loop(posts, query)
# except Exception as e:
# context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR\n"+str(
# e), parse_mode=telegram.ParseMode.HTML)
# return
# update.message.reply_text("Download Completed.\n🗄 Archiving files...")
# zf = zipfile.ZipFile(f"{query}.zip", "w")
# for dirname, subdirs, files in os.walk(query):
# zf.write(query)
# for filename in files:
# zf.write(os.path.join(dirname, filename))
# zf.close()
# update.message.reply_text("Uploading to Telegram...")
# for zip_file in glob.glob("*.zip"):
# context.bot.send_document(chat_id=update.message.chat_id,
# document=open(zip_file, 'rb'))
# try:
# shutil.rmtree(query)
# os.remove(f"{query}.zip")
# except Exception:
# pass
def donate(update, context):
    """Handler for /donate: reply with donation contact details."""
    sender = update.message.from_user
    reply = (
        f"Hey {sender.first_name}! \nThanks for showing interest in my works"
        "\nPlease contact @NandiyaLive for more info. "
        "You can send any amount you wish to donate me."
    )
    bot.send_message(chat_id=update.message.chat_id, text=reply)
def main():
    """Register all handlers and run the bot until interrupted."""
    updater = Updater(bot_token, use_context=True)
    dp = updater.dispatcher
    # Registration order is preserved from the original wiring; the catch-all
    # text handler goes last and explicitly excludes commands.
    command_table = [
        ("start", start),
        ("help", help),
        ("stories", stories),
        ("about", about),
        # ("igtv", igtv),  # disabled: Instaloader-based implementation is commented out
        ("feed", feed),
        ("donate", donate),
    ]
    for command, callback in command_table:
        dp.add_handler(CommandHandler(command, callback, run_async=True))
    dp.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    main()
|
en
| 0.464538
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # This program is dedicated to the public domain under the CC0 license. # Coded with ❤️ by <NAME> (@NandiyaLive) # from instaloader import Instaloader, Profile, Post <b>Usage:</b>\n/stories username - Download stories from the username’s profile.\n/igtv username - Download IGTV videos from the username’s profile.\n/feed username - Download all posts from the username’s profile as a zip file.\n\n<b>How to find the username?</b>\nOpen Instagram app & then go to a profile that you want to download items. Username must be on the top.\nIn case you are using a browser you can find it in the Address bar.\n<b>Example : </b>Username for instagram.com/rashmika_mandanna & @rashmika_mandanna is 'rashmika_mandanna' 😉 I can help you to download media from any public instagram account without leaving Telegram.\n\nMade with ❤️ + python-telegram-bot by @NandiyaLive Please read /help # def igtv(update, context): # user = context.bot.get_chat_member( # chat_id='-1001225141087', user_id=update.message.chat_id) # status = user["status"] # if(status == 'left'): # context.bot.send_message(chat_id=update.message.chat_id, # text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.") # return # else: # fullmsg = update.message.text # if fullmsg == "/igtv": # update.message.reply_text( # '/igtv [instagram username]\nPlease read /help') # else: # msg = fullmsg.replace("/igtv ", "") # if "@" in msg.lower(): # query = msg.replace("@", "") # else: # query = msg # L = Instaloader(dirname_pattern=query, download_comments=False, # download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None) # profile = Profile.from_username(L.context, query) # igtv_count = profile.igtvcount # posts = profile.get_igtv_posts() # update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nIGTV 
Video Count : " + str( # igtv_count) + "\nThis may take longer, take a nap I can handle this without you.") # try: # L.posts_download_loop(posts, query) # except Exception as e: # context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR</b>\n"+str( # e), parse_mode=telegram.ParseMode.HTML) # return # src_dir = query # for vidfile in glob.iglob(os.path.join(src_dir, "*.mp4")): # context.bot.send_video( # chat_id=update.message.chat_id, video=open(vidfile, 'rb')) # bot.send_message( # text="Thanks for using @xIGDLBot\nPlease /donate to keep this service alive!", chat_id=update.message.chat_id) # try: # shutil.rmtree(query) # except Exception: # pass # user = context.bot.get_chat_member(chat_id='-1001225141087', user_id=update.message.chat_id) # status = user["status"] # if(status == 'left'): # context.bot.send_message(chat_id=update.message.chat_id,text="To use to bot you need to be a member of @MBNUpdates in order to stay updated with the latest developments.") # return # else : # fullmsg = update.message.text # if fullmsg == "/feed": # update.message.reply_text( # '/feed [instagram username]\nPlease read /help') # else: # msg = fullmsg.replace("/feed ", "") # if "@" in msg.lower(): # query = msg.replace("@", "") # else: # query = msg # L = Instaloader(dirname_pattern=query, download_comments=False, # download_video_thumbnails=False, save_metadata=False, download_geotags=True, compress_json=True, post_metadata_txt_pattern=None, storyitem_metadata_txt_pattern=None) # profile = Profile.from_username(L.context, query) # media = profile.mediacount # update.message.reply_text("Cooking your request 👨🍳\nProfile : " + query + "\nMedia Count : " + str(media) + # "\nThis may take longer, take a nap I can handle this without you.") # posts = profile.get_posts() # try: # L.posts_download_loop(posts, query) # except Exception as e: # context.bot.send_message(chat_id=update.message.chat_id, text="<b>ERROR\n"+str( # e), parse_mode=telegram.ParseMode.HTML) # return # 
update.message.reply_text("Download Completed.\n🗄 Archiving files...") # zf = zipfile.ZipFile(f"{query}.zip", "w") # for dirname, subdirs, files in os.walk(query): # zf.write(query) # for filename in files: # zf.write(os.path.join(dirname, filename)) # zf.close() # update.message.reply_text("Uploading to Telegram...") # for zip_file in glob.glob("*.zip"): # context.bot.send_document(chat_id=update.message.chat_id, # document=open(zip_file, 'rb')) # try: # shutil.rmtree(query) # os.remove(f"{query}.zip") # except Exception: # pass # dp.add_handler(CommandHandler("igtv", igtv, run_async=True))
| 2.479207
| 2
|
ui/wm/wm.gyp
|
sunjc53yy/chromium
| 0
|
6627364
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
  # GN version: //ui/wm
  'target_name': 'wm',
  'type': '<(component)',
  'dependencies': [
    '../../base/base.gyp:base',
    '../../skia/skia.gyp:skia',
    '../aura/aura.gyp:aura',
    '../compositor/compositor.gyp:compositor',
    '../events/devices/events_devices.gyp:events_devices',
    '../events/events.gyp:events',
    '../events/events.gyp:events_base',
    '../events/platform/events_platform.gyp:events_platform',
    '../gfx/gfx.gyp:gfx_geometry',
    '../gfx/gfx.gyp:gfx',
    '../resources/ui_resources.gyp:ui_resources',
    '../base/ui_base.gyp:ui_base',
  ],
  'defines': [
    'WM_IMPLEMENTATION',
  ],
  'sources': [
    # Note: sources list duplicated in GN build.
    'core/accelerator_delegate.h',
    'core/accelerator_filter.cc',
    'core/accelerator_filter.h',
    # Fixed: 'core/base_focus_rules.h' was listed twice; deduplicated and
    # ordered .cc before .h to match the rest of the list.
    'core/base_focus_rules.cc',
    'core/base_focus_rules.h',
    'core/capture_controller.cc',
    'core/capture_controller.h',
    'core/compound_event_filter.cc',
    'core/compound_event_filter.h',
    'core/coordinate_conversion.cc',
    'core/coordinate_conversion.h',
    'core/cursor_manager.cc',
    'core/cursor_manager.h',
    'core/default_activation_client.cc',
    'core/default_activation_client.h',
    'core/default_screen_position_client.cc',
    'core/default_screen_position_client.h',
    'core/easy_resize_window_targeter.cc',
    'core/easy_resize_window_targeter.h',
    'core/focus_controller.cc',
    'core/focus_controller.h',
    'core/focus_rules.h',
    'core/image_grid.cc',
    'core/image_grid.h',
    'core/input_method_event_filter.cc',
    'core/input_method_event_filter.h',
    'core/masked_window_targeter.cc',
    'core/masked_window_targeter.h',
    'core/native_cursor_manager.h',
    'core/native_cursor_manager_delegate.h',
    'core/nested_accelerator_dispatcher_linux.cc',
    'core/nested_accelerator_dispatcher_win.cc',
    'core/nested_accelerator_dispatcher.cc',
    'core/nested_accelerator_dispatcher.h',
    'core/nested_accelerator_delegate.h',
    'core/nested_accelerator_controller.cc',
    'core/nested_accelerator_controller.h',
    'core/shadow.cc',
    'core/shadow.h',
    'core/shadow_controller.cc',
    'core/shadow_controller.h',
    'core/shadow_types.cc',
    'core/shadow_types.h',
    'core/transient_window_controller.cc',
    'core/transient_window_controller.h',
    'core/transient_window_manager.cc',
    'core/transient_window_manager.h',
    'core/transient_window_observer.h',
    'core/transient_window_stacking_client.cc',
    'core/transient_window_stacking_client.h',
    'core/user_activity_detector.cc',
    'core/user_activity_detector.h',
    'core/user_activity_observer.h',
    'core/visibility_controller.cc',
    'core/visibility_controller.h',
    'core/window_animations.cc',
    'core/window_animations.h',
    'core/window_modality_controller.cc',
    'core/window_modality_controller.h',
    'core/window_util.cc',
    'core/window_util.h',
    'core/wm_core_switches.cc',
    'core/wm_core_switches.h',
    'core/wm_state.cc',
    'core/wm_state.h',
    'wm_export.h',
  ],
},
{
# GN version: //ui/wm:test_support
# Static helper library (WMTestHelper) linked into wm_unittests.
'target_name': 'wm_test_support',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
],
'sources': [
'test/wm_test_helper.cc',
'test/wm_test_helper.h',
],
},
{
# GN version: //ui/wm:wm_unittests
# Unit-test binary for ui/wm core; run_all_unittests.cc supplies main().
'target_name': 'wm_unittests',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../aura/aura.gyp:aura',
'../aura/aura.gyp:aura_test_support',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/platform/events_platform.gyp:events_platform',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'wm',
'wm_test_support',
],
'sources': [
'test/run_all_unittests.cc',
'core/compound_event_filter_unittest.cc',
'core/cursor_manager_unittest.cc',
'core/focus_controller_unittest.cc',
'core/input_method_event_filter_unittest.cc',
'core/image_grid_unittest.cc',
'core/nested_accelerator_controller_unittest.cc',
'core/shadow_controller_unittest.cc',
'core/shadow_unittest.cc',
'core/transient_window_manager_unittest.cc',
'core/transient_window_stacking_client_unittest.cc',
'core/user_activity_detector_unittest.cc',
'core/visibility_controller_unittest.cc',
'core/window_animations_unittest.cc',
'core/window_util_unittest.cc',
],
},
],
}
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
  # GN version: //ui/wm
  'target_name': 'wm',
  'type': '<(component)',
  'dependencies': [
    '../../base/base.gyp:base',
    '../../skia/skia.gyp:skia',
    '../aura/aura.gyp:aura',
    '../compositor/compositor.gyp:compositor',
    '../events/devices/events_devices.gyp:events_devices',
    '../events/events.gyp:events',
    '../events/events.gyp:events_base',
    '../events/platform/events_platform.gyp:events_platform',
    '../gfx/gfx.gyp:gfx_geometry',
    '../gfx/gfx.gyp:gfx',
    '../resources/ui_resources.gyp:ui_resources',
    '../base/ui_base.gyp:ui_base',
  ],
  'defines': [
    'WM_IMPLEMENTATION',
  ],
  'sources': [
    # Note: sources list duplicated in GN build.
    'core/accelerator_delegate.h',
    'core/accelerator_filter.cc',
    'core/accelerator_filter.h',
    # Fixed: 'core/base_focus_rules.h' was listed twice; deduplicated and
    # ordered .cc before .h to match the rest of the list.
    'core/base_focus_rules.cc',
    'core/base_focus_rules.h',
    'core/capture_controller.cc',
    'core/capture_controller.h',
    'core/compound_event_filter.cc',
    'core/compound_event_filter.h',
    'core/coordinate_conversion.cc',
    'core/coordinate_conversion.h',
    'core/cursor_manager.cc',
    'core/cursor_manager.h',
    'core/default_activation_client.cc',
    'core/default_activation_client.h',
    'core/default_screen_position_client.cc',
    'core/default_screen_position_client.h',
    'core/easy_resize_window_targeter.cc',
    'core/easy_resize_window_targeter.h',
    'core/focus_controller.cc',
    'core/focus_controller.h',
    'core/focus_rules.h',
    'core/image_grid.cc',
    'core/image_grid.h',
    'core/input_method_event_filter.cc',
    'core/input_method_event_filter.h',
    'core/masked_window_targeter.cc',
    'core/masked_window_targeter.h',
    'core/native_cursor_manager.h',
    'core/native_cursor_manager_delegate.h',
    'core/nested_accelerator_dispatcher_linux.cc',
    'core/nested_accelerator_dispatcher_win.cc',
    'core/nested_accelerator_dispatcher.cc',
    'core/nested_accelerator_dispatcher.h',
    'core/nested_accelerator_delegate.h',
    'core/nested_accelerator_controller.cc',
    'core/nested_accelerator_controller.h',
    'core/shadow.cc',
    'core/shadow.h',
    'core/shadow_controller.cc',
    'core/shadow_controller.h',
    'core/shadow_types.cc',
    'core/shadow_types.h',
    'core/transient_window_controller.cc',
    'core/transient_window_controller.h',
    'core/transient_window_manager.cc',
    'core/transient_window_manager.h',
    'core/transient_window_observer.h',
    'core/transient_window_stacking_client.cc',
    'core/transient_window_stacking_client.h',
    'core/user_activity_detector.cc',
    'core/user_activity_detector.h',
    'core/user_activity_observer.h',
    'core/visibility_controller.cc',
    'core/visibility_controller.h',
    'core/window_animations.cc',
    'core/window_animations.h',
    'core/window_modality_controller.cc',
    'core/window_modality_controller.h',
    'core/window_util.cc',
    'core/window_util.h',
    'core/wm_core_switches.cc',
    'core/wm_core_switches.h',
    'core/wm_state.cc',
    'core/wm_state.h',
    'wm_export.h',
  ],
},
{
# GN version: //ui/wm:test_support
# Static helper library (WMTestHelper) linked into wm_unittests.
'target_name': 'wm_test_support',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
],
'sources': [
'test/wm_test_helper.cc',
'test/wm_test_helper.h',
],
},
{
# GN version: //ui/wm:wm_unittests
# Unit-test binary for ui/wm core; run_all_unittests.cc supplies main().
'target_name': 'wm_unittests',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../aura/aura.gyp:aura',
'../aura/aura.gyp:aura_test_support',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/platform/events_platform.gyp:events_platform',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'wm',
'wm_test_support',
],
'sources': [
'test/run_all_unittests.cc',
'core/compound_event_filter_unittest.cc',
'core/cursor_manager_unittest.cc',
'core/focus_controller_unittest.cc',
'core/input_method_event_filter_unittest.cc',
'core/image_grid_unittest.cc',
'core/nested_accelerator_controller_unittest.cc',
'core/shadow_controller_unittest.cc',
'core/shadow_unittest.cc',
'core/transient_window_manager_unittest.cc',
'core/transient_window_stacking_client_unittest.cc',
'core/user_activity_detector_unittest.cc',
'core/visibility_controller_unittest.cc',
'core/window_animations_unittest.cc',
'core/window_util_unittest.cc',
],
},
],
}
|
en
| 0.84014
|
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # GN version: //ui/wm # Note: sources list duplicated in GN build. # GN version: //ui/wm:test_support # GN version: //ui/wm:wm_unittests
| 1.115604
| 1
|
third_party/blink/public/mojom/feature_policy/PRESUBMIT.py
|
sarang-apps/darshan_browser
| 0
|
6627365
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Blink feature-policy presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def _RunUmaHistogramChecks(input_api, output_api):  # pylint: disable=C0103
    """Warn when feature_policy_feature.mojom changes without regenerating the
    FeaturePolicyFeature UMA enum via update_feature_policy_enum.py."""
    import sys
    saved_path = sys.path
    try:
        # Temporarily extend sys.path so the histograms tooling can be imported.
        histograms_dir = input_api.os_path.join(
            input_api.PresubmitLocalPath(), '..', '..', '..', '..', '..',
            'tools', 'metrics', 'histograms')
        sys.path = sys.path + [histograms_dir]
        import update_histogram_enum
    finally:
        sys.path = saved_path

    mojom_path = ''
    for affected in input_api.AffectedFiles():
        if affected.LocalPath().endswith('feature_policy_feature.mojom'):
            mojom_path = affected.LocalPath()
            break
    else:
        # The enum source file was not touched by this change; nothing to do.
        return []

    presubmit_error = update_histogram_enum.CheckPresubmitErrors(
        histogram_enum_name='FeaturePolicyFeature',
        update_script_name='update_feature_policy_enum.py',
        source_enum_path=mojom_path,
        start_marker='^enum FeaturePolicyFeature {',
        end_marker='^};',
        strip_k_prefix=True)
    if presubmit_error:
        return [output_api.PresubmitPromptWarning(presubmit_error,
                                                  items=[mojom_path])]
    return []
def CheckChangeOnUpload(input_api, output_api):  # pylint: disable=C0103
    """Presubmit hook run at upload time."""
    return list(_RunUmaHistogramChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):  # pylint: disable=C0103
    """Presubmit hook run at commit time."""
    return list(_RunUmaHistogramChecks(input_api, output_api))
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Blink feature-policy presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def _RunUmaHistogramChecks(input_api, output_api):  # pylint: disable=C0103
    """Warn when feature_policy_feature.mojom changes without regenerating the
    FeaturePolicyFeature UMA enum via update_feature_policy_enum.py."""
    import sys
    saved_path = sys.path
    try:
        # Temporarily extend sys.path so the histograms tooling can be imported.
        histograms_dir = input_api.os_path.join(
            input_api.PresubmitLocalPath(), '..', '..', '..', '..', '..',
            'tools', 'metrics', 'histograms')
        sys.path = sys.path + [histograms_dir]
        import update_histogram_enum
    finally:
        sys.path = saved_path

    mojom_path = ''
    for affected in input_api.AffectedFiles():
        if affected.LocalPath().endswith('feature_policy_feature.mojom'):
            mojom_path = affected.LocalPath()
            break
    else:
        # The enum source file was not touched by this change; nothing to do.
        return []

    presubmit_error = update_histogram_enum.CheckPresubmitErrors(
        histogram_enum_name='FeaturePolicyFeature',
        update_script_name='update_feature_policy_enum.py',
        source_enum_path=mojom_path,
        start_marker='^enum FeaturePolicyFeature {',
        end_marker='^};',
        strip_k_prefix=True)
    if presubmit_error:
        return [output_api.PresubmitPromptWarning(presubmit_error,
                                                  items=[mojom_path])]
    return []
def CheckChangeOnUpload(input_api, output_api):  # pylint: disable=C0103
    """Presubmit hook run at upload time."""
    return list(_RunUmaHistogramChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):  # pylint: disable=C0103
    """Presubmit hook run at commit time."""
    return list(_RunUmaHistogramChecks(input_api, output_api))
|
en
| 0.809231
|
# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Blink feature-policy presubmit script. See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into gcl. # pylint: disable=C0103 # pylint: disable=C0103 # pylint: disable=C0103
| 1.943273
| 2
|
Courses/EE6226/Software/make_language_inclusion_test.py
|
ldkong1205/ntu-graduate-courses
| 22
|
6627366
|
<reponame>ldkong1205/ntu-graduate-courses
#!/usr/bin/env python
# $Id: make_language_inclusion_test.py 538 2009-11-09 14:51:19Z hat $
"""
Check whether the 'small' language is contained in the 'big' language.
"""
import sys
# Make sure the installed version is used (is in the Python path).
site_packages = "%SITEPACKAGES%"
if not site_packages.startswith("%SITE") and site_packages not in sys.path:
# Installed version, prefix installed path.
sys.path = [site_packages] + sys.path
from automata import frontend
class TestLanguageInclusionApplication(frontend.Application):
    """Command-line front-end for the language inclusion test."""

    def __init__(self):
        # Explicit base-class call kept (code predates new-style super usage).
        frontend.Application.__init__(
            self, 'make_language_inclusion_test',
            "Check whether the 'small' language is contained in the "
            "'big' language.")

    def add_options(self):
        # Both parameters must name existing automaton files.
        self.add_parm(frontend.CmdParm('small', frontend.EXIST_AUT,
                                       'i+s', 'first (smallest) model'))
        self.add_parm(frontend.CmdParm('big', frontend.EXIST_AUT,
                                       'i+b', 'second (biggest) model'))

    def main(self, args):
        frontend.make_language_inclusion_test(args['small'], args['big'])
if __name__ == '__main__':
    TestLanguageInclusionApplication().run()
|
#!/usr/bin/env python
# $Id: make_language_inclusion_test.py 538 2009-11-09 14:51:19Z hat $
"""
Check whether the 'small' language is contained in the 'big' language.
"""
import sys
# Make sure the installed version is used (is in the Python path).
site_packages = "%SITEPACKAGES%"
if not site_packages.startswith("%SITE") and site_packages not in sys.path:
# Installed version, prefix installed path.
sys.path = [site_packages] + sys.path
from automata import frontend
class TestLanguageInclusionApplication(frontend.Application):
def __init__(self):
cmd = 'make_language_inclusion_test'
desc = "Check whether the 'small' language is contained in the " \
"'big' language."
frontend.Application.__init__(self, cmd, desc)
def add_options(self):
self.add_parm(frontend.CmdParm('small', frontend.EXIST_AUT,
'i+s', 'first (smallest) model'))
self.add_parm(frontend.CmdParm('big', frontend.EXIST_AUT,
'i+b', 'second (biggest) model'))
def main(self, args):
frontend.make_language_inclusion_test(args['small'], args['big'])
if __name__ == '__main__':
app = TestLanguageInclusionApplication()
app.run()
|
en
| 0.66318
|
#!/usr/bin/env python # $Id: make_language_inclusion_test.py 538 2009-11-09 14:51:19Z hat $ Check whether the 'small' language is contained in the 'big' language. # Make sure the installed version is used (is in the Python path). # Installed version, prefix installed path.
| 2.664626
| 3
|
reportlab/lib/attrmap.py
|
gustavohenrique/wms
| 1
|
6627367
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/attrmap.py
__version__=''' $Id: attrmap.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''Framework for objects whose assignments are checked. Used by graphics.
We developed reportlab/graphics prior to Python 2 and metaclasses. For the
graphics, we wanted to be able to declare the attributes of a class, check
them on assignment, and convert from string arguments. Examples of
attrmap-based objects can be found in reportlab/graphics/shapes. It lets
us defined structures like the one below, which are seen more modern form in
Django models and other frameworks.
We'll probably replace this one day soon, hopefully with no impact on client
code.
class Rect(SolidShape):
"""Rectangle, possibly with rounded corners."""
_attrMap = AttrMap(BASE=SolidShape,
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
width = AttrMapValue(isNumber),
height = AttrMapValue(isNumber),
rx = AttrMapValue(isNumber),
ry = AttrMapValue(isNumber),
)
'''
from UserDict import UserDict
from reportlab.lib.validators import isAnything, _SequenceTypes, DerivedValue
from reportlab import rl_config
class CallableValue:
'''a class to allow callable initial values'''
def __init__(self,func,*args,**kw):
#assert iscallable(func)
self.func = func
self.args = args
self.kw = kw
def __call__(self):
return apply(self.func,self.args,self.kw)
class AttrMapValue:
'''Simple multi-value holder for attribute maps'''
def __init__(self,validate=None,desc=None,initial=None, **kw):
self.validate = validate or isAnything
self.desc = desc
self._initial = initial
for k,v in kw.items():
setattr(self,k,v)
def __getattr__(self,name):
#hack to allow callable initial values
if name=='initial':
if isinstance(self._initial,CallableValue): return self._initial()
return self._initial
elif name=='hidden':
return 0
raise AttributeError, name
class AttrMap(UserDict):
def __init__(self,BASE=None,UNWANTED=[],**kw):
data = {}
if BASE:
if isinstance(BASE,AttrMap):
data = BASE.data #they used BASECLASS._attrMap
else:
if type(BASE) not in (type(()),type([])): BASE = (BASE,)
for B in BASE:
if hasattr(B,'_attrMap'):
data.update(getattr(B._attrMap,'data',{}))
else:
raise ValueError, 'BASE=%s has wrong kind of value' % str(B)
UserDict.__init__(self,data)
self.remove(UNWANTED)
self.data.update(kw)
def update(self,kw):
if isinstance(kw,AttrMap): kw = kw.data
self.data.update(kw)
def remove(self,unwanted):
for k in unwanted:
try:
del self[k]
except KeyError:
pass
def clone(self,UNWANTED=[],**kw):
c = AttrMap(BASE=self,UNWANTED=UNWANTED)
c.update(kw)
return c
def validateSetattr(obj,name,value):
'''validate setattr(obj,name,value)'''
if rl_config.shapeChecking:
map = obj._attrMap
if map and name[0]!= '_':
#we always allow the inherited values; they cannot
#be checked until draw time.
if isinstance(value, DerivedValue):
#let it through
pass
else:
try:
validate = map[name].validate
if not validate(value):
raise AttributeError, "Illegal assignment of '%s' to '%s' in class %s" % (value, name, obj.__class__.__name__)
except KeyError:
raise AttributeError, "Illegal attribute '%s' in class %s" % (name, obj.__class__.__name__)
obj.__dict__[name] = value
def _privateAttrMap(obj,ret=0):
'''clone obj._attrMap if required'''
A = obj._attrMap
oA = getattr(obj.__class__,'_attrMap',None)
if ret:
if oA is A:
return A.clone(), oA
else:
return A, None
else:
if oA is A:
obj._attrMap = A.clone()
def _findObjectAndAttr(src, P):
'''Locate the object src.P for P a string, return parent and name of attribute
'''
P = string.split(P, '.')
if len(P) == 0:
return None, None
else:
for p in P[0:-1]:
src = getattr(src, p)
return src, P[-1]
def hook__setattr__(obj):
if not hasattr(obj,'__attrproxy__'):
C = obj.__class__
import new
obj.__class__=new.classobj(C.__name__,(C,)+C.__bases__,
{'__attrproxy__':[],
'__setattr__':lambda self,k,v,osa=getattr(obj,'__setattr__',None),hook=hook: hook(self,k,v,osa)})
def addProxyAttribute(src,name,validate=None,desc=None,initial=None,dst=None):
'''
Add a proxy attribute 'name' to src with targets dst
'''
#sanity
assert hasattr(src,'_attrMap'), 'src object has no _attrMap'
A, oA = _privateAttrMap(src,1)
if type(dst) not in _SequenceTypes: dst = dst,
D = []
DV = []
for d in dst:
if type(d) in _SequenceTypes:
d, e = d[0], d[1:]
obj, attr = _findObjectAndAttr(src,d)
if obj:
dA = getattr(obj,'_attrMap',None)
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/attrmap.py
__version__=''' $Id: attrmap.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''Framework for objects whose assignments are checked. Used by graphics.
We developed reportlab/graphics prior to Python 2 and metaclasses. For the
graphics, we wanted to be able to declare the attributes of a class, check
them on assignment, and convert from string arguments. Examples of
attrmap-based objects can be found in reportlab/graphics/shapes. It lets
us defined structures like the one below, which are seen more modern form in
Django models and other frameworks.
We'll probably replace this one day soon, hopefully with no impact on client
code.
class Rect(SolidShape):
"""Rectangle, possibly with rounded corners."""
_attrMap = AttrMap(BASE=SolidShape,
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
width = AttrMapValue(isNumber),
height = AttrMapValue(isNumber),
rx = AttrMapValue(isNumber),
ry = AttrMapValue(isNumber),
)
'''
from UserDict import UserDict
from reportlab.lib.validators import isAnything, _SequenceTypes, DerivedValue
from reportlab import rl_config
class CallableValue:
'''a class to allow callable initial values'''
def __init__(self,func,*args,**kw):
#assert iscallable(func)
self.func = func
self.args = args
self.kw = kw
def __call__(self):
return apply(self.func,self.args,self.kw)
class AttrMapValue:
'''Simple multi-value holder for attribute maps'''
def __init__(self,validate=None,desc=None,initial=None, **kw):
self.validate = validate or isAnything
self.desc = desc
self._initial = initial
for k,v in kw.items():
setattr(self,k,v)
def __getattr__(self,name):
#hack to allow callable initial values
if name=='initial':
if isinstance(self._initial,CallableValue): return self._initial()
return self._initial
elif name=='hidden':
return 0
raise AttributeError, name
class AttrMap(UserDict):
def __init__(self,BASE=None,UNWANTED=[],**kw):
data = {}
if BASE:
if isinstance(BASE,AttrMap):
data = BASE.data #they used BASECLASS._attrMap
else:
if type(BASE) not in (type(()),type([])): BASE = (BASE,)
for B in BASE:
if hasattr(B,'_attrMap'):
data.update(getattr(B._attrMap,'data',{}))
else:
raise ValueError, 'BASE=%s has wrong kind of value' % str(B)
UserDict.__init__(self,data)
self.remove(UNWANTED)
self.data.update(kw)
def update(self,kw):
if isinstance(kw,AttrMap): kw = kw.data
self.data.update(kw)
def remove(self,unwanted):
for k in unwanted:
try:
del self[k]
except KeyError:
pass
def clone(self,UNWANTED=[],**kw):
c = AttrMap(BASE=self,UNWANTED=UNWANTED)
c.update(kw)
return c
def validateSetattr(obj,name,value):
'''validate setattr(obj,name,value)'''
if rl_config.shapeChecking:
map = obj._attrMap
if map and name[0]!= '_':
#we always allow the inherited values; they cannot
#be checked until draw time.
if isinstance(value, DerivedValue):
#let it through
pass
else:
try:
validate = map[name].validate
if not validate(value):
raise AttributeError, "Illegal assignment of '%s' to '%s' in class %s" % (value, name, obj.__class__.__name__)
except KeyError:
raise AttributeError, "Illegal attribute '%s' in class %s" % (name, obj.__class__.__name__)
obj.__dict__[name] = value
def _privateAttrMap(obj,ret=0):
'''clone obj._attrMap if required'''
A = obj._attrMap
oA = getattr(obj.__class__,'_attrMap',None)
if ret:
if oA is A:
return A.clone(), oA
else:
return A, None
else:
if oA is A:
obj._attrMap = A.clone()
def _findObjectAndAttr(src, P):
'''Locate the object src.P for P a string, return parent and name of attribute
'''
P = string.split(P, '.')
if len(P) == 0:
return None, None
else:
for p in P[0:-1]:
src = getattr(src, p)
return src, P[-1]
def hook__setattr__(obj):
if not hasattr(obj,'__attrproxy__'):
C = obj.__class__
import new
obj.__class__=new.classobj(C.__name__,(C,)+C.__bases__,
{'__attrproxy__':[],
'__setattr__':lambda self,k,v,osa=getattr(obj,'__setattr__',None),hook=hook: hook(self,k,v,osa)})
def addProxyAttribute(src,name,validate=None,desc=None,initial=None,dst=None):
'''
Add a proxy attribute 'name' to src with targets dst
'''
#sanity
assert hasattr(src,'_attrMap'), 'src object has no _attrMap'
A, oA = _privateAttrMap(src,1)
if type(dst) not in _SequenceTypes: dst = dst,
D = []
DV = []
for d in dst:
if type(d) in _SequenceTypes:
d, e = d[0], d[1:]
obj, attr = _findObjectAndAttr(src,d)
if obj:
dA = getattr(obj,'_attrMap',None)
|
en
| 0.713501
|
#Copyright ReportLab Europe Ltd. 2000-2004 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/attrmap.py $Id: attrmap.py 3342 2008-12-12 15:55:34Z andy $ Framework for objects whose assignments are checked. Used by graphics. We developed reportlab/graphics prior to Python 2 and metaclasses. For the graphics, we wanted to be able to declare the attributes of a class, check them on assignment, and convert from string arguments. Examples of attrmap-based objects can be found in reportlab/graphics/shapes. It lets us defined structures like the one below, which are seen more modern form in Django models and other frameworks. We'll probably replace this one day soon, hopefully with no impact on client code. class Rect(SolidShape): """Rectangle, possibly with rounded corners.""" _attrMap = AttrMap(BASE=SolidShape, x = AttrMapValue(isNumber), y = AttrMapValue(isNumber), width = AttrMapValue(isNumber), height = AttrMapValue(isNumber), rx = AttrMapValue(isNumber), ry = AttrMapValue(isNumber), ) a class to allow callable initial values #assert iscallable(func) Simple multi-value holder for attribute maps #hack to allow callable initial values #they used BASECLASS._attrMap validate setattr(obj,name,value) #we always allow the inherited values; they cannot #be checked until draw time. #let it through clone obj._attrMap if required Locate the object src.P for P a string, return parent and name of attribute Add a proxy attribute 'name' to src with targets dst #sanity
| 2.275162
| 2
|
genomicfeatures/migrations/0002_auto_20190523_1346.py
|
brand-fabian/varfish-server
| 14
|
6627368
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-23 13:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("genomicfeatures", "0001_initial")]
operations = [
migrations.AddIndex(
model_name="geneinterval",
index=models.Index(fields=["gene_id"], name="genomicfeat_gene_id_87aab6_idx"),
)
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-23 13:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("genomicfeatures", "0001_initial")]
operations = [
migrations.AddIndex(
model_name="geneinterval",
index=models.Index(fields=["gene_id"], name="genomicfeat_gene_id_87aab6_idx"),
)
]
|
en
| 0.68232
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-05-23 13:46
| 1.344908
| 1
|
bin/Reinteract.pyw
|
alexey4petrov/reinteract
| 1
|
6627369
|
<reponame>alexey4petrov/reinteract
#!/usr/bin/env python
#
# Copyright 2007-2009 <NAME>
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import sys
script_path = os.path.realpath(os.path.abspath(sys.argv[0].decode('mbcs')))
topdir = os.path.dirname(os.path.dirname(script_path))
libdir = os.path.join(topdir, 'python')
externaldir = os.path.join(topdir, 'external')
builderdir = os.path.join(topdir, 'dialogs')
examplesdir = os.path.join(topdir, 'examples')
icon_file = os.path.join(topdir, 'Reinteract.ico')
sys.path[0:0] = [libdir, externaldir]
import reinteract
from reinteract.global_settings import global_settings
global_settings.dialogs_dir = builderdir
global_settings.examples_dir = examplesdir
global_settings.icon_file = icon_file
global_settings.version = "@VERSION@"
import reinteract.main
reinteract.main.main()
|
#!/usr/bin/env python
#
# Copyright 2007-2009 <NAME>
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import sys
script_path = os.path.realpath(os.path.abspath(sys.argv[0].decode('mbcs')))
topdir = os.path.dirname(os.path.dirname(script_path))
libdir = os.path.join(topdir, 'python')
externaldir = os.path.join(topdir, 'external')
builderdir = os.path.join(topdir, 'dialogs')
examplesdir = os.path.join(topdir, 'examples')
icon_file = os.path.join(topdir, 'Reinteract.ico')
sys.path[0:0] = [libdir, externaldir]
import reinteract
from reinteract.global_settings import global_settings
global_settings.dialogs_dir = builderdir
global_settings.examples_dir = examplesdir
global_settings.icon_file = icon_file
global_settings.version = "@VERSION@"
import reinteract.main
reinteract.main.main()
|
en
| 0.39727
|
#!/usr/bin/env python # # Copyright 2007-2009 <NAME> # # This file is part of Reinteract and distributed under the terms # of the BSD license. See the file COPYING in the Reinteract # distribution for full details. # ########################################################################
| 1.904929
| 2
|
eonr/tests/test_eonr.py
|
tnigon/eonr
| 3
|
6627370
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
Set up a test to run through all of the functions (and various options) and
provide a report on how many were returned with errors (coverage).
Then this file can be run anytime a change is made and changes are pushed to
see if anything was broken
The following test uses a very small "test" datacube that is only 3x3x240
(8,640 bytes)
'''
import numpy as np
import os
import pandas as pd
import shutil, tempfile
import unittest
from eonr import EONR
NAME_DATA = r'minnesota_2012.csv'
FILENAME_DATA = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', NAME_DATA)
# print(FILENAME_HDR)
if not os.path.isfile(FILENAME_DATA):
FILENAME_DATA = os.path.join(os.path.dirname(os.getcwd()), 'data', NAME_DATA)
class Test_EONR_set_init_vs_post(unittest.TestCase):
'''
Test to be sure results are the same if setting variables before or
after initialization
'''
@classmethod
def setUpClass(self):
self.test_dir = tempfile.mkdtemp()
model = None
col_n_app = 'rate_n_applied_kgha'
col_yld = 'yld_grain_dry_kgha'
unit_currency = 'USD'
unit_fert = 'kg'
unit_grain = 'kg'
unit_area = 'ha'
cost_n_fert = 0.88
price_grain = 0.157
self.df_data = pd.read_csv(FILENAME_DATA)
self.my_eonr1 = EONR(base_dir=self.test_dir, model=model)
self.my_eonr1.set_column_names(col_yld=col_yld,
col_n_app=col_n_app)
self.my_eonr1.set_units(unit_currency=unit_currency, unit_fert=unit_fert,
unit_grain=unit_grain, unit_area=unit_area)
self.my_eonr1.update_econ(cost_n_fert=cost_n_fert,
price_grain=price_grain)
self.my_eonr1.calculate_eonr(self.df_data)
self.my_eonr2 = EONR(cost_n_fert=cost_n_fert,
price_grain=price_grain,
col_n_app=col_n_app,
col_yld=col_yld,
unit_currency=unit_currency,
unit_grain=unit_grain,
unit_fert=unit_fert,
unit_area=unit_area,
model=None,
base_dir=self.test_dir)
self.my_eonr2.calculate_eonr(self.df_data)
@classmethod
def tearDownClass(self):
self.my_eonr1 = None
self.my_eonr2 = None
self.df_data = None
shutil.rmtree(self.test_dir)
self.test_dir = None
def test_init_eonr(self):
self.assertAlmostEqual(self.my_eonr1.eonr, 162.3, 1,
'init EONR result is not correct')
def test_init_mrtn(self):
self.assertAlmostEqual(self.my_eonr1.mrtn, 767.93, 2,
'init MRTN result is not correct')
def test_init_crit_x(self):
self.assertAlmostEqual(self.my_eonr1.coefs_grtn['crit_x'], 177.440, 1,
'init crit_x result is not correct')
def test_init_pl_l_90(self):
pl_l_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 130.46, 1,
'init profile lower 90th result is not correct')
def test_init_pl_u_90(self):
pl_u_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 207.83, 1,
'init profile upper 90th result is not correct')
def test_init_wald_l_90(self):
wald_l_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 107.489, 1,
'init wald lower 90th result is not correct')
def test_init_wald_u_90(self):
wald_u_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 217.237, 1,
'init wald upper 90th result is not correct')
def test_post_eonr(self):
self.assertAlmostEqual(self.my_eonr2.eonr, 162.3, 1,
'post EONR result is not correct')
def test_post_mrtn(self):
self.assertAlmostEqual(self.my_eonr2.mrtn, 767.93, 2,
'post MRTN result is not correct')
def test_post_crit_x(self):
self.assertAlmostEqual(self.my_eonr2.coefs_grtn['crit_x'], 177.440, 1,
'post crit_x result is not correct')
def test_post_pl_l_90(self):
pl_l_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 130.46, 1,
'post profile lower 90th result is not correct')
def test_post_pl_u_90(self):
pl_u_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 207.83, 1,
'post profile upper 90th result is not correct')
def test_post_wald_l_90(self):
wald_l_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 107.489, 1,
'post wald lower 90th result is not correct')
def test_post_wald_u_90(self):
wald_u_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 217.237, 1,
'post wald upper 90th result is not correct')
def test_init_post_col_n_app(self):
self.assertEqual(self.my_eonr1.col_n_app, self.my_eonr2.col_n_app,
'"col_n_app" is not the same between "init" and "post" instance.')
def test_init_post_col_yld(self):
self.assertEqual(self.my_eonr1.col_yld, self.my_eonr2.col_yld,
'"col_yld" is not the same between "init" and "post" instance.')
def test_init_post_unit_currency(self):
self.assertEqual(self.my_eonr1.unit_currency, self.my_eonr2.unit_currency,
'"unit_currency" is not the same between "init" and "post" instance.')
def test_init_post_unit_fert(self):
self.assertEqual(self.my_eonr1.unit_fert, self.my_eonr2.unit_fert,
'"unit_fert" is not the same between "init" and "post" instance.')
def test_init_post_unit_grain(self):
self.assertEqual(self.my_eonr1.unit_grain, self.my_eonr2.unit_grain,
'"unit_grain" is not the same between "init" and "post" instance.')
def test_init_post_unit_area(self):
self.assertEqual(self.my_eonr1.unit_area, self.my_eonr2.unit_area,
'"unit_area" is not the same between "init" and "post" instance.')
def test_init_post_cost_n_fert(self):
self.assertEqual(self.my_eonr1.cost_n_fert, self.my_eonr2.cost_n_fert,
'"cost_n_fert" is not the same between "init" and "post" instance.')
def test_init_post_price_grain(self):
self.assertEqual(self.my_eonr1.price_grain, self.my_eonr2.price_grain,
'"price_grain" is not the same between "init" and "post" instance.')
class Test_EONR_model_results(unittest.TestCase):
'''
Test to be sure results are the same if setting variables before or
after initialization
'''
@classmethod
def setUpClass(self):
self.test_dir = tempfile.mkdtemp()
col_n_app = 'rate_n_applied_kgha'
col_yld = 'yld_grain_dry_kgha'
unit_currency = 'USD'
unit_fert = 'kg'
unit_grain = 'kg'
unit_area = 'ha'
cost_n_fert = 0.88
price_grain = 0.157
self.df_data = pd.read_csv(FILENAME_DATA)
self.my_eonr_q = EONR(cost_n_fert=cost_n_fert,
price_grain=price_grain,
col_n_app=col_n_app,
col_yld=col_yld,
unit_currency=unit_currency,
unit_grain=unit_grain,
unit_fert=unit_fert,
unit_area=unit_area,
model='quadratic',
base_dir=self.test_dir)
self.my_eonr_q.calculate_eonr(self.df_data)
@classmethod
def tearDownClass(self):
self.my_eonr_q = None
self.df_data = None
shutil.rmtree(self.test_dir)
self.test_dir = None
def test_quadratic_eonr(self):
self.assertAlmostEqual(self.my_eonr_q.eonr, 174.238, 1,
'quadratic EONR result is not correct')
def test_quadratic_mrtn(self):
self.assertAlmostEqual(self.my_eonr_q.mrtn, 770.206, 1,
'quadratic MRTN result is not correct')
def test_quadratic_crit_x(self):
self.assertAlmostEqual(self.my_eonr_q.coefs_grtn['crit_x'], 191.581, 1,
'quadratic crit_x result is not correct')
def test_quadratic_pl_l_90(self):
pl_l_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 152.882, 1,
'quadratic profile lower 90th result is not correct')
def test_quadratic_pl_u_90(self):
pl_u_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 222.560, 1,
'quadratic profile upper 90th result is not correct')
def test_quadratic_wald_l_90(self):
wald_l_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 132.785, 1,
'quadratic wald lower 90th result is not correct')
def test_quadratic_wald_u_90(self):
wald_u_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 215.691, 1,
'quadratic wald upper 90th result is not correct')
def suite():
suite = unittest.TestSuite()
# Test_EONR_set_column_names_init
suite.addTest(Test_EONR_set_init_vs_post('test_init_eonr'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_mrtn'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_crit_x'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_pl_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_pl_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_wald_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_wald_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_eonr'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_mrtn'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_crit_x'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_pl_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_pl_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_wald_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_wald_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_col_n_app'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_col_yld'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_currency'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_fert'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_grain'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_area'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_cost_n_fert'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_price_grain'))
suite.addTest(Test_EONR_model_results('test_quadratic_eonr'))
suite.addTest(Test_EONR_model_results('test_quadratic_mrtn'))
suite.addTest(Test_EONR_model_results('test_quadratic_crit_x'))
suite.addTest(Test_EONR_model_results('test_quadratic_pl_l_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_pl_u_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_wald_l_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_wald_u_90'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
# -*- coding: utf-8 -*-
'''
Set up a test to run through all of the functions (and various options) and
provide a report on how many were returned with errors (coverage).
Then this file can be run anytime a change is made and changes are pushed to
see if anything was broken
The following test uses a very small "test" datacube that is only 3x3x240
(8,640 bytes)
'''
import numpy as np
import os
import pandas as pd
import shutil, tempfile
import unittest
from eonr import EONR
NAME_DATA = r'minnesota_2012.csv'
FILENAME_DATA = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', NAME_DATA)
# print(FILENAME_HDR)
if not os.path.isfile(FILENAME_DATA):
FILENAME_DATA = os.path.join(os.path.dirname(os.getcwd()), 'data', NAME_DATA)
class Test_EONR_set_init_vs_post(unittest.TestCase):
'''
Test to be sure results are the same if setting variables before or
after initialization
'''
@classmethod
def setUpClass(self):
self.test_dir = tempfile.mkdtemp()
model = None
col_n_app = 'rate_n_applied_kgha'
col_yld = 'yld_grain_dry_kgha'
unit_currency = 'USD'
unit_fert = 'kg'
unit_grain = 'kg'
unit_area = 'ha'
cost_n_fert = 0.88
price_grain = 0.157
self.df_data = pd.read_csv(FILENAME_DATA)
self.my_eonr1 = EONR(base_dir=self.test_dir, model=model)
self.my_eonr1.set_column_names(col_yld=col_yld,
col_n_app=col_n_app)
self.my_eonr1.set_units(unit_currency=unit_currency, unit_fert=unit_fert,
unit_grain=unit_grain, unit_area=unit_area)
self.my_eonr1.update_econ(cost_n_fert=cost_n_fert,
price_grain=price_grain)
self.my_eonr1.calculate_eonr(self.df_data)
self.my_eonr2 = EONR(cost_n_fert=cost_n_fert,
price_grain=price_grain,
col_n_app=col_n_app,
col_yld=col_yld,
unit_currency=unit_currency,
unit_grain=unit_grain,
unit_fert=unit_fert,
unit_area=unit_area,
model=None,
base_dir=self.test_dir)
self.my_eonr2.calculate_eonr(self.df_data)
@classmethod
def tearDownClass(self):
self.my_eonr1 = None
self.my_eonr2 = None
self.df_data = None
shutil.rmtree(self.test_dir)
self.test_dir = None
def test_init_eonr(self):
self.assertAlmostEqual(self.my_eonr1.eonr, 162.3, 1,
'init EONR result is not correct')
def test_init_mrtn(self):
self.assertAlmostEqual(self.my_eonr1.mrtn, 767.93, 2,
'init MRTN result is not correct')
def test_init_crit_x(self):
self.assertAlmostEqual(self.my_eonr1.coefs_grtn['crit_x'], 177.440, 1,
'init crit_x result is not correct')
def test_init_pl_l_90(self):
pl_l_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 130.46, 1,
'init profile lower 90th result is not correct')
def test_init_pl_u_90(self):
pl_u_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 207.83, 1,
'init profile upper 90th result is not correct')
def test_init_wald_l_90(self):
wald_l_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 107.489, 1,
'init wald lower 90th result is not correct')
def test_init_wald_u_90(self):
wald_u_90 = self.my_eonr1.df_ci[self.my_eonr1.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 217.237, 1,
'init wald upper 90th result is not correct')
def test_post_eonr(self):
self.assertAlmostEqual(self.my_eonr2.eonr, 162.3, 1,
'post EONR result is not correct')
def test_post_mrtn(self):
self.assertAlmostEqual(self.my_eonr2.mrtn, 767.93, 2,
'post MRTN result is not correct')
def test_post_crit_x(self):
self.assertAlmostEqual(self.my_eonr2.coefs_grtn['crit_x'], 177.440, 1,
'post crit_x result is not correct')
def test_post_pl_l_90(self):
pl_l_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 130.46, 1,
'post profile lower 90th result is not correct')
def test_post_pl_u_90(self):
pl_u_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 207.83, 1,
'post profile upper 90th result is not correct')
def test_post_wald_l_90(self):
wald_l_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 107.489, 1,
'post wald lower 90th result is not correct')
def test_post_wald_u_90(self):
wald_u_90 = self.my_eonr2.df_ci[self.my_eonr2.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 217.237, 1,
'post wald upper 90th result is not correct')
def test_init_post_col_n_app(self):
self.assertEqual(self.my_eonr1.col_n_app, self.my_eonr2.col_n_app,
'"col_n_app" is not the same between "init" and "post" instance.')
def test_init_post_col_yld(self):
self.assertEqual(self.my_eonr1.col_yld, self.my_eonr2.col_yld,
'"col_yld" is not the same between "init" and "post" instance.')
def test_init_post_unit_currency(self):
self.assertEqual(self.my_eonr1.unit_currency, self.my_eonr2.unit_currency,
'"unit_currency" is not the same between "init" and "post" instance.')
def test_init_post_unit_fert(self):
self.assertEqual(self.my_eonr1.unit_fert, self.my_eonr2.unit_fert,
'"unit_fert" is not the same between "init" and "post" instance.')
def test_init_post_unit_grain(self):
self.assertEqual(self.my_eonr1.unit_grain, self.my_eonr2.unit_grain,
'"unit_grain" is not the same between "init" and "post" instance.')
def test_init_post_unit_area(self):
self.assertEqual(self.my_eonr1.unit_area, self.my_eonr2.unit_area,
'"unit_area" is not the same between "init" and "post" instance.')
def test_init_post_cost_n_fert(self):
self.assertEqual(self.my_eonr1.cost_n_fert, self.my_eonr2.cost_n_fert,
'"cost_n_fert" is not the same between "init" and "post" instance.')
def test_init_post_price_grain(self):
self.assertEqual(self.my_eonr1.price_grain, self.my_eonr2.price_grain,
'"price_grain" is not the same between "init" and "post" instance.')
class Test_EONR_model_results(unittest.TestCase):
'''
Test to be sure results are the same if setting variables before or
after initialization
'''
@classmethod
def setUpClass(self):
self.test_dir = tempfile.mkdtemp()
col_n_app = 'rate_n_applied_kgha'
col_yld = 'yld_grain_dry_kgha'
unit_currency = 'USD'
unit_fert = 'kg'
unit_grain = 'kg'
unit_area = 'ha'
cost_n_fert = 0.88
price_grain = 0.157
self.df_data = pd.read_csv(FILENAME_DATA)
self.my_eonr_q = EONR(cost_n_fert=cost_n_fert,
price_grain=price_grain,
col_n_app=col_n_app,
col_yld=col_yld,
unit_currency=unit_currency,
unit_grain=unit_grain,
unit_fert=unit_fert,
unit_area=unit_area,
model='quadratic',
base_dir=self.test_dir)
self.my_eonr_q.calculate_eonr(self.df_data)
@classmethod
def tearDownClass(self):
self.my_eonr_q = None
self.df_data = None
shutil.rmtree(self.test_dir)
self.test_dir = None
def test_quadratic_eonr(self):
self.assertAlmostEqual(self.my_eonr_q.eonr, 174.238, 1,
'quadratic EONR result is not correct')
def test_quadratic_mrtn(self):
self.assertAlmostEqual(self.my_eonr_q.mrtn, 770.206, 1,
'quadratic MRTN result is not correct')
def test_quadratic_crit_x(self):
self.assertAlmostEqual(self.my_eonr_q.coefs_grtn['crit_x'], 191.581, 1,
'quadratic crit_x result is not correct')
def test_quadratic_pl_l_90(self):
pl_l_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['pl_l'].item()
self.assertAlmostEqual(pl_l_90, 152.882, 1,
'quadratic profile lower 90th result is not correct')
def test_quadratic_pl_u_90(self):
pl_u_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['pl_u'].item()
self.assertAlmostEqual(pl_u_90, 222.560, 1,
'quadratic profile upper 90th result is not correct')
def test_quadratic_wald_l_90(self):
wald_l_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['wald_l'].item()
self.assertAlmostEqual(wald_l_90, 132.785, 1,
'quadratic wald lower 90th result is not correct')
def test_quadratic_wald_u_90(self):
wald_u_90 = self.my_eonr_q.df_ci[self.my_eonr_q.df_ci['level'] == 0.9]['wald_u'].item()
self.assertAlmostEqual(wald_u_90, 215.691, 1,
'quadratic wald upper 90th result is not correct')
def suite():
suite = unittest.TestSuite()
# Test_EONR_set_column_names_init
suite.addTest(Test_EONR_set_init_vs_post('test_init_eonr'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_mrtn'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_crit_x'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_pl_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_pl_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_wald_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_wald_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_eonr'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_mrtn'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_crit_x'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_pl_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_pl_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_wald_l_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_post_wald_u_90'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_col_n_app'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_col_yld'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_currency'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_fert'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_grain'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_unit_area'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_cost_n_fert'))
suite.addTest(Test_EONR_set_init_vs_post('test_init_post_price_grain'))
suite.addTest(Test_EONR_model_results('test_quadratic_eonr'))
suite.addTest(Test_EONR_model_results('test_quadratic_mrtn'))
suite.addTest(Test_EONR_model_results('test_quadratic_crit_x'))
suite.addTest(Test_EONR_model_results('test_quadratic_pl_l_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_pl_u_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_wald_l_90'))
suite.addTest(Test_EONR_model_results('test_quadratic_wald_u_90'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
en
| 0.939362
|
# -*- coding: utf-8 -*- Set up a test to run through all of the functions (and various options) and provide a report on how many were returned with errors (coverage). Then this file can be run anytime a change is made and changes are pushed to see if anything was broken The following test uses a very small "test" datacube that is only 3x3x240 (8,640 bytes) # print(FILENAME_HDR) Test to be sure results are the same if setting variables before or after initialization Test to be sure results are the same if setting variables before or after initialization # Test_EONR_set_column_names_init
| 2.669887
| 3
|
utils/utils.py
|
kaitolucifer/flask-restful-web-app
| 0
|
6627371
|
<reponame>kaitolucifer/flask-restful-web-app
import re
import base64
class AuthenticationFailedError(Exception):
def __init__(self, message="Authentication Failed"):
super().__init__(message)
class NotUpdatableError(Exception):
def __init__(self, message="not updatable user_id and password"):
super().__init__(message)
def get_auth(request):
try:
auth = request.headers.get("Authorization")
m = re.match(r"Basic (.*)", auth)
token = base64.b64decode(m.group(1))
auth_user_id, auth_password = token.decode().split(":")
return auth_user_id, auth_password
except:
raise AuthenticationFailedError()
def check_updatable(request):
if request.get_json().get("user_id") or request.get_json().get("password"):
raise NotUpdatableError()
|
import re
import base64
class AuthenticationFailedError(Exception):
def __init__(self, message="Authentication Failed"):
super().__init__(message)
class NotUpdatableError(Exception):
def __init__(self, message="not updatable user_id and password"):
super().__init__(message)
def get_auth(request):
try:
auth = request.headers.get("Authorization")
m = re.match(r"Basic (.*)", auth)
token = base64.b64decode(m.group(1))
auth_user_id, auth_password = token.decode().split(":")
return auth_user_id, auth_password
except:
raise AuthenticationFailedError()
def check_updatable(request):
if request.get_json().get("user_id") or request.get_json().get("password"):
raise NotUpdatableError()
|
none
| 1
| 2.751386
| 3
|
|
pysyncobj/tcp_connection.py
|
briangu/PySyncObj
| 602
|
6627372
|
import time
import socket
import zlib
import struct
import pysyncobj.pickle as pickle
import pysyncobj.win_inet_pton
from .poller import POLL_EVENT_TYPE
from .monotonic import monotonic as monotonicTime
class CONNECTION_STATE:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
def _getAddrType(addr):
try:
socket.inet_aton(addr)
return socket.AF_INET
except socket.error:
pass
try:
socket.inet_pton(socket.AF_INET6, addr)
return socket.AF_INET6
except socket.error:
pass
raise Exception('unknown address type')
class TcpConnection(object):
def __init__(self, poller, onMessageReceived = None, onConnected = None, onDisconnected = None,
socket=None, timeout=10.0, sendBufferSize = 2 ** 13, recvBufferSize = 2 ** 13):
self.sendRandKey = None
self.recvRandKey = None
self.encryptor = None
self.__socket = socket
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
self.__timeout = timeout
self.__poller = poller
if socket is not None:
self.__socket = socket
self.__fileno = socket.fileno()
self.__state = CONNECTION_STATE.CONNECTED
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
else:
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = None
self.__onMessageReceived = onMessageReceived
self.__onConnected = onConnected
self.__onDisconnected = onDisconnected
self.__sendBufferSize = sendBufferSize
self.__recvBufferSize = recvBufferSize
def setOnConnectedCallback(self, onConnected):
self.__onConnected = onConnected
def setOnMessageReceivedCallback(self, onMessageReceived):
self.__onMessageReceived = onMessageReceived
def setOnDisconnectedCallback(self, onDisconnected):
self.__onDisconnected = onDisconnected
def connect(self, host, port):
if host is None:
return False
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = socket.socket(_getAddrType(host), socket.SOCK_STREAM)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize)
self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.__socket.setblocking(0)
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
try:
self.__socket.connect((host, port))
except socket.error as e:
if e.errno not in (socket.errno.EINPROGRESS, socket.errno.EWOULDBLOCK):
return False
self.__fileno = self.__socket.fileno()
self.__state = CONNECTION_STATE.CONNECTING
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
return True
def send(self, message):
if self.sendRandKey:
message = (self.sendRandKey, message)
data = zlib.compress(pickle.dumps(message), 3)
if self.encryptor:
data = self.encryptor.encrypt(data)
data = struct.pack('i', len(data)) + data
self.__writeBuffer += data
self.__trySendBuffer()
def fileno(self):
return self.__fileno
def disconnect(self):
needCallDisconnect = False
if self.__onDisconnected is not None and self.__state != CONNECTION_STATE.DISCONNECTED:
needCallDisconnect = True
self.sendRandKey = None
self.recvRandKey = None
if self.__socket is not None:
self.__socket.close()
self.__socket = None
if self.__fileno is not None:
self.__poller.unsubscribe(self.__fileno)
self.__fileno = None
self.__writeBuffer = bytes()
self.__readBuffer = bytes()
self.__state = CONNECTION_STATE.DISCONNECTED
if needCallDisconnect:
self.__onDisconnected()
def getSendBufferSize(self):
return len(self.__writeBuffer)
def __processConnection(self, descr, eventType):
poller = self.__poller
if descr != self.__fileno:
poller.unsubscribe(descr)
return
if eventType & POLL_EVENT_TYPE.ERROR:
self.disconnect()
return
if monotonicTime() - self.__lastReadTime > self.__timeout:
self.disconnect()
return
if eventType & POLL_EVENT_TYPE.READ or eventType & POLL_EVENT_TYPE.WRITE:
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return
if self.__state == CONNECTION_STATE.CONNECTING:
if self.__onConnected is not None:
self.__onConnected()
self.__state = CONNECTION_STATE.CONNECTED
self.__lastReadTime = monotonicTime()
return
if eventType & POLL_EVENT_TYPE.WRITE:
self.__trySendBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
event = POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR
if len(self.__writeBuffer) > 0:
event |= POLL_EVENT_TYPE.WRITE
poller.subscribe(descr, self.__processConnection, event)
if eventType & POLL_EVENT_TYPE.READ:
self.__tryReadBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
while True:
message = self.__processParseMessage()
if message is None:
break
if self.__onMessageReceived is not None:
self.__onMessageReceived(message)
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
def __trySendBuffer(self):
while self.__processSend():
pass
def __processSend(self):
if not self.__writeBuffer:
return False
try:
res = self.__socket.send(self.__writeBuffer)
if res < 0:
self.disconnect()
return False
if res == 0:
return False
self.__writeBuffer = self.__writeBuffer[res:]
return True
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
def __tryReadBuffer(self):
while self.__processRead():
pass
self.__lastReadTime = monotonicTime()
def __processRead(self):
try:
incoming = self.__socket.recv(self.__recvBufferSize)
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return False
if not incoming:
self.disconnect()
return False
self.__readBuffer += incoming
return True
def __processParseMessage(self):
if len(self.__readBuffer) < 4:
return None
l = struct.unpack('i', self.__readBuffer[:4])[0]
if len(self.__readBuffer) - 4 < l:
return None
data = self.__readBuffer[4:4 + l]
try:
if self.encryptor:
data = self.encryptor.decrypt(data)
message = pickle.loads(zlib.decompress(data))
if self.recvRandKey:
randKey, message = message
assert randKey == self.recvRandKey
except:
self.disconnect()
return None
self.__readBuffer = self.__readBuffer[4 + l:]
return message
@property
def state(self):
return self.__state
|
import time
import socket
import zlib
import struct
import pysyncobj.pickle as pickle
import pysyncobj.win_inet_pton
from .poller import POLL_EVENT_TYPE
from .monotonic import monotonic as monotonicTime
class CONNECTION_STATE:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
def _getAddrType(addr):
try:
socket.inet_aton(addr)
return socket.AF_INET
except socket.error:
pass
try:
socket.inet_pton(socket.AF_INET6, addr)
return socket.AF_INET6
except socket.error:
pass
raise Exception('unknown address type')
class TcpConnection(object):
def __init__(self, poller, onMessageReceived = None, onConnected = None, onDisconnected = None,
socket=None, timeout=10.0, sendBufferSize = 2 ** 13, recvBufferSize = 2 ** 13):
self.sendRandKey = None
self.recvRandKey = None
self.encryptor = None
self.__socket = socket
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
self.__timeout = timeout
self.__poller = poller
if socket is not None:
self.__socket = socket
self.__fileno = socket.fileno()
self.__state = CONNECTION_STATE.CONNECTED
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
else:
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = None
self.__onMessageReceived = onMessageReceived
self.__onConnected = onConnected
self.__onDisconnected = onDisconnected
self.__sendBufferSize = sendBufferSize
self.__recvBufferSize = recvBufferSize
def setOnConnectedCallback(self, onConnected):
self.__onConnected = onConnected
def setOnMessageReceivedCallback(self, onMessageReceived):
self.__onMessageReceived = onMessageReceived
def setOnDisconnectedCallback(self, onDisconnected):
self.__onDisconnected = onDisconnected
def connect(self, host, port):
if host is None:
return False
self.__state = CONNECTION_STATE.DISCONNECTED
self.__fileno = None
self.__socket = socket.socket(_getAddrType(host), socket.SOCK_STREAM)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize)
self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.__socket.setblocking(0)
self.__readBuffer = bytes()
self.__writeBuffer = bytes()
self.__lastReadTime = monotonicTime()
try:
self.__socket.connect((host, port))
except socket.error as e:
if e.errno not in (socket.errno.EINPROGRESS, socket.errno.EWOULDBLOCK):
return False
self.__fileno = self.__socket.fileno()
self.__state = CONNECTION_STATE.CONNECTING
self.__poller.subscribe(self.__fileno,
self.__processConnection,
POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR)
return True
def send(self, message):
if self.sendRandKey:
message = (self.sendRandKey, message)
data = zlib.compress(pickle.dumps(message), 3)
if self.encryptor:
data = self.encryptor.encrypt(data)
data = struct.pack('i', len(data)) + data
self.__writeBuffer += data
self.__trySendBuffer()
def fileno(self):
return self.__fileno
def disconnect(self):
needCallDisconnect = False
if self.__onDisconnected is not None and self.__state != CONNECTION_STATE.DISCONNECTED:
needCallDisconnect = True
self.sendRandKey = None
self.recvRandKey = None
if self.__socket is not None:
self.__socket.close()
self.__socket = None
if self.__fileno is not None:
self.__poller.unsubscribe(self.__fileno)
self.__fileno = None
self.__writeBuffer = bytes()
self.__readBuffer = bytes()
self.__state = CONNECTION_STATE.DISCONNECTED
if needCallDisconnect:
self.__onDisconnected()
def getSendBufferSize(self):
return len(self.__writeBuffer)
def __processConnection(self, descr, eventType):
poller = self.__poller
if descr != self.__fileno:
poller.unsubscribe(descr)
return
if eventType & POLL_EVENT_TYPE.ERROR:
self.disconnect()
return
if monotonicTime() - self.__lastReadTime > self.__timeout:
self.disconnect()
return
if eventType & POLL_EVENT_TYPE.READ or eventType & POLL_EVENT_TYPE.WRITE:
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return
if self.__state == CONNECTION_STATE.CONNECTING:
if self.__onConnected is not None:
self.__onConnected()
self.__state = CONNECTION_STATE.CONNECTED
self.__lastReadTime = monotonicTime()
return
if eventType & POLL_EVENT_TYPE.WRITE:
self.__trySendBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
event = POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR
if len(self.__writeBuffer) > 0:
event |= POLL_EVENT_TYPE.WRITE
poller.subscribe(descr, self.__processConnection, event)
if eventType & POLL_EVENT_TYPE.READ:
self.__tryReadBuffer()
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
while True:
message = self.__processParseMessage()
if message is None:
break
if self.__onMessageReceived is not None:
self.__onMessageReceived(message)
if self.__state == CONNECTION_STATE.DISCONNECTED:
return
def __trySendBuffer(self):
while self.__processSend():
pass
def __processSend(self):
if not self.__writeBuffer:
return False
try:
res = self.__socket.send(self.__writeBuffer)
if res < 0:
self.disconnect()
return False
if res == 0:
return False
self.__writeBuffer = self.__writeBuffer[res:]
return True
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
def __tryReadBuffer(self):
while self.__processRead():
pass
self.__lastReadTime = monotonicTime()
def __processRead(self):
try:
incoming = self.__socket.recv(self.__recvBufferSize)
except socket.error as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
self.disconnect()
return False
if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
self.disconnect()
return False
if not incoming:
self.disconnect()
return False
self.__readBuffer += incoming
return True
def __processParseMessage(self):
if len(self.__readBuffer) < 4:
return None
l = struct.unpack('i', self.__readBuffer[:4])[0]
if len(self.__readBuffer) - 4 < l:
return None
data = self.__readBuffer[4:4 + l]
try:
if self.encryptor:
data = self.encryptor.decrypt(data)
message = pickle.loads(zlib.decompress(data))
if self.recvRandKey:
randKey, message = message
assert randKey == self.recvRandKey
except:
self.disconnect()
return None
self.__readBuffer = self.__readBuffer[4 + l:]
return message
@property
def state(self):
return self.__state
|
none
| 1
| 2.520619
| 3
|
|
test/test_download_with_metadata.py
|
volgar1x/spotify-downloader
| 0
|
6627373
|
<filename>test/test_download_with_metadata.py
import subprocess
import os
from spotdl import const
from spotdl import internals
from spotdl import spotify_tools
from spotdl import youtube_tools
from spotdl import convert
from spotdl import metadata
from spotdl import downloader
import pytest
import loader
loader.load_defaults()
SPOTIFY_TRACK_URL = "https://open.spotify.com/track/3SipFlNddvL0XNZRLXvdZD"
EXPECTED_YOUTUBE_TITLE = "Janji - Heroes Tonight (feat. Johnning) [NCS Release]"
EXPECTED_SPOTIFY_TITLE = "Janji - Heroes Tonight"
EXPECTED_YOUTUBE_URL = "http://youtube.com/watch?v=3nQNiWdeH2Q"
# GIST_URL is the monkeypatched version of: https://www.youtube.com/results?search_query=janji+-+heroes
# so that we get same results even if YouTube changes the list/order of videos on their page.
GIST_URL = "https://gist.githubusercontent.com/ritiek/e731338e9810e31c2f00f13c249a45f5/raw/c11a27f3b5d11a8d082976f1cdd237bd605ec2c2/search_results.html"
def pytest_namespace():
# XXX: We override the value of `content_fixture` later in the tests.
# We do not use an acutal @pytest.fixture because it does not accept
# the monkeypatch parameter and we need to monkeypatch the network
# request before creating the Pafy object.
return {"content_fixture": None}
@pytest.fixture(scope="module")
def metadata_fixture():
meta_tags = spotify_tools.generate_metadata(SPOTIFY_TRACK_URL)
return meta_tags
def test_metadata(metadata_fixture):
expect_number = 24
assert len(metadata_fixture) == expect_number
class TestFileFormat:
def test_with_spaces(self, metadata_fixture):
title = internals.format_string(const.args.file_format, metadata_fixture)
assert title == EXPECTED_SPOTIFY_TITLE
def test_without_spaces(self, metadata_fixture):
const.args.no_spaces = True
title = internals.format_string(const.args.file_format, metadata_fixture)
assert title == EXPECTED_SPOTIFY_TITLE.replace(" ", "_")
def test_youtube_url(metadata_fixture, monkeypatch):
monkeypatch.setattr(
youtube_tools.GenerateYouTubeURL,
"_fetch_response",
loader.monkeypatch_youtube_search_page,
)
url = youtube_tools.generate_youtube_url(SPOTIFY_TRACK_URL, metadata_fixture)
assert url == EXPECTED_YOUTUBE_URL
def test_youtube_title(metadata_fixture, monkeypatch):
monkeypatch.setattr(
youtube_tools.GenerateYouTubeURL,
"_fetch_response",
loader.monkeypatch_youtube_search_page,
)
content = youtube_tools.go_pafy(SPOTIFY_TRACK_URL, metadata_fixture)
pytest.content_fixture = content
title = youtube_tools.get_youtube_title(content)
assert title == EXPECTED_YOUTUBE_TITLE
@pytest.fixture(scope="module")
def filename_fixture(metadata_fixture):
songname = internals.format_string(const.args.file_format, metadata_fixture)
filename = internals.sanitize_title(songname)
return filename
def test_check_track_exists_before_download(tmpdir, metadata_fixture, filename_fixture):
expect_check = False
const.args.folder = str(tmpdir)
# prerequisites for determining filename
track_existence = downloader.CheckExists(filename_fixture, metadata_fixture)
check = track_existence.already_exists(SPOTIFY_TRACK_URL)
assert check == expect_check
class TestDownload:
def blank_audio_generator(self, filepath):
if filepath.endswith(".m4a"):
cmd = "ffmpeg -f lavfi -i anullsrc -t 1 -c:a aac {}".format(filepath)
elif filepath.endswith(".webm"):
cmd = "ffmpeg -f lavfi -i anullsrc -t 1 -c:a libopus {}".format(filepath)
subprocess.call(cmd.split(" "))
def test_m4a(self, monkeypatch, filename_fixture):
expect_download = True
monkeypatch.setattr("pafy.backend_shared.BaseStream.download", self.blank_audio_generator)
monkeypatch.setattr("pafy.backend_youtube_dl.YtdlStream.download", self.blank_audio_generator)
download = youtube_tools.download_song(filename_fixture + ".m4a", pytest.content_fixture)
assert download == expect_download
def test_webm(self, monkeypatch, filename_fixture):
expect_download = True
monkeypatch.setattr("pafy.backend_shared.BaseStream.download", self.blank_audio_generator)
monkeypatch.setattr("pafy.backend_youtube_dl.YtdlStream.download", self.blank_audio_generator)
download = youtube_tools.download_song(filename_fixture + ".webm", pytest.content_fixture)
assert download == expect_download
class TestFFmpeg:
def test_convert_from_webm_to_mp3(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.webm -codec:a libmp3lame -ar 44100 -b:a 192k -vn {0}.mp3".format(
os.path.join(const.args.folder, filename_fixture)
)
monkeypatch.setattr("os.remove", lambda x: None)
_, command = convert.song(
filename_fixture + ".webm", filename_fixture + ".mp3", const.args.folder
)
assert " ".join(command) == expect_command
def test_convert_from_webm_to_m4a(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.webm -cutoff 20000 -codec:a aac -ar 44100 -b:a 192k -vn {0}.m4a".format(
os.path.join(const.args.folder, filename_fixture)
)
monkeypatch.setattr("os.remove", lambda x: None)
_, command = convert.song(
filename_fixture + ".webm", filename_fixture + ".m4a", const.args.folder
)
assert " ".join(command) == expect_command
def test_convert_from_m4a_to_mp3(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:v copy -codec:a libmp3lame -ar 44100 -b:a 192k -vn {0}.mp3".format(
os.path.join(const.args.folder, filename_fixture)
)
monkeypatch.setattr("os.remove", lambda x: None)
_, command = convert.song(
filename_fixture + ".m4a", filename_fixture + ".mp3", const.args.folder
)
assert " ".join(command) == expect_command
def test_convert_from_m4a_to_webm(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:a libopus -vbr on -b:a 192k -vn {0}.webm".format(
os.path.join(const.args.folder, filename_fixture)
)
monkeypatch.setattr("os.remove", lambda x: None)
_, command = convert.song(
filename_fixture + ".m4a", filename_fixture + ".webm", const.args.folder
)
assert " ".join(command) == expect_command
def test_convert_from_m4a_to_flac(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:a flac -ar 44100 -b:a 192k -vn {0}.flac".format(
os.path.join(const.args.folder, filename_fixture)
)
monkeypatch.setattr("os.remove", lambda x: None)
_, command = convert.song(
filename_fixture + ".m4a", filename_fixture + ".flac", const.args.folder
)
assert " ".join(command) == expect_command
def test_correct_container_for_m4a(self, filename_fixture, monkeypatch):
expect_command = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a.temp -acodec copy -b:a 192k -vn {0}.m4a".format(
os.path.join(const.args.folder, filename_fixture)
)
_, command = convert.song(
filename_fixture + ".m4a", filename_fixture + ".m4a", const.args.folder
)
assert " ".join(command) == expect_command
class TestAvconv:
def test_convert_from_m4a_to_mp3(self, filename_fixture, monkeypatch):
monkeypatch.setattr("os.remove", lambda x: None)
expect_command = "avconv -loglevel 0 -i {0}.m4a -ab 192k {0}.mp3 -y".format(
os.path.join(const.args.folder, filename_fixture)
)
_, command = convert.song(
filename_fixture + ".m4a",
filename_fixture + ".mp3",
const.args.folder,
avconv=True,
)
assert " ".join(command) == expect_command
@pytest.fixture(scope="module")
def trackpath_fixture(filename_fixture):
trackpath = os.path.join(const.args.folder, filename_fixture)
return trackpath
class TestEmbedMetadata:
def test_embed_in_mp3(self, metadata_fixture, trackpath_fixture):
expect_embed = True
embed = metadata.embed(trackpath_fixture + ".mp3", metadata_fixture)
assert embed == expect_embed
def test_embed_in_m4a(self, metadata_fixture, trackpath_fixture):
expect_embed = True
embed = metadata.embed(trackpath_fixture + ".m4a", metadata_fixture)
os.remove(trackpath_fixture + ".m4a")
assert embed == expect_embed
def test_embed_in_webm(self, metadata_fixture, trackpath_fixture):
expect_embed = False
embed = metadata.embed(trackpath_fixture + ".webm", metadata_fixture)
os.remove(trackpath_fixture + ".webm")
assert embed == expect_embed
def test_embed_in_flac(self, metadata_fixture, trackpath_fixture):
expect_embed = True
embed = metadata.embed(trackpath_fixture + ".flac", metadata_fixture)
os.remove(trackpath_fixture + ".flac")
assert embed == expect_embed
def test_check_track_exists_after_download(
metadata_fixture, filename_fixture, trackpath_fixture
):
expect_check = True
track_existence = downloader.CheckExists(filename_fixture, metadata_fixture)
check = track_existence.already_exists(SPOTIFY_TRACK_URL)
os.remove(trackpath_fixture + ".mp3")
assert check == expect_check
|
<filename>test/test_download_with_metadata.py
import subprocess
import os
from spotdl import const
from spotdl import internals
from spotdl import spotify_tools
from spotdl import youtube_tools
from spotdl import convert
from spotdl import metadata
from spotdl import downloader
import pytest
import loader
loader.load_defaults()
SPOTIFY_TRACK_URL = "https://open.spotify.com/track/3SipFlNddvL0XNZRLXvdZD"
EXPECTED_YOUTUBE_TITLE = "Janji - Heroes Tonight (feat. Johnning) [NCS Release]"
EXPECTED_SPOTIFY_TITLE = "Janji - Heroes Tonight"
EXPECTED_YOUTUBE_URL = "http://youtube.com/watch?v=3nQNiWdeH2Q"
# GIST_URL is the monkeypatched version of: https://www.youtube.com/results?search_query=janji+-+heroes
# so that we get same results even if YouTube changes the list/order of videos on their page.
GIST_URL = "https://gist.githubusercontent.com/ritiek/e731338e9810e31c2f00f13c249a45f5/raw/c11a27f3b5d11a8d082976f1cdd237bd605ec2c2/search_results.html"
def pytest_namespace():
# XXX: We override the value of `content_fixture` later in the tests.
# We do not use an acutal @pytest.fixture because it does not accept
# the monkeypatch parameter and we need to monkeypatch the network
# request before creating the Pafy object.
return {"content_fixture": None}
@pytest.fixture(scope="module")
def metadata_fixture():
meta_tags = spotify_tools.generate_metadata(SPOTIFY_TRACK_URL)
return meta_tags
def test_metadata(metadata_fixture):
expect_number = 24
assert len(metadata_fixture) == expect_number
class TestFileFormat:
def test_with_spaces(self, metadata_fixture):
title = internals.format_string(const.args.file_format, metadata_fixture)
assert title == EXPECTED_SPOTIFY_TITLE
def test_without_spaces(self, metadata_fixture):
const.args.no_spaces = True
title = internals.format_string(const.args.file_format, metadata_fixture)
assert title == EXPECTED_SPOTIFY_TITLE.replace(" ", "_")
def test_youtube_url(metadata_fixture, monkeypatch):
monkeypatch.setattr(
youtube_tools.GenerateYouTubeURL,
"_fetch_response",
loader.monkeypatch_youtube_search_page,
)
url = youtube_tools.generate_youtube_url(SPOTIFY_TRACK_URL, metadata_fixture)
assert url == EXPECTED_YOUTUBE_URL
def test_youtube_title(metadata_fixture, monkeypatch):
monkeypatch.setattr(
youtube_tools.GenerateYouTubeURL,
"_fetch_response",
loader.monkeypatch_youtube_search_page,
)
content = youtube_tools.go_pafy(SPOTIFY_TRACK_URL, metadata_fixture)
pytest.content_fixture = content
title = youtube_tools.get_youtube_title(content)
assert title == EXPECTED_YOUTUBE_TITLE
@pytest.fixture(scope="module")
def filename_fixture(metadata_fixture):
songname = internals.format_string(const.args.file_format, metadata_fixture)
filename = internals.sanitize_title(songname)
return filename
def test_check_track_exists_before_download(tmpdir, metadata_fixture, filename_fixture):
expect_check = False
const.args.folder = str(tmpdir)
# prerequisites for determining filename
track_existence = downloader.CheckExists(filename_fixture, metadata_fixture)
check = track_existence.already_exists(SPOTIFY_TRACK_URL)
assert check == expect_check
class TestDownload:
def blank_audio_generator(self, filepath):
if filepath.endswith(".m4a"):
cmd = "ffmpeg -f lavfi -i anullsrc -t 1 -c:a aac {}".format(filepath)
elif filepath.endswith(".webm"):
cmd = "ffmpeg -f lavfi -i anullsrc -t 1 -c:a libopus {}".format(filepath)
subprocess.call(cmd.split(" "))
def test_m4a(self, monkeypatch, filename_fixture):
expect_download = True
monkeypatch.setattr("pafy.backend_shared.BaseStream.download", self.blank_audio_generator)
monkeypatch.setattr("pafy.backend_youtube_dl.YtdlStream.download", self.blank_audio_generator)
download = youtube_tools.download_song(filename_fixture + ".m4a", pytest.content_fixture)
assert download == expect_download
def test_webm(self, monkeypatch, filename_fixture):
expect_download = True
monkeypatch.setattr("pafy.backend_shared.BaseStream.download", self.blank_audio_generator)
monkeypatch.setattr("pafy.backend_youtube_dl.YtdlStream.download", self.blank_audio_generator)
download = youtube_tools.download_song(filename_fixture + ".webm", pytest.content_fixture)
assert download == expect_download
class TestFFmpeg:
    """Assert the exact ffmpeg command lines produced by convert.song()."""

    @staticmethod
    def _joined_command(src_ext, dst_ext, base):
        # Run the conversion for base+src_ext -> base+dst_ext and return the
        # generated command as a single space-joined string.
        _, command = convert.song(base + src_ext, base + dst_ext, const.args.folder)
        return " ".join(command)

    def test_convert_from_webm_to_mp3(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.webm -codec:a libmp3lame -ar 44100 -b:a 192k -vn {0}.mp3".format(track)
        assert self._joined_command(".webm", ".mp3", filename_fixture) == expected

    def test_convert_from_webm_to_m4a(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.webm -cutoff 20000 -codec:a aac -ar 44100 -b:a 192k -vn {0}.m4a".format(track)
        assert self._joined_command(".webm", ".m4a", filename_fixture) == expected

    def test_convert_from_m4a_to_mp3(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:v copy -codec:a libmp3lame -ar 44100 -b:a 192k -vn {0}.mp3".format(track)
        assert self._joined_command(".m4a", ".mp3", filename_fixture) == expected

    def test_convert_from_m4a_to_webm(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:a libopus -vbr on -b:a 192k -vn {0}.webm".format(track)
        assert self._joined_command(".m4a", ".webm", filename_fixture) == expected

    def test_convert_from_m4a_to_flac(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a -codec:a flac -ar 44100 -b:a 192k -vn {0}.flac".format(track)
        assert self._joined_command(".m4a", ".flac", filename_fixture) == expected

    def test_correct_container_for_m4a(self, filename_fixture, monkeypatch):
        # Same-container "conversion": only a remux from the .temp file.
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "ffmpeg -y -hide_banner -nostats -v panic -i {0}.m4a.temp -acodec copy -b:a 192k -vn {0}.m4a".format(track)
        assert self._joined_command(".m4a", ".m4a", filename_fixture) == expected
class TestAvconv:
    """Assert the exact avconv fallback command line produced by convert.song()."""

    def test_convert_from_m4a_to_mp3(self, filename_fixture, monkeypatch):
        monkeypatch.setattr("os.remove", lambda x: None)
        track = os.path.join(const.args.folder, filename_fixture)
        expected = "avconv -loglevel 0 -i {0}.m4a -ab 192k {0}.mp3 -y".format(track)
        _, command = convert.song(
            filename_fixture + ".m4a",
            filename_fixture + ".mp3",
            const.args.folder,
            avconv=True,
        )
        assert " ".join(command) == expected
@pytest.fixture(scope="module")
def trackpath_fixture(filename_fixture):
    """Absolute path (without extension) of the track inside the download folder."""
    return os.path.join(const.args.folder, filename_fixture)
class TestEmbedMetadata:
    """metadata.embed() should succeed for mp3/m4a/flac and refuse webm."""

    def test_embed_in_mp3(self, metadata_fixture, trackpath_fixture):
        # The mp3 is deliberately left on disk; a later test
        # (test_check_track_exists_after_download) removes it.
        assert metadata.embed(trackpath_fixture + ".mp3", metadata_fixture) == True

    def test_embed_in_m4a(self, metadata_fixture, trackpath_fixture):
        embedded = metadata.embed(trackpath_fixture + ".m4a", metadata_fixture)
        os.remove(trackpath_fixture + ".m4a")
        assert embedded == True

    def test_embed_in_webm(self, metadata_fixture, trackpath_fixture):
        embedded = metadata.embed(trackpath_fixture + ".webm", metadata_fixture)
        os.remove(trackpath_fixture + ".webm")
        assert embedded == False

    def test_embed_in_flac(self, metadata_fixture, trackpath_fixture):
        embedded = metadata.embed(trackpath_fixture + ".flac", metadata_fixture)
        os.remove(trackpath_fixture + ".flac")
        assert embedded == True
def test_check_track_exists_after_download(
    metadata_fixture, filename_fixture, trackpath_fixture
):
    """The mp3 left behind by the embed tests must be detected, then cleaned up."""
    track_existence = downloader.CheckExists(filename_fixture, metadata_fixture)
    found = track_existence.already_exists(SPOTIFY_TRACK_URL)
    os.remove(trackpath_fixture + ".mp3")
    assert found == True
|
en
| 0.781535
|
# GIST_URL is the monkeypatched version of: https://www.youtube.com/results?search_query=janji+-+heroes # so that we get same results even if YouTube changes the list/order of videos on their page. # XXX: We override the value of `content_fixture` later in the tests. # We do not use an acutal @pytest.fixture because it does not accept # the monkeypatch parameter and we need to monkeypatch the network # request before creating the Pafy object. # prerequisites for determining filename
| 2.173948
| 2
|
app.py
|
matthewgall/dnsmasq-viewer
| 0
|
6627374
|
#!/usr/bin/env python
import sys, os, logging, json, datetime, time, re, socket
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template, HTTPError
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
def display_time(timestamp):
return datetime.datetime.fromtimestamp(
int(timestamp)
).strftime('%d/%m/%Y %H:%M:%S')
def scanDevice(ip):
    """Probe *ip* for a listening web UI on common HTTP ports.

    Args:
        ip: hostname or IP address to probe.

    Returns:
        The first port (int) that accepts a TCP connection, or False if none do.

    Fixes vs. the original: the outer `while` loop re-ran the full port scan
    len(ports) times (up to 16 connection attempts when nothing listens); the
    socket leaked when connect() failed; the bare `except` also swallowed
    KeyboardInterrupt.
    """
    for port in (80, 8080, 8081, 8888):
        s = socket.socket()
        s.settimeout(1)
        try:
            s.connect((ip, port))
            return port
        except OSError:
            # Closed/filtered port, timeout, or unresolvable host: try the next.
            pass
        finally:
            s.close()
    return False
def fetch(path=os.getenv('DNSMASQ_LEASES', '/var/lib/misc/dnsmasq.leases')):
    """Parse the dnsmasq leases file into a list of lease dicts.

    Each leases line is: <expiry-epoch> <mac> <ip> <hostname> <client-id>.

    Returns:
        list of dicts with keys expires, linkAddr, ip, hostname, clientIdent,
        webui (bool) and port ('' when no web UI was found).
    """
    data = []
    with open(path, 'r') as f:
        for raw in f:
            line = raw.strip().split(' ')
            # Scan the lease's IP (field 2) for a web UI. The original scanned
            # line[3] (the hostname), which fails when dnsmasq records an
            # unknown hostname as '*'; fetch_static already scans by IP.
            scanRes = scanDevice(line[2])
            webui = scanRes != False
            port = scanRes if webui else ''
            data.append({
                'expires': display_time(int(line[0])),
                'linkAddr': line[1],
                'ip': line[2],
                'hostname': line[3],
                'clientIdent': line[4],
                'webui': webui,
                'port': port,
            })
    return data
def fetch_static(path=os.getenv('DNSMASQ_STATIC', '')):
    """Parse a comma-separated list of static-host files into host dicts."""
    if path == '':
        return []
    hosts = []
    for filename in path.split(','):
        with open(filename, 'r') as f:
            for raw in f.readlines():
                # Collapse runs of spaces, then split on tabs/whitespace:
                # field 0 is the IP, field 1 the hostname.
                fields = re.split('\t|\s', re.sub(' +', ' ', raw))
                found_port = scanDevice(fields[0])
                has_webui = found_port != False
                hosts.append({
                    'ip': fields[0],
                    'hostname': fields[1].strip(),
                    'webui': has_webui,
                    'port': found_port if has_webui else '',
                })
    return hosts
@route('/static/<filepath:path>')
def server_static(filepath):
    # Serve bundled assets from the views/static directory.
    return static_file(filepath, root='views/static')
@route('/purge')
def purge():
    # Drop all cached lease/static data so the next page load re-reads the
    # files and re-runs the port scans.
    cache.clear()
    return "Cache successfully purged"
@route('/')
def index():
    # Building the lists is expensive (per-host port scans), so both are
    # served from the beaker cache and rebuilt via createfunc on a miss.
    results = cache.get(key='leases', createfunc=fetch)
    staticHosts = cache.get(key='static', createfunc=fetch_static)
    return template('index', leases=results, staticHosts=staticHosts)
if __name__ == '__main__':
    app = default_app()

    # File-backed beaker cache; entries expire after 5 minutes.
    cache_opts = {
        'cache.type': 'file',
        'cache.data_dir': 'cache/data',
        'cache.lock_dir': 'cache/lock'
    }
    cacheMgr = CacheManager(**parse_cache_config_options(cache_opts))
    cache = cacheMgr.get_cache('data', expires=300)

    serverHost = os.getenv('IP', 'localhost')
    serverPort = os.getenv('PORT', '5000')

    # Instantiate the logger
    log = logging.getLogger('log')
    console = logging.StreamHandler()
    log.setLevel(logging.INFO)
    log.addHandler(console)

    # Now we're ready, so start the server
    try:
        cache.clear()
        app.run(host=serverHost, port=serverPort, server='tornado')
    except Exception:
        # Log the traceback: the old bare `except` hid the actual cause
        # (missing tornado, port already in use, ...) and swallowed Ctrl-C.
        log.exception("Failed to start application server")
|
#!/usr/bin/env python
import sys, os, logging, json, datetime, time, re, socket
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template, HTTPError
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
def display_time(timestamp):
return datetime.datetime.fromtimestamp(
int(timestamp)
).strftime('%d/%m/%Y %H:%M:%S')
def scanDevice(ip):
    """Probe *ip* for a listening web UI on common HTTP ports.

    Args:
        ip: hostname or IP address to probe.

    Returns:
        The first port (int) that accepts a TCP connection, or False if none do.

    Fixes vs. the original: the outer `while` loop re-ran the full port scan
    len(ports) times (up to 16 connection attempts when nothing listens); the
    socket leaked when connect() failed; the bare `except` also swallowed
    KeyboardInterrupt.
    """
    for port in (80, 8080, 8081, 8888):
        s = socket.socket()
        s.settimeout(1)
        try:
            s.connect((ip, port))
            return port
        except OSError:
            # Closed/filtered port, timeout, or unresolvable host: try the next.
            pass
        finally:
            s.close()
    return False
def fetch(path=os.getenv('DNSMASQ_LEASES', '/var/lib/misc/dnsmasq.leases')):
    """Parse the dnsmasq leases file into a list of lease dicts.

    Each leases line is: <expiry-epoch> <mac> <ip> <hostname> <client-id>.

    Returns:
        list of dicts with keys expires, linkAddr, ip, hostname, clientIdent,
        webui (bool) and port ('' when no web UI was found).
    """
    data = []
    with open(path, 'r') as f:
        for raw in f:
            line = raw.strip().split(' ')
            # Scan the lease's IP (field 2) for a web UI. The original scanned
            # line[3] (the hostname), which fails when dnsmasq records an
            # unknown hostname as '*'; fetch_static already scans by IP.
            scanRes = scanDevice(line[2])
            webui = scanRes != False
            port = scanRes if webui else ''
            data.append({
                'expires': display_time(int(line[0])),
                'linkAddr': line[1],
                'ip': line[2],
                'hostname': line[3],
                'clientIdent': line[4],
                'webui': webui,
                'port': port,
            })
    return data
def fetch_static(path=os.getenv('DNSMASQ_STATIC', '')):
    """Parse a comma-separated list of static-host files into host dicts."""
    if path == '':
        return []
    hosts = []
    for filename in path.split(','):
        with open(filename, 'r') as f:
            for raw in f.readlines():
                # Collapse runs of spaces, then split on tabs/whitespace:
                # field 0 is the IP, field 1 the hostname.
                fields = re.split('\t|\s', re.sub(' +', ' ', raw))
                found_port = scanDevice(fields[0])
                has_webui = found_port != False
                hosts.append({
                    'ip': fields[0],
                    'hostname': fields[1].strip(),
                    'webui': has_webui,
                    'port': found_port if has_webui else '',
                })
    return hosts
@route('/static/<filepath:path>')
def server_static(filepath):
    # Serve bundled assets from the views/static directory.
    return static_file(filepath, root='views/static')
@route('/purge')
def purge():
    # Drop all cached lease/static data so the next page load re-reads the
    # files and re-runs the port scans.
    cache.clear()
    return "Cache successfully purged"
@route('/')
def index():
    # Building the lists is expensive (per-host port scans), so both are
    # served from the beaker cache and rebuilt via createfunc on a miss.
    results = cache.get(key='leases', createfunc=fetch)
    staticHosts = cache.get(key='static', createfunc=fetch_static)
    return template('index', leases=results, staticHosts=staticHosts)
if __name__ == '__main__':
    app = default_app()

    # File-backed beaker cache; entries expire after 5 minutes.
    cache_opts = {
        'cache.type': 'file',
        'cache.data_dir': 'cache/data',
        'cache.lock_dir': 'cache/lock'
    }
    cacheMgr = CacheManager(**parse_cache_config_options(cache_opts))
    cache = cacheMgr.get_cache('data', expires=300)

    serverHost = os.getenv('IP', 'localhost')
    serverPort = os.getenv('PORT', '5000')

    # Instantiate the logger
    log = logging.getLogger('log')
    console = logging.StreamHandler()
    log.setLevel(logging.INFO)
    log.addHandler(console)

    # Now we're ready, so start the server
    try:
        cache.clear()
        app.run(host=serverHost, port=serverPort, server='tornado')
    except Exception:
        # Log the traceback: the old bare `except` hid the actual cause
        # (missing tornado, port already in use, ...) and swallowed Ctrl-C.
        log.exception("Failed to start application server")
|
en
| 0.618548
|
#!/usr/bin/env python # Now we're ready, so start the server # Instantiate the logger # Now we're ready, so start the server
| 2.226084
| 2
|
hardware/makesrams.py
|
luismarques/ChiselGPU
| 40
|
6627375
|
#!/usr/bin/python
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import math
#
# Create SRAM blocks based on .conf file emitted by Chisel
# Each line is:
# name TileBuffer_colorMemory depth 4096 width 32 ports write,read
def emitMemory(file, name, dataWidth, depth):
    """Write one Verilog module named *name* to *file*: a dual-port RAM
    (write port W0*, read port R1*) of *depth* words, each *dataWidth* bits
    wide, wrapping an Altera ALTSYNCRAM primitive."""
    # ceil(log2(depth)) address bits are needed to index every word.
    addrWidth = int(math.ceil(math.log(depth, 2)))
    file.write('module ' + name + '''(
input CLK,
input[''' + str(addrWidth - 1) + ''':0] W0A,
input W0E,
input[''' + str(dataWidth - 1) + ''':0] W0I,
input[''' + str(addrWidth - 1) + ''':0] R1A,
input R1E,
output[''' + str(dataWidth - 1) + ''':0] R1O);
ALTSYNCRAM #(
.OPERATION_MODE("DUAL_PORT"),
.READ_DURING_WRITE_MIXED_PORTS("DONT_CARE"),
.WIDTH_A(''' + str(dataWidth) + '''),
.WIDTHAD_A(''' + str(addrWidth) + '''),
.WIDTH_B(''' + str(dataWidth) + '''),
.WIDTHAD_B(''' + str(addrWidth) + ''')
) data0(
.clock0(CLK),
.clock1(CLK),
// Write port
.rden_a(1'b0),
.wren_a(W0E),
.address_a(W0A),
.data_a(W0I),
.q_a(),
// Read port
.rden_b(R1E),
.wren_b(1'b0),
.address_b(R1A),
.q_b(R1O));
endmodule
''')
# Driver: read the Chisel-emitted .conf file (argv[1]) and write one SRAM
# wrapper module per "name ..." line into the Verilog output (argv[2]), e.g.:
#   name TileBuffer_colorMemory depth 4096 width 32 ports write,read
with open(sys.argv[1], 'r') as inputFile, open(sys.argv[2], 'w') as outputFile:
    for line in inputFile:
        tokens = [tok.strip() for tok in line.split(' ') if tok != '']
        if tokens and tokens[0] == 'name':
            # tokens: ['name', <ident>, 'depth', <depth>, 'width', <width>, ...]
            emitMemory(outputFile, tokens[1], int(tokens[5]), int(tokens[3]))
|
#!/usr/bin/python
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import math
#
# Create SRAM blocks based on .conf file emitted by Chisel
# Each line is:
# name TileBuffer_colorMemory depth 4096 width 32 ports write,read
def emitMemory(file, name, dataWidth, depth):
    """Write one Verilog module named *name* to *file*: a dual-port RAM
    (write port W0*, read port R1*) of *depth* words, each *dataWidth* bits
    wide, wrapping an Altera ALTSYNCRAM primitive."""
    # ceil(log2(depth)) address bits are needed to index every word.
    addrWidth = int(math.ceil(math.log(depth, 2)))
    file.write('module ' + name + '''(
input CLK,
input[''' + str(addrWidth - 1) + ''':0] W0A,
input W0E,
input[''' + str(dataWidth - 1) + ''':0] W0I,
input[''' + str(addrWidth - 1) + ''':0] R1A,
input R1E,
output[''' + str(dataWidth - 1) + ''':0] R1O);
ALTSYNCRAM #(
.OPERATION_MODE("DUAL_PORT"),
.READ_DURING_WRITE_MIXED_PORTS("DONT_CARE"),
.WIDTH_A(''' + str(dataWidth) + '''),
.WIDTHAD_A(''' + str(addrWidth) + '''),
.WIDTH_B(''' + str(dataWidth) + '''),
.WIDTHAD_B(''' + str(addrWidth) + ''')
) data0(
.clock0(CLK),
.clock1(CLK),
// Write port
.rden_a(1'b0),
.wren_a(W0E),
.address_a(W0A),
.data_a(W0I),
.q_a(),
// Read port
.rden_b(R1E),
.wren_b(1'b0),
.address_b(R1A),
.q_b(R1O));
endmodule
''')
# Driver: read the Chisel-emitted .conf file (argv[1]) and write one SRAM
# wrapper module per "name ..." line into the Verilog output (argv[2]), e.g.:
#   name TileBuffer_colorMemory depth 4096 width 32 ports write,read
with open(sys.argv[1], 'r') as inputFile, open(sys.argv[2], 'w') as outputFile:
    for line in inputFile:
        tokens = [tok.strip() for tok in line.split(' ') if tok != '']
        if tokens and tokens[0] == 'name':
            # tokens: ['name', <ident>, 'depth', <depth>, 'width', <width>, ...]
            emitMemory(outputFile, tokens[1], int(tokens[5]), int(tokens[3]))
|
en
| 0.63909
|
#!/usr/bin/python # # Copyright 2015 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Create SRAM blocks based on .conf file emitted by Chisel # Each line is: # name TileBuffer_colorMemory depth 4096 width 32 ports write,read ( input CLK, input[ :0] W0A, input W0E, input[ :0] W0I, input[ :0] R1A, input R1E, output[ :0] R1O); ALTSYNCRAM #( .OPERATION_MODE("DUAL_PORT"), .READ_DURING_WRITE_MIXED_PORTS("DONT_CARE"), .WIDTH_A( ), .WIDTHAD_A( ), .WIDTH_B( ), .WIDTHAD_B( ) ) data0( .clock0(CLK), .clock1(CLK), // Write port .rden_a(1'b0), .wren_a(W0E), .address_a(W0A), .data_a(W0I), .q_a(), // Read port .rden_b(R1E), .wren_b(1'b0), .address_b(R1A), .q_b(R1O)); endmodule
| 2.122468
| 2
|
setup.py
|
drblahdblah/djones-covid19-analysis
| 0
|
6627376
|
<reponame>drblahdblah/djones-covid19-analysis
from setuptools import find_packages, setup

setup(
    name='src',
    packages=find_packages(),
    version='0.0.1',
    # Plain literals: the originals carried f-prefixes with no placeholders,
    # and "University" was misspelled ("Univerisity").
    description='A dashboard for visualising my analyses of the Johns Hopkins'
                ' University\'s (JHUs) corona-virus dataset.',
    author='<NAME>',
    license='MIT',
)
|
from setuptools import find_packages, setup

setup(
    name='src',
    packages=find_packages(),
    version='0.0.1',
    # Plain literals: the originals carried f-prefixes with no placeholders,
    # and "University" was misspelled ("Univerisity").
    description='A dashboard for visualising my analyses of the Johns Hopkins'
                ' University\'s (JHUs) corona-virus dataset.',
    author='<NAME>',
    license='MIT',
)
|
none
| 1
| 1.01879
| 1
|
|
nidaqmx/stream_readers.py
|
hboshnak/nidaqmx-python
| 0
|
6627377
|
<filename>nidaqmx/stream_readers.py
import numpy
from nidaqmx import DaqError
from nidaqmx.constants import READ_ALL_AVAILABLE
from nidaqmx._task_modules.read_functions import (
_read_analog_f_64, _read_analog_scalar_f_64, _read_binary_i_16,
_read_binary_i_32, _read_binary_u_16, _read_binary_u_32,
_read_digital_lines, _read_digital_u_8, _read_digital_u_16,
_read_digital_scalar_u_32, _read_digital_u_32, _read_counter_scalar_f_64,
_read_counter_scalar_u_32, _read_counter_f_64_ex, _read_counter_u_32_ex,
_read_ctr_freq_scalar, _read_ctr_ticks_scalar, _read_ctr_time_scalar,
_read_ctr_freq, _read_ctr_ticks, _read_ctr_time)
from nidaqmx.error_codes import DAQmxErrors
__all__ = ['AnalogSingleChannelReader', 'AnalogMultiChannelReader',
'AnalogUnscaledReader', 'CounterReader',
'DigitalSingleChannelReader', 'DigitalMultiChannelReader']
class ChannelReaderBase(object):
    """
    Defines base class for all NI-DAQmx stream readers.
    """
    def __init__(self, task_in_stream):
        """
        Args:
            task_in_stream: Specifies the input stream associated with
                an NI-DAQmx task from which to read samples.
        """
        self._in_stream = task_in_stream
        self._task = task_in_stream._task
        self._handle = task_in_stream._task._handle
        # Array-shape validation is on by default; see verify_array_shape.
        self._verify_array_shape = True

    @property
    def verify_array_shape(self):
        """
        bool: Whether the size and shape of user-supplied NumPy arrays
            passed to read methods are verified. Defaults to True when
            this object is instantiated. Enabling the check may
            marginally reduce read performance.
        """
        return self._verify_array_shape

    @verify_array_shape.setter
    def verify_array_shape(self, val):
        self._verify_array_shape = val

    def _verify_array(self, data, number_of_samples_per_channel,
                      is_many_chan, is_many_samp):
        """
        Raise DaqError if *data* is not shaped correctly for reading
        *number_of_samples_per_channel* samples, given whether the calling
        read method is the many-channel and/or many-sample variant.
        No-op when verify_array_shape is False.
        """
        if not self._verify_array_shape:
            return
        channels_to_read = self._in_stream.channels_to_read
        number_of_channels = len(channels_to_read.channel_names)
        # Expected shapes: (channels, samples), (channels,) or (samples,);
        # the single-channel single-sample case leaves array_shape None and
        # is not checked.
        array_shape = None
        if is_many_chan:
            if is_many_samp:
                array_shape = (number_of_channels,
                               number_of_samples_per_channel)
            else:
                array_shape = (number_of_channels,)
        else:
            if is_many_samp:
                array_shape = (number_of_samples_per_channel,)

        if array_shape is not None and data.shape != array_shape:
            raise DaqError(
                'Read cannot be performed because the NumPy array passed into '
                'this function is not shaped correctly. You must pass in a '
                'NumPy array of the correct shape based on the number of '
                'channels in task and the number of samples per channel '
                'requested.\n\n'
                'Shape of NumPy Array provided: {0}\n'
                'Shape of NumPy Array required: {1}'
                .format(data.shape, array_shape),
                DAQmxErrors.UNKNOWN, task_name=self._task.name)

    def _verify_array_digital_lines(
            self, data, is_many_chan, is_many_line):
        """
        Raise DaqError if *data* is not shaped correctly for a digital
        read, given whether the calling read method is the many-channel
        and/or many-line variant. No-op when verify_array_shape is False.
        """
        if not self._verify_array_shape:
            return
        channels_to_read = self._in_stream.channels_to_read
        number_of_channels = len(channels_to_read.channel_names)
        number_of_lines = self._in_stream.di_num_booleans_per_chan
        # Expected shapes: (channels, lines), (channels,) or (lines,).
        array_shape = None
        if is_many_chan:
            if is_many_line:
                array_shape = (number_of_channels, number_of_lines)
            else:
                array_shape = (number_of_channels,)
        else:
            if is_many_line:
                array_shape = (number_of_lines,)

        if array_shape is not None and data.shape != array_shape:
            raise DaqError(
                'Read cannot be performed because the NumPy array passed into '
                'this function is not shaped correctly. You must pass in a '
                'NumPy array of the correct shape based on the number of '
                'channels in task and the number of digital lines per '
                'channel.\n\n'
                'Shape of NumPy Array provided: {0}\n'
                'Shape of NumPy Array required: {1}'
                .format(data.shape, array_shape),
                DAQmxErrors.UNKNOWN, task_name=self._task.name)
class AnalogSingleChannelReader(ChannelReaderBase):
    """
    Reads samples from an analog input channel in an NI-DAQmx task.
    """
    def read_many_sample(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """Read float64 samples from the task's single analog input channel
        into the preallocated 1D NumPy array *data*.

        *number_of_samples_per_channel* may be READ_ALL_AVAILABLE: a
        continuous task then reads whatever is currently buffered, while a
        finite task waits for (or, when read_all_avail_samp is True, takes)
        all of its samples. *timeout* is in seconds; WAIT_INFINITELY blocks
        and 0 attempts the read exactly once.

        Returns the number of samples read.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, False, True)
        return _read_analog_f_64(self._handle, data, num_samps, timeout)

    def read_one_sample(self, timeout=10):
        """Read and return a single float64 sample, waiting up to *timeout*
        seconds for it to become available."""
        return _read_analog_scalar_f_64(self._handle, timeout)
class AnalogMultiChannelReader(ChannelReaderBase):
    """
    Reads samples from one or more analog input channels in an NI-DAQmx
    task.
    """
    def read_many_sample(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """Read float64 samples from every analog input channel in the task
        into the preallocated 2D NumPy array *data*, shaped
        (number of channels, samples per channel); rows follow the order of
        channels_to_read. Set verify_array_shape to validate the array.

        *number_of_samples_per_channel* may be READ_ALL_AVAILABLE: a
        continuous task then reads whatever is currently buffered, while a
        finite task waits for (or, when read_all_avail_samp is True, takes)
        all of its samples. *timeout* is in seconds; WAIT_INFINITELY blocks
        and 0 attempts the read exactly once.

        Returns the number of samples read per channel.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, True, True)
        return _read_analog_f_64(self._handle, data, num_samps, timeout)

    def read_one_sample(self, data, timeout=10):
        """Read one float64 sample from each analog input channel into the
        preallocated 1D NumPy array *data* (one element per channel),
        waiting up to *timeout* seconds."""
        self._verify_array(data, 1, True, False)
        _read_analog_f_64(self._handle, data, 1, timeout)
class AnalogUnscaledReader(ChannelReaderBase):
"""
Reads unscaled samples from one or more analog input channels in an
NI-DAQmx task.
"""
def read_int16(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more unscaled 16-bit integer samples from one or
more analog input channels in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of unscaled 16-bit integer values to hold the
samples requested. The size of the array must be large
enough to hold all requested samples from all channels
in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_binary_i_16(
self._handle, data, number_of_samples_per_channel,
timeout)
def read_int32(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more unscaled 32-bit integer samples from one or
more analog input channels in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of unscaled 32-bit integer values to hold the
samples requested. The size of the array must be large
enough to hold all requested samples from all channels
in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_binary_i_32(
self._handle, data, number_of_samples_per_channel,
timeout)
def read_uint16(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more unscaled 16-bit unsigned integer samples from
one or more analog input channels in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of unscaled 16-bit unsigned integer values to
hold the samples requested. The size of the array must
be large enough to hold all requested samples from all
channels in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_binary_u_16(
self._handle, data, number_of_samples_per_channel,
timeout)
def read_uint32(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more unscaled unsigned 32-bit integer samples from
one or more analog input channels in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of unscaled 32-bit unsigned integer values to
hold the samples requested. The size of the array must
be large enough to hold all requested samples from all
channels in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_binary_u_32(
self._handle, data, number_of_samples_per_channel,
timeout)
class CounterReader(ChannelReaderBase):
    """
    Reads samples from a counter input channel in an NI-DAQmx task.
    """

    def read_many_sample_double(
            self, data, number_of_samples_per_channel: int = READ_ALL_AVAILABLE,
            timeout: float = 10.0) -> int:
        """
        Reads one or more floating-point samples from a single counter
        input channel in a task.

        This read method accepts a preallocated NumPy array to hold the
        samples requested, which can be advantageous for performance and
        interoperability with NumPy and SciPy.

        Passing in a preallocated array is valuable in continuous
        acquisition scenarios, where the same array can be used
        repeatedly in each call to the method.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of floating-point values to hold the samples
                requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies the
                number of samples to read.

                If you set this input to nidaqmx.constants.
                READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
                to read based on if the task acquires samples
                continuously or acquires a finite number of samples.

                If the task acquires samples continuously and you set
                this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
                method reads all the samples currently available in the
                buffer.

                If the task acquires a finite number of samples and you
                set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
                the method waits for the task to acquire all requested
                samples, then reads those samples. If you set the
                "read_all_avail_samp" property to True, the method reads
                the samples currently available in the buffer and does
                not wait for the task to acquire all requested samples.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates the number of samples acquired by each channel.
            NI-DAQmx returns a single value because this value is the
            same for all channels.
        """
        # Resolve READ_ALL_AVAILABLE (or an explicit count) into a
        # concrete sample count for this task.
        number_of_samples_per_channel = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))

        # 1D, single-channel read; validation only runs when the
        # "verify_array_shape" property is set.
        self._verify_array(data, number_of_samples_per_channel, False, True)

        return _read_counter_f_64_ex(
            self._handle, data, number_of_samples_per_channel,
            timeout)

    def read_many_sample_pulse_frequency(
            self, frequencies, duty_cycles,
            number_of_samples_per_channel: int = READ_ALL_AVAILABLE,
            timeout: float = 10.0) -> int:
        """
        Reads one or more pulse samples in terms of frequency from a
        single counter input channel in a task.

        This read method accepts preallocated NumPy arrays to hold the
        samples requested, which can be advantageous for performance and
        interoperability with NumPy and SciPy.

        Passing in preallocated arrays is valuable in continuous
        acquisition scenarios, where the same array can be used
        repeatedly in each call to the method.

        Args:
            frequencies (numpy.ndarray): Specifies a preallocated 1D
                NumPy array of floating-point values to hold the frequency
                portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            duty_cycles (numpy.ndarray): Specifies a preallocated 1D
                NumPy array of floating-point values to hold the duty
                cycle portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies the
                number of samples to read.

                If you set this input to nidaqmx.constants.
                READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
                to read based on if the task acquires samples
                continuously or acquires a finite number of samples.

                If the task acquires samples continuously and you set
                this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
                method reads all the samples currently available in the
                buffer.

                If the task acquires a finite number of samples and you
                set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
                the method waits for the task to acquire all requested
                samples, then reads those samples. If you set the
                "read_all_avail_samp" property to True, the method reads
                the samples currently available in the buffer and does
                not wait for the task to acquire all requested samples.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates the number of samples acquired by each channel.
            NI-DAQmx returns a single value because this value is the
            same for all channels.
        """
        number_of_samples_per_channel = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))

        # Both output arrays must be sized for the same sample count.
        self._verify_array(
            frequencies, number_of_samples_per_channel, False, True)
        self._verify_array(
            duty_cycles, number_of_samples_per_channel, False, True)

        return _read_ctr_freq(
            self._handle, frequencies, duty_cycles,
            number_of_samples_per_channel, timeout)

    def read_many_sample_pulse_ticks(
            self, high_ticks, low_ticks,
            number_of_samples_per_channel: int = READ_ALL_AVAILABLE,
            timeout: float = 10.0) -> int:
        """
        Reads one or more pulse samples in terms of ticks from a single
        counter input channel in a task.

        This read method accepts preallocated NumPy arrays to hold the
        samples requested, which can be advantageous for performance and
        interoperability with NumPy and SciPy.

        Passing in preallocated arrays is valuable in continuous
        acquisition scenarios, where the same array can be used
        repeatedly in each call to the method.

        Args:
            high_ticks (numpy.ndarray): Specifies a preallocated 1D
                NumPy array of 32-bit unsigned integer values to hold
                the high ticks portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            low_ticks (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of 32-bit unsigned integer values to hold the low
                ticks portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies the
                number of samples to read.

                If you set this input to nidaqmx.constants.
                READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
                to read based on if the task acquires samples
                continuously or acquires a finite number of samples.

                If the task acquires samples continuously and you set
                this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
                method reads all the samples currently available in the
                buffer.

                If the task acquires a finite number of samples and you
                set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
                the method waits for the task to acquire all requested
                samples, then reads those samples. If you set the
                "read_all_avail_samp" property to True, the method reads
                the samples currently available in the buffer and does
                not wait for the task to acquire all requested samples.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates the number of samples acquired by each channel.
            NI-DAQmx returns a single value because this value is the
            same for all channels.
        """
        number_of_samples_per_channel = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))

        # Both output arrays must be sized for the same sample count.
        self._verify_array(
            high_ticks, number_of_samples_per_channel, False, True)
        self._verify_array(
            low_ticks, number_of_samples_per_channel, False, True)

        return _read_ctr_ticks(
            self._handle, high_ticks, low_ticks,
            number_of_samples_per_channel, timeout)

    def read_many_sample_pulse_time(
            self, high_times, low_times,
            number_of_samples_per_channel: int = READ_ALL_AVAILABLE,
            timeout: float = 10.0) -> int:
        """
        Reads one or more pulse samples in terms of time from a single
        counter input channel in a task.

        This read method accepts preallocated NumPy arrays to hold the
        samples requested, which can be advantageous for performance and
        interoperability with NumPy and SciPy.

        Passing in preallocated arrays is valuable in continuous
        acquisition scenarios, where the same array can be used
        repeatedly in each call to the method.

        Args:
            high_times (numpy.ndarray): Specifies a preallocated 1D
                NumPy array of floating-point values to hold the high
                time portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            low_times (numpy.ndarray): Specifies a preallocated 1D
                NumPy array of floating-point values to hold the low
                time portion of the pulse samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies the
                number of samples to read.

                If you set this input to nidaqmx.constants.
                READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
                to read based on if the task acquires samples
                continuously or acquires a finite number of samples.

                If the task acquires samples continuously and you set
                this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
                method reads all the samples currently available in the
                buffer.

                If the task acquires a finite number of samples and you
                set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
                the method waits for the task to acquire all requested
                samples, then reads those samples. If you set the
                "read_all_avail_samp" property to True, the method reads
                the samples currently available in the buffer and does
                not wait for the task to acquire all requested samples.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates the number of samples acquired by each channel.
            NI-DAQmx returns a single value because this value is the
            same for all channels.
        """
        number_of_samples_per_channel = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))

        # Both output arrays must be sized for the same sample count.
        self._verify_array(
            high_times, number_of_samples_per_channel, False, True)
        self._verify_array(
            low_times, number_of_samples_per_channel, False, True)

        return _read_ctr_time(
            self._handle, high_times, low_times,
            number_of_samples_per_channel, timeout)

    def read_many_sample_uint32(
            self, data, number_of_samples_per_channel: int = READ_ALL_AVAILABLE,
            timeout: float = 10.0) -> int:
        """
        Reads one or more 32-bit unsigned integer samples from a single
        counter input channel in a task.

        This read method accepts a preallocated NumPy array to hold the
        samples requested, which can be advantageous for performance and
        interoperability with NumPy and SciPy.

        Passing in a preallocated array is valuable in continuous
        acquisition scenarios, where the same array can be used
        repeatedly in each call to the method.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of 32-bit unsigned integer values to hold the
                samples requested.

                Each element in the array corresponds to a sample from
                the channel. The size of the array must be large enough
                to hold all requested samples from the channel in the
                task; otherwise, an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies the
                number of samples to read.

                If you set this input to nidaqmx.constants.
                READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
                to read based on if the task acquires samples
                continuously or acquires a finite number of samples.

                If the task acquires samples continuously and you set
                this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
                method reads all the samples currently available in the
                buffer.

                If the task acquires a finite number of samples and you
                set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
                the method waits for the task to acquire all requested
                samples, then reads those samples. If you set the
                "read_all_avail_samp" property to True, the method reads
                the samples currently available in the buffer and does
                not wait for the task to acquire all requested samples.
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates the number of samples acquired by each channel.
            NI-DAQmx returns a single value because this value is the
            same for all channels.
        """
        number_of_samples_per_channel = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))

        self._verify_array(data, number_of_samples_per_channel, False, True)

        return _read_counter_u_32_ex(
            self._handle, data, number_of_samples_per_channel,
            timeout)

    def read_one_sample_double(self, timeout: float = 10) -> float:
        """
        Reads a single floating-point sample from a single counter input
        channel in a task.

        Args:
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            float: Indicates a single floating-point sample from the
                task.
        """
        return _read_counter_scalar_f_64(self._handle, timeout)

    def read_one_sample_pulse_frequency(self, timeout: float = 10):
        """
        Reads a pulse sample in terms of frequency from a single counter
        input channel in a task.

        Args:
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            nidaqmx.types.CtrFreq:

            Indicates a pulse sample in terms of frequency from the task.
        """
        return _read_ctr_freq_scalar(self._handle, timeout)

    def read_one_sample_pulse_ticks(self, timeout: float = 10):
        """
        Reads a pulse sample in terms of ticks from a single counter
        input channel in a task.

        Args:
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            nidaqmx.types.CtrTick:

            Indicates a pulse sample in terms of ticks from the task.
        """
        return _read_ctr_ticks_scalar(self._handle, timeout)

    def read_one_sample_pulse_time(self, timeout: float = 10):
        """
        Reads a pulse sample in terms of time from a single counter
        input channel in a task.

        Args:
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            nidaqmx.types.CtrTime:

            Indicates a pulse sample in terms of time from the task.
        """
        return _read_ctr_time_scalar(self._handle, timeout)

    def read_one_sample_uint32(self, timeout: float = 10) -> int:
        """
        Reads a single 32-bit unsigned integer sample from a single
        counter input channel in a task.

        Args:
            timeout (Optional[float]): Specifies the amount of time in
                seconds to wait for samples to become available. If the
                time elapses, the method returns an error and any
                samples read before the timeout elapsed. The default
                timeout is 10 seconds. If you set timeout to
                nidaqmx.constants.WAIT_INFINITELY, the method waits
                indefinitely. If you set timeout to 0, the method tries
                once to read the requested samples and returns an error
                if it is unable to.
        Returns:
            int:

            Indicates a single 32-bit unsigned integer sample from the
            task.
        """
        return _read_counter_scalar_u_32(self._handle, timeout)
class DigitalSingleChannelReader(ChannelReaderBase):
"""
Reads samples from a digital input channel in an NI-DAQmx task.
"""
def read_many_sample_port_byte(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 8-bit unsigned integer samples from a single
digital input channel in a task.
Use this method for devices with up to 8 lines per port.
This read method accepts a preallocated NumPy array to hold
the samples requested, which can be advantageous for performance
and interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 8-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, False, True)
return _read_digital_u_8(
self._handle, data, number_of_samples_per_channel, timeout)
def read_many_sample_port_uint16(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 16-bit unsigned integer samples from a single
digital input channel in a task.
Use this method for devices with up to 16 lines per port.
This read method accepts a preallocated NumPy array to hold
the samples requested, which can be advantageous for performance
and interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 16-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, False, True)
return _read_digital_u_16(
self._handle, data, number_of_samples_per_channel, timeout)
def read_many_sample_port_uint32(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 32-bit unsigned integer samples from a single
digital input channel in a task.
Use this method for devices with up to 32 lines per port.
This read method accepts a preallocated NumPy array to hold
the samples requested, which can be advantageous for performance
and interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 32-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, False, True)
return _read_digital_u_32(
self._handle, data, number_of_samples_per_channel, timeout)
def read_one_sample_multi_line(self, data, timeout=10):
"""
Reads a single boolean sample from a single digital input
channel in a task. The channel can contain multiple digital
lines.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of boolean values to hold the samples requested.
Each element in the array corresponds to a sample from
a line in the channel. The size of the array must be
large enough to hold all requested samples from the
channel in the task; otherwise, an error is thrown.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
"""
self._verify_array_digital_lines(data, False, True)
_read_digital_lines(self._handle, data, 1, timeout)
def read_one_sample_one_line(self, timeout=10):
"""
Reads a single boolean sample from a single digital input
channel in a task. The channel can contain only one digital
line.
Args:
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
bool:
Indicates a single boolean sample from the task.
"""
data = numpy.zeros(1, dtype=bool)
_read_digital_lines(self._handle, data, 1, timeout)
return bool(data[0])
def read_one_sample_port_byte(self, timeout=10):
"""
Reads a single 8-bit unsigned integer sample from a single
digital input channel in a task.
Use this method for devices with up to 8 lines per port.
Args:
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates a single 8-bit unsigned integer sample from the
task.
"""
data = numpy.zeros(1, dtype=numpy.uint8)
_read_digital_u_8(self._handle, data, 1, timeout)
return int(data[0])
def read_one_sample_port_uint16(self, timeout=10):
"""
Reads a single 16-bit unsigned integer sample from a single
digital input channel in a task.
Use this method for devices with up to 16 lines per port.
Args:
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates a single 16-bit unsigned integer sample from the
task.
"""
data = numpy.zeros(1, dtype=numpy.uint16)
_read_digital_u_16(self._handle, data, 1, timeout)
return int(data[0])
def read_one_sample_port_uint32(self, timeout=10):
"""
Reads a single 32-bit unsigned integer sample from a single
digital input channel in a task.
Use this method for devices with up to 32 lines per port.
Args:
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates a single 32-bit unsigned integer sample from the
task.
"""
return _read_digital_scalar_u_32(self._handle, timeout)
class DigitalMultiChannelReader(ChannelReaderBase):
    """
    Reads samples from one or more digital input channels in an
    NI-DAQmx task.

    All read methods accept preallocated NumPy arrays, which avoids
    per-call allocation and is valuable in continuous acquisition
    scenarios where the same buffer is reused on every call.
    """

    def read_many_sample_port_byte(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Reads one or more 8-bit unsigned integer samples from one or
        more digital input channels in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 2D NumPy
                array of 8-bit unsigned integer values to hold the
                samples requested; it must be large enough for all
                requested samples from all channels or an error is
                thrown. Rows correspond to channels (in the order the
                channels were added to the task or per the
                "channels_to_read" property); columns correspond to
                samples. If the array is oversized or mis-shaped the
                rows/columns may not line up; set the
                "verify_array_shape" property to True to validate the
                shape (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Specifies
                the number of samples to read. If set to
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                reads whatever is currently buffered, while a finite
                task waits for and reads all requested samples (unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are read).
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        Returns:
            int:
                Indicates the number of samples acquired by each
                channel; a single value is returned because it is the
                same for all channels.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, True, True)
        return _read_digital_u_8(self._handle, data, num_samps, timeout)

    def read_many_sample_port_uint16(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Reads one or more 16-bit unsigned integer samples from one or
        more digital input channels in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 2D NumPy
                array of 16-bit unsigned integer values to hold the
                samples requested; it must be large enough for all
                requested samples from all channels or an error is
                thrown. Rows correspond to channels (in the order the
                channels were added to the task or per the
                "channels_to_read" property); columns correspond to
                samples. If the array is oversized or mis-shaped the
                rows/columns may not line up; set the
                "verify_array_shape" property to True to validate the
                shape (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Specifies
                the number of samples to read. If set to
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                reads whatever is currently buffered, while a finite
                task waits for and reads all requested samples (unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are read).
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        Returns:
            int:
                Indicates the number of samples acquired by each
                channel; a single value is returned because it is the
                same for all channels.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, True, True)
        return _read_digital_u_16(self._handle, data, num_samps, timeout)

    def read_many_sample_port_uint32(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Reads one or more 32-bit unsigned integer samples from one or
        more digital input channels in a task.

        Use this method for devices with up to 32 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 2D NumPy
                array of 32-bit unsigned integer values to hold the
                samples requested; it must be large enough for all
                requested samples from all channels or an error is
                thrown. Rows correspond to channels (in the order the
                channels were added to the task or per the
                "channels_to_read" property); columns correspond to
                samples. If the array is oversized or mis-shaped the
                rows/columns may not line up; set the
                "verify_array_shape" property to True to validate the
                shape (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Specifies
                the number of samples to read. If set to
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                reads whatever is currently buffered, while a finite
                task waits for and reads all requested samples (unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are read).
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        Returns:
            int:
                Indicates the number of samples acquired by each
                channel; a single value is returned because it is the
                same for all channels.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, True, True)
        return _read_digital_u_32(self._handle, data, num_samps, timeout)

    def read_one_sample_multi_line(self, data, timeout=10):
        """
        Reads a single boolean sample from one or more digital input
        channels in a task. The channels can contain multiple digital
        lines.

        Args:
            data (numpy.ndarray): Specifies a preallocated 2D NumPy
                array of boolean values to hold the samples requested;
                it must be large enough for all requested samples from
                all channels or an error is thrown. Rows correspond to
                channels (in the order the channels were added to the
                task or per the "channels_to_read" property); columns
                correspond to lines. If the array is oversized or
                mis-shaped the rows/columns may not line up; set the
                "verify_array_shape" property to True to validate the
                shape (at a marginal performance cost).
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        """
        self._verify_array_digital_lines(data, True, True)
        _read_digital_lines(self._handle, data, 1, timeout)

    def read_one_sample_one_line(self, data, timeout=10):
        """
        Reads a single boolean sample from one or more digital input
        channels in a task. The channel can contain only one digital
        line.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of boolean values, one element per channel. It
                must be large enough to hold all requested samples or
                an error is thrown.
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        """
        self._verify_array_digital_lines(data, True, False)
        _read_digital_lines(self._handle, data, 1, timeout)

    def read_one_sample_port_byte(self, data, timeout=10):
        """
        Reads a single 8-bit unsigned integer sample from one or more
        digital input channels in a task.

        Use this method for devices with up to 8 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of 8-bit unsigned integer values, one element per
                channel. It must be large enough to hold all requested
                samples or an error is thrown.
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        """
        self._verify_array(data, 1, True, False)
        _read_digital_u_8(self._handle, data, 1, timeout)

    def read_one_sample_port_uint16(self, data, timeout=10):
        """
        Reads a single 16-bit unsigned integer sample from one or more
        digital input channels in a task.

        Use this method for devices with up to 16 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of 16-bit unsigned integer values, one element
                per channel. It must be large enough to hold all
                requested samples or an error is thrown.
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        """
        self._verify_array(data, 1, True, False)
        _read_digital_u_16(self._handle, data, 1, timeout)

    def read_one_sample_port_uint32(self, data, timeout=10):
        """
        Reads a single 32-bit unsigned integer sample from one or more
        digital input channels in a task.

        Use this method for devices with up to 32 lines per port.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of 32-bit unsigned integer values, one element
                per channel. It must be large enough to hold all
                requested samples or an error is thrown.
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        """
        self._verify_array(data, 1, True, False)
        _read_digital_u_32(self._handle, data, 1, timeout)
# nidaqmx/stream_readers.py
import numpy
from nidaqmx import DaqError
from nidaqmx.constants import READ_ALL_AVAILABLE
from nidaqmx._task_modules.read_functions import (
_read_analog_f_64, _read_analog_scalar_f_64, _read_binary_i_16,
_read_binary_i_32, _read_binary_u_16, _read_binary_u_32,
_read_digital_lines, _read_digital_u_8, _read_digital_u_16,
_read_digital_scalar_u_32, _read_digital_u_32, _read_counter_scalar_f_64,
_read_counter_scalar_u_32, _read_counter_f_64_ex, _read_counter_u_32_ex,
_read_ctr_freq_scalar, _read_ctr_ticks_scalar, _read_ctr_time_scalar,
_read_ctr_freq, _read_ctr_ticks, _read_ctr_time)
from nidaqmx.error_codes import DAQmxErrors
__all__ = ['AnalogSingleChannelReader', 'AnalogMultiChannelReader',
'AnalogUnscaledReader', 'CounterReader',
'DigitalSingleChannelReader', 'DigitalMultiChannelReader']
class ChannelReaderBase(object):
    """
    Defines base class for all NI-DAQmx stream readers.

    Holds the task/stream handles shared by every reader and the
    optional NumPy-array shape verification used by the read methods.
    """

    def __init__(self, task_in_stream):
        """
        Args:
            task_in_stream: Specifies the input stream associated with
                an NI-DAQmx task from which to read samples.
        """
        task = task_in_stream._task
        self._in_stream = task_in_stream
        self._task = task
        self._handle = task._handle
        # Shape checks are on by default; see verify_array_shape.
        self._verify_array_shape = True

    @property
    def verify_array_shape(self):
        """
        bool: Indicates whether the size and shape of the user-defined
            NumPy arrays passed to read methods are verified. Defaults
            to True when this object is instantiated.

            Setting this property to True may marginally adversely
            impact the performance of read methods.
        """
        return self._verify_array_shape

    @verify_array_shape.setter
    def verify_array_shape(self, val):
        self._verify_array_shape = val

    def _verify_array(self, data, number_of_samples_per_channel,
                      is_many_chan, is_many_samp):
        """
        Verifies that the shape of the specified NumPy array can be used
        to read multiple samples from the current task which contains
        one or more channels, if the "verify_array_shape" property is
        set to True.

        Args:
            data (numpy.ndarray): Specifies the NumPy array to verify.
            number_of_samples_per_channel (int): Specifies the number of
                samples per channel requested.
            is_many_chan (bool): Specifies if the read method is a many
                channel version.
            is_many_samp (bool): Specifies if the read method is a many
                samples version.
        """
        if not self._verify_array_shape:
            return

        num_chans = len(self._in_stream.channels_to_read.channel_names)

        # Derive the expected shape from the reader flavor; a
        # single-channel single-sample read has nothing to check.
        if is_many_chan and is_many_samp:
            expected_shape = (num_chans, number_of_samples_per_channel)
        elif is_many_chan:
            expected_shape = (num_chans,)
        elif is_many_samp:
            expected_shape = (number_of_samples_per_channel,)
        else:
            expected_shape = None

        if expected_shape is not None and data.shape != expected_shape:
            raise DaqError(
                'Read cannot be performed because the NumPy array passed into '
                'this function is not shaped correctly. You must pass in a '
                'NumPy array of the correct shape based on the number of '
                'channels in task and the number of samples per channel '
                'requested.\n\n'
                'Shape of NumPy Array provided: {0}\n'
                'Shape of NumPy Array required: {1}'
                .format(data.shape, expected_shape),
                DAQmxErrors.UNKNOWN, task_name=self._task.name)

    def _verify_array_digital_lines(
            self, data, is_many_chan, is_many_line):
        """
        Verifies that the shape of the specified NumPy array can be used
        to read samples from the current task which contains one or more
        channels that have one or more digital lines per channel, if the
        "verify_array_shape" property is set to True.

        Args:
            data (numpy.ndarray): Specifies the NumPy array to verify.
            is_many_chan (bool): Specifies if the read method is a
                many channel version.
            is_many_line (bool): Specifies if the read method is a
                many line version.
        """
        if not self._verify_array_shape:
            return

        num_chans = len(self._in_stream.channels_to_read.channel_names)
        num_lines = self._in_stream.di_num_booleans_per_chan

        # Derive the expected shape from the reader flavor; a
        # single-channel single-line read has nothing to check.
        if is_many_chan and is_many_line:
            expected_shape = (num_chans, num_lines)
        elif is_many_chan:
            expected_shape = (num_chans,)
        elif is_many_line:
            expected_shape = (num_lines,)
        else:
            expected_shape = None

        if expected_shape is not None and data.shape != expected_shape:
            raise DaqError(
                'Read cannot be performed because the NumPy array passed into '
                'this function is not shaped correctly. You must pass in a '
                'NumPy array of the correct shape based on the number of '
                'channels in task and the number of digital lines per '
                'channel.\n\n'
                'Shape of NumPy Array provided: {0}\n'
                'Shape of NumPy Array required: {1}'
                .format(data.shape, expected_shape),
                DAQmxErrors.UNKNOWN, task_name=self._task.name)
class AnalogSingleChannelReader(ChannelReaderBase):
    """
    Reads samples from an analog input channel in an NI-DAQmx task.
    """

    def read_many_sample(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Reads one or more floating-point samples from a single analog
        input channel in a task.

        Samples are written into the preallocated NumPy array you pass
        in, which avoids per-call allocation and lets continuous
        acquisition loops reuse the same buffer on every call.

        Args:
            data (numpy.ndarray): Specifies a preallocated 1D NumPy
                array of floating-point values to hold the samples
                requested. One element per sample; the array must be
                large enough to hold all requested samples from the
                channel in the task, otherwise an error is thrown.
            number_of_samples_per_channel (Optional[int]): Specifies
                the number of samples to read. If set to
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                reads whatever is currently buffered, while a finite
                task waits for and reads all requested samples (unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are read).
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        Returns:
            int:
                Indicates the number of samples acquired by each
                channel; a single value is returned because it is the
                same for all channels.
        """
        num_samps = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, num_samps, False, True)
        return _read_analog_f_64(self._handle, data, num_samps, timeout)

    def read_one_sample(self, timeout=10):
        """
        Reads a single floating-point sample from a single analog input
        channel in a task.

        Args:
            timeout (Optional[float]): Specifies the number of seconds
                to wait for samples to become available before an error
                is returned along with any samples already read.
                Defaults to 10 seconds. Use
                nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
                to attempt the read exactly once.
        Returns:
            float:
                Indicates a single floating-point sample from the task.
        """
        return _read_analog_scalar_f_64(self._handle, timeout)
class AnalogMultiChannelReader(ChannelReaderBase):
    """
    Reads samples from one or more analog input channels in an
    NI-DAQmx task.
    """

    def read_many_sample(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more floating-point samples from every analog
        input channel in the task into a caller-supplied buffer.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 2D array of
                floating-point values that receives the samples. Rows
                correspond to channels (in the order they were added
                to the task, or the order given by the
                "channels_to_read" property); columns correspond to
                samples. The array must be large enough for every
                requested sample from every channel; otherwise, an
                error is thrown. If the array is oversized or shaped
                incorrectly, samples may not land in the expected
                row/column layout; set the "verify_array_shape"
                property on this reader to True to validate the shape
                (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default,
                nidaqmx.constants.READ_ALL_AVAILABLE, the behavior
                depends on the task's sample mode: a continuous task
                returns whatever samples are currently buffered, while
                a finite task waits for all requested samples unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are returned
                without waiting.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.

        Returns:
            int: Number of samples acquired per channel (identical
            for every channel).
        """
        num_samps = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))
        self._verify_array(data, num_samps, True, True)
        return _read_analog_f_64(self._handle, data, num_samps, timeout)

    def read_one_sample(self, data, timeout=10):
        """
        Read a single floating-point sample from every analog input
        channel in the task into a caller-supplied buffer.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 1D array of
                floating-point values that receives the samples, one
                element per channel. The array must be large enough
                for the requested samples; otherwise, an error is
                thrown.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.
        """
        self._verify_array(data, 1, True, False)
        _read_analog_f_64(self._handle, data, 1, timeout)
class AnalogUnscaledReader(ChannelReaderBase):
    """
    Reads unscaled samples from one or more analog input channels in
    an NI-DAQmx task.
    """

    # Each read_* method below follows the same pattern — resolve the
    # requested sample count, validate the caller's buffer, then
    # delegate to the matching C read call for that sample dtype.

    def read_int16(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more unscaled 16-bit integer samples from one or
        more analog input channels in a task.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 2D array of unscaled
                16-bit integer values that receives the samples. Rows
                correspond to channels (in the order they were added
                to the task, or the order given by the
                "channels_to_read" property); columns correspond to
                samples. The array must be large enough for every
                requested sample from every channel; otherwise, an
                error is thrown. If the array is oversized or shaped
                incorrectly, samples may not land in the expected
                row/column layout; set the "verify_array_shape"
                property on this reader to True to validate the shape
                (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default,
                nidaqmx.constants.READ_ALL_AVAILABLE, the behavior
                depends on the task's sample mode: a continuous task
                returns whatever samples are currently buffered, while
                a finite task waits for all requested samples unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are returned
                without waiting.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.

        Returns:
            int: Number of samples acquired per channel (identical
            for every channel).
        """
        num_samps = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))
        self._verify_array(data, num_samps, True, True)
        return _read_binary_i_16(self._handle, data, num_samps, timeout)

    def read_int32(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more unscaled 32-bit integer samples from one or
        more analog input channels in a task.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 2D array of unscaled
                32-bit integer values that receives the samples. Rows
                correspond to channels (in the order they were added
                to the task, or the order given by the
                "channels_to_read" property); columns correspond to
                samples. The array must be large enough for every
                requested sample from every channel; otherwise, an
                error is thrown. If the array is oversized or shaped
                incorrectly, samples may not land in the expected
                row/column layout; set the "verify_array_shape"
                property on this reader to True to validate the shape
                (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default,
                nidaqmx.constants.READ_ALL_AVAILABLE, the behavior
                depends on the task's sample mode: a continuous task
                returns whatever samples are currently buffered, while
                a finite task waits for all requested samples unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are returned
                without waiting.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.

        Returns:
            int: Number of samples acquired per channel (identical
            for every channel).
        """
        num_samps = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))
        self._verify_array(data, num_samps, True, True)
        return _read_binary_i_32(self._handle, data, num_samps, timeout)

    def read_uint16(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more unscaled 16-bit unsigned integer samples from
        one or more analog input channels in a task.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 2D array of unscaled
                16-bit unsigned integer values that receives the
                samples. Rows correspond to channels (in the order
                they were added to the task, or the order given by the
                "channels_to_read" property); columns correspond to
                samples. The array must be large enough for every
                requested sample from every channel; otherwise, an
                error is thrown. If the array is oversized or shaped
                incorrectly, samples may not land in the expected
                row/column layout; set the "verify_array_shape"
                property on this reader to True to validate the shape
                (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default,
                nidaqmx.constants.READ_ALL_AVAILABLE, the behavior
                depends on the task's sample mode: a continuous task
                returns whatever samples are currently buffered, while
                a finite task waits for all requested samples unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are returned
                without waiting.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.

        Returns:
            int: Number of samples acquired per channel (identical
            for every channel).
        """
        num_samps = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))
        self._verify_array(data, num_samps, True, True)
        return _read_binary_u_16(self._handle, data, num_samps, timeout)

    def read_uint32(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more unscaled 32-bit unsigned integer samples from
        one or more analog input channels in a task.

        Accepting a preallocated NumPy array avoids per-call
        allocation, which is valuable in continuous-acquisition loops
        where the same buffer is reused on every read, and eases
        interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 2D array of unscaled
                32-bit unsigned integer values that receives the
                samples. Rows correspond to channels (in the order
                they were added to the task, or the order given by the
                "channels_to_read" property); columns correspond to
                samples. The array must be large enough for every
                requested sample from every channel; otherwise, an
                error is thrown. If the array is oversized or shaped
                incorrectly, samples may not land in the expected
                row/column layout; set the "verify_array_shape"
                property on this reader to True to validate the shape
                (at a marginal performance cost).
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default,
                nidaqmx.constants.READ_ALL_AVAILABLE, the behavior
                depends on the task's sample mode: a continuous task
                returns whatever samples are currently buffered, while
                a finite task waits for all requested samples unless
                the "read_all_avail_samp" property is True, in which
                case only the currently buffered samples are returned
                without waiting.
            timeout (Optional[float]): Seconds to wait for samples
                (default 10). Use nidaqmx.constants.WAIT_INFINITELY to
                wait forever, or 0 to try exactly once; on expiry the
                method raises an error along with any samples already
                read.

        Returns:
            int: Number of samples acquired per channel (identical
            for every channel).
        """
        num_samps = (
            self._task._calculate_num_samps_per_chan(
                number_of_samples_per_channel))
        self._verify_array(data, num_samps, True, True)
        return _read_binary_u_32(self._handle, data, num_samps, timeout)
class CounterReader(ChannelReaderBase):
"""
Reads samples from a counter input channel in an NI-DAQmx task.
"""
def read_many_sample_double(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more floating-point samples from a single counter
input channel in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of floating-point values to hold the samples
requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, False, True)
return _read_counter_f_64_ex(
self._handle, data, number_of_samples_per_channel,
timeout)
def read_many_sample_pulse_frequency(
self, frequencies, duty_cycles,
number_of_samples_per_channel=READ_ALL_AVAILABLE, timeout=10.0):
"""
Reads one or more pulse samples in terms of frequency from a
single counter input channel in a task.
This read method accepts preallocated NumPy arrays to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in preallocated arrays is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
frequencies (numpy.ndarray): Specifies a preallocated 1D
NumPy array of floating-point values to hold the frequency
portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
duty_cycles (numpy.ndarray): Specifies a preallocated 1D
NumPy array of floating-point values to hold the duty
cycle portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(
frequencies, number_of_samples_per_channel, False, True)
self._verify_array(
duty_cycles, number_of_samples_per_channel, False, True)
return _read_ctr_freq(
self._handle, frequencies, duty_cycles,
number_of_samples_per_channel, timeout)
def read_many_sample_pulse_ticks(
self, high_ticks, low_ticks,
number_of_samples_per_channel=READ_ALL_AVAILABLE, timeout=10.0):
"""
Reads one or more pulse samples in terms of ticks from a single
counter input channel in a task.
This read method accepts preallocated NumPy arrays to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in preallocated arrays is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
high_ticks (numpy.ndarray): Specifies a preallocated 1D
NumPy array of 32-bit unsigned integer values to hold
the high ticks portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
low_ticks (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 32-bit unsigned integer values to hold the low
ticks portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(
high_ticks, number_of_samples_per_channel, False, True)
self._verify_array(
low_ticks, number_of_samples_per_channel, False, True)
return _read_ctr_ticks(
self._handle, high_ticks, low_ticks,
number_of_samples_per_channel, timeout)
def read_many_sample_pulse_time(
self, high_times, low_times,
number_of_samples_per_channel=READ_ALL_AVAILABLE, timeout=10.0):
"""
Reads one or more pulse samples in terms of time from a single
counter input channel in a task.
This read method accepts preallocated NumPy arrays to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in preallocated arrays is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
high_times (numpy.ndarray): Specifies a preallocated 1D
NumPy array of floating-point values to hold the high
time portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
low_times (numpy.ndarray): Specifies a preallocated 1D
NumPy array of floating-point values to hold the low
time portion of the pulse samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(
high_times, number_of_samples_per_channel, False, True)
self._verify_array(
low_times, number_of_samples_per_channel, False, True)
return _read_ctr_time(
self._handle, high_times, low_times,
number_of_samples_per_channel, timeout)
def read_many_sample_uint32(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 32-bit unsigned integer samples from a single
counter input channel in a task.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 32-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
the channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, False, True)
return _read_counter_u_32_ex(
self._handle, data, number_of_samples_per_channel,
timeout)
def read_one_sample_double(self, timeout=10):
    """
    Read a single floating-point sample from the task's single
    counter input channel.

    Args:
        timeout (Optional[float]): Seconds to wait for the sample to
            become available. On expiry an error is returned along
            with any data already read. Defaults to 10. Use
            nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
            for a single non-blocking attempt.

    Returns:
        float: The sample read from the task.
    """
    # Delegate directly to the scalar 64-bit float read routine.
    value = _read_counter_scalar_f_64(self._handle, timeout)
    return value
def read_one_sample_pulse_frequency(self, timeout=10):
    """
    Read a single pulse sample, expressed as frequency and duty
    cycle, from the task's single counter input channel.

    Args:
        timeout (Optional[float]): Seconds to wait for the sample to
            become available. On expiry an error is returned along
            with any data already read. Defaults to 10. Use
            nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
            for a single non-blocking attempt.

    Returns:
        nidaqmx.types.CtrFreq: The pulse sample in terms of
        frequency.
    """
    # Delegate to the frequency-flavored scalar pulse read routine.
    sample = _read_ctr_freq_scalar(self._handle, timeout)
    return sample
def read_one_sample_pulse_ticks(self, timeout=10):
    """
    Read a single pulse sample, expressed in timebase ticks, from the
    task's single counter input channel.

    Args:
        timeout (Optional[float]): Seconds to wait for the sample to
            become available. On expiry an error is returned along
            with any data already read. Defaults to 10. Use
            nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
            for a single non-blocking attempt.

    Returns:
        nidaqmx.types.CtrTick: The pulse sample in terms of ticks.
    """
    # Delegate to the tick-flavored scalar pulse read routine.
    sample = _read_ctr_ticks_scalar(self._handle, timeout)
    return sample
def read_one_sample_pulse_time(self, timeout=10):
    """
    Read a single pulse sample, expressed as high/low time, from the
    task's single counter input channel.

    Args:
        timeout (Optional[float]): Seconds to wait for the sample to
            become available. On expiry an error is returned along
            with any data already read. Defaults to 10. Use
            nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
            for a single non-blocking attempt.

    Returns:
        nidaqmx.types.CtrTime: The pulse sample in terms of time.
    """
    # Delegate to the time-flavored scalar pulse read routine.
    sample = _read_ctr_time_scalar(self._handle, timeout)
    return sample
def read_one_sample_uint32(self, timeout=10):
    """
    Read a single 32-bit unsigned integer sample from the task's
    single counter input channel.

    Args:
        timeout (Optional[float]): Seconds to wait for the sample to
            become available. On expiry an error is returned along
            with any data already read. Defaults to 10. Use
            nidaqmx.constants.WAIT_INFINITELY to wait forever, or 0
            for a single non-blocking attempt.

    Returns:
        int: The 32-bit unsigned integer sample read from the task.
    """
    # Delegate directly to the scalar 32-bit unsigned read routine.
    value = _read_counter_scalar_u_32(self._handle, timeout)
    return value
class DigitalSingleChannelReader(ChannelReaderBase):
    """
    Reads samples from a digital input channel in an NI-DAQmx task.
    """

    def _read_many_port(self, reader, data, number_of_samples_per_channel,
                        timeout):
        # Common path for the many-sample port reads: resolve the actual
        # sample count, validate the caller-supplied buffer for a
        # single-channel many-sample read, then delegate to the matching
        # C read routine. Returns the per-channel sample count.
        samps_per_chan = self._task._calculate_num_samps_per_chan(
            number_of_samples_per_channel)
        self._verify_array(data, samps_per_chan, False, True)
        return reader(self._handle, data, samps_per_chan, timeout)

    def _read_one_port(self, reader, dtype, timeout):
        # Common path for the single-sample port reads: read one value
        # into a one-element scratch buffer and hand it back as an int.
        scratch = numpy.zeros(1, dtype=dtype)
        reader(self._handle, scratch, 1, timeout)
        return int(scratch[0])

    def read_many_sample_port_byte(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more 8-bit unsigned integer samples from a single
        digital input channel.

        Intended for devices with up to 8 lines per port. The caller
        supplies a preallocated 1D NumPy array, which allows the same
        buffer to be reused on every call in continuous acquisition
        loops and eases interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 1D array of 8-bit
                unsigned integers, one element per sample. It must be
                large enough for every requested sample; otherwise an
                error is thrown.
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                returns whatever is currently buffered, while a finite
                task waits for all requested samples unless the
                "read_all_avail_samp" property is True, in which case
                only the currently buffered samples are returned.
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: Number of samples acquired from each channel (a single
            value, identical for all channels).
        """
        return self._read_many_port(
            _read_digital_u_8, data, number_of_samples_per_channel, timeout)

    def read_many_sample_port_uint16(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more 16-bit unsigned integer samples from a single
        digital input channel.

        Intended for devices with up to 16 lines per port. The caller
        supplies a preallocated 1D NumPy array, which allows the same
        buffer to be reused on every call in continuous acquisition
        loops and eases interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 1D array of 16-bit
                unsigned integers, one element per sample. It must be
                large enough for every requested sample; otherwise an
                error is thrown.
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                returns whatever is currently buffered, while a finite
                task waits for all requested samples unless the
                "read_all_avail_samp" property is True, in which case
                only the currently buffered samples are returned.
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: Number of samples acquired from each channel (a single
            value, identical for all channels).
        """
        return self._read_many_port(
            _read_digital_u_16, data, number_of_samples_per_channel, timeout)

    def read_many_sample_port_uint32(
            self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
            timeout=10.0):
        """
        Read one or more 32-bit unsigned integer samples from a single
        digital input channel.

        Intended for devices with up to 32 lines per port. The caller
        supplies a preallocated 1D NumPy array, which allows the same
        buffer to be reused on every call in continuous acquisition
        loops and eases interoperability with NumPy and SciPy.

        Args:
            data (numpy.ndarray): Preallocated 1D array of 32-bit
                unsigned integers, one element per sample. It must be
                large enough for every requested sample; otherwise an
                error is thrown.
            number_of_samples_per_channel (Optional[int]): Number of
                samples to read. With the default
                nidaqmx.constants.READ_ALL_AVAILABLE, a continuous task
                returns whatever is currently buffered, while a finite
                task waits for all requested samples unless the
                "read_all_avail_samp" property is True, in which case
                only the currently buffered samples are returned.
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: Number of samples acquired from each channel (a single
            value, identical for all channels).
        """
        return self._read_many_port(
            _read_digital_u_32, data, number_of_samples_per_channel, timeout)

    def read_one_sample_multi_line(self, data, timeout=10):
        """
        Read a single boolean sample from a single digital input
        channel that may span multiple digital lines.

        The caller supplies a preallocated 1D NumPy array, which allows
        the same buffer to be reused on every call in continuous
        acquisition loops and eases interoperability with NumPy and
        SciPy.

        Args:
            data (numpy.ndarray): Preallocated 1D array of booleans,
                one element per line in the channel. It must be large
                enough for every requested sample; otherwise an error
                is thrown.
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.
        """
        # Validate a single-channel, many-line buffer, then read one
        # sample per line in place.
        self._verify_array_digital_lines(data, False, True)
        _read_digital_lines(self._handle, data, 1, timeout)

    def read_one_sample_one_line(self, timeout=10):
        """
        Read a single boolean sample from a single digital input
        channel containing exactly one digital line.

        Args:
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            bool: The boolean sample read from the task.
        """
        # One-element scratch buffer; the line's state lands in slot 0.
        scratch = numpy.zeros(1, dtype=bool)
        _read_digital_lines(self._handle, scratch, 1, timeout)
        return bool(scratch[0])

    def read_one_sample_port_byte(self, timeout=10):
        """
        Read a single 8-bit unsigned integer sample from a single
        digital input channel.

        Intended for devices with up to 8 lines per port.

        Args:
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: The 8-bit unsigned integer sample read from the task.
        """
        return self._read_one_port(_read_digital_u_8, numpy.uint8, timeout)

    def read_one_sample_port_uint16(self, timeout=10):
        """
        Read a single 16-bit unsigned integer sample from a single
        digital input channel.

        Intended for devices with up to 16 lines per port.

        Args:
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: The 16-bit unsigned integer sample read from the
            task.
        """
        return self._read_one_port(_read_digital_u_16, numpy.uint16, timeout)

    def read_one_sample_port_uint32(self, timeout=10):
        """
        Read a single 32-bit unsigned integer sample from a single
        digital input channel.

        Intended for devices with up to 32 lines per port.

        Args:
            timeout (Optional[float]): Seconds to wait for samples
                before erroring out along with any data already read;
                defaults to 10. Use nidaqmx.constants.WAIT_INFINITELY
                to wait forever, or 0 for a single non-blocking
                attempt.

        Returns:
            int: The 32-bit unsigned integer sample read from the
            task.
        """
        # A dedicated scalar C routine exists for the 32-bit case, so no
        # scratch buffer is needed here.
        return _read_digital_scalar_u_32(self._handle, timeout)
class DigitalMultiChannelReader(ChannelReaderBase):
"""
Reads samples from one or more digital input channels in an NI-DAQmx
task.
"""
def read_many_sample_port_byte(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 8-bit unsigned integer samples from one or
more digital input channel in a task.
Use this method for devices with up to 8 lines per port.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of 8-bit unsigned integer values to hold the
samples requested. The size of the array must be large
enough to hold all requested samples from all channels
in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_digital_u_8(
self._handle, data, number_of_samples_per_channel, timeout)
def read_many_sample_port_uint16(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 16-bit unsigned integer samples from one or
more digital input channels in a task.
Use this method for devices with up to 16 lines per port.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of 16-bit unsigned integer values to hold the
samples requested. The size of the array must be large
enough to hold all requested samples from all channels
in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_digital_u_16(
self._handle, data, number_of_samples_per_channel, timeout)
def read_many_sample_port_uint32(
self, data, number_of_samples_per_channel=READ_ALL_AVAILABLE,
timeout=10.0):
"""
Reads one or more 32-bit unsigned integer samples from one or
more digital input channels in a task.
Use this method for devices with up to 32 lines per port.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of 32-bit unsigned integer values to hold the
samples requested. The size of the array must be large
enough to hold all requested samples from all channels
in the task; otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a sample from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
number_of_samples_per_channel (Optional[int]): Specifies the
number of samples to read.
If you set this input to nidaqmx.constants.
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples
to read based on if the task acquires samples
continuously or acquires a finite number of samples.
If the task acquires samples continuously and you set
this input to nidaqmx.constants.READ_ALL_AVAILABLE, this
method reads all the samples currently available in the
buffer.
If the task acquires a finite number of samples and you
set this input to nidaqmx.constants.READ_ALL_AVAILABLE,
the method waits for the task to acquire all requested
samples, then reads those samples. If you set the
"read_all_avail_samp" property to True, the method reads
the samples currently available in the buffer and does
not wait for the task to acquire all requested samples.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
Returns:
int:
Indicates the number of samples acquired by each channel.
NI-DAQmx returns a single value because this value is the
same for all channels.
"""
number_of_samples_per_channel = (
self._task._calculate_num_samps_per_chan(
number_of_samples_per_channel))
self._verify_array(data, number_of_samples_per_channel, True, True)
return _read_digital_u_32(
self._handle, data, number_of_samples_per_channel, timeout)
def read_one_sample_multi_line(self, data, timeout=10):
"""
Reads a single boolean sample from one or more digital input
channels in a task. The channels can contain multiple digital
lines.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 2D NumPy
array of boolean values to hold the samples requested.
The size of the array must be large enough to hold all
requested samples from all channels in the task;
otherwise, an error is thrown.
Each row corresponds to a channel in the task. Each
column corresponds to a line from each channel. The
order of the channels in the array corresponds to the
order in which you add the channels to the task or to
the order of the channels you specify with the
"channels_to_read" property.
If the size of the array is too large or the array is
shaped incorrectly, the previous statement may not hold
true as the samples read may not be separated into rows
and columns properly. Set the "verify_array_shape"
property on this channel reader object to True to
validate that the NumPy array object is shaped properly.
Setting this property may marginally adversely impact
the performance of the method.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
"""
self._verify_array_digital_lines(data, True, True)
_read_digital_lines(self._handle, data, 1, timeout)
def read_one_sample_one_line(self, data, timeout=10):
"""
Reads a single boolean sample from one or more digital input
channels in a task. The channel can contain only one digital
line.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of boolean values to hold the samples requested.
Each element in the array corresponds to a sample from
each channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
"""
self._verify_array_digital_lines(data, True, False)
_read_digital_lines(self._handle, data, 1, timeout)
def read_one_sample_port_byte(self, data, timeout=10):
"""
Reads a single 8-bit unsigned integer sample from one or more
digital input channels in a task.
Use this method for devices with up to 8 lines per port.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 8-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
each channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
"""
self._verify_array(data, 1, True, False)
_read_digital_u_8(self._handle, data, 1, timeout)
def read_one_sample_port_uint16(self, data, timeout=10):
"""
Reads a single 16-bit unsigned integer sample from one or more
digital input channels in a task.
Use this method for devices with up to 16 lines per port.
This read method accepts a preallocated NumPy array to hold the
samples requested, which can be advantageous for performance and
interoperability with NumPy and SciPy.
Passing in a preallocated array is valuable in continuous
acquisition scenarios, where the same array can be used
repeatedly in each call to the method.
Args:
data (numpy.ndarray): Specifies a preallocated 1D NumPy
array of 16-bit unsigned integer values to hold the
samples requested.
Each element in the array corresponds to a sample from
each channel. The size of the array must be large enough
to hold all requested samples from the channel in the
task; otherwise, an error is thrown.
timeout (Optional[float]): Specifies the amount of time in
seconds to wait for samples to become available. If the
time elapses, the method returns an error and any
samples read before the timeout elapsed. The default
timeout is 10 seconds. If you set timeout to
nidaqmx.constants.WAIT_INFINITELY, the method waits
indefinitely. If you set timeout to 0, the method tries
once to read the requested samples and returns an error
if it is unable to.
"""
self._verify_array(data, 1, True, False)
_read_digital_u_16(self._handle, data, 1, timeout)
def read_one_sample_port_uint32(self, data, timeout=10):
    """
    Reads a single 32-bit unsigned integer sample from one or more
    digital input channels in a task.

    Intended for devices with no more than 32 lines per port.

    A preallocated NumPy array receives the samples, which helps
    performance and NumPy/SciPy interoperability: in continuous
    acquisition the very same array may be handed to every call of
    this method instead of allocating a fresh one each time.

    Args:
        data (numpy.ndarray): Specifies a preallocated 1D NumPy
            array of 32-bit unsigned integer values to hold the
            samples requested.

            Each element in the array corresponds to a sample from
            each channel. The size of the array must be large enough
            to hold all requested samples from the channel in the
            task; otherwise, an error is thrown.
        timeout (Optional[float]): Specifies the amount of time in
            seconds to wait for samples to become available. If the
            time elapses, the method returns an error and any
            samples read before the timeout elapsed. The default
            timeout is 10 seconds. If you set timeout to
            nidaqmx.constants.WAIT_INFINITELY, the method waits
            indefinitely. If you set timeout to 0, the method tries
            once to read the requested samples and returns an error
            if it is unable to.
    """
    # Exactly one sample per channel is requested by this method.
    number_of_samples_per_channel = 1
    self._verify_array(data, number_of_samples_per_channel, True, False)
    _read_digital_u_32(
        self._handle, data, number_of_samples_per_channel, timeout)
|
en
| 0.849678
|
Defines base class for all NI-DAQmx stream readers. Args: task_in_stream: Specifies the input stream associated with an NI-DAQmx task from which to read samples. bool: Indicates whether the size and shape of the user-defined NumPy arrays passed to read methods are verified. Defaults to True when this object is instantiated. Setting this property to True may marginally adversely impact the performance of read methods. Verifies that the shape of the specified NumPy array can be used to read multiple samples from the current task which contains one or more channels, if the "verify_array_shape" property is set to True. Args: data (numpy.ndarray): Specifies the NumPy array to verify. number_of_samples_per_channel (int): Specifies the number of samples per channel requested. is_many_chan (bool): Specifies if the read method is a many channel version. is_many_samp (bool): Specifies if the read method is a many samples version. Verifies that the shape of the specified NumPy array can be used to read samples from the current task which contains one or more channels that have one or more digital lines per channel, if the "verify_array_shape" property is set to True. Args: data (numpy.ndarray): Specifies the NumPy array to verify. is_many_chan (bool): Specifies if the read method is a many channel version. is_many_line (bool): Specifies if the read method is a many line version. Reads samples from an analog input channel in an NI-DAQmx task. Reads one or more floating-point samples from a single analog input channel in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the samples requested. 
Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads a single floating-point sample from a single analog input channel in a task. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. 
If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: float: Indicates a single floating-point sample from the task. Reads samples from one or more analog input channels in an NI-DAQmx task. Reads one or more floating-point samples from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of floating-point values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property to True may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. 
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads a single floating-point sample from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the samples requested. Each element in the array corresponds to a sample from each channel. 
The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads unscaled samples from one or more analog input channels in an NI-DAQmx task. Reads one or more unscaled 16-bit integer samples from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of unscaled 16-bit integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. 
Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more unscaled 32-bit integer samples from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. 
Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of unscaled 32-bit integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. 
If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more unscaled 16-bit unsigned integer samples from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of unscaled 16-bit unsigned integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. 
If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more unscaled unsigned 32-bit integer samples from one or more analog input channels in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of unscaled 32-bit unsigned integer values to hold the samples requested. 
The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. 
If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads samples from a counter input channel in an NI-DAQmx task. Reads one or more floating-point samples from a single counter input channel in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. 
If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more pulse samples in terms of frequency from a single counter input channel in a task. This read method accepts preallocated NumPy arrays to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in preallocated arrays is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: frequencies (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the frequency portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. duty_cycles (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the duty cycle portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. 
number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more pulse samples in terms of ticks from a single counter input channel in a task. This read method accepts preallocated NumPy arrays to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in preallocated arrays is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. 
Args: high_ticks (numpy.ndarray): Specifies a preallocated 1D NumPy array of 32-bit unsigned integer values to hold the high ticks portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. low_ticks (numpy.ndarray): Specifies a preallocated 1D NumPy array of 32-bit unsigned integer values to hold the low ticks portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. 
If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more pulse samples in terms of time from a single counter input channel in a task. This read method accepts preallocated NumPy arrays to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in preallocated arrays is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: high_times (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the high time portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. low_times (numpy.ndarray): Specifies a preallocated 1D NumPy array of floating-point values to hold the low time portion of the pulse samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. 
If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more 32-bit unsigned integer samples from a single counter input channel in a task. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 32-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. 
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads a single floating-point sample from a single counter input channel in a task. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: float: Indicates a single floating-point sample from the task. 
Reads a pulse sample in terms of frequency from a single counter input channel in a task. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: nidaqmx.types.CtrFreq: Indicates a pulse sample in terms of frequency from the task. Reads a pulse sample in terms of ticks from a single counter input channel in a task. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: nidaqmx.types.CtrTick: Indicates a pulse sample in terms of ticks from the task. Reads a pulse sample in terms of time from a single counter input channel in a task. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: nidaqmx.types.CtrTime: Indicates a pulse sample in terms of time from the task. Reads a single 32-bit unsigned integer sample from a single counter input channel in a task. 
Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates a single 32-bit unsigned integer sample from the task. Reads samples from a digital input channel in an NI-DAQmx task. Reads one or more 8-bit unsigned integer samples from a single digital input channel in a task. Use this method for devices with up to 8 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 8-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. 
If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more 16-bit unsigned integer samples from a single digital input channel in a task. Use this method for devices with up to 16 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 16-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. 
READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more 32-bit unsigned integer samples from a single digital input channel in a task. Use this method for devices with up to 32 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 32-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from the channel. 
The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads a single boolean sample from a single digital input channel in a task. The channel can contain multiple digital lines. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. 
Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of boolean values to hold the samples requested. Each element in the array corresponds to a sample from a line in the channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads a single boolean sample from a single digital input channel in a task. The channel can contain only one digital line. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: bool: Indicates a single boolean sample from the task. Reads a single 8-bit unsigned integer sample from a single digital input channel in a task. Use this method for devices with up to 8 lines per port. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. 
If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates a single 8-bit unsigned integer sample from the task. Reads a single 16-bit unsigned integer sample from a single digital input channel in a task. Use this method for devices with up to 16 lines per port. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates a single 16-bit unsigned integer sample from the task. Reads a single 32-bit unsigned integer sample from a single digital input channel in a task. Use this method for devices with up to 32 lines per port. Args: timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates a single 32-bit unsigned integer sample from the task. Reads samples from one or more digital input channels in an NI-DAQmx task. Reads one or more 8-bit unsigned integer samples from one or more digital input channel in a task. Use this method for devices with up to 8 lines per port. 
This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of 8-bit unsigned integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. 
If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more 16-bit unsigned integer samples from one or more digital input channels in a task. Use this method for devices with up to 16 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of 16-bit unsigned integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. 
If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads one or more 32-bit unsigned integer samples from one or more digital input channels in a task. 
Use this method for devices with up to 32 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of 32-bit unsigned integer values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a sample from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. number_of_samples_per_channel (Optional[int]): Specifies the number of samples to read. If you set this input to nidaqmx.constants. READ_ALL_AVAILABLE, NI-DAQmx determines how many samples to read based on if the task acquires samples continuously or acquires a finite number of samples. If the task acquires samples continuously and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, this method reads all the samples currently available in the buffer. 
If the task acquires a finite number of samples and you set this input to nidaqmx.constants.READ_ALL_AVAILABLE, the method waits for the task to acquire all requested samples, then reads those samples. If you set the "read_all_avail_samp" property to True, the method reads the samples currently available in the buffer and does not wait for the task to acquire all requested samples. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Returns: int: Indicates the number of samples acquired by each channel. NI-DAQmx returns a single value because this value is the same for all channels. Reads a single boolean sample from one or more digital input channels in a task. The channels can contain multiple digital lines. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 2D NumPy array of boolean values to hold the samples requested. The size of the array must be large enough to hold all requested samples from all channels in the task; otherwise, an error is thrown. Each row corresponds to a channel in the task. Each column corresponds to a line from each channel. The order of the channels in the array corresponds to the order in which you add the channels to the task or to the order of the channels you specify with the "channels_to_read" property. 
If the size of the array is too large or the array is shaped incorrectly, the previous statement may not hold true as the samples read may not be separated into rows and columns properly. Set the "verify_array_shape" property on this channel reader object to True to validate that the NumPy array object is shaped properly. Setting this property may marginally adversely impact the performance of the method. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads a single boolean sample from one or more digital input channels in a task. The channel can contain only one digital line. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of boolean values to hold the samples requested. Each element in the array corresponds to a sample from each channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. 
If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads a single 8-bit unsigned integer sample from one or more digital input channels in a task. Use this method for devices with up to 8 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 8-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from each channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads a single 16-bit unsigned integer sample from one or more digital input channels in a task. Use this method for devices with up to 16 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 16-bit unsigned integer values to hold the samples requested. 
Each element in the array corresponds to a sample from each channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to. Reads a single 32-bit unsigned integer sample from one or more digital input channels in a task. Use this method for devices with up to 32 lines per port. This read method accepts a preallocated NumPy array to hold the samples requested, which can be advantageous for performance and interoperability with NumPy and SciPy. Passing in a preallocated array is valuable in continuous acquisition scenarios, where the same array can be used repeatedly in each call to the method. Args: data (numpy.ndarray): Specifies a preallocated 1D NumPy array of 32-bit unsigned integer values to hold the samples requested. Each element in the array corresponds to a sample from each channel. The size of the array must be large enough to hold all requested samples from the channel in the task; otherwise, an error is thrown. timeout (Optional[float]): Specifies the amount of time in seconds to wait for samples to become available. If the time elapses, the method returns an error and any samples read before the timeout elapsed. The default timeout is 10 seconds. If you set timeout to nidaqmx.constants.WAIT_INFINITELY, the method waits indefinitely. If you set timeout to 0, the method tries once to read the requested samples and returns an error if it is unable to.
| 2.321577
| 2
|
examples/animations/house.py
|
colinmford/coldtype
| 0
|
6627378
|
<gh_stars>0
from coldtype import *
from coldtype.time.midi import *
wav = __sibling__("media/house.wav")
obvs = Font("assets/ColdtypeObviously-VF.ttf")
midi = Path("examples/animations/media/house.mid").resolve()
drums = MidiReader(midi, duration=60, bpm=120)[0]
@animation(duration=drums.duration, storyboard=[0], bg=0.1)
def render(f):
    """Render one frame of the MIDI-driven type animation.

    Per-frame envelope values read from the drum track drive the
    variable-font width, tracking, rotation, contour offsets, and
    fill hues of the words "COLD" and "TYPE".
    """
    # Envelopes sampled from the drum MIDI track for this frame.
    kick_amt = drums.fv(f.i, [36], [12, 10]).ease()
    snare_amt = drums.fv(f.i, [38], [4, 35]).ease()
    hats = drums.fv(f.i, [42], [0, 1]).count
    bell_amt = drums.fv(f.i, [48], [0, 50]).ease()

    base = Style(obvs, 500, tu=-150, r=1, ro=1)
    # Snare narrows "COLD"; kick tightens and tilts "TYPE".
    cold = StyledString("COLD", base.mod(wdth=1 - snare_amt * 0.5)).pens()
    typ = StyledString("TYPE", base.mod(tu=-150 - 100 * kick_amt, rotate=-8 * kick_amt)).pens()

    # Swap the palette partway through the bar, using the hi-hat
    # count as a clock.
    before_change = hats < 3
    cold.f(hsl(0.6 if before_change else 0.75, 0.75, 0.5))
    typ.f(hsl(0.05 if before_change else 0.9, 0.75, 0.5))

    # Spin the counter of each "O" and nudge the bowl of each "P"
    # in step with the hat count / cowbell envelope.
    for glyph in cold.glyphs_named("O"):
        glyph.mod_contour(1, lambda c: c.rotate(hats * -45))
    for glyph in typ.glyphs_named("P"):
        glyph.mod_contour(1, lambda c: c.translate(70 * bell_amt, 0))

    frame_rect = f.a.r.inset(0, 150)
    return DATPens([
        cold.align(frame_rect, y="maxy").understroke(sw=10),
        typ.align(frame_rect, y="miny").understroke(sw=10).translate(-15, 0),
    ]).rotate(5)
|
from coldtype import *
from coldtype.time.midi import *
wav = __sibling__("media/house.wav")
obvs = Font("assets/ColdtypeObviously-VF.ttf")
midi = Path("examples/animations/media/house.mid").resolve()
drums = MidiReader(midi, duration=60, bpm=120)[0]
@animation(duration=drums.duration, storyboard=[0], bg=0.1)
def render(f):
kick = drums.fv(f.i, [36], [12, 10]).ease()
snare = drums.fv(f.i, [38], [4, 35]).ease()
hat_count = drums.fv(f.i, [42], [0, 1]).count
cowbell = drums.fv(f.i, [48], [0, 50]).ease()
style = Style(obvs, 500, tu=-150, r=1, ro=1)
cold_pens = StyledString("COLD", style.mod(wdth=1-snare*0.5)).pens()
type_pens = StyledString("TYPE", style.mod(tu=-150-100*kick, rotate=-8*kick)).pens()
chord_change = hat_count < 3
cold_pens.f(hsl(0.6 if chord_change else 0.75, 0.75, 0.5))
type_pens.f(hsl(0.05 if chord_change else 0.9, 0.75, 0.5))
for o in cold_pens.glyphs_named("O"):
o.mod_contour(1, lambda c: c.rotate(hat_count*-45))
for p in type_pens.glyphs_named("P"):
p.mod_contour(1, lambda c: c.translate(70*cowbell, 0))
r = f.a.r.inset(0, 150)
return DATPens([
cold_pens.align(r, y="maxy").understroke(sw=10),
type_pens.align(r, y="miny").understroke(sw=10).translate(-15, 0)
]).rotate(5)
|
none
| 1
| 2.161803
| 2
|
|
suplib/config.py
|
chasemp/sup
| 1
|
6627379
|
<reponame>chasemp/sup
import os
import ConfigParser
# User's home directory; only used by the commented-out debug print below
home = os.path.expanduser('~')
#print os.path.join(home, '.sup.ini')
parser = ConfigParser.SafeConfigParser()
# NOTE(review): hard-coded absolute path -- presumably should be
# os.path.join(home, '.sup.ini'); confirm before changing
parser.read('/Users/rush/.sup.ini')
def get_config_key(mode, key, strict=False):
    """Look up *key* in section *mode* of the sup config file.

    Falls back to the 'default' section when *mode* is None or names a
    section that does not exist.  Returns None when the option (or section)
    is missing, unless *strict* is true, in which case the underlying
    ConfigParser error is re-raised.
    """
    # Fix: previously, when mode was None and no 'default' section existed,
    # mode stayed None and parser.get(None, ...) was attempted.  Collapse
    # both fallback conditions into one unconditional default.
    if mode is None or not parser.has_section(mode):
        mode = 'default'
    try:
        cfgvalue = parser.get(mode, key)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        if strict:
            raise
        cfgvalue = None
    return cfgvalue
|
import os
import ConfigParser
home = os.path.expanduser('~')
#print os.path.join(home, '.sup.ini')
parser = ConfigParser.SafeConfigParser()
parser.read('/Users/rush/.sup.ini')
def get_config_key(mode, key, strict=False):
if mode is None:
if parser.has_section('default'):
mode = 'default'
elif not parser.has_section(mode):
mode = 'default'
try:
cfgvalue = parser.get(mode, key)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
if strict:
raise
cfgvalue = None
return cfgvalue
|
en
| 0.222215
|
#print os.path.join(home, '.sup.ini')
| 2.34839
| 2
|
qtplot/linecut.py
|
geresdi/qtplot
| 15
|
6627380
|
<reponame>geresdi/qtplot<gh_stars>10-100
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import textwrap
from itertools import cycle
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg, NavigationToolbar2QT
from PyQt4 import QtGui, QtCore
from .util import FixedOrderFormatter, eng_format
class Linetrace(plt.Line2D):
    """
    Represents a linetrace from the data. The purpose of this class is
    to be able to store incremental linetraces in an array.
    x/y: Arrays containing x and y data
    row_numbers: Row indices into the original data file, one per point
    type: Type of linetrace, 'horizontal' or 'vertical'
    position: The x/y coordinate at which the linetrace was taken
    """
    def __init__(self, x, y, row_numbers, type, position, **kwargs):
        """Extra **kwargs (color, picker, linestyle, ...) are forwarded to plt.Line2D."""
        plt.Line2D.__init__(self, x, y, **kwargs)
        self.row_numbers = row_numbers
        self.type = type
        self.position = position
class Linecut(QtGui.QDialog):
    """Dialog that plots linecuts (1D slices) taken from the main 2D dataset.

    Traces can be overlaid incrementally with a vertical offset, copied or
    saved as data/figure, pasted into PowerPoint (Windows only), and
    individual datapoints can be inspected by clicking them on the canvas.
    """
    def __init__(self, main=None):
        """main: the main application window (provides dat_file, profile_settings, abs_filename)."""
        super(Linecut, self).__init__(None)
        self.main = main
        self.fig, self.ax = plt.subplots()
        # Current trace data; None until the first linecut is plotted
        self.x, self.y = None, None
        # All Linetrace artists currently drawn on the axes
        self.linetraces = []
        # Artist highlighting the currently picked datapoint (or None)
        self.marker = None
        # Color sequence used for incremental traces
        self.colors = cycle('bgrcmykw')
        self.ax.xaxis.set_major_formatter(FixedOrderFormatter())
        self.ax.yaxis.set_major_formatter(FixedOrderFormatter())
        self.init_ui()
    def init_ui(self):
        """Create all widgets, wire up signals, and lay out the dialog."""
        self.setWindowTitle("Linecut")
        # Don't show this window in the taskbar
        self.setWindowFlags(QtCore.Qt.Tool)
        self.canvas = FigureCanvasQTAgg(self.fig)
        self.canvas.mpl_connect('pick_event', self.on_pick)
        self.canvas.mpl_connect('button_press_event', self.on_press)
        self.toolbar = NavigationToolbar2QT(self.canvas, self)
        # Row of export-related controls
        hbox_export = QtGui.QHBoxLayout()
        self.cb_reset_cmap = QtGui.QCheckBox('Reset on plot')
        self.cb_reset_cmap.setCheckState(QtCore.Qt.Checked)
        hbox_export.addWidget(self.cb_reset_cmap)
        self.b_save = QtGui.QPushButton('Copy data', self)
        self.b_save.clicked.connect(self.on_data_to_clipboard)
        hbox_export.addWidget(self.b_save)
        self.b_copy = QtGui.QPushButton('Copy figure', self)
        self.b_copy.clicked.connect(self.on_figure_to_clipboard)
        QtGui.QShortcut(QtGui.QKeySequence("Ctrl+C"),
                        self, self.on_figure_to_clipboard)
        hbox_export.addWidget(self.b_copy)
        self.b_to_ppt = QtGui.QPushButton('To PPT (Win)', self)
        self.b_to_ppt.clicked.connect(self.on_to_ppt)
        hbox_export.addWidget(self.b_to_ppt)
        self.b_save_dat = QtGui.QPushButton('Save data...', self)
        self.b_save_dat.clicked.connect(self.on_save)
        hbox_export.addWidget(self.b_save_dat)
        self.b_toggle_info = QtGui.QPushButton('Toggle info')
        self.b_toggle_info.clicked.connect(self.on_toggle_datapoint_info)
        hbox_export.addWidget(self.b_toggle_info)
        # Linecuts
        hbox_linecuts = QtGui.QHBoxLayout()
        hbox_linecuts.addWidget(QtGui.QLabel('Linecuts'))
        self.cb_incremental = QtGui.QCheckBox('Incremental')
        self.cb_incremental.setCheckState(QtCore.Qt.Unchecked)
        hbox_linecuts.addWidget(self.cb_incremental)
        hbox_linecuts.addWidget(QtGui.QLabel('Offset:'))
        self.le_offset = QtGui.QLineEdit('0', self)
        hbox_linecuts.addWidget(self.le_offset)
        self.b_clear_lines = QtGui.QPushButton('Clear', self)
        self.b_clear_lines.clicked.connect(self.on_clear_lines)
        hbox_linecuts.addWidget(self.b_clear_lines)
        # Lines
        hbox_style = QtGui.QHBoxLayout()
        hbox_style.addWidget(QtGui.QLabel('Line style'))
        self.cb_linestyle = QtGui.QComboBox(self)
        self.cb_linestyle.addItems(['None', 'solid', 'dashed', 'dotted'])
        hbox_style.addWidget(self.cb_linestyle)
        hbox_style.addWidget(QtGui.QLabel('Linewidth'))
        self.le_linewidth = QtGui.QLineEdit('0.5', self)
        hbox_style.addWidget(self.le_linewidth)
        # Markers
        hbox_style.addWidget(QtGui.QLabel('Marker style'))
        self.cb_markerstyle = QtGui.QComboBox(self)
        self.cb_markerstyle.addItems(['None', '.', 'o', 'x'])
        hbox_style.addWidget(self.cb_markerstyle)
        hbox_style.addWidget(QtGui.QLabel('Size'))
        self.le_markersize = QtGui.QLineEdit('0.5', self)
        hbox_style.addWidget(self.le_markersize)
        self.cb_include_z = QtGui.QCheckBox('Include Z')
        self.cb_include_z.setCheckState(QtCore.Qt.Checked)
        hbox_style.addWidget(self.cb_include_z)
        # Tree view that shows parameter/value pairs of a picked datapoint
        self.row_tree = QtGui.QTreeWidget(self)
        self.row_tree.setHeaderLabels(['Parameter', 'Value'])
        self.row_tree.setColumnWidth(0, 100)
        self.row_tree.setHidden(True)
        hbox_plot = QtGui.QHBoxLayout()
        hbox_plot.addWidget(self.canvas)
        hbox_plot.addWidget(self.row_tree)
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addLayout(hbox_plot)
        layout.addLayout(hbox_export)
        layout.addLayout(hbox_linecuts)
        layout.addLayout(hbox_style)
        self.setLayout(layout)
        self.resize(700, 500)
        self.move(630, 100)
        # This flag is used to reposition the window next to the main window
        # when drawing the first linecut
        self.first_linecut = True
        self.hide()
    def populate_ui(self):
        """Initialize the style controls from the main window's profile settings."""
        profile = self.main.profile_settings
        idx = self.cb_linestyle.findText(profile['line_style'])
        self.cb_linestyle.setCurrentIndex(idx)
        self.le_linewidth.setText(profile['line_width'])
        idx = self.cb_markerstyle.findText(profile['marker_style'])
        self.cb_markerstyle.setCurrentIndex(idx)
        self.le_markersize.setText(profile['marker_size'])
    def get_line_kwargs(self):
        """Return matplotlib line kwargs read from the current style widgets."""
        return {
            'linestyle': str(self.cb_linestyle.currentText()),
            'linewidth': float(self.le_linewidth.text()),
            'marker': str(self.cb_markerstyle.currentText()),
            'markersize': float(self.le_markersize.text()),
        }
    def on_reset(self):
        """Reset the axes limits to the current data extent with a 10% margin."""
        if self.x is not None and self.y is not None:
            minx, maxx = np.min(self.x), np.max(self.x)
            miny, maxy = np.min(self.y), np.max(self.y)
            xdiff = (maxx - minx) * .1
            ydiff = (maxy - miny) * .1
            self.ax.axis([minx - xdiff, maxx + xdiff,
                          miny - ydiff, maxy + ydiff])
            self.canvas.draw()
    def on_pick(self, event):
        """Left-click pick: show the picked datapoint's file-row info and mark it."""
        if event.mouseevent.button == 1:
            line = self.linetraces[0]
            # Use the middle candidate when several points fall in the pick radius
            ind = event.ind[int(len(event.ind) / 2)]
            x = line.get_xdata()[ind]
            y = line.get_ydata()[ind]
            row = int(line.row_numbers[ind])
            data = self.main.dat_file.get_row_info(row)
            # Also show the datapoint index
            data['N'] = ind
            # Fill the treeview with data
            self.row_tree.clear()
            widgets = []
            for name, value in data.items():
                if name == 'N':
                    val = str(value)
                else:
                    val = eng_format(value, 1)
                widgets.append(QtGui.QTreeWidgetItem(None, [name, val]))
            self.row_tree.insertTopLevelItems(0, widgets)
            # Remove the previous datapoint marker
            if self.marker is not None:
                self.marker.remove()
                self.marker = None
            # Plot a new datapoint marker
            self.marker = self.ax.plot(x, y, '.',
                                       markersize=15,
                                       color='black')[0]
            self.fig.canvas.draw()
    def on_press(self, event):
        """Right-click on the canvas: clear the datapoint info tree and marker."""
        if event.button == 3:
            self.row_tree.clear()
            if self.marker is not None:
                self.marker.remove()
                self.marker = None
            self.fig.canvas.draw()
    def on_toggle_datapoint_info(self):
        """Show/hide the datapoint info tree."""
        self.row_tree.setHidden(not self.row_tree.isHidden())
    def on_data_to_clipboard(self):
        """Copy the current trace as a two-column table to the system clipboard."""
        if self.x is None or self.y is None:
            return
        data = pd.DataFrame(np.column_stack((self.x, self.y)),
                            columns=[self.xlabel, self.ylabel])
        data.to_clipboard(index=False)
    def on_figure_to_clipboard(self):
        """Copy the current figure to the clipboard via a temporary PNG file."""
        # NOTE(review): writes 'test.png' next to this module -- requires a
        # writable install directory; confirm this is intentional
        path = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(path, 'test.png')
        self.fig.savefig(path, bbox_inches='tight')
        img = QtGui.QImage(path)
        QtGui.QApplication.clipboard().setImage(img)
    def on_to_ppt(self):
        """ Some win32 COM magic to interact with powerpoint """
        try:
            import win32com.client
        except ImportError:
            print('ERROR: The win32com library needs to be installed')
            return
        # First, copy to the clipboard
        self.on_figure_to_clipboard()
        # Connect to an open PowerPoint application
        app = win32com.client.Dispatch('PowerPoint.Application')
        # Get the current slide and paste the plot
        slide = app.ActiveWindow.View.Slide
        shape = slide.Shapes.Paste()
        # Add a hyperlink to the data location to easily open the data again
        shape.ActionSettings[0].Hyperlink.Address = self.main.abs_filename
    def on_save(self):
        """Save the current trace to a tab-separated .dat file chosen by the user."""
        if self.x is None or self.y is None:
            return
        path = os.path.dirname(os.path.realpath(__file__))
        filename = QtGui.QFileDialog.getSaveFileName(self,
                                                     'Save file',
                                                     path,
                                                     '.dat')
        if filename != '':
            data = pd.DataFrame(np.column_stack((self.x, self.y)),
                                columns=[self.xlabel, self.ylabel])
            data.to_csv(filename, sep='\t', index=False)
    def on_clear_lines(self):
        """Remove every trace from the plot."""
        for line in self.linetraces:
            line.remove()
        self.linetraces = []
        self.fig.canvas.draw()
    def plot_linetrace(self, x, y, z, row_numbers, type, position, title,
                       xlabel, ylabel, otherlabel):
        """Plot a linecut: replace the current trace, or stack it (with the
        configured vertical offset) when the 'Incremental' box is checked.

        z/otherlabel describe the slice coordinate shown in the title when
        'Include Z' is checked; row_numbers map points back to file rows.
        """
        # Don't draw lines consisting of one point
        if np.count_nonzero(~np.isnan(y)) < 2:
            return
        self.xlabel, self.ylabel, self.otherlabel = xlabel, ylabel, otherlabel
        self.title = title
        self.x, self.y, self.z = x, y, z
        if self.cb_include_z.checkState() == QtCore.Qt.Checked:
            title = '{0}\n{1} = {2}'.format(title, otherlabel, eng_format(z, 1))
        title = '\n'.join(textwrap.wrap(title, 40, replace_whitespace=False))
        self.ax.set_title(title)
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)
        # Remove all the existing lines and only plot one if we uncheck
        # the incremental box. Else, add a new line to the collection
        if self.cb_incremental.checkState() == QtCore.Qt.Unchecked:
            for line in self.linetraces:
                line.remove()
            self.linetraces = []
            line = Linetrace(x, y, row_numbers, type, position,
                             color='red',
                             picker=5,
                             **self.get_line_kwargs())
            self.linetraces.append(line)
            self.ax.add_line(line)
            self.total_offset = 0
        else:
            if len(self.ax.lines) > 0:
                # Skip redrawing when the linecut position did not change
                if self.linetraces[-1].position == position:
                    return
            index = len(self.linetraces) - 1
            offset = float(self.le_offset.text())
            line = Linetrace(x, y + index * offset, row_numbers, type, position)
            line.set_color(next(self.colors))
            self.linetraces.append(line)
            self.ax.add_line(line)
        if self.cb_reset_cmap.checkState() == QtCore.Qt.Checked:
            # Mask NaN/inf before computing the autoscale limits (5% margin)
            x, y = np.ma.masked_invalid(x), np.ma.masked_invalid(y)
            minx, maxx = np.min(x), np.max(x)
            miny, maxy = np.min(y), np.max(y)
            xdiff = (maxx - minx) * .05
            ydiff = (maxy - miny) * .05
            self.ax.axis([minx - xdiff, maxx + xdiff,
                          miny - ydiff, maxy + ydiff])
        self.ax.set_aspect('auto')
        self.fig.tight_layout()
        self.fig.canvas.draw()
        if self.isHidden():
            self.show_window()
    def resizeEvent(self, event):
        """Keep the figure layout tight when the dialog is resized."""
        self.fig.tight_layout()
        self.canvas.draw()
    def show_window(self):
        """Show and raise the dialog, repositioning it on the first linecut."""
        if self.first_linecut:
            self.move(630, 100)
        self.show()
        self.raise_()
    def closeEvent(self, event):
        """Hide instead of closing so the dialog and its state survive."""
        self.hide()
        event.ignore()
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import textwrap
from itertools import cycle
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg, NavigationToolbar2QT
from PyQt4 import QtGui, QtCore
from .util import FixedOrderFormatter, eng_format
class Linetrace(plt.Line2D):
"""
Represents a linetrace from the data. The purpose of this class is
to be able to store incremental linetraces in an array.
x/y: Arrays containing x and y data
type: Type of linetrace, 'horizontal' or 'vertical'
position: The x/y coordinate at which the linetrace was taken
"""
def __init__(self, x, y, row_numbers, type, position, **kwargs):
plt.Line2D.__init__(self, x, y, **kwargs)
self.row_numbers = row_numbers
self.type = type
self.position = position
class Linecut(QtGui.QDialog):
def __init__(self, main=None):
super(Linecut, self).__init__(None)
self.main = main
self.fig, self.ax = plt.subplots()
self.x, self.y = None, None
self.linetraces = []
self.marker = None
self.colors = cycle('bgrcmykw')
self.ax.xaxis.set_major_formatter(FixedOrderFormatter())
self.ax.yaxis.set_major_formatter(FixedOrderFormatter())
self.init_ui()
def init_ui(self):
self.setWindowTitle("Linecut")
# Don't show this window in the taskbar
self.setWindowFlags(QtCore.Qt.Tool)
self.canvas = FigureCanvasQTAgg(self.fig)
self.canvas.mpl_connect('pick_event', self.on_pick)
self.canvas.mpl_connect('button_press_event', self.on_press)
self.toolbar = NavigationToolbar2QT(self.canvas, self)
hbox_export = QtGui.QHBoxLayout()
self.cb_reset_cmap = QtGui.QCheckBox('Reset on plot')
self.cb_reset_cmap.setCheckState(QtCore.Qt.Checked)
hbox_export.addWidget(self.cb_reset_cmap)
self.b_save = QtGui.QPushButton('Copy data', self)
self.b_save.clicked.connect(self.on_data_to_clipboard)
hbox_export.addWidget(self.b_save)
self.b_copy = QtGui.QPushButton('Copy figure', self)
self.b_copy.clicked.connect(self.on_figure_to_clipboard)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+C"),
self, self.on_figure_to_clipboard)
hbox_export.addWidget(self.b_copy)
self.b_to_ppt = QtGui.QPushButton('To PPT (Win)', self)
self.b_to_ppt.clicked.connect(self.on_to_ppt)
hbox_export.addWidget(self.b_to_ppt)
self.b_save_dat = QtGui.QPushButton('Save data...', self)
self.b_save_dat.clicked.connect(self.on_save)
hbox_export.addWidget(self.b_save_dat)
self.b_toggle_info = QtGui.QPushButton('Toggle info')
self.b_toggle_info.clicked.connect(self.on_toggle_datapoint_info)
hbox_export.addWidget(self.b_toggle_info)
# Linecuts
hbox_linecuts = QtGui.QHBoxLayout()
hbox_linecuts.addWidget(QtGui.QLabel('Linecuts'))
self.cb_incremental = QtGui.QCheckBox('Incremental')
self.cb_incremental.setCheckState(QtCore.Qt.Unchecked)
hbox_linecuts.addWidget(self.cb_incremental)
hbox_linecuts.addWidget(QtGui.QLabel('Offset:'))
self.le_offset = QtGui.QLineEdit('0', self)
hbox_linecuts.addWidget(self.le_offset)
self.b_clear_lines = QtGui.QPushButton('Clear', self)
self.b_clear_lines.clicked.connect(self.on_clear_lines)
hbox_linecuts.addWidget(self.b_clear_lines)
# Lines
hbox_style = QtGui.QHBoxLayout()
hbox_style.addWidget(QtGui.QLabel('Line style'))
self.cb_linestyle = QtGui.QComboBox(self)
self.cb_linestyle.addItems(['None', 'solid', 'dashed', 'dotted'])
hbox_style.addWidget(self.cb_linestyle)
hbox_style.addWidget(QtGui.QLabel('Linewidth'))
self.le_linewidth = QtGui.QLineEdit('0.5', self)
hbox_style.addWidget(self.le_linewidth)
# Markers
hbox_style.addWidget(QtGui.QLabel('Marker style'))
self.cb_markerstyle = QtGui.QComboBox(self)
self.cb_markerstyle.addItems(['None', '.', 'o', 'x'])
hbox_style.addWidget(self.cb_markerstyle)
hbox_style.addWidget(QtGui.QLabel('Size'))
self.le_markersize = QtGui.QLineEdit('0.5', self)
hbox_style.addWidget(self.le_markersize)
self.cb_include_z = QtGui.QCheckBox('Include Z')
self.cb_include_z.setCheckState(QtCore.Qt.Checked)
hbox_style.addWidget(self.cb_include_z)
self.row_tree = QtGui.QTreeWidget(self)
self.row_tree.setHeaderLabels(['Parameter', 'Value'])
self.row_tree.setColumnWidth(0, 100)
self.row_tree.setHidden(True)
hbox_plot = QtGui.QHBoxLayout()
hbox_plot.addWidget(self.canvas)
hbox_plot.addWidget(self.row_tree)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addLayout(hbox_plot)
layout.addLayout(hbox_export)
layout.addLayout(hbox_linecuts)
layout.addLayout(hbox_style)
self.setLayout(layout)
self.resize(700, 500)
self.move(630, 100)
# This flag is used to reposition the window next to the main window
# when drawing the first linecut
self.first_linecut = True
self.hide()
def populate_ui(self):
profile = self.main.profile_settings
idx = self.cb_linestyle.findText(profile['line_style'])
self.cb_linestyle.setCurrentIndex(idx)
self.le_linewidth.setText(profile['line_width'])
idx = self.cb_markerstyle.findText(profile['marker_style'])
self.cb_markerstyle.setCurrentIndex(idx)
self.le_markersize.setText(profile['marker_size'])
def get_line_kwargs(self):
return {
'linestyle': str(self.cb_linestyle.currentText()),
'linewidth': float(self.le_linewidth.text()),
'marker': str(self.cb_markerstyle.currentText()),
'markersize': float(self.le_markersize.text()),
}
def on_reset(self):
if self.x is not None and self.y is not None:
minx, maxx = np.min(self.x), np.max(self.x)
miny, maxy = np.min(self.y), np.max(self.y)
xdiff = (maxx - minx) * .1
ydiff = (maxy - miny) * .1
self.ax.axis([minx - xdiff, maxx + xdiff,
miny - ydiff, maxy + ydiff])
self.canvas.draw()
def on_pick(self, event):
if event.mouseevent.button == 1:
line = self.linetraces[0]
ind = event.ind[int(len(event.ind) / 2)]
x = line.get_xdata()[ind]
y = line.get_ydata()[ind]
row = int(line.row_numbers[ind])
data = self.main.dat_file.get_row_info(row)
# Also show the datapoint index
data['N'] = ind
# Fill the treeview with data
self.row_tree.clear()
widgets = []
for name, value in data.items():
if name == 'N':
val = str(value)
else:
val = eng_format(value, 1)
widgets.append(QtGui.QTreeWidgetItem(None, [name, val]))
self.row_tree.insertTopLevelItems(0, widgets)
# Remove the previous datapoint marker
if self.marker is not None:
self.marker.remove()
self.marker = None
# Plot a new datapoint marker
self.marker = self.ax.plot(x, y, '.',
markersize=15,
color='black')[0]
self.fig.canvas.draw()
def on_press(self, event):
if event.button == 3:
self.row_tree.clear()
if self.marker is not None:
self.marker.remove()
self.marker = None
self.fig.canvas.draw()
def on_toggle_datapoint_info(self):
self.row_tree.setHidden(not self.row_tree.isHidden())
def on_data_to_clipboard(self):
if self.x is None or self.y is None:
return
data = pd.DataFrame(np.column_stack((self.x, self.y)),
columns=[self.xlabel, self.ylabel])
data.to_clipboard(index=False)
def on_figure_to_clipboard(self):
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, 'test.png')
self.fig.savefig(path, bbox_inches='tight')
img = QtGui.QImage(path)
QtGui.QApplication.clipboard().setImage(img)
def on_to_ppt(self):
""" Some win32 COM magic to interact with powerpoint """
try:
import win32com.client
except ImportError:
print('ERROR: The win32com library needs to be installed')
return
# First, copy to the clipboard
self.on_figure_to_clipboard()
# Connect to an open PowerPoint application
app = win32com.client.Dispatch('PowerPoint.Application')
# Get the current slide and paste the plot
slide = app.ActiveWindow.View.Slide
shape = slide.Shapes.Paste()
# Add a hyperlink to the data location to easily open the data again
shape.ActionSettings[0].Hyperlink.Address = self.main.abs_filename
def on_save(self):
if self.x is None or self.y is None:
return
path = os.path.dirname(os.path.realpath(__file__))
filename = QtGui.QFileDialog.getSaveFileName(self,
'Save file',
path,
'.dat')
if filename != '':
data = pd.DataFrame(np.column_stack((self.x, self.y)),
columns=[self.xlabel, self.ylabel])
data.to_csv(filename, sep='\t', index=False)
def on_clear_lines(self):
for line in self.linetraces:
line.remove()
self.linetraces = []
self.fig.canvas.draw()
def plot_linetrace(self, x, y, z, row_numbers, type, position, title,
xlabel, ylabel, otherlabel):
# Don't draw lines consisting of one point
if np.count_nonzero(~np.isnan(y)) < 2:
return
self.xlabel, self.ylabel, self.otherlabel = xlabel, ylabel, otherlabel
self.title = title
self.x, self.y, self.z = x, y, z
if self.cb_include_z.checkState() == QtCore.Qt.Checked:
title = '{0}\n{1} = {2}'.format(title, otherlabel, eng_format(z, 1))
title = '\n'.join(textwrap.wrap(title, 40, replace_whitespace=False))
self.ax.set_title(title)
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
# Remove all the existing lines and only plot one if we uncheck
# the incremental box. Else, add a new line to the collection
if self.cb_incremental.checkState() == QtCore.Qt.Unchecked:
for line in self.linetraces:
line.remove()
self.linetraces = []
line = Linetrace(x, y, row_numbers, type, position,
color='red',
picker=5,
**self.get_line_kwargs())
self.linetraces.append(line)
self.ax.add_line(line)
self.total_offset = 0
else:
if len(self.ax.lines) > 0:
if self.linetraces[-1].position == position:
return
index = len(self.linetraces) - 1
offset = float(self.le_offset.text())
line = Linetrace(x, y + index * offset, row_numbers, type, position)
line.set_color(next(self.colors))
self.linetraces.append(line)
self.ax.add_line(line)
if self.cb_reset_cmap.checkState() == QtCore.Qt.Checked:
x, y = np.ma.masked_invalid(x), np.ma.masked_invalid(y)
minx, maxx = np.min(x), np.max(x)
miny, maxy = np.min(y), np.max(y)
xdiff = (maxx - minx) * .05
ydiff = (maxy - miny) * .05
self.ax.axis([minx - xdiff, maxx + xdiff,
miny - ydiff, maxy + ydiff])
self.ax.set_aspect('auto')
self.fig.tight_layout()
self.fig.canvas.draw()
if self.isHidden():
self.show_window()
def resizeEvent(self, event):
self.fig.tight_layout()
self.canvas.draw()
def show_window(self):
if self.first_linecut:
self.move(630, 100)
self.show()
self.raise_()
def closeEvent(self, event):
self.hide()
event.ignore()
|
en
| 0.811889
|
Represents a linetrace from the data. The purpose of this class is to be able to store incremental linetraces in an array. x/y: Arrays containing x and y data type: Type of linetrace, 'horizontal' or 'vertical' position: The x/y coordinate at which the linetrace was taken # Don't show this window in the taskbar # Linecuts # Lines # Markers # This flag is used to reposition the window next to the main window # when drawing the first linecut # Also show the datapoint index # Fill the treeview with data # Remove the previous datapoint marker # Plot a new datapoint marker Some win32 COM magic to interact with powerpoint # First, copy to the clipboard # Connect to an open PowerPoint application # Get the current slide and paste the plot # Add a hyperlink to the data location to easily open the data again # Don't draw lines consisting of one point # Remove all the existing lines and only plot one if we uncheck # the incremental box. Else, add a new line to the collection
| 2.48195
| 2
|
MiGRIDS/UserInterface/formFromXML.py
|
mmuellerstoffels/GBSTools
| 8
|
6627381
|
<gh_stars>1-10
#creates a dynamic form based on the information in xml files
from PyQt5 import QtWidgets, QtCore, QtGui
class formFromXML(QtWidgets.QDialog):
    """Modal form generated dynamically from a component descriptor XML.

    Each XML tag becomes a labelled grid row with one input widget per
    attribute (text box, unit combo box, or TRUE/FALSE checkbox).  On close
    the edited values are written back into the soup and, when write=True,
    saved to the descriptor file.
    """
    def __init__(self, component, componentSoup, write=True):
        """component: object exposing type, component_name, component_directory.
        componentSoup: BeautifulSoup of the component descriptor XML.
        write: when True, persist the updated soup to file on close.
        """
        super().__init__()
        self.componentType = component.type
        self.componentName = component.component_name
        self.soup = componentSoup
        self.fileDir = component.component_directory
        self.write = write
        # Maps '<parent.tag>.<attr>' -> new value for every edited attribute
        self.changes={}
        self.initUI()
    # initialize and display the form
    def initUI(self):
        """Build the scrollable form and run it as a modal dialog."""
        #container widget
        widget = QtWidgets.QWidget()
        self.setWindowTitle(self.componentName)
        self.setObjectName("Component Dialog")
        #layout of container widget
        windowLayout = QtWidgets.QVBoxLayout()
        scrollArea = QtWidgets.QScrollArea()
        scrollArea.setWidgetResizable(True)
        scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        #a grid layout object
        xmlLayout = self.displayXML(self.soup, windowLayout)
        widget.setLayout(xmlLayout)
        scrollArea.setWidget(widget)
        #adding scroll layer
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(scrollArea,6)
        self.setLayout(self.layout)
        self.show()
        self.exec()
    #create a layout from the xml that was turned into soup
    #BeautifulSoup QVBoxLayout -> QVBoxLayout
    def displayXML(self, soup, vlayout):
        """Append one labelled input grid per XML tag to vlayout and return it."""
        from bs4 import Comment  # NOTE(review): appears unused -- confirm before removing
        from GBSUserInterface.ProjectSQLiteHandler import ProjectSQLiteHandler
        from GBSUserInterface.gridLayoutSetup import setupGrid
        g1 = {'headers': [1,2,3,4,5],
              'rowNames': [],
              'columnWidths': [2, 1, 1, 1, 1]}
        #this uses a generic units table
        dbHandler = ProjectSQLiteHandler('project_manager')
        units = dbHandler.cursor.execute("select code from ref_units").fetchall()
        dbHandler.closeDatabase()
        units = [u[0] for u in units]
        for tag in soup.find_all():
            if tag.name not in ['component','childOf','type']:
                row = 0
                # concatenated text content of the tag becomes the input hint
                hint = "".join(tag.findAll(text=True))
                #the tag name is the main label
                if tag.parent.name not in ['component', 'childOf', 'type']:
                    parent = tag.parent.name
                    pt = '.'.join([parent, tag.name])
                else:
                    pt = tag.name
                g1['rowNames'].append(pt)
                g1['rowNames'].append(pt + 'input' + str(row))
                #every tag gets a grid for data input
                #there are 4 columns - 2 for labels, 2 for values
                #the default is 2 rows - 1 for tag name, 1 for data input
                #more rows are added if more than 2 input attributes exist
                #create the overall label
                g1[pt] = {1:{'widget':'lbl','name':tag.name}}
                column = 1
                g1[pt + 'input' + str(row)] ={}
                for a in tag.attrs:
                    name = '.'.join([pt,str(a)])
                    inputValue = tag[a]
                    #columns always start populating at 2
                    if column <=4:
                        column+=1
                    else:
                        column = 2
                        row+=1
                    widget = 'txt'
                    items = None
                    #if setting units attribute use a combo box
                    if a =='unit':
                        widget = 'combo'
                        items = units
                    #if the value is set to true false use a checkbox
                    if inputValue in ['TRUE','FALSE']:
                        widget = 'chk'
                    #first column is the label
                    g1[pt + 'input' + str(row)][column] = {'widget':'lbl','name':'lbl' + a, 'default':a, 'hint':hint}
                    column+=1
                    if items is None:
                        g1[pt + 'input' + str(row)][column] = {'widget': widget, 'name':name, 'default':inputValue, 'hint':hint}
                    else:
                        g1[pt + 'input' + str(row)][column] = {'widget': widget, 'name':name, 'default': inputValue, 'items':items, 'hint':hint}
        #make the grid layout from the dictionary
        grid = setupGrid(g1)
        #add the grid to the parent layout
        vlayout.addLayout(grid)
        return vlayout
    #updates the soup to reflect changes in the form
    #None->None
    def update(self):
        """Read every widget's current value back into the soup, recording changes.
        NOTE(review): shadows QWidget.update(); invoked explicitly from closeEvent.
        """
        #for every tag in the soup fillSetInfo its value from the form
        for tag in self.soup.find_all():
            if tag.parent.name not in ['component', 'childOf', 'type']:
                parent = tag.parent.name
                pt = '.'.join([parent,tag.name])
            else:
                pt = tag.name
            for a in tag.attrs:
                # Widgets were named '<pt>.<attr>' in displayXML
                widget = self.findChild((QtWidgets.QLineEdit, QtWidgets.QComboBox,QtWidgets.QCheckBox), '.'.join([pt,str(a)]))
                if type(widget) == QtWidgets.QLineEdit:
                    if tag.attrs[a] != widget.text():
                        self.changes['.'.join([pt, str(a)])]=widget.text()
                        tag.attrs[a] = widget.text()
                elif type(widget) == QtWidgets.QComboBox:
                    if tag.attrs[a] != widget.currentText():
                        self.changes['.'.join([pt, str(a)])]=widget.currentText()
                        tag.attrs[a]= widget.currentText()
                elif type(widget) == QtWidgets.QCheckBox:
                    if (widget.isChecked()) & (tag.attrs[a] != 'TRUE'):
                        self.changes['.'.join([pt, str(a)])]= 'TRUE'
                        tag.attrs[a] = 'TRUE'
                    elif (not widget.isChecked()) & (tag.attrs[a] != 'FALSE'):
                        # NOTE(review): records 'TRUE' in self.changes while the
                        # attribute is set to 'FALSE' -- looks like a copy-paste
                        # bug; confirm intent before fixing
                        self.changes['.'.join([pt, str(a)])]= 'TRUE'
                        tag.attrs[a]= 'FALSE'
    #when the form is closed the soup gets updated and written to an xml file
    #Event -> None
    def closeEvent(self,evnt):
        """On close: sync widget values into the soup, then write or report changes."""
        from GBSController.UIToHandler import UIToHandler
        print('closing descriptor file')
        #fillSetInfo soup
        self.update()
        #Tell the controller to tell the InputHandler to write the xml
        if self.write:
            handler = UIToHandler()
            handler.writeComponentSoup(self.componentName, self.fileDir, self.soup)
        else:
            #print a list of changes
            print(self.changes)
|
#creates a dynamic form based on the information in xml files
from PyQt5 import QtWidgets, QtCore, QtGui
class formFromXML(QtWidgets.QDialog):
def __init__(self, component, componentSoup, write=True):
super().__init__()
self.componentType = component.type
self.componentName = component.component_name
self.soup = componentSoup
self.fileDir = component.component_directory
self.write = write
self.changes={}
self.initUI()
# initialize and display the form
def initUI(self):
#container widget
widget = QtWidgets.QWidget()
self.setWindowTitle(self.componentName)
self.setObjectName("Component Dialog")
#layout of container widget
windowLayout = QtWidgets.QVBoxLayout()
scrollArea = QtWidgets.QScrollArea()
scrollArea.setWidgetResizable(True)
scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
#a grid layout object
xmlLayout = self.displayXML(self.soup, windowLayout)
widget.setLayout(xmlLayout)
scrollArea.setWidget(widget)
#adding scroll layer
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(scrollArea,6)
self.setLayout(self.layout)
self.show()
self.exec()
#create a layout from the xml that was turned into soup
#BeautifulSoup QVBoxLayout -> QVBoxLayout
def displayXML(self, soup, vlayout):
from bs4 import Comment
from GBSUserInterface.ProjectSQLiteHandler import ProjectSQLiteHandler
from GBSUserInterface.gridLayoutSetup import setupGrid
g1 = {'headers': [1,2,3,4,5],
'rowNames': [],
'columnWidths': [2, 1, 1, 1, 1]}
#this uses a generic units table
dbHandler = ProjectSQLiteHandler('project_manager')
units = dbHandler.cursor.execute("select code from ref_units").fetchall()
dbHandler.closeDatabase()
units = [u[0] for u in units]
for tag in soup.find_all():
if tag.name not in ['component','childOf','type']:
row = 0
hint = "".join(tag.findAll(text=True))
#the tag name is the main label
if tag.parent.name not in ['component', 'childOf', 'type']:
parent = tag.parent.name
pt = '.'.join([parent, tag.name])
else:
pt = tag.name
g1['rowNames'].append(pt)
g1['rowNames'].append(pt + 'input' + str(row))
#every tag gets a grid for data input
#there are 4 columns - 2 for labels, 2 for values
#the default is 2 rows - 1 for tag name, 1 for data input
#more rows are added if more than 2 input attributes exist
#create the overall label
g1[pt] = {1:{'widget':'lbl','name':tag.name}}
column = 1
g1[pt + 'input' + str(row)] ={}
for a in tag.attrs:
name = '.'.join([pt,str(a)])
inputValue = tag[a]
#columns aways starts populating at 2
if column <=4:
column+=1
else:
column = 2
row+=1
widget = 'txt'
items = None
#if setting units attribute use a combo box
if a =='unit':
widget = 'combo'
items = units
#if the value is set to true false use a checkbox
if inputValue in ['TRUE','FALSE']:
widget = 'chk'
#first column is the label
g1[pt + 'input' + str(row)][column] = {'widget':'lbl','name':'lbl' + a, 'default':a, 'hint':hint}
column+=1
if items is None:
g1[pt + 'input' + str(row)][column] = {'widget': widget, 'name':name, 'default':inputValue, 'hint':hint}
else:
g1[pt + 'input' + str(row)][column] = {'widget': widget, 'name':name, 'default': inputValue, 'items':items, 'hint':hint}
#make the grid layout from the dictionary
grid = setupGrid(g1)
#add the grid to the parent layout
vlayout.addLayout(grid)
return vlayout
#updates the soup to reflect changes in the form
#None->None
def update(self):
#for every tag in the soup fillSetInfo its value from the form
for tag in self.soup.find_all():
if tag.parent.name not in ['component', 'childOf', 'type']:
parent = tag.parent.name
pt = '.'.join([parent,tag.name])
else:
pt = tag.name
for a in tag.attrs:
widget = self.findChild((QtWidgets.QLineEdit, QtWidgets.QComboBox,QtWidgets.QCheckBox), '.'.join([pt,str(a)]))
if type(widget) == QtWidgets.QLineEdit:
if tag.attrs[a] != widget.text():
self.changes['.'.join([pt, str(a)])]=widget.text()
tag.attrs[a] = widget.text()
elif type(widget) == QtWidgets.QComboBox:
if tag.attrs[a] != widget.currentText():
self.changes['.'.join([pt, str(a)])]=widget.currentText()
tag.attrs[a]= widget.currentText()
elif type(widget) == QtWidgets.QCheckBox:
if (widget.isChecked()) & (tag.attrs[a] != 'TRUE'):
self.changes['.'.join([pt, str(a)])]= 'TRUE'
tag.attrs[a] = 'TRUE'
elif (not widget.isChecked()) & (tag.attrs[a] != 'FALSE'):
self.changes['.'.join([pt, str(a)])]= 'TRUE'
tag.attrs[a]= 'FALSE'
#when the form is closed the soup gets updated and writtent to an xml file
#Event -> None
def closeEvent(self,evnt):
from GBSController.UIToHandler import UIToHandler
print('closing descriptor file')
#fillSetInfo soup
self.update()
#Tell the controller to tell the InputHandler to write the xml
if self.write:
handler = UIToHandler()
handler.writeComponentSoup(self.componentName, self.fileDir, self.soup)
else:
#return a list of changes
print(self.changes)
|
en
| 0.77255
|
#creates a dynamic form based on the information in xml files # initialize and display the form #container widget #layout of container widget #a grid layout object #adding scroll layer #create a layout from the xml that was turned into soup #BeautifulSoup QVBoxLayout -> QVBoxLayout #this uses a generic units table #the tag name is the main label #every tag gets a grid for data input #there are 4 columns - 2 for labels, 2 for values #the default is 2 rows - 1 for tag name, 1 for data input #more rows are added if more than 2 input attributes exist #create the overall label #columns aways starts populating at 2 #if setting units attribute use a combo box #if the value is set to true false use a checkbox #first column is the label #make the grid layout from the dictionary #add the grid to the parent layout #updates the soup to reflect changes in the form #None->None #for every tag in the soup fillSetInfo its value from the form #when the form is closed the soup gets updated and writtent to an xml file #Event -> None #fillSetInfo soup #Tell the controller to tell the InputHandler to write the xml #return a list of changes
| 2.7581
| 3
|
dart/build_rules/web.bzl
|
geaden/rules_dart
| 28
|
6627382
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dart rules targeting web clients."""
load(":internal.bzl", "collect_files", "layout_action", "make_dart_context", "package_spec_action")
def dart2js_action(ctx, dart_ctx, script_file,
enable_asserts, csp, dump_info, minify, preserve_uris,
js_output, part_outputs, other_outputs):
"""dart2js compile action."""
# Create a build directory.
build_dir = ctx.label.name + ".build/"
# Emit package spec.
package_spec_path = ctx.label.package + "/" + ctx.label.name + ".packages"
package_spec = ctx.actions.declare_file(build_dir + package_spec_path)
package_spec_action(
ctx=ctx,
dart_ctx=dart_ctx,
output=package_spec,
)
# Build a flattened directory of dart2js inputs, including inputs from the
# src tree, genfiles, and bin.
all_srcs, _ = collect_files(dart_ctx)
build_dir_files = layout_action(
ctx=ctx,
srcs=all_srcs,
output_dir=build_dir,
)
out_script = build_dir_files[script_file.short_path]
# Compute action inputs.
inputs = (
build_dir_files.values() + [package_spec]
)
tools = (
ctx.files._dart2js +
ctx.files._dart2js_support
)
# Compute dart2js args.
dart2js_args = [
"--packages=%s" % package_spec.path,
"--out=%s" % js_output.path,
]
if enable_asserts:
dart2js_args += ["--enable-asserts"]
if csp:
dart2js_args += ["--csp"]
if dump_info:
dart2js_args += ["--dump-info"]
if minify:
dart2js_args += ["--minify"]
if preserve_uris:
dart2js_args += ["--preserve-uris"]
dart2js_args += [out_script.path]
ctx.actions.run(
inputs=inputs,
tools=tools,
executable=ctx.executable._dart2js_helper,
arguments=[
str(ctx.label),
str(ctx.attr.deferred_lib_count),
ctx.outputs.js.path,
ctx.executable._dart2js.path,
] + dart2js_args,
outputs=[js_output] + part_outputs + other_outputs,
progress_message="Compiling with dart2js %s" % ctx,
mnemonic="Dart2jsCompile",
)
def _dart_web_application_impl(ctx):
"""Implements the dart_web_application build rule."""
dart_ctx = make_dart_context(ctx.label,
srcs=ctx.files.srcs,
data=ctx.files.data,
deps=ctx.attr.deps)
# Compute outputs.
js_output = ctx.outputs.js
other_outputs = [
ctx.outputs.deps_file,
ctx.outputs.sourcemap,
]
if ctx.attr.dump_info:
other_outputs += [ctx.outputs.info_json]
part_outputs = []
for i in range(1, ctx.attr.deferred_lib_count + 1):
part_outputs += [getattr(ctx.outputs, "part_js%s" % i)]
other_outputs += [getattr(ctx.outputs, "part_sourcemap%s" % i)]
# Invoke dart2js.
dart2js_action(
ctx=ctx,
dart_ctx=dart_ctx,
script_file=ctx.file.script_file,
enable_asserts=ctx.attr.enable_asserts,
csp=ctx.attr.csp,
dump_info=ctx.attr.dump_info,
minify=ctx.attr.minify,
preserve_uris=ctx.attr.preserve_uris,
js_output=js_output,
part_outputs=part_outputs,
other_outputs=other_outputs,
)
# TODO(cbracken) aggregate, inject licenses
return struct()
def _dart_web_application_outputs(dump_info, deferred_lib_count):
"""Returns the expected output map for dart_web_application."""
outputs = {
"js": "%{name}.js",
"deps_file": "%{name}.js.deps",
"sourcemap": "%{name}.js.map",
}
if dump_info:
outputs["info_json"] = "%{name}.js.info.json"
for i in range(1, deferred_lib_count + 1):
outputs["part_js%s" % i] = "%%{name}.js_%s.part.js" % i
outputs["part_sourcemap%s" % i] = "%%{name}.js_%s.part.js.map" % i
return outputs
dart_web_application = rule(
implementation=_dart_web_application_impl,
attrs={
"script_file": attr.label(allow_single_file=True, mandatory=True),
"srcs": attr.label_list(allow_files=True, mandatory=True),
"data": attr.label_list(allow_files=True),
"deps": attr.label_list(providers=["dart"]),
"deferred_lib_count": attr.int(default=0),
# compiler flags
"enable_asserts": attr.bool(default=False),
"csp": attr.bool(default=False),
"dump_info": attr.bool(default=False),
"minify": attr.bool(default=True),
"preserve_uris": attr.bool(default=False),
# tools
"_dart2js": attr.label(
allow_single_file=True,
executable=True,
cfg="host",
default=Label("//dart/build_rules/ext:dart2js")),
"_dart2js_support": attr.label(
allow_files=True,
default=Label("//dart/build_rules/ext:dart2js_support")),
"_dart2js_helper": attr.label(
allow_single_file=True,
executable=True,
cfg="host",
default=Label("//dart/build_rules/tools:dart2js_helper")),
},
outputs=_dart_web_application_outputs,
)
|
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dart rules targeting web clients."""
load(":internal.bzl", "collect_files", "layout_action", "make_dart_context", "package_spec_action")
def dart2js_action(ctx, dart_ctx, script_file,
enable_asserts, csp, dump_info, minify, preserve_uris,
js_output, part_outputs, other_outputs):
"""dart2js compile action."""
# Create a build directory.
build_dir = ctx.label.name + ".build/"
# Emit package spec.
package_spec_path = ctx.label.package + "/" + ctx.label.name + ".packages"
package_spec = ctx.actions.declare_file(build_dir + package_spec_path)
package_spec_action(
ctx=ctx,
dart_ctx=dart_ctx,
output=package_spec,
)
# Build a flattened directory of dart2js inputs, including inputs from the
# src tree, genfiles, and bin.
all_srcs, _ = collect_files(dart_ctx)
build_dir_files = layout_action(
ctx=ctx,
srcs=all_srcs,
output_dir=build_dir,
)
out_script = build_dir_files[script_file.short_path]
# Compute action inputs.
inputs = (
build_dir_files.values() + [package_spec]
)
tools = (
ctx.files._dart2js +
ctx.files._dart2js_support
)
# Compute dart2js args.
dart2js_args = [
"--packages=%s" % package_spec.path,
"--out=%s" % js_output.path,
]
if enable_asserts:
dart2js_args += ["--enable-asserts"]
if csp:
dart2js_args += ["--csp"]
if dump_info:
dart2js_args += ["--dump-info"]
if minify:
dart2js_args += ["--minify"]
if preserve_uris:
dart2js_args += ["--preserve-uris"]
dart2js_args += [out_script.path]
ctx.actions.run(
inputs=inputs,
tools=tools,
executable=ctx.executable._dart2js_helper,
arguments=[
str(ctx.label),
str(ctx.attr.deferred_lib_count),
ctx.outputs.js.path,
ctx.executable._dart2js.path,
] + dart2js_args,
outputs=[js_output] + part_outputs + other_outputs,
progress_message="Compiling with dart2js %s" % ctx,
mnemonic="Dart2jsCompile",
)
def _dart_web_application_impl(ctx):
"""Implements the dart_web_application build rule."""
dart_ctx = make_dart_context(ctx.label,
srcs=ctx.files.srcs,
data=ctx.files.data,
deps=ctx.attr.deps)
# Compute outputs.
js_output = ctx.outputs.js
other_outputs = [
ctx.outputs.deps_file,
ctx.outputs.sourcemap,
]
if ctx.attr.dump_info:
other_outputs += [ctx.outputs.info_json]
part_outputs = []
for i in range(1, ctx.attr.deferred_lib_count + 1):
part_outputs += [getattr(ctx.outputs, "part_js%s" % i)]
other_outputs += [getattr(ctx.outputs, "part_sourcemap%s" % i)]
# Invoke dart2js.
dart2js_action(
ctx=ctx,
dart_ctx=dart_ctx,
script_file=ctx.file.script_file,
enable_asserts=ctx.attr.enable_asserts,
csp=ctx.attr.csp,
dump_info=ctx.attr.dump_info,
minify=ctx.attr.minify,
preserve_uris=ctx.attr.preserve_uris,
js_output=js_output,
part_outputs=part_outputs,
other_outputs=other_outputs,
)
# TODO(cbracken) aggregate, inject licenses
return struct()
def _dart_web_application_outputs(dump_info, deferred_lib_count):
"""Returns the expected output map for dart_web_application."""
outputs = {
"js": "%{name}.js",
"deps_file": "%{name}.js.deps",
"sourcemap": "%{name}.js.map",
}
if dump_info:
outputs["info_json"] = "%{name}.js.info.json"
for i in range(1, deferred_lib_count + 1):
outputs["part_js%s" % i] = "%%{name}.js_%s.part.js" % i
outputs["part_sourcemap%s" % i] = "%%{name}.js_%s.part.js.map" % i
return outputs
dart_web_application = rule(
implementation=_dart_web_application_impl,
attrs={
"script_file": attr.label(allow_single_file=True, mandatory=True),
"srcs": attr.label_list(allow_files=True, mandatory=True),
"data": attr.label_list(allow_files=True),
"deps": attr.label_list(providers=["dart"]),
"deferred_lib_count": attr.int(default=0),
# compiler flags
"enable_asserts": attr.bool(default=False),
"csp": attr.bool(default=False),
"dump_info": attr.bool(default=False),
"minify": attr.bool(default=True),
"preserve_uris": attr.bool(default=False),
# tools
"_dart2js": attr.label(
allow_single_file=True,
executable=True,
cfg="host",
default=Label("//dart/build_rules/ext:dart2js")),
"_dart2js_support": attr.label(
allow_files=True,
default=Label("//dart/build_rules/ext:dart2js_support")),
"_dart2js_helper": attr.label(
allow_single_file=True,
executable=True,
cfg="host",
default=Label("//dart/build_rules/tools:dart2js_helper")),
},
outputs=_dart_web_application_outputs,
)
|
en
| 0.79405
|
# Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Dart rules targeting web clients. dart2js compile action. # Create a build directory. # Emit package spec. # Build a flattened directory of dart2js inputs, including inputs from the # src tree, genfiles, and bin. # Compute action inputs. # Compute dart2js args. Implements the dart_web_application build rule. # Compute outputs. # Invoke dart2js. # TODO(cbracken) aggregate, inject licenses Returns the expected output map for dart_web_application. # compiler flags # tools
| 1.829386
| 2
|
homeassistant/components/demo.py
|
hemantsangwan/home-assistant
| 2
|
6627383
|
<filename>homeassistant/components/demo.py
"""
homeassistant.components.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sets up a demo environment that mimics interaction with devices
"""
import time
import homeassistant as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.loader as loader
from homeassistant.const import (
CONF_PLATFORM, ATTR_ENTITY_PICTURE,
CONF_LATITUDE, CONF_LONGITUDE)
DOMAIN = "demo"
DEPENDENCIES = []
COMPONENTS_WITH_DEMO_PLATFORM = [
'switch', 'light', 'thermostat', 'sensor', 'media_player']
def setup(hass, config):
""" Setup a demo environment. """
group = loader.get_component('group')
configurator = loader.get_component('configurator')
config.setdefault(ha.DOMAIN, {})
config.setdefault(DOMAIN, {})
if config[DOMAIN].get('hide_demo_state') != '1':
hass.states.set('a.Demo_Mode', 'Enabled')
# Setup sun
if CONF_LATITUDE not in config[ha.DOMAIN]:
config[ha.DOMAIN][CONF_LATITUDE] = '32.87336'
if CONF_LONGITUDE not in config[ha.DOMAIN]:
config[ha.DOMAIN][CONF_LONGITUDE] = '-117.22743'
loader.get_component('sun').setup(hass, config)
# Setup demo platforms
for component in COMPONENTS_WITH_DEMO_PLATFORM:
bootstrap.setup_component(
hass, component, {component: {CONF_PLATFORM: 'demo'}})
# Setup room groups
lights = hass.states.entity_ids('light')
switches = hass.states.entity_ids('switch')
group.setup_group(hass, 'living room', [lights[0], lights[1], switches[0]])
group.setup_group(hass, 'bedroom', [lights[2], switches[1]])
# Setup device tracker
hass.states.set("device_tracker.Paulus", "home",
{ATTR_ENTITY_PICTURE:
"http://graph.facebook.com/schoutsen/picture"})
hass.states.set("device_tracker.Anne_Therese", "not_home",
{ATTR_ENTITY_PICTURE:
"http://graph.facebook.com/anne.t.frederiksen/picture"})
hass.states.set("group.all_devices", "home",
{
"auto": True,
"entity_id": [
"device_tracker.Paulus",
"device_tracker.Anne_Therese"
]
})
# Setup configurator
configurator_ids = []
def hue_configuration_callback(data):
""" Fake callback, mark config as done. """
time.sleep(2)
# First time it is called, pretend it failed.
if len(configurator_ids) == 1:
configurator.notify_errors(
configurator_ids[0],
"Failed to register, please try again.")
configurator_ids.append(0)
else:
configurator.request_done(configurator_ids[0])
request_id = configurator.request_config(
hass, "Philips Hue", hue_configuration_callback,
description=("Press the button on the bridge to register Philips Hue "
"with Home Assistant."),
description_image="/static/images/config_philips_hue.jpg",
submit_caption="I have pressed the button"
)
configurator_ids.append(request_id)
return True
|
<filename>homeassistant/components/demo.py
"""
homeassistant.components.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sets up a demo environment that mimics interaction with devices
"""
import time
import homeassistant as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.loader as loader
from homeassistant.const import (
CONF_PLATFORM, ATTR_ENTITY_PICTURE,
CONF_LATITUDE, CONF_LONGITUDE)
DOMAIN = "demo"
DEPENDENCIES = []
COMPONENTS_WITH_DEMO_PLATFORM = [
'switch', 'light', 'thermostat', 'sensor', 'media_player']
def setup(hass, config):
""" Setup a demo environment. """
group = loader.get_component('group')
configurator = loader.get_component('configurator')
config.setdefault(ha.DOMAIN, {})
config.setdefault(DOMAIN, {})
if config[DOMAIN].get('hide_demo_state') != '1':
hass.states.set('a.Demo_Mode', 'Enabled')
# Setup sun
if CONF_LATITUDE not in config[ha.DOMAIN]:
config[ha.DOMAIN][CONF_LATITUDE] = '32.87336'
if CONF_LONGITUDE not in config[ha.DOMAIN]:
config[ha.DOMAIN][CONF_LONGITUDE] = '-117.22743'
loader.get_component('sun').setup(hass, config)
# Setup demo platforms
for component in COMPONENTS_WITH_DEMO_PLATFORM:
bootstrap.setup_component(
hass, component, {component: {CONF_PLATFORM: 'demo'}})
# Setup room groups
lights = hass.states.entity_ids('light')
switches = hass.states.entity_ids('switch')
group.setup_group(hass, 'living room', [lights[0], lights[1], switches[0]])
group.setup_group(hass, 'bedroom', [lights[2], switches[1]])
# Setup device tracker
hass.states.set("device_tracker.Paulus", "home",
{ATTR_ENTITY_PICTURE:
"http://graph.facebook.com/schoutsen/picture"})
hass.states.set("device_tracker.Anne_Therese", "not_home",
{ATTR_ENTITY_PICTURE:
"http://graph.facebook.com/anne.t.frederiksen/picture"})
hass.states.set("group.all_devices", "home",
{
"auto": True,
"entity_id": [
"device_tracker.Paulus",
"device_tracker.Anne_Therese"
]
})
# Setup configurator
configurator_ids = []
def hue_configuration_callback(data):
""" Fake callback, mark config as done. """
time.sleep(2)
# First time it is called, pretend it failed.
if len(configurator_ids) == 1:
configurator.notify_errors(
configurator_ids[0],
"Failed to register, please try again.")
configurator_ids.append(0)
else:
configurator.request_done(configurator_ids[0])
request_id = configurator.request_config(
hass, "Philips Hue", hue_configuration_callback,
description=("Press the button on the bridge to register Philips Hue "
"with Home Assistant."),
description_image="/static/images/config_philips_hue.jpg",
submit_caption="I have pressed the button"
)
configurator_ids.append(request_id)
return True
|
en
| 0.745809
|
homeassistant.components.demo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sets up a demo environment that mimics interaction with devices Setup a demo environment. # Setup sun # Setup demo platforms # Setup room groups # Setup device tracker # Setup configurator Fake callback, mark config as done. # First time it is called, pretend it failed.
| 2.297429
| 2
|
idlealib/__init__.py
|
znsoooo/IDLE-Advance
| 4
|
6627384
|
# Copyright (c) 2021 Lishixian (znsoooo). All Rights Reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
'''
运行__main__.py获得一个加载所有插件的IDLE-Advance的示例文件。
运行__init__.py获得一个打开自身脚本并加载所有插件的editor的例子。
分别运行idlealib目录下的扩展文件,可以得到一个打开自身的editor或shell的例子。
如果需要停用部分扩展,将对应的脚本移出目录后重启IDLE即可。
'''
import os
import sys
import time
PY36 = sys.version_info > (3, 6)
EXTENSIONS = []
# - Functions ----------------------------------------
def fix_path():
# To fix open shell (call run() without sys.argv) or self.load_extension(name) will not work.
import sys
path = os.path.dirname(__file__)
if path not in sys.path:
sys.path.insert(0, path)
def wrap_function(func, before=(), after=()):
def wrapper(*args, **kwargs):
# print(func.__name__) # for test
[f() for f in before]
ret = func(*args, **kwargs)
[f() for f in after]
return ret
return wrapper
# - idleConf ----------------------------------------
if not PY36:
from functools import partial
from idlelib.configHandler import idleConf
idleConf.GetOption = partial(idleConf.GetOption, warn_on_default=False)
# idleConf.userCfg['extensions'].set('ZzDummy', 'enable', 'true') # fix do not show many warning in py34 and py35
#
# if idleConf.userCfg['extensions'].has_option('ZzDummy', 'enable'):
# ret = idleConf.userCfg['extensions'].Get('ZzDummy', 'enable', type='bool')
# print(ret)
#
# print(idleConf.GetOption('extensions', 'ZzDummy', 'enable', default=True, type='bool'))
# sys.exit(0)
# idleConf._GetOption, idleConf.GetOption = idleConf.GetOption, print
# def GetOption(configType, section, option, *args, **kw):
# if section.lower() == 'ZzDummy'.lower():
# print((configType, section, option))
# return False
# ret = idleConf._GetOption(configType, section, option, *args, **kw)
# return ret
# idleConf.GetOption = GetOption
# - Calltip ----------------------------------------
if PY36:
import idlelib.calltip
from idlelib.calltip import Calltip
else:
import idlelib.CallTips
from idlelib.CallTips import CallTips as Calltip
# idlelib.calltip._MAX_LINES = 999
# idlelib.calltip._MAX_COLS = 999
class MyCalltip(Calltip):
def __init__(self, editwin=None):
super().__init__(editwin)
editwin.ctip = self # make hook
# def fetch_tip(self, expression):
# self.expression = expression
# self.argspec = super().fetch_tip(expression)
# return self.argspec
if PY36:
idlelib.calltip.Calltip = MyCalltip
else:
idlelib.CallTips.CallTips = MyCalltip
# - AutoComplete ----------------------------------------
if sys.version_info < (3, 8):
if PY36:
import idlelib.autocomplete
from idlelib.autocomplete import AutoComplete
else:
import idlelib.AutoComplete
from idlelib.AutoComplete import AutoComplete
import keyword
ATTRS = 1 if sys.version_info < (3, 7) else 0
class MyAutoComplete(AutoComplete):
def fetch_completions(self, what, mode):
ret = super().fetch_completions(what, mode)
if mode == ATTRS and what == '':
for lst in ret[:2]:
lst.extend(v for v in keyword.kwlist if v not in lst) # `None/True/False` are repetitive.
lst.sort()
return ret
if PY36:
idlelib.autocomplete.AutoComplete = MyAutoComplete
else:
idlelib.AutoComplete.AutoComplete = MyAutoComplete
# - IOBinding ----------------------------------------
if PY36:
import idlelib.iomenu
from idlelib.iomenu import IOBinding
else:
import idlelib.IOBinding
from idlelib.IOBinding import IOBinding
class MyIOBinding(IOBinding):
def __init__(self, editwin):
# F5保存时,调用idlelib.runscript.getfilename(),设置自动保存时进入self.editwin.io.save(None)进行保存
self.save = wrap_function(self.save, after=editwin.after_save)
IOBinding.__init__(self, editwin)
def defaultfilename(self, mode="open"):
if not self.filename:
return self.dirname, time.strftime('Untitled_%Y%m%d_%H%M%S.py')
return super().defaultfilename(mode)
if PY36:
idlelib.iomenu.IOBinding = MyIOBinding
else:
idlelib.IOBinding.IOBinding = MyIOBinding
# - EditorWindow ----------------------------------------
# editor.EditorWindow -> pyshell.PyShellEditorWindow
# editor.EditorWindow -> outwin.OutputWindow -> pyshell.PyShell
if PY36:
import idlelib.editor
from idlelib.editor import EditorWindow
else:
import idlelib.EditorWindow
from idlelib.EditorWindow import EditorWindow
class MyEditorWindow(EditorWindow):
def __init__(self, *args):
if ('advance', 'Advance') not in self.menu_specs:
self.menu_specs.append(('advance', 'Advance'))
self.before_copy = []
self.after_save = []
self.after_close = []
# must before text binding, so before `EditorWindow.__init__()`
self.cut = wrap_function(self.cut, before=self.before_copy) # same as `copy`
self.copy = wrap_function(self.copy, before=self.before_copy)
self.close = wrap_function(self.close, before=self.after_close) # "<<close-window>>"事件不命中点击窗口关闭事件
EditorWindow.__init__(self, *args)
self.amenu = self.menudict['advance']
self.make_rmenu() # make "self.rmenu"
self.recent_files_menu['postcommand'] = self.update_recent_files_list # fix list not refresh when open another IDLE.
self.load_adv_extensions()
self.text.bind('<F12>', self.test)
def _close(self):
print('handle with edit _close:', self.io.filename)
super()._close()
# raise # TODO 是否还有别的方法阻止清空剪切
def add_adv_menu(self, label, sub, index='end', sp=False):
menu = self.menudict['advance']
if sp and menu.index('end') is not None:
menu.insert_separator(index)
if callable(sub):
menu.insert_command(index, label=label, command=sub)
else:
menu.insert_cascade(index, label=label, menu=sub)
def load_extension(self, name):
# for PY34 always raise error.
if name == 'ZzDummy':
return
return super().load_extension(name)
def load_adv_extensions(self):
for file in EXTENSIONS:
name, ext = os.path.splitext(os.path.basename(file))
if ext == '.py' and name not in ('__init__', '__main__'):
try:
self.load_extension(name) # TODO 支持任意位置文件导入
except:
print('Failed to import IDLE-Adv extension: %s' % name)
import traceback
traceback.print_exc()
menu = self.menudict['advance']
if menu.type('end') == 'separator':
menu.delete('end')
def test(self, e):
print('editor on test')
print('mark_names:', self.text.mark_names())
print('tag_names:', self.text.tag_names())
print('functions:', ' '.join(v for v in dir(self.text) if 'tag' in v or 'mark' in v))
if PY36:
idlelib.editor.EditorWindow = MyEditorWindow
else:
idlelib.EditorWindow.EditorWindow = MyEditorWindow
# - Main ----------------------------------------
# must after hot patch
if PY36:
from idlelib.pyshell import main
else:
from idlelib.PyShell import main
def run(filename=None, exts=()):
fix_path()
EXTENSIONS.extend(exts)
if not EXTENSIONS:
# `abspath` for open in cmd like `python __init__.py` to open script.
EXTENSIONS.extend(file for file in os.listdir(os.path.dirname(os.path.abspath(__file__))))
if filename:
import sys
sys.argv.append(filename) # Idea from "~\Lib\tkinter\__main__.py"
main()
def test_editor(script_file):
run(script_file, [script_file])
def test_shell(script_file):
run(None, [script_file])
if __name__ == '__main__':
run(__file__)
'''
TODO 参考历史文件的打开方法,用于拖拽打开和恢复打开文件
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
# 打开文件并定位位置(恢复打开)
outwin.py:
self.flist.gotofileline(filename, lineno)
'''
# TODO 当不在instance_dict中将不会刷新(新启动的idle线程)
'''
def update_recent_files_list(self, new_file=None):
...
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict:
menu = instance.recent_files_menu
'''
|
# Copyright (c) 2021 Lishixian (znsoooo). All Rights Reserved.
#
# Distributed under MIT license.
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
'''
运行__main__.py获得一个加载所有插件的IDLE-Advance的示例文件。
运行__init__.py获得一个打开自身脚本并加载所有插件的editor的例子。
分别运行idlealib目录下的扩展文件,可以得到一个打开自身的editor或shell的例子。
如果需要停用部分扩展,将对应的脚本移出目录后重启IDLE即可。
'''
import os
import sys
import time
PY36 = sys.version_info > (3, 6)
EXTENSIONS = []
# - Functions ----------------------------------------
def fix_path():
# To fix open shell (call run() without sys.argv) or self.load_extension(name) will not work.
import sys
path = os.path.dirname(__file__)
if path not in sys.path:
sys.path.insert(0, path)
def wrap_function(func, before=(), after=()):
def wrapper(*args, **kwargs):
# print(func.__name__) # for test
[f() for f in before]
ret = func(*args, **kwargs)
[f() for f in after]
return ret
return wrapper
# - idleConf ----------------------------------------
if not PY36:
from functools import partial
from idlelib.configHandler import idleConf
idleConf.GetOption = partial(idleConf.GetOption, warn_on_default=False)
# idleConf.userCfg['extensions'].set('ZzDummy', 'enable', 'true') # fix do not show many warning in py34 and py35
#
# if idleConf.userCfg['extensions'].has_option('ZzDummy', 'enable'):
# ret = idleConf.userCfg['extensions'].Get('ZzDummy', 'enable', type='bool')
# print(ret)
#
# print(idleConf.GetOption('extensions', 'ZzDummy', 'enable', default=True, type='bool'))
# sys.exit(0)
# idleConf._GetOption, idleConf.GetOption = idleConf.GetOption, print
# def GetOption(configType, section, option, *args, **kw):
# if section.lower() == 'ZzDummy'.lower():
# print((configType, section, option))
# return False
# ret = idleConf._GetOption(configType, section, option, *args, **kw)
# return ret
# idleConf.GetOption = GetOption
# - Calltip ----------------------------------------
if PY36:
import idlelib.calltip
from idlelib.calltip import Calltip
else:
import idlelib.CallTips
from idlelib.CallTips import CallTips as Calltip
# idlelib.calltip._MAX_LINES = 999
# idlelib.calltip._MAX_COLS = 999
class MyCalltip(Calltip):
def __init__(self, editwin=None):
super().__init__(editwin)
editwin.ctip = self # make hook
# def fetch_tip(self, expression):
# self.expression = expression
# self.argspec = super().fetch_tip(expression)
# return self.argspec
if PY36:
idlelib.calltip.Calltip = MyCalltip
else:
idlelib.CallTips.CallTips = MyCalltip
# - AutoComplete ----------------------------------------
if sys.version_info < (3, 8):
if PY36:
import idlelib.autocomplete
from idlelib.autocomplete import AutoComplete
else:
import idlelib.AutoComplete
from idlelib.AutoComplete import AutoComplete
import keyword
ATTRS = 1 if sys.version_info < (3, 7) else 0
class MyAutoComplete(AutoComplete):
def fetch_completions(self, what, mode):
ret = super().fetch_completions(what, mode)
if mode == ATTRS and what == '':
for lst in ret[:2]:
lst.extend(v for v in keyword.kwlist if v not in lst) # `None/True/False` are repetitive.
lst.sort()
return ret
if PY36:
idlelib.autocomplete.AutoComplete = MyAutoComplete
else:
idlelib.AutoComplete.AutoComplete = MyAutoComplete
# - IOBinding ----------------------------------------
if PY36:
import idlelib.iomenu
from idlelib.iomenu import IOBinding
else:
import idlelib.IOBinding
from idlelib.IOBinding import IOBinding
class MyIOBinding(IOBinding):
def __init__(self, editwin):
# F5保存时,调用idlelib.runscript.getfilename(),设置自动保存时进入self.editwin.io.save(None)进行保存
self.save = wrap_function(self.save, after=editwin.after_save)
IOBinding.__init__(self, editwin)
def defaultfilename(self, mode="open"):
if not self.filename:
return self.dirname, time.strftime('Untitled_%Y%m%d_%H%M%S.py')
return super().defaultfilename(mode)
if PY36:
idlelib.iomenu.IOBinding = MyIOBinding
else:
idlelib.IOBinding.IOBinding = MyIOBinding
# - EditorWindow ----------------------------------------
# editor.EditorWindow -> pyshell.PyShellEditorWindow
# editor.EditorWindow -> outwin.OutputWindow -> pyshell.PyShell
if PY36:
import idlelib.editor
from idlelib.editor import EditorWindow
else:
import idlelib.EditorWindow
from idlelib.EditorWindow import EditorWindow
class MyEditorWindow(EditorWindow):
    """EditorWindow subclass adding an "Advance" menu and extension hooks.

    Installed in place of idlelib's EditorWindow (see the assignment below),
    so every editor/shell window created by IDLE picks up the extra menu and
    the extension loading performed here.
    """

    def __init__(self, *args):
        # Register the "Advance" top-level menu exactly once; ``menu_specs``
        # is a class attribute shared by all windows, so guard the append.
        if ('advance', 'Advance') not in self.menu_specs:
            self.menu_specs.append(('advance', 'Advance'))
        # Hook lists extensions can append callbacks to.
        self.before_copy = []
        self.after_save = []
        self.after_close = []
        # must before text binding, so before `EditorWindow.__init__()`
        self.cut = wrap_function(self.cut, before=self.before_copy)  # same as `copy`
        self.copy = wrap_function(self.copy, before=self.before_copy)
        self.close = wrap_function(self.close, before=self.after_close)  # the "<<close-window>>" event does not fire for a title-bar close
        EditorWindow.__init__(self, *args)
        self.amenu = self.menudict['advance']
        self.make_rmenu()  # make "self.rmenu"
        self.recent_files_menu['postcommand'] = self.update_recent_files_list  # fix list not refresh when open another IDLE.
        self.load_adv_extensions()
        self.text.bind('<F12>', self.test)

    def _close(self):
        # Debug aid: log which file is being closed before delegating.
        print('handle with edit _close:', self.io.filename)
        super()._close()
        # raise  # TODO is there another way to prevent the clipboard being cleared?

    def add_adv_menu(self, label, sub, index='end', sp=False):
        """Insert an entry into the "Advance" menu.

        ``sub`` may be a callable (added as a command) or a Menu (added as a
        cascade).  ``sp=True`` inserts a separator first, but only when the
        menu already has entries.
        """
        menu = self.menudict['advance']
        if sp and menu.index('end') is not None:
            menu.insert_separator(index)
        if callable(sub):
            menu.insert_command(index, label=label, command=sub)
        else:
            menu.insert_cascade(index, label=label, menu=sub)

    def load_extension(self, name):
        # for PY34 always raise error.
        if name == 'ZzDummy':
            return
        return super().load_extension(name)

    def load_adv_extensions(self):
        """Load every ``*.py`` file listed in EXTENSIONS, logging failures."""
        for file in EXTENSIONS:
            name, ext = os.path.splitext(os.path.basename(file))
            if ext == '.py' and name not in ('__init__', '__main__'):
                try:
                    self.load_extension(name)  # TODO support importing files from arbitrary locations
                except:
                    print('Failed to import IDLE-Adv extension: %s' % name)
                    import traceback
                    traceback.print_exc()
        # Drop a trailing separator left over when no entry followed it.
        menu = self.menudict['advance']
        if menu.type('end') == 'separator':
            menu.delete('end')

    def test(self, e):
        # Bound to <F12>: dump the Text widget's marks/tags for debugging.
        print('editor on test')
        print('mark_names:', self.text.mark_names())
        print('tag_names:', self.text.tag_names())
        print('functions:', ' '.join(v for v in dir(self.text) if 'tag' in v or 'mark' in v))
# Replace idlelib's EditorWindow with the patched subclass (hot patch).
if PY36:
    idlelib.editor.EditorWindow = MyEditorWindow
else:
    idlelib.EditorWindow.EditorWindow = MyEditorWindow

# - Main ----------------------------------------
# must after hot patch
if PY36:
    from idlelib.pyshell import main
else:
    from idlelib.PyShell import main
def run(filename=None, exts=()):
    """Start IDLE-Advance.

    :param filename: optional script to open in an editor window; forwarded
        to IDLE via ``sys.argv``.
    :param exts: extension files to load; when neither this nor EXTENSIONS
        supplies anything, every file next to this module becomes a candidate.
    """
    fix_path()
    EXTENSIONS.extend(exts)
    if not EXTENSIONS:
        # `abspath` for open in cmd like `python __init__.py` to open script.
        EXTENSIONS.extend(file for file in os.listdir(os.path.dirname(os.path.abspath(__file__))))
    if filename:
        import sys
        sys.argv.append(filename)  # Idea from "~\Lib\tkinter\__main__.py"
    main()
def test_editor(script_file):
    # Open `script_file` in an editor window, loading it as an extension too.
    run(script_file, [script_file])

def test_shell(script_file):
    # Open a shell window with `script_file` loaded as an extension.
    run(None, [script_file])

if __name__ == '__main__':
    # Running this module directly opens it in the patched editor.
    run(__file__)
'''
TODO 参考历史文件的打开方法,用于拖拽打开和恢复打开文件
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
# 打开文件并定位位置(恢复打开)
outwin.py:
self.flist.gotofileline(filename, lineno)
'''
# TODO 当不在instance_dict中将不会刷新(新启动的idle线程)
'''
def update_recent_files_list(self, new_file=None):
...
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict:
menu = instance.recent_files_menu
'''
|
en
| 0.26492
|
# Copyright (c) 2021 Lishixian (znsoooo). All Rights Reserved. # # Distributed under MIT license. # See file LICENSE for detail or copy at https://opensource.org/licenses/MIT 运行__main__.py获得一个加载所有插件的IDLE-Advance的示例文件。 运行__init__.py获得一个打开自身脚本并加载所有插件的editor的例子。 分别运行idlealib目录下的扩展文件,可以得到一个打开自身的editor或shell的例子。 如果需要停用部分扩展,将对应的脚本移出目录后重启IDLE即可。 # - Functions ---------------------------------------- # To fix open shell (call run() without sys.argv) or self.load_extension(name) will not work. # print(func.__name__) # for test # - idleConf ---------------------------------------- # idleConf.userCfg['extensions'].set('ZzDummy', 'enable', 'true') # fix do not show many warning in py34 and py35 # # if idleConf.userCfg['extensions'].has_option('ZzDummy', 'enable'): # ret = idleConf.userCfg['extensions'].Get('ZzDummy', 'enable', type='bool') # print(ret) # # print(idleConf.GetOption('extensions', 'ZzDummy', 'enable', default=True, type='bool')) # sys.exit(0) # idleConf._GetOption, idleConf.GetOption = idleConf.GetOption, print # def GetOption(configType, section, option, *args, **kw): # if section.lower() == 'ZzDummy'.lower(): # print((configType, section, option)) # return False # ret = idleConf._GetOption(configType, section, option, *args, **kw) # return ret # idleConf.GetOption = GetOption # - Calltip ---------------------------------------- # idlelib.calltip._MAX_LINES = 999 # idlelib.calltip._MAX_COLS = 999 # make hook # def fetch_tip(self, expression): # self.expression = expression # self.argspec = super().fetch_tip(expression) # return self.argspec # - AutoComplete ---------------------------------------- # `None/True/False` are repetitive. 
# - IOBinding ---------------------------------------- # F5保存时,调用idlelib.runscript.getfilename(),设置自动保存时进入self.editwin.io.save(None)进行保存 # - EditorWindow ---------------------------------------- # editor.EditorWindow -> pyshell.PyShellEditorWindow # editor.EditorWindow -> outwin.OutputWindow -> pyshell.PyShell # must before text binding, so before `EditorWindow.__init__()` # same as `copy` # "<<close-window>>"事件不命中点击窗口关闭事件 # make "self.rmenu" # fix list not refresh when open another IDLE. # raise # TODO 是否还有别的方法阻止清空剪切 # for PY34 always raise error. # TODO 支持任意位置文件导入 # - Main ---------------------------------------- # must after hot patch # `abspath` for open in cmd like `python __init__.py` to open script. # Idea from "~\Lib\tkinter\__main__.py" TODO 参考历史文件的打开方法,用于拖拽打开和恢复打开文件 def __recent_file_callback(self, file_name): def open_recent_file(fn_closure=file_name): self.io.open(editFile=fn_closure) return open_recent_file # 打开文件并定位位置(恢复打开) outwin.py: self.flist.gotofileline(filename, lineno) # TODO 当不在instance_dict中将不会刷新(新启动的idle线程) def update_recent_files_list(self, new_file=None): ... # for each edit window instance, construct the recent files menu for instance in self.top.instance_dict: menu = instance.recent_files_menu
| 2.171301
| 2
|
Exercicios-Desafios python/Exercicios Resolvidos/ex012.py
|
ThiagoPereira232/python-curso-em-video-mundo01
| 0
|
6627385
|
<reponame>ThiagoPereira232/python-curso-em-video-mundo01
# Read a product's price and show it with a 5% promotional discount applied.
preço = float(input('Qual é o preço do produto? R$'))
novo = preço - (preço * 5 / 100)  # subtract 5% of the original price
print('O produto que custava R${:.2f}, na promoção com desconto de 5% vai custar R${:.2f}'.format(preço, novo))
|
preço = float(input('Qual é o preço do produto? R$'))
novo = preço - (preço * 5 / 100)
print('O produto que custava R${:.2f}, na promoção com desconto de 5% vai custar R${:.2f}'.format(preço, novo))
|
none
| 1
| 3.515155
| 4
|
|
python/bank-account/bank_account.py
|
alessandrodalbello/my-exercism
| 0
|
6627386
|
<reponame>alessandrodalbello/my-exercism<gh_stars>0
from threading import Lock
class BankAccount:
    """A thread-safe bank account supporting open/close, deposit and withdraw.

    Every state transition happens under a single lock; any operation on a
    closed account raises ValueError.
    """

    def __init__(self):
        self.is_open = False
        self.lock = Lock()

    def open(self):
        """Open the account with a zero balance; fail if already open."""
        with self.lock:
            if self.is_open:
                raise ValueError("The bank account is already open.")
            self.balance = 0
            self.is_open = True

    def get_balance(self):
        """Return the current balance of an open account."""
        with self.lock:
            self.__verify_open()
            return self.balance

    def deposit(self, amount):
        """Add a non-negative ``amount`` to the balance."""
        if amount < 0:
            raise ValueError("Amount not valid. Deposit not allowed.")
        with self.lock:
            self.__verify_open()
            self.balance += amount

    def withdraw(self, amount):
        """Remove a non-negative ``amount``, never overdrawing the account."""
        if amount < 0:
            raise ValueError("Amount not valid. Withdraw not allowed.")
        with self.lock:
            self.__verify_open()
            if self.balance < amount:
                raise ValueError("Insufficient amount. Withdraw not allowed.")
            self.balance -= amount

    def close(self):
        """Close an open account."""
        with self.lock:
            self.__verify_open()
            self.is_open = False

    def __verify_open(self):
        # Internal guard; the caller must already hold the lock.
        if not self.is_open:
            raise ValueError("Operation denied. The bank account is not open.")
|
from threading import Lock
class BankAccount:
def __init__(self):
self.is_open = False
self.lock = Lock()
def open(self):
with self.lock:
if not self.is_open:
self.balance = 0
self.is_open = True
else:
raise ValueError("The bank account is already open.")
def get_balance(self):
with self.lock:
self.__verify_open()
return self.balance
def deposit(self, amount):
if amount < 0:
raise ValueError("Amount not valid. Deposit not allowed.")
with self.lock:
self.__verify_open()
self.balance += amount
def withdraw(self, amount):
if amount < 0:
raise ValueError("Amount not valid. Withdraw not allowed.")
with self.lock:
self.__verify_open()
if self.balance >= amount:
self.balance -= amount
else:
raise ValueError("Insufficient amount. Withdraw not allowed.")
def close(self):
with self.lock:
self.__verify_open()
self.is_open = False
def __verify_open(self):
if not self.is_open:
raise ValueError("Operation denied. The bank account is not open.")
|
none
| 1
| 3.737791
| 4
|
|
test/test_origin.py
|
frbor/act-admin
| 0
|
6627387
|
<filename>test/test_origin.py
import pytest
from act.admin.origin import float_or_fatal
def test_float_or_fatal_should_fail() -> None:
    """Each malformed value must terminate the process with exit code 1."""
    fallback = 0.8
    bad_values = ("X", "x0.8", "0.8x", {"trust": 0.8})
    for bad in bad_values:
        # Pattern for asserting on sys.exit():
        # https://medium.com/python-pandemonium/testing-sys-exit-with-pytest-10c6e5f7726f
        with pytest.raises(SystemExit) as exc_info:
            float_or_fatal(bad, fallback)
        assert exc_info.type == SystemExit
        assert exc_info.value.code == 1
def test_float_or_fatal_should_succeed() -> None:
    """Well-formed strings parse to floats; empty/None fall back to default."""
    fallback = 0.8
    cases = [
        ("0.5", 0.5),
        ("0", 0),
        ("1", 1),
        (None, fallback),
        ("", fallback),
    ]
    for value, expected in cases:
        assert float_or_fatal(value, fallback) == expected
|
<filename>test/test_origin.py
import pytest
from act.admin.origin import float_or_fatal
def test_float_or_fatal_should_fail() -> None:
default = 0.8
for value in ("X", "x0.8", "0.8x", {"trust": 0.8}):
# https://medium.com/python-pandemonium/testing-sys-exit-with-pytest-10c6e5f7726f
with pytest.raises(SystemExit) as pytest_wrapped_e:
float_or_fatal(value, default)
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
def test_float_or_fatal_should_succeed() -> None:
default = 0.8
assert float_or_fatal("0.5", default) == 0.5
assert float_or_fatal("0", default) == 0
assert float_or_fatal("1", default) == 1
assert float_or_fatal(None, default) == default
assert float_or_fatal("", default) == default
|
en
| 0.699872
|
# https://medium.com/python-pandemonium/testing-sys-exit-with-pytest-10c6e5f7726f
| 2.361146
| 2
|
geocamPycroraptor2/signals.py
|
geocam/geocamPycroraptor2
| 0
|
6627388
|
<reponame>geocam/geocamPycroraptor2<gh_stars>0
# __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
import signal
# (name, human-readable explanation) pairs for signals worth describing.
SIG_VERBOSE_CONFIG = [('HUP', 'hangup; e.g. tty was closed, unusual under pyraptord'),
                      ('INT', 'interrupt; e.g. a Ctrl-C'),
                      ('ILL', 'illegal instruction; e.g. corrupted binary'),
                      ('ABRT', 'abort; e.g. failed assertion or uncaught exception'),
                      ('BUS', 'bus error; e.g. array out of bounds'),
                      ('KILL', 'kill; e.g. pyraptord stop second attempt'),
                      ('SEGV', 'segmentation fault; e.g. dereferenced null pointer'),
                      ('PIPE', 'broken pipe; e.g. lost network connection'),
                      ('TERM', 'terminate; e.g. pyraptord stop'),
                      ]

# Map each available signal number to {'sigName': ..., 'sigVerbose': ...}.
SIG_VERBOSE = {}
for sig_name, explanation in SIG_VERBOSE_CONFIG:
    sig_num = getattr(signal, 'SIG' + sig_name, None)
    if sig_num is None:
        # This platform does not define the signal; skip it.
        continue
    SIG_VERBOSE[sig_num] = {'sigName': sig_name, 'sigVerbose': explanation}
|
# __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
import signal
SIG_VERBOSE_CONFIG = [('HUP', 'hangup; e.g. tty was closed, unusual under pyraptord'),
('INT', 'interrupt; e.g. a Ctrl-C'),
('ILL', 'illegal instruction; e.g. corrupted binary'),
('ABRT', 'abort; e.g. failed assertion or uncaught exception'),
('BUS', 'bus error; e.g. array out of bounds'),
('KILL', 'kill; e.g. pyraptord stop second attempt'),
('SEGV', 'segmentation fault; e.g. dereferenced null pointer'),
('PIPE', 'broken pipe; e.g. lost network connection'),
('TERM', 'terminate; e.g. pyraptord stop'),
]
SIG_VERBOSE = {}
for name, verbose in SIG_VERBOSE_CONFIG:
try:
sigNum = getattr(signal, 'SIG' + name)
except AttributeError:
continue # doh, can't look up number for signal name on this platform
SIG_VERBOSE[sigNum] = dict(sigName=name, sigVerbose=verbose)
|
en
| 0.851766
|
# __BEGIN_LICENSE__ # Copyright (C) 2008-2010 United States Government as represented by # the Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # __END_LICENSE__ # doh, can't look up number for signal name on this platform
| 1.930315
| 2
|
rally/common/db/schema.py
|
CSCfi/rally
| 0
|
6627389
|
<reponame>CSCfi/rally<gh_stars>0
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os

import alembic
import alembic.command
import alembic.config
import alembic.migration
import alembic.script
import sqlalchemy as sa
import sqlalchemy.schema  # noqa

from rally.common.db import api
from rally.common.db import models
from rally import exceptions
INITIAL_REVISION_UUID = "ca3626f62937"
def _alembic_config():
    """Build an alembic Config pointing at the alembic.ini next to this module."""
    ini_path = os.path.join(os.path.dirname(__file__), "alembic.ini")
    return alembic.config.Config(ini_path)
def schema_cleanup():
    """Drop all database objects.

    Drops all database objects remaining on the default schema of the given
    engine. Per-db implementations will also need to drop items specific to
    those systems, such as sequences, custom types (e.g. pg ENUM), etc.
    """
    engine = api.get_engine()
    with engine.begin() as conn:
        inspector = sa.inspect(engine)
        metadata = sa.schema.MetaData()
        tbs = []
        all_fks = []
        # Collect every table together with its named foreign keys so the
        # constraints can be dropped first (required on back ends that
        # enforce referential integrity on DROP TABLE).
        for table_name in inspector.get_table_names():
            fks = []
            for fk in inspector.get_foreign_keys(table_name):
                if not fk["name"]:
                    # Unnamed constraints cannot be dropped explicitly.
                    continue
                fks.append(
                    sa.schema.ForeignKeyConstraint((), (), name=fk["name"]))
            table = sa.schema.Table(table_name, metadata, *fks)
            tbs.append(table)
            all_fks.extend(fks)
        # SQLite does not support dropping constraints separately.
        if engine.name != "sqlite":
            for fkc in all_fks:
                conn.execute(sa.schema.DropConstraint(fkc))
        for table in tbs:
            conn.execute(sa.schema.DropTable(table))
        # PostgreSQL additionally keeps ENUM types around; drop them too.
        if engine.name == "postgresql":
            # SQLAlchemy >= 1.0 exposes enums through the inspector.
            sqla_100 = int(sa.__version__.split(".")[0]) >= 1
            if sqla_100:
                enums = [e["name"] for e in sa.inspect(conn).get_enums()]
            else:
                enums = conn.dialect._load_enums(conn).keys()
            for e in enums:
                conn.execute("DROP TYPE %s" % e)
def schema_revision(config=None, engine=None, detailed=False):
    """Current database revision.

    :param config: Instance of alembic config
    :param engine: Instance of DB engine
    :param detailed: whether to return a dict with detailed data
    :rtype detailed: bool
    :returns: Database revision
    :rtype: string
    :rtype: dict
    """
    engine = engine or api.get_engine()
    with engine.connect() as conn:
        context = alembic.migration.MigrationContext.configure(conn)
        # Revision currently stamped in the database (None if never stamped).
        revision = context.get_current_revision()
    if detailed:
        config = config or _alembic_config()
        sc_dir = alembic.script.ScriptDirectory.from_config(config)
        return {"revision": revision,
                "current_head": sc_dir.get_current_head()}
    return revision
def schema_upgrade(revision=None, config=None, engine=None):
    """Upgrade the database schema to the given revision.

    :param revision: Desired database version; defaults to "head"
    :type revision: string
    :param config: Instance of alembic config
    :param engine: Instance of DB engine
    """
    revision = revision or "head"
    config = config or _alembic_config()
    engine = engine or api.get_engine()
    # A database created before alembic was introduced carries no revision
    # stamp yet; mark it with the initial revision so upgrade scripts run
    # from the correct starting point instead of replaying everything.
    if schema_revision(engine=engine) is None:
        schema_stamp(INITIAL_REVISION_UUID, config=config)
    # ``revision`` was already normalized above; the original repeated the
    # ``or "head"`` fallback redundantly here.
    alembic.command.upgrade(config, revision)
def schema_create(config=None, engine=None):
    """Create database schema from models description.

    Can be used for initial installation instead of upgrade('head').

    :param config: Instance of alembic config
    :param engine: Instance of DB engine
    :raises DBMigrationError: if the schema is already version-controlled
    """
    engine = engine or api.get_engine()
    # NOTE(viktors): If we will use metadata.create_all() for non empty db
    #                schema, it will only add the new tables, but leave
    #                existing as is. So we should avoid of this situation.
    if schema_revision(engine=engine) is not None:
        raise exceptions.DBMigrationError("DB schema is already under version"
                                          " control. Use upgrade() instead")
    models.BASE.metadata.create_all(engine)
    # Mark the freshly created schema as being at the latest revision.
    schema_stamp("head", config=config)
def schema_stamp(revision, config=None):
    """Stamps database with provided revision.

    Don't run any migrations.

    :param revision: Should match one from repository or head - to stamp
                     database with most recent revision
    :type revision: string
    :param config: Instance of alembic config
    """
    config = config or _alembic_config()
    # Writes only the alembic version table; no migration scripts execute.
    return alembic.command.stamp(config, revision=revision)
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import alembic
import alembic.config
import alembic.migration
import alembic.script
import sqlalchemy as sa
import sqlalchemy.schema # noqa
from rally.common.db import api
from rally.common.db import models
from rally import exceptions
INITIAL_REVISION_UUID = "ca3626f62937"
def _alembic_config():
path = os.path.join(os.path.dirname(__file__), "alembic.ini")
config = alembic.config.Config(path)
return config
def schema_cleanup():
"""Drop all database objects.
Drops all database objects remaining on the default schema of the given
engine. Per-db implementations will also need to drop items specific to
those systems, such as sequences, custom types (e.g. pg ENUM), etc.
"""
engine = api.get_engine()
with engine.begin() as conn:
inspector = sa.inspect(engine)
metadata = sa.schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk["name"]:
continue
fks.append(
sa.schema.ForeignKeyConstraint((), (), name=fk["name"]))
table = sa.schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
if engine.name != "sqlite":
for fkc in all_fks:
conn.execute(sa.schema.DropConstraint(fkc))
for table in tbs:
conn.execute(sa.schema.DropTable(table))
if engine.name == "postgresql":
sqla_100 = int(sa.__version__.split(".")[0]) >= 1
if sqla_100:
enums = [e["name"] for e in sa.inspect(conn).get_enums()]
else:
enums = conn.dialect._load_enums(conn).keys()
for e in enums:
conn.execute("DROP TYPE %s" % e)
def schema_revision(config=None, engine=None, detailed=False):
"""Current database revision.
:param config: Instance of alembic config
:param engine: Instance of DB engine
:param detailed: whether to return a dict with detailed data
:rtype detailed: bool
:returns: Database revision
:rtype: string
:rtype: dict
"""
engine = engine or api.get_engine()
with engine.connect() as conn:
context = alembic.migration.MigrationContext.configure(conn)
revision = context.get_current_revision()
if detailed:
config = config or _alembic_config()
sc_dir = alembic.script.ScriptDirectory.from_config(config)
return {"revision": revision,
"current_head": sc_dir.get_current_head()}
return revision
def schema_upgrade(revision=None, config=None, engine=None):
"""Used for upgrading database.
:param revision: Desired database version
:type revision: string
:param config: Instance of alembic config
:param engine: Instance of DB engine
"""
revision = revision or "head"
config = config or _alembic_config()
engine = engine or api.get_engine()
if schema_revision(engine=engine) is None:
schema_stamp(INITIAL_REVISION_UUID, config=config)
alembic.command.upgrade(config, revision or "head")
def schema_create(config=None, engine=None):
"""Create database schema from models description.
Can be used for initial installation instead of upgrade('head').
:param config: Instance of alembic config
:param engine: Instance of DB engine
"""
engine = engine or api.get_engine()
# NOTE(viktors): If we will use metadata.create_all() for non empty db
# schema, it will only add the new tables, but leave
# existing as is. So we should avoid of this situation.
if schema_revision(engine=engine) is not None:
raise exceptions.DBMigrationError("DB schema is already under version"
" control. Use upgrade() instead")
models.BASE.metadata.create_all(engine)
schema_stamp("head", config=config)
def schema_stamp(revision, config=None):
"""Stamps database with provided revision.
Don't run any migrations.
:param revision: Should match one from repository or head - to stamp
database with most recent revision
:type revision: string
:param config: Instance of alembic config
"""
config = config or _alembic_config()
return alembic.command.stamp(config, revision=revision)
|
en
| 0.765257
|
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # noqa Drop all database objects. Drops all database objects remaining on the default schema of the given engine. Per-db implementations will also need to drop items specific to those systems, such as sequences, custom types (e.g. pg ENUM), etc. Current database revision. :param config: Instance of alembic config :param engine: Instance of DB engine :param detailed: whether to return a dict with detailed data :rtype detailed: bool :returns: Database revision :rtype: string :rtype: dict Used for upgrading database. :param revision: Desired database version :type revision: string :param config: Instance of alembic config :param engine: Instance of DB engine Create database schema from models description. Can be used for initial installation instead of upgrade('head'). :param config: Instance of alembic config :param engine: Instance of DB engine # NOTE(viktors): If we will use metadata.create_all() for non empty db # schema, it will only add the new tables, but leave # existing as is. So we should avoid of this situation. Stamps database with provided revision. Don't run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string :param config: Instance of alembic config
| 1.938086
| 2
|
test/tokenizer_test/tokenize_corpus/local_mode.py
|
AnonymousAuthor2013/KnowAlpha
| 2
|
6627390
|
<reponame>AnonymousAuthor2013/KnowAlpha<gh_stars>1-10
from programmingalpha.tokenizers import get_tokenizer
import programmingalpha
import argparse
import os
import multiprocessing
import tqdm
from programmingalpha.Utility import getLogger
logger=getLogger(__name__)
path_map_tokenizers={
"bert":programmingalpha.BertBaseUnCased,
"gpt2": programmingalpha.GPT2Base,
"xlnet": programmingalpha.XLNetBaseCased,
"roberta":programmingalpha.RoBertaBase
}
def init():
    """Pool initializer: build the per-process tokenizer chosen by --tokenizer.

    Stored in a module-level global because multiprocessing workers cannot
    share the tokenizer object across process boundaries.
    """
    global tokenizer
    name= args.tokenizer
    tokenizer=get_tokenizer(path_map_tokenizers[name], name)
def tokenize(text):
    """Tokenize one line of text; special tokens are not added (add_sp=False)."""
    tokenized_text=tokenizer.tokenizeLine(text, add_sp=False)
    return tokenized_text
def tokenizeParallel(doc_data):
    """Tokenize ``doc_data`` with a worker pool, writing results to ``outputfile``.

    Documents are processed in batches of ``args.batch_size``; each batch's
    results are flushed to disk before the next batch starts, keeping memory
    bounded for large corpora.
    """
    cache=[]
    batch_size=args.batch_size
    batches=[doc_data[i:i+batch_size] for i in range(0,len(doc_data),batch_size)]
    # Each worker builds its own tokenizer via the ``init`` initializer.
    workers=multiprocessing.Pool(args.workers, initializer=init)
    with open(outputfile,"w") as f:
        for batch_doc in tqdm.tqdm(batches,desc="tokenizing documents multi-progress"):
            for record in workers.map(tokenize,batch_doc):
                if record is not None:
                    cache.append(record+"\n")
            f.writelines(cache)
            cache.clear()
    workers.close()
    workers.join()
if __name__ == '__main__':
    parser=argparse.ArgumentParser()
    parser.add_argument("--tokenizer",type=str,default="bert", choices=["bert", "roberta", "gpt2","xlnet"])
    parser.add_argument("--file",type=str,default="")
    parser.add_argument("--workers",type=int,default=30)
    parser.add_argument("--batch_size",type=int,default=1000)
    args=parser.parse_args()
    # Fall back to BERT if an unknown tokenizer slips past argparse choices.
    if args.tokenizer not in path_map_tokenizers:
        args.tokenizer="bert"
    inputfile=args.file
    # Output path mirrors the input, with a per-tokenizer suffix.
    outputfile=inputfile.replace(".txt",".tokenized-"+args.tokenizer)
    with open(inputfile, "r", encoding="utf-8") as f:
        docs=f.readlines()
    # Drop empty/whitespace-only lines before tokenization.
    doc_data=filter(lambda s: s and s.strip(), docs)
    doc_data=list(doc_data)
    logger.info("loaded {}/{} lines of text".format(len(doc_data), len(docs)))
    tokenizeParallel(doc_data)
    '''s="You can use [NUM] and [CODE] to finish your work."
    init()
    print(tokenizer.tokenizer.vocab_size, len(tokenizer.tokenizer))
    print(tokenizer.tokenizer.tokenize(s))
    s_t=tokenize(s)
    print(s)
    print(s_t)'''
|
from programmingalpha.tokenizers import get_tokenizer
import programmingalpha
import argparse
import os
import multiprocessing
import tqdm
from programmingalpha.Utility import getLogger
logger=getLogger(__name__)
path_map_tokenizers={
"bert":programmingalpha.BertBaseUnCased,
"gpt2": programmingalpha.GPT2Base,
"xlnet": programmingalpha.XLNetBaseCased,
"roberta":programmingalpha.RoBertaBase
}
def init():
global tokenizer
name= args.tokenizer
tokenizer=get_tokenizer(path_map_tokenizers[name], name)
def tokenize(text):
tokenized_text=tokenizer.tokenizeLine(text, add_sp=False)
return tokenized_text
def tokenizeParallel(doc_data):
cache=[]
batch_size=args.batch_size
batches=[doc_data[i:i+batch_size] for i in range(0,len(doc_data),batch_size)]
workers=multiprocessing.Pool(args.workers, initializer=init)
with open(outputfile,"w") as f:
for batch_doc in tqdm.tqdm(batches,desc="tokenizing documents multi-progress"):
for record in workers.map(tokenize,batch_doc):
if record is not None:
cache.append(record+"\n")
f.writelines(cache)
cache.clear()
workers.close()
workers.join()
if __name__ == '__main__':
parser=argparse.ArgumentParser()
parser.add_argument("--tokenizer",type=str,default="bert", choices=["bert", "roberta", "gpt2","xlnet"])
parser.add_argument("--file",type=str,default="")
parser.add_argument("--workers",type=int,default=30)
parser.add_argument("--batch_size",type=int,default=1000)
args=parser.parse_args()
if args.tokenizer not in path_map_tokenizers:
args.tokenizer="bert"
inputfile=args.file
outputfile=inputfile.replace(".txt",".tokenized-"+args.tokenizer)
with open(inputfile, "r", encoding="utf-8") as f:
docs=f.readlines()
doc_data=filter(lambda s: s and s.strip(), docs)
doc_data=list(doc_data)
logger.info("loaded {}/{} lines of text".format(len(doc_data), len(docs)))
tokenizeParallel(doc_data)
'''s="You can use [NUM] and [CODE] to finish your work."
init()
print(tokenizer.tokenizer.vocab_size, len(tokenizer.tokenizer))
print(tokenizer.tokenizer.tokenize(s))
s_t=tokenize(s)
print(s)
print(s_t)'''
|
en
| 0.314521
|
s="You can use [NUM] and [CODE] to finish your work." init() print(tokenizer.tokenizer.vocab_size, len(tokenizer.tokenizer)) print(tokenizer.tokenizer.tokenize(s)) s_t=tokenize(s) print(s) print(s_t)
| 2.530933
| 3
|
tests/test_service.py
|
IMULMUL/PythonForWindows
| 479
|
6627391
|
import pytest
import windows
import windows.generated_def as gdef
def test_services_process():
    # Pick any service that currently has a running process and check that
    # the `.process` helper resolves to the PID reported by the SCM status.
    services_with_process = [s for s in windows.system.services if s.status.dwProcessId]
    service = services_with_process[0]
    proc = service.process
    assert proc.pid == service.status.dwProcessId
def test_service_appinfo():
    # Look up the well-known "Appinfo" service and sanity-check its metadata.
    appinfo = windows.system.services[b"Appinfo"]
    assert appinfo.status.type & gdef.SERVICE_WIN32_OWN_PROCESS
    # Check other fields
    assert appinfo.name == b"Appinfo"
    assert appinfo.description == b"Application Information"
def test_service_start():
    faxservice = windows.system.services[b"Fax"]
    # Just start a random service with a string argument.
    # Used to check string compat in py2/py3
    faxservice.start(b"TEST STRING")
|
import pytest
import windows
import windows.generated_def as gdef
def test_services_process():
services_with_process = [s for s in windows.system.services if s.status.dwProcessId]
service = services_with_process[0]
proc = service.process
assert proc.pid == service.status.dwProcessId
def test_service_appinfo():
appinfo = windows.system.services[b"Appinfo"]
assert appinfo.status.type & gdef.SERVICE_WIN32_OWN_PROCESS
# Check other fields
assert appinfo.name == b"Appinfo"
assert appinfo.description == b"Application Information"
def test_service_start():
faxservice = windows.system.services[b"Fax"]
# Just start a random serivce with a string
# Used to check string compat in py2/py3
faxservice.start(b"TEST STRING")
|
en
| 0.459673
|
# Check other fields # Just start a random serivce with a string # Used to check string compat in py2/py3
| 2.278495
| 2
|
JHarm/Parse/Fail2Ban.py
|
lukasbalazik123/jharm
| 1
|
6627392
|
<reponame>lukasbalazik123/jharm
import AnyToJson
import re
import os
class Fail2Ban(AnyToJson.AnyToJson):
    """Load fail2ban filter definitions and compile their failregex patterns.

    Reads every ``*.conf`` under the configured fail2ban ``filter.d``
    directory, resolves ``%(name)s`` placeholders and the special ``<HOST>``
    token, and compiles the resulting ``failregex`` entries into ``self.regs``.
    """

    def __init__(self):
        super().__init__()
        self.i = 0                  # counter for multi-line regex continuations
        self.regs = {}              # compiled failregex patterns keyed by name
        self.prepared_regex = {}    # raw regex fragments keyed by placeholder name
        self.files_readed = []      # filter files already processed
        self.source = self.conf.get("detection", "source")
        # Seed the two placeholders that every filter relies on.
        self.prepared_regex["HOST"] = self.conf.get("config", "host_regex")
        self.prepared_regex["__prefix_line"] = self.conf.get("config", "prefix_regex")
        for fl in os.listdir(self.conf.get("config", "fail2ban_dir")+"/filter.d/"):
            if fl.endswith(".conf"):
                self.readfile(fl)
        # NOTE(review): ``str.strip(".conf")`` strips any of the characters
        # '.', 'c', 'o', 'n', 'f' from both ends (not the suffix), and the
        # ``map`` object is a one-shot iterator, so membership tests after it
        # is first exhausted always return False.  A list of
        # ``f[:-len(".conf")]`` slices is probably intended — TODO confirm.
        self.files_readed = map(lambda each:each.strip(".conf"), self.files_readed)
        for key, value in self.prepared_regex.items():
            if "failregex" in key:
                self.regs[key] = re.compile(value)

    def readfile(self, fl):
        """Parse one filter file, recursively loading its ``before =`` include first."""
        with open("/etc/fail2ban/filter.d/"+fl, errors='replace') as f:
            lines = f.readlines()
        goto = ""
        for line in lines:
            if line.startswith("before = "):
                goto = (line.split("before = ")[1]).rstrip()
        if goto and goto not in self.files_readed:
            self.readfile(goto)
        for line in lines:
            self.get_regex(line)
        if fl not in self.files_readed:
            self.files_readed.append(fl)

    def get_regex(self, line):
        """Resolve placeholders in one config line and record regex definitions."""
        # Skip comments, section headers, blank lines and ignoreregex entries.
        if line.startswith("#") or line.startswith("[") or line.startswith("\n") or line.startswith("ignoreregex"):
            return None
        line = line.rstrip()
        # Expand %(name)s placeholders against previously collected fragments.
        match = re.findall("%\(([^\)]*)\)s", line)
        for name in match:
            # NOTE(review): the second operand is a non-empty string literal,
            # so this condition is always true and the named-group branch
            # below is unreachable; presumably it was meant to test
            # ``"?P<"+name+">" in self.prepared_regex[name]`` — TODO confirm.
            if "_" in name or "?P<"+name+">":
                line = re.sub("%\("+name+"\)s", self.prepared_regex[name], line)
                continue
            line = re.sub("%\("+name+"\)s", "(?P<"+name+">"+self.prepared_regex[name]+")", line)
        line = re.sub("<HOST>", "(?P<HOST>"+self.prepared_regex["HOST"]+")", line) # replace special fail2ban variable
        line = re.sub("\?P\(", "(", line) # fix broken regex
        data = line.split("=", 1)
        if len(data) == 2:
            # "key = value" line: remember the definition (except the prefix).
            if "__prefix_line" != data[0].rstrip():
                self.prepared_regex[data[0].rstrip()] = data[1].lstrip()
            self.rkey = data[0].rstrip()
        else:
            # Continuation line: store under the last seen key with a counter.
            self.prepared_regex[self.rkey+str(self.i)] = data[0].lstrip()
            self.i += 1
|
import AnyToJson
import re
import os
class Fail2Ban(AnyToJson.AnyToJson):
def __init__(self):
super().__init__()
self.i = 0
self.regs = {}
self.prepared_regex = {}
self.files_readed = []
self.source = self.conf.get("detection", "source")
self.prepared_regex["HOST"] = self.conf.get("config", "host_regex")
self.prepared_regex["__prefix_line"] = self.conf.get("config", "prefix_regex")
for fl in os.listdir(self.conf.get("config", "fail2ban_dir")+"/filter.d/"):
if fl.endswith(".conf"):
self.readfile(fl)
self.files_readed = map(lambda each:each.strip(".conf"), self.files_readed)
for key, value in self.prepared_regex.items():
if "failregex" in key:
self.regs[key] = re.compile(value)
def readfile(self, fl):
with open("/etc/fail2ban/filter.d/"+fl, errors='replace') as f:
lines = f.readlines()
goto = ""
for line in lines:
if line.startswith("before = "):
goto = (line.split("before = ")[1]).rstrip()
if goto and goto not in self.files_readed:
self.readfile(goto)
for line in lines:
self.get_regex(line)
if fl not in self.files_readed:
self.files_readed.append(fl)
def get_regex(self, line):
if line.startswith("#") or line.startswith("[") or line.startswith("\n") or line.startswith("ignoreregex"):
return None
line = line.rstrip()
match = re.findall("%\(([^\)]*)\)s", line)
for name in match:
if "_" in name or "?P<"+name+">":
line = re.sub("%\("+name+"\)s", self.prepared_regex[name], line)
continue
line = re.sub("%\("+name+"\)s", "(?P<"+name+">"+self.prepared_regex[name]+")", line)
line = re.sub("<HOST>", "(?P<HOST>"+self.prepared_regex["HOST"]+")", line) # replace special fail2ban variable
line = re.sub("\?P\(", "(", line) # fix broken regex
data = line.split("=", 1)
if len(data) == 2:
if "__prefix_line" != data[0].rstrip():
self.prepared_regex[data[0].rstrip()] = data[1].lstrip()
self.rkey = data[0].rstrip()
else:
self.prepared_regex[self.rkey+str(self.i)] = data[0].lstrip()
self.i += 1
|
en
| 0.385269
|
# replace special fail2ban variable # fix broken regex
| 2.750099
| 3
|
Beginning/python3_cookbook/ch02/16-textwrap.py
|
XiangyuDing/Beginning-Python
| 1
|
6627393
|
<reponame>XiangyuDing/Beginning-Python<filename>Beginning/python3_cookbook/ch02/16-textwrap.py
# 2.16 以指定列宽格式化字符串
s = "Look into my eyes, look into my eyes, the eyes, the eyes, \
the eyes, not around the eyes, don't look around the eyes, \
look into my eyes, you're under."
import textwrap
print(textwrap.fill(s,70))
print(textwrap.fill(s,40))
print(textwrap.fill(s,40,initial_indent=' '))
print(textwrap.fill(s,40,subsequent_indent=' '))
import os
# print(os.get_terminal_size().columns) doens't work
|
# 2.16 以指定列宽格式化字符串
s = "Look into my eyes, look into my eyes, the eyes, the eyes, \
the eyes, not around the eyes, don't look around the eyes, \
look into my eyes, you're under."
import textwrap
print(textwrap.fill(s,70))
print(textwrap.fill(s,40))
print(textwrap.fill(s,40,initial_indent=' '))
print(textwrap.fill(s,40,subsequent_indent=' '))
import os
# print(os.get_terminal_size().columns) doens't work
|
en
| 0.111133
|
# 2.16 以指定列宽格式化字符串 # print(os.get_terminal_size().columns) doens't work
| 3.652016
| 4
|
garageofcode/pizza/main.py
|
tpi12jwe/garageofcode
| 2
|
6627394
|
<reponame>tpi12jwe/garageofcode<gh_stars>1-10
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from garageofcode.common.utils import flatten_simple as flatten
from garageofcode.common.utils import get_fn
from garageofcode.common.interval_utils import get_corners2, interval_overlap2
from garageofcode.sat.assignment import interval_selection, interval_selection2
def read_infile(fn):
mat = []
char2int = {"M": 0, "T": 1}
with open(fn, "r") as f:
lines = f.read().split("\n")
header = lines[0]
lines = lines[1:]
global R
global C
global L
global H
R, C, L, H = map(int, header.split(" "))
for line in lines:
mat.append([char2int[ch] for ch in line if line])
return mat
def draw_border(ax, c, **kwargs):
corners = get_corners2(c)
corners.append(corners[0])
x, y = zip(*corners)
ax.plot(x, y, **kwargs)
def draw_solution(mat, coords):
fig, ax = plt.subplots()
adj = 0.1
N, M = mat.shape
for i in range(N):
for j in range(M):
val = mat[i, j]
if val:
alpha = 0.1
else:
alpha = 0.9
patch = Rectangle((i+adj, j+adj), 1-2*adj, 1-2*adj,
alpha=alpha,
facecolor="k")
ax.add_patch(patch)
draw_border(ax, (0, N, 0, M), color='k', linewidth=3)
for c0 in coords:
for c1 in coords:
if c0 >= c1:
continue
if interval_overlap2(c0, c1):
print("Overlapping: {}, {}".format(c0, c1))
for c in coords:
ix, jx, iy, jy = c
wx = jx - ix
wy = jy - iy
patch = Rectangle((ix+adj, iy+adj), wx-2*adj, wy-2*adj,
alpha=0.5)
ax.add_patch(patch)
draw_border(ax, c, linewidth=1)
plt.title("Black: M, White: T, Blue: slice \nL={}, H={}".format(L, H))
plt.axis("off")
plt.plot()
plt.show()
def feasible(c):
try:
c = flatten(c)
except Exception:
pass
if len(c) > H:
return False
if sum(c) < L:
return False
if len(c) - sum(c) < L:
return False
return True
def feasible_in_row(row):
return [feasible(row[i:i+H]) for i in range(len(row))]
def get_score(coords):
return sum([(jx-ix) * (jy-iy) for (ix, jx, iy, jy) in coords])
def maximize_mat(mat):
if not list(mat):
return 0
t0 = time.time()
coords = interval_selection2(mat, feasible)
t1 = time.time()
#print("Mat time: {0:.3f}".format(t1 - t0))
score = get_score(coords)
#print("Mat score:", score)
#print(coords)
#draw_solution(mat, coords)
return score
def maximize_row(row):
#print(row, ":")
t0 = time.time()
score = interval_selection(row, feasible, max_len=14)
t1 = time.time()
#print("Row time: {0:.3f}".format(t1 - t0))
#score = sum([j - i + 1 for i, j in coords], 0)
#for i, j in coords:
# print("\t", row[i:j+1])
#print("Row score:", sum([j - i + 1 for i, j in coords], 0))
return 0
#print()
def main():
fn = "/home/jdw/garageofcode/data/pizza/medium.in"
mat = read_infile(fn)
mat = np.array(mat)
res = 12
N, M = mat.shape
score = 0.0
num_iters = 0
num_elems = 0
missed = 0
for i in range(0, N, res):
for j in range(0, M, res):
submat = mat[i:i+res, j:j+res]
subnum_elems = np.size(submat)
subscore = maximize_mat(submat)
submissed = subnum_elems - subscore
num_elems += subnum_elems
score += subscore
missed += submissed
num_iters += 1
#print("Average: {0:.3f}%".format(score / num_iters / num_elems * 100))
print("Completed: {0:.2f}%".format(num_elems / (N*M) * 100))
print("Missed: {}".format(missed))
print("Total score:", score)
#[print(row) for row in mat]
#print()
#for row in mat:
# print(feasible_in_row(row))
'''
score = 0
for row in mat[:10]:
score += maximize_row(row)
print("Total score:", score)
'''
if __name__ == '__main__':
main()
|
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from garageofcode.common.utils import flatten_simple as flatten
from garageofcode.common.utils import get_fn
from garageofcode.common.interval_utils import get_corners2, interval_overlap2
from garageofcode.sat.assignment import interval_selection, interval_selection2
def read_infile(fn):
mat = []
char2int = {"M": 0, "T": 1}
with open(fn, "r") as f:
lines = f.read().split("\n")
header = lines[0]
lines = lines[1:]
global R
global C
global L
global H
R, C, L, H = map(int, header.split(" "))
for line in lines:
mat.append([char2int[ch] for ch in line if line])
return mat
def draw_border(ax, c, **kwargs):
corners = get_corners2(c)
corners.append(corners[0])
x, y = zip(*corners)
ax.plot(x, y, **kwargs)
def draw_solution(mat, coords):
fig, ax = plt.subplots()
adj = 0.1
N, M = mat.shape
for i in range(N):
for j in range(M):
val = mat[i, j]
if val:
alpha = 0.1
else:
alpha = 0.9
patch = Rectangle((i+adj, j+adj), 1-2*adj, 1-2*adj,
alpha=alpha,
facecolor="k")
ax.add_patch(patch)
draw_border(ax, (0, N, 0, M), color='k', linewidth=3)
for c0 in coords:
for c1 in coords:
if c0 >= c1:
continue
if interval_overlap2(c0, c1):
print("Overlapping: {}, {}".format(c0, c1))
for c in coords:
ix, jx, iy, jy = c
wx = jx - ix
wy = jy - iy
patch = Rectangle((ix+adj, iy+adj), wx-2*adj, wy-2*adj,
alpha=0.5)
ax.add_patch(patch)
draw_border(ax, c, linewidth=1)
plt.title("Black: M, White: T, Blue: slice \nL={}, H={}".format(L, H))
plt.axis("off")
plt.plot()
plt.show()
def feasible(c):
try:
c = flatten(c)
except Exception:
pass
if len(c) > H:
return False
if sum(c) < L:
return False
if len(c) - sum(c) < L:
return False
return True
def feasible_in_row(row):
return [feasible(row[i:i+H]) for i in range(len(row))]
def get_score(coords):
return sum([(jx-ix) * (jy-iy) for (ix, jx, iy, jy) in coords])
def maximize_mat(mat):
if not list(mat):
return 0
t0 = time.time()
coords = interval_selection2(mat, feasible)
t1 = time.time()
#print("Mat time: {0:.3f}".format(t1 - t0))
score = get_score(coords)
#print("Mat score:", score)
#print(coords)
#draw_solution(mat, coords)
return score
def maximize_row(row):
#print(row, ":")
t0 = time.time()
score = interval_selection(row, feasible, max_len=14)
t1 = time.time()
#print("Row time: {0:.3f}".format(t1 - t0))
#score = sum([j - i + 1 for i, j in coords], 0)
#for i, j in coords:
# print("\t", row[i:j+1])
#print("Row score:", sum([j - i + 1 for i, j in coords], 0))
return 0
#print()
def main():
fn = "/home/jdw/garageofcode/data/pizza/medium.in"
mat = read_infile(fn)
mat = np.array(mat)
res = 12
N, M = mat.shape
score = 0.0
num_iters = 0
num_elems = 0
missed = 0
for i in range(0, N, res):
for j in range(0, M, res):
submat = mat[i:i+res, j:j+res]
subnum_elems = np.size(submat)
subscore = maximize_mat(submat)
submissed = subnum_elems - subscore
num_elems += subnum_elems
score += subscore
missed += submissed
num_iters += 1
#print("Average: {0:.3f}%".format(score / num_iters / num_elems * 100))
print("Completed: {0:.2f}%".format(num_elems / (N*M) * 100))
print("Missed: {}".format(missed))
print("Total score:", score)
#[print(row) for row in mat]
#print()
#for row in mat:
# print(feasible_in_row(row))
'''
score = 0
for row in mat[:10]:
score += maximize_row(row)
print("Total score:", score)
'''
if __name__ == '__main__':
main()
|
en
| 0.347802
|
#print("Mat time: {0:.3f}".format(t1 - t0)) #print("Mat score:", score) #print(coords) #draw_solution(mat, coords) #print(row, ":") #print("Row time: {0:.3f}".format(t1 - t0)) #score = sum([j - i + 1 for i, j in coords], 0) #for i, j in coords: # print("\t", row[i:j+1]) #print("Row score:", sum([j - i + 1 for i, j in coords], 0)) #print() #print("Average: {0:.3f}%".format(score / num_iters / num_elems * 100)) #[print(row) for row in mat] #print() #for row in mat: # print(feasible_in_row(row)) score = 0 for row in mat[:10]: score += maximize_row(row) print("Total score:", score)
| 2.846455
| 3
|
apps/taiga/back/django/gunicorn_config.py
|
stephenhillier/openshift-components
| 9
|
6627395
|
worker_class = 'gevent'
|
worker_class = 'gevent'
|
none
| 1
| 1.057154
| 1
|
|
liikkuja.py
|
UrsaOK/supertassu
| 0
|
6627396
|
from merkki import Sprite
class Liikkuja(Sprite):
def __init__(self, x, y, merkki, taso):
print("liikkuja init")
super(Liikkuja, self).__init__(x, y, merkki)
self.taso = taso
def liiku(self, suunta):
print("liikkuja liiku")
uusix = self.x + suunta[0]
uusiy = self.y + suunta[1]
if self.taso.kartta[uusix][uusiy].tyhja:
self.x = uusix
self.y = uusiy
return True
else:
return False
|
from merkki import Sprite
class Liikkuja(Sprite):
def __init__(self, x, y, merkki, taso):
print("liikkuja init")
super(Liikkuja, self).__init__(x, y, merkki)
self.taso = taso
def liiku(self, suunta):
print("liikkuja liiku")
uusix = self.x + suunta[0]
uusiy = self.y + suunta[1]
if self.taso.kartta[uusix][uusiy].tyhja:
self.x = uusix
self.y = uusiy
return True
else:
return False
|
none
| 1
| 2.873711
| 3
|
|
octodns/source/tinydns.py
|
Smallcubed/octodns
| 3
|
6627397
|
<gh_stars>1-10
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from ipaddress import ip_address
from os import listdir
from os.path import join
import logging
import re
import textwrap
from ..record import Record
from ..zone import DuplicateRecordException, SubzoneRecordException
from .base import BaseSource
class TinyDnsBaseSource(BaseSource):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'CNAME', 'MX', 'NS', 'TXT', 'AAAA'))
split_re = re.compile(r':+')
def __init__(self, id, default_ttl=3600):
super(TinyDnsBaseSource, self).__init__(id)
self.default_ttl = default_ttl
def _data_for_A(self, _type, records):
values = []
for record in records:
if record[0] != '0.0.0.0':
values.append(record[0])
if len(values) == 0:
return
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_AAAA(self, _type, records):
values = []
for record in records:
# TinyDNS files have the ipv6 address written in full, but with the
# colons removed. This inserts a colon every 4th character to make
# the address correct.
values.append(u":".join(textwrap.wrap(record[0], 4)))
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_TXT(self, _type, records):
values = []
for record in records:
new_value = record[0].encode('latin1').decode('unicode-escape') \
.replace(";", "\\;")
values.append(new_value)
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_CNAME(self, _type, records):
first = records[0]
try:
ttl = first[1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'value': '{}.'.format(first[0])
}
def _data_for_MX(self, _type, records):
try:
ttl = records[0][2]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': [{
'preference': r[1],
'exchange': '{}.'.format(r[0])
} for r in records]
}
def _data_for_NS(self, _type, records):
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': ['{}.'.format(r[0]) for r in records]
}
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
before = len(zone.records)
if zone.name.endswith('in-addr.arpa.'):
self._populate_in_addr_arpa(zone, lenient)
else:
self._populate_normal(zone, lenient)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _populate_normal(self, zone, lenient):
type_map = {
'=': 'A',
'^': None,
'.': 'NS',
'C': 'CNAME',
'+': 'A',
'@': 'MX',
'\'': 'TXT',
'3': 'AAAA',
'6': 'AAAA',
}
name_re = re.compile(r'((?P<name>.+)\.)?{}$'.format(zone.name[:-1]))
data = defaultdict(lambda: defaultdict(list))
for line in self._lines():
_type = line[0]
if _type not in type_map:
# Something we don't care about
continue
_type = type_map[_type]
if not _type:
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
match = name_re.match(line[0])
if not match:
continue
name = zone.hostname_from_fqdn(line[0])
data[name][_type].append(line[1:])
for name, types in data.items():
for _type, d in types.items():
data_for = getattr(self, '_data_for_{}'.format(_type))
data = data_for(_type, d)
if data:
record = Record.new(zone, name, data, source=self,
lenient=lenient)
try:
zone.add_record(record, lenient=lenient)
except SubzoneRecordException:
self.log.debug('_populate_normal: skipping subzone '
'record=%s', record)
def _populate_in_addr_arpa(self, zone, lenient):
name_re = re.compile(r'(?P<name>.+)\.{}$'.format(zone.name[:-1]))
for line in self._lines():
_type = line[0]
# We're only interested in = (A+PTR), and ^ (PTR) records
if _type not in ('=', '^'):
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
if line[0].endswith('in-addr.arpa'):
# since it's already in in-addr.arpa format
match = name_re.match(line[0])
value = '{}.'.format(line[1])
else:
addr = ip_address(line[1])
match = name_re.match(addr.reverse_pointer)
value = '{}.'.format(line[0])
if match:
try:
ttl = line[2]
except IndexError:
ttl = self.default_ttl
name = match.group('name')
record = Record.new(zone, name, {
'ttl': ttl,
'type': 'PTR',
'value': value
}, source=self, lenient=lenient)
try:
zone.add_record(record, lenient=lenient)
except DuplicateRecordException:
self.log.warn('Duplicate PTR record for {}, '
'skipping'.format(addr))
class TinyDnsFileSource(TinyDnsBaseSource):
'''
A basic TinyDNS zonefile importer created to import legacy data.
tinydns:
class: octodns.source.tinydns.TinyDnsFileSource
# The location of the TinyDNS zone files
directory: ./zones
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
NOTE: timestamps & lo fields are ignored if present.
'''
def __init__(self, id, directory, default_ttl=3600):
self.log = logging.getLogger('TinyDnsFileSource[{}]'.format(id))
self.log.debug('__init__: id=%s, directory=%s, default_ttl=%d', id,
directory, default_ttl)
super(TinyDnsFileSource, self).__init__(id, default_ttl)
self.directory = directory
self._cache = None
def _lines(self):
if self._cache is None:
# We unfortunately don't know where to look since tinydns stuff can
# be defined anywhere so we'll just read all files
lines = []
for filename in listdir(self.directory):
if filename[0] == '.':
# Ignore hidden files
continue
with open(join(self.directory, filename), 'r') as fh:
lines += [l for l in fh.read().split('\n') if l]
self._cache = lines
return self._cache
|
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from ipaddress import ip_address
from os import listdir
from os.path import join
import logging
import re
import textwrap
from ..record import Record
from ..zone import DuplicateRecordException, SubzoneRecordException
from .base import BaseSource
class TinyDnsBaseSource(BaseSource):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'CNAME', 'MX', 'NS', 'TXT', 'AAAA'))
split_re = re.compile(r':+')
def __init__(self, id, default_ttl=3600):
super(TinyDnsBaseSource, self).__init__(id)
self.default_ttl = default_ttl
def _data_for_A(self, _type, records):
values = []
for record in records:
if record[0] != '0.0.0.0':
values.append(record[0])
if len(values) == 0:
return
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_AAAA(self, _type, records):
values = []
for record in records:
# TinyDNS files have the ipv6 address written in full, but with the
# colons removed. This inserts a colon every 4th character to make
# the address correct.
values.append(u":".join(textwrap.wrap(record[0], 4)))
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_TXT(self, _type, records):
values = []
for record in records:
new_value = record[0].encode('latin1').decode('unicode-escape') \
.replace(";", "\\;")
values.append(new_value)
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_CNAME(self, _type, records):
first = records[0]
try:
ttl = first[1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'value': '{}.'.format(first[0])
}
def _data_for_MX(self, _type, records):
try:
ttl = records[0][2]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': [{
'preference': r[1],
'exchange': '{}.'.format(r[0])
} for r in records]
}
def _data_for_NS(self, _type, records):
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': ['{}.'.format(r[0]) for r in records]
}
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
before = len(zone.records)
if zone.name.endswith('in-addr.arpa.'):
self._populate_in_addr_arpa(zone, lenient)
else:
self._populate_normal(zone, lenient)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _populate_normal(self, zone, lenient):
type_map = {
'=': 'A',
'^': None,
'.': 'NS',
'C': 'CNAME',
'+': 'A',
'@': 'MX',
'\'': 'TXT',
'3': 'AAAA',
'6': 'AAAA',
}
name_re = re.compile(r'((?P<name>.+)\.)?{}$'.format(zone.name[:-1]))
data = defaultdict(lambda: defaultdict(list))
for line in self._lines():
_type = line[0]
if _type not in type_map:
# Something we don't care about
continue
_type = type_map[_type]
if not _type:
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
match = name_re.match(line[0])
if not match:
continue
name = zone.hostname_from_fqdn(line[0])
data[name][_type].append(line[1:])
for name, types in data.items():
for _type, d in types.items():
data_for = getattr(self, '_data_for_{}'.format(_type))
data = data_for(_type, d)
if data:
record = Record.new(zone, name, data, source=self,
lenient=lenient)
try:
zone.add_record(record, lenient=lenient)
except SubzoneRecordException:
self.log.debug('_populate_normal: skipping subzone '
'record=%s', record)
def _populate_in_addr_arpa(self, zone, lenient):
name_re = re.compile(r'(?P<name>.+)\.{}$'.format(zone.name[:-1]))
for line in self._lines():
_type = line[0]
# We're only interested in = (A+PTR), and ^ (PTR) records
if _type not in ('=', '^'):
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
if line[0].endswith('in-addr.arpa'):
# since it's already in in-addr.arpa format
match = name_re.match(line[0])
value = '{}.'.format(line[1])
else:
addr = ip_address(line[1])
match = name_re.match(addr.reverse_pointer)
value = '{}.'.format(line[0])
if match:
try:
ttl = line[2]
except IndexError:
ttl = self.default_ttl
name = match.group('name')
record = Record.new(zone, name, {
'ttl': ttl,
'type': 'PTR',
'value': value
}, source=self, lenient=lenient)
try:
zone.add_record(record, lenient=lenient)
except DuplicateRecordException:
self.log.warn('Duplicate PTR record for {}, '
'skipping'.format(addr))
class TinyDnsFileSource(TinyDnsBaseSource):
'''
A basic TinyDNS zonefile importer created to import legacy data.
tinydns:
class: octodns.source.tinydns.TinyDnsFileSource
# The location of the TinyDNS zone files
directory: ./zones
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
NOTE: timestamps & lo fields are ignored if present.
'''
def __init__(self, id, directory, default_ttl=3600):
self.log = logging.getLogger('TinyDnsFileSource[{}]'.format(id))
self.log.debug('__init__: id=%s, directory=%s, default_ttl=%d', id,
directory, default_ttl)
super(TinyDnsFileSource, self).__init__(id, default_ttl)
self.directory = directory
self._cache = None
def _lines(self):
if self._cache is None:
# We unfortunately don't know where to look since tinydns stuff can
# be defined anywhere so we'll just read all files
lines = []
for filename in listdir(self.directory):
if filename[0] == '.':
# Ignore hidden files
continue
with open(join(self.directory, filename), 'r') as fh:
lines += [l for l in fh.read().split('\n') if l]
self._cache = lines
return self._cache
|
en
| 0.841079
|
# # # # TinyDNS files have the ipv6 address written in full, but with the # colons removed. This inserts a colon every 4th character to make # the address correct. # Something we don't care about # Skip type, remove trailing comments, and omit newline # Split on :'s including :: and strip leading/trailing ws # We're only interested in = (A+PTR), and ^ (PTR) records # Skip type, remove trailing comments, and omit newline # Split on :'s including :: and strip leading/trailing ws # since it's already in in-addr.arpa format A basic TinyDNS zonefile importer created to import legacy data. tinydns: class: octodns.source.tinydns.TinyDnsFileSource # The location of the TinyDNS zone files directory: ./zones # The ttl to use for records when not specified in the data # (optional, default 3600) default_ttl: 3600 NOTE: timestamps & lo fields are ignored if present. # We unfortunately don't know where to look since tinydns stuff can # be defined anywhere so we'll just read all files # Ignore hidden files
| 2.164448
| 2
|
release/stubs.min/Rhino/DocObjects/__init___parts/AnnotationObjectBase.py
|
YKato521/ironpython-stubs
| 0
|
6627398
|
<reponame>YKato521/ironpython-stubs<filename>release/stubs.min/Rhino/DocObjects/__init___parts/AnnotationObjectBase.py
class AnnotationObjectBase(RhinoObject):
"""
Provides a base class for Rhino.Geometry.AnnotationBase-derived
objects that are placed in a document.
"""
DisplayText = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the text that is displayed to users.
Get: DisplayText(self: AnnotationObjectBase) -> str
"""
|
class AnnotationObjectBase(RhinoObject):
"""
Provides a base class for Rhino.Geometry.AnnotationBase-derived
objects that are placed in a document.
"""
DisplayText = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the text that is displayed to users.
Get: DisplayText(self: AnnotationObjectBase) -> str
"""
|
en
| 0.759412
|
Provides a base class for Rhino.Geometry.AnnotationBase-derived
objects that are placed in a document. Gets the text that is displayed to users.
Get: DisplayText(self: AnnotationObjectBase) -> str
| 2.2669
| 2
|
scheduler.py
|
zkogan/wittify
| 1
|
6627399
|
import schedule, subprocess, time, sys, os, time
date_folder = time.strftime("%d%m%Y")
output = date_folder + "/" + "output.txt"
if not os.path.isdir(date_folder):
os.mkdir(date_folder)
if not os.path.isfile(output):
f = open(output,"w+")
def job (output, date):
print("Running the transcriber script on {0} folder.".format(date_folder))
subprocess.call(["python", "files_args.py", "-o", output, date])
schedule.every().day.at("23:45").do(job, output, date_folder)
while 1:
date_folder = time.strftime("%d%m%Y")
schedule.run_pending()
time.sleep(1)
|
import schedule, subprocess, time, sys, os, time
date_folder = time.strftime("%d%m%Y")
output = date_folder + "/" + "output.txt"
if not os.path.isdir(date_folder):
os.mkdir(date_folder)
if not os.path.isfile(output):
f = open(output,"w+")
def job (output, date):
print("Running the transcriber script on {0} folder.".format(date_folder))
subprocess.call(["python", "files_args.py", "-o", output, date])
schedule.every().day.at("23:45").do(job, output, date_folder)
while 1:
date_folder = time.strftime("%d%m%Y")
schedule.run_pending()
time.sleep(1)
|
none
| 1
| 2.709435
| 3
|
|
src/day008.py
|
zhangxinyong12/my-python-demo
| 0
|
6627400
|
<reponame>zhangxinyong12/my-python-demo
# class Person:
# def __init__(self, name, age):
# self.name = name
# self.age = age
#
# def eat(self, foot):
# print(self.name + "正在吃" + foot)
#
#
# lilei = Person('lilei', 18)
# lilei.eat('苹果')
#
#
# #
#
# class Cat:
# type = '猫'
#
# def __init__(self, name, age, color):
# self.name = name
# self.age = age
# self.color = color
#
# def eat(self, foot):
# print('{}吃了{}'.format(self.name, foot))
#
# def catcj_mouse(self, weight, color):
# print('{}抓了一只{}kg{}的老鼠'.format(self.name, weight, color))
#
# def sellp(self, hour):
# if hour > 5:
# print('{}继续睡觉'.format(self.name))
# else:
# print('{}起来抓老鼠'.format(self.name))
#
# def show(self):
# print('名字:{},年龄:{},颜色:{}'.format(self.name, self.age, self.color))
#
#
# myCat = Cat('花猫', 2, 'black')
# myCat.catcj_mouse(2, '#fff')
# myCat.sellp(3)
# myCat.show()
# myCat.eat('老鼠')
# p 151
|
# class Person:
# def __init__(self, name, age):
# self.name = name
# self.age = age
#
# def eat(self, foot):
# print(self.name + "正在吃" + foot)
#
#
# lilei = Person('lilei', 18)
# lilei.eat('苹果')
#
#
# #
#
# class Cat:
# type = '猫'
#
# def __init__(self, name, age, color):
# self.name = name
# self.age = age
# self.color = color
#
# def eat(self, foot):
# print('{}吃了{}'.format(self.name, foot))
#
# def catcj_mouse(self, weight, color):
# print('{}抓了一只{}kg{}的老鼠'.format(self.name, weight, color))
#
# def sellp(self, hour):
# if hour > 5:
# print('{}继续睡觉'.format(self.name))
# else:
# print('{}起来抓老鼠'.format(self.name))
#
# def show(self):
# print('名字:{},年龄:{},颜色:{}'.format(self.name, self.age, self.color))
#
#
# myCat = Cat('花猫', 2, 'black')
# myCat.catcj_mouse(2, '#fff')
# myCat.sellp(3)
# myCat.show()
# myCat.eat('老鼠')
# p 151
|
en
| 0.233
|
# class Person: # def __init__(self, name, age): # self.name = name # self.age = age # # def eat(self, foot): # print(self.name + "正在吃" + foot) # # # lilei = Person('lilei', 18) # lilei.eat('苹果') # # # # # # class Cat: # type = '猫' # # def __init__(self, name, age, color): # self.name = name # self.age = age # self.color = color # # def eat(self, foot): # print('{}吃了{}'.format(self.name, foot)) # # def catcj_mouse(self, weight, color): # print('{}抓了一只{}kg{}的老鼠'.format(self.name, weight, color)) # # def sellp(self, hour): # if hour > 5: # print('{}继续睡觉'.format(self.name)) # else: # print('{}起来抓老鼠'.format(self.name)) # # def show(self): # print('名字:{},年龄:{},颜色:{}'.format(self.name, self.age, self.color)) # # # myCat = Cat('花猫', 2, 'black') # myCat.catcj_mouse(2, '#fff') # myCat.sellp(3) # myCat.show() # myCat.eat('老鼠') # p 151
| 3.825549
| 4
|
ecommerceweb/dbmodel.py
|
ShreyaDhananjay/EcommerceWebsite
| 9
|
6627401
|
<gh_stars>1-10
from ecommerceweb import db, login_manager
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from ecommerceweb import db, login_manager, app
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
__tablename__="user"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(75), nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(db.String(60), nullable=False)
contactno = db.Column(db.Numeric(10,0), unique=True)
address_line1 = db.Column(db.String(50))
address_line2 = db.Column(db.String(50))
address_line3 = db.Column(db.String(50))
pincode = db.Column(db.Integer)
city = db.Column(db.String(50))
state = db.Column(db.String(50))
country = db.Column(db.String(50))
def get_reset_token(self, expires_sec=1800):
s = Serializer(app.config['SECRET_KEY'], expires_sec)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return User.query.get(user_id)
def __repr__(self):
return f"User('{self.name}', '{self.email}')"
class Seller(db.Model, UserMixin):
__tablename__="seller"
sid = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(75), nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(db.String(60), nullable=False)
contactno = db.Column(db.Numeric(10,0), unique=True)
address_line1 = db.Column(db.String(50))
address_line2 = db.Column(db.String(50))
address_line3 = db.Column(db.String(50))
pincode = db.Column(db.Integer)
city = db.Column(db.String(50))
state = db.Column(db.String(50))
country = db.Column(db.String(50))
description = db.Column(db.String(300))
website = db.Column(db.String(120))
def __repr__(self):
return f"Seller('{self.name}', '{self.email}')"
class Category(db.Model):
__tablename__="category"
cid = db.Column(db.Integer, primary_key=True)
cname = db.Column(db.String(100), nullable=False)
class Product(db.Model):
__tablename__="product"
pid = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True, nullable=False)
cost = db.Column(db.Float, nullable=False)
details = db.Column(db.String(500), nullable=False)
category_id = db.Column(db.Integer, db.ForeignKey(Category.__table__.c.cid), nullable=False)
sid = db.Column(db.Integer, db.ForeignKey(Seller.__table__.c.sid), nullable=False)
image_file1 = db.Column(db.LargeBinary, nullable=False, default='default.jpg')
image_file2 = db.Column(db.LargeBinary, default='default.jpg')
image_file3 = db.Column(db.LargeBinary, default='default.jpg')
image_file4 = db.Column(db.LargeBinary, default='default.jpg')
stock = db.Column(db.Integer, nullable=False)
class Order(db.Model):
__tablename__="order"
oid = db.Column(db.Integer, primary_key=True)
uid = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), nullable=False)
pid = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), nullable=False)
quantity = db.Column(db.Integer, nullable=False)
total = db.Column(db.Integer, nullable=False)
order_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
order_status = db.Column(db.String, nullable=False)
class Shipping(db.Model):
    """Delivery record for an Order: destination address and contact number."""
    __tablename__="shipping"
    ship_id = db.Column(db.Integer, primary_key=True)
    oid = db.Column(db.Integer, db.ForeignKey(Order.__table__.c.oid), nullable=False)
    delivery_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    details = db.Column(db.String(100))
    # NOTE(review): phone stored as Integer here but Numeric(10,0) on User/Seller;
    # unique=True on a shipping contact number also looks suspect -- verify.
    contactno = db.Column(db.Integer, unique=True, nullable=False)
    address_line1 = db.Column(db.String(50), nullable=False)
    address_line2 = db.Column(db.String(50))
    address_line3 = db.Column(db.String(50))
    pincode = db.Column(db.Integer, nullable=False)
    city = db.Column(db.String(50), nullable=False)
    state = db.Column(db.String(50), nullable=False)
    country = db.Column(db.String(50), nullable=False)
class Cart(db.Model):
    """Shopping-cart entry; composite primary key (uid, pid) -> one row per user/product."""
    __tablename__="cart"
    uid = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), primary_key=True)
    pid = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), primary_key=True)
    quantity = db.Column(db.Integer, nullable=False)
class Review(db.Model):
    """Product review; composite primary key (user_id, prod_id) -> one review per user per product."""
    __tablename__="review"
    user_id = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), primary_key=True, nullable=False)
    prod_id = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), primary_key=True, nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String(300), nullable=False)
    # Denormalized copy of the reviewer's display name.
    user_name = db.Column(db.String(75))
    def __repr__(self):
        return f"Review('{self.user_id}', '{self.prod_id}')"
|
from ecommerceweb import db, login_manager
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from ecommerceweb import db, login_manager, app
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session.

    ``user_id`` arrives as a string (as stored in the session cookie),
    hence the int() conversion before the primary-key lookup.
    """
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Customer account; doubles as the Flask-Login principal via UserMixin."""
    __tablename__="user"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(75), nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # 60 chars fits a bcrypt digest -- presumably a hashed password; confirm
    # against the registration code.
    password = db.Column(db.String(60), nullable=False)
    contactno = db.Column(db.Numeric(10,0), unique=True)
    # Optional postal address, split across three free-form lines.
    address_line1 = db.Column(db.String(50))
    address_line2 = db.Column(db.String(50))
    address_line3 = db.Column(db.String(50))
    pincode = db.Column(db.Integer)
    city = db.Column(db.String(50))
    state = db.Column(db.String(50))
    country = db.Column(db.String(50))

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited password-reset token (default 30 min).

        NOTE(review): TimedJSONWebSignatureSerializer was removed in
        itsdangerous 2.1 -- pin itsdangerous<2.1 or migrate to a
        URLSafeTimedSerializer with max_age on loads().
        """
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Decode a reset token; return the matching User, or None if
        the token is invalid, tampered with, or expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except Exception:
            # itsdangerous raises BadSignature/SignatureExpired on bad tokens.
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.name}', '{self.email}')"
class Seller(db.Model, UserMixin):
    """Merchant account; mirrors User's fields plus a shop description/website.

    Also a UserMixin, so sellers can authenticate through Flask-Login.
    """
    __tablename__="seller"
    sid = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(75), nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # 60 chars fits a bcrypt digest -- presumably hashed; confirm upstream.
    password = db.Column(db.String(60), nullable=False)
    contactno = db.Column(db.Numeric(10,0), unique=True)
    # Optional business address, split across three free-form lines.
    address_line1 = db.Column(db.String(50))
    address_line2 = db.Column(db.String(50))
    address_line3 = db.Column(db.String(50))
    pincode = db.Column(db.Integer)
    city = db.Column(db.String(50))
    state = db.Column(db.String(50))
    country = db.Column(db.String(50))
    description = db.Column(db.String(300))
    website = db.Column(db.String(120))
    def __repr__(self):
        return f"Seller('{self.name}', '{self.email}')"
class Category(db.Model):
    """Product category lookup table (id + display name)."""
    __tablename__="category"
    cid = db.Column(db.Integer, primary_key=True)
    cname = db.Column(db.String(100), nullable=False)
class Product(db.Model):
    """Catalog item listed by a Seller under a Category, with up to four images."""
    __tablename__="product"
    pid = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True, nullable=False)
    cost = db.Column(db.Float, nullable=False)
    details = db.Column(db.String(500), nullable=False)
    category_id = db.Column(db.Integer, db.ForeignKey(Category.__table__.c.cid), nullable=False)
    sid = db.Column(db.Integer, db.ForeignKey(Seller.__table__.c.sid), nullable=False)
    # NOTE(review): LargeBinary columns with a string default ('default.jpg')
    # look inconsistent -- confirm whether these hold raw bytes or filenames.
    image_file1 = db.Column(db.LargeBinary, nullable=False, default='default.jpg')
    image_file2 = db.Column(db.LargeBinary, default='default.jpg')
    image_file3 = db.Column(db.LargeBinary, default='default.jpg')
    image_file4 = db.Column(db.LargeBinary, default='default.jpg')
    stock = db.Column(db.Integer, nullable=False)
class Order(db.Model):
    """One purchased line item: a (user, product) pair with quantity and total."""
    __tablename__="order"
    oid = db.Column(db.Integer, primary_key=True)
    uid = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), nullable=False)
    pid = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), nullable=False)
    quantity = db.Column(db.Integer, nullable=False)
    # NOTE(review): total is Integer while Product.cost is Float -- fractional
    # totals would be truncated; verify the intended currency representation.
    total = db.Column(db.Integer, nullable=False)
    order_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # NOTE(review): db.String without a length may be rejected by some backends
    # (e.g. MySQL) -- confirm target database.
    order_status = db.Column(db.String, nullable=False)
class Shipping(db.Model):
    """Delivery record for an Order: destination address and contact number."""
    __tablename__="shipping"
    ship_id = db.Column(db.Integer, primary_key=True)
    oid = db.Column(db.Integer, db.ForeignKey(Order.__table__.c.oid), nullable=False)
    delivery_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    details = db.Column(db.String(100))
    # NOTE(review): phone stored as Integer here but Numeric(10,0) on User/Seller;
    # unique=True on a shipping contact number also looks suspect -- verify.
    contactno = db.Column(db.Integer, unique=True, nullable=False)
    address_line1 = db.Column(db.String(50), nullable=False)
    address_line2 = db.Column(db.String(50))
    address_line3 = db.Column(db.String(50))
    pincode = db.Column(db.Integer, nullable=False)
    city = db.Column(db.String(50), nullable=False)
    state = db.Column(db.String(50), nullable=False)
    country = db.Column(db.String(50), nullable=False)
class Cart(db.Model):
    """Shopping-cart entry; composite primary key (uid, pid) -> one row per user/product."""
    __tablename__="cart"
    uid = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), primary_key=True)
    pid = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), primary_key=True)
    quantity = db.Column(db.Integer, nullable=False)
class Review(db.Model):
    """Product review; composite primary key (user_id, prod_id) -> one review per user per product."""
    __tablename__="review"
    user_id = db.Column(db.Integer, db.ForeignKey(User.__table__.c.id), primary_key=True, nullable=False)
    prod_id = db.Column(db.Integer, db.ForeignKey(Product.__table__.c.pid), primary_key=True, nullable=False)
    date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    content = db.Column(db.String(300), nullable=False)
    # Denormalized copy of the reviewer's display name.
    user_name = db.Column(db.String(75))
    def __repr__(self):
        return f"Review('{self.user_id}', '{self.prod_id}')"
|
none
| 1
| 2.326946
| 2
|
|
backend/config.py
|
edmondjt/python-blockchain
| 0
|
6627402
|
# Time units, all expressed in nanoseconds (the base tick).
NANOSECONDS = 1
MICROSECONDS = NANOSECONDS * 1000
MILLISECONDS = MICROSECONDS * 1000
SECONDS = MILLISECONDS * 1000

# Target interval between mined blocks: four seconds, in nanoseconds.
MINE_RATE = SECONDS * 4

# Wallet economics: every new wallet starts with this balance, and each
# mined block pays out MINING_REWARD via the special reward "input".
STARTING_BALANCE = 1000
MINING_REWARD = 50
MINING_REWARD_INPUT = {'address': '*--approved-mining-reward--*'}
|
# Time units, all expressed in nanoseconds (the base tick).
NANOSECONDS = 1
MICROSECONDS = NANOSECONDS * 1000
MILLISECONDS = MICROSECONDS * 1000
SECONDS = MILLISECONDS * 1000

# Target interval between mined blocks: four seconds, in nanoseconds.
MINE_RATE = SECONDS * 4

# Wallet economics: every new wallet starts with this balance, and each
# mined block pays out MINING_REWARD via the special reward "input".
STARTING_BALANCE = 1000
MINING_REWARD = 50
MINING_REWARD_INPUT = {'address': '*--approved-mining-reward--*'}
|
none
| 1
| 1.736815
| 2
|
|
setup.py
|
asuiu/idzip
| 1
|
6627403
|
#!/usr/bin/python
# Packaging script for python-idzip (seekable/random-access gzip).
from setuptools import setup, find_packages

# Single source of truth for the release version; reused in download_url.
VERSION = "0.3.6"

setup(
    name = "python-idzip",
    version = VERSION,
    packages=find_packages(),
    # Installs an `idzip` command wired to idzip.command:main.
    entry_points={
        "console_scripts": [
            "idzip = idzip.command:main"
        ]
    },
    description = 'DictZip - Random Access gzip files',
    author = '<NAME>',
    maintainer= '<NAME>',
    maintainer_email='<EMAIL>',
    license='MIT',
    url = 'https://github.com/bauman/python-idzip',
    download_url = 'https://github.com/bauman/python-idzip/archive/%s.tar.gz' %(VERSION),
    classifiers = [
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    # NOTE(review): bin/idzip may collide with the console_scripts entry of
    # the same name -- confirm both install paths are intended.
    scripts=['bin/idzip']
)
|
#!/usr/bin/python
# Packaging script for python-idzip (seekable/random-access gzip).
from setuptools import setup, find_packages

# Single source of truth for the release version; reused in download_url.
VERSION = "0.3.6"

setup(
    name = "python-idzip",
    version = VERSION,
    packages=find_packages(),
    # Installs an `idzip` command wired to idzip.command:main.
    entry_points={
        "console_scripts": [
            "idzip = idzip.command:main"
        ]
    },
    description = 'DictZip - Random Access gzip files',
    author = '<NAME>',
    maintainer= '<NAME>',
    maintainer_email='<EMAIL>',
    license='MIT',
    url = 'https://github.com/bauman/python-idzip',
    download_url = 'https://github.com/bauman/python-idzip/archive/%s.tar.gz' %(VERSION),
    classifiers = [
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    # NOTE(review): bin/idzip may collide with the console_scripts entry of
    # the same name -- confirm both install paths are intended.
    scripts=['bin/idzip']
)
|
ru
| 0.258958
|
#!/usr/bin/python
| 1.359643
| 1
|
rapidtide/workflows/rapidtide2x_parser.py
|
tsalo/rapidtide
| 0
|
6627404
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import getopt
import os
import sys
import numpy as np
import rapidtide.filter as tide_filt
import rapidtide.io as tide_io
import rapidtide.util as tide_util
global rt_floatset, rt_floattype
# Optional Intel MKL binding; when available the program exposes
# --mklthreads to cap MKL worker threads (see usage()).
try:
    import mkl

    mklexists = True
except ImportError:
    mklexists = False
# Optional memory profiling (enabled by --memprofile); degrade
# gracefully when memory_profiler is not installed.
try:
    from memory_profiler import profile

    memprofilerexists = True
except ImportError:
    memprofilerexists = False
def usage():
print("usage: ", os.path.basename(sys.argv[0]), " datafilename outputname ")
print(
" ".join(
[
"[-r LAGMIN,LAGMAX]",
"[-s SIGMALIMIT]",
"[-a]",
"[--nowindow]",
"[--phat]",
"[--liang]",
"[--eckart]",
"[-f GAUSSSIGMA]",
"[-O oversampfac]",
"[-t TSTEP]",
"[--datatstep=TSTEP]",
"[--datafreq=FREQ]",
"[-d]",
"[-b]",
"[-V]",
"[-L]",
"[-R]",
"[-C]",
"[-F LOWERFREQ,UPPERFREQ[,LOWERSTOP,UPPERSTOP]]",
"[-o OFFSETTIME]",
"[--autosync]",
"[-T]",
"[-p]",
"[-P]",
"[-B]",
"[-h HISTLEN]",
"[-i INTERPTYPE]",
"[-I]",
"[-Z DELAYTIME]",
"[--nofitfilt]",
"[--searchfrac=SEARCHFRAC]",
"[-N NREPS]",
"[--motionfile=MOTFILE]",
"[--pickleft]",
"[--numskip=SKIP]",
"[--refineweighting=TYPE]",
"[--refineprenorm=TYPE]",
"[--passes=PASSES]",
"[--refinepasses=PASSES]",
"[--excluderefine=MASK]",
"[--includerefine=MASK]",
"[--includemean=MASK]",
"[--excludemean=MASK]" "[--lagminthresh=MIN]",
"[--lagmaxthresh=MAX]",
"[--ampthresh=AMP]",
"[--sigmathresh=SIGMA]",
"[--corrmask=MASK]",
"[--corrmaskthresh=PCT]",
"[--refineoffset]",
"[--pca]",
"[--ica]",
"[--weightedavg]",
"[--avg]",
"[--psdfilter]",
"[--noprogressbar]",
"[--despecklethresh=VAL]",
"[--despecklepasses=PASSES]",
"[--dispersioncalc]",
"[--refineupperlag]",
"[--refinelowerlag]",
"[--nosharedmem]",
"[--tmask=MASKFILE]",
"[--limitoutput]",
"[--motionfile=FILENAME[:COLSPEC]",
"[--softlimit]",
"[--timerange=START,END]",
"[--skipsighistfit]",
"[--accheck]",
"[--acfix]" "[--numskip=SKIP]",
"[--slicetimes=FILE]",
"[--glmsourcefile=FILE]",
"[--regressorfreq=FREQ]",
"[--regressortstep=TSTEP]" "[--regressor=FILENAME]",
"[--regressorstart=STARTTIME]",
"[--usesp]",
"[--peakfittype=FITTYPE]",
"[--mklthreads=NTHREADS]",
"[--nprocs=NPROCS]",
"[--nirs]",
"[--venousrefine]",
]
)
)
print("")
print("Required arguments:")
print(" datafilename - The input data file (BOLD fmri file or NIRS)")
print(" outputname - The root name for the output files")
print("")
print("Optional arguments:")
print(
" Arguments are processed in order of appearance. Later options can override ones earlier on"
)
print(" the command line")
print("")
print("Macros:")
print(
" --venousrefine - This is a macro that sets --lagminthresh=2.5, --lagmaxthresh=6.0,"
)
print(
" --ampthresh=0.5, and --refineupperlag to bias refinement towards "
)
print(
" voxels in the draining vasculature for an fMRI scan."
)
print(
" --nirs - This is a NIRS analysis - this is a macro that sets --nothresh,"
)
print(
" --preservefiltering, --refinenorm=var, --ampthresh=0.7, "
)
print(" and --lagminthresh=0.1.")
print("")
print("Preprocessing options:")
print(
" -t TSTEP, - Set the timestep of the data file to TSTEP (or 1/FREQ)"
)
print(" --datatstep=TSTEP, This will override the TR in an fMRI file.")
print(
" --datafreq=FREQ NOTE: if using data from a text file, for example with"
)
print(
" NIRS data, using one of these options is mandatory."
)
print(" -a - Disable antialiasing filter")
print(
" --detrendorder=ORDER - Set order of trend removal (0 to disable, default is 1 - linear)"
)
print(
" -I - Invert the sign of the regressor before processing"
)
print(
" -i - Use specified interpolation type (options are 'cubic',"
)
print(" 'quadratic', and 'univariate (default)')")
print(" -o - Apply an offset OFFSETTIME to the lag regressors")
print(
" --autosync - Calculate and apply offset time of an external regressor from "
)
print(
" the global crosscorrelation. Overrides offsettime if specified."
)
print(
" -b - Use butterworth filter for band splitting instead of"
)
print(" trapezoidal FFT filter")
print(" -F LOWERFREQ,UPPERFREQ[,LOWERSTOP,UPPERSTOP]")
print(
" - Filter data and regressors from LOWERFREQ to UPPERFREQ."
)
print(
" LOWERSTOP and UPPERSTOP can be specified, or will be"
)
print(" calculated automatically")
print(" -V - Filter data and regressors to VLF band")
print(" -L - Filter data and regressors to LFO band")
print(" -R - Filter data and regressors to respiratory band")
print(" -C - Filter data and regressors to cardiac band")
print(
" --padseconds=SECONDS - Set the filter pad time to SECONDS seconds. Default"
)
print(" is 30.0")
print(
" -N NREPS - Estimate significance threshold by running NREPS null "
)
print(
" correlations (default is 10000, set to 0 to disable). If you are"
)
print(
" running multiple passes, 'ampthresh' will be set to the 0.05 significance."
)
print(
" level unless it is manually specified (see below)."
)
print(
" --permutationmethod=METHOD - Method for permuting the regressor for significance estimation. Default"
)
print(" is shuffle")
print(
" --skipsighistfit - Do not fit significance histogram with a Johnson SB function"
)
print(
" --windowfunc=FUNC - Use FUNC window funcion prior to correlation. Options are"
)
print(" hamming (default), hann, blackmanharris, and None")
print(" --nowindow - Disable precorrelation windowing")
print(
" -f GAUSSSIGMA - Spatially filter fMRI data prior to analysis using "
)
print(" GAUSSSIGMA in mm")
print(
" -M - Generate a global mean regressor and use that as the "
)
print(" reference regressor")
print(" --globalmeaninclude=MASK[:VALSPEC]")
print(
" - Only use voxels in NAME for global regressor generation (if VALSPEC is"
)
print(
" given, only voxels with integral values listed in VALSPEC are used.)"
)
print(" --globalmeanexclude=MASK[:VALSPEC]")
print(
" - Do not use voxels in NAME for global regressor generation (if VALSPEC is"
)
print(
" given, only voxels with integral values listed in VALSPEC are used.)"
)
print(
" -m - Mean scale regressors during global mean estimation"
)
print(
" --slicetimes=FILE - Apply offset times from FILE to each slice in the dataset"
)
print(
" --numskip=SKIP - SKIP tr's were previously deleted during preprocessing (e.g. if you "
)
print(
" have done your preprocessing in FSL and set dummypoints to a "
)
print(" nonzero value.) Default is 0.")
print(" --timerange=START,END - Limit analysis to data between timepoints START ")
print(" and END in the fmri file. If END is set to -1, ")
print(
" analysis will go to the last timepoint. Negative values "
)
print(
" of START will be set to 0. Default is to use all timepoints."
)
print(
" --nothresh - Disable voxel intensity threshold (especially useful"
)
print(" for NIRS data)")
print(
" --motionfile=MOTFILE[:COLSPEC] - Read 6 columns of motion regressors out of MOTFILE text file."
)
print(
" (with timepoints rows) and regress their derivatives"
)
print(
" and delayed derivatives out of the data prior to analysis."
)
print(
" If COLSPEC is present, use the comma separated list of ranges to"
)
print(
" specify X, Y, Z, RotX, RotY, and RotZ, in that order. For"
)
print(
" example, :3-5,7,0,9 would use columns 3, 4, 5, 7, 0 and 9"
)
print(" for X, Y, Z, RotX, RotY, RotZ, respectively")
print(
" --motpos - Toggle whether displacement regressors will be used in motion regression."
)
print(" Default is False.")
print(
" --motderiv - Toggle whether derivatives will be used in motion regression."
)
print(" Default is True.")
print(
" --motdelayderiv - Toggle whether delayed derivative regressors will be used in motion regression."
)
print(" Default is False.")
print("")
print("Correlation options:")
print(
" -O OVERSAMPFAC - Oversample the fMRI data by the following integral "
)
print(
" factor. Setting to -1 chooses the factor automatically (default)"
)
print(" --regressor=FILENAME - Read probe regressor from file FILENAME (if none ")
print(" specified, generate and use global regressor)")
print(
" --regressorfreq=FREQ - Probe regressor in file has sample frequency FREQ "
)
print(
" (default is 1/tr) NB: --regressorfreq and --regressortstep"
)
print(" are two ways to specify the same thing")
print(
" --regressortstep=TSTEP - Probe regressor in file has sample time step TSTEP "
)
print(
" (default is tr) NB: --regressorfreq and --regressortstep"
)
print(" are two ways to specify the same thing")
print(
" --regressorstart=START - The time delay in seconds into the regressor file, corresponding"
)
print(" in the first TR of the fmri file (default is 0.0)")
print(
" --phat - Use generalized cross-correlation with phase alignment "
)
print(" transform (PHAT) instead of correlation")
print(
" --liang - Use generalized cross-correlation with Liang weighting function"
)
print(" (Liang, et al, doi:10.1109/IMCCC.2015.283)")
print(
" --eckart - Use generalized cross-correlation with Eckart weighting function"
)
print(
" --corrmaskthresh=PCT - Do correlations in voxels where the mean exceeeds this "
)
print(" percentage of the robust max (default is 1.0)")
print(
" --corrmask=MASK - Only do correlations in voxels in MASK (if set, corrmaskthresh"
)
print(" is ignored).")
print(
" --accheck - Check for periodic components that corrupt the autocorrelation"
)
print("")
print("Correlation fitting options:")
print(
" -Z DELAYTIME - Don't fit the delay time - set it to DELAYTIME seconds "
)
print(" for all voxels")
print(
" -r LAGMIN,LAGMAX - Limit fit to a range of lags from LAGMIN to LAGMAX"
)
print(
" -s SIGMALIMIT - Reject lag fits with linewidth wider than SIGMALIMIT"
)
print(
" -B - Bipolar mode - match peak correlation ignoring sign"
)
print(" --nofitfilt - Do not zero out peak fit values if fit fails")
print(
" --searchfrac=FRAC - When peak fitting, include points with amplitude > FRAC * the"
)
print(" maximum amplitude.")
print(" (default value is 0.5)")
print(
" --peakfittype=FITTYPE - Method for fitting the peak of the similarity function"
)
print(
" (default is 'gauss'). 'quad' uses a quadratic fit. Other options are "
)
print(
" 'fastgauss' which is faster but not as well tested, and 'None'."
)
print(
" --despecklepasses=PASSES - detect and refit suspect correlations to disambiguate peak"
)
print(" locations in PASSES passes")
print(
" --despecklethresh=VAL - refit correlation if median discontinuity magnitude exceeds"
)
print(" VAL (default is 5s)")
print(
" --softlimit - Allow peaks outside of range if the maximum correlation is"
)
print(" at an edge of the range.")
print("")
print("Regressor refinement options:")
print(
" --refineprenorm=TYPE - Apply TYPE prenormalization to each timecourse prior "
)
print(" to refinement (valid weightings are 'None', ")
print(" 'mean' (default), 'var', and 'std'")
print(" --refineweighting=TYPE - Apply TYPE weighting to each timecourse prior ")
print(" to refinement (valid weightings are 'None', ")
print(" 'R', 'R2' (default)")
print(" --passes=PASSES, - Set the number of processing passes to PASSES ")
print(" --refinepasses=PASSES (default is 1 pass - no refinement).")
print(
" NB: refinepasses is the wrong name for this option -"
)
print(
" --refinepasses is deprecated, use --passes from now on."
)
print(
" --refineinclude=MASK[:VALSPEC] - Only use nonzero voxels in MASK for regressor refinement (if VALSPEC is"
)
print(
" given, only voxels with integral values listed in VALSPEC are used.)"
)
print(
" --refineexclude=MASK[:VALSPEC] - Do not use nonzero voxels in MASK for regressor refinement (if VALSPEC is"
)
print(
" given, only voxels with integral values listed in VALSPEC are used.)"
)
print(" --lagminthresh=MIN - For refinement, exclude voxels with delays less ")
print(" than MIN (default is 0.5s)")
print(
" --lagmaxthresh=MAX - For refinement, exclude voxels with delays greater "
)
print(" than MAX (default is 5s)")
print(" --ampthresh=AMP - For refinement, exclude voxels with correlation ")
print(
" coefficients less than AMP (default is 0.3). NOTE: ampthresh will"
)
print(
" automatically be set to the p<0.05 significance level determined by"
)
print(
" the -N option if -N is set greater than 0 and this is not "
)
print(" manually specified.")
print(
" --sigmathresh=SIGMA - For refinement, exclude voxels with widths greater "
)
print(" than SIGMA (default is 100s)")
print(
" --refineoffset - Adjust offset time during refinement to bring peak "
)
print(" delay to zero")
print(
" --pickleft - When setting refineoffset, always select the leftmost histogram peak"
)
print(
" --pickleftthresh=THRESH - Set the threshold value (fraction of maximum) to decide something is a "
)
print(" peak in a histogram. Default is 0.33.")
print(" --refineupperlag - Only use positive lags for regressor refinement")
print(" --refinelowerlag - Only use negative lags for regressor refinement")
print(" --pca - Use pca to derive refined regressor (default is ")
print(" unweighted averaging)")
print(" --ica - Use ica to derive refined regressor (default is ")
print(" unweighted averaging)")
print(" --weightedavg - Use weighted average to derive refined regressor ")
print(" (default is unweighted averaging)")
print(
" --avg - Use unweighted average to derive refined regressor "
)
print(" (default)")
print(" --psdfilter - Apply a PSD weighted Wiener filter to shifted")
print(" timecourses prior to refinement")
print("")
print("Output options:")
print(
" --limitoutput - Don't save some of the large and rarely used files"
)
print(" -T - Save a table of lagtimes used")
print(
" -h HISTLEN - Change the histogram length to HISTLEN (default is"
)
print(" 100)")
print(
" --glmsourcefile=FILE - Regress delayed regressors out of FILE instead of the "
)
print(" initial fmri file used to estimate delays")
print(
" --noglm - Turn off GLM filtering to remove delayed regressor "
)
print(" from each voxel (disables output of fitNorm)")
print(" --preservefiltering - don't reread data prior to GLM")
print("")
print("Miscellaneous options:")
print(
" --noprogressbar - Disable progress bars - useful if saving output to files"
)
print(
" --wiener - Perform Wiener deconvolution to get voxel transfer functions"
)
print(
" --usesp - Use single precision for internal calculations (may"
)
print(" be useful when RAM is limited)")
print(" -c - Data file is a converted CIFTI")
print(" -S - Simulate a run - just report command line options")
print(" -d - Display plots of interesting timecourses")
print(" --nonumba - Disable jit compilation with numba")
print(
" --nosharedmem - Disable use of shared memory for large array storage"
)
print(" --memprofile - Enable memory profiling for debugging - warning:")
print(" this slows things down a lot.")
print(
" --multiproc - Enable multiprocessing versions of key subroutines. This"
)
print(
" speeds things up dramatically. Almost certainly will NOT"
)
print(
" work on Windows (due to different forking behavior)."
)
print(
" --mklthreads=NTHREADS - Use no more than NTHREADS worker threads in accelerated numpy calls."
)
print(
" --nprocs=NPROCS - Use NPROCS worker processes for multiprocessing. Setting NPROCS"
)
print(
" less than 1 sets the number of worker processes to"
)
print(
" n_cpus - 1 (default). Setting NPROCS enables --multiproc."
)
print(" --debug - Enable additional information output")
print(
" become the default, but for now I'm just trying it out."
)
print("")
print("Experimental options (not fully tested, may not work):")
print(
" --cleanrefined - perform additional processing on refined regressor to remove spurious"
)
print(" components.")
print(
" --dispersioncalc - Generate extra data during refinement to allow calculation of"
)
print(" dispersion.")
print(
" --acfix - Perform a secondary correlation to disambiguate peak location"
)
print(" (enables --accheck). Experimental.")
print(" --tmask=MASKFILE - Only correlate during epochs specified in ")
print(
" MASKFILE (NB: if file has one colum, the length needs to match"
)
print(
" the number of TRs used. TRs with nonzero values will be used"
)
print(
" in analysis. If there are 2 or more columns, each line of MASKFILE"
)
print(
" contains the time (first column) and duration (second column) of an"
)
print(" epoch to include.)")
return ()
def process_args(inputargs=None):
    """Parse rapidtide command-line options into an option dictionary.

    NOTE(review): the ``inputargs`` parameter is currently unused — options are
    always read from ``sys.argv`` directly; confirm intended behavior against
    callers before wiring it through.

    Returns:
        tuple: ``(optiondict, theprefilter)`` where ``optiondict`` is a dict of
        all parsed and derived settings, and ``theprefilter`` is the configured
        ``tide_filt.NoncausalFilter`` instance.

    Side effects:
        Writes ``OUTPUTNAME_formattedcommandline.txt`` and
        ``OUTPUTNAME_commandline.txt``; prints usage and exits on bad input.
    """
    nargs = len(sys.argv)
    if nargs < 3:
        usage()
        exit()
    # set default variable values
    optiondict = {}
    # file i/o file options
    optiondict["isgrayordinate"] = False
    optiondict["textio"] = False
    # preprocessing options
    optiondict["gausssigma"] = 0.0  # the width of the spatial filter kernel in mm
    optiondict[
        "antialias"
    ] = True  # apply an antialiasing filter to any regressors prior to filtering
    optiondict["invertregressor"] = False  # invert the initial regressor during startup
    optiondict["slicetimes"] = None  # do not apply any slice order correction by default
    optiondict["startpoint"] = -1  # by default, analyze the entire length of the dataset
    optiondict["endpoint"] = 10000000  # by default, analyze the entire length of the dataset
    optiondict["preprocskip"] = 0  # number of trs skipped in preprocessing
    optiondict["globalsignalmethod"] = "sum"
    optiondict["globalpcacomponents"] = 0.8
    optiondict["globalmaskmethod"] = "mean"
    optiondict["globalmeanexcludename"] = None
    optiondict["globalmeanexcludevals"] = None  # list of integer values to use in the mask
    optiondict["globalmeanincludename"] = None
    optiondict["globalmeanincludevals"] = None  # list of integer values to use in the mask
    # correlation options
    optiondict["similaritymetric"] = "correlation"
    optiondict["smoothingtime"] = 3.0
    optiondict["madnormMI"] = False
    optiondict["dodemean"] = True  # remove the mean from signals prior to correlation
    optiondict["detrendorder"] = 1  # remove linear trends prior to correlation
    optiondict["windowfunc"] = "hamming"  # the particular window function to use for correlation
    optiondict["zeropadding"] = 0  # by default, padding is 0 (circular correlation)
    optiondict[
        "corrweighting"
    ] = "None"  # use a standard unweighted crosscorrelation for calculate time delays
    optiondict["tmaskname"] = None  # file name for tmask regressor
    optiondict[
        "corrmaskthreshpct"
    ] = 1.0  # percentage of robust maximum of mean to mask correlations
    optiondict["corrmaskexcludename"] = None
    optiondict["corrmaskexcludevals"] = None  # list of integer values to use in the mask
    optiondict["corrmaskincludename"] = None
    optiondict["corrmaskincludevals"] = None  # list of integer values to use in the mask
    optiondict[
        "check_autocorrelation"
    ] = True  # check for periodic components that corrupt the autocorrelation
    optiondict[
        "fix_autocorrelation"
    ] = False  # remove periodic components that corrupt the autocorrelation
    optiondict[
        "despeckle_thresh"
    ] = 5.0  # threshold value - despeckle if median discontinuity magnitude exceeds despeckle_thresh
    optiondict["despeckle_passes"] = 0  # despeckling passes to perform
    optiondict["nothresh"] = False  # disable voxel intensity threshholding
    # correlation fitting options
    optiondict[
        "hardlimit"
    ] = True  # Peak value must be within specified range.  If false, allow max outside if maximum
    # correlation value is that one end of the range.
    optiondict["bipolar"] = False  # find peak with highest magnitude, regardless of sign
    optiondict["lthreshval"] = 0.0  # zero out peaks with correlations lower than this value
    optiondict["uthreshval"] = 1.0  # zero out peaks with correlations higher than this value
    optiondict[
        "edgebufferfrac"
    ] = 0.0  # what fraction of the correlation window to avoid on either end when fitting
    optiondict["enforcethresh"] = True  # only do fits in voxels that exceed threshhold
    optiondict["zerooutbadfit"] = True  # if true zero out all fit parameters if the fit fails
    optiondict[
        "searchfrac"
    ] = 0.5  # The fraction of the main peak over which points are included in the peak
    optiondict[
        "lagmod"
    ] = 1000.0  # if set to the location of the first autocorrelation sidelobe, this should
    optiondict[
        "peakfittype"
    ] = "gauss"  # if set to 'gauss', use old gaussian fitting, if set to 'quad' use parabolic
    optiondict["acwidth"] = 0.0  # width of the reference autocorrelation function
    optiondict["absmaxsigma"] = 100.0  # width of the reference autocorrelation function
    optiondict["absminsigma"] = 0.25  # width of the reference autocorrelation function
    # move delay peaks back to the correct position if they hit a sidelobe
    # postprocessing options
    optiondict[
        "doglmfilt"
    ] = True  # use a glm filter to remove the delayed regressor from the data in each voxel
    optiondict["preservefiltering"] = False
    optiondict[
        "glmsourcefile"
    ] = None  # name of the file from which to regress delayed regressors (if not the original data)
    optiondict["dodeconv"] = False  # do Wiener deconvolution to find voxel transfer function
    optiondict["motionfilename"] = None  # by default do no motion regression
    optiondict["mot_pos"] = False  # do not do position
    optiondict["mot_deriv"] = True  # do use derivative
    optiondict["mot_delayderiv"] = False  # do not do delayed derivative
    optiondict["savemotionfiltered"] = False  # save motion filtered file for debugging
    # filter options
    optiondict["filtorder"] = 6
    optiondict[
        "padseconds"
    ] = 30.0  # the number of seconds of padding to add to each end of a filtered timecourse
    optiondict["filtertype"] = "trapezoidal"
    optiondict["respdelete"] = False
    optiondict["lowerstop"] = None
    optiondict["lowerpass"] = None
    optiondict["upperpass"] = None
    optiondict["upperstop"] = None
    # output options
    optiondict["savelagregressors"] = True
    optiondict["savemovingsignal"] = True
    optiondict["saveglmfiltered"] = True
    optiondict["savecorrtimes"] = False
    optiondict["saveintermediatemaps"] = False
    optiondict["bidsoutput"] = False
    optiondict["interptype"] = "univariate"
    optiondict["useglobalref"] = False
    optiondict["fixdelay"] = False
    optiondict["fixeddelayvalue"] = 0.0
    # significance estimation options
    optiondict[
        "numestreps"
    ] = 10000  # the number of sham correlations to perform to estimate significance
    optiondict["permutationmethod"] = "shuffle"
    optiondict[
        "nohistzero"
    ] = False  # if False, there is a spike at R=0 in the significance histogram
    optiondict["ampthreshfromsig"] = True
    optiondict["sighistlen"] = 1000
    optiondict["dosighistfit"] = True
    optiondict["histlen"] = 250
    optiondict["oversampfactor"] = -1
    optiondict["lagmin"] = -30.0
    optiondict["lagmax"] = 30.0
    optiondict["widthlimit"] = 100.0
    optiondict["offsettime"] = 0.0
    optiondict["autosync"] = False
    optiondict["offsettime_total"] = 0.0
    # refinement options
    optiondict["cleanrefined"] = False
    optiondict["lagmaskside"] = "both"
    optiondict["refineweighting"] = "R2"
    optiondict["refineprenorm"] = "mean"
    optiondict["sigmathresh"] = 100.0
    optiondict["lagminthresh"] = 0.5
    optiondict["lagmaxthresh"] = 5.0
    optiondict["ampthresh"] = 0.3
    optiondict["passes"] = 1
    optiondict["refineoffset"] = False
    optiondict["pickleft"] = False
    optiondict["pickleftthresh"] = 0.33
    optiondict["refineexcludename"] = None
    optiondict["refineexcludevals"] = None  # list of integer values to use in the mask
    optiondict["refineincludename"] = None
    optiondict["refineincludevals"] = None  # list of integer values to use in the mask
    optiondict["corrmaskvallist"] = None
    optiondict["refinetype"] = "unweighted_average"
    optiondict["convergencethresh"] = None
    optiondict["maxpasses"] = 10
    optiondict["pcacomponents"] = 0.8
    optiondict["filterbeforePCA"] = True
    optiondict["fmrifreq"] = 0.0
    optiondict["dodispersioncalc"] = False
    optiondict["dispersioncalc_lower"] = -4.0
    optiondict["dispersioncalc_upper"] = 4.0
    optiondict["dispersioncalc_step"] = 0.50
    optiondict["psdfilter"] = False
    # debugging options
    optiondict["singleproc_getNullDist"] = False
    optiondict["singleproc_calcsimilarity"] = False
    optiondict["singleproc_peakeval"] = False
    optiondict["singleproc_fitcorr"] = False
    optiondict["singleproc_glm"] = False
    optiondict["internalprecision"] = "double"
    optiondict["outputprecision"] = "single"
    optiondict["nonumba"] = False
    optiondict["memprofile"] = False
    optiondict["sharedmem"] = True
    optiondict["fakerun"] = False
    optiondict["displayplots"] = False
    optiondict["debug"] = False
    optiondict["verbose"] = False
    (
        optiondict["release_version"],
        optiondict["git_longtag"],
        optiondict["git_date"],
        optiondict["git_isdirty"],
    ) = tide_util.version()
    optiondict["python_version"] = str(sys.version_info)
    optiondict["nprocs"] = 1
    optiondict["mklthreads"] = 1
    optiondict["mp_chunksize"] = 50000
    optiondict["showprogressbar"] = True
    optiondict["savecorrmask"] = True
    optiondict["savedespecklemasks"] = True
    optiondict["checkpoint"] = False  # save checkpoint information for tracking program state
    optiondict["alwaysmultiproc"] = False
    optiondict["calccoherence"] = False
    # experimental options
    optiondict["echocancel"] = False
    optiondict["negativegradient"] = False
    optiondict["negativegradregressor"] = False
    # package options
    optiondict["memprofilerexists"] = memprofilerexists
    realtr = 0.0
    theprefilter = tide_filt.NoncausalFilter()
    theprefilter.setbutterorder(optiondict["filtorder"])
    # start the clock!
    tide_util.checkimports(optiondict)
    # get the command line parameters
    optiondict["regressorfile"] = None
    optiondict["inputfreq"] = None
    optiondict["inputstarttime"] = None
    if len(sys.argv) < 3:
        usage()
        sys.exit()
    # handle required args first
    optiondict["in_file"] = sys.argv[1]
    optiondict["outputname"] = sys.argv[2]
    optparsestart = 3
    # now scan for optional arguments
    try:
        opts, args = getopt.getopt(
            sys.argv[optparsestart:],
            "abcdf:gh:i:mo:s:r:t:vBCF:ILMN:O:RSTVZ:",
            [
                "help",
                "nowindow",
                "windowfunc=",
                "datatstep=",
                "datafreq=",
                "lagminthresh=",
                "lagmaxthresh=",
                "ampthresh=",
                "skipsighistfit",
                "sigmathresh=",
                "refineweighting=",
                "refineprenorm=",
                "corrmaskthresh=",
                "despecklepasses=",
                "despecklethresh=",
                "autosync",
                "accheck",
                "acfix",
                "padseconds=",  # FIX: handler does float(a), so this option requires an argument
                "noprogressbar",
                "refinepasses=",
                "passes=",
                "corrmask=",
                "motionfile=",
                "motpos",
                "motderiv",
                "motdelayderiv",
                "globalmeaninclude=",
                "globalmeanexclude=",
                "refineinclude=",
                "refineexclude=",
                "refineoffset",
                "pickleft",
                "pickleftthresh=",
                "nofitfilt",
                "cleanrefined",
                "pca",
                "ica",
                "weightedavg",
                "avg",
                "psdfilter",
                "dispersioncalc",
                "noglm",
                "nosharedmem",
                "multiproc",
                "mklthreads=",
                "permutationmethod=",
                "nprocs=",
                "debug",
                "nonumba",
                "savemotionglmfilt",
                "tmask=",
                "detrendorder=",
                "slicetimes=",
                "glmsourcefile=",
                "preservefiltering",
                "globalmaskmethod=",
                "numskip=",
                "nirs",
                "venousrefine",
                "nothresh",
                "searchfrac=",
                "limitoutput",
                "softlimit",
                "regressor=",
                "regressorfreq=",
                "regressortstep=",
                "regressorstart=",
                "timerange=",
                "refineupperlag",
                "refinelowerlag",
                "memprofile",
                "usesp",
                "liang",
                "eckart",
                "phat",
                "wiener",
                "weiner",
                "respdelete",
                "checkpoint",
                "peakfittype=",
            ],
        )
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like 'option -a not recognized'
        usage()
        sys.exit(2)
    formattedcmdline = [sys.argv[0] + " \\"]
    for thearg in range(1, optparsestart):
        formattedcmdline.append("\t" + sys.argv[thearg] + " \\")
    for o, a in opts:
        linkchar = " "
        if o == "--nowindow":
            optiondict["windowfunc"] = "None"
            print("disable precorrelation windowing")
        elif o == "--checkpoint":
            optiondict["checkpoint"] = True
            print("Enabled run checkpoints")
        elif o == "--permutationmethod":
            themethod = a
            if (themethod != "shuffle") and (themethod != "phaserandom"):
                print("illegal permutation method", themethod)
                sys.exit()
            optiondict["permutationmethod"] = themethod
            linkchar = "="
            print(
                "Will use",
                optiondict["permutationmethod"],
                "as the permutation method for calculating null correlation threshold",
            )
        elif o == "--windowfunc":
            thewindow = a
            if (
                (thewindow != "hamming")
                and (thewindow != "hann")
                and (thewindow != "blackmanharris")
                and (thewindow != "None")
            ):
                print("illegal window function", thewindow)
                sys.exit()
            optiondict["windowfunc"] = thewindow
            linkchar = "="
            print(
                "Will use", optiondict["windowfunc"], "as the window function for correlation",
            )
        elif o == "-v":
            optiondict["verbose"] = True
            print("Turned on verbose mode")
        elif o == "--liang":
            optiondict["corrweighting"] = "liang"
            print("Enabled Liang weighted crosscorrelation")
        elif o == "--eckart":
            optiondict["corrweighting"] = "eckart"
            print("Enabled Eckart weighted crosscorrelation")
        elif o == "--phat":
            optiondict["corrweighting"] = "phat"
            print("Enabled GCC-PHAT fitting")
        elif o == "--weiner":
            print("It's spelled wiener, not weiner")
            print("The filter is named after <NAME>, an MIT mathematician.")
            print("The name probably indicates that his family came from Vienna.")
            print("Spell it right and try again.  I mean, I know what you meant, and could just")
            print("call the routine you wanted anyway, but then how would you learn?")
            sys.exit()
        elif o == "--cleanrefined":
            optiondict["cleanrefined"] = True
            print("Will attempt to clean refined regressor")
        elif o == "--respdelete":
            optiondict["respdelete"] = True
            print("Will attempt to track and delete respiratory waveforms in the passband")
        elif o == "--wiener":
            optiondict["dodeconv"] = True
            print("Will perform Wiener deconvolution")
        elif o == "--usesp":
            optiondict["internalprecision"] = "single"
            print("Will use single precision for internal calculations")
        elif o == "--preservefiltering":
            optiondict["preservefiltering"] = True
            print("Will not reread input file prior to GLM")
        elif o == "--glmsourcefile":
            optiondict["glmsourcefile"] = a
            linkchar = "="
            print("Will regress delayed regressors out of", optiondict["glmsourcefile"])
        elif o == "--corrmaskthresh":
            optiondict["corrmaskthreshpct"] = float(a)
            linkchar = "="
            print(
                "Will perform correlations in voxels where mean exceeds",
                optiondict["corrmaskthreshpct"],
                "% of robust maximum",
            )
        elif o == "-I":
            optiondict["invertregressor"] = True
            print("Invert the regressor prior to running")
        elif o == "-B":
            optiondict["bipolar"] = True
            print("Enabled bipolar correlation fitting")
        elif o == "-S":
            optiondict["fakerun"] = True
            print("report command line options and quit")
        elif o == "-a":
            optiondict["antialias"] = False
            print("antialiasing disabled")
        elif o == "-M":
            optiondict["useglobalref"] = True
            print("using global mean timecourse as the reference regressor")
        elif o == "--globalmaskmethod":
            optiondict["globalmaskmethod"] = a
            if optiondict["globalmaskmethod"] == "mean":
                print("will use mean value to mask voxels prior to generating global mean")
            elif optiondict["globalmaskmethod"] == "variance":
                print(
                    "will use timecourse variance to mask voxels prior to generating global mean"
                )
            else:
                print(
                    optiondict["globalmaskmethod"],
                    "is not a valid masking method.  Valid methods are 'mean' and 'variance'",
                )
                sys.exit()
        elif o == "-m":
            optiondict["globalsignalmethod"] = "meanscale"
            print("mean scale voxels prior to generating global mean")
        elif o == "--limitoutput":
            optiondict["savelagregressors"] = False
            optiondict["savemovingsignal"] = False
            print("disabling output of lagregressors and some ancillary GLM timecourses")
        elif o == "--debug":
            optiondict["debug"] = True
            theprefilter.setdebug(optiondict["debug"])
            print("enabling additional data output for debugging")
        elif o == "--multiproc":
            optiondict["nprocs"] = -1
            print("enabling multiprocessing")
        elif o == "--softlimit":
            optiondict["hardlimit"] = False
            linkchar = "="
            print("will relax peak lag constraint for maximum correlations at edge of range")
        elif o == "--nosharedmem":
            optiondict["sharedmem"] = False
            linkchar = "="
            print("will not use shared memory for large array storage")
        elif o == "--mklthreads":
            optiondict["mklthreads"] = int(a)
            linkchar = "="
            if mklexists:
                print(
                    "will use",
                    optiondict["mklthreads"],
                    "MKL threads for accelerated numpy processing.",
                )
            else:
                print("MKL not present - ignoring --mklthreads")
        elif o == "--nprocs":
            optiondict["nprocs"] = int(a)
            linkchar = "="
            if optiondict["nprocs"] < 0:
                print("will use n_cpus - 1 processes for calculation")
            else:
                print("will use", optiondict["nprocs"], "processes for calculation")
        elif o == "--savemotionglmfilt":
            optiondict["savemotionfiltered"] = True
            print("saving motion filtered data")  # FIX: typo "saveing"
        elif o == "--nonumba":
            optiondict["nonumba"] = True
            print("disabling numba if present")
        elif o == "--memprofile":
            if memprofilerexists:
                optiondict["memprofile"] = True
                print("enabling memory profiling")
            else:
                print("cannot enable memory profiling - memory_profiler module not found")
        elif o == "--noglm":
            optiondict["doglmfilt"] = False
            print("disabling GLM filter")
        elif o == "-T":
            optiondict["savecorrtimes"] = True
            print("saving a table of correlation times used")
        elif o == "-V":
            theprefilter.settype("vlf")
            print("prefiltering to vlf band")
        elif o == "-L":
            theprefilter.settype("lfo")
            optiondict["filtertype"] = "lfo"
            optiondict["despeckle_thresh"] = np.max(
                [optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
            )
            print("prefiltering to lfo band")
        elif o == "-R":
            theprefilter.settype("resp")
            optiondict["filtertype"] = "resp"
            optiondict["despeckle_thresh"] = np.max(
                [optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
            )
            print("prefiltering to respiratory band")
        elif o == "-C":
            theprefilter.settype("cardiac")
            optiondict["filtertype"] = "cardiac"
            optiondict["despeckle_thresh"] = np.max(
                [optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
            )
            print("prefiltering to cardiac band")
        elif o == "-F":
            arbvec = a.split(",")
            if len(arbvec) != 2 and len(arbvec) != 4:
                usage()
                sys.exit()
            if len(arbvec) == 2:
                optiondict["arb_lower"] = float(arbvec[0])
                optiondict["arb_upper"] = float(arbvec[1])
                optiondict["arb_lowerstop"] = 0.9 * float(arbvec[0])
                optiondict["arb_upperstop"] = 1.1 * float(arbvec[1])
            if len(arbvec) == 4:
                optiondict["arb_lower"] = float(arbvec[0])
                optiondict["arb_upper"] = float(arbvec[1])
                optiondict["arb_lowerstop"] = float(arbvec[2])
                optiondict["arb_upperstop"] = float(arbvec[3])
            theprefilter.settype("arb")
            optiondict["filtertype"] = "arb"
            theprefilter.setfreqs(
                optiondict["arb_lowerstop"],
                optiondict["arb_lower"],
                optiondict["arb_upper"],
                optiondict["arb_upperstop"],
            )
            optiondict["despeckle_thresh"] = np.max(
                [optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
            )
            print(
                "prefiltering to ",
                optiondict["arb_lower"],
                optiondict["arb_upper"],
                "(stops at ",
                optiondict["arb_lowerstop"],
                optiondict["arb_upperstop"],
                ")",
            )
        elif o == "--padseconds":
            optiondict["padseconds"] = float(a)
            print("Setting filter padding to", optiondict["padseconds"], "seconds")
        elif o == "-d":
            optiondict["displayplots"] = True
            print("displaying all plots")
        elif o == "-N":
            optiondict["numestreps"] = int(a)
            if optiondict["numestreps"] == 0:
                optiondict["ampthreshfromsig"] = False
                print("Will not estimate significance thresholds from null correlations")
            else:
                print(
                    "Will estimate p<0.05 significance threshold from ",
                    optiondict["numestreps"],
                    " null correlations",
                )
        elif o == "--accheck":
            optiondict["check_autocorrelation"] = True
            print("Will check for periodic components in the autocorrelation function")
        elif o == "--despecklethresh":
            if optiondict["despeckle_passes"] == 0:
                optiondict["despeckle_passes"] = 1
            optiondict["check_autocorrelation"] = True
            optiondict["despeckle_thresh"] = float(a)
            linkchar = "="
            print("Forcing despeckle threshhold to ", optiondict["despeckle_thresh"])
        elif o == "--despecklepasses":
            optiondict["check_autocorrelation"] = True
            optiondict["despeckle_passes"] = int(a)
            if optiondict["despeckle_passes"] < 0:
                print("minimum number of despeckle passes is 0")
                sys.exit()
            linkchar = "="
            print("Will do ", optiondict["despeckle_passes"], " despeckling passes")
        elif o == "--acfix":
            optiondict["fix_autocorrelation"] = True
            optiondict["check_autocorrelation"] = True
            print("Will remove periodic components in the autocorrelation function (experimental)")
        elif o == "--noprogressbar":
            optiondict["showprogressbar"] = False
            print("Will disable progress bars")
        elif o == "-s":
            optiondict["widthlimit"] = float(a)
            print("Setting gaussian fit width limit to ", optiondict["widthlimit"], "Hz")
        elif o == "-b":
            optiondict["filtertype"] = "butterworth"
            theprefilter.setbutterorder(optiondict["filtorder"])
            print("Using butterworth bandlimit filter")
        elif o == "-Z":
            optiondict["fixeddelayvalue"] = float(a)
            optiondict["fixdelay"] = True
            optiondict["lagmin"] = optiondict["fixeddelayvalue"] - 10.0
            optiondict["lagmax"] = optiondict["fixeddelayvalue"] + 10.0
            print("Delay will be set to ", optiondict["fixeddelayvalue"], "in all voxels")
        elif o == "--motionfile":
            optiondict["motionfilename"] = a
            print(
                "Will regress derivatives and delayed derivatives of motion out of data prior to analysis"
            )
        elif o == "--motpos":
            optiondict["mot_pos"] = not optiondict["mot_pos"]
            # FIX: print the option name, not the value twice
            print("mot_pos", "set to", optiondict["mot_pos"])
        elif o == "--motderiv":
            optiondict["mot_deriv"] = not optiondict["mot_deriv"]
            # FIX: print the option name, not the value twice
            print("mot_deriv", "set to", optiondict["mot_deriv"])
        elif o == "--motdelayderiv":
            optiondict["mot_delayderiv"] = not optiondict["mot_delayderiv"]
            # FIX: print the option name, not the value twice
            print("mot_delayderiv", "set to", optiondict["mot_delayderiv"])
        elif o == "-f":
            optiondict["gausssigma"] = float(a)
            print(
                "Will prefilter fMRI data with a gaussian kernel of ",
                optiondict["gausssigma"],
                " mm",
            )
        elif o == "--timerange":
            limitvec = a.split(",")
            optiondict["startpoint"] = int(limitvec[0])
            optiondict["endpoint"] = int(limitvec[1])
            if optiondict["endpoint"] == -1:
                optiondict["endpoint"] = 100000000
            linkchar = "="
            print(
                "Analysis will be performed only on data from point ",
                optiondict["startpoint"],
                " to ",
                optiondict["endpoint"],
                ".",
            )
        elif o == "-r":
            lagvec = a.split(",")
            if not optiondict["fixdelay"]:
                optiondict["lagmin"] = float(lagvec[0])
                optiondict["lagmax"] = float(lagvec[1])
                if optiondict["lagmin"] >= optiondict["lagmax"]:
                    print("lagmin must be less than lagmax - exiting")
                    sys.exit(1)
                print(
                    "Correlations will be calculated over range ",
                    optiondict["lagmin"],
                    " to ",
                    optiondict["lagmax"],
                )
        elif o == "-y":
            # NOTE(review): "y" is absent from the getopt short-option string, so this
            # branch is currently unreachable; left in place pending confirmation.
            optiondict["interptype"] = a
            if (
                (optiondict["interptype"] != "cubic")
                and (optiondict["interptype"] != "quadratic")
                and (optiondict["interptype"] != "univariate")
            ):
                print("unsupported interpolation type!")
                sys.exit()
        elif o == "-h":
            optiondict["histlen"] = int(a)
            print("Setting histogram length to ", optiondict["histlen"])
        elif o == "-o":
            optiondict["offsettime"] = float(a)
            optiondict["offsettime_total"] = float(a)
            print("Applying a timeshift of ", optiondict["offsettime"], " to regressor")
        elif o == "--autosync":
            optiondict["autosync"] = True
            print(
                "Will calculate and apply regressor synchronization from global correlation.  Overrides offsettime."
            )
        elif o == "--datafreq":
            realtr = 1.0 / float(a)
            linkchar = "="
            print("Data time step forced to ", realtr)
        elif o == "--datatstep":
            realtr = float(a)
            linkchar = "="
            print("Data time step forced to ", realtr)
        elif o == "-t":
            print(
                "DEPRECATION WARNING: The -t option is obsolete and will be removed in a future version.  Use --datatstep=TSTEP or --datafreq=FREQ instead"
            )
            realtr = float(a)
            print("Data time step forced to ", realtr)
        elif o == "-c":
            optiondict["isgrayordinate"] = True
            print("Input fMRI file is a converted CIFTI file")
        elif o == "-O":
            optiondict["oversampfactor"] = int(a)
            if 0 <= optiondict["oversampfactor"] < 1:
                print(
                    "oversampling factor must be an integer greater than or equal to 1 (or negative to set automatically)"
                )
                sys.exit()
            print("oversampling factor set to ", optiondict["oversampfactor"])
        elif o == "--psdfilter":
            optiondict["psdfilter"] = True
            print(
                "Will use a cross-spectral density filter on shifted timecourses prior to refinement"
            )
        elif o == "--avg":
            optiondict["refinetype"] = "unweighted_average"
            print("Will use unweighted average to refine regressor rather than simple averaging")
        elif o == "--weightedavg":
            optiondict["refinetype"] = "weighted_average"
            print("Will use weighted average to refine regressor rather than simple averaging")
        elif o == "--ica":
            optiondict["refinetype"] = "ica"
            print("Will use ICA procedure to refine regressor rather than simple averaging")
        elif o == "--dispersioncalc":
            optiondict["dodispersioncalc"] = True
            print("Will do dispersion calculation during regressor refinement")
        elif o == "--nofitfilt":
            optiondict["zerooutbadfit"] = False
            optiondict["nohistzero"] = True
            print("Correlation parameters will be recorded even if out of bounds")
        elif o == "--pca":
            optiondict["refinetype"] = "pca"
            print("Will use PCA procedure to refine regressor rather than simple averaging")
        elif o == "--numskip":
            optiondict["preprocskip"] = int(a)
            linkchar = "="
            print("Setting preprocessing trs skipped to ", optiondict["preprocskip"])
        elif o == "--venousrefine":
            optiondict["lagmaskside"] = "upper"
            optiondict["lagminthresh"] = 2.5
            optiondict["lagmaxthresh"] = 6.0
            optiondict["ampthresh"] = 0.5
            print("Biasing refinement to voxels in draining vasculature")
        elif o == "--nirs":
            optiondict["nothresh"] = True
            optiondict["corrmaskthreshpct"] = 0.0
            optiondict["preservefiltering"] = True
            optiondict["refineprenorm"] = "var"
            optiondict["ampthresh"] = 0.7
            optiondict["lagminthresh"] = 0.1
            print("Setting NIRS mode")
        elif o == "--nothresh":
            optiondict["nothresh"] = True
            optiondict["corrmaskthreshpct"] = 0.0
            print("Disabling voxel threshhold")
        elif o == "--regressor":
            optiondict["regressorfile"] = a
            optiondict["useglobalref"] = False
            linkchar = "="
            print("Will use regressor file", a)
        elif o == "--regressorfreq":
            optiondict["inputfreq"] = float(a)
            linkchar = "="
            print("Setting regressor sample frequency to ", float(a))
        elif o == "--regressortstep":
            optiondict["inputfreq"] = 1.0 / float(a)
            linkchar = "="
            print("Setting regressor sample time step to ", float(a))
        elif o == "--regressorstart":
            optiondict["inputstarttime"] = float(a)
            linkchar = "="
            print("Setting regressor start time to ", optiondict["inputstarttime"])
        elif o == "--slicetimes":
            optiondict["slicetimes"] = tide_io.readvecs(a)
            linkchar = "="
            print("Using slicetimes from file", a)
        elif o == "--detrendorder":
            optiondict["detrendorder"] = int(a)
            print(
                "Setting trend removal order to",
                optiondict["detrendorder"],
                "for regressor generation and correlation preparation",
            )
        elif o == "--refineupperlag":
            optiondict["lagmaskside"] = "upper"
            print(
                "Will only use lags between ",
                optiondict["lagminthresh"],
                " and ",
                optiondict["lagmaxthresh"],
                " in refinement",
            )
        elif o == "--refinelowerlag":
            optiondict["lagmaskside"] = "lower"
            print(
                "Will only use lags between ",
                -optiondict["lagminthresh"],
                " and ",
                -optiondict["lagmaxthresh"],
                " in refinement",
            )
        elif o == "--refineoffset":
            optiondict["refineoffset"] = True
            print("Will refine offset time during subsequent passes")
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
        elif o == "--pickleft":
            optiondict["pickleft"] = True
            print("Will select the leftmost delay peak when setting refine offset")
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
        elif o == "--pickleftthresh":
            optiondict["pickleftthresh"] = float(a)
            print(
                "Threshhold value for leftmost peak height set to", optiondict["pickleftthresh"],
            )
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
        elif o == "--lagminthresh":
            optiondict["lagminthresh"] = float(a)
            print("Using lagminthresh of ", optiondict["lagminthresh"])
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
            linkchar = "="
        elif o == "--lagmaxthresh":
            optiondict["lagmaxthresh"] = float(a)
            print("Using lagmaxthresh of ", optiondict["lagmaxthresh"])
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
            linkchar = "="
        elif o == "--skipsighistfit":
            optiondict["dosighistfit"] = False
            print("will not fit significance histogram with a Johnson SB function")
        elif o == "--searchfrac":
            optiondict["searchfrac"] = float(a)
            linkchar = "="
            print(
                "Points greater than",
                optiondict["searchfrac"],  # FIX: was printing ampthresh, not the value just set
                "* the peak height will be used to fit peak parameters",
            )
        elif o == "--ampthresh":
            optiondict["ampthresh"] = float(a)
            optiondict["ampthreshfromsig"] = False
            if optiondict["ampthresh"] < 0.0:
                print(
                    "Setting ampthresh to the", -100.0 * optiondict["ampthresh"], "th percentile",
                )
            else:
                print("Using ampthresh of ", optiondict["ampthresh"])
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
            linkchar = "="
        elif o == "--sigmathresh":
            optiondict["sigmathresh"] = float(a)
            print("Using widththresh of ", optiondict["sigmathresh"])
            if optiondict["passes"] == 1:
                print(
                    "WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
                )
            linkchar = "="
        elif o == "--globalmeaninclude":
            optiondict["globalmeanincludename"], colspec = tide_io.parsefilespec(a)
            if colspec is not None:
                optiondict["globalmeanincludevals"] = tide_io.colspectolist(colspec)
            linkchar = "="
            if optiondict["globalmeanincludevals"] is not None:
                print(
                    "Using voxels where",
                    optiondict["globalmeanincludename"],
                    " = ",
                    optiondict["globalmeanincludevals"],
                    " for inclusion in global mean",
                )
            else:
                print(
                    "Using ",
                    optiondict["globalmeanincludename"],
                    " as include mask for global mean calculation",
                )
        elif o == "--globalmeanexclude":
            optiondict["globalmeanexcludename"], colspec = tide_io.parsefilespec(a)
            if colspec is not None:
                optiondict["globalmeanexcludevals"] = tide_io.colspectolist(colspec)
            linkchar = "="
            if optiondict["globalmeanexcludevals"] is not None:
                print(
                    "Using voxels where",
                    optiondict["globalmeanexcludename"],
                    " = ",
                    optiondict["globalmeanexcludevals"],
                    " for exclusion from global mean",
                )
            else:
                print(
                    "Using ",
                    optiondict["globalmeanexcludename"],
                    " as exclude mask for global mean calculation",
                )
        elif o == "--refineinclude":
            optiondict["refineincludename"], colspec = tide_io.parsefilespec(a)
            if colspec is not None:
                optiondict["refineincludevals"] = tide_io.colspectolist(colspec)
            linkchar = "="
            if optiondict["refineincludevals"] is not None:
                print(
                    "Using voxels where",
                    optiondict["refineincludename"],
                    " = ",
                    optiondict["refineincludevals"],
                    " for inclusion in refine mask",
                )
            else:
                print(
                    "Using ",
                    optiondict["refineincludename"],
                    " as include mask for probe regressor refinement",
                )
        elif o == "--refineexclude":
            optiondict["refineexcludename"], colspec = tide_io.parsefilespec(a)
            if colspec is not None:
                optiondict["refineexcludevals"] = tide_io.colspectolist(colspec)
            linkchar = "="
            if optiondict["refineexcludevals"] is not None:
                print(
                    "Using voxels where",
                    optiondict["refineexcludename"],
                    " = ",
                    optiondict["refineexcludevals"],
                    " for exclusion from refine mask",
                )
            else:
                print(
                    "Using ",
                    optiondict["refineexcludename"],
                    " as exclude mask for probe regressor refinement",
                )
        elif o == "--corrmask":
            (
                optiondict["corrmaskincludename"],
                optiondict["corrmaskincludevals"],
            ) = tide_io.processnamespec(a, "Using voxels where ", "in correlation calculations.")
        elif o == "--refineprenorm":
            optiondict["refineprenorm"] = a
            if (
                (optiondict["refineprenorm"] != "None")
                and (optiondict["refineprenorm"] != "mean")
                and (optiondict["refineprenorm"] != "var")
                and (optiondict["refineprenorm"] != "std")
                and (optiondict["refineprenorm"] != "invlag")
            ):
                print("unsupported refinement prenormalization mode!")
                sys.exit()
            linkchar = "="
        elif o == "--refineweighting":
            optiondict["refineweighting"] = a
            if (
                (optiondict["refineweighting"] != "None")
                and (optiondict["refineweighting"] != "NIRS")
                and (optiondict["refineweighting"] != "R")
                and (optiondict["refineweighting"] != "R2")
            ):
                print("unsupported refinement weighting!")
                sys.exit()
            linkchar = "="
        elif o == "--tmask":
            optiondict["tmaskname"] = a
            linkchar = "="
            print("Will multiply regressor by timecourse in ", optiondict["tmaskname"])
        elif o == "--refinepasses" or o == "--passes":
            if o == "--refinepasses":
                print(
                    "DEPRECATION WARNING - refinepasses is deprecated and will be removed in a future version - use passes instead"
                )
            optiondict["passes"] = int(a)
            linkchar = "="
            print("Will do ", optiondict["passes"], " processing passes")
        elif o == "--peakfittype":
            optiondict["peakfittype"] = a
            linkchar = "="
            print("Similarity function peak fitting method is ", optiondict["peakfittype"])
        elif o == "--help":
            # FIX: "-h" is consumed by the histlen branch above (and takes an
            # argument), so only the long form can reach this point.
            usage()
            sys.exit()
        else:
            assert False, "unhandled option: " + o
        formattedcmdline.append("\t" + o + linkchar + a + " \\")
    formattedcmdline[len(formattedcmdline) - 1] = formattedcmdline[len(formattedcmdline) - 1][:-2]
    # store the filter limits
    (
        optiondict["lowerpass"],
        optiondict["upperpass"],
        optiondict["lowerstop"],
        optiondict["upperstop"],
    ) = theprefilter.getfreqs()
    # write out the command used
    tide_io.writevec(formattedcmdline, optiondict["outputname"] + "_formattedcommandline.txt")
    tide_io.writevec([" ".join(sys.argv)], optiondict["outputname"] + "_commandline.txt")
    optiondict["commandlineargs"] = sys.argv[1:]
    # add additional information to option structure for debugging
    optiondict["realtr"] = realtr
    optiondict["dispersioncalc_lower"] = optiondict["lagmin"]
    optiondict["dispersioncalc_upper"] = optiondict["lagmax"]
    optiondict["dispersioncalc_step"] = np.max(
        [
            (optiondict["dispersioncalc_upper"] - optiondict["dispersioncalc_lower"]) / 25,
            optiondict["dispersioncalc_step"],
        ]
    )
    return optiondict, theprefilter
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import getopt
import os
import sys
import numpy as np
import rapidtide.filter as tide_filt
import rapidtide.io as tide_io
import rapidtide.util as tide_util
global rt_floatset, rt_floattype
try:
import mkl
mklexists = True
except ImportError:
mklexists = False
try:
from memory_profiler import profile
memprofilerexists = True
except ImportError:
memprofilerexists = False
def usage():
    """Print the rapidtide command-line help text to stdout.

    Lists the two required positional arguments (datafilename, outputname)
    and every optional flag, grouped by processing stage: macros,
    preprocessing, correlation, correlation fitting, regressor refinement,
    output, miscellaneous, and experimental options.

    Returns:
        An empty tuple. Kept (rather than returning None implicitly) for
        backward compatibility with any caller inspecting the return value.
    """
    print("usage: ", os.path.basename(sys.argv[0]), " datafilename outputname ")
    # One-line synopsis of every accepted flag, space-joined.
    print(
        " ".join(
            [
                "[-r LAGMIN,LAGMAX]",
                "[-s SIGMALIMIT]",
                "[-a]",
                "[--nowindow]",
                "[--phat]",
                "[--liang]",
                "[--eckart]",
                "[-f GAUSSSIGMA]",
                "[-O oversampfac]",
                "[-t TSTEP]",
                "[--datatstep=TSTEP]",
                "[--datafreq=FREQ]",
                "[-d]",
                "[-b]",
                "[-V]",
                "[-L]",
                "[-R]",
                "[-C]",
                "[-F LOWERFREQ,UPPERFREQ[,LOWERSTOP,UPPERSTOP]]",
                "[-o OFFSETTIME]",
                "[--autosync]",
                "[-T]",
                "[-p]",
                "[-P]",
                "[-B]",
                "[-h HISTLEN]",
                "[-i INTERPTYPE]",
                "[-I]",
                "[-Z DELAYTIME]",
                "[--nofitfilt]",
                "[--searchfrac=SEARCHFRAC]",
                "[-N NREPS]",
                "[--motionfile=MOTFILE]",
                "[--pickleft]",
                "[--numskip=SKIP]",
                "[--refineweighting=TYPE]",
                "[--refineprenorm=TYPE]",
                "[--passes=PASSES]",
                "[--refinepasses=PASSES]",
                "[--excluderefine=MASK]",
                "[--includerefine=MASK]",
                "[--includemean=MASK]",
                # was an implicit string concatenation (missing comma) that
                # fused these two options together in the output
                "[--excludemean=MASK]",
                "[--lagminthresh=MIN]",
                "[--lagmaxthresh=MAX]",
                "[--ampthresh=AMP]",
                "[--sigmathresh=SIGMA]",
                "[--corrmask=MASK]",
                "[--corrmaskthresh=PCT]",
                "[--refineoffset]",
                "[--pca]",
                "[--ica]",
                "[--weightedavg]",
                "[--avg]",
                "[--psdfilter]",
                "[--noprogressbar]",
                "[--despecklethresh=VAL]",
                "[--despecklepasses=PASSES]",
                "[--dispersioncalc]",
                "[--refineupperlag]",
                "[--refinelowerlag]",
                "[--nosharedmem]",
                "[--tmask=MASKFILE]",
                "[--limitoutput]",
                "[--motionfile=FILENAME[:COLSPEC]",
                "[--softlimit]",
                "[--timerange=START,END]",
                "[--skipsighistfit]",
                "[--accheck]",
                # was an implicit string concatenation (missing comma);
                # NOTE(review): --numskip also appears earlier in this list
                "[--acfix]",
                "[--numskip=SKIP]",
                "[--slicetimes=FILE]",
                "[--glmsourcefile=FILE]",
                "[--regressorfreq=FREQ]",
                # was an implicit string concatenation (missing comma)
                "[--regressortstep=TSTEP]",
                "[--regressor=FILENAME]",
                "[--regressorstart=STARTTIME]",
                "[--usesp]",
                "[--peakfittype=FITTYPE]",
                "[--mklthreads=NTHREADS]",
                "[--nprocs=NPROCS]",
                "[--nirs]",
                "[--venousrefine]",
            ]
        )
    )
    print("")
    print("Required arguments:")
    print(" datafilename - The input data file (BOLD fmri file or NIRS)")
    print(" outputname - The root name for the output files")
    print("")
    print("Optional arguments:")
    print(" Arguments are processed in order of appearance. Later options can override ones earlier on")
    print(" the command line")
    print("")
    print("Macros:")
    print(" --venousrefine - This is a macro that sets --lagminthresh=2.5, --lagmaxthresh=6.0,")
    print(" --ampthresh=0.5, and --refineupperlag to bias refinement towards ")
    print(" voxels in the draining vasculature for an fMRI scan.")
    print(" --nirs - This is a NIRS analysis - this is a macro that sets --nothresh,")
    print(" --preservefiltering, --refinenorm=var, --ampthresh=0.7, ")
    print(" and --lagminthresh=0.1.")
    print("")
    print("Preprocessing options:")
    print(" -t TSTEP, - Set the timestep of the data file to TSTEP (or 1/FREQ)")
    print(" --datatstep=TSTEP, This will override the TR in an fMRI file.")
    print(" --datafreq=FREQ NOTE: if using data from a text file, for example with")
    print(" NIRS data, using one of these options is mandatory.")
    print(" -a - Disable antialiasing filter")
    print(" --detrendorder=ORDER - Set order of trend removal (0 to disable, default is 1 - linear)")
    print(" -I - Invert the sign of the regressor before processing")
    print(" -i - Use specified interpolation type (options are 'cubic',")
    print(" 'quadratic', and 'univariate (default)')")
    print(" -o - Apply an offset OFFSETTIME to the lag regressors")
    print(" --autosync - Calculate and apply offset time of an external regressor from ")
    print(" the global crosscorrelation. Overrides offsettime if specified.")
    print(" -b - Use butterworth filter for band splitting instead of")
    print(" trapezoidal FFT filter")
    print(" -F LOWERFREQ,UPPERFREQ[,LOWERSTOP,UPPERSTOP]")
    print(" - Filter data and regressors from LOWERFREQ to UPPERFREQ.")
    print(" LOWERSTOP and UPPERSTOP can be specified, or will be")
    print(" calculated automatically")
    print(" -V - Filter data and regressors to VLF band")
    print(" -L - Filter data and regressors to LFO band")
    print(" -R - Filter data and regressors to respiratory band")
    print(" -C - Filter data and regressors to cardiac band")
    print(" --padseconds=SECONDS - Set the filter pad time to SECONDS seconds. Default")
    print(" is 30.0")
    print(" -N NREPS - Estimate significance threshold by running NREPS null ")
    print(" correlations (default is 10000, set to 0 to disable). If you are")
    print(" running multiple passes, 'ampthresh' will be set to the 0.05 significance.")
    print(" level unless it is manually specified (see below).")
    print(" --permutationmethod=METHOD - Method for permuting the regressor for significance estimation. Default")
    print(" is shuffle")
    print(" --skipsighistfit - Do not fit significance histogram with a Johnson SB function")
    # typo fix: "funcion" -> "function"
    print(" --windowfunc=FUNC - Use FUNC window function prior to correlation. Options are")
    print(" hamming (default), hann, blackmanharris, and None")
    print(" --nowindow - Disable precorrelation windowing")
    print(" -f GAUSSSIGMA - Spatially filter fMRI data prior to analysis using ")
    print(" GAUSSSIGMA in mm")
    print(" -M - Generate a global mean regressor and use that as the ")
    print(" reference regressor")
    print(" --globalmeaninclude=MASK[:VALSPEC]")
    print(" - Only use voxels in NAME for global regressor generation (if VALSPEC is")
    print(" given, only voxels with integral values listed in VALSPEC are used.)")
    print(" --globalmeanexclude=MASK[:VALSPEC]")
    print(" - Do not use voxels in NAME for global regressor generation (if VALSPEC is")
    print(" given, only voxels with integral values listed in VALSPEC are used.)")
    print(" -m - Mean scale regressors during global mean estimation")
    print(" --slicetimes=FILE - Apply offset times from FILE to each slice in the dataset")
    print(" --numskip=SKIP - SKIP tr's were previously deleted during preprocessing (e.g. if you ")
    print(" have done your preprocessing in FSL and set dummypoints to a ")
    print(" nonzero value.) Default is 0.")
    print(" --timerange=START,END - Limit analysis to data between timepoints START ")
    print(" and END in the fmri file. If END is set to -1, ")
    print(" analysis will go to the last timepoint. Negative values ")
    print(" of START will be set to 0. Default is to use all timepoints.")
    print(" --nothresh - Disable voxel intensity threshold (especially useful")
    print(" for NIRS data)")
    print(" --motionfile=MOTFILE[:COLSPEC] - Read 6 columns of motion regressors out of MOTFILE text file.")
    print(" (with timepoints rows) and regress their derivatives")
    print(" and delayed derivatives out of the data prior to analysis.")
    print(" If COLSPEC is present, use the comma separated list of ranges to")
    print(" specify X, Y, Z, RotX, RotY, and RotZ, in that order. For")
    print(" example, :3-5,7,0,9 would use columns 3, 4, 5, 7, 0 and 9")
    print(" for X, Y, Z, RotX, RotY, RotZ, respectively")
    print(" --motpos - Toggle whether displacement regressors will be used in motion regression.")
    print(" Default is False.")
    print(" --motderiv - Toggle whether derivatives will be used in motion regression.")
    print(" Default is True.")
    print(" --motdelayderiv - Toggle whether delayed derivative regressors will be used in motion regression.")
    print(" Default is False.")
    print("")
    print("Correlation options:")
    print(" -O OVERSAMPFAC - Oversample the fMRI data by the following integral ")
    print(" factor. Setting to -1 chooses the factor automatically (default)")
    print(" --regressor=FILENAME - Read probe regressor from file FILENAME (if none ")
    print(" specified, generate and use global regressor)")
    print(" --regressorfreq=FREQ - Probe regressor in file has sample frequency FREQ ")
    print(" (default is 1/tr) NB: --regressorfreq and --regressortstep")
    print(" are two ways to specify the same thing")
    print(" --regressortstep=TSTEP - Probe regressor in file has sample time step TSTEP ")
    print(" (default is tr) NB: --regressorfreq and --regressortstep")
    print(" are two ways to specify the same thing")
    print(" --regressorstart=START - The time delay in seconds into the regressor file, corresponding")
    print(" in the first TR of the fmri file (default is 0.0)")
    print(" --phat - Use generalized cross-correlation with phase alignment ")
    print(" transform (PHAT) instead of correlation")
    print(" --liang - Use generalized cross-correlation with Liang weighting function")
    print(" (Liang, et al, doi:10.1109/IMCCC.2015.283)")
    print(" --eckart - Use generalized cross-correlation with Eckart weighting function")
    # typo fix: "exceeeds" -> "exceeds"
    print(" --corrmaskthresh=PCT - Do correlations in voxels where the mean exceeds this ")
    print(" percentage of the robust max (default is 1.0)")
    print(" --corrmask=MASK - Only do correlations in voxels in MASK (if set, corrmaskthresh")
    print(" is ignored).")
    print(" --accheck - Check for periodic components that corrupt the autocorrelation")
    print("")
    print("Correlation fitting options:")
    print(" -Z DELAYTIME - Don't fit the delay time - set it to DELAYTIME seconds ")
    print(" for all voxels")
    print(" -r LAGMIN,LAGMAX - Limit fit to a range of lags from LAGMIN to LAGMAX")
    print(" -s SIGMALIMIT - Reject lag fits with linewidth wider than SIGMALIMIT")
    print(" -B - Bipolar mode - match peak correlation ignoring sign")
    print(" --nofitfilt - Do not zero out peak fit values if fit fails")
    print(" --searchfrac=FRAC - When peak fitting, include points with amplitude > FRAC * the")
    print(" maximum amplitude.")
    print(" (default value is 0.5)")
    print(" --peakfittype=FITTYPE - Method for fitting the peak of the similarity function")
    print(" (default is 'gauss'). 'quad' uses a quadratic fit. Other options are ")
    print(" 'fastgauss' which is faster but not as well tested, and 'None'.")
    print(" --despecklepasses=PASSES - detect and refit suspect correlations to disambiguate peak")
    print(" locations in PASSES passes")
    print(" --despecklethresh=VAL - refit correlation if median discontinuity magnitude exceeds")
    print(" VAL (default is 5s)")
    print(" --softlimit - Allow peaks outside of range if the maximum correlation is")
    print(" at an edge of the range.")
    print("")
    print("Regressor refinement options:")
    print(" --refineprenorm=TYPE - Apply TYPE prenormalization to each timecourse prior ")
    print(" to refinement (valid weightings are 'None', ")
    print(" 'mean' (default), 'var', and 'std'")
    print(" --refineweighting=TYPE - Apply TYPE weighting to each timecourse prior ")
    print(" to refinement (valid weightings are 'None', ")
    print(" 'R', 'R2' (default)")
    print(" --passes=PASSES, - Set the number of processing passes to PASSES ")
    print(" --refinepasses=PASSES (default is 1 pass - no refinement).")
    print(" NB: refinepasses is the wrong name for this option -")
    print(" --refinepasses is deprecated, use --passes from now on.")
    print(" --refineinclude=MASK[:VALSPEC] - Only use nonzero voxels in MASK for regressor refinement (if VALSPEC is")
    print(" given, only voxels with integral values listed in VALSPEC are used.)")
    print(" --refineexclude=MASK[:VALSPEC] - Do not use nonzero voxels in MASK for regressor refinement (if VALSPEC is")
    print(" given, only voxels with integral values listed in VALSPEC are used.)")
    print(" --lagminthresh=MIN - For refinement, exclude voxels with delays less ")
    print(" than MIN (default is 0.5s)")
    print(" --lagmaxthresh=MAX - For refinement, exclude voxels with delays greater ")
    print(" than MAX (default is 5s)")
    print(" --ampthresh=AMP - For refinement, exclude voxels with correlation ")
    print(" coefficients less than AMP (default is 0.3). NOTE: ampthresh will")
    print(" automatically be set to the p<0.05 significance level determined by")
    print(" the -N option if -N is set greater than 0 and this is not ")
    print(" manually specified.")
    print(" --sigmathresh=SIGMA - For refinement, exclude voxels with widths greater ")
    print(" than SIGMA (default is 100s)")
    print(" --refineoffset - Adjust offset time during refinement to bring peak ")
    print(" delay to zero")
    print(" --pickleft - When setting refineoffset, always select the leftmost histogram peak")
    print(" --pickleftthresh=THRESH - Set the threshold value (fraction of maximum) to decide something is a ")
    print(" peak in a histogram. Default is 0.33.")
    print(" --refineupperlag - Only use positive lags for regressor refinement")
    print(" --refinelowerlag - Only use negative lags for regressor refinement")
    print(" --pca - Use pca to derive refined regressor (default is ")
    print(" unweighted averaging)")
    print(" --ica - Use ica to derive refined regressor (default is ")
    print(" unweighted averaging)")
    print(" --weightedavg - Use weighted average to derive refined regressor ")
    print(" (default is unweighted averaging)")
    print(" --avg - Use unweighted average to derive refined regressor ")
    print(" (default)")
    print(" --psdfilter - Apply a PSD weighted Wiener filter to shifted")
    print(" timecourses prior to refinement")
    print("")
    print("Output options:")
    print(" --limitoutput - Don't save some of the large and rarely used files")
    print(" -T - Save a table of lagtimes used")
    print(" -h HISTLEN - Change the histogram length to HISTLEN (default is")
    print(" 100)")
    print(" --glmsourcefile=FILE - Regress delayed regressors out of FILE instead of the ")
    print(" initial fmri file used to estimate delays")
    print(" --noglm - Turn off GLM filtering to remove delayed regressor ")
    print(" from each voxel (disables output of fitNorm)")
    print(" --preservefiltering - don't reread data prior to GLM")
    print("")
    print("Miscellaneous options:")
    print(" --noprogressbar - Disable progress bars - useful if saving output to files")
    print(" --wiener - Perform Wiener deconvolution to get voxel transfer functions")
    print(" --usesp - Use single precision for internal calculations (may")
    print(" be useful when RAM is limited)")
    print(" -c - Data file is a converted CIFTI")
    print(" -S - Simulate a run - just report command line options")
    print(" -d - Display plots of interesting timecourses")
    print(" --nonumba - Disable jit compilation with numba")
    print(" --nosharedmem - Disable use of shared memory for large array storage")
    print(" --memprofile - Enable memory profiling for debugging - warning:")
    print(" this slows things down a lot.")
    print(" --multiproc - Enable multiprocessing versions of key subroutines. This")
    print(" speeds things up dramatically. Almost certainly will NOT")
    print(" work on Windows (due to different forking behavior).")
    print(" --mklthreads=NTHREADS - Use no more than NTHREADS worker threads in accelerated numpy calls.")
    print(" --nprocs=NPROCS - Use NPROCS worker processes for multiprocessing. Setting NPROCS")
    print(" less than 1 sets the number of worker processes to")
    print(" n_cpus - 1 (default). Setting NPROCS enables --multiproc.")
    print(" --debug - Enable additional information output")
    # NOTE(review): orphaned fragment - the option description this line
    # continued appears to have been removed upstream; kept to preserve output.
    print(" become the default, but for now I'm just trying it out.")
    print("")
    print("Experimental options (not fully tested, may not work):")
    print(" --cleanrefined - perform additional processing on refined regressor to remove spurious")
    print(" components.")
    print(" --dispersioncalc - Generate extra data during refinement to allow calculation of")
    print(" dispersion.")
    print(" --acfix - Perform a secondary correlation to disambiguate peak location")
    print(" (enables --accheck). Experimental.")
    print(" --tmask=MASKFILE - Only correlate during epochs specified in ")
    # typo fix: "colum" -> "column"
    print(" MASKFILE (NB: if file has one column, the length needs to match")
    print(" the number of TRs used. TRs with nonzero values will be used")
    print(" in analysis. If there are 2 or more columns, each line of MASKFILE")
    print(" contains the time (first column) and duration (second column) of an")
    print(" epoch to include.)")
    # Empty tuple return preserved for backward compatibility.
    return ()
def process_args(inputargs=None):
nargs = len(sys.argv)
if nargs < 3:
usage()
exit()
# set default variable values
optiondict = {}
# file i/o file options
optiondict["isgrayordinate"] = False
optiondict["textio"] = False
# preprocessing options
optiondict["gausssigma"] = 0.0 # the width of the spatial filter kernel in mm
optiondict[
"antialias"
] = True # apply an antialiasing filter to any regressors prior to filtering
optiondict["invertregressor"] = False # invert the initial regressor during startup
optiondict["slicetimes"] = None # do not apply any slice order correction by default
optiondict["startpoint"] = -1 # by default, analyze the entire length of the dataset
optiondict["endpoint"] = 10000000 # by default, analyze the entire length of the dataset
optiondict["preprocskip"] = 0 # number of trs skipped in preprocessing
optiondict["globalsignalmethod"] = "sum"
optiondict["globalpcacomponents"] = 0.8
optiondict["globalmaskmethod"] = "mean"
optiondict["globalmeanexcludename"] = None
optiondict["globalmeanexcludevals"] = None # list of integer values to use in the mask
optiondict["globalmeanincludename"] = None
optiondict["globalmeanincludevals"] = None # list of integer values to use in the mask
# correlation options
optiondict["similaritymetric"] = "correlation"
optiondict["smoothingtime"] = 3.0
optiondict["madnormMI"] = False
optiondict["dodemean"] = True # remove the mean from signals prior to correlation
optiondict["detrendorder"] = 1 # remove linear trends prior to correlation
optiondict["windowfunc"] = "hamming" # the particular window function to use for correlation
optiondict["zeropadding"] = 0 # by default, padding is 0 (circular correlation)
optiondict[
"corrweighting"
] = "None" # use a standard unweighted crosscorrelation for calculate time delays
optiondict["tmaskname"] = None # file name for tmask regressor
optiondict[
"corrmaskthreshpct"
] = 1.0 # percentage of robust maximum of mean to mask correlations
optiondict["corrmaskexcludename"] = None
optiondict["corrmaskexcludevals"] = None # list of integer values to use in the mask
optiondict["corrmaskincludename"] = None
optiondict["corrmaskincludevals"] = None # list of integer values to use in the mask
optiondict[
"check_autocorrelation"
] = True # check for periodic components that corrupt the autocorrelation
optiondict[
"fix_autocorrelation"
] = False # remove periodic components that corrupt the autocorrelation
optiondict[
"despeckle_thresh"
] = 5.0 # threshold value - despeckle if median discontinuity magnitude exceeds despeckle_thresh
optiondict["despeckle_passes"] = 0 # despeckling passes to perform
optiondict["nothresh"] = False # disable voxel intensity threshholding
# correlation fitting options
optiondict[
"hardlimit"
] = True # Peak value must be within specified range. If false, allow max outside if maximum
# correlation value is that one end of the range.
optiondict["bipolar"] = False # find peak with highest magnitude, regardless of sign
optiondict["lthreshval"] = 0.0 # zero out peaks with correlations lower than this value
optiondict["uthreshval"] = 1.0 # zero out peaks with correlations higher than this value
optiondict[
"edgebufferfrac"
] = 0.0 # what fraction of the correlation window to avoid on either end when fitting
optiondict["enforcethresh"] = True # only do fits in voxels that exceed threshhold
optiondict["zerooutbadfit"] = True # if true zero out all fit parameters if the fit fails
optiondict[
"searchfrac"
] = 0.5 # The fraction of the main peak over which points are included in the peak
optiondict[
"lagmod"
] = 1000.0 # if set to the location of the first autocorrelation sidelobe, this should
optiondict[
"peakfittype"
] = "gauss" # if set to 'gauss', use old gaussian fitting, if set to 'quad' use parabolic
optiondict["acwidth"] = 0.0 # width of the reference autocorrelation function
optiondict["absmaxsigma"] = 100.0 # width of the reference autocorrelation function
optiondict["absminsigma"] = 0.25 # width of the reference autocorrelation function
# move delay peaks back to the correct position if they hit a sidelobe
# postprocessing options
optiondict[
"doglmfilt"
] = True # use a glm filter to remove the delayed regressor from the data in each voxel
optiondict["preservefiltering"] = False
optiondict[
"glmsourcefile"
] = None # name of the file from which to regress delayed regressors (if not the original data)
optiondict["dodeconv"] = False # do Wiener deconvolution to find voxel transfer function
optiondict["motionfilename"] = None # by default do no motion regression
optiondict["mot_pos"] = False # do not do position
optiondict["mot_deriv"] = True # do use derivative
optiondict["mot_delayderiv"] = False # do not do delayed derivative
optiondict["savemotionfiltered"] = False # save motion filtered file for debugging
# filter options
optiondict["filtorder"] = 6
optiondict[
"padseconds"
] = 30.0 # the number of seconds of padding to add to each end of a filtered timecourse
optiondict["filtertype"] = "trapezoidal"
optiondict["respdelete"] = False
optiondict["lowerstop"] = None
optiondict["lowerpass"] = None
optiondict["upperpass"] = None
optiondict["upperstop"] = None
# output options
optiondict["savelagregressors"] = True
optiondict["savemovingsignal"] = True
optiondict["saveglmfiltered"] = True
optiondict["savecorrtimes"] = False
optiondict["saveintermediatemaps"] = False
optiondict["bidsoutput"] = False
optiondict["interptype"] = "univariate"
optiondict["useglobalref"] = False
optiondict["fixdelay"] = False
optiondict["fixeddelayvalue"] = 0.0
# significance estimation options
optiondict[
"numestreps"
] = 10000 # the number of sham correlations to perform to estimate significance
optiondict["permutationmethod"] = "shuffle"
optiondict[
"nohistzero"
] = False # if False, there is a spike at R=0 in the significance histogram
optiondict["ampthreshfromsig"] = True
optiondict["sighistlen"] = 1000
optiondict["dosighistfit"] = True
optiondict["histlen"] = 250
optiondict["oversampfactor"] = -1
optiondict["lagmin"] = -30.0
optiondict["lagmax"] = 30.0
optiondict["widthlimit"] = 100.0
optiondict["offsettime"] = 0.0
optiondict["autosync"] = False
optiondict["offsettime_total"] = 0.0
# refinement options
optiondict["cleanrefined"] = False
optiondict["lagmaskside"] = "both"
optiondict["refineweighting"] = "R2"
optiondict["refineprenorm"] = "mean"
optiondict["sigmathresh"] = 100.0
optiondict["lagminthresh"] = 0.5
optiondict["lagmaxthresh"] = 5.0
optiondict["ampthresh"] = 0.3
optiondict["passes"] = 1
optiondict["refineoffset"] = False
optiondict["pickleft"] = False
optiondict["pickleftthresh"] = 0.33
optiondict["refineexcludename"] = None
optiondict["refineexcludevals"] = None # list of integer values to use in the mask
optiondict["refineincludename"] = None
optiondict["refineincludevals"] = None # list of integer values to use in the mask
optiondict["corrmaskvallist"] = None
optiondict["refinetype"] = "unweighted_average"
optiondict["convergencethresh"] = None
optiondict["maxpasses"] = 10
optiondict["pcacomponents"] = 0.8
optiondict["filterbeforePCA"] = True
optiondict["fmrifreq"] = 0.0
optiondict["dodispersioncalc"] = False
optiondict["dispersioncalc_lower"] = -4.0
optiondict["dispersioncalc_upper"] = 4.0
optiondict["dispersioncalc_step"] = 0.50
optiondict["psdfilter"] = False
# debugging options
optiondict["singleproc_getNullDist"] = False
optiondict["singleproc_calcsimilarity"] = False
optiondict["singleproc_peakeval"] = False
optiondict["singleproc_fitcorr"] = False
optiondict["singleproc_glm"] = False
optiondict["internalprecision"] = "double"
optiondict["outputprecision"] = "single"
optiondict["nonumba"] = False
optiondict["memprofile"] = False
optiondict["sharedmem"] = True
optiondict["fakerun"] = False
optiondict["displayplots"] = False
optiondict["debug"] = False
optiondict["verbose"] = False
(
optiondict["release_version"],
optiondict["git_longtag"],
optiondict["git_date"],
optiondict["git_isdirty"],
) = tide_util.version()
optiondict["python_version"] = str(sys.version_info)
optiondict["nprocs"] = 1
optiondict["mklthreads"] = 1
optiondict["mp_chunksize"] = 50000
optiondict["showprogressbar"] = True
optiondict["savecorrmask"] = True
optiondict["savedespecklemasks"] = True
optiondict["checkpoint"] = False # save checkpoint information for tracking program state
optiondict["alwaysmultiproc"] = False
optiondict["calccoherence"] = False
# experimental options
optiondict["echocancel"] = False
optiondict["negativegradient"] = False
optiondict["negativegradregressor"] = False
# package options
optiondict["memprofilerexists"] = memprofilerexists
realtr = 0.0
theprefilter = tide_filt.NoncausalFilter()
theprefilter.setbutterorder(optiondict["filtorder"])
# start the clock!
tide_util.checkimports(optiondict)
# get the command line parameters
optiondict["regressorfile"] = None
optiondict["inputfreq"] = None
optiondict["inputstarttime"] = None
if len(sys.argv) < 3:
usage()
sys.exit()
# handle required args first
optiondict["in_file"] = sys.argv[1]
optiondict["outputname"] = sys.argv[2]
optparsestart = 3
# now scan for optional arguments
try:
opts, args = getopt.getopt(
sys.argv[optparsestart:],
"abcdf:gh:i:mo:s:r:t:vBCF:ILMN:O:RSTVZ:",
[
"help",
"nowindow",
"windowfunc=",
"datatstep=",
"datafreq=",
"lagminthresh=",
"lagmaxthresh=",
"ampthresh=",
"skipsighistfit",
"sigmathresh=",
"refineweighting=",
"refineprenorm=",
"corrmaskthresh=",
"despecklepasses=",
"despecklethresh=",
"autosync",
"accheck",
"acfix",
"padseconds",
"noprogressbar",
"refinepasses=",
"passes=",
"corrmask=",
"motionfile=",
"motpos",
"motderiv",
"motdelayderiv",
"globalmeaninclude=",
"globalmeanexclude=",
"refineinclude=",
"refineexclude=",
"refineoffset",
"pickleft",
"pickleftthresh=",
"nofitfilt",
"cleanrefined",
"pca",
"ica",
"weightedavg",
"avg",
"psdfilter",
"dispersioncalc",
"noglm",
"nosharedmem",
"multiproc",
"mklthreads=",
"permutationmethod=",
"nprocs=",
"debug",
"nonumba",
"savemotionglmfilt",
"tmask=",
"detrendorder=",
"slicetimes=",
"glmsourcefile=",
"preservefiltering",
"globalmaskmethod=",
"numskip=",
"nirs",
"venousrefine",
"nothresh",
"searchfrac=",
"limitoutput",
"softlimit",
"regressor=",
"regressorfreq=",
"regressortstep=",
"regressorstart=",
"timerange=",
"refineupperlag",
"refinelowerlag",
"memprofile",
"usesp",
"liang",
"eckart",
"phat",
"wiener",
"weiner",
"respdelete",
"checkpoint",
"peakfittype=",
],
)
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like 'option -a not recognized'
usage()
sys.exit(2)
formattedcmdline = [sys.argv[0] + " \\"]
for thearg in range(1, optparsestart):
formattedcmdline.append("\t" + sys.argv[thearg] + " \\")
for o, a in opts:
linkchar = " "
if o == "--nowindow":
optiondict["windowfunc"] = "None"
print("disable precorrelation windowing")
elif o == "--checkpoint":
optiondict["checkpoint"] = True
print("Enabled run checkpoints")
elif o == "--permutationmethod":
themethod = a
if (themethod != "shuffle") and (themethod != "phaserandom"):
print("illegal permutation method", themethod)
sys.exit()
optiondict["permutationmethod"] = themethod
linkchar = "="
print(
"Will use",
optiondict["permutationmethod"],
"as the permutation method for calculating null correlation threshold",
)
elif o == "--windowfunc":
thewindow = a
if (
(thewindow != "hamming")
and (thewindow != "hann")
and (thewindow != "blackmanharris")
and (thewindow != "None")
):
print("illegal window function", thewindow)
sys.exit()
optiondict["windowfunc"] = thewindow
linkchar = "="
print(
"Will use", optiondict["windowfunc"], "as the window function for correlation",
)
elif o == "-v":
optiondict["verbose"] = True
print("Turned on verbose mode")
elif o == "--liang":
optiondict["corrweighting"] = "liang"
print("Enabled Liang weighted crosscorrelation")
elif o == "--eckart":
optiondict["corrweighting"] = "eckart"
print("Enabled Eckart weighted crosscorrelation")
elif o == "--phat":
optiondict["corrweighting"] = "phat"
print("Enabled GCC-PHAT fitting")
elif o == "--weiner":
print("It's spelled wiener, not weiner")
print("The filter is named after <NAME>, an MIT mathematician.")
print("The name probably indicates that his family came from Vienna.")
print("Spell it right and try again. I mean, I know what you meant, and could just")
print("call the routine you wanted anyway, but then how would you learn?")
sys.exit()
elif o == "--cleanrefined":
optiondict["cleanrefined"] = True
print("Will attempt to clean refined regressor")
elif o == "--respdelete":
optiondict["respdelete"] = True
print("Will attempt to track and delete respiratory waveforms in the passband")
elif o == "--wiener":
optiondict["dodeconv"] = True
print("Will perform Wiener deconvolution")
elif o == "--usesp":
optiondict["internalprecision"] = "single"
print("Will use single precision for internal calculations")
elif o == "--preservefiltering":
optiondict["preservefiltering"] = True
print("Will not reread input file prior to GLM")
elif o == "--glmsourcefile":
optiondict["glmsourcefile"] = a
linkchar = "="
print("Will regress delayed regressors out of", optiondict["glmsourcefile"])
elif o == "--corrmaskthresh":
optiondict["corrmaskthreshpct"] = float(a)
linkchar = "="
print(
"Will perform correlations in voxels where mean exceeds",
optiondict["corrmaskthreshpct"],
"% of robust maximum",
)
elif o == "-I":
optiondict["invertregressor"] = True
print("Invert the regressor prior to running")
elif o == "-B":
optiondict["bipolar"] = True
print("Enabled bipolar correlation fitting")
elif o == "-S":
optiondict["fakerun"] = True
print("report command line options and quit")
elif o == "-a":
optiondict["antialias"] = False
print("antialiasing disabled")
elif o == "-M":
optiondict["useglobalref"] = True
print("using global mean timecourse as the reference regressor")
elif o == "--globalmaskmethod":
optiondict["globalmaskmethod"] = a
if optiondict["globalmaskmethod"] == "mean":
print("will use mean value to mask voxels prior to generating global mean")
elif optiondict["globalmaskmethod"] == "variance":
print(
"will use timecourse variance to mask voxels prior to generating global mean"
)
else:
print(
optiondict["globalmaskmethod"],
"is not a valid masking method. Valid methods are 'mean' and 'variance'",
)
sys.exit()
elif o == "-m":
optiondict["globalsignalmethod"] = "meanscale"
print("mean scale voxels prior to generating global mean")
elif o == "--limitoutput":
optiondict["savelagregressors"] = False
optiondict["savemovingsignal"] = False
print("disabling output of lagregressors and some ancillary GLM timecourses")
elif o == "--debug":
optiondict["debug"] = True
theprefilter.setdebug(optiondict["debug"])
print("enabling additional data output for debugging")
elif o == "--multiproc":
optiondict["nprocs"] = -1
print("enabling multiprocessing")
elif o == "--softlimit":
optiondict["hardlimit"] = False
linkchar = "="
print("will relax peak lag constraint for maximum correlations at edge of range")
elif o == "--nosharedmem":
optiondict["sharedmem"] = False
linkchar = "="
print("will not use shared memory for large array storage")
elif o == "--mklthreads":
optiondict["mklthreads"] = int(a)
linkchar = "="
if mklexists:
print(
"will use",
optiondict["mklthreads"],
"MKL threads for accelerated numpy processing.",
)
else:
print("MKL not present - ignoring --mklthreads")
elif o == "--nprocs":
optiondict["nprocs"] = int(a)
linkchar = "="
if optiondict["nprocs"] < 0:
print("will use n_cpus - 1 processes for calculation")
else:
print("will use", optiondict["nprocs"], "processes for calculation")
elif o == "--savemotionglmfilt":
optiondict["savemotionfiltered"] = True
print("saveing motion filtered data")
elif o == "--nonumba":
optiondict["nonumba"] = True
print("disabling numba if present")
elif o == "--memprofile":
if memprofilerexists:
optiondict["memprofile"] = True
print("enabling memory profiling")
else:
print("cannot enable memory profiling - memory_profiler module not found")
elif o == "--noglm":
optiondict["doglmfilt"] = False
print("disabling GLM filter")
elif o == "-T":
optiondict["savecorrtimes"] = True
print("saving a table of correlation times used")
elif o == "-V":
theprefilter.settype("vlf")
print("prefiltering to vlf band")
elif o == "-L":
theprefilter.settype("lfo")
optiondict["filtertype"] = "lfo"
optiondict["despeckle_thresh"] = np.max(
[optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
)
print("prefiltering to lfo band")
elif o == "-R":
theprefilter.settype("resp")
optiondict["filtertype"] = "resp"
optiondict["despeckle_thresh"] = np.max(
[optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
)
print("prefiltering to respiratory band")
elif o == "-C":
theprefilter.settype("cardiac")
optiondict["filtertype"] = "cardiac"
optiondict["despeckle_thresh"] = np.max(
[optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
)
print("prefiltering to cardiac band")
elif o == "-F":
arbvec = a.split(",")
if len(arbvec) != 2 and len(arbvec) != 4:
usage()
sys.exit()
if len(arbvec) == 2:
optiondict["arb_lower"] = float(arbvec[0])
optiondict["arb_upper"] = float(arbvec[1])
optiondict["arb_lowerstop"] = 0.9 * float(arbvec[0])
optiondict["arb_upperstop"] = 1.1 * float(arbvec[1])
if len(arbvec) == 4:
optiondict["arb_lower"] = float(arbvec[0])
optiondict["arb_upper"] = float(arbvec[1])
optiondict["arb_lowerstop"] = float(arbvec[2])
optiondict["arb_upperstop"] = float(arbvec[3])
theprefilter.settype("arb")
optiondict["filtertype"] = "arb"
theprefilter.setfreqs(
optiondict["arb_lowerstop"],
optiondict["arb_lower"],
optiondict["arb_upper"],
optiondict["arb_upperstop"],
)
optiondict["despeckle_thresh"] = np.max(
[optiondict["despeckle_thresh"], 0.5 / (theprefilter.getfreqs()[2])]
)
print(
"prefiltering to ",
optiondict["arb_lower"],
optiondict["arb_upper"],
"(stops at ",
optiondict["arb_lowerstop"],
optiondict["arb_upperstop"],
")",
)
elif o == "--padseconds":
optiondict["padseconds"] = float(a)
print("Setting filter padding to", optiondict["padseconds"], "seconds")
elif o == "-d":
optiondict["displayplots"] = True
print("displaying all plots")
elif o == "-N":
optiondict["numestreps"] = int(a)
if optiondict["numestreps"] == 0:
optiondict["ampthreshfromsig"] = False
print("Will not estimate significance thresholds from null correlations")
else:
print(
"Will estimate p<0.05 significance threshold from ",
optiondict["numestreps"],
" null correlations",
)
elif o == "--accheck":
optiondict["check_autocorrelation"] = True
print("Will check for periodic components in the autocorrelation function")
elif o == "--despecklethresh":
if optiondict["despeckle_passes"] == 0:
optiondict["despeckle_passes"] = 1
optiondict["check_autocorrelation"] = True
optiondict["despeckle_thresh"] = float(a)
linkchar = "="
print("Forcing despeckle threshhold to ", optiondict["despeckle_thresh"])
elif o == "--despecklepasses":
optiondict["check_autocorrelation"] = True
optiondict["despeckle_passes"] = int(a)
if optiondict["despeckle_passes"] < 0:
print("minimum number of despeckle passes is 0")
sys.exit()
linkchar = "="
print("Will do ", optiondict["despeckle_passes"], " despeckling passes")
elif o == "--acfix":
optiondict["fix_autocorrelation"] = True
optiondict["check_autocorrelation"] = True
print("Will remove periodic components in the autocorrelation function (experimental)")
elif o == "--noprogressbar":
optiondict["showprogressbar"] = False
print("Will disable progress bars")
elif o == "-s":
optiondict["widthlimit"] = float(a)
print("Setting gaussian fit width limit to ", optiondict["widthlimit"], "Hz")
elif o == "-b":
optiondict["filtertype"] = "butterworth"
theprefilter.setbutterorder(optiondict["filtorder"])
print("Using butterworth bandlimit filter")
elif o == "-Z":
optiondict["fixeddelayvalue"] = float(a)
optiondict["fixdelay"] = True
optiondict["lagmin"] = optiondict["fixeddelayvalue"] - 10.0
optiondict["lagmax"] = optiondict["fixeddelayvalue"] + 10.0
print("Delay will be set to ", optiondict["fixeddelayvalue"], "in all voxels")
elif o == "--motionfile":
optiondict["motionfilename"] = a
print(
"Will regress derivatives and delayed derivatives of motion out of data prior to analysis"
)
elif o == "--motpos":
optiondict["mot_pos"] = not optiondict["mot_pos"]
print(optiondict["mot_pos"], "set to", optiondict["mot_pos"])
elif o == "--motderiv":
optiondict["mot_deriv"] = not optiondict["mot_deriv"]
print(optiondict["mot_deriv"], "set to", optiondict["mot_deriv"])
elif o == "--motdelayderiv":
optiondict["mot_delayderiv"] = not optiondict["mot_delayderiv"]
print(optiondict["mot_delayderiv"], "set to", optiondict["mot_delayderiv"])
elif o == "-f":
optiondict["gausssigma"] = float(a)
print(
"Will prefilter fMRI data with a gaussian kernel of ",
optiondict["gausssigma"],
" mm",
)
elif o == "--timerange":
limitvec = a.split(",")
optiondict["startpoint"] = int(limitvec[0])
optiondict["endpoint"] = int(limitvec[1])
if optiondict["endpoint"] == -1:
optiondict["endpoint"] = 100000000
linkchar = "="
print(
"Analysis will be performed only on data from point ",
optiondict["startpoint"],
" to ",
optiondict["endpoint"],
".",
)
elif o == "-r":
lagvec = a.split(",")
if not optiondict["fixdelay"]:
optiondict["lagmin"] = float(lagvec[0])
optiondict["lagmax"] = float(lagvec[1])
if optiondict["lagmin"] >= optiondict["lagmax"]:
print("lagmin must be less than lagmax - exiting")
sys.exit(1)
print(
"Correlations will be calculated over range ",
optiondict["lagmin"],
" to ",
optiondict["lagmax"],
)
elif o == "-y":
optiondict["interptype"] = a
if (
(optiondict["interptype"] != "cubic")
and (optiondict["interptype"] != "quadratic")
and (optiondict["interptype"] != "univariate")
):
print("unsupported interpolation type!")
sys.exit()
elif o == "-h":
optiondict["histlen"] = int(a)
print("Setting histogram length to ", optiondict["histlen"])
elif o == "-o":
optiondict["offsettime"] = float(a)
optiondict["offsettime_total"] = float(a)
print("Applying a timeshift of ", optiondict["offsettime"], " to regressor")
elif o == "--autosync":
optiondict["autosync"] = True
print(
"Will calculate and apply regressor synchronization from global correlation. Overrides offsettime."
)
elif o == "--datafreq":
realtr = 1.0 / float(a)
linkchar = "="
print("Data time step forced to ", realtr)
elif o == "--datatstep":
realtr = float(a)
linkchar = "="
print("Data time step forced to ", realtr)
elif o == "-t":
print(
"DEPRECATION WARNING: The -t option is obsolete and will be removed in a future version. Use --datatstep=TSTEP or --datafreq=FREQ instead"
)
realtr = float(a)
print("Data time step forced to ", realtr)
elif o == "-c":
optiondict["isgrayordinate"] = True
print("Input fMRI file is a converted CIFTI file")
elif o == "-O":
optiondict["oversampfactor"] = int(a)
if 0 <= optiondict["oversampfactor"] < 1:
print(
"oversampling factor must be an integer greater than or equal to 1 (or negative to set automatically)"
)
sys.exit()
print("oversampling factor set to ", optiondict["oversampfactor"])
elif o == "--psdfilter":
optiondict["psdfilter"] = True
print(
"Will use a cross-spectral density filter on shifted timecourses prior to refinement"
)
elif o == "--avg":
optiondict["refinetype"] = "unweighted_average"
print("Will use unweighted average to refine regressor rather than simple averaging")
elif o == "--weightedavg":
optiondict["refinetype"] = "weighted_average"
print("Will use weighted average to refine regressor rather than simple averaging")
elif o == "--ica":
optiondict["refinetype"] = "ica"
print("Will use ICA procedure to refine regressor rather than simple averaging")
elif o == "--dispersioncalc":
optiondict["dodispersioncalc"] = True
print("Will do dispersion calculation during regressor refinement")
elif o == "--nofitfilt":
optiondict["zerooutbadfit"] = False
optiondict["nohistzero"] = True
print("Correlation parameters will be recorded even if out of bounds")
elif o == "--pca":
optiondict["refinetype"] = "pca"
print("Will use PCA procedure to refine regressor rather than simple averaging")
elif o == "--numskip":
optiondict["preprocskip"] = int(a)
linkchar = "="
print("Setting preprocessing trs skipped to ", optiondict["preprocskip"])
elif o == "--venousrefine":
optiondict["lagmaskside"] = "upper"
optiondict["lagminthresh"] = 2.5
optiondict["lagmaxthresh"] = 6.0
optiondict["ampthresh"] = 0.5
print("Biasing refinement to voxels in draining vasculature")
elif o == "--nirs":
optiondict["nothresh"] = True
optiondict["corrmaskthreshpct"] = 0.0
optiondict["preservefiltering"] = True
optiondict["refineprenorm"] = "var"
optiondict["ampthresh"] = 0.7
optiondict["lagminthresh"] = 0.1
print("Setting NIRS mode")
elif o == "--nothresh":
optiondict["nothresh"] = True
optiondict["corrmaskthreshpct"] = 0.0
print("Disabling voxel threshhold")
elif o == "--regressor":
optiondict["regressorfile"] = a
optiondict["useglobalref"] = False
linkchar = "="
print("Will use regressor file", a)
elif o == "--regressorfreq":
optiondict["inputfreq"] = float(a)
linkchar = "="
print("Setting regressor sample frequency to ", float(a))
elif o == "--regressortstep":
optiondict["inputfreq"] = 1.0 / float(a)
linkchar = "="
print("Setting regressor sample time step to ", float(a))
elif o == "--regressorstart":
optiondict["inputstarttime"] = float(a)
linkchar = "="
print("Setting regressor start time to ", optiondict["inputstarttime"])
elif o == "--slicetimes":
optiondict["slicetimes"] = tide_io.readvecs(a)
linkchar = "="
print("Using slicetimes from file", a)
elif o == "--detrendorder":
optiondict["detrendorder"] = int(a)
print(
"Setting trend removal order to",
optiondict["detrendorder"],
"for regressor generation and correlation preparation",
)
elif o == "--refineupperlag":
optiondict["lagmaskside"] = "upper"
print(
"Will only use lags between ",
optiondict["lagminthresh"],
" and ",
optiondict["lagmaxthresh"],
" in refinement",
)
elif o == "--refinelowerlag":
optiondict["lagmaskside"] = "lower"
print(
"Will only use lags between ",
-optiondict["lagminthresh"],
" and ",
-optiondict["lagmaxthresh"],
" in refinement",
)
elif o == "--refineoffset":
optiondict["refineoffset"] = True
print("Will refine offset time during subsequent passes")
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
elif o == "--pickleft":
optiondict["pickleft"] = True
print("Will select the leftmost delay peak when setting refine offset")
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
elif o == "--pickleftthresh":
optiondict["pickleftthresh"] = float(a)
print(
"Threshhold value for leftmost peak height set to", optiondict["pickleftthresh"],
)
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
elif o == "--lagminthresh":
optiondict["lagminthresh"] = float(a)
print("Using lagminthresh of ", optiondict["lagminthresh"])
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
linkchar = "="
elif o == "--lagmaxthresh":
optiondict["lagmaxthresh"] = float(a)
print("Using lagmaxthresh of ", optiondict["lagmaxthresh"])
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
linkchar = "="
elif o == "--skipsighistfit":
optiondict["dosighistfit"] = False
print("will not fit significance histogram with a Johnson SB function")
elif o == "--searchfrac":
optiondict["searchfrac"] = float(a)
linkchar = "="
print(
"Points greater than",
optiondict["ampthresh"],
"* the peak height will be used to fit peak parameters",
)
elif o == "--ampthresh":
optiondict["ampthresh"] = float(a)
optiondict["ampthreshfromsig"] = False
if optiondict["ampthresh"] < 0.0:
print(
"Setting ampthresh to the", -100.0 * optiondict["ampthresh"], "th percentile",
)
else:
print("Using ampthresh of ", optiondict["ampthresh"])
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
linkchar = "="
elif o == "--sigmathresh":
optiondict["sigmathresh"] = float(a)
print("Using widththresh of ", optiondict["sigmathresh"])
if optiondict["passes"] == 1:
print(
"WARNING: setting this value implies you are doing refinement; make sure if you want to do that, passes > 1"
)
linkchar = "="
elif o == "--globalmeaninclude":
optiondict["globalmeanincludename"], colspec = tide_io.parsefilespec(a)
if colspec is not None:
optiondict["globalmeanincludevals"] = tide_io.colspectolist(colspec)
linkchar = "="
if optiondict["globalmeanincludevals"] is not None:
print(
"Using voxels where",
optiondict["globalmeanincludename"],
" = ",
optiondict["globalmeanincludevals"],
" for inclusion in global mean",
)
else:
print(
"Using ",
optiondict["globalmeanincludename"],
" as include mask for global mean calculation",
)
elif o == "--globalmeanexclude":
optiondict["globalmeanexcludename"], colspec = tide_io.parsefilespec(a)
if colspec is not None:
optiondict["globalmeanexcludevals"] = tide_io.colspectolist(colspec)
linkchar = "="
if optiondict["globalmeanexcludevals"] is not None:
print(
"Using voxels where",
optiondict["globalmeanexcludename"],
" = ",
optiondict["globalmeanexcludevals"],
" for exclusion from global mean",
)
else:
print(
"Using ",
optiondict["globalmeanexcludename"],
" as exclude mask for global mean calculation",
)
elif o == "--refineinclude":
optiondict["refineincludename"], colspec = tide_io.parsefilespec(a)
if colspec is not None:
optiondict["refineincludevals"] = tide_io.colspectolist(colspec)
linkchar = "="
if optiondict["refineincludevals"] is not None:
print(
"Using voxels where",
optiondict["refineincludename"],
" = ",
optiondict["refineincludevals"],
" for inclusion in refine mask",
)
else:
print(
"Using ",
optiondict["refineincludename"],
" as include mask for probe regressor refinement",
)
elif o == "--refineexclude":
optiondict["refineexcludename"], colspec = tide_io.parsefilespec(a)
if colspec is not None:
optiondict["refineexcludevals"] = tide_io.colspectolist(colspec)
linkchar = "="
if optiondict["refineexcludevals"] is not None:
print(
"Using voxels where",
optiondict["refineexcludename"],
" = ",
optiondict["refineexcludevals"],
" for exclusion from refine mask",
)
else:
print(
"Using ",
optiondict["refineexcludename"],
" as exclude mask for probe regressor refinement",
)
elif o == "--corrmask":
(
optiondict["corrmaskincludename"],
optiondict["corrmaskincludevals"],
) = tide_io.processnamespec(a, "Using voxels where ", "in correlation calculations.")
elif o == "--refineprenorm":
optiondict["refineprenorm"] = a
if (
(optiondict["refineprenorm"] != "None")
and (optiondict["refineprenorm"] != "mean")
and (optiondict["refineprenorm"] != "var")
and (optiondict["refineprenorm"] != "std")
and (optiondict["refineprenorm"] != "invlag")
):
print("unsupported refinement prenormalization mode!")
sys.exit()
linkchar = "="
elif o == "--refineweighting":
optiondict["refineweighting"] = a
if (
(optiondict["refineweighting"] != "None")
and (optiondict["refineweighting"] != "NIRS")
and (optiondict["refineweighting"] != "R")
and (optiondict["refineweighting"] != "R2")
):
print("unsupported refinement weighting!")
sys.exit()
linkchar = "="
elif o == "--tmask":
optiondict["tmaskname"] = a
linkchar = "="
print("Will multiply regressor by timecourse in ", optiondict["tmaskname"])
elif o == "--refinepasses" or o == "--passes":
if o == "--refinepasses":
print(
"DEPRECATION WARNING - refinepasses is deprecated and will be removed in a future version - use passes instead"
)
optiondict["passes"] = int(a)
linkchar = "="
print("Will do ", optiondict["passes"], " processing passes")
elif o == "--peakfittype":
optiondict["peakfittype"] = a
linkchar = "="
print("Similarity function peak fitting method is ", optiondict["peakfittype"])
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
assert False, "unhandled option: " + o
formattedcmdline.append("\t" + o + linkchar + a + " \\")
formattedcmdline[len(formattedcmdline) - 1] = formattedcmdline[len(formattedcmdline) - 1][:-2]
# store the filter limits
(
optiondict["lowerpass"],
optiondict["upperpass"],
optiondict["lowerstop"],
optiondict["upperstop"],
) = theprefilter.getfreqs()
# write out the command used
tide_io.writevec(formattedcmdline, optiondict["outputname"] + "_formattedcommandline.txt")
tide_io.writevec([" ".join(sys.argv)], optiondict["outputname"] + "_commandline.txt")
optiondict["commandlineargs"] = sys.argv[1:]
# add additional information to option structure for debugging
optiondict["realtr"] = realtr
optiondict["dispersioncalc_lower"] = optiondict["lagmin"]
optiondict["dispersioncalc_upper"] = optiondict["lagmax"]
optiondict["dispersioncalc_step"] = np.max(
[
(optiondict["dispersioncalc_upper"] - optiondict["dispersioncalc_lower"]) / 25,
optiondict["dispersioncalc_step"],
]
)
return optiondict, theprefilter
|
en
| 0.718829
|
#!/usr/bin/env python # -*- coding: latin-1 -*- # # Copyright 2016-2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # set default variable values # file i/o file options # preprocessing options # the width of the spatial filter kernel in mm # apply an antialiasing filter to any regressors prior to filtering # invert the initial regressor during startup # do not apply any slice order correction by default # by default, analyze the entire length of the dataset # by default, analyze the entire length of the dataset # number of trs skipped in preprocessing # list of integer values to use in the mask # list of integer values to use in the mask # correlation options # remove the mean from signals prior to correlation # remove linear trends prior to correlation # the particular window function to use for correlation # by default, padding is 0 (circular correlation) # use a standard unweighted crosscorrelation for calculate time delays # file name for tmask regressor # percentage of robust maximum of mean to mask correlations # list of integer values to use in the mask # list of integer values to use in the mask # check for periodic components that corrupt the autocorrelation # remove periodic components that corrupt the autocorrelation # threshold value - despeckle if median discontinuity magnitude exceeds despeckle_thresh # despeckling passes to perform # disable voxel intensity threshholding # correlation fitting options # Peak value must be within specified range. 
If false, allow max outside if maximum # correlation value is that one end of the range. # find peak with highest magnitude, regardless of sign # zero out peaks with correlations lower than this value # zero out peaks with correlations higher than this value # what fraction of the correlation window to avoid on either end when fitting # only do fits in voxels that exceed threshhold # if true zero out all fit parameters if the fit fails # The fraction of the main peak over which points are included in the peak # if set to the location of the first autocorrelation sidelobe, this should # if set to 'gauss', use old gaussian fitting, if set to 'quad' use parabolic # width of the reference autocorrelation function # width of the reference autocorrelation function # width of the reference autocorrelation function # move delay peaks back to the correct position if they hit a sidelobe # postprocessing options # use a glm filter to remove the delayed regressor from the data in each voxel # name of the file from which to regress delayed regressors (if not the original data) # do Wiener deconvolution to find voxel transfer function # by default do no motion regression # do not do position # do use derivative # do not do delayed derivative # save motion filtered file for debugging # filter options # the number of seconds of padding to add to each end of a filtered timecourse # output options # significance estimation options # the number of sham correlations to perform to estimate significance # if False, there is a spike at R=0 in the significance histogram # refinement options # list of integer values to use in the mask # list of integer values to use in the mask # debugging options # save checkpoint information for tracking program state # experimental options # package options # start the clock! 
# get the command line parameters # handle required args first # now scan for optional arguments # print help information and exit: # will print something like 'option -a not recognized' # store the filter limits # write out the command used # add additional information to option structure for debugging
| 2.036468
| 2
|
scripts/calculate_migraine_ancestry_nonphased_sites.py
|
will-camb/Nero
| 1
|
6627405
|
# Per-chromosome ancestry averaging for migraine-associated (non-phased) sites.
# For every rsID mapped to the given chromosome that has a homozygous-sample
# list in output_files/, average the local-ancestry copying probabilities
# (one file per ancestry) over a +/- 6 Mb window around the SNP and write the
# per-position means to ancestry_plots/.
import pandas as pd
import argparse
import seaborn as sns
import os
import sys
sns.set(color_codes=True)
parser = argparse.ArgumentParser()
parser.add_argument("-chr",
                    help="The chromosome",
                    required=True)
args = parser.parse_args()
# rsID -> chromosome lookup; keep only rsIDs on the requested chromosome.
mapping = pd.read_csv("rsID-chr_mapping", header=None, sep="\t", dtype=str)
rsID_list = mapping[mapping[2] == str(args.chr)][0].tolist()
rsID_files = list()
for rsID in rsID_list:
    rsID_files.append("output_files/" + str(rsID) + ".hom.1.samples")
# Bail out early if no rsID on this chromosome has a sample file on disk.
rsID_exists = [f for f in rsID_files if os.path.isfile(f)]
if not rsID_exists:
    print("No rsIDs with valid input for chr " + str(args.chr))
    sys.exit(0)
for anc in ["CHG", "EHG", "Farmer", "African", "EastAsian", "WHG", "Yamnaya"]:
    # Read only the header row (nrows=0) to get the painted positions.
    cols = pd.read_csv("/willerslev/ukbiobank/painting_results_aggregate/copyprobs_per_anc/" + str(anc) + "." +
                       str(args.chr) + ".master_all_copyprobsperlocus.txt.gz", sep=" ", nrows=0).columns.tolist()
    del cols[0]  # first column is the sample ID, not a genomic position
    cols = [int(x) for x in cols]
    # NOTE(review): the position labels in the copyprobs files are apparently
    # stored in reverse order; this table maps each "Wrong" (as-stored) label
    # to its "Right" (mirrored) position -- confirm against the painting
    # pipeline that produced these files.
    cols_reversed = cols[::-1]
    wrong_right_map = pd.DataFrame(cols)
    wrong_right_map.loc[:, 1] = cols_reversed
    wrong_right_map.columns = ['Wrong', 'Right']
    for rsID in rsID_list:
        if not os.path.exists("output_files/" + rsID + ".hom.1.samples"):
            continue  # no homozygous-sample list for this rsID
        print("Processing " + str(rsID))
        position_mapping = pd.read_csv("rsID-position_mapping", sep="\t", header=None)
        position = position_mapping.loc[position_mapping[0] == rsID][1].item()
        closest_position = min(cols, key=lambda x: abs(x - position))  # Get closest position in cols, because not
        # all SNPs were painted
        # Mirror the closest painted position through the reversed-label map.
        mapped_position = cols[cols_reversed.index(closest_position)]
        # Restrict to painted positions within +/- 6 Mb of the SNP.
        max_position = int(mapped_position) + 6000000
        min_position = int(mapped_position) - 6000000
        cols_subset = [str(col) for col in cols if col in range(min_position, max_position)]
        print("Calculating averages for " + str(len(cols_subset)) + " SNPs")
        # int8 dtype keeps the large copyprobs matrix small in memory.
        types_dict = {'0': str}
        types_dict.update({col: 'int8' for col in cols_subset if col not in types_dict})
        copyprobs = pd.read_csv("/willerslev/ukbiobank/painting_results_aggregate/copyprobs_per_anc/" + str(anc) + "." +
                                str(args.chr) + ".master_all_copyprobsperlocus.txt.gz",
                                sep=" ", dtype=types_dict, usecols=types_dict.keys())
        # Keep only the homozygous samples for this rsID, then average the
        # copying probabilities per position over those samples.
        samples = pd.read_csv("output_files/" + rsID + ".hom.1.samples", header=None)
        merged = pd.merge(samples, copyprobs, left_on=0, right_on='0')
        merged.set_index(0, inplace=True)
        merged.drop('0', inplace=True, axis=1)
        df = pd.DataFrame(merged.mean().tolist())
        merged_cols = pd.DataFrame([int(col) for col in merged.columns.tolist()])
        # NOTE(review): the result of the next line is discarded (the identical
        # merge is repeated on the following line) -- looks like dead code left
        # over from debugging.
        pd.merge(wrong_right_map, merged_cols, left_on='Wrong', right_on=0)
        df[1] = pd.merge(wrong_right_map, merged_cols, left_on='Wrong', right_on=0)['Right']  # Saved in correct order
        df.to_csv("ancestry_plots/" + rsID + "." + str(anc) + ".1.snp.subset.csv",
                  header=False,
                  index=False)
|
import pandas as pd
import argparse
import seaborn as sns
import os
import sys
sns.set(color_codes=True)
parser = argparse.ArgumentParser()
parser.add_argument("-chr",
help="The chromosome",
required=True)
args = parser.parse_args()
mapping = pd.read_csv("rsID-chr_mapping", header=None, sep="\t", dtype=str)
rsID_list = mapping[mapping[2] == str(args.chr)][0].tolist()
rsID_files = list()
for rsID in rsID_list:
rsID_files.append("output_files/" + str(rsID) + ".hom.1.samples")
rsID_exists = [f for f in rsID_files if os.path.isfile(f)]
if not rsID_exists:
print("No rsIDs with valid input for chr " + str(args.chr))
sys.exit(0)
for anc in ["CHG", "EHG", "Farmer", "African", "EastAsian", "WHG", "Yamnaya"]:
cols = pd.read_csv("/willerslev/ukbiobank/painting_results_aggregate/copyprobs_per_anc/" + str(anc) + "." +
str(args.chr) + ".master_all_copyprobsperlocus.txt.gz", sep=" ", nrows=0).columns.tolist()
del cols[0]
cols = [int(x) for x in cols]
cols_reversed = cols[::-1]
wrong_right_map = pd.DataFrame(cols)
wrong_right_map.loc[:, 1] = cols_reversed
wrong_right_map.columns = ['Wrong', 'Right']
for rsID in rsID_list:
if not os.path.exists("output_files/" + rsID + ".hom.1.samples"):
continue
print("Processing " + str(rsID))
position_mapping = pd.read_csv("rsID-position_mapping", sep="\t", header=None)
position = position_mapping.loc[position_mapping[0] == rsID][1].item()
closest_position = min(cols, key=lambda x: abs(x - position)) # Get closest position in cols, because not
# all SNPs were painted
mapped_position = cols[cols_reversed.index(closest_position)]
max_position = int(mapped_position) + 6000000
min_position = int(mapped_position) - 6000000
cols_subset = [str(col) for col in cols if col in range(min_position, max_position)]
print("Calculating averages for " + str(len(cols_subset)) + " SNPs")
types_dict = {'0': str}
types_dict.update({col: 'int8' for col in cols_subset if col not in types_dict})
copyprobs = pd.read_csv("/willerslev/ukbiobank/painting_results_aggregate/copyprobs_per_anc/" + str(anc) + "." +
str(args.chr) + ".master_all_copyprobsperlocus.txt.gz",
sep=" ", dtype=types_dict, usecols=types_dict.keys())
samples = pd.read_csv("output_files/" + rsID + ".hom.1.samples", header=None)
merged = pd.merge(samples, copyprobs, left_on=0, right_on='0')
merged.set_index(0, inplace=True)
merged.drop('0', inplace=True, axis=1)
df = pd.DataFrame(merged.mean().tolist())
merged_cols = pd.DataFrame([int(col) for col in merged.columns.tolist()])
pd.merge(wrong_right_map, merged_cols, left_on='Wrong', right_on=0)
df[1] = pd.merge(wrong_right_map, merged_cols, left_on='Wrong', right_on=0)['Right'] # Saved in correct order
df.to_csv("ancestry_plots/" + rsID + "." + str(anc) + ".1.snp.subset.csv",
header=False,
index=False)
|
en
| 0.984271
|
# Get closest position in cols, because not # all SNPs were painted # Saved in correct order
| 2.680058
| 3
|
q2_katharoseq/_transformer.py
|
antgonza/q2-katharoseq
| 0
|
6627406
|
import pandas as pd
from .plugin_setup import plugin
from ._format import EstimatedBiomassFmt
@plugin.register_transformer
def _1(data: pd.DataFrame) -> EstimatedBiomassFmt:
    """Serialize an estimated-biomass DataFrame to its on-disk CSV format.

    The DataFrame (including its index) is written as CSV to the path
    backing a fresh EstimatedBiomassFmt, which is then returned.
    """
    ff = EstimatedBiomassFmt()
    data.to_csv(str(ff))
    return ff
@plugin.register_transformer
def _2(ff: EstimatedBiomassFmt) -> pd.DataFrame:
    """Load an on-disk EstimatedBiomassFmt CSV back into a DataFrame.

    The 'sample-id' column becomes the DataFrame index.
    """
    return pd.read_csv(str(ff), index_col='sample-id')
|
import pandas as pd
from .plugin_setup import plugin
from ._format import EstimatedBiomassFmt
@plugin.register_transformer
def _1(data: pd.DataFrame) -> EstimatedBiomassFmt:
ff = EstimatedBiomassFmt()
data.to_csv(str(ff))
return ff
@plugin.register_transformer
def _2(ff: EstimatedBiomassFmt) -> pd.DataFrame:
return pd.read_csv(str(ff), index_col='sample-id')
|
none
| 1
| 2.150493
| 2
|
|
models/hyp_alpha_eval.py
|
gyy8426/R2-Net
| 0
|
6627407
|
<reponame>gyy8426/R2-Net<gh_stars>0
import matplotlib
matplotlib.use('Agg')
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE, DATA_PATH
import dill as pkl
import os
import pickle as cpkl
from lib.pytorch_misc import load_reslayer4
#size_index = np.load('/home/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy')
# Top-level evaluation setup: parse config, build dataset/loader, construct
# the relation model, and restore weights from the checkpoint.
conf = ModelConfig()
if conf.model == 'motifnet':
    from lib.rel_model_hyp import RelModel
    #from lib.rel_model_2bias_2_hyp import RelModel
    #from lib.rel_model_topgcn_hyp import RelModel
    #from lib.rel_model_rnn2_hyp import RelModel
elif conf.model == 'stanford':
    from lib.rel_model_stanford import RelModelStanford as RelModel
else:
    raise ValueError()
# sgdet mode filters non-overlapping GT boxes during training-split creation.
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
                             use_proposals=conf.use_proposals,
                             filter_non_overlap=conf.mode == 'sgdet')
if conf.test:
    # Evaluate on the held-out test split instead of the validation split.
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)
# All model hyper-parameters come straight from the command-line config.
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
                    num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
                    use_resnet=conf.use_resnet, order=conf.order,
                    nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, nl_adj=conf.nl_adj,
                    hidden_dim=conf.hidden_dim,
                    use_proposals=conf.use_proposals,
                    pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
                    pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
                    pass_in_obj_feats_to_gcn=conf.pass_in_obj_feats_to_gcn,
                    pass_embed_togcn=conf.pass_embed_togcn,
                    pooling_dim=conf.pooling_dim,
                    rec_dropout=conf.rec_dropout,
                    use_bias=conf.use_bias,
                    use_tanh=conf.use_tanh,
                    limit_vision=conf.limit_vision,
                    attention_dim=conf.attention_dim,
                    adj_embed_dim = conf.adj_embed_dim,
                    with_adj_mat=conf.with_adj_mat,
                    bg_num_graph=conf.bg_num_graph,
                    bg_num_rel=conf.bg_num_rel,
                    adj_embed=conf.adj_embed,
                    mean_union_feat=conf.mean_union_feat,
                    ch_res=conf.ch_res,
                    with_att=conf.with_att,
                    with_gcn=conf.with_gcn,
                    fb_thr=conf.fb_thr,
                    with_biliner_score=conf.with_biliner_score,
                    gcn_adj_type=conf.gcn_adj_type,
                    where_gcn=conf.where_gcn,
                    with_gt_adj_mat=conf.gt_adj_mat,
                    type_gcn=conf.type_gcn,
                    edge_ctx_type=conf.edge_ctx_type,
                    nms_union=conf.nms_union,
                    cosine_dis=conf.cosine_dis,
                    test_alpha=conf.test_alpha,
                    )
detector.cuda()
ckpt = torch.load(conf.ckpt)
# A 'vgrel-*' checkpoint holds the full relation model; anything else is
# treated as a detector-only checkpoint whose head weights must be copied
# into the relation branches by hand below.
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
    print("Loading EVERYTHING")
    start_epoch = ckpt['epoch']
    if not optimistic_restore(detector, ckpt['state_dict']):
        start_epoch = -1
    # optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
    start_epoch = -1
    optimistic_restore(detector.detector, ckpt['state_dict'])
    #print('detector: ',detector.detector)
    # for i in ckpt['state_dict'].keys():
    #     if 'roi_fmap' in i:
    #         print('ckpt state_dict: ',i)
    if conf.mode != 'detclass':
        if not conf.use_resnet:
            # VGG path: share the detector's two FC layers with both the union
            # (roi_fmap) and per-object (roi_fmap_obj) feature heads.
            detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
            detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
            detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
            detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
            detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
            detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
            detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
            detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
        else :
            # ResNet path: helper copies layer-4 weights into the model.
            load_reslayer4(detector, ckpt, 3)
    """
    if conf.use_resnet:
        detector.compress[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
        detector.compress[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
        detector.compress[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
        detector.compress[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
        detector.union_boxes.compress_union[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
        detector.union_boxes.compress_union[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
        detector.union_boxes.compress_union[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
        detector.union_boxes.compress_union[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
    """
#optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
#     det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
#     detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
#     detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
#     detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
#     detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
# Global per-class counters: true positives and total GT occurrences.
all_TP_label_num = np.zeros([detector.num_classes])
all_label_num = np.zeros([detector.num_classes])
all_pred_size = []
def count_num(label_i):
for i in range(label_i.shape[0]):
all_label_num[label_i[i]] = all_label_num[label_i[i]] + 1
def TP_count_num(label_i, pred_i):
TP_labe_ind = ((label_i - pred_i) == 0)
TP_labe = TP_labe_ind * pred_i
for i in range(TP_labe.shape[0]):
if TP_labe_ind[i]:
all_TP_label_num[label_i[i]] = all_TP_label_num[label_i[i]] + 1
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
det_res = detector[b]
num_correct = 0
num_sample = 0
num_correct_adj_mat = 0
num_sample_adj_mat = 0
# the image size after resizing to IMAGE_SCALE (1024)
if conf.num_gpus == 1:
det_res = [det_res]
for i, (r) in enumerate(det_res):
torch.cuda.empty_cache()
for j in range(11):
for k in range(11):
boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i, \
pred_adj_mat_rel_i, pred_adj_mat_obj_i, gt_adj_mat_i = r[j][k]
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
'gt_adj_mat': gt_adj_mat_i.copy(),
}
assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
# assert np.all(rels_i[:,2] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i,
'pred_adj_mat_rel': pred_adj_mat_rel_i,
'pred_adj_mat_obj': pred_adj_mat_obj_i,
}
num_sample = num_sample + objs_i.shape[0]
num_sample_adj_mat = num_sample_adj_mat + gt_adj_mat_i.shape[0]
if conf.mode != 'sgdet':
num_correct = num_correct + np.sum((val.gt_classes[batch_num + i].copy() - objs_i)==0)
pred_adj_mat_bool_i = ( pred_adj_mat_rel_i > 0.5 ).astype('float64')
num_correct_adj_mat = num_correct_adj_mat + np.sum((gt_adj_mat_i - pred_adj_mat_bool_i)==0)
evaluator[j][k][conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
alpha=0.1 * j,
beta=0.1 * k,
)
return num_correct, num_sample, num_correct_adj_mat, num_sample_adj_mat,
evaluator = []
for i in range(11):
evaluatori = []
for j in range(11):
evaluatori.append(BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred))
evaluator.append(evaluatori)
detector.eval()
num_correct = 0
num_sample = 0
num_correct_adj = 0
num_sample_adj = 0
for val_b, batch in enumerate(tqdm(val_loader)):
num_correct_i, num_sample_i, num_correct_adj_i, num_sample_adj_i = val_batch(conf.num_gpus*val_b, batch, evaluator)
num_correct = num_correct + num_correct_i
num_sample = num_sample + num_sample_i
num_correct_adj = num_correct_adj + num_correct_adj_i
num_sample_adj = num_sample_adj + num_sample_adj_i
print('num_correct ',num_correct)
print('num_sample',num_sample)
print('Amp:', (num_correct*1.0)/(num_sample*1.0))
print('adj Amp:', (num_correct_adj * 1.0) / (num_sample_adj * 1.0))
re = []
for i in range(11):
print(i)
re_i = []
for j in range(11):
print(j)
evaluator[i][j][conf.mode].print_stats()
re_i.append([evaluator[i][j][conf.mode].get_recall()])
re.append(re_i)
import os
name_i = None
for i in range(100):
name_t = 'val_alpha_beta'+'gcn2_lstm2_'+str(i)+'.pkl'
if os.path.exists(name_t):
continue
else:
name_i = name_t
break
cpkl.dump(re, open(name_i,'wb'))
|
import matplotlib
matplotlib.use('Agg')
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE, DATA_PATH
import dill as pkl
import os
import pickle as cpkl
from lib.pytorch_misc import load_reslayer4
#size_index = np.load('/home/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy')
conf = ModelConfig()
if conf.model == 'motifnet':
from lib.rel_model_hyp import RelModel
#from lib.rel_model_2bias_2_hyp import RelModel
#from lib.rel_model_topgcn_hyp import RelModel
#from lib.rel_model_rnn2_hyp import RelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RelModel
else:
raise ValueError()
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet')
if conf.test:
val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, nl_adj=conf.nl_adj,
hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pass_in_obj_feats_to_gcn=conf.pass_in_obj_feats_to_gcn,
pass_embed_togcn=conf.pass_embed_togcn,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision,
attention_dim=conf.attention_dim,
adj_embed_dim = conf.adj_embed_dim,
with_adj_mat=conf.with_adj_mat,
bg_num_graph=conf.bg_num_graph,
bg_num_rel=conf.bg_num_rel,
adj_embed=conf.adj_embed,
mean_union_feat=conf.mean_union_feat,
ch_res=conf.ch_res,
with_att=conf.with_att,
with_gcn=conf.with_gcn,
fb_thr=conf.fb_thr,
with_biliner_score=conf.with_biliner_score,
gcn_adj_type=conf.gcn_adj_type,
where_gcn=conf.where_gcn,
with_gt_adj_mat=conf.gt_adj_mat,
type_gcn=conf.type_gcn,
edge_ctx_type=conf.edge_ctx_type,
nms_union=conf.nms_union,
cosine_dis=conf.cosine_dis,
test_alpha=conf.test_alpha,
)
detector.cuda()
ckpt = torch.load(conf.ckpt)
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
print("Loading EVERYTHING")
start_epoch = ckpt['epoch']
if not optimistic_restore(detector, ckpt['state_dict']):
start_epoch = -1
# optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
start_epoch = -1
optimistic_restore(detector.detector, ckpt['state_dict'])
#print('detector: ',detector.detector)
# for i in ckpt['state_dict'].keys():
# if 'roi_fmap' in i:
# print('ckpt state_dict: ',i)
if conf.mode != 'detclass':
if not conf.use_resnet:
detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
else :
load_reslayer4(detector, ckpt, 3)
"""
if conf.use_resnet:
detector.compress[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
detector.compress[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
detector.compress[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
detector.compress[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
detector.union_boxes.compress_union[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight'])
detector.union_boxes.compress_union[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias'])
detector.union_boxes.compress_union[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight'])
detector.union_boxes.compress_union[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias'])
"""
#optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
# det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
# detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
# detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
# detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
# detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
all_TP_label_num = np.zeros([detector.num_classes])
all_label_num = np.zeros([detector.num_classes])
all_pred_size = []
def count_num(label_i):
for i in range(label_i.shape[0]):
all_label_num[label_i[i]] = all_label_num[label_i[i]] + 1
def TP_count_num(label_i, pred_i):
TP_labe_ind = ((label_i - pred_i) == 0)
TP_labe = TP_labe_ind * pred_i
for i in range(TP_labe.shape[0]):
if TP_labe_ind[i]:
all_TP_label_num[label_i[i]] = all_TP_label_num[label_i[i]] + 1
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
det_res = detector[b]
num_correct = 0
num_sample = 0
num_correct_adj_mat = 0
num_sample_adj_mat = 0
# the image size after resizing to IMAGE_SCALE (1024)
if conf.num_gpus == 1:
det_res = [det_res]
for i, (r) in enumerate(det_res):
torch.cuda.empty_cache()
for j in range(11):
for k in range(11):
boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i, \
pred_adj_mat_rel_i, pred_adj_mat_obj_i, gt_adj_mat_i = r[j][k]
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
'gt_adj_mat': gt_adj_mat_i.copy(),
}
assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
# assert np.all(rels_i[:,2] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i,
'pred_adj_mat_rel': pred_adj_mat_rel_i,
'pred_adj_mat_obj': pred_adj_mat_obj_i,
}
num_sample = num_sample + objs_i.shape[0]
num_sample_adj_mat = num_sample_adj_mat + gt_adj_mat_i.shape[0]
if conf.mode != 'sgdet':
num_correct = num_correct + np.sum((val.gt_classes[batch_num + i].copy() - objs_i)==0)
pred_adj_mat_bool_i = ( pred_adj_mat_rel_i > 0.5 ).astype('float64')
num_correct_adj_mat = num_correct_adj_mat + np.sum((gt_adj_mat_i - pred_adj_mat_bool_i)==0)
evaluator[j][k][conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
alpha=0.1 * j,
beta=0.1 * k,
)
return num_correct, num_sample, num_correct_adj_mat, num_sample_adj_mat,
evaluator = []
for i in range(11):
evaluatori = []
for j in range(11):
evaluatori.append(BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred))
evaluator.append(evaluatori)
detector.eval()
num_correct = 0
num_sample = 0
num_correct_adj = 0
num_sample_adj = 0
for val_b, batch in enumerate(tqdm(val_loader)):
num_correct_i, num_sample_i, num_correct_adj_i, num_sample_adj_i = val_batch(conf.num_gpus*val_b, batch, evaluator)
num_correct = num_correct + num_correct_i
num_sample = num_sample + num_sample_i
num_correct_adj = num_correct_adj + num_correct_adj_i
num_sample_adj = num_sample_adj + num_sample_adj_i
print('num_correct ',num_correct)
print('num_sample',num_sample)
print('Amp:', (num_correct*1.0)/(num_sample*1.0))
print('adj Amp:', (num_correct_adj * 1.0) / (num_sample_adj * 1.0))
re = []
for i in range(11):
print(i)
re_i = []
for j in range(11):
print(j)
evaluator[i][j][conf.mode].print_stats()
re_i.append([evaluator[i][j][conf.mode].get_recall()])
re.append(re_i)
import os
name_i = None
for i in range(100):
name_t = 'val_alpha_beta'+'gcn2_lstm2_'+str(i)+'.pkl'
if os.path.exists(name_t):
continue
else:
name_i = name_t
break
cpkl.dump(re, open(name_i,'wb'))
|
en
| 0.383737
|
#size_index = np.load('/home/guoyuyu/code/code_by_myself/scene_graph/dataset_analysis/size_index.npy') #from lib.rel_model_2bias_2_hyp import RelModel #from lib.rel_model_topgcn_hyp import RelModel #from lib.rel_model_rnn2_hyp import RelModel # optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict']) #print('detector: ',detector.detector) # for i in ckpt['state_dict'].keys(): # if 'roi_fmap' in i: # print('ckpt state_dict: ',i) if conf.use_resnet: detector.compress[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight']) detector.compress[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias']) detector.compress[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight']) detector.compress[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias']) detector.union_boxes.compress_union[0].weight.data.copy_(ckpt['state_dict']['compress.0.weight']) detector.union_boxes.compress_union[0].bias.data.copy_(ckpt['state_dict']['compress.0.bias']) detector.union_boxes.compress_union[2].weight.data.copy_(ckpt['state_dict']['compress.2.weight']) detector.union_boxes.compress_union[2].bias.data.copy_(ckpt['state_dict']['compress.2.bias']) #optimistic_restore(detector, ckpt['state_dict']) # if conf.mode == 'sgdet': # det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict'] # detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight']) # detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias']) # detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight']) # detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias']) # the image size after resizing to IMAGE_SCALE (1024) # assert np.all(rels_i[:,2] > 0)
| 1.89827
| 2
|
List.py
|
sulphatet/Python
| 28,321
|
6627408
|
List = []
# List is Muteable
# means value can be change
List.insert(0 , 5)
List.insert(1,10)
List.insert(0,6)
print(List)
List.remove(6)
List.append(9)
List.append(1)
List.sort()
print(List)
List.pop()
List.reverse()
print(List)
"""
List.append(1)
print(List)
List.append(2)
print(List)
List.insert(1 , 3)
print(List)
"""
list2 = [2, 3, 7, 5, 10, 17, 12, 4, 1, 13]
for i in list2:
if i % 2 == 0:
print(i)
'''
Expected Output:
2
10
12
4
'''
|
List = []
# List is Muteable
# means value can be change
List.insert(0 , 5)
List.insert(1,10)
List.insert(0,6)
print(List)
List.remove(6)
List.append(9)
List.append(1)
List.sort()
print(List)
List.pop()
List.reverse()
print(List)
"""
List.append(1)
print(List)
List.append(2)
print(List)
List.insert(1 , 3)
print(List)
"""
list2 = [2, 3, 7, 5, 10, 17, 12, 4, 1, 13]
for i in list2:
if i % 2 == 0:
print(i)
'''
Expected Output:
2
10
12
4
'''
|
en
| 0.713667
|
# List is Muteable # means value can be change List.append(1)
print(List)
List.append(2)
print(List)
List.insert(1 , 3)
print(List) Expected Output:
2
10
12
4
| 4.006393
| 4
|
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/widgets/dialog/wconfig.py
|
davidbrownell/Common_EnvironmentEx
| 0
|
6627409
|
"""\
wxDialog widget configuration
@copyright: 2014-2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
# keep in sync: wxDialog, wxPanel and wxStaticBitmap
config = {
'wxklass': 'wxDialog',
'style_defs': {
'wxDEFAULT_DIALOG_STYLE': {
'desc': 'from wxDialog',
'combination': 'wxCAPTION|wxCLOSE_BOX|wxSYSTEM_MENU',
},
'wxDIALOG_MODAL': {
'desc': _('Show a modal dialog'),
'obsolete': _("This style is obsolete and doesn't do anything any more, don't use it in any new code."),
'supported_by': ('wx2',),
},
'wxRESIZE_BOX': {
'desc': _('Displays a maximize box on the dialog.'),
'rename_to': 'wxMAXIMIZE_BOX',
'supported_by': ('wx2',),
},
'wxTHICK_FRAME': {
'desc': 'Display a thick frame around the window.',
'rename_to': 'wxRESIZE_BORDER',
},
'wxDIALOG_NO_PARENT': {
'desc': _("By default, a dialog created with a NULL parent "
"window will be given the application's top level "
"window as parent. Use this style to prevent this "
"from happening and create an orphan dialog. This "
"is not recommended for modal dialogs."),
},
# generic styles from wxWindow (from common.py):
# - wxFULL_REPAINT_ON_RESIZE
# - wxNO_FULL_REPAINT_ON_RESIZE
# - wxCLIP_CHILDREN
},
'style_list': ['wxDEFAULT_DIALOG_STYLE', 'wxDIALOG_MODAL', 'wxCAPTION',
'wxSYSTEM_MENU', 'wxCLOSE_BOX', 'wxRESIZE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX', 'wxTHICK_FRAME',
'wxRESIZE_BORDER', 'wxSTAY_ON_TOP',
'wxDIALOG_NO_PARENT',
'wxFULL_REPAINT_ON_RESIZE', 'wxCLIP_CHILDREN'],
'events': {
'default': {
'type': 'wxCloseEvent',
'supported_by': ('wx3',),
},
'EVT_CLOSE': {},
'EVT_INIT_DIALOG': {},
},
}
|
"""\
wxDialog widget configuration
@copyright: 2014-2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
# keep in sync: wxDialog, wxPanel and wxStaticBitmap
config = {
'wxklass': 'wxDialog',
'style_defs': {
'wxDEFAULT_DIALOG_STYLE': {
'desc': 'from wxDialog',
'combination': 'wxCAPTION|wxCLOSE_BOX|wxSYSTEM_MENU',
},
'wxDIALOG_MODAL': {
'desc': _('Show a modal dialog'),
'obsolete': _("This style is obsolete and doesn't do anything any more, don't use it in any new code."),
'supported_by': ('wx2',),
},
'wxRESIZE_BOX': {
'desc': _('Displays a maximize box on the dialog.'),
'rename_to': 'wxMAXIMIZE_BOX',
'supported_by': ('wx2',),
},
'wxTHICK_FRAME': {
'desc': 'Display a thick frame around the window.',
'rename_to': 'wxRESIZE_BORDER',
},
'wxDIALOG_NO_PARENT': {
'desc': _("By default, a dialog created with a NULL parent "
"window will be given the application's top level "
"window as parent. Use this style to prevent this "
"from happening and create an orphan dialog. This "
"is not recommended for modal dialogs."),
},
# generic styles from wxWindow (from common.py):
# - wxFULL_REPAINT_ON_RESIZE
# - wxNO_FULL_REPAINT_ON_RESIZE
# - wxCLIP_CHILDREN
},
'style_list': ['wxDEFAULT_DIALOG_STYLE', 'wxDIALOG_MODAL', 'wxCAPTION',
'wxSYSTEM_MENU', 'wxCLOSE_BOX', 'wxRESIZE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX', 'wxTHICK_FRAME',
'wxRESIZE_BORDER', 'wxSTAY_ON_TOP',
'wxDIALOG_NO_PARENT',
'wxFULL_REPAINT_ON_RESIZE', 'wxCLIP_CHILDREN'],
'events': {
'default': {
'type': 'wxCloseEvent',
'supported_by': ('wx3',),
},
'EVT_CLOSE': {},
'EVT_INIT_DIALOG': {},
},
}
|
en
| 0.518856
|
\ wxDialog widget configuration @copyright: 2014-2016 <NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY # keep in sync: wxDialog, wxPanel and wxStaticBitmap # generic styles from wxWindow (from common.py): # - wxFULL_REPAINT_ON_RESIZE # - wxNO_FULL_REPAINT_ON_RESIZE # - wxCLIP_CHILDREN
| 1.856576
| 2
|
CRM-Project/config/settings.py
|
wiky-avis/trainee-domclick-test
| 0
|
6627410
|
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('CHAT_ID')
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '<KEY>'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'crm',
'accounts',
'crispy_forms',
'django_cleanup',
'django_filters',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_REDIRECT_URL = 'home'
USER = 'user'
REPAIR_SPECIALIST = 'repair'
SERVICE_SPECIALIST = 'service'
CONSULTANT_SPECIALIST = 'consultant'
ROLES = [
(REPAIR_SPECIALIST, 'Специалист по ремонту'),
(SERVICE_SPECIALIST, 'Специалист по обслуживанию'),
(CONSULTANT_SPECIALIST, 'Консультант'),
(USER, 'Пользователь без доступа в CRM'),
]
REPAIR = 'repair'
SERVICE = 'service'
CONSULTATION = 'consultant'
TYPE = [
(REPAIR, 'Заявка на ремонт'),
(SERVICE, 'Заявка на обслуживание'),
(CONSULTATION, 'Заявка на консультацию'),
]
OPEN = 'open'
WORK = 'work'
CLOSE = 'close'
STATUS = [
(OPEN, 'Открыта'),
(WORK, 'В работе'),
(CLOSE, 'Закрыта')
]
|
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('CHAT_ID')
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '<KEY>'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'crm',
'accounts',
'crispy_forms',
'django_cleanup',
'django_filters',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_REDIRECT_URL = 'home'
USER = 'user'
REPAIR_SPECIALIST = 'repair'
SERVICE_SPECIALIST = 'service'
CONSULTANT_SPECIALIST = 'consultant'
ROLES = [
(REPAIR_SPECIALIST, 'Специалист по ремонту'),
(SERVICE_SPECIALIST, 'Специалист по обслуживанию'),
(CONSULTANT_SPECIALIST, 'Консультант'),
(USER, 'Пользователь без доступа в CRM'),
]
REPAIR = 'repair'
SERVICE = 'service'
CONSULTATION = 'consultant'
TYPE = [
(REPAIR, 'Заявка на ремонт'),
(SERVICE, 'Заявка на обслуживание'),
(CONSULTATION, 'Заявка на консультацию'),
]
OPEN = 'open'
WORK = 'work'
CLOSE = 'close'
STATUS = [
(OPEN, 'Открыта'),
(WORK, 'В работе'),
(CLOSE, 'Закрыта')
]
|
none
| 1
| 1.717284
| 2
|
|
Find My Things -James- David- and Vaughan/PI GATEWAY/exampleresources.py
|
kelgazza/CSCE513-Fall2017
| 0
|
6627411
|
import time
from coapthon import defines
from coapthon.client.helperclient import HelperClient
from coapthon.resources.resource import Resource
import sensortag
__author__ = '<NAME>'
class BasicResource(Resource):
def __init__(self, name="BasicResource", coap_server=None, rt="observe", fIP="", fPort=None, fPath=""):
super(BasicResource, self).__init__(name, coap_server, visible=True,
observable=True, allow_children=True)
self.payload = name
self.resource_type = rt
self.content_type = "text/plain"
self.interface_type = "if1"
self.foreignIP = fIP
self.foreignPORT = fPort
self.foreignPATH = fPath
def render_GET(self, request):
client = HelperClient(server=(self.foreignIP, self.foreignPORT))
response = client.get(self.foreignPATH)
print response.pretty_print()
self.payload = response.payload
return self
def render_PUT(self, request):
requestPayload = request.payload
client = HelperClient(server=(self.foreignIP, self.foreignPORT))
response = client.put(self.foreignPATH, requestPayload)
print response.pretty_print()
if(response.payload == "PUT OK"):
self.edit_resource(request)
client.stop()
return self
def render_POST(self, request):
res = self.init_resource(request, BasicResource())
return res
def render_DELETE(self, request):
return True
class SensortagResource(Resource):
def __init__(self, sTag, name="SensortagResource", coap_server=None, rt="observe"):
super(SensortagResource, self).__init__(name, coap_server, visible=True,
observable=True, allow_children=True)
self.payload = name
self.resource_type = rt
self.content_type = "text/plain"
self.interface_type = "if1"
self.tag = sTag
def render_GET(self, request):
response = str(self.tag.read())
bad = "()"
for c in bad:
response = response.replace(c, "")
floats = response.split(",")
labels = self.tag.labels().split(",")
response = []
for f in floats:
response.append(labels.pop(0) + ', ' + f)
bad = "()'[]"
for c in bad:
response = str(response).replace(c, "")
self.payload = response
return self
def render_PUT(self, request):
return self
def render_POST(self, request):
res = self.init_resource(request, BasicResource())
return res
def render_DELETE(self, request):
return True
|
import time
from coapthon import defines
from coapthon.client.helperclient import HelperClient
from coapthon.resources.resource import Resource
import sensortag
__author__ = '<NAME>'
class BasicResource(Resource):
def __init__(self, name="BasicResource", coap_server=None, rt="observe", fIP="", fPort=None, fPath=""):
super(BasicResource, self).__init__(name, coap_server, visible=True,
observable=True, allow_children=True)
self.payload = name
self.resource_type = rt
self.content_type = "text/plain"
self.interface_type = "if1"
self.foreignIP = fIP
self.foreignPORT = fPort
self.foreignPATH = fPath
def render_GET(self, request):
client = HelperClient(server=(self.foreignIP, self.foreignPORT))
response = client.get(self.foreignPATH)
print response.pretty_print()
self.payload = response.payload
return self
def render_PUT(self, request):
requestPayload = request.payload
client = HelperClient(server=(self.foreignIP, self.foreignPORT))
response = client.put(self.foreignPATH, requestPayload)
print response.pretty_print()
if(response.payload == "PUT OK"):
self.edit_resource(request)
client.stop()
return self
def render_POST(self, request):
res = self.init_resource(request, BasicResource())
return res
def render_DELETE(self, request):
return True
class SensortagResource(Resource):
def __init__(self, sTag, name="SensortagResource", coap_server=None, rt="observe"):
super(SensortagResource, self).__init__(name, coap_server, visible=True,
observable=True, allow_children=True)
self.payload = name
self.resource_type = rt
self.content_type = "text/plain"
self.interface_type = "if1"
self.tag = sTag
def render_GET(self, request):
response = str(self.tag.read())
bad = "()"
for c in bad:
response = response.replace(c, "")
floats = response.split(",")
labels = self.tag.labels().split(",")
response = []
for f in floats:
response.append(labels.pop(0) + ', ' + f)
bad = "()'[]"
for c in bad:
response = str(response).replace(c, "")
self.payload = response
return self
def render_PUT(self, request):
return self
def render_POST(self, request):
res = self.init_resource(request, BasicResource())
return res
def render_DELETE(self, request):
return True
|
none
| 1
| 2.428946
| 2
|
|
app/config/petskill1.py
|
xiaomi2019/lolita_son
| 0
|
6627412
|
# -*- coding: utf-8 -*-
'''
Author: Hannibal
Data:
Desc: local data config
NOTE: Don't modify this file, it's build by xml-to-python!!!
'''
petskill1_map = {};
petskill1_map[1] = {"lv":1,"reqlv":1,"gold":20,"exppersec":1,};
petskill1_map[2] = {"lv":2,"reqlv":2,"gold":100,"exppersec":4,};
petskill1_map[3] = {"lv":3,"reqlv":3,"gold":300,"exppersec":9,};
petskill1_map[4] = {"lv":4,"reqlv":4,"gold":600,"exppersec":16,};
petskill1_map[5] = {"lv":5,"reqlv":5,"gold":1000,"exppersec":25,};
petskill1_map[6] = {"lv":6,"reqlv":6,"gold":2000,"exppersec":36,};
petskill1_map[7] = {"lv":7,"reqlv":7,"gold":4000,"exppersec":49,};
petskill1_map[8] = {"lv":8,"reqlv":8,"gold":8000,"exppersec":64,};
petskill1_map[9] = {"lv":9,"reqlv":9,"gold":20000,"exppersec":81,};
petskill1_map[10] = {"lv":10,"reqlv":10,"gold":50000,"exppersec":100,};
petskill1_map[11] = {"lv":11,"reqlv":11,"gold":100000,"exppersec":121,};
petskill1_map[12] = {"lv":12,"reqlv":12,"gold":200000,"exppersec":144,};
petskill1_map[13] = {"lv":13,"reqlv":13,"gold":300000,"exppersec":169,};
petskill1_map[14] = {"lv":14,"reqlv":14,"gold":400000,"exppersec":196,};
petskill1_map[15] = {"lv":15,"reqlv":15,"gold":500000,"exppersec":225,};
petskill1_map[16] = {"lv":16,"reqlv":16,"gold":600000,"exppersec":256,};
petskill1_map[17] = {"lv":17,"reqlv":17,"gold":800000,"exppersec":289,};
petskill1_map[18] = {"lv":18,"reqlv":18,"gold":1600000,"exppersec":324,};
petskill1_map[19] = {"lv":19,"reqlv":19,"gold":3200000,"exppersec":361,};
petskill1_map[20] = {"lv":20,"reqlv":20,"gold":6400000,"exppersec":400,};
petskill1_map[21] = {"lv":21,"reqlv":21,"gold":18522000,"exppersec":441,};
petskill1_map[22] = {"lv":22,"reqlv":22,"gold":21296000,"exppersec":484,};
petskill1_map[23] = {"lv":23,"reqlv":23,"gold":24334000,"exppersec":529,};
petskill1_map[24] = {"lv":24,"reqlv":24,"gold":27648000,"exppersec":576,};
petskill1_map[25] = {"lv":25,"reqlv":25,"gold":31250000,"exppersec":625,};
petskill1_map[26] = {"lv":26,"reqlv":26,"gold":35152000,"exppersec":676,};
petskill1_map[27] = {"lv":27,"reqlv":27,"gold":39366000,"exppersec":729,};
petskill1_map[28] = {"lv":28,"reqlv":28,"gold":43904000,"exppersec":784,};
petskill1_map[29] = {"lv":29,"reqlv":29,"gold":48778000,"exppersec":841,};
petskill1_map[30] = {"lv":30,"reqlv":30,"gold":54000000,"exppersec":900,};
petskill1_map[31] = {"lv":31,"reqlv":31,"gold":59582000,"exppersec":961,};
petskill1_map[32] = {"lv":32,"reqlv":32,"gold":65536000,"exppersec":1024,};
petskill1_map[33] = {"lv":33,"reqlv":33,"gold":71874000,"exppersec":1089,};
petskill1_map[34] = {"lv":34,"reqlv":34,"gold":78608000,"exppersec":1156,};
petskill1_map[35] = {"lv":35,"reqlv":35,"gold":85750000,"exppersec":1225,};
petskill1_map[36] = {"lv":36,"reqlv":36,"gold":93312000,"exppersec":1296,};
petskill1_map[37] = {"lv":37,"reqlv":37,"gold":101306000,"exppersec":1369,};
petskill1_map[38] = {"lv":38,"reqlv":38,"gold":109744000,"exppersec":1444,};
petskill1_map[39] = {"lv":39,"reqlv":39,"gold":118638000,"exppersec":1521,};
petskill1_map[40] = {"lv":40,"reqlv":40,"gold":128000000,"exppersec":1600,};
petskill1_map[41] = {"lv":41,"reqlv":41,"gold":137842000,"exppersec":1681,};
petskill1_map[42] = {"lv":42,"reqlv":42,"gold":148176000,"exppersec":1764,};
petskill1_map[43] = {"lv":43,"reqlv":43,"gold":159014000,"exppersec":1849,};
petskill1_map[44] = {"lv":44,"reqlv":44,"gold":170368000,"exppersec":1936,};
petskill1_map[45] = {"lv":45,"reqlv":45,"gold":182250000,"exppersec":2025,};
petskill1_map[46] = {"lv":46,"reqlv":46,"gold":194672000,"exppersec":2116,};
petskill1_map[47] = {"lv":47,"reqlv":47,"gold":207646000,"exppersec":2209,};
petskill1_map[48] = {"lv":48,"reqlv":48,"gold":221184000,"exppersec":2304,};
petskill1_map[49] = {"lv":49,"reqlv":49,"gold":235298000,"exppersec":2401,};
petskill1_map[50] = {"lv":50,"reqlv":50,"gold":250000000,"exppersec":2500,};
petskill1_map[51] = {"lv":51,"reqlv":51,"gold":265302000,"exppersec":2601,};
petskill1_map[52] = {"lv":52,"reqlv":52,"gold":281216000,"exppersec":2704,};
petskill1_map[53] = {"lv":53,"reqlv":53,"gold":297754000,"exppersec":2809,};
petskill1_map[54] = {"lv":54,"reqlv":54,"gold":314928000,"exppersec":2916,};
petskill1_map[55] = {"lv":55,"reqlv":55,"gold":332750000,"exppersec":3025,};
petskill1_map[56] = {"lv":56,"reqlv":56,"gold":351232000,"exppersec":3136,};
petskill1_map[57] = {"lv":57,"reqlv":57,"gold":370386000,"exppersec":3249,};
petskill1_map[58] = {"lv":58,"reqlv":58,"gold":390224000,"exppersec":3364,};
petskill1_map[59] = {"lv":59,"reqlv":59,"gold":410758000,"exppersec":3481,};
petskill1_map[60] = {"lv":60,"reqlv":60,"gold":432000000,"exppersec":3600,};
petskill1_map[61] = {"lv":61,"reqlv":61,"gold":453962000,"exppersec":3721,};
petskill1_map[62] = {"lv":62,"reqlv":62,"gold":476656000,"exppersec":3844,};
petskill1_map[63] = {"lv":63,"reqlv":63,"gold":500094000,"exppersec":3969,};
petskill1_map[64] = {"lv":64,"reqlv":64,"gold":524288000,"exppersec":4096,};
petskill1_map[65] = {"lv":65,"reqlv":65,"gold":549250000,"exppersec":4225,};
petskill1_map[66] = {"lv":66,"reqlv":66,"gold":574992000,"exppersec":4356,};
petskill1_map[67] = {"lv":67,"reqlv":67,"gold":601526000,"exppersec":4489,};
petskill1_map[68] = {"lv":68,"reqlv":68,"gold":628864000,"exppersec":4624,};
petskill1_map[69] = {"lv":69,"reqlv":69,"gold":657018000,"exppersec":4761,};
petskill1_map[70] = {"lv":70,"reqlv":70,"gold":686000000,"exppersec":4900,};
petskill1_map[71] = {"lv":71,"reqlv":71,"gold":715822000,"exppersec":5041,};
petskill1_map[72] = {"lv":72,"reqlv":72,"gold":746496000,"exppersec":5184,};
petskill1_map[73] = {"lv":73,"reqlv":73,"gold":778034000,"exppersec":5329,};
petskill1_map[74] = {"lv":74,"reqlv":74,"gold":810448000,"exppersec":5476,};
petskill1_map[75] = {"lv":75,"reqlv":75,"gold":843750000,"exppersec":5625,};
petskill1_map[76] = {"lv":76,"reqlv":76,"gold":877952000,"exppersec":5776,};
petskill1_map[77] = {"lv":77,"reqlv":77,"gold":913066000,"exppersec":5929,};
petskill1_map[78] = {"lv":78,"reqlv":78,"gold":949104000,"exppersec":6084,};
petskill1_map[79] = {"lv":79,"reqlv":79,"gold":986078000,"exppersec":6241,};
petskill1_map[80] = {"lv":80,"reqlv":80,"gold":1024000000,"exppersec":6400,};
petskill1_map[81] = {"lv":81,"reqlv":81,"gold":1062882000,"exppersec":6561,};
petskill1_map[82] = {"lv":82,"reqlv":82,"gold":1102736000,"exppersec":6724,};
petskill1_map[83] = {"lv":83,"reqlv":83,"gold":1143574000,"exppersec":6889,};
petskill1_map[84] = {"lv":84,"reqlv":84,"gold":1185408000,"exppersec":7056,};
petskill1_map[85] = {"lv":85,"reqlv":85,"gold":1228250000,"exppersec":7225,};
petskill1_map[86] = {"lv":86,"reqlv":86,"gold":1272112000,"exppersec":7396,};
petskill1_map[87] = {"lv":87,"reqlv":87,"gold":1317006000,"exppersec":7569,};
petskill1_map[88] = {"lv":88,"reqlv":88,"gold":1362944000,"exppersec":7744,};
petskill1_map[89] = {"lv":89,"reqlv":89,"gold":1409938000,"exppersec":7921,};
petskill1_map[90] = {"lv":90,"reqlv":90,"gold":1458000000,"exppersec":8100,};
petskill1_map[91] = {"lv":91,"reqlv":91,"gold":1507142000,"exppersec":8281,};
petskill1_map[92] = {"lv":92,"reqlv":92,"gold":1557376000,"exppersec":8464,};
petskill1_map[93] = {"lv":93,"reqlv":93,"gold":1608714000,"exppersec":8649,};
petskill1_map[94] = {"lv":94,"reqlv":94,"gold":1661168000,"exppersec":8836,};
petskill1_map[95] = {"lv":95,"reqlv":95,"gold":1714750000,"exppersec":9025,};
petskill1_map[96] = {"lv":96,"reqlv":96,"gold":1769472000,"exppersec":9216,};
petskill1_map[97] = {"lv":97,"reqlv":97,"gold":1825346000,"exppersec":9409,};
petskill1_map[98] = {"lv":98,"reqlv":98,"gold":1882384000,"exppersec":9604,};
petskill1_map[99] = {"lv":99,"reqlv":99,"gold":1940598000,"exppersec":9801,};
class Petskill1:
def __init__(self, key):
config = petskill1_map.get(key);
for k, v in config.items():
setattr(self, k, v);
return
def create_Petskill1(key):
config = petskill1_map.get(key);
if not config:
return
return Petskill1(key)
|
# -*- coding: utf-8 -*-
'''
Author: Hannibal
Data:
Desc: local data config
NOTE: Don't modify this file, it's build by xml-to-python!!!
'''
petskill1_map = {};
petskill1_map[1] = {"lv":1,"reqlv":1,"gold":20,"exppersec":1,};
petskill1_map[2] = {"lv":2,"reqlv":2,"gold":100,"exppersec":4,};
petskill1_map[3] = {"lv":3,"reqlv":3,"gold":300,"exppersec":9,};
petskill1_map[4] = {"lv":4,"reqlv":4,"gold":600,"exppersec":16,};
petskill1_map[5] = {"lv":5,"reqlv":5,"gold":1000,"exppersec":25,};
petskill1_map[6] = {"lv":6,"reqlv":6,"gold":2000,"exppersec":36,};
petskill1_map[7] = {"lv":7,"reqlv":7,"gold":4000,"exppersec":49,};
petskill1_map[8] = {"lv":8,"reqlv":8,"gold":8000,"exppersec":64,};
petskill1_map[9] = {"lv":9,"reqlv":9,"gold":20000,"exppersec":81,};
petskill1_map[10] = {"lv":10,"reqlv":10,"gold":50000,"exppersec":100,};
petskill1_map[11] = {"lv":11,"reqlv":11,"gold":100000,"exppersec":121,};
petskill1_map[12] = {"lv":12,"reqlv":12,"gold":200000,"exppersec":144,};
petskill1_map[13] = {"lv":13,"reqlv":13,"gold":300000,"exppersec":169,};
petskill1_map[14] = {"lv":14,"reqlv":14,"gold":400000,"exppersec":196,};
petskill1_map[15] = {"lv":15,"reqlv":15,"gold":500000,"exppersec":225,};
petskill1_map[16] = {"lv":16,"reqlv":16,"gold":600000,"exppersec":256,};
petskill1_map[17] = {"lv":17,"reqlv":17,"gold":800000,"exppersec":289,};
petskill1_map[18] = {"lv":18,"reqlv":18,"gold":1600000,"exppersec":324,};
petskill1_map[19] = {"lv":19,"reqlv":19,"gold":3200000,"exppersec":361,};
petskill1_map[20] = {"lv":20,"reqlv":20,"gold":6400000,"exppersec":400,};
petskill1_map[21] = {"lv":21,"reqlv":21,"gold":18522000,"exppersec":441,};
petskill1_map[22] = {"lv":22,"reqlv":22,"gold":21296000,"exppersec":484,};
petskill1_map[23] = {"lv":23,"reqlv":23,"gold":24334000,"exppersec":529,};
petskill1_map[24] = {"lv":24,"reqlv":24,"gold":27648000,"exppersec":576,};
petskill1_map[25] = {"lv":25,"reqlv":25,"gold":31250000,"exppersec":625,};
petskill1_map[26] = {"lv":26,"reqlv":26,"gold":35152000,"exppersec":676,};
petskill1_map[27] = {"lv":27,"reqlv":27,"gold":39366000,"exppersec":729,};
petskill1_map[28] = {"lv":28,"reqlv":28,"gold":43904000,"exppersec":784,};
petskill1_map[29] = {"lv":29,"reqlv":29,"gold":48778000,"exppersec":841,};
petskill1_map[30] = {"lv":30,"reqlv":30,"gold":54000000,"exppersec":900,};
petskill1_map[31] = {"lv":31,"reqlv":31,"gold":59582000,"exppersec":961,};
petskill1_map[32] = {"lv":32,"reqlv":32,"gold":65536000,"exppersec":1024,};
petskill1_map[33] = {"lv":33,"reqlv":33,"gold":71874000,"exppersec":1089,};
petskill1_map[34] = {"lv":34,"reqlv":34,"gold":78608000,"exppersec":1156,};
petskill1_map[35] = {"lv":35,"reqlv":35,"gold":85750000,"exppersec":1225,};
petskill1_map[36] = {"lv":36,"reqlv":36,"gold":93312000,"exppersec":1296,};
petskill1_map[37] = {"lv":37,"reqlv":37,"gold":101306000,"exppersec":1369,};
petskill1_map[38] = {"lv":38,"reqlv":38,"gold":109744000,"exppersec":1444,};
petskill1_map[39] = {"lv":39,"reqlv":39,"gold":118638000,"exppersec":1521,};
petskill1_map[40] = {"lv":40,"reqlv":40,"gold":128000000,"exppersec":1600,};
petskill1_map[41] = {"lv":41,"reqlv":41,"gold":137842000,"exppersec":1681,};
petskill1_map[42] = {"lv":42,"reqlv":42,"gold":148176000,"exppersec":1764,};
petskill1_map[43] = {"lv":43,"reqlv":43,"gold":159014000,"exppersec":1849,};
petskill1_map[44] = {"lv":44,"reqlv":44,"gold":170368000,"exppersec":1936,};
petskill1_map[45] = {"lv":45,"reqlv":45,"gold":182250000,"exppersec":2025,};
petskill1_map[46] = {"lv":46,"reqlv":46,"gold":194672000,"exppersec":2116,};
petskill1_map[47] = {"lv":47,"reqlv":47,"gold":207646000,"exppersec":2209,};
petskill1_map[48] = {"lv":48,"reqlv":48,"gold":221184000,"exppersec":2304,};
petskill1_map[49] = {"lv":49,"reqlv":49,"gold":235298000,"exppersec":2401,};
petskill1_map[50] = {"lv":50,"reqlv":50,"gold":250000000,"exppersec":2500,};
petskill1_map[51] = {"lv":51,"reqlv":51,"gold":265302000,"exppersec":2601,};
petskill1_map[52] = {"lv":52,"reqlv":52,"gold":281216000,"exppersec":2704,};
petskill1_map[53] = {"lv":53,"reqlv":53,"gold":297754000,"exppersec":2809,};
petskill1_map[54] = {"lv":54,"reqlv":54,"gold":314928000,"exppersec":2916,};
petskill1_map[55] = {"lv":55,"reqlv":55,"gold":332750000,"exppersec":3025,};
petskill1_map[56] = {"lv":56,"reqlv":56,"gold":351232000,"exppersec":3136,};
petskill1_map[57] = {"lv":57,"reqlv":57,"gold":370386000,"exppersec":3249,};
petskill1_map[58] = {"lv":58,"reqlv":58,"gold":390224000,"exppersec":3364,};
petskill1_map[59] = {"lv":59,"reqlv":59,"gold":410758000,"exppersec":3481,};
petskill1_map[60] = {"lv":60,"reqlv":60,"gold":432000000,"exppersec":3600,};
petskill1_map[61] = {"lv":61,"reqlv":61,"gold":453962000,"exppersec":3721,};
petskill1_map[62] = {"lv":62,"reqlv":62,"gold":476656000,"exppersec":3844,};
petskill1_map[63] = {"lv":63,"reqlv":63,"gold":500094000,"exppersec":3969,};
petskill1_map[64] = {"lv":64,"reqlv":64,"gold":524288000,"exppersec":4096,};
petskill1_map[65] = {"lv":65,"reqlv":65,"gold":549250000,"exppersec":4225,};
petskill1_map[66] = {"lv":66,"reqlv":66,"gold":574992000,"exppersec":4356,};
petskill1_map[67] = {"lv":67,"reqlv":67,"gold":601526000,"exppersec":4489,};
petskill1_map[68] = {"lv":68,"reqlv":68,"gold":628864000,"exppersec":4624,};
petskill1_map[69] = {"lv":69,"reqlv":69,"gold":657018000,"exppersec":4761,};
petskill1_map[70] = {"lv":70,"reqlv":70,"gold":686000000,"exppersec":4900,};
petskill1_map[71] = {"lv":71,"reqlv":71,"gold":715822000,"exppersec":5041,};
petskill1_map[72] = {"lv":72,"reqlv":72,"gold":746496000,"exppersec":5184,};
petskill1_map[73] = {"lv":73,"reqlv":73,"gold":778034000,"exppersec":5329,};
petskill1_map[74] = {"lv":74,"reqlv":74,"gold":810448000,"exppersec":5476,};
petskill1_map[75] = {"lv":75,"reqlv":75,"gold":843750000,"exppersec":5625,};
petskill1_map[76] = {"lv":76,"reqlv":76,"gold":877952000,"exppersec":5776,};
petskill1_map[77] = {"lv":77,"reqlv":77,"gold":913066000,"exppersec":5929,};
petskill1_map[78] = {"lv":78,"reqlv":78,"gold":949104000,"exppersec":6084,};
petskill1_map[79] = {"lv":79,"reqlv":79,"gold":986078000,"exppersec":6241,};
petskill1_map[80] = {"lv":80,"reqlv":80,"gold":1024000000,"exppersec":6400,};
petskill1_map[81] = {"lv":81,"reqlv":81,"gold":1062882000,"exppersec":6561,};
petskill1_map[82] = {"lv":82,"reqlv":82,"gold":1102736000,"exppersec":6724,};
petskill1_map[83] = {"lv":83,"reqlv":83,"gold":1143574000,"exppersec":6889,};
petskill1_map[84] = {"lv":84,"reqlv":84,"gold":1185408000,"exppersec":7056,};
petskill1_map[85] = {"lv":85,"reqlv":85,"gold":1228250000,"exppersec":7225,};
petskill1_map[86] = {"lv":86,"reqlv":86,"gold":1272112000,"exppersec":7396,};
petskill1_map[87] = {"lv":87,"reqlv":87,"gold":1317006000,"exppersec":7569,};
petskill1_map[88] = {"lv":88,"reqlv":88,"gold":1362944000,"exppersec":7744,};
petskill1_map[89] = {"lv":89,"reqlv":89,"gold":1409938000,"exppersec":7921,};
petskill1_map[90] = {"lv":90,"reqlv":90,"gold":1458000000,"exppersec":8100,};
petskill1_map[91] = {"lv":91,"reqlv":91,"gold":1507142000,"exppersec":8281,};
petskill1_map[92] = {"lv":92,"reqlv":92,"gold":1557376000,"exppersec":8464,};
petskill1_map[93] = {"lv":93,"reqlv":93,"gold":1608714000,"exppersec":8649,};
petskill1_map[94] = {"lv":94,"reqlv":94,"gold":1661168000,"exppersec":8836,};
petskill1_map[95] = {"lv":95,"reqlv":95,"gold":1714750000,"exppersec":9025,};
petskill1_map[96] = {"lv":96,"reqlv":96,"gold":1769472000,"exppersec":9216,};
petskill1_map[97] = {"lv":97,"reqlv":97,"gold":1825346000,"exppersec":9409,};
petskill1_map[98] = {"lv":98,"reqlv":98,"gold":1882384000,"exppersec":9604,};
petskill1_map[99] = {"lv":99,"reqlv":99,"gold":1940598000,"exppersec":9801,};
class Petskill1:
def __init__(self, key):
config = petskill1_map.get(key);
for k, v in config.items():
setattr(self, k, v);
return
def create_Petskill1(key):
config = petskill1_map.get(key);
if not config:
return
return Petskill1(key)
|
en
| 0.776909
|
# -*- coding: utf-8 -*- Author: Hannibal Data: Desc: local data config NOTE: Don't modify this file, it's build by xml-to-python!!!
| 1.68984
| 2
|
tests/test_multiviewica.py
|
hugorichard/mvneuro
| 0
|
6627413
|
import os
import pytest
import numpy as np
from mvneuro.multiviewica import MultiViewICA
import tempfile
def generate_data(
n_voxels, n_supervoxels, n_timeframes, n_components, n_subjects, datadir,
):
"""
Generate data without noise
Returns
------
W, Ss, Xs, np.array(paths)
W, Ss, Xs
"""
n_sessions = len(n_timeframes)
bigger_mix = [
np.linalg.svd(
np.random.rand(n_voxels, n_supervoxels), full_matrices=False,
)[0]
for _ in range(n_subjects)
]
W = [
np.random.rand(n_supervoxels, n_components) for _ in range(n_subjects)
]
Ss = []
for j in range(n_sessions):
Sj = np.random.laplace(size=(n_components, n_timeframes[j]))
Sj = Sj - np.mean(Sj, axis=1, keepdims=True)
Ss.append(Sj)
Xs = []
paths = []
for subject in range(n_subjects):
sessions_path = []
Xs_ = []
for session in range(n_sessions):
pth = "%i_%i" % (subject, session)
X__ = bigger_mix[subject].dot(W[subject]).dot(Ss[session])
Xs_.append(X__)
if datadir is not None:
np.save(os.path.join(datadir, pth), X__)
sessions_path.append(os.path.join(datadir, pth + ".npy"))
if datadir is not None:
paths.append(sessions_path)
Xs.append(Xs_)
if datadir is not None:
return W, Ss, Xs, np.array(paths)
else:
return W, Ss, Xs
def test_fit_transform_mvica():
"""
Check that we recover same subject specific sources
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
S = np.concatenate(Ss, axis=1)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(
reduction="srm", noise=1, n_components=n_components, n_iter=100
)
shared = ica.fit_transform(X)
for i in range(len(shared)):
np.testing.assert_almost_equal(shared[0], shared[i], 2)
def test_fit_transform_mvica():
"""
Check that we recover same subject specific sources
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
S = np.concatenate(Ss, axis=1)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(
reduction="srm", noise=1, n_components=n_components, n_iter=100
)
shared = ica.fit_transform(X)
for i in range(len(shared)):
np.testing.assert_almost_equal(shared[0], shared[i], 2)
@pytest.mark.parametrize("reduction, n_voxels", [("srm", 10), (None, 2)])
def test_inverse_transform(reduction, n_voxels):
"""
Test that we can recover data after transform
"""
n_timeframes = [10, 10]
n_subjects = 2
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction=reduction, n_components=n_components, n_iter=100)
shared = ica.fit_transform(X)
X_pred = ica.inverse_transform(np.mean(shared, axis=0))
for i in range(len(X_pred)):
np.testing.assert_array_almost_equal(X_pred[i], X[i], 2)
shared_pred = ica.transform(X_pred)
np.testing.assert_array_almost_equal(
np.mean(shared, axis=0), np.mean(shared_pred, axis=0), 5
)
@pytest.mark.parametrize("reduction, n_voxels", [("srm", 10), (None, 2)])
def test_add_subjects(reduction, n_voxels):
"""
Test that we can recover data after transform
"""
n_voxels = n_voxels
n_timeframes = [10, 10]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction=reduction, n_components=n_components, n_iter=100)
shared = ica.fit_transform(X[:4])
ica.add_subjects([X[4]], np.mean(shared, axis=0))
X_pred = ica.inverse_transform(np.mean(shared, axis=0))
for i in range(len(X_pred)):
np.testing.assert_array_almost_equal(X_pred[i], X[i], 2)
shared_pred = ica.transform(X_pred)
np.testing.assert_array_almost_equal(
np.mean(shared, axis=0), np.mean(shared_pred, axis=0), 5
)
def test_input():
"""
Test that we can recover data after transform
whatever input it is
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
with tempfile.TemporaryDirectory() as datadir:
W, Ss, Xs, paths = generate_data(
n_voxels,
n_components,
n_timeframes,
n_components,
n_subjects,
datadir,
)
np_data = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction="srm", n_components=None, n_iter=100)
shared = ica.fit_transform(paths)
for i in range(len(Xs)):
X_predi = ica.inverse_transform(
shared[i], subjects_indexes=[i]
)[0]
np.testing.assert_allclose(X_predi, np_data[i], 5)
|
import os
import pytest
import numpy as np
from mvneuro.multiviewica import MultiViewICA
import tempfile
def generate_data(
n_voxels, n_supervoxels, n_timeframes, n_components, n_subjects, datadir,
):
"""
Generate data without noise
Returns
------
W, Ss, Xs, np.array(paths)
W, Ss, Xs
"""
n_sessions = len(n_timeframes)
bigger_mix = [
np.linalg.svd(
np.random.rand(n_voxels, n_supervoxels), full_matrices=False,
)[0]
for _ in range(n_subjects)
]
W = [
np.random.rand(n_supervoxels, n_components) for _ in range(n_subjects)
]
Ss = []
for j in range(n_sessions):
Sj = np.random.laplace(size=(n_components, n_timeframes[j]))
Sj = Sj - np.mean(Sj, axis=1, keepdims=True)
Ss.append(Sj)
Xs = []
paths = []
for subject in range(n_subjects):
sessions_path = []
Xs_ = []
for session in range(n_sessions):
pth = "%i_%i" % (subject, session)
X__ = bigger_mix[subject].dot(W[subject]).dot(Ss[session])
Xs_.append(X__)
if datadir is not None:
np.save(os.path.join(datadir, pth), X__)
sessions_path.append(os.path.join(datadir, pth + ".npy"))
if datadir is not None:
paths.append(sessions_path)
Xs.append(Xs_)
if datadir is not None:
return W, Ss, Xs, np.array(paths)
else:
return W, Ss, Xs
def test_fit_transform_mvica():
"""
Check that we recover same subject specific sources
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
S = np.concatenate(Ss, axis=1)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(
reduction="srm", noise=1, n_components=n_components, n_iter=100
)
shared = ica.fit_transform(X)
for i in range(len(shared)):
np.testing.assert_almost_equal(shared[0], shared[i], 2)
def test_fit_transform_mvica():
"""
Check that we recover same subject specific sources
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
S = np.concatenate(Ss, axis=1)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(
reduction="srm", noise=1, n_components=n_components, n_iter=100
)
shared = ica.fit_transform(X)
for i in range(len(shared)):
np.testing.assert_almost_equal(shared[0], shared[i], 2)
@pytest.mark.parametrize("reduction, n_voxels", [("srm", 10), (None, 2)])
def test_inverse_transform(reduction, n_voxels):
"""
Test that we can recover data after transform
"""
n_timeframes = [10, 10]
n_subjects = 2
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction=reduction, n_components=n_components, n_iter=100)
shared = ica.fit_transform(X)
X_pred = ica.inverse_transform(np.mean(shared, axis=0))
for i in range(len(X_pred)):
np.testing.assert_array_almost_equal(X_pred[i], X[i], 2)
shared_pred = ica.transform(X_pred)
np.testing.assert_array_almost_equal(
np.mean(shared, axis=0), np.mean(shared_pred, axis=0), 5
)
@pytest.mark.parametrize("reduction, n_voxels", [("srm", 10), (None, 2)])
def test_add_subjects(reduction, n_voxels):
"""
Test that we can recover data after transform
"""
n_voxels = n_voxels
n_timeframes = [10, 10]
n_subjects = 5
n_components = 2
W, Ss, Xs = generate_data(
n_voxels, n_components, n_timeframes, n_components, n_subjects, None
)
X = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction=reduction, n_components=n_components, n_iter=100)
shared = ica.fit_transform(X[:4])
ica.add_subjects([X[4]], np.mean(shared, axis=0))
X_pred = ica.inverse_transform(np.mean(shared, axis=0))
for i in range(len(X_pred)):
np.testing.assert_array_almost_equal(X_pred[i], X[i], 2)
shared_pred = ica.transform(X_pred)
np.testing.assert_array_almost_equal(
np.mean(shared, axis=0), np.mean(shared_pred, axis=0), 5
)
def test_input():
"""
Test that we can recover data after transform
whatever input it is
"""
n_voxels = 10
n_timeframes = [20, 20]
n_subjects = 5
n_components = 2
with tempfile.TemporaryDirectory() as datadir:
W, Ss, Xs, paths = generate_data(
n_voxels,
n_components,
n_timeframes,
n_components,
n_subjects,
datadir,
)
np_data = [np.concatenate(np.array(x), axis=1) for x in Xs]
ica = MultiViewICA(reduction="srm", n_components=None, n_iter=100)
shared = ica.fit_transform(paths)
for i in range(len(Xs)):
X_predi = ica.inverse_transform(
shared[i], subjects_indexes=[i]
)[0]
np.testing.assert_allclose(X_predi, np_data[i], 5)
|
en
| 0.843617
|
Generate data without noise Returns ------ W, Ss, Xs, np.array(paths) W, Ss, Xs Check that we recover same subject specific sources Check that we recover same subject specific sources Test that we can recover data after transform Test that we can recover data after transform Test that we can recover data after transform whatever input it is
| 2.001942
| 2
|
tests/config_tests/test_sample_config.py
|
AltaverseDAO/bittensor
| 0
|
6627414
|
import os, sys
from bittensor._neuron.text.template_miner import neuron as template_miner
from bittensor._neuron.text.core_validator import neuron as core_validator
from bittensor._neuron.text.template_server import server as template_server
from bittensor._neuron.text.advanced_server import server as advanced_server
from bittensor._neuron.text.multitron_server import server as multitron_server
def test_run_template_miner_config():
PATH = '/tests/config_tests/template_miner_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = template_miner.config()
assert config['axon']['ip'] == '[::]'
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['dendrite']['requires_grad'] == True
assert config['nucleus']['punishment'] == 0.001
def test_run_core_validator_config():
PATH = '/tests/config_tests/template_validator_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = core_validator.config()
assert config['dataset']['batch_size'] == 10
assert config['dendrite']['requires_grad'] == True
assert config['logging']['logging_dir'] == '~/.bittensor/miners'
assert config['neuron']['clip_gradients'] == 1.0
def test_run_template_server_config():
PATH = '/tests/config_tests/template_server_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = template_server.config()
assert config['axon']['backward_timeout'] == 20
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['logging']['debug'] == False
assert config['wandb']['api_key'] == 'default'
def test_run_advanced_server_config():
PATH = '/tests/config_tests/advanced_server_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = advanced_server.config()
assert config['axon']['backward_timeout'] == 20
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['logging']['debug'] == False
assert config['neuron']['blacklist']['stake']['backward'] == 100
if __name__ == "__main__":
test_run_template_miner_config()
test_run_template_server_config()
test_run_advanced_server_config()
|
import os, sys
from bittensor._neuron.text.template_miner import neuron as template_miner
from bittensor._neuron.text.core_validator import neuron as core_validator
from bittensor._neuron.text.template_server import server as template_server
from bittensor._neuron.text.advanced_server import server as advanced_server
from bittensor._neuron.text.multitron_server import server as multitron_server
def test_run_template_miner_config():
PATH = '/tests/config_tests/template_miner_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = template_miner.config()
assert config['axon']['ip'] == '[::]'
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['dendrite']['requires_grad'] == True
assert config['nucleus']['punishment'] == 0.001
def test_run_core_validator_config():
PATH = '/tests/config_tests/template_validator_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = core_validator.config()
assert config['dataset']['batch_size'] == 10
assert config['dendrite']['requires_grad'] == True
assert config['logging']['logging_dir'] == '~/.bittensor/miners'
assert config['neuron']['clip_gradients'] == 1.0
def test_run_template_server_config():
PATH = '/tests/config_tests/template_server_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = template_server.config()
assert config['axon']['backward_timeout'] == 20
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['logging']['debug'] == False
assert config['wandb']['api_key'] == 'default'
def test_run_advanced_server_config():
PATH = '/tests/config_tests/advanced_server_sample_config.txt'
sys.argv = [sys.argv[0], '--config', PATH]
config = advanced_server.config()
assert config['axon']['backward_timeout'] == 20
assert config['dataset']['data_dir'] == '~/.bittensor/data/'
assert config['logging']['debug'] == False
assert config['neuron']['blacklist']['stake']['backward'] == 100
if __name__ == "__main__":
test_run_template_miner_config()
test_run_template_server_config()
test_run_advanced_server_config()
|
none
| 1
| 1.927081
| 2
|
|
sandbox/rocky/tf/core/parameterized.py
|
windweller/nlplab
| 0
|
6627415
|
<reponame>windweller/nlplab<gh_stars>0
from contextlib import contextmanager
from rllab.core.serializable import Serializable
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
import tensorflow as tf
import numpy as np
import h5py
import os
import time
load_params = True
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
yield
load_params = True
class Parameterized(object):
def __init__(self):
self._cached_params = {}
self._cached_param_dtypes = {}
self._cached_param_shapes = {}
self._cached_assign_ops = {}
self._cached_assign_placeholders = {}
timestr = time.strftime("%Y%m%d_%H%M%S")
self.save_name = self.save_name + timestr
print self.save_name
def get_params_internal(self, **tags):
"""
Internal method to be implemented which does not perform caching
"""
raise NotImplementedError
def get_params(self, **tags):
"""
Get the list of parameters, filtered by the provided tags.
Some common tags include 'regularizable' and 'trainable'
"""
tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
if tag_tuple not in self._cached_params:
self._cached_params[tag_tuple] = self.get_params_internal(**tags)
return self._cached_params[tag_tuple]
def get_param_dtypes(self, **tags):
tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
if tag_tuple not in self._cached_param_dtypes:
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
return self._cached_param_dtypes[tag_tuple]
def get_param_shapes(self, **tags):
tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
if tag_tuple not in self._cached_param_shapes:
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
return self._cached_param_shapes[tag_tuple]
def get_param_values(self, **tags):
params = self.get_params(**tags)
param_values = tf.get_default_session().run(params)
return flatten_tensors(param_values)
def set_param_values(self, flattened_params, **tags):
debug = tags.pop("debug", False)
param_values = unflatten_tensors(
flattened_params, self.get_param_shapes(**tags))
ops = []
feed_dict = dict()
for param, dtype, value in zip(
self.get_params(**tags),
self.get_param_dtypes(**tags),
param_values):
if param not in self._cached_assign_ops:
assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
assign_op = tf.assign(param, assign_placeholder)
self._cached_assign_ops[param] = assign_op
self._cached_assign_placeholders[param] = assign_placeholder
ops.append(self._cached_assign_ops[param])
feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
if debug:
print("setting value of %s" % param.name)
tf.get_default_session().run(ops, feed_dict=feed_dict)
def flat_to_params(self, flattened_params, **tags):
return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
def __getstate__(self):
d = Serializable.__getstate__(self)
global load_params
if load_params:
d["params"] = self.get_param_values()
return d
def __setstate__(self, d):
Serializable.__setstate__(self, d)
global load_params
if load_params:
tf.get_default_session().run(tf.initialize_variables(self.get_params()))
self.set_param_values(d["params"])
class JointParameterized(Parameterized):
def __init__(self, components):
super(JointParameterized, self).__init__()
self.components = components
def get_params_internal(self, **tags):
params = [param for comp in self.components for param in comp.get_params_internal(**tags)]
# only return unique parameters
return sorted(set(params), key=hash)
class Model(Parameterized):
_load_dir = './weights'
_log_dir = './weights'
def load_params(self, filename, itr, skip_params):
print 'loading policy params...'
if not hasattr(self, 'load_dir'):
log_dir = Model._load_dir
else:
log_dir = self.load_dir
filename = log_dir + "/" + filename + '.h5'
assignments = []
# create log_dir if non-existent
if not os.path.exists(log_dir):
os.makedirs(log_dir)
with h5py.File(filename,'r') as hf:
if itr >= 0:
prefix = self._prefix(itr)
else:
prefix = hf.keys()[itr] + "/"
for param in self.get_params():
path = prefix + param.name
if param.name in skip_params:
continue
if path in hf:
assignments.append(
param.assign(hf[path][...])
)
else:
halt= True
sess = tf.get_default_session()
sess.run(assignments)
print 'done.'
def save_params(self, itr, overwrite= False):
print 'saving model...'
if not hasattr(self, 'log_dir'):
log_dir = Model._log_dir
else:
log_dir = self.log_dir
filename = log_dir + "/" + self.save_name + '.h5'
sess = tf.get_default_session()
# create log_dir if non-existent
if not os.path.exists(log_dir):
os.makedirs(log_dir)
key = self._prefix(itr)
with h5py.File(filename, 'a') as hf:
if key in hf:
dset = hf[key]
else:
dset = hf.create_group(key)
vs = self.get_params()
vals = sess.run(vs)
for v, val in zip(vs, vals):
dset[v.name] = val
print 'done.'
def set_log_dir(self, log_dir):
self.log_dir = log_dir
def set_load_dir(self, load_dir):
self.load_dir = load_dir
@staticmethod
def _prefix(x):
return 'iter{:05}/'.format(x)
|
from contextlib import contextmanager
from rllab.core.serializable import Serializable
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
import tensorflow as tf
import numpy as np
import h5py
import os
import time
# Module-level flag consulted by Parameterized.__getstate__/__setstate__.
load_params = True


@contextmanager
def suppress_params_loading():
    """Temporarily disable parameter (de)serialization while pickling.

    Uses try/finally so the flag is restored to True even when the body of
    the ``with`` block raises (the original left it stuck at False).
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        load_params = True
class Parameterized(object):
    """Base class for objects exposing TF parameters.

    Adds caching of parameter lists/dtypes/shapes, flat get/set of parameter
    values, and pickle support that round-trips parameter values. Subclasses
    implement get_params_internal().
    """
    def __init__(self):
        # Caches keyed by a sorted tag tuple so repeated queries with the
        # same tags avoid recomputation and extra session runs.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}
        # Reused assign ops/placeholders per parameter, so set_param_values
        # does not keep adding nodes to the TF graph.
        self._cached_assign_ops = {}
        self._cached_assign_placeholders = {}
        # Append a timestamp to make save_name unique per run.
        # NOTE(review): assumes a subclass already set self.save_name before
        # calling this __init__ - confirm in subclasses.
        timestr = time.strftime("%Y%m%d_%H%M%S")
        self.save_name = self.save_name + timestr
        print self.save_name
    def get_params_internal(self, **tags):
        """
        Internal method to be implemented which does not perform caching
        """
        raise NotImplementedError
    def get_params(self, **tags):
        """
        Get the list of parameters, filtered by the provided tags.
        Some common tags include 'regularizable' and 'trainable'
        """
        # Canonical cache key: tag items sorted by name.
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_params:
            self._cached_params[tag_tuple] = self.get_params_internal(**tags)
        return self._cached_params[tag_tuple]
    def get_param_dtypes(self, **tags):
        # Dtypes are read from actual parameter values (one session run),
        # then cached per tag tuple.
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_dtypes:
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_dtypes[tag_tuple] = [val.dtype for val in param_values]
        return self._cached_param_dtypes[tag_tuple]
    def get_param_shapes(self, **tags):
        # Same caching scheme as get_param_dtypes, but for shapes.
        tag_tuple = tuple(sorted(list(tags.items()), key=lambda x: x[0]))
        if tag_tuple not in self._cached_param_shapes:
            params = self.get_params(**tags)
            param_values = tf.get_default_session().run(params)
            self._cached_param_shapes[tag_tuple] = [val.shape for val in param_values]
        return self._cached_param_shapes[tag_tuple]
    def get_param_values(self, **tags):
        # All parameter values concatenated into a single flat vector.
        params = self.get_params(**tags)
        param_values = tf.get_default_session().run(params)
        return flatten_tensors(param_values)
    def set_param_values(self, flattened_params, **tags):
        # Inverse of get_param_values: split the flat vector back into
        # per-parameter shapes and assign each via a cached placeholder.
        debug = tags.pop("debug", False)
        param_values = unflatten_tensors(
            flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for param, dtype, value in zip(
                self.get_params(**tags),
                self.get_param_dtypes(**tags),
                param_values):
            if param not in self._cached_assign_ops:
                # Build the assign op once per parameter and reuse it.
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print("setting value of %s" % param.name)
        tf.get_default_session().run(ops, feed_dict=feed_dict)
    def flat_to_params(self, flattened_params, **tags):
        # Split a flat vector into per-parameter arrays without assigning.
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))
    def __getstate__(self):
        d = Serializable.__getstate__(self)
        global load_params
        # Parameter values are pickled only when loading is not suppressed
        # (see suppress_params_loading()).
        if load_params:
            d["params"] = self.get_param_values()
        return d
    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            # Variables must be initialized before values can be assigned.
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d["params"])
class JointParameterized(Parameterized):
    """Aggregate several Parameterized components behind one interface."""

    def __init__(self, components):
        super(JointParameterized, self).__init__()
        self.components = components

    def get_params_internal(self, **tags):
        # Collect parameters from every component, deduplicate, and return
        # them in a deterministic (hash-based) order.
        unique = set()
        for component in self.components:
            unique.update(component.get_params_internal(**tags))
        return sorted(unique, key=hash)
class Model(Parameterized):
_load_dir = './weights'
_log_dir = './weights'
def load_params(self, filename, itr, skip_params):
print 'loading policy params...'
if not hasattr(self, 'load_dir'):
log_dir = Model._load_dir
else:
log_dir = self.load_dir
filename = log_dir + "/" + filename + '.h5'
assignments = []
# create log_dir if non-existent
if not os.path.exists(log_dir):
os.makedirs(log_dir)
with h5py.File(filename,'r') as hf:
if itr >= 0:
prefix = self._prefix(itr)
else:
prefix = hf.keys()[itr] + "/"
for param in self.get_params():
path = prefix + param.name
if param.name in skip_params:
continue
if path in hf:
assignments.append(
param.assign(hf[path][...])
)
else:
halt= True
sess = tf.get_default_session()
sess.run(assignments)
print 'done.'
def save_params(self, itr, overwrite= False):
print 'saving model...'
if not hasattr(self, 'log_dir'):
log_dir = Model._log_dir
else:
log_dir = self.log_dir
filename = log_dir + "/" + self.save_name + '.h5'
sess = tf.get_default_session()
# create log_dir if non-existent
if not os.path.exists(log_dir):
os.makedirs(log_dir)
key = self._prefix(itr)
with h5py.File(filename, 'a') as hf:
if key in hf:
dset = hf[key]
else:
dset = hf.create_group(key)
vs = self.get_params()
vals = sess.run(vs)
for v, val in zip(vs, vals):
dset[v.name] = val
print 'done.'
def set_log_dir(self, log_dir):
self.log_dir = log_dir
def set_load_dir(self, load_dir):
self.load_dir = load_dir
@staticmethod
def _prefix(x):
return 'iter{:05}/'.format(x)
|
en
| 0.519884
|
Internal method to be implemented which does not perform caching Get the list of parameters, filtered by the provided tags. Some common tags include 'regularizable' and 'trainable' # only return unique parameters # create log_dir if non-existent # create log_dir if non-existent
| 2.210729
| 2
|
sprint-1/python/string.py
|
pradeepwaviz/Aviral
| 0
|
6627416
|
# Read a line from the user and print each character on its own line.
string = input("Enter any String: ")
index = 0
while index < len(string):
    print(string[index])
    index += 1
|
string=input("Enter any String: ")
for char in string:
print(char)
|
none
| 1
| 3.953594
| 4
|
|
iirsBenchmark/explainers/_base_explainer.py
|
gAldeia/iirsBenchmark
| 0
|
6627417
|
<gh_stars>0
# Author: <NAME>
# Contact: <EMAIL>
# Version: 1.0.0
# Last modified: 08-20-2021 by <NAME>
from sklearn.exceptions import NotFittedError
from sklearn.utils import check_X_y
import numpy as np
"""
Base explainer, should be inherited by all explainers.
The base explainer has the following attributes:
* `agnostic` : attribute indicating if the explainer is model agnostic or not.
If it is, then agnostic=True, else agnostic is a list of strings containing
the name of compatible regressors;
* `local_scope` : boolean indicating if the explainer supports local
explanations. A local explanation is a feature importance attribution for
a single observation, and does not depend to the y value;
* `global_scope` : boolean indicating if the explainer supports global
explanations. A global explanation summarises the model feature importances
by creating a single feature importance array. The global explanation
can use training data, or can work independently of the data. In case
the global explanation depends of the data used to explain, different
global explanations can be obtained, and when it is independent of the data
the same explanation will always be returned.
And the following methods, that should be overwriten in subclasses:
* `_check_fit(self, X, y)` : checks if the explainer is compatible with the
given predictor on the constructor, and if X and y are compatible and
correct arrays. Raises different exceptions if some check fails;
* `_protect_explanation(self, X)`: takes an explanation and protects the
returned values if some invalid value is found. Can be overwritten for
more specific cases. Internal use only, should take a matrix of explanations
and will return a new matrix of same shape (n_observations, n_features)
* `_check_is_fitted(self)` : check if the explainer is fitted (when fitting
an explainer, some attributes ending with an underscore are created
within the class instance) when calling `explain_local` or `explain_global`;
* `fit(X, y)`: fits the explainer with the training data, should return
self at the end of the process (this allows fit().explain() chaining);
* `explain_local(self, X)` : takes as argument a matrix of shape
(n_observations, n_features) and return a local explanation for each
observation;
* `explain_global(self, X, y)` : takes as argument a matrix with more than
one sample and produces a global explanation, returning a matrix of shape
(1, n_features).
To implement explainers, this class should be inherited and the
`_protect_explanation` and `fit`, should be overwriten. Methods
`explain_local` and `explain_global` should be overwritten only if the
explainer supports those scope of explanations.
The constructor takes as argument, necessarly, a predictor instance
(not the predictor.predict!), and specifications about the subclass scope.
The constructor of the subclasses should take only the predictor and
optional arguments with respect to the explainer itself. The `Base_explainer`
constructor is not meant to be used by the user.
"""
from iirsBenchmark.exceptions import NotApplicableException
class Base_explainer():
    """Base class for all explainers.

    Subclasses must override ``fit``, and ``explain_local`` /
    ``explain_global`` when the corresponding scope is supported. See the
    module docstring for the full contract.
    """

    def __init__(self, *,
        predictor, agnostic=None, local_scope=False, global_scope=False):
        # By default, the explainer does not support any model or scope.
        self.predictor = predictor
        # `agnostic=None` replaces the original mutable default `[]`, which
        # was shared across every instance that did not pass the argument.
        # Behavior is unchanged: the default is still an (independent) [].
        self.agnostic = [] if agnostic is None else agnostic
        self.local_scope = local_scope
        self.global_scope = global_scope

    def _check_fit(self, X, y):
        """Validate (X, y) and ensure the wrapped predictor is supported.

        Raises NotApplicableException when the predictor's class name is not
        in `agnostic` (unless agnostic is True, i.e. fully model-agnostic).
        """
        X, y = check_X_y(X, y)
        # Checking if the given predictor is supported
        if (self.agnostic is not True and
            self.predictor.__class__.__name__ not in self.agnostic):
            raise NotApplicableException(
                f"The regressor {self.predictor.__class__.__name__} "
                "is not supported by this explainer.")

    def _protect_explanation(self, X):
        """Replace NaN/inf and out-of-range (outside [-1e10, 1e10]) values with 0."""
        return np.ma.filled(np.ma.masked_outside(
            np.ma.masked_where(np.isnan(X) | np.isinf(X), X), -1e+10, 1e+10), 0)

    def _check_is_fitted(self):
        """Raise NotFittedError unless a fitted attribute (``name_``) exists."""
        attrs = [v for v in vars(self)
            if v.endswith("_") and not v.startswith("__")]
        if not attrs:
            raise NotFittedError(
                "This explainers instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this estimator.")

    def fit(self, X, y):
        """Fit the explainer on training data. Subclasses must override."""
        raise NotImplementedError(
            f"This explainer {self.__class__.__name__} "
            "is missing the implementation of the fit method!")

    def explain_local(self, X):
        """Per-observation explanation; overridden when local_scope is True."""
        raise NotApplicableException(
            f"The explainer {self.__class__.__name__} does not support "
            "local explanations.")

    def explain_global(self, X, y):
        """Summary explanation; overridden when global_scope is True."""
        raise NotApplicableException(
            f"The explainer {self.__class__.__name__} does not support "
            "global explanations.")
|
# Author: <NAME>
# Contact: <EMAIL>
# Version: 1.0.0
# Last modified: 08-20-2021 by <NAME>
from sklearn.exceptions import NotFittedError
from sklearn.utils import check_X_y
import numpy as np
"""
Base explainer, should be inherited by all explainers.
The base explainer has the following attributes:
* `agnostic` : attribute indicating if the explainer is model agnostic or not.
If it is, then agnostic=True, else agnostic is a list of strings containing
the name of compatible regressors;
* `local_scope` : boolean indicating if the explainer supports local
explanations. A local explanation is a feature importance attribution for
a single observation, and does not depend to the y value;
* `global_scope` : boolean indicating if the explainer supports global
explanations. A global explanation summarises the model feature importances
by creating a single feature importance array. The global explanation
can use training data, or can work independently of the data. In case
the global explanation depends of the data used to explain, different
global explanations can be obtained, and when it is independent of the data
the same explanation will always be returned.
And the following methods, that should be overwriten in subclasses:
* `_check_fit(self, X, y)` : checks if the explainer is compatible with the
given predictor on the constructor, and if X and y are compatible and
correct arrays. Raises different exceptions if some check fails;
* `_protect_explanation(self, X)`: takes an explanation and protects the
returned values if some invalid value is found. Can be overwritten for
more specific cases. Internal use only, should take a matrix of explanations
and will return a new matrix of same shape (n_observations, n_features)
* `_check_is_fitted(self)` : check if the explainer is fitted (when fitting
an explainer, some attributes ending with an underscore are created
within the class instance) when calling `explain_local` or `explain_global`;
* `fit(X, y)`: fits the explainer with the training data, should return
self at the end of the process (this allows fit().explain() chaining);
* `explain_local(self, X)` : takes as argument a matrix of shape
(n_observations, n_features) and return a local explanation for each
observation;
* `explain_global(self, X, y)` : takes as argument a matrix with more than
one sample and produces a global explanation, returning a matrix of shape
(1, n_features).
To implement explainers, this class should be inherited and the
`_protect_explanation` and `fit`, should be overwriten. Methods
`explain_local` and `explain_global` should be overwritten only if the
explainer supports those scope of explanations.
The constructor takes as argument, necessarly, a predictor instance
(not the predictor.predict!), and specifications about the subclass scope.
The constructor of the subclasses should take only the predictor and
optional arguments with respect to the explainer itself. The `Base_explainer`
constructor is not meant to be used by the user.
"""
from iirsBenchmark.exceptions import NotApplicableException
class Base_explainer():
def __init__(self, *,
predictor, agnostic=[], local_scope=False, global_scope=False):
# By default, the explainer does not support any model or scope.
self.predictor = predictor
self.agnostic = agnostic
self.local_scope = local_scope
self.global_scope = global_scope
def _check_fit(self, X, y):
X, y = check_X_y(X, y)
# Checking if the given predictor is supported
if (self.agnostic is not True and
self.predictor.__class__.__name__ not in self.agnostic):
raise NotApplicableException(
f"The regressor {self.predictor.__class__.__name__} "
"is not supported by this explainer.")
def _protect_explanation(self, X):
return np.ma.filled(np.ma.masked_outside(
np.ma.masked_where(np.isnan(X) | np.isinf(X), X), -1e+10, 1e+10), 0)
def _check_is_fitted(self):
attrs = [v for v in vars(self)
if v.endswith("_") and not v.startswith("__")]
if not attrs:
raise NotFittedError(
"This explainers instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator.")
def fit(self, X, y):
raise NotImplementedError(
f"This explainer {self.__class__.__name__} "
"is missing the implementation of the fit method!")
def explain_local(self, X):
raise NotApplicableException(
f"The explainer {self.__class__.__name__} does not support "
"local explanations.")
def explain_global(self, X, y):
raise NotApplicableException(
f"The explainer {self.__class__.__name__} does not support "
"global explanations.")
|
en
| 0.78377
|
# Author: <NAME> # Contact: <EMAIL> # Version: 1.0.0 # Last modified: 08-20-2021 by <NAME> Base explainer, should be inherited by all explainers.
The base explainer has the following attributes:
* `agnostic` : attribute indicating if the explainer is model agnostic or not.
If it is, then agnostic=True, else agnostic is a list of strings containing
the name of compatible regressors;
* `local_scope` : boolean indicating if the explainer supports local
explanations. A local explanation is a feature importance attribution for
a single observation, and does not depend to the y value;
* `global_scope` : boolean indicating if the explainer supports global
explanations. A global explanation summarises the model feature importances
by creating a single feature importance array. The global explanation
can use training data, or can work independently of the data. In case
the global explanation depends of the data used to explain, different
global explanations can be obtained, and when it is independent of the data
the same explanation will always be returned.
And the following methods, which should be overwritten in subclasses:
* `_check_fit(self, X, y)` : checks if the explainer is compatible with the
given predictor on the constructor, and if X and y are compatible and
correct arrays. Raises different exceptions if some check fails;
* `_protect_explanation(self, X)`: takes an explanation and protects the
returned values if some invalid value is found. Can be overwritten for
more specific cases. Internal use only, should take a matrix of explanations
and will return a new matrix of same shape (n_observations, n_features)
* `_check_is_fitted(self)` : check if the explainer is fitted (when fitting
an explainer, some attributes ending with an underscore are created
within the class instance) when calling `explain_local` or `explain_global`;
* `fit(X, y)`: fits the explainer with the training data, should return
self at the end of the process (this allows fit().explain() chaining);
* `explain_local(self, X)` : takes as argument a matrix of shape
(n_observations, n_features) and return a local explanation for each
observation;
* `explain_global(self, X, y)` : takes as argument a matrix with more than
one sample and produces a global explanation, returning a matrix of shape
(1, n_features).
To implement explainers, this class should be inherited and the
`_protect_explanation` and `fit`, should be overwritten. Methods
`explain_local` and `explain_global` should be overwritten only if the
explainer supports those scope of explanations.
The constructor necessarily takes as argument a predictor instance
(not the predictor.predict!), and specifications about the subclass scope.
The constructor of the subclasses should take only the predictor and
optional arguments with respect to the explainer itself. The `Base_explainer`
constructor is not meant to be used by the user. # By default, the explainer does not support any model or scope. # Checking if the given predictor is supported
| 2.830551
| 3
|
FRG Hardware/frghardware/wardmapper/frghardware/wardmapper/__init__.py
|
fenning-research-group/Instruments
| 0
|
6627418
|
# from .mapper import *
# from .stage import stage
# from .mono import mono
# from .daq import daq
|
# from .mapper import *
# from .stage import stage
# from .mono import mono
# from .daq import daq
|
en
| 0.658561
|
# from .mapper import * # from .stage import stage # from .mono import mono # from .daq import daq
| 0.972896
| 1
|
tests/test_spec.py
|
shanty-social/parcel
| 0
|
6627419
|
<filename>tests/test_spec.py
from unittest import TestCase
from parameterized import parameterized
from parcel.spec import Spec
class SpecTestCase(TestCase):
    """Tests for parcel.spec.Spec parsing, satisfiability and comparisons."""
    # Each expand() tuple is (spec1, spec2); the test runs once per tuple.
    @parameterized.expand([
        ('foobar=1.0', 'foobar=1.0.0'),
        ('foobar>=1.0', 'foobar=1.0'),
        ('foobar>=1.0', 'foobar=2.0'),
        ('foobar==1.0', 'foobar=1.0'),
    ])
    def test_satisfiability(self, spec1, spec2):
        # Satisfaction is expressed two ways and must agree:
        # spec1.is_satisfied_by(spec2) <=> spec2.satisfies(spec1).
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertTrue(spec1.is_satisfied_by(spec2))
        self.assertTrue(spec2.satisfies(spec1))
    @parameterized.expand([
        ('foobar>=1.0', 'barfoo=1.0'),
        ('foobar>1.0', 'foobar=1.0'),
        ('foobar==1.0', 'foobar=1.0.1'),
    ])
    def test_negative_equality(self, spec1, spec2):
        # Different name, strict '>' at the boundary, or differing pinned
        # versions must not compare equal.
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertFalse(spec1 == spec2)
    @parameterized.expand([
        ('foobar==1.0.1', 'foobar==1.0'),
    ])
    def test_gt(self, spec1, spec2):
        # A newer pinned version compares greater.
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertGreater(spec1, spec2)
    @parameterized.expand([
        ('foobar==0.99', 'foobar==1.0'),
    ])
    def test_lt(self, spec1, spec2):
        # An older pinned version compares less.
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertLess(spec1, spec2)
    @parameterized.expand([
        ('foobar==1.0', 'foobar==1.0'),
        ('foobar==1.0.1', 'foobar==1.0'),
    ])
    def test_gte(self, spec1, spec2):
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertGreaterEqual(spec1, spec2)
    @parameterized.expand([
        ('foobar==1.0', 'foobar==1.0'),
        ('foobar==0.99', 'foobar==1.0'),
    ])
    def test_lte(self, spec1, spec2):
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        self.assertLessEqual(spec1, spec2)
    @parameterized.expand([
        ('foobar>=1.0', 'foobar==1.0'),
    ])
    def test_cmp_invalid(self, spec1, spec2):
        # Ordering a range spec ('>=') against a pinned spec must fail.
        # NOTE(review): the AssertionError may come from Spec's comparison
        # or from the failing assertGreater/assertLess itself - confirm.
        spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
        with self.assertRaises(AssertionError):
            self.assertGreater(spec1, spec2)
        with self.assertRaises(AssertionError):
            self.assertLess(spec1, spec2)
|
<filename>tests/test_spec.py
from unittest import TestCase
from parameterized import parameterized
from parcel.spec import Spec
class SpecTestCase(TestCase):
@parameterized.expand([
('foobar=1.0', 'foobar=1.0.0'),
('foobar>=1.0', 'foobar=1.0'),
('foobar>=1.0', 'foobar=2.0'),
('foobar==1.0', 'foobar=1.0'),
])
def test_satisfiability(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertTrue(spec1.is_satisfied_by(spec2))
self.assertTrue(spec2.satisfies(spec1))
@parameterized.expand([
('foobar>=1.0', 'barfoo=1.0'),
('foobar>1.0', 'foobar=1.0'),
('foobar==1.0', 'foobar=1.0.1'),
])
def test_negative_equality(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertFalse(spec1 == spec2)
@parameterized.expand([
('foobar==1.0.1', 'foobar==1.0'),
])
def test_gt(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertGreater(spec1, spec2)
@parameterized.expand([
('foobar==0.99', 'foobar==1.0'),
])
def test_lt(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertLess(spec1, spec2)
@parameterized.expand([
('foobar==1.0', 'foobar==1.0'),
('foobar==1.0.1', 'foobar==1.0'),
])
def test_gte(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertGreaterEqual(spec1, spec2)
@parameterized.expand([
('foobar==1.0', 'foobar==1.0'),
('foobar==0.99', 'foobar==1.0'),
])
def test_lte(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
self.assertLessEqual(spec1, spec2)
@parameterized.expand([
('foobar>=1.0', 'foobar==1.0'),
])
def test_cmp_invalid(self, spec1, spec2):
spec1, spec2 = Spec.parse(spec1), Spec.parse(spec2)
with self.assertRaises(AssertionError):
self.assertGreater(spec1, spec2)
with self.assertRaises(AssertionError):
self.assertLess(spec1, spec2)
|
none
| 1
| 2.809838
| 3
|
|
guild/plugins/dvc_stage_main.py
|
msarahan/guildai
| 0
|
6627420
|
<filename>guild/plugins/dvc_stage_main.py
# Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import os
import subprocess
import yaml
from guild import op_util
from guild import run as runlib
from guild import summary
from guild import util
from . import dvc_util
log = None
class State:
    """Shared state for running one DvC stage inside a Guild run.

    Collects the target stage name, the DvC project directory, the run
    directory (from the RUN_DIR env var), the parsed dvc.yaml config and
    the current (parent) Guild run.
    """
    def __init__(self, target_stage, project_dir):
        # Fail fast if the project does not contain a dvc.yaml.
        _assert_dvc_yaml(project_dir)
        self.target_stage = target_stage
        self.project_dir = project_dir
        # RUN_DIR is required; a missing value raises SystemExit.
        self.run_dir = _required_run_dir()
        self.run = runlib.for_dir(self.run_dir)
        # Stages executed so far.
        self.ran_stages = []
        self.dvc_config = _load_dvc_yaml(project_dir)
        self.parent_run = op_util.current_run()
def _assert_dvc_yaml(project_dir):
dvc_yaml = os.path.join(project_dir, "dvc.yaml")
if not os.path.exists(dvc_yaml):
_missing_dvc_yaml_error(project_dir)
def _required_run_dir():
run_dir = os.getenv("RUN_DIR")
if not run_dir:
raise SystemExit("missing required environment RUN_DIR for operation")
return run_dir
def _load_dvc_yaml(dir):
    """Parse and return the dvc.yaml config located in *dir*."""
    config_path = os.path.join(dir, "dvc.yaml")
    with open(config_path) as f:
        return yaml.safe_load(f)
def main():
    """Entry point: parse args, build State and run the target stage."""
    op_util.init_logging()
    # Bind the module-level `log` only after logging is configured.
    globals()["log"] = logging.getLogger("guild")
    args = _init_args()
    state = State(args.stage, args.project_dir)
    _handle_stage(state)
def _init_args():
    """Parse CLI args: a required stage and an optional --project-dir."""
    p = argparse.ArgumentParser()
    p.add_argument("--project-dir")
    p.add_argument("stage")
    args = p.parse_args()
    # --project-dir may also come from the PROJECT_DIR env var.
    _ensure_project_dir_arg(args)
    return args
def _ensure_project_dir_arg(args):
if not args.project_dir:
project_dir_env = os.getenv("PROJECT_DIR")
if not project_dir_env:
raise SystemExit(
"unspecified project dir - specify either --project-dir "
"or set the PROJECT_DIR environment variable"
)
args.project_dir = project_dir_env
def _missing_dvc_yaml_error(project_dir):
raise SystemExit(
"invalid DvC config directory '%s' - missing dvc.yaml" % project_dir
)
def _handle_stage(state):
    """Run the full stage lifecycle: init run dir, repro, log metrics."""
    _init_run_dir(state)
    _repro_run(state)
    _log_metrics_as_summaries(state)
def _init_run_dir(state):
    """Prepare the run directory so `dvc repro` can execute inside it."""
    log.info("Initializing run")
    _write_run_attrs(state)
    _init_dvc_repo(state)
    _copy_dvc_yaml(state)
    _resolve_deps(state)
    _copy_params_with_flags(state)
def _write_run_attrs(state):
    # Record which DvC stage this run executes as a run attribute.
    state.run.write_attr("dvc-stage", state.target_stage)
def _init_dvc_repo(state):
    """Initialize a DvC repo in the run dir, mirroring the project config."""
    try:
        dvc_util.ensure_dvc_repo(state.run_dir, state.project_dir)
    except dvc_util.DvcInitError as e:
        # Surface init failures as a clean exit rather than a traceback.
        raise SystemExit(str(e))
def _copy_dvc_yaml(state):
src = os.path.join(state.project_dir, "dvc.yaml")
if not os.path.exists(src):
raise SystemExit("missing dvc.yaml - cannot run DvC stage")
dest = os.path.join(state.run_dir, "dvc.yaml")
util.copyfile(src, dest)
def _resolve_deps(state):
    """Resolve stage dependencies, grouped by the stage that produces them.

    Deps produced by another stage are linked from that stage's run; deps
    with no producing stage come from the project or DvC remote storage.
    """
    for dep_stage, deps in dvc_util.iter_stage_deps_by_parent(
        state.target_stage, state.dvc_config
    ):
        # Skip deps that already exist in the run dir.
        deps = _filter_unresolved_deps(deps, state)
        if not deps:
            continue
        if dep_stage:
            _resolve_stage_deps(dep_stage, deps, state)
        else:
            _resolve_project_deps(deps, state)
def _filter_unresolved_deps(deps, state):
return [dep for dep in deps if not os.path.exists(os.path.join(state.run_dir, dep))]
def _resolve_stage_deps(stage, deps, state):
    """Link deps from the marked-or-latest run of the producing stage."""
    stage_run = dvc_util.marked_or_latest_run_for_stage(stage)
    if not stage_run:
        _no_suitable_run_for_stage_error(stage, deps)
    log.info("Using %s for '%s' DvC stage dependency", stage_run.id, stage)
    _link_op_deps(stage_run, deps, state)
def _no_suitable_run_for_stage_error(stage, deps):
deps_desc = ", ".join(sorted(deps))
raise SystemExit(
"no suitable run for stage '%s' (needed for %s)" % (stage, deps_desc)
)
def _link_op_deps(run, deps, state):
    """Symlink each dep from another run's directory into this run dir."""
    for dep in deps:
        target = os.path.join(run.dir, dep)
        # Relative links keep the run dir relocatable.
        rel_target = os.path.relpath(target, state.run_dir)
        link = os.path.join(state.run_dir, dep)
        log.info("Linking %s", dep)
        util.ensure_dir(os.path.dirname(link))
        util.symlink(rel_target, link)
def _resolve_project_deps(deps, state):
    """Copy/link deps found in the project; pull the rest from DvC storage."""
    for dep in deps:
        if _is_project_file(dep, state):
            _copy_or_link_project_file(dep, state)
        else:
            _pull_dep(dep, state)
def _is_project_file(dep, state):
path = os.path.join(state.project_dir, dep)
return os.path.exists(path)
def _copy_or_link_project_file(dep, state):
    """Copy regular files into the run dir; symlink anything else."""
    dep_path = os.path.join(state.project_dir, dep)
    if _can_copy_dep(dep_path):
        _copy_project_file(dep_path, dep, state)
    else:
        _link_project_file(dep_path, dep, state)
def _can_copy_dep(dep_path):
return os.path.isfile(dep_path)
def _copy_project_file(src, dep, state):
    # Copy the project file into the run dir under the same relative path.
    dest = os.path.join(state.run_dir, dep)
    log.info("Copying %s", dep)
    util.copyfile(src, dest)
def _link_project_file(src, dep, state):
    # Relative symlink so large artifacts/directories are not copied.
    link = os.path.join(state.run_dir, dep)
    rel_src = os.path.relpath(src, os.path.dirname(link))
    log.info("Linking to %s", dep)
    util.symlink(rel_src, link)
def _pull_dep(dep, state):
    """Fetch a dep from DvC remote storage into the run dir."""
    log.info("Fetching %s", dep)
    try:
        dvc_util.pull_dvc_dep(dep, state.run_dir, state.project_dir)
    except dvc_util.DvcPullError as e:
        # Report pull failures as a clean exit rather than a traceback.
        raise SystemExit(str(e))
def _copy_params_with_flags(state):
    """Copy each params file the stage references into the run dir.

    Files already present in the run dir (e.g. resolved as deps) are left
    alone; a params file missing from the project is a hard error.
    """
    for name in _iter_stage_param_files(state):
        dest = os.path.join(state.run_dir, name)
        if os.path.exists(dest):
            continue
        src = os.path.join(state.project_dir, name)
        if not os.path.exists(src):
            raise SystemExit(
                "cannot find config file '%s' in project directory %s"
                % (name, state.project_dir)
            )
        log.info("Copying %s", name)
        util.copyfile(src, dest)
def _iter_stage_param_files(state):
    """Yield each distinct params file referenced by the target stage.

    Several params may live in the same file; a `seen` set ensures each
    file is yielded only once.
    """
    seen = set()
    for _param, filename in dvc_util.iter_stage_params(
        state.target_stage, state.dvc_config
    ):
        # Idiomatic membership test (`not in` instead of `not x in`).
        if filename not in seen:
            seen.add(filename)
            yield filename
def _repro_run(state):
    """Run `dvc repro --single-item <stage>` inside the run directory."""
    cmd = ["dvc", "repro", "--single-item", state.target_stage]
    if not _debug_enabled():
        # Keep dvc quiet unless Guild is running at debug level.
        cmd.append("--quiet")
    log.info("Running stage '%s'", state.target_stage)
    p = subprocess.Popen(cmd, cwd=state.run_dir)
    returncode = p.wait()
    if returncode != 0:
        raise SystemExit(
            "'dvc repro %s' failed (exit code %i) - see above for details"
            % (util.shlex_quote(state.target_stage), returncode)
        )
def _debug_enabled():
    # True when the guild logger is configured at DEBUG level or lower.
    return log.getEffectiveLevel() <= logging.DEBUG
def _log_metrics_as_summaries(state):
    """Write the stage's metrics as scalar summaries in the run dir."""
    with summary.SummaryWriter(state.run_dir) as events:
        for metrics_name, metrics_data in dvc_util.iter_stage_metrics_data(
            state.target_stage,
            state.run_dir,
        ):
            log.info("Logging metrics from %s", metrics_name)
            for tag, val in _iter_metrics_scalars(metrics_data):
                events.add_scalar(tag, val)
def _iter_metrics_scalars(data):
    """Yield (name, value) for each numeric leaf in a nested metrics dict."""
    if not isinstance(data, dict):
        # Non-dict metrics payloads are ignored.
        return
    # Flatten nested dicts into single-level names.
    # NOTE(review): exact name format depends on util.encode_nested_config -
    # confirm against guild.util.
    flattened_data = util.encode_nested_config(data)
    for name, val in flattened_data.items():
        if isinstance(val, (int, float)):
            yield name, val
if __name__ == "__main__":
    try:
        main()
    except SystemExit as e:
        # Route exits through Guild's handler for consistent error output.
        op_util.handle_system_exit(e)
|
<filename>guild/plugins/dvc_stage_main.py
# Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import os
import subprocess
import yaml
from guild import op_util
from guild import run as runlib
from guild import summary
from guild import util
from . import dvc_util
log = None
class State:
def __init__(self, target_stage, project_dir):
_assert_dvc_yaml(project_dir)
self.target_stage = target_stage
self.project_dir = project_dir
self.run_dir = _required_run_dir()
self.run = runlib.for_dir(self.run_dir)
self.ran_stages = []
self.dvc_config = _load_dvc_yaml(project_dir)
self.parent_run = op_util.current_run()
def _assert_dvc_yaml(project_dir):
dvc_yaml = os.path.join(project_dir, "dvc.yaml")
if not os.path.exists(dvc_yaml):
_missing_dvc_yaml_error(project_dir)
def _required_run_dir():
run_dir = os.getenv("RUN_DIR")
if not run_dir:
raise SystemExit("missing required environment RUN_DIR for operation")
return run_dir
def _load_dvc_yaml(dir):
with open(os.path.join(dir, "dvc.yaml")) as f:
return yaml.safe_load(f)
def main():
op_util.init_logging()
globals()["log"] = logging.getLogger("guild")
args = _init_args()
state = State(args.stage, args.project_dir)
_handle_stage(state)
def _init_args():
p = argparse.ArgumentParser()
p.add_argument("--project-dir")
p.add_argument("stage")
args = p.parse_args()
_ensure_project_dir_arg(args)
return args
def _ensure_project_dir_arg(args):
if not args.project_dir:
project_dir_env = os.getenv("PROJECT_DIR")
if not project_dir_env:
raise SystemExit(
"unspecified project dir - specify either --project-dir "
"or set the PROJECT_DIR environment variable"
)
args.project_dir = project_dir_env
def _missing_dvc_yaml_error(project_dir):
raise SystemExit(
"invalid DvC config directory '%s' - missing dvc.yaml" % project_dir
)
def _handle_stage(state):
_init_run_dir(state)
_repro_run(state)
_log_metrics_as_summaries(state)
def _init_run_dir(state):
log.info("Initializing run")
_write_run_attrs(state)
_init_dvc_repo(state)
_copy_dvc_yaml(state)
_resolve_deps(state)
_copy_params_with_flags(state)
def _write_run_attrs(state):
state.run.write_attr("dvc-stage", state.target_stage)
def _init_dvc_repo(state):
try:
dvc_util.ensure_dvc_repo(state.run_dir, state.project_dir)
except dvc_util.DvcInitError as e:
raise SystemExit(str(e))
def _copy_dvc_yaml(state):
src = os.path.join(state.project_dir, "dvc.yaml")
if not os.path.exists(src):
raise SystemExit("missing dvc.yaml - cannot run DvC stage")
dest = os.path.join(state.run_dir, "dvc.yaml")
util.copyfile(src, dest)
def _resolve_deps(state):
    """Resolve the target stage's dependencies into the run directory.

    Deps are grouped by the parent stage that produces them; deps with no
    parent stage are resolved from the project itself.
    """
    for dep_stage, deps in dvc_util.iter_stage_deps_by_parent(
        state.target_stage, state.dvc_config
    ):
        # Skip deps already present in the run dir.
        deps = _filter_unresolved_deps(deps, state)
        if not deps:
            continue
        if dep_stage:
            _resolve_stage_deps(dep_stage, deps, state)
        else:
            _resolve_project_deps(deps, state)
def _filter_unresolved_deps(deps, state):
return [dep for dep in deps if not os.path.exists(os.path.join(state.run_dir, dep))]
def _resolve_stage_deps(stage, deps, state):
    """Link `deps` produced by a previous run of `stage` into the run dir.

    Uses the marked run for the stage if one exists, otherwise the latest;
    exits if no suitable run can be found.
    """
    stage_run = dvc_util.marked_or_latest_run_for_stage(stage)
    if not stage_run:
        _no_suitable_run_for_stage_error(stage, deps)
    log.info("Using %s for '%s' DvC stage dependency", stage_run.id, stage)
    _link_op_deps(stage_run, deps, state)
def _no_suitable_run_for_stage_error(stage, deps):
deps_desc = ", ".join(sorted(deps))
raise SystemExit(
"no suitable run for stage '%s' (needed for %s)" % (stage, deps_desc)
)
def _link_op_deps(run, deps, state):
    """Symlink each dep from `run`'s directory into the run directory.

    Links are relative so the run directory stays relocatable.
    """
    for dep in deps:
        target = os.path.join(run.dir, dep)
        link = os.path.join(state.run_dir, dep)
        log.info("Linking %s", dep)
        util.ensure_dir(os.path.dirname(link))
        # Compute the relative path from the link's own directory, not from
        # the run dir root -- otherwise links for nested deps (e.g. "data/x")
        # resolve one level too shallow. This matches _link_project_file.
        rel_target = os.path.relpath(target, os.path.dirname(link))
        util.symlink(rel_target, link)
def _resolve_project_deps(deps, state):
    """Copy/link deps found in the project dir; pull the rest via DvC."""
    for dep in deps:
        if _is_project_file(dep, state):
            _copy_or_link_project_file(dep, state)
        else:
            _pull_dep(dep, state)
def _is_project_file(dep, state):
path = os.path.join(state.project_dir, dep)
return os.path.exists(path)
def _copy_or_link_project_file(dep, state):
    """Copy a regular-file dep into the run dir; symlink anything else."""
    dep_path = os.path.join(state.project_dir, dep)
    if _can_copy_dep(dep_path):
        _copy_project_file(dep_path, dep, state)
    else:
        # Directories (and other non-regular-files) are linked, not copied.
        _link_project_file(dep_path, dep, state)
def _can_copy_dep(dep_path):
    # Only regular files are copied; everything else is symlinked.
    return os.path.isfile(dep_path)
def _copy_project_file(src, dep, state):
    """Copy project file `src` to path `dep` under the run directory."""
    dest = os.path.join(state.run_dir, dep)
    log.info("Copying %s", dep)
    util.copyfile(src, dest)
def _link_project_file(src, dep, state):
    """Symlink `dep` in the run dir to project path `src` (relative link)."""
    link = os.path.join(state.run_dir, dep)
    # Relative to the link's directory so the run dir can be relocated.
    rel_src = os.path.relpath(src, os.path.dirname(link))
    log.info("Linking to %s", dep)
    util.symlink(rel_src, link)
def _pull_dep(dep, state):
    """Fetch `dep` via DvC into the run directory, exiting cleanly on error."""
    log.info("Fetching %s", dep)
    try:
        dvc_util.pull_dvc_dep(dep, state.run_dir, state.project_dir)
    except dvc_util.DvcPullError as e:
        raise SystemExit(str(e))
def _copy_params_with_flags(state):
    """Copy each params/config file used by the stage into the run dir.

    Files already present in the run dir (e.g. resolved earlier as deps)
    are left untouched; a missing source file is a fatal error.
    """
    for name in _iter_stage_param_files(state):
        dest = os.path.join(state.run_dir, name)
        if os.path.exists(dest):
            continue
        src = os.path.join(state.project_dir, name)
        if not os.path.exists(src):
            raise SystemExit(
                "cannot find config file '%s' in project directory %s"
                % (name, state.project_dir)
            )
        log.info("Copying %s", name)
        util.copyfile(src, dest)
def _iter_stage_param_files(state):
    """Yield each distinct params file used by the target stage.

    Files are yielded in first-seen order; a file backing several params
    is emitted only once.
    """
    seen = set()
    for _param, filename in dvc_util.iter_stage_params(
        state.target_stage, state.dvc_config
    ):
        # PEP 8: `not in` rather than `not x in y`.
        if filename not in seen:
            seen.add(filename)
            yield filename
def _repro_run(state):
    """Run `dvc repro` for the target stage inside the run directory."""
    cmd = ["dvc", "repro", "--single-item", state.target_stage]
    if not _debug_enabled():
        cmd.append("--quiet")
    log.info("Running stage '%s'", state.target_stage)
    proc = subprocess.Popen(cmd, cwd=state.run_dir)
    exit_code = proc.wait()
    if exit_code != 0:
        raise SystemExit(
            "'dvc repro %s' failed (exit code %i) - see above for details"
            % (util.shlex_quote(state.target_stage), exit_code)
        )
def _debug_enabled():
    # DEBUG or finer => let dvc print its full output (no --quiet).
    return log.getEffectiveLevel() <= logging.DEBUG
def _log_metrics_as_summaries(state):
    """Write the stage's DvC metrics to the run as summary scalars."""
    with summary.SummaryWriter(state.run_dir) as events:
        for metrics_name, metrics_data in dvc_util.iter_stage_metrics_data(
            state.target_stage,
            state.run_dir,
        ):
            log.info("Logging metrics from %s", metrics_name)
            for tag, val in _iter_metrics_scalars(metrics_data):
                events.add_scalar(tag, val)
def _iter_metrics_scalars(data):
if not isinstance(data, dict):
return
flattened_data = util.encode_nested_config(data)
for name, val in flattened_data.items():
if isinstance(val, (int, float)):
yield name, val
if __name__ == "__main__":
    try:
        main()
    except SystemExit as e:
        # Let Guild apply its standard SystemExit formatting and exit code.
        op_util.handle_system_exit(e)
|
en
| 0.839864
|
# Copyright 2017-2022 TensorHub, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.04881
| 2
|
weighted_average.py
|
yixinxu2020/kaggle-aptos2019-blindness-detection
| 40
|
6627421
|
<filename>weighted_average.py<gh_stars>10-100
import pandas as pd
def main():
probs = {}
probs['se_resnext50_32x4d'] = pd.read_csv('probs/se_resnext50_32x4d_080922.csv')['diagnosis'].values
probs['se_resnext101_32x4d'] = pd.read_csv('probs/se_resnext101_32x4d_081208.csv')['diagnosis'].values
probs['senet154'] = pd.read_csv('probs/senet154_082510.csv')['diagnosis'].values
test_df = pd.read_csv('inputs/test.csv')
test_df['diagnosis'] = 0.4 * probs['se_resnext50_32x4d'] + 0.3 * probs['se_resnext101_32x4d'] + 0.3 * probs['senet154']
test_df.to_csv('probs/weighted_average.csv', index=False)
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>weighted_average.py<gh_stars>10-100
import pandas as pd
def main():
probs = {}
probs['se_resnext50_32x4d'] = pd.read_csv('probs/se_resnext50_32x4d_080922.csv')['diagnosis'].values
probs['se_resnext101_32x4d'] = pd.read_csv('probs/se_resnext101_32x4d_081208.csv')['diagnosis'].values
probs['senet154'] = pd.read_csv('probs/senet154_082510.csv')['diagnosis'].values
test_df = pd.read_csv('inputs/test.csv')
test_df['diagnosis'] = 0.4 * probs['se_resnext50_32x4d'] + 0.3 * probs['se_resnext101_32x4d'] + 0.3 * probs['senet154']
test_df.to_csv('probs/weighted_average.csv', index=False)
if __name__ == '__main__':
main()
|
none
| 1
| 2.785305
| 3
|
|
simqle/constants.py
|
Harlekuin/SimQLe
| 37
|
6627422
|
"""Load Constants."""
from os.path import expanduser, join
# Maps deployment mode to the connections-file section used in that mode.
DEV_MAP = {
    "production": "connections",
    "development": "dev-connections",
    "testing": "test-connections",
}

# Candidate connections-file paths, searched in order.
DEFAULT_FILE_LOCATIONS = [
    "./.connections.yaml",
    # the home folder on either Linux or Windows
    join(expanduser("~"), ".connections.yaml")
]
|
"""Load Constants."""
from os.path import expanduser, join
DEV_MAP = {
"production": "connections",
"development": "dev-connections",
"testing": "test-connections",
}
DEFAULT_FILE_LOCATIONS = [
"./.connections.yaml",
# the home folder on either Linux or Windows
join(expanduser("~"), ".connections.yaml")
]
|
en
| 0.647826
|
Load Constants. # the home folder on either Linux or Windows
| 1.659347
| 2
|
conu/helpers/docker_backend.py
|
lslebodn/conu
| 95
|
6627423
|
from conu import DockerRunBuilder
def get_container_output(backend, image_name, command, image_tag="latest",
                         additional_opts=None):
    """
    Run *command* in a throw-away container and return its output.

    A container is created from the given image and tag via the docker
    binary, the command runs to completion, and the container is stopped
    and removed afterwards regardless of outcome.

    :param backend: instance of DockerBackend
    :param image_name: str, name of the container image
    :param command: list of str, command to run in the container
    :param image_tag: str, container image tag, defaults to "latest"
    :param additional_opts: list of str, extra options passed to the
        underlying "docker run" invocation
    :return: str (unicode), output of the container
    """
    image = backend.ImageClass(image_name, tag=image_tag)
    # FIXME: use run_via_api and make this a generic function
    builder = DockerRunBuilder(command=command, additional_opts=additional_opts)
    container = image.run_via_binary(builder)
    try:
        container.wait()
        return container.logs_unicode()
    finally:
        container.stop()
        container.wait()
        container.delete()
|
from conu import DockerRunBuilder
def get_container_output(backend, image_name, command, image_tag="latest",
additional_opts=None):
"""
Create a throw-away container based on provided image and tag, run the supplied command in it
and return output. The container is stopped and removed after it exits.
:param backend: instance of DockerBackend
:param image_name: str, name of the container image
:param command: list of str, command to run in the container
:param image_tag: str, container image tag, defaults to "latest"
:param additional_opts: list of str, by default this function creates the container using
docker binary and run command; with this argument you can supply addition options to the
"docker run" invocation
:return: str (unicode), output of the container
"""
image = backend.ImageClass(image_name, tag=image_tag)
# FIXME: use run_via_api and make this a generic function
c = image.run_via_binary(DockerRunBuilder(command=command, additional_opts=additional_opts))
try:
c.wait()
return c.logs_unicode()
finally:
c.stop()
c.wait()
c.delete()
|
en
| 0.772664
|
Create a throw-away container based on provided image and tag, run the supplied command in it and return output. The container is stopped and removed after it exits. :param backend: instance of DockerBackend :param image_name: str, name of the container image :param command: list of str, command to run in the container :param image_tag: str, container image tag, defaults to "latest" :param additional_opts: list of str, by default this function creates the container using docker binary and run command; with this argument you can supply addition options to the "docker run" invocation :return: str (unicode), output of the container # FIXME: use run_via_api and make this a generic function
| 3.145793
| 3
|
setup.py
|
radovankavicky/pymaclab
| 1
|
6627424
|
#!/usr/bin/env python
import os
import sys
import shutil
import glob

# Directory containing this setup.py (empty string when run as ./setup.py).
loco = sys.argv[0]
loco = loco.split('setup.py')[0]

# Read the requirements file into [package, version] pairs.
# NOTE(review): lines pinned with '==' split on the first '=' and yield an
# empty version string here; reqli is not referenced again below -- confirm
# whether this list is still needed at all.
filo = open(os.path.join(loco,'requirements.txt'),'r')
lines = filo.readlines()
filo.close()
reqli = []
for lino in lines:
    if '=' in lino:
        packname = lino.split('=')[0].strip()
        version = lino.split('=')[1].strip()
        reqli.append([packname,version])
    else:
        reqli.append([lino.strip(),''])
######################################################################
# Check dependencies and install using pip
######################################################################
# Now check for numpy and install if needed
numpy_version = False
try:
import numpy
numpy_version = numpy.__version__
if numpy_version and numpy_version != '1.6.2':
print "You need Numpy version 1.6.2 for PyMacLab. Re-installing correct version now..."
os.system("pip install numpy==1.6.2")
except:
print "Numpy not detected. Fetching version 1.6.2 now using pip..."
os.system("pip install numpy==1.6.2")
# Now check for scipy and install if needed
scipy_version = False
try:
import scipy
scipy_version = scipy.__version__
if scipy_version and scipy_version != '0.11.0':
print "You need Scipy version 0.11.0 for PyMacLab. Re-installing correct version now..."
os.system("pip install scipy==0.11.0")
except:
print "Scipy not detected. Fetching version 0.11.0 now using pip..."
os.system("pip install scipy==0.11.0")
# Now check for matplotlib and install if needed
matplotlib_version = False
try:
import matplotlib
matplotlib_version = matplotlib.__version__
if matplotlib_version and matplotlib_version != '1.1.0':
print "You need Matplotlib version 1.1.0 for PyMacLab. Re-installing correct version now..."
os.system("pip install matplotlib==1.1.0")
except:
print "Matplotlib not detected. Fetching now using pip..."
os.system("pip install matplotlib==1.1.0")
# Now check for pandas and install if needed
pandas_version = False
try:
import pandas
pandas_version = pandas.__version__
if pandas_version and pandas_version != '0.10.1':
print "You need Pandas version 0.10.1 for PyMacLab. Re-installing correct version now..."
os.system("pip install pandas==0.10.1")
except:
print "Pandas not detected. Fetching now using pip..."
os.system("pip install pandas==0.10.1")
# Now check for ipython and install if needed
ipython_version = False
try:
import IPython
ipython_version = IPython.__version__
if ipython_version and ipython_version != '0.13.1':
print "You should use IPython version 0.13.1 for PyMacLab. Re-installing correct version now..."
os.system("pip install ipython==0.13.1")
except:
print "IPython not detected but is fun to use with PyMacLab. Fetching now using pip..."
os.system("pip install ipython==0.13.1")
'''
# Now check for pp and install if needed
pp_version = False
try:
import pp
pp_version = pp.version
if pp_version and pp_version != '1.6.2':
print "You should use PP version 1.6.2 for PyMacLab. Re-installing correct version now..."
os.system("pip install pp==1.6.2")
except:
print "PP not detected but makes PyMacLab much faster. Fetching now using pip..."
os.system("pip install pp==1.6.2")
'''
# Now check for wheezy.template and install if needed, but cannot check version so just install any latest
try:
import wheezy.template
except:
print "Wheezy.template not detected but used in PyMacLab. Fetching now using pip..."
os.system("pip install wheezy.template")
# Now check for mako and install if needed, but cannot check version so just install any latest
try:
import mako
except:
print "Mako not detected but used in PyMacLab. Fetching now using pip..."
os.system("pip install Mako")
##########################################################################
# Done checking for dependencies
##########################################################################
from datetime import datetime
# import setuptools # have to do this to be able to setup.py develop
from numpy.distutils.core import setup
from numpy import get_include
# Remove the build directory
if 'build' in os.listdir(os.getcwd()): shutil.rmtree(os.path.join(os.getcwd(),'build'))
DESCRIPTION="The Python Macroeconomics Library"
LONG_DESCRIPTION="""
PyMacLab stands for Python Macroeconomics Library which currently primarily serves the purposes of providing
a convenience framework written in Python to solve non-linear DSGE models. At the time of writing this the library
supports solving DSGE models using 1st and 2nd order perturbation methods which are computed around the steady state.
In particular, the library provides wrapper function for [Paul Klein's](http://paulklein.ca/newsite/start/start.php)
1st-order accurate method based on the Schur Decomposition as well a more recently published method by the same author
(co-authored with <NAME>, see [here](http://ideas.repec.org/a/eee/dyncon/v35y2011i4p604-615.html)) which provides
2nd-order accurate solutions without using Tensor Algebra (using the Magnus and Neudecker 1999 definition of the
Hessian matrix).
The library is extremely user-friendly in the sense of employing a model text file parser similar to that present in
[Dynare](http://www.dynare.org/) which requires users to only write down the original set of non-linear first-order
conditions of optimality. In addition, users are offered a menu of options of how to provide information required for
calculating the steady state of the model. Once the model is parsed and read in, several options of solving it exist
and users are provided with further convenience methods suitable for simulating solved models and investigating dynamic
statistical properties.
It should also be mentioned that because PyMacLab is a convenience library of highly modular nature (using
a object-oriented programming approach) it is very easy to loop over one model several thousand times each time changing
the original set of primitive parameters, such as depreciation rates, impatience factors, etc. in order to compute
solutions of the same model over a large set of conceivable parameterisations. Also, whenever solution methods require
the calculation of the Jacobian or Hessian, this is always done analytically (symbolically) using the Python
symbolic computation library [SympyCore](http://code.google.com/p/sympycore/) and not numerically as in other software
packages. Sympycore is not supplanted by Sympy, but it works well at the moment so we will alter PyMacLab at a later
stage to reflect this.
PyMacLab was authored by [<NAME>](http://www.ericscheffel.com) who is currently working as [Assistant Professor
in Economics at Nottingham University China](http://www.nottingham.edu.cn/en/business/people/staffprofile/eric-scheffel.aspx)
and is distributed under the GNU General Public License v3.0.
"""
DISTNAME = 'pymaclab'
LICENSE ="""
Copyright 2007-2012 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
AUTHOR = "<NAME>"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
URL = 'http://github.com/escheffel/pymaclab/'
DOWNLOAD_URL="http://github.com/escheffel/pymaclab/tarball/v0.90.1"
CLASSIFIERS=["Scientific", "Macroeconomics", "General Equilibrium", "DSGE", "Time Series"]
MAJOR = 0
MINOR = 90
MICRO = 1
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev'
def write_version_py(filename='./version.py'):
    """Write a version.py module recording the full package version."""
    template = """\
from datetime import datetime
version = '%s'
"""
    with open(filename, 'w') as out:
        out.write(template % FULLVERSION)
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
    """Build the numpy.distutils Configuration for the pymaclab package."""
    # write_version_py()
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path,
                           version=FULLVERSION)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('pymaclab')
    return config
if __name__ == '__main__':
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
configuration=configuration)
'''
# Find the standard site-packages absolute path
for elem in sys.path:
if 'site-packages' in elem: pathos = os.path.join(elem.split('site-packages')[0],'site-packages','pymaclab')
'''
# Install pp and sympycore, but first remove old stuff
# Delete old sympycore stuff
try:
# We don't want to import the package in the current working directory!
sys.path.pop(0)
import sympycore as sc
patho = sc.__path__
shutil.rmtree(patho[0])
globo = glob.glob(patho[0]+'*')
for elem in globo:
lasto = elem.split(r'/')[-1]
if lasto in os.listdir(elem.split(lasto)[0]):
shutil.rmtree(elem)
except:
pass
# Delete old pp stuff
try:
# We don't want to import the package in the current working directory!
sys.path.pop(0)
import pp as ppp
filo = ppp.__file__
nameos = ['pptransport.py','pptransport.pyc',
'ppauto.py','ppauto.pyc',
'ppcommon.py','ppcommon.pyc',
'pp.py','pp.pyc',
'ppworker.py','ppworker.pyc',
'pp-*.egg-info']
diro = filo.split('pp.')[0]
for elem in nameos:
if '*' not in nameos:
if elem in os.listdir(diro):
shutil.rmtree(os.path.join(diro,elem))
else:
globo = glob.glob(os.path.join(diro,elem))
for elem2 in globo:
lasto = elem2.split(r'/')[-1]
if lasto in os.listdir(elem2.split(lasto)[0]):
shutil.rmtree(elem2)
shutil.rmtree(patho[0])
except:
pass
# Now insert the current directory back in
sys.path[0] = ''
os.system("python setup_pp.py install")
os.system("python setup_sympycore.py install")
|
#!/usr/bin/env python
import os
import sys
import shutil
import glob
loco = sys.argv[0]
loco = loco.split('setup.py')[0]
# Read the requirements file
filo = open(os.path.join(loco,'requirements.txt'),'r')
lines = filo.readlines()
filo.close()
reqli = []
for lino in lines:
if '=' in lino:
packname = lino.split('=')[0].strip()
version = lino.split('=')[1].strip()
reqli.append([packname,version])
else:
reqli.append([lino.strip(),''])
######################################################################
# Check dependencies and install using pip
######################################################################
# Now check for numpy and install if needed
numpy_version = False
try:
import numpy
numpy_version = numpy.__version__
if numpy_version and numpy_version != '1.6.2':
print "You need Numpy version 1.6.2 for PyMacLab. Re-installing correct version now..."
os.system("pip install numpy==1.6.2")
except:
print "Numpy not detected. Fetching version 1.6.2 now using pip..."
os.system("pip install numpy==1.6.2")
# Now check for scipy and install if needed
scipy_version = False
try:
import scipy
scipy_version = scipy.__version__
if scipy_version and scipy_version != '0.11.0':
print "You need Scipy version 0.11.0 for PyMacLab. Re-installing correct version now..."
os.system("pip install scipy==0.11.0")
except:
print "Scipy not detected. Fetching version 0.11.0 now using pip..."
os.system("pip install scipy==0.11.0")
# Now check for matplotlib and install if needed
matplotlib_version = False
try:
import matplotlib
matplotlib_version = matplotlib.__version__
if matplotlib_version and matplotlib_version != '1.1.0':
print "You need Matplotlib version 1.1.0 for PyMacLab. Re-installing correct version now..."
os.system("pip install matplotlib==1.1.0")
except:
print "Matplotlib not detected. Fetching now using pip..."
os.system("pip install matplotlib==1.1.0")
# Now check for pandas and install if needed
pandas_version = False
try:
import pandas
pandas_version = pandas.__version__
if pandas_version and pandas_version != '0.10.1':
print "You need Pandas version 0.10.1 for PyMacLab. Re-installing correct version now..."
os.system("pip install pandas==0.10.1")
except:
print "Pandas not detected. Fetching now using pip..."
os.system("pip install pandas==0.10.1")
# Now check for ipython and install if needed
ipython_version = False
try:
import IPython
ipython_version = IPython.__version__
if ipython_version and ipython_version != '0.13.1':
print "You should use IPython version 0.13.1 for PyMacLab. Re-installing correct version now..."
os.system("pip install ipython==0.13.1")
except:
print "IPython not detected but is fun to use with PyMacLab. Fetching now using pip..."
os.system("pip install ipython==0.13.1")
'''
# Now check for pp and install if needed
pp_version = False
try:
import pp
pp_version = pp.version
if pp_version and pp_version != '1.6.2':
print "You should use PP version 1.6.2 for PyMacLab. Re-installing correct version now..."
os.system("pip install pp==1.6.2")
except:
print "PP not detected but makes PyMacLab much faster. Fetching now using pip..."
os.system("pip install pp==1.6.2")
'''
# Now check for wheezy.template and install if needed, but cannot check version so just install any latest
try:
import wheezy.template
except:
print "Wheezy.template not detected but used in PyMacLab. Fetching now using pip..."
os.system("pip install wheezy.template")
# Now check for mako and install if needed, but cannot check version so just install any latest
try:
import mako
except:
print "Mako not detected but used in PyMacLab. Fetching now using pip..."
os.system("pip install Mako")
##########################################################################
# Done checking for dependencies
##########################################################################
from datetime import datetime
# import setuptools # have to do this to be able to setup.py develop
from numpy.distutils.core import setup
from numpy import get_include
# Remove the build directory
if 'build' in os.listdir(os.getcwd()): shutil.rmtree(os.path.join(os.getcwd(),'build'))
DESCRIPTION="The Python Macroeconomics Library"
LONG_DESCRIPTION="""
PyMacLab stands for Python Macroeconomics Library which currently primarily serves the purposes of providing
a convenience framework written in Python to solve non-linear DSGE models. At the time of writing this the library
supports solving DSGE models using 1st and 2nd order perturbation methods which are computed around the steady state.
In particular, the library provides wrapper function for [Paul Klein's](http://paulklein.ca/newsite/start/start.php)
1st-order accurate method based on the Schur Decomposition as well a more recently published method by the same author
(co-authored with <NAME>, see [here](http://ideas.repec.org/a/eee/dyncon/v35y2011i4p604-615.html)) which provides
2nd-order accurate solutions without using Tensor Algebra (using the Magnus and Neudecker 1999 definition of the
Hessian matrix).
The library is extremely user-friendly in the sense of employing a model text file parser similar to that present in
[Dynare](http://www.dynare.org/) which requires users to only write down the original set of non-linear first-order
conditions of optimality. In addition, users are offered a menu of options of how to provide information required for
calculating the steady state of the model. Once the model is parsed and read in, several options of solving it exist
and users are provided with further convenience methods suitable for simulating solved models and investigating dynamic
statistical properties.
It should also be mentioned that because PyMacLab is a convenience library of highly modular nature (using
a object-oriented programming approach) it is very easy to loop over one model several thousand times each time changing
the original set of primitive parameters, such as depreciation rates, impatience factors, etc. in order to compute
solutions of the same model over a large set of conceivable parameterisations. Also, whenever solution methods require
the calculation of the Jacobian or Hessian, this is always done analytically (symbolically) using the Python
symbolic computation library [SympyCore](http://code.google.com/p/sympycore/) and not numerically as in other software
packages. Sympycore is not supplanted by Sympy, but it works well at the moment so we will alter PyMacLab at a later
stage to reflect this.
PyMacLab was authored by [<NAME>](http://www.ericscheffel.com) who is currently working as [Assistant Professor
in Economics at Nottingham University China](http://www.nottingham.edu.cn/en/business/people/staffprofile/eric-scheffel.aspx)
and is distributed under the GNU General Public License v3.0.
"""
DISTNAME = 'pymaclab'
LICENSE ="""
Copyright 2007-2012 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
AUTHOR = "<NAME>"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
URL = 'http://github.com/escheffel/pymaclab/'
DOWNLOAD_URL="http://github.com/escheffel/pymaclab/tarball/v0.90.1"
CLASSIFIERS=["Scientific", "Macroeconomics", "General Equilibrium", "DSGE", "Time Series"]
MAJOR = 0
MINOR = 90
MICRO = 1
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.dev'
def write_version_py(filename='./version.py'):
cnt = """\
from datetime import datetime
version = '%s'
"""
a = open(filename, 'w')
try:
a.write(cnt % FULLVERSION)
finally:
a.close()
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
# write_version_py()
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path,
version=FULLVERSION)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('pymaclab')
return config
if __name__ == '__main__':
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
configuration=configuration)
'''
# Find the standard site-packages absolute path
for elem in sys.path:
if 'site-packages' in elem: pathos = os.path.join(elem.split('site-packages')[0],'site-packages','pymaclab')
'''
# Install pp and sympycore, but first remove old stuff
# Delete old sympycore stuff
try:
# We don't want to import the package in the current working directory!
sys.path.pop(0)
import sympycore as sc
patho = sc.__path__
shutil.rmtree(patho[0])
globo = glob.glob(patho[0]+'*')
for elem in globo:
lasto = elem.split(r'/')[-1]
if lasto in os.listdir(elem.split(lasto)[0]):
shutil.rmtree(elem)
except:
pass
# Delete old pp stuff
try:
# We don't want to import the package in the current working directory!
sys.path.pop(0)
import pp as ppp
filo = ppp.__file__
nameos = ['pptransport.py','pptransport.pyc',
'ppauto.py','ppauto.pyc',
'ppcommon.py','ppcommon.pyc',
'pp.py','pp.pyc',
'ppworker.py','ppworker.pyc',
'pp-*.egg-info']
diro = filo.split('pp.')[0]
for elem in nameos:
if '*' not in nameos:
if elem in os.listdir(diro):
shutil.rmtree(os.path.join(diro,elem))
else:
globo = glob.glob(os.path.join(diro,elem))
for elem2 in globo:
lasto = elem2.split(r'/')[-1]
if lasto in os.listdir(elem2.split(lasto)[0]):
shutil.rmtree(elem2)
shutil.rmtree(patho[0])
except:
pass
# Now insert the current directory back in
sys.path[0] = ''
os.system("python setup_pp.py install")
os.system("python setup_sympycore.py install")
|
en
| 0.804239
|
#!/usr/bin/env python # Read the requirements file ###################################################################### # Check dependencies and install using pip ###################################################################### # Now check for numpy and install if needed # Now check for scipy and install if needed # Now check for matplotlib and install if needed # Now check for pandas and install if needed # Now check for ipython and install if needed # Now check for pp and install if needed pp_version = False try: import pp pp_version = pp.version if pp_version and pp_version != '1.6.2': print "You should use PP version 1.6.2 for PyMacLab. Re-installing correct version now..." os.system("pip install pp==1.6.2") except: print "PP not detected but makes PyMacLab much faster. Fetching now using pip..." os.system("pip install pp==1.6.2") # Now check for wheezy.template and install if needed, but cannot check version so just install any latest # Now check for mako and install if needed, but cannot check version so just install any latest ########################################################################## # Done checking for dependencies ########################################################################## # import setuptools # have to do this to be able to setup.py develop # Remove the build directory PyMacLab stands for Python Macroeconomics Library which currently primarily serves the purposes of providing a convenience framework written in Python to solve non-linear DSGE models. At the time of writing this the library supports solving DSGE models using 1st and 2nd order perturbation methods which are computed around the steady state. 
In particular, the library provides wrapper function for [Paul Klein's](http://paulklein.ca/newsite/start/start.php) 1st-order accurate method based on the Schur Decomposition as well a more recently published method by the same author (co-authored with <NAME>, see [here](http://ideas.repec.org/a/eee/dyncon/v35y2011i4p604-615.html)) which provides 2nd-order accurate solutions without using Tensor Algebra (using the Magnus and Neudecker 1999 definition of the Hessian matrix). The library is extremely user-friendly in the sense of employing a model text file parser similar to that present in [Dynare](http://www.dynare.org/) which requires users to only write down the original set of non-linear first-order conditions of optimality. In addition, users are offered a menu of options of how to provide information required for calculating the steady state of the model. Once the model is parsed and read in, several options of solving it exist and users are provided with further convenience methods suitable for simulating solved models and investigating dynamic statistical properties. It should also be mentioned that because PyMacLab is a convenience library of highly modular nature (using a object-oriented programming approach) it is very easy to loop over one model several thousand times each time changing the original set of primitive parameters, such as depreciation rates, impatience factors, etc. in order to compute solutions of the same model over a large set of conceivable parameterisations. Also, whenever solution methods require the calculation of the Jacobian or Hessian, this is always done analytically (symbolically) using the Python symbolic computation library [SympyCore](http://code.google.com/p/sympycore/) and not numerically as in other software packages. Sympycore is not supplanted by Sympy, but it works well at the moment so we will alter PyMacLab at a later stage to reflect this. 
PyMacLab was authored by [<NAME>](http://www.ericscheffel.com) who is currently working as [Assistant Professor in Economics at Nottingham University China](http://www.nottingham.edu.cn/en/business/people/staffprofile/eric-scheffel.aspx) and is distributed under the GNU General Public License v3.0. Copyright 2007-2012 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. \ from datetime import datetime version = '%s' # write_version_py() # Find the standard site-packages absolute path for elem in sys.path: if 'site-packages' in elem: pathos = os.path.join(elem.split('site-packages')[0],'site-packages','pymaclab') # Install pp and sympycore, but first remove old stuff # Delete old sympycore stuff # We don't want to import the package in the current working directory! # Delete old pp stuff # We don't want to import the package in the current working directory! # Now insert the current directory back in
| 2.481898
| 2
|
xgds_image/models.py
|
xgds/xgds_image
| 1
|
6627425
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from enum import Enum
import os
import logging
import sys
import six
import tempfile
import xml.dom.minidom
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.text import slugify
from django.urls import reverse
from geocamUtil.models import AbstractEnumModel
from geocamUtil.loader import LazyGetModelByName, getClassByName
from geocamUtil.defaultSettings import HOSTNAME
from geocamUtil.modelJson import modelToDict
from geocamUtil.UserUtil import getUserName
from geocamTrack import models as geocamTrackModels
from xgds_notes2.models import NoteMixin, NoteLinksMixin, DEFAULT_NOTES_GENERIC_RELATION
from xgds_core.couchDbStorage import CouchDbStorage
from xgds_core.models import SearchableModel, AbstractVehicle, HasFlight, HasDownloadableFiles, IsFlightChild, \
IsFlightData, BroadcastMixin
from xgds_core.views import get_file_from_couch
from deepzoom.models import DeepZoom
from deepzoom import deepzoom
if not settings.USE_PYTHON_DEEPZOOM_TILER:
import gi
gi.require_version('Vips', '8.0')
from gi.repository import Vips
from StringIO import StringIO
from datetime import datetime
from xgds_core.couchDbStorage import CouchDbStorage
from email.mime import image
import couchdb
import json
from django.db.models.signals import post_save
from django.dispatch import receiver
if settings.XGDS_CORE_REDIS:
from xgds_core.redisUtil import publishRedisSSE
logger = logging.getLogger("deepzoom.models")
# This global declaration does not work when the database name has to be changed
# at run time (e.g. when running unit tests), so the global declaration has been
# moved to a couple places where it is needed here and may need to be fixed
# elsewhere if the change has other unintended and undetected consequences
# couchStore = CouchDbStorage()
# couchDatabase = couchStore.get_couchDb()
def getNewImageFileName(instance, filename):
    """Upload-path callback: place *filename* under the configured image data subdirectory."""
    return "%s%s" % (settings.XGDS_IMAGE_DATA_SUBDIRECTORY, filename)
class ImageType(Enum):
    """Role an image file plays within an ImageSet.

    Members:
        full      -- full-size renderable image
        source    -- original upload that gets converted
        thumbnail -- small renderable preview
    """
    full = 0
    source = 1
    thumbnail = 2
class Camera(AbstractVehicle):
    """
    A physical camera, modeled on AbstractVehicle.
    Identified by (name, serial); carries a fixed heading offset applied when
    deriving an image's heading from a position (see AbstractImageSet.head).
    """
    # Manufacturer serial number; optional because not every camera reports one.
    serial = models.CharField(max_length=128, blank=True, null=True)
    name = models.CharField(max_length=64, blank=True)
    # Mounting offset in degrees, added to the position's heading.
    heading_offset_degrees = models.FloatField(default=0, validators=[MinValueValidator(-360.0), MaxValueValidator(360.0)])

    class Meta:
        unique_together = ("name", "serial")
# TODO change these in your model classes if you are not using defaults
# These are factory functions (formerly lambdas assigned to names, PEP 8 E731):
# each concrete model calls one to declare its own copy of the foreign key.
def DEFAULT_CAMERA_FIELD():
    """Default ForeignKey to Camera for derived image-set models."""
    return models.ForeignKey(Camera, null=True, blank=True)


def DEFAULT_TRACK_POSITION_FIELD():
    """Default ForeignKey to the track-derived past-position model."""
    return models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_track_set")


def DEFAULT_EXIF_POSITION_FIELD():
    """Default ForeignKey to the EXIF-derived past-position model."""
    return models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_exif_set")


def DEFAULT_USER_POSITION_FIELD():
    """Default ForeignKey to the user-edited past-position model."""
    return models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_user_set")


def DEFAULT_FLIGHT_FIELD():
    """Default ForeignKey to the core Flight model."""
    return models.ForeignKey('xgds_core.Flight', related_name='%(app_label)s_%(class)s_related',
                             verbose_name=settings.XGDS_CORE_FLIGHT_MONIKER, blank=True, null=True)


# TODO if you are not using the default image set model you will have to override this in your classes
def DEFAULT_IMAGE_SET_FIELD():
    """Default ForeignKey to the xgds_image ImageSet model."""
    return models.ForeignKey('xgds_image.ImageSet', related_name='%(app_label)s_%(class)s_related',
                             verbose_name=settings.XGDS_IMAGE_IMAGE_SET_MONIKER, blank=True, null=True)
class DeepZoomImageDescriptor(deepzoom.DZIDescriptor):
    """DZI descriptor that persists its XML to couchdb instead of the file system."""

    def save(self, destination):
        """Save descriptor file."""
        # Build the standard Deep Zoom .dzi XML: tile size/overlap/format plus
        # the overall image dimensions.
        doc = xml.dom.minidom.Document()
        image = doc.createElementNS(deepzoom.NS_DEEPZOOM, "Image")
        image.setAttribute("xmlns", deepzoom.NS_DEEPZOOM)
        image.setAttribute("TileSize", str(self.tile_size))
        image.setAttribute("Overlap", str(self.tile_overlap))
        image.setAttribute("Format", str(self.tile_format))
        size = doc.createElementNS(deepzoom.NS_DEEPZOOM, "Size")
        size.setAttribute("Width", str(self.width))
        size.setAttribute("Height", str(self.height))
        image.appendChild(size)
        doc.appendChild(image)
        descriptor = doc.toxml()
        # basename/dirname are stored on the couch document for later lookup;
        # full_file_name is the rejoined destination path used as the doc key.
        f = os.path.basename(destination)
        fpath = os.path.dirname(destination)
        full_file_name = os.path.join(fpath, f)
        #################
        # these were global, now defined locally:
        # (recreated per call because the database name can change at run time,
        # e.g. during unit tests -- see module-level note above)
        couchStore = CouchDbStorage()
        couchDatabase = couchStore.get_couchDb()
        #################
        couchDatabase[full_file_name] = {"category": "xgds_image", "basename": f, "name": fpath,
                                         "creation_time": datetime.utcnow().isoformat()}
        # Re-fetch the document so it carries the revision assigned on insert,
        # then attach the descriptor XML to it.
        newDoc = couchDatabase[full_file_name]
        couchDatabase.put_attachment(newDoc, descriptor, filename=f)
class DeepZoomImageCreator(deepzoom.ImageCreator):
    """Deep Zoom creator that writes the tile pyramid to couchdb instead of disk."""

    def create(self, source, destination):
        """Creates Deep Zoom image from source file and saves it to destination."""
        # Hoisted out of the tile loop: the import and the couch connection were
        # previously re-executed for every single tile.
        from io import BytesIO

        self.image = deepzoom.PILImage.open(source)
        width, height = self.image.size
        self.descriptor = DeepZoomImageDescriptor(width=width,
                                                  height=height,
                                                  tile_size=self.tile_size,
                                                  tile_overlap=self.tile_overlap,
                                                  tile_format=self.tile_format)
        #destination = deepzoom._expand(destination) # path to dzi file: i.e. /vagrant/xgds_basalt/data/xgds_image/deepzoom_images/p6180021_deepzoom_107/p6180021_deepzoom_107.dzi
        image_name = os.path.splitext(os.path.basename(destination))[0]  # p6180021_deepzoom_107
        dir_name = os.path.dirname(destination)  # i.e. /vagrant/xgds_basalt/data/xgds_image/deepzoom_images/p6180021_deepzoom_107
        image_files = os.path.join(dir_name, "%s_files" % image_name)

        # These were global, now defined locally (the database name can change at
        # run time, e.g. during unit tests); one connection per create() call.
        couchStore = CouchDbStorage()
        couchDatabase = couchStore.get_couchDb()

        tile_format = self.descriptor.tile_format  # invariant across tiles

        # Create tiles; level count is autocalculated by the descriptor from the
        # image size (configure via DEEPZOOM_PARAMS in siteSettings).
        levels = self.descriptor.num_levels
        for level in range(levels):
            level_dir = os.path.join(image_files, str(level))
            level_image = self.get_image(level)
            for (column, row) in self.tiles(level):
                bounds = self.descriptor.get_tile_bounds(level, column, row)
                tile = level_image.crop(bounds)  # this tile is saved to couchdb
                tile_name = "%s_%s.%s" % (column, row, tile_format)
                full_tile_name = os.path.join(level_dir, tile_name)
                # Serialize the PIL tile into an in-memory JPEG buffer.
                myIo = BytesIO()
                tile.save(myIo, format='JPEG')
                tileBytesIO = myIo.getvalue()
                # basename and name are stored for convenience so we can look the
                # tile up later.
                couchDatabase[full_tile_name] = {"category": "xgds_image",
                                                 "basename": tile_name,
                                                 "name": level_dir,
                                                 "creation_time": datetime.utcnow().isoformat()}
                newDoc = couchDatabase[full_tile_name]
                couchDatabase.put_attachment(newDoc, tileBytesIO, filename=tile_name)
        self.descriptor.save(destination)
class DeepZoomTiles(DeepZoom):
    """DeepZoom model whose tiles and descriptor are written to couchdb."""

    def create_deepzoom_files(self):
        """
        Creates deepzoom image from associated uploaded image.
        Attempts to load `DEEPZOOM_PARAMS` and `DEEPZOOM_ROOT` from settings.
        Substitutes default settings for any missing settings.
        """
        # Try to load deep zoom parameters, otherwise assign default values.
        try:
            dz_params = settings.DEEPZOOM_PARAMS
        except AttributeError:
            if 'deepzoom.models' in settings.LOGGING['loggers']:
                logger.exception("`DEEPZOOM_PARAMS` incorrectly defined!")
            dz_params = self.DEFAULT_DEEPZOOM_PARAMS
        if not isinstance(dz_params, dict):
            raise AttributeError("`DEEPZOOM_PARAMS` must be a dictionary.")

        # BUG FIX: each parameter is looked up under its own key.  Previously all
        # five lookups used 'tile_size', so overlap/format/quality/filter always
        # fell back to the tile_size value.
        _tile_size = self.get_dz_param('tile_size', dz_params)
        _tile_overlap = self.get_dz_param('tile_overlap', dz_params)
        _tile_format = self.get_dz_param('tile_format', dz_params)
        _image_quality = self.get_dz_param('image_quality', dz_params)
        _resize_filter = self.get_dz_param('resize_filter', dz_params)

        # Initialize deep zoom creator.
        creator = DeepZoomImageCreator(tile_size=_tile_size,
                                       tile_overlap=_tile_overlap,
                                       tile_format=_tile_format,
                                       image_quality=_image_quality,
                                       resize_filter=_resize_filter)

        # Try to load deep zoom root, otherwise assign default value.
        try:
            dz_deepzoom_root = settings.DEEPZOOM_ROOT
        except AttributeError:
            dz_deepzoom_root = self.DEFAULT_DEEPZOOM_ROOT
        if not isinstance(dz_deepzoom_root, six.string_types):
            raise AttributeError("`DEEPZOOM_ROOT` must be a string.")

        dz_filename = self.slug + ".dzi"
        dz_relative_filepath = os.path.join(dz_deepzoom_root, self.slug)
        dz_couch_destination = os.path.join(dz_relative_filepath, dz_filename)

        # Fetch the associated source image out of couchdb and wrap it as a
        # file-like object for the creator.
        assoc_image_name = self.associated_image.split('/')[-1]
        dataString = get_file_from_couch(settings.XGDS_IMAGE_DATA_SUBDIRECTORY, assoc_image_name)
        dz_associated_image = StringIO(dataString)

        # Process deep zoom image and save tiles + descriptor to couchdb.
        # OSError/IOError are reported but deliberately not re-raised (best effort).
        try:
            creator.create(dz_associated_image, dz_couch_destination)  # source, destination
        except OSError as err:
            print("OS error({0}): {1}".format(err.errno, err.strerror))
        except IOError as err:
            print("I/O error({0}): {1}".format(err.errno, err.strerror))
        except:
            print("Unexpected deep zoom creation error:", sys.exc_info())
            raise
        return (dz_couch_destination, dz_relative_filepath)
class AbstractImageSet(models.Model, NoteMixin, SearchableModel, NoteLinksMixin, HasFlight, HasDownloadableFiles,
                       IsFlightChild, IsFlightData, BroadcastMixin):
    """
    ImageSet is for supporting various resolution images from the same source image.
    Set includes the raw image and any resized images.
    Contains utility functions to fetch different sized images.
    """
    name = models.CharField(max_length=128, default='', blank=True, null=True, help_text="Legible " + settings.XGDS_IMAGE_IMAGE_SET_MONIKER + " name", db_index=True)
    shortName = models.CharField(max_length=32, blank=True, null=True, db_index=True, help_text="a short mnemonic code suitable to embed in a URL")
    camera = 'set this to DEFAULT_CAMERA_FIELD() or similar in derived classes'
    author = models.ForeignKey(User)
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    deleted = models.BooleanField(default=False)
    description = models.CharField(max_length=128, blank=True)
    # Position placeholders -- derived classes must replace these with real FK fields.
    track_position = 'set this to DEFAULT_TRACK_POSITION_FIELD() or similar in derived classes'
    exif_position = 'set this to DEFAULT_EXIF_POSITION_FIELD() or similar in derived classes'
    user_position = 'set this to DEFAULT_USER_POSITION_FIELD() or similar in derived classes'
    modification_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    acquisition_time = models.DateTimeField(editable=False, default=timezone.now, db_index=True)
    acquisition_timezone = models.CharField(null=True, blank=False, max_length=128, default=settings.TIME_ZONE, db_index=True)
    uploadAndSaveTime = models.FloatField(null=True, blank=True)
    totalTimeSinceNotify = models.FloatField(null=True, blank=True)

    # Optionally generate deep zoom from uploaded image if set to True.
    create_deepzoom = models.BooleanField(default=settings.XGDS_IMAGE_DEFAULT_CREATE_DEEPZOOM,
                                          help_text="Generate deep zoom?")  # True if you need to create a deepzoom

    # Link this image to generated deep zoom (legacy couchdb-tiled path).
    associated_deepzoom = models.ForeignKey(DeepZoomTiles,
                                            null=True,
                                            blank=True,
                                            related_name="%(app_label)s_%(class)s",
                                            editable=False,
                                            on_delete=models.SET_NULL)

    # Newer VIPS-tiled path stores tiles locally under this slug instead.
    local_deepzoom_slug = models.CharField(null=True, blank=True, max_length=255)
    rotation_degrees = models.PositiveSmallIntegerField(null=True, default=0)
    flight = "TODO set to DEFAULT_FLIGHT_FIELD or similar"

    @classmethod
    def getSseType(cls):
        """Return the server-sent-event type string for image sets."""
        return settings.XGDS_IMAGE_SSE_TYPE.lower()

    @classmethod
    def get_tree_json(cls, parent_class, parent_pk):
        """Return tree-node JSON for the image sets of the flight with pk parent_pk, or None."""
        try:
            found = LazyGetModelByName(settings.XGDS_IMAGE_IMAGE_SET_MODEL).get().objects.filter(flight__id=parent_pk)
            result = None
            if found.exists():
                moniker = settings.XGDS_IMAGE_IMAGE_SET_MONIKER + 's'
                flight = found[0].flight
                result = [{"title": moniker,
                           "selected": False,
                           "tooltip": "%s for %s " % (moniker, flight.name),
                           "key": "%s_%s" % (flight.uuid, moniker),
                           "data": {"json": reverse('xgds_map_server_objectsJson',
                                                    kwargs={'object_name': 'XGDS_IMAGE_IMAGE_SET_MODEL',
                                                            'filter': 'flight__pk:' + str(flight.pk)}),
                                    "sseUrl": "",
                                    "type": 'MapLink',
                                    }
                           }]
            return result
        except ObjectDoesNotExist:
            return None

    @classmethod
    def get_info_json(cls, flight_pk):
        """Return a summary dict (name, count, search url) for a flight's image sets, or None."""
        found = LazyGetModelByName(settings.XGDS_IMAGE_IMAGE_SET_MODEL).get().objects.filter(flight__id=flight_pk)
        result = None
        if found.exists():
            flight = LazyGetModelByName(settings.XGDS_CORE_FLIGHT_MODEL).get().objects.get(id=flight_pk)
            result = {'name': settings.XGDS_IMAGE_IMAGE_SET_MONIKER + 's',
                      'count': found.count(),
                      'url': reverse('search_map_object_filter', kwargs={'modelName': settings.XGDS_IMAGE_IMAGE_SET_MONIKER,
                                                                         'filter': 'flight__group:%d,flight__vehicle:%d' % (
                                                                             flight.group.pk, flight.vehicle.pk)})
                      }
        return result

    @classmethod
    def timesearchField(self):
        """Name of the field used for time-based searches."""
        return 'acquisition_time'

    def create_deepzoom_slug(self):
        """
        Returns a string instance for deepzoom slug.
        Returns None when the set has no name (original behavior preserved).
        """
        if self.name:
            try:
                filename = self.name.split('.')
            except:
                return ''
            deepzoomSlug = filename[0] + "_deepzoom_" + str(self.id)
            return deepzoomSlug.lower()

    def create_deepzoom_image(self):
        """
        Creates and processes deep zoom image files to storage.
        Returns instance of newly created DeepZoom instance for associating
        uploaded image to it.
        """
        try:
            deepzoomSlug = self.create_deepzoom_slug()
            rawImageUrl = self.getRawImage().file.url
            dz, created = DeepZoomTiles.objects.get_or_create(associated_image=rawImageUrl,
                                                              name=deepzoomSlug)
            if created:
                dz.slug = slugify(deepzoomSlug)
                dz.save()
                dz.create_deepzoom_files()
            self.associated_deepzoom = dz
            self.create_deepzoom = False
            self.save()
        except (TypeError, ValueError, AttributeError) as err:
            print("Error: Incorrect deep zoom parameter(s) in settings.py: {0}".format(err))
            raise
        except:
            print("Unexpected error creating deep zoom: {0}".format(sys.exc_info()[1:2]))
            raise
        # finally:
        # Mark the thread inactive in the couchdb in case there's another
        # thread waiting for this to be finished
        # TODO: come up with a better multithreaded way to do this
        # dbServer = couchdb.Server(settings.COUCHDB_URL)
        # db = dbServer[settings.COUCHDB_FILESTORE_NAME]
        # myFlag = db['create_deepzoom_thread']
        # myFlag['active'] = False
        # db['create_deepzoom_thread'] = myFlag

    def create_vips_deepzoom_image(self):
        """
        Creates and processes deep zoom image files to local storage.
        """
        try:
            deepzoomSlug = self.create_deepzoom_slug()
            self.local_deepzoom_slug = deepzoomSlug
            self.associated_deepzoom = None
            # Copy the raw image into a temp file so VIPS can read it from disk.
            rawImageFile = self.getRawImage().file
            tempImageFile = tempfile.NamedTemporaryFile(delete=False)
            tempImageFile.write(rawImageFile.read())
            tempImageFile.close()
            baseImage = Vips.Image.new_from_file(tempImageFile.name)
            tileOutputDir = "%s%s%s" % (settings.DATA_ROOT, settings.DEEPZOOM_ROOT, deepzoomSlug)
            os.mkdir(tileOutputDir)
            baseImage.dzsave("%s/%s" % (tileOutputDir, deepzoomSlug),
                             overlap=settings.VIPS_PARAMS['overlap'],
                             tile_size=settings.VIPS_PARAMS['tile_size'])
            os.remove(tempImageFile.name)
            self.create_deepzoom = False
            self.save()
        except:
            print("Unexpected error creating deep zoom: {0}".format(sys.exc_info()[1:2]))
            raise

    def delete_image_file(self, path_of_image_to_delete=None):
        """
        Deletes uploaded image file from storage.
        """
        try:
            os.remove(path_of_image_to_delete)
        except OSError:
            logger.exception("Image file deletion failed!")

    @classmethod
    def cls_type(cls):
        """Model-name string used by the generic search machinery."""
        return settings.XGDS_IMAGE_MODEL_NAME if False else settings.XGDS_IMAGE_IMAGE_MODEL_NAME

    @property
    def raw_image_url(self):
        """URL of the raw (original) image, or None if no raw image exists."""
        rawImage = self.getRawImage()
        if rawImage:
            return rawImage.file.url
        return None

    @property
    def camera_name(self):
        rawCamera = self.camera
        if rawCamera:
            return rawCamera.name
        return None

    @property
    def author_name(self):
        return getUserName(self.author)

    @property
    def timezone(self):
        return self.acquisition_timezone

    @property
    def originalImageResolutionString(self):
        """Human-readable 'W x H | N MP' string for the raw image, or 'n/a'."""
        originalImage = self.getRawImage()
        if originalImage:
            width = originalImage.width
            height = originalImage.height
            if width and height:
                megaPixels = (width * height) / (1000.0 * 1000.0)
                return "%1d x %1d | %1.2f MP" % (width, height, megaPixels)
        return 'n/a'

    @property
    def originalImageFileSizeMB(self):
        """Human-readable raw-image file size in MB, or 'n/a'."""
        originalImage = self.getRawImage()
        if originalImage and originalImage.fileSizeBytes:
            fileSizeMB = "%1.2f MB" % (originalImage.fileSizeBytes / (1024.0 * 1024.0))
            return fileSizeMB
        return 'n/a'

    @property
    def thumbnail_image_url(self):
        thumbImage = self.getThumbnail()
        if thumbImage:
            return thumbImage.file.url
        return ''

    @property
    def deepzoom_file_url(self):
        # For backward compatibility we check the associated_deepzoom object first, but we are moving toward
        # tiling images with VIPS at each node and storing locally (2nd if clause).
        if self.associated_deepzoom:
            deepzoomSlug = self.associated_deepzoom.slug
            docDir = settings.DEEPZOOM_ROOT + deepzoomSlug
            docFile = deepzoomSlug + '.dzi'
            return reverse('get_db_attachment', kwargs={'docDir': docDir, 'docName': docFile})
        if self.local_deepzoom_slug:
            return settings.DATA_URL + settings.DEEPZOOM_ROOT + self.local_deepzoom_slug + \
                "/" + self.local_deepzoom_slug + ".dzi"
        return None

    def finish_initialization(self, request):
        """ during construction, if you have extra data to fill in you can override this method"""
        pass

    class Meta:
        abstract = True

    def __unicode__(self):
        return (u"ImageSet(%s, name='%s', shortName='%s')"
                % (self.pk, self.name, self.shortName))

    def getPosition(self):
        """Best available position: user edit > EXIF > track, else None."""
        if self.user_position:
            return self.user_position
        if self.exif_position:
            return self.exif_position
        if self.track_position:
            return self.track_position
        return None

    @property
    def head(self):
        """ heading """
        try:
            position = self.getPosition()
            if position:
                if self.camera:
                    return position.heading + self.camera.heading_offset_degrees
                return position.heading
        except Exception:
            # Positions are not guaranteed to carry a heading; any failure means "unknown".
            pass
        return None

    def getPositionDict(self):
        """ override if you want to change the logic for how the positions are prioritized in JSON.
        Right now exif_position is from the camera, track_position is from the track, and user_position stores any hand edits.
        track provides lat lon and altitude, exif provides heading, and user trumps all.
        """
        result = {}
        result['alt'] = ""
        result['head'] = ""
        heading_offset_degrees = 0
        if self.camera:
            heading_offset_degrees = self.camera.heading_offset_degrees
        if self.user_position:
            result['lat'] = self.user_position.latitude
            result['lon'] = self.user_position.longitude
            if hasattr(self.user_position, 'altitude'):
                result['alt'] = self.user_position.altitude
            if hasattr(self.user_position, 'heading'):
                result['head'] = self.user_position.heading + heading_offset_degrees
            return result
        result['position_id'] = ""
        if self.track_position:
            result['lat'] = self.track_position.latitude
            result['lon'] = self.track_position.longitude
            if self.track_position.altitude:
                result['alt'] = self.track_position.altitude
            if hasattr(self.track_position, 'heading'):
                result['head'] = self.track_position.heading + heading_offset_degrees
            if result['alt'] == '' and hasattr(self.exif_position, 'altitude'):
                # BUG FIX: fall back to the EXIF altitude here.  The original read
                # track_position.altitude again, i.e. the value that just tested falsy.
                result['alt'] = self.exif_position.altitude
            return result
        elif self.exif_position:
            result['lat'] = self.exif_position.latitude
            result['lon'] = self.exif_position.longitude
            if hasattr(self.exif_position, 'altitude'):
                result['alt'] = self.exif_position.altitude
            if hasattr(self.exif_position, 'heading'):
                result['head'] = self.exif_position.heading + heading_offset_degrees
        else:
            result['lat'] = ""
            result['lon'] = ""
        return result

    def getRawImage(self):
        """First image in the set flagged raw=True, or None."""
        rawImages = self.images.filter(raw=True)
        if rawImages:
            return rawImages[0]
        else:
            return None

    def getSourceImage(self):
        """First image in the set with imageType == ImageType.source, or None."""
        sourceImages = self.images.filter(imageType=ImageType.source.value)
        if sourceImages:
            return sourceImages[0]
        else:
            return None

    def getDownloadableFiles(self):
        """
        :return: list of file objects, each with their own `read()` functions
        """
        sourceImage = self.getSourceImage()
        if sourceImage:
            return [sourceImage.file]
        return [self.getRawImage().file]

    def getLowerResImages(self):
        """Resized (non-raw, non-thumbnail) images in the set."""
        return self.images.filter(raw=False, thumbnail=False)

    def getThumbnail(self):
        """First thumbnail image in the set, or None."""
        thumbImages = self.images.filter(thumbnail=True)
        if thumbImages:
            return thumbImages[0]
        else:
            return None

    @classmethod
    def getSearchableFields(self):
        return ['name', 'description', 'author__first_name', 'author__last_name', 'flight__name']

    @classmethod
    def getSearchFormFields(cls):
        return ['name',
                'description',
                'author',
                'camera',
                'flight__vehicle'
                ]

    @classmethod
    def getSearchFieldOrder(cls):
        return ['flight__vehicle',
                'author',
                'name',
                'description',
                'camera',
                'acquisition_timezone',
                'min_acquisition_time',
                'max_acquisition_time']

    @classmethod
    def buildTextAnnotationQuery(cls, search_keywords):
        """
        Build a query that will search for an image set that is pointed to by a text annotation containing keyword
        :param search_keywords: keywords to search for in the format keyword and keyword or keyword etc
        :return: the found list of pks
        """
        master_query = LazyGetModelByName(settings.XGDS_IMAGE_TEXT_ANNOTATION_MODEL).get().objects.all()
        # Tokens alternate: even indices are keywords, odd indices are 'and'/'or'
        # join operators.  BUG FIX: the original accumulator dropped the final
        # keyword and ignored the first join operator.
        keyword_query = None
        join_type = 'and'
        for counter, token in enumerate(search_keywords):
            if counter % 2 == 0:
                term = Q(**{'content__icontains': token})
                if keyword_query is None:
                    keyword_query = term
                elif join_type == 'and':
                    keyword_query &= term
                else:
                    keyword_query |= term
            else:
                join_type = token
        if keyword_query is None:
            # No keywords supplied: nothing can match.
            return []
        result = master_query.filter(keyword_query)
        image_ids = result.values_list('image_id', flat=True)
        return list(image_ids)
class ImageSet(AbstractImageSet):
    """Concrete image set wired up with the default foreign-key field types."""
    # set foreign key fields from parent model to point to correct types
    camera = DEFAULT_CAMERA_FIELD()
    track_position = DEFAULT_TRACK_POSITION_FIELD()
    exif_position = DEFAULT_EXIF_POSITION_FIELD()
    user_position = DEFAULT_USER_POSITION_FIELD()
    notes = DEFAULT_NOTES_GENERIC_RELATION()
    flight = DEFAULT_FLIGHT_FIELD()
couchStore = CouchDbStorage()
class AbstractSingleImage(models.Model):
    """
    An abstract image which may not necessarily have a location on a map
    """
    # Image data is stored in couchdb via the module-level couchStore.
    file = models.ImageField(upload_to=getNewImageFileName,
                             max_length=256, storage=couchStore)
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    # True for the original upload; resized/derived copies are raw=False
    # (see AbstractImageSet.getRawImage / getLowerResImages).
    raw = models.BooleanField(default=True)
    imageSet = 'set this to DEFAULT_IMAGE_SET_FIELD() or similar in derived models'
    thumbnail = models.BooleanField(default=False)
    width = models.IntegerField(blank=True, null=True)
    height = models.IntegerField(blank=True, null=True)
    fileSizeBytes = models.IntegerField(blank=True, null=True)
    # Integer value from the ImageType enum (full/source/thumbnail).
    imageType = models.IntegerField(blank=True, null=True)

    @property
    def acquisition_time(self):
        """Acquisition time is delegated to the owning image set."""
        return self.imageSet.acquisition_time

    # def toMapDict(self):
    #     """
    #     Return a reduced dictionary that will be turned to JSON
    #     """
    #     result = modelToDict(self)
    #     return result

    def getAnnotations(self):
        # NOTE(review): ANNOTATION_MANAGER is not defined in this module as shown;
        # presumably it is provided elsewhere in the project -- verify before use.
        return ANNOTATION_MANAGER.filter(image__pk=self.pk)

    class Meta:
        abstract = True
        ordering = ['-creation_time']

    def __unicode__(self):
        return self.file.name

    @classmethod
    def getSseType(cls):
        """Return the server-sent-event type string for images."""
        return settings.XGDS_IMAGE_SSE_TYPE.lower()
class SingleImage(AbstractSingleImage):
    """ This can be used for screenshots or non geolocated images
    """
    # set foreign key fields from parent model to point to correct types;
    # related_name='images' is what AbstractImageSet's getRawImage/getThumbnail rely on.
    imageSet = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='images',
                                 verbose_name=settings.XGDS_IMAGE_IMAGE_SET_MONIKER, blank=True, null=True)
# @receiver(post_save, sender=SingleImage)
# def publishAfterSave(sender, instance, **kwargs):
# if settings.XGDS_CORE_REDIS:
# for channel in settings.XGDS_SSE_CHANNELS:
# publishRedisSSE(channel, settings.XGDS_IMAGE_SSE_TYPE.lower(), json.dumps({}))
def DEFAULT_SINGLE_IMAGE_FIELD():
    """Factory for the default ForeignKey to the single-image model (was a name-bound lambda, PEP 8 E731)."""
    return models.ForeignKey(settings.XGDS_IMAGE_SINGLE_IMAGE_MODEL, related_name="image")
class AnnotationColor(models.Model):
    """A named color choice for annotation strokes and fills."""
    name = models.CharField(max_length=16, db_index=True)
    # Hex color string; exact format (e.g. leading '#') is not enforced here -- TODO confirm.
    hex = models.CharField(max_length=16)
class AbstractAnnotation(models.Model):
    """
    Abstract base for annotations drawn on top of an image.

    Stores anchor position, stroke/fill colors, scale and rotation so the client
    can re-render the shape.  Derived classes add shape-specific fields and must
    supply the ``image`` foreign key (see WARNING below).
    """
    # Anchor offsets; presumably pixel coordinates within the image -- TODO confirm.
    left = models.IntegerField(null=False, blank=False)
    top = models.IntegerField(null=False, blank=False)
    # NOTE(review): default=1 assumes an AnnotationColor row with pk 1 exists.
    strokeColor = models.ForeignKey(AnnotationColor, related_name='%(app_label)s_%(class)s_strokeColor', default=1)
    strokeWidth = models.PositiveIntegerField(default=2)
    angle = models.FloatField(default=0)  # store shape rotation angle
    scaleX = models.FloatField(default=1)
    scaleY = models.FloatField(default=1)
    originX = models.CharField(max_length=16, default="left")
    originY = models.CharField(max_length=16, default="center")
    fill = models.ForeignKey(AnnotationColor, related_name='%(app_label)s_%(class)s_fill', null=True, blank=True)
    size = models.CharField(max_length=16, default="medium")
    author = models.ForeignKey(User, related_name='%(app_label)s_%(class)s_related')
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    image = 'set this to DEFAULT_IMAGE_SET_FIELD or similar in derived classes'
    # WARNING -- you cannot include the below in this class or it will cause a circular dependency in migrations
    #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image')

    class Meta:
        abstract = True

    def getJsonType(self):
        """Type tag consumed by the client; overridden by shape subclasses."""
        return 'annotation'

    def toJson(self):
        """Return the annotation's fields as a dict plus 'annotationType' and 'pk'."""
        result = model_to_dict(self)
        result['annotationType'] = self.getJsonType()
        result['pk'] = self.pk
        return result
class NormalAnnotation(AbstractAnnotation):
    """ The default type of annotation, referring to an xgds_image.ImageSet """
    image = DEFAULT_IMAGE_SET_FIELD()

    # Still abstract: concrete shapes mix this in with a shape-field class below.
    class Meta:
        abstract = True
class AbstractTextAnnotation(models.Model):
    """Mixin fields for a text annotation: the text content plus font styling."""
    content = models.CharField(max_length=512, default='')
    isBold = models.BooleanField(default=False)
    isItalics = models.BooleanField(default=False)
    width = models.PositiveIntegerField(default=1)
    height = models.PositiveIntegerField(default=1)
    fontSize = models.PositiveIntegerField(default=8)

    def getJsonType(self):
        """Type tag consumed by the client-side renderer."""
        return 'text'

    class Meta:
        abstract = True
class TextAnnotation(AbstractTextAnnotation, NormalAnnotation):
    """Concrete text annotation; MRO resolves getJsonType from AbstractTextAnnotation."""
    pass
class AbstractEllipseAnnotation(models.Model):
    """Mixin fields for an ellipse annotation (radii; presumably pixels -- TODO confirm)."""
    radiusX = models.IntegerField()
    radiusY = models.IntegerField()

    def getJsonType(self):
        """Type tag consumed by the client-side renderer."""
        return 'ellipse'

    class Meta:
        abstract = True
class EllipseAnnotation(AbstractEllipseAnnotation, NormalAnnotation):
    """Concrete ellipse annotation; MRO resolves getJsonType from AbstractEllipseAnnotation."""
    pass
class AbstractRectangleAnnotation(models.Model):
    """Mixin fields for a rectangle annotation (dimensions; presumably pixels -- TODO confirm)."""
    width = models.PositiveIntegerField()
    height = models.PositiveIntegerField()

    def getJsonType(self):
        """Type tag consumed by the client-side renderer."""
        return 'rectangle'

    class Meta:
        abstract = True
class RectangleAnnotation(AbstractRectangleAnnotation, NormalAnnotation):
    """Concrete rectangle annotation; MRO resolves getJsonType from AbstractRectangleAnnotation."""
    pass
class AbstractArrowAnnotation(models.Model):
    """Mixin fields for an arrow annotation."""
    # Serialized list of points; default is '[]', presumably JSON-encoded -- TODO confirm.
    points = models.TextField(default='[]')

    def getJsonType(self):
        """Type tag consumed by the client-side renderer."""
        return 'arrow'

    class Meta:
        abstract = True
class ArrowAnnotation(AbstractArrowAnnotation, NormalAnnotation):
    """Concrete arrow annotation; MRO resolves getJsonType from AbstractArrowAnnotation."""
    pass
# NOT USED YET
# This will support the url to the saved annotated image download via url
# class AnnotatedScreenshot(models.Model):
# imageBinary = models.FileField(upload_to=settings.XGDS_IMAGE_ANNOTATED_IMAGES_SUBDIR)
# width = models.PositiveIntegerField(default=250)
# height = models.PositiveIntegerField(default=250)
# author = models.ForeignKey(User)
# creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
# image = 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes'
# # WARNING -- the below will cause a circular dependency so don't do it. You have to have a derived class if you are planning to use this.
# #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image') # DEFAULT_SINGLE_IMAGE_FIELD # 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes'
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from enum import Enum
import os
import logging
import sys
import six
import tempfile
import xml.dom.minidom
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.text import slugify
from django.urls import reverse
from geocamUtil.models import AbstractEnumModel
from geocamUtil.loader import LazyGetModelByName, getClassByName
from geocamUtil.defaultSettings import HOSTNAME
from geocamUtil.modelJson import modelToDict
from geocamUtil.UserUtil import getUserName
from geocamTrack import models as geocamTrackModels
from xgds_notes2.models import NoteMixin, NoteLinksMixin, DEFAULT_NOTES_GENERIC_RELATION
from xgds_core.couchDbStorage import CouchDbStorage
from xgds_core.models import SearchableModel, AbstractVehicle, HasFlight, HasDownloadableFiles, IsFlightChild, \
IsFlightData, BroadcastMixin
from xgds_core.views import get_file_from_couch
from deepzoom.models import DeepZoom
from deepzoom import deepzoom
if not settings.USE_PYTHON_DEEPZOOM_TILER:
import gi
gi.require_version('Vips', '8.0')
from gi.repository import Vips
from StringIO import StringIO
from datetime import datetime
from xgds_core.couchDbStorage import CouchDbStorage
from email.mime import image
import couchdb
import json
from django.db.models.signals import post_save
from django.dispatch import receiver
if settings.XGDS_CORE_REDIS:
from xgds_core.redisUtil import publishRedisSSE
logger = logging.getLogger("deepzoom.models")
# This global declaration does not work when the database name has to be changed
# at run time (e.g. when running unit tests), so the global declaration has been
# moved to a couple places where it is needed here and may need to be fixed
# elsewhere if the change has other unintended and undetected consequences
# couchStore = CouchDbStorage()
# couchDatabase = couchStore.get_couchDb()
def getNewImageFileName(instance, filename):
    """Upload-path callback: place *filename* under the configured image data subdirectory."""
    return "%s%s" % (settings.XGDS_IMAGE_DATA_SUBDIRECTORY, filename)
class ImageType(Enum):
    """Role an image file plays within an ImageSet.

    Members:
        full      -- full-size renderable image
        source    -- original upload that gets converted
        thumbnail -- small renderable preview
    """
    full = 0
    source = 1
    thumbnail = 2
class Camera(AbstractVehicle):
    """
    A physical camera, modeled on AbstractVehicle.
    Identified by (name, serial); carries a fixed heading offset applied when
    deriving an image's heading from a position.
    """
    # Manufacturer serial number; optional because not every camera reports one.
    serial = models.CharField(max_length=128, blank=True, null=True)
    name = models.CharField(max_length=64, blank=True)
    # Mounting offset in degrees, added to the position's heading.
    heading_offset_degrees = models.FloatField(default=0, validators=[MinValueValidator(-360.0), MaxValueValidator(360.0)])

    class Meta:
        unique_together = ("name", "serial")
# TODO change these in your model classes if you are not using defaults
DEFAULT_CAMERA_FIELD = lambda: models.ForeignKey(Camera, null=True, blank=True)
DEFAULT_TRACK_POSITION_FIELD = lambda: models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_track_set" )
DEFAULT_EXIF_POSITION_FIELD = lambda: models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_exif_set" )
DEFAULT_USER_POSITION_FIELD = lambda: models.ForeignKey(settings.GEOCAM_TRACK_PAST_POSITION_MODEL, null=True, blank=True, related_name="%(app_label)s_%(class)s_image_user_set" )
DEFAULT_FLIGHT_FIELD = lambda: models.ForeignKey('xgds_core.Flight', related_name='%(app_label)s_%(class)s_related',
verbose_name=settings.XGDS_CORE_FLIGHT_MONIKER, blank=True, null=True)
# TODO if you are not using the default image set model you will have to override this in your classes
DEFAULT_IMAGE_SET_FIELD = lambda: models.ForeignKey('xgds_image.ImageSet', related_name='%(app_label)s_%(class)s_related',
verbose_name=settings.XGDS_IMAGE_IMAGE_SET_MONIKER, blank=True, null=True)
class DeepZoomImageDescriptor(deepzoom.DZIDescriptor):
    """DZI descriptor that saves the .dzi XML to couchdb instead of the file system."""

    def save(self, destination):
        """Save descriptor file.

        Builds the Deep Zoom Image XML descriptor and stores it as a couchdb
        attachment keyed by the full destination path, rather than writing a
        file to disk.
        """
        doc = xml.dom.minidom.Document()
        image = doc.createElementNS(deepzoom.NS_DEEPZOOM, "Image")
        image.setAttribute("xmlns", deepzoom.NS_DEEPZOOM)
        image.setAttribute("TileSize", str(self.tile_size))
        image.setAttribute("Overlap", str(self.tile_overlap))
        image.setAttribute("Format", str(self.tile_format))
        size = doc.createElementNS(deepzoom.NS_DEEPZOOM, "Size")
        size.setAttribute("Width", str(self.width))
        size.setAttribute("Height", str(self.height))
        image.appendChild(size)
        doc.appendChild(image)
        descriptor = doc.toxml()
        f = os.path.basename(destination)
        fpath = os.path.dirname(destination)
        full_file_name = os.path.join(fpath, f)
        #################
        # these were global, now defined locally (database name can change at
        # run time, e.g. during unit tests, so the connection cannot be cached
        # at module level):
        couchStore = CouchDbStorage()
        couchDatabase = couchStore.get_couchDb()
        #################
        # basename and name are stored for later lookup by directory/file.
        couchDatabase[full_file_name] = {"category": "xgds_image", "basename": f, "name": fpath,
                                         "creation_time": datetime.utcnow().isoformat()}
        newDoc = couchDatabase[full_file_name]
        couchDatabase.put_attachment(newDoc, descriptor, filename=f)
class DeepZoomImageCreator(deepzoom.ImageCreator):
    """Deep Zoom creator that writes the pyramid tiles (and descriptor) to
    couchdb instead of the file system."""

    def create(self, source, destination):
        """Creates Deep Zoom image from source file and saves it to destination.

        :param source: file path or file-like object readable by PIL
        :param destination: path to the .dzi file, used as the couchdb key prefix
        """
        # Hoisted out of the per-tile loop (was re-imported for every tile).
        from io import BytesIO

        self.image = deepzoom.PILImage.open(source)
        width, height = self.image.size
        self.descriptor = DeepZoomImageDescriptor(width=width,
                                                  height=height,
                                                  tile_size=self.tile_size,
                                                  tile_overlap=self.tile_overlap,
                                                  tile_format=self.tile_format)
        # i.e. destination = .../deepzoom_images/p6180021_deepzoom_107/p6180021_deepzoom_107.dzi
        image_name = os.path.splitext(os.path.basename(destination))[0]  # p6180021_deepzoom_107
        dir_name = os.path.dirname(destination)
        image_files = os.path.join(dir_name, "%s_files" % image_name)

        # One couchdb connection for the whole pyramid. A fresh connection was
        # previously created for EVERY tile; the store is still created locally
        # (not at module level) because the database name can change at run
        # time, e.g. during unit tests.
        couchStore = CouchDbStorage()
        couchDatabase = couchStore.get_couchDb()

        # Create tiles. num_levels is autocalculated by the deepzoom
        # DZIDescriptor -- configure via DEEPZOOM_PARAMS in siteSettings.
        tile_format = self.descriptor.tile_format
        levels = self.descriptor.num_levels
        for level in range(levels):
            level_dir = os.path.join(image_files, str(level))
            level_image = self.get_image(level)
            for (column, row) in self.tiles(level):
                bounds = self.descriptor.get_tile_bounds(level, column, row)
                tile = level_image.crop(bounds)
                tile_name = "%s_%s.%s" % (column, row, tile_format)
                full_tile_name = os.path.join(level_dir, tile_name)
                # Render the PIL tile into an in-memory buffer and attach it.
                myIo = BytesIO()
                tile.save(myIo, format='JPEG')
                tileBytesIO = myIo.getvalue()
                # basename and name are for convenience so we can look it up later.
                couchDatabase[full_tile_name] = {"category": "xgds_image",
                                                 "basename": tile_name,
                                                 "name": level_dir,
                                                 "creation_time": datetime.utcnow().isoformat()}
                newDoc = couchDatabase[full_tile_name]
                couchDatabase.put_attachment(newDoc, tileBytesIO, filename=tile_name)
        self.descriptor.save(destination)
class DeepZoomTiles(DeepZoom):
    """DeepZoom model whose tile generation goes through DeepZoomImageCreator,
    i.e. descriptor and tiles are stored in couchdb."""

    def create_deepzoom_files(self):
        """
        Creates deepzoom image from associated uploaded image.
        Attempts to load `DEEPZOOM_PARAMS` and `DEEPZOOM_ROOT` from settings.
        Substitutes default settings for any missing settings.

        :return: (couch key of the .dzi descriptor, relative deepzoom directory)
        """
        # Try to load deep zoom parameters, otherwise assign default values.
        try:
            dz_params = settings.DEEPZOOM_PARAMS
        except AttributeError:
            if 'deepzoom.models' in settings.LOGGING['loggers']:
                logger.exception("`DEEPZOOM_PARAMS` incorrectly defined!")
            dz_params = self.DEFAULT_DEEPZOOM_PARAMS
        if not isinstance(dz_params, dict):
            raise AttributeError("`DEEPZOOM_PARAMS` must be a dictionary.")
        # BUG FIX: every parameter previously looked up 'tile_size', so overlap,
        # format, quality and filter all silently received the tile_size value.
        _tile_size = self.get_dz_param('tile_size', dz_params)
        _tile_overlap = self.get_dz_param('tile_overlap', dz_params)
        _tile_format = self.get_dz_param('tile_format', dz_params)
        _image_quality = self.get_dz_param('image_quality', dz_params)
        _resize_filter = self.get_dz_param('resize_filter', dz_params)
        # Initialize deep zoom creator.
        creator = DeepZoomImageCreator(tile_size=_tile_size,
                                       tile_overlap=_tile_overlap,
                                       tile_format=_tile_format,
                                       image_quality=_image_quality,
                                       resize_filter=_resize_filter)
        # Try to load deep zoom root, otherwise assign default value.
        try:
            dz_deepzoom_root = settings.DEEPZOOM_ROOT
        except AttributeError:
            dz_deepzoom_root = self.DEFAULT_DEEPZOOM_ROOT
        if not isinstance(dz_deepzoom_root, six.string_types):
            raise AttributeError("`DEEPZOOM_ROOT` must be a string.")
        dz_filename = self.slug + ".dzi"
        dz_relative_filepath = os.path.join(dz_deepzoom_root, self.slug)
        dz_couch_destination = os.path.join(dz_relative_filepath, dz_filename)
        # Fetch the associated source image bytes back out of couchdb.
        assoc_image_name = self.associated_image.split('/')[-1]
        dataString = get_file_from_couch(settings.XGDS_IMAGE_DATA_SUBDIRECTORY, assoc_image_name)
        dz_associated_image = StringIO(dataString)
        # Process deep zoom image and save tiles/descriptor to couchdb.
        try:
            creator.create(dz_associated_image, dz_couch_destination)  # source, destination
        except OSError as err:
            print("OS error({0}): {1}".format(err.errno, err.strerror))
        except IOError as err:
            print("I/O error({0}): {1}".format(err.errno, err.strerror))
        except:
            print("Unexpected deep zoom creation error:", sys.exc_info())
            raise
        return(dz_couch_destination, dz_relative_filepath)
class AbstractImageSet(models.Model, NoteMixin, SearchableModel, NoteLinksMixin, HasFlight, HasDownloadableFiles,
                       IsFlightChild, IsFlightData, BroadcastMixin):
    """
    ImageSet is for supporting various resolution images from the same source image.
    Set includes the raw image and any resized images.
    Contains utility functions to fetch different sized images.

    Concrete subclasses must replace the placeholder string attributes
    (camera, track_position, exif_position, user_position, flight) with real
    ForeignKey fields -- see the DEFAULT_*_FIELD lambdas above and ImageSet below.
    """
    name = models.CharField(max_length=128, default='', blank=True, null=True, help_text="Legible " + settings.XGDS_IMAGE_IMAGE_SET_MONIKER + " name", db_index=True)
    shortName = models.CharField(max_length=32, blank=True, null=True, db_index=True, help_text="a short mnemonic code suitable to embed in a URL")
    camera = 'set this to DEFAULT_CAMERA_FIELD() or similar in derived classes'
    author = models.ForeignKey(User)
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    # Soft-delete flag; rows are flagged rather than removed.
    deleted = models.BooleanField(default=False)
    description = models.CharField(max_length=128, blank=True)
    # Three position sources; getPosition()/getPositionDict() define priority:
    # user-edited position trumps EXIF, which trumps the track interpolation.
    track_position = 'set this to DEFAULT_TRACK_POSITION_FIELD() or similar in derived classes'
    exif_position = 'set this to DEFAULT_EXIF_POSITION_FIELD() or similar in derived classes'
    user_position = 'set this to DEFAULT_USER_POSITION_FIELD() or similar in derived classes'
    modification_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    acquisition_time = models.DateTimeField(editable=False, default=timezone.now, db_index=True)
    acquisition_timezone = models.CharField(null=True, blank=False, max_length=128, default=settings.TIME_ZONE, db_index=True)
    # Timing metrics recorded by the upload pipeline (seconds; set elsewhere).
    uploadAndSaveTime = models.FloatField(null=True, blank=True)
    totalTimeSinceNotify = models.FloatField(null=True, blank=True)
    # Optionally generate deep zoom from uploaded image if set to True.
    create_deepzoom = models.BooleanField(default=settings.XGDS_IMAGE_DEFAULT_CREATE_DEEPZOOM,
                                          help_text="Generate deep zoom?")  # True if you need to create a deepzoom
    # Link this image to generated deep zoom (couchdb-backed legacy path).
    associated_deepzoom = models.ForeignKey(DeepZoomTiles,
                                            null=True,
                                            blank=True,
                                            related_name="%(app_label)s_%(class)s",
                                            editable=False,
                                            on_delete=models.SET_NULL)
    # Slug for locally-stored VIPS-generated tiles (newer path; see deepzoom_file_url).
    local_deepzoom_slug = models.CharField(null=True, blank=True, max_length=255)
    rotation_degrees = models.PositiveSmallIntegerField(null=True, default=0)
    flight = "TODO set to DEFAULT_FLIGHT_FIELD or similar"
    @classmethod
    def getSseType(cls):
        """Return the (lowercased) server-sent-event type name for image sets."""
        return settings.XGDS_IMAGE_SSE_TYPE.lower()

    @classmethod
    def get_tree_json(cls, parent_class, parent_pk):
        """Build a fancytree-style JSON node for the image sets of a flight.

        :param parent_class: unused here; part of the IsFlightChild interface
        :param parent_pk: pk of the parent flight
        :return: single-element list with the tree node dict, or None if the
            flight has no image sets
        """
        try:
            found = LazyGetModelByName(settings.XGDS_IMAGE_IMAGE_SET_MODEL).get().objects.filter(flight__id=parent_pk)
            result = None
            if found.exists():
                moniker = settings.XGDS_IMAGE_IMAGE_SET_MONIKER + 's'
                flight = found[0].flight
                result = [{"title": moniker,
                           "selected": False,
                           "tooltip": "%s for %s " % (moniker, flight.name),
                           "key": "%s_%s" % (flight.uuid, moniker),
                           "data": {"json": reverse('xgds_map_server_objectsJson',
                                                    kwargs={'object_name': 'XGDS_IMAGE_IMAGE_SET_MODEL',
                                                            'filter': 'flight__pk:' + str(flight.pk)}),
                                    "sseUrl": "",
                                    "type": 'MapLink',
                                    }
                           }]
            return result
        except ObjectDoesNotExist:
            # NOTE(review): filter() does not raise ObjectDoesNotExist; this
            # presumably guards related-object access -- confirm intent.
            return None

    @classmethod
    def get_info_json(cls, flight_pk):
        """Return a summary dict (name, count, search url) for the image sets
        of the given flight, or None if there are none."""
        found = LazyGetModelByName(settings.XGDS_IMAGE_IMAGE_SET_MODEL).get().objects.filter(flight__id=flight_pk)
        result = None
        if found.exists():
            flight = LazyGetModelByName(settings.XGDS_CORE_FLIGHT_MODEL).get().objects.get(id=flight_pk)
            result = {'name': settings.XGDS_IMAGE_IMAGE_SET_MONIKER + 's',
                      'count': found.count(),
                      'url': reverse('search_map_object_filter', kwargs={'modelName': settings.XGDS_IMAGE_IMAGE_SET_MONIKER,
                                                                         'filter': 'flight__group:%d,flight__vehicle:%d' % (
                                                                             flight.group.pk, flight.vehicle.pk)})
                      }
        return result

    @classmethod
    def timesearchField(self):
        """Name of the field used for time-range searches.
        NOTE: declared @classmethod but the first parameter is named self."""
        return 'acquisition_time'
def create_deepzoom_slug(self):
"""
Returns a string instance for deepzoom slug.
"""
if self.name:
try:
filename = self.name.split('.')
except:
return ''
deepzoomSlug = filename[0] + "_deepzoom_" + str(self.id)
return deepzoomSlug.lower()
    def create_deepzoom_image(self):
        """
        Creates and processes deep zoom image files to storage (couchdb path).
        Returns instance of newly created DeepZoom instance for associating
        uploaded image to it.

        Side effects on success: creates/fetches a DeepZoomTiles row keyed by
        the raw image URL, generates the tile pyramid, links it via
        self.associated_deepzoom, clears self.create_deepzoom, and saves self.
        """
        try:
            deepzoomSlug = self.create_deepzoom_slug()
            rawImageUrl = self.getRawImage().file.url
            # get_or_create so re-running for the same raw image reuses the row.
            dz, created = DeepZoomTiles.objects.get_or_create(associated_image=rawImageUrl,
                                                              name=deepzoomSlug)
            if created:
                dz.slug = slugify(deepzoomSlug)
                dz.save()
                dz.create_deepzoom_files()
            self.associated_deepzoom = dz
            self.create_deepzoom = False
            self.save()
        except (TypeError, ValueError, AttributeError) as err:
            print("Error: Incorrect deep zoom parameter(s) in settings.py: {0}".format(err))
            raise
        except:
            print("Unexpected error creating deep zoom: {0}".format(sys.exc_info()[1:2]))
            raise
#         finally:
            # Mark the thread inactive in the couchdb in case there's another
            # thread waiting for this to be finished
            # TODO: come up with a better multithreaded way to do this
#             dbServer = couchdb.Server(settings.COUCHDB_URL)
#             db = dbServer[settings.COUCHDB_FILESTORE_NAME]
#             myFlag = db['create_deepzoom_thread']
#             myFlag['active'] = False
#             db['create_deepzoom_thread'] = myFlag
def create_vips_deepzoom_image(self):
"""
Creates and processes deep zoom image files to local storage.
"""
try:
deepzoomSlug = self.create_deepzoom_slug()
self.local_deepzoom_slug = deepzoomSlug
self.associated_deepzoom = None
rawImageFile = self.getRawImage().file
tempImageFile = tempfile.NamedTemporaryFile(delete=False)
tempImageFile.write(rawImageFile.read())
tempImageFile.close()
baseImage = Vips.Image.new_from_file(tempImageFile.name)
tileOutputDir = "%s%s%s" % (settings.DATA_ROOT, settings.DEEPZOOM_ROOT, deepzoomSlug)
os.mkdir(tileOutputDir)
baseImage.dzsave("%s/%s" % (tileOutputDir, deepzoomSlug),
overlap=settings.VIPS_PARAMS['overlap'],
tile_size=settings.VIPS_PARAMS['tile_size'])
os.remove(tempImageFile.name)
self.create_deepzoom = False
self.save()
except:
print("Unexpected error creating deep zoom: {0}".format(sys.exc_info()[1:2]))
raise
def delete_image_file(self, path_of_image_to_delete=None):
"""
Deletes uploaded image file from storage.
"""
try:
os.remove(path_of_image_to_delete)
except OSError:
logger.exception("Image file deletion failed!")
    @classmethod
    def cls_type(cls):
        """Short type name used by the generic search/map machinery."""
        return settings.XGDS_IMAGE_IMAGE_MODEL_NAME

    @property
    def raw_image_url(self):
        """URL of the raw (original) image, or None if there is none."""
        rawImage = self.getRawImage()
        if rawImage:
            return rawImage.file.url
        return None

    @property
    def camera_name(self):
        """Name of the capturing camera, or None if no camera is linked."""
        if self.camera:
            return self.camera.name
        return None

    @property
    def author_name(self):
        """Display name of the uploading user."""
        return getUserName(self.author)

    @property
    def timezone(self):
        """Timezone string in which this image was acquired."""
        return self.acquisition_timezone

    @property
    def originalImageResolutionString(self):
        """Human-readable resolution, e.g. "4000 x 3000 | 12.00 MP", or 'n/a'."""
        originalImage = self.getRawImage()
        if originalImage:
            width = originalImage.width
            height = originalImage.height
            if width and height:
                megaPixels = (width * height)/(1000.0*1000.0)
                return "%1d x %1d | %1.2f MP" % (width, height, megaPixels)
        return 'n/a'

    @property
    def originalImageFileSizeMB(self):
        """Human-readable file size, e.g. "3.25 MB", or 'n/a' if unknown."""
        originalImage = self.getRawImage()
        if originalImage and originalImage.fileSizeBytes:
            fileSizeMB = "%1.2f MB" % (originalImage.fileSizeBytes/(1024.0*1024.0))
            return fileSizeMB
        return 'n/a'

    @property
    def thumbnail_image_url(self):
        """URL of the thumbnail image, or '' if there is none."""
        thumbImage = self.getThumbnail()
        if thumbImage:
            return thumbImage.file.url
        return ''

    @property
    def deepzoom_file_url(self):
        """URL of the .dzi deep zoom descriptor, or None if no deep zoom exists.

        For backward compatibility we check the associated_deepzoom object first
        (couchdb-backed tiles), but we are moving toward tiling images with VIPS
        at each node and storing locally (2nd if clause).
        """
        if self.associated_deepzoom:
            deepzoomSlug = self.associated_deepzoom.slug
            docDir = settings.DEEPZOOM_ROOT + deepzoomSlug
            docFile = deepzoomSlug + '.dzi'
            return reverse('get_db_attachment', kwargs={'docDir': docDir, 'docName': docFile})
        if self.local_deepzoom_slug:
            return settings.DATA_URL + settings.DEEPZOOM_ROOT + self.local_deepzoom_slug + \
                "/" + self.local_deepzoom_slug + ".dzi"
        return None
    def finish_initialization(self, request):
        """ during construction, if you have extra data to fill in you can override this method"""
        pass

    class Meta:
        abstract = True

    def __unicode__(self):
        return (u"ImageSet(%s, name='%s', shortName='%s')"
                % (self.pk, self.name, self.shortName))

    def getPosition(self):
        """Return the best available position record, preferring hand-edited
        (user) over EXIF over interpolated track position; None if none set."""
        if self.user_position:
            return self.user_position
        if self.exif_position:
            return self.exif_position
        if self.track_position:
            return self.track_position
        return None
@property
def head(self):
""" heading """
try:
position = self.getPosition()
if position:
if self.camera:
return position.heading + self.camera.heading_offset_degrees
return position.heading
except:
pass
return None
def getPositionDict(self):
""" override if you want to change the logic for how the positions are prioritized in JSON.
Right now exif_position is from the camera, track_position is from the track, and user_position stores any hand edits.
track provides lat lon and altitude, exif provides heading, and user trumps all.
"""
result = {}
result['alt'] = ""
result['head'] = ""
heading_offset_degrees = 0
if self.camera:
heading_offset_degrees = self.camera.heading_offset_degrees
if self.user_position:
result['lat'] = self.user_position.latitude
result['lon'] = self.user_position.longitude
if hasattr(self.user_position, 'altitude'):
result['alt'] = self.user_position.altitude
if hasattr(self.user_position, 'heading'):
result['head'] = self.user_position.heading + heading_offset_degrees
return result
result['position_id'] = ""
if self.track_position:
result['lat'] = self.track_position.latitude
result['lon'] = self.track_position.longitude
if self.track_position.altitude:
result['alt'] = self.track_position.altitude
if hasattr(self.track_position, 'heading'):
result['head'] = self.track_position.heading + heading_offset_degrees
if result['alt'] == '' and hasattr(self.exif_position, 'altitude'):
result['alt'] = self.track_position.altitude
return result
elif self.exif_position:
result['lat'] = self.exif_position.latitude
result['lon'] = self.exif_position.longitude
if hasattr(self.exif_position, 'altitude'):
result['alt'] = self.exif_position.altitude
if hasattr(self.exif_position, 'heading'):
result['head'] = self.exif_position.heading + heading_offset_degrees
else:
result['lat'] = ""
result['lon'] = ""
return result
def getRawImage(self):
rawImages = self.images.filter(raw=True)
if rawImages:
return rawImages[0]
else:
return None
def getSourceImage(self):
sourceImages = self.images.filter(imageType=ImageType.source.value)
if sourceImages:
return sourceImages[0]
else:
return None
def getDownloadableFiles(self):
"""
:return: list of file objects, each with their own `read()` functions
"""
sourceImage = self.getSourceImage()
if sourceImage:
return [sourceImage.file]
return [self.getRawImage().file]
def getLowerResImages(self):
return self.images.filter(raw=False, thumbnail=False)
def getThumbnail(self):
thumbImages = self.images.filter(thumbnail=True)
if thumbImages:
return thumbImages[0]
else:
return None
    @classmethod
    def getSearchableFields(self):
        """Model fields included in free-text search."""
        return ['name', 'description', 'author__first_name', 'author__last_name', 'flight__name']

    @classmethod
    def getSearchFormFields(cls):
        """Fields rendered in the structured search form."""
        return ['name',
                'description',
                'author',
                'camera',
                'flight__vehicle'
                ]

    @classmethod
    def getSearchFieldOrder(cls):
        """Display order of the search form fields (incl. time-range bounds)."""
        return ['flight__vehicle',
                'author',
                'name',
                'description',
                'camera',
                'acquisition_timezone',
                'min_acquisition_time',
                'max_acquisition_time']
@classmethod
def buildTextAnnotationQuery(cls, search_keywords):
"""
Build a query that will search for an image set that is pointed to by a text annotation containing keyword
:param search_keywords: keywords to search for in the format keyword and keyword or keyword etc
:return: the found list of pks
"""
master_query = LazyGetModelByName(settings.XGDS_IMAGE_TEXT_ANNOTATION_MODEL).get().objects.all()
counter = 0
last_query = None
keyword_query = None
while counter < len(search_keywords):
if counter % 2 == 0:
last_query = Q(**{'content__icontains': search_keywords[counter]})
else:
if not keyword_query:
keyword_query = last_query
else:
join_type = search_keywords[counter]
if join_type == 'and':
keyword_query &= last_query
else:
keyword_query |= last_query
counter += 1
if not keyword_query:
keyword_query = last_query
result = master_query.filter(keyword_query)
image_ids = result.values_list('image_id', flat=True)
return list(image_ids)
class ImageSet(AbstractImageSet):
    """Default concrete image set model."""
    # set foreign key fields from parent model to point to correct types
    camera = DEFAULT_CAMERA_FIELD()
    track_position = DEFAULT_TRACK_POSITION_FIELD()
    exif_position = DEFAULT_EXIF_POSITION_FIELD()
    user_position = DEFAULT_USER_POSITION_FIELD()
    notes = DEFAULT_NOTES_GENERIC_RELATION()
    flight = DEFAULT_FLIGHT_FIELD()


# Shared storage backend for image files; uploads go to couchdb.
couchStore = CouchDbStorage()


class AbstractSingleImage(models.Model):
    """
    An abstract image which may not necessarily have a location on a map.
    One row per rendition (raw / resized / thumbnail) of an image set.
    """
    file = models.ImageField(upload_to=getNewImageFileName,
                             max_length=256, storage=couchStore)
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    # True for the original upload; resized copies set raw=False.
    raw = models.BooleanField(default=True)
    imageSet = 'set this to DEFAULT_IMAGE_SET_FIELD() or similar in derived models'
    thumbnail = models.BooleanField(default=False)
    width = models.IntegerField(blank=True, null=True)
    height = models.IntegerField(blank=True, null=True)
    fileSizeBytes = models.IntegerField(blank=True, null=True)
    # One of the ImageType enum values (full/source/thumbnail).
    imageType = models.IntegerField(blank=True, null=True)

    @property
    def acquisition_time(self):
        """Acquisition time comes from the owning image set."""
        return self.imageSet.acquisition_time

#     def toMapDict(self):
#         """
#         Return a reduced dictionary that will be turned to JSON
#         """
#         result = modelToDict(self)
#         return result

    def getAnnotations(self):
        """Return annotations attached to this image.
        NOTE(review): ANNOTATION_MANAGER is defined elsewhere in this module."""
        return ANNOTATION_MANAGER.filter(image__pk=self.pk)

    class Meta:
        abstract = True
        ordering = ['-creation_time']

    def __unicode__(self):
        return self.file.name

    @classmethod
    def getSseType(cls):
        """Return the (lowercased) server-sent-event type name for images."""
        return settings.XGDS_IMAGE_SSE_TYPE.lower()


class SingleImage(AbstractSingleImage):
    """ This can be used for screenshots or non geolocated images
    """
    # set foreign key fields from parent model to point to correct types
    imageSet = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='images',
                                 verbose_name=settings.XGDS_IMAGE_IMAGE_SET_MONIKER, blank=True, null=True)


# @receiver(post_save, sender=SingleImage)
# def publishAfterSave(sender, instance, **kwargs):
#     if settings.XGDS_CORE_REDIS:
#         for channel in settings.XGDS_SSE_CHANNELS:
#             publishRedisSSE(channel, settings.XGDS_IMAGE_SSE_TYPE.lower(), json.dumps({}))


# Annotations point back at a single image via this foreign key.
DEFAULT_SINGLE_IMAGE_FIELD = lambda: models.ForeignKey(settings.XGDS_IMAGE_SINGLE_IMAGE_MODEL, related_name="image")
class AnnotationColor(models.Model):
    """Named color (with hex value) selectable for annotation strokes/fills."""
    name = models.CharField(max_length=16, db_index=True)
    hex = models.CharField(max_length=16)


class AbstractAnnotation(models.Model):
    """Base class for drawable annotations overlaid on an image.
    Geometry fields mirror the client-side canvas (fabric.js-style) object."""
    left = models.IntegerField(null=False, blank=False)
    top = models.IntegerField(null=False, blank=False)
    strokeColor = models.ForeignKey(AnnotationColor, related_name='%(app_label)s_%(class)s_strokeColor', default=1)
    strokeWidth = models.PositiveIntegerField(default=2)
    angle = models.FloatField(default=0)  # store shape rotation angle
    scaleX = models.FloatField(default=1)
    scaleY = models.FloatField(default=1)
    originX = models.CharField(max_length=16, default="left")
    originY = models.CharField(max_length=16, default="center")
    fill = models.ForeignKey(AnnotationColor, related_name='%(app_label)s_%(class)s_fill', null=True, blank=True)
    size = models.CharField(max_length=16, default="medium")
    author = models.ForeignKey(User, related_name='%(app_label)s_%(class)s_related')
    creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
    image = 'set this to DEFAULT_IMAGE_SET_FIELD or similar in derived classes'
    # WARNING -- you cannot include the below in this class or it will cause a circular dependency in migrations
    #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image')

    class Meta:
        abstract = True

    def getJsonType(self):
        """Type discriminator used by the client; subclasses override."""
        return 'annotation'

    def toJson(self):
        """Serialize to a plain dict (model fields + annotationType + pk)."""
        result = model_to_dict(self)
        result['annotationType'] = self.getJsonType()
        result['pk'] = self.pk
        return result


class NormalAnnotation(AbstractAnnotation):
    """ The default type of annotation, referring to an xgds_image.ImageSet """
    image = DEFAULT_IMAGE_SET_FIELD()

    class Meta:
        abstract = True


class AbstractTextAnnotation(models.Model):
    """Mixin fields for a text-label annotation."""
    content = models.CharField(max_length=512, default='')
    isBold = models.BooleanField(default=False)
    isItalics = models.BooleanField(default=False)
    width = models.PositiveIntegerField(default=1)
    height = models.PositiveIntegerField(default=1)
    fontSize = models.PositiveIntegerField(default=8)

    def getJsonType(self):
        return 'text'

    class Meta:
        abstract = True


class TextAnnotation(AbstractTextAnnotation, NormalAnnotation):
    pass


class AbstractEllipseAnnotation(models.Model):
    """Mixin fields for an ellipse annotation."""
    radiusX = models.IntegerField()
    radiusY = models.IntegerField()

    def getJsonType(self):
        return 'ellipse'

    class Meta:
        abstract = True


class EllipseAnnotation(AbstractEllipseAnnotation, NormalAnnotation):
    pass


class AbstractRectangleAnnotation(models.Model):
    """Mixin fields for a rectangle annotation."""
    width = models.PositiveIntegerField()
    height = models.PositiveIntegerField()

    def getJsonType(self):
        return 'rectangle'

    class Meta:
        abstract = True


class RectangleAnnotation(AbstractRectangleAnnotation, NormalAnnotation):
    pass


class AbstractArrowAnnotation(models.Model):
    """Mixin fields for an arrow annotation; points is a JSON-encoded list."""
    points = models.TextField(default='[]')

    def getJsonType(self):
        return 'arrow'

    class Meta:
        abstract = True


class ArrowAnnotation(AbstractArrowAnnotation, NormalAnnotation):
    pass
# NOT USED YET
# This will support the url to the saved annotated image download via url
# class AnnotatedScreenshot(models.Model):
# imageBinary = models.FileField(upload_to=settings.XGDS_IMAGE_ANNOTATED_IMAGES_SUBDIR)
# width = models.PositiveIntegerField(default=250)
# height = models.PositiveIntegerField(default=250)
# author = models.ForeignKey(User)
# creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True)
# image = 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes'
# # WARNING -- the below will cause a circular dependency so don't do it. You have to have a derived class if you are planning to use this.
# #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image') # DEFAULT_SINGLE_IMAGE_FIELD # 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes'
|
en
| 0.83404
|
#__BEGIN_LICENSE__ # Copyright (c) 2015, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. # All rights reserved. # # The xGDS platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. #__END_LICENSE__ # This global declaration does not work when the database name has to be changed # at run time (e.g. when running unit tests), so the global declaration has been # moved to a couple places where it is needed here and may need to be fixed # elsewhere if the change has other unintended and undetected consequences # couchStore = CouchDbStorage() # couchDatabase = couchStore.get_couchDb() Definitions of image type here. Currently this will include: SOURCE, for images which get converted. FULL, for images which are full size renderable THUMBNAIL, renderable thumbnail images Camera class # TODO change these in your model classes if you are not using defaults # TODO if you are not using the default image set model you will have to override this in your classes Save descriptor file. ################# # these were global, now defined locally: ################# Creates Deep Zoom image from source file and saves it to destination. #destination = deepzoom._expand(destination) # path to dzi file: i.e. /vagrant/xgds_basalt/data/xgds_image/deepzoom_images/p6180021_deepzoom_107/p6180021_deepzoom_107.dzi # p6180021_deepzoom_107 # i.e. 
/vagrant/xgds_basalt/data/xgds_image/deepzoom_images/p6180021_deepzoom_107 # Create tiles # autocalculated from deepzoom DZIDescriptor -- set this in siteSettings # these are the tiles that I need to save to couchdb! # HERE save the tile to couch db # save the pil image with BytesIO as the file. and then get the string # basename and name are for convenience so we can look it up later. ################# # these were global, now defined locally: ################# Creates deepzoom image from associated uploaded image. Attempts to load `DEEPZOOM_PARAMS` and `DEEPZOOM_ROOT` from settings. Substitutues default settings for any missing settings. #Try to load deep zoom parameters, otherwise assign default values. #Initialize deep zoom creator. #Try to load deep zoom root, otherwise assign default value. # getting the associated image #Process deep zoom image and save to file system. # source, destination ImageSet is for supporting various resolution images from the same source image. Set includes the raw image and any resized images. Contains utility functions to fetch different sized images. #Optionally generate deep zoom from uploaded image if set to True. # True if you need to create a deepzoom #Link this image to generated deep zoom. Returns a string instance for deepzoom slug. Creates and processes deep zoom image files to storage. Returns instance of newly created DeepZoom instance for associating uploaded image to it. # finally: # Mark the thread inactive in the couchdb in case there's another # thread waiting for this to be finished # TODO: come up with a better multithreaded way to do this # dbServer = couchdb.Server(settings.COUCHDB_URL) # db = dbServer[settings.COUCHDB_FILESTORE_NAME] # myFlag = db['create_deepzoom_thread'] # myFlag['active'] = False # db['create_deepzoom_thread'] = myFlag Creates and processes deep zoom image files to local storage. Deletes uploaded image file from storage. 
# For backward compatibility we check the associated_deepzoom object first, but we are moving toward # tiling images with VIPS at each node at storing locally (2nd if clause). during construction, if you have extra data to fill in you can override this method heading override if you want to change the logic for how the positions are prioritized in JSON. Right now exif_position is from the camera, track_position is from the track, and user_position stores any hand edits. track provides lat lon and altitude, exif provides heading, and user trumps all. :return: list of file objects, each with their own `read()` functions Build a query that will search for an image set that is pointed to by a text annotation containing keyword :param search_keywords: keywords to search for in the format keyword and keyword or keyword etc :return: the found list of pks # set foreign key fields from parent model to point to correct types An abstract image which may not necessarily have a location on a map # def toMapDict(self): # """ # Return a reduced dictionary that will be turned to JSON # """ # result = modelToDict(self) # return result This can be used for screenshots or non geolocated images # set foreign key fields from parent model to point to correct types # @receiver(post_save, sender=SingleImage) # def publishAfterSave(sender, instance, **kwargs): # if settings.XGDS_CORE_REDIS: # for channel in settings.XGDS_SSE_CHANNELS: # publishRedisSSE(channel, settings.XGDS_IMAGE_SSE_TYPE.lower(), json.dumps({})) # store shape rotation angle # WARNING -- you cannot include the below in this class or it will cause a circular dependency in migrations #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image') The default type of annotation, referring to an xgds_image.ImageSet # NOT USED YET # This will support the url to the saved annotated image download via url # class AnnotatedScreenshot(models.Model): # imageBinary = 
models.FileField(upload_to=settings.XGDS_IMAGE_ANNOTATED_IMAGES_SUBDIR) # width = models.PositiveIntegerField(default=250) # height = models.PositiveIntegerField(default=250) # author = models.ForeignKey(User) # creation_time = models.DateTimeField(blank=True, default=timezone.now, editable=False, db_index=True) # image = 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes' # # WARNING -- the below will cause a circular dependency so don't do it. You have to have a derived class if you are planning to use this. # #image = models.ForeignKey(settings.XGDS_IMAGE_IMAGE_SET_MODEL, related_name='%(app_label)s_%(class)s_image') # DEFAULT_SINGLE_IMAGE_FIELD # 'set this to DEFAULT_SINGLE_IMAGE_FIELD or similar in derived classes'
| 1.354162
| 1
|
lndmanage/grpc_compiled/router_pb2_grpc.py
|
mmilata/lndmanage
| 0
|
6627426
|
<reponame>mmilata/lndmanage<filename>lndmanage/grpc_compiled/router_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import lndmanage.grpc_compiled.router_pb2 as router__pb2
import lndmanage.grpc_compiled.rpc_pb2 as rpc__pb2
class RouterStub(object):
    """Client-side stub for the ``routerrpc.Router`` gRPC service.

    Each attribute assigned in ``__init__`` is a multi-callable bound to one
    RPC of the service, with request serializers and response deserializers
    wired to the generated protobuf message classes.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming payment RPCs: the server pushes Payment updates.
        self.SendPaymentV2 = channel.unary_stream(
            '/routerrpc.Router/SendPaymentV2',
            request_serializer=router__pb2.SendPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.Payment.FromString,
        )
        self.TrackPaymentV2 = channel.unary_stream(
            '/routerrpc.Router/TrackPaymentV2',
            request_serializer=router__pb2.TrackPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.Payment.FromString,
        )
        # Unary request/response RPCs.
        self.EstimateRouteFee = channel.unary_unary(
            '/routerrpc.Router/EstimateRouteFee',
            request_serializer=router__pb2.RouteFeeRequest.SerializeToString,
            response_deserializer=router__pb2.RouteFeeResponse.FromString,
        )
        self.SendToRoute = channel.unary_unary(
            '/routerrpc.Router/SendToRoute',
            request_serializer=router__pb2.SendToRouteRequest.SerializeToString,
            response_deserializer=router__pb2.SendToRouteResponse.FromString,
        )
        self.ResetMissionControl = channel.unary_unary(
            '/routerrpc.Router/ResetMissionControl',
            request_serializer=router__pb2.ResetMissionControlRequest.SerializeToString,
            response_deserializer=router__pb2.ResetMissionControlResponse.FromString,
        )
        self.QueryMissionControl = channel.unary_unary(
            '/routerrpc.Router/QueryMissionControl',
            request_serializer=router__pb2.QueryMissionControlRequest.SerializeToString,
            response_deserializer=router__pb2.QueryMissionControlResponse.FromString,
        )
        self.QueryProbability = channel.unary_unary(
            '/routerrpc.Router/QueryProbability',
            request_serializer=router__pb2.QueryProbabilityRequest.SerializeToString,
            response_deserializer=router__pb2.QueryProbabilityResponse.FromString,
        )
        self.BuildRoute = channel.unary_unary(
            '/routerrpc.Router/BuildRoute',
            request_serializer=router__pb2.BuildRouteRequest.SerializeToString,
            response_deserializer=router__pb2.BuildRouteResponse.FromString,
        )
        # Server-streaming HTLC event subscription.
        self.SubscribeHtlcEvents = channel.unary_stream(
            '/routerrpc.Router/SubscribeHtlcEvents',
            request_serializer=router__pb2.SubscribeHtlcEventsRequest.SerializeToString,
            response_deserializer=router__pb2.HtlcEvent.FromString,
        )
        # Deprecated variants (see servicer docstrings): respond with
        # PaymentStatus messages instead of Payment.
        self.SendPayment = channel.unary_stream(
            '/routerrpc.Router/SendPayment',
            request_serializer=router__pb2.SendPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.PaymentStatus.FromString,
        )
        self.TrackPayment = channel.unary_stream(
            '/routerrpc.Router/TrackPayment',
            request_serializer=router__pb2.TrackPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.PaymentStatus.FromString,
        )
class RouterServicer(object):
    """Server-side service interface for ``routerrpc.Router``.

    Every handler below marks the RPC as UNIMPLEMENTED and raises
    ``NotImplementedError``; a concrete servicer subclasses this class and
    overrides the methods it supports.
    """
    # missing associated documentation comment in .proto file
    pass

    def SendPaymentV2(self, request, context):
        """*
        SendPaymentV2 attempts to route a payment described by the passed
        PaymentRequest to the final destination. The call returns a stream of
        payment updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def TrackPaymentV2(self, request, context):
        """*
        TrackPaymentV2 returns an update stream for the payment identified by the
        payment hash.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def EstimateRouteFee(self, request, context):
        """*
        EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it
        may cost to send an HTLC to the target end destination.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendToRoute(self, request, context):
        """*
        SendToRoute attempts to make a payment via the specified route. This method
        differs from SendPayment in that it allows users to specify a full route
        manually. This can be used for things like rebalancing, and atomic swaps.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ResetMissionControl(self, request, context):
        """*
        ResetMissionControl clears all mission control state and starts with a clean
        slate.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def QueryMissionControl(self, request, context):
        """*
        QueryMissionControl exposes the internal mission control state to callers.
        It is a development feature.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def QueryProbability(self, request, context):
        """*
        QueryProbability returns the current success probability estimate for a
        given node pair and amount.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def BuildRoute(self, request, context):
        """*
        BuildRoute builds a fully specified route based on a list of hop public
        keys. It retrieves the relevant channel policies from the graph in order to
        calculate the correct fees and time locks.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SubscribeHtlcEvents(self, request, context):
        """*
        SubscribeHtlcEvents creates a uni-directional stream from the server to
        the client which delivers a stream of htlc events.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendPayment(self, request, context):
        """*
        Deprecated, use SendPaymentV2. SendPayment attempts to route a payment
        described by the passed PaymentRequest to the final destination. The call
        returns a stream of payment status updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def TrackPayment(self, request, context):
        """*
        Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for
        the payment identified by the payment hash.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RouterServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers with a ``grpc.Server``.

    Builds one method handler per service method — matching the unary/stream
    shape used by the client stub — and attaches them under the
    ``routerrpc.Router`` service name.
    """
    rpc_method_handlers = {
        'SendPaymentV2': grpc.unary_stream_rpc_method_handler(
            servicer.SendPaymentV2,
            request_deserializer=router__pb2.SendPaymentRequest.FromString,
            response_serializer=rpc__pb2.Payment.SerializeToString,
        ),
        'TrackPaymentV2': grpc.unary_stream_rpc_method_handler(
            servicer.TrackPaymentV2,
            request_deserializer=router__pb2.TrackPaymentRequest.FromString,
            response_serializer=rpc__pb2.Payment.SerializeToString,
        ),
        'EstimateRouteFee': grpc.unary_unary_rpc_method_handler(
            servicer.EstimateRouteFee,
            request_deserializer=router__pb2.RouteFeeRequest.FromString,
            response_serializer=router__pb2.RouteFeeResponse.SerializeToString,
        ),
        'SendToRoute': grpc.unary_unary_rpc_method_handler(
            servicer.SendToRoute,
            request_deserializer=router__pb2.SendToRouteRequest.FromString,
            response_serializer=router__pb2.SendToRouteResponse.SerializeToString,
        ),
        'ResetMissionControl': grpc.unary_unary_rpc_method_handler(
            servicer.ResetMissionControl,
            request_deserializer=router__pb2.ResetMissionControlRequest.FromString,
            response_serializer=router__pb2.ResetMissionControlResponse.SerializeToString,
        ),
        'QueryMissionControl': grpc.unary_unary_rpc_method_handler(
            servicer.QueryMissionControl,
            request_deserializer=router__pb2.QueryMissionControlRequest.FromString,
            response_serializer=router__pb2.QueryMissionControlResponse.SerializeToString,
        ),
        'QueryProbability': grpc.unary_unary_rpc_method_handler(
            servicer.QueryProbability,
            request_deserializer=router__pb2.QueryProbabilityRequest.FromString,
            response_serializer=router__pb2.QueryProbabilityResponse.SerializeToString,
        ),
        'BuildRoute': grpc.unary_unary_rpc_method_handler(
            servicer.BuildRoute,
            request_deserializer=router__pb2.BuildRouteRequest.FromString,
            response_serializer=router__pb2.BuildRouteResponse.SerializeToString,
        ),
        'SubscribeHtlcEvents': grpc.unary_stream_rpc_method_handler(
            servicer.SubscribeHtlcEvents,
            request_deserializer=router__pb2.SubscribeHtlcEventsRequest.FromString,
            response_serializer=router__pb2.HtlcEvent.SerializeToString,
        ),
        'SendPayment': grpc.unary_stream_rpc_method_handler(
            servicer.SendPayment,
            request_deserializer=router__pb2.SendPaymentRequest.FromString,
            response_serializer=router__pb2.PaymentStatus.SerializeToString,
        ),
        'TrackPayment': grpc.unary_stream_rpc_method_handler(
            servicer.TrackPayment,
            request_deserializer=router__pb2.TrackPaymentRequest.FromString,
            response_serializer=rpc__pb2.PaymentStatus.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'routerrpc.Router', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import lndmanage.grpc_compiled.router_pb2 as router__pb2
import lndmanage.grpc_compiled.rpc_pb2 as rpc__pb2
class RouterStub(object):
    """Client-side stub for the ``routerrpc.Router`` gRPC service.

    Each attribute assigned in ``__init__`` is a multi-callable bound to one
    RPC of the service, with request serializers and response deserializers
    wired to the generated protobuf message classes.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming payment RPCs: the server pushes Payment updates.
        self.SendPaymentV2 = channel.unary_stream(
            '/routerrpc.Router/SendPaymentV2',
            request_serializer=router__pb2.SendPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.Payment.FromString,
        )
        self.TrackPaymentV2 = channel.unary_stream(
            '/routerrpc.Router/TrackPaymentV2',
            request_serializer=router__pb2.TrackPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.Payment.FromString,
        )
        # Unary request/response RPCs.
        self.EstimateRouteFee = channel.unary_unary(
            '/routerrpc.Router/EstimateRouteFee',
            request_serializer=router__pb2.RouteFeeRequest.SerializeToString,
            response_deserializer=router__pb2.RouteFeeResponse.FromString,
        )
        self.SendToRoute = channel.unary_unary(
            '/routerrpc.Router/SendToRoute',
            request_serializer=router__pb2.SendToRouteRequest.SerializeToString,
            response_deserializer=router__pb2.SendToRouteResponse.FromString,
        )
        self.ResetMissionControl = channel.unary_unary(
            '/routerrpc.Router/ResetMissionControl',
            request_serializer=router__pb2.ResetMissionControlRequest.SerializeToString,
            response_deserializer=router__pb2.ResetMissionControlResponse.FromString,
        )
        self.QueryMissionControl = channel.unary_unary(
            '/routerrpc.Router/QueryMissionControl',
            request_serializer=router__pb2.QueryMissionControlRequest.SerializeToString,
            response_deserializer=router__pb2.QueryMissionControlResponse.FromString,
        )
        self.QueryProbability = channel.unary_unary(
            '/routerrpc.Router/QueryProbability',
            request_serializer=router__pb2.QueryProbabilityRequest.SerializeToString,
            response_deserializer=router__pb2.QueryProbabilityResponse.FromString,
        )
        self.BuildRoute = channel.unary_unary(
            '/routerrpc.Router/BuildRoute',
            request_serializer=router__pb2.BuildRouteRequest.SerializeToString,
            response_deserializer=router__pb2.BuildRouteResponse.FromString,
        )
        # Server-streaming HTLC event subscription.
        self.SubscribeHtlcEvents = channel.unary_stream(
            '/routerrpc.Router/SubscribeHtlcEvents',
            request_serializer=router__pb2.SubscribeHtlcEventsRequest.SerializeToString,
            response_deserializer=router__pb2.HtlcEvent.FromString,
        )
        # Deprecated variants (see servicer docstrings): respond with
        # PaymentStatus messages instead of Payment.
        self.SendPayment = channel.unary_stream(
            '/routerrpc.Router/SendPayment',
            request_serializer=router__pb2.SendPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.PaymentStatus.FromString,
        )
        self.TrackPayment = channel.unary_stream(
            '/routerrpc.Router/TrackPayment',
            request_serializer=router__pb2.TrackPaymentRequest.SerializeToString,
            response_deserializer=rpc__pb2.PaymentStatus.FromString,
        )
class RouterServicer(object):
    """Server-side service interface for ``routerrpc.Router``.

    Every handler below marks the RPC as UNIMPLEMENTED and raises
    ``NotImplementedError``; a concrete servicer subclasses this class and
    overrides the methods it supports.
    """
    # missing associated documentation comment in .proto file
    pass

    def SendPaymentV2(self, request, context):
        """*
        SendPaymentV2 attempts to route a payment described by the passed
        PaymentRequest to the final destination. The call returns a stream of
        payment updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def TrackPaymentV2(self, request, context):
        """*
        TrackPaymentV2 returns an update stream for the payment identified by the
        payment hash.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def EstimateRouteFee(self, request, context):
        """*
        EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it
        may cost to send an HTLC to the target end destination.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendToRoute(self, request, context):
        """*
        SendToRoute attempts to make a payment via the specified route. This method
        differs from SendPayment in that it allows users to specify a full route
        manually. This can be used for things like rebalancing, and atomic swaps.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ResetMissionControl(self, request, context):
        """*
        ResetMissionControl clears all mission control state and starts with a clean
        slate.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def QueryMissionControl(self, request, context):
        """*
        QueryMissionControl exposes the internal mission control state to callers.
        It is a development feature.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def QueryProbability(self, request, context):
        """*
        QueryProbability returns the current success probability estimate for a
        given node pair and amount.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def BuildRoute(self, request, context):
        """*
        BuildRoute builds a fully specified route based on a list of hop public
        keys. It retrieves the relevant channel policies from the graph in order to
        calculate the correct fees and time locks.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SubscribeHtlcEvents(self, request, context):
        """*
        SubscribeHtlcEvents creates a uni-directional stream from the server to
        the client which delivers a stream of htlc events.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendPayment(self, request, context):
        """*
        Deprecated, use SendPaymentV2. SendPayment attempts to route a payment
        described by the passed PaymentRequest to the final destination. The call
        returns a stream of payment status updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def TrackPayment(self, request, context):
        """*
        Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for
        the payment identified by the payment hash.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RouterServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers with a ``grpc.Server``.

    Builds one method handler per service method — matching the unary/stream
    shape used by the client stub — and attaches them under the
    ``routerrpc.Router`` service name.
    """
    rpc_method_handlers = {
        'SendPaymentV2': grpc.unary_stream_rpc_method_handler(
            servicer.SendPaymentV2,
            request_deserializer=router__pb2.SendPaymentRequest.FromString,
            response_serializer=rpc__pb2.Payment.SerializeToString,
        ),
        'TrackPaymentV2': grpc.unary_stream_rpc_method_handler(
            servicer.TrackPaymentV2,
            request_deserializer=router__pb2.TrackPaymentRequest.FromString,
            response_serializer=rpc__pb2.Payment.SerializeToString,
        ),
        'EstimateRouteFee': grpc.unary_unary_rpc_method_handler(
            servicer.EstimateRouteFee,
            request_deserializer=router__pb2.RouteFeeRequest.FromString,
            response_serializer=router__pb2.RouteFeeResponse.SerializeToString,
        ),
        'SendToRoute': grpc.unary_unary_rpc_method_handler(
            servicer.SendToRoute,
            request_deserializer=router__pb2.SendToRouteRequest.FromString,
            response_serializer=router__pb2.SendToRouteResponse.SerializeToString,
        ),
        'ResetMissionControl': grpc.unary_unary_rpc_method_handler(
            servicer.ResetMissionControl,
            request_deserializer=router__pb2.ResetMissionControlRequest.FromString,
            response_serializer=router__pb2.ResetMissionControlResponse.SerializeToString,
        ),
        'QueryMissionControl': grpc.unary_unary_rpc_method_handler(
            servicer.QueryMissionControl,
            request_deserializer=router__pb2.QueryMissionControlRequest.FromString,
            response_serializer=router__pb2.QueryMissionControlResponse.SerializeToString,
        ),
        'QueryProbability': grpc.unary_unary_rpc_method_handler(
            servicer.QueryProbability,
            request_deserializer=router__pb2.QueryProbabilityRequest.FromString,
            response_serializer=router__pb2.QueryProbabilityResponse.SerializeToString,
        ),
        'BuildRoute': grpc.unary_unary_rpc_method_handler(
            servicer.BuildRoute,
            request_deserializer=router__pb2.BuildRouteRequest.FromString,
            response_serializer=router__pb2.BuildRouteResponse.SerializeToString,
        ),
        'SubscribeHtlcEvents': grpc.unary_stream_rpc_method_handler(
            servicer.SubscribeHtlcEvents,
            request_deserializer=router__pb2.SubscribeHtlcEventsRequest.FromString,
            response_serializer=router__pb2.HtlcEvent.SerializeToString,
        ),
        'SendPayment': grpc.unary_stream_rpc_method_handler(
            servicer.SendPayment,
            request_deserializer=router__pb2.SendPaymentRequest.FromString,
            response_serializer=router__pb2.PaymentStatus.SerializeToString,
        ),
        'TrackPayment': grpc.unary_stream_rpc_method_handler(
            servicer.TrackPayment,
            request_deserializer=router__pb2.TrackPaymentRequest.FromString,
            response_serializer=router__pb2.PaymentStatus.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'routerrpc.Router', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
en
| 0.833612
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! # missing associated documentation comment in .proto file Constructor. Args: channel: A grpc.Channel. # missing associated documentation comment in .proto file * SendPaymentV2 attempts to route a payment described by the passed PaymentRequest to the final destination. The call returns a stream of payment updates. * TrackPaymentV2 returns an update stream for the payment identified by the payment hash. * EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it may cost to send an HTLC to the target end destination. * SendToRoute attempts to make a payment via the specified route. This method differs from SendPayment in that it allows users to specify a full route manually. This can be used for things like rebalancing, and atomic swaps. * ResetMissionControl clears all mission control state and starts with a clean slate. * QueryMissionControl exposes the internal mission control state to callers. It is a development feature. * QueryProbability returns the current success probability estimate for a given node pair and amount. * BuildRoute builds a fully specified route based on a list of hop public keys. It retrieves the relevant channel policies from the graph in order to calculate the correct fees and time locks. * SubscribeHtlcEvents creates a uni-directional stream from the server to the client which delivers a stream of htlc events. * Deprecated, use SendPaymentV2. SendPayment attempts to route a payment described by the passed PaymentRequest to the final destination. The call returns a stream of payment status updates. * Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for the payment identified by the payment hash.
| 1.960116
| 2
|
tests/external_boto3/test_boto3_iam.py
|
newrelic/newrelic-python-agen
| 92
|
6627427
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
import boto3
import moto
from newrelic.api.background_task import background_task
from testing_support.fixtures import (validate_transaction_metrics,
validate_tt_segment_params, override_application_settings)
from testing_support.validators.validate_span_events import (
validate_span_events)
# Installed moto version as a comparable (major, minor, patch) tuple.
MOTO_VERSION = tuple(int(v) for v in moto.__version__.split('.')[:3])
# patch earlier versions of moto to support py37
if sys.version_info >= (3, 7) and MOTO_VERSION <= (1, 3, 1):
    import re
    # Old moto bundles a `responses` copy that references re._pattern_type,
    # which was removed in Python 3.7; re-point it at re.Pattern.
    moto.packages.responses.responses.re._pattern_type = re.Pattern
# Dummy credentials: moto intercepts the AWS calls, so these are never sent
# to a real endpoint.
AWS_ACCESS_KEY_ID = '<KEY>'
AWS_SECRET_ACCESS_KEY = '<KEY>'
# Unique user name per test run to avoid collisions between runs.
TEST_USER = 'python-agent-test-%s' % uuid.uuid4()
# Expected agent metrics: three external POSTs, one per IAM operation
# (create/get/delete) performed in test_iam below.
_iam_scoped_metrics = [
    ('External/iam.amazonaws.com/botocore/POST', 3),
]
_iam_rollup_metrics = [
    ('External/all', 3),
    ('External/allOther', 3),
    ('External/iam.amazonaws.com/all', 3),
    ('External/iam.amazonaws.com/botocore/POST', 3),
]
@override_application_settings({'distributed_tracing.enabled': True})
@validate_span_events(
    exact_agents={'http.url': 'https://iam.amazonaws.com/'}, count=3)
@validate_span_events(expected_agents=('aws.requestId',), count=3)
@validate_span_events(exact_agents={'aws.operation': 'CreateUser'}, count=1)
@validate_span_events(exact_agents={'aws.operation': 'GetUser'}, count=1)
@validate_span_events(exact_agents={'aws.operation': 'DeleteUser'}, count=1)
@validate_tt_segment_params(present_params=('aws.requestId',))
@validate_transaction_metrics(
    'test_boto3_iam:test_iam',
    scoped_metrics=_iam_scoped_metrics,
    rollup_metrics=_iam_rollup_metrics,
    background_task=True)
@background_task()
@moto.mock_iam
def test_iam():
    """Exercise IAM create/get/delete user via boto3 under moto.

    The decorator stack asserts that the agent records three external POSTs
    to iam.amazonaws.com, one span per IAM operation, and an aws.requestId
    attribute on each span.
    """
    iam = boto3.client(
        'iam',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    )
    # Create user
    resp = iam.create_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    # Get the user
    resp = iam.get_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    assert resp['User']['UserName'] == TEST_USER
    # Delete the user
    resp = iam.delete_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
import boto3
import moto
from newrelic.api.background_task import background_task
from testing_support.fixtures import (validate_transaction_metrics,
validate_tt_segment_params, override_application_settings)
from testing_support.validators.validate_span_events import (
validate_span_events)
# Installed moto version as a comparable (major, minor, patch) tuple.
MOTO_VERSION = tuple(int(v) for v in moto.__version__.split('.')[:3])
# patch earlier versions of moto to support py37
if sys.version_info >= (3, 7) and MOTO_VERSION <= (1, 3, 1):
    import re
    # Old moto bundles a `responses` copy that references re._pattern_type,
    # which was removed in Python 3.7; re-point it at re.Pattern.
    moto.packages.responses.responses.re._pattern_type = re.Pattern
# Dummy credentials: moto intercepts the AWS calls, so these are never sent
# to a real endpoint.
AWS_ACCESS_KEY_ID = '<KEY>'
AWS_SECRET_ACCESS_KEY = '<KEY>'
# Unique user name per test run to avoid collisions between runs.
TEST_USER = 'python-agent-test-%s' % uuid.uuid4()
# Expected agent metrics: three external POSTs, one per IAM operation
# (create/get/delete) performed in test_iam below.
_iam_scoped_metrics = [
    ('External/iam.amazonaws.com/botocore/POST', 3),
]
_iam_rollup_metrics = [
    ('External/all', 3),
    ('External/allOther', 3),
    ('External/iam.amazonaws.com/all', 3),
    ('External/iam.amazonaws.com/botocore/POST', 3),
]
@override_application_settings({'distributed_tracing.enabled': True})
@validate_span_events(
    exact_agents={'http.url': 'https://iam.amazonaws.com/'}, count=3)
@validate_span_events(expected_agents=('aws.requestId',), count=3)
@validate_span_events(exact_agents={'aws.operation': 'CreateUser'}, count=1)
@validate_span_events(exact_agents={'aws.operation': 'GetUser'}, count=1)
@validate_span_events(exact_agents={'aws.operation': 'DeleteUser'}, count=1)
@validate_tt_segment_params(present_params=('aws.requestId',))
@validate_transaction_metrics(
    'test_boto3_iam:test_iam',
    scoped_metrics=_iam_scoped_metrics,
    rollup_metrics=_iam_rollup_metrics,
    background_task=True)
@background_task()
@moto.mock_iam
def test_iam():
    """Exercise IAM create/get/delete user via boto3 under moto.

    The decorator stack asserts that the agent records three external POSTs
    to iam.amazonaws.com, one span per IAM operation, and an aws.requestId
    attribute on each span.
    """
    iam = boto3.client(
        'iam',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    )
    # Create user
    resp = iam.create_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    # Get the user
    resp = iam.get_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    assert resp['User']['UserName'] == TEST_USER
    # Delete the user
    resp = iam.delete_user(UserName=TEST_USER)
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
|
en
| 0.870379
|
# Copyright 2010 New Relic, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # patch earlier versions of moto to support py37 # Create user # Get the user # Delete the user
| 1.599962
| 2
|
opennem/core/compat/loader.py
|
paulculmsee/opennem
| 22
|
6627428
|
<filename>opennem/core/compat/loader.py
"""
OpenNEM v2 schema loader
"""
import logging
from datetime import timedelta
from typing import Dict, List, Optional
from opennem.api.export.map import StatType
from opennem.core.compat.schema import OpennemDataSetV2, OpennemDataV2
from opennem.diff.versions import get_v2_url
from opennem.schema.flows import FlowType
from opennem.schema.network import NetworkNEM
from opennem.utils.dates import get_last_complete_day_for_network
from opennem.utils.http import http
logger = logging.getLogger("opennem.compat.loader")
def load_statset_v2(statset: List[Dict]) -> OpennemDataSetV2:
    """Parse a list of raw v2 statistic dicts into an ``OpennemDataSetV2``."""
    parsed_series = []
    for raw_series in statset:
        parsed_series.append(OpennemDataV2.parse_obj(raw_series))
    return OpennemDataSetV2(data=parsed_series)
def statset_patch(ds: OpennemDataSetV2, bucket_size: str) -> OpennemDataSetV2:
    """Patch fix for today()

    Keeps only the import/export flow series (energy, market value,
    emissions) and, for daily buckets, rewrites each series' history
    window so it ends on the last complete NEM day.
    """
    today = get_last_complete_day_for_network(NetworkNEM)
    # NOTE(review): .copy() is shallow, so the series objects mutated below
    # are shared with the input `ds` — confirm callers do not rely on `ds`
    # staying untouched.
    ds_out = ds.copy()
    ds_out.data = []
    for ft in [FlowType.imports, FlowType.exports]:
        for st in [StatType.energy, StatType.marketvalue, StatType.emissions]:
            # Series ids look like "<flow>.<stat>", e.g. "imports.energy".
            search_id_match = "{}.{}".format(ft.value, st.value)
            dsid = ds.search_id(search_id_match)
            if not dsid:
                # Series missing from the source set — skip it.
                continue
            if bucket_size == "daily":
                if dsid.history.last != today:
                    # Shift the window forward by the gap so it ends on the
                    # last complete day.
                    day_gap = today - dsid.history.last
                    # NOTE(review): start/last become strings here while they
                    # were date-like above — presumably for JSON output;
                    # verify downstream consumers accept this.
                    dsid.history.start = str(dsid.history.start + timedelta(days=day_gap.days))
                    dsid.history.last = str(today - timedelta(days=1))
            ds_out.data.append(dsid)
    return ds_out
def get_dataset(
    stat_type: StatType,
    network_region: str,
    bucket_size: str = "daily",
    year: Optional[int] = None,
    testing: bool = True,
) -> OpennemDataSetV2:
    """Fetch and parse a v2 statistics set from the OpenNEM v2 endpoints.

    Args:
        stat_type: statistic type to fetch (energy, market value, ...).
        network_region: NEM region code, e.g. "NSW1".
        bucket_size: time bucket of the series (e.g. "daily").
        year: optional year for year-scoped endpoints.
        testing: fetch from the testing endpoint rather than production.

    Returns:
        The parsed set, with the history windows patched by statset_patch.

    Raises:
        Exception: if the HTTP request returns a non-success status.
    """
    req_url = get_v2_url(stat_type, network_region, bucket_size, year, testing=testing)
    # Log before the request so failed fetches still record the URL tried.
    logger.debug("Loading: {}".format(req_url))
    r = http.get(req_url)
    if not r.ok:
        # Non-2xx response: this is a fetch failure, not a parse failure
        # (the original message misleadingly said "Could not parse URL").
        raise Exception("Could not fetch URL: {} (status {})".format(req_url, r.status_code))
    json_data = r.json()
    statset = load_statset_v2(json_data)
    statset = statset_patch(statset, bucket_size=bucket_size)
    return statset
if __name__ == "__main__":
ds = get_dataset(StatType.energy, "NSW1", "daily")
with open("flow_test.json", "w") as fh:
fh.write(ds.json())
|
<filename>opennem/core/compat/loader.py
"""
OpenNEM v2 schema loader
"""
import logging
from datetime import timedelta
from typing import Dict, List, Optional
from opennem.api.export.map import StatType
from opennem.core.compat.schema import OpennemDataSetV2, OpennemDataV2
from opennem.diff.versions import get_v2_url
from opennem.schema.flows import FlowType
from opennem.schema.network import NetworkNEM
from opennem.utils.dates import get_last_complete_day_for_network
from opennem.utils.http import http
logger = logging.getLogger("opennem.compat.loader")
def load_statset_v2(statset: List[Dict]) -> OpennemDataSetV2:
    """Parse a list of raw v2 statistic dicts into an ``OpennemDataSetV2``."""
    parsed_series = []
    for raw_series in statset:
        parsed_series.append(OpennemDataV2.parse_obj(raw_series))
    return OpennemDataSetV2(data=parsed_series)
def statset_patch(ds: OpennemDataSetV2, bucket_size: str) -> OpennemDataSetV2:
    """Patch fix for today()

    Keeps only the import/export flow series (energy, market value,
    emissions) and, for daily buckets, rewrites each series' history
    window so it ends on the last complete NEM day.
    """
    today = get_last_complete_day_for_network(NetworkNEM)
    # NOTE(review): .copy() is shallow, so the series objects mutated below
    # are shared with the input `ds` — confirm callers do not rely on `ds`
    # staying untouched.
    ds_out = ds.copy()
    ds_out.data = []
    for ft in [FlowType.imports, FlowType.exports]:
        for st in [StatType.energy, StatType.marketvalue, StatType.emissions]:
            # Series ids look like "<flow>.<stat>", e.g. "imports.energy".
            search_id_match = "{}.{}".format(ft.value, st.value)
            dsid = ds.search_id(search_id_match)
            if not dsid:
                # Series missing from the source set — skip it.
                continue
            if bucket_size == "daily":
                if dsid.history.last != today:
                    # Shift the window forward by the gap so it ends on the
                    # last complete day.
                    day_gap = today - dsid.history.last
                    # NOTE(review): start/last become strings here while they
                    # were date-like above — presumably for JSON output;
                    # verify downstream consumers accept this.
                    dsid.history.start = str(dsid.history.start + timedelta(days=day_gap.days))
                    dsid.history.last = str(today - timedelta(days=1))
            ds_out.data.append(dsid)
    return ds_out
def get_dataset(
    stat_type: StatType,
    network_region: str,
    bucket_size: str = "daily",
    year: Optional[int] = None,
    testing: bool = True,
) -> OpennemDataSetV2:
    """Fetch and parse a v2 statistics set from the OpenNEM v2 endpoints.

    Args:
        stat_type: statistic type to fetch (energy, market value, ...).
        network_region: NEM region code, e.g. "NSW1".
        bucket_size: time bucket of the series (e.g. "daily").
        year: optional year for year-scoped endpoints.
        testing: fetch from the testing endpoint rather than production.

    Returns:
        The parsed set, with the history windows patched by statset_patch.

    Raises:
        Exception: if the HTTP request returns a non-success status.
    """
    req_url = get_v2_url(stat_type, network_region, bucket_size, year, testing=testing)
    # Log before the request so failed fetches still record the URL tried.
    logger.debug("Loading: {}".format(req_url))
    r = http.get(req_url)
    if not r.ok:
        # Non-2xx response: this is a fetch failure, not a parse failure
        # (the original message misleadingly said "Could not parse URL").
        raise Exception("Could not fetch URL: {} (status {})".format(req_url, r.status_code))
    json_data = r.json()
    statset = load_statset_v2(json_data)
    statset = statset_patch(statset, bucket_size=bucket_size)
    return statset
if __name__ == "__main__":
ds = get_dataset(StatType.energy, "NSW1", "daily")
with open("flow_test.json", "w") as fh:
fh.write(ds.json())
|
en
| 0.397879
|
OpenNEM v2 schema loader Patch fix for today()
| 2.240226
| 2
|
register_to_template_flirt.py
|
gsanroma/nimg-scripts
| 2
|
6627429
|
<reponame>gsanroma/nimg-scripts
"""Register a directory of images to a template with FSL FLIRT.

For each image matching --mov_suffix in --mov_dir, either runs a full
12-DOF affine optimization (writing the .mat transform to --out_dir) or,
when --xfm_suffix is given, applies a precomputed transform from
--xfm_dir without optimization. Jobs run concurrently via scheduler.Launcher.
"""
import argparse
import os
import sys

parser = argparse.ArgumentParser(description='Registers images to template. Can use initial transformation.')
parser.add_argument("--mov_dir", type=str, nargs=1, required=True, help='directory input images')
parser.add_argument("--mov_suffix", type=str, nargs=1, required=True, help='suffix input images')
parser.add_argument("--template_file", type=str, nargs=1, required=True, help='template image')
parser.add_argument("--out_dir", type=str, nargs=1, required=True, help='output directory for transformation files')
parser.add_argument("--out_suffix", type=str, nargs=1, help="(optional) Suffix to be added to the moving image (if none, no warping is done)")
parser.add_argument("--xfm_dir", type=str, nargs=1, help="(optional) directory with transforms")
parser.add_argument("--xfm_suffix", type=str, nargs=1, help="(optional) Suffix of transforms (no optimization)")
parser.add_argument("--num_procs", type=int, nargs=1, default=[8], help='number of concurrent processes ')
args = parser.parse_args()

from scheduler import Launcher

launcher = Launcher(args.num_procs[0])

#
# Initial checks
#
files_list = os.listdir(args.mov_dir[0])
img_list = [f for f in files_list if f.endswith(args.mov_suffix[0])]
assert img_list, "List of input images is empty"
assert os.path.exists(args.template_file[0]), "Template file not found"

# create output directory
if not os.path.exists(args.out_dir[0]):
    os.makedirs(args.out_dir[0])

#
# Main loop: build and launch one FLIRT job per input image
#
flirt_path = 'flirt'
name_list = []
for img_file in img_list:
    img_path = os.path.join(args.mov_dir[0], img_file)
    cmdline = [flirt_path]
    cmdline.extend(['-in', img_path])
    cmdline.extend(['-ref', args.template_file[0]])
    if args.out_suffix is not None:
        # Also write the resampled (warped) image alongside the transform.
        cmdline.extend(['-out', os.path.join(args.out_dir[0], img_file.split(os.extsep, 1)[0] + args.out_suffix[0])])
        cmdline.extend(['-interp', 'trilinear'])
    if args.xfm_suffix is None:
        # No initial transform given: run a full 12-DOF affine optimization
        # and save the resulting matrix next to the outputs.
        cmdline.extend(['-omat', os.path.join(args.out_dir[0], img_file.split(os.extsep, 1)[0] + '.mat')])
        cmdline.extend(['-bins', '256'])
        cmdline.extend(['-cost', 'mutualinfo'])
        cmdline.extend(['-searchrx', '-90.0', '90.0'])
        cmdline.extend(['-searchry', '-90.0', '90.0'])
        cmdline.extend(['-searchrz', '-90.0', '90.0'])
        cmdline.extend(['-dof', '12'])
    else:
        # Apply the precomputed transform without optimization.
        cmdline.extend(['-applyxfm'])
        cmdline.extend(['-init', os.path.join(args.xfm_dir[0], img_file.split(args.mov_suffix[0])[0] + args.xfm_suffix[0])])
    # launch
    # print() form is valid under both Python 2 and 3 (original used the
    # Python-2-only print statement).
    print("Launching registration of file {}".format(img_file))
    name_list.append(img_file.split(os.extsep, 1)[0])
    launcher.add(name_list[-1], ' '.join(cmdline), args.out_dir[0])
    launcher.run(name_list[-1])

print("Waiting for registration jobs to finish...")
launcher.wait()

print("Registration finished.")
|
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Registers images to template. Can use initial transformation.')
parser.add_argument("--mov_dir", type=str, nargs=1, required=True, help='directory input images')
parser.add_argument("--mov_suffix", type=str, nargs=1, required=True, help='suffix input images')
parser.add_argument("--template_file", type=str, nargs=1, required=True, help='template image')
parser.add_argument("--out_dir", type=str, nargs=1, required=True, help='output directory for transformation files')
parser.add_argument("--out_suffix", type=str, nargs=1, help="(optional) Suffix to be added to the moving image (if none, no warping is done)")
parser.add_argument("--xfm_dir", type=str, nargs=1, help="(optional) directory with transforms")
parser.add_argument("--xfm_suffix", type=str, nargs=1, help="(optional) Suffix of transforms (no optimization)")
parser.add_argument("--num_procs", type=int, nargs=1, default=[8], help='number of concurrent processes ')
args = parser.parse_args()
# args = parser.parse_args(''
# '--mov_dir /home/sanromag/DATA/WMH/test_flirt2mni '
# '--mov_suffix _t95.nii.gz '
# '--template_file /home/sanromag/DATA/WMH/template/MNI152_T1_1mm_brain.nii.gz '
# '--out_dir /home/sanromag/DATA/WMH/test_flirt2mni '
# '--out_suffix _segmni.nii.gz '
# '--xfm_dir /home/sanromag/DATA/WMH/test_flirt2mni '
# '--xfm_suffix _flirt.mat '
# ''.split())
from scheduler import Launcher
launcher = Launcher(args.num_procs[0])
#
# Initial checks
#
files_list = os.listdir(args.mov_dir[0])
img_list = [f for f in files_list if f.endswith(args.mov_suffix[0])]
assert img_list, "List of input images is empty"
assert os.path.exists(args.template_file[0]), "Template file not found"
# create output directory
if not os.path.exists(args.out_dir[0]):
os.makedirs(args.out_dir[0])
#
# Main loop
#
flirt_path = 'flirt'
name_list = []
for img_file in img_list:
img_path = os.path.join(args.mov_dir[0], img_file)
cmdline = [flirt_path]
cmdline.extend(['-in', img_path])
cmdline.extend(['-ref', args.template_file[0]])
if args.out_suffix is not None:
cmdline.extend(['-out', os.path.join(args.out_dir[0], img_file.split(os.extsep, 1)[0] + args.out_suffix[0])])
cmdline.extend(['-interp', 'trilinear'])
if args.xfm_suffix is None:
cmdline.extend(['-omat', os.path.join(args.out_dir[0], img_file.split(os.extsep, 1)[0] + '.mat')])
cmdline.extend(['-bins', '256'])
cmdline.extend(['-cost', 'mutualinfo'])
cmdline.extend(['-searchrx', '-90.0', '90.0'])
cmdline.extend(['-searchry', '-90.0', '90.0'])
cmdline.extend(['-searchrz', '-90.0', '90.0'])
# cmdline.extend(['-2D'])
cmdline.extend(['-dof', '12'])
else:
cmdline.extend(['-applyxfm'])
cmdline.extend(['-init', os.path.join(args.xfm_dir[0], img_file.split(args.mov_suffix[0])[0] + args.xfm_suffix[0])])
#
# launch
print "Launching registration of file {}".format(img_file)
name_list.append(img_file.split(os.extsep, 1)[0])
launcher.add(name_list[-1], ' '.join(cmdline), args.out_dir[0])
launcher.run(name_list[-1])
print "Waiting for registration jobs to finish..."
launcher.wait()
print "Registration finished."
|
en
| 0.070367
|
# args = parser.parse_args('' # '--mov_dir /home/sanromag/DATA/WMH/test_flirt2mni ' # '--mov_suffix _t95.nii.gz ' # '--template_file /home/sanromag/DATA/WMH/template/MNI152_T1_1mm_brain.nii.gz ' # '--out_dir /home/sanromag/DATA/WMH/test_flirt2mni ' # '--out_suffix _segmni.nii.gz ' # '--xfm_dir /home/sanromag/DATA/WMH/test_flirt2mni ' # '--xfm_suffix _flirt.mat ' # ''.split()) # # Initial checks # # create output directory # # Main loop # # cmdline.extend(['-2D']) # # launch
| 2.558267
| 3
|
pySPACE/tests/__init__.py
|
pyspace/pyspace
| 32
|
6627430
|
<gh_stars>10-100
""" Collection of pySPACE system and unit tests
.. note::
The section with :mod:`~pySPACE.tests` is not really complete,
but there is a script to run all unittests automatically.
"""
import logging
# create logger for test output
logger = logging.getLogger('TestLogger')
logger.setLevel(logging.DEBUG)
loggingFileHandler = logging.FileHandler("unittest_log.txt", mode='w')
loggingFileHandler.setLevel(logging.DEBUG)
loggingStreamHandler = logging.StreamHandler()
loggingStreamHandler.setLevel(logging.CRITICAL)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s")
loggingFileHandler.setFormatter(formatter)
loggingStreamHandler.setFormatter(formatter)
logger.addHandler(loggingFileHandler)
logger.addHandler(loggingStreamHandler)
|
""" Collection of pySPACE system and unit tests
.. note::
The section with :mod:`~pySPACE.tests` is not really complete,
but there is a script to run all unittests automatically.
"""
import logging
# create logger for test output
logger = logging.getLogger('TestLogger')
logger.setLevel(logging.DEBUG)
loggingFileHandler = logging.FileHandler("unittest_log.txt", mode='w')
loggingFileHandler.setLevel(logging.DEBUG)
loggingStreamHandler = logging.StreamHandler()
loggingStreamHandler.setLevel(logging.CRITICAL)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s")
loggingFileHandler.setFormatter(formatter)
loggingStreamHandler.setFormatter(formatter)
logger.addHandler(loggingFileHandler)
logger.addHandler(loggingStreamHandler)
|
en
| 0.823232
|
Collection of pySPACE system and unit tests .. note:: The section with :mod:`~pySPACE.tests` is not really complete, but there is a script to run all unittests automatically. # create logger for test output
| 2.502155
| 3
|
src/external/elf-loader/test/testjunit.py
|
cclauss/shadow
| 1
|
6627431
|
<gh_stars>1-10
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import sys
import os
import subprocess
import junit_xml_output
test_cases = []
for arg in sys.argv[1:]:
if '12' in arg:
val = subprocess.Popen(['rm' ,'-f', 'libl.so'])
val.wait ()
cmd = ['./'+arg+'-ldso']
test_env = os.environ.copy()
test_env['LD_LIBRARY_PATH'] = '.:../'
val = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = test_env)
(stdout, stderr) = val.communicate()
f = open ('output/' + arg, 'w')
f.write (stdout)
f.close ()
if val.returncode != 0:
test_cases.append(junit_xml_output.TestCase(arg, '(crash)\n '+ stderr,
"failure"))
print 'CRASH ' + arg + ' -- LD_LIBRARY_PATH=.:../ ./' + arg + '-ldso'
else:
cmd = ['diff', '-q',
'output/' + arg,
'output/' + arg + '.ref']
val = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
(stdout, stderr) = val.communicate()
#print stdout
#print stderr
if val.returncode != 0:
test_cases.append(junit_xml_output.TestCase(arg, stderr,
"failure"))
print 'FAIL ' + arg + ' -- LD_LIBRARY_PATH=.:../ ./' + arg + '-ldso'
else:
test_cases.append(junit_xml_output.TestCase(arg, stdout,
"success"))
print 'PASS ' + arg
junit_xml = junit_xml_output.JunitXml("elf-loader-tests", test_cases)
f = open ('elf-loader-tests.xml', 'w')
f.write (junit_xml.dump())
f.close ()
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import sys
import os
import subprocess
import junit_xml_output
test_cases = []
for arg in sys.argv[1:]:
if '12' in arg:
val = subprocess.Popen(['rm' ,'-f', 'libl.so'])
val.wait ()
cmd = ['./'+arg+'-ldso']
test_env = os.environ.copy()
test_env['LD_LIBRARY_PATH'] = '.:../'
val = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = test_env)
(stdout, stderr) = val.communicate()
f = open ('output/' + arg, 'w')
f.write (stdout)
f.close ()
if val.returncode != 0:
test_cases.append(junit_xml_output.TestCase(arg, '(crash)\n '+ stderr,
"failure"))
print 'CRASH ' + arg + ' -- LD_LIBRARY_PATH=.:../ ./' + arg + '-ldso'
else:
cmd = ['diff', '-q',
'output/' + arg,
'output/' + arg + '.ref']
val = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
(stdout, stderr) = val.communicate()
#print stdout
#print stderr
if val.returncode != 0:
test_cases.append(junit_xml_output.TestCase(arg, stderr,
"failure"))
print 'FAIL ' + arg + ' -- LD_LIBRARY_PATH=.:../ ./' + arg + '-ldso'
else:
test_cases.append(junit_xml_output.TestCase(arg, stdout,
"success"))
print 'PASS ' + arg
junit_xml = junit_xml_output.JunitXml("elf-loader-tests", test_cases)
f = open ('elf-loader-tests.xml', 'w')
f.write (junit_xml.dump())
f.close ()
|
en
| 0.336369
|
#! /usr/bin/env python ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*- #print stdout #print stderr
| 2.083971
| 2
|
eval.py
|
tcl9876/Denoising_Student
| 4
|
6627432
|
<filename>eval.py
from imageio import imwrite
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
from utils import show_images, slerp, make_batch_of_images, float_to_image, load_models_from_gdrive
from models import Onestep_Model
#note: these functions do NOT support multi-gpu or TPU. Will either run on one GPU if available, otherwise CPU
def interpolation_experiment(model, device, n_images=11, savepath=None):
tset = [i/(n_images-1) for i in range(n_images)]
assert min(tset) == 0. and max(tset) == 1.
z1, z2 = tf.split(tf.random.normal([2, model.spatialres, model.spatialres, 3]), 2)
z_in = tf.concat([slerp(z1, z2, tset[i]) for i in range(n_images)], axis=0)
with tf.device(device):
images = model(z_in)
show_images(images, dims=[1, n_images], savepath=savepath)
def getmodelimages(model, device, bs):
z = tf.random.normal([bs, model.spatialres, model.spatialres, 3])
with tf.device(device):
images = model(z)
return images
def getmodel(data_to_use, denoising_student_dir):
model = Onestep_Model(data_to_use, None)
model(tf.random.normal([1, model.spatialres, model.spatialres, 3]))
model.load_weights(os.path.join(denoising_student_dir, '%s_ema_model.h5' % data_to_use))
return model
def write_images_to_folder(model, device, write_dir, batch_size=20, n_images=20):
'''
this function can be used to write 50k images for metrics (IS and FID)
metrics are each calculated against 50k training images from cifar10 and (FID only) celeba.
both IS and FID use the official code provided by [1] and [2] respectively.
celeba images are prepared in same manner as [3]
[1] https://arxiv.org/abs/1606.03498
[2] https://arxiv.org/abs/1706.08500
[3] https://arxiv.org/abs/2010.02502
'''
if write_dir is not None:
if not os.path.isdir(write_dir):
os.mkdir(write_dir)
n_batches = n_images//batch_size
remaining_samples = n_images - batch_size*n_batches
n_batches += 1
n_written = 0
for i in tqdm(range(n_batches)):
if i == n_batches - 1:
bs = remaining_samples
else:
bs = batch_size
if bs==0:
continue
images = getmodelimages(model, device, bs)
images = float_to_image(images)
if write_dir is not None:
for img in images:
imgpath = os.path.join(write_dir, 'images{}.png'.format(str(n_written)))
imwrite(imgpath, img)
n_written += 1
return n_written == n_images
def get_uncurated_samples(data_to_use, model_dir, savedir, device, n_images):
model = getmodel(data_to_use, model_dir)
images = getmodelimages(model, device, n_images)
savepath = os.path.join(savedir, '{}_figure_{}.png'.format(data_to_use, len(os.listdir(savedir))))
dims=[np.ceil(np.sqrt(n_images)), np.ceil(np.sqrt(n_images))]
scale = min(model.spatialres, 192)//32
if os.path.isfile(savepath):
print("There is a file here already. it will be overwritten.")
show_images(images, scale=scale, savepath=savepath, dims=dims)
return True
def main(action, savedir, data_to_use, n_images, model_dir, batch_size):
if model_dir == "download_from_web":
model_dir = './denoising_student_models'
if not os.path.exists(model_dir):
load_models_from_gdrive("./", False)
device = '/GPU:0' if tf.config.list_physical_devices('GPU') else '/CPU:0'
print("Running on device {}".format(device))
print("TPU and Multi-GPU setups are not supported in evaluation.")
if not os.path.isdir(savedir):
os.mkdir(savedir)
if action == 'figures':
status = get_uncurated_samples(data_to_use, model_dir, savedir, device, n_images)
elif action == 'tofolder':
model = getmodel(data_to_use, model_dir)
status = write_images_to_folder(model, device, savedir, batch_size, n_images)
else:
raise NotImplementedError("action must be 'figures' or 'tofolder'. ")
if status:
print("Finished execution properly.")
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("action", type=str, default='figures', help="what action to do. should be either 'figures', 'tofolder'. 'figures' option will create a square figure of images. 'tofolder' option will write each image to a file")
parser.add_argument("savedir", type=str, help="the directory to save outputs to.")
parser.add_argument("data_to_use", type=str, help="Which dataset's images to write. should be one of ['cifar10', 'celeba', 'lsun_bedroom', 'lsun_church'] ")
parser.add_argument("--n_images", type=int, default=20, help="how many images to write.")
parser.add_argument("--model_dir", type=str, default="download_from_web", help="the directory where the denoising_student_models are located. by default it will get them from the web")
parser.add_argument("--batch_size", type=int, default=20, help="when using tofolder, batch size to run examples on.")
args = parser.parse_args()
main(args.action, args.savedir, args.data_to_use, args.n_images, args.model_dir, args.batch_size)
|
<filename>eval.py
from imageio import imwrite
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
from utils import show_images, slerp, make_batch_of_images, float_to_image, load_models_from_gdrive
from models import Onestep_Model
#note: these functions do NOT support multi-gpu or TPU. Will either run on one GPU if available, otherwise CPU
def interpolation_experiment(model, device, n_images=11, savepath=None):
tset = [i/(n_images-1) for i in range(n_images)]
assert min(tset) == 0. and max(tset) == 1.
z1, z2 = tf.split(tf.random.normal([2, model.spatialres, model.spatialres, 3]), 2)
z_in = tf.concat([slerp(z1, z2, tset[i]) for i in range(n_images)], axis=0)
with tf.device(device):
images = model(z_in)
show_images(images, dims=[1, n_images], savepath=savepath)
def getmodelimages(model, device, bs):
z = tf.random.normal([bs, model.spatialres, model.spatialres, 3])
with tf.device(device):
images = model(z)
return images
def getmodel(data_to_use, denoising_student_dir):
model = Onestep_Model(data_to_use, None)
model(tf.random.normal([1, model.spatialres, model.spatialres, 3]))
model.load_weights(os.path.join(denoising_student_dir, '%s_ema_model.h5' % data_to_use))
return model
def write_images_to_folder(model, device, write_dir, batch_size=20, n_images=20):
'''
this function can be used to write 50k images for metrics (IS and FID)
metrics are each calculated against 50k training images from cifar10 and (FID only) celeba.
both IS and FID use the official code provided by [1] and [2] respectively.
celeba images are prepared in same manner as [3]
[1] https://arxiv.org/abs/1606.03498
[2] https://arxiv.org/abs/1706.08500
[3] https://arxiv.org/abs/2010.02502
'''
if write_dir is not None:
if not os.path.isdir(write_dir):
os.mkdir(write_dir)
n_batches = n_images//batch_size
remaining_samples = n_images - batch_size*n_batches
n_batches += 1
n_written = 0
for i in tqdm(range(n_batches)):
if i == n_batches - 1:
bs = remaining_samples
else:
bs = batch_size
if bs==0:
continue
images = getmodelimages(model, device, bs)
images = float_to_image(images)
if write_dir is not None:
for img in images:
imgpath = os.path.join(write_dir, 'images{}.png'.format(str(n_written)))
imwrite(imgpath, img)
n_written += 1
return n_written == n_images
def get_uncurated_samples(data_to_use, model_dir, savedir, device, n_images):
model = getmodel(data_to_use, model_dir)
images = getmodelimages(model, device, n_images)
savepath = os.path.join(savedir, '{}_figure_{}.png'.format(data_to_use, len(os.listdir(savedir))))
dims=[np.ceil(np.sqrt(n_images)), np.ceil(np.sqrt(n_images))]
scale = min(model.spatialres, 192)//32
if os.path.isfile(savepath):
print("There is a file here already. it will be overwritten.")
show_images(images, scale=scale, savepath=savepath, dims=dims)
return True
def main(action, savedir, data_to_use, n_images, model_dir, batch_size):
if model_dir == "download_from_web":
model_dir = './denoising_student_models'
if not os.path.exists(model_dir):
load_models_from_gdrive("./", False)
device = '/GPU:0' if tf.config.list_physical_devices('GPU') else '/CPU:0'
print("Running on device {}".format(device))
print("TPU and Multi-GPU setups are not supported in evaluation.")
if not os.path.isdir(savedir):
os.mkdir(savedir)
if action == 'figures':
status = get_uncurated_samples(data_to_use, model_dir, savedir, device, n_images)
elif action == 'tofolder':
model = getmodel(data_to_use, model_dir)
status = write_images_to_folder(model, device, savedir, batch_size, n_images)
else:
raise NotImplementedError("action must be 'figures' or 'tofolder'. ")
if status:
print("Finished execution properly.")
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("action", type=str, default='figures', help="what action to do. should be either 'figures', 'tofolder'. 'figures' option will create a square figure of images. 'tofolder' option will write each image to a file")
parser.add_argument("savedir", type=str, help="the directory to save outputs to.")
parser.add_argument("data_to_use", type=str, help="Which dataset's images to write. should be one of ['cifar10', 'celeba', 'lsun_bedroom', 'lsun_church'] ")
parser.add_argument("--n_images", type=int, default=20, help="how many images to write.")
parser.add_argument("--model_dir", type=str, default="download_from_web", help="the directory where the denoising_student_models are located. by default it will get them from the web")
parser.add_argument("--batch_size", type=int, default=20, help="when using tofolder, batch size to run examples on.")
args = parser.parse_args()
main(args.action, args.savedir, args.data_to_use, args.n_images, args.model_dir, args.batch_size)
|
en
| 0.843367
|
#note: these functions do NOT support multi-gpu or TPU. Will either run on one GPU if available, otherwise CPU this function can be used to write 50k images for metrics (IS and FID) metrics are each calculated against 50k training images from cifar10 and (FID only) celeba. both IS and FID use the official code provided by [1] and [2] respectively. celeba images are prepared in same manner as [3] [1] https://arxiv.org/abs/1606.03498 [2] https://arxiv.org/abs/1706.08500 [3] https://arxiv.org/abs/2010.02502
| 2.069199
| 2
|
src/pyrogue/color.py
|
whilb/roguelike
| 0
|
6627433
|
import curses
def main(stdscr):
curses.start_color()
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i + 1, i, -1)
try:
for i in range(0, 255):
stdscr.addstr(str(i), curses.color_pair(i))
except curses.ERR:
# End of screen reached
pass
stdscr.getch()
curses.wrapper(main)
|
import curses
def main(stdscr):
curses.start_color()
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i + 1, i, -1)
try:
for i in range(0, 255):
stdscr.addstr(str(i), curses.color_pair(i))
except curses.ERR:
# End of screen reached
pass
stdscr.getch()
curses.wrapper(main)
|
en
| 0.968511
|
# End of screen reached
| 3.006747
| 3
|
datastructure/practice/c3/r_3_1.py
|
stoneyangxu/python-kata
| 0
|
6627434
|
import matplotlib.pyplot as plt
import math
def f_8n(n):
return 8 * n
def f_4nlogn(n):
return 4 * n * math.log2(n)
def f_2n2(n):
return 2 * (n ** 2)
def f_n3(n):
return n ** 3
def f_2n(n):
return 2 ** n
if __name__ == '__main__':
seq_8n = []
seq_4nlogn = []
seq_2n2 = []
seq_n3 = []
seq_2n = []
x = range(1, 12)
for i in x:
seq_8n.append(f_8n(i))
seq_4nlogn.append(f_4nlogn(i))
seq_2n2.append(f_2n2(i))
seq_n3.append(f_n3(i))
seq_2n.append(f_2n(i))
plt.plot(x, seq_8n, label='8n')
plt.plot(x, seq_4nlogn, label='4nlog(n)')
plt.plot(x, seq_2n2, label='2n^2')
plt.plot(x, seq_n3, label='n^3')
plt.plot(x, seq_2n, label='2^n')
plt.legend()
plt.show()
|
import matplotlib.pyplot as plt
import math
def f_8n(n):
return 8 * n
def f_4nlogn(n):
return 4 * n * math.log2(n)
def f_2n2(n):
return 2 * (n ** 2)
def f_n3(n):
return n ** 3
def f_2n(n):
return 2 ** n
if __name__ == '__main__':
seq_8n = []
seq_4nlogn = []
seq_2n2 = []
seq_n3 = []
seq_2n = []
x = range(1, 12)
for i in x:
seq_8n.append(f_8n(i))
seq_4nlogn.append(f_4nlogn(i))
seq_2n2.append(f_2n2(i))
seq_n3.append(f_n3(i))
seq_2n.append(f_2n(i))
plt.plot(x, seq_8n, label='8n')
plt.plot(x, seq_4nlogn, label='4nlog(n)')
plt.plot(x, seq_2n2, label='2n^2')
plt.plot(x, seq_n3, label='n^3')
plt.plot(x, seq_2n, label='2^n')
plt.legend()
plt.show()
|
none
| 1
| 2.981225
| 3
|
|
Basic Programs/tut_44.py
|
Ahtisham-Shakir/Python_Basics
| 0
|
6627435
|
<gh_stars>0
l1 = ["Bhindi", "Aloo", "chopsticks", "chowmein"]
# i = 1
# for item in l1:
# if i%2 is not 0:
# print(f"Jarvis please buy {item}")
# i += 1
# Enumerate function return index and item of the list
for index, item in enumerate(l1):
if index % 2 == 0:
print(f"Jarvis please buy {item}")
|
l1 = ["Bhindi", "Aloo", "chopsticks", "chowmein"]
# i = 1
# for item in l1:
# if i%2 is not 0:
# print(f"Jarvis please buy {item}")
# i += 1
# Enumerate function return index and item of the list
for index, item in enumerate(l1):
if index % 2 == 0:
print(f"Jarvis please buy {item}")
|
en
| 0.601214
|
# i = 1 # for item in l1: # if i%2 is not 0: # print(f"Jarvis please buy {item}") # i += 1 # Enumerate function return index and item of the list
| 3.779583
| 4
|
libs/GoodOvernight.py
|
bioinformagic/monica
| 7
|
6627436
|
<reponame>bioinformagic/monica
import subprocess
import os
import xml.etree.ElementTree as parXML
import wget
from src.piper_pan import shell_runner
# TODO add documentation to your code!
class GoodOvernight():
def __init__(self):
pass
def unmapped_extractor(self, exp_output, unmapped_path): # this creates fasta files over unmapped bam sequences
list_bam_files = []
for file in os.listdir(exp_output):
if file.endswith('.bam'):
list_bam_files.append(file)
for element in list_bam_files:
subprocess.Popen(
'samtools fasta -f 4 ' + exp_output + '/' + element + ' > ' + unmapped_path + '/' + element[
:-4] + '_unmapped.fasta',
shell=True)
def biomarter(list):
wget.download('https://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.xml?method=db2db&format=row&input=geneid&inputValues=' + ','.join(list) + '&outputs=refseqgenomicgi')#This line of code downloads from db2db a xml file with a specific filename
#that is biodbnetRestApi.xml
file = 'biodbnetRestApi.xml'
root = parXML.parse(file).getroot() # this line initializes the xml file so to be parsed
levels = root.findall('.//item')
ee = dict()
for level in levels:
GeneId = level.find('InputValue').text
Transformed = filter(None, str(level.find('RefSeqGenomicGI').text).split('//'))
ee[GeneId] = Transformed
# the previous for loop for every level so 'Gene' we may have 1 or different outputs from the sitethat are stored into Transformed and I create a dictionary knowing this datas
with open(file[:-4] + '_biomarter_output.txt', 'w') as out:
for gene in ee.keys():
for value in ee[gene]:
out.write(gene + ' ' + value + '\n')
os.remove(file)
# This creates a txt file as Gene and Ids
# example GoodOvernight.unmapped_extractor('/home/pepsi/Documents/Università/Bioinformatics2/mapped', '/home/pepsi/Documents/Università/Bioinformatics2/unmapped')
# example GoodOvernight.biomarter([])
def master_blaster(self):
"""
Blast the unmapped reads and returns the GenBank IDs
"""
id_list = []
# os.chdir("~Desktop/unmmaped_fasta")
# TODO fix parallel jobs with multiprocess.Pool()
blasting = shell_runner(
"ls *.fa | parallel 'blastn -query {} -db nt -remote -num_alignments 1 -outfmt 6 -out {.}.out'")
get_id = shell_runner("cut -f 2 *.out > list_ids.txt")
with open("list_ids.txt") as file:
output = [id_list.append(line.strip()) for line in file]
return id_list
if __name__ == "__main__":
goodOvernight = GoodOvernight()
|
import subprocess
import os
import xml.etree.ElementTree as parXML
import wget
from src.piper_pan import shell_runner
# TODO add documentation to your code!
class GoodOvernight():
def __init__(self):
pass
def unmapped_extractor(self, exp_output, unmapped_path): # this creates fasta files over unmapped bam sequences
list_bam_files = []
for file in os.listdir(exp_output):
if file.endswith('.bam'):
list_bam_files.append(file)
for element in list_bam_files:
subprocess.Popen(
'samtools fasta -f 4 ' + exp_output + '/' + element + ' > ' + unmapped_path + '/' + element[
:-4] + '_unmapped.fasta',
shell=True)
def biomarter(list):
wget.download('https://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.xml?method=db2db&format=row&input=geneid&inputValues=' + ','.join(list) + '&outputs=refseqgenomicgi')#This line of code downloads from db2db a xml file with a specific filename
#that is biodbnetRestApi.xml
file = 'biodbnetRestApi.xml'
root = parXML.parse(file).getroot() # this line initializes the xml file so to be parsed
levels = root.findall('.//item')
ee = dict()
for level in levels:
GeneId = level.find('InputValue').text
Transformed = filter(None, str(level.find('RefSeqGenomicGI').text).split('//'))
ee[GeneId] = Transformed
# the previous for loop for every level so 'Gene' we may have 1 or different outputs from the sitethat are stored into Transformed and I create a dictionary knowing this datas
with open(file[:-4] + '_biomarter_output.txt', 'w') as out:
for gene in ee.keys():
for value in ee[gene]:
out.write(gene + ' ' + value + '\n')
os.remove(file)
# This creates a txt file as Gene and Ids
# example GoodOvernight.unmapped_extractor('/home/pepsi/Documents/Università/Bioinformatics2/mapped', '/home/pepsi/Documents/Università/Bioinformatics2/unmapped')
# example GoodOvernight.biomarter([])
def master_blaster(self):
"""
Blast the unmapped reads and returns the GenBank IDs
"""
id_list = []
# os.chdir("~Desktop/unmmaped_fasta")
# TODO fix parallel jobs with multiprocess.Pool()
blasting = shell_runner(
"ls *.fa | parallel 'blastn -query {} -db nt -remote -num_alignments 1 -outfmt 6 -out {.}.out'")
get_id = shell_runner("cut -f 2 *.out > list_ids.txt")
with open("list_ids.txt") as file:
output = [id_list.append(line.strip()) for line in file]
return id_list
if __name__ == "__main__":
goodOvernight = GoodOvernight()
|
en
| 0.70098
|
# TODO add documentation to your code! # this creates fasta files over unmapped bam sequences #This line of code downloads from db2db a xml file with a specific filename #that is biodbnetRestApi.xml # this line initializes the xml file so to be parsed # the previous for loop for every level so 'Gene' we may have 1 or different outputs from the sitethat are stored into Transformed and I create a dictionary knowing this datas # This creates a txt file as Gene and Ids # example GoodOvernight.unmapped_extractor('/home/pepsi/Documents/Università/Bioinformatics2/mapped', '/home/pepsi/Documents/Università/Bioinformatics2/unmapped') # example GoodOvernight.biomarter([]) Blast the unmapped reads and returns the GenBank IDs # os.chdir("~Desktop/unmmaped_fasta") # TODO fix parallel jobs with multiprocess.Pool()
| 2.732865
| 3
|
pypytorch/functions/basic/mm.py
|
dark-ai/pypytorch
| 10
|
6627437
|
<filename>pypytorch/functions/basic/mm.py
# -*- coding: utf-8 -*-
from pypytorch.functions.function import Function
class MM(Function):
"""
Notes
-----
Matmul doesn't have broadcast problem
"""
def forward(self, a, b):
return a @ b
def backward_0(self, grad):
a, b = self.inputs
return grad @ b.T
def backward_1(self, grad):
a, b = self.inputs
return a.T @ grad
|
<filename>pypytorch/functions/basic/mm.py
# -*- coding: utf-8 -*-
from pypytorch.functions.function import Function
class MM(Function):
"""
Notes
-----
Matmul doesn't have broadcast problem
"""
def forward(self, a, b):
return a @ b
def backward_0(self, grad):
a, b = self.inputs
return grad @ b.T
def backward_1(self, grad):
a, b = self.inputs
return a.T @ grad
|
en
| 0.951832
|
# -*- coding: utf-8 -*- Notes ----- Matmul doesn't have broadcast problem
| 3.240767
| 3
|
recombinator/var_info.py
|
tomsasani/recombinator
| 0
|
6627438
|
from __future__ import absolute_import, print_function
import sys
import toolshed as ts
from .denovo import variant_info
from peddy import Ped
from cyvcf2 import VCF
def main(argv):
import argparse
p = argparse.ArgumentParser()
p.add_argument("bed", help="bed file of variants for which to extract info")
p.add_argument("ped")
p.add_argument("vcf")
args = p.parse_args(argv)
run(args)
def get_position(vcf, d, ref=None, alt=None, extra=0):
loc = "%s:%d-%d" % (d['chrom'], int(d['start']) + 1 - extra,
int(d['end']) + extra)
for v in vcf(loc):
if ref and v.REF != ref: continue
if alt and alt not in v.ALT: continue
yield v
def run(args):
vcf = VCF(args.vcf, gts012=True)
sample_lookup = {s: i for i, s in enumerate(vcf.samples)}
ped = Ped(args.ped)
kid_lookup = {s.sample_id: s for s in ped.samples()}
skipped, k = 0, 0
for i, d in enumerate(ts.reader(args.bed, header="ordered")):
rec = None
kid = kid_lookup[d['sample_id']]
for v in get_position(vcf, d):
rec = variant_info(v, kid, sample_lookup)
if rec is None:
skipped += 1
continue
if k == 0:
print("#" + "\t".join(rec.keys()))
k += 1
print("\t".join(map(str, rec.values())))
if __name__ == "__main__":
if __package__ is None:
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import doctest
doctest.testmod()
main(sys.argv[1:])
|
from __future__ import absolute_import, print_function
import sys
import toolshed as ts
from .denovo import variant_info
from peddy import Ped
from cyvcf2 import VCF
def main(argv):
import argparse
p = argparse.ArgumentParser()
p.add_argument("bed", help="bed file of variants for which to extract info")
p.add_argument("ped")
p.add_argument("vcf")
args = p.parse_args(argv)
run(args)
def get_position(vcf, d, ref=None, alt=None, extra=0):
loc = "%s:%d-%d" % (d['chrom'], int(d['start']) + 1 - extra,
int(d['end']) + extra)
for v in vcf(loc):
if ref and v.REF != ref: continue
if alt and alt not in v.ALT: continue
yield v
def run(args):
vcf = VCF(args.vcf, gts012=True)
sample_lookup = {s: i for i, s in enumerate(vcf.samples)}
ped = Ped(args.ped)
kid_lookup = {s.sample_id: s for s in ped.samples()}
skipped, k = 0, 0
for i, d in enumerate(ts.reader(args.bed, header="ordered")):
rec = None
kid = kid_lookup[d['sample_id']]
for v in get_position(vcf, d):
rec = variant_info(v, kid, sample_lookup)
if rec is None:
skipped += 1
continue
if k == 0:
print("#" + "\t".join(rec.keys()))
k += 1
print("\t".join(map(str, rec.values())))
if __name__ == "__main__":
if __package__ is None:
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import doctest
doctest.testmod()
main(sys.argv[1:])
|
none
| 1
| 2.119692
| 2
|
|
strict-backup.py
|
sliwy/backup
| 0
|
6627439
|
<filename>strict-backup.py
# Author: <NAME> <<EMAIL>>
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import hashlib
import os
import shutil
from pathlib import Path
from tqdm.auto import tqdm
def get_hash(file_path: Path):
return hashlib.md5(file_path.read_bytes())
def same_file(file_path, other_file_path):
return get_hash(file_path).digest() == get_hash(other_file_path).digest()
def replace_with_source(p):
return Path(str(p).replace(str(target_path), str(source_path), 1))
def replace_with_target(p):
return Path(str(p).replace(str(source_path), str(target_path), 1))
parser = argparse.ArgumentParser()
parser.add_argument(
'source_path',
help='Source path for backup. All files and directories recursively inside '
'the specified directory will be synchronized.')
parser.add_argument(
'target_path',
help='Path where all the files from source_path will be saved.')
args = parser.parse_args()
source_path = Path(args.source_path)
target_path = Path(args.target_path)
value = input(
"Please confirm that you want to overwrite directory '"
"{}' with files from '{}'.\ny/n\n".format(args.target_path, args.source_path))
if value != 'y':
exit(0)
all_source_paths = list(source_path.rglob('*'))
# removing removed dirs
for path in target_path.rglob('*'):
if replace_with_source(path) not in all_source_paths:
try:
if path.is_file():
path.unlink()
except FileNotFoundError:
pass
for path in target_path.rglob('*'):
if path.is_dir() and replace_with_source(path) not in all_source_paths:
path.rmdir()
for path in tqdm(all_source_paths):
copy_target_path = replace_with_target(path)
if path.is_file():
if not copy_target_path.exists():
os.makedirs(str(copy_target_path.parent), exist_ok=True)
shutil.copy(str(path), str(copy_target_path))
elif copy_target_path.exists() and not same_file(path,
copy_target_path):
copy_target_path.unlink()
shutil.copy(str(path), str(copy_target_path))
elif path.is_dir():
copy_target_path.mkdir(parents=True, exist_ok=True)
|
<filename>strict-backup.py
# Author: <NAME> <<EMAIL>>
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import hashlib
import os
import shutil
from pathlib import Path
from tqdm.auto import tqdm
def get_hash(file_path: Path):
return hashlib.md5(file_path.read_bytes())
def same_file(file_path, other_file_path):
return get_hash(file_path).digest() == get_hash(other_file_path).digest()
def replace_with_source(p):
return Path(str(p).replace(str(target_path), str(source_path), 1))
def replace_with_target(p):
return Path(str(p).replace(str(source_path), str(target_path), 1))
parser = argparse.ArgumentParser()
parser.add_argument(
'source_path',
help='Source path for backup. All files and directories recursively inside '
'the specified directory will be synchronized.')
parser.add_argument(
'target_path',
help='Path where all the files from source_path will be saved.')
args = parser.parse_args()
source_path = Path(args.source_path)
target_path = Path(args.target_path)
value = input(
"Please confirm that you want to overwrite directory '"
"{}' with files from '{}'.\ny/n\n".format(args.target_path, args.source_path))
if value != 'y':
exit(0)
all_source_paths = list(source_path.rglob('*'))
# removing removed dirs
for path in target_path.rglob('*'):
if replace_with_source(path) not in all_source_paths:
try:
if path.is_file():
path.unlink()
except FileNotFoundError:
pass
for path in target_path.rglob('*'):
if path.is_dir() and replace_with_source(path) not in all_source_paths:
path.rmdir()
for path in tqdm(all_source_paths):
copy_target_path = replace_with_target(path)
if path.is_file():
if not copy_target_path.exists():
os.makedirs(str(copy_target_path.parent), exist_ok=True)
shutil.copy(str(path), str(copy_target_path))
elif copy_target_path.exists() and not same_file(path,
copy_target_path):
copy_target_path.unlink()
shutil.copy(str(path), str(copy_target_path))
elif path.is_dir():
copy_target_path.mkdir(parents=True, exist_ok=True)
|
en
| 0.751536
|
# Author: <NAME> <<EMAIL>> # # MIT License # # Copyright (c) 2020 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # removing removed dirs
| 2.22902
| 2
|
perma_web/perma/migrations/0006_add_internetarchive_status.py
|
rachelaus/perma
| 317
|
6627440
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
Link.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
HistoricalLink = apps.get_model('perma','HistoricalLink')
HistoricalLink.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
HistoricalLink.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
def reverse_update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
Link.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
HistoricalLink = apps.get_model('perma', 'HistoricalLink')
HistoricalLink.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
HistoricalLink.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
class Migration(migrations.Migration):
dependencies = [
('perma', '0005_auto_20160513_2006'),
]
operations = [
migrations.AddField(
model_name='historicallink',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.AddField(
model_name='link',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.RunPython(update_upload_to_ia_field, reverse_code=reverse_update_upload_to_ia_field),
migrations.RemoveField(
model_name='historicallink',
name='uploaded_to_internet_archive',
),
migrations.RemoveField(
model_name='link',
name='uploaded_to_internet_archive',
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
Link.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
HistoricalLink = apps.get_model('perma','HistoricalLink')
HistoricalLink.objects.filter(uploaded_to_internet_archive=True).update(internet_archive_upload_status='completed')
HistoricalLink.objects.filter(uploaded_to_internet_archive=False).update(internet_archive_upload_status='not_started')
def reverse_update_upload_to_ia_field(apps, schema_editor):
Link = apps.get_model('perma', 'Link')
Link.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
Link.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
HistoricalLink = apps.get_model('perma', 'HistoricalLink')
HistoricalLink.objects.filter(internet_archive_upload_status='completed').update(uploaded_to_internet_archive=True)
HistoricalLink.objects.filter(
Q(internet_archive_upload_status='deleted') | Q(internet_archive_upload_status='not_started') | Q(internet_archive_upload_status='failed') | Q(internet_archive_upload_status='failed_permanently')
).update(uploaded_to_internet_archive=False)
class Migration(migrations.Migration):
dependencies = [
('perma', '0005_auto_20160513_2006'),
]
operations = [
migrations.AddField(
model_name='historicallink',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.AddField(
model_name='link',
name='internet_archive_upload_status',
field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
),
migrations.RunPython(update_upload_to_ia_field, reverse_code=reverse_update_upload_to_ia_field),
migrations.RemoveField(
model_name='historicallink',
name='uploaded_to_internet_archive',
),
migrations.RemoveField(
model_name='link',
name='uploaded_to_internet_archive',
),
]
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.896424
| 2
|
src/Zope2/App/tests/test_schema.py
|
rbanffy/Zope
| 289
|
6627441
|
<filename>src/Zope2/App/tests/test_schema.py<gh_stars>100-1000
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from zope.testing.cleanup import CleanUp
class Zope2VocabularyRegistryTests(unittest.TestCase, CleanUp):
def _getTargetClass(self):
from ..schema import Zope2VocabularyRegistry
return Zope2VocabularyRegistry
def _makeOne(self):
return self._getTargetClass()()
def test_class_conforms_to_IVocabularyRegistry(self):
from zope.interface.verify import verifyClass
from zope.schema.interfaces import IVocabularyRegistry
verifyClass(IVocabularyRegistry, self._getTargetClass())
def test_instance_conforms_to_IVocabularyRegistry(self):
from zope.interface.verify import verifyObject
from zope.schema.interfaces import IVocabularyRegistry
verifyObject(IVocabularyRegistry, self._makeOne())
def test_get_miss_raises_LookupError(self):
registry = self._makeOne()
context = object()
self.assertRaises(LookupError, registry.get, context, 'nonesuch')
def test_get_hit_finds_registered_IVocabularyFactory(self):
from zope.component import provideUtility
from zope.schema.interfaces import IVocabularyFactory
_marker = object()
def _factory(context):
return _marker
provideUtility(_factory, IVocabularyFactory, 'foundit')
registry = self._makeOne()
context = object()
found = registry.get(context, 'foundit')
self.assertTrue(found is _marker)
|
<filename>src/Zope2/App/tests/test_schema.py<gh_stars>100-1000
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from zope.testing.cleanup import CleanUp
class Zope2VocabularyRegistryTests(unittest.TestCase, CleanUp):
def _getTargetClass(self):
from ..schema import Zope2VocabularyRegistry
return Zope2VocabularyRegistry
def _makeOne(self):
return self._getTargetClass()()
def test_class_conforms_to_IVocabularyRegistry(self):
from zope.interface.verify import verifyClass
from zope.schema.interfaces import IVocabularyRegistry
verifyClass(IVocabularyRegistry, self._getTargetClass())
def test_instance_conforms_to_IVocabularyRegistry(self):
from zope.interface.verify import verifyObject
from zope.schema.interfaces import IVocabularyRegistry
verifyObject(IVocabularyRegistry, self._makeOne())
def test_get_miss_raises_LookupError(self):
registry = self._makeOne()
context = object()
self.assertRaises(LookupError, registry.get, context, 'nonesuch')
def test_get_hit_finds_registered_IVocabularyFactory(self):
from zope.component import provideUtility
from zope.schema.interfaces import IVocabularyFactory
_marker = object()
def _factory(context):
return _marker
provideUtility(_factory, IVocabularyFactory, 'foundit')
registry = self._makeOne()
context = object()
found = registry.get(context, 'foundit')
self.assertTrue(found is _marker)
|
en
| 0.330381
|
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ##############################################################################
| 1.798296
| 2
|
Web/datafunctions.py
|
NielsVanM/BD-HotelReviewVisualizer
| 0
|
6627442
|
from DataLoader.apps import _Mongo
from bson.code import Code
from datetime import datetime
ReviewDB = _Mongo.full_reviews
TinyReviewDB = _Mongo.tiny_reviews
_argToFieldMap = {
"hotelnames": "Hotel_Name"
}
def _processArgs(request, keys):
filterQuery = {}
for key in keys:
val = request.GET.get(key)
if val != None:
filterQuery[key] = val.split(",")
return filterQuery
def _buildFilter(args):
pass
def GetHotelCoordinates(request):
countryFilter = request.GET.get("countries")
pipeline = []
if countryFilter != None:
pipeline.append({
"$match": {
"Reviewer_Nationality": {
"$in": countryFilter.split(",")
}
}
})
pipeline.append(
{"$group":
{"_id": {
"name": "$Hotel_Name",
"lat": "$lat",
"lon": "$lng"
}
}
}
)
res = ReviewDB.aggregate(pipeline)
return res
def GetReviewOverTime(request):
hotelNameFilter = request.GET.get("hotelnames")
countryFilter = request.GET.get("countries")
fq = {}
if hotelNameFilter != None:
fq["Hotel_Name"] = {"$in": hotelNameFilter.split(",")}
if countryFilter != None:
fq["Reviewer_Nationality"] = {"$in": countryFilter.split(",")}
if fq == {}:
fq = None
res = ReviewDB.map_reduce(
Code("""
function() {
emit(this.Review_Date, 1)
}"""),
Code("""
function(key, values) {
var total = 0
for (var i = 0; i < values.length; i++) {
total += values[i]
}
return total
}"""), "res", query=fq)
dataSet = res.find()
data = []
for doc in dataSet:
date = datetime.strptime(doc["_id"], "%m/%d/%Y")
data.append([
str(date),
doc["value"]
])
return data
def GetAmountOfReviewsPerNationality(request):
hotelNameFilter = request.GET.get("hotelnames")
fq = None
if hotelNameFilter != None:
fq = {"Hotel_Name": {"$in": hotelNameFilter.split(",")}}
res = ReviewDB.map_reduce(
Code("""
function() {
emit(this.Reviewer_Nationality, 2)
}
"""),
Code("""
function(key, values) {
var total = 0
for (var i = 0; i < values.length; i ++) {
total += values[i]
}
return total
}
"""), "out", query=fq)
out = []
for doc in res.find():
out.append({
"name": doc["_id"],
"y": doc["value"]
})
return out[1:]
|
from DataLoader.apps import _Mongo
from bson.code import Code
from datetime import datetime
ReviewDB = _Mongo.full_reviews
TinyReviewDB = _Mongo.tiny_reviews
_argToFieldMap = {
"hotelnames": "Hotel_Name"
}
def _processArgs(request, keys):
filterQuery = {}
for key in keys:
val = request.GET.get(key)
if val != None:
filterQuery[key] = val.split(",")
return filterQuery
def _buildFilter(args):
pass
def GetHotelCoordinates(request):
countryFilter = request.GET.get("countries")
pipeline = []
if countryFilter != None:
pipeline.append({
"$match": {
"Reviewer_Nationality": {
"$in": countryFilter.split(",")
}
}
})
pipeline.append(
{"$group":
{"_id": {
"name": "$Hotel_Name",
"lat": "$lat",
"lon": "$lng"
}
}
}
)
res = ReviewDB.aggregate(pipeline)
return res
def GetReviewOverTime(request):
hotelNameFilter = request.GET.get("hotelnames")
countryFilter = request.GET.get("countries")
fq = {}
if hotelNameFilter != None:
fq["Hotel_Name"] = {"$in": hotelNameFilter.split(",")}
if countryFilter != None:
fq["Reviewer_Nationality"] = {"$in": countryFilter.split(",")}
if fq == {}:
fq = None
res = ReviewDB.map_reduce(
Code("""
function() {
emit(this.Review_Date, 1)
}"""),
Code("""
function(key, values) {
var total = 0
for (var i = 0; i < values.length; i++) {
total += values[i]
}
return total
}"""), "res", query=fq)
dataSet = res.find()
data = []
for doc in dataSet:
date = datetime.strptime(doc["_id"], "%m/%d/%Y")
data.append([
str(date),
doc["value"]
])
return data
def GetAmountOfReviewsPerNationality(request):
hotelNameFilter = request.GET.get("hotelnames")
fq = None
if hotelNameFilter != None:
fq = {"Hotel_Name": {"$in": hotelNameFilter.split(",")}}
res = ReviewDB.map_reduce(
Code("""
function() {
emit(this.Reviewer_Nationality, 2)
}
"""),
Code("""
function(key, values) {
var total = 0
for (var i = 0; i < values.length; i ++) {
total += values[i]
}
return total
}
"""), "out", query=fq)
out = []
for doc in res.find():
out.append({
"name": doc["_id"],
"y": doc["value"]
})
return out[1:]
|
en
| 0.120925
|
function() { emit(this.Review_Date, 1) } function(key, values) { var total = 0 for (var i = 0; i < values.length; i++) { total += values[i] } return total } function() { emit(this.Reviewer_Nationality, 2) } function(key, values) { var total = 0 for (var i = 0; i < values.length; i ++) { total += values[i] } return total }
| 2.471338
| 2
|
casinotools/fileformat/casino2/ElementIntensity.py
|
drix00/pycasinotools
| 2
|
6627443
|
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "<NAME> (<EMAIL>)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 <NAME>"
__license__ = ""
# Standard library modules.
# Third party modules.
# Local modules.
import casinotools.fileformat.FileReaderWriterTools as FileReaderWriterTools
# Globals and constants variables.
class ElementIntensity(FileReaderWriterTools.FileReaderWriterTools):
def read(self, file):
assert getattr(file, 'mode', 'rb') == 'rb'
self.Name = self.readStrLength(file, 3)
self.Size = self.readLong(file)
if self.Size != 0:
self.IntensityK = self.readDoubleList(file, self.Size)
self.IntensityL = self.readDoubleList(file, self.Size)
self.IntensityM = self.readDoubleList(file, self.Size)
|
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "<NAME> (<EMAIL>)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 <NAME>"
__license__ = ""
# Standard library modules.
# Third party modules.
# Local modules.
import casinotools.fileformat.FileReaderWriterTools as FileReaderWriterTools
# Globals and constants variables.
class ElementIntensity(FileReaderWriterTools.FileReaderWriterTools):
def read(self, file):
assert getattr(file, 'mode', 'rb') == 'rb'
self.Name = self.readStrLength(file, 3)
self.Size = self.readLong(file)
if self.Size != 0:
self.IntensityK = self.readDoubleList(file, self.Size)
self.IntensityL = self.readDoubleList(file, self.Size)
self.IntensityM = self.readDoubleList(file, self.Size)
|
en
| 0.445092
|
#!/usr/bin/env python # Script information for the file. # Standard library modules. # Third party modules. # Local modules. # Globals and constants variables.
| 2.225429
| 2
|
code_11_subtraction.py
|
aianaconda/pytorch-GNN-1st
| 6
|
6627444
|
<filename>code_11_subtraction.py
# -*- coding: utf-8 -*-
"""
@author: 代码医生工作室
@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
@来源: <PyTorch深度学习和图神经网络(卷 1)——基础知识>配套代码
@配套代码技术支持:bbs.aianaconda.com
Created on Thu Mar 30 09:43:58 2017
"""
import copy, numpy as np
np.random.seed(0) #随机数生成器的种子,可以每次得到一样的值
# compute sigmoid nonlinearity
def sigmoid(x): #激活函数
output = 1/(1+np.exp(-x))
return output
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):#激活函数的导数
return output*(1-output)
int2binary = {} #整数到其二进制表示的映射
binary_dim = 8 #暂时制作256以内的减法
## 计算0-256的二进制表示
largest_number = pow(2,binary_dim)
binary = np.unpackbits(
np.array([range(largest_number)],dtype=np.uint8).T,axis=1)
for i in range(largest_number):
int2binary[i] = binary[i]
# input variables
alpha = 0.9 #学习速率
input_dim = 2 #输入的维度是2
hidden_dim = 16
output_dim = 1 #输出维度为1
# initialize neural network weights
synapse_0 = (2*np.random.random((input_dim,hidden_dim)) - 1)*0.05 #维度为2*16, 2是输入维度,16是隐藏层维度
synapse_1 = (2*np.random.random((hidden_dim,output_dim)) - 1)*0.05
synapse_h = (2*np.random.random((hidden_dim,hidden_dim)) - 1)*0.05
# => [-0.05, 0.05),
# 用于存放反向传播的权重更新值
synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)
# training
for j in range(10000):
#生成一个数字a
a_int = np.random.randint(largest_number)
#生成一个数字b,b的最大值取的是largest_number/2,作为被减数,让它小一点。
b_int = np.random.randint(largest_number/2)
#如果生成的b大了,那么交换一下
if a_int<b_int:
tt = a_int
b_int = a_int
a_int=tt
a = int2binary[a_int] # binary encoding
b = int2binary[b_int] # binary encoding
# true answer
c_int = a_int - b_int
c = int2binary[c_int]
# 存储神经网络的预测值
d = np.zeros_like(c)
overallError = 0 #每次把总误差清零
layer_2_deltas = list() #存储每个时间点输出层的误差
layer_1_values = list() #存储每个时间点隐藏层的值
layer_1_values.append(np.ones(hidden_dim)*0.1) # 一开始没有隐藏层,所以初始化一下原始值为0.1
# moving along the positions in the binary encoding
for position in range(binary_dim):#循环遍历每一个二进制位
# generate input and output
X = np.array([[a[binary_dim - position - 1],b[binary_dim - position - 1]]])#从右到左,每次去两个输入数字的一个bit位
y = np.array([[c[binary_dim - position - 1]]]).T#正确答案
# hidden layer (input ~+ prev_hidden)
layer_1 = sigmoid(np.dot(X,synapse_0) + np.dot(layer_1_values[-1],synapse_h))#(输入层 + 之前的隐藏层) -> 新的隐藏层,这是体现循环神经网络的最核心的地方!!!
# output layer (new binary representation)
layer_2 = sigmoid(np.dot(layer_1,synapse_1)) #隐藏层 * 隐藏层到输出层的转化矩阵synapse_1 -> 输出层
layer_2_error = y - layer_2 #预测误差
layer_2_deltas.append((layer_2_error)*sigmoid_output_to_derivative(layer_2)) #把每一个时间点的误差导数都记录下来
overallError += np.abs(layer_2_error[0])#总误差
d[binary_dim - position - 1] = np.round(layer_2[0][0]) #记录下每一个预测bit位
# store hidden layer so we can use it in the next timestep
layer_1_values.append(copy.deepcopy(layer_1))#记录下隐藏层的值,在下一个时间点用
future_layer_1_delta = np.zeros(hidden_dim)
#反向传播,从最后一个时间点到第一个时间点
for position in range(binary_dim):
X = np.array([[a[position],b[position]]]) #最后一次的两个输入
layer_1 = layer_1_values[-position-1] #当前时间点的隐藏层
prev_layer_1 = layer_1_values[-position-2] #前一个时间点的隐藏层
# error at output layer
layer_2_delta = layer_2_deltas[-position-1] #当前时间点输出层导数
# error at hidden layer
# 通过后一个时间点(因为是反向传播)的隐藏层误差和当前时间点的输出层误差,计算当前时间点的隐藏层误差
layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) + layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)
# 等到完成了所有反向传播误差计算, 才会更新权重矩阵,先暂时把更新矩阵存起来。
synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
synapse_0_update += X.T.dot(layer_1_delta)
future_layer_1_delta = layer_1_delta
# 完成所有反向传播之后,更新权重矩阵。并把矩阵变量清零
synapse_0 += synapse_0_update * alpha
synapse_1 += synapse_1_update * alpha
synapse_h += synapse_h_update * alpha
synapse_0_update *= 0
synapse_1_update *= 0
synapse_h_update *= 0
# print out progress
if(j % 800 == 0):
#print(synapse_0,synapse_h,synapse_1)
print("总误差:" + str(overallError))
print("Pred:" + str(d))
print("True:" + str(c))
out = 0
for index,x in enumerate(reversed(d)):
out += x*pow(2,index)
print(str(a_int) + " - " + str(b_int) + " = " + str(out))
print("------------")
|
<filename>code_11_subtraction.py
# -*- coding: utf-8 -*-
"""
@author: 代码医生工作室
@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
@来源: <PyTorch深度学习和图神经网络(卷 1)——基础知识>配套代码
@配套代码技术支持:bbs.aianaconda.com
Created on Thu Mar 30 09:43:58 2017
"""
import copy, numpy as np
np.random.seed(0) #随机数生成器的种子,可以每次得到一样的值
# compute sigmoid nonlinearity
def sigmoid(x): #激活函数
output = 1/(1+np.exp(-x))
return output
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):#激活函数的导数
return output*(1-output)
int2binary = {} #整数到其二进制表示的映射
binary_dim = 8 #暂时制作256以内的减法
## 计算0-256的二进制表示
largest_number = pow(2,binary_dim)
binary = np.unpackbits(
np.array([range(largest_number)],dtype=np.uint8).T,axis=1)
for i in range(largest_number):
int2binary[i] = binary[i]
# input variables
alpha = 0.9 #学习速率
input_dim = 2 #输入的维度是2
hidden_dim = 16
output_dim = 1  # one output bit per time step

# initialize neural network weights, uniformly in [-0.05, 0.05)
synapse_0 = (2*np.random.random((input_dim,hidden_dim)) - 1)*0.05   # input -> hidden (input_dim x hidden_dim)
synapse_1 = (2*np.random.random((hidden_dim,output_dim)) - 1)*0.05  # hidden -> output
synapse_h = (2*np.random.random((hidden_dim,hidden_dim)) - 1)*0.05  # hidden -> hidden (recurrent)

# accumulators for the weight updates computed during backpropagation
synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)

# training
for j in range(10000):
    # draw a minuend a and a subtrahend b; b is capped at largest_number/2
    # so that it tends to be the smaller operand
    a_int = np.random.randint(largest_number)
    b_int = np.random.randint(largest_number/2)
    # BUGFIX: the original "swap" did ``tt = a_int; b_int = a_int; a_int = tt``,
    # which only copies a into b and leaves a unchanged (operands become equal).
    # This is a real swap, so the difference a - b stays non-negative.
    if a_int < b_int:
        a_int, b_int = b_int, a_int
    a = int2binary[a_int]  # binary encoding of the minuend
    b = int2binary[b_int]  # binary encoding of the subtrahend
    # true answer
    c_int = a_int - b_int
    c = int2binary[c_int]
    # buffer for the network's predicted bits
    d = np.zeros_like(c)
    overallError = 0           # total error, reset for every sample
    layer_2_deltas = list()    # output-layer error derivative at each time step
    layer_1_values = list()    # hidden-layer state at each time step
    layer_1_values.append(np.ones(hidden_dim)*0.1)  # initial hidden state (0.1 everywhere)

    # forward pass, moving along the positions in the binary encoding
    for position in range(binary_dim):  # iterate over every bit position
        # take one bit of each operand, right to left, plus the target bit
        X = np.array([[a[binary_dim - position - 1],b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T  # correct answer
        # hidden layer: current input plus previous hidden state -- this
        # recurrence is the core of the RNN
        layer_1 = sigmoid(np.dot(X,synapse_0) + np.dot(layer_1_values[-1],synapse_h))
        # output layer produces the predicted bit
        layer_2 = sigmoid(np.dot(layer_1,synapse_1))
        layer_2_error = y - layer_2  # prediction error
        layer_2_deltas.append((layer_2_error)*sigmoid_output_to_derivative(layer_2))  # record per-step error derivative
        overallError += np.abs(layer_2_error[0])  # accumulate total error
        d[binary_dim - position - 1] = np.round(layer_2[0][0])  # record the predicted bit
        # store hidden layer so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)
    # backpropagation through time, from the last time step back to the first
    for position in range(binary_dim):
        X = np.array([[a[position],b[position]]])     # inputs at this step (reversed order)
        layer_1 = layer_1_values[-position-1]         # hidden state at this step
        prev_layer_1 = layer_1_values[-position-2]    # hidden state one step earlier
        # error derivative at the output layer for this step
        layer_2_delta = layer_2_deltas[-position-1]
        # hidden-layer error: the (later) hidden error propagated back through
        # synapse_h combined with this step's output error through synapse_1
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) + layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)
        # accumulate the weight updates; they are applied only after the whole
        # backward pass is finished
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)
        future_layer_1_delta = layer_1_delta

    # after backpropagation is complete, apply the updates and clear the accumulators
    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha
    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0

    # print out progress
    if(j % 800 == 0):
        #print(synapse_0,synapse_h,synapse_1)
        print("总误差:" + str(overallError))
        print("Pred:" + str(d))
        print("True:" + str(c))
        out = 0
        for index,x in enumerate(reversed(d)):
            out += x*pow(2,index)
        print(str(a_int) + " - " + str(b_int) + " = " + str(out))
        print("------------")
|
zh
| 0.856066
|
# -*- coding: utf-8 -*- @author: 代码医生工作室
@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)
@来源: <PyTorch深度学习和图神经网络(卷 1)——基础知识>配套代码
@配套代码技术支持:bbs.aianaconda.com
Created on Thu Mar 30 09:43:58 2017 #随机数生成器的种子,可以每次得到一样的值 # compute sigmoid nonlinearity #激活函数 # convert output of sigmoid function to its derivative #激活函数的导数 #整数到其二进制表示的映射 #暂时制作256以内的减法 ## 计算0-256的二进制表示 # input variables #学习速率 #输入的维度是2 #输出维度为1 # initialize neural network weights #维度为2*16, 2是输入维度,16是隐藏层维度 # => [-0.05, 0.05), # 用于存放反向传播的权重更新值 # training #生成一个数字a #生成一个数字b,b的最大值取的是largest_number/2,作为被减数,让它小一点。 #如果生成的b大了,那么交换一下 # binary encoding # binary encoding # true answer # 存储神经网络的预测值 #每次把总误差清零 #存储每个时间点输出层的误差 #存储每个时间点隐藏层的值 # 一开始没有隐藏层,所以初始化一下原始值为0.1 # moving along the positions in the binary encoding #循环遍历每一个二进制位 # generate input and output #从右到左,每次去两个输入数字的一个bit位 #正确答案 # hidden layer (input ~+ prev_hidden) #(输入层 + 之前的隐藏层) -> 新的隐藏层,这是体现循环神经网络的最核心的地方!!! # output layer (new binary representation) #隐藏层 * 隐藏层到输出层的转化矩阵synapse_1 -> 输出层 #预测误差 #把每一个时间点的误差导数都记录下来 #总误差 #记录下每一个预测bit位 # store hidden layer so we can use it in the next timestep #记录下隐藏层的值,在下一个时间点用 #反向传播,从最后一个时间点到第一个时间点 #最后一次的两个输入 #当前时间点的隐藏层 #前一个时间点的隐藏层 # error at output layer #当前时间点输出层导数 # error at hidden layer # 通过后一个时间点(因为是反向传播)的隐藏层误差和当前时间点的输出层误差,计算当前时间点的隐藏层误差 # 等到完成了所有反向传播误差计算, 才会更新权重矩阵,先暂时把更新矩阵存起来。 # 完成所有反向传播之后,更新权重矩阵。并把矩阵变量清零 # print out progress #print(synapse_0,synapse_h,synapse_1)
| 3.362066
| 3
|
models/generation_model.py
|
AIprogrammer/Detailed-virtual-try-on
| 160
|
6627445
|
import numpy as np
import torch
import os
from .base_model import BaseModel
from models.networks import Define_G, Define_D
from utils.transforms import create_part
import torch.nn.functional as F
from utils import pose_utils
from lib.geometric_matching_multi_gpu import GMM
from .base_model import BaseModel
from time import time
from utils import pose_utils
import os.path as osp
from torchvision import utils
import random
class GenerationModel(BaseModel):
    """Multi-stage try-on generation model (pix2pix / pix2pixHD style).

    Wraps four sub-networks selected by ``opt.train_mode``:

    * ``gmm``        -- geometric matching module that warps the cloth image
    * ``parsing``    -- G/D pair predicting the target human parsing
    * ``appearance`` -- G/D pair rendering the try-on image
    * ``face``       -- G/D pair refining the face region

    With ``opt.joint_all`` the parsing, appearance and face generators are
    optimized together while the discriminator of the current ``train_mode``
    is used adversarially.
    """

    def name(self):
        # Identifier used by the surrounding framework for logging.
        return 'Generation model: pix2pix | pix2pixHD'

    def __init__(self, opt):
        """Build all sub-networks, optionally resume them, and create optimizers.

        Args:
            opt: parsed option namespace (see the project's option definitions).
        """
        self.t0 = time()
        BaseModel.__init__(self, opt)
        self.train_mode = opt.train_mode
        # checkpoint paths used to resume each sub-network ('' means fresh)
        resume_gmm = opt.resume_gmm
        resume_G_parse = opt.resume_G_parse
        resume_D_parse = opt.resume_D_parse
        resume_G_appearance = opt.resume_G_app
        resume_D_appearance = opt.resume_D_app
        resume_G_face = opt.resume_G_face
        resume_D_face = opt.resume_D_face
        # define networks
        self.gmm_model = torch.nn.DataParallel(GMM(opt)).cuda()
        self.generator_parsing = Define_G(opt.input_nc_G_parsing, opt.output_nc_parsing, opt.ndf, opt.netG_parsing, opt.norm,
                                          not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
        self.discriminator_parsing = Define_D(opt.input_nc_D_parsing, opt.ndf, opt.netD_parsing, opt.n_layers_D,
                                              opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
        self.generator_appearance = Define_G(opt.input_nc_G_app, opt.output_nc_app, opt.ndf, opt.netG_app, opt.norm,
                                             not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids, with_tanh=False)
        self.discriminator_appearance = Define_D(opt.input_nc_D_app, opt.ndf, opt.netD_app, opt.n_layers_D,
                                                 opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
        self.generator_face = Define_G(opt.input_nc_D_face, opt.output_nc_face, opt.ndf, opt.netG_face, opt.norm,
                                       not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
        self.discriminator_face = Define_D(opt.input_nc_D_face, opt.ndf, opt.netD_face, opt.n_layers_D,
                                           opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
        # expose the pair of networks for the current training stage under
        # the generic names ``generator`` / ``discriminator``
        if opt.train_mode == 'gmm':
            setattr(self, 'generator', self.gmm_model)
        else:
            setattr(self, 'generator', getattr(self, 'generator_' + self.train_mode))
            setattr(self, 'discriminator', getattr(self, 'discriminator_' + self.train_mode))
        # load networks
        self.networks_name = ['gmm', 'parsing', 'parsing', 'appearance', 'appearance', 'face', 'face']
        self.networks_model = [self.gmm_model, self.generator_parsing, self.discriminator_parsing, self.generator_appearance, self.discriminator_appearance,
                               self.generator_face, self.discriminator_face]
        self.networks = dict(zip(self.networks_name, self.networks_model))
        self.resume_path = [resume_gmm, resume_G_parse, resume_D_parse, resume_G_appearance, resume_D_appearance, resume_G_face, resume_D_face]
        for network, resume in zip(self.networks_model, self.resume_path):
            if network != [] and resume != '':
                # BUGFIX: the original wrote ``assert(cond, msg)`` which asserts a
                # 2-tuple and is therefore always truthy; this form actually checks.
                assert osp.exists(resume), 'the resume checkpoint does not exist: ' + resume
                print('loading...')
                self.load_network(network, resume, ifprint=False)
        # define optimizers (one per sub-network)
        self.optimizer_gmm = torch.optim.Adam(self.gmm_model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
        self.optimizer_parsing_G = torch.optim.Adam(self.generator_parsing.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        self.optimizer_parsing_D = torch.optim.Adam(self.discriminator_parsing.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        self.optimizer_appearance_G = torch.optim.Adam(self.generator_appearance.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        self.optimizer_appearance_D = torch.optim.Adam(self.discriminator_appearance.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        self.optimizer_face_G = torch.optim.Adam(self.generator_face.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        self.optimizer_face_D = torch.optim.Adam(self.discriminator_face.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
        # select the optimizer(s) matching the training stage
        if opt.train_mode == 'gmm':
            self.optimizer_G = self.optimizer_gmm
        elif opt.joint_all:
            # joint training steps all three generators; the discriminator of
            # the current train_mode is still used alone
            self.optimizer_G = [self.optimizer_parsing_G, self.optimizer_appearance_G, self.optimizer_face_G]
            setattr(self, 'optimizer_D', getattr(self, 'optimizer_' + self.train_mode + '_D'))
        else:
            setattr(self, 'optimizer_G', getattr(self, 'optimizer_' + self.train_mode + '_G'))
            setattr(self, 'optimizer_D', getattr(self, 'optimizer_' + self.train_mode + '_D'))
        self.t1 = time()

    def set_input(self, opt, result):
        """Move one data batch onto the GPU and derive stage-specific inputs.

        Args:
            opt: option namespace (``train_mode`` selects which inputs are built).
            result: dict batch produced by the dataset loader.
        """
        self.t2 = time()
        self.source_pose_embedding = result['source_pose_embedding'].float().cuda()
        self.target_pose_embedding = result['target_pose_embedding'].float().cuda()
        self.source_image = result['source_image'].float().cuda()
        self.target_image = result['target_image'].float().cuda()
        self.source_parse = result['source_parse'].float().cuda()
        self.target_parse = result['target_parse'].float().cuda()
        self.cloth_image = result['cloth_image'].float().cuda()
        self.cloth_parse = result['cloth_parse'].float().cuda()
        self.warped_cloth = result['warped_cloth_image'].float().cuda()  # preprocessed warped image from the gmm model
        self.target_parse_cloth = result['target_parse_cloth'].float().cuda()
        self.target_pose_img = result['target_pose_img'].float().cuda()
        self.image_without_cloth = create_part(self.source_image, self.source_parse, 'image_without_cloth', False)
        self.im_c = result['im_c'].float().cuda()  # target warped cloth
        # drop the cloth-related parse channels (5, 6, 7) from the source parse
        index = [x for x in list(range(20)) if x != 5 and x != 6 and x != 7]
        real_s_ = torch.index_select(self.source_parse, 1, torch.tensor(index).cuda())
        self.input_parsing = torch.cat((real_s_, self.target_pose_embedding, self.cloth_parse), 1).cuda()
        if opt.train_mode == 'gmm':
            self.im_h = result['im_h'].float().cuda()
            self.source_parse_shape = result['source_parse_shape'].float().cuda()
            self.agnostic = torch.cat((self.source_parse_shape, self.im_h, self.target_pose_embedding), dim=1)
        elif opt.train_mode == 'parsing':
            self.real_s = self.input_parsing
            self.source_parse_vis = result['source_parse_vis'].float().cuda()
            self.target_parse_vis = result['target_parse_vis'].float().cuda()
        elif opt.train_mode == 'appearance':
            if opt.joint_all:
                # joint training: keep the parsing generator in the graph
                self.generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
            else:
                with torch.no_grad():
                    self.generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
            self.input_appearance = torch.cat((self.image_without_cloth, self.warped_cloth, self.generated_parsing), 1).cuda()
            # NOTE: one-hot encode the argmax of the generated parsing, channel by channel
            generated_parsing_ = torch.argmax(self.generated_parsing, 1, keepdim=True)
            self.generated_parsing_argmax = torch.Tensor()
            for _ in range(20):
                self.generated_parsing_argmax = torch.cat([self.generated_parsing_argmax.float().cuda(), (generated_parsing_ == _).float()], dim=1)
            # channels 5/6/7 are the cloth-related labels
            self.warped_cloth_parse = ((generated_parsing_ == 5) + (generated_parsing_ == 6) + (generated_parsing_ == 7)).float().cuda()
            if opt.save_time:
                self.generated_parsing_vis = torch.Tensor([0]).expand_as(self.target_image)
            else:
                # decoding labels to a color image costs much time
                _generated_parsing = torch.argmax(self.generated_parsing, 1, keepdim=True)
                _generated_parsing = _generated_parsing.permute(0,2,3,1).contiguous().int()
                self.generated_parsing_vis = pose_utils.decode_labels(_generated_parsing)  # numpy array
            self.real_s = self.source_image
        elif opt.train_mode == 'face':
            if opt.joint_all:  # opt.joint
                generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
                self.generated_parsing_face = F.softmax(self.generator_parsing(self.input_parsing), 1)
            else:
                generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
                # NOTE: one-hot encode the argmax of the generated parsing
                generated_parsing_ = torch.argmax(generated_parsing, 1, keepdim=True)
                self.generated_parsing_argmax = torch.Tensor()
                for _ in range(20):
                    self.generated_parsing_argmax = torch.cat([self.generated_parsing_argmax.float().cuda(), (generated_parsing_ == _).float()], dim=1)
                # self.generated_parsing_face = generated_parsing_c
                self.generated_parsing_face = self.target_parse
            self.input_appearance = torch.cat((self.image_without_cloth, self.warped_cloth, generated_parsing), 1).cuda()
            with torch.no_grad():
                # render the coarse try-on image with the (frozen) appearance generator
                self.generated_inter = self.generator_appearance(self.input_appearance)
                p_rendered, m_composite = torch.split(self.generated_inter, 3, 1)
                p_rendered = torch.tanh(p_rendered)        # F.tanh is deprecated
                m_composite = torch.sigmoid(m_composite)   # F.sigmoid is deprecated
                self.generated_image = self.warped_cloth * m_composite + p_rendered * (1 - m_composite)
            self.source_face = create_part(self.source_image, self.source_parse, 'face', False)
            self.target_face_real = create_part(self.target_image, self.generated_parsing_face, 'face', False)
            self.target_face_fake = create_part(self.generated_image, self.generated_parsing_face, 'face', False)
            self.generated_image_without_face = self.generated_image - self.target_face_fake
            self.input_face = torch.cat((self.source_face, self.target_face_fake), 1).cuda()
            self.real_s = self.source_face
        elif opt.train_mode == 'joint':
            self.input_joint = torch.cat((self.image_without_cloth, self.warped_cloth, self.generated_parsing), 1).cuda()
        self.t3 = time()
        # setattr(self, 'input', getattr(self, 'input_' + self.train_mode))

    def forward(self, opt):
        """Run the forward pass for the current stage, setting ``fake_t``/``real_t``."""
        self.t4 = time()
        if self.train_mode == 'gmm':
            self.grid, self.theta = self.gmm_model(self.agnostic, self.cloth_image)
            self.warped_cloth_predict = F.grid_sample(self.cloth_image, self.grid)
        if opt.train_mode == 'parsing':
            self.fake_t = F.softmax(self.generator_parsing(self.input_parsing), dim=1)
            self.real_t = self.target_parse
        if opt.train_mode == 'appearance':
            generated_inter = self.generator_appearance(self.input_appearance)
            p_rendered, m_composite = torch.split(generated_inter, 3, 1)
            p_rendered = torch.tanh(p_rendered)            # F.tanh is deprecated
            self.m_composite = torch.sigmoid(m_composite)  # F.sigmoid is deprecated
            # composite the warped cloth over the rendered person via the soft mask
            p_tryon = self.warped_cloth * self.m_composite + p_rendered * (1 - self.m_composite)
            self.fake_t = p_tryon
            self.real_t = self.target_image
            if opt.joint_all:
                generate_face = create_part(self.fake_t, self.generated_parsing_argmax, 'face', False)
                generate_image_without_face = self.fake_t - generate_face
                real_s_face = create_part(self.source_image, self.source_parse, 'face', False)
                real_t_face = create_part(self.target_image, self.generated_parsing_argmax, 'face', False)
                # renamed from ``input`` to avoid shadowing the builtin
                face_input = torch.cat((real_s_face, generate_face), dim=1)
                fake_t_face = self.generator_face(face_input)
                ### residual learning
                # NOTE(review): the residual variant below is disabled; confirm intent.
                # fake_t_face = create_part(fake_t_face, self.generated_parsing, 'face', False)
                # fake_t_face = generate_face + fake_t_face
                fake_t_face = create_part(fake_t_face, self.generated_parsing_argmax, 'face', False)
                ### fake image: paste the refined face back onto the body
                self.fake_t = generate_image_without_face + fake_t_face
        if opt.train_mode == 'face':
            self.fake_t = self.generator_face(self.input_face)
            if opt.face_residual:
                # residual refinement: the generator predicts a correction added
                # to the coarse face crop
                self.fake_t = create_part(self.fake_t, self.generated_parsing_face, 'face', False)
                self.fake_t = self.target_face_fake + self.fake_t
            self.fake_t = create_part(self.fake_t, self.generated_parsing_face, 'face', False)
            self.refined_image = self.generated_image_without_face + self.fake_t
            self.real_t = create_part(self.target_image, self.generated_parsing_face, 'face', False)
        self.t5 = time()

    def backward_G(self, opt):
        """Compute and backpropagate the generator loss for the current stage."""
        self.t6 = time()
        if opt.train_mode == 'gmm':
            self.loss = self.criterionL1(self.warped_cloth_predict, self.im_c)
            self.loss.backward()
            self.t7 = time()
            return
        # conditional GAN input: condition (real_s) concatenated with the fake
        fake_st = torch.cat((self.real_s, self.fake_t), 1)
        pred_fake = self.discriminator(fake_st)
        if opt.train_mode == 'parsing':
            self.loss_G_GAN = self.criterionGAN(pred_fake,True)
            self.loss_G_BCE = self.criterionBCE_re(self.fake_t, self.real_t) * opt.lambda_L1
            self.loss_G = self.loss_G_GAN + self.loss_G_BCE
            self.loss_G.backward()
        if opt.train_mode == 'appearance':
            self.loss_G_GAN = self.criterionGAN(pred_fake,True) * opt.G_GAN
            # vgg loss: two perceptual terms with different part/gram settings
            loss_vgg1,_ = self.criterion_vgg(self.fake_t, self.real_t, self.target_parse, False, True, False)
            loss_vgg2,_ = self.criterion_vgg(self.fake_t, self.real_t, self.target_parse, False, False, False)
            self.loss_G_vgg = (loss_vgg1 + loss_vgg2) * opt.G_VGG
            self.loss_G_mask = self.criterionL1(self.m_composite, self.warped_cloth_parse) * opt.mask
            if opt.mask_tvloss:
                self.loss_G_mask_tv = self.criterion_tv(self.m_composite)
            else:
                self.loss_G_mask_tv = torch.Tensor([0]).cuda()
            self.loss_G_L1 = self.criterion_smooth_L1(self.fake_t, self.real_t) * opt.lambda_L1
            if opt.joint_all and opt.joint_parse_loss:
                self.loss_G_parsing = self.criterionBCE_re(self.generated_parsing, self.target_parse) * opt.joint_G_parsing
                self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_vgg + self.loss_G_mask + self.loss_G_parsing
            else:
                self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_vgg + self.loss_G_mask + self.loss_G_mask_tv
            self.loss_G.backward()
        if opt.train_mode == 'face':
            _, self.loss_G_vgg = self.criterion_vgg(self.fake_t, self.real_t, self.generated_parsing_face, False, False, False)  # part, gram, nearest
            self.loss_G_vgg = self.loss_G_vgg * opt.face_vgg
            self.loss_G_L1 = self.criterionL1(self.fake_t, self.real_t) * opt.face_L1
            self.loss_G_GAN = self.criterionGAN(pred_fake, True) * opt.face_gan
            self.loss_G_refine = self.criterionL1(self.refined_image, self.target_image) * opt.face_img_L1
            self.loss_G = self.loss_G_vgg + self.loss_G_L1 + self.loss_G_GAN + self.loss_G_refine
            self.loss_G.backward()
        self.t7 = time()

    def backward_D(self, opt):
        """Compute and backpropagate the discriminator loss (real vs. fake pairs)."""
        self.t8 = time()
        fake_st = torch.cat((self.real_s, self.fake_t), 1)
        real_st = torch.cat((self.real_s, self.real_t), 1)
        pred_fake = self.discriminator(fake_st.detach())  # detach: no grad into G
        pred_real = self.discriminator(real_st)  # batch_size, 1, 30, 30
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        self.loss_D = (self.loss_D_real + self.loss_D_fake) * 0.5
        self.loss_D.backward()
        self.t9 = time()

    def optimize_parameters(self, opt):
        """One optimization step: forward, then D update, then G update."""
        self.t10 = time()
        self.forward(opt)  # compute fake images: G(A)
        if opt.train_mode == 'gmm':
            # gmm has no discriminator; only the warp loss is optimized
            self.optimizer_G.zero_grad()  # set G's gradients to zero
            self.backward_G(opt)          # calculate gradients for G
            self.optimizer_G.step()       # update G's weights
            self.t11 = time()
            return
        # update D
        self.set_requires_grad(self.discriminator, True)  # enable backprop for D
        self.optimizer_D.zero_grad()  # set D's gradients to zero
        self.backward_D(opt)          # calculate gradients for D
        self.optimizer_D.step()       # update D's weights
        # update G
        self.set_requires_grad(self.discriminator, False)  # D requires no gradients when optimizing G
        if opt.joint_all:
            # joint mode steps all three generator optimizers
            for _ in self.optimizer_G:
                _.zero_grad()
            self.backward_G(opt)
            for _ in self.optimizer_G:
                _.step()
        else:
            self.optimizer_G.zero_grad()  # set G's gradients to zero
            self.backward_G(opt)          # calculate gradients for G
            self.optimizer_G.step()       # update G's weights
        self.t11 = time()

    def save_result(self, opt, epoch, iteration):
        """Save a visualization grid for the current stage to the vis directory."""
        if opt.train_mode == 'gmm':
            images = [self.cloth_image,self.warped_cloth.detach(), self.im_c]
        if opt.train_mode == 'parsing':
            fake_t_vis = pose_utils.decode_labels(torch.argmax(self.fake_t, dim=1, keepdim=True).permute(0,2,3,1).contiguous())
            images = [self.source_parse_vis, self.target_parse_vis, self.target_pose_img, self.cloth_parse, fake_t_vis]
        if opt.train_mode == 'appearance':
            images = [self.image_without_cloth, self.warped_cloth, self.warped_cloth_parse, self.target_image,
                      self.cloth_image, self.generated_parsing_vis, self.fake_t.detach()]
        if opt.train_mode == 'face':
            images = [self.generated_image.detach(), self.refined_image.detach(), self.source_image, self.target_image, self.real_t, self.fake_t.detach()]
        pose_utils.save_img(images, os.path.join(self.vis_path, str(epoch) + '_' + str(iteration) + '.jpg'))

    def save_model(self, opt, epoch):
        """Write checkpoint files for the networks of the current stage."""
        if opt.train_mode == 'gmm':
            model_G = osp.join(self.save_dir, 'generator', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss))
            if not osp.exists(osp.join(self.save_dir, 'generator')):
                os.makedirs(osp.join(self.save_dir, 'generator'))
            torch.save(self.generator.state_dict(), model_G)
        elif not opt.joint_all:
            model_G = osp.join(self.save_dir, 'generator', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D = osp.join(self.save_dir, 'dicriminator', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            if not osp.exists(osp.join(self.save_dir, 'generator')):
                os.makedirs(osp.join(self.save_dir, 'generator'))
            if not osp.exists(osp.join(self.save_dir, 'dicriminator')):
                os.makedirs(osp.join(self.save_dir, 'dicriminator'))
            torch.save(self.generator.state_dict(), model_G)
            torch.save(self.discriminator.state_dict(), model_D)
        else:
            model_G_parsing = osp.join(self.save_dir, 'generator_parsing', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_parsing = osp.join(self.save_dir, 'dicriminator_parsing', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            model_G_appearance = osp.join(self.save_dir, 'generator_appearance', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_appearance = osp.join(self.save_dir, 'dicriminator_appearance', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            model_G_face = osp.join(self.save_dir, 'generator_face', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_face = osp.join(self.save_dir, 'dicriminator_face', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            joint_save_dirs = [osp.join(self.save_dir, 'generator_parsing'), osp.join(self.save_dir, 'dicriminator_parsing'),
                               osp.join(self.save_dir, 'generator_appearance'), osp.join(self.save_dir, 'dicriminator_appearance'),
                               osp.join(self.save_dir, 'generator_face'), osp.join(self.save_dir, 'dicriminator_face')]
            for _ in joint_save_dirs:
                if not osp.exists(_):
                    os.makedirs(_)
            torch.save(self.generator_parsing.state_dict(), model_G_parsing)
            torch.save(self.generator_appearance.state_dict(), model_G_appearance)
            torch.save(self.generator_face.state_dict(), model_G_face)
            torch.save(self.discriminator_appearance.state_dict(), model_D_appearance)
            # BUGFIX: the parsing/face discriminator paths were computed but the
            # networks were never written; save them too.
            torch.save(self.discriminator_parsing.state_dict(), model_D_parsing)
            torch.save(self.discriminator_face.state_dict(), model_D_face)

    def print_current_errors(self, opt, epoch, i):
        """Print and append the current losses (per ``train_mode``) to the log file."""
        if opt.train_mode == 'gmm':
            errors = {'loss_L1': self.loss.item()}
        if opt.train_mode == 'appearance':
            # BUGFIX (all branches below): 'loss_D_fake' previously reported
            # self.loss_D_real.item(); it now reports the actual fake loss.
            errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_mask':self.loss_G_mask.item(),
                      'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_fake.item(), 'loss_G_mask_tv': self.loss_G_mask_tv.item()}
            if opt.joint_all and opt.joint_parse_loss:
                errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_mask':self.loss_G_mask.item(),
                          'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_fake.item(), 'loss_G_parsing': self.loss_G_parsing.item()}
        if opt.train_mode == 'parsing':
            errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_BCE': self.loss_G_BCE.item(),
                      'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_fake.item()}
        if opt.train_mode == 'face':
            errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_refine':self.loss_G_refine.item(),
                      'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_fake.item()}
        t = self.t11 - self.t2
        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
        for k, v in sorted(errors.items()):
            if v != 0:
                message += '%s: %.3f ' % (k, v)
        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)
|
import numpy as np
import torch
import os
from .base_model import BaseModel
from models.networks import Define_G, Define_D
from utils.transforms import create_part
import torch.nn.functional as F
from utils import pose_utils
from lib.geometric_matching_multi_gpu import GMM
from .base_model import BaseModel
from time import time
from utils import pose_utils
import os.path as osp
from torchvision import utils
import random
class GenerationModel(BaseModel):
def name(self):
return 'Generation model: pix2pix | pix2pixHD'
def __init__(self, opt):
self.t0 = time()
BaseModel.__init__(self, opt)
self.train_mode = opt.train_mode
# resume of networks
resume_gmm = opt.resume_gmm
resume_G_parse = opt.resume_G_parse
resume_D_parse = opt.resume_D_parse
resume_G_appearance = opt.resume_G_app
resume_D_appearance = opt.resume_D_app
resume_G_face = opt.resume_G_face
resume_D_face = opt.resume_D_face
# define network
self.gmm_model = torch.nn.DataParallel(GMM(opt)).cuda()
self.generator_parsing = Define_G(opt.input_nc_G_parsing, opt.output_nc_parsing, opt.ndf, opt.netG_parsing, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
self.discriminator_parsing = Define_D(opt.input_nc_D_parsing, opt.ndf, opt.netD_parsing, opt.n_layers_D,
opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
self.generator_appearance = Define_G(opt.input_nc_G_app, opt.output_nc_app, opt.ndf, opt.netG_app, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids, with_tanh=False)
self.discriminator_appearance = Define_D(opt.input_nc_D_app, opt.ndf, opt.netD_app, opt.n_layers_D,
opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
self.generator_face = Define_G(opt.input_nc_D_face, opt.output_nc_face, opt.ndf, opt.netG_face, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, opt.gpu_ids)
self.discriminator_face = Define_D(opt.input_nc_D_face, opt.ndf, opt.netD_face, opt.n_layers_D,
opt.norm, opt.init_type, opt.init_gain, opt.gpu_ids)
if opt.train_mode == 'gmm':
setattr(self, 'generator', self.gmm_model)
else:
setattr(self, 'generator', getattr(self, 'generator_' + self.train_mode))
setattr(self, 'discriminator', getattr(self, 'discriminator_' + self.train_mode))
# load networks
self.networks_name = ['gmm', 'parsing', 'parsing', 'appearance', 'appearance', 'face', 'face']
self.networks_model = [self.gmm_model, self.generator_parsing, self.discriminator_parsing, self.generator_appearance, self.discriminator_appearance,
self.generator_face, self.discriminator_face]
self.networks = dict(zip(self.networks_name, self.networks_model))
self.resume_path = [resume_gmm, resume_G_parse, resume_D_parse, resume_G_appearance, resume_D_appearance, resume_G_face, resume_D_face]
for network, resume in zip(self.networks_model, self.resume_path):
if network != [] and resume != '':
assert(osp.exists(resume), 'the resume not exits')
print('loading...')
self.load_network(network, resume, ifprint=False)
# define optimizer
self.optimizer_gmm = torch.optim.Adam(self.gmm_model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
self.optimizer_parsing_G = torch.optim.Adam(self.generator_parsing.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
self.optimizer_parsing_D = torch.optim.Adam(self.discriminator_parsing.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
self.optimizer_appearance_G = torch.optim.Adam(self.generator_appearance.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
self.optimizer_appearance_D = torch.optim.Adam(self.discriminator_appearance.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
self.optimizer_face_G = torch.optim.Adam(self.generator_face.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
self.optimizer_face_D = torch.optim.Adam(self.discriminator_face.parameters(), lr=opt.lr, betas=[opt.beta1, 0.999])
if opt.train_mode == 'gmm':
self.optimizer_G = self.optimizer_gmm
elif opt.joint_all:
self.optimizer_G = [self.optimizer_parsing_G, self.optimizer_appearance_G, self.optimizer_face_G]
setattr(self, 'optimizer_D', getattr(self, 'optimizer_' + self.train_mode + '_D'))
else:
setattr(self, 'optimizer_G', getattr(self, 'optimizer_' + self.train_mode + '_G'))
setattr(self, 'optimizer_D', getattr(self, 'optimizer_' + self.train_mode + '_D'))
self.t1 = time()
def set_input(self, opt, result):
self.t2 = time()
self.source_pose_embedding = result['source_pose_embedding'].float().cuda()
self.target_pose_embedding = result['target_pose_embedding'].float().cuda()
self.source_image = result['source_image'].float().cuda()
self.target_image = result['target_image'].float().cuda()
self.source_parse = result['source_parse'].float().cuda()
self.target_parse = result['target_parse'].float().cuda()
self.cloth_image = result['cloth_image'].float().cuda()
self.cloth_parse = result['cloth_parse'].float().cuda()
self.warped_cloth = result['warped_cloth_image'].float().cuda() # preprocess warped image from gmm model
self.target_parse_cloth = result['target_parse_cloth'].float().cuda()
self.target_pose_img = result['target_pose_img'].float().cuda()
self.image_without_cloth = create_part(self.source_image, self.source_parse, 'image_without_cloth', False)
self.im_c = result['im_c'].float().cuda() # target warped cloth
index = [x for x in list(range(20)) if x != 5 and x != 6 and x != 7]
real_s_ = torch.index_select(self.source_parse, 1, torch.tensor(index).cuda())
self.input_parsing = torch.cat((real_s_, self.target_pose_embedding, self.cloth_parse), 1).cuda()
if opt.train_mode == 'gmm':
self.im_h = result['im_h'].float().cuda()
self.source_parse_shape = result['source_parse_shape'].float().cuda()
self.agnostic = torch.cat((self.source_parse_shape, self.im_h, self.target_pose_embedding), dim=1)
elif opt.train_mode == 'parsing':
self.real_s = self.input_parsing
self.source_parse_vis = result['source_parse_vis'].float().cuda()
self.target_parse_vis = result['target_parse_vis'].float().cuda()
elif opt.train_mode == 'appearance':
if opt.joint_all:
self.generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
else:
with torch.no_grad():
self.generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
self.input_appearance = torch.cat((self.image_without_cloth, self.warped_cloth, self.generated_parsing), 1).cuda()
"attention please"
generated_parsing_ = torch.argmax(self.generated_parsing, 1, keepdim=True)
self.generated_parsing_argmax = torch.Tensor()
for _ in range(20):
self.generated_parsing_argmax = torch.cat([self.generated_parsing_argmax.float().cuda(), (generated_parsing_ == _).float()], dim=1)
self.warped_cloth_parse = ((generated_parsing_ == 5) + (generated_parsing_ == 6) + (generated_parsing_ == 7)).float().cuda()
if opt.save_time:
self.generated_parsing_vis = torch.Tensor([0]).expand_as(self.target_image)
else:
# decode labels cost much time
_generated_parsing = torch.argmax(self.generated_parsing, 1, keepdim=True)
_generated_parsing = _generated_parsing.permute(0,2,3,1).contiguous().int()
self.generated_parsing_vis = pose_utils.decode_labels(_generated_parsing) #array
self.real_s = self.source_image
elif opt.train_mode == 'face':
if opt.joint_all:# opt.joint
generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
self.generated_parsing_face = F.softmax(self.generator_parsing(self.input_parsing), 1)
else:
generated_parsing = F.softmax(self.generator_parsing(self.input_parsing), 1)
"attention please"
generated_parsing_ = torch.argmax(generated_parsing, 1, keepdim=True)
self.generated_parsing_argmax = torch.Tensor()
for _ in range(20):
self.generated_parsing_argmax = torch.cat([self.generated_parsing_argmax.float().cuda(), (generated_parsing_ == _).float()], dim=1)
# self.generated_parsing_face = generated_parsing_c
self.generated_parsing_face = self.target_parse
self.input_appearance = torch.cat((self.image_without_cloth, self.warped_cloth, generated_parsing), 1).cuda()
with torch.no_grad():
self.generated_inter = self.generator_appearance(self.input_appearance)
p_rendered, m_composite = torch.split(self.generated_inter, 3, 1)
p_rendered = F.tanh(p_rendered)
m_composite = F.sigmoid(m_composite)
self.generated_image = self.warped_cloth * m_composite + p_rendered * (1 - m_composite)
self.source_face = create_part(self.source_image, self.source_parse, 'face', False)
self.target_face_real = create_part(self.target_image, self.generated_parsing_face, 'face', False)
self.target_face_fake = create_part(self.generated_image, self.generated_parsing_face, 'face', False)
self.generated_image_without_face = self.generated_image - self.target_face_fake
self.input_face = torch.cat((self.source_face, self.target_face_fake), 1).cuda()
self.real_s = self.source_face
elif opt.train_mode == 'joint':
self.input_joint = torch.cat((self.image_without_cloth, self.warped_cloth, self.generated_parsing), 1).cuda()
self.t3 = time()
# setattr(self, 'input', getattr(self, 'input_' + self.train_mode))
def forward(self, opt):
self.t4 = time()
if self.train_mode == 'gmm':
self.grid, self.theta = self.gmm_model(self.agnostic, self.cloth_image)
self.warped_cloth_predict = F.grid_sample(self.cloth_image, self.grid)
if opt.train_mode == 'parsing':
self.fake_t = F.softmax(self.generator_parsing(self.input_parsing), dim=1)
self.real_t = self.target_parse
if opt.train_mode == 'appearance':
generated_inter = self.generator_appearance(self.input_appearance)
p_rendered, m_composite = torch.split(generated_inter, 3, 1)
p_rendered = F.tanh(p_rendered)
self.m_composite = F.sigmoid(m_composite)
p_tryon = self.warped_cloth * self.m_composite + p_rendered * (1 - self.m_composite)
self.fake_t = p_tryon
self.real_t = self.target_image
if opt.joint_all:
generate_face = create_part(self.fake_t, self.generated_parsing_argmax, 'face', False)
generate_image_without_face = self.fake_t - generate_face
real_s_face = create_part(self.source_image, self.source_parse, 'face', False)
real_t_face = create_part(self.target_image, self.generated_parsing_argmax, 'face', False)
input = torch.cat((real_s_face, generate_face), dim=1)
fake_t_face = self.generator_face(input)
###residual learning
r"""attention
"""
# fake_t_face = create_part(fake_t_face, self.generated_parsing, 'face', False)
# fake_t_face = generate_face + fake_t_face
fake_t_face = create_part(fake_t_face, self.generated_parsing_argmax, 'face', False)
### fake image
self.fake_t = generate_image_without_face + fake_t_face
if opt.train_mode == 'face':
self.fake_t = self.generator_face(self.input_face)
if opt.face_residual:
self.fake_t = create_part(self.fake_t, self.generated_parsing_face, 'face', False)
self.fake_t = self.target_face_fake + self.fake_t
self.fake_t = create_part(self.fake_t, self.generated_parsing_face, 'face', False)
self.refined_image = self.generated_image_without_face + self.fake_t
self.real_t = create_part(self.target_image, self.generated_parsing_face, 'face', False)
self.t5 = time()
    def backward_G(self, opt):
        """Compute and backpropagate the generator loss for the active stage.

        'gmm' uses a plain L1 warp loss; the other stages combine a
        conditional-GAN loss on (source, fake) pairs with stage-specific
        reconstruction terms. Results are stored on ``self.loss_*``.
        ``self.t6``/``self.t7`` record wall times.
        """
        self.t6 = time()
        if opt.train_mode == 'gmm':
            # L1 between the predicted warp and the target warped cloth.
            self.loss = self.criterionL1(self.warped_cloth_predict, self.im_c)
            self.loss.backward()
            self.t7 = time()
            return
        # Conditional GAN: discriminator sees (source, generated) pairs.
        fake_st = torch.cat((self.real_s, self.fake_t), 1)
        pred_fake = self.discriminator(fake_st)
        if opt.train_mode == 'parsing':
            self.loss_G_GAN = self.criterionGAN(pred_fake,True)
            self.loss_G_BCE = self.criterionBCE_re(self.fake_t, self.real_t) * opt.lambda_L1
            self.loss_G = self.loss_G_GAN + self.loss_G_BCE
            self.loss_G.backward()
        if opt.train_mode == 'appearance':
            self.loss_G_GAN = self.criterionGAN(pred_fake,True) * opt.G_GAN
            # VGG perceptual loss, evaluated twice with different flags on
            # the project criterion (see criterion_vgg for flag semantics).
            loss_vgg1,_ = self.criterion_vgg(self.fake_t, self.real_t, self.target_parse, False, True, False)
            loss_vgg2,_ = self.criterion_vgg(self.fake_t, self.real_t, self.target_parse, False, False, False)
            self.loss_G_vgg = (loss_vgg1 + loss_vgg2) * opt.G_VGG
            # Encourage the composition mask to match the warped-cloth parse.
            self.loss_G_mask = self.criterionL1(self.m_composite, self.warped_cloth_parse) * opt.mask
            if opt.mask_tvloss:
                self.loss_G_mask_tv = self.criterion_tv(self.m_composite)
            else:
                self.loss_G_mask_tv = torch.Tensor([0]).cuda()
            self.loss_G_L1 = self.criterion_smooth_L1(self.fake_t, self.real_t) * opt.lambda_L1
            if opt.joint_all and opt.joint_parse_loss:
                # Joint training additionally supervises the parsing branch.
                self.loss_G_parsing = self.criterionBCE_re(self.generated_parsing, self.target_parse) * opt.joint_G_parsing
                self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_vgg + self.loss_G_mask + self.loss_G_parsing
            else:
                self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_G_vgg + self.loss_G_mask + self.loss_G_mask_tv
            self.loss_G.backward()
        if opt.train_mode == 'face':
            _, self.loss_G_vgg = self.criterion_vgg(self.fake_t, self.real_t, self.generated_parsing_face, False, False, False) # part, gram, nearest
            self.loss_G_vgg = self.loss_G_vgg * opt.face_vgg
            self.loss_G_L1 = self.criterionL1(self.fake_t, self.real_t) * opt.face_L1
            self.loss_G_GAN = self.criterionGAN(pred_fake, True) * opt.face_gan
            # Full-image L1 on the refined (face pasted back) image.
            self.loss_G_refine = self.criterionL1(self.refined_image, self.target_image) * opt.face_img_L1
            self.loss_G = self.loss_G_vgg + self.loss_G_L1 + self.loss_G_GAN + self.loss_G_refine
            self.loss_G.backward()
        self.t7 = time()
    def backward_D(self, opt):
        """Discriminator update: conditional-GAN loss on (source, fake) vs
        (source, real) pairs. The fake is detached so gradients reach only
        the discriminator. ``self.t8``/``self.t9`` record wall times."""
        self.t8 = time()
        fake_st = torch.cat((self.real_s, self.fake_t), 1)
        real_st = torch.cat((self.real_s, self.real_t), 1)
        pred_fake = self.discriminator(fake_st.detach())
        pred_real = self.discriminator(real_st) # batch_size,1, 30,30
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # Average the two terms, as is standard for GAN discriminators.
        self.loss_D = (self.loss_D_real + self.loss_D_fake) * 0.5
        self.loss_D.backward()
        self.t9 = time()
    def optimize_parameters(self, opt):
        """One full optimization step: forward pass, then D update (if any),
        then G update. For 'gmm' only the generator optimizer runs. In
        ``opt.joint_all`` mode ``self.optimizer_G`` is treated as an
        iterable of optimizers (one per jointly trained generator)."""
        self.t10 = time()
        self.forward(opt)                   # compute fake images: G(A)
        if opt.train_mode == 'gmm':
            self.optimizer_G.zero_grad()    # set G's gradients to zero
            self.backward_G(opt)            # calculate gradients for G
            self.optimizer_G.step()         # update G's weights
            self.t11 = time()
            return
        # update D
        self.set_requires_grad(self.discriminator, True)  # enable backprop for D
        self.optimizer_D.zero_grad()        # set D's gradients to zero
        self.backward_D(opt)                # calculate gradients for D
        self.optimizer_D.step()             # update D's weights
        # update G
        self.set_requires_grad(self.discriminator, False) # D requires no gradients when optimizing G
        if opt.joint_all:
            # Joint mode: step every generator optimizer in the collection.
            for _ in self.optimizer_G:
                _.zero_grad()
            self.backward_G(opt)
            for _ in self.optimizer_G:
                _.step()
        else:
            self.optimizer_G.zero_grad()    # set G's gradients to zero
            self.backward_G(opt)            # calculate gradients for G
            self.optimizer_G.step()         # update G's weights
        self.t11 = time()
    def save_result(self, opt, epoch, iteration):
        """Save a visualization grid for the current batch to
        ``<vis_path>/<epoch>_<iteration>.jpg``. The image list depends on
        the active ``opt.train_mode``."""
        if opt.train_mode == 'gmm':
            images = [self.cloth_image,self.warped_cloth.detach(), self.im_c]
        if opt.train_mode == 'parsing':
            # Decode the argmax parsing map into a color visualization.
            fake_t_vis = pose_utils.decode_labels(torch.argmax(self.fake_t, dim=1, keepdim=True).permute(0,2,3,1).contiguous())
            images = [self.source_parse_vis, self.target_parse_vis, self.target_pose_img, self.cloth_parse, fake_t_vis]
        if opt.train_mode == 'appearance':
            images = [self.image_without_cloth, self.warped_cloth, self.warped_cloth_parse, self.target_image,
                      self.cloth_image, self.generated_parsing_vis, self.fake_t.detach()]
        if opt.train_mode == 'face':
            images = [self.generated_image.detach(), self.refined_image.detach(), self.source_image, self.target_image, self.real_t, self.fake_t.detach()]
        pose_utils.save_img(images, os.path.join(self.vis_path, str(epoch) + '_' + str(iteration) + '.jpg'))
    def save_model(self, opt, epoch):
        """Save checkpoints under ``self.save_dir``, with the epoch and the
        current loss embedded in the filename.

        Note: 'dicriminator' is misspelled in the on-disk directory names;
        it is left untouched because existing checkpoints live there.
        """
        if opt.train_mode == 'gmm':
            model_G = osp.join(self.save_dir, 'generator', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss))
            if not osp.exists(osp.join(self.save_dir, 'generator')):
                os.makedirs(osp.join(self.save_dir, 'generator'))
            torch.save(self.generator.state_dict(), model_G)
        elif not opt.joint_all:
            # Single-stage training: save this stage's G and D.
            model_G = osp.join(self.save_dir, 'generator', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D = osp.join(self.save_dir, 'dicriminator', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            if not osp.exists(osp.join(self.save_dir, 'generator')):
                os.makedirs(osp.join(self.save_dir, 'generator'))
            if not osp.exists(osp.join(self.save_dir, 'dicriminator')):
                os.makedirs(osp.join(self.save_dir, 'dicriminator'))
            torch.save(self.generator.state_dict(), model_G)
            torch.save(self.discriminator.state_dict(), model_D)
        else:
            # Joint training: save all three generators.
            # NOTE(review): model_D_parsing and model_D_face paths are built
            # (and their directories created) but never written -- only the
            # appearance discriminator is saved. Confirm this is intended.
            model_G_parsing = osp.join(self.save_dir, 'generator_parsing', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_parsing = osp.join(self.save_dir, 'dicriminator_parsing', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            model_G_appearance = osp.join(self.save_dir, 'generator_appearance', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_appearance = osp.join(self.save_dir, 'dicriminator_appearance', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            model_G_face = osp.join(self.save_dir, 'generator_face', 'checkpoint_G_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_G))
            model_D_face = osp.join(self.save_dir, 'dicriminator_face', 'checkpoint_D_epoch_%d_loss_%0.5f_pth.tar'%(epoch, self.loss_D))
            joint_save_dirs = [osp.join(self.save_dir, 'generator_parsing'), osp.join(self.save_dir, 'dicriminator_parsing'),
                               osp.join(self.save_dir, 'generator_appearance'), osp.join(self.save_dir, 'dicriminator_appearance'),
                               osp.join(self.save_dir, 'generator_face'), osp.join(self.save_dir, 'dicriminator_face')]
            for _ in joint_save_dirs:
                if not osp.exists(_):
                    os.makedirs(_)
            torch.save(self.generator_parsing.state_dict(), model_G_parsing)
            torch.save(self.generator_appearance.state_dict(), model_G_appearance)
            torch.save(self.generator_face.state_dict(), model_G_face)
            torch.save(self.discriminator_appearance.state_dict(), model_D_appearance)
def print_current_errors(self, opt, epoch, i):
if opt.train_mode == 'gmm':
errors = {'loss_L1': self.loss.item()}
if opt.train_mode == 'appearance':
errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_mask':self.loss_G_mask.item(),
'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_real.item(), 'loss_G_mask_tv': self.loss_G_mask_tv.item()}
if opt.joint_all and opt.joint_parse_loss:
errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_mask':self.loss_G_mask.item(),
'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_real.item(), 'loss_G_parsing': self.loss_G_parsing.item()}
if opt.train_mode == 'parsing':
errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_BCE': self.loss_G_BCE.item(),
'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_real.item()}
if opt.train_mode == 'face':
errors = {'loss_G': self.loss_G.item(), 'loss_G_GAN': self.loss_G_GAN.item(), 'loss_G_vgg':self.loss_G_vgg.item(), 'loss_G_refine':self.loss_G_refine.item(),
'loss_G_L1': self.loss_G_L1.item(), 'loss_D':self.loss_D.item(), 'loss_D_real': self.loss_D_real.item(), 'loss_D_fake':self.loss_D_real.item()}
t = self.t11 - self.t2
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in sorted(errors.items()):
if v != 0:
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
|
en
| 0.608556
|
# resume of networks # define network # load networks # define optimizer # preprocess warped image from gmm model # target warped cloth # decode labels cost much time #array # opt.joint # self.generated_parsing_face = generated_parsing_c # setattr(self, 'input', getattr(self, 'input_' + self.train_mode)) ###residual learning attention # fake_t_face = create_part(fake_t_face, self.generated_parsing, 'face', False) # fake_t_face = generate_face + fake_t_face ### fake image # vgg_loss # part, gram, neareast # batch_size,1, 30,30 # compute fake images: G(A) # set G's gradients to zero # calculate graidents for G # udpate G's weights # update D # enable backprop for D # set D's gradients to zero # calculate gradients for D # update D's weights # update G # D requires no gradients when optimizing G # set G's gradients to zero # calculate graidents for G # udpate G's weights
| 1.988943
| 2
|
wikipendium/wiki/forms.py
|
iver56/wikipendium.no
| 0
|
6627446
|
<reponame>iver56/wikipendium.no<gh_stars>0
from django.forms import ModelForm, ValidationError
import django.forms as forms
from django.core.exceptions import ObjectDoesNotExist
from wikipendium.wiki.models import Article, ArticleContent
from wikipendium.wiki.merge3 import MergeError, merge
from wikipendium.wiki.langcodes import LANGUAGE_NAMES
from re import match
class ArticleForm(ModelForm):
    """Base form for editing an ArticleContent: slug (course code),
    language, title and content. Field declaration order determines form
    field order, so it must not be changed."""
    slug = forms.CharField(label='')
    # Language choices sorted by display name, with an empty placeholder.
    language_list = sorted(LANGUAGE_NAMES.items(), key=lambda x: x[1])
    choices = [('', '')] + language_list
    lang = forms.ChoiceField(label='', choices=choices)
    title = forms.CharField(label='')
    content = forms.CharField(label='', widget=forms.Textarea())
    class Meta:
        model = ArticleContent
        fields = ('lang', 'title', 'content')
    def __init__(self, *args, **kwargs):
        # Pre-fill the slug from the bound instance's article, if any, and
        # set up widget placeholders / the chosen-style language select.
        super(ArticleForm, self).__init__(*args, **kwargs)
        if hasattr(self.instance, 'article'):
            slug = self.instance.article.slug
            self.fields['slug'].widget.attrs['value'] = slug
        self.fields['slug'].widget.attrs['placeholder'] = 'Course code'
        self.fields['lang'].widget.attrs = {
            'class': "select_chosen",
            'data-placeholder': "Language"
        }
        self.fields['title'].widget.attrs['placeholder'] = 'Course title'
        # NOTE(review): fields.keyOrder is the pre-Django-1.7 ordering API.
        self.fields.keyOrder = ['slug',
                                'lang',
                                'title',
                                'content',
                                ]
    def clean(self):
        # NOTE(review): `super(ArticleForm, self)` is a no-op expression;
        # probably `super(ArticleForm, self).clean()` was intended.
        super(ArticleForm, self)
        return self.cleaned_data
class NewArticleForm(ArticleForm):
    """Form for creating a brand-new article.

    Adds slug validation on top of ArticleForm: the course code must be
    alphanumeric (per ``Article.slug_regex``) and must not already be in
    use. (The previous pass-through ``__init__`` was redundant and has
    been removed; the inherited one is identical.)
    """
    def clean_slug(self):
        """Validate and return the submitted slug."""
        slug = self.cleaned_data['slug']
        if not match('^[' + Article.slug_regex + ']+$', slug):
            raise ValidationError('Course codes must be alphanumeric.')
        # Slugs are stored upper-cased; a successful lookup means taken.
        try:
            Article.objects.get(slug=slug.upper())
        except ObjectDoesNotExist:
            pass
        else:
            raise ValidationError("This course code is already in use.")
        return slug
class AddLanguageArticleForm(ArticleForm):
    """Form for adding a translation (new language) to an existing article.

    The slug is fixed to the given article's slug and made read-only, and
    languages the article already has are removed from the choices.
    """
    def __init__(self, article, *args, **kwargs):
        preselected_lang = kwargs.pop('lang', None)
        super(AddLanguageArticleForm, self).__init__(*args, **kwargs)
        slug_attrs = self.fields['slug'].widget.attrs
        slug_attrs['value'] = article.slug
        slug_attrs['readonly'] = True
        # Drop languages that already have a version of this article.
        taken = article.get_available_language_codes()
        self.fields['lang'].choices = [
            choice for choice in self.fields['lang'].choices
            if choice[0] not in taken
        ]
        if preselected_lang:
            self.fields['lang'].initial = preselected_lang
class EditArticleForm(ArticleForm):
    """Form for editing an existing article revision.

    Tracks the revision the edit was based on via a hidden ``parent_id``
    and, when the article has advanced in the meantime, three-way-merges
    the submitted content against the common ancestor.
    """
    parent_id = forms.IntegerField(label='', widget=forms.HiddenInput())
    def __init__(self, *args, **kwargs):
        super(EditArticleForm, self).__init__(*args, **kwargs)
        if self.instance.pk:
            self.fields['parent_id'].widget.attrs['value'] = self.instance.pk
        # Slug and language are immutable when editing.
        self.fields['slug'].widget.attrs['readonly'] = True
        self.fields['lang'].widget = forms.TextInput(attrs={
            'readonly': True
        })
        self.fields.keyOrder.append('parent_id')
    def clean(self):
        # NOTE(review): `super(ArticleForm, self)` is a no-op expression;
        # probably `.clean()` was intended.
        super(ArticleForm, self)
        self.merge_contents_if_needed()
        return self.cleaned_data
    def merge_contents_if_needed(self):
        """Merge the submitted content with the newest revision when the
        edit was based on an older one. Raises ValidationError with the
        diff in ``params`` on a merge conflict."""
        parent_id = self.cleaned_data['parent_id']
        article = None
        articleContent = None
        slug = self.cleaned_data['slug']
        lang = self.cleaned_data['lang']
        # BUGFIX: was a bare `except:` which swallowed every error (DB
        # failures included); only a missing article should fall through
        # to creating a fresh one.
        try:
            article = Article.objects.get(slug=slug)
        except ObjectDoesNotExist:
            article = Article(slug=slug)
        articleContent = article.get_newest_content(lang)
        if articleContent is None:
            articleContent = ArticleContent(article=article, lang=lang)
        if parent_id and parent_id != articleContent.pk:
            # Walk both revision chains in lockstep to find the common
            # ancestor for the three-way merge.
            # NOTE(review): if either chain ends (a or b becomes None)
            # before an ancestor is found, `ancestors.add(a.pk)` raises
            # AttributeError -- preexisting behavior, left unchanged.
            parent = ArticleContent.objects.get(id=parent_id)
            a = parent
            b = articleContent
            ancestors = set()
            commonAncestor = None
            while True:
                if a and a.pk in ancestors:
                    commonAncestor = a
                    break
                if b and b.pk in ancestors:
                    commonAncestor = b
                    break
                ancestors.add(a.pk)
                ancestors.add(b.pk)
                a = a.parent
                b = b.parent
                if a and a.parent is None and b and b.parent is None:
                    break
            try:
                merged = merge(self.cleaned_data['content'],
                               commonAncestor.content, articleContent.content)
                self.cleaned_data['content'] = merged
            except MergeError as e:
                raise ValidationError("Merge conflict.",
                                      params={'diff': e.diff})
        return True
|
from django.forms import ModelForm, ValidationError
import django.forms as forms
from django.core.exceptions import ObjectDoesNotExist
from wikipendium.wiki.models import Article, ArticleContent
from wikipendium.wiki.merge3 import MergeError, merge
from wikipendium.wiki.langcodes import LANGUAGE_NAMES
from re import match
class ArticleForm(ModelForm):
    """Base form for editing an ArticleContent (slug, language, title,
    content). Duplicate copy of the class defined earlier in this dump."""
    slug = forms.CharField(label='')
    language_list = sorted(LANGUAGE_NAMES.items(), key=lambda x: x[1])
    choices = [('', '')] + language_list
    lang = forms.ChoiceField(label='', choices=choices)
    title = forms.CharField(label='')
    content = forms.CharField(label='', widget=forms.Textarea())
    class Meta:
        model = ArticleContent
        fields = ('lang', 'title', 'content')
    def __init__(self, *args, **kwargs):
        # Pre-fill the slug from the bound instance's article, if any.
        super(ArticleForm, self).__init__(*args, **kwargs)
        if hasattr(self.instance, 'article'):
            slug = self.instance.article.slug
            self.fields['slug'].widget.attrs['value'] = slug
        self.fields['slug'].widget.attrs['placeholder'] = 'Course code'
        self.fields['lang'].widget.attrs = {
            'class': "select_chosen",
            'data-placeholder': "Language"
        }
        self.fields['title'].widget.attrs['placeholder'] = 'Course title'
        self.fields.keyOrder = ['slug',
                                'lang',
                                'title',
                                'content',
                                ]
    def clean(self):
        # NOTE(review): this super(...) expression is a no-op.
        super(ArticleForm, self)
        return self.cleaned_data
class NewArticleForm(ArticleForm):
    """Form for creating a new article: slug must be alphanumeric and not
    already in use. Duplicate copy of the class defined earlier."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): pass-through __init__; identical to the inherited one.
        super(NewArticleForm, self).__init__(*args, **kwargs)
    def clean_slug(self):
        if not match('^[' + Article.slug_regex + ']+$',
                     self.cleaned_data['slug']):
            raise ValidationError('Course codes must be alphanumeric.')
        # Slugs are stored upper-cased; the ValidationError raised inside
        # the try is not an ObjectDoesNotExist, so it propagates.
        try:
            Article.objects.get(slug=self.cleaned_data['slug'].upper())
            raise ValidationError("This course code is already in use.")
        except ObjectDoesNotExist:
            pass
        return self.cleaned_data['slug']
class AddLanguageArticleForm(ArticleForm):
    """Form for adding a translation to an existing article; the slug is
    read-only and already-existing languages are filtered out of the
    choices. Duplicate copy of the class defined earlier."""
    def __init__(self, article, *args, **kwargs):
        lang = kwargs.pop('lang') if 'lang' in kwargs else None
        super(AddLanguageArticleForm, self).__init__(*args, **kwargs)
        self.fields['slug'].widget.attrs['value'] = article.slug
        self.fields['slug'].widget.attrs['readonly'] = True
        existing_langs = (
            article.get_available_language_codes()
        )
        filtered_choices = [x for x in self.fields['lang'].choices
                            if x[0] not in existing_langs]
        self.fields['lang'].choices = filtered_choices
        if lang:
            self.fields['lang'].initial = lang
class EditArticleForm(ArticleForm):
    """Form for editing an existing revision; three-way-merges the content
    when the article advanced since ``parent_id``. Duplicate copy of the
    class defined earlier."""
    parent_id = forms.IntegerField(label='', widget=forms.HiddenInput())
    def __init__(self, *args, **kwargs):
        super(EditArticleForm, self).__init__(*args, **kwargs)
        if self.instance.pk:
            self.fields['parent_id'].widget.attrs['value'] = self.instance.pk
        # Slug and language are immutable when editing.
        self.fields['slug'].widget.attrs['readonly'] = True
        self.fields['lang'].widget = forms.TextInput(attrs={
            'readonly': True
        })
        self.fields.keyOrder.append('parent_id')
    def clean(self):
        # NOTE(review): this super(...) expression is a no-op.
        super(ArticleForm, self)
        self.merge_contents_if_needed()
        return self.cleaned_data
    def merge_contents_if_needed(self):
        # Merge the submitted content with the newest revision when the
        # edit was based on an older one.
        parent_id = self.cleaned_data['parent_id']
        article = None
        articleContent = None
        slug = self.cleaned_data['slug']
        lang = self.cleaned_data['lang']
        # NOTE(review): bare except swallows every error, not just a
        # missing Article; should be `except ObjectDoesNotExist:`.
        try:
            article = Article.objects.get(slug=slug)
        except:
            article = Article(slug=slug)
        articleContent = article.get_newest_content(lang)
        if articleContent is None:
            articleContent = ArticleContent(article=article, lang=lang)
        if parent_id and parent_id != articleContent.pk:
            # Walk both revision chains in lockstep to find the common
            # ancestor. NOTE(review): crashes at ancestors.add(a.pk) if a
            # chain ends (a or b becomes None) before an ancestor is found.
            parent = ArticleContent.objects.get(id=parent_id)
            a = parent
            b = articleContent
            ancestors = set()
            commonAncestor = None
            while True:
                if a and a.pk in ancestors:
                    commonAncestor = a
                    break
                if b and b.pk in ancestors:
                    commonAncestor = b
                    break
                ancestors.add(a.pk)
                ancestors.add(b.pk)
                a = a.parent
                b = b.parent
                if a and a.parent is None and b and b.parent is None:
                    break
            try:
                merged = merge(self.cleaned_data['content'],
                               commonAncestor.content, articleContent.content)
                self.cleaned_data['content'] = merged
            except MergeError as e:
                raise ValidationError("Merge conflict.",
                                      params={'diff': e.diff})
        return True
|
none
| 1
| 2.220645
| 2
|
|
pywren_ibm_cloud/compute/backends/docker/config.py
|
tomwhite/pywren-ibm-cloud
| 1
|
6627447
|
import sys
import multiprocessing
from pywren_ibm_cloud.utils import version_str
RUNTIME_DEFAULT_35 = 'pywren-docker-runtime-v3.5:latest'
RUNTIME_DEFAULT_36 = 'pywren-docker-runtime-v3.6:latest'
RUNTIME_DEFAULT_37 = 'pywren-docker-runtime-v3.7:latest'
RUNTIME_TIMEOUT_DEFAULT = 600 # 10 minutes
RUNTIME_MEMORY_DEFAULT = 256 # 256 MB
_DOCKERFILE_DEFAULT = """
RUN apt-get update && apt-get install -y \
git
RUN pip install --upgrade pip setuptools six \
&& pip install --no-cache-dir \
pika==0.13.1 \
ibm-cos-sdk \
redis \
requests \
numpy
# Copy PyWren app to the container image.
ENV APP_HOME /pywren
WORKDIR $APP_HOME
RUN git clone https://github.com/pywren/pywren-ibm-cloud && cd pywren-ibm-cloud && pip install .
# entry_point.py is automatically generated. Do not modify next lines!
COPY entry_point.py .
ENTRYPOINT ["python", "entry_point.py"]
CMD []
"""
DOCKERFILE_DEFAULT_35 = """
FROM python:3.5-slim-buster
""" + _DOCKERFILE_DEFAULT
DOCKERFILE_DEFAULT_36 = """
FROM python:3.6-slim-buster
""" + _DOCKERFILE_DEFAULT
DOCKERFILE_DEFAULT_37 = """
FROM python:3.7-slim-buster
""" + _DOCKERFILE_DEFAULT
def load_config(config_data):
    """Fill in Docker-backend defaults on the pywren config dict, in place.

    - ``runtime_memory`` / ``runtime_timeout`` default to module constants.
    - ``runtime`` defaults to the runtime image matching the running Python
      version. Generalized: 3.5/3.6/3.7 still get the same names as the
      RUNTIME_DEFAULT_* constants, and newer interpreters now get a
      version-templated name instead of silently leaving 'runtime' unset.
    - Worker counts are mirrored between the 'pywren' and 'docker'
      sections, defaulting to the machine CPU count.
    """
    if 'runtime_memory' not in config_data['pywren']:
        config_data['pywren']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['pywren']:
        config_data['pywren']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT
    if 'runtime' not in config_data['pywren']:
        this_version_str = version_str(sys.version_info)
        # Same pattern as RUNTIME_DEFAULT_35/36/37, for any version.
        config_data['pywren']['runtime'] = \
            'pywren-docker-runtime-v{}:latest'.format(this_version_str)
    if 'docker' not in config_data:
        config_data['docker'] = {}
    if 'workers' in config_data['pywren']:
        config_data['docker']['workers'] = config_data['pywren']['workers']
    else:
        if 'workers' not in config_data['docker']:
            total_cores = multiprocessing.cpu_count()
            config_data['pywren']['workers'] = total_cores
            config_data['docker']['workers'] = total_cores
        else:
            config_data['pywren']['workers'] = config_data['docker']['workers']
|
import sys
import multiprocessing
from pywren_ibm_cloud.utils import version_str
RUNTIME_DEFAULT_35 = 'pywren-docker-runtime-v3.5:latest'
RUNTIME_DEFAULT_36 = 'pywren-docker-runtime-v3.6:latest'
RUNTIME_DEFAULT_37 = 'pywren-docker-runtime-v3.7:latest'
RUNTIME_TIMEOUT_DEFAULT = 600 # 10 minutes
RUNTIME_MEMORY_DEFAULT = 256 # 256 MB
_DOCKERFILE_DEFAULT = """
RUN apt-get update && apt-get install -y \
git
RUN pip install --upgrade pip setuptools six \
&& pip install --no-cache-dir \
pika==0.13.1 \
ibm-cos-sdk \
redis \
requests \
numpy
# Copy PyWren app to the container image.
ENV APP_HOME /pywren
WORKDIR $APP_HOME
RUN git clone https://github.com/pywren/pywren-ibm-cloud && cd pywren-ibm-cloud && pip install .
# entry_point.py is automatically generated. Do not modify next lines!
COPY entry_point.py .
ENTRYPOINT ["python", "entry_point.py"]
CMD []
"""
DOCKERFILE_DEFAULT_35 = """
FROM python:3.5-slim-buster
""" + _DOCKERFILE_DEFAULT
DOCKERFILE_DEFAULT_36 = """
FROM python:3.6-slim-buster
""" + _DOCKERFILE_DEFAULT
DOCKERFILE_DEFAULT_37 = """
FROM python:3.7-slim-buster
""" + _DOCKERFILE_DEFAULT
def load_config(config_data):
    """Fill in Docker-backend defaults on the pywren config dict, in place.
    Duplicate copy of the function defined earlier.

    NOTE(review): on Python versions other than 3.5/3.6/3.7 no 'runtime'
    is set, which will surface later as a KeyError -- confirm intended.
    """
    if 'runtime_memory' not in config_data['pywren']:
        config_data['pywren']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['pywren']:
        config_data['pywren']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT
    if 'runtime' not in config_data['pywren']:
        this_version_str = version_str(sys.version_info)
        if this_version_str == '3.5':
            config_data['pywren']['runtime'] = RUNTIME_DEFAULT_35
        elif this_version_str == '3.6':
            config_data['pywren']['runtime'] = RUNTIME_DEFAULT_36
        elif this_version_str == '3.7':
            config_data['pywren']['runtime'] = RUNTIME_DEFAULT_37
    if 'docker' not in config_data:
        config_data['docker'] = {}
    # Mirror the worker count between the 'pywren' and 'docker' sections,
    # defaulting to the machine CPU count when neither specifies it.
    if 'workers' in config_data['pywren']:
        config_data['docker']['workers'] = config_data['pywren']['workers']
    else:
        if 'workers' not in config_data['docker']:
            total_cores = multiprocessing.cpu_count()
            config_data['pywren']['workers'] = total_cores
            config_data['docker']['workers'] = total_cores
        else:
            config_data['pywren']['workers'] = config_data['docker']['workers']
|
en
| 0.312297
|
# 10 minutes # 256 MB RUN apt-get update && apt-get install -y \ git RUN pip install --upgrade pip setuptools six \ && pip install --no-cache-dir \ pika==0.13.1 \ ibm-cos-sdk \ redis \ requests \ numpy # Copy PyWren app to the container image. ENV APP_HOME /pywren WORKDIR $APP_HOME RUN git clone https://github.com/pywren/pywren-ibm-cloud && cd pywren-ibm-cloud && pip install . # entry_point.py is automatically generated. Do not modify next lines! COPY entry_point.py . ENTRYPOINT ["python", "entry_point.py"] CMD [] FROM python:3.5-slim-buster FROM python:3.6-slim-buster FROM python:3.7-slim-buster
| 1.985335
| 2
|
LwF_Decentralized_Same/PLN_Class.py
|
anubhabghosh/EQ2443-5
| 0
|
6627448
|
<filename>LwF_Decentralized_Same/PLN_Class.py
########################################################################
# Project Name: Decentralised Deep Learning without Forgetting
# Creators: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# Project Owners: <NAME> (<EMAIL>),
# <NAME> (<EMAIL>)
# December 2019
#########################################################################
import numpy as np
import sklearn as sk
from Admm import optimize_admm, admm_sameset_modified, admm_sameset
from func_set import compute_NME, compute_accuracy
import matplotlib.pyplot as plt
##################################################################
# Class for initialising the PLN objects in every layer
# Attributes:
# Q : no. of output classes
# X : Input data (can be raw input or hidden activation)
# layer_no : Layer No. of the current PLN object
# n_hidden : No. of hidden nodes for that layer
# Functions:
# activation_function: Implements ReLU activation
# initialise_random_matrix : Implements Random matrix for every
# layer
# Normalisation : Magnitude normalisation for Random Matrix
##################################################################
class PLN():
    """One layer of a Progressive Learning Network (PLN/SSFN).

    Holds the layer's fixed parts (the [I; -I] stack ``V_Q`` and the random
    projection ``R_l``), the trainable output matrix ``O_l`` and the layer
    activation ``Y_l``.

    Q        -- number of output classes
    X        -- layer input (raw features or previous hidden activation)
    layer_no -- index of this layer
    n_hidden -- number of hidden nodes in this layer
    """
    def __init__(self, Q: int, X, layer_no, n_hidden: int):
        self.input = X
        self.n_l = layer_no
        # Top of the composite weight matrix carries signed class outputs.
        self.V_Q = np.concatenate((np.eye(Q), -np.eye(Q)), axis=0)
        # Random projection for the remaining n_hidden - 2Q nodes.
        self.R_l = self.initialise_random_matrix(n_hidden - 2*Q, X.shape[0])
        # Output matrix; optimized later by ADMM/SGD.
        self.O_l = self.initialise_random_matrix(Q, n_hidden)
        self.Y_l = np.zeros((n_hidden, 1))
    def activation_function(self, Z):
        """Element-wise ReLU."""
        return np.where(Z > 0, Z, 0)
    def initialise_random_matrix(self, M, N):
        """Return an M x N matrix with i.i.d. standard-normal entries."""
        return np.random.normal(0, 1, size=(M, N))
    def normalization(self, A):
        """Scale each column of A to unit Euclidean norm."""
        return A / np.sum(A**2, axis=0, keepdims=True)**(1/2)
################################################################
# pln: a list containing all pln layers. Or: an empty list
# W: the corresponding W matrix, or en empty np.array
# Structure:
# pln: a list containing all the PLN layers.
# num_layer: number of layers
# W_ls : linear mapping matrix in layer 0
# mu : ADMM learning rate relevant
# lamba : wls training factors avoiding overfitting
# rho : ADMM learning rate relevant for computing Wls
# max_iterations : Maximum number of iteration in ADMM
################################################################
class PLN_network():
    """A stack of PLN layers plus the layer-0 least-squares mapping W_ls.

    pln       -- optional pre-built list/array of PLN layers
    W_ls      -- optional precomputed layer-0 mapping (O* for layer 0)
    num_layer -- number of layers when building from scratch
    mu        -- ADMM multiplier for the per-layer solves
    maxit     -- maximum ADMM iterations
    lamba     -- regularization for the least-squares W_ls
    rho       -- ADMM multiplier used when computing W_ls
    """
    def __init__(self, pln: np.array=None, W_ls: np.array=None, num_layer: int=20, mu=0.1, maxit=30, lamba=1e2, rho=0.1):
        if pln is not None:
            self.num_layer = len(pln)
            # BUGFIX: self.pln must exist before the copy loop below;
            # previously `self.pln[i] = pln[i]` raised AttributeError.
            self.pln = np.array([None]*self.num_layer)
            for i in range(self.num_layer):
                if type(pln[i]) != PLN:  # garbage entry -> abort construction
                    self.pln = None
                    print("Construct Error!")
                    return
                self.pln[i] = pln[i]
        else:
            # Fresh network: empty slots for num_layer layers.
            self.num_layer = num_layer
            self.pln = np.array([None]*self.num_layer)
        if W_ls is not None:
            self.W_ls = W_ls            # already computed elsewhere
        else:
            self.W_ls = None            # created later by construct_W
        self.mu = mu                    # ADMM multiplier (layers > 0)
        self.max_iterations = maxit     # ADMM iteration budget
        self.lam = lamba                # Lagrangian parameter lambda
        self.rho = rho                  # ADMM multiplier for W_ls
    def construct_W(self, X_train, Y_train, rho=None, lambda_o = None, LwF_flag=False, W_ls_Old=None, max_iterations=100):
        """Compute the layer-0 mapping W_ls.

        Without LwF this is a regularized least-squares solve; with LwF it
        calls the ADMM-based solve constrained towards ``W_ls_Old``.
        """
        if self.W_ls is None and (LwF_flag == False):
            # Regularized least squares: Y X^T (X X^T + lam I)^-1.
            inv_matrix = np.linalg.inv(np.dot(X_train, X_train.T)+self.lam*np.eye(X_train.shape[0]))
            self.W_ls = np.dot(np.dot(Y_train, X_train.T), inv_matrix).astype(np.float32)
        elif self.W_ls is None or (LwF_flag == True):
            # LwF variant: stay close to the W_ls learned on the old data.
            self.W_ls = compute_ol_LwF(X_train, Y_train, rho, lambda_o, max_iterations, W_ls_Old, LwF_flag)
    def construct_one_layer(self, Y_train, Q, X_train = None, calculate_O = True, dec_flag = False, R_i = None, LwF_flag = False, pln_Old = None, lambda_o = None, mu= None):
        """Build the next unconstructed layer.

        Y_train     -- training targets
        Q           -- number of output classes
        X_train     -- raw inputs (layer 0 only); otherwise the previous
                       layer's activation is used
        calculate_O -- whether to optimize O_l via ADMM
        dec_flag    -- decentralised mode: use the shared random matrix R_i
        LwF_flag    -- learning-without-forgetting mode, using pln_Old
        pln_Old     -- network trained on the old dataset (LwF)
        lambda_o    -- forgetting factor (LwF)
        mu          -- ADMM multiplier override (LwF)
        """
        num_class = Y_train.shape[0]        # number of classes
        num_node = 2*num_class + 100        # fixed nodes per layer
        if ((self.pln[0] is None) and (X_train is not None)):
            # ---- Layer 0 ----
            layer_no = 0
            self.pln[0] = PLN(Q, X_train, layer_no, num_node)
            # Top part of the composite weight matrix: V_Q * W_ls * X.
            W_top = np.dot(np.dot(self.pln[0].V_Q, self.W_ls), X_train)
            if dec_flag == True:
                self.pln[0].R_l = R_i       # shared random matrix across nodes
            # Bottom part: normalized random projection of the input.
            W_bottom = self.pln[0].normalization(np.dot(self.pln[0].R_l, X_train))
            pln_l1_Z_l = np.concatenate((W_top, W_bottom), axis=0)
            self.pln[0].Y_l = self.pln[0].activation_function(pln_l1_Z_l)
            if calculate_O and (LwF_flag==False):
                self.pln[0].O_l = compute_ol(self.pln[0].Y_l, Y_train, self.mu, self.max_iterations, [], LwF_flag)
            elif calculate_O or (LwF_flag==True):
                # LwF: constrain towards the old network's layer-0 O_l.
                self.pln[0].O_l = compute_ol_LwF(self.pln[0].Y_l, Y_train, mu, lambda_o, self.max_iterations, pln_Old.pln[0].O_l, LwF_flag)
        else:
            # ---- Next unconstructed layer ----
            flag = True  # True iff every slot is already filled
            for i in range(1,self.num_layer):
                if (self.pln[i] is None):
                    num_layers = i
                    flag = False
                    break
            if flag:
                print("All layers already constructed")
                return
            X = self.pln[num_layers-1].Y_l  # previous layer's activation
            num_node = 2*Q + 100
            self.pln[num_layers] = PLN(Q, X, num_layers, num_node)
            W_top = np.dot(np.dot(self.pln[num_layers].V_Q,self.pln[num_layers-1].O_l), X)
            if dec_flag == True:
                self.pln[num_layers].R_l = R_i
            W_bottom = self.pln[num_layers].normalization(np.dot(self.pln[num_layers].R_l, X))
            pln_Z_l = np.concatenate((W_top, W_bottom), axis=0)
            self.pln[num_layers].Y_l = self.pln[num_layers].activation_function(pln_Z_l)
            if calculate_O and (LwF_flag == False):
                self.pln[num_layers].O_l = compute_ol(self.pln[num_layers].Y_l, Y_train, self.mu, self.max_iterations, [], LwF_flag)
            elif calculate_O or (LwF_flag == True):
                self.pln[num_layers].O_l = compute_ol_LwF(self.pln[num_layers].Y_l, Y_train, mu, lambda_o, self.max_iterations, pln_Old.pln[num_layers].O_l, LwF_flag)
    def construct_all_layers(self, X_train, Y_train, Q):
        """Build every layer in order (layer 0 first, then the rest)."""
        for _ in range(0, self.num_layer):
            self.construct_one_layer(Y_train, Q, X_train=X_train)
    def compute_test_outputs(self, X_test):
        """Propagate X_test through the constructed layers and return the
        output of the last constructed layer (or W_ls X if only layer 0's
        mapping exists). Returns None when nothing is constructed."""
        num_layers = self.num_layer
        W_ls = self.W_ls
        PLN_1 = self.pln[0]
        if PLN_1 is not None:
            W_initial_top = np.dot(np.dot(PLN_1.V_Q, W_ls), X_test)
            W_initial_bottom = PLN_1.normalization(np.dot(PLN_1.R_l, X_test))
            Z = np.concatenate((W_initial_top, W_initial_bottom), axis=0)
            y = PLN_1.activation_function(Z)
        elif W_ls is not None:
            # Only the least-squares mapping exists.
            return np.dot(W_ls, X_test)
        else:
            print("W_ls not calculated!")
            return None
        for i in range(1, num_layers):
            PLN_1 = self.pln[i]
            if PLN_1 is not None:
                W_top = np.dot(np.dot(PLN_1.V_Q, self.pln[i-1].O_l), y)
                W_bottom = PLN_1.normalization(np.dot(PLN_1.R_l, y))
                Z = np.concatenate((W_top, W_bottom), axis=0)
                y = PLN_1.activation_function(Z)
            else:
                # First empty slot: emit through the last constructed O_l.
                return np.dot(self.pln[i - 1].O_l, y)
        return np.dot(self.pln[num_layers - 1].O_l, y)
# flag: whether implementing LwF or Not.
def compute_ol(Y,T,mu, max_iterations, O_prev, flag):
    """Solve for the output matrix mapping activations Y to targets T via ADMM.

    Args:
        Y: hidden activations of the layer.
        T: training targets.
        mu: ADMM multiplier for the dataset.
        max_iterations: ADMM iteration budget.
        O_prev: previously learned output matrix (used only when flag is True).
        flag: whether Learning-without-Forgetting (LwF) is required.
    """
    if flag:
        # rho is the ADMM multiplier for the LwF constraint
        rho = 100
        ol = admm_sameset(T, Y, mu, max_iterations, O_prev, rho)
        return ol
    # If LwF is not required, a plain ADMM optimisation is sufficient
    ol = optimize_admm(T, Y, mu, max_iterations)
    return ol
# flag: whether implementing LwF or Not.
def compute_ol_LwF(Y, T, mu, lambda_o, max_iterations, O_prev, flag):
    """Solve for the output matrix via ADMM with an explicit forgetting factor.

    Args:
        Y: hidden activations of the layer.
        T: training targets.
        mu: ADMM multiplier.
        lambda_o: LwF forgetting factor weighting closeness to O_prev.
        max_iterations: ADMM iteration budget.
        O_prev: output matrix learned on the old dataset (used when flag is True).
        flag: whether Learning-without-Forgetting (LwF) is required.
    """
    if flag:
        # LwF: modified same-set ADMM anchored to O_prev via lambda_o.
        ol = admm_sameset_modified(T, Y, mu, max_iterations, O_prev, lambda_o)
        return ol
    # If LwF is not required, a plain ADMM optimisation is sufficient
    ol = optimize_admm(T, Y, mu, max_iterations)
    return ol
def param_tuning_for_LS_decentralised(X, T, rho, max_iterations, W_ls_prev):
    """Parameter tuning for the decentralised least-squares step (same datasets).

    NOTE(review): unimplemented placeholder.  The original body only
    overwrote the `rho` parameter with None and returned None; that dead
    assignment is removed.  The function still always returns None until the
    tuning procedure is implemented.
    """
    return None
|
<filename>LwF_Decentralized_Same/PLN_Class.py
########################################################################
# Project Name: Decentralised Deep Learning without Forgetting
# Creators: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# Project Owners: <NAME> (<EMAIL>),
# <NAME> (<EMAIL>)
# December 2019
#########################################################################
import numpy as np
import sklearn as sk
from Admm import optimize_admm, admm_sameset_modified, admm_sameset
from func_set import compute_NME, compute_accuracy
import matplotlib.pyplot as plt
##################################################################
# Class for initialising the PLN objects in every layer
# Attributes:
# Q : no. of output classes
# X : Input data (can be raw input or hidden activation)
# layer_no : Layer No. of the current PLN object
# n_hidden : No. of hidden nodes for that layer
# Functions:
# activation_function: Implements ReLU activation
# initialise_random_matrix : Implements Random matrix for every
# layer
# Normalisation : Magnitude normalisation for Random Matrix
##################################################################
class PLN():
    """One layer of a Progressive Learning Network (PLN/SSFN).

    Holds the fixed structural matrices of the layer:
      V_Q : [I; -I] block used to inject the previous output into the layer,
      R_l : random projection matrix,
      O_l : output matrix (later optimised by ADMM/SGD),
      Y_l : hidden activations (filled in during construction).
    """

    def __init__(self, Q: int, X, layer_no, n_hidden: int):
        # Q        : number of output classes
        # X        : layer input (raw data or previous hidden activations)
        # layer_no : index of this layer in the network
        # n_hidden : number of hidden nodes for this layer
        self.input = X
        self.n_l = layer_no
        # Stack +I on top of -I: shape (2Q, Q).
        self.V_Q = np.vstack((np.eye(Q), -np.eye(Q)))
        # Random projection fills the remaining n_hidden - 2Q rows.
        self.R_l = self.initialise_random_matrix(n_hidden - 2 * Q, X.shape[0])
        # Output matrix; to be optimised later by SGD/ADMM.
        self.O_l = self.initialise_random_matrix(Q, n_hidden)
        self.Y_l = np.zeros((n_hidden, 1))

    def activation_function(self, Z):
        """Element-wise ReLU."""
        return np.where(Z > 0, Z, 0)

    def initialise_random_matrix(self, M, N):
        """Return an M x N matrix of i.i.d. standard-normal entries."""
        return np.random.standard_normal((M, N))

    def normalization(self, A):
        """Scale each column of A to unit Euclidean norm."""
        return A / np.sqrt(np.sum(A ** 2, axis=0, keepdims=True))
################################################################
# pln: a list containing all pln layers. Or: an empty list
# W: the corresponding W matrix, or en empty np.array
# Structure:
# pln: a list containing all the PLN layers.
# num_layer: number of layers
# W_ls : linear mapping matrix in layer 0
# mu : ADMM learning rate relevant
# lamba : wls training factors avoiding overfitting
# rho : ADMM learning rate relevant for computing Wls
# max_iterations : Maximum number of iteration in ADMM
################################################################
class PLN_network():
    """A stacked Progressive Learning Network (PLN/SSFN).

    Structure:
        pln            : np.array with one PLN object per layer (None until built)
        num_layer      : number of layers
        W_ls           : linear (least-squares) mapping used as layer 0
        mu             : ADMM multiplier for the dataset (layers after layer 0)
        lam            : least-squares regularisation weight (avoids overfitting)
        rho            : ADMM multiplier used when computing W_ls
        max_iterations : maximum number of ADMM iterations
    """

    def __init__(self, pln: np.array=None, W_ls: np.array=None, num_layer: int=20, mu=0.1, maxit=30, lamba=1e2, rho=0.1):
        # Either adopt an existing list of PLN layers, or start empty.
        if pln is not None:
            self.num_layer = len(pln)
            # BUGFIX: self.pln must exist before it is indexed below; the
            # original only assigned it on the error path, so copying a valid
            # layer list raised AttributeError on the first element.
            self.pln = np.array([None] * self.num_layer)
            for i in range(self.num_layer):
                if not isinstance(pln[i], PLN):
                    # Garbage entry: abandon construction (object left
                    # partially initialised, matching original behaviour).
                    self.pln = None
                    print("Construct Error!")
                    return
                self.pln[i] = pln[i]
        else:
            self.num_layer = num_layer
            self.pln = np.array([None] * self.num_layer)
        # W_ls (O* for layer 0): reuse if supplied, else computed later by construct_W.
        self.W_ls = W_ls
        self.mu = mu                 # ADMM multiplier for layers after layer 0
        self.max_iterations = maxit  # ADMM iteration budget
        self.lam = lamba             # Lagrangian/regularisation parameter lambda
        self.rho = rho               # ADMM multiplier used when computing W_ls

    def construct_W(self, X_train, Y_train, rho=None, lambda_o=None, LwF_flag=False, W_ls_Old=None, max_iterations=100):
        """Compute the layer-0 mapping W_ls.

        Without LwF: regularised least squares W_ls = Y X^T (X X^T + lam*I)^-1.
        With LwF: ADMM solve anchored to the old network's W_ls_Old.
        """
        if self.W_ls is None and (LwF_flag == False):
            # Simple regularised least-squares formulation.
            inv_matrix = np.linalg.inv(np.dot(X_train, X_train.T) + self.lam * np.eye(X_train.shape[0]))
            self.W_ls = np.dot(np.dot(Y_train, X_train.T), inv_matrix).astype(np.float32)
        elif self.W_ls is None or (LwF_flag == True):
            # LwF: compute W_ls for the new dataset using the old dataset's W_ls.
            self.W_ls = compute_ol_LwF(X_train, Y_train, rho, lambda_o, max_iterations, W_ls_Old, LwF_flag)

    def construct_one_layer(self, Y_train, Q, X_train=None, calculate_O=True, dec_flag=False, R_i=None, LwF_flag=False, pln_Old=None, lambda_o=None, mu=None):
        """Create the next missing layer of the network.

        Args:
            Y_train     : training-set targets.
            Q           : number of output classes.
            X_train     : training-set inputs; when None, the previous layer's
                          hidden activations are used (layer 0 must then exist).
            calculate_O : whether to solve for the layer's output matrix O_l.
            dec_flag    : decentralised mode -- use the shared random matrix R_i.
            R_i         : random matrix shared by all nodes for this layer.
            LwF_flag    : enable Learning-without-Forgetting.
            pln_Old     : network trained on the 'old' dataset (supplies the
                          per-layer O_l anchors when LwF_flag is True).
            lambda_o    : LwF forgetting factor.
            mu          : ADMM multiplier for the LwF solve (self.mu otherwise).
        """
        num_class = Y_train.shape[0]      # number of classes
        num_node = 2 * num_class + 100    # nodes per layer (fixed scheme: 2Q + 100)
        if ((self.pln[0] is None) and (X_train is not None)):
            # ---- Build the first layer (index 0) from the raw inputs ----
            layer_no = 0
            self.pln[0] = PLN(Q, X_train, layer_no, num_node)
            # Top half of the composite weight matrix: V_Q * W_ls * X.
            W_top = np.dot(np.dot(self.pln[0].V_Q, self.W_ls), X_train)
            # Bottom half: (normalised) random projection of the inputs.
            if dec_flag == True:
                self.pln[0].R_l = R_i
            W_bottom = self.pln[0].normalization(np.dot(self.pln[0].R_l, X_train))
            pln_l1_Z_l = np.concatenate((W_top, W_bottom), axis=0)
            # Hidden activation Y_l = g(W X).
            self.pln[0].Y_l = self.pln[0].activation_function(pln_l1_Z_l)
            if calculate_O and (LwF_flag == False):
                # Plain ADMM solve for the output matrix.
                self.pln[0].O_l = compute_ol(self.pln[0].Y_l, Y_train, self.mu, self.max_iterations, [], LwF_flag)
            elif calculate_O or (LwF_flag == True):
                # LwF solve, anchored to the old network's layer-0 output matrix.
                self.pln[0].O_l = compute_ol_LwF(self.pln[0].Y_l, Y_train, mu, lambda_o, self.max_iterations, pln_Old.pln[0].O_l, LwF_flag)
        else:
            # ---- Build the first missing layer on top of the stack ----
            flag = True  # True iff every layer has already been constructed
            for i in range(1, self.num_layer):
                if (self.pln[i] is None):
                    num_layers = i  # index of the first missing layer
                    flag = False
                    break
            if flag:
                print("All layers already constructed")
                return
            # Previous layer's activations feed this layer.
            # NOTE(review): assumes self.pln[num_layers-1] exists; calling this
            # with X_train=None before layer 0 is built would fail here.
            X = self.pln[num_layers - 1].Y_l
            num_node = 2 * Q + 100
            self.pln[num_layers] = PLN(Q, X, num_layers, num_node)
            # Top half: V_Q * O_{l-1} * X.
            W_top = np.dot(np.dot(self.pln[num_layers].V_Q, self.pln[num_layers - 1].O_l), X)
            if dec_flag == True:
                self.pln[num_layers].R_l = R_i
            W_bottom = self.pln[num_layers].normalization(np.dot(self.pln[num_layers].R_l, X))
            pln_Z_l = np.concatenate((W_top, W_bottom), axis=0)
            self.pln[num_layers].Y_l = self.pln[num_layers].activation_function(pln_Z_l)
            if calculate_O and (LwF_flag == False):
                self.pln[num_layers].O_l = compute_ol(self.pln[num_layers].Y_l, Y_train, self.mu, self.max_iterations, [], LwF_flag)
            elif calculate_O or (LwF_flag == True):
                # LwF solve, anchored to the old network's output matrix at this layer.
                self.pln[num_layers].O_l = compute_ol_LwF(self.pln[num_layers].Y_l, Y_train, mu, lambda_o, self.max_iterations, pln_Old.pln[num_layers].O_l, LwF_flag)

    def construct_all_layers(self, X_train, Y_train, Q):
        """Build all self.num_layer layers in sequence.

        Q is the dimension of the target variable; the first call uses
        X_train directly, subsequent calls consume hidden activations.
        """
        for _ in range(0, self.num_layer):
            self.construct_one_layer(Y_train, Q, X_train=X_train)

    def compute_test_outputs(self, X_test):
        """Forward-propagate X_test through the trained network.

        Returns the final layer's output; falls back to W_ls * X_test when
        only layer 0 exists, and to None (with a message) when nothing has
        been trained.  Stops early at the first missing layer.
        """
        num_layers = self.num_layer
        W_ls = self.W_ls
        PLN_1 = self.pln[0]
        if PLN_1 is not None:
            # Layer-0 forward pass.
            W_initial_top = np.dot(np.dot(PLN_1.V_Q, W_ls), X_test)
            W_initial_bottom = PLN_1.normalization(np.dot(PLN_1.R_l, X_test))
            Z = np.concatenate((W_initial_top, W_initial_bottom), axis=0)
            y = PLN_1.activation_function(Z)
        elif W_ls is not None:
            # Only the least-squares stage exists.
            return np.dot(W_ls, X_test)
        else:
            # Nothing trained yet.
            print("W_ls not calculated!")
            return None
        # Forward pass through the remaining layers.
        for i in range(1, num_layers):
            PLN_1 = self.pln[i]
            if PLN_1 is not None:
                W_top = np.dot(np.dot(PLN_1.V_Q, self.pln[i - 1].O_l), y)
                W_bottom = PLN_1.normalization(np.dot(PLN_1.R_l, y))
                Z = np.concatenate((W_top, W_bottom), axis=0)
                y = PLN_1.activation_function(Z)
            else:
                # Layer i missing: return the last trained layer's output.
                return np.dot(self.pln[i - 1].O_l, y)
        return np.dot(self.pln[num_layers - 1].O_l, y)
# flag: whether implementing LwF or Not.
def compute_ol(Y,T,mu, max_iterations, O_prev, flag):
    """Solve for the output matrix mapping activations Y to targets T via ADMM.

    When `flag` is truthy the Learning-without-Forgetting variant
    (admm_sameset) is used, constrained towards O_prev; otherwise a plain
    ADMM optimisation is run and O_prev is ignored.
    """
    if not flag:
        # No LwF requested: a simple ADMM solve is sufficient.
        return optimize_admm(T, Y, mu, max_iterations)
    # LwF path: fixed ADMM multiplier for the forgetting constraint.
    lwf_rho = 100
    return admm_sameset(T, Y, mu, max_iterations, O_prev, lwf_rho)
# flag: whether implementing LwF or Not.
def compute_ol_LwF(Y, T, mu, lambda_o, max_iterations, O_prev, flag):
    """Solve for the output matrix via ADMM with an explicit forgetting factor.

    When `flag` is truthy the modified same-set ADMM is used, weighting
    closeness to O_prev by lambda_o; otherwise a plain ADMM optimisation is
    run and O_prev / lambda_o are ignored.
    """
    if not flag:
        # LwF disabled: ordinary ADMM optimisation.
        return optimize_admm(T, Y, mu, max_iterations)
    return admm_sameset_modified(T, Y, mu, max_iterations, O_prev, lambda_o)
def param_tuning_for_LS_decentralised(X, T, rho, max_iterations, W_ls_prev):
    """Parameter tuning for the decentralised least-squares step (same datasets).

    NOTE(review): unimplemented placeholder.  The original body only
    overwrote the `rho` parameter with None and returned None; that dead
    assignment is removed.  The function still always returns None until the
    tuning procedure is implemented.
    """
    return None
|
en
| 0.59899
|
######################################################################## # Project Name: Decentralised Deep Learning without Forgetting # Creators: <NAME> (<EMAIL>) # <NAME> (<EMAIL>) # <NAME> (<EMAIL>) # <NAME> (<EMAIL>) # Project Owners: <NAME> (<EMAIL>), # <NAME> (<EMAIL>) # December 2019 ######################################################################### ################################################################## # Class for initialising the PLN objects in every layer # Attributes: # Q : no. of output classes # X : Input data (can be raw input or hidden activation) # layer_no : Layer No. of the current PLN object # n_hidden : No. of hidden nodes for that layer # Functions: # activation_function: Implements ReLU activation # initialise_random_matrix : Implements Random matrix for every # layer # Normalisation : Magnitude normalisation for Random Matrix ################################################################## # Initialises all parameters for each layer of PLN/SSFN # Needs to be optimized by SGD/ADMM #if W_ls.all() == None: # pass #else: # self.W_ls = W_ls #return None # Computes element wise activation #return np.where(Z > 0, Z, 0.1*Z) #modify to Leaky ReLU #ReLU #return np.maximum(Z*0.1, Z) #A = np.random.uniform(low = -1, high = 1, size = (M,N)) #return A - np.mean(A) ################################################################ # pln: a list containing all pln layers. Or: an empty list # W: the corresponding W matrix, or en empty np.array # Structure: # pln: a list containing all the PLN layers. 
# num_layer: number of layers # W_ls : linear mapping matrix in layer 0 # mu : ADMM learning rate relevant # lamba : wls training factors avoiding overfitting # rho : ADMM learning rate relevant for computing Wls # max_iterations : Maximum number of iteration in ADMM ################################################################ # Contsructor function to initialise a PLN object # If the list contains garbage values, it is initialised properly # self.pln[i].O_l, Assigning the list a PLN object if datatype matches # If no PLN network is already constructed, assigns new variables # Assigns the number of layers # Assigns an empty list # Assigning the value of W_ls (or O* for Layer 0) # If the value is already present, no needt to calculate # Assigned None, will be created later on # ADMM multiplier for the given dataset (for layers after Layer 0) # No. of iterations for the ADMM Algorithm # Lagrangian parameter Lambda # ADMM multiplier for the given dataset (for layers after Layer L) # lam: Given regularization parameter as used in the paper for the used Dataset # Computes the W_ls matrix just by using simple regularised least squares formulation # Implements the LwF problem for computing the W_ls for New Dataset, using W_ls for Old Dataset # Constructs a single layer of the network # Input arguments # Y_train : Training set targets # Q : number of output classes # X_train : Training set inputs, if None, it is using hidden activation inputs # calculate_O : Flag to check whether O is required to be calculated or not # dec_flag : To decide whether to implement the decentralised scenario here or not # R_i : Random matrix to be used (for all nodes) in the i-th layer # LwF_flag : To choose whether LwF is required to be implemented or not # pln_Old : PLN Network object for the network trained using the 'Old' Dataset # lambda_o : Needs to be used for forgetting factor # Number of classes in the given network # Number of nodes in every layer (fixed in this case) # Construct 
the first layer # Layer Number/Index (0 to L-1) # Construct the first layer. # Compute the top part of the Composite Weight Matrix # Compute the Bottom part of the Composite Weight Matrix and inner product with input, along with including normalization # Normalization performed is for the random matrix # Concatenating the outputs to form W*X # Then applying the activation function g(.) # Computing the Output Matrix by using 100 iterations of ADMM #print("ADMM for Layer No:{}".format(1)) # No implementation of LwF # Implementation of LwF using the outputs using Old Dataset # Outputs for the previous learned network is given in: pln_Old # mark whether all layers has been constructed # if we have 3 layers already existed self.pln[0:2], this index is 3, self.pln[3] is the 4th layer we wanna construct. # Input is the Output g(WX) for the previous layer # No. of nodes fixed for every layer # Creating the PLN Object for the new layer # Compute the top part of the Composite Weight Matrix # Compute the bottom part of the Composite Weight Matrix # Concatenate the top and bottom part of the matrix # Apply the activation function # Compute the output matrix using ADMM for specified no. of iterations # No implementation of LwF # Implementation of LwF using the outputs using Old Dataset # Outputs for the previous learned network is given in: pln_Old # Q is the dimension of the target variable. 
#TODO: Some change might be needed # Construct the layers one by one, starting from the layer - 1 to the final layer #self.construct_one_layer(X_train, Y_train, Q) # This function is used to compute the test outputs for a given test set # Computes the network output for the first layer # In case only the layer 0 is present, then only the test output after least sqaures is computed # In case no layer is computed # Computes the network output for each layer after the first layer # modify # Gets the PLN object for the zeroth layer #TODO: y is not changed, so performance won't improve #print("I terminate at ", i) # Returns network output for the last layer # flag: whether implementing LwF or Not. # Computes the Output matrix by calling the ADMM Algorithm function with given parameters # rho is ADMM multiplier for LwF constraint # If LwF is not required, it is only sufficient to optimize a simple ADMM # flag: whether implementing LwF or Not. # Computes the Output matrix by calling the ADMM Algorithm function with given parameters # If LwF is not required, it is only sufficient to optimize a simple ADMM # This function performs parameter tuning for the case of same datasets
| 2.61848
| 3
|
obspost/migrations/0003_auto_20160825_0710.py
|
pankajlal/prabandh
| 0
|
6627449
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-25 01:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `obspost` app.

    Drops the completion-tracking fields and start timestamp from the
    Observation model, makes Observation.child a nullable FK to the
    configured user model, and makes Observation.image optional.
    """

    dependencies = [
        ('obspost', '0002_childsheet'),
    ]

    operations = [
        # Remove completion tracking from Observation.
        migrations.RemoveField(
            model_name='observation',
            name='date_marked_as_complete',
        ),
        migrations.RemoveField(
            model_name='observation',
            name='is_complete',
        ),
        # Remove the observation start timestamp.
        migrations.RemoveField(
            model_name='observation',
            name='start_time',
        ),
        # child becomes a nullable FK to settings.AUTH_USER_MODEL.
        migrations.AlterField(
            model_name='observation',
            name='child',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # image becomes optional (blank and null allowed).
        migrations.AlterField(
            model_name='observation',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=b''),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-25 01:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `obspost` app.

    Drops the completion-tracking fields and start timestamp from the
    Observation model, makes Observation.child a nullable FK to the
    configured user model, and makes Observation.image optional.
    """

    dependencies = [
        ('obspost', '0002_childsheet'),
    ]

    operations = [
        # Remove completion tracking from Observation.
        migrations.RemoveField(
            model_name='observation',
            name='date_marked_as_complete',
        ),
        migrations.RemoveField(
            model_name='observation',
            name='is_complete',
        ),
        # Remove the observation start timestamp.
        migrations.RemoveField(
            model_name='observation',
            name='start_time',
        ),
        # child becomes a nullable FK to settings.AUTH_USER_MODEL.
        migrations.AlterField(
            model_name='observation',
            name='child',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # image becomes optional (blank and null allowed).
        migrations.AlterField(
            model_name='observation',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=b''),
        ),
    ]
|
en
| 0.758423
|
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-08-25 01:40
| 1.525146
| 2
|
rx/core/observable/generatewithrelativetime.py
|
daliclass/RxPY
| 0
|
6627450
|
<reponame>daliclass/RxPY
from rx.core import Observable
from rx.concurrency import timeout_scheduler
from rx.disposable import MultipleAssignmentDisposable
def _generate_with_relative_time(initial_state, condition, iterate, time_mapper) -> Observable:
    """Generates an observable sequence by iterating a state from an
    initial state until the condition fails.
    Example:
        res = source.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5)
    Args:
        initial_state: Initial state.
        condition: Condition to terminate generation (upon returning
            false).
        iterate: Iteration step function.
        time_mapper: Time mapper function to control the speed of
            values being produced each iteration, returning relative times, i.e.
            either floats denoting seconds or instances of timedelta.
    Returns:
        The generated sequence.
    """
    def subscribe(observer, scheduler=None):
        scheduler = scheduler or timeout_scheduler
        mad = MultipleAssignmentDisposable()
        # Single-element lists give the nested `action` closure mutable
        # access to this state (pre-`nonlocal` idiom).
        state = [initial_state]
        has_result = [False]
        result = [None]
        first = [True]
        time = [None]
        def action(scheduler, _):
            # Emit the value produced during the *previous* tick, if any.
            if has_result[0]:
                observer.on_next(result[0])
            try:
                # First tick evaluates the initial state as-is; later ticks
                # advance the state before testing the condition.
                if first[0]:
                    first[0] = False
                else:
                    state[0] = iterate(state[0])
                has_result[0] = condition(state[0])
                if has_result[0]:
                    result[0] = state[0]
                    time[0] = time_mapper(state[0])
            except Exception as e:
                # Any user-callback failure terminates the sequence with error.
                observer.on_error(e)
                return
            if has_result[0]:
                # Schedule the next tick after the mapped relative delay.
                mad.disposable = scheduler.schedule_relative(time[0], action)
            else:
                observer.on_completed()
        # Kick off immediately; subsequent ticks are self-scheduled by `action`.
        mad.disposable = scheduler.schedule_relative(0, action)
        return mad
    return Observable(subscribe)
|
from rx.core import Observable
from rx.concurrency import timeout_scheduler
from rx.disposable import MultipleAssignmentDisposable
def _generate_with_relative_time(initial_state, condition, iterate, time_mapper) -> Observable:
    """Generates an observable sequence by iterating a state from an
    initial state until the condition fails.
    Example:
        res = source.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5)
    Args:
        initial_state: Initial state.
        condition: Condition to terminate generation (upon returning
            false).
        iterate: Iteration step function.
        time_mapper: Time mapper function to control the speed of
            values being produced each iteration, returning relative times, i.e.
            either floats denoting seconds or instances of timedelta.
    Returns:
        The generated sequence.
    """
    def subscribe(observer, scheduler=None):
        scheduler = scheduler or timeout_scheduler
        mad = MultipleAssignmentDisposable()
        # Single-element lists give the nested `action` closure mutable
        # access to this state (pre-`nonlocal` idiom).
        state = [initial_state]
        has_result = [False]
        result = [None]
        first = [True]
        time = [None]
        def action(scheduler, _):
            # Emit the value produced during the *previous* tick, if any.
            if has_result[0]:
                observer.on_next(result[0])
            try:
                # First tick evaluates the initial state as-is; later ticks
                # advance the state before testing the condition.
                if first[0]:
                    first[0] = False
                else:
                    state[0] = iterate(state[0])
                has_result[0] = condition(state[0])
                if has_result[0]:
                    result[0] = state[0]
                    time[0] = time_mapper(state[0])
            except Exception as e:
                # Any user-callback failure terminates the sequence with error.
                observer.on_error(e)
                return
            if has_result[0]:
                # Schedule the next tick after the mapped relative delay.
                mad.disposable = scheduler.schedule_relative(time[0], action)
            else:
                observer.on_completed()
        # Kick off immediately; subsequent ticks are self-scheduled by `action`.
        mad.disposable = scheduler.schedule_relative(0, action)
        return mad
    return Observable(subscribe)
|
en
| 0.697699
|
Generates an observable sequence by iterating a state from an initial state until the condition fails. Example: res = source.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5) Args: initial_state: Initial state. condition: Condition to terminate generation (upon returning false). iterate: Iteration step function. time_mapper: Time mapper function to control the speed of values being produced each iteration, returning relative times, i.e. either floats denoting seconds or instances of timedelta. Returns: The generated sequence.
| 2.901069
| 3
|