id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4906001 | <reponame>t3kt/raytk
def onOffToOn(panelValue):
ext.tester.onCountLabelClick(panelValue.owner) | StarcoderdataPython |
6417692 | import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
from collections import defaultdict
import pickle
import face_recognition
import glob
import sklearn
import skimage.io as io
import skimage.filters as flt
from skimage.feature import greycomatrix, greycoprops
from skimage.feature import local_binary_pattern
import keras
import tensorflow as tf
from keras import datasets, layers, models, optimizers, losses, regularizers
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model, Sequential
from keras.layers import InputLayer, Dense, Flatten, Dropout, Reshape, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D, Activation, concatenate, AveragePooling2D, BatchNormalization
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from keras.optimizers import Adagrad, RMSprop, SGD, Adam
from keras.utils import to_categorical
# --- Configuration for the eye-blink liveness pipeline ---
IMG_SIZE = 24        # side length of the grayscale eye crop fed to the CNN
IMG_SIZE_RF = 128    # side length of the face crop used for texture features
cascade_dir = "cascade_files/"   # directory holding the OpenCV Haar cascade XMLs
eye_state_dir = "data/eye_state/"
face_casc_path = cascade_dir + "haarcascade_frontalface_default.xml"
eye_casc_path = cascade_dir + "haarcascade_eye.xml"
open_eye_casc_path = cascade_dir + "haarcascade_eye_tree_eyeglasses.xml"   # detects open eyes even with glasses
left_eye_casc_path = cascade_dir + "haarcascade_lefteye_2splits.xml"
right_eye_casc_path = cascade_dir + "haarcascade_righteye_2splits.xml"
def load_cascades():
    """Instantiate every Haar-cascade detector used by the pipeline.

    Returns:
        A 5-tuple of ``cv2.CascadeClassifier`` objects in this order:
        (face, eye, open-eye-with-glasses, left-eye, right-eye), loaded
        from the XML paths configured at module level.
    """
    cascade_paths = (face_casc_path, eye_casc_path, open_eye_casc_path,
                     left_eye_casc_path, right_eye_casc_path)
    # One classifier per cascade file, preserving the order above.
    return tuple(cv2.CascadeClassifier(path) for path in cascade_paths)
def Eye_state_Classifier(nb_classes = 1):
    """Build the small LeNet-style CNN that classifies eye state.

    The network takes a single-channel IMG_SIZE x IMG_SIZE image and ends
    in ``nb_classes`` sigmoid units (1 by default: open vs. closed).
    """
    model = Sequential()
    # First conv stage carries the input shape; the rest infer it.
    model.add(Conv2D(6, (3, 3), activation='relu',
                     input_shape=(IMG_SIZE, IMG_SIZE, 1)))
    model.add(MaxPooling2D())
    for n_filters in (16, 32):
        model.add(Conv2D(n_filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D())
    model.add(Flatten())
    # Classic LeNet 120/84 dense head before the sigmoid output.
    for n_units in (120, 84):
        model.add(Dense(n_units, activation='relu'))
    model.add(Dense(nb_classes, activation='sigmoid'))
    return model
def load_pretrained_model():
    """Build the eye-state CNN, compile it, and load pretrained weights.

    Expects ``models/eye_status_classifier.h5`` to exist on disk.
    """
    model = Eye_state_Classifier(nb_classes = 1)
    INIT_LR = 1e-3  # learning rate the weights were originally trained with
    EPOCHS = 25     # only used to derive the decay schedule below
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) #Optimise using Adam
    # NOTE(review): `lr=`/`decay=` are legacy Keras argument names; newer
    # Keras expects `learning_rate=` -- confirm the pinned Keras version.
    model.compile(loss = "binary_crossentropy", optimizer = opt, metrics = ["accuracy"])
    #model.summary()
    # load weights into new model
    model.load_weights("models/eye_status_classifier.h5")
    return model
def computeHaralick(gray):
    """Extract GLCM (Haralick) texture features from a grayscale image.

    Builds a grey-level co-occurrence matrix over three pixel distances and
    four angles, then flattens the 'energy' and 'homogeneity' properties
    into one 1-D feature vector.
    """
    glcm = greycomatrix(gray,
                        distances=[1, 2, 3],
                        angles=[0, np.pi / 4, np.pi / 2, 3 * np.pi / 4],
                        symmetric=True,
                        normed=True)
    # Fixed property order so downstream models see a stable layout.
    energy = greycoprops(glcm, 'energy').ravel()
    homogeneity = greycoprops(glcm, 'homogeneity').ravel()
    return np.hstack([energy, homogeneity])
def computeLBP(gray):
    """Compute a normalised uniform local-binary-pattern histogram.

    Uses 24 sample points and radius 8; the 'uniform' method produces
    24 + 2 distinct pattern labels, giving a 26-bin histogram that is
    normalised to sum to 1 (independent of image size).
    """
    n_points = 24
    lbp = local_binary_pattern(gray, n_points, 8, method="uniform")
    hist, _ = np.histogram(lbp.ravel(),
                           bins=np.arange(0, n_points + 3),
                           range=(0, n_points + 2))
    return hist / sum(hist)
def predict(img, model):
    """Classify an eye crop as 'open', 'closed' or 'idk' (uncertain).

    Args:
        img: grayscale eye image of any size; resized to IMG_SIZE here.
        model: compiled Keras model producing a single sigmoid score in
            [0, 1], where low means closed and high means open.

    Returns:
        'closed' for scores below 0.4, 'open' above 0.6, 'idk' otherwise.
    """
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)).astype('float32')
    img = img.reshape(1, IMG_SIZE, IMG_SIZE, 1) / 255
    # model.predict returns a (1, 1) array; extract the scalar explicitly
    # instead of comparing and branching on the array itself (implicit
    # array->bool conversion is deprecated in NumPy and was fragile here).
    score = float(model.predict(img)[0][0])
    if score < 0.4:
        return 'closed'
    elif score > 0.6:
        return 'open'
    return 'idk'
def predict_rf(img, model):
    """Classify a face crop as 'real', 'fake' or 'idk' via texture features.

    Builds a Haralick + LBP feature vector from the (resized) grayscale
    face crop and scores it with the given random-forest model.

    Returns:
        'real' for scores below 0.4, 'fake' above 0.6, 'idk' otherwise.
    """
    img = cv2.resize(img, (IMG_SIZE_RF, IMG_SIZE_RF))
    final_feats = np.concatenate([computeHaralick(img), computeLBP(img)])
    final_feats = final_feats.reshape((1, -1))
    # model.predict returns a length-1 array; extract the scalar explicitly
    # rather than comparing and branching on the array (implicit
    # array->bool conversion is deprecated in NumPy).
    score = float(model.predict(final_feats)[0])
    if score < 0.4:
        return 'real'
    elif score > 0.6:
        return 'fake'
    return 'idk'
def init():
    """Load all Haar detectors and the eye-state model.

    Returns the tuple (model, face, eye, open-eye, left-eye, right-eye
    detectors, images) where ``images`` starts out as an empty list.
    """
    detectors = load_cascades()
    eye_model = load_pretrained_model()
    return (eye_model,) + detectors + ([],)
# Module-level state shared by the per-frame loop: detectors + eye-state CNN.
(model, face_detector, eye_detector, open_eye_detector, left_eye_detector, right_eye_detector, images) = init()
# Pickled random-forest spoof detector.
# NOTE(review): the file handle is never closed; a with-block would be safer.
rf_model = pickle.load(open("models/randomforest.sav", 'rb'))
def isBlinking(history, maxFrames):
    """Return True if *history* records a blink.

    *history* is a string of per-frame flags, '1' for eyes open and '0'
    for eyes closed.  A blink is an open frame, then a run of 1 up to
    *maxFrames* closed frames, then an open frame again.
    """
    return any('1' + '0' * (gap + 1) + '1' in history
               for gap in range(maxFrames))
def detect_and_display(model, rf_model, video_capture, face_detector, eye_detector, open_eye_detector, left_eye_detector, right_eye_detector, data, eyes_detected, prev_encoding):
    """Grab one webcam frame, run liveness checks, and return the annotated frame.

    Combines two liveness signals for the largest detected face:
    a blink pattern accumulated per face name in *eyes_detected*, and a
    texture-based random-forest real/fake prediction.
    """
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    faces = face_detector.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(50, 50),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    if len(faces)>0:
        #Find largest
        # NOTE(review): detectMultiScale entries are (x, y, w, h), so this key
        # computes (w - x) * (h - y), not the area w * h -- confirm intent.
        rect = sorted(faces,reverse = True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
        (x, y, w, h) = rect
        # for (x,y,w,h) in faces:
        pred_rf = predict_rf(gray[y:y+h,x:x+w], rf_model)
        # face_recognition expects (top, right, bottom, left) boxes.
        encoding = face_recognition.face_encodings(rgb, [(y, x+w, y+h, x)])[0]
        if prev_encoding == []:
            prev_encoding = encoding
        matches = face_recognition.compare_faces([prev_encoding], encoding)
        name = "Unknown"
        if False in matches:
            # A different face appeared: reset its blink history.
            # NOTE(review): prev_encoding is rebound locally only; the caller
            # never receives the update -- confirm it should be returned.
            eyes_detected[name] = ""
            prev_encoding = encoding
        face = frame[y:y+h,x:x+w]
        gray_face = gray[y:y+h,x:x+w]
        eyes = []
        # Glasses-friendly cascade only fires on OPEN eyes; two hits means
        # both eyes are open this frame.
        open_eyes_glasses = open_eye_detector.detectMultiScale(
            gray_face,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags = cv2.CASCADE_SCALE_IMAGE
        )
        if len(open_eyes_glasses) == 2:
            eyes_detected[name]+='1'
            for (ex,ey,ew,eh) in open_eyes_glasses:
                cv2.rectangle(face,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        else:
            # Fall back to per-half-face eye detection plus the CNN
            # open/closed classifier.
            left_face = frame[y:y+h, x+int(w/2):x+w]
            left_face_gray = gray[y:y+h, x+int(w/2):x+w]
            right_face = frame[y:y+h, x:x+int(w/2)]
            right_face_gray = gray[y:y+h, x:x+int(w/2)]
            left_eye = left_eye_detector.detectMultiScale(
                left_face_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags = cv2.CASCADE_SCALE_IMAGE
            )
            right_eye = right_eye_detector.detectMultiScale(
                right_face_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags = cv2.CASCADE_SCALE_IMAGE
            )
            # '1' = open until any detected eye is classified closed.
            eye_status = '1'
            for (ex,ey,ew,eh) in right_eye:
                color = (0,255,0)
                pred = predict(right_face_gray[ey:ey+eh,ex:ex+ew],model)
                if pred == 'closed':
                    eye_status='0'
                    color = (0,0,255)
                cv2.rectangle(right_face,(ex,ey),(ex+ew,ey+eh),color,2)
            for (ex,ey,ew,eh) in left_eye:
                color = (0,255,0)
                pred = predict(left_face_gray[ey:ey+eh,ex:ex+ew],model)
                if pred == 'closed':
                    eye_status='0'
                    color = (0,0,255)
                cv2.rectangle(left_face,(ex,ey),(ex+ew,ey+eh),color,2)
            eyes_detected[name] += eye_status
        # Live if a blink was seen recently OR the texture model says real.
        blink_output = isBlinking(eyes_detected[name],10)
        if blink_output or pred_rf=='real':
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            y = y - 15 if y - 15 > 15 else y + 15
            cv2.putText(frame, 'Real: '+name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,0.75, (0, 255, 0), 2)
        else:
            # Only declare fake after enough frames of history accumulated.
            if len(eyes_detected[name]) > 20:
                cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
                y = y - 15 if y - 15 > 15 else y + 15
                cv2.putText(frame, 'Fake: '+name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,0.75, (255, 0, 0), 2)
    return frame
def main():
    """Open the default webcam and run the liveness loop until 'q' is pressed."""
    data = {'encodings': []}
    print("[LOG] Opening webcam ...")
    video_capture = cv2.VideoCapture(0)
    eyes_detected = defaultdict(str)  # per-name string of '1'/'0' eye flags
    prev_encoding = []
    # NOTE(review): detect_and_display rebinds prev_encoding locally and never
    # returns it, so this stays [] for every frame -- confirm intended.
    while True:
        frame = detect_and_display(model, rf_model, video_capture, face_detector, eye_detector, open_eye_detector, left_eye_detector, right_eye_detector, data, eyes_detected, prev_encoding)
        cv2.imshow("Eye-Blink LiveNet", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
main()
| StarcoderdataPython |
5087239 | <reponame>soma2000-lang/colour<filename>colour/models/rgb/transfer_functions/tests/test_blackmagic_design.py
"""
Defines the unit tests for the :mod:`colour.models.rgb.transfer_functions.\
blackmagic_design` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
oetf_BlackmagicFilmGeneration5,
oetf_inverse_BlackmagicFilmGeneration5,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"TestOetf_BlackmagicFilmGeneration5",
"TestOetf_inverse_BlackmagicFilmGeneration5",
]
class TestOetf_BlackmagicFilmGeneration5(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.transfer_functions.blackmagic_design.\
oetf_BlackmagicFilmGeneration5` definition unit tests methods.
    """

    def test_oetf_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_BlackmagicFilmGeneration5` definition.
        """
        # Reference values span black (0.0), mid-grey (0.18), and the
        # curve's upper end (~222.86 maps to ~1.0).
        self.assertAlmostEqual(
            oetf_BlackmagicFilmGeneration5(0.0), 0.092465753424658, places=7
        )
        self.assertAlmostEqual(
            oetf_BlackmagicFilmGeneration5(0.18), 0.383561643835617, places=7
        )
        self.assertAlmostEqual(
            oetf_BlackmagicFilmGeneration5(1.0), 0.530489624957305, places=7
        )
        self.assertAlmostEqual(
            oetf_BlackmagicFilmGeneration5(100.0), 0.930339851899973, places=7
        )
        self.assertAlmostEqual(
            oetf_BlackmagicFilmGeneration5(222.86), 0.999999631713769, places=7
        )

    def test_n_dimensional_oetf_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_BlackmagicFilmGeneration5` definition n-dimensional
        arrays support.
        """
        L = 0.18
        V = oetf_BlackmagicFilmGeneration5(L)

        # The OETF must broadcast over 1-D, 2-D and 3-D inputs unchanged.
        L = np.tile(L, 6)
        V = np.tile(V, 6)
        np.testing.assert_almost_equal(
            oetf_BlackmagicFilmGeneration5(L), V, decimal=7
        )

        L = np.reshape(L, (2, 3))
        V = np.reshape(V, (2, 3))
        np.testing.assert_almost_equal(
            oetf_BlackmagicFilmGeneration5(L), V, decimal=7
        )

        L = np.reshape(L, (2, 3, 1))
        V = np.reshape(V, (2, 3, 1))
        np.testing.assert_almost_equal(
            oetf_BlackmagicFilmGeneration5(L), V, decimal=7
        )

    def test_domain_range_scale_oetf_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_BlackmagicFilmGeneration5` definition domain and range
        scale support.
        """
        L = 0.18
        V = oetf_BlackmagicFilmGeneration5(L)

        d_r = (("reference", 1), ("1", 1), ("100", 100))
        for scale, factor in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    oetf_BlackmagicFilmGeneration5(L * factor),
                    V * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_oetf_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_BlackmagicFilmGeneration5` definition nan support.
        """
        # Only checks that pathological inputs do not raise.
        oetf_BlackmagicFilmGeneration5(
            np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
        )
class TestOetf_inverse_BlackmagicFilmGeneration5(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_inverse_BlackmagicFilmGeneration5` definition unit tests
    methods.
    """

    def test_oetf_inverse_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_inverse_BlackmagicFilmGeneration5` definition.
        """
        # Exact inverses of the forward-OETF reference pairs above.
        self.assertAlmostEqual(
            oetf_inverse_BlackmagicFilmGeneration5(0.092465753424658),
            0.0,
            places=7,
        )
        self.assertAlmostEqual(
            oetf_inverse_BlackmagicFilmGeneration5(0.383561643835617),
            0.18,
            places=7,
        )
        self.assertAlmostEqual(
            oetf_inverse_BlackmagicFilmGeneration5(0.530489624957305),
            1.0,
            places=7,
        )
        self.assertAlmostEqual(
            oetf_inverse_BlackmagicFilmGeneration5(0.930339851899973),
            100.0,
            places=7,
        )
        self.assertAlmostEqual(
            oetf_inverse_BlackmagicFilmGeneration5(0.999999631713769),
            222.86,
            places=7,
        )

    def test_n_dimensional_oetf_inverse_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_inverse_BlackmagicFilmGeneration5` definition
        n-dimensional arrays support.
        """
        V = 0.383561643835617
        L = oetf_inverse_BlackmagicFilmGeneration5(V)

        # The inverse OETF must broadcast over 1-D, 2-D and 3-D inputs.
        V = np.tile(V, 6)
        L = np.tile(L, 6)
        np.testing.assert_almost_equal(
            oetf_inverse_BlackmagicFilmGeneration5(V), L, decimal=7
        )

        V = np.reshape(V, (2, 3))
        L = np.reshape(L, (2, 3))
        np.testing.assert_almost_equal(
            oetf_inverse_BlackmagicFilmGeneration5(V), L, decimal=7
        )

        V = np.reshape(V, (2, 3, 1))
        L = np.reshape(L, (2, 3, 1))
        np.testing.assert_almost_equal(
            oetf_inverse_BlackmagicFilmGeneration5(V), L, decimal=7
        )

    def test_domain_range_scale_oetf_inverse_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_inverse_BlackmagicFilmGeneration5` definition domain and
        range scale support.
        """
        V = 0.383561643835617
        L = oetf_inverse_BlackmagicFilmGeneration5(V)

        d_r = (("reference", 1), ("1", 1), ("100", 100))
        for scale, factor in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    oetf_inverse_BlackmagicFilmGeneration5(V * factor),
                    L * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_oetf_inverse_BlackmagicFilmGeneration5(self):
        """
        Tests :func:`colour.models.rgb.transfer_functions.\
blackmagic_design.oetf_inverse_BlackmagicFilmGeneration5` definition nan
        support.
        """
        # Only checks that pathological inputs do not raise.
        oetf_inverse_BlackmagicFilmGeneration5(
            np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
        )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
9790909 | # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re

# Pre-compiled patterns for the custom hacking (flake8 plugin) checks below.
# Each M00x check matches the offending construct in a logical source line.
assert_no_xrange_re = re.compile(r"\s*xrange\s*\(")
assert_True = re.compile(r".*assertEqual\(True, .*\)")
assert_None = re.compile(r".*assertEqual\(None, .*\)")
assert_Not_Equal = re.compile(r".*assertNotEqual\(None, .*\)")
assert_Is_Not = re.compile(r".*assertIsNot\(None, .*\)")
assert_raises_regexp = re.compile(r"assertRaisesRegexp\(")
no_log_warn = re.compile(r".*LOG.warn\(.*\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
def no_mutable_default_args(logical_line):
    """M001: flag function signatures whose defaults are [] or {}."""
    if mutable_default_args.match(logical_line):
        yield (0, "M001: Method's default argument shouldn't be mutable!")
def no_xrange(logical_line):
    """M002: flag any use of the Python 2 xrange() builtin."""
    matched = assert_no_xrange_re.match(logical_line)
    if matched:
        yield (0, "M002: Do not use xrange().")
def validate_assertTrue(logical_line):
    """M003: prefer assertTrue(value) over assertEqual(True, value)."""
    if assert_True.match(logical_line):
        yield (0, "M003: Unit tests should use assertTrue(value) instead"
                  " of using assertEqual(True, value).")
def validate_assertIsNone(logical_line):
    """M004: prefer assertIsNone(value) over assertEqual(None, value)."""
    if assert_None.match(logical_line):
        yield (0, "M004: Unit tests should use assertIsNone(value) instead"
                  " of using assertEqual(None, value).")
def no_log_warn_check(logical_line):
    """M005: LOG.warn is deprecated in favour of LOG.warning."""
    if no_log_warn.match(logical_line):
        yield (0, "M005: LOG.warn is deprecated, please use LOG.warning!")
def validate_assertIsNotNone(logical_line):
    """M006: prefer assertIsNotNone(value) over assertNotEqual/assertIsNot forms."""
    matched = (assert_Not_Equal.match(logical_line)
               or assert_Is_Not.match(logical_line))
    if matched:
        yield (0, "M006: Unit tests should use assertIsNotNone(value) instead"
                  " of using assertNotEqual(None, value) or"
                  " assertIsNot(None, value).")
def assert_raisesRegexp(logical_line):
    """M007: assertRaisesRegexp was renamed to assertRaisesRegex."""
    if assert_raises_regexp.search(logical_line):
        yield (0, "M007: assertRaisesRegex must be used instead "
                  "of assertRaisesRegexp")
def factory(register):
    """Entry point used by hacking/flake8 to register every local check.

    Registration order matches the original definition order.
    """
    checks = (
        no_mutable_default_args,
        no_xrange,
        validate_assertTrue,
        validate_assertIsNone,
        no_log_warn_check,
        validate_assertIsNotNone,
        assert_raisesRegexp,
    )
    for check in checks:
        register(check)
| StarcoderdataPython |
3251010 | <reponame>jjwatts/gigantum-client
# Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from gtmcore.environment import BaseRepository
from gtmcore.fixtures import (mock_config_with_repo, ENV_UNIT_TEST_REPO, ENV_UNIT_TEST_BASE, ENV_UNIT_TEST_REV)
class TestEnvironmentRepository(object):
    """Unit tests for BaseRepository's base-image index accessors."""

    def test_get_list_index_base_image(self, mock_config_with_repo):
        """Test accessing the list version of the index"""
        repo = BaseRepository(mock_config_with_repo[0])
        data = repo.get_base_list()

        assert type(data) == list
        assert len(data) == 5
        # The unit-test repo's known base must appear in the listing.
        assert any(n.get('id') == ENV_UNIT_TEST_BASE for n in data)
        assert any(n.get('repository') == ENV_UNIT_TEST_REPO for n in data)

    def test_get_component_index_base(self, mock_config_with_repo):
        """Test accessing the detail version of the index"""
        repo = BaseRepository(mock_config_with_repo[0])
        data = repo.get_base_versions(ENV_UNIT_TEST_REPO,
                                      ENV_UNIT_TEST_BASE)
        assert type(data) == list
        assert len(data) >= 1
        # Versions are ordered; the last entry is the newest revision.
        assert data[-1][1]['id'] == ENV_UNIT_TEST_BASE
        assert data[-1][1]['repository'] == ENV_UNIT_TEST_REPO

    def test_get_component_version_base(self, mock_config_with_repo):
        """Test accessing the a single version of the index"""
        repo = BaseRepository(mock_config_with_repo[0])
        data = repo.get_base(ENV_UNIT_TEST_REPO,
                             ENV_UNIT_TEST_BASE,
                             ENV_UNIT_TEST_REV)
        assert type(data) == dict
        assert data['id'] == ENV_UNIT_TEST_BASE
        assert data['revision'] == ENV_UNIT_TEST_REV
        assert 'image' in data
        assert len(data['package_managers']) == 2
        assert data['repository'] == ENV_UNIT_TEST_REPO

    def test_get_component_version_base_does_not_exist(self, mock_config_with_repo):
        """Test accessing the a single version of the index that does not exist"""
        repo = BaseRepository(mock_config_with_repo[0])
        # Bad repository name, bad revision type/value, bad base name,
        # and out-of-range revision must all raise ValueError.
        with pytest.raises(ValueError):
            repo.get_base('gig-dev_environment-componentsXXX',
                          'quickstart-jupyterlab', '0.1')
        with pytest.raises(ValueError):
            repo.get_base(ENV_UNIT_TEST_REPO,
                          'quickstart-jupyterlab', '3')
        with pytest.raises(ValueError):
            repo.get_base(ENV_UNIT_TEST_REPO,
                          'quickstart-jupyterlabXXX', 0)
        with pytest.raises(ValueError):
            repo.get_base(ENV_UNIT_TEST_REPO,
                          'quickstart-jupyterlab', 99)
| StarcoderdataPython |
6468750 | # This file is Copyright 2007, 2009 <NAME>.
#
# This file is part of the Python-on-a-Chip program.
# Python-on-a-Chip is free software: you can redistribute it and/or modify
# it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1.
#
# Python-on-a-Chip is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# A copy of the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1
# is seen in the file COPYING up one directory from this.
#
# This is a sample application that calls functions in the AVR module.
#
# Sample application exercising the Python-on-a-Chip AVR module.
# NOTE: Python 2 print-statement syntax; this file targets the PyMite VM.
print "Hello world."

import avr

print "Counting from 0 to 249 on portA's pins"
avr.ddrA(0xFF)          # configure all port A pins as outputs
i = 0
while i < 250:
    avr.portA(i)        # write the counter value to port A
    avr.delay(250)      # pause 250 ms between updates
    i += 1
print "Done."
| StarcoderdataPython |
159865 | import json
import sys
import datetime
from os import getenv
from dotenv import load_dotenv
from notion import NotionHelper
from rabbit import RabbitHelper
# Reads per-member Notion ledgers and DMs (via RabbitMQ) anyone owing > 2000 TWD.
load_dotenv()
notion_helper = NotionHelper()
# get discord id to notion id list
result = notion_helper.get_discord_list(getenv('NOTION_ID_LIST'))
discord_to_notion = {}
for r in result['results']:
    discord_to_notion[r['properties']['discord_id']['title'][0]['plain_text']] = r['properties']['notion_id']['rich_text'][0]['plain_text']
rabbit_publisher = RabbitHelper()
# only execute at 1 and 16 every month
today = datetime.date.today()
if today.day not in (1,16):
    # NOTE(review): exiting here leaves rabbit_publisher unclosed -- confirm OK.
    sys.exit()
# check each notion database whethere there is someone's debit over 2000 and two month
for discord_id, notion_id in discord_to_notion.items():
    # get not paid records
    result = notion_helper.get_notpaid(notion_id)
    rows = []
    for r in result['results']:
        # '台幣' is the ledger's TWD-amount property name; do not translate.
        rows.append(r['properties']['台幣']['number'])
    # calcuate sum
    if sum(rows) > 2000:
        notion_url = f'https://www.notion.so/{notion_id}'
        # send meesage to rabbit mq , the it will send discord DM to that guy
        json_message = json.JSONEncoder().encode({
            'user_id': discord_id, #temperary change to me
            'message': f'[欠費提醒] {notion_url} (回訊息機器人看不到,如果有漏登聯絡一下XG) ',
        })
        rabbit_publisher.send(json_message)
rabbit_publisher.close()
| StarcoderdataPython |
216787 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 11:24:47 2021
@author: mshahzamal
"""
import numpy as np
import pandas as pd
from flask import Flask, request, jsonify, render_template
import pickle
#naming our app as app
app= Flask(__name__)
# Pre-trained regression model served by /predict.
# NOTE(review): the pickle file handle is never closed; prefer a with-block.
model = pickle.load(open('model.pkl', 'rb'))
@app.route("/")
def home():
    """Render the landing page containing the prediction input form."""
    return render_template("index.html")
@app.route("/predict", methods=["POST"])
def predict():
    """Read the form's numeric fields, run the model, and re-render the page."""
    form_values = [float(value) for value in request.form.values()]
    feature_row = np.array(form_values)
    prediction = model.predict([feature_row])
    output = round(prediction[0], 2)
    return render_template("index.html", prediction_text= "Cu_recov is {}".format(output))
#running the flask app
if __name__ == '__main__':
app.run(host='0.0.0.0') | StarcoderdataPython |
114225 | from discord.ext.commands import Cog
from discord.ext.commands import CheckFailure
from discord.ext.commands import command, has_permissions
from discord.ext.menus import MenuPages, ListPageSource
from datetime import datetime, timedelta
from random import randint
from typing import Optional
from discord import Member
from discord import Embed, Colour
from ..db import db
class HelpMenu(ListPageSource):
    """Paginated embed source for the XP leaderboard (10 entries per page)."""

    def __init__(self, ctx, data):
        self.ctx = ctx
        super().__init__(data, per_page=10)

    async def write_page(self, menu, offset, fields=None):
        """Build one leaderboard embed page from (name, value) field pairs.

        Fixed: the original used a mutable default argument (fields=[]),
        which is shared across calls; ``None`` is the sentinel instead.
        """
        fields = [] if fields is None else fields
        len_data = len(self.entries)
        embed = Embed(
            title="XP Leaderboard",
            colour=Colour.random()
        )
        embed.set_thumbnail(url=self.ctx.guild.icon_url)
        # Footer shows the 1-based range of ranks on this page.
        embed.set_footer(text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} members.")
        for name, value in fields:
            embed.add_field(name=name, value=value, inline = False)
        return embed

    async def format_page(self, menu, entries):
        """Render the current page's (user_id, xp, level) rows into an embed."""
        offset = (menu.current_page*self.per_page) + 1
        fields = []
        table = ("\n".join(f"{idx+offset}. {self.ctx.guild.get_member(entry[0]).display_name} (XP: {entry[1]} | Level: {entry[2]})"
                for idx, entry in enumerate(entries)))
        fields.append(("Ranks", table))
        return await self.write_page(menu, offset, fields)
class Exp(Cog):
    """XP/levelling cog: awards XP per message and exposes rank commands."""

    def __init__(self, bot):
        self.bot = bot

    async def process_xp(self, message):
        """Award XP for *message* unless the author's XP lock has not expired."""
        xp, lvl, xplock = db.record("SELECT XP, Level, XPLock FROM exp WHERE ID = ?", hex(message.author.id + message.author.guild.id))
        if datetime.utcnow() > datetime.fromisoformat(xplock):
            await self.add_xp(message, xp, lvl)

    async def add_xp(self, message, xp, lvl):
        """Add 10-20 XP, recompute the level, and announce any level-up."""
        xp_add = randint(10, 20)
        new_lvl = int(((xp+xp_add)//42) ** 0.55)
        # 10-second lock throttles XP farming by rapid messaging.
        db.execute("UPDATE exp SET XP = XP + ?, Level = ?, XPLock = ? WHERE ID = ?", xp_add, new_lvl, (datetime.utcnow()+timedelta(seconds=10)).isoformat(), hex(message.author.id + message.author.guild.id))
        if new_lvl > lvl:
            lvl_channel = list(db.record("SELECT LevelUpChannel FROM guilds WHERE GuildID = ?", message.author.guild.id))
            # Fixed: the original used `is not 0` (identity comparison with an
            # int literal -- a SyntaxWarning on Python 3.8+ and unreliable);
            # use a value comparison instead.
            if lvl_channel[0] != 0:
                await self.bot.wait_until_ready()
                await self.bot.get_channel(lvl_channel[0]).send(f"Congrats {message.author.mention}! You reached level {new_lvl:,}.")
            else:
                # No dedicated level-up channel configured: reply in place.
                await message.channel.send(f"Congrats {message.author.mention}! You reached level {new_lvl:,}.")

    @command(name='level', brief="Displays a user's or the caller's level", description="Displays a user's or the caller's level.")
    async def display_level(self, ctx, target: Optional[Member]):
        target = target or ctx.author
        xp, lvl = db.record("SELECT XP, Level FROM exp WHERE ID = ?", hex(target.id + target.guild.id)) or (None, None)
        if lvl is not None:
            await ctx.send(f"{target.mention} is on level {lvl:,} with {xp:,} XP.")
        else:
            await ctx.send("That member is not tracked by the XP system.")

    @command(name="rank", brief="Displays a user's or the caller's rank within the server", description="Displays a user's or the caller's rank within the server.")
    async def display_rank(self, ctx, target: Optional[Member]):
        target = target or ctx.author
        ids = db.column("SELECT UserID FROM exp WHERE GuildID = ? ORDER BY XP DESC", target.guild.id)
        try:
            await ctx.send(f"{target.mention} is rank {ids.index(target.id)+1} of {len(ids)}.")
        except ValueError:
            await ctx.send("That member is not tracked by the XP system.")

    @command(name="leaderboard", aliases=["lb"], brief="Displays the server's leaderboard", description="Displays the server's leaderboard.")
    async def display_leaderboard(self, ctx):
        records = db.records("SELECT UserID, XP, Level FROM exp WHERE GuildID = ? ORDER BY XP DESC", ctx.author.guild.id)
        menu = MenuPages(source=HelpMenu(ctx, records),
                         clear_reactions_after=True,
                         timeout=60.0)
        await menu.start(ctx)

    @Cog.listener()
    async def on_ready(self):
        if not self.bot.ready:
            self.levelup_channel = self.bot.get_channel(872167190986637313)
            self.bot.cogs_ready.ready_up("exp")

    @Cog.listener()
    async def on_message(self, message):
        # Ignore other bots so they cannot accrue XP.
        if not message.author.bot:
            await self.process_xp(message)
def setup(bot):
bot.add_cog(Exp(bot)) | StarcoderdataPython |
5117096 | <reponame>ben-hunter-hansen/matrix-api
from .matrix import Matrix
from .matrix import util
| StarcoderdataPython |
155700 | #!/usr/bin/python
#------------------------------------------------------------------------------
# Name: plotUpperLimits.py
# Author: <NAME>, 20150212
# Last Modified: 20150212
#This is to read upper limits files and plot them so another Python script
# createHTML.py, can display them at the end of a search summary.
# If they do not exist, it should gracefully quit, giving a placeholder so
# the parent script doesn't fail miserably.
#------------------------------------------------------------------------------
import numpy as np
import matplotlib as mpl
mpl.use('Agg') # This is so we can use matplotlib easily without setting $DISPLAY on remote servers
from matplotlib import pyplot as plt
import xml.etree.ElementTree as ET
from math import pow
import os
############################################################
#1) Read setup, upper limit and veto bands
############################################################
# Read the search configuration (target position and source parameters).
tree = ET.parse( open( "search_setup.xml",'r') )
root = tree.getroot()
#targetName = root[0].find("target").text
rightAscension = root[0].find("right_ascension").text
declination = root[0].find("declination").text
# Convert spin-down age from seconds to years.
tau = float( root[0].find('spindown_age').text ) / (365.25*24*3600)
# 3.08567758e19 m per kpc -- presumably converts metres to kiloparsecs; confirm units.
distance = float( root[0].find('distance').text ) / 3.08567758e19
Izz = float( root[0].find('moment_of_inertia').text )
print("Right Ascension: " + rightAscension )
print("Declination: " + declination )
def h0_age( tau, distance, Izz ):
    """calculates the spin-down based upper limit, h0_age, from the
    supernova remnant's
    age, distance and estimated moment of inertia"""
    distance_factor = 3.4 / distance
    spindown_factor = (300.0 / tau) * (Izz / 1.0e38)
    return 1.2e-24 * distance_factor * spindown_factor ** 0.5
# NOTE(review): rebinds the name h0_age, shadowing the function defined above.
h0_age = h0_age( tau, distance, Izz )
# Test to see if upper limits exist yet; fail gracefully and make a placeholder
# if not.
if os.path.isfile("upper_limit_bands.xml"):
    tree = ET.parse( open( "upper_limit_bands.xml",'r') )
    root = tree.getroot()
    band_freq = []
    band_width = [] #Usually constant, but we'll collect it up anyway
    h0_ul = []
    for band in root.iter('upper_limit_band'):
        h0_ul.append( float( band.find('upper_limit_h0').text ) )
        band_freq.append( float( band.find('freq').text ) )
        band_width.append( float( band.find('band').text ) )
    h0_ul = np.array( h0_ul )
    band_freq = np.array( band_freq )
    band_width = np.array( band_width )
    # Get veto bands
    tree = ET.parse( open( "veto_bands.xml",'r') )
    root = tree.getroot()
    veto_freq = []
    veto_width = [] #Usually constant, but we'll collect it up anyway
    for band in root.iter('veto_band'):
        veto_freq.append( float( band.find('freq').text ) )
        veto_width.append( float( band.find('band').text ) )
    veto_freq = np.array( veto_freq )
    veto_width = np.array( veto_width )
    #############################################################################################
    # Plot upper limits
    #############################################################################################
    figDir=os.path.join(os.getcwd(), 'figures')
    if not os.path.isdir(figDir):
        os.mkdir(figDir)
    plt.figure(57)
    # Upper limits are plotted at each band's centre frequency.
    plt.plot(band_freq + 0.5*band_width, h0_ul, "-ko", label="Upper limits")
    plt.plot(band_freq + 0.5*band_width, [h0_age for x in band_freq], "-b", label="Age-based upper limit")
    yPlotMax = 1.1*np.max([ max(h0_ul), h0_age ] )
    # Mark vetoed bands along the top of the plot.
    plt.plot(veto_freq, [yPlotMax for x in veto_freq], "-or", label="Vetoed bands")
    plt.axis([min(band_freq), max(band_freq), 0.9*np.min([ min(h0_ul), h0_age ]), yPlotMax ])
    xForPlot = np.linspace(min(band_freq), max(band_freq+band_width), 5) # Make 5 marks on abscissa and ordinate
    yForPlot = np.linspace(0.9*np.min([ min(h0_ul), h0_age ]) , 1.1*np.max([ max(h0_ul), h0_age ] ), 5)
    x2DecPlcs = ['%.2f' % a for a in xForPlot ]
    y2DecPlcs = ['%.3g' % a for a in yForPlot ]
    plt.xticks(xForPlot, x2DecPlcs)
    plt.yticks(yForPlot, y2DecPlcs)
    plt.title("Estimated upper limits")
    plt.xlabel("Frequency (Hz)")
    plt.ylabel("$h_0$")
    legend = plt.legend(loc='best', shadow=True)
    frame = legend.get_frame() # Some probably overly sophisticated additions to the legend
    frame.set_facecolor('0.90')
    #plt.draw()
    plt.savefig( os.path.join(figDir, "upper_limit_plot.png" ), dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format="png", transparent=False, bbox_inches="tight", pad_inches=0.1, frameon=None)
############################################################
# End of lplotUpperLimits.py
############################################################
1957483 | <reponame>shvetsiya/carvana<gh_stars>10-100
import cv2
import torch
import numpy as np
from tensorboardX import SummaryWriter
import shutil
class Callback:
    """Abstract training callback; subclasses must implement __call__."""
    def __call__(self, *args, **kwargs):
        raise NotImplementedError
class TensorBoardVisualizerCallback(Callback):
    def __init__(self, path_to_files):
        """
        Callback is executed every training epoch. The goal is to display
        the result of the last validation batch in Tensorboard
        Args:
            path_to_files (str): The path where to store the log files
        """
        self.path_to_files = path_to_files

    def mask_overlay(self, image, mask, color=(0, 255, 0)):
        """Blend a binary mask onto *image* with 50/50 alpha in *color*."""
        mask = np.dstack((mask, mask, mask)) * np.array(color)
        mask = mask.astype(np.uint8)
        return cv2.addWeighted(mask, 0.5, image, 0.5, 0.) # image * α + mask * β + λ

    def representation(self, image, mask):
        """
        Given a mask and an image this method returns
        one image representing 3 patches of the same image.
        These patches represent:
            - The original image
            - The predicted/original mask
            - The mask applied to the image
        Args:
            image (np.ndarray): The original image
            mask (np.ndarray): The predicted/original mask
        Returns (np.ndarray):
            An image of size (original_image_height, (original_image_width * 3))
            showing 3 patches of the original image
        """
        H, W, C = image.shape
        results = np.zeros((H, 3*W, 3), np.uint8)
        # Paint mask pixels blue (BGR) for the middle patch.
        blue_mask = np.zeros((H*W, 3), np.uint8)
        pb = np.where(mask.flatten()==1)[0]
        blue_mask[pb] = np.array([0, 0, 255])
        blue_mask = blue_mask.reshape(H, W, 3)
        overlay_imgs = self.mask_overlay(image, mask)
        results[:, 0: W] = image
        results[:, W: 2*W] = blue_mask
        results[:, 2*W: 3*W] = overlay_imgs
        return results

    def __call__(self, *args, **kwargs):
        # Expects kwargs: 'epoch' and 'last_valid_batch' = (images, targets, preds).
        epoch = kwargs['epoch']
        last_images, last_targets, last_preds = kwargs['last_valid_batch']
        writer = SummaryWriter(self.path_to_files)
        for i, (image, target_mask, pred_mask) in enumerate(zip(last_images, last_targets, last_preds)):
            image = (255*image.data).cpu().numpy().astype(np.uint8)
            image = np.transpose(image, (1, 2, 0)) # Invert c, h, w to h, w, c
            target_mask = (target_mask.data).cpu().numpy().astype(np.uint8).squeeze()
            pred_mask = (pred_mask.data).cpu().numpy().astype(np.uint8).squeeze()
            if image.shape[0] > 512: # We don't want images on tensorboard to be too large
                image = cv2.resize(image, (512, 512))
                target_mask = cv2.resize(target_mask, (512, 512))
                pred_mask = cv2.resize(pred_mask, (512, 512))
            expected_result = self.representation(image, target_mask)
            pred_result = self.representation(image, pred_mask)
            writer.add_image("Epoch_" + str(epoch) + '-Image_' + str(i + 1) + '-Expected', expected_result, epoch)
            writer.add_image("Epoch_" + str(epoch) + '-Image_' + str(i + 1) + '-Predicted', pred_result, epoch)
            if i == 1: # 2 Images are sufficient
                break
        writer.close()
class TensorBoardLoggerCallback(Callback):
    def __init__(self, path_to_files):
        """
        Callback run at the end of every epoch; pushes the epoch's
        losses and dice scores into the Tensorboard logs.

        Args:
            path_to_files (str): Directory where the event files are written.
        """
        self.path_to_files = path_to_files

    def __call__(self, *args, **kwargs):
        writer = SummaryWriter(self.path_to_files)
        step = kwargs['epoch']
        # Same four scalars, same tags, same order as before.
        for tag in ('train_loss', 'train_dice', 'valid_loss', 'valid_dice'):
            writer.add_scalar('data/' + tag, kwargs[tag], step)
        writer.close()
class ModelSaverCallback(Callback):
    def __init__(self, path_to_model, path_to_best_model):
        """
        Callback run after each whole train pass; checkpoints the network
        and keeps a copy of the best one (lowest validation loss) so far.

        Args:
            path_to_model (str): Where the latest checkpoint is stored.
            path_to_best_model (str): Where the best checkpoint is stored.
        """
        self.best_valid_loss = float('inf')
        self.path_to_model = path_to_model
        self.path_to_best_model = path_to_best_model

    def __call__(self, *args, **kwargs):
        net = kwargs['net']
        epoch = kwargs['epoch']
        valid_loss = kwargs['valid_loss']
        improved = valid_loss < self.best_valid_loss
        if improved:
            self.best_valid_loss = valid_loss
        # Always save the latest checkpoint ...
        torch.save({"epoch": epoch + 1,
                    "state_dict": net.state_dict(),
                    "valid_loss": valid_loss},
                   self.path_to_model)
        # ... and promote it to "best" when validation loss improved.
        if improved:
            shutil.copyfile(self.path_to_model, self.path_to_best_model)
class SimpleLoggerCallback(Callback):
    def __init__(self, log_file):
        """
        Callback run after each whole train pass; appends the epoch's
        metrics as a single tab-separated line to a plain-text log file.

        Args:
            log_file (str): Path of the file the metrics are appended to.
        """
        self.filename = log_file

    def __call__(self, *args, **kwargs):
        # Build the metric fields, then join them with tabs -- identical
        # on-disk format to the previous string-concatenation version.
        fields = ["epoch = {},".format(kwargs['epoch']),
                  "train_loss = {:03f},".format(kwargs['train_loss']),
                  "train_dice = {:03f},".format(kwargs['train_dice']),
                  "valid_loss = {:03f},".format(kwargs['valid_loss']),
                  "valid_dice = {:03f}".format(kwargs['valid_dice'])]
        with open(self.filename, 'a') as f:
            f.write('\t'.join(fields))
            f.write('\n')
| StarcoderdataPython |
9685166 | #!/usr/bin/python
# Import necessary libraries
import os
import pandas as pd
import matplotlib.pyplot as plt
import spacy
nlp = spacy.load("en_core_web_sm") # initialize spaCy
from spacytextblob.spacytextblob import SpacyTextBlob
spacy_text_blob = SpacyTextBlob() # initialize spaCyTextBlob
# Registering the component adds the `doc._.sentiment` extension that
# calculate_sentiment() below relies on.
nlp.add_pipe(spacy_text_blob) # and add it as a new component to our spaCy nlp pipeline
# Defining function for calculating sentiment
def calculate_sentiment(titles):
    """Return a list with one spacytextblob polarity score per title."""
    # Stream the titles through the spaCy pipeline in batches of 500 and
    # collect the polarity of each resulting Doc.
    return [doc._.sentiment.polarity
            for doc in nlp.pipe(titles, batch_size=500)]
# Defining function for plotting and saving plots
def plotting(x, y, windowsize):
    """Plot a rolling-average sentiment series and save it as a .jpg.

    Args:
        x: Sequence of dates for the abscissa.
        y: Sequence of smoothed polarity scores.
        windowsize (str): Rolling-window length in days; used in the
            legend and in the output file name.
    """
    # create figure
    fig = plt.figure(figsize=(10.0, 3.0))
    plt.plot(x, y, label=f"{windowsize}-days rolling average")
    plt.xlabel('Publish Date')
    plt.ylabel('Polarity')
    plt.legend()
    plt.title('Daily sentiment score')
    # Make sure the output directory exists before writing to it.
    os.makedirs("out", exist_ok=True)
    # Bug fix: save *before* showing. In interactive backends plt.show()
    # consumes the current figure, so saving afterwards could write an
    # empty image.
    plt.savefig(os.path.join("out", f"sentiment_{windowsize}-days.jpg"))
    plt.show()
    plt.close()
# Define main-function
def main():
    """Load headlines, score their sentiment, and plot daily averages."""
    # Location of the raw headline data.
    in_file = os.path.join("..", "..", "data", "assignment3", "abcnews-date-text.csv")
    data = pd.read_csv(in_file)
    # Work on a random subsample to keep the runtime manageable.
    data = data.sample(100000)
    # One polarity score per headline.
    data["sentiment"] = calculate_sentiment(data["headline_text"])
    # Parse the integer dates so pandas can group by calendar day.
    data["publish_date"] = pd.to_datetime(data["publish_date"], format="%Y%m%d")
    # Daily mean sentiment: index by date, group by day, drop empty days.
    data.index = data['publish_date']
    data_average = data.groupby(pd.Grouper(freq='D')).mean()
    data_average = pd.DataFrame.dropna(data_average)
    data_average.columns = ["daily_sentiment"]
    # Smooth with 7- and 30-day rolling means and plot each.
    for window in (7, 30):
        column = f"smoothed_sentiment_{window}"
        data_average[column] = \
            pd.Series(data_average["daily_sentiment"]).rolling(window).mean()
        plotting(x=data_average.index, y=data_average[column],
                 windowsize=str(window))
    print("DONE")
# Define behaviour when called from command line
if __name__=="__main__":
    main()  # run the full sentiment pipeline
11312484 | <filename>macro/tutorial/bundles/01_indexing.py
#!/usr/bin/env python
from gna.expression.index import *
#
# 0d index
#
# An index built from an empty list; iteration still yields one (empty)
# configuration.
nidx = NIndex(fromlist=[])
print('Test 0d index')
for i, nit in enumerate(nidx):
    print(' iteration', i)
    print(' index: ', nit.current_format() or '<empty string>')
    print(' full name:', nit.current_format(name='var'))
    print(' values: ', nit.current_values())
print()
#
# 1d index
#
# A single index 'i' with three values.
nidx = NIndex(fromlist=[
    ('i', 'index', ['1', '2', '3'])
])
for i, nit in enumerate(nidx):
    print(' iteration', i, end=': ')
    print(' values: ', nit.current_values())
print()
print('Test 1d index')
for i, nit in enumerate(nidx):
    print(' iteration', i, end=': ')
    print(' index: ', nit.current_format())
print()
for i, nit in enumerate(nidx):
    print(' iteration', i, end=': ')
    print(' full name:', nit.current_format(name='var'))
print()
#
# 2d index
#
# Two indices; iteration walks over their combinations.
nidx = NIndex(fromlist=[
    ('i', 'index', ['1', '2', '3']),
    ('j', 'element', ['a', 'b'])
])
print('Test 2d index')
for i, nit in enumerate(nidx):
    print(' iteration', i)
    print(' index: ', nit.current_format())
    print(' full name:', nit.current_format(name='var'))
    print(' values: ', nit.current_values())
print()
#
# 3d index and arbitrary name position
#
# The bare string 'name' marks where the variable name is inserted when
# the full name is formatted.
nidx = NIndex(fromlist=[
    ('z', 'clone', ['clone_00', 'clone_01']),
    'name',
    ('s', 'source', ['SA', 'SB']),
    ('d', 'detector', ['D1', 'D2'])
])
print('Test 3d index and arbitrary name position')
for i, nit in enumerate(nidx):
    print(' iteration', i)
    print(' full name:', nit.current_format(name='var'))
    print(' values: ', nit.current_values(name='var'))
print()
#
# 4d index and separated iteration
#
nidx = NIndex(fromlist=[
    ('z', 'clone', ['clone_00', 'clone_01']),
    'name',
    ('s', 'source', ['SA', 'SB']),
    ('d', 'detector', ['D1', 'D2']),
    ('e', 'element', ['e1', 'e2', 'e3'])
])
print('Test 4d index and separated iteration')
# split() separates the listed ('major') indices from the remaining
# ('minor') ones so they can be iterated independently.
nidx_major, nidx_minor=nidx.split(('s', 'd'))
for i_major, nit_major in enumerate(nidx_major):
    print(' major iteration', i_major)
    print(' major values: ', nit_major.current_values())
    for j_minor, nit_minor in enumerate(nidx_minor):
        print(' minor iteration', j_minor)
        print(' minor values: ', nit_minor.current_values())
        # Adding a major and a minor iterator gives the combined index.
        nit = nit_major + nit_minor
        print(' full name: ', nit.current_format(name='var'))
        print(' custom label: ', nit.current_format('Flux from {source} to {detector} element {element} ({clone})'))
    print()
    # Only the first major iteration is demonstrated.
    break
#
# Dependant indices
#
# 'element' depends on 'group': the map ties each group value to the
# element values it contains.
nidx = NIndex(fromlist=[
    ('d', 'detector', ['D1', 'D2']),
    ('s', 'source', ['SA', 'SB']),
    ('g', 'group', ['g1', 'g2']),
    ('e', 'element', ['e1', 'e2', 'e3'], dict(short='g', name='group', map=[('g1', ('e1', 'e2')), ('g2', ('e3',)) ]))
])
print('Test 4d index and dependant indices')
nidx_major, nidx_minor=nidx.split(('d', 'g'))
nidx_e=nidx.get_subset('e')
for i_major, nit_major in enumerate(nidx_major):
    print(' major iteration', i_major)
    print(' major values: ', nit_major.current_values())
    for j_minor, nit_minor in enumerate(nidx_minor):
        nit = nit_major + nit_minor
        print(' full values %i:'%j_minor, nit.current_values())
    print()
print('Test 4d index and separated iteration: try to mix dependent indices')
# Extend the major part with the dependent 'element' index.
nidx_major+=nidx_e
for i_major, nit_major in enumerate(nidx_major):
    print(' major iteration', i_major)
    print(' major values: ', nit_major.current_values())
    for j_minor, nit_minor in enumerate(nidx_minor):
        nit = nit_major + nit_minor
        print(' full values %i: '%j_minor, nit.current_values())
        print(' formatted string %i:'%j_minor, nit.current_format('Element {element} in group {group}'))
    print()
| StarcoderdataPython |
3566256 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Command_ErrorDetectionNet(nn.Module):
    """
    Baseline model for the Error Detection task, in which the label for each
    data point is either 1 (degraded) or 0 (not degraded).

    Adapted from: https://github.com/claravania/lstm-pytorch/blob/master/model.py

    It:
        1) embeds the integer batch input into a learned embedding space
        2) passes this through a standard LSTM
        3) passes the final hidden state from the lstm through a dropout layer
        4) then puts this through a linear layer and returns the output

    You should use nn.CrossEntropyLoss which will perform both a softmax on
    the output, then Negative log likelihood calculation (this is more
    efficient and therefore I exclude a softmax layer from the model)
    """

    def __init__(
        self,
        vocab_size,
        embedding_dim,
        hidden_dim,
        output_size=2,
        dropout_prob=0.1,
        num_lstm_layers=1,
    ):
        super().__init__()

        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        # Kept so init_hidden can build a correctly shaped initial state:
        # nn.LSTM expects (num_layers, batch, hidden).
        self.num_lstm_layers = num_lstm_layers

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_lstm_layers)
        self.hidden2out = nn.Linear(hidden_dim, output_size)
        self.dropout_layer = nn.Dropout(p=dropout_prob)

    def init_hidden(self, batch_size, device):
        """Random initial (h_0, c_0) for the LSTM.

        Bug fix: the leading dimension must equal the number of stacked
        LSTM layers; it was previously hard-coded to 1, which crashed
        whenever num_lstm_layers > 1.
        """
        shape = (self.num_lstm_layers, batch_size, self.hidden_dim)
        return (
            torch.randn(*shape, device=device),
            torch.randn(*shape, device=device),
        )

    def forward(self, batch, input_lengths=None):
        """Return (batch, output_size) logits.

        Args:
            batch: integer tensor of token ids, shape (batch, seq).
            input_lengths: optional array of per-sequence lengths; when
                given, the output for each sequence is taken at its last
                valid time step instead of the final hidden state.
        """
        if input_lengths is not None:
            # Trim padding beyond the longest sequence in this batch.
            batch_length = np.max(input_lengths)
            batch = batch[:, :batch_length]
        batch_size = batch.shape[0]
        device = batch.device
        self.hidden = self.init_hidden(batch_size, device=device)

        # Embed and reorder to (seq, batch, emb) for the batch-major LSTM.
        embeds = self.embedding(batch).permute(1, 0, 2)
        outputs, (ht, ct) = self.lstm(embeds, self.hidden)

        # ht[-1] is the last layer's final hidden state, (batch, hidden).
        if input_lengths is None:
            out = ht[-1]
        else:
            # Pick each sequence's output at its own last time step.
            out = outputs[input_lengths - 1, np.arange(batch_size)]
        output = self.dropout_layer(out)
        output = self.hidden2out(output)
        return output
class Command_ErrorClassificationNet(Command_ErrorDetectionNet):
    """
    Baseline model for the Error Classification task, in which the label for
    each data point is a degradation_id (with 0 = not degraded).

    Identical network design to the error-detection model; only the default
    number of output classes differs (9 for ACME1.0).
    """

    def __init__(
        self,
        vocab_size,
        embedding_dim,
        hidden_dim,
        output_size=9,
        dropout_prob=0.1,
        num_lstm_layers=1,
    ):
        # Everything is inherited; this subclass only changes the default
        # output width.
        super().__init__(
            vocab_size,
            embedding_dim,
            hidden_dim,
            output_size=output_size,
            dropout_prob=dropout_prob,
            num_lstm_layers=num_lstm_layers,
        )
class Pianoroll_ErrorLocationNet(nn.Module):
    """
    Baseline model for the Error Location task, in which the label for
    each data point is a binary label for each frame of input, with 0 = not
    degraded and 1 = degraded.

    The model consists of:
        1) A bidirectional LSTM.
        2) A sequence of dropout layers followed by linear layers.
        3) A final dropout layer.
        4) A final output layer of dim 2.

    The outputs and labels should be flattened when computing the CE Loss.
    """

    def __init__(
        self,
        input_dim,
        hidden_dim,
        output_dim,
        layers=[],
        dropout_prob=0.1,
        num_lstm_layers=1,
    ):
        super().__init__()

        self.hidden_dim = hidden_dim
        # Kept so init_hidden can size the initial state: a bidirectional
        # LSTM expects (2 * num_layers, batch, hidden).
        self.num_lstm_layers = num_lstm_layers

        self.lstm = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_lstm_layers,
            bidirectional=True,
            batch_first=True,
        )

        # Optional stack of dropout + linear + ELU blocks between the LSTM
        # and the output layer. NOTE: `layers` is only iterated, never
        # mutated, so the mutable default is harmless here.
        current_dim = 2 * hidden_dim
        linear_list = []
        for dim in layers:
            linear_list.append(nn.Dropout(p=dropout_prob))
            linear_list.append(nn.Linear(current_dim, dim))
            linear_list.append(nn.ELU())
            current_dim = dim
        self.linears = nn.ModuleList(linear_list)

        self.hidden2out = nn.Linear(current_dim, output_dim)
        self.dropout_layer = nn.Dropout(p=dropout_prob)

    def init_hidden(self, batch_size, device):
        """Random initial (h_0, c_0).

        Bug fix: the leading dimension must be
        num_directions * num_layers = 2 * self.num_lstm_layers; it was
        previously hard-coded to 2, which crashed for num_lstm_layers > 1.
        """
        shape = (2 * self.num_lstm_layers, batch_size, self.hidden_dim)
        return (
            torch.randn(*shape, device=device),
            torch.randn(*shape, device=device),
        )

    def forward(self, batch):
        """Return per-frame logits of shape (batch, frames, output_dim)."""
        batch_size = batch.shape[0]
        device = batch.device
        output, _ = self.lstm(batch.float(), self.init_hidden(batch_size, device))
        for module in self.linears:
            output = module(output)
        output = self.dropout_layer(output)
        output = self.hidden2out(output)
        return output
class Pianoroll_ErrorCorrectionNet(nn.Module):
    """
    Baseline model for the Error Correction task, in which the label for each
    data point is the clean data.

    The model consists of:
        1) Bi-LSTM to embed the input.
        2) A linear connection layer.
        3) A 2nd Bi-LSTM to decode.
        4) Final output layers (sigmoid-activated).
    """

    def __init__(
        self,
        input_dim,
        hidden_dim,
        output_dim,
        layers=[],
        dropout_prob=0.1,
        num_lstm_layers=1,
    ):
        super().__init__()

        self.hidden_dim = hidden_dim
        # Kept so init_hidden can size the initial state: both
        # bidirectional LSTMs expect (2 * num_layers, batch, hidden).
        self.num_lstm_layers = num_lstm_layers

        self.encoder = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_lstm_layers,
            bidirectional=True,
            batch_first=True,
        )

        self.connector = nn.Linear(hidden_dim * 2, hidden_dim)
        self.connector_do = nn.Dropout(p=dropout_prob)

        self.decoder = nn.LSTM(
            hidden_dim,
            hidden_dim,
            num_layers=num_lstm_layers,
            bidirectional=True,
            batch_first=True,
        )

        # Optional stack of dropout + linear + ELU blocks before the
        # output layer. `layers` is only iterated, never mutated, so the
        # mutable default is harmless here.
        current_dim = 2 * hidden_dim
        linear_list = []
        for dim in layers:
            linear_list.append(nn.Dropout(p=dropout_prob))
            linear_list.append(nn.Linear(current_dim, dim))
            linear_list.append(nn.ELU())
            current_dim = dim
        self.linears = nn.ModuleList(linear_list)

        self.hidden2out = nn.Linear(current_dim, output_dim)
        self.dropout_layer = nn.Dropout(p=dropout_prob)

    def init_hidden(self, batch_size, device):
        """Random initial (h_0, c_0) sized for a bidirectional stacked LSTM.

        Bug fix: the leading dimension must be 2 * self.num_lstm_layers;
        it was previously hard-coded to 2, which crashed for
        num_lstm_layers > 1.
        """
        shape = (2 * self.num_lstm_layers, batch_size, self.hidden_dim)
        return (
            torch.randn(*shape, device=device),
            torch.randn(*shape, device=device),
        )

    def forward(self, batch, input_lengths):
        """Return a (batch, frames, output_dim) tensor of probabilities.

        ``input_lengths`` is accepted for API compatibility with the
        training loop but is currently unused.
        """
        batch_size = batch.shape[0]
        device = batch.device
        output, _ = self.encoder(batch.float(), self.init_hidden(batch_size, device))
        output = self.connector_do(F.elu(self.connector(output)))
        output, _ = self.decoder(output, self.init_hidden(batch_size, device))
        for module in self.linears:
            output = module(output)
        output = self.dropout_layer(output)
        output = self.hidden2out(output)
        return torch.sigmoid(output)
| StarcoderdataPython |
6632722 | # template generated by /usr/local/lib/python3.6/dist-packages/colcon_python_shell/shell/python_shell.py
# This script extends the environment for this package.
import pathlib
# assumes colcon_current_prefix has been injected into globals by caller
# (colcon exec()s this hook with that name pre-bound); fail fast otherwise.
assert colcon_current_prefix
def prepend_unique_path(envvar, subdirectory):
    """Prepend *subdirectory* (resolved against the colcon prefix when
    relative) to the *envvar* path list, dropping any previous occurrence
    so the entry appears exactly once, at the front."""
    global colcon_current_prefix
    import os
    resolved = str(pathlib.Path(colcon_current_prefix, subdirectory).resolve())
    existing = os.environ.get(envvar, '').split(os.pathsep)
    deduped = [entry for entry in existing if entry != resolved]
    os.environ[envvar] = os.pathsep.join([resolved] + deduped)
# source python hooks
# Each entry pairs a hook script (relative to the prefix) with an argument
# list; the scripts are exec()'d in this namespace so they see
# colcon_current_prefix. NOTE(review): `args` is never referenced below --
# presumably reserved for hooks that take arguments; confirm before removing.
for exe, args in [('share/kinesis_webrtc_manager/hook/cmake_prefix_path.py', []), ('share/kinesis_webrtc_manager/hook/ld_library_path_lib.py', []), ('share/kinesis_webrtc_manager/hook/ros_package_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path.py', []), ('share/kinesis_webrtc_manager/hook/pkg_config_path_multiarch.py', [])]:
    exec(pathlib.Path(colcon_current_prefix, exe).read_bytes())
| StarcoderdataPython |
1717161 | <filename>saulscript/__init__.py<gh_stars>0
import exceptions
import runtime
from runtime import Context
| StarcoderdataPython |
3378077 | <gh_stars>0
#
# Copyright 2013 Geodelic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from functools import wraps
from pprint import pformat
from collections import Iterable
import logging
logger = logging.getLogger(__name__)
import boto
import boto.ec2
DEFAULT_TTL = 120       # seconds; applied to every record this script creates
DEFAULT_TYPE = 'CNAME'  # record type managed by this script
# Shared Route53 connection; credentials come from the boto environment.
route53 = boto.connect_route53()
def memoize(fctn):
    """Cache *fctn*'s return values keyed on its positional arguments.

    The cache is unbounded and arguments must be hashable.
    """
    cache = {}

    @wraps(fctn)
    def wrapper(*args):
        key = tuple(args)
        if key not in cache:
            cache[key] = fctn(*args)
        return cache[key]
    return wrapper
def trampoline(*list_of_lists):
    """Depth-first flatten of arbitrarily nested iterables, yielding leaves.

    Strings (and any non-iterable) are treated as leaves; every other
    iterable is descended into. Leaves come out in depth-first,
    left-to-right order.

    NOTE: Python 2 only -- relies on iterator.next() and basestring.
    """
    stack = [list_of_lists]
    iteree = iter(stack)
    while 1:
        try:
            item = iteree.next()
        except StopIteration:
            # Current iterator exhausted: resume the most recently
            # suspended parent iterator, or stop when none remain.
            try:
                iteree = iter(stack.pop())
                continue
            except IndexError:
                break
        if isinstance(item, Iterable) and not isinstance(item, basestring):
            # Descend: park the current iterator on the stack and walk
            # into the child iterable.
            stack.append(iteree)
            iteree = iter(item)
            continue
        yield item
@memoize
def get_records(name):
    """Return all resource-record sets for the hosted zone named *name*,
    creating the zone first if it does not exist. Memoized per zone name."""
    zones = route53.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones']
    name = name.strip('.')
    matching = [z for z in zones if z['Name'].strip('.') == name]
    if matching:
        zone = matching[0]
    else:
        response = route53.create_hosted_zone(name)
        zone = response['CreateHostedZoneResponse']['HostedZone']
    zid = zone['Id'].replace('/hostedzone/', '')
    # Page through the record sets until the listing is no longer truncated.
    results = rrsets = route53.get_all_rrsets(zid)
    while rrsets.is_truncated:
        rrsets = route53.get_all_rrsets(zid, name=rrsets.next_record_name, type=rrsets.next_record_type)
        results.extend(rrsets)
    return results
def delete_record(rrset, record):
    """Queue a DELETE change for *record* on *rrset*; return the rrset."""
    change = rrset.add_change('DELETE', record.name, record.type, record.ttl)
    for value in record.resource_records:
        change.add_value(value)
    return rrset
def find_record(rrset, name):
    """Return the first record in *rrset* whose name matches *name*,
    ignoring trailing dots. Raises IndexError when there is no match."""
    wanted = name.strip('.')
    return [rec for rec in rrset if rec.name.strip('.') == wanted][0]
def process_record(rrset, name, value):
    """Ensure *rrset* maps *name* to *value* with a DEFAULT_TYPE record.

    No-op when the record already holds that value; otherwise queue a
    DELETE of the stale record (if any) and a CREATE of the new one.
    Returns the (possibly modified) rrset.
    """
    name = name.strip('.')
    try:
        record = find_record(rrset, name)
    except IndexError:
        # No existing record: fall through and create it.
        logger.info("New: %s" % name)
    else:
        if record.resource_records[0] == value:
            logger.debug("Unchanged: %s" % name)
            return rrset
        logger.info("Updating: %s" % name)
        logger.debug("Name: %s, Old values: %r, New value: %r" % (name, record.resource_records, value))
        rrset = delete_record(rrset, record)
    # Consistency fix: use the module-level DEFAULT_TYPE constant instead of
    # a second hard-coded 'CNAME' literal (the value is identical).
    c = rrset.add_change('CREATE', name, DEFAULT_TYPE, DEFAULT_TTL)
    c.add_value(value)
    return rrset
def gatherinstances():
    """Return a flat iterator over every EC2 instance in every region.

    NOTE: Python 2 only (uses the `except Class, name` syntax) and relies
    on trampoline() to flatten the nested reservation/instance structure.
    """
    def _iter_regions(regions):
        # Yield each region's reservation list, skipping regions that the
        # credentials cannot access.
        for region in regions:
            try:
                yield region.get_all_instances()
            except boto.exception.EC2ResponseError, e:
                if e.status == 401 and e.error_code == u'AuthFailure':
                    # Been getting auth errors eu-central-1. don't have any
                    # servers there, so skip it if this happens.
                    logger.debug("Got AuthFailure while trying to list instances in region %s" % region.region.name, exc_info=1)
                    pass
                else:
                    raise
    regions = (region.connect() for region in boto.ec2.regions())
    reservations = _iter_regions(regions)
    # Each reservation holds a list of instances; trampoline flattens both
    # levels lazily.
    instances = (reservation.instances for reservation in trampoline(reservations))
    return trampoline(instances)
def process_all(instances):
    """Reconcile Route53 CNAME records against the given EC2 instances.

    NOTE: Python 2 only (list-returning filter(), dict.iteritems()).

    Steps:
      1) keep only instances tagged with 'domain_base' and 'fqdn' that
         have a public DNS name;
      2) upsert one CNAME per instance fqdn;
      3) for 'array-*' instances, also maintain numbered aliases
         (type + two-digit index, ordered by launch time);
      4) delete CNAMEs that no longer correspond to any instance;
      5) commit queued changes once per hosted zone.
    """
    # Must be a real list: it is iterated several times below (py2 filter).
    instances = filter(lambda x: x.tags and x.dns_name and all((y in x.tags for y in ('domain_base', 'fqdn'))), instances)
    domain_bases = set(x.tags['domain_base'] for x in instances)
    # Start from every existing CNAME per zone; whatever is still in
    # 'unseen' at the end belongs to no instance and gets deleted.
    unseen = {}
    for d in domain_bases:
        unseen[d] = set(r.name.strip('.') for r in get_records(d) if r.type == 'CNAME')
    for instance in instances:
        unseen[instance.tags['domain_base']].discard(instance.tags['fqdn'])
        rrset = get_records(instance.tags['domain_base'])
        process_record(rrset, instance.tags['fqdn'], instance.dns_name)
    # Array members additionally get numbered aliases built from their
    # 'type' tag: e.g. webXX.example.com -> web00.example.com, web01...
    arrays = filter(lambda x: x.tags['fqdn'].startswith('array-') and \
            all((_ in x.tags for _ in ('environment', 'type'))), instances)
    array_types = {}
    for array_instance in arrays:
        environment = array_instance.tags['environment']
        fqdn = array_instance.tags['fqdn']
        fqdn_base = fqdn[fqdn.index('.'):]
        array_type = array_instance.tags['type']
        fqdn_alias = array_type + 'XX' + fqdn_base
        atypedeploy = array_types.setdefault(environment, {})
        atypes = atypedeploy.setdefault(array_type, [])
        atypes.append((fqdn_alias, array_instance))
    for deploy,atypesdict in array_types.iteritems():
        for atype,v in atypesdict.iteritems():
            # Oldest instance gets index 00, so indices stay stable while
            # older instances outlive newer ones.
            v = sorted(v, key=lambda x: x[1].launch_time)
            for e,(fqdn_alias,array_instance) in enumerate(v):
                e = str(e).zfill(2)
                fqdn = fqdn_alias.replace('XX', e)
                unseen[array_instance.tags['domain_base']].discard(fqdn)
                rrset = get_records(array_instance.tags['domain_base'])
                process_record(rrset, fqdn, array_instance.dns_name)
    # Delete records whose instances no longer exist.
    for d,records in unseen.iteritems():
        rrset = get_records(d)
        for rname in records:
            logger.info("Deleting: %s" % rname)
            record = find_record(rrset, rname)
            delete_record(rrset, record)
    # Push all accumulated changes, one commit per hosted zone.
    for d in domain_bases:
        rrset = get_records(d)
        if rrset.changes:
            logger.info("committing changes for domain base: %s" % d)
            logger.debug(pformat(rrset.changes))
            rrset.commit()
def do_update():
    """Gather all EC2 instances and sync their Route53 DNS records."""
    process_all(gatherinstances())

if __name__ == '__main__':
    # Standalone run: log everything to stderr and time the full sync.
    from time import time
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    t = time()
    do_update()
    print('took %f seconds to run' % (time() - t))
| StarcoderdataPython |
8081501 | import pytest
import numpy as np
from stellarphot.photometry import calculate_noise
from stellarphot.core import Camera
GAINS = [1.0, 1.5, 2.0]
def test_calc_noise_defaults():
    # With no noise sources supplied at all, the result must be exactly zero.
    noise = calculate_noise()
    assert noise == 0
@pytest.mark.parametrize('aperture_area', [5, 20])
@pytest.mark.parametrize('gain', GAINS)
def test_calc_noise_source_only(gain, aperture_area):
    # With Poisson error in the source as the only noise term, the noise
    # is the square root of the source counts (in electrons).
    counts = 100
    actual = calculate_noise(gain=gain,
                             flux=counts,
                             aperture_area=aperture_area)
    np.testing.assert_allclose(actual, np.sqrt(gain * counts))
@pytest.mark.parametrize('aperture_area', [5, 20])
@pytest.mark.parametrize('gain', GAINS)
def test_calc_noise_dark_only(gain, aperture_area):
    # Dark current noise is Poisson in (rate * area * exposure); the gain
    # should have no effect on it.
    dark_current = 10
    exposure = 20
    actual = calculate_noise(gain=gain,
                             dark_current_per_sec=dark_current,
                             aperture_area=aperture_area,
                             exposure=exposure)
    np.testing.assert_allclose(
        actual, np.sqrt(dark_current * aperture_area * exposure))
@pytest.mark.parametrize('aperture_area', [5, 20])
@pytest.mark.parametrize('gain', GAINS)
def test_calc_read_noise_only(gain, aperture_area):
    # Read noise enters once per pixel, so the total is
    # sqrt(n_pix * read_noise**2); gain plays no role.
    read_noise = 10
    actual = calculate_noise(gain=gain,
                             read_noise=read_noise,
                             aperture_area=aperture_area)
    np.testing.assert_allclose(actual, np.sqrt(aperture_area * read_noise**2))
@pytest.mark.parametrize('aperture_area', [5, 20])
@pytest.mark.parametrize('gain', GAINS)
def test_calc_sky_only(gain, aperture_area):
    # Sky background is Poisson per pixel, scaled by the gain.
    sky = 10
    actual = calculate_noise(gain=gain,
                             aperture_area=aperture_area,
                             sky_per_pix=sky)
    np.testing.assert_allclose(actual, np.sqrt(gain * aperture_area * sky))
def test_annulus_area_term():
    # With a sky annulus, the sky term picks up the extra factor
    # (1 + aperture_area / annulus_area).
    aperture_area = 20
    annulus_area = 10 * aperture_area  # annulus is much larger than aperture
    gain = 1.5
    sky = 10
    expected = np.sqrt(
        gain * aperture_area * (1 + aperture_area / annulus_area) * sky)
    actual = calculate_noise(gain=gain,
                             aperture_area=aperture_area,
                             annulus_area=annulus_area,
                             sky_per_pix=sky)
    np.testing.assert_allclose(actual, expected)
@pytest.mark.parametrize('digit,expected',
                         ((False, 89.078616), (True, 89.10182)))
def test_calc_noise_messy_case(digit, expected):
    # Regression test with every parameter set at once; the expected
    # values were computed with an external calculator.
    kwargs = dict(flux=1000,
                  gain=1.5,
                  dark_current_per_sec=7,
                  read_noise=12,
                  sky_per_pix=15,
                  exposure=18,
                  aperture_area=20,
                  annulus_area=200,  # 10x the aperture, as in the other tests
                  include_digitization=digit)
    np.testing.assert_allclose(calculate_noise(**kwargs), expected)
| StarcoderdataPython |
3338402 | import unittest
from unittest.mock import patch
from gym_powerworld.envs import voltage_control_env
# noinspection PyProtectedMember
from gym_powerworld.envs.voltage_control_env import LOSS, \
MinLoadBelowMinGenError, MaxLoadAboveMaxGenError, OutOfScenariosError, \
MIN_V, MAX_V, MIN_V_SCALED, MAX_V_SCALED, _scale_voltages
import os
import pandas as pd
import numpy as np
import numpy.testing as np_test
import logging
import warnings
from esa import SAW, PowerWorldError
from gym.spaces import Discrete
import shutil
# Get full path to this directory.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Cases are within this directory.
CASE_DIR = os.path.join(THIS_DIR, 'cases')
# IEEE 14 bus case files (.pwb model plus display .axd files).
DIR_14 = os.path.join(CASE_DIR, 'ieee_14')
PWB_14 = os.path.join(DIR_14, 'IEEE 14 bus.pwb')
AXD_14 = os.path.join(DIR_14, 'IEEE 14 bus.axd')
CONTOUR = os.path.join(DIR_14, 'contour.axd')
# Case with 3 gens modeled as condensers:
PWB_14_CONDENSERS = os.path.join(DIR_14, 'IEEE 14 bus condensers.pwb')
# Case with min and max MW limits on all 5 generators.
PWB_14_LIMITS = os.path.join(DIR_14, 'IEEE 14 bus limits.pwb')
# IL 200 (Illinois synthetic 200-bus case)
PWB_200 = os.path.join(CASE_DIR, 'il_200', 'ACTIVSg200.pwb')
# TX 2000 (Texas synthetic 2000-bus case)
PWB_2000 = os.path.join(CASE_DIR, 'tx_2000',
                        'ACTIVSg2000_AUG-09-2018_Ride_mod.PWB')
# Define some constants related to the IEEE 14 bus case.
N_GENS_14 = 5
N_LOADS_14 = 11
# Total system load in MW for the 14 bus case.
LOAD_MW_14 = 259.0
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusTestCase(unittest.TestCase):
"""Test initializing the environment with the 14 bus model."""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 2
cls.min_load_factor = 0.5
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 9
cls.gen_voltage_range = (0.9, 1.1)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 100
cls.env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer
)
# For easy comparison with the original case, get a fresh SAW
# object. Do not make any changes to this, use only "get" type
# methods.
cls.saw = SAW(PWB_14, early_bind=True)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.saw.exit()
cls.env.close()
def test_branches_to_open(self):
"""Ensure branches_to_open is the right shape and is in the
appropriate range.
"""
self.assertIsNotNone(self.env.branches_to_open)
self.assertEqual((self.num_scenarios,),
self.env.branches_to_open.shape)
self.assertTrue(self.env.branches_to_open.min() >= 0)
self.assertTrue(
self.env.branches_to_open.max()
< self.env.branch_init_data.shape[0])
def test_saw_load_state(self):
"""Ensure that calling saw.LoadState() works (testing that
saw.SaveState() has already been called).
"""
# NOTE: This changes the state of self.env.saw, which can cause
# issues in other tests.
self.assertIsNone(self.env.saw.LoadState())
    def test_gen_key_fields(self):
        """Ensure the gen key fields are correct. Hard coding style."""
        self.assertListEqual(['BusNum', 'GenID'], self.env.gen_key_fields)

    def test_gen_init_fields(self):
        """gen_init_fields should be the key fields plus GEN_INIT_FIELDS."""
        self.assertListEqual(
            self.env.gen_key_fields + self.env.GEN_INIT_FIELDS,
            self.env.gen_init_fields)

    def test_gen_obs_fields(self):
        """gen_obs_fields should be the key fields plus GEN_OBS_FIELDS."""
        self.assertListEqual(self.env.gen_key_fields + self.env.GEN_OBS_FIELDS,
                             self.env.gen_obs_fields)

    def test_gen_init_data(self):
        """gen_init_data is a DataFrame with gen_init_fields as columns."""
        self.assertIsInstance(self.env.gen_init_data, pd.DataFrame)
        self.assertListEqual(self.env.gen_init_fields,
                             self.env.gen_init_data.columns.tolist())

    def test_num_gens(self):
        # The 14 bus case has 5 generators.
        self.assertEqual(5, self.env.num_gens)

    def test_zero_negative_gen_mw_limits(self):
        """Ensure the _zero_negative_gen_mw_limits function works as
        intended.
        """
        # First, ensure it has been called.
        self.assertTrue((self.env.gen_init_data['GenMWMin'] >= 0).all())

        # Now, patch gen_init_data and saw and call the function.
        gen_copy = self.env.gen_init_data.copy(deep=True)
        gen_copy['GenMWMin'] = -10

        # I wanted to use self.assertLogs, but that has trouble working
        # with nested context managers...
        with patch.object(self.env, '_gen_init_data', new=gen_copy):
            with patch.object(self.env, 'saw') as p:
                self.env._zero_negative_gen_mw_limits()

        # The gen_copy should have had its GenMWMin values zeroed out.
        self.assertTrue((gen_copy['GenMWMin'] == 0).all())

        # change_parameters_multiple_element_df should have been
        # called.
        p.change_and_confirm_params_multiple_element.assert_called_once()

        # Ensure the change was reflected in PowerWorld.
        gens = self.env.saw.GetParametersMultipleElement(
            'gen', ['BusNum', 'GenID', 'GenMWMin'])
        self.assertTrue((gens['GenMWMin'] == 0).all())

        # Finally, (this could have been done first, but oh well), make
        # sure that the case started with negative GenMWMin values.
        gens_orig = self.saw.GetParametersMultipleElement(
            'gen', ['BusNum', 'GenID', 'GenMWMin'])
        self.assertTrue((gens_orig['GenMWMin'] < 0).any())

    def test_gen_mw_capacity(self):
        # The generators are all set to a ridiculous maximum of 10 GW.
        self.assertEqual(5 * 10000.0, self.env.gen_mw_capacity)

    def test_gen_mvar_produce_capacity(self):
        """Sum of positive var limits, hard-coded from the case."""
        self.assertEqual(50. + 40. + 24. + 24.,
                         round(self.env.gen_mvar_produce_capacity, 2))

    def test_gen_mvar_consume_capacity(self):
        """Sum of negative var limits, hard-coded from the case."""
        self.assertEqual(-40. - 6. - 6.,
                         round(self.env.gen_mvar_consume_capacity, 2))

    def test_load_key_fields(self):
        # Hard coding!
        self.assertListEqual(self.env.load_key_fields, ['BusNum', 'LoadID'])

    def test_load_init_fields(self):
        """load_init_fields should be the key fields plus LOAD_INIT_FIELDS."""
        self.assertListEqual(self.env.load_init_fields,
                             self.env.load_key_fields
                             + self.env.LOAD_INIT_FIELDS)

    def test_load_obs_fields(self):
        """load_obs_fields should be the key fields plus LOAD_OBS_FIELDS."""
        self.assertListEqual(
            self.env.load_obs_fields,
            self.env.load_key_fields + self.env.LOAD_OBS_FIELDS)

    def test_load_init_data(self):
        """load_init_data is a DataFrame with load_init_fields as columns."""
        self.assertIsInstance(self.env.load_init_data, pd.DataFrame)
        self.assertListEqual(self.env.load_init_data.columns.tolist(),
                             self.env.load_init_fields)

    def test_num_loads(self):
        # The 14 bus case has 11 loads.
        self.assertEqual(11, self.env.num_loads)

    def test_zero_i_z_loads(self):
        """Patch the environment's load_init_data and ensure the method is
        working properly.
        """
        data = self.env.load_init_data.copy(deep=True)
        data[voltage_control_env.LOAD_I_Z] = 1

        with patch.object(self.env, '_load_init_data', new=data):
            with patch.object(self.env, 'saw') as p:
                self.env._zero_i_z_loads()

        self.assertTrue((data[voltage_control_env.LOAD_I_Z] == 0).all().all())
        p.change_and_confirm_params_multiple_element.assert_called_once()

    def test_bus_key_fields(self):
        """Hard-coded check of the bus key fields."""
        self.assertListEqual(['BusNum'], self.env.bus_key_fields)

    def test_bus_obs_fields(self):
        """bus_obs_fields should be the key fields plus BUS_OBS_FIELDS."""
        self.assertListEqual(self.env.bus_key_fields + self.env.BUS_OBS_FIELDS,
                             self.env.bus_obs_fields)

    def test_bus_init_data(self):
        """bus_init_data is a DataFrame with bus_init_fields as columns."""
        self.assertIsInstance(self.env.bus_init_data, pd.DataFrame)
        self.assertListEqual(self.env.bus_init_fields,
                             self.env.bus_init_data.columns.tolist())

    def test_num_buses(self):
        # The 14 bus case has, unsurprisingly, 14 buses.
        self.assertEqual(14, self.env.num_buses)

    def test_max_load_mw(self):
        # System loading obtained from PowerWorld's Case Summary
        # dialogue.
        self.assertEqual(round(self.env.max_load_mw, 2),
                         self.max_load_factor * LOAD_MW_14)

    def test_check_max_load_exception(self):
        """Ensure that an exception is thrown if maximum loading exceeds
        maximum generation.
        """
        with patch.object(self.env, 'max_load_mw', 10):
            with patch.object(self.env, 'gen_mw_capacity', 9.9):
                with self.assertRaisesRegex(MaxLoadAboveMaxGenError,
                                            'The given max_load'):
                    self.env._check_max_load(2)

    def test_check_max_load_warning(self):
        """Ensure we get a warning if the generation is in excess of
        2x maximum load.
        """
        with self.assertLogs(logger=self.env.log, level='WARNING'):
            self.env._check_max_load(2)

    def test_min_load_mw(self):
        # System loading obtained from PowerWorld's Case Summary
        # dialogue.
        self.assertEqual(round(self.env.min_load_mw, 2),
                         self.min_load_factor * LOAD_MW_14)

    def test_check_min_load(self):
        """Exception should be raised if minimum loading is below
        minimum generation.
        """
        # Get generator data.
        gens = self.env.gen_init_data.copy(deep=True)
        # Increase all minimum generation.
        gens['GenMWMin'] = 10
        # Patch:
        with patch.object(self.env, '_gen_init_data', gens):
            with patch.object(self.env, 'min_load_mw', 9.9):
                with self.assertRaisesRegex(MinLoadBelowMinGenError,
                                            'The given min_load'):
                    self.env._check_min_load(2)

    def test_total_load_mw(self):
        """Drawn scenario totals are 1D, one per scenario, in bounds."""
        # Ensure it's 1D.
        self.assertEqual(len(self.env.total_load_mw.shape), 1)
        # Check shape.
        self.assertEqual(self.env.total_load_mw.shape[0],
                         self.env.num_scenarios)
        # Ensure all loads are less than the maximum.
        np_test.assert_array_less(self.env.total_load_mw, self.env.max_load_mw)
        # Ensure all loads are greater than the minimum.
        np_test.assert_array_less(self.env.min_load_mw, self.env.total_load_mw)

    def test_loads_mw(self):
        """Individual load draws sum to the per-scenario totals."""
        # Check shape
        self.assertEqual(self.env.loads_mw.shape,
                         (self.num_scenarios, self.env.num_loads))
        # Ensure the individual loads match total loading.
        np_test.assert_allclose(self.env.loads_mw.sum(axis=1),
                                self.env.total_load_mw, rtol=1e-6)

    def test_loads_mvar(self):
        """Var draws are shaped correctly and lead at roughly the
        requested probability.
        """
        # Check shape.
        self.assertEqual(self.env.loads_mvar.shape,
                         (self.num_scenarios, self.env.num_loads))

        # Ensure that portion of negative var loads (leading power
        # factor) is close to the lead_pf_probability.
        neg_portion = (self.env.loads_mvar < 0).sum().sum() \
            / (self.num_scenarios * self.env.num_loads)

        # Ensure we're within 0.75 * prob and 1.25 * prob. This seems
        # reasonable.
        self.assertLessEqual(neg_portion, 1.25 * self.lead_pf_probability)
        self.assertGreaterEqual(neg_portion, 0.75 * self.lead_pf_probability)

    def test_load_power_factors(self):
        """Ensure all loads have a power factor greater than the min."""
        # Ensure all power factors are valid. pf = P / |S|
        s_mag = np.sqrt(np.square(self.env.loads_mw)
                        + np.square(self.env.loads_mvar))

        # Suppress numpy warnings - we'll be replacing NaNs.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            pf = self.env.loads_mw / s_mag

        # For sake of testing, set loads with 0 power to have a
        # power factor of 1.
        pf[np.isnan(pf)] = 1
        np_test.assert_array_less(self.min_load_pf, pf)

    def test_loads_on_match_probability(self):
        """Ensure the proportion of loads which are on matches the
        load_on_probability to a reasonable tolerance.
        """
        # First, ensure the zeros match up between loads_mw and loads_mvar.
        mw_0 = self.env.loads_mw == 0
        np.testing.assert_array_equal(mw_0, self.env.loads_mvar == 0)

        # Now, ensure the total portion of loads that are "on" is close
        # to the load_on_probability.
        # noinspection PyUnresolvedReferences
        portion = (~mw_0).sum().sum() \
            / (self.num_scenarios * self.env.num_loads)

        # Ensure we're within 0.75 * prob and 1.25 * prob. This seems
        # reasonable.
        self.assertLessEqual(portion, 1.25 * self.load_on_probability)
        self.assertGreaterEqual(portion, 0.75 * self.load_on_probability)

    def test_gen_mw(self):
        """Generation dispatch covers load plus the assumed losses."""
        # Start with shape.
        self.assertEqual(self.env.gen_mw.shape,
                         (self.num_scenarios, self.env.num_gens))

        # Ensure total generation is close to total load plus losses.
        np_test.assert_allclose(self.env.gen_mw.sum(axis=1),
                                self.env.total_load_mw * (1 + LOSS), rtol=1e-6)

        # TODO: Since the generators in this case have ridiculously high
        # maximums, I'm not going to bother testing that all gens are
        # within their bounds. When we move to a more realistic case,
        # e.g. the Texas 2000 bus case, we need to test that.
        #
        # # Ensure generator outputs are within bounds.
        # for gen_idx, row in enumerate(env.gen_init_data.itertuples()):
        #     gen_output = env.gen_mw[:, gen_idx]
        #     # noinspection PyUnresolvedReferences
        #     self.assertTrue((gen_output <= row.GenMWMax).all())
        #     # noinspection PyUnresolvedReferences
        #     self.assertTrue((gen_output >= row.GenMWMin).all())

    def test_gen_v(self):
        """Generator voltage set points fall within gen_voltage_range."""
        # Shape.
        self.assertEqual(self.env.gen_v.shape,
                         (self.env.num_scenarios, self.env.num_gens))

        # Values.
        self.assertTrue(
            ((self.env.gen_v >= self.gen_voltage_range[0]).all()
             and
             (self.env.gen_v <= self.gen_voltage_range[1]).all()
             )
        )

    def test_action_space(self):
        """Discrete action space: one action per gen/bin pair + no-op."""
        self.assertIsInstance(self.env.action_space, Discrete)
        # Plus 1 because no-op action
        self.assertEqual(self.env.action_space.n,
                         self.env.num_gens * self.num_gen_voltage_bins + 1)

    def test_gen_bins(self):
        # Hard coding!
        np.testing.assert_allclose(
            np.array([0.9, 0.925, 0.95, 0.975, 1.0, 1.025, 1.05, 1.075, 1.1]),
            self.env.gen_bins)

    def test_gen_action_array(self):
        """gen_action_array maps each non-no-op action to a
        (generator bus, voltage bin index) pair.
        """
        # Minus 1 because no-op action.
        self.assertEqual(self.env.action_space.n - 1,
                         self.env.gen_action_array.shape[0])
        self.assertEqual(2, self.env.gen_action_array.shape[1])

        # Initialize array for comparison. Again, -1 due to no-op.
        a = np.zeros(shape=(self.env.action_space.n - 1, 2), dtype=int)

        # Put generator bus numbers in column 0. No need to worry about
        # multiple generators at the same bus for this case.
        a[:, 0] = np.array(
            self.env.gen_init_data['BusNum'].tolist()
            * self.num_gen_voltage_bins)

        # Write a crappy, simple, loop to put the indices of the
        # generator voltage levels in.
        b = []
        for i in range(self.num_gen_voltage_bins):
            for _ in range(self.env.num_gens):
                b.append(i)

        a[:, 1] = np.array(b)

        np.testing.assert_array_equal(a, self.env.gen_action_array)

    def test_num_obs(self):
        """Ensure the number of observations matches the expected number
        """
        # 14 buses + 3 * 5 gens + 3 * 11 loads
        self.assertEqual(14 + 3 * 5 + 3 * 11, self.env.num_obs)

    def test_observation_space(self):
        """Ensure the observation space has the appropriate properties.
        """
        # Test shape.
        self.assertEqual(self.env.observation_space.shape, (self.env.num_obs,))

        # Test bounds. Bus voltages should have a high of 2, and the
        # rest should have a high of 1.
        self.assertTrue((self.env.observation_space.high[
                         0:self.env.num_buses] == 2.).all())
        self.assertTrue((self.env.observation_space.high[
                         self.env.num_buses:] == 1.).all())
        self.assertTrue((self.env.observation_space.low == 0.).all())

    def test_observation_attributes(self):
        """After initialization, several observation related attributes
        should be initialized to None.
        """
        self.assertIsNone(self.env.gen_obs_data)
        self.assertIsNone(self.env.load_obs_data)
        self.assertIsNone(self.env.bus_obs_data)

        self.assertIsNone(self.env.gen_obs_data_prev)
        self.assertIsNone(self.env.load_obs_data_prev)
        self.assertIsNone(self.env.bus_obs_data_prev)

    def test_action_count(self):
        """After initialization, the action count should be 0."""
        self.assertEqual(0, self.env.action_count)

    def test_reward_matches(self):
        """For this simple initialization, the rewards should be the
        same as the class constant.
        """
        self.assertDictEqual(self.env.rewards, self.env.REWARDS)
def test_override_reward(self):
"""Ensure overriding a portion of the rewards behaves as
expected.
"""
# Create a new env, but use new rewards.
env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=10,
max_load_factor=self.max_load_factor,
min_load_factor=self.min_load_factor,
num_gen_voltage_bins=5,
rewards={'v_delta': 1000})
# Loop and assert.
for key, value in env.rewards.items():
if key == 'v_delta':
self.assertNotEqual(env.REWARDS[key], value)
else:
self.assertEqual(env.REWARDS[key], value)
# Ensure the keys are the same.
self.assertListEqual(list(env.rewards.keys()),
list(env.REWARDS.keys()))
    def test_bad_reward_key(self):
        """Ensure an exception is raised if a bad reward key is given.
        """
        # Note 'v_detla' (sic) is an intentionally misspelled key.
        with self.assertRaisesRegex(KeyError, 'The given rewards key, v_detl'):
            _ = voltage_control_env.DiscreteVoltageControlEnv(
                pwb_path=PWB_14, num_scenarios=10,
                max_load_factor=self.max_load_factor,
                min_load_factor=self.min_load_factor,
                rewards={'v_detla': 1000})

    def test_log_columns(self):
        """Ensure the log columns are as they should be."""
        self.assertListEqual(
            ['episode', 'action_taken', 'reward']
            + [f'bus_{x+1}_v' for x in range(14)]
            + [f'gen_{x}_{y}' for x, y in zip([1, 2, 3, 6, 8], [1] * 5)],
            self.env.log_columns
        )

    def test_log_array(self):
        """Log array is log_buffer rows by (3 + buses + gens) columns."""
        self.assertEqual(self.env.log_array.shape,
                         # 14 + 3 --> num buses plus ep, action, reward,
                         # and num gens.
                         (self.log_buffer, 14 + 3 + 5))
def test_no_op_action(self):
# Cover several data types because the action comes directly
# from a neural network, which could have different data types.
self.assertEqual(0, self.env.no_op_action)
self.assertEqual(0.0, self.env.no_op_action)
self.assertEqual(np.float64(0), self.env.no_op_action)
self.assertEqual(np.float32(0), self.env.no_op_action)
self.assertEqual(np.int(0.0), self.env.no_op_action)
def test_last_action(self):
self.assertIsNone(self.env.last_action)
def test_gen_var_lim_zero_arr(self):
"""Ensure the gen_var_lim_zero_arr is as expected."""
# The swing bus (which is listed first) and generator at bus 3
# should have 0 limits.
mask = np.ones(self.env.gen_var_lim_zero_arr.shape[0], dtype=bool)
mask[0] = False
mask[2] = False
self.assertTrue(self.env.gen_var_lim_zero_arr[~mask].all())
self.assertFalse(self.env.gen_var_lim_zero_arr[mask].any())
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusLimitsTestCase(unittest.TestCase):
    """Test initializing the environment with the 14 bus model with
    limits added.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        # Create a ton of scenarios so the generator dispatch is
        # thoroughly exercised.
        cls.num_scenarios = 100000
        cls.max_load_factor = 1.44
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.9, 1.1)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 100
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14_LIMITS, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype,
            log_buffer=cls.log_buffer
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        # Terminate the environment's PowerWorld connection.
        cls.env.close()

    # noinspection PyUnresolvedReferences
    def test_gens_in_bounds(self):
        """All dispatched generation respects the case's MW limits."""
        self.assertTrue(
            (self.env.gen_mw
             <= self.env.gen_init_data['GenMWMax'].to_numpy()).all()
        )
        self.assertTrue(
            (self.env.gen_mw
             >= self.env.gen_init_data['GenMWMin'].to_numpy()).all()
        )

    def test_gen_meets_load(self):
        """Total dispatch equals total load plus the assumed losses."""
        np.testing.assert_allclose(self.env.total_load_mw * (1 + LOSS),
                                   self.env.gen_mw.sum(axis=1))
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusResetTestCase(unittest.TestCase):
    """Test the reset method of the environment."""

    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 2
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.9, 1.1)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype
        )

        # For easy comparison with the original case, get a fresh SAW
        # object. Do not make any changes to this, use only "get" type
        # methods.
        cls.saw = SAW(PWB_14, early_bind=True)

        # Extract generator data needed for testing the reset method.
        cls.gens = cls.saw.GetParametersMultipleElement(
            ObjectType='gen',
            ParamList=cls.env.gen_key_fields + cls.env.GEN_RESET_FIELDS)

        # Extract load data needed for testing the reset method.
        cls.loads = cls.saw.GetParametersMultipleElement(
            ObjectType='load',
            ParamList=cls.env.load_key_fields + cls.env.LOAD_RESET_FIELDS
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        # Close both PowerWorld connections.
        cls.saw.exit()
        cls.env.close()

    def setUp(self) -> None:
        """Reset the scenario index for each run, and restore the
        case.
        """
        self.env.scenario_idx = 0
        self.env.saw.LoadState()

    def test_scenario_idx_increments(self):
        """Ensure subsequent calls to reset update the scenario index.
        """
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
        with patch.object(self.env.saw,
                          'change_parameters_multiple_element_df'):
            self.env.reset()
            self.assertEqual(1, self.env.scenario_idx)
            self.env.reset()
            self.assertEqual(2, self.env.scenario_idx)
            self.env.reset()
            self.assertEqual(3, self.env.scenario_idx)

    def test_action_count_reset(self):
        """Ensure subsequent calls to reset reset the action_count."""
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
        with patch.object(self.env.saw,
                          'change_parameters_multiple_element_df'):
            self.env.action_count = 10
            self.env.reset()
            self.assertEqual(0, self.env.action_count)
            self.env.action_count = 17
            self.env.reset()
            self.assertEqual(0, self.env.action_count)
            self.env.action_count = 1
            self.env.reset()
            self.assertEqual(0, self.env.action_count)

    def test_load_state_called(self):
        """Ensure the SAW object's LoadState method is called in reset.
        """
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
        with patch.object(self.env.saw,
                          'change_parameters_multiple_element_df'):
            with patch.object(
                    self.env.saw, 'LoadState',
                    side_effect=self.env.saw.LoadState) as p:
                self.env.reset()

        p.assert_called_once()

    def test_gens_and_loads_set_correctly(self):
        """Ensure that the appropriate generators get opened and closed,
        and that the power levels get set correctly in the case for both
        generators and loads.
        """
        # There are 5 generators in the 14 bus case. In the base case,
        # only gens at buses 1 and 2 are providing active power, but
        # the others are "Closed" and thus regulating their voltage.
        # We'll patch the environment's gen_mw to have all gens on
        # and sharing the load evenly except the generator at bus 2.
        # We'll also patch all gens to be regulating to 1.05 per unit.
        p = LOAD_MW_14 / 4
        gen_mw_row = np.array([p, 0, p, p, p])
        gen_mw = self.env.gen_mw.copy()
        gen_mw[0, :] = gen_mw_row

        gen_v_row = np.array([1.05] * 5)
        gen_v = self.env.gen_v.copy()
        gen_v[0, :] = gen_v_row

        # Extract the original loading, but we'll bump one load by 1 MW
        # and 1 MVAR and decrement another by 1 MW and 1 MVAR.
        loads_mw_row = self.loads['LoadSMW'].to_numpy()
        loads_mw_row[3] += 1
        loads_mw_row[4] -= 1
        loads_mw = self.env.loads_mw.copy()
        loads_mw[0, :] = loads_mw_row

        loads_mvar_row = self.loads['LoadSMVR'].to_numpy()
        loads_mvar_row[3] += 1
        loads_mvar_row[4] -= 1
        loads_mvar = self.env.loads_mvar.copy()
        loads_mvar[0, :] = loads_mvar_row

        # Patch the scenario index, generator output, and loading. Then
        # reset the environment.
        with patch.object(self.env, 'gen_mw', new=gen_mw):
            with patch.object(self.env, 'gen_v', new=gen_v):
                with patch.object(self.env, 'loads_mw', new=loads_mw):
                    with patch.object(self.env, 'loads_mvar', new=loads_mvar):
                        # Patch branches_to_open so a line does not get
                        # opened.
                        with patch.object(self.env, 'branches_to_open',
                                          new=None):
                            self.env.reset()

        # Pull the generator data from PowerWorld and ensure that both
        # the status and output match up.
        gen_reset_data = self.env.saw.GetParametersMultipleElement(
            ObjectType='gen',
            ParamList=self.env.gen_key_fields + self.env.GEN_RESET_FIELDS)

        # All gens except for the 2nd should be closed.
        status = ['Closed'] * 5
        status[1] = 'Open'
        self.assertListEqual(status, gen_reset_data['GenStatus'].tolist())

        # Excluding the slack, generator MW output should exactly match
        # what was commanded.
        np.testing.assert_allclose(
            gen_mw_row[1:], gen_reset_data['GenMW'].to_numpy()[1:])

        # The slack should be equal to within our assumed line losses.
        np.testing.assert_allclose(
            gen_mw_row[0], gen_reset_data['GenMW'].to_numpy()[0],
            rtol=LOSS, atol=0
        )

        # Generator voltage setpoints should match.
        np.testing.assert_allclose(
            gen_v_row, gen_reset_data['GenVoltSet'].to_numpy()
        )

        # Pull the load data from PowerWorld and ensure that both the
        # MW and MVAR outputs match up.
        load_init_data = self.env.saw.GetParametersMultipleElement(
            ObjectType='load',
            ParamList=self.env.load_key_fields + self.env.LOAD_RESET_FIELDS
        )

        np.testing.assert_allclose(
            loads_mw_row, load_init_data['LoadSMW'].to_numpy()
        )
        np.testing.assert_allclose(
            loads_mvar_row, load_init_data['LoadSMVR'].to_numpy()
        )

    def test_failed_power_flow(self):
        """Ensure that if the power flow fails to solve, we move on
        to the next scenario.
        """
        # Patch SolvePowerFlow so that the second call fails, while
        # the first, third, and fourth succeed.
        with patch.object(
                self.env.saw, 'SolvePowerFlow',
                side_effect=[None, PowerWorldError('failure'), None,
                             None]):
            self.env.reset()

        # Our first attempt should fail, and the second should succeed.
        # The index is always bumped at the end of each iteration, so
        # it should end up at 2 (starts at 0, bumped to 1 after first
        # failed iteration, bumped to 2 after second successful
        # iteration).
        self.assertEqual(2, self.env.scenario_idx)

    def test_hit_max_iterations(self):
        """Exception should be raised once all scenarios are exhausted.
        """
        # We want every other power flow solve to fail.
        side_effect = [None, PowerWorldError('failure')] * 10
        with patch.object(self.env.saw, 'SolvePowerFlow',
                          side_effect=side_effect):
            with patch.object(self.env, 'num_scenarios', new=5):
                with self.assertRaisesRegex(
                        OutOfScenariosError,
                        'We have gone through all scenarios'):
                    self.env.reset()

    def test_reset_returns_proper_observation(self):
        """Ensure a single call to reset calls _get_observation and
        returns the observation.
        """
        with patch.object(self.env, '_get_observation',
                          side_effect=self.env._get_observation) as p:
            obs = self.env.reset()

        # _get_observation should be called once only. Note if we get
        # into a bad state where the voltages are two low, it may
        # be called more than once. Bad test design due to the fact
        # we can't just spin up new ESA instances for each test.
        p.assert_called_once()
        self.assertIsInstance(obs, np.ndarray)
        self.assertEqual(obs.shape, self.env.observation_space.shape)

    def test_extra_reset_actions_called(self):
        """Reset should perform the extra per-scenario setup."""
        # NOTE(review): this test is identical to
        # test_set_gens_for_scenario_called below - it may have been
        # intended to patch a different "extra reset actions" method.
        # Confirm against the environment implementation.
        with patch.object(self.env, '_set_gens_for_scenario') as p:
            self.env.reset()

        p.assert_called_once()

    def test_set_gens_for_scenario_called(self):
        """Reset should set up the generators for the scenario."""
        with patch.object(self.env, '_set_gens_for_scenario') as p:
            self.env.reset()

        p.assert_called_once()

    def test_set_loads_for_scenario_called(self):
        """Reset should set up the loads for the scenario."""
        with patch.object(self.env, '_set_loads_for_scenario') as p:
            with patch.object(self.env, '_solve_and_observe'):
                self.env.reset()

        p.assert_called_once()

    def test_solve_and_observe_called(self):
        """Reset should solve the power flow and take an observation."""
        with patch.object(self.env, '_solve_and_observe') as p:
            self.env.reset()

        p.assert_called_once()

    def test_current_reward_cleared(self):
        """Reset should clear the current reward back to NaN."""
        self.env.current_reward = 10
        self.env.reset()
        self.assertTrue(np.isnan(self.env.current_reward))

    def test_last_action_cleared(self):
        """Reset should clear the last action back to None."""
        self.env.last_action = 7
        self.env.reset()
        self.assertIsNone(self.env.last_action)

    def test_branch_opened(self):
        """Ensure branch is opened when reset is called."""
        # Ensure the branch starts closed.
        # Rely on the fact that our setup method sets the scenario index
        # to 0.
        branch_idx = self.env.branches_to_open[0]
        branch = self.env.branch_init_data.iloc[branch_idx]
        kf = self.env.branch_key_fields
        branch_state = self.env.saw.GetParametersSingleElement(
            ObjectType='branch',
            ParamList=kf + ['LineStatus'],
            Values=branch[kf].tolist() + [0]
        )

        self.assertEqual('Closed', branch_state['LineStatus'])

        # Now call reset and ensure the branch is open.
        self.env.reset()

        branch_state = self.env.saw.GetParametersSingleElement(
            ObjectType='branch',
            ParamList=kf + ['LineStatus'],
            Values=branch[kf].tolist() + [0]
        )

        self.assertEqual('Open', branch_state['LineStatus'])

    def test_scenario_init_success_array_properly_updated(self):
        """Ensure the scenario_init_success array gets updated as it
        should.
        """
        # Patch _solve_and_observe to sometimes succeed, sometimes fail.
        se = [None, PowerWorldError('stuff'), PowerWorldError('stuff'), None]
        with patch.object(self.env, '_solve_and_observe', side_effect=se):
            with patch.object(self.env, '_add_to_log'):
                # Run reset twice, as that's all that is required since
                # it'll keep trying on failures.
                for _ in range(2):
                    self.env.reset()

        # Test.
        expected = np.array([True if x is None else False for x in se])
        np.testing.assert_array_equal(
            expected, self.env.scenario_init_success[0:4]
        )

    def test_gen_var_frac(self):
        """Ensure the gen_var_frac comes back as it should. This is not
        a comprehensive test.
        """
        # Call reset to ensure we have observation data.
        self.env.reset()

        # Grab array for short-hand.
        a = self.env.gen_var_frac_arr

        # Check shape.
        self.assertEqual((self.env.num_gens,), a.shape)

        # All values should be in range [0, 1].
        self.assertTrue(((a >= 0.0) & (a <= 1.0)).all())

    def test_gen_var_frac_addl(self):
        """Additional testing for gen_var_frac."""
        # Call reset to get an observation.
        self.env.reset()

        # Get a copy of the observation data (for patching)
        obs = self.env.gen_obs_data.copy(deep=True)

        # Set the vars at 0 for the generator at bus 3, since it has a
        # 0 limit.
        obs.loc[2, 'GenMVRPercent'] = 0.0

        # Put the swing bus percentage above 100.
        obs.loc[0, 'GenMVRPercent'] = 101.00

        # Ensure that both generators at buses 2 and 0 are on.
        obs.loc[2, 'GenStatus'] = 'Closed'
        obs.loc[0, 'GenStatus'] = 'Closed'

        # Patch the observation and get the array.
        with patch.object(self.env, 'gen_obs_data', new=obs):
            a = self.env.gen_var_frac_arr

        # All values should be in range [0, 1].
        self.assertTrue(((a >= 0.0) & (a <= 1.0)).all())

        # Generator at bus 3 should read 1.0
        self.assertEqual(a[2], 1.0)

        # Swing bus should read 1.0
        self.assertEqual(a[0], 1.0)

        # Now, open up the generator at bus 3. It should then have a
        # fraction of 0.0.
        obs.loc[2, 'GenStatus'] = 'Open'
        with patch.object(self.env, 'gen_obs_data', new=obs):
            a = self.env.gen_var_frac_arr

        self.assertEqual(a[2], 0.0)
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusStepTestCase(unittest.TestCase):
    """Test the step method of the environment."""

    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 2
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.9, 1.1)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # Override only the no-op reward for this test case.
        cls.rewards = {'no_op': 50}
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            rewards=cls.rewards,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        # Terminate the environment's PowerWorld connection.
        cls.env.close()

    def setUp(self) -> None:
        """Reset the scenario index and call reset for each run.
        """
        self.env.scenario_idx = 0
        self.env.reset()

    def action_helper(self, action, gen_bus, v_set):
        """Helper for testing that actions work correctly.

        :param action: Action index to pass to step.
        :param gen_bus: Bus number of the generator the action targets.
        :param v_set: Expected per-unit voltage set point afterwards.
        """
        # We call reset() in setUp, so that last_action should get
        # reset.
        self.assertIsNone(self.env.last_action)

        # Perform the step.
        self.env.step(action)

        # Ensure the "last_action" is being performed properly.
        self.assertEqual(action, self.env.last_action)

        # Pull the targeted generator's set point from PowerWorld.
        # All gens in this case have ID '1'.
        gen_init_data = self.env.saw.GetParametersSingleElement(
            ObjectType='gen', ParamList=['BusNum', 'GenID', 'GenVoltSet'],
            Values=[gen_bus, '1', 0]
        )

        self.assertAlmostEqual(v_set, gen_init_data['GenVoltSet'], places=3)

    def test_action_0(self):
        """Action 0 is the no-op action."""
        # Call _solve_and_observe to rotate all the observation data.
        self.env._solve_and_observe()

        # Take the 0th action.
        _, reward, _, _ = self.env.step(0)

        # The observation DataFrames should be identical, indicating
        # that no action was taken.
        pd.testing.assert_frame_equal(
            self.env.bus_obs_data_prev, self.env.bus_obs_data)
        pd.testing.assert_frame_equal(
            self.env.gen_obs_data_prev, self.env.gen_obs_data
        )
        pd.testing.assert_frame_equal(
            self.env.load_obs_data_prev, self.env.load_obs_data
        )

        # The absolute value should be equal to the no-op reward.
        self.assertEqual(self.rewards['no_op'], abs(reward))

    def test_action_1(self):
        """Action 1 should set the 0th generator to the minimum."""
        # The 0th generator is at bus 1.
        self.action_helper(1, 1, self.gen_voltage_range[0])

    def test_action_last(self):
        """The last action should put the last generator to its maximum.
        """
        # The last generator is at bus 8.
        self.action_helper(self.env.action_space.n - 1, 8,
                           self.gen_voltage_range[1])

    def test_action_middle(self):
        """Test an action not on the book ends and ensure the generator
        set point is updated correctly.
        """
        # Action 18 should put the 3rd generator at the 4th voltage
        # level. The 3rd generator is at bus 3. Hard code the fact that
        # the bins are in 0.025pu increments.
        self.action_helper(18, 3, self.gen_voltage_range[0] + 3 * 0.025)

    def test_action_count_increments(self):
        """Ensure each subsequent call to step bumps the action_count.
        """
        self.assertEqual(0, self.env.action_count)
        self.env.step(4)
        self.assertEqual(1, self.env.action_count)
        self.env.step(10)
        self.assertEqual(2, self.env.action_count)
        self.env.step(13)
        self.assertEqual(3, self.env.action_count)

    def test_failed_power_flow(self):
        """If a PowerWorldError is raised while calling SolvePowerFlow,
        the observation should come back with zeros in the voltage
        positions, and the reward should be negative.
        """
        with patch.object(self.env.saw, 'SolvePowerFlow',
                          side_effect=PowerWorldError('failure')):
            obs, reward, done, info = self.env.step(12)

        # Ensure there are zeroes in the appropriate slots.
        self.assertTrue((obs[0:self.env.num_buses] == 0.0).all())

        # Ensure the observation is of the expected size.
        self.assertEqual(obs.shape, (self.env.num_obs,))

        # TODO: This fails because we actually can have numbers less
        # than 0. So, also need to fix the observation space
        # definition.
        self.assertTrue((obs[self.env.num_buses:] >= 0.0).all())

        # Make sure the reward is as expected.
        self.assertEqual(
            reward, self.env.rewards['action'] + self.env.rewards['fail'])

    def test_compute_end_of_episode_reward_called_correctly(self):
        """The end-of-episode reward should be computed only when the
        episode is done.
        """
        with patch.object(self.env, '_solve_and_observe'):
            with patch.object(self.env, '_compute_reward'):
                # Have _check_done return True.
                with patch.object(self.env, '_check_done',
                                  return_value=True):
                    with patch.object(self.env,
                                      '_compute_end_of_episode_reward') as p1:
                        self.env.step(1)

                # Now, have _check_done return False
                with patch.object(self.env, '_check_done',
                                  return_value=False):
                    with patch.object(self.env,
                                      '_compute_end_of_episode_reward') as p2:
                        self.env.step(1)

        p1.assert_called_once()
        self.assertEqual(p2.call_count, 0)

    def test_no_op_with_no_op_flag(self):
        """If the no_op_flag is True, the episode should end with no
        reward if the no-op action is taken.
        """
        with patch.object(self.env, 'no_op_flag', new=True):
            obs, reward, done, info = self.env.step(self.env.no_op_action)

        self.assertEqual(obs.shape, self.env.observation_space.shape)
        self.assertEqual(reward, 0)
        self.assertTrue(done)
        self.assertDictEqual(info, {'is_success': False})
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusComputeRewardTestCase(unittest.TestCase):
"""Test the _compute_reward method of the environment."""
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 2
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.95, 1.05)
        cls.low_v = 0.95
        cls.high_v = 1.05
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # Use a fully-specified reward dictionary so the reward tests
        # below can reference known values.
        cls.rewards = {
            "action": -10,
            "v_delta": 1,
            "v_in_bounds": 10,
            "v_out_bounds": -10,
            "gen_var_delta": 1,
            "fail": -1000,
            'no_op': 12
        }
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype,
            low_v=cls.low_v,
            high_v=cls.high_v
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        # Terminate the environment's PowerWorld connection.
        cls.env.close()

    def setUp(self) -> None:
        """Override the relevant observation DataFrames.
        """
        # 6 buses with unity per unit voltage.
        v_df = pd.DataFrame(
            [[1., 'a'], [1., 'b'], [1., 'c'], [1., 'd'], [1., 'e'], [1., 'f']],
            columns=['BusPUVolt', 'junk'])
        self.env.bus_obs_data_prev = v_df.copy()
        self.env.bus_obs_data = v_df.copy()

        # 6 gens at 80% var loading.
        var_df = pd.DataFrame(
            [[.8, 'a'], [.8, 'b'], [.8, 'c'], [.8, 'd'], [.8, 'e'], [.8, 'f']],
            columns=['GenMVRPercent', 'junk'])
        self.env.gen_obs_data_prev = var_df.copy()
        self.env.gen_obs_data = var_df.copy()

    def get_reward_no_action(self):
        """Helper to compute the reward but temporarily zero out the
        action penalty.
        """
        with patch.dict(self.env.rewards, {'action': 0}):
            reward = self.env._compute_reward()

        return reward

    def test_action_only(self):
        """No values change, should only get the action penalty."""
        self.assertEqual(self.env._compute_reward(), self.rewards['action'])

    def test_low_voltage_moved_up(self):
        """Test a single low bus voltage moving up, but not in bounds.
        """
        self.env.bus_obs_data_prev.loc[2, 'BusPUVolt'] = 0.8
        self.env.bus_obs_data.loc[2, 'BusPUVolt'] = 0.85

        # The bus voltage moved up 5 1/100ths per unit.
        self.assertAlmostEqual(self.get_reward_no_action(),
                               5 * self.rewards['v_delta'])

    def test_high_voltage_moved_down(self):
        """Test a single high bus voltage moving down, but not in bounds.
        """
        self.env.bus_obs_data_prev.loc[0, 'BusPUVolt'] = 1.1
        self.env.bus_obs_data.loc[0, 'BusPUVolt'] = 1.07

        # The bus voltage moved down 3 1/100ths per unit.
        self.assertAlmostEqual(self.get_reward_no_action(),
                               3 * self.rewards['v_delta'])
def test_low_voltage_moved_up_past_nominal(self):
"""Test a single low bus voltage moving up and exceeding nominal
voltage.
"""
self.env.bus_obs_data_prev.loc[2, 'BusPUVolt'] = 0.93
self.env.bus_obs_data.loc[2, 'BusPUVolt'] = 1.02
# The bus voltage should get credit for reducing its distance to
# nominal, as well as a bonus for moving into the good band.
self.assertAlmostEqual(
self.get_reward_no_action(),
# ((1.02 - 1) - (1 - 0.93)) * 100 = 5
5 * self.rewards['v_delta'] + self.rewards['v_in_bounds'])
def test_high_voltage_moved_down_past_nominal(self):
"""Test a single high bus voltage moving down and going below
nominal voltage.
"""
self.env.bus_obs_data_prev.loc[5, 'BusPUVolt'] = 1.1
self.env.bus_obs_data.loc[5, 'BusPUVolt'] = 0.98
# The bus voltage should get credit for moving to nominal, and
# also get a bonus for moving into the good band.
self.assertAlmostEqual(
self.get_reward_no_action(),
# ((1.1 - 1) - (1 - 0.98)) * 100 = 8
8 * self.rewards['v_delta'] + self.rewards['v_in_bounds'])
def test_low_voltage_moved_in_range(self):
"""Should also get a bonus for moving a voltage in bounds."""
self.env.bus_obs_data_prev.loc[1, 'BusPUVolt'] = 0.91
self.env.bus_obs_data.loc[1, 'BusPUVolt'] = 0.95
# The bus voltage moved up 4 1/100ths per unit, and also moved
# into the "good" range.
self.assertAlmostEqual(
self.get_reward_no_action(),
4 * self.rewards['v_delta'] + self.rewards['v_in_bounds'])
def test_high_voltage_moved_in_range(self):
"""Should also get a bonus for moving a voltage in bounds."""
self.env.bus_obs_data_prev.loc[3, 'BusPUVolt'] = 1.2
self.env.bus_obs_data.loc[3, 'BusPUVolt'] = 1.05
# The bus voltage moved by 15 1/100ths per unit, and also
# moved into the "good" range.
self.assertAlmostEqual(
self.get_reward_no_action(),
15 * self.rewards['v_delta'] + self.rewards['v_in_bounds'])
def test_high_and_low_moved_in_range(self):
"""Test multiple buses moving opposite directions, but in bounds
"""
self.env.bus_obs_data_prev.loc[3, 'BusPUVolt'] = 1.07
self.env.bus_obs_data.loc[3, 'BusPUVolt'] = 1.05
self.env.bus_obs_data_prev.loc[0, 'BusPUVolt'] = 0.91
self.env.bus_obs_data.loc[0, 'BusPUVolt'] = 1.05
self.assertAlmostEqual(
self.get_reward_no_action(),
# high moved down
2 * self.rewards['v_delta']
# low moved up, but overshot
+ ((1 - 0.91) - (1.05 - 1)) * 100 * self.rewards['v_delta']
# bonus for moving in band
+ 2 * self.rewards['v_in_bounds'])
def test_changes_but_all_in_bounds(self):
"""If voltages change, but all stay in bounds, there should be
no reward, only the penalty for taking an action.
"""
self.env.bus_obs_data_prev.loc[0, 'BusPUVolt'] = 0.95
self.env.bus_obs_data.loc[0, 'BusPUVolt'] = 0.96
self.env.bus_obs_data_prev.loc[1, 'BusPUVolt'] = 1.0
self.env.bus_obs_data.loc[1, 'BusPUVolt'] = 1.01
self.env.bus_obs_data_prev.loc[2, 'BusPUVolt'] = 1.05
self.env.bus_obs_data.loc[2, 'BusPUVolt'] = 1.01
self.env.bus_obs_data_prev.loc[3, 'BusPUVolt'] = 0.8
self.env.bus_obs_data.loc[3, 'BusPUVolt'] = 0.8
self.assertAlmostEqual(self.env._compute_reward(),
self.rewards['action'])
def test_low_v_gets_lower(self):
"""Should get a penalty for moving a low voltage lower."""
self.env.bus_obs_data_prev.loc[2, 'BusPUVolt'] = 0.93
self.env.bus_obs_data.loc[2, 'BusPUVolt'] = 0.91
self.assertAlmostEqual(
self.get_reward_no_action(), -2 * self.rewards['v_delta'])
def test_high_v_gets_higher(self):
"""Should get a penalty for moving a high voltage higher."""
self.env.bus_obs_data_prev.loc[3, 'BusPUVolt'] = 1.06
self.env.bus_obs_data.loc[3, 'BusPUVolt'] = 1.09
self.assertAlmostEqual(
self.get_reward_no_action(), -3 * self.rewards['v_delta'])
def test_in_bounds_moves_low(self):
"""Should get penalty for voltage that was in bounds moving
out of bounds.
"""
self.env.bus_obs_data_prev.loc[3, 'BusPUVolt'] = 1.05
self.env.bus_obs_data.loc[3, 'BusPUVolt'] = 0.9
self.assertAlmostEqual(
self.get_reward_no_action(),
# Moved 0.05 pu away from lower boundary, also gets extra
# penalty for leaving bounds.
-5 * self.rewards['v_delta'] + self.rewards['v_out_bounds'])
def test_in_bounds_moves_high(self):
"""Should get penalty for voltage that was in bounds moving
out of bounds.
"""
self.env.bus_obs_data_prev.loc[0, 'BusPUVolt'] = 0.96
self.env.bus_obs_data.loc[0, 'BusPUVolt'] = 0.94
self.assertAlmostEqual(
self.get_reward_no_action(),
# Moved 0.01 pu away from lower boundary, also gets extra
# penalty for leaving bounds.
-1 * self.rewards['v_delta'] + self.rewards['v_out_bounds'])
def test_no_op_reward(self):
"""With no violations, should receive the no_op reward if the
no_op action is the last_action and buses are within bounds.
"""
with patch.object(self.env, 'last_action', 0):
r = self.env._compute_reward()
self.assertEqual(self.rewards['no_op'], r)
def test_no_op_penalty(self):
"""With any violations, should receive the no_op penalty if the
no_op action is the last_action and any buses are out of bounds.
"""
# Put one voltage below.
self.env.bus_obs_data.loc[0, 'BusPUVolt'] = 0.94
with patch.object(self.env, 'last_action', 0):
r = self.env._compute_reward()
self.assertEqual(self.rewards['no_op'], -r)
# noinspection DuplicatedCode
class GridMindControlEnv14BusInitTestCase(unittest.TestCase):
    """Test the initialization of the environment."""
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # Rewards for the three GridMind voltage "zones."
        cls.rewards = {
            "normal": 100,
            "violation": -50,
            "diverged": -100
        }
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype
        )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (shuts down the backing PowerWorld
        instance)."""
        cls.env.close()
    def test_branches_to_open(self):
        """By default, branches_to_open should be None."""
        self.assertIsNone(self.env.branches_to_open)
    def test_set_branches_for_scenario_does_not_call_power_world(self):
        """By default, branches_to_open should be None, and thus
        calling _set_branches_for_scenario should not use the saw
        object.
        """
        # Bad practice patching private method from external class.
        with patch.object(self.env.saw, '_call_simauto') as p:
            out = self.env._set_branches_for_scenario()
        self.assertIsNone(out)
        self.assertFalse(p.called)
    def test_loading(self):
        """Ensure all load values are set and are in bounds."""
        # Compare MW.
        original_mw = self.env.load_init_data['LoadSMW'].to_numpy()
        # I feel like there has to be a better way to do this, but
        # I failed to find it.
        #
        # Ensure all loads are at or above the minimum.
        # noinspection PyUnresolvedReferences
        self.assertTrue(
            ((np.tile(original_mw, (self.num_scenarios, 1))
              * self.min_load_factor)
             <= self.env.loads_mw).all()
        )
        # Ensure all loads are at or below the maximum.
        # noinspection PyUnresolvedReferences
        self.assertTrue(
            ((np.tile(original_mw, (self.num_scenarios, 1))
              * self.max_load_factor)
             >= self.env.loads_mw).all()
        )
        # Ensure total loading matches.
        np.testing.assert_array_equal(
            self.env.total_load_mw, self.env.loads_mw.sum(axis=1))
        # Ensure shapes are correct.
        self.assertEqual(self.env.total_load_mw.shape, (self.num_scenarios,))
        self.assertEqual(self.env.loads_mw.shape,
                         (self.num_scenarios, self.env.num_loads))
        self.assertEqual(self.env.loads_mvar.shape,
                         (self.num_scenarios, self.env.num_loads))
    def test_generation(self):
        """Change loading in the case, solve the power flow, and ensure
        all gens pick up the difference.
        """
        try:
            load_copy = self.env.load_init_data.copy(deep=True)
            # Increase loading.
            load_copy['LoadSMW'] = load_copy['LoadSMW'] * 1.2
            self.env.saw.change_and_confirm_params_multiple_element(
                ObjectType='load', command_df=load_copy)
            # Solve the power flow.
            self.env.saw.SolvePowerFlow()
            # Now get generator information.
            gen_data = self.env.saw.GetParametersMultipleElement(
                ObjectType='gen', ParamList=self.env.gen_init_fields
            )
            # Take the difference.
            delta = (gen_data['GenMW']
                     - self.env.gen_init_data['GenMW']).to_numpy()
            # All generators should take on some load.
            np.testing.assert_array_less(0, delta)
            # All generator increases should be nearly the same. The
            # slack will have some differences - we'll allow for 0.5%
            # relative tolerance.
            np.testing.assert_allclose(actual=delta, desired=delta[-1],
                                       rtol=0.005)
        finally:
            # Always restore the saved PowerWorld state so later tests
            # see the unmodified case.
            self.env.saw.LoadState()
    def test_gen_action_array(self):
        """Ensure the action array is of the correct dimension."""
        # Check the shape.
        self.assertEqual(self.env.gen_action_array.shape[0],
                         self.env.action_space.n)
        self.assertEqual(self.env.gen_action_array.shape[1], self.env.num_gens)
        # Spot check
        # NOTE(review): the hard-coded 5 below is assumed to equal
        # num_gens for the 14 bus case - confirm.
        np.testing.assert_array_equal(self.env.gen_action_array[0, :],
                                      np.array([self.gen_voltage_range[0]] * 5)
                                      )
        np.testing.assert_array_equal(self.env.gen_action_array[-1, :],
                                      np.array([self.gen_voltage_range[1]] * 5)
                                      )
        a = np.array([self.gen_voltage_range[0]] * 5)
        a[-1] = self.env.gen_bins[1]
        np.testing.assert_array_equal(self.env.gen_action_array[1, :], a)
        b = np.array([self.gen_voltage_range[-1]] * 5)
        b[-1] = self.env.gen_bins[-2]
        np.testing.assert_array_equal(self.env.gen_action_array[-2, :], b)
        c = np.array([self.gen_voltage_range[0]] * 5)
        c[-2] = self.env.gen_bins[1]
        np.testing.assert_array_equal(self.env.gen_action_array[5], c)
# noinspection DuplicatedCode
class GridMindControlEnv14BusRewardTestCase(unittest.TestCase):
    """Test the _compute_reward method."""
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # Rewards for the three GridMind voltage "zones." These are
        # referenced directly by the test methods below.
        cls.rewards = {
            "normal": 100,
            "violation": -50,
            "diverged": -100
        }
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype
        )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (shuts down the backing PowerWorld
        instance)."""
        cls.env.close()
    def setUp(self) -> None:
        """Override the relevant observation DataFrames, clear the
        cumulative reward.
        """
        # Call reset and decrement the scenario index for consistency.
        self.env.reset()
        self.env.scenario_idx = 0
        # Overwrite bus observations.
        # 6 buses with unity per unit voltage.
        v_df = pd.DataFrame(
            [[1., 'a'], [1., 'b'], [1., 'c'], [1., 'd'], [1., 'e'], [1., 'f']],
            columns=['BusPUVolt', 'junk'])
        self.env.bus_obs_data_prev = v_df.copy()
        self.env.bus_obs_data = v_df.copy()
        # Clear cumulative reward.
        self.env.cumulative_reward = 0
    def test_all_normal(self):
        """All buses in normal zone."""
        self.assertEqual(0, self.env.cumulative_reward)
        reward = self.env._compute_reward()
        self.assertEqual(reward, self.rewards['normal'])
    def test_all_diverged(self):
        """All buses in diverged zone."""
        self.assertEqual(0, self.env.cumulative_reward)
        # NOTE(review): values outside roughly [0.8, 1.25] pu appear
        # to count as "diverged" - confirm against the env's zone
        # boundaries.
        self.env.bus_obs_data['BusPUVolt'] = \
            np.array([0.0, 1.25, 200, 0.8, 0.5, 1.26])
        reward = self.env._compute_reward()
        self.assertEqual(reward, self.rewards['diverged'])
    def test_all_violation(self):
        """All buses in violation zone."""
        self.assertEqual(0, self.env.cumulative_reward)
        self.env.bus_obs_data['BusPUVolt'] = \
            np.array([0.81, 1.06, 1.249, 0.949, 0.9, 1.1])
        reward = self.env._compute_reward()
        self.assertEqual(reward, self.rewards['violation'])
    def test_mixed(self):
        """Test a mixture of bus zones."""
        self.assertEqual(0, self.env.cumulative_reward)
        self.env.bus_obs_data['BusPUVolt'] = \
            np.array([0.81, 0.79, 1., 1.02, 1.06, 1.04])
        reward = self.env._compute_reward()
        # The presence of any diverged buses means we should get the
        # "diverged" reward.
        self.assertEqual(reward, self.rewards['diverged'])
    def test_cumulative_reward_correct_under_failed_pf(self):
        """Ensure the cumulative reward is correctly computed under
        a failed power flow.
        """
        # Ensure the cumulative reward is 0.
        self.assertEqual(0, self.env.cumulative_reward)
        # Ensure the current reward is NaN.
        self.assertTrue(np.isnan(self.env.current_reward))
        # Patch solve and observe to throw an exception. Also patch
        # _take_action to do nothing. Need to patch _add_to_log so it
        # doesn't get upset about bad sized dataframe.
        with patch.object(self.env, '_solve_and_observe',
                          side_effect=PowerWorldError('bleh')):
            with patch.object(self.env, '_take_action'):
                with patch.object(self.env, '_add_to_log'):
                    # Take a step.
                    obs, rew, d, i = self.env.step(3)
        # Current and cumulative rewards should be equal.
        self.assertEqual(self.env.current_reward, self.env.cumulative_reward)
        # Penalty should be equal to 2* the diverged reward.
        # NOTE(review): the factor of 2 presumably reflects the
        # GridMind failed-power-flow penalty formula - confirm.
        self.assertEqual(self.rewards['diverged'] * 2,
                         self.env.cumulative_reward)
# noinspection DuplicatedCode
class GridMindControlEnv14BusMiscTestCase(unittest.TestCase):
    """Exercise assorted small behaviors of GridMindEnv: reward on
    failed power flow, observation extraction, action application, and
    the generator bus status vector.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # Stash the constructor inputs on the class so individual
        # tests can refer back to them, then build the environment.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.rewards = {"normal": 100, "violation": -50, "diverged": -100}
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14,
            num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype)
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (shuts down PowerWorld)."""
        cls.env.close()
    def test_reset_clears_cumulative_reward(self):
        """reset() should zero out any accumulated reward."""
        self.env.cumulative_reward = 10
        self.env.reset()
        self.assertEqual(0, self.env.cumulative_reward)
    def test_compute_reward_failed_pf(self):
        """A failed power flow earns the 'diverged' reward."""
        self.assertEqual(-100, self.env._compute_reward_failed_pf())
    def test_get_observation(self):
        """The observation is simply the per unit bus voltage column."""
        bus_df = pd.DataFrame(
            [[1., 'a'], [2., 'b']], columns=['BusPUVolt', 'bleh'])
        with patch.object(self.env, 'bus_obs_data', bus_df):
            observation = self.env._get_observation()
        np.testing.assert_array_equal(np.array([1., 2.]), observation)
    def test_take_action_0(self):
        """Action 0 should put all gens at the minimum voltage.
        """
        self.env._take_action(0)
        params = self.env.gen_key_fields + ['GenVoltSet']
        gen_df = self.env.saw.GetParametersMultipleElement(
            ObjectType='gen', ParamList=params)
        np.testing.assert_allclose(
            gen_df['GenVoltSet'].to_numpy(), self.gen_voltage_range[0])
    def test_take_last_action(self):
        """The last action should put all gens at the maximum voltage.
        """
        self.env._take_action(self.env.action_space.n - 1)
        params = self.env.gen_key_fields + ['GenVoltSet']
        gen_df = self.env.saw.GetParametersMultipleElement(
            ObjectType='gen', ParamList=params)
        np.testing.assert_allclose(
            gen_df['GenVoltSet'].to_numpy(), self.gen_voltage_range[1])
    def test_failed_pf_obs_zero(self):
        """On power flow failure the observation is an all-zero vector
        with one entry per bus.
        """
        observation = self.env._get_observation_failed_pf()
        self.assertEqual((self.env.num_buses,), observation.shape)
        self.assertTrue((observation == 0.0).all())
    def test_gen_bus_status_arr(self):
        """Test the gen_bus_status_arr property."""
        status_df = pd.DataFrame({
            'BusNum': [1, 1, 3, 7, 7, 9, 11, 11],
            'GenStatus': ['Open', 'Closed', 'Closed', 'Open', 'Open',
                          'Open', 'Closed', 'Closed']})
        with patch.object(self.env, 'gen_obs_data', new=status_df):
            status_vec = self.env.gen_bus_status_arr
        # A bus counts as "on" when any generator at that bus is
        # closed. Buses in order: 1, 3, 7, 9, 11.
        np.testing.assert_array_equal(
            np.array([True, True, False, False, True]), status_vec)
# noinspection DuplicatedCode
class GridMindControlEnv14BusCondensersTestCase(unittest.TestCase):
    """Test the case with condensers and make sure behavior is
    expected.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.rewards = {
            "normal": 100,
            "violation": -50,
            "diverged": -100
        }
        # Note this test case uses the condensers variant of the 14
        # bus case (PWB_14_CONDENSERS).
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14_CONDENSERS, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype
        )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment.
        Fix: this class previously defined no tearDownClass, so its
        environment (and the backing PowerWorld SimAuto object) was
        leaked between test cases. Every sibling test case in this
        file closes its environment; do the same here.
        """
        cls.env.close()
    def test_generation(self):
        """Change loading in the case, solve the power flow, and ensure
        only two gens pick up the difference.
        """
        try:
            load_copy = self.env.load_init_data.copy(deep=True)
            # Increase loading.
            load_copy['LoadSMW'] = load_copy['LoadSMW'] * 1.2
            self.env.saw.change_and_confirm_params_multiple_element(
                ObjectType='load', command_df=load_copy)
            # Solve the power flow.
            self.env.saw.SolvePowerFlow()
            # Now get generator information.
            gen_data = self.env.saw.GetParametersMultipleElement(
                ObjectType='gen', ParamList=self.env.gen_init_fields
            )
            # Take the difference.
            delta = (gen_data['GenMW']
                     - self.env.gen_init_data['GenMW']).to_numpy()
            # The generators at buses 3, 6, and 8 should a) have 0 MW
            # and b) have 0 change in MW.
            gen_3_6_8 = gen_data['BusNum'].isin([3, 6, 8]).to_numpy()
            np.testing.assert_array_equal(
                gen_data.loc[gen_3_6_8, 'GenMW'].to_numpy(), 0.0)
            np.testing.assert_array_equal(delta[gen_3_6_8], 0.0)
            # The remaining generators should take on load.
            np.testing.assert_array_less(0, delta[~gen_3_6_8])
            # All generator increases should be nearly the same. The
            # slack will have some differences - we'll allow for 0.5%
            # relative tolerance.
            # NOTE(review): delta[1] is assumed to index a
            # non-condenser generator - confirm gen ordering.
            np.testing.assert_allclose(actual=delta[~gen_3_6_8],
                                       desired=delta[1], rtol=0.005)
        finally:
            # Always restore the saved PowerWorld state so later tests
            # see the unmodified case.
            self.env.saw.LoadState()
# noinspection DuplicatedCode
class GridMindControlEnv14BusRenderTestCase(unittest.TestCase):
    """Test rendering."""
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # Rendering inputs: oneline/contour axd files, an image output
        # directory (removed in tearDownClass), and the render
        # interval in seconds.
        cls.oneline_axd = AXD_14
        cls.contour_axd = CONTOUR
        cls.image_dir = os.path.join(THIS_DIR, 'render_dir')
        cls.render_interval = 0.1
        cls.rewards = {
            "normal": 100,
            "violation": -50,
            "diverged": -100
        }
        # noinspection PyTypeChecker
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype,
            oneline_axd=cls.oneline_axd, contour_axd=cls.contour_axd,
            image_dir=cls.image_dir, render_interval=cls.render_interval
        )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment and delete the rendered images."""
        cls.env.close()
        shutil.rmtree(cls.image_dir)
    def _get_files_in_image_dir(self):
        """Return a list of the (plain) files in the image directory."""
        # https://stackoverflow.com/a/3207973/11052174
        return [f for f in os.listdir(self.env.image_dir)
                if os.path.isfile(os.path.join(self.env.image_dir, f))]
    def test_rendering(self):
        """Exercise the full render lifecycle: attributes are lazily
        initialized on the first render() call, and each render() call
        writes one image file.
        """
        # Before render has been called, several attributes should be
        # None.
        self.assertIsNone(self.env.image_path)
        self.assertIsNone(self.env.image)
        self.assertIsNone(self.env.image_axis)
        self.assertIsNone(self.env.fig)
        self.assertIsNone(self.env.ax)
        # The render flag should be False.
        self.assertFalse(self.env._render_flag)
        # Reset should be called before render.
        self.env.reset()
        # Render flag should still be False.
        self.assertFalse(self.env._render_flag)
        # Calling render should initialize all sorts of stuff.
        self.env.render()
        self.assertIsNotNone(self.env.image_path)
        self.assertIsNotNone(self.env.image)
        self.assertIsNotNone(self.env.image_axis)
        self.assertIsNotNone(self.env.fig)
        self.assertIsNotNone(self.env.ax)
        # We should have one file in our image directory.
        files = self._get_files_in_image_dir()
        self.assertEqual(len(files), 1)
        # Take a couple steps and render each time.
        for i in range(2):
            self.env.step(self.env.action_space.sample())
            self.env.render()
            files = self._get_files_in_image_dir()
            self.assertEqual(len(files), i+2)
# noinspection DuplicatedCode
class GridMindControlEnv14BusLoggingTestCase(unittest.TestCase):
    """Test that the logging is working as it should.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        # The log is flushed to CSV every log_buffer entries.
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        cls.rewards = {
            "normal": 100,
            "violation": -50,
            "diverged": -100
        }
        cls.env = voltage_control_env.GridMindEnv(
            pwb_path=PWB_14_CONDENSERS, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype,
            log_buffer=cls.log_buffer,
            csv_logfile=cls.csv_logfile
        )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (shuts down the backing PowerWorld
        instance)."""
        cls.env.close()
    def test_log(self):
        """Step through some training-like steps and ensure the logging
        works as expected.
        """
        # Ensure the log array starts empty.
        # NOTE(review): 14 + 3 + 5 is presumed to be 14 bus voltage
        # columns + (episode, action, reward) + 5 gen setpoint columns
        # - confirm against env.log_columns.
        zeros = np.zeros((self.log_buffer, 14 + 3 + 5))
        np.testing.assert_array_equal(zeros, self.env.log_array)
        # Calling reset should create a log entry.
        self.env.reset()
        entry_1 = self.env.log_array[0, :]
        # Episode:
        self.assertEqual(entry_1[0], 0)
        # Action:
        self.assertTrue(np.isnan(entry_1[1]))
        # Reward:
        self.assertTrue(np.isnan(entry_1[2]))
        # All rows after the reset entry should still be zero.
        np.testing.assert_array_equal(
            zeros[1:, :], self.env.log_array[1:, :])
        # We haven't hit the "buffer" limit yet.
        self.assertEqual(0, self.env.log_flush_count)
        self.assertFalse(os.path.isfile(self.env.csv_logfile))
        # If we run 9 actions, we should hit the buffer.
        # (1 reset entry + 9 step entries == log_buffer == 10.)
        actions = [500 + x for x in range(9)]
        for a in actions:
            self.env.step(a)
        # The log should have been flushed.
        self.assertEqual(1, self.env.log_flush_count)
        self.assertTrue(os.path.isfile(self.env.csv_logfile))
        # The log index should have been reset.
        self.assertEqual(0, self.env.log_idx)
        # Read the log file.
        log_data = pd.read_csv(self.env.csv_logfile, index_col=None)
        # Columns should line up.
        self.assertListEqual(log_data.columns.tolist(), self.env.log_columns)
        # There should be the same number of entries as our "buffer"
        # size.
        self.assertEqual(log_data.shape[0], self.env.log_buffer)
        # Ensure the episode number is 0 for all rows.
        self.assertTrue((log_data['episode'] == 0).all())
        # First action should be NaN, while the rest should line up
        # with our action list.
        self.assertTrue(np.isnan(log_data['action_taken'].to_numpy()[0]))
        np.testing.assert_array_equal(
            np.array(actions), log_data['action_taken'].to_numpy()[1:])
        # First reward should be NaN, while the rest should not.
        self.assertTrue(np.isnan(log_data['reward'].to_numpy()[0]))
        self.assertFalse(np.isnan(log_data['reward'].to_numpy()[1:]).any())
        # Bus voltages and generator setpoints should be greater than 0.
        bus_cols = log_data.columns.to_numpy()[
            log_data.columns.str.startswith('bus_') |
            log_data.columns.str.startswith('gen_')]
        self.assertEqual((14+5,), bus_cols.shape)
        self.assertTrue((log_data[bus_cols].to_numpy() > 0).all())
        # Reset the environment and take another set of actions that
        # will cause the buffer to flush.
        self.env.reset()
        # If we run 9 actions, we should hit the buffer.
        actions = [600 + x for x in range(9)]
        for a in actions:
            self.env.step(a)
        # The log should have been flushed for the 2nd time.
        self.assertEqual(2, self.env.log_flush_count)
        self.assertTrue(os.path.isfile(self.env.csv_logfile))
        # The log index should have been reset.
        self.assertEqual(0, self.env.log_idx)
        # Read the log file.
        log_data = pd.read_csv(self.env.csv_logfile, index_col=None)
        # Columns should line up.
        self.assertListEqual(log_data.columns.tolist(), self.env.log_columns)
        # Should now have 2x buffer size entries.
        self.assertEqual(log_data.shape[0], 2 * self.env.log_buffer)
        # Perform a reset and run two actions.
        self.env.reset()
        self.env.step(1502)
        self.env.step(1242)
        # Manually flush the log.
        self.env._flush_log()
        # Now we should get three more rows.
        log_data = pd.read_csv(self.env.csv_logfile, index_col=None)
        self.assertEqual(log_data.shape[0], 2 * self.env.log_buffer + 3)
        # If the last row is 0's then the indexing is bad.
        self.assertFalse(np.array_equal(
            np.zeros(log_data.shape[1]), log_data.to_numpy()[-1, :]))
        # Finally, ensure the "reset_log" method works as intended.
        with patch.object(self.env, '_flush_log') as p:
            self.env.reset_log(new_file='mynewlog.csv')
        # Ensure _flush_log gets called, and that the appropriate
        # variables get reset.
        p.assert_called_once()
        self.assertEqual(self.env.log_idx, 0)
        self.assertEqual(self.env.log_flush_count, 0)
        self.assertEqual(self.env.csv_logfile, 'mynewlog.csv')
# noinspection DuplicatedCode
class GridMindContingenciesEnv14BusLineOpenTestCase(unittest.TestCase):
    """Verify the contingencies environment opens exactly one line per
    scenario.
    """
    @classmethod
    def setUpClass(cls) -> None:
        # Stash the constructor inputs on the class, clean up any
        # stale log file, then build the environment.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Other test cases may have written this file; start fresh.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        cls.rewards = {"normal": 100, "violation": -50, "diverged": -100}
        cls.env = voltage_control_env.GridMindContingenciesEnv(
            pwb_path=PWB_14_CONDENSERS,
            num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            rewards=cls.rewards,
            dtype=cls.dtype,
            log_buffer=cls.log_buffer,
            csv_logfile=cls.csv_logfile)
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (shuts down PowerWorld)."""
        cls.env.close()
    def setUp(self):
        """Restore the saved PowerWorld state so every test starts with
        all branches closed, then re-solve the power flow.
        """
        self.env.saw.LoadState()
        self.env.saw.SolvePowerFlow()
    def _get_line_status(self):
        """Fetch the key fields plus LineStatus for every branch."""
        return self.env.saw.GetParametersMultipleElement(
            'branch', self.env.branch_key_fields + ['LineStatus'])
    def _all_closed(self):
        """Assert that every branch in the case is closed."""
        status = self._get_line_status()['LineStatus']
        self.assertTrue((status == 'Closed').all())
    def _one_open(self):
        """Assert that exactly one branch in the case is open."""
        line_df = self._get_line_status()
        is_closed = line_df['LineStatus'] == 'Closed'
        self.assertFalse(is_closed.all())
        self.assertEqual(1, line_df[~is_closed].shape[0])
    def test_set_branches_for_scenario(self):
        """_set_branches_for_scenario should open a single line."""
        # Sanity check: start fully closed.
        self._all_closed()
        self.env._set_branches_for_scenario()
        self._one_open()
    def test_reset_opens_branch(self):
        """Ensure a branch is opened after calling reset."""
        self._all_closed()
        self.env.reset()
        self._one_open()
# noinspection DuplicatedCode
class DiscreteVoltageControlGenState14BusEnvTestCase(unittest.TestCase):
"""Quick testing of DiscreteVoltageControlGenState14BusEnv."""
    @classmethod
    def setUpClass(cls) -> None:
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        # NOTE(review): no rewards dict is passed here, so this env
        # uses its default rewards - confirm that's intentional.
        cls.env = voltage_control_env.DiscreteVoltageControlGenState14BusEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype,
            log_buffer=cls.log_buffer,
            csv_logfile=cls.csv_logfile
        )
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.env.close()
def test_branches_to_open(self):
"""By default, branches_to_open should not be None."""
self.assertIsNotNone(self.env.branches_to_open)
self.assertEqual((self.env.num_scenarios,),
self.env.branches_to_open.shape)
self.assertTrue(self.env.branches_to_open.min() >= 0)
self.assertTrue(self.env.branches_to_open.max()
< len(self.env.LINES_TO_OPEN))
def test_set_branches_for_scenario_does_not_call_power_world(self):
"""By default, branches_to_open should be None, and thus
calling _set_branches_for_scenario should not use the saw
object.
"""
# Bad practice patching private method from external class.
with patch.object(self.env.saw, '_call_simauto') as p:
out = self.env._set_branches_for_scenario()
self.assertIsNone(out)
self.assertTrue(p.called)
def test_observation_shape(self):
# 14 buses, 5 generators.
self.assertEqual((14+5,), self.env.observation_space.shape)
def test_observation_bounds(self):
# All lower bounds should be 0.
np.testing.assert_array_equal(0, self.env.observation_space.low)
# Upper bounds corresponding to buses should be 2.
np.testing.assert_array_equal(2, self.env.observation_space.high[0:14])
# Remaining upper bounds correspond to generator states and
# should be at 1.
np.testing.assert_array_equal(1, self.env.observation_space.high[14:])
def test_num_obs(self):
# 14 buses, 5 generators.
self.assertEqual(14+5, self.env.num_obs)
def test_get_observation(self):
"""Patch the observation data frames and ensure we get back
what we expect.
"""
v_arr = np.ones(14, dtype=self.env.dtype)
v_arr += 0.1
v = pd.DataFrame({'BusPUVolt': v_arr, 'BusNum': np.arange(1, 15)})
g_list = ['Closed'] * 5
g_list[1] = 'Open'
g_arr = np.ones(5, dtype=self.env.dtype)
g_arr[1] = 0
g = pd.DataFrame({'GenStatus': g_list, 'Bleh': [1] * 5})
with patch.object(self.env, 'bus_obs_data', v):
with patch.object(self.env, 'gen_obs_data', g):
obs = self.env._get_observation()
np.testing.assert_array_equal(v_arr, obs[0:14])
np.testing.assert_array_equal(g_arr, obs[14:])
def test_get_observation_failed_pf(self):
v_arr = np.zeros(14, dtype=self.env.dtype)
g_list = ['Closed'] * 5
g_list[3] = 'Open'
g_list[4] = 'Open'
g_arr = np.ones(5, dtype=self.env.dtype)
g_arr[3] = 0
g_arr[4] = 0
g = pd.DataFrame({'GenStatus': g_list, 'Stuff': [2] * 5})
with patch.object(self.env, 'gen_obs_data', g):
obs = self.env._get_observation_failed_pf()
np.testing.assert_array_equal(v_arr, obs[0:14])
np.testing.assert_array_equal(g_arr, obs[14:])
# noinspection DuplicatedCode
class DiscreteVoltageControlBranchState14BusEnvTestCase(unittest.TestCase):
    """Quick testing of DiscreteVoltageControlBranchState14BusEnv.

    The observation vector for this environment is expected to be
    [14 bus voltages, 4 line (branch) states].
    """
    @classmethod
    def setUpClass(cls) -> None:
        """Construct the environment once for all tests in this class."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        cls.env = \
            voltage_control_env.DiscreteVoltageControlBranchState14BusEnv(
                pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
                max_load_factor=cls.max_load_factor,
                min_load_factor=cls.min_load_factor,
                min_load_pf=cls.min_load_pf,
                lead_pf_probability=cls.lead_pf_probability,
                load_on_probability=cls.load_on_probability,
                num_gen_voltage_bins=cls.num_gen_voltage_bins,
                gen_voltage_range=cls.gen_voltage_range,
                seed=cls.seed,
                log_level=logging.INFO,
                dtype=cls.dtype,
                log_buffer=cls.log_buffer,
                csv_logfile=cls.csv_logfile
            )
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (terminates the SimAuto connection)."""
        cls.env.close()
    def setUp(self) -> None:
        """Rewind to the first scenario and reset before each test."""
        self.env.scenario_idx = 0
        self.env.reset()
    def test_branches_to_open(self):
        """By default, branches_to_open should have an entry for each
        scenario."""
        self.assertIsNotNone(self.env.branches_to_open)
        self.assertEqual((self.env.num_scenarios,),
                         self.env.branches_to_open.shape)
        # Indices must be valid positions into LINES_TO_OPEN.
        self.assertTrue(self.env.branches_to_open.min() >= 0)
        self.assertTrue(self.env.branches_to_open.max()
                        < len(self.env.LINES_TO_OPEN))
    def test_set_branches_for_scenario_does_not_call_power_world(self):
        """Since branches_to_open should not be None,
        _set_branches_for_scenario should call PowerWorld.

        NOTE(review): the method name says "does_not_call_power_world",
        which contradicts both this docstring and the assertion below
        that PowerWorld *is* called - the name looks like a copy/paste
        leftover.
        """
        # Bad practice patching private method from external class.
        with patch.object(self.env.saw, '_call_simauto') as p:
            out = self.env._set_branches_for_scenario()
        self.assertIsNone(out)
        self.assertTrue(p.called)
    def test_observation_shape(self):
        """Observation: 14 bus voltages followed by 4 line states."""
        # 14 buses, 4 "openable" lines.
        self.assertEqual((14+4,), self.env.observation_space.shape)
    def test_observation_bounds(self):
        """Voltage entries are bounded [0, 2]; line states [0, 1]."""
        # All lower bounds should be 0.
        np.testing.assert_array_equal(0, self.env.observation_space.low)
        # Upper bounds corresponding to buses should be 2.
        np.testing.assert_array_equal(2, self.env.observation_space.high[0:14])
        # Remaining upper bounds correspond to line states and
        # should be at 1.
        np.testing.assert_array_equal(1, self.env.observation_space.high[14:])
    def test_num_obs(self):
        """num_obs must match the observation space size."""
        # 14 buses, 4 lines to open.
        self.assertEqual(14+4, self.env.num_obs)
    def test_get_observation_and_get_observation_failed_pf(self):
        """Exercise _get_observation/_get_observation_failed_pf with all
        eligible lines open, all closed, then a single line open. Line
        state entries read 1 for closed, 0 for open; a failed power flow
        zeroes the voltage entries but keeps reporting line states.
        """
        # Get a copy of the line observation DataFrame.
        branch_obs = self.env.branch_obs_data.copy(deep=True)
        # Convert to multi-index to make selecting buses a bit
        # simpler.
        branch_obs.set_index(['BusNum', 'BusNum:1'], inplace=True)
        # Open all the eligible lines in this DataFrame.
        for l_t in self.env.LINES_TO_OPEN:
            branch_obs.loc[(l_t[0], l_t[1]), 'LineStatus'] = 'Open'
        # Patch the existing DataFrame, call _get_observation.
        with patch.object(self.env, 'branch_obs_data',
                          branch_obs.reset_index()):
            obs = self.env._get_observation()
            obs_failed = self.env._get_observation_failed_pf()
        # The first 14 values correspond to bus voltages.
        np.testing.assert_array_less(0, obs[0:14])
        np.testing.assert_array_equal(0, obs_failed[0:14])
        # The remaining correspond to line states, and should all be 0
        # since we opened all the lines.
        np.testing.assert_array_equal(0, obs[14:])
        np.testing.assert_array_equal(0, obs_failed[14:])
        # Now close all the lines.
        for l_t in self.env.LINES_TO_OPEN:
            branch_obs.loc[(l_t[0], l_t[1]), 'LineStatus'] = 'Closed'
        # Patch the existing DataFrame, call _get_observation.
        with patch.object(self.env, 'branch_obs_data',
                          branch_obs.reset_index()):
            obs = self.env._get_observation()
            obs_failed = self.env._get_observation_failed_pf()
        # The first 14 values correspond to bus voltages.
        np.testing.assert_array_less(0, obs[0:14])
        np.testing.assert_array_equal(0, obs_failed[0:14])
        # The remaining correspond to line states, and should all be 1
        # since we closed all the lines.
        np.testing.assert_array_equal(1, obs[14:])
        np.testing.assert_array_equal(1, obs_failed[14:])
        # Open the second eligible single line.
        l_t = self.env.LINES_TO_OPEN[1]
        branch_obs.loc[(l_t[0], l_t[1]), 'LineStatus'] = 'Open'
        # Patch the existing DataFrame, call _get_observation.
        with patch.object(self.env, 'branch_obs_data',
                          branch_obs.reset_index()):
            obs = self.env._get_observation()
            obs_failed = self.env._get_observation_failed_pf()
        # The first 14 values correspond to bus voltages.
        np.testing.assert_array_less(0, obs[0:14])
        np.testing.assert_array_equal(0, obs_failed[0:14])
        # The remaining correspond to line states. All but the second
        # entry should be closed.
        expected = np.ones(len(self.env.LINES_TO_OPEN), dtype=self.env.dtype)
        expected[1] = 0
        np.testing.assert_array_equal(expected, obs[14:])
        np.testing.assert_array_equal(expected, obs_failed[14:])
# noinspection DuplicatedCode
class DiscreteVoltageControlBranchAndGenState14BusEnvTestCase(
        unittest.TestCase):
    """Quick testing of DiscreteVoltageControlBranchAndGenState14BusEnv.

    The observation vector for this environment is expected to be
    [14 bus voltages, 4 line states, 5 generator states].
    """

    @classmethod
    def setUpClass(cls) -> None:
        """Construct the environment once for all tests in this class."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        cls.env = \
            voltage_control_env.DiscreteVoltageControlBranchAndGenState14BusEnv(
                pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
                max_load_factor=cls.max_load_factor,
                min_load_factor=cls.min_load_factor,
                min_load_pf=cls.min_load_pf,
                lead_pf_probability=cls.lead_pf_probability,
                load_on_probability=cls.load_on_probability,
                num_gen_voltage_bins=cls.num_gen_voltage_bins,
                gen_voltage_range=cls.gen_voltage_range,
                seed=cls.seed,
                log_level=logging.INFO,
                dtype=cls.dtype,
                log_buffer=cls.log_buffer,
                csv_logfile=cls.csv_logfile
            )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (terminates the SimAuto connection)."""
        cls.env.close()

    def setUp(self) -> None:
        """Rewind to the first scenario and reset before each test."""
        self.env.scenario_idx = 0
        self.env.reset()

    def test_observation_shape(self):
        """Observation: 14 voltages + 4 line states + 5 gen states."""
        # 14 buses, 4 "openable" lines, 5 generators.
        self.assertEqual((14+4+5,), self.env.observation_space.shape)

    def test_observation_bounds(self):
        """Voltages bounded [0, 2]; line/gen states bounded [0, 1]."""
        # All lower bounds should be 0.
        np.testing.assert_array_equal(0, self.env.observation_space.low)
        # Upper bounds corresponding to buses should be 2.
        np.testing.assert_array_equal(2, self.env.observation_space.high[0:14])
        # Remaining upper bounds correspond to line states and
        # generator states should be at 1.
        np.testing.assert_array_equal(1, self.env.observation_space.high[14:])

    def test_num_obs(self):
        """num_obs must match the observation space size."""
        # 14 buses, 4 lines to open, 5 generators.
        self.assertEqual(14+4+5, self.env.num_obs)

    def test_get_observation_and_get_observation_failed_pf(self):
        """Open the first/last eligible lines and first/last generators,
        then check both the normal and failed-power-flow observations:
        voltages are positive (normal) or zeroed (failed), while the
        line/gen state entries are identical in both cases.
        """
        # Get a copy of the line observation DataFrame.
        branch_obs = self.env.branch_obs_data.copy(deep=True)
        # Convert to multi-index to make selecting buses a bit
        # simpler.
        branch_obs.set_index(['BusNum', 'BusNum:1'], inplace=True)
        # Close all lines.
        branch_obs['LineStatus'] = 'Closed'
        # Open the first and last eligible lines.
        for idx in [0, len(self.env.LINES_TO_OPEN) - 1]:
            l_t = self.env.LINES_TO_OPEN[idx]
            branch_obs.loc[(l_t[0], l_t[1]), 'LineStatus'] = 'Open'
        # Get a copy of the generator observation DataFrame.
        gen_obs = self.env.gen_obs_data.copy(deep=True)
        # Close all generators.
        gen_obs['GenStatus'] = 'Closed'
        # Open the first and last generators.
        gen_obs.loc[0, 'GenStatus'] = 'Open'
        gen_obs.loc[gen_obs.shape[0] - 1, 'GenStatus'] = 'Open'
        # Patch the existing DataFrames, call _get_observation.
        with patch.object(self.env, 'branch_obs_data',
                          branch_obs.reset_index()):
            with patch.object(self.env, 'gen_obs_data', gen_obs):
                obs = self.env._get_observation()
                obs_failed = self.env._get_observation_failed_pf()
        # Ensure we get back the correct number of observations.
        self.assertEqual(14+4+5, obs.shape[0])
        self.assertEqual(14 + 4 + 5, obs_failed.shape[0])
        # The first 14 values correspond to bus voltages.
        np.testing.assert_array_less(0, obs[0:14])
        np.testing.assert_array_equal(0, obs_failed[0:14])
        # The remaining correspond to generator and line states.
        # BUG FIX: the "_failed" slices previously read from `obs`
        # instead of `obs_failed`, which made the two equality
        # assertions below vacuous (comparing obs with itself). The
        # sibling BranchState test class shows the intended pattern.
        line_arr = obs[14:18]
        line_arr_failed = obs_failed[14:18]
        gen_arr = obs[18:]
        gen_arr_failed = obs_failed[18:]
        # Gen states and line states should be the same for the normal
        # and failed cases.
        np.testing.assert_array_equal(line_arr, line_arr_failed)
        np.testing.assert_array_equal(gen_arr, gen_arr_failed)
        # Create array for expected line states (1 = closed, 0 = open).
        line_expected = np.ones(len(self.env.LINES_TO_OPEN),
                                dtype=self.env.dtype)
        line_expected[0] = 0
        line_expected[-1] = 0
        # Create array for expected gen states.
        gen_expected = np.ones(self.env.num_gens, dtype=self.env.dtype)
        gen_expected[0] = 0
        gen_expected[-1] = 0
        # Test.
        np.testing.assert_array_equal(line_expected, line_arr)
        np.testing.assert_array_equal(gen_expected, gen_arr)
# noinspection DuplicatedCode
class DiscreteVoltageControlBranchAndGenStateClippedReward14BusEnvTestCase(
        unittest.TestCase):
    """Quick testing of
    DiscreteVoltageControlBranchAndGenStateClippedReward14BusEnv.
    Specifically, we'll be testing the
    _compute_reward_volt_change_clipped and
    _compute_reward_failed_power_flow_clipped methods which it uses.

    NOTE(review): despite the class name, the fixture instantiates
    DiscreteVoltageControlEnv with clipped_reward=True - presumably the
    clipped-reward behavior under test lives in the base environment;
    confirm this is intentional.
    """

    @classmethod
    def setUpClass(cls) -> None:
        """Construct the environment once for all tests in this class."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.4
        cls.min_load_factor = 0.6
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.9
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 1000
        cls.csv_logfile = 'log.csv'
        cls.truncate_voltages = True
        cls.scale_voltage_obs = True
        cls.clipped_reward = True
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        # CONSISTENCY FIX: pass the class attributes defined above
        # (truncate_voltages, scale_voltage_obs, log_level) instead of
        # duplicating their values as literals - previously those
        # attributes were dead.
        cls.env = \
            voltage_control_env.DiscreteVoltageControlEnv(
                pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
                max_load_factor=cls.max_load_factor,
                min_load_factor=cls.min_load_factor,
                min_load_pf=cls.min_load_pf,
                lead_pf_probability=cls.lead_pf_probability,
                load_on_probability=cls.load_on_probability,
                num_gen_voltage_bins=cls.num_gen_voltage_bins,
                gen_voltage_range=cls.gen_voltage_range,
                seed=cls.seed,
                log_level=cls.log_level,
                dtype=cls.dtype,
                log_buffer=cls.log_buffer,
                csv_logfile=cls.csv_logfile,
                truncate_voltages=cls.truncate_voltages,
                scale_voltage_obs=cls.scale_voltage_obs,
                clipped_reward=cls.clipped_reward
            )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (terminates the SimAuto connection)."""
        cls.env.close()

    def setUp(self) -> None:
        # Replace the voltage DataFrames for each run. It'll simply be
        # 6 voltage measurements initialized to 1.
        self.env.bus_obs_data = pd.DataFrame({'BusPUVolt': np.ones(6)})
        self.env.bus_obs_data_prev = pd.DataFrame({'BusPUVolt': np.ones(6)})

    # NOTE(review): the tests below mutate the frames via chained
    # indexing (.iloc[i]['BusPUVolt'] = x), which relies on the row
    # Series being a view of the single-dtype frame. This is fragile
    # across pandas versions (SettingWithCopy) - consider .iloc[i, 0].

    def test_all_in(self):
        """All voltages in band -> maximum reward of 1.0."""
        self.assertEqual(1.0, self.env._compute_reward())

    def test_fail(self):
        """A failed power flow -> minimum reward of -1.0."""
        self.assertEqual(-1.0, self.env._compute_reward_failed_pf())

    def test_two_voltages_move_in(self):
        # Move two from out to in.
        self.env.bus_obs_data_prev.iloc[0]['BusPUVolt'] = 0.94
        self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 1.06
        # Ensure at least one stays out of bounds.
        self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 0.949
        self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
        self.assertEqual(0.75, self.env._compute_reward())

    def test_no_op_action_reward(self):
        """A no-op action (0) earns a reward of exactly 0."""
        with patch.object(self.env, 'last_action', new=0):
            self.assertEqual(0.0, self.env._compute_reward())

    def test_one_voltage_moves_in(self):
        # Move one bus from out to in.
        self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.949
        self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 0.95000001
        # Ensure at least one stays out of bounds.
        self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 0.949
        self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
        self.assertEqual(0.5, self.env._compute_reward())

    def test_three_in_one_out(self):
        # Move three from out to in.
        self.env.bus_obs_data_prev.iloc[0]['BusPUVolt'] = 0.94
        self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 1.06
        self.env.bus_obs_data_prev.iloc[2]['BusPUVolt'] = 1.1
        # Move one from in to out.
        self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
        self.assertEqual(0.75, self.env._compute_reward())

    def test_two_move_out(self):
        # Move two buses out of range.
        self.env.bus_obs_data.iloc[2]['BusPUVolt'] = 1.08
        self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.9
        self.assertEqual(-0.75, self.env._compute_reward())

    def test_one_moves_out_net(self):
        # Move two buses out of range.
        self.env.bus_obs_data.iloc[1]['BusPUVolt'] = 1.06
        self.env.bus_obs_data.iloc[0]['BusPUVolt'] = 0.92
        # Move one in range.
        self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.93
        self.assertEqual(-0.5, self.env._compute_reward())

    def test_bad_move_right_direction(self):
        self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 0.9
        self.env.bus_obs_data.iloc[-1]['BusPUVolt'] = 0.91
        self.assertEqual(0.25, self.env._compute_reward())

    def test_bad_move_wrong_direction(self):
        self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 1.051
        self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 1.06
        self.assertEqual(-0.25, self.env._compute_reward())

    @unittest.skip("Don't worry about over/undershoot for now.")
    def test_overshoot(self):
        self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.9
        self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 1.06
        # The agent decreased the distance from the band.
        self.assertEqual(0.15, self.env._compute_reward())

    def test_do_nothing(self):
        self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.9
        self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 0.9
        self.assertEqual(-0.1, self.env._compute_reward())
# noinspection DuplicatedCode
class IL200BusShuntsTestCase(unittest.TestCase):
    """Test case for shunts with the 200 bus case and the
    DiscreteVoltageControlGenAndShuntNoContingenciesEnv.
    """
    @classmethod
    def setUpClass(cls) -> None:
        """Construct the environment once for all tests in this class."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 1000
        cls.max_load_factor = 1.4
        cls.min_load_factor = 0.6
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.9
        cls.shunt_closed_probability = 0.5
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # NOTE(review): unlike the sibling test classes, this one does
        # not remove a pre-existing csv_logfile before constructing the
        # environment - confirm whether that cleanup is needed here.
        cls.env = \
            voltage_control_env.DiscreteVoltageControlGenAndShuntNoContingenciesEnv(
                pwb_path=PWB_200, num_scenarios=cls.num_scenarios,
                max_load_factor=cls.max_load_factor,
                min_load_factor=cls.min_load_factor,
                min_load_pf=cls.min_load_pf,
                lead_pf_probability=cls.lead_pf_probability,
                load_on_probability=cls.load_on_probability,
                shunt_closed_probability=cls.shunt_closed_probability,
                num_gen_voltage_bins=cls.num_gen_voltage_bins,
                gen_voltage_range=cls.gen_voltage_range,
                seed=cls.seed,
                log_level=logging.INFO,
                dtype=cls.dtype,
                log_buffer=cls.log_buffer,
                csv_logfile=cls.csv_logfile
            )
    def setUp(self) -> None:
        """Rewind to the first scenario and reset before each test."""
        self.env.scenario_idx = 0
        self.env.reset()
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (terminates the SimAuto connection)."""
        cls.env.close()
    def test_branches_to_open_none(self):
        """No contingencies in this environment -> no branches to open."""
        self.assertIsNone(self.env.branches_to_open)
    def _shunt_action_helper(self, shunt_idx, start_state, finish_state):
        """The last action should toggle the last shunt. Ensure that
        shunt is open first. shunt_idx should be negative only.

        :param shunt_idx: Negative positional index into
            shunt_init_data; also used to compute the toggle action as
            an offset from the end of the action space.
        :param start_state: 'Open' or 'Closed' - state to force the
            shunt into before taking the action.
        :param finish_state: Expected state after the toggle action.
        """
        # Grab the last shunt.
        last_shunt = self.env.shunt_init_data.iloc[shunt_idx].copy()
        # Open it.
        last_shunt['SSStatus'] = start_state
        self.env.saw.ChangeParametersSingleElement(
            ObjectType='shunt',
            ParamList=last_shunt.index.tolist(),
            Values=last_shunt.tolist()
        )
        # Update observations.
        self.env._rotate_and_get_observation_frames()
        # Helper to pull this shunt.
        def get_shunt():
            s = self.env.saw.GetParametersSingleElement(
                ObjectType='shunt',
                ParamList=['BusNum', 'ShuntID', 'SSStatus'],
                Values=[last_shunt['BusNum'], last_shunt['ShuntID'], 0])
            return s
        # Confirm shunt changed state in PowerWorld.
        shunt_out = get_shunt()
        self.assertEqual(start_state, shunt_out['SSStatus'])
        # Now, take the last action, which should toggle this shunt.
        # Shunt actions occupy the tail of the action space, so
        # action_space.n + shunt_idx (shunt_idx negative) addresses the
        # corresponding shunt from the end.
        self.env._take_action(self.env.action_space.n + shunt_idx)
        # Confirm it's closed now.
        shunt_out = get_shunt()
        self.assertEqual(finish_state, shunt_out['SSStatus'])
    def test_take_action_last_shunt_open_to_closed(self):
        """Toggle the last shunt from open to closed."""
        self._shunt_action_helper(
            shunt_idx=-1, start_state='Open', finish_state='Closed')
    def test_take_action_first_shunt_closed_to_open(self):
        """Toggle the first shunt from closed to open."""
        # Hard-code the fact that there are 4 shunts in this case.
        self._shunt_action_helper(
            shunt_idx=-4, start_state='Closed', finish_state='Open'
        )
    def test_observation_space(self):
        """Ensure the observation space is the correct size via
        hard-coding.
        """
        # 200 buses, 49 generators, 4 shunts.
        n = 200 + 49 + 4
        self.assertEqual(
            (n,), self.env.observation_space.shape
        )
        # Lower bound should be 0.
        np.testing.assert_array_equal(
            np.zeros(n, dtype=self.env.dtype), self.env.observation_space.low
        )
        # Voltage cap at 2.
        np.testing.assert_array_equal(
            np.ones(200, dtype=self.dtype) + 1,
            self.env.observation_space.high[0:200])
        # All else at 1 (gen and shunt states)
        np.testing.assert_array_equal(
            np.ones(49+4, dtype=self.dtype),
            self.env.observation_space.high[200:]
        )
    def test_action_1_puts_gen_at_min(self):
        """Ensure action 1 puts the first generator at the lowest
        set point.
        """
        # Get the set point.
        gen_data = self.env.gen_obs_data.iloc[0]
        initial_v = gen_data['GenVoltSet']
        # Ensure we don't start at the minimum.
        self.assertNotAlmostEqual(initial_v, self.gen_voltage_range[0])
        # Take action 1 (0 is no-op).
        self.env._take_action(1)
        # Pull voltage for this generator.
        gen = self.env.saw.GetParametersSingleElement(
            ObjectType='gen',
            ParamList=self.env.gen_key_fields + ['GenVoltSet'],
            Values=gen_data[self.env.gen_key_fields].tolist() + [0]
        )
        # Ensure it's voltage set point is at the minimum.
        self.assertAlmostEqual(self.gen_voltage_range[0], gen['GenVoltSet'])
    def test_last_gen_action_puts_gen_at_max(self):
        """Ensure the last possible gen action puts the last generator
        at the highest set point.
        """
        # Get the starting set point.
        gen_data = self.env.gen_obs_data.iloc[-1]
        initial_v = gen_data['GenVoltSet']
        # Ensure we don't start at the maximum.
        self.assertNotAlmostEqual(initial_v, self.gen_voltage_range[1])
        # Take the last generator action. No need for -1 because action
        # 0 is no-op.
        self.env._take_action(self.env.num_gens * self.num_gen_voltage_bins)
        # Pull voltage for this generator.
        gen = self.env.saw.GetParametersSingleElement(
            ObjectType='gen',
            ParamList=self.env.gen_key_fields + ['GenVoltSet'],
            Values=gen_data[self.env.gen_key_fields].tolist() + [0]
        )
        # Ensure it's voltage set point is at the maximum.
        self.assertAlmostEqual(self.gen_voltage_range[1], gen['GenVoltSet'],
                               6)
# noinspection DuplicatedCode
class TX2000BusShuntsTapsGensTestCase(unittest.TestCase):
    """Test case for shunts, taps, and generators in the Texas 2000 bus
    case.
    """
    @classmethod
    def setUpClass(cls) -> None:
        """Construct the environment once for all tests in this class."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 10
        cls.max_load_factor = 1.2
        cls.min_load_factor = 0.8
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.shunt_closed_probability = 0.6
        cls.num_gen_voltage_bins = 5
        cls.gen_voltage_range = (0.95, 1.05)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.log_buffer = 10
        cls.csv_logfile = 'log.csv'
        # Expected number of shunts.
        cls.expected_shunts = 264
        # Ensure we remove the logfile if it was created by other
        # test cases.
        try:
            os.remove(cls.csv_logfile)
        except FileNotFoundError:
            pass
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_2000, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            shunt_closed_probability=cls.shunt_closed_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype,
            log_buffer=cls.log_buffer,
            csv_logfile=cls.csv_logfile
        )
    def setUp(self) -> None:
        """Restore the saved case state and rewind the scenario index
        before each test.
        """
        self.env.saw.LoadState()
        self.env.scenario_idx = 0
    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the environment (terminates the SimAuto connection)."""
        cls.env.close()
    def test_gens_at_same_bus_have_same_voltage_set_point_in_gen_v(self):
        """Generators regulating the same bus must share a voltage set
        point in the pre-drawn gen_v array.
        """
        # Extract boolean array indicating generators that regulate the
        # same bus.
        dup_arr = self.env.gen_dup_reg.to_numpy()
        # Shift that boolean array backwards one slot so we can test.
        dup_shifted = np.roll(dup_arr, -1)
        # Ensure voltage set points are the same for generators on the
        # same buses.
        np.testing.assert_array_equal(
            self.env.gen_v[:, dup_arr],
            self.env.gen_v[:, dup_shifted]
        )
    def test_take_action_multiple_gens_same_bus(self):
        """Ensure that commanding a voltage set point for generators
        that share a bus works properly.
        """
        # Bus 4192 has a bunch of generators.
        bus = 4192
        # Pull the initial voltage. It should be at 1.02.
        gen_volt_before = self.env.gen_init_data[
            self.env.gen_init_data['BusNum'] == bus
        ]['GenVoltSet']
        np.testing.assert_allclose(gen_volt_before.to_numpy(), 1.02)
        # Get the action which will put these generators at their max.
        # Add 1 due to the no-op action.
        action = np.argmax(
            (self.env.gen_action_array[:, 0] == bus)
            & (self.env.gen_action_array[:, 1]
               == self.env.gen_bins.shape[0] - 1)) + 1
        # Take the action.
        self.env._take_action(action)
        # Pull gens from PowerWorld.
        gens = self.env.saw.GetParametersMultipleElement(
            ObjectType='gen',
            ParamList=['BusNum', 'GenID', 'GenVoltSet']
        )
        # All generators at our bus should have the maximum voltage
        # set point.
        np.testing.assert_allclose(
            gens[gens['BusNum'] == bus]['GenVoltSet'].to_numpy(),
            self.env.gen_bins[-1]
        )
    def test_shunt_init_data(self):
        """Ensure the right number of shunts have been picked up."""
        self.assertEqual(self.env.shunt_init_data.shape[0],
                         self.expected_shunts)
    def test_shunt_shunt_states(self):
        """Ensure the shunt_states attribute is as expected."""
        # Check shape.
        self.assertEqual(self.env.shunt_states.shape,
                         (self.num_scenarios, self.expected_shunts))
        # Ensure the "on" percentage is fairly close (say, within 5%).
        on_pct = self.env.shunt_states.sum().sum() \
            / (self.num_scenarios * self.expected_shunts)
        self.assertGreaterEqual(on_pct, self.shunt_closed_probability - 0.05)
        self.assertLessEqual(on_pct, self.shunt_closed_probability + 0.05)
    def _shunt_helper(self, shunt_patch, state):
        """Helper: patch shunt_states, run _set_shunts_for_scenario,
        and assert every shunt in PowerWorld ends in ``state``.
        """
        with patch.object(self.env, 'shunt_states', new=shunt_patch):
            self.env._set_shunts_for_scenario()
        # Retrieve.
        df = self.env.saw.GetParametersMultipleElement(
            'shunt', self.env.shunt_key_fields + ['SSStatus'])
        # noinspection PyUnresolvedReferences
        self.assertTrue((df['SSStatus'] == state).all())
    def test_set_shunts_for_scenario_closed(self):
        """Ensure the shunt setting works properly."""
        # Close all shunts.
        shunt_patch = np.ones((self.num_scenarios, self.expected_shunts),
                              dtype=bool)
        self._shunt_helper(shunt_patch, 'Closed')
    def test_set_shunts_for_scenario_open(self):
        """Ensure all-False shunt states open every shunt."""
        # Open all shunts.
        shunt_patch = np.zeros((self.num_scenarios, self.expected_shunts),
                               dtype=bool)
        self._shunt_helper(shunt_patch, 'Open')
    def test_auto_control_overridden_in_init(self):
        """Ensure that all shunts have their AutoControl property
        turned off after initialization.
        """
        # Ensure that some shunts did indeed start in auto mode.
        self.assertTrue(
            (self.env.shunt_init_data['AutoControl'] == 'YES').any())
        # Fetch the current settings.
        shunts = self.env.saw.GetParametersMultipleElement(
            ObjectType='shunt',
            ParamList=self.env.shunt_key_fields + ['AutoControl'])
        # Now all shunts should not have auto control.
        self.assertTrue((shunts['AutoControl'] == 'NO').all())
    def test_ltc_data(self):
        """Ensure we're getting the expected LTC data."""
        # There should be 35 LTCs in the case.
        self.assertEqual(35, self.env.ltc_init_data.shape[0])
        # Fetch the integer (well, they're floats for whatever silly
        # reason) min and max tap positions. They should come back as
        # -16 to 16.
        ltc = self.env.saw.GetParametersMultipleElement(
            ObjectType='branch',
            ParamList=(self.env.branch_key_fields
                       + ['XFTapPos:1', 'XFTapPos:2']),
            FilterName=self.env.ltc_filter
        )
        self.assertTrue((ltc['XFTapPos:1'] == -16.0).all())
        self.assertTrue((ltc['XFTapPos:2'] == 16.0).all())
    def test_change_taps(self):
        """Test that changing taps creates the expected change in
        voltage.

        Moves one hard-coded LTC (branch 8140-8139, circuit '1')
        through tap positions 0, +16, and -16, checking tap position,
        tap percent, and the relative magnitude of the two bus voltages
        at each step.
        """
        # We'll be hard-coding bus numbers for an LTC.
        # b_f = 7402  # bus from
        # b_t = 7401  # bus to
        # b_f = 7423
        # b_t = 7422
        b_f = 8140
        b_t = 8139
        c = '1'  # circuit ID
        # print(f'From bus: {b_f}')
        # print(f'To bus: {b_t}')
        # List of parameters for commanding.
        c_list = ['BusNum', 'BusNum:1', 'LineCircuit', 'XFTapPos']
        # Set the tap to 0 and solve the power flow.
        self.env.saw.ChangeParametersSingleElement(
            ObjectType='branch', ParamList=c_list, Values=[b_f, b_t, c, 0])
        # Solve the power flow.
        self.env.saw.SolvePowerFlow()
        # Ensure the tap is in the right position and that the min and
        # max are as expected.
        # Ensure the tap changed.
        xf = self.env.saw.GetParametersSingleElement(
            ObjectType='branch',
            ParamList=c_list + ['XFTapPos:1', 'XFTapPos:2', 'BusPUVolt',
                                'BusPUVolt:1', 'LineAmp', 'LineR', 'LineX',
                                'XFTapMin', 'XFTapMax'],
            Values=([b_f, b_t, c] + [0] * 10))
        self.assertEqual(xf['XFTapPos:1'], -16)
        self.assertEqual(xf['XFTapPos:2'], 16)
        self.assertEqual(xf['XFTapPos'], 0)
        self.assertEqual(xf['XFTapMin'], 0.9)
        self.assertEqual(xf['XFTapMax'], 1.1)
        # Voltages should be almost equal.
        # self.assertAlmostEqual(xf['BusPUVolt'], xf['BusPUVolt:1'], 2)
        # print('With tap at 0:')
        # print(xf['BusPUVolt'] / xf['BusPUVolt:1'])
        # print(f"V1: {xf['BusPUVolt']:.3f}")
        # print(f"V2: {xf['BusPUVolt:1']:.3f}")
        # Put the tap at the maximum
        self.env.saw.ChangeParametersSingleElement(
            ObjectType='branch', ParamList=c_list, Values=[b_f, b_t, c, 16])
        # Solve the power flow.
        self.env.saw.SolvePowerFlow()
        # Ensure the tap changed.
        xf = self.env.saw.GetParametersSingleElement(
            ObjectType='branch',
            ParamList=c_list + ['BusPUVolt', 'BusPUVolt:1', 'XFTapPercent'],
            Values=[b_f, b_t, c, 0, 0, 0, 0])
        self.assertEqual(xf['XFTapPos'], 16)
        self.assertEqual(xf['XFTapPercent'], 100.0)
        # Ensure the first voltage is higher than the second.
        self.assertTrue(xf['BusPUVolt'] > xf['BusPUVolt:1'])
        # print('With tap at 16:')
        # print(xf['BusPUVolt'] / xf['BusPUVolt:1'])
        # print(f"V1: {xf['BusPUVolt']:.3f}")
        # print(f"V2: {xf['BusPUVolt:1']:.3f}")
        # The voltage out should be ~1.1 * the voltage in.
        # self.assertAlmostEqual(xf['BusPUVolt'] * 1.1, xf['BusPUVolt:1'])
        # Can't put the tap at the minimum without causing the power
        # flow to diverge. Do -8 instead of -16.
        # Put the tap low.
        self.env.saw.ChangeParametersSingleElement(
            ObjectType='branch', ParamList=c_list, Values=[b_f, b_t, c, -16])
        self.env.saw.SolvePowerFlow()
        # Ensure the tap changed.
        xf = self.env.saw.GetParametersSingleElement(
            ObjectType='branch',
            ParamList=c_list + ['BusPUVolt', 'BusPUVolt:1', 'XFTapPercent'],
            Values=[b_f, b_t, c, 0, 0, 0, 0])
        self.assertEqual(xf['XFTapPos'], -16)
        self.assertEqual(xf['XFTapPercent'], -100)
        # Ensure the second voltage is higher than the first.
        self.assertTrue(xf['BusPUVolt'] < xf['BusPUVolt:1'])
        # print('With tap at -16:')
        # print(xf['BusPUVolt'] / xf['BusPUVolt:1'])
        # print(f"V1: {xf['BusPUVolt']:.3f}")
        # print(f"V2: {xf['BusPUVolt:1']:.3f}")
    def test_action_space(self):
        """Ensure the action space is the correct size accounting for
        gens, ltcs, and shunts.

        TODO: unimplemented placeholder - assertTrue(False) fails
        deliberately as a reminder.
        """
        self.assertTrue(False)
    def test_action_ltc(self):
        """Ensure an action corresponding to an LTC is handled
        correctly.

        TODO: unimplemented placeholder - assertTrue(False) fails
        deliberately as a reminder.
        """
        self.assertTrue(False)
    def test_action_shunt(self):
        """Ensure an action corresponding to a shunt is handled
        correctly.

        TODO: unimplemented placeholder - assertTrue(False) fails
        deliberately as a reminder.
        """
        self.assertTrue(False)
    # def test_solve(self):
    #     while self.env.scenario_idx < self.env.num_scenarios:
    #         self.env.reset()
    #
    #     ratio = self.env.reset_successes / self.env.num_scenarios
    #     self.assertGreaterEqual(ratio, 0.9)
    #     print(f'Success ratio: {ratio:.3f}')
# noinspection DuplicatedCode
class DiscreteVoltageControlEnvFilterScenariosTestCase(unittest.TestCase):
    """Test the filter_scenarios method of the environment."""

    @classmethod
    def setUpClass(cls) -> None:
        """Create one shared environment for every test in the case."""
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 10
        cls.max_load_factor = 2
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.9, 1.1)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the shared environment."""
        cls.env.close()

    def setUp(self) -> None:
        """Reset the scenario index for each run, and restore the
        case.
        """
        self.env.scenario_idx = 0
        self.env.saw.LoadState()

    def test_filter(self):
        """Ensure the filtering is successful."""
        # Create a filter in which every other scenario is feasible.
        num_success = int(self.num_scenarios / 2)
        mask = np.array([True, False] * num_success)
        # Perform the filtering.
        self.env.filter_scenarios(mask)
        # Check shapes: every scenario-init array that exists should
        # have been cut down to the number of feasible scenarios.
        all_none = True
        for attr in self.env.SCENARIO_INIT_ATTRIBUTES:
            arr = getattr(self.env, attr)
            if arr is None:
                continue
            all_none = False
            self.assertEqual(num_success, arr.shape[0])
        # Guard against a vacuous pass (all attributes None).
        self.assertFalse(all_none)
        # Run reset until we exhaust the scenarios. Patch SolvePowerFlow
        # so we always have a success.
        with patch.object(self.env.saw, 'SolvePowerFlow'):
            i = 0
            while i < self.num_scenarios:
                try:
                    self.env.reset()
                except OutOfScenariosError:
                    break
                i += 1
        # Now, the scenario index should equal the number of successes.
        self.assertEqual(num_success, self.env.scenario_idx)
class ScaleVoltagesTestCase(unittest.TestCase):
    """Test _scale_voltages."""

    # noinspection PyMethodMayBeStatic
    def test_works(self):
        """Scaling maps [MIN_V, MAX_V] linearly onto
        [MIN_V_SCALED, MAX_V_SCALED].
        """
        # Hit the bottom, midpoint, and top of the voltage range, e.g.
        # with MIN_V = 0.7 and MAX_V = 1.2 this is [0.7, 0.95, 1.2].
        a = np.array([MIN_V, MIN_V + (MAX_V - MIN_V)/2, MAX_V])
        # Expected scaled values. The midpoint must be offset by
        # MIN_V_SCALED: the previous expression,
        # (MAX_V_SCALED - MIN_V_SCALED)/2, was only correct when
        # MIN_V_SCALED happened to be 0.
        expected = np.array([MIN_V_SCALED,
                             MIN_V_SCALED + (MAX_V_SCALED - MIN_V_SCALED)/2,
                             MAX_V_SCALED])
        np.testing.assert_allclose(expected, _scale_voltages(a))
class DiscreteVoltageControlEnvVoltBoundsTestCase(unittest.TestCase):
    """Test using the truncate_voltages parameter to cause
    power flows to count as failed if the voltages are out of range.
    We'll also test scaled voltage observations.
    """

    @classmethod
    def setUpClass(cls) -> None:
        """Create one shared environment with voltage truncation and
        observation scaling both enabled.
        """
        # Initialize the environment. Then, we'll use individual test
        # methods to test various attributes, methods, etc.
        # Define inputs to the constructor.
        cls.num_scenarios = 10
        cls.max_load_factor = 2
        cls.min_load_factor = 0.5
        cls.min_load_pf = 0.8
        cls.lead_pf_probability = 0.1
        cls.load_on_probability = 0.8
        cls.num_gen_voltage_bins = 9
        cls.gen_voltage_range = (0.9, 1.1)
        cls.seed = 18
        cls.log_level = logging.INFO
        cls.dtype = np.float32
        cls.truncate_voltages = True
        cls.scale_voltage_obs = True
        cls.env = voltage_control_env.DiscreteVoltageControlEnv(
            pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
            max_load_factor=cls.max_load_factor,
            min_load_factor=cls.min_load_factor,
            min_load_pf=cls.min_load_pf,
            lead_pf_probability=cls.lead_pf_probability,
            load_on_probability=cls.load_on_probability,
            num_gen_voltage_bins=cls.num_gen_voltage_bins,
            gen_voltage_range=cls.gen_voltage_range,
            seed=cls.seed,
            log_level=logging.INFO,
            dtype=cls.dtype,
            truncate_voltages=cls.truncate_voltages,
            scale_voltage_obs=cls.scale_voltage_obs
        )

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls) -> None:
        """Close the shared environment."""
        cls.env.close()

    def setUp(self) -> None:
        """Rewind and reset the environment before each test."""
        self.env.scenario_idx = 0
        self.env.reset()

    def patch_helper(self, df, exc=True):
        """Run _solve_and_observe with bus_obs_data patched to *df*.

        When exc is True, expect a PowerWorldError complaining that the
        scenario was rejected; otherwise expect a clean run.
        """
        with patch.object(self.env.saw, 'SolvePowerFlow') as p:
            with patch.object(self.env, 'bus_obs_data', new=df):
                with patch.object(self.env,
                                  '_rotate_and_get_observation_frames'):
                    with patch.object(self.env, '_get_observation'):
                        if exc:
                            with self.assertRaisesRegex(PowerWorldError,
                                                        'Scenario rejected'):
                                self.env._solve_and_observe()
                        else:
                            self.env._solve_and_observe()

    def test_solve_and_observe_low(self):
        """Ensure _solve_and_observe throws a PowerWorld error for
        voltages below the threshold.
        """
        # Patch the bus_obs_data DataFrame.
        # NOTE(review): chained indexing assignment — relies on pandas
        # returning a view here; confirm against the pandas version in use.
        df = pd.DataFrame({'BusPUVolt': np.ones(self.env.num_buses)})
        df.iloc[3]['BusPUVolt'] = MIN_V - 0.01
        self.patch_helper(df, True)

    def test_solve_and_observe_high(self):
        """Ensure _solve_and_observe throws a PowerWorld error for
        voltages above the threshold.
        """
        # Patch the bus_obs_data DataFrame.
        df = pd.DataFrame({'BusPUVolt': np.ones(self.env.num_buses)})
        df.iloc[7]['BusPUVolt'] = MAX_V + 0.01
        self.patch_helper(df, True)

    def test_solve_and_observe_in_bounds(self):
        """Ensure _solve_and_observe does NOT raise when voltages sit
        exactly on the allowed bounds.
        """
        # Patch the bus_obs_data DataFrame.
        df = pd.DataFrame({'BusPUVolt': np.ones(self.env.num_buses)})
        df.iloc[11]['BusPUVolt'] = MAX_V
        df.iloc[1]['BusPUVolt'] = MIN_V
        self.patch_helper(df, False)

    def test_scaled_voltage_observation(self):
        """Ensure that getting an observation correctly scales
        voltages.
        """
        arr = np.ones(self.env.num_buses, dtype=self.env.dtype)
        arr[0] = MIN_V
        arr[-1] = MAX_V
        df = pd.DataFrame({'BusPUVolt': arr})
        with patch.object(self.env, 'scale_voltage_obs', new=True):
            with patch.object(self.env, 'bus_obs_data', new=df):
                obs = self.env._get_observation()
        # Bus voltages occupy the first num_buses entries of the
        # observation vector.
        v = obs[0:self.env.num_buses]
        self.assertAlmostEqual(v[0], MIN_V_SCALED, 6)
        self.assertAlmostEqual(v[-1], MAX_V_SCALED, 6)

    def test_unscaled_voltage_observation(self):
        """Ensure that voltages come back unscaled when
        scale_voltage_obs is False.
        """
        arr = np.ones(self.env.num_buses, dtype=self.env.dtype)
        arr[0] = MIN_V
        arr[-1] = MAX_V
        df = pd.DataFrame({'BusPUVolt': arr})
        with patch.object(self.env, 'scale_voltage_obs', new=False):
            with patch.object(self.env, 'bus_obs_data', new=df):
                obs = self.env._get_observation()
        v = obs[0:self.env.num_buses]
        self.assertAlmostEqual(v[0], MIN_V, 6)
        self.assertAlmostEqual(v[-1], MAX_V, 6)

    def test_obs_space(self):
        """Voltage maximum should be 1."""
        # With scaling enabled, the observation space's voltage portion
        # should be capped at 1.
        np.testing.assert_array_equal(
            np.ones(self.env.num_buses, dtype=self.env.dtype),
            self.env.observation_space.high[0:self.env.num_buses]
        )
class GenMVRPercentTestCase(unittest.TestCase):
    """Test case for the generator attribute 'GenMVRPercent.' This shows
    several properties:

    - PowerWorld maps 0/0 to 0. In other words, if the generator is
      at its minimum and that minimum is 0, the GenMVRPercent is 0.
    - If a generator is off, it's GenMVRPercent is 0.
    - If a generator is absorbing vars (GenMVR < 0), the GenMVRPercent
      is negative.
    - Particularly for the swing bus, the GenMVRPercent can exceed
      100% or go below -100%.
    - GenMVRPercent values do indeed come back as percentage values,
      i.e. [-100, 100] (typically, excluding swing exception above)
    """

    @classmethod
    def setUpClass(cls) -> None:
        """Initialize SAW instance, tweak var limits for generator at
        bus one, pull data, save state.
        """
        # Get a SAW instance.
        cls.saw = SAW(FileName=PWB_14, early_bind=True)
        # Pull generator key field data.
        cls.gen_kf = cls.saw.get_key_field_list('gen')
        # Pull generator data.
        cls.gen_df_orig = cls.saw.GetParametersMultipleElement(
            'gen', cls.gen_kf + ['GenMVRPercent', 'GenStatus', 'GenMVR',
                                 'GenMVRMin', 'GenMVRMax']
        )
        # Pull load key field data.
        cls.load_kf = cls.saw.get_key_field_list('load')
        # Pull load data.
        cls.load_df_orig = cls.saw.GetParametersMultipleElement(
            'load', cls.load_kf + ['LoadSMW', 'LoadSMVR']
        )
        # Tweak generator 1's limits to match generator 2's limits. For
        # whatever reason, the case starts with generator 1 having 0
        # var limits.
        cls.gen_df_orig.loc[0, ['GenMVRMin', 'GenMVRMax']] = \
            cls.gen_df_orig.loc[1, ['GenMVRMin', 'GenMVRMax']]
        cls.saw.change_parameters_multiple_element_df('gen', cls.gen_df_orig)
        # Solve the power flow and save state.
        cls.saw.SolvePowerFlow()
        cls.saw.SaveState()

    def setUp(self) -> None:
        """Restore the saved case and take fresh copies of the data."""
        # Restore case.
        self.saw.LoadState()
        # Get copies of data.
        self.gen_df = self.gen_df_orig.copy(deep=True)
        self.load_df = self.load_df_orig.copy(deep=True)

    @classmethod
    def tearDownClass(cls) -> None:
        """Terminate the SAW/PowerWorld instance."""
        # noinspection PyUnresolvedReferences
        cls.saw.exit()

    def test_gen_off(self):
        """A generator that is 'off' should have GenMVRPercent=0."""
        # Open a generator, solve the power flow.
        self.gen_df.loc[2, 'GenStatus'] = 'Open'
        self.saw.change_parameters_multiple_element_df('gen', self.gen_df)
        self.saw.SolvePowerFlow()
        # Retrieve updated generator data.
        data = self.saw.GetParametersMultipleElement(
            'gen', self.gen_kf + ['GenStatus', 'GenMVRPercent']
        )
        # Ensure the generator is indeed open.
        self.assertEqual(data.loc[2, 'GenStatus'], 'Open')
        # Ensure the GenMVRPercent is 0 for that generator.
        self.assertEqual(data.loc[2, 'GenMVRPercent'], 0)

    def test_max_out_gen_var_production(self):
        """Increase var loading in case, ensure GenMVRPercent=1"""
        # First, make any leading loads lagging.
        mask = self.load_df['LoadSMVR'] < 0
        self.load_df.loc[mask, 'LoadSMVR'] = \
            self.load_df.loc[mask, 'LoadSMVR'] * -1
        # Increase reactive loading.
        self.load_df['LoadSMVR'] = self.load_df['LoadSMVR'] * 2.1
        self.saw.change_parameters_multiple_element_df('load', self.load_df)
        # Solve the power flow.
        self.saw.SolvePowerFlow()
        # Pull generator data.
        data = self.saw.GetParametersMultipleElement(
            'gen', self.gen_kf + ['GenMVR', 'GenMVRPercent']
        )
        # With the exception of the swing bus, all MVRPercent values
        # should be at 100.
        self.assertTrue((data.loc[1:, 'GenMVRPercent'] == 100.0).all())
        # Swing bus should be greater than 100 in this scenario.
        self.assertTrue(data.loc[0, 'GenMVRPercent'] > 100.0)

    def test_max_out_gen_var_consumption(self):
        """Flip loads to produce vars."""
        # Flip load vars as necessary
        mask = self.load_df['LoadSMVR'] > 0
        self.load_df.loc[mask, 'LoadSMVR'] = \
            self.load_df.loc[mask, 'LoadSMVR'] * -1
        # Up the magnitude of the var loading
        self.load_df['LoadSMVR'] = self.load_df['LoadSMVR'] * 1.5
        self.saw.change_parameters_multiple_element_df('load', self.load_df)
        # Solve the power flow.
        self.saw.SolvePowerFlow()
        # Pull generator data.
        data = self.saw.GetParametersMultipleElement(
            'gen', self.gen_kf + ['GenMVR', 'GenMVRPercent']
        )
        # Notes:
        # gen at bus 3 (index 2) is at 0% because it's min is 0%.
        # Swing bus exceeds -100
        #
        # All gens except swing and gen at bus 3 should be at -100:
        self.assertEqual(data.loc[1, 'GenMVRPercent'], -100.0)
        self.assertTrue((data.loc[3:, 'GenMVRPercent'] == -100.0).all())
        # Swing will be in excess of -100
        self.assertTrue(data.loc[0, 'GenMVRPercent'] < -100.0)
        # Gen at bus 3 is at 0 due to its limit of 0.
        self.assertEqual(data.loc[2, 'GenMVRPercent'], 0.0)
        # The corresponding GenMVRMin should be 0.
        self.assertEqual(self.gen_df_orig.loc[2, 'GenMVRMin'], 0.0)
#
# # noinspection DuplicatedCode
# class GridMindHardSolveTestCase(unittest.TestCase):
# """Ensure a certain percentage of hard cases are solvable."""
#
# @classmethod
# def setUpClass(cls) -> None:
# # Initialize the environment. Then, we'll use individual test
# # methods to test various attributes, methods, etc.
#
# # Define inputs to the constructor.
# cls.num_scenarios = 1000
# # 50% to 150% loading.
# cls.max_load_factor = 1.5
# cls.min_load_factor = 0.5
# cls.min_load_pf = 0.8
# cls.lead_pf_probability = 0.1
# cls.load_on_probability = 0.8
# cls.num_gen_voltage_bins = 5
# cls.gen_voltage_range = (0.95, 1.05)
# cls.seed = 18
# cls.log_level = logging.INFO
# cls.dtype = np.float32
# cls.log_buffer = 10
# cls.csv_logfile = 'log.csv'
#
# # Ensure we remove the logfile if it was created by other
# # test cases.
# try:
# os.remove(cls.csv_logfile)
# except FileNotFoundError:
# pass
#
# cls.rewards = {
# "normal": 100,
# "violation": -50,
# "diverged": -100
# }
#
# cls.env = voltage_control_env.GridMindHardEnv(
# pwb_path=PWB_14_CONDENSERS, num_scenarios=cls.num_scenarios,
# max_load_factor=cls.max_load_factor,
# min_load_factor=cls.min_load_factor,
# min_load_pf=cls.min_load_pf,
# lead_pf_probability=cls.lead_pf_probability,
# load_on_probability=cls.load_on_probability,
# num_gen_voltage_bins=cls.num_gen_voltage_bins,
# gen_voltage_range=cls.gen_voltage_range,
# seed=cls.seed,
# log_level=logging.INFO,
# rewards=cls.rewards,
# dtype=cls.dtype,
# log_buffer=cls.log_buffer,
# csv_logfile=cls.csv_logfile
# )
#
# # noinspection PyUnresolvedReferences
# @classmethod
# def tearDownClass(cls) -> None:
# cls.env.close()
#
# def test_solve(self):
# while self.env.scenario_idx < self.env.num_scenarios:
# self.env.reset()
#
# ratio = self.env.reset_successes / self.env.num_scenarios
# self.assertGreaterEqual(ratio, 0.9)
# print(f'Success ratio: {ratio:.3f}')
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
129169 | # NOTE: This is a copy from ~/DevPriv/PythonProjects/MediaWikiMgmt/jk_mediawikirepo/src/jk_mediawikirepo/app/*
import jk_console
from .CLIForm import CLIForm
from .IOutputWriter import IOutputWriter
FG = jk_console.Console.ForeGround
SECTION_COLOR = FG.STD_LIGHTCYAN
SUBSECTION_COLOR = FG.STD_LIGHTCYAN
class _PrintSubSection(IOutputWriter):
    """Context manager rendering an indented, optionally colored sub-section.

    On construction it prints the (colored) title line; on exit it prints
    a closing marker followed by an empty line.
    """

    def __init__(self, printer, prefix:str, title:str, bColor:bool = True, color:str = None):
        assert printer
        assert isinstance(prefix, str)
        assert isinstance(title, str)
        assert isinstance(bColor, bool)

        if color is None and bColor:
            # Default sub-section color (the original referenced
            # SECTION_COLOR here; SUBSECTION_COLOR is the matching,
            # value-identical constant).
            color = SUBSECTION_COLOR
        if color is None:
            # No coloring requested: emit plain text.
            color = ""
            colorReset = ""
        else:
            assert isinstance(color, str)
            # Bug fix: previously colorReset was left unassigned when the
            # default color was chosen (color is None and bColor True),
            # which raised a NameError on the print below.
            colorReset = jk_console.Console.RESET

        self.__color = color
        self.__bColor = bColor
        self.__printer = printer
        self._print = printer.print

        # Emit the sub-section title immediately.
        self._print(color + prefix + title + colorReset)

        self.__prefix = prefix
    #

    def __enter__(self):
        return self
    #

    def _dataToStr(self, *args):
        # Prefix every data line with the sub-section's indent marker.
        return self.__prefix + "⸽ " + " ".join([ str(a) for a in args ])
    #

    def __exit__(self, exClazz, exObj, exStackTrace):
        # Closing marker plus a trailing blank line.
        self._print(self.__prefix + "⸌┈")
        self._print()
    #
class _PrintSection(IOutputWriter):
    """Context manager rendering a prominent, optionally colored section banner.

    On construction it prints a banner (blank line, ruler, title, ruler,
    blank line); on exit it prints a single blank line.
    """

    def __init__(self, printer, title:str, bColor:bool = True, color:str = None):
        assert printer
        assert isinstance(title, str)

        if color is None and bColor:
            # Default section color.
            color = SECTION_COLOR
        if color is None:
            # No coloring requested: emit plain text.
            color = ""
            colorReset = ""
        else:
            assert isinstance(color, str)
            # Bug fix: previously colorReset was left unassigned when the
            # default color was chosen (color is None and bColor True),
            # which raised a NameError on the prints below.
            colorReset = jk_console.Console.RESET

        self.__color = color
        self.__bColor = bColor
        self.__printer = printer
        self._print = printer.print

        # Banner: blank line, ruler, title line, ruler, blank line.
        self._print()
        self._print(color + ">"*120 + colorReset)
        self._print(color + ">>>>>>>> " + title + " " + colorReset)
        self._print(color + ">"*120 + colorReset)
        self._print()
    #

    def __enter__(self):
        return self
    #

    def subsection(self, *args, color:str = None) -> _PrintSubSection:
        """Open a nested sub-section; inherits this section's color by default."""
        assert len(args) > 0
        title = " ".join([str(a) for a in args])
        if color is None:
            color = self.__color
        return _PrintSubSection(self.__printer, " ", title, self.__bColor, color)
    #

    def _dataToStr(self, *args):
        return " " + " ".join([ str(a) for a in args ])
    #

    def __exit__(self, exClazz, exObj, exStackTrace):
        self._print()
    #
class OutputWriter(IOutputWriter):
    """Buffered console writer with blank-line collapsing.

    Lines passed to print() are stored in an internal buffer and written
    to stdout on flush() (or immediately when autoFlush is enabled).
    Consecutive empty lines are collapsed into a single one.  Use as a
    context manager to get a blank line before and after the output plus
    a final flush.
    """

    ################################################################################################################################
    ## Constructor Methods
    ################################################################################################################################

    def __init__(self, bColor:bool = True):
        # True while the most recently buffered line was empty
        # (used to collapse consecutive blank lines).
        self.__bLastLineWasEmpty = False
        # True once at least one print() call has been made.
        self.__bHadOutput = False
        self._print = self.print
        # Whether colored output is requested for sections.
        self.__bColor = bColor
        # Pending output lines, emitted and cleared by flush().
        self.__buffer = []
        self.__bAutoFlush = False
    #

    ################################################################################################################################
    ## Public Properties
    ################################################################################################################################

    @property
    def autoFlush(self) -> bool:
        """If True, every buffered line is written out immediately."""
        return self.__bAutoFlush
    #

    @autoFlush.setter
    def autoFlush(self, value:bool):
        assert isinstance(value, bool)
        self.__bAutoFlush = value
    #

    ################################################################################################################################
    ## Helper Methods
    ################################################################################################################################

    def __printToBuffer(self, *args):
        # NOTE(review): not referenced within this class; unlike print()
        # it does not strip trailing whitespace or collapse blank lines.
        s = " ".join([ str(a) for a in args ])
        self.__buffer.append(s)
        if self.__bAutoFlush:
            self.flush()
    #

    ################################################################################################################################
    ## Public Methods
    ################################################################################################################################

    def print(self, *args):
        """Buffer one output line; collapses consecutive empty lines."""
        text = " ".join([ str(a) for a in args ])
        text = text.rstrip()
        if len(text) == 0:
            # Only keep the first of a run of empty lines.
            if not self.__bLastLineWasEmpty:
                self.__buffer.append("")
                self.__bLastLineWasEmpty = True
        else:
            self.__buffer.append(text)
            self.__bLastLineWasEmpty = False
        self.__bHadOutput = True
        if self.__bAutoFlush:
            self.flush()
    #

    def _dataToStr(self, *args):
        return " ".join([ str(a) for a in args ])
    #

    def section(self, *args, color:str = None) -> _PrintSection:
        """Open a new top-level output section with a banner title."""
        assert len(args) > 0
        title = " ".join([str(a) for a in args])
        return _PrintSection(self, title, self.__bColor, color)
    #

    def __enter__(self):
        self.print()
        return self
    #

    def __exit__(self, exClazz, exObj, exStackTrace):
        self.print()
        self.flush()
    #

    def flush(self):
        """Write all buffered lines to stdout and clear the buffer."""
        for line in self.__buffer:
            print(line)
        self.__buffer.clear()
    #
| StarcoderdataPython |
307023 | from typing import Callable
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from sympy import diff, lambdify, parse_expr
from src.common.consts import COLOR, TRANSFORMATIONS
from src.common.methods.numerical_differentiation.first_derivative_finder import FirstDerivativeFinder
from src.common.methods.numerical_differentiation.second_derivative_finder import SecondDerivativeFinder
from src.tasks.task3.subtask2.common.state_var import StateVar
def _table_to_latex(table: pd.DataFrame, precision: int, na_rep: str = 'nan') -> str:
    """Render *table* as a LaTeX tabular string.

    The two absolute-error columns are formatted in scientific notation;
    all remaining numeric cells use the given precision, with *na_rep*
    substituted for missing values.
    """
    exponent_format = '{:e}'  # noqa: P103
    per_column_formats = {
        r"|f'(x_i)_\text{Т} - f'(x_i)_\text{ЧД}|": exponent_format,
        r"|f''(x_i)_\text{Т} - f''(x_i)_\text{ЧД}|": exponent_format,
    }
    styled = table.style.format(
        per_column_formats,
        precision=precision,
        na_rep=na_rep,
    )
    return styled.to_latex(column_format=len(table.columns) * 'c', hrules=True)
def _latex_to_katex(latex_str: str) -> str:
katex_str = r'\def\arraystretch{1.3}' + latex_str # noqa: WPS336
katex_str = katex_str.replace('tabular', 'array')
katex_str = katex_str.replace('toprule', 'hline')
katex_str = katex_str.replace('midrule', 'hline')
katex_str = katex_str.replace('bottomrule', 'hline')
return katex_str # noqa: WPS331
def _show_plot(f: Callable, x: pd.Series, y: pd.Series, title: str = ''):
    """Render a Streamlit chart comparing *f* with numeric values.

    *f* (the analytic derivative) is drawn as a dense curve, and the
    numerically computed derivative values ``(x, y)`` are overlaid as
    markers.
    """
    fig = go.Figure()
    # Dense grid (1/100 of the table step) so the analytic curve looks
    # smooth; the extra step at the stop end keeps max(x) included.
    x_range = np.arange(
        start=min(x),
        stop=max(x) + StateVar.STEP.get() / 100,
        step=StateVar.STEP.get() / 100,
    )
    fig.add_scatter(x=x_range, y=f(x_range), name='Производная', marker_color=COLOR.DARK_GRAY.value)
    fig.add_scatter(x=x, y=y, mode='markers', name='Найденные значения производной', marker_color=COLOR.STREAMLIT.value)
    # Center the title and put a horizontal legend beneath the plot.
    fig.update_layout(
        title_text=title,
        title_x=0.5,
        legend={'orientation': 'h', 'yanchor': 'top', 'xanchor': 'center', 'y': -0.1, 'x': 0.5},
    )
    st.plotly_chart(fig, use_container_width=True)
def show_results(table: pd.DataFrame):
    """Append numeric first/second derivatives and their errors versus the
    analytic ones to *table*, then render the table (as KaTeX) and two
    comparison plots in Streamlit.

    Note: mutates *table* in place (new columns, renamed columns).
    """
    st.header('Результаты')

    f_expr = parse_expr(StateVar.TEXT_EXPRESSION.get(), transformations=TRANSFORMATIONS)

    # Analytic first derivative as a numeric callable.
    df = lambdify('x', diff(f_expr))

    first_derivative_finder = FirstDerivativeFinder()
    first_derivatives = first_derivative_finder.calculate_derivatives_table(
        function_values=table['y'],
        step=StateVar.STEP.get(),
    )
    table[r"f'(x_i)_\text{ЧД}"] = first_derivatives
    table[r"|f'(x_i)_\text{Т} - f'(x_i)_\text{ЧД}|"] = abs(first_derivatives - df(table['x']))

    # Analytic second derivative as a numeric callable.
    ddf = lambdify('x', diff(diff(f_expr)))

    second_derivative_finder = SecondDerivativeFinder()
    second_derivatives = second_derivative_finder.calculate_derivatives_table(
        function_values=table['y'],
        step=StateVar.STEP.get(),
    )
    table[r"f''(x_i)_\text{ЧД}"] = second_derivatives
    table[r"|f''(x_i)_\text{Т} - f''(x_i)_\text{ЧД}|"] = abs(second_derivatives - ddf(table['x']))

    # Relabel for presentation: index column 'i', math-style headers.
    table.columns.name = 'i'
    table.rename(columns={'x': 'x_i', 'y': 'f(x_i)'}, inplace=True)

    # Em-dash placeholder for missing (NaN) boundary values.
    latex_str = _table_to_latex(table, StateVar.PRECISION.get(), r'\text{\textemdash}')
    st.latex(_latex_to_katex(latex_str))

    left_column, right_column = st.columns(2)
    with left_column:
        _show_plot(df, table['x_i'], first_derivatives, 'Первая производная')
    with right_column:
        _show_plot(ddf, table['x_i'], second_derivatives, 'Вторая производная')
| StarcoderdataPython |
12825227 | import logging
import pandas as pd
import numpy as np
from spaceone.core.manager import BaseManager
from spaceone.statistics.error import *
from spaceone.statistics.connector.service_connector import ServiceConnector
# Module-level logger for this manager.
_LOGGER = logging.getLogger(__name__)

# Maps the public (upper-case) join type names to pandas' `how` argument.
_JOIN_TYPE_MAP = {
    'LEFT': 'left',
    'RIGHT': 'right',
    'OUTER': 'outer',
    'INNER': 'inner'
}

# Aggregate pipeline stages understood by ResourceManager.
# NOTE(review): not referenced within this module — possibly consumed
# elsewhere; verify before removing.
_SUPPORTED_AGGREGATE_OPERATIONS = [
    'query',
    'join',
    'concat',
    'sort',
    'formula',
    'fill_na'
]
class ResourceManager(BaseManager):
    """Executes cross-service statistics queries.

    An *aggregate* is a list of pipeline stages (``query``, ``join``,
    ``concat``, ``sort``, ``formula``, ``fill_na``) evaluated left to
    right on a pandas DataFrame; the result is paged and returned as a
    plain dict.
    """

    def stat(self, aggregate, page, domain_id):
        """Run the aggregate pipeline and return a paged result dict."""
        results = self._execute_aggregate_operations(aggregate, domain_id)
        return self._page(page, results)

    def _execute_aggregate_operations(self, aggregate, domain_id):
        """Evaluate each stage in order; the first stage must be a query."""
        df = None

        if 'query' not in aggregate[0]:
            raise ERROR_REQUIRED_QUERY_OPERATION()

        for stage in aggregate:
            if 'query' in stage:
                df = self._query(stage['query'], domain_id)
            elif 'join' in stage:
                df = self._join(stage['join'], domain_id, df)
            elif 'concat' in stage:
                df = self._concat(stage['concat'], domain_id, df)
            elif 'sort' in stage:
                df = self._sort(stage['sort'], df)
            elif 'formula' in stage:
                df = self._execute_formula(stage['formula'], df)
            elif 'fill_na' in stage:
                df = self._fill_na(stage['fill_na'], df)
            else:
                raise ERROR_REQUIRED_PARAMETER(key='aggregate.query | aggregate.join | aggregate.concat | '
                                                   'aggregate.sort | aggregate.formula | aggregate.fill_na')

        # Replace NaN with None so the records are JSON-serializable.
        df = df.replace({np.nan: None})
        results = df.to_dict('records')

        return results

    @staticmethod
    def _fill_na(options, base_df):
        """Fill NaN values per column as given by ``options['data']``."""
        data = options.get('data', {})
        if len(data.keys()) > 0:
            base_df = base_df.fillna(data)
        return base_df

    def _execute_formula(self, options, base_df):
        """Apply an ``eval`` (column expression) or ``query`` (row filter)
        formula to a non-empty DataFrame.
        """
        if len(base_df) > 0:
            if 'eval' in options:
                base_df = self._execute_formula_eval(options['eval'], base_df)
            elif 'query' in options:
                base_df = self._execute_formula_query(options['query'], base_df)
            else:
                raise ERROR_REQUIRED_PARAMETER(key='aggregate.formula.eval | aggregate.formula.query')

        return base_df

    @staticmethod
    def _execute_formula_query(formula, base_df):
        """Filter rows with DataFrame.query()."""
        try:
            base_df = base_df.query(formula)
        except Exception as e:
            # Chain the pandas error so the root cause is not lost.
            raise ERROR_STATISTICS_FORMULA(formula=formula) from e
        return base_df

    @staticmethod
    def _execute_formula_eval(formula, base_df):
        """Evaluate a column expression with DataFrame.eval()."""
        try:
            base_df = base_df.eval(formula)
        except Exception as e:
            raise ERROR_STATISTICS_FORMULA(formula=formula) from e
        return base_df

    @staticmethod
    def _sort(options, base_df):
        """Sort by ``options['key']``; descending when ``options['desc']``."""
        if 'key' in options and len(base_df) > 0:
            ascending = not options.get('desc', False)
            try:
                return base_df.sort_values(by=options['key'], ascending=ascending)
            except Exception as e:
                raise ERROR_STATISTICS_QUERY(reason=f'Sorting failed. (sort = {options})') from e
        else:
            return base_df

    def _concat(self, options, domain_id, base_df):
        """Run the concat stage's query and append its rows to *base_df*."""
        # Fix: previously passed operator='join', so missing-parameter
        # errors named the wrong stage in their message.
        concat_df = self._query(options, domain_id, operator='concat')

        try:
            base_df = pd.concat([base_df, concat_df], ignore_index=True)
        except Exception as e:
            raise ERROR_STATISTICS_CONCAT(reason=str(e)) from e

        return base_df

    @staticmethod
    def _generate_empty_data(query):
        """Build an empty DataFrame whose columns match what the query's
        innermost ``group`` stage would have produced.
        """
        empty_data = {}
        aggregate = query.get('aggregate', [])

        # Iterate a reversed *view*: the previous in-place
        # aggregate.reverse() mutated the caller's query dict.
        for stage in reversed(aggregate):
            if 'group' in stage:
                group = stage['group']
                for key in group.get('keys', []):
                    if 'name' in key:
                        empty_data[key['name']] = []

                for field in group.get('fields', []):
                    if 'name' in field:
                        empty_data[field['name']] = []

                break

        return pd.DataFrame(empty_data)

    def _join(self, options, domain_id, base_df):
        """Run the join stage's query and merge its result with *base_df*.

        Joins on ``options['keys']`` when given, otherwise on the index.
        """
        if 'type' in options and options['type'] not in _JOIN_TYPE_MAP:
            raise ERROR_INVALID_PARAMETER_TYPE(key='aggregate.join.type', type=list(_JOIN_TYPE_MAP.keys()))

        join_keys = options.get('keys')
        join_type = options.get('type', 'LEFT')
        join_df = self._query(options, domain_id, operator='join')

        try:
            if join_keys:
                base_df = pd.merge(base_df, join_df, on=join_keys, how=_JOIN_TYPE_MAP[join_type])
            else:
                base_df = pd.merge(base_df, join_df, left_index=True, right_index=True, how=_JOIN_TYPE_MAP[join_type])
        except Exception as e:
            if join_keys is None:
                raise ERROR_STATISTICS_INDEX_JOIN(reason=str(e)) from e
            else:
                raise ERROR_STATISTICS_JOIN(resource_type=options['resource_type'], join_keys=join_keys) from e

        return base_df

    def _query(self, options, domain_id, operator='query'):
        """Run one stat query against another service; return a DataFrame.

        *operator* only affects the parameter names reported in errors.
        """
        resource_type = options.get('resource_type')
        query = options.get('query')
        extend_data = options.get('extend_data', {})

        if resource_type is None:
            raise ERROR_REQUIRED_PARAMETER(key=f'aggregate.{operator}.resource_type')

        if query is None:
            raise ERROR_REQUIRED_PARAMETER(key=f'aggregate.{operator}.query')

        self.service_connector: ServiceConnector = self.locator.get_connector('ServiceConnector')
        service, resource = self._parse_resource_type(resource_type)

        try:
            response = self.service_connector.stat_resource(service, resource, query, domain_id)
            results = response.get('results', [])

            if len(results) > 0 and not isinstance(results[0], dict):
                # Scalar results get a single 'value' column.
                df = pd.DataFrame(results, columns=['value'])
            else:
                df = pd.DataFrame(results)

            if len(df) == 0:
                # Keep downstream joins working by synthesizing the
                # columns the query would have produced.
                df = self._generate_empty_data(query)

            return self._extend_data(df, extend_data)
        except ERROR_BASE as e:
            raise ERROR_STATISTICS_QUERY(reason=e.message) from e
        except Exception as e:
            # str(e), not the exception object, so the reason is always
            # a serializable string.
            raise ERROR_STATISTICS_QUERY(reason=str(e)) from e

    @staticmethod
    def _parse_resource_type(resource_type):
        """Split a 'service.Resource' identifier into its two parts."""
        try:
            service, resource = resource_type.split('.')
        except Exception as e:
            raise ERROR_INVALID_PARAMETER(key='resource_type',
                                          reason=f'resource_type is invalid. ({resource_type})') from e

        return service, resource

    @staticmethod
    def _extend_data(df, data):
        """Add constant-valued columns from *data* to the DataFrame."""
        for key, value in data.items():
            df[key] = value

        return df

    @staticmethod
    def _page(page, results):
        """Apply start/limit paging; start is 1-based and clamped to >= 1."""
        response = {
            'total_count': len(results)
        }

        if 'limit' in page and page['limit'] > 0:
            start = page.get('start', 1)
            if start < 1:
                start = 1

            response['results'] = results[start - 1:start + page['limit'] - 1]
        else:
            response['results'] = results

        return response
| StarcoderdataPython |
214395 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import jpath
from lck.django.common.models import MACAddressField
from lck.lang import Null, nullify
from lck.xml import etree_to_dict
import lck.xml.converters
from lxml import etree as ET
from ralph.discovery.models import (
DISK_PRODUCT_BLACKLIST,
DISK_VENDOR_BLACKLIST,
DeviceType,
)
from ralph.scan.errors import Error
from ralph.util import units, untangle
# XML rewriting rules for parse_lshw():
# (tag, attribute) pairs whose attribute value replaces the element's tag
# (e.g. <node class="disk"> becomes <disk>).
TAG_TRANSLATION_PAIRS = set([
    ('node', 'class'),
    ('capability', 'id'),
    ('setting', 'id'),
    ('resource', 'type'),
])
# (tag, attribute) pairs whose attribute value replaces the element's text.
TEXT_TRANSLATION_PAIRS = set([
    ('setting', 'value'),
])
# Matches the first digit run not starting with 0; used to pull a fibre
# channel card's physical id out of its bus handle string.
FC_CARD_PHYSICAL_ID_EXPRESSION = re.compile(r"([1-9][0-9]*)")
def _nullify(value):
    # Converter used in parse_lshw's etree_to_dict converter chain:
    # maps None to lck's Null sentinel.  Raising ValueError for anything
    # else presumably signals the chain to try the next converter
    # (int, float, ...) — same protocol as int()/float() failing.
    if value is not None:
        raise ValueError
    return Null
def _get_logical_name(arg):
l_name = arg['logicalname']
if isinstance(l_name, list):
return l_name[0]
else:
return l_name
def parse_lshw(raw_data):
    """Parse raw ``lshw`` XML output into a nested dict structure.

    The element tree is first rewritten in place (attribute-to-tag and
    attribute-to-text translations, attribute promotion to child
    elements) and then converted with lck's ``etree_to_dict``; absent
    values come back as lck's ``Null`` sentinel via ``nullify``.

    Raises Error when the document root is not a NODE element.
    """
    # recover=True lets lxml tolerate slightly malformed lshw output.
    parser = ET.ETCompatXMLParser(recover=True)
    response = ET.fromstring(raw_data, parser=parser)
    if response.tag is None or response.tag.upper() != 'NODE':
        raise Error('Lshw parse error.')
    for element in response.findall('.//'):
        for k in element.attrib.keys():
            try:
                v = element.attrib[k]
            except UnicodeDecodeError:
                continue  # value has bytes not possible to decode with UTF-8
            if (element.tag, k) in TAG_TRANSLATION_PAIRS:
                # Promote the attribute value to the element's tag, e.g.
                # <node class="disk"> becomes <disk>.
                try:
                    element.tag = v
                except ValueError:
                    pass
                continue
            if (element.tag, k) in TEXT_TRANSLATION_PAIRS:
                element.text = v
                continue
            if k == 'units':
                # Wrap the numeric text in a <value> child so the final
                # dict reads {'value': ..., 'units': ...}.
                value = ET.Element(b'value')
                value.text = element.text
                element.text = ''
                element.append(value)
            # Expose every remaining attribute as a child element.
            child = ET.Element(k)
            child.text = v
            element.append(child)
    return nullify(
        etree_to_dict(
            response,
            _converters=[
                _nullify,
                int,
                float,
                lck.xml.converters._datetime,
                lck.xml.converters._datetime_strip_tz,
            ],
        ),
    )[1]
def handle_lshw(data, is_virtual):
    """Turn raw lshw XML into a Ralph scan results dict.

    Returns keys such as model_name, type, mac_addresses, memory,
    processors, disks and fibrechannel_cards — each component key is
    only present when something was detected.
    """
    lshw = parse_lshw(data)
    results = {}
    prod_name = lshw['product']
    # Shorten vendor strings like "Dell, Inc." to "Dell".
    manufacturer = lshw['vendor'].replace(', Inc.', '')
    # Drop lshw's trailing empty parenthesis from the product name.
    if prod_name.endswith(' ()'):
        prod_name = prod_name[:-3]
    if manufacturer and manufacturer in prod_name:
        # Product name already contains the vendor; avoid duplication.
        model_name = prod_name
    else:
        model_name = "{} {}".format(manufacturer, prod_name)
    results['model_name'] = model_name
    # Classify the device: virtual wins over blade, blade over rack.
    if is_virtual:
        model_type = DeviceType.virtual_server
    elif DeviceType.blade_server.matches(model_name):
        model_type = DeviceType.blade_server
    else:
        model_type = DeviceType.rack_server
    results['type'] = model_type.raw
    mac_addresses = handle_lshw_mac_addresses(lshw)
    if mac_addresses:
        results['mac_addresses'] = mac_addresses
    memory = handle_lshw_memory(lshw['bus']['memory'], is_virtual)
    if memory:
        results['memory'] = memory
    processors = handle_lshw_processors(lshw['bus']['processor'], is_virtual)
    if processors:
        results['processors'] = processors
    disks = handle_lshw_storage(lshw)
    if disks:
        results['disks'] = disks
    fc_cards = handle_lshw_fibrechannel_cards(lshw)
    if fc_cards:
        results['fibrechannel_cards'] = fc_cards
    return results
def handle_lshw_mac_addresses(lshw):
    """Extract normalized, de-duplicated MAC addresses from parsed lshw data.

    Network nodes are processed in logical-name order; entries without a
    parseable serial are skipped.  Returns a sorted list so the result
    is deterministic.
    """
    mac_addresses = set()
    ethernets = sorted(
        (e for e in jpath.get_all('..network', lshw) if e),
        key=_get_logical_name,
    )
    # The previous enumerate() index was never used.
    for ethernet in untangle(ethernets):
        try:
            mac = MACAddressField.normalize(ethernet['serial'])
        except (ValueError, KeyError):
            continue
        if not mac:
            continue
        mac_addresses.add(mac)
    # sorted() instead of list(): iterating a set yields an arbitrary
    # order, which made the result non-deterministic.
    return sorted(mac_addresses)
def handle_lshw_memory(bus_memory, is_virtual=False):
    """Build a list of detected memory banks from lshw bus data.

    Each entry is a dict with 'label', 'size' (converted via
    units.size_divisor) and a 1-based 'index'; empty slots (no 'size')
    are skipped.
    """
    banks = []
    for entry in bus_memory:
        # We're interested only in the system memory, not in caches etc.
        if entry['id'] == 'memory':
            banks = entry['memory']
            break
        elif entry['id'].startswith('memory:'):
            banks.extend(entry['memory'])
    # A single bank comes through as a bare dict; normalize to a list.
    if isinstance(banks, dict):
        banks = [banks]
    detected_memory = []
    bank_index = 0
    for bank in banks:
        if 'size' not in bank:
            # Empty slot.
            continue
        bank_index += 1
        raw_size = bank['size']
        size = int(int(raw_size['value'] or 0) /
                   units.size_divisor[raw_size['units']])
        label = bank['slot']
        if is_virtual:
            label = 'Virtual %s' % label
        detected_memory.append({
            'label': label,
            'size': size,
            'index': bank_index,
        })
    return detected_memory
def handle_lshw_processors(bus_processors, is_virtual=False):
    """Build a list of detected CPUs from lshw bus data.

    Disabled processors and those without a size are skipped; the CPU
    family is taken from the first processor that reports one.
    """
    # A single processor comes through as a bare dict; normalize.
    if isinstance(bus_processors, dict):
        bus_processors = [bus_processors]
    family = None
    for cpu in bus_processors:
        family = cpu['version'] or (
            'Virtual CPU' if is_virtual else cpu['product']
        )
        if family:
            break
    detected_cpus = []
    for index, cpu in enumerate(bus_processors, start=1):
        if cpu['disabled'] == 'true' or not cpu['size']:
            continue
        speed = int(int(cpu['size']['value'] or 0) /
                    units.speed_divisor[cpu['size']['units']])
        detected_cpus.append({
            'index': index,
            'label': 'CPU {}'.format(index),
            'speed': speed,
            'family': family,
            'model_name': cpu['product'] or 'CPU {} {}MHz'.format(
                family, speed,
            )
        })
    return detected_cpus
def handle_lshw_storage(lshw):
    """Collect physical disk information from parsed lshw output.

    Returns a list of dicts with mount_point, serial_number, size and a
    human-readable label.  Disks without size/capacity information,
    without a usable serial number, or matching the vendor/product
    blacklists are skipped.
    """
    disks = []
    for entry in jpath.get_all('..disk', lshw):
        if not entry:
            continue
        if isinstance(entry, list):
            disks.extend(entry)
        else:
            disks.append(entry)
    detected = []
    for disk in disks:
        # Prefer 'size', fall back to 'capacity'; neither means an empty slot.
        if 'size' in disk:
            size = disk['size']
        elif 'capacity' in disk:
            size = disk['capacity']
        else:
            continue
        serial = unicode(disk.get('serial') or '') or None
        # OCZ SSDs report their serial with a redundant vendor prefix.
        if serial and serial.startswith('OCZ-'):
            serial = serial.replace('OCZ-', '')
        vendor = disk.get('vendor', '').strip()
        product = disk.get('product', '').strip()
        skip = (
            not serial or
            serial.startswith('QM000') or
            vendor.lower() in DISK_VENDOR_BLACKLIST or
            product.lower() in DISK_PRODUCT_BLACKLIST
        )
        if skip:
            continue
        disk_size = int(int(size['value']) / units.size_divisor[size['units']])
        label = vendor + ' ' if vendor else ''
        if product:
            label += product
        elif disk.get('description', '').strip():
            label += disk.get('description', '').strip()
        else:
            label += 'Generic disk'
        detected.append({
            'mount_point': disk.get('logicalname', None),
            'serial_number': serial,
            'size': disk_size,
            'label': label,
        })
    return detected
def handle_lshw_fibrechannel_cards(lshw):
    """Detect fibre channel cards in parsed lshw output.

    Multi-port cards appear once per port but share a physical id, so
    only the first port of each card produces an entry.
    """
    buses = []
    for bus in jpath.get_all('..bus', lshw):
        if not bus:
            continue
        if isinstance(bus, list):
            buses.extend(bus)
        else:
            buses.append(bus)
    fiber_buses = sorted(
        (bus for bus in buses if bus['id'].startswith('fiber')),
        key=lambda bus: bus['handle'],
    )
    seen_physical_ids = set()
    cards = []
    for bus in fiber_buses:
        match = FC_CARD_PHYSICAL_ID_EXPRESSION.search(unicode(bus['handle']))
        if not match:
            continue
        physical_id = match.group(1)
        if physical_id in seen_physical_ids:
            # Another port of a card we already reported.
            continue
        seen_physical_ids.add(physical_id)
        cards.append({
            'physical_id': physical_id,
            'label': "{} {}".format(bus['vendor'], bus['product']),
            'model_name': bus['product'],
        })
    return cards
| StarcoderdataPython |
1977780 | <gh_stars>1-10
import ephem
from ephem import degree
class Coordinates:
    """Convert a horizontal (alt/az) pointing at an observer site into
    galactic or equatorial sky coordinates using pyephem."""

    def __init__(self, lat, lon, alt, az):
        # Build the pyephem observer for the site and remember the pointing.
        self.QTH = ephem.Observer()
        self.QTH.lat = str(lat)
        self.QTH.lon = str(lon)
        self.QTH.pressure = 0
        self.alt = alt
        self.az = az

    def galactic(self):
        """Return (lat, lon) in galactic coordinates, rounded to whole degrees."""
        ra, dec = self.QTH.radec_of(str(self.az), str(self.alt))
        gal = ephem.Galactic(ephem.Equatorial(ra, dec))
        return round(gal.lat / degree, 0), round(gal.lon / degree, 0)

    def equatorial(self):
        """Return (ra, dec) in equatorial coordinates, rounded to whole degrees."""
        ra, dec = self.QTH.radec_of(str(self.az), str(self.alt))
        return round(ra / degree, 0), round(dec / degree, 0)
'''
coord_calc = Coordinates(lat = 55.6, lon = 12.5, alt = 55.6, az = 1)
lat, lon = coord_calc.galactic()
print(lat, lon)
QTH = ephem.Observer()
QTH.lat = '55.6'
QTH.lon = '12.5'
QTH.pressure = 0
ra, dec = QTH.radec_of(str(1), str(55.6))
eq_grid = ephem.Equatorial(ra, dec)
'''
| StarcoderdataPython |
1887409 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
# Create your views here.
from C_databases.models import BookInfo
from book_serializer.serializers import BookInfoSerializer
from rest_framework.response import Response
class BooksView(ViewSet):
    """
    List all books.
    Create a new book.
    """
    def list(self, request):
        # 1. Query all books.
        books = BookInfo.objects.all()
        # Build the JSON payload: pass the queryset to the serializer
        # (many=True because this is a collection of objects).
        ser = BookInfoSerializer(books, many=True)
        # 2. Return the serialized queryset data.
        return Response(ser.data)

    def create(self, request):
        # 1. Get the payload from the client.
        # data = request.body.decode()
        # data_dict = json.loads(data)
        data = request.data
        # 2. Validate the data.
        # Pass the incoming data into the serializer for validation.
        ser = BookInfoSerializer(data=data)
        # is_valid() is the validation entry point provided by the serializer.
        ser.is_valid(raise_exception=True)  # raise_exception: raise an error when validation fails
        # 3. Save.
        ser.save()
        # 4. Return the serialized representation of the saved object.
        return Response(ser.data)
class BookView(ViewSet):
    """Operations on a single book: retrieve, update and (soft) delete."""

    def retrieve(self, request, pk):
        # 1. Query one book by primary key.
        book = BookInfo.objects.get(pk=pk)
        # Build the JSON payload via the serializer.
        ser = BookInfoSerializer(book)
        # 2. Return the serialized data.
        return Response(ser.data)

    # Update book info.  Route: PUT /books/<pk>
    def update(self, request, pk):
        data = request.data
        book = BookInfo.objects.get(pk=pk)
        ser = BookInfoSerializer(book, data=data)
        ser.is_valid(raise_exception=True)  # raise_exception: raise an error when validation fails
        ser.save()
        return Response(ser.data)

    def destroy(self, request, pk):
        book = BookInfo.objects.get(pk=pk)
        # Soft delete: only flag the row instead of removing it.
        book.is_delete = True
        book.save()
        # Hard delete (disabled):
        # book.delete()
        # 3. Return an empty result.
        return Response({})
| StarcoderdataPython |
8020007 | #!/usr/bin/env python3
"""
@summary: how to send a signed transaction
@version: v03 (6/March/2020)
@since: 6/March/2020
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer-substrate for updates
"""
import time, sys
from pprint import pformat
from threading import Thread
from queue import Queue
from subprocess import run, PIPE
import substrateinterface
URL = "http://127.0.0.1:9800"
URL = "http://127.0.0.1:9933"
ALICE_ADDRESS = '5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY'
X_ADDRESS = '5Gdc7hM6WqVjd23YaJR1bUWJheCo4ymrcKAFc35FpfbeH68f'
ALICE_PUBKEY = '<KEY>'
X_PUBKEY = '0xca08b4e0f53054628a43912ffbb6d00a0362921ba9781932297edc037d197a5d'
BOB_ADDRESS = '5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty'
# see README.md:
# https://github.com/polkascan/py-substrate-interface#create-and-send-signed-extrinsics
from substrateinterface import SubstrateInterface, SubstrateRequestException, Keypair
def keypair_printer(keypair):
    """Print the non-dunder attribute names of *keypair*, then the value
    of each well-known key attribute, one per line."""
    public_names = [name for name in dir(keypair) if not name.startswith("__")]
    print(public_names)
    for attr in ("address_type", "mnemonic", "private_key", "public_key", "ss58_address"):
        print("keypair.%s=%s" % (attr, getattr(keypair, attr)))
def example_send_transaction():
    """
    similar to what is given in polkascan github README.md
    issue reported back to polkascan
    https://github.com/polkascan/py-substrate-interface/issues/14

    NOTE(review): requires a substrate node listening on
    ws://127.0.0.1:9944 -- confirm the node is running before use.
    """
    # substrate = SubstrateInterface( url="ws://127.0.0.1:9944", address_type=42, type_registry_preset='kusama' )
    # substrate = SubstrateInterface( url="ws://127.0.0.1:9944", address_type=2)
    substrate = SubstrateInterface(url="ws://127.0.0.1:9944")
    keypair = Keypair.create_from_mnemonic('episode together nose spoon dose oil faculty zoo ankle evoke admit walnut')
    # keypair = Keypair.create_from_private_key('//Alice')
    # keypair = Keypair.create_from_mnemonic('//Alice')
    keypair_printer(keypair)
    print("sending from:", keypair.ss58_address)
    # Shadows the module-level BOB_ADDRESS constant (same value).
    BOB_ADDRESS = '5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty'
    # Compose a Balances.transfer of 1 unit (10**12 base units) to Bob.
    call = substrate.compose_call(
        call_module='Balances',
        call_function='transfer',
        call_params={
            'dest': BOB_ADDRESS,
            'value': 1 * 10**12
        }
    )
    extrinsic = substrate.create_signed_extrinsic(call=call, keypair=keypair)
    try:
        # Blocks until the extrinsic is included in a block.
        result = substrate.send_extrinsic(extrinsic, wait_for_inclusion=True)
        print("Extrinsic '{}' sent and included in block '{}'".format(result['extrinsic_hash'], result['block_hash']))
    except SubstrateRequestException as e:
        print("Failed to send: {} with args:".format(type(e)))
        print("{}".format(pformat(e.args[0])))
def keypair_example():
    """Demonstrate mnemonic generation and a sign/verify round-trip."""
    phrase = Keypair.generate_mnemonic()
    kp = Keypair.create_from_mnemonic(phrase)
    sig = kp.sign("Test123")
    if kp.verify("Test123", sig):
        print('Verified')
def sign_many_threaded(N=5, payload="0xa8040400ff8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48070010a5d4e8"):
    """Sign *payload* N times concurrently, one thread per signature.

    NOTE(review): this calls test_sign(), which is not defined anywhere
    in this file -- presumably removed in an earlier edit; running this
    raises NameError.  TODO confirm where test_sign lives.
    """
    threads = []
    signed = []  # container to keep all signed payloads
    for i in range(N):
        t = Thread(target=test_sign,
                   args=(signed, payload))
        threads.append(t)
        print(".", end="")
    print("%d transaction threads created." % len(threads))
    for t in threads:
        t.start()
        print(".", end="")
        sys.stdout.flush()
    print("all threads started.")
    for t in threads:
        t.join()
    print("all threads ended.")
    print("results:")
    print(signed)
def sign_many_threaded_queue(numTx=20, num_worker_threads=4, payload="0xa8040400ff8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48070010a5d4e8"):
    """Sign *payload* numTx times through a worker-thread queue.

    NOTE(review): depends on test_sign(), which is not defined in this
    file -- verify before use.
    """
    line = "sign %d transactions, via multi-threading queue with %d workers:\n"
    print(line % (numTx, num_worker_threads))
    q = Queue()
    txs = []  # container to keep all transaction hashes

    def worker():
        # Each worker loops forever; daemon threads die with the process.
        while True:
            item = q.get()
            test_sign(txs, item)
            print("T", end=""); sys.stdout.flush()
            q.task_done()

    for i in range(num_worker_threads):
        t = Thread(target=worker)
        t.daemon = True
        t.start()
        print("W", end=""); sys.stdout.flush()
    print("\n%d worker threads created." % num_worker_threads)
    for i in range(numTx):
        q.put(payload)
        print("I", end=""); sys.stdout.flush()
    print("\n%d items queued." % numTx)
    # Block until every queued item has been processed.
    q.join()
    print("\nall items - done.")
    return txs
def balance_transfer(dest, value, signer):
    """Compose, sign and submit a Balances.transfer extrinsic.

    NOTE(review): relies on the module-global `substrate` (only created
    inside the __main__ block) and on a `sign()` helper that is not
    defined in this file -- verify both before use.
    """
    payload = substrate.compose_call(call_module='Balances',
                                     call_function='transfer',
                                     call_params={'dest': dest,
                                                  'value': value})
    print("payload:", payload)
    signed = sign(payload, signer)
    print("signed: >>>%s<<<" % signed)
    # result = substrate.rpc_request(method="author_submitAndWatchExtrinsic", params=[signed])
    result = substrate.rpc_request(method="author_submitExtrinsic", params=[signed])
    print(result)
def benchmark_signing_workaround(N=50):
    """Time N threaded signings and report the per-transaction cost.

    Historical data point: 100 transactions took 77.0 seconds, i.e.
    0.770 per transaction -- a max bandwidth of about 1.3 TPS.
    """
    started = time.time()
    sign_many_threaded(N=N)
    elapsed = time.time() - started
    print("that took %.1f seconds, i.e. %.3f per transaction" % (elapsed, elapsed / N))
if __name__ == '__main__':
    example_send_transaction(); exit()
    # NOTE(review): everything below is dead code -- the exit() above
    # always terminates the script first.  get_balance() is also not
    # defined in this file; confirm before re-enabling.
    # print(os_command())
    # test_sign(); exit()
    benchmark_signing_workaround(5); exit()
    substrate = substrateinterface.SubstrateInterface(url=URL)  # , address_type=42)
    dot = get_balance(substrate, address=BOB_ADDRESS)
    print()
    balance_transfer(dest=BOB_ADDRESS, value=1000000000000, signer='//Alice')
    time.sleep(10)
    print()
    dot = get_balance(substrate, address=BOB_ADDRESS)
| StarcoderdataPython |
3269310 | from django.test import TestCase
from hello.twitter_api import (
TwitterCli,
get_twitter_comments,
json_into_table,
save_tweets
)
import io
import json
try:
from urllib.error import HTTPError
except ImportError:
from urllib2 import HTTPError
# Module-level fixture data: a canned tweet loaded from disk and the row
# we expect json_into_table() to produce from it.
with io.open("hello/tests/example_twit.json") as samplejson:
    SAMPLE_JSON = json.loads(samplejson.read())

# Expected result of json_into_table(SAMPLE_JSON, "MSFT").
EXPECTED_TWITTER = {
    "symbols": [],
    "author": "<NAME>",
    "hashtags": [
        "MSFT",
        "Build2016",
        "mountaindew",
        "bldwin",
        "BeverageFail",
        "HadToDrinkPepsi"
    ],
    "source": "twitter",
    "focus": "MSFT",
    "author_image": "http://pbs.twimg.com/profile_images/558825363619344386/gUN09sSf_normal.jpeg",
    "urls": [],
    "url": "https://www.twitter.com/duanenewman/status/715241582597812224",
    "created_time": "2009-01-29T21:01:24Z",
    "popularity": 0,
    "content": "Hey #MSFT, I thought #Build2016 was a developer conference. Where's the #mountaindew? #bldwin #BeverageFail #HadToDrinkPepsi",
    "social_id": 19734130
}
class TwitterCase(TestCase):
    """Test Twitter API.

    NOTE(review): several tests below call the live Twitter API through
    get_twitter_comments()/TwitterCli and therefore require network
    access and valid credentials -- confirm before running in CI.
    """

    def test_json_to_table(self):
        """Test the corrected format from json to db."""
        jsonified = json_into_table(SAMPLE_JSON, "MSFT")
        self.assertEqual(EXPECTED_TWITTER, jsonified)

    def test_invalid_json(self):
        """Test passing in message with invalid ticker."""
        with self.assertRaises(ValueError):
            so_bad = {'potato': 'fries'}
            json_into_table(SAMPLE_JSON, so_bad)

    def test_retrieved_info(self):
        """Test tweet json content (live API call)."""
        resp = get_twitter_comments("MSFT")
        for item in resp:
            self.assertNotEqual(item.get('text'), None)
            self.assertNotEqual(item.get('user'), None)
            self.assertNotEqual(item.get('created_at'), None)
            self.assertNotEqual(item.get('entities'), None)
            self.assertNotEqual(item.get('id'), None)

    def test_dne_ticker(self):
        """Test invalid ticker return (live API call)."""
        wrong = get_twitter_comments("opwuirehe")
        self.assertEqual(wrong, [])

    def test_saving_tweetdict(self):
        """Test if dict of tweets are saved."""
        # A second save of the same tweet must be rejected as a duplicate.
        self.assertTrue(save_tweets(EXPECTED_TWITTER))
        self.assertFalse(save_tweets(EXPECTED_TWITTER))

    def test_get_access(self):
        """Test access to Twitter with correct auth (live API call)."""
        access = get_twitter_comments("aapl")
        self.assertNotEqual(access, {})

    def test_no_access(self):
        """Test access without proper auth (live API call)."""
        with self.assertRaises(HTTPError):
            client = TwitterCli("merp", "1333322")
            resp = client.request("https://api.twitter.com/1.1/search/tweets.json?q=%23MSFT")
            return resp
| StarcoderdataPython |
57063 | # <NAME>
# initial version of the webcam detector, can be used to test HSV settings, radius, etc
import cv2
#import time
import numpy as np
#from infer_imagenet import *
# Capture dimensions requested from the webcam.
FRAME_WIDTH = 640
FRAME_HEIGHT = 480

# load in the video (device 0 = default webcam)
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)

# Check if camera opened successfully
if (cap.isOpened() == False):
    print("Error opening video stream or file")

# writing a video file for presentation
# fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# out = cv2.VideoWriter('example_track.avi', fourcc , 30.0, (640, 480),

# Read until video is completed
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        redball_detected = False
        # resize video for faster processing, add blurr to smooth image, convert to Hue saturation value
        frame = cv2.resize(frame, (640, 480))
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        frameHSV = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # code for later exploring using CNNs for object detection, in this case a tennis ball
        # found = infer_result(frame, 852, model)
        # print('Tennis Ball found?:', found)
        # HSV window considered "red" for this detector.
        redLow = (0, 140, 140)
        redHigh = (255, 255, 255)
        # other colors such as the green for a tennis ball
        # colorLow = (100, 40, 60)
        # colorHigh = (120, 255, 255)
        # masks the parts of the image which fits the HSV setting, fills in holes using erode/dilate
        mask = cv2.inRange(frameHSV, redLow, redHigh)
        mask = cv2.erode(mask, None, iterations=4)
        mask = cv2.dilate(mask, None, iterations=8)
        mask = cv2.erode(mask, None, iterations=4)
        # copy of the mask for checking if circle
        maskg = np.copy(mask)
        imgg = np.zeros(frame.shape[0:2])
        cv2.imshow('mask', mask)
        # NOTE(review): the two-value return assumes OpenCV 4.x;
        # OpenCV 3.x findContours returns three values -- confirm version.
        cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        center = None
        cv2.drawContours(frame, cnts, -1, (0, 255, 0), 3)
        # Checks to make sure there is a red object
        if len(cnts) < 1:
            cv2.imshow('Frame', frame)
            # cv2.waitKey(10)
            # out.write(frame)
        else:
            # Largest red blob and its minimum enclosing circle.
            c = max(cnts, key=cv2.contourArea)
            (x, y), radius = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            if int(M["m00"]) != 0:
                # Centroid of the blob from its image moments.
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            print('radius', radius)
            # only proceed if the radius meets a minimum size
            if radius > 10:
                # Check to see if the object is a circle by checking mask fill of enclosing circle
                cv2.circle(imgg, center, int(radius), 255, -1)
                masked = cv2.bitwise_and(maskg.astype(np.uint8), maskg.astype(np.uint8), mask=imgg.astype(np.uint8))
                circle_fullness = np.sum(masked) / (np.pi * radius ** 2 * 255)
                if circle_fullness > 0.8:
                    redball_detected = True
                    # draw the circle and centroid on the frame,
                    cv2.circle(frame, (int(x), int(y)), int(radius),
                               (0, 0, 255), 2)
                    cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # if large enough of a red object is detected it sends the coordinates
            if redball_detected:
                print('center coordinates', center)
                print(type(center))
        # write to a video file
        # out.write(frame)
        # Display the resulting frame
        print('Redball detected:', redball_detected)
        cv2.imshow('Frame', frame)
        cv2.imshow("test", frameHSV)
        # cv2.waitKey(1)
        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything done, release the video capture object
cap.release()
# out.release()
# Closes all the frames
cv2.destroyAllWindows()
| StarcoderdataPython |
12863114 | <filename>scripts/update_covid_tracking_data.py<gh_stars>0
import logging
import datetime
import pathlib
import pytz
import requests
import pandas as pd
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = logging.getLogger(__name__)
class CovidTrackingDataUpdater(object):
    """Downloads historical state data from the COVID Tracking API and
    stores it as CSV alongside a version stamp."""

    HISTORICAL_STATE_DATA_URL = "http://covidtracking.com/api/states/daily"
    COVID_TRACKING_ROOT = DATA_ROOT / "covid-tracking"

    @property
    def output_path(self) -> pathlib.Path:
        """CSV file that receives the downloaded records."""
        return self.COVID_TRACKING_ROOT / "covid_tracking_states.csv"

    @property
    def version_path(self) -> pathlib.Path:
        """Text file recording when the data was last refreshed."""
        return self.COVID_TRACKING_ROOT / "version.txt"

    @staticmethod
    def _stamp():
        # Timestamp string in US/Pacific so every run is marked consistently.
        now = datetime.datetime.now(pytz.timezone('US/Pacific'))
        return now.strftime('%A %b %d %I:%M:%S %p %Z')

    def update(self):
        """Fetch the historical data, write the CSV and the version stamp."""
        _logger.info("Updating Covid Tracking data.")
        payload = requests.get(self.HISTORICAL_STATE_DATA_URL).json()
        pd.DataFrame(payload).to_csv(self.output_path, index=False)
        self.version_path.write_text(f"Updated at {self._stamp()}\n")
if __name__ == "__main__":
    # Script entry point: enable info logging, then run a single update.
    logging.basicConfig(level=logging.INFO)
    CovidTrackingDataUpdater().update()
| StarcoderdataPython |
3562202 | #!/usr/bin/env python
# Copyright 2018 ARC Centre of Excellence for Climate Systems Science
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from metadb.cli import cli
from metadb.model import *
import pytest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@pytest.fixture(scope='module')
def sample_data(tmpdir_factory):
    """Create a temp directory holding one small file ('a') to crawl."""
    data = tmpdir_factory.mktemp('cli_data')
    a = data.join('a')
    a.write('hello')
    return data
def test_collection_create_cmd(session):
    """`create --collection` adds a collection and is idempotent."""
    cli(argv='create --collection a'.split(), session=session)
    q = session.query(Collection).one()
    assert q.name == 'a'
    # Creating the same collection again must not duplicate it.
    cli(argv='create -c a'.split(), session=session)
    q = session.query(Collection)
    assert q.count() == 1
    # Root paths supplied on the command line attach to the collection.
    cli(argv='create -c b /root/path /another/path'.split(), session=session)
    q = session.query(Collection).filter_by(name='b').one()
    assert len(q.root_paths) == 2
def test_import_to_collection(session):
    """`import --collection` links the given path to the collection."""
    c = Collection(name='a')
    session.add(c)
    cli(argv='import --collection a foo'.split(), session=session)
    q = session.query(Path).one()
    assert q.collections == set((c,))
def test_crawler(session, sample_data):
    """`crawl` invokes crawl_recursive for the collection's root path."""
    with patch('metadb.cli.crawler.crawl_recursive') as crawler:
        cli(argv=('create --collection a %s' %
                  (sample_data)).split(), session=session)
        c = session.query(Collection).one()
        cli(argv='crawl --collection a'.split(), session=session)
        crawler.assert_called_once_with(
            session, str(sample_data), collection=c)
        crawler.reset_mock()
        # Without an explicit collection, every root path is still crawled.
        cli(argv='crawl'.split(), session=session)
        crawler.assert_called_once_with(
            session, str(sample_data), collection=c)
def test_report(session, capsys, sample_data):
    """`report` runs without error after a create + crawl cycle."""
    cli(argv=('create --collection a %s' % sample_data).split(), session=session)
    cli(argv='crawl'.split(), session=session)
    cli(argv='report'.split(), session=session)
| StarcoderdataPython |
6566697 | <filename>venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_storage_auto_giveback.py<gh_stars>1-10
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_storage_auto_giveback
short_description: Enables or disables NetApp ONTAP storage auto giveback for a specified node
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '21.3.0'
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Enable or disable storage auto giveback
options:
name:
description:
- Specifies the node name to enable or disable storage auto giveback on.
required: true
type: str
auto_giveback_enabled:
description:
- specifies whether auto give back should be enabled or disabled
required: true
type: bool
auto_giveback_after_panic_enabled:
description:
- specifies whether auto give back on panic should be enabled or disabled
type: bool
"""
EXAMPLES = """
- name: Enable storage auto giveback
na_ontap_storage_auto_giveback:
name: node1
auto_giveback_enabled: true
auto_giveback_after_panic_enabled: true
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Disable storage auto giveback
na_ontap_storage_auto_giveback:
name: node1
auto_giveback_enabled: false
auto_giveback_after_panic_enabled: false
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
class NetAppOntapStorageAutoGiveback(object):
    """
    Enable or disable storage failover auto giveback for a specified node.
    Uses the REST API when the cluster supports it, otherwise ZAPI.
    """

    def __init__(self):
        """
        Initialize the ONTAP Storage auto giveback class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            name=dict(required=True, type='str'),
            auto_giveback_enabled=dict(required=True, type='bool'),
            auto_giveback_after_panic_enabled=dict(required=False, type='bool')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # Prefer REST when available; ZAPI needs the netapp-lib package.
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        if not self.use_rest:
            if not netapp_utils.has_netapp_lib():
                self.module.fail_json(msg='The python NetApp-Lib module is required')
            else:
                self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_storage_auto_giveback(self):
        """
        get the storage failover giveback options for a given node
        :return: dict for options, or None if the node was not found (ZAPI)
        """
        return_value = None

        if self.use_rest:
            # REST: query the CLI passthrough endpoint for this node only.
            api = "private/cli/storage/failover"
            query = {
                'fields': 'node,auto_giveback,auto_giveback_after_panic',
                'node': self.parameters['name'],
            }
            message, error = self.rest_api.get(api, query)
            records, error = rrh.check_for_0_or_1_records(api, message, error)

            if error is None and records is not None:
                return_value = {
                    'name': message['records'][0]['node'],
                    'auto_giveback_enabled': message['records'][0]['auto_giveback'],
                    'auto_giveback_after_panic_enabled': message['records'][0]['auto_giveback_after_panic']
                }

            if error:
                self.module.fail_json(msg=error)

            if not records:
                error = "REST API did not return failover options for node %s" % (self.parameters['name'])
                self.module.fail_json(msg=error)

        else:
            # ZAPI: cf-get-iter returns info for every node; scan for ours.
            storage_auto_giveback_get_iter = netapp_utils.zapi.NaElement('cf-get-iter')

            try:
                result = self.server.invoke_successfully(storage_auto_giveback_get_iter, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error getting auto giveback info for node %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

            if result.get_child_by_name('attributes-list'):
                attributes_list = result.get_child_by_name('attributes-list')
                for storage_failover_info_attributes in attributes_list.get_children():

                    sfo_node_info = storage_failover_info_attributes.get_child_by_name('sfo-node-info')
                    node_related_info = sfo_node_info.get_child_by_name('node-related-info')

                    if node_related_info.get_child_content('node') == self.parameters['name']:
                        # Found the requested node: extract the giveback flags.
                        sfo_options_info = storage_failover_info_attributes.get_child_by_name('sfo-options-info')
                        options_related_info = sfo_options_info.get_child_by_name('options-related-info')
                        sfo_giveback_options_info = options_related_info.get_child_by_name('sfo-giveback-options-info')
                        giveback_options = sfo_giveback_options_info.get_child_by_name('giveback-options')

                        return_value = {
                            'name': node_related_info.get_child_content('node'),
                            'auto_giveback_enabled': self.na_helper.get_value_for_bool(
                                True, options_related_info.get_child_content('auto-giveback-enabled')),
                            'auto_giveback_after_panic_enabled': self.na_helper.get_value_for_bool(
                                True, giveback_options.get_child_content('auto-giveback-after-panic-enabled')),
                        }
                        break

        return return_value

    def modify_storage_auto_giveback(self):
        """
        Modifies storage failover giveback options for a specified node
        """
        if self.use_rest:
            api = "private/cli/storage/failover"
            body = dict()
            query = {
                'node': self.parameters['name']
            }
            body['auto_giveback'] = self.parameters['auto_giveback_enabled']
            if 'auto_giveback_after_panic_enabled' in self.parameters:
                body['auto_giveback_after_panic'] = self.parameters['auto_giveback_after_panic_enabled']
            dummy, error = self.rest_api.patch(api, body, query)
            if error:
                self.module.fail_json(msg=error)
        else:
            # ZAPI: build a cf-modify-iter request -- a query element that
            # matches the node plus an attributes element with new values.
            storage_auto_giveback_enable = netapp_utils.zapi.NaElement('cf-modify-iter')
            attributes_info = netapp_utils.zapi.NaElement('options-related-info-modify')
            query_info = netapp_utils.zapi.NaElement('options-related-info-modify')
            attributes_info.add_new_child('node', self.parameters['name'])
            attributes_info.add_new_child('auto-giveback-enabled', self.na_helper.get_value_for_bool(
                from_zapi=False, value=self.parameters['auto_giveback_enabled']))

            if 'auto_giveback_after_panic_enabled' in self.parameters:
                # The after-panic flag lives in a nested giveback-options element.
                sfo_give_back_options_info_modify = netapp_utils.zapi.NaElement('sfo-giveback-options-info-modify')
                give_back_options_modify = netapp_utils.zapi.NaElement('giveback-options-modify')
                give_back_options_modify.add_new_child('auto-giveback-after-panic-enabled', self.na_helper.get_value_for_bool(
                    from_zapi=False, value=self.parameters['auto_giveback_after_panic_enabled']))
                sfo_give_back_options_info_modify.add_child_elem(give_back_options_modify)
                attributes_info.add_child_elem(sfo_give_back_options_info_modify)

            query = netapp_utils.zapi.NaElement('query')
            attributes = netapp_utils.zapi.NaElement("attributes")
            query.add_child_elem(query_info)
            attributes.add_child_elem(attributes_info)
            storage_auto_giveback_enable.add_child_elem(query)
            storage_auto_giveback_enable.add_child_elem(attributes)

            try:
                self.server.invoke_successfully(storage_auto_giveback_enable, True)
            except netapp_utils.zapi.NaApiError as error:
                self.module.fail_json(msg='Error modifying auto giveback for node %s: %s' % (self.parameters['name'], to_native(error)),
                                      exception=traceback.format_exc())

    def ems_log_event(self):
        # Record module usage in the cluster's EMS log (ZAPI path only).
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
        return netapp_utils.ems_log_event("na_ontap_storage_auto_giveback", cserver)

    def apply(self):
        """Compare current and desired options, modify if needed, and exit."""
        if not self.use_rest:
            self.ems_log_event()
        current = self.get_storage_auto_giveback()
        self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            # Honour Ansible check mode: report the change, don't apply it.
            if not self.module.check_mode:
                self.modify_storage_auto_giveback()
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Module entry point: apply the storage auto giveback configuration."""
    NetAppOntapStorageAutoGiveback().apply()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3431308 | import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
@pytest.fixture
def model():
    """Build a minimal U235 sphere model with photon transport enabled and
    three identical tallies that differ only in their estimator."""
    model = openmc.model.Model()
    # Pure U235 material at 10 g/cm3.
    mat = openmc.Material()
    mat.set_density('g/cm3', 10.0)
    mat.add_nuclide('U235', 1.0)
    model.materials.append(mat)
    # Geometry: a single reflective sphere filled with the material.
    sph = openmc.Sphere(r=100.0, boundary_type='reflective')
    cell = openmc.Cell(fill=mat, region=-sph)
    model.geometry = openmc.Geometry([cell])
    # Short run: 5 batches (2 inactive), point source at the origin.
    model.settings.particles = 1000
    model.settings.batches = 5
    model.settings.inactive = 2
    model.settings.photon_transport = True
    model.settings.source = openmc.Source(space=openmc.stats.Point((0, 0, 0)))
    particle_filter = openmc.ParticleFilter(['neutron', 'photon'])
    # Same scores/nuclides scored with three estimators for comparison.
    tally_tracklength = openmc.Tally()
    tally_tracklength.filters = [particle_filter]
    tally_tracklength.scores = ['fission', 'heating', 'heating-local']
    tally_tracklength.nuclides = ['U235', 'total']
    tally_tracklength.estimator = 'tracklength'
    tally_collision = openmc.Tally()
    tally_collision.filters = [particle_filter]
    tally_collision.scores = ['fission', 'heating', 'heating-local']
    tally_collision.nuclides = ['U235', 'total']
    tally_collision.estimator = 'collision'
    tally_analog = openmc.Tally()
    tally_analog.filters = [particle_filter]
    tally_analog.scores = ['fission', 'heating', 'heating-local']
    tally_analog.nuclides = ['U235', 'total']
    tally_analog.estimator = 'analog'
    model.tallies.extend([tally_tracklength, tally_collision, tally_analog])
    return model
def test_photon_production(model):
    """Run the model and compare results against the stored statepoint."""
    harness = PyAPITestHarness('statepoint.5.h5', model)
    harness.main()
| StarcoderdataPython |
3387541 | <filename>e2xgrader/preprocessors/validateextracells.py
import traceback
from nbgrader.nbgraderformat import ValidationError
from nbgrader.preprocessors import NbGraderPreprocessor
from ..utils.extra_cells import is_singlechoice
class ExtraCellValidator:
    """Checks that single-choice nbgrader cells carry a recorded solution."""

    def validate_cell(self, cell):
        """Raise ValidationError if a single-choice nbgrader cell has no solution."""
        if 'nbgrader' not in cell.metadata:
            return
        if not is_singlechoice(cell):
            return
        # A single-choice cell must record at least one selected choice.
        extended = cell.metadata.extended_cell
        if 'choice' not in extended or len(extended.choice) < 1:
            raise ValidationError(
                "single choice nbgrader cell {} does not have a solution".format(
                    cell.metadata.nbgrader.grade_id))

    def validate_nb(self, nb):
        """Validate every cell in the notebook."""
        for cell in nb.cells:
            self.validate_cell(cell)
class ValidateExtraCells(NbGraderPreprocessor):
    """A preprocessor for checking that choice cells have valid solutions."""

    def preprocess(self, nb, resources):
        # Validate every cell; log the original traceback, then re-raise
        # with a user-friendly message.
        try:
            ExtraCellValidator().validate_nb(nb)
        except ValidationError:
            self.log.error(traceback.format_exc())
            msg = "Some choice cells seem to miss a solution. Please check them again."
            self.log.error(msg)
            raise ValidationError(msg)
        return nb, resources
9647492 | <reponame>sdevkota007/MedicalColorTransfer
import cv2
import numpy as np
import argparse
import os

# Resize the given image to 448x448 and save it next to the original with
# a "-r" suffix (e.g. "content1.png" -> "content1-r.png").
parser = argparse.ArgumentParser()
# parser.add_argument('--resize_ratio', type=float, default=0.5)
# parser.add_argument('--weight', type=int, default=2, choices=[2, 3])
parser.add_argument('--img_mri', type=str, default='data/7/content1.png')
args = parser.parse_args()

# BUG FIX: the parser defines --img_mri, but the original code read
# args.img, which raises AttributeError at runtime.
img = cv2.imread(args.img_mri)
dim = (448, 448)
img_out = cv2.resize(img, dsize=dim)

# BUG FIX: the original split on every '.' and re-joined without them,
# which mangled paths containing more than one dot.  splitext only
# separates the final extension.
root, ext = os.path.splitext(args.img_mri)
img_name = root + "-r" + ext
print(img_name)
cv2.imwrite(img_name, img_out)
| StarcoderdataPython |
1799578 | <gh_stars>0
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base action module."""
import inspect
# Error message templates used by BaseAction.__init__ validation; each is
# formatted with the offending subclass name.
_NO_ACTION_NAME_MSG = (
    'Action class %s has no ACTION_NAME attribute or it is not a string.')
_NO_FRIENDLY_NAME_MSG = (
    'Action class %s has no FRIENDLY_NAME attribute or it is not a string.')
_NO_RUN_METHOD_MSG = 'Action class %s has no method named run.'
class BaseAction(object):
    """A superclass for Actions.

    When creating new Actions, subclass this class in a new class named
    Action.* and provide a run method, an ACTION_NAME string, and a
    FRIENDLY_NAME string. For example:

        class MyAction(base_action.BaseAction):
            ACTION_NAME = 'my_action'
            FRIENDLY_NAME = 'My Action'

            def run(self):
                return 'foo'

    These subclasses can have arbitrary names, as can the modules containing them.
    """

    def __init__(self):
        """Validates required attributes for Action subclasses."""
        # NOTE: `basestring` makes this Python 2 only; under Python 3 the
        # checks would have to use `str` instead.
        if not isinstance(getattr(self, 'ACTION_NAME', None), basestring):
            raise AttributeError(_NO_ACTION_NAME_MSG % self.__class__.__name__)
        if not isinstance(getattr(self, 'FRIENDLY_NAME', None), basestring):
            raise AttributeError(_NO_FRIENDLY_NAME_MSG % self.__class__.__name__)
        # run must be a bound method (a plain attribute is rejected).
        if not inspect.ismethod(getattr(self, 'run', None)):
            raise AttributeError(_NO_RUN_METHOD_MSG % self.__class__.__name__)
| StarcoderdataPython |
1635535 | from .resource import DialpadResource
class CompanyResource(DialpadResource):
    """CompanyResource implements python bindings for the Dialpad API's company
    endpoints.

    See https://developers.dialpad.com/reference#company for additional
    documentation.
    """

    # All requests issued by this resource are made relative to /company.
    _resource_path = ['company']

    def get(self):
        """Gets the company resource.

        See Also:
            https://developers.dialpad.com/reference#companyapi_getcompany
        """
        return self.request(method='GET')
| StarcoderdataPython |
6508753 | <reponame>x06lan/mt<filename>leetcode/210.py
# Scratch work — appears to target LeetCode 210 (Course Schedule II), but the
# algorithm looks incomplete/incorrect; treat as exploratory code.
first=""
save={}
data=[[1,0],[0,1]]
# Build a reverse-adjacency map: save[b] lists the nodes a with edge [a, b].
# `first` is set to the first encountered b (seed node for the search below).
for i in data:
    a=i[0]
    b=i[1]
    if a==first or first=="":
        first=b
    # NOTE(review): bare except used as a defaultdict substitute — it also
    # swallows unrelated errors; save.setdefault(b, []) would be safer.
    try:
        tem=save[b]
    except:
        save[b]=[]
    save[b].append(a)


def allpath(text,data,num):
    """Recursively extend `text` (a path) with predecessors of its head until
    it reaches length `num`; returns the list of candidate paths."""
    if num==len(text):
        # print("@",text)
        return [text]
    out=[]
    # Predecessors of the current head, or [] if it has none.
    try:
        tem=save[text[0]]
    except:
        tem=[]
    # print(tem)  (original debug comment was garbled)
    count=0
    for i in tem:
        # Branch 1: prepend a single predecessor; branch 2: prepend a slice of
        # the remaining predecessors. NOTE(review): these branches overlap and
        # likely over-generate paths — verify against the intended algorithm.
        out=out+allpath([i]+text,data,num)
        out=out+allpath(tem[count:len(tem)]+text,data,num)
        count+=1
    # print(out)
    return out


# print(first,save)
out=allpath([first],save,2)
print(list(out))
| StarcoderdataPython |
12860949 | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class Bucketize(Op):
    """Model Optimizer extension op: maps every element of the data input to
    the index of the bucket (given by the boundaries input) it falls into."""

    op = 'Bucketize'

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'kind': 'op',
            'type': __class__.op,
            'op': __class__.op,
            'version': 'extension',
            'type_infer': self.type_infer,
            'infer': self.infer,
            'in_ports_count': 2,
            'out_ports_count': 1,
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        return ["with_right_bound"]

    @staticmethod
    def type_infer(node):
        # The layer outputs a bucket index, so the element type is integer.
        node.out_port(0).set_data_type(np.int32)

    @staticmethod
    def infer(node: Node):
        assert node.with_right_bound is not None, \
            "Attribute \"with_right_bound\" is not defined"
        assert len(node.in_nodes()) == 2, \
            "Incorrect number of inputs for {} node".format(node.id)

        # The output shape matches the data input shape.
        data = node.in_port(0).data
        node.out_port(0).data.set_shape(data.get_shape())

        # Constant-fold only when both data and boundaries are known.
        values = data.get_value()
        boundaries = node.in_port(1).data.get_value()
        if values is None or boundaries is None:
            return
        node.out_port(0).data.set_value(
            np.digitize(values, boundaries, right=node.with_right_bound))
| StarcoderdataPython |
3503634 | from pych.extern import Chapel
# Bind the Chapel routine compiled from sfile.hello.chpl to a Python callable.
@Chapel(sfile="sfile.hello.chpl")
def hello_world():
    # Body is intentionally empty: the implementation lives in the Chapel file.
    return None

if __name__ == "__main__":
    hello_world()
| StarcoderdataPython |
8014516 | # --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from <NAME>, <NAME> and <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ult.config import cfg
from ult.timer import Timer
from ult.ult import Get_next_sp, Get_next_sp_with_pose
from ult.apply_prior import apply_prior
import cv2
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
def get_blob(image_id):
    """Load one COCO val2014 image and convert it to a 1-image network blob.

    Returns (im_orig, im_shape): the mean-subtracted float32 image reshaped to
    (1, H, W, 3), and the (H, W, 3) shape of the loaded image.
    """
    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (str(image_id)).zfill(12) + '.jpg'
    # print(im_file)
    im = cv2.imread(im_file)
    # BUG FIX: cv2.imread silently returns None on a missing/corrupt file,
    # which later surfaced as a cryptic AttributeError; fail loudly instead.
    if im is None:
        raise IOError('Could not read image: ' + im_file)
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS  # subtract the dataset channel means
    im_shape = im_orig.shape
    im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
    return im_orig, im_shape
def im_detect(sess, net, image_id, Test_RCNN, prior_mask, Action_dic_inv, object_thres, human_thres, prior_flag, detection):
    """Run human-object interaction detection for one V-COCO image.

    For every confident human detection, all confident object detections are
    scored by the network; one result dict per human (person box, best object
    box and score for each of the 29 actions) is appended to `detection`.

    prior_flag selects how priors are applied to the raw network scores:
    1 = apply_prior() only, 2 = prior_mask only, 3 = both.
    """
    im_orig, im_shape = get_blob(image_id)
    blobs = {}
    blobs['H_num'] = 1
    for Human_out in Test_RCNN[image_id]:
        if (np.max(Human_out[5]) > human_thres) and (Human_out[1] == 'Human'): # This is a valid human
            # Predict actrion using human appearance only
            blobs['H_boxes'] = np.array([0, Human_out[2][0], Human_out[2][1], Human_out[2][2], Human_out[2][3]]).reshape(1,5)
            # prediction_H = net.test_image_H(sess, im_orig, blobs)
            # save image information
            dic = {}
            dic['image_id'] = image_id
            dic['person_box'] = Human_out[2]
            # Predict actrion using human and object appearance
            # Each row: 4 object-box coords followed by 29 action scores.
            Score_obj = np.empty((0, 4 + 29), dtype=np.float32)
            for Object in Test_RCNN[image_id]:
                if (np.max(Object[5]) > object_thres) and not (np.all(Object[2] == Human_out[2])): # This is a valid object
                    blobs['O_boxes'] = np.array([0, Object[2][0], Object[2][1], Object[2][2], Object[2][3]]).reshape(1,5)
                    blobs['sp'] = Get_next_sp(Human_out[2], Object[2]).reshape(1, 64, 64, 2)
                    prediction_HO = net.test_image_HO(sess, im_orig, blobs)
                    if prior_flag == 1:
                        prediction_HO = apply_prior(Object, prediction_HO)
                    if prior_flag == 2:
                        prediction_HO = prediction_HO * prior_mask[:,Object[4]].reshape(1,29)
                    if prior_flag == 3:
                        prediction_HO = apply_prior(Object, prediction_HO)
                        prediction_HO = prediction_HO * prior_mask[:,Object[4]].reshape(1,29)
                    # Weight the action scores by the object detection confidence.
                    This_Score_obj = np.concatenate((Object[2].reshape(1,4), prediction_HO[0] * np.max(Object[5])), axis=1)
                    Score_obj = np.concatenate((Score_obj, This_Score_obj), axis=0)
            # print(prediction_HO.shape, blobs['H_boxes'].shape, blobs['O_boxes'].shape)
            # exit()
            # There is only a single human detected in this image. I just ignore it. Might be better to add Nan as object box.
            if Score_obj.shape[0] == 0:
                continue
            # Find out the object box associated with highest action score
            max_idx = np.argmax(Score_obj,0)[4:]
            # agent mAP
            for i in range(29):
                #'''
                # walk, smile, run, stand
                # if (i == 3) or (i == 17) or (i == 22) or (i == 27):
                #     agent_name = Action_dic_inv[i] + '_agent'
                #     dic[agent_name] = np.max(Human_out[5]) * prediction_H[0][0][i]
                #     continue
                # cut: merge the two cut slots (indices 2 and 4) into one agent score.
                if i == 2:
                    agent_name = 'cut_agent'
                    dic[agent_name] = np.max(Human_out[5]) * max(Score_obj[max_idx[2]][4 + 2], Score_obj[max_idx[4]][4 + 4])
                    continue
                if i == 4:
                    continue
                # eat: merge slots 9 and 16.
                if i == 9:
                    agent_name = 'eat_agent'
                    dic[agent_name] = np.max(Human_out[5]) * max(Score_obj[max_idx[9]][4 + 9], Score_obj[max_idx[16]][4 + 16])
                    continue
                if i == 16:
                    continue
                # hit: merge slots 19 and 20.
                if i == 19:
                    agent_name = 'hit_agent'
                    dic[agent_name] = np.max(Human_out[5]) * max(Score_obj[max_idx[19]][4 + 19], Score_obj[max_idx[20]][4 + 20])
                    continue
                if i == 20:
                    continue
                # These 2 classes need to save manually because there is '_' in action name
                if i == 6:
                    agent_name = 'talk_on_phone_agent'
                    dic[agent_name] = np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i]
                    continue
                if i == 8:
                    agent_name = 'work_on_computer_agent'
                    dic[agent_name] = np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i]
                    continue
                # all the rest
                agent_name = Action_dic_inv[i].split("_")[0] + '_agent'
                dic[agent_name] = np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i]
                #'''
            # Dead code kept from the original (human-only scoring variant).
            '''
            if i == 6:
                agent_name = 'talk_on_phone_agent'
                dic[agent_name] = np.max(Human_out[5]) * prediction_H[0][0][i]
                continue
            if i == 8:
                agent_name = 'work_on_computer_agent'
                dic[agent_name] = np.max(Human_out[5]) * prediction_H[0][0][i]
                continue
            agent_name = Action_dic_inv[i].split("_")[0] + '_agent'
            dic[agent_name] = np.max(Human_out[5]) * prediction_H[0][0][i]
            '''
            # role mAP
            for i in range(29):
                # walk, smile, run, stand. Won't contribute to role mAP
                # if (i == 3) or (i == 17) or (i == 22) or (i == 27):
                #     dic[Action_dic_inv[i]] = np.append(np.full(4, np.nan).reshape(1,4), np.max(Human_out[5]) * prediction_H[0][0][i])
                #     continue
                # Impossible to perform this action
                if np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i] == 0:
                    dic[Action_dic_inv[i]] = np.append(np.full(4, np.nan).reshape(1,4), np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i])
                # Action with >0 score
                else:
                    dic[Action_dic_inv[i]] = np.append(Score_obj[max_idx[i]][:4], np.max(Human_out[5]) * Score_obj[max_idx[i]][4 + i])
            detection.append(dic)
def test_net(sess, net, Test_RCNN, prior_mask, Action_dic_inv, output_dir, object_thres, human_thres, prior_flag):
    """Run HOI detection over the whole V-COCO test split and pickle the results."""
    np.random.seed(cfg.RNG_SEED)
    detection = []
    # timers
    timers = {'im_detect': Timer(), 'misc': Timer()}
    ids_path = cfg.DATA_DIR + '/' + '/v-coco/data/splits/vcoco_test.ids'
    for count, line in enumerate(open(ids_path, 'r')):
        timers['im_detect'].tic()
        image_id = int(line.rstrip())
        im_detect(sess, net, image_id, Test_RCNN, prior_mask, Action_dic_inv,
                  object_thres, human_thres, prior_flag, detection)
        timers['im_detect'].toc()
        print('im_detect: {:d}/{:d} {:.3f}s'.format(count + 1, 4946, timers['im_detect'].average_time))
    pickle.dump(detection, open(output_dir, "wb"))
| StarcoderdataPython |
1661790 | ##############################################################################
#
# Copyright (c) 2014, 2degrees Limited.
# All Rights Reserved.
#
# This file is part of hubspot-contacts
# <https://github.com/2degrees/hubspot-contacts>, which is subject to the
# provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
from itertools import chain
from pyrecord import Record
from hubspot.contacts._constants import BATCH_SAVING_SIZE_LIMIT
from hubspot.contacts._constants import CONTACTS_API_SCRIPT_NAME
from hubspot.contacts._property_utils import get_property_type_by_property_name
from hubspot.contacts.generic_utils import ipaginate
from hubspot.contacts.request_data_formatters.contacts import \
format_contacts_data_for_saving
# Record type describing a HubSpot contact; `related_contact_vids` defaults to
# an empty tuple so instances can be created without it.
Contact = Record.create_type(
    'Contact',
    'vid',
    'email_address',
    'properties',
    'related_contact_vids',
    related_contact_vids=(),
)

# Batch create/update end-point path, relative to the contacts API root.
_CONTACTS_SAVING_URL_PATH = CONTACTS_API_SCRIPT_NAME + '/contact/batch/'
def save_contacts(contacts, connection):
    """Create and/or update the given contacts in HubSpot.

    Only each contact's email address and properties are sent; any other datum
    (e.g. the VID) is ignored. As at this writing, the end-point validates the
    input only partially and queues the changes instead of applying them
    immediately.

    :param iterable contacts: the contacts to be created/updated
    :return: ``None``
    :raises hubspot.connection.exc.HubspotException:
    :raises hubspot.contacts.exc.HubspotPropertyValueError: if a property
        value on one of the contacts is invalid

    End-point documentation:
    http://developers.hubspot.com/docs/methods/contacts/batch_create_or_update
    """
    batches = ipaginate(contacts, BATCH_SAVING_SIZE_LIMIT)
    first_batch = next(batches, None)
    if not first_batch:
        # Nothing to save; skip the property-type request entirely.
        return

    property_type_by_property_name = \
        get_property_type_by_property_name(connection)

    for batch in chain([first_batch], batches):
        request_body = format_contacts_data_for_saving(
            batch,
            property_type_by_property_name,
        )
        connection.send_post_request(_CONTACTS_SAVING_URL_PATH, request_body)
| StarcoderdataPython |
3321204 | import numpy as np
import datetime
import os
import argparse
from FortnitePlotWorld import generate_player_traces
from FortnitePlotWorld import PlayerTrace
import logjoiner
import fnplog
import trafficstat
# Traffic-model parameters: each tuple is (mean, std dev) of the per-packet
# payload size — presumably bytes, measured from captures (TODO confirm).
CLIENT_SPECTATOR = (19.359, 2.6462)
CLIENT_ACTIVE = (43.868, 7.9772)
SERVER_LOW = (81.572, 34.467)
SERVER_HIGH = (267.65, 57.955)
def create_packet_sizes(mu, sigma, n):
    """Draw ``n`` packet sizes from a normal distribution N(mu, sigma)."""
    return np.random.normal(mu, sigma, n)
def create_packets_client_active(seconds, start_time):
    """Generate ``seconds`` worth of outbound packets for an active player.

    Per second, a packet count is drawn from the measured rate distribution
    and the packets are spread evenly across that second.
    Returns a list of (timestamp, size, "out") tuples.
    """
    packets = []
    for second in range(seconds):
        per_second = int(np.random.normal(35.937, 4.2692))
        spacing = 1 / float(per_second)
        sizes = create_packet_sizes(CLIENT_ACTIVE[0], CLIENT_ACTIVE[1], per_second)
        packets.extend(
            (start_time + second + idx * spacing, int(size), "out")
            for idx, size in enumerate(sizes)
        )
    return packets
def create_packets_client_spectator(seconds, start_time):
    """Generate ``seconds`` worth of outbound client packets in spectator mode.

    Same per-second packet rate as the active-player model, but with the
    smaller spectator payload distribution.
    Returns a list of (timestamp, size, "out") tuples.
    """
    packet_sizes = []
    for s in range(0, seconds):
        num_packets = int(np.random.normal(35.937, 4.2692))
        #num_packets = int(np.random.poisson(36.198))
        # Spread the packets evenly across the second.
        inter_packet_dist = 1 / float(num_packets)
        #sizes = np.random.poisson(20.228, num_packets)
        sizes = create_packet_sizes(CLIENT_SPECTATOR[0], CLIENT_SPECTATOR[1], num_packets)
        #sizes = np.random.binomial(4*20.228, 0.25, num_packets)
        #sizes = np.random.normal(20.228, 9.35, num_packets)
        for i in range(0, len(sizes)):
            packet_sizes = packet_sizes + [(start_time + s + i*inter_packet_dist, int(sizes[i]), "out")]
    return packet_sizes
def create_packets_server_low_action(seconds, start_time, server_packet_mean=19.731, server_packet_size_mean=SERVER_LOW[0], use_variation=False):
    """Generate ``seconds`` worth of inbound server packets during low action.

    With use_variation=True, packet-rate noise is only applied during short
    (~3 s) "variation" bursts spaced by exponentially distributed gaps;
    otherwise every second gets a noisy packet count.
    Returns a list of (timestamp, size, "in") tuples.
    """
    packet_sizes = []
    variation = False
    next_variation = -1
    variation_time = 0
    for s in range(0, seconds):
        # Schedule the next variation burst (exponential gap, mean 15 s).
        # NOTE(review): round() on the 1-element array returned by
        # np.random.exponential(15, 1) only works on Python 2 / old numpy.
        if next_variation < 0:
            next_variation = int(round(np.random.exponential(15, 1)))
        if next_variation == 0 and variation is False:
            variation = True
            variation_time = 0
        if variation or not use_variation:
            # Noisy packet rate; a variation burst ends after 3 seconds.
            num_packets = int(np.random.normal(server_packet_mean, 1.6053))
            variation_time += 1
            if variation_time >= 3:
                variation = False
                next_variation = -1
        else:
            # Quiet second: constant (float) rate while counting down the gap.
            num_packets = server_packet_mean
            next_variation -= 1
        inter_packet_dist = 1 / float(num_packets)
        sizes = create_packet_sizes(server_packet_size_mean, SERVER_LOW[1], num_packets)
        for i in range(0, len(sizes)):
            packet_sizes = packet_sizes + [(start_time + s + i*inter_packet_dist, int(sizes[i]), "in")]
    return packet_sizes
def create_packets_server_high_action(seconds, start_time, server_packet_mean=19.731, server_packet_size_mean=SERVER_HIGH[0]):
    """Generate ``seconds`` worth of inbound server packets during high action.

    Per second, the packet count is drawn around ``server_packet_mean`` and
    sizes around ``server_packet_size_mean``; packets are spread evenly
    across the second. Returns a list of (timestamp, size, "in") tuples.
    """
    packets = []
    for second in range(seconds):
        per_second = int(np.random.normal(server_packet_mean, 1.6053))
        spacing = 1 / float(per_second)
        sizes = create_packet_sizes(server_packet_size_mean, SERVER_HIGH[1], per_second)
        packets.extend(
            (start_time + second + idx * spacing, int(size), "in")
            for idx, size in enumerate(sizes)
        )
    return packets
def create_player_trace(player_movement_traces, selected_player):
    """Build client and server packet traces for one player over a full game.

    After the player dies, the trace continues through the kill chain
    (kill_relations) so traffic is generated for the whole game duration.
    Server traffic ramps up early in the game while the world state is still
    being replicated, then alternates low-action filler with 5-second
    high-action bursts around movement events.

    Returns (client_packets, server_packets), each a list of
    (timestamp, size, direction) tuples.
    """
    movement_trace = player_movement_traces.player_traces[selected_player]
    game_duration = player_movement_traces.no_players[-1][0]
    # Times at which the alive-player count drops below 80 / 50; these bound
    # the early-game ramp phases below.
    server_full_update_time = 0
    server_packet_higher_load = 0
    server_packet_higher_load_value = 550
    for (t, no_players) in player_movement_traces.no_players:
        if no_players < 80 and server_packet_higher_load == 0:
            server_packet_higher_load = t
        if no_players < 50:
            server_full_update_time = t
            break
    # Linear ramps: packet frequency 10 -> 20 pkt/s, packet size 550 -> SERVER_LOW mean.
    m = (20 - 10) / float(server_full_update_time - 0)
    m_server_packet_size = (SERVER_LOW[0] - server_packet_higher_load_value) / float(server_packet_higher_load - 0)
    playing_time = len(movement_trace)
    spectator_time = game_duration - playing_time
    # Client side: active traffic while alive, spectator traffic afterwards.
    client_packets = create_packets_client_active(playing_time, 0) + create_packets_client_spectator(spectator_time, playing_time)
    current_time = 0
    server_packets = []
    while current_time < game_duration:
        movement_trace = player_movement_traces.player_traces[selected_player][current_time:]
        # Timestamps of action events (the third tuple element flags an action).
        action_times = [event[0] for event in filter(lambda c: c[2] == True, movement_trace)]
        generated_to = current_time
        for time in action_times:
            if generated_to < server_full_update_time or generated_to < server_packet_higher_load:
                # Early game: generate second by second along the ramps.
                for t in range(generated_to, time - 5):
                    if generated_to < server_full_update_time:
                        server_packet_freq_mean = m*generated_to + 10
                    else:
                        server_packet_freq_mean = 19.731
                    if generated_to < server_packet_higher_load:
                        server_packet_size_mean = m_server_packet_size*generated_to + server_packet_higher_load_value
                        server_packets = server_packets + create_packets_server_high_action(1, t, server_packet_freq_mean, server_packet_size_mean)
                    else:
                        server_packet_size_mean = SERVER_LOW[0]
                        server_packets = server_packets + create_packets_server_low_action(1, t, server_packet_freq_mean)
                    generated_to += 1
                # Last 5 seconds before the action always use high-action traffic.
                for t in range(time - 5, time):
                    if generated_to < server_full_update_time:
                        server_packet_freq_mean = m*generated_to + 10
                    else:
                        server_packet_freq_mean = 19.731
                    if generated_to < server_packet_higher_load:
                        server_packet_size_mean = m_server_packet_size*generated_to + server_packet_higher_load_value
                    else:
                        server_packet_size_mean = SERVER_HIGH[0]
                    server_packets = server_packets + create_packets_server_high_action(1, generated_to, server_packet_freq_mean, server_packet_size_mean)
                    generated_to += 1
            else:
                # Steady state: low-action filler, then a 5 s high-action burst
                # leading into the movement event.
                seconds_to_generate = time - generated_to
                if seconds_to_generate > 5:
                    server_packets = server_packets + create_packets_server_low_action(seconds_to_generate - 5, generated_to, use_variation=True)
                server_packets = server_packets + create_packets_server_high_action(min(5, seconds_to_generate), generated_to + seconds_to_generate - min(5, seconds_to_generate))
                generated_to = time
            current_time = time
        # Follow the player's killer once the player has no more events.
        if selected_player not in player_movement_traces.kill_relations:
            break
        selected_player = player_movement_traces.kill_relations[selected_player]
    return client_packets, server_packets
##
# Start script for 100 player traces
# > seq 0 99 | xargs -P 4 -I {} python /ndnSim/PythonScripts/fortnite/TrafficGenerator.py -p {} -o out
##
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--game", help="Trace is generated for the given game number", type=int, default=0)
# NOTE(review): type=bool in argparse is misleading — any non-empty string
# (including "False") parses as True. Confirm the intended flag semantics.
parser.add_argument("-c", "--charts", help="Plot charts", type=bool, default=True)
parser.add_argument("-o", "--output", help="Output directory [./]", default="./")
parser.add_argument("-v", "--verbose", help="Show debug information", type=bool, default=False)
args = parser.parse_args()
game = args.game
plot = args.charts
verbose = args.verbose
output = args.output
if output[-1] != "/":
    output += "/"
if verbose:
    print("Generate movement traces")
player_movement_traces = generate_player_traces(game, verbose=verbose)
game_duration = player_movement_traces.no_players[-1][0]
if not os.path.exists(output):
    os.makedirs(output)
if verbose:
    print("Write movement traces")
# One CSV of (time, x, y) rows per player.
for i in range(0, len(player_movement_traces.player_traces)):
    player_trace = player_movement_traces.player_traces[i]
    f = open(output + "player_{:02d}_movement.csv".format(i), "w")
    for entry in player_trace:
        f.write("{}\t{:10.3f}\t{:10.3f}\n".format(entry[0], entry[1][0], entry[1][1]))
    f.close()
# Generate packet log, game info, statistics and (optionally) a chart for each
# of the 100 players.
for pl in range(0, 100):
    selected_player = pl
    if verbose:
        print("Generate traffic traces")
    client, server = create_player_trace(player_movement_traces, selected_player)
    packets = client + server
    packets.sort(key=lambda c:c[0])
    if verbose:
        print("Write Packet Log")
    f = open(output + "player_{:02d}_packet_log.csv".format(selected_player), "w")
    f.write("time\tin/out\tprotocol\tpayload-size\ttotal-size\n")
    for packet in packets:
        time = datetime.datetime.fromtimestamp(packet[0])
        time_str = time.strftime("%Y-%m-%d %H:%M:%S.%f")
        # Total size adds 42 bytes of header overhead — presumably
        # Ethernet+IPv4+UDP (14+20+8); confirm against the capture setup.
        f.write("{}\t{}\t{}\t{}\t{}\n".format(time_str, packet[2], "udp:data", max(1, packet[1]), max(1, packet[1]) + 42))
    f.close()
    if verbose:
        print("Write game info")
    f = open(output + "player_{:02d}_game_info.csv".format(selected_player), "w")
    for i in range(0, game_duration):
        time = datetime.datetime.fromtimestamp(i)
        time_str = time.strftime("%Y-%m-%d %H:%M:%S")
        da = "alive"
        if i > len(player_movement_traces.player_traces[selected_player]):
            da = "dead"
        f.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time_str, i, i, player_movement_traces.no_players[i][1], "", da))
    f.close()
    if verbose:
        print("Calculate traffic averages")
    trafficstat.execute(output + "player_{:02d}_packet_log.csv".format(selected_player), output + "player_{:02d}_averages_packet_log.csv".format(selected_player))
    if verbose:
        print("Calculate combined logfile")
    logjoiner.execute(output + "player_{:02d}_averages_packet_log.csv".format(selected_player), output + "player_{:02d}_game_info.csv".format(selected_player), output + "player_{:02d}_joined.csv".format(selected_player))
    if plot:
        if verbose:
            print("Plot chart")
fnplog.execute(output + "player_{:02d}_joined.csv".format(selected_player), output + "player_{:02d}.pdf".format(selected_player), verbose=verbose) | StarcoderdataPython |
5181536 | <gh_stars>1-10
from django.shortcuts import render,redirect,HttpResponseRedirect
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
from notification.signals import notify
from .forms import UserCreationForm,LoginForm
from .models import MyUser
# Create your views here.
def user_register(request):
    """Register a new user, notify the admin via a signal, and redirect to login."""
    form = UserCreationForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get('username')
        email = form.cleaned_data.get('email')
        # NOTE(review): '<PASSWORD>' is a dataset-anonymization placeholder —
        # the real form field name (e.g. 'password1') must be restored.
        password = form.cleaned_data.get('<PASSWORD>')
        new_user=MyUser.objects.create_user(
            username=username,
            email=email,
            password=password
        )
        # send signal to admin to let him know that a new user's registered
        notify.send(
            sender=new_user,
            recipient=MyUser.objects.get(username='test'),
            verb='registered',
        )
        messages.success(request, 'Thanks for your register, now you can login your account.')
        return redirect('login')
    # GET request or invalid submission: (re-)render the registration form.
    context = {
        'form':form,
        'action':'',
        'btn':'Register',
    }
    return render(request, 'account/forms.html', context)
def auth_logout(request):
    """Terminate the session and redirect to the site root."""
    logout(request)
    return HttpResponseRedirect('/')
def auth_login(request):
    """Authenticate the user and redirect to ``next`` (or '/') on success."""
    form = LoginForm(request.POST or None)
    next_url = request.GET.get('next')
    # Already-authenticated users just get the login page again with a warning.
    # NOTE(review): is_authenticated is callable only on older Django versions;
    # on modern Django it is a property — confirm the target version.
    if request.user.is_authenticated():
        messages.warning(request, 'You have already login.')
        return render(request, 'login.html',{'login_error':'haha' })
    if form.is_valid():
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            # Honour the ?next= redirect if one was supplied.
            if next_url:
                return HttpResponseRedirect(next_url)
            else:
                return HttpResponseRedirect('/')
        else:
            # Valid form but bad credentials.
            return render(request, 'login.html', {'form':form,
                'login_error':'wrong password or username',})
    # GET request or invalid form: render the empty/bound login form.
    context = {
        'form': form,
    }
    return render(request, 'login.html' ,context)
5176475 | # Run celery workers
# celery -A dao worker --loglevel=info
import sys
import json
import psycopg2
import logging
from celery import Celery
from datetime import datetime
# Log everything to stdout so the worker output is visible in the console.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
                    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Configure with your own broker
app = Celery("tasks", broker="redis://localhost:6379/4")
# Initialize dao
# NOTE(review): "<PASSWORD>" is a dataset-anonymization placeholder — restore
# the real credential (ideally read from the environment) before running.
db = psycopg2.connect(database="ticks", user="postgres", password="<PASSWORD>", host="127.0.0.1", port="5432")
# Db insert statement
insert_tick_statement = "INSERT INTO ticks (date, token, price) VALUES (%(date)s, %(token)s, %(price)s)"
# Task to insert to SQLite dao
@app.task
def insert_ticks(ticks):
    """Celery task: persist a batch of market ticks into the ticks table.

    Each tick is a mapping with at least "instrument_token" and "last_price";
    the whole batch is committed together and rolled back on failure.
    """
    c = db.cursor()
    for tick in ticks:
        c.execute(insert_tick_statement, {
            "date": datetime.now(),
            "token": tick["instrument_token"],
            "price": tick["last_price"]})
    # NOTE(review): indentation was lost in this copy — the log call is placed
    # after the loop since it serializes the full batch; confirm original intent.
    logging.info("Inserting ticks to dao : {}".format(json.dumps(ticks)))
    try:
        db.commit()
    except Exception:
        db.rollback()
        logging.exception("Couldn't write ticks to dao: ")
| StarcoderdataPython |
11377063 | from conan.packager import ConanMultiPackager
if __name__ == "__main__":
    # Build the CI package matrix for user "drodri", channel "stable",
    # toggling the HelloCi package's shared option across the generated builds.
    builder = ConanMultiPackager(username="drodri", channel="stable")
    builder.add_common_builds(shared_option_name="HelloCi:shared")
builder.run() | StarcoderdataPython |
6597496 | <gh_stars>10-100
import logging
import torch
from pydantic import BaseModel
from transformers import DistilBertForTokenClassification, DistilBertTokenizerFast
from dbpunctuator.utils.utils import register_logger
# Module-level logger registered with dbpunctuator's logging setup.
logger = logging.getLogger(__name__)
register_logger(logger)
# Prefer the GPU when available; note TestingModel recomputes this per instance.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class TestingModelArguments(BaseModel):
    """Arguments selecting which model/tokenizer checkpoints to load.

    Args:
        model_name(str): name or path of the pre-trained model
        tokenizer_name(str): name or path of the pre-trained tokenizer
    """

    model_name: str
    tokenizer_name: str
class TestingModel:
    """Thin wrapper around a DistilBERT token classifier for smoke-testing."""

    def __init__(self, arguments: TestingModelArguments) -> None:
        self.device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
        self.tokenizer = DistilBertTokenizerFast.from_pretrained(
            arguments.tokenizer_name
        )
        # BUG FIX: the model must live on the same device as the input tensors,
        # otherwise sample_output() crashes with a device mismatch on CUDA.
        self.classifer = DistilBertForTokenClassification.from_pretrained(
            arguments.model_name
        ).to(self.device)

    def sample_output(self, inputs):
        """Tokenize ``inputs`` (list of strings), run the classifier and log
        the per-token argmax label ids."""
        tokenized_inputs = self.tokenizer(
            inputs,
            is_split_into_words=False,
            padding=True,
            truncation=True,
            return_offsets_mapping=True,
            return_tensors="pt",
        )
        logger.info(f"tokenized inputs: {tokenized_inputs}")
        self.tokenized_input_ids = tokenized_inputs["input_ids"].to(self.device)
        self.attention_mask = tokenized_inputs["attention_mask"].to(self.device)
        logits = self.classifer(self.tokenized_input_ids, self.attention_mask).logits
        # Tensors must come back to the CPU before numpy conversion.
        if self.device.type == "cuda":
            argmax_preds = logits.argmax(dim=2).detach().cpu().numpy()
        else:
            argmax_preds = logits.argmax(dim=2).detach().numpy()
        logger.info(f"outputs of model {argmax_preds}")
if __name__ == "__main__":
    # Smoke test: run the multilingual checkpoint on one Chinese and one
    # English sentence and log the predicted label ids.
    args = TestingModelArguments(
        model_name="distilbert-base-multilingual-cased",
        tokenizer_name="distilbert-base-multilingual-cased",
    )
    testing_model = TestingModel(args)
    test_texts = ["中文测试", "Chinese testing"]
    testing_model.sample_output(test_texts)
| StarcoderdataPython |
252999 | <filename>while_loops.py
# Example 1: count from 0 to 10 inclusive.
i = 0
while i <= 10:
    print(i)
    i += 1

# Example 2: keep prompting until a listed fruit (or "exit") is entered.
available_fruits = ["Apple", "Pearl", "Banana", "Grapes"]
chosen_fruit = ''
print("We have the following available fruits: Apple, Pearl, Banana, Grapes")
while chosen_fruit not in available_fruits:
    chosen_fruit = input("Please choose one of the options above: ")
    if chosen_fruit == "exit":
        print("Good bye!")
        break
# NOTE(review): indentation was lost in this copy; this reads as a while/else
# (runs only when the loop ends without break) — confirm against the original.
else:
    print("Enjoy your fruit!")
| StarcoderdataPython |
11378794 | import numpy as np
def center_crop(l, x, y, ts, p, bboxes, old_shape, new_shape):
    """
    Crops events and annotations to a centered region of the specified shape.
    Events and bounding boxes are then shifted so that the top-left event
    margins always start at (0, 0).

    Args:
        l: number of input events (unused; the new count is recomputed)
        x, y, ts, p: per-event coordinates, timestamps and polarities
        bboxes: [num_boxes, 4] array of coordinates normalized w.r.t. old_shape
        old_shape, new_shape: (height, width) tuples

    Returns:
        (new_l, new_x, new_y, new_ts, new_p, new_bboxes), with the boxes
        normalized w.r.t. new_shape. The input `bboxes` array is not modified.
    """
    new_h, new_w = new_shape
    old_h, old_w = old_shape

    x_min, x_max = x.min(), x.max()
    y_min, y_max = y.min(), y.max()

    # Top-left corner of the centered crop window in the original event frame.
    # BUG FIX: the original derived the horizontal offset from the y-extent
    # (and vice versa) and forgot to add back the minimum coordinate.
    crop_left = x_min + (x_max - x_min - new_w) // 2
    crop_top = y_min + (y_max - y_min - new_h) // 2

    events_inside = np.logical_and.reduce([
        x >= crop_left, x < crop_left + new_w,
        y >= crop_top, y < crop_top + new_h,
    ])
    new_x, new_y, new_ts, new_p = (x[events_inside], y[events_inside],
                                   ts[events_inside], p[events_inside])

    # Shift the surviving events so their margins start at (0, 0); the same
    # shift must be applied to the boxes to keep them aligned.
    x_shift = new_x.min()
    y_shift = new_y.min()
    new_x = new_x - x_shift
    new_y = new_y - y_shift
    new_l = new_x.shape[0]

    # BUG FIX: the original scaled the boxes by old_w/old_h twice (once in
    # place, once again in the expression), mutated the caller's array, and
    # shifted the y coordinates by the x offset. Scale once, shift per axis,
    # clip to the crop, renormalize by the new shape.
    new_bboxes = bboxes.copy()
    new_bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]] * old_w - x_shift, 0, new_w) / new_w
    new_bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]] * old_h - y_shift, 0, new_h) / new_h
    return new_l, new_x, new_y, new_ts, new_p, new_bboxes
def apply_nms(batch_bboxes, batch_scores, batch_valid=None, iou_threshold=0.5):
    """
    Applies Non-Maximum-Suppression to a batch of center-format boxes.
    Implementation adapted from:
    http://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/

    :param batch_bboxes: [batch_size, num_boxes, 4] array of
        (x_center, y_center, w_box, h_box) box parameters
    :param batch_scores: [batch_size, num_boxes] array of per-box scores
    :param batch_valid: optional [batch_size, num_boxes] boolean mask selecting
        which boxes take part in the computation (default: all of them)
    :param iou_threshold: IOU above which a lower-scored box is suppressed
    :return: a list of two numpy arrays giving, respectively, the batch index
        and the box index (in batch_bboxes) of every box kept by NMS
    """
    if batch_valid is None:
        batch_valid = [None] * batch_bboxes.shape[0]
    kept_per_sample = []
    # One NMS pass per batch element.
    for boxes, scores, valid in zip(batch_bboxes, batch_scores, batch_valid):
        to_original = None
        if valid is not None:
            boxes = boxes[valid]
            scores = scores[valid]
            # Mapping from filtered indices back to the original positions.
            to_original = np.where(valid)[0]
        if len(boxes) == 0:
            kept_per_sample.append([])
            continue
        cx, cy, bw, bh = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
        areas = bw * bh
        order = np.argsort(scores)  # ascending: best-scored box sits at the end
        kept = []
        while len(order) > 0:
            last = len(order) - 1
            best = order[last]
            kept.append(best)
            rest = order[:last]
            # Intersection rectangle between the best box and all others.
            ix1 = np.maximum(cx[best] - bw[best] / 2, cx[rest] - bw[rest] / 2)
            iy1 = np.maximum(cy[best] - bh[best] / 2, cy[rest] - bh[rest] / 2)
            ix2 = np.minimum(cx[best] + bw[best] / 2, cx[rest] + bw[rest] / 2)
            iy2 = np.minimum(cy[best] + bh[best] / 2, cy[rest] + bh[rest] / 2)
            iw = np.maximum(0, ix2 - ix1)
            ih = np.maximum(0, iy2 - iy1)
            inter = iw * ih
            iou = inter / (areas[rest] + areas[best] - inter)
            # Drop the best box itself plus everything it suppresses.
            order = np.delete(order, np.concatenate(([last], np.where(iou >= iou_threshold)[0])))
        if to_original is not None:
            kept = list(to_original[kept])
        kept_per_sample.append(kept)
    # Flatten into (batch_index, box_index) coordinate arrays.
    idx_axis_0 = np.array(sum([[batch] * len(idx) for batch, idx in enumerate(kept_per_sample)], []))
    idx_axis_1 = np.array(sum(kept_per_sample, []))
    return [idx_axis_0, idx_axis_1]
| StarcoderdataPython |
class Restaurant():
    """A simple restaurant with a name, a cuisine type and a served counter."""

    def __init__(self, name, c_type):
        self.name = name
        self.c_type = c_type
        self.served = 0

    def describe_R(self):
        """Print the restaurant's name, cuisine type and customer count."""
        print(f"\nName: {self.name}\nCuisine Type: {self.c_type}\nCustomers: {self.served}")

    def open_R(self):
        """Announce that the restaurant is open."""
        print(f"{self.name} is Open.\n")

    def get_served(self):
        """Print how many customers have been served so far."""
        print(f"{self.served} were served.")

    def set_served(self, n):
        """Overwrite the served-customer count."""
        self.served = n

    def increment_served(self, n):
        """Add n customers to the served count."""
        self.served += n
# Quick manual exercise of the Restaurant class.
My_R = Restaurant("Cheese Burgers","Fast Food")
print(My_R.name + " " + My_R.c_type)
My_R.get_served()          # prints: 0 were served.
My_R.set_served(15)
My_R.get_served()          # prints: 15 were served.
My_R.increment_served(50)
My_R.get_served() | StarcoderdataPython |
5010492 | from mpkg.common import Soft
from mpkg.load import Load
from mpkg.utils import GetPage
class Package(Soft):
    # mpkg package definition for FFmpeg, scraped from the gyan.dev builds.
    ID = 'ffmpeg'

    def _prepare(self):
        """Fill self.data with the latest ffmpeg release metadata."""
        data = self.data
        # Executables exposed by the package after extraction.
        data.bin = [r'bin\ffmpeg.exe', r'bin\ffplay.exe', r'bin\ffprobe.exe']
        # Reuse the shared GitHub-page parser from the common-zpcc loader.
        parser = Load('http/common-zpcc.py', sync=False)[0][0].github
        url = 'https://www.gyan.dev/ffmpeg/builds/ffmpeg-release-github'
        header, links, data.date = parser(url)
        # Second space-separated token of the header — presumably the version
        # string (TODO confirm against the parser output).
        data.ver = header.split(' ')[1]
        data.changelog = 'https://ffmpeg.org/index.html#news'
        # 'ffmpeg-([\\d.-]+)-full_build-shared.(zip|7z)</a>'
        link = [link for link in links if 'full_build-shared.7z' in link][0]
        data.arch = {'64bit': link}
        data.sha256 = {'64bit': GetPage(
            'https://www.gyan.dev/ffmpeg/builds/ffmpeg-release-full-shared.7z.sha256')}
| StarcoderdataPython |
9683026 | <reponame>aiaio/django-svn-revision<gh_stars>1-10
# $Id: django-revision.py $
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
"""
Creates a template tag called {% revision %} that returns the current svn version.
Requires svnversion.
"""
import sys, os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../"))
from django import template
from svn_revision.templatetags import REVISION
register = template.Library()
@register.simple_tag
def revision():
    """Render the current svn revision number.

    Template usage: {% revision %}
    """
    return REVISION
| StarcoderdataPython |
6413725 | import dash
from utils.code_and_show import example_app
# Register this module as a Dash page; the description shows up in page listings.
dash.register_page(__name__, description="Interactively change the legend position")
# Module path after the "pages." prefix — used to locate the example's source file.
filename = __name__.split("pages.")[1]
# Markdown rendered underneath the example app.
notes = """
#### Plotly Documentation:
- [How to configure and style the legend](https://plotly.com/python/legend/)
#### Contributed by:
This example app was contributed by [Plotly](https://plotly.com/python/)
"""
# Page layout: the example app's code/show component plus the notes above.
layout = example_app(filename, notes=notes)
| StarcoderdataPython |
1698065 | from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.colors import colorConverter
import os
import ast
from scipy import ndimage
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import colourmix as colmix
# --- Global plotting configuration --------------------------------------------
matplotlib.rcParams.update({'font.size': 18})
# Simulation time point to visualise.  NOTE(review): shadows the stdlib name
# 'time' (and is itself shadowed by function parameters below).
time = 4000.0
# Index of the slice to show.
slicenr = 5
# Interval between stored simulation time steps.
tstep=50.0
# Which axis to cut along: 'dorso' (dorsoventral) or 'rostro' (rostrocaudal).
axis = 'dorso'#'dorso'
# What the animation iterates over: 'space' (slice index) or 'time'.
runThrough = 'space'
# Grid-unit-to-micrometre conversion factor used for axis tick labels.
scale = 0.5
# Base directory holding the precomputed simulation arrays.
StaticDataPath = 'cooltube_0.5_1'
# Figure aspect depends on the chosen cut axis.
if axis == 'dorso':
    fig = plt.figure(figsize = [6.5, 8])
if axis == 'rostro':
    fig = plt.figure(figsize = [4, 14])
ax1 = fig.add_subplot(111)
#DORSOVENTRAL
# generate the colors for your colormap (one per dorsoventral gene P/O/N)
colorP = colorConverter.to_rgba(dors.colours[0])
colorO = colorConverter.to_rgba(dors.colours[1])
colorN = colorConverter.to_rgba(dors.colours[2])
white='white'
# make the colormaps: each ramps from white to one gene colour
cmapP = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',[white,colorP],256)
cmapO = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapO',[white,colorO],256)
cmapN = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapN',[white,colorN],256)
cmapP._init()
cmapO._init()
cmapN._init() # create the _lut array, with rgba values
# create your alpha array and fill the colormap with them.
# here it is progressive, but you can create whatever you want
dAlphas = np.linspace(0, 0.8, cmapO.N+3)
cmapP._lut[:,-1] = dAlphas
cmapO._lut[:,-1] = dAlphas
cmapN._lut[:,-1] = dAlphas
#ROSTROCAUDAL
# Same construction for the rostrocaudal genes FB/MB/HB.
colorFB = colorConverter.to_rgba(ros.colours[0])
colorMB = colorConverter.to_rgba(ros.colours[1])
colorHB = colorConverter.to_rgba(ros.colours[2])
white='white'
# make the colormaps
cmapFB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapFB',[white,colorFB],256)
cmapMB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapMB',[white,colorMB],256)
cmapHB = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapHB',[white,colorHB],256)
cmapFB._init()
cmapMB._init()
cmapHB._init() # create the _lut array, with rgba values
# create your alpha array and fill the colormap with them.
# here it is progressive, but you can create whatever you want
rAlphas = np.linspace(0, 0.8, cmapMB.N+3)
cmapFB._lut[:,-1] = rAlphas
cmapMB._lut[:,-1] = rAlphas
cmapHB._lut[:,-1] = rAlphas
def compare(matrices):
    """Per-voxel winner-take-all across equally-shaped 3D arrays.

    For every voxel, find which input array holds the largest value there
    (the "winning gene") and copy that maximum into the winner's layer of
    the output; all other layers stay zero at that voxel.  Ties go to the
    first array, exactly like np.argmax.

    Parameters
    ----------
    matrices : sequence of ndarray
        Equally-shaped 3D arrays, one per gene.

    Returns
    -------
    ndarray
        Shape ``(len(matrices),) + matrices[0].shape`` with only the winning
        values kept.

    Notes
    -----
    Replaces the original O(X*Y*Z) Python triple loop with vectorized
    numpy operations; the result is identical.
    """
    stacked = np.asarray(matrices)
    # Index of the winning gene at every voxel (first wins on ties).
    winners = np.argmax(stacked, axis=0)[np.newaxis, ...]
    show = np.zeros_like(stacked)
    # Scatter each voxel's maximum into the winning gene's layer only.
    np.put_along_axis(show, winners,
                      np.take_along_axis(stacked, winners, axis=0), axis=0)
    return show
def getCut(axis, t=0, s=0, dataPath=StaticDataPath):
    """Return the three winner-take-all gene layers of one 2D slice.

    Parameters
    ----------
    axis : str
        'dorso'   -> slice the dorsoventral  dComp at x == s (P/O/N layers),
        'rostro'  -> slice the rostrocaudal  rComp at z == s (FB/MB/HB layers),
        'rostro2' -> slice the rostrocaudal  rComp at y == s (FB/MB/HB layers).
    t : float
        Simulation time of the stored snapshot (formatted into file names).
    s : int
        Slice index along the axis given above.
    dataPath : str
        Base directory containing the 'dorso'/'rostro' subdirectories.

    Returns
    -------
    tuple of three 2D ndarrays, one per gene.

    Raises
    ------
    ValueError
        If `axis` is not one of the three recognised names.
    """
    def _load_comp(subdir, cache_name, gene_names):
        # Load the cached winner-take-all array, or build it from the three
        # per-gene arrays (via compare) and cache it for the next call.
        base = dataPath + '/' + subdir + '/' + 'T%1.1f' % t
        cache_file = base + '_' + cache_name + '.npy'
        if os.path.isfile(cache_file):
            return np.load(cache_file)
        comp = compare([np.load(base + '_' + g + '.npy') for g in gene_names])
        np.save(cache_file, comp)
        return comp

    if axis == 'dorso':
        dComp = _load_comp('dorso', 'dComp', ['P', 'O', 'N'])
        return dComp[0][s, :, :], dComp[1][s, :, :], dComp[2][s, :, :]
    if axis == 'rostro':
        rComp = _load_comp('rostro', 'rComp', ['FB', 'MB', 'HB'])
        return rComp[0][:, :, s], rComp[1][:, :, s], rComp[2][:, :, s]
    if axis == 'rostro2':
        # BUG FIX: the original 'rostro2' branch built the fallback file names
        # without the '.npy' suffix ('_FB' instead of '_FB.npy'), so rebuilding
        # a missing rComp cache always raised FileNotFoundError.
        rComp = _load_comp('rostro', 'rComp', ['FB', 'MB', 'HB'])
        return rComp[0][:, s, :], rComp[1][:, s, :], rComp[2][:, s, :]
    raise ValueError("axis must be 'dorso', 'rostro' or 'rostro2', got %r" % (axis,))
def getTS(ts, rate, t=time, s=slicenr):
    """Map an animation frame counter to a (time, slice) pair.

    ts names the quantity the animation loops over: 'time' advances the
    time point (rate * tstep) at the default slice, 'space' advances the
    slice index at the fixed time t.  The `s` parameter is accepted for
    symmetry but unused.
    """
    if ts == 'time':
        t_ret, s_ret = rate * tstep, slicenr
    if ts == 'space':
        t_ret, s_ret = t, rate
    return t_ret, s_ret
def update(rate):
    """FuncAnimation callback: redraw one frame of the slice view on ax1.

    Depending on the module-level `runThrough`, `rate` advances either the
    time point or the slice index (mapped through getTS).
    """
    ax1.clear()
    t,s = getTS(runThrough,rate)
    #print(rate,t,s)
    cut = getCut(axis,t,s)
    ax1.set_title("slice nr %d time %1.1f" %(s,t))
    #if t < len(data[0][0]):
    #ax1.matshow(data[:,t,:])
    #t+=1
    #else:
    #t=0
    # ax1.imshow(arrFB[rate,:,:],interpolation='bilinear',cmap=cmap1)
    # ax1.imshow(arrMB[rate,:,:],interpolation='bilinear',cmap=cmap2)
    # ax1.imshow(arrHB[rate,:,:],interpolation='bilinear',cmap=cmap3)
    # Pick the colormap triple and scatter-marker size for the chosen axis.
    if axis == 'dorso':
        cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
        size = 500
    if axis == 'rostro':
        cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
        size =100
    # ax1.imshow(cut[0],cmap=cmap1)
    # ax1.imshow(cut[1],cmap=cmap2)
    # ax1.imshow(cut[2],cmap=cmap3)
    """
    ax1.imshow(cut[0],interpolation='nearest',cmap=cmap1)
    ax1.imshow(cut[1],interpolation='nearest',cmap=cmap2)
    ax1.imshow(cut[2],interpolation='nearest',cmap=cmap3)
    """
    # Map each gene layer's nonzero values through its own colormap...
    mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
    mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
    mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
    c1= np.where(cut[0])
    colors1 = mapper1.to_rgba(cut[0][c1])
    c2= np.where(cut[1])
    colors2 = mapper2.to_rgba(cut[1][c2])
    c3= np.where(cut[2])
    colors3 = mapper3.to_rgba(cut[2][c3])
    ax1.set_aspect('auto')
    ax1.set_xlim([-1,16])
    # ...and draw each layer as scatter points at its grid coordinates.
    ax1.scatter(c1[0],c1[1],c=colors1,s=size)
    ax1.scatter(c2[0],c2[1],c=colors2,s=size)
    ax1.scatter(c3[0],c3[1],c=colors3, s=size)
    #plt.savefig('unsinnfig/t%d'% rate)
def plotSlices(time, dorsnr, rosnr, rosnr2, plotmethod='circle',save=True, dataPath=StaticDataPath):
    """Plot one dorsoventral and two rostrocaudal slices of the gene data.

    time       : simulation time of the snapshot to load (shadows the
                 module-level `time` inside this function)
    dorsnr     : x index of the DV slice
    rosnr      : z index of the first RC slice
    rosnr2     : y index of the second RC slice
    plotmethod : 'circle' (scatter markers) or 'square' (imshow pixels)
    save       : write the figures into <dataPath>/allPictures/ when True
    dataPath   : base directory with the precomputed arrays
    """
    # fug = plt.figure(figsize=(8, 6))
    # gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
    # axDors = fug.add_subplot(gs[0])
    # axRos = fug.add_subplot(gs[1])
    plt.close("all")
    # One figure for the DV slice, one (two panels) for the RC slices.
    fug = plt.figure(figsize = [7.5, 8])
    fag = plt.figure(figsize = [10, 14])
    axDors = fug.add_subplot(1,1,1)
    axRos = fag.add_subplot(1,2,1)
    axRos2 = fag.add_subplot(1,2,2)
    # Axis titles/ticks are converted from grid units to micrometres via `scale`.
    axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
    axDors.set_xlabel("y [µm]")
    dxticks = np.arange(0,20,5)
    axDors.xaxis.set_ticks(dxticks)
    axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
    dyticks = np.arange(0,65,10)
    axDors.yaxis.set_ticks(dyticks)
    axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
    axDors.set_ylabel("z [µm]")
    axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    rxticks = dxticks
    axRos.xaxis.set_ticks(rxticks)
    axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
    ryticks = np.arange(0,65,10)
    axRos.yaxis.set_ticks(ryticks)
    axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
    axRos.set_xlabel("y [µm]")
    axRos.set_ylabel("x [µm]")
    axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    r2xticks = np.arange(0,65,10)
    axRos2.xaxis.set_ticks(r2xticks)
    axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
    r2yticks = np.arange(0,65,10)
    axRos2.yaxis.set_ticks(r2yticks)
    axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
    axRos2.set_xlabel("z [µm]")
    axRos2.set_ylabel("x [µm]")
    # Load the three winner-take-all slices (see getCut for the axis semantics).
    dataDors = getCut('dorso', t= time, s=dorsnr, dataPath = dataPath)
    dataRos = getCut('rostro', t= time, s=rosnr, dataPath = dataPath)
    dataRos2 = getCut('rostro2', t= time, s=rosnr2,dataPath = dataPath)
    for axtype in ['rostro','dorso']:
        if axtype == 'dorso':
            cmap1,cmap2,cmap3 = cmapP,cmapO,cmapN
            size = 500
            ax = axDors
            cut =dataDors
        if axtype == 'rostro':
            cmap1,cmap2,cmap3 = cmapFB,cmapMB,cmapHB
            size =100
            ax=axRos
            ax2=axRos2
            cut= dataRos
            cut2=dataRos2
        if plotmethod == 'circle':
            # Scatter each gene layer's nonzero voxels with its own colormap.
            mapper1 = matplotlib.cm.ScalarMappable(cmap=cmap1)
            mapper2 = matplotlib.cm.ScalarMappable(cmap=cmap2)
            mapper3 = matplotlib.cm.ScalarMappable(cmap=cmap3)
            c1= np.where(cut[0])
            colors1 = mapper1.to_rgba(cut[0][c1])
            c2= np.where(cut[1])
            colors2 = mapper2.to_rgba(cut[1][c2])
            c3= np.where(cut[2])
            colors3 = mapper3.to_rgba(cut[2][c3])
            ax.set_aspect('auto')
            #ax.set_xlim([-1,16])
            ax.scatter(c1[0],c1[1],c=colors1,s=size)
            ax.scatter(c2[0],c2[1],c=colors2,s=size)
            ax.scatter(c3[0],c3[1],c=colors3, s=size)
        if plotmethod == 'square':
            # ax1.imshow(cut[0],cmap=cmap1)
            # ax1.imshow(cut[1],cmap=cmap2)
            # ax1.imshow(cut[2],cmap=cmap3)
            # Pixel rendering; slices are trimmed/rotated to match orientation.
            if axtype == 'rostro':
                ax.imshow(cut[0][:-1,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax.imshow(cut[1][:-1,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax.imshow(cut[2][:-1,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[0][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[1][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax2.imshow(ndimage.rotate(cut2[2][:-1,:-1],-90)[:,:-1],interpolation='nearest',cmap=cmap3,origin = 'lower')
            # rcut0 = ndimage.rotate(cut[0], 90)
            # rcut1 = ndimage.rotate(cut[1], 90)
            # rcut2 = ndimage.rotate(cut[2], 90)
            # ax.imshow(rcut0,interpolation='nearest',cmap=cmap1)
            # ax.imshow(rcut1,interpolation='nearest',cmap=cmap2)
            # ax.imshow(rcut2,interpolation='nearest',cmap=cmap3)
            if axtype == 'dorso':
                rcut0 = ndimage.rotate(cut[0], -90)
                rcut1 = ndimage.rotate(cut[1], -90)
                rcut2 = ndimage.rotate(cut[2], -90)
                ax.imshow(rcut0[:-1,1:],interpolation='nearest',cmap=cmap1,origin = 'lower')
                ax.imshow(rcut1[:-1,1:],interpolation='nearest',cmap=cmap2,origin = 'lower')
                ax.imshow(rcut2[:-1,1:],interpolation='nearest',cmap=cmap3,origin = 'lower')
    if save ==True:
        fug.savefig(dataPath + '/allPictures/T%1.1f_DV%d.png' %(time,dorsnr) )
        fag.savefig(dataPath + '/allPictures/T%1.1f_RC%d_%d.png' %(time,rosnr,rosnr2) )
def plotSliceMix(plotFrom, time, dorsnr, rosnr, rosnr2,save=True):
    """Plot slices coloured by DV x RC gene *combination*.

    For each pair (P/O/N gene, FB/MB/HB gene) the voxels where both are the
    winner get a dedicated colour from colmix.colours; one DV slice and two
    RC slices are then drawn and optionally saved.

    plotFrom : base data directory (also used as the save path)
    time     : simulation time of the snapshot; shadows the module-level `time`
    dorsnr   : x index of the DV slice
    rosnr    : z index of the first RC slice
    rosnr2   : y index of the second RC slice
    save     : write figures into <plotFrom>/allPictures/ when True
    """
    dataPath = plotFrom
    """Plot gene combinations with a different colour for each combination."""
    # NOTE(review): wntDir/shhDir/mixDir/allDir/baseLevels are computed but
    # unused in this function — confirm whether they can be removed.
    wntDir = plotFrom + '/Wnt/'
    shhDir = plotFrom + '/Shh/'
    rostroDir = plotFrom + '/rostro/'
    dorsoDir = plotFrom + '/dorso/'
    mixDir = plotFrom + '/Mix/'
    baseLevels = np.load(plotFrom + '/BaseLevels.npy')
    allDir = plotFrom + '/allPictures/'
    # Load (or rebuild and cache) the dorsoventral winner-take-all array.
    pFile = dorsoDir + 'T%1.1f' %time + '_P'
    oFile = dorsoDir + 'T%1.1f' %time + '_O'
    nFile = dorsoDir + 'T%1.1f' %time + '_N'
    dcFile = dorsoDir + 'T%1.1f' %time + '_dComp.npy'
    pArray =np.load(pFile +'.npy')
    oArray =np.load(oFile +'.npy')
    nArray =np.load(nFile +'.npy')
    if os.path.isfile(dcFile):
        dComp = np.load(dcFile)
    else:
        dComp = compare([pArray,oArray,nArray])
        np.save(dcFile,dComp)
    # Same for the rostrocaudal winner-take-all array.
    fbFile = rostroDir + 'T%1.1f' %time + '_FB'
    mbFile = rostroDir + 'T%1.1f' %time + '_MB'
    hbFile = rostroDir + 'T%1.1f' %time + '_HB'
    rcFile = rostroDir + 'T%1.1f' %time + '_rComp.npy'
    fbArray =np.load(fbFile +'.npy')
    mbArray =np.load(mbFile +'.npy')
    hbArray =np.load(hbFile +'.npy')
    if os.path.isfile(rcFile):
        rComp = np.load(rcFile)
    else:
        rComp = compare([fbArray,mbArray,hbArray])
        np.save(rcFile,rComp)
    dimX = len(rComp[0])
    dimY = len(rComp[0][0])
    dimZ = len(rComp[0][0][0])
    # mixArray[i] marks (with 1) the voxels where DV gene and RC gene i are
    # both nonzero winners; all other voxels become NaN below so they are
    # masked out when plotting.
    mixArray = np.zeros((len(colmix.colours),dimX,dimY,dimZ))
    i=0
    for pon in dComp:
        for fbmbhb in rComp:
            # Intersect the nonzero coordinate sets of the two gene layers
            # by round-tripping coordinates through their string form.
            an = np.transpose(np.nonzero(pon))
            bn = np.transpose(np.nonzero(fbmbhb))
            anl = an.tolist()
            bnl = bn.tolist()
            incommon = set(str(x) for x in anl) & set(str(y) for y in bnl)
            incommon = np.asarray([ast.literal_eval(i) for i in incommon])
            for coord in incommon:
                #print(coord)
                mixArray[i][coord[0]][coord[1]][coord[2]] = 1
            i+=1
    mixArray[mixArray==0] = np.nan
    #plt.close("all")
    fug = plt.figure(figsize = [7.5, 8])
    fag = plt.figure(figsize = [10, 14])
    axDors = fug.add_subplot(1,1,1)
    axRos = fag.add_subplot(1,2,1)
    axRos2 = fag.add_subplot(1,2,2)
    # Axis titles/ticks converted from grid units to micrometres via `scale`.
    axDors.set_title("DV slice at \n x = %d µm, t = %1.1f " %(dorsnr*10/scale, time))
    axDors.set_xlabel("y [µm]")
    dxticks = np.arange(0,20,5)
    axDors.xaxis.set_ticks(dxticks)
    axDors.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in dxticks])
    dyticks = np.arange(0,65,10)
    axDors.yaxis.set_ticks(dyticks)
    axDors.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in dyticks])
    axDors.set_ylabel("z [µm]")
    axRos.set_title("RC slice at \n z = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    rxticks = dxticks
    axRos.xaxis.set_ticks(rxticks)
    axRos.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in rxticks])
    ryticks = np.arange(0,65,10)
    axRos.yaxis.set_ticks(ryticks)
    axRos.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in ryticks])
    axRos.set_xlabel("y [µm]")
    axRos.set_ylabel("x [µm]")
    axRos2.set_title("RC slice at \n y = %d µm, t = %1.1f " %(rosnr*10/scale, time))
    r2xticks = np.arange(0,65,10)
    axRos2.xaxis.set_ticks(r2xticks)
    axRos2.xaxis.set_ticklabels(['%d' %(x*10/scale) for x in r2xticks])
    r2yticks = np.arange(0,65,10)
    axRos2.yaxis.set_ticks(r2yticks)
    axRos2.yaxis.set_ticklabels(['%d' %(y*10/scale) for y in r2yticks])
    axRos2.set_xlabel("z [µm]")
    axRos2.set_ylabel("x [µm]")
    for axtype in ['rostro','dorso']:
        # One flat single-colour colormap per gene combination.
        for i in range(len(mixArray)):
        #for i in range(3):
            colours = colmix.colours[i]
            #colours2 = colmix.colours[i+1]
            myCmap = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',[colours,colours],256)
            #myCmap2 = matplotlib.colors.LinearSegmentedColormap.from_list('my_cmapP',['white',colours2],256)
            print(i, colours)
            if axtype == 'dorso':
                size = 500
                ax = axDors
                arr = getMixCut(axtype,mixArray[i],s=dorsnr)
                arr=(np.flip(np.transpose(arr),axis=1))[:-1,1:]
                cut = np.ma.masked_where(np.isnan(arr),arr)
                #cut= np.flip(cut)
                ax.set_aspect('equal')
            if axtype == 'rostro':
                size =100
                ax=axRos
                ax2=axRos2
                ax.set_aspect('equal')
                ax2.set_aspect('equal')
                arr= getMixCut('rostro',mixArray[i],s=rosnr)
                arr2=getMixCut('rostro2',mixArray[i],s=rosnr2)
                cut= np.ma.masked_where(np.isnan(arr),arr)
                cut2 = np.ma.masked_where(np.isnan(arr2),arr2)
                cut2 = (np.flip(np.transpose(cut2),axis=1))
                cut2= cut2[:,1:]
            # ax1.imshow(cut[0],cmap=cmap1)
            # ax1.imshow(cut[1],cmap=cmap2)
            # ax1.imshow(cut[2],cmap=cmap3)
            if axtype == 'rostro':
                print(cut[:-1,:-1])
                ax.pcolor(cut[:-1,:-1],cmap=myCmap)
                ax2.pcolor(cut2[:-1,:-1],cmap=myCmap)
            # rcut0 = ndimage.rotate(cut[0], 90)
            # rcut1 = ndimage.rotate(cut[1], 90)
            # rcut2 = ndimage.rotate(cut[2], 90)
            # ax.imshow(rcut0,interpolation='nearest',cmap=cmap1)
            # ax.imshow(rcut1,interpolation='nearest',cmap=cmap2)
            # ax.imshow(rcut2,interpolation='nearest',cmap=cmap3)
            if axtype == 'dorso':
                print("DORSO")
                # NOTE(review): rcut is computed but `cut` (unrotated) is
                # plotted — confirm which orientation is intended.
                rcut = ndimage.rotate(cut, -90)
                ax.pcolor(cut,cmap=myCmap)
    if save ==True:
        fug.savefig(dataPath + '/allPictures/T%1.1f_DV%d_Mix.png' %(time,dorsnr) )
        fag.savefig(dataPath + '/allPictures/T%1.1f_RC%d_%d_Mix.png' %(time,rosnr,rosnr2) )
def getMixCut(axis, mixArray_i, s=0):
    """Extract one 2D slice from a 3D gene-combination array.

    Parameters
    ----------
    axis : str
        'dorso'   -> fix the first  index (x == s),
        'rostro'  -> fix the last   index (z == s),
        'rostro2' -> fix the middle index (y == s).
    mixArray_i : ndarray
        3D array for a single gene combination.
    s : int
        Slice index along the chosen axis.

    Returns
    -------
    ndarray
        The selected 2D slice (a view into `mixArray_i`).

    Raises
    ------
    ValueError
        For an unrecognised `axis` (the original fell through to an
        UnboundLocalError).  Leftover debug prints were also removed.
    """
    if axis == 'dorso':
        return mixArray_i[s, :, :]
    if axis == 'rostro':
        return mixArray_i[:, :, s]
    if axis == 'rostro2':
        return mixArray_i[:, s, :]
    raise ValueError("unknown axis %r; expected 'dorso', 'rostro' or 'rostro2'" % (axis,))
def test():
    """Visual smoke test: overlay two complementary masked random fields
    using the P and O colormaps, separately and combined."""
    plt.close("all")
    fig = plt.figure()
    ax = fig.add_subplot(3,1,1)
    ax3=fig.add_subplot(3,1,3)
    ax2 = fig.add_subplot(3,1,2)
    arr = np.random.rand(10,10)
    arr2 = np.copy(arr)
    # Split one random field into two complementary halves (NaN elsewhere).
    arr[arr<=0.5] = np.nan
    arr2[arr2>0.5] = np.nan
    print(arr)
    m = np.ma.masked_where(np.isnan(arr),arr)
    m2 = np.ma.masked_where(np.isnan(arr2),arr2)
    ax.pcolor(m,cmap =cmapP)
    ax2.pcolor(m2,cmap=cmapO)
    # Bottom panel: both halves overlaid.
    ax3.pcolor(m,cmap=cmapP)
    ax3.pcolor(m2,cmap=cmapO)
#animation = FuncAnimation(fig, update, interval=700)
# Render and save one set of slice figures for a single snapshot.
myt= 8000
# Slice indices: DV (x), RC (z), RC (y).
dnr=10
rnr=5
rnr2=4
plotSlices(myt,dnr,rnr,rnr2,save=True,plotmethod='square')
plotSliceMix(StaticDataPath,myt,dnr,rnr,rnr2)
#(StaticDataPath, 4000.0, 10,5,4)
#test()
plt.show()
11350378 | import numpy as np
import pandas as pd
import numpy.testing as npt
from ..viewers import topic_mapping
from ..cooking_machine.models.base_model import BaseModel
class dummy_model(BaseModel):
    """Minimal stand-in for a topic model: wraps a raw phi matrix."""

    def __init__(self, matrix):
        self.values = matrix

    def get_phi(self, class_ids):
        """Return the stored matrix as a DataFrame indexed by topic name."""
        row_labels = [f'topic_{row}' for row in range(len(self.values))]
        return pd.DataFrame(self.values, index=row_labels)
def test_diagonal_answer_same():
    """Mapping a matrix onto itself must yield the identity permutation."""
    matrix = np.array([[1, 0],
                       [2, 0],
                       [3, 0]])
    expected = ([0, 1, 2], [0, 1, 2])
    mapping = topic_mapping.compute_topic_mapping(matrix, matrix)
    npt.assert_array_equal(mapping, expected)
def test_diagonal_answer_different():
    """Two different matrices with diagonal overlap map topic i to topic i."""
    left = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
    right = np.array([
        [0.5, 0.5, 0],
        [0, 0.5, 0.5],
        [0, 0, 1]])
    expected = ([0, 1, 2], [0, 1, 2])
    mapping = topic_mapping.compute_topic_mapping(left, right)
    npt.assert_array_equal(mapping, expected)
def test_map_viewer_min():
    """Default ('min') mode: the smaller model bounds the mapping length."""
    model_one = dummy_model([
        [1, 2, 3, 4, 5, 6],
        [0, 0, 0, 0, 0, 0],
    ])
    model_two = dummy_model([
        [0, 0],
        [1, 2],
    ])
    viewer = topic_mapping.TopicMapViewer(model_one, model_two)
    expected = ([0, 1], [0, 1])
    npt.assert_array_equal(viewer.view(), expected)
def test_map_viewer_max():
    """'max' mode: every topic of the larger model receives a partner."""
    model_one = dummy_model([
        [1, 2, 3, 4, 5, 6],
        [0, 0, 0, 0, 0, 0],
    ])
    model_two = dummy_model([
        [0, 0],
        [1, 2],
    ])
    viewer = topic_mapping.TopicMapViewer(model_one, model_two, mode='max')
    expected = ([0, 1, 2, 3, 4, 5], [0, 1, 0, 1, 0, 1])
    npt.assert_array_equal(viewer.view(), expected)
11219265 | <gh_stars>0
#!/usr/bin/python3
'''Module for a minecraft villager app in python3'''
#pylint: disable=E0611,W0611,W0201,W0640,C0301,C0200,W0613,R0201
from time import sleep
from functools import partial
from kivy.base import runTouchApp
from kivy.lang import Builder
from kivy.app import App
from kivy.config import Config
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.dropdown import DropDown
from kivy.properties import ObjectProperty
from modules.cjson import JsonHandler
class VillagesWidget(BoxLayout):
    '''Root widget of the village-selection (start) screen.'''
    # Bound from the matching rule in kv/village.kv.
    container = ObjectProperty(None)
class MainWidget(BoxLayout):
    '''Root widget of the main (village overview) screen.'''
    # Layout defaults applied to every instance.
    size_hint = (1, 1)
    orientation = 'vertical'
    padding = [20, 20, 20, 20]
    spacing = 20
    # Bound from the matching kv rule.
    container = ObjectProperty(None)
class VillagerWidget(BoxLayout):
    '''Root widget of the villager edit screen.'''
    # Bound from the matching rule in kv/villager.kv.
    container = ObjectProperty(None)
class ButtonGrid(GridLayout):
    '''Bottom control bar: add/remove-villager buttons plus a name input.'''
    size_hint = [1, None]
    cols = 3
    padding = [20, 20, 20, 20]
    spacing = [20, 20]
    def __init__(self):
        super(ButtonGrid, self).__init__()
        add_villager_button = Button(text='Add Villager', size_hint=[0.25, 0.1], font_size=25, background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
        self.add_widget(add_villager_button)
        # The lambdas close over villager_name_input, which is created further
        # down; this works because they are only evaluated at click time.
        add_villager_button.bind(on_release=lambda x: VTA.add_villager(villager_name_input.text))
        rm_villager_button = Button(text='Remove Villager', size_hint=[0.25, 0.1], font_size=25, background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
        self.add_widget(rm_villager_button)
        rm_villager_button.bind(on_release=lambda x: VTA.rm_villager(villager_name_input.text))
        villager_name_input = TextInput(hint_text='Name..', hint_text_color=(1, 1, 1, 1), size_hint=[0.25, 0.1], font_size=35, background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1), multiline=False)
        self.add_widget(villager_name_input)
        # Pressing Enter in the name field also adds the villager.
        villager_name_input.bind(on_text_validate=lambda x: VTA.add_villager(villager_name_input.text))
class VillagerGrid(GridLayout):
    '''Scrollable list of one button per villager on the main screen.'''
    cols = 1
    padding = [5, 5, 5, 5]
    spacing = [5, 5]
    size_hint = (1, None)
    def __init__(self):
        super(VillagerGrid, self).__init__()
        self.buttons = []
        for i in range(len(VTA.villagers)):
            self.buttons.append(Button(id=VTA.villagers[i], text=VTA.villagers[i], size_hint_y=None, height=80, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial binds the loop index eagerly (avoids late-binding lambdas).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Reload the main screen focused on the clicked villager.'''
        VTA.main(VTA.project, instance.text)
class TradingGrid(GridLayout):
    '''Editable grid of the current villager's trades.

    Each row is: demand amount | demand item | '-' | supply item |
    supply amount | remove button.  Edits are pushed to the app (VTA) both
    on Enter (on_text_validate) and on focus loss (focus callbacks).
    '''
    cols = 6
    padding = [10, 10, 10, 10]
    spacing = [10, 10]
    size_hint = (None, None)
    row_force_default = True
    row_default_height = 50
    def __init__(self):
        super(TradingGrid, self).__init__()
        # Parallel widget lists, one entry per trade row.
        self.amout_demands = []
        self.demands = []
        self.supplys = []
        self.amout_supplys = []
        self.remove_buttons = []
        for i in range(len(VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'])):
            self.amout_demands.append(TextInput(hint_text='Amount', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['amount_demand'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.amout_demands[i])
            # partial binds the row index eagerly (avoids late-binding lambdas).
            self.amout_demands[i].bind(on_text_validate=partial(self.transmitter_amount_demand, i))
            self.amout_demands[i].bind(focus=partial(self.transmitter2_amount_demand, i))
            self.demands.append(TextInput(hint_text='Item', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['demand'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.demands[i])
            self.demands[i].bind(on_text_validate=partial(self.transmitter_demand, i))
            self.demands[i].bind(focus=partial(self.transmitter2_demand, i))
            self.add_widget(Label(text='-', font_size=35))
            self.supplys.append(TextInput(hint_text='Item', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['supply'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.supplys[i])
            self.supplys[i].bind(on_text_validate=partial(self.transmitter_supply, i))
            self.supplys[i].bind(focus=partial(self.transmitter2_supply, i))
            self.amout_supplys.append(TextInput(hint_text='Amount', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['amount_supply'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.amout_supplys[i])
            self.amout_supplys[i].bind(on_text_validate=partial(self.transmitter_amount_supply, i))
            self.amout_supplys[i].bind(focus=partial(self.transmitter2_amount_supply, i))
            self.remove_buttons.append(Button(text='-', size_hint=(None, None), size=(40, 50), font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.remove_buttons[i])
            self.remove_buttons[i].bind(on_release=partial(self.transmitter_remove, i))
    def transmitter_amount_demand(self, i, instance):
        '''Enter pressed: store row i's demand amount.'''
        VTA.change_demand_amount(i, instance.text)
    def transmitter_demand(self, i, instance):
        '''Enter pressed: store row i's demand item.'''
        VTA.change_demand(i, instance.text)
    def transmitter_supply(self, i, instance):
        '''Enter pressed: store row i's supply item.'''
        VTA.change_supply(i, instance.text)
    def transmitter_amount_supply(self, i, instance):
        '''Enter pressed: store row i's supply amount.'''
        VTA.change_supply_amount(i, instance.text)
    def transmitter2_amount_demand(self, i, instance, istrue):
        '''Focus changed: store row i's demand amount when focus is lost.'''
        if not istrue:
            VTA.change_demand_amount(i, instance.text)
    def transmitter2_demand(self, i, instance, istrue):
        '''Focus changed: store row i's demand item when focus is lost.'''
        if not istrue:
            VTA.change_demand(i, instance.text)
    def transmitter2_supply(self, i, instance, istrue):
        '''Focus changed: store row i's supply item when focus is lost.'''
        if not istrue:
            VTA.change_supply(i, instance.text)
    def transmitter2_amount_supply(self, i, instance, istrue):
        '''Focus changed: store row i's supply amount when focus is lost.'''
        if not istrue:
            VTA.change_supply_amount(i, instance.text)
    def transmitter_remove(self, i, instance):
        '''Remove trade row i.'''
        VTA.rm_trading(i)
class ProfessionDropDown(DropDown):
    '''DropDown listing every profession known to the app data.'''
    def __init__(self):
        super(ProfessionDropDown, self).__init__()
        self.buttons = []
        for i in range(len(VTA.data['professions'])):
            self.buttons.append(Button(id=VTA.data['professions'][i].capitalize(), text=VTA.data['professions'][i].capitalize(), size_hint_y=None, height=40, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial binds the option index eagerly (avoids late-binding lambdas).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Apply the selected profession to the currently edited villager.'''
        VTA.change_profession(VTA.villager, VTA.data['professions'][i])
class CareerDropDown(DropDown):
    '''DropDown listing every career known to the app data.'''
    def __init__(self):
        super(CareerDropDown, self).__init__()
        self.buttons = []
        for i in range(len(VTA.data['careers'])):
            self.buttons.append(Button(id=VTA.data['careers'][i].capitalize(), text=VTA.data['careers'][i].capitalize(), size_hint_y=None, height=40, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial binds the option index eagerly (avoids late-binding lambdas).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Apply the selected career to the currently edited villager.'''
        VTA.change_career(VTA.villager, VTA.data['careers'][i])
class VillageToolApp(App):
'''All functions of the app.'''
    def build(self):
        '''Build and return the start screen (kivy App entry point).'''
        self.icon = 'src/minecraft32px.png'
        self.project = str()
        self.file = 'kv/village.kv'
        self.data = JsonHandler.importer('data')
        self.root = Builder.load_file(self.file)
        Window.maximize()
        ####################
        # NOTE(review): hard-coded jump straight into the 'vale' project —
        # presumably a development shortcut; confirm before release.
        self.main('vale', None)
        ####################
    def main(self, project_name, villager):
        '''Load the main screen for a project.

        project_name : name of the village project (its JSON file is created
                       on first use); an empty string is ignored.
        villager     : villager to show in the quick-view panel, or None to
                       fall back to the first villager (if any).
        '''
        if project_name == '':
            return
        self.title = project_name.lower()
        self.project = project_name.lower()
        # Load the project's JSON, creating an empty village on first use.
        try:
            self.village = JsonHandler.importer(self.project)
        except FileNotFoundError:
            JsonHandler.exporter(self.project, {self.project: {'name': self.project, 'villagers': {}}})
            self.village = JsonHandler.importer(self.project)
        self.villagers = list(self.village[self.project]['villagers'].keys())
        if villager is None:
            try:
                villager = self.villagers[0]
            except IndexError:
                # Empty village: the quick-view shows 'None' placeholders below.
                pass
        Builder.unload_file(self.file)
        self.root.clear_widgets()
        '''self.file = 'kv/main.kv'
        screen = Builder.load_file(self.file)
        villager_grid = VillagerGrid()
        villager_grid.bind(minimum_height=villager_grid.setter('height'))
        layout = ScrollView(pos_hint={'center_x': .5, 'center_y': .5}, do_scroll_x=False)
        layout.add_widget(villager_grid)
        screen.add_widget(layout)
        self.root.add_widget(screen)'''
        screen = MainWidget()
        topbox = BoxLayout(size_hint=(1, 1), orientation='horizontal', padding=20, spacing=20)
        # Quick-view panel: name / profession / career of the focused villager.
        quickview = GridLayout(cols=1, padding=[5, 5, 5, 5], spacing=5, size_hint=(1, None))
        if villager is not None:
            quickview.add_widget(TextInput(text=villager, font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text=self.village[self.project]['villagers'][villager]['profession'].capitalize(), font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text=self.village[self.project]['villagers'][villager]['career'].capitalize(), font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            edit_button = Button(text='Edit', font_size=30, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
            edit_button.bind(on_release=lambda x: self.load_villager(villager))
            quickview.add_widget(edit_button)
        else:
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
        topbox.add_widget(quickview)
        # Scrollable list of all villagers next to the quick-view panel.
        villager_grid = VillagerGrid()
        villager_grid.bind(minimum_height=villager_grid.setter('height'))
        villager_scroll = ScrollView(pos_hint={'center_x': .5, 'center_y': .5}, do_scroll_x=False)
        villager_scroll.add_widget(villager_grid)
        topbox.add_widget(villager_scroll)
        screen.add_widget(topbox)
        # Bottom control bar (add/remove villager).
        button_grid = ButtonGrid()
        screen.add_widget(button_grid)
        self.root.add_widget(screen)
    def add_villager(self, name):
        '''Create an empty villager entry, persist it, and reload the main screen.

        NOTE(review): an existing villager with the same name is silently
        overwritten — confirm whether that is intended.
        '''
        # Ignore the empty string that a blank text input produces.
        if name != '':
            self.village[self.project]['villagers'][name] = dict()
            self.village[self.project]['villagers'][name]['name'] = name
            self.village[self.project]['villagers'][name]['profession'] = 'none'
            self.village[self.project]['villagers'][name]['career'] = 'none'
            self.village[self.project]['villagers'][name]['tradings'] = list()
            JsonHandler.exporter(self.project, self.village)
            self.main(self.project, None)
    def rm_villager(self, name):
        '''Remove a villager by name, persist, and reload the main screen.

        Unknown names are a deliberate no-op (the original docstring wrongly
        said "Adding a villager").
        '''
        try:
            del self.village[self.project]['villagers'][name]
            JsonHandler.exporter(self.project, self.village)
            self.main(self.project, None)
        except KeyError:
            # Best-effort: deleting a non-existent villager does nothing.
            pass
def load_villager(self, name):
    '''Build and display the edit screen for the villager called *name*.

    Reloads the root widget tree from kv/villager.kv and populates it with
    the villager's name field, profession/career dropdowns, the "add trade"
    button and the scrollable trade list.
    '''
    # Remember which villager is currently being edited; the trading
    # callbacks (add_trading, rm_trading, change_*) rely on this.
    self.villager = name
    Builder.unload_file(self.file)
    self.root.clear_widgets()
    self.file = 'kv/villager.kv'
    screen = Builder.load_file(self.file)
    layout = GridLayout(cols=1, padding=[20, 20, 20, 20], spacing=5, size_hint=(1, 1), pos=(150, 10), size=(self.root.width - 300, self.root.height - 20))
    # Editable name field; pressing Enter renames the villager.
    input_name = TextInput(text=name, multiline=False, size_hint_y=None, height=80, font_size=40, font_color=(1, 0.98, 0, 1), foreground_color=(1, 1, 1, 1), background_color=(0, 0.5, 1, 1))
    input_name.bind(on_text_validate=lambda x: self.rename_villager(name, input_name.text))
    layout.add_widget(input_name)
    # Dropdowns are stored on self so change_profession/change_career can
    # dismiss them after a selection.
    self.profession_dropdown = ProfessionDropDown()
    profession_button = Button(text=self.village[self.project]['villagers'][name]['profession'].capitalize(), size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
    profession_button.bind(on_release=self.profession_dropdown.open)
    layout.add_widget(profession_button)
    self.career_dropdown = CareerDropDown()
    career_button = Button(text=self.village[self.project]['villagers'][name]['career'].capitalize(), size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
    career_button.bind(on_release=self.career_dropdown.open)
    layout.add_widget(career_button)
    # '+' button appends a new blank trade to this villager.
    add_button = Button(text='+', size_hint=(None, None), size=(40, 40), font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
    add_button.bind(on_release=lambda x: self.add_trading())
    layout.add_widget(add_button)
    trading_scroll = ScrollView(do_scroll_x=False)  # TODO Repair scrollview
    trading_grid = TradingGrid()
    # NOTE(review): this binds the grid's minimum_height to *layout*'s
    # height; the villager-list screen binds a grid to its own height
    # setter - confirm this isn't the cause of the broken scrollview above.
    trading_grid.bind(minimum_height=layout.setter('height'))
    trading_scroll.add_widget(trading_grid)
    layout.add_widget(trading_scroll)
    layout.add_widget(Button(text='Back', size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1), on_release=lambda x: self.main(self.project, None)))
    screen.add_widget(layout)
    self.root.add_widget(screen)
def rename_villager(self, legacy_name, new_name):
    '''Rename a villager, carrying over all stored data, then reload the
    edit screen under the new name.

    Bug fix: the old implementation copied 'supplys'/'demands' keys that
    are never created (add_villager only creates name/profession/career/
    tradings), raising KeyError, and it silently dropped the villager's
    'tradings' list.  Copying the whole record preserves every field.
    '''
    if new_name != '' and new_name != legacy_name:
        villagers = self.village[self.project]['villagers']
        # Shallow-copy the full legacy record so nothing is lost, then
        # update the embedded name to match the new key.
        villagers[new_name] = dict(villagers[legacy_name])
        villagers[new_name]['name'] = new_name
        self.rm_villager(legacy_name)
        self.load_villager(new_name)
def change_profession(self, name, profession):
    '''Store the selected profession for *name*, save the village, close
    the dropdown and refresh the edit screen.'''
    villager = self.village[self.project]['villagers'][name]
    villager['profession'] = profession
    JsonHandler.exporter(self.project, self.village)
    self.profession_dropdown.dismiss()
    self.load_villager(name)
def change_career(self, name, career):
    '''Store the selected career for *name*, save the village, close the
    dropdown and refresh the edit screen.'''
    villager = self.village[self.project]['villagers'][name]
    villager['career'] = career
    JsonHandler.exporter(self.project, self.village)
    self.career_dropdown.dismiss()
    self.load_villager(name)
def add_trading(self):
    '''Append a blank trade entry to the current villager's trade list,
    persist the village and refresh the edit screen.'''
    # All four fields start as empty strings; the change_* handlers fill
    # them in as the user types.
    blank_trade = {
        'amount_demand': '',
        'amount_supply': '',
        'demand': '',
        'supply': '',
    }
    self.village[self.project]['villagers'][self.villager]['tradings'].append(blank_trade)
    JsonHandler.exporter(self.project, self.village)
    self.load_villager(self.villager)
def rm_trading(self, index):
    '''Remove the trade at *index* from the current villager's trade list.

    Bug fixes: the old code removed by value, which deletes the first
    *equal* entry (wrong when several blank trades compare equal), and a
    bad index raised an uncaught IndexError because only ValueError was
    handled.  pop(index) removes exactly the requested entry.
    '''
    tradings = self.village[self.project]['villagers'][self.villager]['tradings']
    try:
        tradings.pop(index)
    except IndexError as e:
        # Keep the original best-effort behaviour: report and carry on.
        print(e)
    else:
        JsonHandler.exporter(self.project, self.village)
        self.load_villager(self.villager)
def change_demand_amount(self, index, amount):
    '''Set the demanded item amount for the trade at *index* and persist it.

    Bug fix: indexing the trade list with a bad index raises IndexError,
    which the original ValueError-only handler could never catch.
    '''
    try:
        self.village[self.project]['villagers'][self.villager]['tradings'][index]['amount_demand'] = amount
        JsonHandler.exporter(self.project, self.village)
    except (IndexError, ValueError):
        pass
def change_supply_amount(self, index, amount):
    '''Set the offered item amount for the trade at *index* and persist it.

    Bug fix: indexing the trade list with a bad index raises IndexError,
    which the original ValueError-only handler could never catch.
    '''
    try:
        self.village[self.project]['villagers'][self.villager]['tradings'][index]['amount_supply'] = amount
        JsonHandler.exporter(self.project, self.village)
    except (IndexError, ValueError):
        pass
def change_demand(self, index, item):
    '''Set the demanded item name for the trade at *index* and persist it.

    Bug fix: indexing the trade list with a bad index raises IndexError,
    which the original ValueError-only handler could never catch.
    '''
    try:
        self.village[self.project]['villagers'][self.villager]['tradings'][index]['demand'] = item
        JsonHandler.exporter(self.project, self.village)
    except (IndexError, ValueError):
        pass
def change_supply(self, index, item):
    '''Set the offered item name for the trade at *index* and persist it.

    Bug fix: indexing the trade list with a bad index raises IndexError,
    which the original ValueError-only handler could never catch.
    '''
    try:
        self.village[self.project]['villagers'][self.villager]['tradings'][index]['supply'] = item
        JsonHandler.exporter(self.project, self.village)
    except (IndexError, ValueError):
        pass
if __name__ == '__main__':
    # Instantiate and launch the Kivy app only when run as a script.
    app = VillageToolApp()
    app.run()
| StarcoderdataPython |
12817270 | """
This module contains test cases for Privex's Python Helper's (privex-helpers).
Testing pre-requisites
----------------------
- Ensure you have any mandatory requirements installed (see setup.py's install_requires)
- You should install ``pytest`` to run the tests, it works much better than standard python unittest.
- You may wish to install any optional requirements listed in README.md for best results
- Python 3.7 is recommended at the time of writing this. See README.md in-case this has changed.
For the best testing experience, it's recommended to install the ``dev`` extra, which includes every optional
dependency, as well as development requirements such as ``pytest`` , ``coverage`` as well as requirements for
building the documentation.
Running via PyTest
------------------
To run the tests, we strongly recommend using the ``pytest`` tool (used by default for our Travis CI)::
# Install PyTest if you don't already have it.
user@host: ~/privex-helpers $ pip3 install pytest
# We recommend adding the option ``-rxXs`` which will show information about why certain tests were skipped
# as well as info on xpass / xfail tests
# You can add `-v` for more detailed output, just like when running the tests directly.
user@host: ~/privex-helpers $ pytest -rxXs
# NOTE: If you're using a virtualenv, sometimes you may encounter strange conflicts between a global install
# of PyTest, and the virtualenv PyTest, resulting in errors related to packages not being installed.
# A simple workaround is just to call pytest as a module from the python3 executable:
user@host: ~/privex-helpers $ python3 -m pytest -rxXs
============================== test session starts ==============================
platform darwin -- Python 3.7.0, pytest-5.2.2, py-1.8.0, pluggy-0.13.0
rootdir: /home/user/privex-helpers
collected 99 items
tests/test_bool.py ......... [ 9%]
tests/test_cache.py ................ [ 25%]
tests/test_crypto.py ......................... [ 50%]
tests/test_general.py ................... [ 69%]
tests/test_net.py ssss.s [ 75%]
tests/test_parse.py .......... [ 85%]
tests/test_rdns.py .............. [100%]
============================ short test summary info ============================
SKIPPED [1] tests/test_net.py:76: Requires package 'dnspython'
SKIPPED [1] tests/test_net.py:83: Requires package 'dnspython'
SKIPPED [1] tests/test_net.py:66: Requires package 'dnspython'
SKIPPED [1] tests/test_net.py:71: Requires package 'dnspython'
SKIPPED [1] /home/user/privex-helpers/tests/test_net.py:56: Skipping test TestGeneral.test_ping_v6 as platform is
not supported: "privex.helpers.net.ping is not fully supported on platform 'Darwin'..."
================== 94 passed, 5 skipped, 1 warnings in 21.66s ===================
Running individual test modules
-------------------------------
Some test modules such as ``test_cache`` can be quite slow, as sometimes it's required to call sleep, e.g. ``sleep(2)``
either to prevent interference from previous/following tests, or when testing that an expiration/timeout works.
Thankfully, PyTest allows you to run individual test modules like this::
user@host: ~/privex-helpers $ pytest -rxXs -v tests/test_parse.py
============================== test session starts ==============================
platform darwin -- Python 3.7.0, pytest-5.2.2, py-1.8.0, pluggy-0.13.0
cachedir: .pytest_cache
rootdir: /home/user/privex-helpers
plugins: cov-2.8.1
collected 10 items
tests/test_parse.py::TestParseHelpers::test_csv_single PASSED [ 10%]
tests/test_parse.py::TestParseHelpers::test_csv_spaced PASSED [ 20%]
tests/test_parse.py::TestParseHelpers::test_env_bool_false PASSED [ 30%]
tests/test_parse.py::TestParseHelpers::test_env_bool_true PASSED [ 40%]
tests/test_parse.py::TestParseHelpers::test_env_nonexist_bool PASSED [ 50%]
tests/test_parse.py::TestParseHelpers::test_kval_clean PASSED [ 60%]
tests/test_parse.py::TestParseHelpers::test_kval_custom_clean PASSED [ 70%]
tests/test_parse.py::TestParseHelpers::test_kval_custom_spaced PASSED [ 80%]
tests/test_parse.py::TestParseHelpers::test_kval_single PASSED [ 90%]
tests/test_parse.py::TestParseHelpers::test_kval_spaced PASSED [100%]
============================== 10 passed in 0.09s ===============================
Running directly using Python Unittest
--------------------------------------
Alternatively, you can run the tests by hand with ``python3.7`` ( or just ``python3`` ), however we strongly
recommend using PyTest as our tests use various PyTest functionality to allow for things such as skipping tests
when you don't have a certain dependency installed.
Running via python unittest ::
user@the-matrix ~/privex-helpers $ python3.7 -m tests
............................
----------------------------------------------------------------------
Ran 28 tests in 0.001s
OK
For more verbosity, simply add ``-v`` to the end of the command::
user@the-matrix ~/privex-helpers $ python3 -m tests -v
test_empty_combined (__main__.TestBoolHelpers) ... ok
test_isfalse_truthy (__main__.TestBoolHelpers) ... ok
test_v4_arpa_boundary_16bit (__main__.TestIPReverseDNS)
Test generating 16-bit v4 boundary ... ok
test_v4_arpa_boundary_24bit (__main__.TestIPReverseDNS)
Test generating 24-bit v4 boundary ... ok
test_kval_single (__main__.TestParseHelpers)
Test that a single value still returns a list ... ok
test_kval_spaced (__main__.TestParseHelpers)
Test key:val csv parsing with excess outer whitespace, and value whitespace ... ok
# Truncated excess output in this PyDoc example, as there are many more lines showing
# the results of each individual testcase, wasting space and adding bloat...
----------------------------------------------------------------------
Ran 28 tests in 0.001s
OK
**Copyright**::
Copyright 2019 Privex Inc. ( https://www.privex.io )
License: X11 / MIT Github: https://github.com/Privex/python-helpers
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import unittest
from privex.loghelper import LogHelper
from privex.helpers import env_bool
from tests.base import PrivexBaseCase, EmptyIter
# Configure logging for the test run: verbose console output when the DEBUG
# environment variable is truthy, otherwise suppress everything below
# CRITICAL so test output stays readable.
if env_bool('DEBUG', False) is True:
    LogHelper('privex.helpers', level=logging.DEBUG).add_console_handler(logging.DEBUG)
else:
    LogHelper('privex.helpers', level=logging.CRITICAL)  # Silence non-critical log messages

if __name__ == '__main__':
    unittest.main()

# Re-export the individual test modules so that importing this package makes
# every TestCase discoverable by unittest/pytest.
from tests.test_cache import *
from tests.general import *  # NOTE(review): other modules use a test_ prefix - confirm 'tests.general' exists
from tests.test_crypto import *
from tests.test_bool import TestBoolHelpers
from tests.test_rdns import TestIPReverseDNS
from tests.test_parse import TestParseHelpers
from tests.test_net import TestNet
from tests.test_collections import TestIsNamedTuple, TestDictableNamedtuple, TestDictObject
from tests.test_extras import TestAttrs
| StarcoderdataPython |
1902217 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from .views import index as home
urlpatterns = [
    url(r'^$', home, name='index'),
    # Bug fix: an include() prefix must not be anchored with '$'.  With
    # r'^store/$' only the bare 'store/' path could ever reach the included
    # urlconf, so every sub-URL defined in store.urls returned 404.
    url(r'^store/', include('store.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
# settings for development environment DEBUG
from django.conf.urls.static import static
from django.conf import settings

# While DEBUG is on, let Django itself serve collected static files (in
# production the web server is expected to handle these).
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

# Serve user-uploaded media files the same way during development.
# NOTE(review): patterns() was removed in Django 1.10 - confirm this project
# pins an older Django before upgrading.
if settings.DEBUG:
    urlpatterns += patterns(
        'django.views.static',
        (r'^media/(?P<path>.*)',
         'serve',
         {'document_root': settings.MEDIA_ROOT}), )
| StarcoderdataPython |
11222751 | <filename>CircuitPython_Made_Easy_On_CPX/cpx_slide_switch/code.py
# SPDX-FileCopyrightText: 2017 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
from adafruit_circuitplayground.express import cpx

# Poll the Circuit Playground Express on-board slide switch roughly ten
# times per second and print its boolean state to the serial console.
while True:
    print("Slide switch:", cpx.switch)
    time.sleep(0.1)
| StarcoderdataPython |
9718669 | <filename>vitrage/common/utils.py
# -*- encoding: utf-8 -*-
# Copyright 2015 - Alcatel-Lucent
# Copyright © 2014-2015 eNovance
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cProfile
import functools

from oslo_config import cfg
def recursive_keypairs(d, separator='.'):
    # taken from ceilometer and gnocchi
    """Yield ``(flattened_key, value)`` pairs for *d*, walking nested dicts.

    Keys at every level are visited in sorted order; nested keys are joined
    with *separator*, e.g. ``{'a': {'b': 1}}`` yields ``('a.b', 1)``.
    """
    for key in sorted(d):
        value = d[key]
        if not isinstance(value, dict):
            yield key, value
        else:
            for inner_key, inner_value in recursive_keypairs(value, separator):
                yield '%s%s%s' % (key, separator, inner_key), inner_value
def opt_exists(conf_parent, opt):
    """Return the value of *opt* under *conf_parent*, or ``False`` when the
    option is not registered (oslo.config raises ``NoSuchOptError``)."""
    try:
        value = conf_parent[opt]
    except cfg.NoSuchOptError:
        return False
    return value
def do_cprofile(func):
    """Decorator that profiles *func* with :mod:`cProfile`.

    Cumulative-time statistics are printed to stdout after every call,
    even when *func* raises.

    Bug fix: the wrapper now carries ``functools.wraps`` so the decorated
    function keeps its ``__name__``/``__doc__`` instead of appearing as
    ``profiled_func`` in logs and tracebacks.
    """
    @functools.wraps(func)
    def profiled_func(*args, **kwargs):
        profile = cProfile.Profile()
        try:
            profile.enable()
            result = func(*args, **kwargs)
            profile.disable()
            return result
        finally:
            # Always dump the stats, even if func raised.
            profile.print_stats('cumulative')
    return profiled_func
| StarcoderdataPython |
1806573 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from keystone.common import controller
from keystone import exception
from keystone import tests
from keystone.tests import test_v3
CONF = cfg.CONF
def _build_role_assignment_url_and_entity(
role_id, user_id=None, group_id=None, domain_id=None,
project_id=None, inherited_to_projects=False,
effective=False):
if user_id and domain_id:
url = ('/domains/%(domain_id)s/users/%(user_id)s'
'/roles/%(role_id)s' % {
'domain_id': domain_id,
'user_id': user_id,
'role_id': role_id})
entity = {'role': {'id': role_id},
'user': {'id': user_id},
'scope': {'domain': {'id': domain_id}}}
elif user_id and project_id:
url = ('/projects/%(project_id)s/users/%(user_id)s'
'/roles/%(role_id)s' % {
'project_id': project_id,
'user_id': user_id,
'role_id': role_id})
entity = {'role': {'id': role_id},
'user': {'id': user_id},
'scope': {'project': {'id': project_id}}}
if group_id and domain_id:
url = ('/domains/%(domain_id)s/groups/%(group_id)s'
'/roles/%(role_id)s' % {
'domain_id': domain_id,
'group_id': group_id,
'role_id': role_id})
entity = {'role': {'id': role_id},
'group': {'id': group_id},
'scope': {'domain': {'id': domain_id}}}
elif group_id and project_id:
url = ('/projects/%(project_id)s/groups/%(group_id)s'
'/roles/%(role_id)s' % {
'project_id': project_id,
'group_id': group_id,
'role_id': role_id})
entity = {'role': {'id': role_id},
'group': {'id': group_id},
'scope': {'project': {'id': project_id}}}
if inherited_to_projects:
url = '/OS-INHERIT%s/inherited_to_projects' % url
if not effective:
entity['OS-INHERIT:inherited_to'] = 'projects'
return (url, entity)
class AssignmentTestCase(test_v3.RestfulTestCase):
"""Test domains, projects, roles and role assignments."""
def setUp(self):
super(AssignmentTestCase, self).setUp()
self.group = self.new_group_ref(
domain_id=self.domain_id)
self.group = self.identity_api.create_group(self.group)
self.group_id = self.group['id']
self.credential_id = uuid.uuid4().hex
self.credential = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
self.credential['id'] = self.credential_id
self.credential_api.create_credential(
self.credential_id,
self.credential)
# Domain CRUD tests
def test_create_domain(self):
"""Call ``POST /domains``."""
ref = self.new_domain_ref()
r = self.post(
'/domains',
body={'domain': ref})
return self.assertValidDomainResponse(r, ref)
def test_create_domain_case_sensitivity(self):
"""Call `POST /domains`` twice with upper() and lower() cased name."""
ref = self.new_domain_ref()
# ensure the name is lowercase
ref['name'] = ref['name'].lower()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
# ensure the name is uppercase
ref['name'] = ref['name'].upper()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_create_domain_400(self):
"""Call ``POST /domains``."""
self.post('/domains', body={'domain': {}}, expected_status=400)
def test_list_domains(self):
"""Call ``GET /domains``."""
resource_url = '/domains'
r = self.get(resource_url)
self.assertValidDomainListResponse(r, ref=self.domain,
resource_url=resource_url)
def test_get_domain(self):
"""Call ``GET /domains/{domain_id}``."""
r = self.get('/domains/%(domain_id)s' % {
'domain_id': self.domain_id})
self.assertValidDomainResponse(r, self.domain)
def test_update_domain(self):
"""Call ``PATCH /domains/{domain_id}``."""
ref = self.new_domain_ref()
del ref['id']
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_disable_domain(self):
"""Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
# Create a 2nd set of entities in a 2nd domain
self.domain2 = self.new_domain_ref()
self.assignment_api.create_domain(self.domain2['id'], self.domain2)
self.project2 = self.new_project_ref(
domain_id=self.domain2['id'])
self.assignment_api.create_project(self.project2['id'], self.project2)
self.user2 = self.new_user_ref(
domain_id=self.domain2['id'],
project_id=self.project2['id'])
password = self.user2['password']
self.user2 = self.identity_api.create_user(self.user2)
self.user2['password'] = password
self.assignment_api.add_user_to_project(self.project2['id'],
self.user2['id'])
# First check a user in that domain can authenticate, via
# Both v2 and v3
body = {
'auth': {
'passwordCredentials': {
'userId': self.user2['id'],
'password': <PASSWORD>['password']
},
'tenantId': self.project2['id']
}
}
self.admin_request(path='/v2.0/tokens', method='POST', body=body)
auth_data = self.build_authentication_request(
user_id=self.user2['id'],
password=<PASSWORD>['password'],
project_id=self.project2['id'])
self.v3_authenticate_token(auth_data)
# Now disable the domain
self.domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, self.domain2)
# Make sure the user can no longer authenticate, via
# either API
body = {
'auth': {
'passwordCredentials': {
'userId': self.user2['id'],
'password': <PASSWORD>['password']
},
'tenantId': self.project2['id']
}
}
self.admin_request(
path='/v2.0/tokens', method='POST', body=body, expected_status=401)
# Try looking up in v3 by name and id
auth_data = self.build_authentication_request(
user_id=self.user2['id'],
password=<PASSWORD>['password'],
project_id=self.project2['id'])
self.v3_authenticate_token(auth_data, expected_status=401)
auth_data = self.build_authentication_request(
username=self.user2['name'],
user_domain_id=self.domain2['id'],
password=self.user2['password'],
project_id=self.project2['id'])
self.v3_authenticate_token(auth_data, expected_status=401)
def test_delete_enabled_domain_fails(self):
"""Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
# Try deleting an enabled domain, which should fail
self.delete('/domains/%(domain_id)s' % {
'domain_id': self.domain['id']},
expected_status=exception.ForbiddenAction.code)
def test_delete_domain(self):
"""Call ``DELETE /domains/{domain_id}``.
The sample data set up already has a user, group, project
and credential that is part of self.domain. Since the user
we will authenticate with is in this domain, we create a
another set of entities in a second domain. Deleting this
second domain should delete all these new entities. In addition,
all the entities in the regular self.domain should be unaffected
by the delete.
Test Plan:
- Create domain2 and a 2nd set of entities
- Disable domain2
- Delete domain2
- Check entities in domain2 have been deleted
- Check entities in self.domain are unaffected
"""
# Create a 2nd set of entities in a 2nd domain
self.domain2 = self.new_domain_ref()
self.assignment_api.create_domain(self.domain2['id'], self.domain2)
self.project2 = self.new_project_ref(
domain_id=self.domain2['id'])
self.assignment_api.create_project(self.project2['id'], self.project2)
self.user2 = self.new_user_ref(
domain_id=self.domain2['id'],
project_id=self.project2['id'])
self.user2 = self.identity_api.create_user(self.user2)
self.group2 = self.new_group_ref(
domain_id=self.domain2['id'])
self.group2 = self.identity_api.create_group(self.group2)
self.credential2 = self.new_credential_ref(
user_id=self.user2['id'],
project_id=self.project2['id'])
self.credential_api.create_credential(
self.credential2['id'],
self.credential2)
# Now disable the new domain and delete it
self.domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, self.domain2)
self.delete('/domains/%(domain_id)s' % {
'domain_id': self.domain2['id']})
# Check all the domain2 relevant entities are gone
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
self.domain2['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
self.project2['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
self.group2['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
self.user2['id'])
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
self.credential2['id'])
# ...and that all self.domain entities are still here
r = self.assignment_api.get_domain(self.domain['id'])
self.assertDictEqual(r, self.domain)
r = self.assignment_api.get_project(self.project['id'])
self.assertDictEqual(r, self.project)
r = self.identity_api.get_group(self.group['id'])
self.assertDictEqual(r, self.group)
r = self.identity_api.get_user(self.user['id'])
self.user.pop('password')
self.assertDictEqual(r, self.user)
r = self.credential_api.get_credential(self.credential['id'])
self.assertDictEqual(r, self.credential)
def test_delete_default_domain_fails(self):
# Attempting to delete the default domain results in 403 Forbidden.
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id},
body={'domain': {'enabled': False}})
self.delete('/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id},
expected_status=exception.ForbiddenAction.code)
def test_delete_new_default_domain_fails(self):
# If change the default domain ID, deleting the new default domain
# results in a 403 Forbidden.
# Create a new domain that's not the default
new_domain = self.new_domain_ref()
new_domain_id = new_domain['id']
self.assignment_api.create_domain(new_domain_id, new_domain)
# Disable the new domain so can delete it later.
self.patch('/domains/%(domain_id)s' % {
'domain_id': new_domain_id},
body={'domain': {'enabled': False}})
# Change the default domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# Attempt to delete the new domain
self.delete('/domains/%(domain_id)s' % {'domain_id': new_domain_id},
expected_status=exception.ForbiddenAction.code)
def test_delete_old_default_domain(self):
# If change the default domain ID, deleting the old default domain
# works.
# Create a new domain that's not the default
new_domain = self.new_domain_ref()
new_domain_id = new_domain['id']
self.assignment_api.create_domain(new_domain_id, new_domain)
old_default_domain_id = CONF.identity.default_domain_id
# Disable the default domain so we can delete it later.
self.patch('/domains/%(domain_id)s' % {
'domain_id': old_default_domain_id},
body={'domain': {'enabled': False}})
# Change the default domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# Delete the old default domain
self.delete(
'/domains/%(domain_id)s' % {'domain_id': old_default_domain_id})
def test_token_revoked_once_domain_disabled(self):
"""Test token from a disabled domain has been invalidated.
Test that a token that was valid for an enabled domain
becomes invalid once that domain is disabled.
"""
self.domain = self.new_domain_ref()
self.assignment_api.create_domain(self.domain['id'], self.domain)
self.user2 = self.new_user_ref(domain_id=self.domain['id'])
password = <PASSWORD>['password']
self.user2 = self.identity_api.create_user(self.user2)
self.user2['password'] = password
# build a request body
auth_body = self.build_authentication_request(
user_id=self.user2['id'],
password=<PASSWORD>['password'])
# sends a request for the user's token
token_resp = self.post('/auth/tokens', body=auth_body)
subject_token = token_resp.headers.get('x-subject-token')
# validates the returned token and it should be valid.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=200)
# now disable the domain
self.domain['enabled'] = False
url = "/domains/%(domain_id)s" % {'domain_id': self.domain['id']}
self.patch(url,
body={'domain': {'enabled': False}},
expected_status=200)
# validates the same token again and it should be 'not found'
# as the domain has already been disabled.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=404)
def test_delete_domain_hierarchy(self):
"""Call ``DELETE /domains/{domain_id}``."""
domain = self.new_domain_ref()
self.assignment_api.create_domain(domain['id'], domain)
root_project = self.new_project_ref(
domain_id=domain['id'])
self.assignment_api.create_project(root_project['id'], root_project)
leaf_project = self.new_project_ref(
domain_id=domain['id'],
parent_id=root_project['id'])
self.assignment_api.create_project(leaf_project['id'], leaf_project)
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': domain['id']},
body={'domain': {'enabled': False}})
self.delete(
'/domains/%(domain_id)s' % {
'domain_id': domain['id']})
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
domain['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
root_project['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
leaf_project['id'])
# Project CRUD tests
def test_list_projects(self):
"""Call ``GET /projects``."""
resource_url = '/projects'
r = self.get(resource_url)
self.assertValidProjectListResponse(r, ref=self.project,
resource_url=resource_url)
def test_create_project(self):
"""Call ``POST /projects``."""
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post(
'/projects',
body={'project': ref})
self.assertValidProjectResponse(r, ref)
def test_create_project_400(self):
"""Call ``POST /projects``."""
self.post('/projects', body={'project': {}}, expected_status=400)
def _create_projects_hierarchy(self, hierarchy_size=1):
"""Creates a project hierarchy with specified size.
:param hierarchy_size: the desired hierarchy size, default is 1 -
a project with one child.
:returns projects: a list of the projects in the created hierarchy.
"""
resp = self.get(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
projects = [resp.result]
for i in range(hierarchy_size):
new_ref = self.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[i]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
return projects
def test_create_hierarchical_project(self):
"""Call ``POST /projects``."""
self._create_projects_hierarchy()
def test_get_project(self):
"""Call ``GET /projects/{project_id}``."""
r = self.get(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
self.assertValidProjectResponse(r, self.project)
def test_get_project_with_parents_list(self):
"""Call ``GET /projects/{project_id}?parents_as_list``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
r = self.get(
'/projects/%(project_id)s?parents_as_list' % {
'project_id': projects[1]['project']['id']})
self.assertEqual(1, len(r.result['project']['parents']))
self.assertValidProjectResponse(r, projects[1]['project'])
self.assertIn(projects[0], r.result['project']['parents'])
self.assertNotIn(projects[2], r.result['project']['parents'])
def test_get_project_with_subtree_list(self):
"""Call ``GET /projects/{project_id}?subtree_as_list``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
r = self.get(
'/projects/%(project_id)s?subtree_as_list' % {
'project_id': projects[1]['project']['id']})
self.assertEqual(1, len(r.result['project']['subtree']))
self.assertValidProjectResponse(r, projects[1]['project'])
self.assertNotIn(projects[0], r.result['project']['subtree'])
self.assertIn(projects[2], r.result['project']['subtree'])
def test_update_project(self):
    """Call ``PATCH /projects/{project_id}``."""
    update_ref = self.new_project_ref(domain_id=self.domain_id)
    # An 'id' attribute may not be supplied in an update body.
    del update_ref['id']
    resp = self.patch(
        '/projects/%(project_id)s' % {'project_id': self.project_id},
        body={'project': update_ref})
    self.assertValidProjectResponse(resp, update_ref)
def test_update_project_domain_id(self):
"""Call ``PATCH /projects/{project_id}`` with domain_id."""
project = self.new_project_ref(domain_id=self.domain['id'])
self.assignment_api.create_project(project['id'], project)
project['domain_id'] = CONF.identity.default_domain_id
# While domain_id is immutable (the default), moving the project to
# another domain must be rejected as a validation error.
r = self.patch('/projects/%(project_id)s' % {
'project_id': project['id']},
body={'project': project},
expected_status=exception.ValidationError.code)
# With immutability switched off, updating domain_id is allowed.
self.config_fixture.config(domain_id_immutable=False)
project['domain_id'] = self.domain['id']
r = self.patch('/projects/%(project_id)s' % {
'project_id': project['id']},
body={'project': project})
self.assertValidProjectResponse(r, project)
def test_update_project_parent_id(self):
    """Call ``PATCH /projects/{project_id}`` attempting to change parent."""
    projects = self._create_projects_hierarchy()
    leaf = projects[1]['project']
    # Changing parent_id after creation must be forbidden.
    leaf['parent_id'] = None
    self.patch(
        '/projects/%(project_id)s' % {'project_id': leaf['id']},
        body={'project': leaf},
        expected_status=403)
def test_disable_leaf_project(self):
    """Call ``PATCH /projects/{project_id}`` disabling a leaf project."""
    projects = self._create_projects_hierarchy()
    leaf = projects[1]['project']
    # A project without children may be disabled freely.
    leaf['enabled'] = False
    resp = self.patch(
        '/projects/%(project_id)s' % {'project_id': leaf['id']},
        body={'project': leaf})
    self.assertEqual(leaf['enabled'], resp.result['project']['enabled'])
def test_disable_not_leaf_project(self):
    """Call ``PATCH /projects/{project_id}`` disabling a parent project."""
    projects = self._create_projects_hierarchy()
    root = projects[0]['project']
    # Disabling a project that still has children must be forbidden.
    root['enabled'] = False
    self.patch(
        '/projects/%(project_id)s' % {'project_id': root['id']},
        body={'project': root},
        expected_status=403)
def test_delete_project(self):
"""Call ``DELETE /projects/{project_id}``.
As well as making sure the delete succeeds, we ensure
that any credentials that reference this project are
also deleted, while other credentials are unaffected.
"""
# First check the credential for this project is present
r = self.credential_api.get_credential(self.credential['id'])
self.assertDictEqual(r, self.credential)
# Create a second credential with a different project
self.project2 = self.new_project_ref(
domain_id=self.domain['id'])
self.assignment_api.create_project(self.project2['id'], self.project2)
self.credential2 = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project2['id'])
self.credential_api.create_credential(
self.credential2['id'],
self.credential2)
# Now delete the project
self.delete(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
# Deleting the project should have deleted any credentials
# that reference this project
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
credential_id=self.credential['id'])
# But the credential for project2 is unaffected
r = self.credential_api.get_credential(self.credential2['id'])
self.assertDictEqual(r, self.credential2)
def test_delete_not_leaf_project(self):
    """Call ``DELETE /projects/{project_id}`` on a project with children."""
    self._create_projects_hierarchy()
    # Deleting a project that still has subprojects must be forbidden.
    self.delete(
        '/projects/%(project_id)s' % {'project_id': self.project_id},
        expected_status=403)
# Role CRUD tests
def test_create_role(self):
    """Call ``POST /roles``."""
    role_ref = self.new_role_ref()
    resp = self.post('/roles', body={'role': role_ref})
    # Return the validated role so other tests can reuse it.
    return self.assertValidRoleResponse(resp, role_ref)
def test_create_role_400(self):
    """Call ``POST /roles`` with an empty role; expect a 400."""
    empty_body = {'role': {}}
    self.post('/roles', body=empty_body, expected_status=400)
def test_list_roles(self):
    """Call ``GET /roles``."""
    url = '/roles'
    resp = self.get(url)
    self.assertValidRoleListResponse(resp, ref=self.role,
                                     resource_url=url)
def test_get_role(self):
    """Call ``GET /roles/{role_id}``."""
    url = '/roles/%(role_id)s' % {'role_id': self.role_id}
    resp = self.get(url)
    self.assertValidRoleResponse(resp, self.role)
def test_update_role(self):
    """Call ``PATCH /roles/{role_id}``."""
    update_ref = self.new_role_ref()
    # A role id cannot be changed via update.
    del update_ref['id']
    resp = self.patch('/roles/%(role_id)s' % {'role_id': self.role_id},
                      body={'role': update_ref})
    self.assertValidRoleResponse(resp, update_ref)
def test_delete_role(self):
    """Call ``DELETE /roles/{role_id}``."""
    url = '/roles/%(role_id)s' % {'role_id': self.role_id}
    self.delete(url)
# Role Grants tests
def test_crud_user_project_role_grants(self):
# Grant a role to a user on a project, then verify it via HEAD and
# the grant collection.  The revoke half is disabled (see FIXME below).
collection_url = (
'/projects/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.project['id'],
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id}
self.put(member_url)
self.head(member_url)
r = self.get(collection_url)
self.assertValidRoleListResponse(r, ref=self.role,
resource_url=collection_url)
# FIXME(gyee): this test is no longer valid as user
# have no role in the project. Can't get a scoped token
# self.delete(member_url)
# r = self.get(collection_url)
# self.assertValidRoleListResponse(r, expected_length=0)
# self.assertIn(collection_url, r.result['links']['self'])
def test_crud_user_project_role_grants_no_user(self):
    """Grant role on a project to a user that doesn't exist; 404 result.

    Granting a role on a project to a nonexistent user must make the
    server return 404 Not Found for the user.
    """
    fake_user_id = uuid.uuid4().hex
    collection_url = (
        '/projects/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.project['id'],
            'user_id': fake_user_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    self.put(member_url, expected_status=404)
def test_crud_user_domain_role_grants(self):
    """Grant, verify, list and revoke a user role on a domain."""
    collection_url = (
        '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': self.domain_id,
            'user_id': self.user['id']})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    # Create the grant and verify it via HEAD and the role list.
    self.put(member_url)
    self.head(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, ref=self.role,
                                     resource_url=collection_url)
    # Revoking the grant must leave the collection empty.
    self.delete(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, expected_length=0,
                                     resource_url=collection_url)
def test_crud_user_domain_role_grants_no_user(self):
    """Grant role on a domain to a user that doesn't exist; 404 result.

    Granting a role on a domain to a nonexistent user must make the
    server return 404 Not Found for the user.
    """
    fake_user_id = uuid.uuid4().hex
    collection_url = (
        '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': self.domain_id, 'user_id': fake_user_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    self.put(member_url, expected_status=404)
def test_crud_group_project_role_grants(self):
    """Grant, verify, list and revoke a group role on a project."""
    collection_url = (
        '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
            'project_id': self.project_id,
            'group_id': self.group_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    # Create the grant and verify it via HEAD and the role list.
    self.put(member_url)
    self.head(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, ref=self.role,
                                     resource_url=collection_url)
    # Revoking the grant must leave the collection empty.
    self.delete(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, expected_length=0,
                                     resource_url=collection_url)
def test_crud_group_project_role_grants_no_group(self):
    """Grant role on a project to a group that doesn't exist; 404 result.

    Granting a role on a project to a nonexistent group must make the
    server return 404 Not Found for the group.
    """
    fake_group_id = uuid.uuid4().hex
    collection_url = (
        '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
            'project_id': self.project_id,
            'group_id': fake_group_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    self.put(member_url, expected_status=404)
def test_crud_group_domain_role_grants(self):
    """Grant, verify, list and revoke a group role on a domain."""
    collection_url = (
        '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
            'domain_id': self.domain_id,
            'group_id': self.group_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    # Create the grant and verify it via HEAD and the role list.
    self.put(member_url)
    self.head(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, ref=self.role,
                                     resource_url=collection_url)
    # Revoking the grant must leave the collection empty.
    self.delete(member_url)
    grants = self.get(collection_url)
    self.assertValidRoleListResponse(grants, expected_length=0,
                                     resource_url=collection_url)
def test_crud_group_domain_role_grants_no_group(self):
    """Grant role on a domain to a group that doesn't exist; 404 result.

    Granting a role on a domain to a nonexistent group must make the
    server return 404 Not Found for the group.
    """
    fake_group_id = uuid.uuid4().hex
    collection_url = (
        '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
            'domain_id': self.domain_id,
            'group_id': fake_group_id})
    member_url = '%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id}
    self.put(member_url, expected_status=404)
def _create_new_user_and_assign_role_on_project(self):
    """Create a new user and assign user a role on a project."""
    # A fresh user guarantees no pre-existing assignments interfere.
    user_ref = self.identity_api.create_user(
        self.new_user_ref(domain_id=self.domain_id))
    # Assign the user a role on the project.
    collection_url = (
        '/projects/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.project_id,
            'user_id': user_ref['id']})
    member_url = ('%(collection_url)s/%(role_id)s' % {
        'collection_url': collection_url,
        'role_id': self.role_id})
    self.put(member_url, expected_status=204)
    # Confirm the assignment actually took effect.
    self.head(member_url, expected_status=204)
    return member_url, user_ref
def test_delete_user_before_removing_role_assignment_succeeds(self):
    """Call ``DELETE`` on the user before the role assignment."""
    assignment_url, doomed_user = (
        self._create_new_user_and_assign_role_on_project())
    # Remove the user directly from the identity backend.
    self.identity_api.driver.delete_user(doomed_user['id'])
    # The orphaned assignment can still be cleaned up...
    self.delete(assignment_url, expected_status=204)
    # ...and is then really gone.
    self.head(assignment_url, expected_status=404)
def test_delete_user_and_check_role_assignment_fails(self):
    """Call ``DELETE`` on the user and check the role assignment."""
    assignment_url, doomed_user = (
        self._create_new_user_and_assign_role_on_project())
    # Remove the user directly from the identity backend.
    self.identity_api.driver.delete_user(doomed_user['id'])
    # Checking the assignment now yields 404 because the user lookup
    # fails - no delete is being performed on the role itself.
    self.head(assignment_url, expected_status=404)
def test_token_revoked_once_group_role_grant_revoked(self):
    """Test token is revoked when group role grant is revoked.

    When a role granted to a group is revoked for a given scope,
    all tokens related to this scope and belonging to one of the members
    of this group should be revoked.

    The revocation should be independent of the presence
    of the revoke API.
    """
    # If enabled, the revoke API would revoke tokens first; disabling it
    # ensures that tokens are revoked even without the revoke API.
    self.assignment_api.revoke_api = None

    # Create a grant from the group on the project.
    self.assignment_api.create_grant(role_id=self.role['id'],
                                     project_id=self.project['id'],
                                     group_id=self.group['id'])

    # Add the user to the group.
    self.identity_api.add_user_to_group(user_id=self.user['id'],
                                        group_id=self.group['id'])

    # Create a project-scoped token for the user.
    # NOTE: fixed a corrupted '<PASSWORD>' placeholder here - the fixture
    # user's clear-text password is what authentication needs.
    auth_body = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=self.project['id'])
    token_resp = self.post('/auth/tokens', body=auth_body)
    token = token_resp.headers.get('x-subject-token')

    # Validate the returned token; it should be valid.
    self.head('/auth/tokens',
              headers={'x-subject-token': token},
              expected_status=200)

    # Revoke the grant from the group on the project.
    self.assignment_api.delete_grant(role_id=self.role['id'],
                                     project_id=self.project['id'],
                                     group_id=self.group['id'])

    # Validate the same token again; it should no longer be valid.
    self.head('/auth/tokens',
              headers={'x-subject-token': token},
              expected_status=404)
# Role Assignments tests
def test_get_role_assignments(self):
"""Call ``GET /role_assignments``.
The sample data set up already has a user, group and project
that is part of self.domain. We use these plus a new user
we create as our data set, making sure we ignore any
role assignments that are already in existence.
Since we don't yet support a first class entity for role
assignments, we are only testing the LIST API. To create
and delete the role assignments we use the old grant APIs.
Test Plan:
- Create extra user for tests
- Get a list of all existing role assignments
- Add a new assignment for each of the four combinations, i.e.
group+domain, user+domain, group+project, user+project, using
the same role each time
- Get a new list of all role assignments, checking these four new
ones have been added
- Then delete the four we added
- Get a new list of all role assignments, checking the four have
been removed
"""
# Since the default fixtures already assign some roles to the
# user it creates, we also need a new user that will not have any
# existing assignments
self.user1 = self.new_user_ref(
domain_id=self.domain['id'])
self.user1 = self.identity_api.create_user(self.user1)
# Record the baseline so pre-existing assignments are ignored.
collection_url = '/role_assignments'
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
resource_url=collection_url)
existing_assignments = len(r.result.get('role_assignments'))
# Now add one of each of the four types of assignment, making sure
# that we get them all back.
# gd = group on domain
gd_url, gd_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, group_id=self.group_id,
role_id=self.role_id)
self.put(gd_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 1,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
# ud = user on domain
ud_url, ud_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, user_id=self.user1['id'],
role_id=self.role_id)
self.put(ud_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
# gp = group on project
gp_url, gp_entity = _build_role_assignment_url_and_entity(
project_id=self.project_id, group_id=self.group_id,
role_id=self.role_id)
self.put(gp_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 3,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gp_entity, link_url=gp_url)
# up = user on project
up_url, up_entity = _build_role_assignment_url_and_entity(
project_id=self.project_id, user_id=self.user1['id'],
role_id=self.role_id)
self.put(up_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 4,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
# Now delete the four we added and make sure they are removed
# from the collection.
self.delete(gd_url)
self.delete(ud_url)
self.delete(gp_url)
self.delete(up_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments,
resource_url=collection_url)
self.assertRoleAssignmentNotInListResponse(r, gd_entity)
self.assertRoleAssignmentNotInListResponse(r, ud_entity)
self.assertRoleAssignmentNotInListResponse(r, gp_entity)
self.assertRoleAssignmentNotInListResponse(r, up_entity)
def test_get_effective_role_assignments(self):
"""Call ``GET /role_assignments?effective``.
Test Plan:
- Create two extra user for tests
- Add these users to a group
- Add a role assignment for the group on a domain
- Get a list of all role assignments, checking one has been added
- Then get a list of all effective role assignments - the group
assignment should have turned into assignments on the domain
for each of the group members.
"""
self.user1 = self.new_user_ref(
domain_id=self.domain['id'])
# Preserve the clear-text password across create_user, then restore it
# on the returned ref - presumably create_user does not echo it back;
# NOTE(review): confirm against new_user_ref/create_user.
password = self.user1['password']
self.user1 = self.identity_api.create_user(self.user1)
self.user1['password'] = password
self.user2 = self.new_user_ref(
domain_id=self.domain['id'])
password = self.user2['password']
self.user2 = self.identity_api.create_user(self.user2)
self.user2['password'] = password
self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])
# Record the baseline so pre-existing assignments are ignored.
collection_url = '/role_assignments'
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
resource_url=collection_url)
existing_assignments = len(r.result.get('role_assignments'))
# Grant the group a role on the domain; the direct list grows by one.
gd_url, gd_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, group_id=self.group_id,
role_id=self.role_id)
self.put(gd_url)
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 1,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
# Now re-read the collection asking for effective roles - this
# should mean the group assignment is translated into the two
# member user assignments
collection_url = '/role_assignments?effective'
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(
r,
expected_length=existing_assignments + 2,
resource_url=collection_url)
# Each effective user assignment must link back to the group grant.
unused, ud_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, user_id=self.user1['id'],
role_id=self.role_id)
gd_url, unused = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, group_id=self.group['id'],
role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=gd_url)
ud_url, ud_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, user_id=self.user2['id'],
role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=gd_url)
def test_check_effective_values_for_role_assignments(self):
    """Call ``GET /role_assignments?effective=value``.

    Check the various ways of specifying the 'effective'
    query parameter.  If the 'effective' query parameter
    is included then this should always be treated as meaning 'True'
    unless it is specified as:

    {url}?effective=0

    This is by design to match the agreed way of handling
    policy checking on query/filter parameters.

    Test Plan:

    - Create two extra user for tests
    - Add these users to a group
    - Add a role assignment for the group on a domain
    - Get a list of all role assignments, checking one has been added
    - Then issue various request with different ways of defining
      the 'effective' query parameter. As we have tested the
      correctness of the data coming back when we get effective roles
      in other tests, here we just use the count of entities to
      know if we are getting effective roles or not

    """
    self.user1 = self.new_user_ref(
        domain_id=self.domain['id'])
    # Fixed a corrupted '<PASSWORD>' placeholder: preserve the clear-text
    # password across create_user, mirroring the handling of user2 below.
    password = self.user1['password']
    self.user1 = self.identity_api.create_user(self.user1)
    self.user1['password'] = password
    self.user2 = self.new_user_ref(
        domain_id=self.domain['id'])
    password = self.user2['password']
    self.user2 = self.identity_api.create_user(self.user2)
    self.user2['password'] = password
    self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
    self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])

    # Record the baseline so pre-existing assignments are ignored.
    collection_url = '/role_assignments'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               resource_url=collection_url)
    existing_assignments = len(r.result.get('role_assignments'))

    # Grant the group a role on the domain; the direct list grows by one.
    gd_url, gd_entity = _build_role_assignment_url_and_entity(
        domain_id=self.domain_id, group_id=self.group_id,
        role_id=self.role_id)
    self.put(gd_url)
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(
        r,
        expected_length=existing_assignments + 1,
        resource_url=collection_url)
    self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)

    # Now re-read the collection asking for effective roles,
    # using the most common way of defining "effective'. This
    # should mean the group assignment is translated into the two
    # member user assignments
    collection_url = '/role_assignments?effective'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(
        r,
        expected_length=existing_assignments + 2,
        resource_url=collection_url)
    # Now set 'effective' to false explicitly - should get
    # back the regular roles
    collection_url = '/role_assignments?effective=0'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(
        r,
        expected_length=existing_assignments + 1,
        resource_url=collection_url)
    # Now try setting 'effective' to 'False' explicitly- this is
    # NOT supported as a way of setting a query or filter
    # parameter to false by design. Hence we should get back
    # effective roles.
    collection_url = '/role_assignments?effective=False'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(
        r,
        expected_length=existing_assignments + 2,
        resource_url=collection_url)
    # Now set 'effective' to True explicitly
    collection_url = '/role_assignments?effective=True'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(
        r,
        expected_length=existing_assignments + 2,
        resource_url=collection_url)
def test_filtered_role_assignments(self):
"""Call ``GET /role_assignments?filters``.
Test Plan:
- Create extra users, group, role and project for tests
- Make the following assignments:
Give group1, role1 on project1 and domain
Give user1, role2 on project1 and domain
Make User1 a member of Group1
- Test a series of single filter list calls, checking that
the correct results are obtained
- Test a multi-filtered list call
- Test listing all effective roles for a given user
- Test the equivalent of the list of roles in a project scoped
token (all effective roles for a user on a project)
"""
# Since the default fixtures already assign some roles to the
# user it creates, we also need a new user that will not have any
# existing assignments
self.user1 = self.new_user_ref(
domain_id=self.domain['id'])
password = self.user1['password']
self.user1 = self.identity_api.create_user(self.user1)
self.user1['password'] = password
self.user2 = self.new_user_ref(
domain_id=self.domain['id'])
password = self.user2['password']
self.user2 = self.identity_api.create_user(self.user2)
self.user2['password'] = password
self.group1 = self.new_group_ref(
domain_id=self.domain['id'])
self.group1 = self.identity_api.create_group(self.group1)
self.identity_api.add_user_to_group(self.user1['id'],
self.group1['id'])
self.identity_api.add_user_to_group(self.user2['id'],
self.group1['id'])
self.project1 = self.new_project_ref(
domain_id=self.domain['id'])
self.assignment_api.create_project(self.project1['id'], self.project1)
self.role1 = self.new_role_ref()
self.assignment_api.create_role(self.role1['id'], self.role1)
self.role2 = self.new_role_ref()
self.assignment_api.create_role(self.role2['id'], self.role2)
# Now add one of each of the four types of assignment
gd_url, gd_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, group_id=self.group1['id'],
role_id=self.role1['id'])
self.put(gd_url)
ud_url, ud_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, user_id=self.user1['id'],
role_id=self.role2['id'])
self.put(ud_url)
gp_url, gp_entity = _build_role_assignment_url_and_entity(
project_id=self.project1['id'], group_id=self.group1['id'],
role_id=self.role1['id'])
self.put(gp_url)
up_url, up_entity = _build_role_assignment_url_and_entity(
project_id=self.project1['id'], user_id=self.user1['id'],
role_id=self.role2['id'])
self.put(up_url)
# Now list by various filters to make sure we get back the right ones
# Filter by project: expect the user and group project assignments.
collection_url = ('/role_assignments?scope.project.id=%s' %
self.project1['id'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
self.assertRoleAssignmentInListResponse(r, gp_entity, link_url=gp_url)
# Filter by domain: expect the user and group domain assignments.
collection_url = ('/role_assignments?scope.domain.id=%s' %
self.domain['id'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
# Filter by user: only user1's direct assignments are returned.
collection_url = '/role_assignments?user.id=%s' % self.user1['id']
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
# Filter by group: only group1's assignments are returned.
collection_url = '/role_assignments?group.id=%s' % self.group1['id']
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
self.assertRoleAssignmentInListResponse(r, gp_entity, link_url=gp_url)
# Filter by role: role1 was only used for the group assignments.
collection_url = '/role_assignments?role.id=%s' % self.role1['id']
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
self.assertRoleAssignmentInListResponse(r, gp_entity, link_url=gp_url)
# Let's try combining two filers together....
collection_url = (
'/role_assignments?user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
'user_id': self.user1['id'],
'project_id': self.project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=1,
resource_url=collection_url)
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
# Now for a harder one - filter for user with effective
# roles - this should return role assignment that were directly
# assigned as well as by virtue of group membership
collection_url = ('/role_assignments?effective&user.id=%s' %
self.user1['id'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=4,
resource_url=collection_url)
# Should have the two direct roles...
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
# ...and the two via group membership...
unused, up1_entity = _build_role_assignment_url_and_entity(
project_id=self.project1['id'], user_id=self.user1['id'],
role_id=self.role1['id'])
unused, ud1_entity = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, user_id=self.user1['id'],
role_id=self.role1['id'])
gp1_url, unused = _build_role_assignment_url_and_entity(
project_id=self.project1['id'], group_id=self.group1['id'],
role_id=self.role1['id'])
gd1_url, unused = _build_role_assignment_url_and_entity(
domain_id=self.domain_id, group_id=self.group1['id'],
role_id=self.role1['id'])
self.assertRoleAssignmentInListResponse(r, up1_entity,
link_url=gp1_url)
self.assertRoleAssignmentInListResponse(r, ud1_entity,
link_url=gd1_url)
# ...and for the grand-daddy of them all, simulate the request
# that would generate the list of effective roles in a project
# scoped token.
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
'user_id': self.user1['id'],
'project_id': self.project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
# Should have one direct role and one from group membership...
self.assertRoleAssignmentInListResponse(r, up_entity, link_url=up_url)
self.assertRoleAssignmentInListResponse(r, up1_entity,
link_url=gp1_url)
class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
"""Test inheritance crud and its effects."""
def config_overrides(self):
# Enable the OS-INHERIT extension so inherited grants are honored.
super(AssignmentInheritanceTestCase, self).config_overrides()
self.config_fixture.config(group='os_inherit', enabled=True)
def test_get_token_from_inherited_user_domain_role_grants(self):
    """Inherited user-on-domain grants gate project token issuance."""
    # Create a new user to ensure that no grant is loaded from sample data
    user = self.new_user_ref(domain_id=self.domain_id)
    password = user['password']
    user = self.identity_api.create_user(user)
    user['password'] = password

    # Define domain and project authentication data.
    # NOTE: fixed corrupted '<PASSWORD>' placeholders below - the
    # clear-text password preserved above is what authentication needs.
    domain_auth_data = self.build_authentication_request(
        user_id=user['id'],
        password=user['password'],
        domain_id=self.domain_id)
    project_auth_data = self.build_authentication_request(
        user_id=user['id'],
        password=user['password'],
        project_id=self.project_id)

    # Check the user cannot get a domain nor a project token
    self.v3_authenticate_token(domain_auth_data, expected_status=401)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Grant non-inherited role for user on domain
    non_inher_ud_url, non_inher_ud_entity = (
        _build_role_assignment_url_and_entity(domain_id=self.domain_id,
                                              user_id=user['id'],
                                              role_id=self.role_id))
    self.put(non_inher_ud_url)

    # Check the user can get only a domain token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Create inherited role
    inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
    self.assignment_api.create_role(inherited_role['id'], inherited_role)

    # Grant inherited role for user on domain
    inher_ud_url, inher_ud_entity = _build_role_assignment_url_and_entity(
        domain_id=self.domain_id, user_id=user['id'],
        role_id=inherited_role['id'], inherited_to_projects=True)
    self.put(inher_ud_url)

    # Check the user can get both a domain and a project token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data)

    # Delete inherited grant
    self.delete(inher_ud_url)

    # Check the user can only get a domain token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Delete non-inherited grant
    self.delete(non_inher_ud_url)

    # Check the user cannot get a domain token anymore
    self.v3_authenticate_token(domain_auth_data, expected_status=401)
def test_get_token_from_inherited_group_domain_role_grants(self):
    """Inherited domain grants for a group member gate token issuance."""
    # Create a new group and put a new user in it to
    # ensure that no grant is loaded from sample data
    user = self.new_user_ref(domain_id=self.domain_id)
    password = user['password']
    user = self.identity_api.create_user(user)
    user['password'] = password

    group = self.new_group_ref(domain_id=self.domain['id'])
    group = self.identity_api.create_group(group)
    self.identity_api.add_user_to_group(user['id'], group['id'])

    # Define domain and project authentication data.
    # NOTE: fixed corrupted '<PASSWORD>' placeholders below - the
    # clear-text password preserved above is what authentication needs.
    domain_auth_data = self.build_authentication_request(
        user_id=user['id'],
        password=user['password'],
        domain_id=self.domain_id)
    project_auth_data = self.build_authentication_request(
        user_id=user['id'],
        password=user['password'],
        project_id=self.project_id)

    # Check the user cannot get a domain nor a project token
    self.v3_authenticate_token(domain_auth_data, expected_status=401)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Grant non-inherited role for user on domain.
    # NOTE(review): despite the 'gd' naming, the grants below target the
    # user directly (user_id=...), not the group - confirm whether
    # group_id=group['id'] was intended here.
    non_inher_gd_url, non_inher_gd_entity = (
        _build_role_assignment_url_and_entity(domain_id=self.domain_id,
                                              user_id=user['id'],
                                              role_id=self.role_id))
    self.put(non_inher_gd_url)

    # Check the user can get only a domain token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Create inherited role
    inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
    self.assignment_api.create_role(inherited_role['id'], inherited_role)

    # Grant inherited role for user on domain
    inher_gd_url, inher_gd_entity = _build_role_assignment_url_and_entity(
        domain_id=self.domain_id, user_id=user['id'],
        role_id=inherited_role['id'], inherited_to_projects=True)
    self.put(inher_gd_url)

    # Check the user can get both a domain and a project token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data)

    # Delete inherited grant
    self.delete(inher_gd_url)

    # Check the user can only get a domain token
    self.v3_authenticate_token(domain_auth_data)
    self.v3_authenticate_token(project_auth_data, expected_status=401)

    # Delete non-inherited grant
    self.delete(non_inher_gd_url)

    # Check the user cannot get a domain token anymore
    self.v3_authenticate_token(domain_auth_data, expected_status=401)
def test_crud_user_inherited_domain_role_grants(self):
    """Test create/read/delete of an inherited user-domain role grant."""
    # Build two roles; the second acts as a non-inherited spoiler that
    # must never appear in the inherited-roles listing.
    roles = []
    for _ in range(2):
        new_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(new_role['id'], new_role)
        roles.append(new_role)
    self.assignment_api.create_grant(
        roles[1]['id'], user_id=self.user['id'],
        domain_id=self.domain_id)
    roles_url = (
        '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': self.domain_id,
            'user_id': self.user['id']})
    grant_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': roles_url,
        'role_id': roles[0]['id']}
    listing_url = roles_url + '/inherited_to_projects'
    # Create the inherited grant, then verify it is readable both as a
    # single member and via the collection listing.
    self.put(grant_url)
    self.head(grant_url)
    listing = self.get(listing_url)
    self.assertValidRoleListResponse(listing, ref=roles[0],
                                     resource_url=listing_url)
    # Remove the grant and confirm the collection is empty again.
    self.delete(grant_url)
    listing = self.get(listing_url)
    self.assertValidRoleListResponse(listing, expected_length=0,
                                     resource_url=listing_url)
def test_list_role_assignments_for_inherited_domain_grants(self):
    """Call ``GET /role_assignments with inherited domain grants``.

    Test Plan:

    - Create 4 roles
    - Create a domain with a user and two projects
    - Assign two direct roles to project1
    - Assign a spoiler role to project2
    - Issue the URL to add inherited role to the domain
    - Issue the URL to check it is indeed on the domain
    - Issue the URL to check effective roles on project1 - this
      should return 3 roles.

    """
    role_list = []
    for _ in range(4):
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role['id'], role)
        role_list.append(role)
    # Build a fresh domain with one user and two projects.
    domain = self.new_domain_ref()
    self.assignment_api.create_domain(domain['id'], domain)
    user1 = self.new_user_ref(
        domain_id=domain['id'])
    # The ref returned by create_user() does not retain a usable
    # clear-text password, so keep a copy and restore it afterwards.
    password = user1['password']
    user1 = self.identity_api.create_user(user1)
    user1['password'] = password
    project1 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project1['id'], project1)
    project2 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project2['id'], project2)
    # Add some roles to the project
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[0]['id'])
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[1]['id'])
    # ..and one on a different project as a spoiler
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project2['id'], role_list[2]['id'])
    # Now create our inherited role on the domain
    base_collection_url = (
        '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': domain['id'],
            'user_id': user1['id']})
    member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': base_collection_url,
        'role_id': role_list[3]['id']}
    collection_url = base_collection_url + '/inherited_to_projects'
    self.put(member_url)
    self.head(member_url)
    r = self.get(collection_url)
    self.assertValidRoleListResponse(r, ref=role_list[3],
                                     resource_url=collection_url)
    # Now use the list domain role assignments api to check if this
    # is included
    collection_url = (
        '/role_assignments?user.id=%(user_id)s'
        '&scope.domain.id=%(domain_id)s' % {
            'user_id': user1['id'],
            'domain_id': domain['id']})
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=1,
                                               resource_url=collection_url)
    ud_url, ud_entity = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], user_id=user1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
    # Now ask for effective list role assignments - the role should
    # turn into a project role, along with the two direct roles that are
    # on the project
    collection_url = (
        '/role_assignments?effective&user.id=%(user_id)s'
        '&scope.project.id=%(project_id)s' % {
            'user_id': user1['id'],
            'project_id': project1['id']})
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=3,
                                               resource_url=collection_url)
    # An effective role for an inherited role will be a project
    # entity, with a domain link to the inherited assignment
    unused, up_entity = _build_role_assignment_url_and_entity(
        project_id=project1['id'], user_id=user1['id'],
        role_id=role_list[3]['id'])
    ud_url, unused = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], user_id=user1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, up_entity, link_url=ud_url)
def test_list_role_assignments_for_disabled_inheritance_extension(self):
    """Call ``GET /role_assignments with inherited domain grants``.

    Test Plan:

    - Issue the URL to add inherited role to the domain
    - Issue the URL to check effective roles on project include the
      inherited role
    - Disable the extension
    - Re-check the effective roles, proving the inherited role no longer
      shows up.

    """
    role_list = []
    for _ in range(4):
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role['id'], role)
        role_list.append(role)
    domain = self.new_domain_ref()
    self.assignment_api.create_domain(domain['id'], domain)
    user1 = self.new_user_ref(
        domain_id=domain['id'])
    # Keep the clear-text password across create_user(), which does not
    # return it on the ref.  (Restores text lost to a redaction artifact.)
    password = user1['password']
    user1 = self.identity_api.create_user(user1)
    user1['password'] = password
    project1 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project1['id'], project1)
    project2 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project2['id'], project2)
    # Add some roles to the project
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[0]['id'])
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[1]['id'])
    # ..and one on a different project as a spoiler
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project2['id'], role_list[2]['id'])
    # Now create our inherited role on the domain
    base_collection_url = (
        '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': domain['id'],
            'user_id': user1['id']})
    member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': base_collection_url,
        'role_id': role_list[3]['id']}
    collection_url = base_collection_url + '/inherited_to_projects'
    self.put(member_url)
    self.head(member_url)
    r = self.get(collection_url)
    self.assertValidRoleListResponse(r, ref=role_list[3],
                                     resource_url=collection_url)
    # Get effective list role assignments - the role should
    # turn into a project role, along with the two direct roles that are
    # on the project
    collection_url = (
        '/role_assignments?effective&user.id=%(user_id)s'
        '&scope.project.id=%(project_id)s' % {
            'user_id': user1['id'],
            'project_id': project1['id']})
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=3,
                                               resource_url=collection_url)
    unused, up_entity = _build_role_assignment_url_and_entity(
        project_id=project1['id'], user_id=user1['id'],
        role_id=role_list[3]['id'])
    ud_url, unused = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], user_id=user1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, up_entity, link_url=ud_url)
    # Disable the extension and re-check the list, the role inherited
    # from the project should no longer show up
    self.config_fixture.config(group='os_inherit', enabled=False)
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=2,
                                               resource_url=collection_url)
    unused, up_entity = _build_role_assignment_url_and_entity(
        project_id=project1['id'], user_id=user1['id'],
        role_id=role_list[3]['id'])
    ud_url, unused = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], user_id=user1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentNotInListResponse(r, up_entity,
                                               link_url=ud_url)
def test_list_role_assignments_for_inherited_group_domain_grants(self):
    """Call ``GET /role_assignments with inherited group domain grants``.

    Test Plan:

    - Create 4 roles
    - Create a domain with two users, a group containing both, and two
      projects
    - Assign two direct roles to project1
    - Assign a spoiler role to project2
    - Issue the URL to add inherited role to the domain
    - Issue the URL to check it is indeed on the domain
    - Issue the URL to check effective roles on project1 - this
      should return 3 roles.

    """
    role_list = []
    for _ in range(4):
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role['id'], role)
        role_list.append(role)
    domain = self.new_domain_ref()
    self.assignment_api.create_domain(domain['id'], domain)
    user1 = self.new_user_ref(
        domain_id=domain['id'])
    # create_user() does not hand back the clear-text password, so keep
    # a copy and restore it on the returned ref.
    password = user1['password']
    user1 = self.identity_api.create_user(user1)
    user1['password'] = password
    user2 = self.new_user_ref(
        domain_id=domain['id'])
    # Same treatment for the second user.  (Restores text lost to a
    # redaction artifact: this must read user2's password.)
    password = user2['password']
    user2 = self.identity_api.create_user(user2)
    user2['password'] = password
    group1 = self.new_group_ref(
        domain_id=domain['id'])
    group1 = self.identity_api.create_group(group1)
    self.identity_api.add_user_to_group(user1['id'],
                                        group1['id'])
    self.identity_api.add_user_to_group(user2['id'],
                                        group1['id'])
    project1 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project1['id'], project1)
    project2 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project2['id'], project2)
    # Add some roles to the project
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[0]['id'])
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[1]['id'])
    # ..and one on a different project as a spoiler
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project2['id'], role_list[2]['id'])
    # Now create our inherited role on the domain
    base_collection_url = (
        '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
            'domain_id': domain['id'],
            'group_id': group1['id']})
    member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': base_collection_url,
        'role_id': role_list[3]['id']}
    collection_url = base_collection_url + '/inherited_to_projects'
    self.put(member_url)
    self.head(member_url)
    r = self.get(collection_url)
    self.assertValidRoleListResponse(r, ref=role_list[3],
                                     resource_url=collection_url)
    # Now use the list domain role assignments api to check if this
    # is included
    collection_url = (
        '/role_assignments?group.id=%(group_id)s'
        '&scope.domain.id=%(domain_id)s' % {
            'group_id': group1['id'],
            'domain_id': domain['id']})
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=1,
                                               resource_url=collection_url)
    gd_url, gd_entity = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], group_id=group1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
    # Now ask for effective list role assignments - the role should
    # turn into a user project role, along with the two direct roles
    # that are on the project
    collection_url = (
        '/role_assignments?effective&user.id=%(user_id)s'
        '&scope.project.id=%(project_id)s' % {
            'user_id': user1['id'],
            'project_id': project1['id']})
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=3,
                                               resource_url=collection_url)
    # An effective role for an inherited role will be a project
    # entity, with a domain link to the inherited assignment
    unused, up_entity = _build_role_assignment_url_and_entity(
        project_id=project1['id'], user_id=user1['id'],
        role_id=role_list[3]['id'])
    gd_url, unused = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], group_id=group1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, up_entity, link_url=gd_url)
def test_filtered_role_assignments_for_inherited_grants(self):
    """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.

    Test Plan:

    - Create 5 roles
    - Create a domain with a user, group and two projects
    - Assign three direct spoiler roles to projects
    - Issue the URL to add an inherited user role to the domain
    - Issue the URL to add an inherited group role to the domain
    - Issue the URL to filter by inherited roles - this should
      return just the 2 inherited roles.

    """
    role_list = []
    for _ in range(5):
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role['id'], role)
        role_list.append(role)
    domain = self.new_domain_ref()
    self.assignment_api.create_domain(domain['id'], domain)
    user1 = self.new_user_ref(
        domain_id=domain['id'])
    # Keep the clear-text password across create_user(), which does not
    # return it.  (Restores text lost to a redaction artifact.)
    password = user1['password']
    user1 = self.identity_api.create_user(user1)
    user1['password'] = password
    group1 = self.new_group_ref(
        domain_id=domain['id'])
    group1 = self.identity_api.create_group(group1)
    project1 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project1['id'], project1)
    project2 = self.new_project_ref(
        domain_id=domain['id'])
    self.assignment_api.create_project(project2['id'], project2)
    # Add some spoiler roles to the projects
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project1['id'], role_list[0]['id'])
    self.assignment_api.add_role_to_user_and_project(
        user1['id'], project2['id'], role_list[1]['id'])
    # Create a non-inherited role as a spoiler
    self.assignment_api.create_grant(
        role_list[2]['id'], user_id=user1['id'], domain_id=domain['id'])
    # Now create two inherited roles on the domain, one for a user
    # and one for a group
    base_collection_url = (
        '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
            'domain_id': domain['id'],
            'user_id': user1['id']})
    member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': base_collection_url,
        'role_id': role_list[3]['id']}
    collection_url = base_collection_url + '/inherited_to_projects'
    self.put(member_url)
    self.head(member_url)
    r = self.get(collection_url)
    self.assertValidRoleListResponse(r, ref=role_list[3],
                                     resource_url=collection_url)
    base_collection_url = (
        '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
            'domain_id': domain['id'],
            'group_id': group1['id']})
    member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
        'collection_url': base_collection_url,
        'role_id': role_list[4]['id']}
    collection_url = base_collection_url + '/inherited_to_projects'
    self.put(member_url)
    self.head(member_url)
    r = self.get(collection_url)
    self.assertValidRoleListResponse(r, ref=role_list[4],
                                     resource_url=collection_url)
    # Now use the list role assignments api to get a list of inherited
    # roles on the domain - should get back the two roles
    collection_url = (
        '/role_assignments?scope.OS-INHERIT:inherited_to=projects')
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               expected_length=2,
                                               resource_url=collection_url)
    ud_url, ud_entity = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], user_id=user1['id'],
        role_id=role_list[3]['id'], inherited_to_projects=True)
    gd_url, gd_entity = _build_role_assignment_url_and_entity(
        domain_id=domain['id'], group_id=group1['id'],
        role_id=role_list[4]['id'], inherited_to_projects=True)
    self.assertRoleAssignmentInListResponse(r, ud_entity, link_url=ud_url)
    self.assertRoleAssignmentInListResponse(r, gd_entity, link_url=gd_url)
def _setup_hierarchical_projects_scenario(self):
    """Build a two-level project hierarchy plus two roles.

    Creates a root project with a single leaf project underneath it,
    together with two roles named 'non-inherited' and 'inherited'.
    Returns the tuple (root_id, leaf_id, non_inherited_role_id,
    inherited_role_id).
    """
    # Project hierarchy: root first, so the leaf can reference it as
    # its parent.
    root_project = self.new_project_ref(domain_id=self.domain['id'])
    self.assignment_api.create_project(root_project['id'], root_project)
    leaf_project = self.new_project_ref(domain_id=self.domain['id'],
                                        parent_id=root_project['id'])
    self.assignment_api.create_project(leaf_project['id'], leaf_project)
    # The two roles used by the hierarchy tests.
    direct_role = {'id': uuid.uuid4().hex, 'name': 'non-inherited'}
    inheritable_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
    for role_ref in (direct_role, inheritable_role):
        self.assignment_api.create_role(role_ref['id'], role_ref)
    return (root_project['id'], leaf_project['id'],
            direct_role['id'], inheritable_role['id'])
def test_get_token_from_inherited_user_project_role_grants(self):
    """Check token scoping with direct vs inherited user project grants.

    An inherited grant on a (root) project applies to its sub-projects
    only, so the user must never be able to scope a token to the root
    project via the inherited role.
    """
    # Create default scenario
    root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
        self._setup_hierarchical_projects_scenario())
    # Define root and leaf projects authentication data.
    # (Restores `self.user['password']`, lost to a redaction artifact.)
    root_project_auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=root_id)
    leaf_project_auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=leaf_id)
    # Check the user cannot get a token on root nor leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
    # Grant non-inherited role for user on leaf project
    non_inher_up_url, non_inher_up_entity = (
        _build_role_assignment_url_and_entity(
            project_id=leaf_id, user_id=self.user['id'],
            role_id=non_inherited_role_id))
    self.put(non_inher_up_url)
    # Check the user can only get a token on leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data)
    # Grant inherited role for user on root project
    inher_up_url, inher_up_entity = _build_role_assignment_url_and_entity(
        project_id=root_id, user_id=self.user['id'],
        role_id=inherited_role_id, inherited_to_projects=True)
    self.put(inher_up_url)
    # Check the user still can get a token only on leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data)
    # Delete non-inherited grant
    self.delete(non_inher_up_url)
    # Check the inherited role still applies for leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data)
    # Delete inherited grant
    self.delete(inher_up_url)
    # Check the user cannot get a token on leaf project anymore
    self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
def test_get_token_from_inherited_group_project_role_grants(self):
    """Check token scoping with direct vs inherited group project grants.

    Same as the user-grant variant, but the roles are granted to a
    group the user belongs to.
    """
    # Create default scenario
    root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
        self._setup_hierarchical_projects_scenario())
    # Create group and add user to it
    group = self.new_group_ref(domain_id=self.domain['id'])
    group = self.identity_api.create_group(group)
    self.identity_api.add_user_to_group(self.user['id'], group['id'])
    # Define root and leaf projects authentication data.
    # (Restores `self.user['password']`, lost to a redaction artifact.)
    root_project_auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=root_id)
    leaf_project_auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=leaf_id)
    # Check the user cannot get a token on root nor leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
    # Grant non-inherited role for group on leaf project
    non_inher_gp_url, non_inher_gp_entity = (
        _build_role_assignment_url_and_entity(
            project_id=leaf_id, group_id=group['id'],
            role_id=non_inherited_role_id))
    self.put(non_inher_gp_url)
    # Check the user can only get a token on leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data)
    # Grant inherited role for group on root project
    inher_gp_url, inher_gp_entity = _build_role_assignment_url_and_entity(
        project_id=root_id, group_id=group['id'],
        role_id=inherited_role_id, inherited_to_projects=True)
    self.put(inher_gp_url)
    # Check the user still can get a token only on leaf project
    self.v3_authenticate_token(root_project_auth_data, expected_status=401)
    self.v3_authenticate_token(leaf_project_auth_data)
    # Delete non-inherited grant
    self.delete(non_inher_gp_url)
    # Check the inherited role still applies for leaf project
    self.v3_authenticate_token(leaf_project_auth_data)
    # Delete inherited grant
    self.delete(inher_gp_url)
    # Check the user cannot get a token on leaf project anymore
    self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
def test_get_role_assignments_for_project_hierarchy(self):
    """Call ``GET /role_assignments``.

    Test Plan:

    - Create 2 roles
    - Create a hierarchy of projects with one root and one leaf project
    - Issue the URL to add a non-inherited user role to the root project
    - Issue the URL to add an inherited user role to the root project
    - Issue the URL to get all role assignments - this should return just
      2 roles (non-inherited and inherited) in the root project.

    """
    # Create default scenario
    root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
        self._setup_hierarchical_projects_scenario())
    # Grant non-inherited role
    non_inher_up_url, non_inher_up_entity = (
        _build_role_assignment_url_and_entity(
            project_id=root_id, user_id=self.user['id'],
            role_id=non_inherited_role_id))
    self.put(non_inher_up_url)
    # Grant inherited role
    inher_up_url, inher_up_entity = _build_role_assignment_url_and_entity(
        project_id=root_id, user_id=self.user['id'],
        role_id=inherited_role_id, inherited_to_projects=True)
    self.put(inher_up_url)
    # Get role assignments (non-effective view: only direct assignments
    # should appear, both against the root project).
    collection_url = '/role_assignments'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               resource_url=collection_url)
    # Assert that the user has non-inherited role on root project
    self.assertRoleAssignmentInListResponse(r, non_inher_up_entity,
                                            non_inher_up_url)
    # Assert that the user has inherited role on root project
    self.assertRoleAssignmentInListResponse(r, inher_up_entity,
                                            inher_up_url)
    # Assert that the user does not have non-inherited role on leaf project
    # (re-point the url/entity at the leaf before the negative check).
    non_inher_up_url = ('/projects/%s/users/%s/roles/%s' %
                        (leaf_id, self.user['id'], non_inherited_role_id))
    non_inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity,
                                               non_inher_up_url)
    # Assert that the user does not have inherited role on leaf project
    inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentNotInListResponse(r, inher_up_entity,
                                               inher_up_url)
def test_get_effective_role_assignments_for_project_hierarchy(self):
    """Call ``GET /role_assignments?effective``.

    Test Plan:

    - Create 2 roles
    - Create a hierarchy of projects with one root and one leaf project
    - Issue the URL to add a non-inherited user role to the root project
    - Issue the URL to add an inherited user role to the root project
    - Issue the URL to get effective role assignments - this should return
      1 role (non-inherited) on the root project and 1 role (inherited) on
      the leaf project.

    """
    # Create default scenario
    root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
        self._setup_hierarchical_projects_scenario())
    # Grant non-inherited role
    non_inher_up_url, non_inher_up_entity = (
        _build_role_assignment_url_and_entity(
            project_id=root_id, user_id=self.user['id'],
            role_id=non_inherited_role_id))
    self.put(non_inher_up_url)
    # Grant inherited role
    inher_up_url, inher_up_entity = _build_role_assignment_url_and_entity(
        project_id=root_id, user_id=self.user['id'],
        role_id=inherited_role_id, inherited_to_projects=True)
    self.put(inher_up_url)
    # Get effective role assignments
    collection_url = '/role_assignments?effective'
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               resource_url=collection_url)
    # Assert that the user has non-inherited role on root project
    self.assertRoleAssignmentInListResponse(r, non_inher_up_entity,
                                            non_inher_up_url)
    # Assert that the user does not have inherited role on root project
    # (in the effective view it only materialises on descendants).
    self.assertRoleAssignmentNotInListResponse(r, inher_up_entity,
                                               inher_up_url)
    # Assert that the user does not have non-inherited role on leaf project
    # (re-point the url/entity at the leaf before the checks).
    non_inher_up_url = ('/projects/%s/users/%s/roles/%s' %
                        (leaf_id, self.user['id'], non_inherited_role_id))
    non_inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity,
                                               non_inher_up_url)
    # Assert that the user has inherited role on leaf project
    inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentInListResponse(r, inher_up_entity,
                                            inher_up_url)
def test_get_inherited_role_assignments_for_project_hierarchy(self):
    """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.

    Test Plan:

    - Create 2 roles
    - Create a hierarchy of projects with one root and one leaf project
    - Issue the URL to add a non-inherited user role to the root project
    - Issue the URL to add an inherited user role to the root project
    - Issue the URL to filter inherited to projects role assignments - this
      should return 1 role (inherited) on the root project.

    """
    # Create default scenario
    root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
        self._setup_hierarchical_projects_scenario())
    # Grant non-inherited role
    non_inher_up_url, non_inher_up_entity = (
        _build_role_assignment_url_and_entity(
            project_id=root_id, user_id=self.user['id'],
            role_id=non_inherited_role_id))
    self.put(non_inher_up_url)
    # Grant inherited role
    inher_up_url, inher_up_entity = _build_role_assignment_url_and_entity(
        project_id=root_id, user_id=self.user['id'],
        role_id=inherited_role_id, inherited_to_projects=True)
    self.put(inher_up_url)
    # Get inherited role assignments (filtered view: only the inherited
    # assignment, still scoped to where it was granted, i.e. the root).
    collection_url = ('/role_assignments'
                      '?scope.OS-INHERIT:inherited_to=projects')
    r = self.get(collection_url)
    self.assertValidRoleAssignmentListResponse(r,
                                               resource_url=collection_url)
    # Assert that the user does not have non-inherited role on root project
    self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity,
                                               non_inher_up_url)
    # Assert that the user has inherited role on root project
    self.assertRoleAssignmentInListResponse(r, inher_up_entity,
                                            inher_up_url)
    # Assert that the user does not have non-inherited role on leaf project
    # (re-point the url/entity at the leaf before the negative checks).
    non_inher_up_url = ('/projects/%s/users/%s/roles/%s' %
                        (leaf_id, self.user['id'], non_inherited_role_id))
    non_inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity,
                                               non_inher_up_url)
    # Assert that the user does not have inherited role on leaf project
    inher_up_entity['scope']['project']['id'] = leaf_id
    self.assertRoleAssignmentNotInListResponse(r, inher_up_entity,
                                               inher_up_url)
class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase):
    """Ensure inherited grant APIs report 404 when os_inherit is disabled."""

    def config_overrides(self):
        super(AssignmentInheritanceDisabledTestCase, self).config_overrides()
        # Switch the OS-INHERIT extension off for every test in this class.
        self.config_fixture.config(group='os_inherit', enabled=False)

    def test_crud_inherited_role_grants_failed_if_disabled(self):
        role_ref = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assignment_api.create_role(role_ref['id'], role_ref)
        user_roles_url = (
            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
                'domain_id': self.domain_id,
                'user_id': self.user['id']})
        grant_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
            'collection_url': user_roles_url,
            'role_id': role_ref['id']}
        listing_url = user_roles_url + '/inherited_to_projects'
        # With the extension disabled, every OS-INHERIT operation is
        # expected to come back as 404 Not Found.
        for call, url in ((self.put, grant_url),
                          (self.head, grant_url),
                          (self.get, listing_url),
                          (self.delete, grant_url)):
            call(url, expected_status=404)
class AssignmentV3toV2MethodsTestCase(tests.TestCase):
    """Test domain V3 to V2 conversion methods."""

    def test_v2controller_filter_domain_id(self):
        # V2.0 is not domain aware, ensure domain_id is popped off the ref.
        payload = uuid.uuid4().hex
        dom_id = uuid.uuid4().hex
        ref = {'domain_id': dom_id,
               'other_data': payload}
        ref_no_domain = {'other_data': payload}
        expected_ref = dict(ref_no_domain)

        # Filtering happens in place: the very same dict comes back,
        # minus its domain_id key.
        updated_ref = controller.V2Controller.filter_domain_id(ref)
        self.assertIs(ref, updated_ref)
        self.assertDictEqual(ref, expected_ref)

        # A ref without domain_id must pass through untouched.
        updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain)
        self.assertIs(ref_no_domain, updated_ref)
        self.assertDictEqual(ref_no_domain, expected_ref)

    def test_v3controller_filter_domain_id(self):
        # No data should be filtered out in this case.
        payload = uuid.uuid4().hex
        dom_id = uuid.uuid4().hex
        ref = {'domain_id': dom_id,
               'other_data': payload}
        expected_ref = dict(ref)

        updated_ref = controller.V3Controller.filter_domain_id(ref)
        self.assertIs(ref, updated_ref)
        self.assertDictEqual(ref, expected_ref)
| StarcoderdataPython |
9745435 | <filename>diagnnose/models/wrappers/google_lm.py
import os
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from diagnnose.models.recurrent_lm import RecurrentLM
from diagnnose.tokenizer import create_char_vocab
from diagnnose.tokenizer.c2i import C2I
from diagnnose.typedefs.activations import ActivationDict, LayeredTensors
class GoogleLM(RecurrentLM):
    """Reimplementation of the LM of Jozefowicz et al. (2016).

    Paper: https://arxiv.org/abs/1602.02410
    Lib: https://github.com/tensorflow/models/tree/master/research/lm_1b

    This implementation allows for only a subset of the SoftMax to be
    loaded in, to alleviate RAM usage.

    Parameters
    ----------
    pbtxt_path : str
        Path to the .pbtxt file containing the GraphDef model setup.
    ckpt_dir : str
        Path to folder containing parameter checkpoint files.
    full_vocab_path : str
        Path to the full model vocabulary of 800k tokens. Note that
        `vocab_path` can be passed along as well, pointing toward the
        corpus that will be extracted. In that case only a subset of
        the model softmax will be loaded in.
    corpus_vocab_path : str, optional
        Path to the corpus for which a vocabulary will be created. This
        allows for only a subset of the model softmax to be loaded in.
    create_decoder : bool
        Toggle to load in the (partial) softmax weights. Can be set to
        false in case no decoding projection needs to be made, as is
        the case during activation extraction, for example.
    """

    forget_offset = 1
    ih_concat_order = ["i", "h"]
    sizes = {layer: {"x": 1024, "h": 1024, "c": 8192} for layer in range(2)}
    split_order = ["i", "g", "f", "o"]
    use_char_embs = True

    def __init__(
        self,
        pbtxt_path: str,
        ckpt_dir: str,
        full_vocab_path: str,
        corpus_vocab_path: Optional[Union[str, List[str]]] = None,
        create_decoder: bool = True,
        device: str = "cpu",
    ) -> None:
        super().__init__()
        print("Loading pretrained model...")

        # When a corpus vocabulary is supplied, only that subset of the
        # softmax is eventually loaded in.
        char_vocab: C2I = create_char_vocab(corpus_vocab_path or full_vocab_path)

        self.device = device
        self.decoder_w = None
        self.decoder_b = None

        try:
            self.encoder = CharCNN(pbtxt_path, ckpt_dir, char_vocab, device)
            self.lstm = LSTM(
                ckpt_dir, self.num_layers, self.split_order, self.forget_offset, device
            )
            if create_decoder:
                self.decoder = SoftMax(
                    char_vocab, full_vocab_path, ckpt_dir, self.sizes[1]["h"], device
                )
                self.decoder_w = self.decoder.decoder_w
                self.decoder_b = self.decoder.decoder_b
        except ImportError:
            raise ImportError("tensorflow and protobuf are needed for GoogleLM")

        print("Model initialisation finished.")

    @property
    def vocab(self) -> C2I:
        # The vocabulary is held by the character encoder.
        return self.encoder.vocab

    @property
    def weight(self) -> LayeredTensors:
        return self.lstm.weight

    @property
    def bias(self) -> LayeredTensors:
        return self.lstm.bias

    @property
    def peepholes(self) -> ActivationDict:
        return self.lstm.peepholes

    def forward(
        self,
        tokens: List[str],
        prev_activations: Optional[ActivationDict] = None,
        compute_out: bool = True,
    ) -> Tuple[Optional[Tensor], ActivationDict]:
        """Run one step over `tokens`, optionally projecting to the vocab."""
        # Character-CNN embeddings of the input words.
        token_embs = self.encoder(tokens)

        if prev_activations is None:
            prev_activations = self.init_hidden(1)

        final_hidden, activations = self.lstm(token_embs, prev_activations)

        out: Optional[Tensor] = None
        if compute_out and self.decoder_w is not None:
            out = self.decoder_w @ final_hidden + self.decoder_b

        return out, activations
class CharCNN(nn.Module):
    """Character-level CNN encoder of the Google LM.

    Token embeddings are produced by running each token's character ids
    through the pretrained TF character-CNN graph.  Embeddings are memoised
    per token in ``self.cnn_embs``, since the TF session call is
    comparatively expensive and deterministic per token.
    """

    def __init__(self, pbtxt_path: str, ckpt_dir: str, vocab: C2I, device: str) -> None:
        print("Loading CharCNN...")
        super().__init__()

        self.cnn_sess, self.cnn_t = self._load_char_cnn(pbtxt_path, ckpt_dir)
        # Lazy token -> embedding cache, filled in forward().
        self.cnn_embs: Dict[str, Tensor] = {}
        self.vocab = vocab
        self.device = device

    @staticmethod
    def _load_char_cnn(pbtxt_path: str, ckpt_dir: str) -> Any:
        """Restore the char-CNN TF graph and load its checkpointed weights.

        Returns the TF session and a dict with the relevant input/output
        tensors of the char-CNN subgraph.
        """
        import tensorflow as tf
        from google.protobuf import text_format

        ckpt_file = os.path.join(ckpt_dir, "ckpt-char-embedding")

        with tf.Graph().as_default():
            sys.stderr.write("Recovering graph.\n")
            with tf.gfile.FastGFile(pbtxt_path, "r") as f:
                s = f.read()
                gd = tf.GraphDef()
                text_format.Merge(s, gd)

            t = dict()
            [t["char_inputs_in"], t["all_embs"]] = tf.import_graph_def(
                gd, {}, ["char_inputs_in:0", "all_embs_out:0"], name=""
            )

            sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            # Fix: this literal had a pointless f-string prefix (no
            # placeholders); it is a plain graph-node name.
            sess.run("save/Assign", {"save/Const:0": ckpt_file})
            # The following was recovered from the graph structure, the first
            # 62 assign modules relate to the parameters of the char CNN.
            for i in range(1, 62):
                sess.run(f"save/Assign_{i}", {"save/Const:0": ckpt_file})

        return sess, t

    def forward(self, tokens: List[str]) -> Tensor:
        """Embed `tokens` into a (len(tokens), 1024) tensor on self.device."""
        embs = torch.zeros((len(tokens), 1024), device=self.device)

        for i, token in enumerate(tokens):
            emb = self.cnn_embs.get(token)
            if emb is None:
                # Cache miss: run the TF char CNN once for this token.
                char_ids = self.vocab.token_to_char_ids(token)
                input_dict = {self.cnn_t["char_inputs_in"]: char_ids}
                emb = torch.from_numpy(
                    self.cnn_sess.run(self.cnn_t["all_embs"], input_dict)
                ).to(self.device)
                self.cnn_embs[token] = emb
            embs[i] = emb

        return embs
class LSTM(nn.Module):
    """Multi-layer peephole LSTM with a projection layer, loaded from a TF checkpoint.

    Gate layout and the forget-gate offset follow the original model; see the
    enclosing model class for `split_order` / `forget_offset`.
    """

    def __init__(
        self,
        ckpt_dir: str,
        num_layers: int,
        split_order: List[str],
        forget_offset: int,
        device: str,
    ) -> None:
        super().__init__()
        print("Loading LSTM...")
        self.num_layers = num_layers
        self.split_order = split_order
        self.forget_offset = forget_offset
        # Projects hidden+input (2*1024) onto cell state dimension (8192)
        self.weight: LayeredTensors = {}
        self.bias: LayeredTensors = {}
        # Projects cell state dimension (8192) back to hidden dimension (1024)
        self.weight_P: LayeredTensors = {}
        # The 3 peepholes are weighted by a diagonal matrix
        self.peepholes: ActivationDict = {}
        self._load_lstm(ckpt_dir, device)

    def _load_lstm(self, ckpt_dir: str, device: str) -> None:
        """Read per-layer weights from the checkpoint and move them to `device`."""
        from tensorflow.python.pywrap_tensorflow import NewCheckpointReader

        lstm_reader = NewCheckpointReader(os.path.join(ckpt_dir, "ckpt-lstm"))
        for l in range(self.num_layers):
            # Model weights are divided into 8 chunks
            # Shape: (2048, 32768)
            self.weight[l] = torch.cat(
                [
                    torch.from_numpy(lstm_reader.get_tensor(f"lstm/lstm_{l}/W_{i}"))
                    for i in range(8)
                ],
                dim=0,
            )
            # Shape: (32768,)
            self.bias[l] = torch.from_numpy(lstm_reader.get_tensor(f"lstm/lstm_{l}/B"))
            # Shape: (8192, 1024)
            self.weight_P[l] = torch.cat(
                [
                    torch.from_numpy(lstm_reader.get_tensor(f"lstm/lstm_{l}/W_P_{i}"))
                    for i in range(8)
                ],
                dim=0,
            )
            for p in ["f", "i", "o"]:
                # Diagonal peephole weights for gate `p`.
                self.peepholes[l, p] = torch.from_numpy(
                    lstm_reader.get_tensor(f"lstm/lstm_{l}/W_{p.upper()}_diag")
                )
            # Move the loaded tensors onto the target device.
            # NOTE(review): the original comment said "cast to float32", but
            # .to(device) only transfers; dtype is whatever the checkpoint holds.
            self.weight[l] = self.weight[l].to(device)
            self.weight_P[l] = self.weight_P[l].to(device)
            self.bias[l] = self.bias[l].to(device)
            for p in ["f", "i", "o"]:
                self.peepholes[l, p] = self.peepholes[l, p].to(device)

    def forward_step(
        self, layer: int, emb: Tensor, prev_hx: Tensor, prev_cx: Tensor
    ) -> ActivationDict:
        """One LSTM cell step for `layer`; returns all intermediate activations."""
        # Joint projection of [input, hidden], then split into the 4 gates.
        proj: Tensor = torch.cat((emb, prev_hx), dim=1) @ self.weight[layer]
        proj += self.bias[layer]
        split_proj: Dict[str, Tensor] = dict(
            zip(self.split_order, torch.chunk(proj, 4, dim=1))
        )
        # Peephole connections feed the previous cell state into f and i;
        # forget_offset biases the forget gate (the usual "forget bias").
        f_g = torch.sigmoid(
            split_proj["f"] + prev_cx * self.peepholes[layer, "f"] + self.forget_offset
        )
        i_g = torch.sigmoid(split_proj["i"] + prev_cx * self.peepholes[layer, "i"])
        c_tilde_g = torch.tanh(split_proj["g"])
        cx = f_g * prev_cx + i_g * c_tilde_g
        # Output gate peeks at the *new* cell state.
        o_g = torch.sigmoid(split_proj["o"] + cx * self.peepholes[layer, "o"])
        # Project the 8192-dim cell output back to the 1024-dim hidden state.
        hx = (o_g * torch.tanh(cx)) @ self.weight_P[layer]
        return {
            (layer, "emb"): emb,
            (layer, "hx"): hx,
            (layer, "cx"): cx,
            (layer, "f_g"): f_g,
            (layer, "i_g"): i_g,
            (layer, "o_g"): o_g,
            (layer, "c_tilde_g"): c_tilde_g,
        }

    def forward(
        self, input_: Tensor, prev_activations: ActivationDict
    ) -> Tuple[Optional[Tensor], ActivationDict]:
        """Feed `input_` through all layers; return top hidden state + activations."""
        # Iteratively compute and store intermediate rnn activations
        activations: ActivationDict = {}
        for l in range(self.num_layers):
            prev_hx = prev_activations[l, "hx"]
            prev_cx = prev_activations[l, "cx"]
            activations.update(self.forward_step(l, input_, prev_hx, prev_cx))
            # The hidden state of layer l is the input of layer l+1.
            input_ = activations[l, "hx"]
        return input_, activations
class SoftMax:
    """Softmax output layer restricted to a (possibly reduced) corpus vocabulary.

    The full softmax is stored as 8 interleaved checkpoint shards; only rows
    whose word appears in `vocab` are copied into `decoder_w` / `decoder_b`.
    """

    def __init__(
        self,
        vocab: C2I,
        full_vocab_path: str,
        ckpt_dir: str,
        hidden_size_h: int,
        device: str,
    ) -> None:
        print("Loading SoftMax...")
        self.decoder_w: Tensor = torch.zeros((len(vocab), hidden_size_h), device=device)
        self.decoder_b: Tensor = torch.zeros(len(vocab), device=device)
        self._load_softmax(vocab, full_vocab_path, ckpt_dir)

    def _load_softmax(self, vocab: C2I, full_vocab_path: str, ckpt_dir: str) -> None:
        """Copy the softmax rows of in-vocabulary words from the checkpoint shards."""
        from tensorflow.python.pywrap_tensorflow import NewCheckpointReader

        with open(full_vocab_path) as f:
            full_vocab: List[str] = f.read().strip().split("\n")
        bias_reader = NewCheckpointReader(os.path.join(ckpt_dir, "ckpt-softmax8"))
        full_bias = torch.from_numpy(bias_reader.get_tensor("softmax/b"))
        # SoftMax is chunked into 8 arrays of size 100000x1024
        for i in range(8):
            sm_reader = NewCheckpointReader(os.path.join(ckpt_dir, f"ckpt-softmax{i}"))
            sm_chunk = torch.from_numpy(sm_reader.get_tensor(f"softmax/W_{i}"))
            # Shard i holds every 8th row starting at offset i.
            bias_chunk = full_bias[i : full_bias.size(0) : 8]
            vocab_chunk = full_vocab[i : full_bias.size(0) : 8]
            for j, w in enumerate(vocab_chunk):
                sm = sm_chunk[j]
                bias = bias_chunk[j]
                if w in vocab:
                    self.decoder_w[vocab[w]] = sm
                    self.decoder_b[vocab[w]] = bias
                # Map the model's special tokens onto the vocab's eos/unk slots.
                if w == "</S>":
                    self.decoder_w[vocab[vocab.eos_token]] = sm
                    self.decoder_b[vocab[vocab.eos_token]] = bias
                elif w == "<UNK>":
                    self.decoder_w[vocab[vocab.unk_token]] = sm
                    self.decoder_b[vocab[vocab.unk_token]] = bias
| StarcoderdataPython |
4996121 | from ..problem import problem
import numpy as np
class sinusoid(problem):
    """Sinusoid benchmark problem built on the generic `problem` base class.

    NOTE(review): `initialState_list` and `targetGate` are accepted but never
    forwarded to `problem.__init__`, which receives hard-coded values instead
    -- confirm whether that is intended.
    """

    def __init__(self, initialState_list, targetGate=lambda x: np.sin(x)**2, configPath='./problems/hadamard/hadamard_config.yaml', verbose=2):
        # NOTE(review): the arguments above are ignored; hard-coded test
        # state/gate are used here.
        problem.__init__(self, testState_list=[lambda x: np.sin(x)**2], testGate=lambda x: -np.sin(x),
                         configPath=configPath, verbose=verbose)
| StarcoderdataPython |
6502805 | <reponame>sbrunato/eodag
# -*- coding: utf-8 -*-
# Copyright 2021, CS GROUP - France, https://www.csgroup.eu/
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from shapely import geometry
from tests import TEST_RESOURCES_PATH
from tests.context import EODataAccessGateway, EOProduct, SearchResult
class TestCoreSearchResults(unittest.TestCase):
    """Tests for eodag SearchResult (de)serialization and grouping by extent."""

    def setUp(self):
        self.dag = EODataAccessGateway()
        self.maxDiff = None
        # Reference GeoJSON FeatureCollection containing a single S1 OCN product.
        self.geojson_repr = {
            "features": [
                {
                    "properties": {
                        "snowCover": None,
                        "resolution": None,
                        "completionTimeFromAscendingNode": "2018-02-16T00:12:14.035Z",
                        "keyword": {},
                        "productType": "OCN",
                        "downloadLink": (
                            "https://peps.cnes.fr/resto/collections/S1/"
                            "578f1768-e66e-5b86-9363-b19f8931cc7b/download"
                        ),
                        "eodag_provider": "peps",
                        "eodag_product_type": "S1_SAR_OCN",
                        "platformSerialIdentifier": "S1A",
                        "cloudCover": 0,
                        "title": "S1A_WV_OCN__2SSV_20180215T235323_"
                        "20180216T001213_020624_023501_0FD3",
                        "orbitNumber": 20624,
                        "instrument": "SAR-C SAR",
                        "abstract": None,
                        "eodag_search_intersection": {
                            "coordinates": [
                                [
                                    [89.590721, 2.614019],
                                    [89.771805, 2.575546],
                                    [89.809341, 2.756323],
                                    [89.628258, 2.794767],
                                    [89.590721, 2.614019],
                                ]
                            ],
                            "type": "Polygon",
                        },
                        "organisationName": None,
                        "startTimeFromAscendingNode": "2018-02-15T23:53:22.871Z",
                        "platform": None,
                        "sensorType": None,
                        "processingLevel": None,
                        "orbitType": None,
                        "topicCategory": None,
                        "orbitDirection": None,
                        "parentIdentifier": None,
                        "sensorMode": None,
                        "quicklook": None,
                    },
                    "id": "578f1768-e66e-5b86-9363-b19f8931cc7b",
                    "type": "Feature",
                    "geometry": {
                        "coordinates": [
                            [
                                [89.590721, 2.614019],
                                [89.771805, 2.575546],
                                [89.809341, 2.756323],
                                [89.628258, 2.794767],
                                [89.590721, 2.614019],
                            ]
                        ],
                        "type": "Polygon",
                    },
                }
            ],
            "type": "FeatureCollection",
        }
        self.search_result = SearchResult.from_geojson(self.geojson_repr)
        # Ensure that each product in a search result has geometry and search
        # intersection as a shapely geometry
        for product in self.search_result:
            product.search_intersection = geometry.shape(product.search_intersection)

    def test_core_serialize_search_results(self):
        """The core api must serialize a search results to geojson"""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
            # Serialization when the destination file is specified => goes to the
            # specified file
            path = self.dag.serialize(self.search_result, filename=f.name)
            self.assertEqual(path, f.name)
        with open(path, "r") as f:
            self.make_assertions(f)
        os.unlink(path)
        # Serialization when the destination is not specified => goes to
        # 'search_results.geojson' in the cur dir
        tmpdirname = tempfile.mkdtemp()
        current_dir = os.getcwd()
        os.chdir(tmpdirname)
        self.assertEqual(
            self.dag.serialize(self.search_result), "search_results.geojson"
        )
        os.chdir(current_dir)
        shutil.rmtree(tmpdirname)

    def test_core_deserialize_search_results(self):
        """The core api must deserialize a search result from geojson"""
        search_results_geojson_path = os.path.join(
            TEST_RESOURCES_PATH, "eodag_search_result.geojson"
        )
        search_result = self.dag.deserialize(search_results_geojson_path)
        self.assertIsInstance(search_result, SearchResult)
        with open(search_results_geojson_path, "r") as f:
            self.make_assertions(f)

    def make_assertions(self, f):
        """Check that the geojson stream `f` matches the reference representation."""
        d = json.load(f)
        self.assertEqual(d["type"], self.geojson_repr["type"])
        self.assertEqual(len(d["features"]), len(self.geojson_repr["features"]))
        feature = d["features"][0]
        self.assertEqual(feature["id"], self.geojson_repr["features"][0]["id"])
        self.assertEqual(feature["type"], self.geojson_repr["features"][0]["type"])
        self.assertDictEqual(
            feature["geometry"], self.geojson_repr["features"][0]["geometry"]
        )
        for key, value in self.geojson_repr["features"][0]["properties"].items():
            if key not in ("geometry", "id"):
                if isinstance(value, dict):
                    self.assertDictEqual(value, feature["properties"][key])
                elif isinstance(value, list):
                    self.assertListEqual(value, feature["properties"][key])
                else:
                    self.assertEqual(value, feature["properties"][key])
            else:
                self.assertEqual(value, feature[key])

    @staticmethod
    def _minimal_eoproduct_geojson_repr(eo_id, geom_coords, geom_type="Polygon"):
        """Build the smallest geojson dict EOProduct.from_geojson accepts."""
        return {
            "properties": {
                "eodag_provider": "peps",
                "eodag_product_type": "S1_SAR_OCN",
                "eodag_search_intersection": {
                    "coordinates": geom_coords,
                    "type": geom_type,
                },
            },
            "id": eo_id,
            "geometry": {"coordinates": geom_coords, "type": geom_type},
        }

    def test_group_by_extent(self):
        """Products with different extents must be grouped into distinct results."""
        geom_coords_1 = [[[89, 2], [90, 2], [90, 3], [89, 3], [89, 2]]]
        geom_coords_2 = [[[90, 3], [91, 3], [91, 4], [90, 4], [90, 3]]]
        geom_coords_3 = [[[92, 4], [92, 4], [92, 5], [91, 5], [91, 4]]]
        eo_geom1 = EOProduct.from_geojson(
            self._minimal_eoproduct_geojson_repr("1", geom_coords_1)
        )
        eo_geom2 = EOProduct.from_geojson(
            self._minimal_eoproduct_geojson_repr("2", geom_coords_2)
        )
        eo_geom3 = EOProduct.from_geojson(
            self._minimal_eoproduct_geojson_repr("3", geom_coords_3)
        )
        first_search = SearchResult([eo_geom1])
        second_search = SearchResult([eo_geom1, eo_geom2])
        third_search = SearchResult([eo_geom1, eo_geom2, eo_geom3])
        grouped_searches = EODataAccessGateway.group_by_extent(
            [first_search, second_search, third_search]
        )
        # The returned value is a List[SearchResult]
        self.assertIsInstance(grouped_searches, list)
        self.assertTrue(all(isinstance(sr, SearchResult) for sr in grouped_searches))
        # We expect three groups because we have given products that have
        # three different geometry bounds.
        self.assertEqual(len(grouped_searches), 3)
        # Given how the search results were constructed the resulting groups
        # must have these 3 different lengths.
        ss_len = [len(sr) for sr in grouped_searches]
        self.assertIn(1, ss_len)
        self.assertIn(2, ss_len)
        self.assertIn(3, ss_len)
| StarcoderdataPython |
110955 | <filename>tripleohelper/provisioners/openstack/utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystoneauth1.exceptions.connection
from keystoneauth1 import loading as ks_loading
import keystoneauth1.session
import neutronclient.v2_0
from novaclient import client as nova_client
import logging
import time
LOG = logging.getLogger('tripleohelper')
def ks_session(auth_url, username, password, project_id):
    """Build a Keystone session authenticated via the password plugin."""
    plugin_loader = ks_loading.get_plugin_loader('password')
    credentials = {
        'auth_url': auth_url,
        'username': username,
        'password': password,
        'project_id': project_id,
    }
    return keystoneauth1.session.Session(
        auth=plugin_loader.load_from_options(**credentials)
    )
def _get_id_by_attr(resources, attr, value):
    """Return the id of the first resource whose `attr` equals `value`, else None."""
    matching = next(
        (res for res in resources if getattr(res, attr) == value), None
    )
    return matching.id if matching is not None else None
def build_nova_api(sess):
    """Create a Nova (compute) API client (v2) bound to the Keystone session."""
    return nova_client.Client(2, session=sess)
def build_neutron_client(sess):
    """Create a Neutron (network) v2 client from the session's endpoint/token."""
    return neutronclient.v2_0.client.Client(
        endpoint_url=sess.get_endpoint(service_type='network'),
        token=sess.get_token())
def get_image_id(nova_api, image_name):
    """Resolve an image name to its id (None when no image matches)."""
    return _get_id_by_attr(nova_api.images.list(), 'name', image_name)
def get_flavor_id(nova_api, flavor_name):
    """Resolve a flavor name to its id (None when no flavor matches)."""
    return _get_id_by_attr(nova_api.flavors.list(), 'name', flavor_name)
def get_keypair_id(nova_api, keypair_name):
    """Resolve a keypair name to its id (None when no keypair matches)."""
    return _get_id_by_attr(nova_api.keypairs.list(), 'name', keypair_name)
def get_network_id(nova_api, network_name):
    """Return the id of the network labelled `network_name`, or None.

    Keystone intermittently drops connections, so the network listing is
    retried (up to 100 times, 1 second apart) before giving up.
    """
    last_error = None
    for _ in range(100):
        try:
            networks = nova_api.networks.list()
        except keystoneauth1.exceptions.connection.ConnectFailure as e:
            last_error = e
            time.sleep(1)
        else:
            break
    else:
        # Bug fix: previously `networks` stayed None after 100 failures and
        # _get_id_by_attr then crashed with an unrelated TypeError; re-raise
        # the real connection error instead.
        raise last_error
    return _get_id_by_attr(networks, 'label', network_name)
def get_floating_ip(nova_api, ip=None):
    """Return a floating IP record.

    For each record in listing order: return it when it matches the requested
    `ip`, or when it is unattached (no instance and no fixed ip). Returns None
    when nothing qualifies.
    """
    for candidate in nova_api.floating_ips.list():
        wanted = bool(ip) and ip == candidate.ip
        unattached = candidate.instance_id is None and candidate.fixed_ip is None
        if wanted or unattached:
            return candidate
    return None
def add_a_floating_ip(nova_api, os_instance, floating_ip=None):
    """Attach a floating IP (a specific one, or any free one) to the instance.

    Returns the attached IP address as a string.
    """
    floating_ip = get_floating_ip(nova_api, floating_ip)
    os_instance.add_floating_ip(floating_ip.ip)
    LOG.info("floating ip '%s' attached to '%s'" % (floating_ip.ip, os_instance.name))
    return floating_ip.ip
def add_security_groups(os_instance, security_groups):
    """Attach each named security group to the instance, in order."""
    for group_name in security_groups:
        os_instance.add_security_group(group_name)
def remove_instances_by_prefix(nova_api, prefix):
    """Delete every server whose name starts with `prefix`."""
    for server in nova_api.servers.list():
        if server.name.startswith(prefix):
            LOG.info("Remove instance '%s'" % server.name)
            server.delete()
242702 | <gh_stars>1-10
from .BinBang import *
class RawWireCfg:
    """Bit flags for the Bus Pirate raw-wire peripheral configuration command."""
    NA = 0x01      # flag 0x01 (semantics not shown here)
    LSB = 0x02     # least-significant-bit-first bit order
    _3WIRE = 0x04  # 3-wire mode
    OUTPUT = 0x08  # pin output mode
class RawWire(BBIO):
    """Bus Pirate raw-wire (bitbang) mode driver.

    Every primitive sends a single command byte over the serial port, waits
    briefly, and returns the Bus Pirate's one-byte status response. The
    repeated write/timeout/response boilerplate is factored into
    `_simple_command`.
    """

    def __init__(self, port, speed):
        BBIO.__init__(self, port, speed)

    def _simple_command(self, cmd):
        """Send `cmd` on the port, wait 0.1s, return the 1-byte reply."""
        self.port.write(cmd)
        self.timeout(0.1)
        return self.response(1)

    def start_bit(self):
        """Issue a start-bit condition."""
        return self._simple_command("\x02")

    def stop_bit(self):
        """Issue a stop-bit condition."""
        return self._simple_command("\x03")

    def cs_low(self):
        """Drive chip-select low."""
        return self._simple_command("\x04")

    def cs_high(self):
        """Drive chip-select high."""
        return self._simple_command("\x05")

    def read_byte(self):
        """Read one byte from the wire."""
        return self._simple_command("\x06")

    def read_bit(self):
        """Read one bit from the wire."""
        return self._simple_command("\x07")

    def peek(self):
        """Peek at the input without consuming it."""
        return self._simple_command("\x08")

    def clock_tick(self):
        """Emit a single clock tick."""
        return self._simple_command("\x09")

    def clock_low(self):
        """Drive the clock line low."""
        return self._simple_command("\x0A")

    def clock_high(self):
        """Drive the clock line high."""
        return self._simple_command("\x0B")

    def data_low(self):
        """Drive the data line low."""
        return self._simple_command("\x0C")

    def data_high(self):
        """Drive the data line high."""
        return self._simple_command("\x0D")

    def wire_cfg(self, pins=0):
        """Configure the raw-wire peripheral; `pins` is an OR of RawWireCfg flags."""
        # NOTE(review): unlike the string commands above, this writes an int
        # -- confirm the underlying port accepts integer payloads.
        return self._simple_command(0x80 | pins)

    def bulk_clock_ticks(self, ticks=1):
        """Emit `ticks` clock ticks (1-16) with a single bulk command."""
        # NOTE(review): integer payload here as well; see wire_cfg.
        return self._simple_command(0x20 | (ticks - 1))
5139978 | '''
Create a function sum_list_values that takes a list parameter and returns the sum of all the numeric values in the list.
Sample Data
joe 10 15 20 30 40
bill 23 16 19 22
sue 8 22 17 14 32 17 24 21 2 9 11 17
grace 12 28 21 45 26 10
john 14 32 25 16 89
'''
def sum_list_values(data_list):
    """Return the sum of the numeric values in `data_list`.

    The first element (index 0) is assumed to be a non-numeric label
    (e.g. a person's name, as in the sample data above) and is skipped.
    """
    # Idiomatic replacement for the original manual index/while loop.
    return sum(data_list[1:])
| StarcoderdataPython |
# Print a Fibonacci sequence, terms separated by "; ".
n = int(input('Informe quantos elementos deseja: '))  # number of terms requested
c = 0           # loop counter
actual = 1      # current term
anterior = 0    # previous term
# NOTE(review): "0; 1" is printed up front and the loop then adds n more
# terms, so n + 2 numbers are shown in total -- confirm that is the intent.
print('0; 1', end = '; ')
while c < n:
    proximo = actual + anterior  # next Fibonacci term
    print(proximo, end = '; ')
    anterior = actual
    actual = proximo
    c += 1
print('FIM')
1939087 | <filename>pycharm2020.1.3/script/core/common/EntityFactory.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
管理所有的Entity创建的工厂类
"""
from ..mobilelog.LogManager import LogManager
# from Md5OrIndexCodec import Md5OrIndexDecoder
# from mobilecommon import extendabletype
# from RpcIndex import RpcIndexer
from ..util.UtilApi import Singleton
@Singleton
class EntityFactory(object):
    """Factory that manages creation of every registered Entity type.

    Entity classes register themselves under a type name and are later
    instantiated through `create_entity`, either by name or by class.
    """

    def __init__(self):
        # Logger for EntityFactory.
        self.logger = LogManager.get_logger("server.EntityFactory")
        # Registered classes, keyed by entity type name.
        self.entity_classes = {}

    def register_entity(self, entitytype, entityclass):
        """Register an entity class under the type name `entitytype`."""
        self.entity_classes[entitytype] = entityclass

    def get_entity_class(self, entitytype):
        """Resolve `entitytype` (a registered name or a class object) to a class.

        Returns None when the name is unknown.
        """
        EntityClass = None
        if isinstance(entitytype, str):
            EntityClass = self.entity_classes.get(entitytype, None)
        elif isinstance(entitytype, type):
            EntityClass = entitytype
        return EntityClass

    def create_entity(self, entitytype, entityid=None):
        """Create an entity instance, optionally passing an explicit id.

        Returns None (and logs an error) when the type cannot be resolved.
        """
        EntityClass = self.get_entity_class(entitytype)
        if not EntityClass:
            self.logger.error("failed to create entity for type %s id %s", str(entitytype), str(entityid))
            return None
        # Bug fix: compare against None by identity, not equality.
        if entityid is None:
            return EntityClass()
        else:
            return EntityClass(entityid)
| StarcoderdataPython |
4873107 | <reponame>murrple-1/rss_temple
import datetime
import logging
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound
from django.db import transaction
from django.conf import settings
from django.dispatch import receiver
from django.core.signals import setting_changed
import argon2
import ujson
import validators
from api.exceptions import QueryException
from api import query_utils, models
from api.context import Context
from api.password_hasher import password_hasher
from api.third_party_login import google, facebook
from api.render import verify as verifyrender
_logger = logging.getLogger('rss_temple')
_OBJECT_NAME = 'user'
_USER_VERIFICATION_EXPIRY_INTERVAL = None
@receiver(setting_changed)
def _load_global_settings(*args, **kwargs):
    """Refresh module-level settings; re-run whenever Django settings change."""
    global _USER_VERIFICATION_EXPIRY_INTERVAL
    _USER_VERIFICATION_EXPIRY_INTERVAL = settings.USER_VERIFICATION_EXPIRY_INTERVAL

# Initialise the module-level settings at import time.
_load_global_settings()
def user(request):
    """Dispatch /user requests: GET returns the profile, PUT updates it."""
    handlers = {'GET': _user_get, 'PUT': _user_put}
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponseNotAllowed(set(handlers))  # pragma: no cover
    return handler(request)
def user_verify(request):
    """Dispatch /user/verify requests (POST only)."""
    permitted_methods = {'POST'}
    if request.method not in permitted_methods:
        return HttpResponseNotAllowed(permitted_methods)  # pragma: no cover
    if request.method == 'POST':
        return _user_verify_post(request)
def user_attributes(request):
    """Dispatch /user/attributes requests (PUT only)."""
    permitted_methods = {'PUT'}
    if request.method not in permitted_methods:
        return HttpResponseNotAllowed(permitted_methods)  # pragma: no cover
    if request.method == 'PUT':
        return _user_attributes_put(request)
def _user_get(request):
    """Serialize the authenticated user, honouring the `fields` query params."""
    context = Context()
    context.parse_request(request)
    context.parse_query_dict(request.GET)
    user = request.user
    field_maps = None
    try:
        fields = query_utils.get_fields__query_dict(request.GET)
        field_maps = query_utils.get_field_maps(fields, _OBJECT_NAME)
    except QueryException as e:  # pragma: no cover
        return HttpResponse(e.message, status=e.httpcode)
    ret_obj = query_utils.generate_return_object(field_maps, user, context)
    content, content_type = query_utils.serialize_content(ret_obj)
    return HttpResponse(content, content_type)
def _user_put(request):
    """Update the authenticated user from a JSON body.

    Supported keys: 'email' (triggers a fresh verification token + email),
    'my' -> 'password' (old/new password change), 'google' and 'facebook'
    (link with token, or null to unlink). All DB writes happen in a single
    transaction at the end, and only when something actually changed.
    """
    if not request.body:
        return HttpResponseBadRequest('no HTTP body')  # pragma: no cover
    json_ = None
    try:
        json_ = ujson.loads(request.body)
    except ValueError:  # pragma: no cover
        return HttpResponseBadRequest('HTTP body cannot be parsed')
    if type(json_) is not dict:
        return HttpResponseBadRequest('JSON body must be object')  # pragma: no cover
    user = request.user
    has_changed = False
    verification_token = None
    if 'email' in json_:
        if type(json_['email']) is not str:
            return HttpResponseBadRequest('\'email\' must be string')
        if not validators.email(json_['email']):
            return HttpResponseBadRequest('\'email\' malformed')  # pragma: no cover
        if user.email != json_['email']:
            # Reject addresses already taken by another account.
            if models.User.objects.filter(email=json_['email']).exists():
                return HttpResponse('email already in use', status=409)
            user.email = json_['email']
            # A changed email must be re-verified.
            verification_token = models.VerificationToken(user=user, expires_at=(
                datetime.datetime.utcnow() + _USER_VERIFICATION_EXPIRY_INTERVAL))
            has_changed = True
    my_login = None
    if 'my' in json_:
        my_json = json_['my']
        if type(my_json) is not dict:
            return HttpResponseBadRequest('\'my\' must be object')
        my_login = user.my_login()
        if 'password' in my_json:
            password_json = my_json['password']
            if type(password_json) is not dict:
                return HttpResponseBadRequest('\'password\' must be object')
            if 'old' not in password_json:
                return HttpResponseBadRequest('\'old\' missing')
            if type(password_json['old']) is not str:
                return HttpResponseBadRequest('\'old\' must be string')
            if 'new' not in password_json:
                return HttpResponseBadRequest('\'new\' missing')
            if type(password_json['new']) is not str:
                return HttpResponseBadRequest('\'new\' must be string')
            # The old password must verify before a new hash is stored.
            try:
                password_hasher().verify(
                    my_login.pw_hash, password_json['old'])
            except argon2.exceptions.VerifyMismatchError:
                return HttpResponseForbidden()
            my_login.pw_hash = password_hasher().hash(password_json['new'])
            has_changed = True
    # DB action for the Google login is deferred into a closure so it only
    # runs inside the final transaction.
    google_login_db_fn = None
    if 'google' in json_:
        google_json = json_['google']
        if google_json is None:
            # null unlinks the Google account.
            def google_login_db_fn(): return _google_login_delete(user)
            has_changed = True
        elif type(google_json) is dict:
            google_login = None
            try:
                google_login = models.GoogleLogin.objects.get(user=user)
            except models.GoogleLogin.DoesNotExist:
                google_login = models.GoogleLogin(user=user)
            def google_login_db_fn(): return _google_login_save(google_login)
            if 'token' in google_json:
                if type(google_json['token']) is not str:
                    return HttpResponseBadRequest('\'token\' must be string')
                try:
                    google_login.g_user_id = google.get_id(
                        google_json['token'])
                except ValueError:  # pragma: no cover
                    return HttpResponseBadRequest('bad Google token')
                has_changed = True
        else:
            return HttpResponseBadRequest('\'google\' must be object or null')
    # Same deferred-closure pattern for the Facebook login.
    facebook_login_db_fn = None
    if 'facebook' in json_:
        facebook_json = json_['facebook']
        if facebook_json is None:
            def facebook_login_db_fn(): return _facebook_login_delete(user)
            has_changed = True
        elif type(facebook_json) is dict:
            facebook_login = None
            try:
                facebook_login = models.FacebookLogin.objects.get(user=user)
            except models.FacebookLogin.DoesNotExist:
                facebook_login = models.FacebookLogin(user=user)
            def facebook_login_db_fn(): return _facebook_login_save(facebook_login)
            if 'token' in facebook_json:
                if type(facebook_json['token']) is not str:
                    return HttpResponseBadRequest('\'token\' must be string')
                try:
                    facebook_login.profile_id = facebook.get_id(
                        facebook_json['token'])
                except ValueError:  # pragma: no cover
                    return HttpResponseBadRequest('bad Facebook token')
                has_changed = True
        else:
            return HttpResponseBadRequest('\'facebook\' must be object or null')
    if has_changed:
        with transaction.atomic():
            user.save()
            if my_login is not None:
                my_login.save()
            if google_login_db_fn is not None:
                google_login_db_fn()
            if facebook_login_db_fn is not None:
                facebook_login_db_fn()
            if verification_token is not None:
                # Replace any pending token, then queue the verification email.
                models.VerificationToken.objects.filter(user=user).delete()
                verification_token.save()
                token_str = verification_token.token_str()
                subject = verifyrender.subject()
                plain_text = verifyrender.plain_text(token_str)
                html_text = verifyrender.html_text(token_str)
                email_queue_entry = models.NotifyEmailQueueEntry.objects.create(
                    subject=subject, plain_text=plain_text, html_text=html_text)
                models.NotifyEmailQueueEntryRecipient.objects.create(
                    type=models.NotifyEmailQueueEntryRecipient.TYPE_TO, email=json_['email'], entry=email_queue_entry)
    return HttpResponse(status=204)
def _google_login_save(google_login):
    """Persist the (possibly new) GoogleLogin row."""
    google_login.save()
def _google_login_delete(user):
    """Remove any GoogleLogin linked to `user`."""
    models.GoogleLogin.objects.filter(user=user).delete()
def _facebook_login_save(facebook_login):
    """Persist the (possibly new) FacebookLogin row."""
    facebook_login.save()
def _facebook_login_delete(user):
    """Remove any FacebookLogin linked to `user`."""
    models.FacebookLogin.objects.filter(user=user).delete()
def _user_verify_post(request):
    """Consume a posted verification token, confirming the user's email."""
    token_str = request.POST.get('token')
    if token_str is None:
        return HttpResponseBadRequest('\'token\' missing')
    token = models.VerificationToken.find_by_token(token_str)
    if token is None:
        return HttpResponseNotFound('token not found')
    # Deleting the token marks the verification as completed.
    token.delete()
    return HttpResponse(status=204)
def _user_attributes_put(request):
    """Merge the JSON body into the user's attributes.

    Keys whose merged value is None are removed (so a client sends null to
    delete an attribute).
    """
    if not request.body:
        return HttpResponseBadRequest('no HTTP body')  # pragma: no cover
    json_ = None
    try:
        json_ = ujson.loads(request.body)
    except ValueError:  # pragma: no cover
        return HttpResponseBadRequest('HTTP body cannot be parsed')
    if type(json_) is not dict:
        return HttpResponseBadRequest('JSON body must be object')  # pragma: no cover
    user = request.user
    user.attributes.update(json_)
    # Collect keys first so the dict is not mutated while iterating
    # (idiomatic replacement for the original set-building loops).
    for key in [k for k, v in user.attributes.items() if v is None]:
        del user.attributes[key]
    user.save(update_fields=['attributes'])
    return HttpResponse(status=204)
| StarcoderdataPython |
3478680 | # test-script for QUTest unit testing harness
# see https://www.state-machine.com/qtools/qutest.html
# preamble...
def on_setup():
    # Runs before each test: the target must report the fixture-setup record.
    expect("@timestamp FIXTURE_SETUP")
def on_teardown():
    # Runs after each test: the target must report the fixture-teardown record.
    expect("@timestamp FIXTURE_TEARDOWN")
# tests...
test("FP output")
command("COMMAND_Z", 0, 3, 7)
expect("@timestamp COMMAND_Z 4e-01 -6e+23")
expect("@timestamp Trg-Done QS_RX_COMMAND")
# -3 is masked to its unsigned 32-bit representation before being sent
command("COMMAND_Z", 4, (-3 & 0xFFFFFFFF), 7)
expect("@timestamp COMMAND_Z -4.2857e-01 -6.0221e+23")
expect("@timestamp Trg-Done QS_RX_COMMAND")
| StarcoderdataPython |
4800921 | from .clients import CharityClient, ApiKeyClient
from .helpers import InvalidAPIVersionError
DEFAULT_BASE_URL = "https://charitybase.uk/api"
DEFAULT_API_VERSION = 'v4.0.0'
SUPPORTED_API_RANGES = [
"v4.0.x"
]
class CharityBase:
    """Entry point for the CharityBase API: holds shared config and sub-clients."""

    def __init__(self, apiKey, baseUrl=None):
        # A single config dict is shared (by reference) with both clients.
        self.config = {"apiKey": apiKey}
        self.config["baseUrl"] = baseUrl or DEFAULT_BASE_URL
        self.config["apiVersion"] = DEFAULT_API_VERSION
        self.charity = CharityClient(self.config)
        self.apiKey = ApiKeyClient(self.config)

    def getApiVersion(self):
        """Return the currently configured API version string."""
        return self.config["apiVersion"]

    def setApiVersion(self, version):
        """Set the API version, rejecting values that fail validation."""
        if not self._validate_api_version(version):
            raise InvalidAPIVersionError(
                "{} not in supported API versions: {}".format(version, ", ".join(SUPPORTED_API_RANGES))
            )
        self.config["apiVersion"] = version

    def _validate_api_version(self, version):
        # @TODO python semver libraries don't currently support `.x` syntax
        # when it does this can replicate the proper check found in <https://github.com/charity-base/charity-base-client-js/blob/master/src/index.js#L8-L16>
        # - see <https://github.com/rbarrois/python-semanticversion/issues/66>
        # Placeholder: any version that is non-empty after stripping 'v' passes.
        return version.strip('v')
| StarcoderdataPython |
3267192 | <gh_stars>10-100
from setuptools import setup
# Package metadata for noisemix, a text-dataset noise/augmentation library.
setup(name='noisemix',
      version='0.1',
      description='NoiseMix is a library for data generation for text datasets.',
      url='https://gitlab.com/hetazotutyun/NoiseMix',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['noisemix'],
      zip_safe=False)
3328244 | <reponame>DongjaeJang/Deep-Knowledge-Tracing
import torch.nn as nn
import torch
import torch.nn.functional as F
def get_criterion(pred, target, args):
    """Build the training loss for a (batch, seq) prediction tensor.

    Depending on `args.loss`, returns plain BCE, the roc_star surrogate AUC
    loss, or their sum. The roc_star variants require at least one completed
    epoch (args.epoch > 0) since they depend on the previous epoch's
    targets/predictions (args.last_target / args.last_predict).
    """
    loss = nn.BCELoss(reduction="none")
    if args.loss == 'both' and args.epoch > 0:
        bce_loss = loss(pred, target)
        # Only the last position of each sequence contributes to the BCE term.
        bce_loss = bce_loss[:, -1]
        bce_loss = torch.mean(bce_loss)
        l = roc_star_loss(target, pred, args.gamma, args.last_target, args.last_predict) + bce_loss
    elif args.loss == 'roc_star' and args.epoch > 0:
        l = roc_star_loss(target, pred, args.gamma, args.last_target, args.last_predict)
    else:
        # Default: unreduced element-wise BCE.
        l = loss(pred, target)
    return l
def roc_star_loss(_y_true, y_pred, gamma, _epoch_true, epoch_pred):
    """
    Nearly direct loss function for AUC.
    See article,
    <NAME>, "Roc-star : An objective function for ROC-AUC that actually works."
    https://github.com/iridiumblue/articles/blob/master/roc_star.md
    _y_true: `Tensor`. Targets (labels). Float either 0.0 or 1.0 . (batch_size, max_seq_len)
    y_pred: `Tensor` . Predictions. (batch_size, max_seq_len)
    gamma : `Float` Gamma, as derived from last epoch.
    _epoch_true: `Tensor`. Targets (labels) from last epoch.
    epoch_pred : `Tensor`. Predicions from last epoch.
    """
    # Convert labels to boolean; only the last sequence position is scored.
    y_true = (_y_true >= 0.50)[:, -1]
    epoch_true = (_epoch_true >= 0.50)
    # If the batch is all-positive or all-negative, return a small stub value
    # (AUC is undefined in that case).
    if torch.sum(y_true) == 0 or torch.sum(y_true) == y_true.shape[0]: return torch.sum(y_pred) * 1e-8
    y_pred = y_pred[:, -1]
    pos = y_pred[y_true]
    neg = y_pred[~y_true]
    epoch_pos = epoch_pred[epoch_true]
    epoch_neg = epoch_pred[~epoch_true]
    # Take random subsamples of the training set, both positive and negative.
    max_pos = 10000  # Max number of positive training samples
    max_neg = 10000  # Max number of positive training samples
    cap_pos = epoch_pos.shape[0]
    cap_neg = epoch_neg.shape[0]
    epoch_pos = epoch_pos[torch.rand_like(epoch_pos) < max_pos / cap_pos]
    # NOTE(review): the negative subsample divides by cap_pos, not cap_neg --
    # possibly inherited from the upstream implementation; confirm intent.
    epoch_neg = epoch_neg[torch.rand_like(epoch_neg) < max_neg / cap_pos]
    ln_pos = pos.shape[0]
    ln_neg = neg.shape[0]
    # Sum positive batch elements against (subsampled) negative elements.
    if ln_pos > 0:
        pos_expand = pos.view(-1, 1).expand(-1, epoch_neg.shape[0]).reshape(-1)
        neg_expand = epoch_neg.repeat(ln_pos)
        # Squared hinge on (neg - pos + gamma): penalize mis-ordered pairs.
        diff2 = neg_expand - pos_expand + gamma
        l2 = diff2[diff2 > 0]
        m2 = l2 * l2
        len2 = l2.shape[0]
    else:
        m2 = torch.tensor([0], dtype=torch.float).cuda()
        len2 = 0
    # Similarly, compare negative batch elements against (subsampled) positive elements.
    if ln_neg > 0:
        pos_expand = epoch_pos.view(-1, 1).expand(-1, ln_neg).reshape(-1)
        neg_expand = neg.repeat(epoch_pos.shape[0])
        diff3 = neg_expand - pos_expand + gamma
        l3 = diff3[diff3 > 0]
        m3 = l3 * l3
        len3 = l3.shape[0]
    else:
        m3 = torch.tensor([0], dtype=torch.float).cuda()
        len3 = 0
    # Normalize by the subsample caps; fall back to the raw sum when zero.
    if (torch.sum(m2) + torch.sum(m3)) != 0:
        res2 = torch.sum(m2) / max_pos + torch.sum(m3) / max_neg
    else:
        res2 = torch.sum(m2) + torch.sum(m3)
    # Guard against NaNs from degenerate subsamples.
    res2 = torch.where(torch.isnan(res2), torch.zeros_like(res2), res2)
    return res2
def epoch_update_gamma(y_true,y_pred, epoch=-1,delta=1):
    """
    Calculate gamma from last epoch's targets and predictions.
    Gamma is updated at the end of each epoch.
    y_true: `Tensor`. Targets (labels). Float either 0.0 or 1.0 .
    y_pred: `Tensor` . Predictions.
    epoch: `int`. Epoch index; for the initial call (epoch == -1, the
        default) the hard-coded default gamma is returned instead of the
        estimated one.
    delta: `int`. Controls how deep into the sorted correctly-ordered
        margins gamma is taken (DELTA = delta + 1 multiples of the count
        of wrongly-ordered pairs).
    """
    DELTA = delta+1
    SUB_SAMPLE_SIZE = 2000.0
    # Split predictions by their class label.
    pos = y_pred[y_true==1]
    neg = y_pred[y_true==0] # yo pytorch, no boolean tensors or operators? Wassap?
    # subsample the training set for performance
    # (keeps ~SUB_SAMPLE_SIZE elements of each class in expectation)
    cap_pos = pos.shape[0]
    cap_neg = neg.shape[0]
    pos = pos[torch.rand_like(pos) < SUB_SAMPLE_SIZE/cap_pos]
    neg = neg[torch.rand_like(neg) < SUB_SAMPLE_SIZE/cap_neg]
    ln_pos = pos.shape[0]
    ln_neg = neg.shape[0]
    # All pairwise differences neg - pos (cartesian product via expand/repeat).
    pos_expand = pos.view(-1,1).expand(-1,ln_neg).reshape(-1)
    neg_expand = neg.repeat(ln_pos)
    diff = neg_expand - pos_expand
    ln_All = diff.shape[0]
    Lp = diff[diff>0] # because we're taking positive diffs, we got pos and neg flipped.
    ln_Lp = Lp.shape[0]-1
    # Magnitudes of the correctly-ordered pairs (pos > neg), sorted ascending.
    diff_neg = -1.0 * diff[diff<0]
    diff_neg = diff_neg.sort()[0]
    ln_neg = diff_neg.shape[0]-1  # NOTE: deliberately reuses/overwrites ln_neg from above
    ln_neg = max([ln_neg, 0])
    # Index gamma DELTA * (#wrong-ordered pairs) into the sorted correct
    # margins, clamped to a valid index range.
    left_wing = int(ln_Lp*DELTA)
    left_wing = max([0,left_wing])
    left_wing = min([ln_neg,left_wing])
    # NOTE(review): assumes a CUDA device is available -- crashes on CPU-only hosts.
    default_gamma=torch.tensor(0.2, dtype=torch.float).cuda()
    if diff_neg.shape[0] > 0 :
       gamma = diff_neg[left_wing]
    else:
       gamma = default_gamma # default=torch.tensor(0.2, dtype=torch.float).cuda() #zoink
    L1 = diff[diff>-1.0*gamma]  # computed but unused below
    ln_L1 = L1.shape[0]
    if epoch > -1 :
        return gamma
    else :
        return default_gamma
6424540 | <filename>source/api/app.py
"""
Hosts the main application, routing endpoints to their desired controller.
@author: <NAME>
@revision: v1.4
"""
from os import getenv as env
from flask import Flask, render_template, request
from werkzeug.exceptions import HTTPException
from models import BaseModel
from controllers import *
from responses import ISOAwareEncoder
##
# CONFIGURATION
##
# Defines and configures the web server,
# database connection, and data models.
##
app = Flask("letsschedit")
app.json_encoder = ISOAwareEncoder
db = BaseModel.get_database()
db.init(
env('DATABASE'),
user=env('DB_USERNAME'),
password=env('DB_PASSWORD')
)
@app.before_request
def _db_connect():
    """ Ensures that whenever an HTTP request comes in, a db connection is dispatched
    from the pool. This is required as MySQL often kills idle connections, so we want
    a fresh one every time. """
    db.connect()
@app.teardown_request
def _db_close(exc):
    """ Ensures that whenever a request is finished being processed, the open connection is
    closed and returned to the pool for reuse.
    exc: exception raised while handling the request, if any; unused here. """
    if not db.is_closed():
        db.close()
@app.errorhandler(HTTPException)
def _request_failed(e):
    """ Displays an error page if something goes wrong somewhere, either on purpose or
    accidentally. Error message and codes are automatically passed to the status page. """
    # Render the shared status template with the HTTP error's own metadata,
    # and propagate the original status code on the response.
    return render_template("status.html", name=e.name,
        message=e.description, code=e.code), e.code
@app.after_request
def add_cors_headers(response):
    """Attach CORS headers so browsers may call this API from any origin.

    Every response gets a wildcard allow-origin; preflight (OPTIONS)
    requests additionally get the allowed methods and echo back any
    requested headers.
    """
    response.headers['Access-Control-Allow-Origin'] = '*'
    if request.method != 'OPTIONS':
        return response
    response.headers['Access-Control-Allow-Methods'] = "GET, POST, PUT, DELETE"
    requested = request.headers.get('Access-Control-Request-Headers')
    if requested:
        response.headers['Access-Control-Allow-Headers'] = requested
    return response
##
# ROUTING
##
# Define the application URLs and connect
# each URL to a specific controller, so that
# the API responds with specific actions.
##
app.add_url_rule('/cal/<string:UUID>', 'get-cal',
CalendarController.get, methods=["GET"])
app.add_url_rule('/cal/<string:UUID>/sync', 'sync-cal',
CalendarController.sync, methods=["POST"])
app.add_url_rule('/create/cal', 'create-cal',
CalendarController.new, methods=["PUT"])
## REQUIRED FOR CLI RUN ##
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
#!/usr/bin/env python
# Partition a Fluent cavity mesh across MPI ranks using fvm's PartMesh.
# Fix: `sys` was previously imported *after* its use in the CLI branch
# below (a latent NameError whenever fileBase is None), and `time` was
# imported twice.  All imports are now hoisted to the top.
import sys
import time

import fvm
import fvm.fvmparallel as fvmparallel
from numpy import *
from mpi4py import MPI
from FluentCase import FluentCase

fileBase = None
numIterations = 10
fileBase = "/home/yildirim/memosa/src/fvm/test/cav_44_tri"
#fileBase = "/home/yildirim/memosa/src/fvm/test/test_tri_500by500"
#fileBase = "/home/yildirim/memosa/src/fvm/test/cav32"
# change as needed
outfile = None
if __name__ == '__main__' and fileBase is None:
    # NOTE(review): usage() is not defined anywhere in this file; this
    # branch would raise NameError if taken.  Confirm where usage() is
    # meant to come from before relying on CLI invocation.
    if len(sys.argv) < 2:
        usage()
    fileBase = sys.argv[1]
    if len(sys.argv) == 3:
        outfile = sys.argv[2]
if outfile == None:
    outfile = fileBase+"-prism.dat"
reader = FluentCase(fileBase+".cas")
#import debug
reader.read();
fluent_meshes = reader.getMeshList()
t0 = time.time()
nmesh = MPI.COMM_WORLD.Get_size()
#print "nmesh = ", nmesh
#npart = fvmparallel.IntVector(1,nmesh) #total of distributed meshes
#etype = fvmparallel.IntVector(1,1) #triangle
npart = [nmesh]   # one partition per MPI rank
etype = [1]       # element type code: triangle
part_mesh = fvmparallel.PartMesh( fluent_meshes, npart, etype );
part_mesh.setWeightType(0);
part_mesh.setNumFlag(0);
#actions
part_mesh.partition()
part_mesh.mesh()
part_mesh.debug_print()
part_mesh.mesh_debug()
meshes = part_mesh.meshList()
t1 = time.time()
#if outfile != '/dev/stdout':
#   print '\nsolution time = %f' % (t1-t0)
332578 | <filename>Bite 37. Rewrite a for loop using recursion.py
"""Although you have to be careful using recursion it is one of those concepts you want to at least understand. It's also commonly used in coding interviews :)
In this beginner Bite we let you rewrite a simple countdown for loop using recursion. See countdown_for below, it produces the following output:
$ python countdown.py
10
9
8
7
6
5
4
3
2
1
time is up
"""
# def countdown_for(start=10):
# for i in reversed(range(1, start + 1)):
# print(i)
# print('time is up')
def countdown_recursive(start=10):
    """Print a countdown from *start* down to 1, then announce time is up.

    Recursive replacement for the loop-based countdown_for.

    Args:
        start: first number to print.  Values below 1 skip straight to
            the "time is up" message.
    """
    if start < 1:
        # Guard added: the original recursed forever (RecursionError) for
        # start <= 0; an exhausted countdown just announces the end.
        print('time is up')
        return
    print(start)
    if start == 1:
        print('time is up')
    else:
        countdown_recursive(start - 1)
countdown_recursive(start=11) | StarcoderdataPython |
9756633 | import mne
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger("mne")
logger.setLevel(logging.ERROR)
def process_resmed(file_path: str, station: str) -> pd.DataFrame:
    """process resmed files
    Args:
        file_path (str): path of the edf files
        station (str): station
    Returns:
        pd.DataFrame: returns a DataFrame with breathing data -- one row per
        sample, one (lower-cased) column per EDF channel, plus timestamp,
        time_offset and station columns
    """
    edf_file = mne.io.read_raw_edf(file_path)
    channels = edf_file.ch_names
    cols = len(channels)
    # edf_file[i] yields (data, times); data[0] is that channel's sample
    # array, so this is the number of samples in the recording.
    rows = len(edf_file[0][0][0])
    edf_data = np.zeros((rows, cols))
    for i in range(len(channels)):
        edf_data[:, i] = edf_file[channels[i]][0][0]
    edf_df = pd.DataFrame(edf_data)
    edf_df.columns = [c.lower() for c in channels]
    # Recording start time plus per-sample offsets in seconds (edf_file[0][1]
    # is the times vector of the first channel).
    edf_df["timestamp"] = edf_file.info["meas_date"]
    edf_df["time_offset"] = edf_file[0][1] - min(edf_file[0][1])
    edf_df["timestamp"] = edf_df["timestamp"] + \
        pd.to_timedelta(edf_file[0][1], "s")
    # Serialize timestamps to microsecond-precision strings.
    edf_df["timestamp"] = edf_df["timestamp"].dt.strftime(
        "%Y-%m-%d %H:%M:%S.%f")
    edf_df["station"] = station
    return edf_df
edf_file = mne.io.read_raw_edf("data/20201204_114117_0_HRD.edf")
edf_file.info
| StarcoderdataPython |
122870 | import os
import sqlite3
from flask import current_app, g
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATABASE = os.path.join(BASE_DIR, "data/database.db")
def get_db():
    """Return the per-request SQLite connection, opening it on first use.

    The handle is cached on flask's ``g`` object, so repeated calls within
    a single request share one connection.
    """
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect(DATABASE)
    return g._database
def close_connection(exception):
    # Teardown hook target: close the request-scoped connection, if one was
    # ever opened by get_db().  `exception` is supplied by Flask and unused.
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
def init_app(app):
    # Register close_connection so every app-context teardown releases the
    # request's SQLite connection.
    app.teardown_appcontext(close_connection)
| StarcoderdataPython |
4968119 | # -*- coding: utf-8 -*-
"""Views that trigger notifications or alerts via RapidPro."""
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.utils.timezone import now
from django.conf import settings
from mspray.apps.alerts.tasks import (
health_facility_catchment_hook,
so_daily_form_completion,
task_send_weekly_update_email,
daily_spray_effectiveness_task,
)
@api_view(["GET", "POST"])
def start_health_facility_catchment(request):
    """Trigger the Health Facility Catchment notification.

    Queues the Celery task asynchronously and always reports success.
    """
    health_facility_catchment_hook.delay()
    return Response({"success": True})
@api_view(["GET", "POST"])
def start_send_weekly_update_email(request):
    """Trigger weekly email.

    Queues the Celery task asynchronously and always reports success.
    """
    task_send_weekly_update_email.delay()
    return Response({"success": True})
@api_view(["GET", "POST"])
def start_so_daily_form_completion(request):
    """Trigger spray operator daily form completion notification.

    Only POST payloads queue the task (district, SO_name and
    confirmdecisionform are read from the request body); a GET returns
    success without queueing anything.
    """
    if request.method == "POST":
        district_code = request.data.get("district")
        tla_code = request.data.get("SO_name")
        confirmdecision = request.data.get("confirmdecisionform")
        so_daily_form_completion.delay(
            district_code, tla_code, confirmdecision
        )
    return Response({"success": True})
@api_view(["GET", "POST"])
def daily_spray_effectiveness(request):
    """Trigger spray area spray effectiveness notification.

    Reads ``spray_date`` from the query string (defaulting to today) and
    queues the task with the RapidPro flow id from settings.
    """
    flow_uuid = getattr(settings, "RAPIDPRO_DAILY_SPRAY_SUCCESS_FLOW_ID")
    # NOTE(review): reaches into the private ``request._request`` for the
    # underlying Django request's GET params; ``request.query_params`` is
    # the public DRF equivalent -- confirm before changing.
    spray_date = request._request.GET.get("spray_date", now().date())
    daily_spray_effectiveness_task.delay(flow_uuid, spray_date)
    return Response({"success": True})
| StarcoderdataPython |
12828925 | <gh_stars>1-10
import statistics
import pytest
from telliot_feed_examples.feeds.eth_jpy_feed import eth_jpy_median_feed
@pytest.mark.asyncio
async def test_AssetPriceFeed():
    """Retrieve median ETH/JPY price and check it against its sources."""
    v, _ = await eth_jpy_median_feed.source.fetch_new_datapoint()
    assert v is not None
    assert v > 0
    print(f"ETH/JPY Price: {v}")
    # Get list of data sources from sources dict
    source_prices = [source.latest[0] for source in eth_jpy_median_feed.source.sources]
    print(source_prices)
    # Make sure error is less than decimal tolerance.
    # Bug fix: compare the *absolute* error -- without abs(), any negative
    # deviation (v below the median), however large, passed the check.
    assert abs(v - statistics.median(source_prices)) < 10**-6
| StarcoderdataPython |
3334049 | <reponame>ckjh/shopping
# Generated by Django 2.2.2 on 2019-09-03 16:22
import admin01.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.2 (see file header).  Creates the
    # ``orders`` table (one row per order, keyed by a unique order_sn) and
    # the ``order_detail`` table (line items referencing orders.order_sn).
    # Applied migrations must stay byte-stable; do not edit operations.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Orders',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_sn', models.CharField(max_length=100, unique=True)),
                ('money', models.DecimalField(decimal_places=2, max_digits=10)),
                ('address', models.CharField(default='', max_length=255)),
                ('status', models.IntegerField(default=0)),
                ('pay_type', models.IntegerField(default=0)),
                ('code', models.CharField(default='', max_length=250)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'orders',
            },
            # Mixes in the project's admin01.models.Base helper class.
            bases=(admin01.models.Base, models.Model),
        ),
        migrations.CreateModel(
            name='OrderDetails',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, max_digits=7)),
                ('count', models.IntegerField(default=0)),
                ('pic', models.CharField(max_length=255)),
                ('order_sn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.Orders', to_field='order_sn')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'order_detail',
            },
        ),
    ]
| StarcoderdataPython |
4924731 | <filename>Chapter03/phrases.py
import nltk
import string
import csv
import json
import pandas as pd
import gensim
from langdetect import detect
import pickle
from nltk import FreqDist
from Chapter01.dividing_into_sentences import divide_into_sentences_nltk
from Chapter01.tokenization import tokenize_nltk
from Chapter01.removing_stopwords import read_in_csv
stopwords_file = "Chapter01/stopwords.csv"
stopwords = read_in_csv(stopwords_file)
yelp_reviews_file = "Chapter03/yelp-dataset/review.json"
def get_yelp_reviews(filename):
    """Return the lower-cased text of English reviews from a Yelp JSON dump.

    Reads only the first 10,000-row chunk of the line-delimited JSON file;
    each row's 'text' field is language-detected and kept only if English.

    Performance fix: accumulate parts in a list and join once, instead of
    the original quadratic ``text = text + row_text`` string concatenation.
    """
    reader = pd.read_json(filename, orient="records", lines=True, chunksize=10000)
    chunk = next(reader)
    parts = []
    for index, row in chunk.iterrows():
        row_text = row['text']
        lang = detect(row_text)
        if (lang == "en"):
            parts.append(row_text.lower())
    return ''.join(parts)
def get_phrases(text):
    """Return a mapping from multi-word phrases to underscore-joined forms.

    A "phrase" is a maximal run of consecutive tokens that are neither
    stopwords nor punctuation; only runs of two or more words are kept,
    mapped as ``"fried rice" -> "fried_rice"``.
    """
    words = nltk.tokenize.word_tokenize(text)
    phrases = {}
    current_phrase = []
    for word in words:
        if (word in stopwords or word in string.punctuation):
            if (len(current_phrase) > 1):
                phrases[" ".join(current_phrase)] = "_".join(current_phrase)
            # Bug fix: reset the buffer on *every* boundary token.  The
            # original only reset it inside the len > 1 branch, so a single
            # buffered word leaked across a stopword into the next phrase.
            current_phrase = []
        else:
            current_phrase.append(word)
    if (len(current_phrase) > 1):
        phrases[" ".join(current_phrase)] = "_".join(current_phrase)
    return phrases
def replace_phrases(phrases_dict, text):
    """Rewrite *text* so every known phrase appears in its joined form.

    *phrases_dict* maps a space-separated phrase (e.g. ``"fried rice"``)
    to its replacement (e.g. ``"fried_rice"``); each mapping is applied
    to the whole text in turn.
    """
    for spaced, joined in phrases_dict.items():
        text = text.replace(spaced, joined)
    return text
def write_text_to_file(text, filename):
    """Write *text* to *filename* as UTF-8, replacing any existing content.

    Resource fix: the context manager guarantees the handle is flushed and
    closed even if the write raises (the original leaked it on error).
    """
    with open(filename, "w", encoding="utf-8") as text_file:
        text_file.write(text)
def create_and_save_frequency_dist(word_list, filename):
    """Build an nltk FreqDist from *word_list*, pickle it to *filename*,
    and return it.

    Resource fix: use a context manager so the pickle file handle is
    always closed (the original ``pickle.dump(obj, open(...))`` left it
    dangling until garbage collection).
    """
    fdist = FreqDist(word_list)
    with open(filename, 'wb') as fh:
        pickle.dump(fdist, fh)
    return fdist
def create_and_save_word2vec_model(words, filename):
    """Train a Word2Vec model on tokenized sentences, pickle it to
    *filename*, and return it.

    Resource fix: use a context manager so the pickle file handle is
    always closed (the original left the handle from open() dangling).
    """
    model = gensim.models.Word2Vec(words, min_count=1)
    model.train(words, total_examples=model.corpus_count, epochs=400)
    with open(filename, 'wb') as fh:
        pickle.dump(model, fh)
    return model
def main():
    """Load the cached frequency distribution and phrase word2vec model,
    then print frequent phrases and nearest neighbours of sample phrases.

    The commented-out lines are the one-off preprocessing pipeline that
    produced ``fdist.bin`` and ``phrases.model``.
    """
    #text = get_yelp_reviews(yelp_reviews_file)
    #phrases = get_phrases(text)
    #text = replace_phrases(phrases, text)
    #write_text_to_file(text, "Chapter03/all_text.txt")
    #sentences = divide_into_sentences_nltk(text)
    #all_sentence_words = [tokenize_nltk(sentence.lower()) for sentence in sentences]
    #flat_word_list = [word.lower() for sentence in all_sentence_words for word in sentence]
    #fdist = create_and_save_frequency_dist(flat_word_list, "Chapter03/fdist.bin")
    # Resource fix: close the pickle file deterministically instead of
    # leaking the handle returned by open().
    with open("Chapter03/fdist.bin", "rb") as fdist_file:
        fdist = pickle.load(fdist_file)
    #write_text_to_file(str(fdist.most_common()[:1000]), "Chapter03/most_frequent_phrases.txt")
    print(fdist.most_common()[:1000])
    #model = create_and_save_word2vec_model(all_sentence_words, "Chapter03/phrases.model")
    model = gensim.models.Word2Vec.load("Chapter03/phrases.model")
    words = model.wv.most_similar("highly_recommend", topn=10)
    print(words)
    words = model.wv.most_similar("happy_hour", topn=10)
    print(words)
    words = model.wv.most_similar("fried_rice", topn=10)
    print(words)
    words = model.wv.most_similar("dim_sum", topn=10)
    print(words)
| StarcoderdataPython |
4996936 | <reponame>udhayprakash/Django_Projects<filename>DjangoTraining/IPLcricket/matches/management/commands/load_data.py
#!/usr/bin/python
"""
Purpose:
"""
from django.core.management.base import BaseCommand
import os
import csv
from IPLcricket import settings
from matches.models import MatchesPlayed, Deliveries
from datetime import datetime
from django.db.utils import DataError
import pandas as pd
def load_matches(file_name):
    """Upsert IPL match rows from a CSV in ``DOCS_PATH`` into MatchesPlayed.

    Rows are keyed on their ``id`` column: existing rows are updated,
    missing ones created; per-row DataErrors are logged and skipped.
    """
    file_path = os.path.join(settings.DOCS_PATH, file_name)
    with open(file_path, 'r') as fh:
        file_content = tuple(csv.DictReader(fh))
    print(f'total_records_count :{len(file_content):4}')
    newly_created_objs = 0
    for each_row in file_content:
        # print(each_row)
        if each_row['date']:
            # CSV dates are day-month-2-digit-year ("%d-%m-%y"); convert
            # to datetime for the model field.
            each_row['date'] = datetime.strptime(each_row['date'], '%d-%m-%y')
        try:
            # # TO create a new object
            # obj = MatchesPlayed.objects.create(**each_row)
            # obj.save()
            _, is_new_obj = MatchesPlayed.objects.update_or_create(id=each_row.pop('id'), defaults=each_row)
            if is_new_obj:
                newly_created_objs += 1
        except DataError as ex:
            # Oversized / malformed field values: report and continue.
            print(ex)
    print(f'newly_created_objs :{newly_created_objs:4}')
def load_deliveries(file_name):
    """Upsert per-delivery rows from a CSV in ``DOCS_PATH`` into Deliveries.

    The DataFrame row index (offset by one) serves as the primary key, so
    re-running the command updates rather than duplicates.
    """
    file_path = os.path.join(settings.DOCS_PATH, file_name)
    deliveries_df = pd.read_csv(file_path, delimiter=',', skip_blank_lines=True)
    print(f'total_records_count :{len(deliveries_df):4}')
    columns = tuple(deliveries_df.columns)  # NOTE(review): currently unused
    newly_created_objs = 0
    for _index, each in deliveries_df.iterrows():
        # Convert the pandas Series row to a plain dict of field values.
        each_obj = dict(each.items())
        # print(each_obj)
        try:
            _, is_new_obj = Deliveries.objects.update_or_create(id=_index+1, defaults=each_obj)
            if is_new_obj:
                newly_created_objs += 1
        except DataError as ex:
            # Oversized / malformed field values: report and continue.
            print(ex)
    print(f'newly_created_objs :{newly_created_objs:4}')
class Command(BaseCommand):
    # Management command: ``manage.py <command> <file_name>`` loads either
    # matches.csv or deliveries.csv into the database; any other file name
    # is silently ignored.
    # NOTE(review): the help text below does not match what the command
    # does -- it looks copy-pasted from another command.
    help = 'Create random users'
    def add_arguments(self, parser):
        # Single positional argument: the CSV file to load from DOCS_PATH.
        parser.add_argument('file_name', type=str, help='file to load')
    def handle(self, *args, **kwargs):
        file_name = kwargs.get('file_name')
        if file_name == 'matches.csv':
            load_matches(file_name)
        elif file_name == 'deliveries.csv':
            load_deliveries(file_name)
| StarcoderdataPython |
3351096 | <gh_stars>10-100
import qq
class MyClient(qq.Client):
    """QQ guild bot that greets new members in a hard-coded channel."""
    async def on_ready(self):
        # Startup hook: log which bot account is connected.
        print(f'以 {self.user} 身份登录(ID:{self.user.id})')
        print('------')
    async def on_member_join(self, member: qq.Member):
        # Look the welcome channel up in the cache first, then fall back to
        # fetching it from the API.  114514 is the hard-coded channel ID.
        channel = member.guild.get_channel(114514)
        if channel is None:
            channel = await member.guild.fetch_channel(114514)  # channel ID
        if channel is not None:
            # Welcome message (Chinese): "Welcome {member} to {guild}!"
            to_send = f'欢迎 {member.mention} 加入 {member.guild.name}!'
            await channel.send(to_send)
intents = qq.Intents.default()
intents.members = True
client = MyClient(intents=intents)
client.run('token')
| StarcoderdataPython |
159090 | import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_predict,cross_val_score,train_test_split
from sklearn.metrics import classification_report,confusion_matrix,roc_curve,auc,precision_recall_curve,roc_curve
import pickle
#raw_df = pd.read_csv("/home/terrence/CODING/Python/MODELS/Credit_Union_PDs/default_data.csv", encoding="latin-1")
myfile = "/home/terrence/CODING/Python/MODELS/Credit_Union_PDs/Test Variables READY.xlsx"
raw_df = pd.read_excel(myfile, sheet_name = 'Data', header = 0)
print(raw_df.shape)
#raw_df.dropna(inplace = True)
#print(raw_df.shape)
#print(raw_df.columns.values)
'''
[u'Loan Number' u'Loan Type Description' u'Balance' u'Loan Term' u'Interest Rate' u'Origination Date' u'Origination Month'
u'Most Recent Credit Score' u'AmountFunded' u'MonthlyIncomeBaseSalary' u'TotalMonthlyIncome' u'MonthlyIncomeOther'
u'Collateral Current Valuation' u'LTV' u'Number of Days Delinquent' u'Days Late T or F' u'Balance.1' u'Days 11-15 Delinquent'
u'Days 16-20 Delinquent' u'Days 21-29 Delinquent' u'Days 30-44 Delinquent' u'Days 45-59 Delinquent' u'Days 60-179 Delinquent'
u'Days 180-359 Days Delinquent' u'Days 360+ Delinquent' u'Days Delinquent T or F' u'Grade Overall' u'Original Loan Amount'
u'Current Credit Limit' u'Maturity Date' u'Maturity Month' u'Original Credit Score' u'LTV-Original' u'Probability of Default'
u'Branch' u'Loan Officer' u'Underwriter' u'Loan Type Code' u'Loan Category' u'Auto Dealer' u'Primary Customer City' u'Status'
u'Updated Credit Score' u'Original Interest Rate' u'LTV (Effective)' u'LTV-Original (Effective)' u'LTV-Original Total Commitments'
u'LTV-Total Commitments' u'LTV-Total Commitments (Effective)' u'LTV-Total Commitments-Original (Effective)'
u'Grade by Most Recent Credit Score' u'Grade by Cerdit Score (ORIGINAL)' u'GRADE BY CREDIT SCORE (UPDATED)' u'JointTotalMonthlyIncome'
u'JointProfessionMonths' u'JointCity' u'JointApplicantType' u'JointMonthlyIncomeBaseSalary' u'JointMonthlyIncomeOther'
u'JointMonthlyIncomeOtherDescription1' u'JointOccupation' u'IndCity' u'IndMonthlyIncomeBaseSalary' u'IndMonthlyIncomeOther'
u'IndTotalMonthlyIncome' u'IndMonthlyIncomeOtherDescription1' u'PaymentAmount' u'PaymentFrequency' u'Insurance' u'DueDay1' u'DueDay2'
u'PaymentMethodText' u'SymitarPurposeCode' u'ApprovedLTV' u'FundedLTV' u'PaymentToIncome' u'NumberOfOpenRevolvingAccounts' u'AmountApproved'
u'AmountFunded.1' u'AmountOwedToLender' u'DOB' u'DOB.1' u'DOB.2' u'AGE' u'AGE of BORROWER' u'JointDOB' u'Year' u'Year.1' u'AGE OF JOINT'
u'AGE OF JOINT.1' u'IndDOB' u'YEAR' u'YEAR.1' u'AGE.1' u'AGE of IND' u'AllButThisDebtToIncomeFund' u'AllButThisDebtToIncomeUW'
u'EstimatedMonthlyPayment' u'TotalDebtToIncomeFund' u'TotalDebtToIncomeUW' u'TotalUnsecureBalance' u'TotalExistingLoanAmount' u'APR'
u'IsHighRiskConsumerLoan' u'IsAdvanceRequest' u'IsWorkoutLoan' u'LoanPaymentFrequency' u'PaymentType' u'Rate']
'''
# Binary target: flag a loan as delinquent (1) when it is more than 11 days
# past due, otherwise non-delinquent (0).
raw_df['label'] = raw_df['Number of Days Delinquent'].map(lambda x : 1 if int(x) > 11 else 0)
print(raw_df.shape)
#print(raw_df['Loan Type Description'].mean())
print(np.any(np.isnan(raw_df['Loan Type Description'])))
#print(raw_df['Balance'].mean())
print(np.any(np.isnan(raw_df['Balance'])))
#print(raw_df['Loan Term'].mean())
print(np.any(np.isnan(raw_df['Loan Term'])))
#print(raw_df['LTV'].mean())
print(np.any(np.isnan(raw_df['LTV'])))
#print(raw_df['label'].sum())
print(np.any(np.isnan(raw_df['label'])))
print("\n\n")
#print(raw_df['Interest Rate'].mean())
print(np.any(np.isnan(raw_df['Interest Rate'])))
#print(raw_df['Origination Month'].mean())
print(np.any(np.isnan(raw_df['Origination Month'])))
#print(raw_df['Most Recent Credit Score'].mean())
print(np.any(np.isnan(raw_df['Most Recent Credit Score'])))
#print(raw_df['AmountFunded'].mean())
raw_df['AmountFunded'] = raw_df['AmountFunded'].fillna(raw_df['AmountFunded'].mean())
print(np.any(np.isnan(raw_df['AmountFunded'])))
#print(raw_df['MonthlyIncomeBaseSalary'].mean())
raw_df['MonthlyIncomeBaseSalary'] = raw_df['MonthlyIncomeBaseSalary'].fillna(raw_df['MonthlyIncomeBaseSalary'].mean())
print(np.any(np.isnan(raw_df['MonthlyIncomeBaseSalary'])))
#print(raw_df['TotalMonthlyIncome'].mean())
raw_df['TotalMonthlyIncome'] = raw_df['TotalMonthlyIncome'].fillna(raw_df['TotalMonthlyIncome'].mean())
print(np.any(np.isnan(raw_df['TotalMonthlyIncome'])))
#print(raw_df['MonthlyIncomeOther'].mean())
raw_df['MonthlyIncomeOther'] = raw_df['MonthlyIncomeOther'].fillna(raw_df['MonthlyIncomeOther'].mean())
print(np.any(np.isnan(raw_df['MonthlyIncomeOther'])))
#print(raw_df['Collateral Current Valuation'].mean())
print(np.any(np.isnan(raw_df['Collateral Current Valuation'])))
print("\n\n")
#raw_df['Balance'] = raw_df['Balance'].fillna(-99999)
print(np.any(np.isnan(raw_df['Balance'])))
#raw_df['Grade Overall'] = raw_df['Grade Overall'].fillna(-99999)
print(np.any(np.isnan(raw_df['Grade Overall'])))
#raw_df['Current Credit Limit'] = raw_df['Current Credit Limit'].fillna(-99999)
print(np.any(np.isnan(raw_df['Current Credit Limit'])))
#raw_df['Loan Type Code'] = raw_df['Loan Type Code'].fillna(-99999)
print(np.any(np.isnan(raw_df['Loan Type Code'])))
#raw_df['Status'] = raw_df['Status'].fillna(-99999)
print(np.any(np.isnan(raw_df['Status'])))
raw_df['Insurance'] = raw_df['Insurance'].fillna(raw_df['Insurance'].mean())
print(np.any(np.isnan(raw_df['Insurance'])))
raw_df['NumberOfOpenRevolvingAccounts'] = raw_df['NumberOfOpenRevolvingAccounts'].fillna(raw_df['NumberOfOpenRevolvingAccounts'].mean())
print(np.any(np.isnan(raw_df['NumberOfOpenRevolvingAccounts'])))
raw_df['APR'] = raw_df['APR'].fillna(raw_df['APR'].mean())
print(np.any(np.isnan(raw_df['APR'])))
#raw_df['PaymentToIncome'] = raw_df['PaymentToIncome'].fillna(raw_df['PaymentToIncome'].mean())
#print(np.any(np.isnan(raw_df['PaymentToIncome'])))
raw_df['AmountOwedToLender'] = raw_df['AmountOwedToLender'].fillna(raw_df['AmountOwedToLender'].mean())
print(np.any(np.isnan(raw_df['AmountOwedToLender'])))
#raw_df['AGE of BORROWER'] = raw_df['AGE of BORROWER'].fillna(raw_df['AGE of BORROWER'].mean())
#print(np.any(np.isnan(raw_df['AGE of BORROWER'])))
raw_df['LoanPaymentFrequency'] = raw_df['LoanPaymentFrequency'].fillna(raw_df['LoanPaymentFrequency'].mean())
print(np.any(np.isnan(raw_df['LoanPaymentFrequency'])))
raw_df['Rate'] = raw_df['Rate'].fillna(raw_df['Rate'].mean())
print(np.any(np.isnan(raw_df['Rate'])))
#df1 = pd.concat([raw_df['Loan Type Description'], raw_df['Balance'], raw_df['Loan Term'],raw_df['LTV'], raw_df['label']],axis =1)
df1 = raw_df[['Loan Type Description','Balance','Loan Term','Interest Rate','Origination Month','Most Recent Credit Score',
'AmountFunded','MonthlyIncomeBaseSalary', 'TotalMonthlyIncome','MonthlyIncomeOther','Collateral Current Valuation','LTV',
'Balance','Grade Overall','Current Credit Limit','Loan Type Code','Loan Category','Status','Updated Credit Score',
'Original Interest Rate','Grade by Cerdit Score (ORIGINAL)','GRADE BY CREDIT SCORE (UPDATED)','Insurance',
'NumberOfOpenRevolvingAccounts','AmountOwedToLender','APR','LoanPaymentFrequency','Rate','label']]
print(df1.shape)
print(df1.head(4))
#df1 = df1.reset_index()
print(np.any(np.isnan(df1)))
print(np.all(np.isfinite(df1)))
y_CU = raw_df['Probability of Default']
y = df1.label
X = df1.drop("label", axis =1)
print(X.shape)
RANDOM_SEED = 42
LABELS = ["non-delinguent", "delinguent"]
print(df1.shape)
print(df1.isnull().values.any())
print(df1.head(3))
fig11 = plt.figure()
count_classes = pd.value_counts(df1['label'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("delinguency distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
plt.show()
fig11.savefig("Class distribution.pdf")
#fig11.savefig("Class distribution.png")
print(df1['label'].value_counts())
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, auc, roc_curve
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Rebalance the training split with SMOTE (synthetic minority oversampling).
# Only the training data is resampled; the test split stays untouched.
# NOTE(review): ``fit_sample`` is the old imbalanced-learn API name
# (renamed ``fit_resample`` in later releases) -- confirm the pinned version.
from imblearn.over_sampling import SMOTE
os = SMOTE(random_state=0)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
columns = X_train.columns
os_data_X,os_data_y=os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X,columns=columns )
os_data_y= pd.DataFrame(data=os_data_y,columns=['y'])
# we can Check the numbers of our data
print("length of X data is ",len(X))
print("length of oversampled data is ",len(os_data_X))
print("Number of no delinguent in oversampled data",len(os_data_y[os_data_y['y']==0]))
print("Number of delinguent",len(os_data_y[os_data_y['y']==1]))
print("Proportion of no delinguent data in oversampled data is ",len(os_data_y[os_data_y['y']==0])/len(os_data_X))
print("Proportion of delinguent data in oversampled data is ",len(os_data_y[os_data_y['y']==1])/len(os_data_X))
# Replace the training split with the balanced, oversampled version.
X_train = os_data_X
y_train = os_data_y
from sklearn.linear_model import LogisticRegression
fig12 = plt.figure(figsize=(15,8))
ax1 = fig12.add_subplot(1,2,1)
ax1.set_xlim([-0.05,1.05])
ax1.set_ylim([-0.05,1.05])
ax1.set_xlabel('Recall')
ax1.set_ylabel('Precision')
ax1.set_title('PR Curve')
ax2 = fig12.add_subplot(1,2,2)
ax2.set_xlim([-0.05,1.05])
ax2.set_ylim([-0.05,1.05])
ax2.set_xlabel('False Positive Rate')
ax2.set_ylabel('True Positive Rate')
ax2.set_title('ROC Curve')
for w,k in zip([1,5,10,20,50,100,10000],'bgrcmykw'):
lr_model = LogisticRegression(class_weight={0:1,1:w})
lr_model.fit(X_train,y_train)
#lr_model.fit(os_data_X,os_data_y)
pred_prob = lr_model.predict_proba(X_test)[:,1]
p,r,_ = precision_recall_curve(y_test,pred_prob)
tpr,fpr,_ = roc_curve(y_test,pred_prob)
ax1.plot(r,p,c=k,label=w)
ax2.plot(tpr,fpr,c=k,label=w)
ax1.legend(loc='lower left')
ax2.legend(loc='lower left')
plt.show()
fig12.savefig("log_reg_weights.pdf")
#fig12.savefig("log_reg_weights.png")
#lr = LogisticRegression(class_weight='balanced')
#lr = LogisticRegression(class_weight={0:1,1:28})
lr = LogisticRegression()
lr = lr.fit(X_train, y_train)
params = np.append(lr.intercept_,lr.coef_)
#params = np.append(lr.coef_)
#print(params)
var1 = np.append("Intercept",X.columns)
print(var1)
#coeff1 = pd.DataFrame({'Variable':var1,'Coeffient':params})
coeff1 = pd.DataFrame({'Coeffient':params, 'Variable':var1})
print(coeff1.shape)
print(coeff1.head(16))
coeff1.to_csv("Model_Coefficients.csv")
lr_predicted = lr.predict(X_test)
confusion = confusion_matrix(y_test, lr_predicted)
print(lr.score(X_test,y_test))
print("Number of mislabeled points out of a total %d points : %d" % (X_test.shape[0],(y_test != lr_predicted).sum()))
print("\n\n")
print(confusion)
y_pred = lr.predict(X_test)
acc = accuracy_score(y_test,y_pred)
prec = precision_score(y_test,y_pred)
rec = recall_score(y_test,y_pred)
f1 = f1_score(y_test, y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
auc1 = auc(fpr,tpr)
print("\n\n")
print("Number of mislabeled points out of a total %d points : %d" % (X_test.shape[0],(y_test != y_pred).sum()))
print("\n\n")
print("Logistic accuracy:" ,acc)
print("Logistic precision:" ,prec)
print("Logistic recall:" ,rec)
print("Logistic f1 ratio:" ,f1)
print("Logistic AUC:" ,auc1)
#y_proba_lr = lr.fit(X_train, y_train).predict_proba(X_test)
# NOTE(review): probabilities are produced for the FULL dataset X (training
# rows included), so downstream comparisons against the credit union's PDs
# mix in-sample and out-of-sample predictions -- confirm this is intended.
y_proba_lr = lr.fit(X_train, y_train).predict_proba(X)
print(y_proba_lr[:,1])
from sklearn.model_selection import cross_val_score
# accuracy is the default scoring metric
print('Cross-validation (accuracy)', cross_val_score(lr, X_train, y_train, cv=5))
scores_acc = cross_val_score(lr, X_train, y_train, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores_acc.mean(), scores_acc.std() * 2))
# use AUC as scoring metric
print('Cross-validation (AUC)', cross_val_score(lr, X_train, y_train, cv=5, scoring = 'roc_auc'))
scores_auc = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'roc_auc')
print("AUC: %0.2f (+/- %0.2f)" % (scores_auc.mean(), scores_auc.std() * 2))
# use recall as scoring metric
print('Cross-validation (recall)', cross_val_score(lr, X_train, y_train, cv=5, scoring = 'recall'))
scores_rec = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'recall')
print("Recall: %0.2f (+/- %0.2f)" % (scores_rec.mean(), scores_rec.std() * 2))
print('Cross-validation (precision)', cross_val_score(lr, X_train, y_train, cv=5, scoring = 'precision'))
scores_prec = cross_val_score(lr, X_train, y_train, cv=5, scoring = 'precision')
print("precision: %0.2f (+/- %0.2f)" % (scores_prec.mean(), scores_prec.std() * 2))
import seaborn as sns
#cm = pd.crosstab(y_test, y_pred, rownames = 'True', colnames = 'predicted', margins = False)
cm = confusion_matrix(y_test, lr_predicted)
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells
# labels, title and ticks
ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(['non-delinguent', 'delinguent']); ax.yaxis.set_ticklabels(['non-delinguent', 'delinguent'])
plt.show()
#ax.savefig("confusion_matrix.pdf")
#ax.savefig("confusion_matrix.png")
y_scores_lr = lr.decision_function(X_test)
# ### Precision-recall curves
from sklearn.metrics import precision_recall_curve
precision, recall, thresholds = precision_recall_curve(y_test, y_scores_lr)
closest_zero = np.argmin(np.abs(thresholds))
closest_zero_p = precision[closest_zero]
closest_zero_r = recall[closest_zero]
plt.figure()
plt.xlim([0.0, 1.01])
plt.ylim([0.0, 1.01])
plt.plot(precision, recall, label='Precision-Recall Curve')
plt.plot(closest_zero_p, closest_zero_r, 'o', markersize = 12, fillstyle = 'none', c='r', mew=3)
plt.xlabel('Precision', fontsize=16)
plt.ylabel('Recall', fontsize=16)
plt.axes().set_aspect('equal')
plt.show()
fpr_lr, tpr_lr, _ = roc_curve(y_test, y_scores_lr)
roc_auc_lr = auc(fpr_lr, tpr_lr)
fig13 = plt.figure()
plt.xlim([-0.01, 1.00])
plt.ylim([-0.01, 1.01])
plt.plot(fpr_lr, tpr_lr, lw=3, label='Logistic Reg ROC curve (area = {:0.2f})'.format(roc_auc_lr))
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curve (delinguency classifier)', fontsize=16)
plt.legend(loc='lower right', fontsize=13)
plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
plt.axes().set_aspect('equal')
plt.show()
fig13.savefig("ROC_curve_1.pdf")
#fig1.savefig("ROC_curve_1.png")
print(y_proba_lr[:,1])
# RMSE between the credit union's assigned probabilities of default (y_CU)
# and the model's predicted probabilities, computed over the full dataset
# (includes rows the model was trained on -- see note above).
err = y_CU - y_proba_lr[:,1]
rmse_err = np.sqrt(np.mean(err**2))
print(rmse_err)
prob = y_proba_lr[:,1]
prob2 = pd.DataFrame({'probability':prob})
print(prob2.shape)
print(prob2.head(6))
prob2.to_csv("predicted_probability.csv")
save_classifier = open("log_reg_Credit_Union_PDS_model.pickle", "wb")
pickle.dump(lr, save_classifier)
#cPickle.dump(model, save_classifier)
##dill.dump(model, save_classifier)
save_classifier.close()
print("hoora!")
#classifier_f = open("log_reg_Credit_Union_PDS_model.pickle","rb")
#model = pickle.load(classifier_f)
#classifier_f.close()
#https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py
#https://towardsdatascience.com/building-a-logistic-regression-in-python-step-by-step-becd4d56c9c8
#https://github.com/susanli2016/Machine-Learning-with-Python/blob/master/Logistic%20Regression%20balanced.ipynb
y_score = lr.decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
fig14 =plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
plt.show()
fig14.savefig("ROC_curve_2.pdf")
#fig.savefig("ROC_curve_2.png")
#++++++++++++++++++++++++++++++++++++++++ LGD +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Load modules and data
import statsmodels.api as sm
# Instantiate a gamma family model with the default link function.
gamma_model = sm.GLM(y_train, X_train, family=sm.families.Gamma())
gamma_results = gamma_model.fit()
print(gamma_results.summary())
| StarcoderdataPython |
9697003 | <filename>Secao5_EstruturaLog&Cond/Exercicios/Exerc.1.py
"""
Faça um programa que receba dois numeros e mostre qual deles é maior.
"""
num = input('Digite o 1° numero: ')
num2 = input('Digite o 2° numero: ')
if num < num2:
print(f'{num2} é o número maior!')
else:
print(f'{num} é o maior número!')
| StarcoderdataPython |
8090682 | <filename>PyGame/Player/src/handler.py
import pygame
class Handler:
    """Routes pygame mouse/keyboard events to the player and the UI buttons."""

    def __init__(self, player, buttons):
        self.player = player
        self.buttons = buttons

    def mouse_events(self, event):
        """Dispatch a pygame mouse-button event."""
        # Left click: forward the click position to every registered button.
        if event.button == 1:
            for button in self.buttons:
                button.try_action(event.pos, self.player)
        # Mouse wheel (buttons 4/5): nudge the volume up or down.
        wheel_steps = {4: 0.03, 5: -0.03}
        if event.button in wheel_steps:
            self.player.change_volume(wheel_steps[event.button])

    def keyboard_events(self, event):
        """Dispatch a pygame key-down event via a key -> action table."""
        bindings = {
            pygame.K_SPACE: lambda: self.player.change_pause(),
            pygame.K_m: lambda: self.player.change_mute(),
            pygame.K_UP: lambda: self.player.change_volume(0.05),
            pygame.K_DOWN: lambda: self.player.change_volume(-0.05),
            pygame.K_RIGHT: lambda: self.player.change_position(5),
            pygame.K_LEFT: lambda: self.player.change_position(-5),
        }
        action = bindings.get(event.key)
        if action is not None:
            action()
| StarcoderdataPython |
6625313 | <reponame>GrapeBaBa/ibis
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.tests.util import assert_equal, assert_pickle_roundtrip
def test_ifelse(table):
    """ifelse() on a boolean column with string branches yields a StringColumn."""
    null_flags = table.g.isnull()
    expr = null_flags.ifelse("foo", "bar")
    assert isinstance(expr, ir.StringColumn)
def test_simple_case_expr(table):
    """cases() and the case()/when()/else_()/end() builder produce equal exprs."""
    branches = [("foo", table.a), ("bar", table.c)]
    fallback = table.b

    via_cases = table.g.lower().cases(branches, default=fallback)

    builder = table.g.lower().case()
    for condition, outcome in branches:
        builder = builder.when(condition, outcome)
    via_builder = builder.else_(fallback).end()

    assert_equal(via_cases, via_builder)
    assert isinstance(via_cases, ir.IntegerColumn)
def test_multiple_case_expr(table):
    """A searched case over several conditions builds a SearchedCase op."""
    fallback = table.d
    branches = [
        (table.a == 5, table.f),
        (table.b == 128, table.b * 2),
        (table.c == 1000, table.e),
    ]

    builder = ibis.case()
    for condition, outcome in branches:
        builder = builder.when(condition, outcome)
    expr = builder.else_(fallback).end()

    op = expr.op()
    assert isinstance(expr, ir.FloatingColumn)
    assert isinstance(op, ops.SearchedCase)
    assert op.default is fallback
def test_pickle_multiple_case_node(table):
    """A searched-case op must survive a pickle round-trip."""
    branches = [
        (table.a == 5, table.f),
        (table.b == 128, table.b * 2),
        (table.c == 1000, table.e),
    ]

    builder = ibis.case()
    for condition, outcome in branches:
        builder = builder.when(condition, outcome)
    expr = builder.else_(table.d).end()

    assert_pickle_roundtrip(expr.op())
def test_simple_case_null_else(table):
    """A simple case without else_ gets a NULL default cast to string."""
    expr = table.g.case().when("foo", "bar").end()
    default = expr.op().default

    assert isinstance(expr, ir.StringColumn)
    assert isinstance(default, ir.ValueExpr)
    assert isinstance(default.op(), ops.Cast)
    assert default.op().to == dt.string
def test_multiple_case_null_else(table):
    """A searched case without else_ gets a NULL default cast to string."""
    expr = ibis.case().when(table.g == "foo", "bar").end()
    default = expr.op().default

    assert isinstance(expr, ir.StringColumn)
    assert isinstance(default, ir.ValueExpr)
    assert isinstance(default.op(), ops.Cast)
    assert default.op().to == dt.string
def test_case_mixed_type():
    """Integer cases with string results unify to a string column."""
    schema = [('one', 'string'), ('two', 'double'), ('three', 'int32')]
    data = ibis.table(schema, name='my_data')

    label = (
        data.three.case()
        .when(0, 'low')
        .when(1, 'high')
        .else_('null')
        .end()
        .name('label')
    )
    projected = data[label]
    assert projected['label'].type().equals(dt.string)
| StarcoderdataPython |
4855404 | <filename>src/eddington_matplotlib/data.py
"""Plot fitting data."""
from eddington import FitData
from eddington_matplotlib.plot_configuration import PlotConfiguration
from eddington_matplotlib.util import (
get_figure,
errorbar,
)
def plot_data(data: FitData, plot_configuration: PlotConfiguration):
    """Draw the fitting data points with error bars on a fresh figure.

    :param data: fitting data to plot.
    :param plot_configuration: titles and styling for the plot.
    :return: the created figure.
    """
    figure = get_figure(
        title_name=plot_configuration.data_title,
        plot_configuration=plot_configuration,
    )
    errorbar(fig=figure, x=data.x, y=data.y, xerr=data.xerr, yerr=data.yerr)
    return figure
| StarcoderdataPython |
12848071 | # pylint: skip-file
import random
import string
from .common import * # noqa
# we don't use user sessions, so it doesn't matter if we recreate the secret key on each startup
# Even so, key material should come from a CSPRNG: `random` is not suitable for
# anything security-sensitive; `secrets` is (same 30-char A-Z/0-9 alphabet).
import secrets

SECRET_KEY = ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(30))
# disable databases for the worker
DATABASES = {}

# `INSTALLED_APPS` (and `env` below) come from the star-import of .common.
INSTALLED_APPS += (
    # sentry
    'raven.contrib.django.raven_compat',
)

# SENTRY
# DSN is optional: when unset, the Sentry logging config below is skipped.
SENTRY_DSN = env.str('SENTRY_DSN', default=None)
if SENTRY_DSN:
    # Root logger at WARNING routes to the Sentry handler (which itself only
    # ships ERROR+). django.db / raven / sentry internals log to console only
    # with propagate=False, so Sentry errors cannot recurse into Sentry.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'root': {
            'level': 'WARNING',
            'handlers': ['sentry'],
        },
        'formatters': {
            'verbose': {
                'format': '%(levelname)s %(asctime)s %(module)s '
                '%(process)d %(thread)d %(message)s'
            },
        },
        'handlers': {
            'sentry': {
                'level': 'ERROR',
                'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
            },
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'verbose'
            }
        },
        'loggers': {
            'django.db.backends': {
                'level': 'ERROR',
                'handlers': ['console'],
                'propagate': False,
            },
            'raven': {
                'level': 'DEBUG',
                'handlers': ['console'],
                'propagate': False,
            },
            'sentry.errors': {
                'level': 'DEBUG',
                'handlers': ['console'],
                'propagate': False,
            },
        },
    }
    # Raven client configuration; SENTRY_RELEASE tags events with a release.
    RAVEN_CONFIG = {
        'dsn': SENTRY_DSN,
        'release': env.str('SENTRY_RELEASE', default=''),
    }
| StarcoderdataPython |
9650835 | <gh_stars>1-10
import warnings
import unittest
import rupee.engine
class _Base(object):
cache = None
data = {
'int': 123,
'string': 'foo',
'list': [1, 2, 3],
'dict': {'foo': 'bar', 'baz': 5}
}
def setUp(self):
self.cache.delete_all_data()
warnings.simplefilter('ignore', ResourceWarning)
def test_single(self):
for key, value in self.data.items():
self.assertEqual(self.cache.get(key), None)
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), value)
self.cache.delete(key)
self.assertEqual(self.cache.get(key), None)
def test_multi(self):
keys = self.data.keys()
for key, value in self.data.items():
self.cache.delete(key)
self.cache.set_multi(self.data)
self.assertEqual(self.cache.get_multi(keys), self.data)
self.cache.delete_multi(keys)
deleted = self.cache.get_multi(keys)
for key, value in deleted.items():
self.assertEqual(value, None)
class TestMemcached(_Base, unittest.TestCase):
    # Runs the shared suite against a local memcached at localhost:11211.
    # NOTE(review): ('localhost:11211') is just a parenthesized string, so the
    # argument is a one-element list of str, not a list of tuples — confirm.
    cache = rupee.engine.Memcached([('localhost:11211')], prefix='test')
class TestMemory(_Base, unittest.TestCase):
    # In-process memory engine; needs no external service.
    cache = rupee.engine.Memory()
class TestRedis(_Base, unittest.TestCase):
    # Requires a local redis server reachable with default connection settings.
    cache = rupee.engine.Redis(prefix='test')
| StarcoderdataPython |
6430965 | #!/usr/bin/env python
import pika
from pika import spec
import sys
import subprocess
import requests
import json
def get_node_ip(node_name):
    """Resolve a cluster node's IP via the get-node-ip.sh helper script.

    Returns the script's stdout decoded as ASCII with newlines stripped.
    """
    shell_command = "bash ../cluster/get-node-ip.sh " + node_name
    proc = subprocess.Popen(shell_command.split(), stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.decode('ascii').replace('\n', '')
def put_ha_policy(mgmt_node_ip):
    """Create/update an 'ha-queues' mirroring policy on the default vhost
    via the RabbitMQ management HTTP API on the given node.

    NOTE(review): reads the module-global ``rep_factor`` (assigned from
    sys.argv later in this script), so it must only be called after that
    assignment has run.
    """
    r = requests.put('http://' + mgmt_node_ip + ':15672/api/policies/%2F/ha-queues',
                     data = "{\"pattern\":\"\", \"definition\": {\"ha-mode\":\"exactly\", \"ha-params\": " + rep_factor + " }, \"priority\":0, \"apply-to\": \"queues\"}",
                     auth=("jack","jack"))
    print(f"Create policy response: {r}")
# CLI: <queue_name> <replication_factor> <purge: "true" or anything else>
queue_name = sys.argv[1]
rep_factor = sys.argv[2]
purge = sys.argv[3]
# Apply the HA policy first so the queue is mirrored from the start.
node_ip = get_node_ip("rabbitmq1")
put_ha_policy(node_ip)
# NOTE(review): '<PASSWORD>' looks like a scrubbed placeholder — supply the
# real password before running.
credentials = pika.PlainCredentials('jack', '<PASSWORD>')
parameters = pika.ConnectionParameters(node_ip,
                                       5672,
                                       '/',
                                       credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
# Durable lazy queue: messages are kept on disk rather than held in memory.
channel.queue_declare(queue=queue_name, durable=True, arguments={"x-queue-mode": "lazy"})
print(f"Declared queue {queue_name}")
if purge == "true":
    channel.queue_purge(queue_name)
    print(f"Purged queue {queue_name}")
channel.close()
connection.close()
| StarcoderdataPython |
364809 | <filename>apps/infra_gateway/functions.py
import random
import string
import requests
from django.conf import settings
from apps.challenge.models import Match, Map
def random_token():
    """Return a 15-character alphanumeric token.

    Uses the ``secrets`` module rather than ``random``: tokens are
    security-sensitive and ``random`` is not a cryptographically secure
    source. Length and alphabet are unchanged.
    """
    import secrets
    chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for _ in range(15))
def upload_code(submission):
    """Synchronously upload a submission's code file to the infrastructure.

    :param submission: TeamSubmission whose ``file`` and ``language`` fields
        are posted to the gateway.
    :return: the code token issued by the infrastructure.
    """
    print("ommad upload kone", submission.file.size)
    response = requests.post(
        settings.GATEWAY_HOST + "/upload/code",
        data={'language': submission.language},
        files={'file': submission.file},
        headers={'Authorization': f'{settings.GATEWAY_AUTH_TOKEN}'},
    )
    payload = response.json()
    print(response.status_code, payload, "==== Upload Code ====")
    return payload['code_id']
def upload_map(file):
    """Synchronously upload a map file to the infrastructure gateway.

    :param file: file object to post.
    :return: the map token issued by the infrastructure.
    """
    print("ommad upload kone", file.size)
    response = requests.post(
        settings.GATEWAY_HOST + "/upload/map",
        files={'file': file},
        headers={'Authorization': f'{settings.GATEWAY_AUTH_TOKEN}'},
    )
    payload = response.json()
    print(response.status_code, payload, "==== Upload Map ====")
    return payload['map_id']
def upload_file_with_url(file):
    """
    This function uploads a file to infrastructure synchronously
    Site will be as server and infrastructure will download it
    with the url came from site
    :param file: File field from TeamSubmission model
    :return: file token or raises error with error message
    """
    pass  # TODO: not implemented yet
def download_file(file_token):
    """
    Downloads file from infrastructure synchronously
    :param file_token: the file token obtained already from infra.
    :return: sth that TeamSubmission file field can be assigned to
    """
    pass  # TODO: not implemented yet
def compile_submissions(submissions):
    """
    Tell the infrastructure to compile a list of submissions
    :return: list of dictionaries each have token, success[, errors] keys
    """
    pass  # TODO: not implemented yet
def run_match(match: Match, priority=0):
    """Register a single match with the infrastructure for execution.

    :param match: Match whose map and player code tokens are submitted.
    :param priority: scheduling priority passed to the gateway.
    :return: the game token issued by the infrastructure.
    """
    info = match.match_info
    response = requests.post(
        settings.GATEWAY_HOST + "/game/register",
        data={
            'map_id': info.map.infra_token,
            'player_ids': [
                info.team1_code.infra_token,  # in-game id: 0
                info.team2_code.infra_token,  # in-game id: 1
            ],
        },
        params={'priority': priority},
        headers={'Authorization': f'{settings.GATEWAY_AUTH_TOKEN}'},
    )
    payload = response.json()
    print(response.status_code, payload, "==== Run Game ====")
    return payload['game_id']
def run_games(single_games, desired_map):
    """
    Tell the infrastructure to run a list of single_matches (single_match includes tokens,maps,...)
    :param desired_map:
    :param single_games:
    :return: Returns the list of tokens and success status and errors assigned to the matches
    """
    pass  # TODO: not implemented yet
def download_code(file_infra_token):
    """Fetch the source code stored under the given code token.

    :return: the code payload returned by the gateway.
    """
    response = requests.get(
        settings.GATEWAY_HOST + "/download/code",
        params={'code_id': file_infra_token},
        headers={'Authorization': f'{settings.GATEWAY_AUTH_TOKEN}'},
    )
    payload = response.json()
    print(response.status_code, payload, "==== Download File ====")
    return payload['code']
def download_log(match_infra_token, file_infra_token=None):
    """Fetch a match log; with ``file_infra_token``, a single player's log.

    :param match_infra_token: game token to fetch the log for.
    :param file_infra_token: optional player token to narrow the log.
    :return: the log payload returned by the gateway.
    """
    query = {'game_id': match_infra_token}
    if file_infra_token:
        query['player_id'] = file_infra_token
    response = requests.get(
        settings.GATEWAY_HOST + "/download/log",
        params=query,
        headers={'Authorization': f'{settings.GATEWAY_AUTH_TOKEN}'},
    )
    payload = response.json()
    print(response.status_code, payload, "==== Download File ====")
    return payload['log']
| StarcoderdataPython |
4847283 | <filename>wtypes/python_types.py
import sys
import typing
import wtypes
class _NoType:
    # Marker mixin for Args/Kwargs below; carries no behaviour of its own.
    ...
class _ForwardSchema(wtypes.base._ContextMeta):
    """A forward reference to an object, the object must exist in sys.modules.

    Notes
    -----
    Python types live on the __annotations__ attribute.
    """

    # Bound positional/keyword arguments (lifted from the "args"/"keywords"
    # schema entries in __new__); consumed by Instance.__new__ below.
    _type_args = None
    _type_kwargs = None

    def __new__(cls, name, base, kwargs, **schema):
        # Move "args"/"keywords" schema entries into the class namespace
        # before delegating class creation to the wtypes machinery.
        if "args" in schema:
            kwargs.update({"_type_args": schema.pop("args")})
        if "keywords" in schema:
            kwargs.update({"_type_kwargs": schema.pop("keywords")})
        cls = super().__new__(cls, name, base, kwargs, **schema)
        cls._merge_args()
        return cls

    def __getitem__(cls, object):
        # Forward['pkg.obj'] / Forward[obj, ...]: strings become lazy
        # typing.ForwardRef entries; everything is folded into a typing.Union
        # stored on a freshly derived class.
        if not isinstance(object, tuple):
            object = (object,)
        schema = []
        for object in object:
            if isinstance(object, str):
                schema.append(typing.ForwardRef(object))
            else:
                schema.append(object)
        cls = cls.create(cls.__name__, py=typing.Union[tuple(schema)])
        return cls

    def validate(cls, object):
        # Validation only forces resolution of the reference; the argument
        # itself is not inspected at this level.
        cls.eval()

    def eval(cls):
        # Resolve the stored annotation. ForwardRefs are evaluated against
        # sys.modules, so the target module must already be importable.
        t = typing.Union[cls._type]
        t = t.__args__[0] if isinstance(t, typing._GenericAlias) else t
        if isinstance(t, typing.ForwardRef):
            return t._evaluate(sys.modules, sys.modules)
        return t

    def __add__(cls, object):
        # Cycle through dicts and lists
        # Combining with a dict merges it as schema keywords; combining with
        # another Forward produces a subclass of both.
        if isinstance(object, dict):
            return type(cls.__name__ + object.__name__, (cls,), dict(), **object)
        if isinstance(object, Forward):
            return type(cls.__name__ + object.__name__, (cls, object), dict(),)
        return super().__add__(object)
class _ArgumentSchema(_ForwardSchema):
    # Metaclass for Args/Kwargs: subscripting stores the payload as a schema
    # entry keyed by wtypes.base._lower_key(cls.__name__) — presumably
    # "args"/"kwargs"; confirm against wtypes.base.
    def __getitem__(cls, object):
        # Scalars are wrapped into a one-element tuple; dicts/tuples pass
        # through unchanged.
        if not isinstance(object, dict) and not isinstance(object, tuple):
            object = (object,)
        return type(
            cls.__name__, (cls,), {}, **{wtypes.base._lower_key(cls.__name__): object}
        )
class Args(
    _NoType, wtypes.base._NoInit, wtypes.base._NoTitle, metaclass=_ArgumentSchema
):
    # Binds positional arguments for later construction, e.g.
    # Instance[range] + Args[10, 20] -> range(10, 20).
    ...
class Kwargs(
    _NoType, wtypes.base._NoInit, wtypes.base._NoTitle, metaclass=_ArgumentSchema
):
    # Binds keyword arguments for later construction; merged into the call
    # when an Instance is instantiated.
    ...
class Forward(metaclass=_ForwardSchema):
    """Create type using objects or forward references.

    Calling the class resolves the reference and returns the referenced
    object itself (no new instance is created).

    Examples
    --------
    >>> assert Forward['builtins.range']() is range
    """

    def __new__(cls):
        return cls.eval()
class Class(Forward):
    """Create type using objects or forward references.

    Examples
    --------
    >>> assert isinstance(range, Class['builtins.range'])
    """

    def __new__(cls):
        # NOTE(review): super().__new__() is missing the ``cls`` argument
        # (Forward.__new__ takes cls), so calling Class() directly likely
        # raises TypeError — confirm; isinstance checks don't hit this path.
        object = super().__new__()
        # eval() may yield a tuple of candidates; keep only the first.
        if isinstance(object, tuple):
            object = object[0]
        return object

    @classmethod
    def validate(cls, object):
        # Valid when `object` is a class and a subclass of the resolved
        # forward reference; any failure (e.g. non-class input) falls through
        # to the ValidationError below.
        try:
            if issubclass(object, cls.eval()):
                return
        except:
            ...
        raise wtypes.ValidationError(f"{object} is not a type of {cls._schema}.")
class Instance(Forward):
    """Create an instance of a type using objects or forward references.

    Examples
    --------
    >>> assert (Instance[range] + Args[10, 20])() == range(10, 20)
    >>> assert (Instance['builtins.range'] + Args[10, 20])() == range(10, 20)
    >>> assert isinstance(range(10), Instance['builtins.range'])

    Deffered references.

    >>> assert not isinstance(1, Instance['pandas.DataFrame'])
    >>> assert 'pandas' not in __import__('sys').modules
    """

    def __new__(cls, *args, **kwargs):
        # Prepend bound Args and merge bound Kwargs (call-time kwargs win),
        # then construct an instance of the lazily-resolved type.
        args = tuple(cls._type_args or tuple()) + args
        kwargs = {**(cls._type_kwargs or dict()), **kwargs}
        return cls.eval()(*args, **kwargs)

    @classmethod
    def validate(cls, object):
        # Delegate to generic validation against the resolved type.
        wtypes.validate_generic(object, cls.eval())
| StarcoderdataPython |
6501304 | <filename>tf-mnist/mnist_model.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import numpy as np
import tensorflow as tf
import shutil
from tensorflow import keras
import pandas as pd
import copy
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
# Configure model options
# Data/model/export locations, overridable via environment variables.
TF_DATA_DIR = os.getenv("TF_DATA_DIR", "/tmp/data/")
TF_MODEL_DIR = os.getenv("TF_MODEL_DIR", None)  # appears unused in this file
TF_EXPORT_DIR = os.getenv("TF_EXPORT_DIR", "./mnist/")
def main(unused_args):
    """Train a dense classifier on per-timestamp network-flow features and
    export it as a TensorFlow SavedModel under TF_EXPORT_DIR.

    NOTE(review): despite the MNIST-themed filename, this trains on
    /opt/data_csv_filtered1.csv flow data, not MNIST — confirm intent.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if not os.path.exists(TF_DATA_DIR):
        os.mkdir(TF_DATA_DIR)
    data = pd.read_csv("/opt/data_csv_filtered1.csv")
    data = data.fillna(0)
    # NOTE(review): the drop() result is discarded (no inplace/assignment),
    # so these columns are NOT actually removed — confirm intent.
    data.drop(['egQDropPkts', 'bufferDrop'], axis=1)
    column_names = list(data.head(0))  # header names; appears unused below
    unique_streams = set()
    for val in data['srcIp']:
        unique_streams.add(val)
    unique_ts = set()
    for val in data['timestamp']:
        unique_ts.add(val)
    # Pivot the long table into one row per timestamp, with one
    # byteCount/pktCount/egQOcc column per source stream.
    final_result = list(dict())  # list(dict()) is just an empty list
    vals_to_transpose = ('byteCount', 'pktCount', 'egQOcc')
    malicious_stm = ('mStream10', 'mStream20', 'mStream30')
    while len(unique_ts):
        temp_unique_streams = copy.deepcopy(unique_streams)
        temp_dict = dict()
        curr_ts = unique_ts.pop()
        temp_dict['timestamp'] = curr_ts
        test = data[data.timestamp == curr_ts]
        m_stream10 = list(test[malicious_stm[0]])
        m_stream20 = list(test[malicious_stm[1]])
        m_stream30 = list(test[malicious_stm[2]])
        # Columns 1:7 are assumed to hold srcIp plus the per-stream counters
        # — confirm against the CSV layout.
        for val in test[test.columns[1:7]].iterrows():
            temp = val[1]
            for each in vals_to_transpose:
                new_index = temp['srcIp'] + "_" + each
                temp_dict[new_index] = temp[each]
            temp_unique_streams.remove(temp['srcIp'])
        # Streams absent at this timestamp contribute zero-valued features.
        for strm in temp_unique_streams:
            for each in vals_to_transpose:
                new_index = strm + "_" + each
                temp_dict[new_index] = 0
        for strms in unique_streams:
            temp_dict[strms] = 0
        # A stream is labelled malicious for this timestamp if any row
        # carries the 1.0 flag.
        if 1.0 in m_stream10:
            temp_dict['mStream10'] = 1
        else:
            temp_dict['mStream10'] = 0
        if 1.0 in m_stream20:
            temp_dict['mStream20'] = 1
        else:
            temp_dict['mStream20'] = 0
        if 1.0 in m_stream30:
            temp_dict['mStream30'] = 1
        else:
            temp_dict['mStream30'] = 0
        final_result.append(temp_dict)
    final_data = pd.DataFrame(final_result)
    label_columns = list((data.loc[:, 'mStream10':'stream9']).head(0))
    label_columns  # no-op statement (notebook leftover)
    final_data[:10]  # no-op statement (notebook leftover)
    feature_columns = list(final_data.head(0))
    for col in label_columns:
        if col in feature_columns:
            feature_columns.remove(col)
    feature_data = final_data.drop(['timestamp'], axis=1)
    feature_columns.remove('timestamp')
    feature_data[0:2]  # no-op statement (notebook leftover)
    label_data = final_data.drop(feature_columns, axis=1)
    label_data = label_data.drop(['timestamp'], axis=1)
    label_data[0:2]  # no-op statement (notebook leftover)
    # Normalize byte/packet counts and queue occupancy so features lie in
    # [0, 1]; divisors look like dataset maxima / link capacity — confirm.
    normalized_data = final_data[:]
    normalized_data = normalized_data.drop(['timestamp'], axis=1)
    for col in feature_columns:
        if "bytecount" in col.lower():
            normalized_data[col] = normalized_data[col]/(10*(10**9))
        elif "pktcount" in col.lower():
            normalized_data[col] = normalized_data[col]/820209
        elif "egqocc" in col.lower():
            normalized_data[col] = normalized_data[col]/100
    normalized_data[:10]  # no-op statement (notebook leftover)
    # Splitting the data into Training and Testing
    # In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
    sample = np.random.choice(normalized_data.index, size=int(len(normalized_data)*0.9), replace=False)
    train_data, test_data = normalized_data.iloc[sample], normalized_data.drop(sample)
    print("Number of training samples is", len(train_data))
    print("Number of testing samples is", len(test_data))
    print(train_data[:10])
    print(test_data[:10])
    # Splitting the data into features and targets (labels)
    # Now, as a final step before the training, we'll split the data into features (X) and targets (y).
    # Separate data and one-hot encode the output
    # Note: We're also turning the data into numpy arrays, in order to train the model in Keras
    features = np.array(train_data.drop(label_columns, axis=1))
    targets = np.array(train_data.drop(feature_columns, axis=1))
    features_test = np.array(test_data.drop(label_columns, axis=1))
    targets_test = np.array(test_data.drop(feature_columns, axis=1))
    print(features[:2])
    print(targets[:2])
    # Building the model
    # input_shape=(177,) and the 59-unit output are hard-coded to this
    # dataset's feature/label counts — confirm against the CSV.
    model = keras.Sequential()
    model.add(keras.layers.Dense(256, activation='sigmoid', input_shape=(177,)))
    model.add(keras.layers.Dropout(.2))
    model.add(keras.layers.Dense(128, activation='sigmoid'))
    model.add(keras.layers.Dropout(.2))
    model.add(keras.layers.Dense(64, activation='sigmoid'))
    model.add(keras.layers.Dropout(.1))
    model.add(keras.layers.Dense(59, activation='sigmoid'))
    # Compiling the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0),
                  metrics=['categorical_accuracy'])
    model.summary()
    # Training the model
    history = model.fit(features, targets, epochs=25, batch_size=100, verbose=2)
    # Scoring the model
    # Evaluating the model on the training and testing set
    score = model.evaluate(features, targets)
    print("\n Training Loss:", score[0])
    print("\n Training Accuracy:", score[1])
    score = model.evaluate(features_test, targets_test)
    print("\n Testing Loss:", score[0])
    print("\n Testing Accuracy:", score[1])
    print("Model.input - ", model.input)
    print("Model.output - ", model.output)
    # Export a SavedModel versioned by unix timestamp, ready for serving.
    tf.saved_model.simple_save(keras.backend.get_session(),
                               TF_EXPORT_DIR + "/" + str(int(time.time())),
                               inputs={'data': model.input},
                               outputs={t.name:t for t in model.outputs})
if __name__ == '__main__':
    # tf.app.run() parses flags and invokes main(unused_args).
    tf.app.run()
| StarcoderdataPython |
4915662 | <filename>pygears/typing/number.py
from .base import GenericMeta
from abc import ABCMeta, abstractmethod
class NumberType(ABCMeta, GenericMeta):
    """Metaclass for numeric pygears types: combines ABCMeta (for the
    abstract ``signed`` contract) with the pygears generic-type machinery."""
    @property
    @abstractmethod
    def signed(self) -> bool:
        # Concrete numeric type classes must report whether they are signed.
        ...
class Number(metaclass=NumberType):
    """All numbers inherit from this class.

    If you just want to check if an argument x is a number, without
    caring what kind, use isinstance(x, Number).
    """

    # No per-instance attributes on the base itself.
    __slots__ = ()

    # Concrete numeric types must provide their own hash implementation
    __hash__ = None
# class Number(int, metaclass=NumberType):
# pass
| StarcoderdataPython |
9697154 | import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
# from ...utility import source_attr
from mathutils import Color, Vector
def update_node(self, context):
    # Property-update callback: re-execute the node tree whenever the
    # selected material changes, so downstream nodes see the new value.
    self.execute_tree()
class RenderNodeMaterialInput(RenderNodeBase):
    """Node exposing a user-picked Blender material on a single output socket."""
    bl_idname = 'RenderNodeMaterialInput'
    bl_label = 'Material Input'

    # The selected material; editing it re-runs the tree via update_node.
    default_value: PointerProperty(type=bpy.types.Material, update=update_node)

    def init(self, context):
        # Single material output socket.
        self.create_output('RenderNodeSocketMaterial', 'output', "Output")

    def draw_buttons(self, context, layout):
        layout.scale_y = 0.75
        layout.scale_x = 0.75
        # ID selector with preview thumbnails for picking the material.
        layout.template_ID_preview(
            self, "default_value",
            rows=3, cols=4, hide_buttons=False)

    def process(self,context,id,path):
        # Push the chosen material to the output socket.
        self.outputs[0].set_value(self.default_value)
def register():
    # Blender add-on hook: register the node class with bpy.
    bpy.utils.register_class(RenderNodeMaterialInput)
def unregister():
    # Blender add-on hook: remove the node class on disable/reload.
    bpy.utils.unregister_class(RenderNodeMaterialInput)
| StarcoderdataPython |
326876 | # Generated by Django 3.0.3 on 2020-05-09 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.3: replaces the old ``Product`` model with
    # ``AddService``. NOTE: DeleteModel drops the Product table and its data —
    # irreversible on a populated database.

    dependencies = [
        ('techbeauty', '0005_addproduct'),
    ]

    operations = [
        migrations.CreateModel(
            name='AddService',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('categories', models.CharField(max_length=50)),
                ('name', models.CharField(max_length=50)),
                ('price', models.CharField(max_length=50)),
                ('image', models.URLField()),
            ],
        ),
        migrations.DeleteModel(
            name='Product',
        ),
    ]
| StarcoderdataPython |
11291991 | <gh_stars>0
# For non-overlapping substrings you can simply count the number of occurrences using:
#   string.count(substring, start(optional), end(optional))
# For overlapping substrings:
def overlapCount(string, sub_string):
    """Count occurrences of ``sub_string`` in ``string``, counting
    overlapping matches (unlike str.count, which skips past each match).

    Only start offsets where a full-length match can still fit are
    considered, so an empty ``sub_string`` yields len(string) + 1,
    matching the str.count convention.

    >>> overlapCount('aaaa', 'aa')   # matches at offsets 0, 1, 2
    3
    """
    sub_len = len(sub_string)
    return sum(
        1
        for start in range(len(string) - sub_len + 1)
        if string[start:start + sub_len] == sub_string
    )
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.