# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import imutils
import cv2
import os
from playsound import playsound
import threading
from datetime import datetime
import mysql.connector
from time import gmtime, strftime
from ftplib import FTP
#DB Config
host = "localhost"
username = "root"
password = ""
database_name = "facemask"
mydb = mysql.connector.connect(
host="{}".format(host),
user="{}".format(username),
passwd="{}".<PASSWORD>(password),
database="{}".format(database_name)
)
#FTP Config
ftp = FTP("localhost")
ftp.login(user='fmd_user',passwd='<PASSWORD>')
def greeting_function():
currentHour = int(datetime.today().strftime('%H'))
basicGreeting = ""
if currentHour >= 0 and currentHour < 12:
basicGreeting = "morning"
if currentHour >= 12 and currentHour < 18:
basicGreeting = "afternoon"
if currentHour >= 18 and currentHour != 0:
basicGreeting = "evening"
return basicGreeting
def get_Center(x, y, w, h):
x1 = int(w / 2)
y1 = int(h / 2)
cx = x + x1
cy = y + y1
return cx, cy
ROI = 300
offset = 8
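# a face is greeted or flagged only when its centre point crosses the horizontal counting line at y == ROI (within +/- offset pixels); see the main loop below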
global_current_date = datetime.today().strftime('%Y-%m-%d')
lang = ""
SelectDataCursor = mydb.cursor()
SelectDataCursor.execute("SELECT lang FROM language")
collectedData = SelectDataCursor.fetchall()
for data in collectedData:
lang = data[0]
without_mask_sound = ""
if lang == "tamil":
without_mask_sound = "sound-effect\\without_mask\\tamil.mp3"
elif lang == "sinhala":
without_mask_sound = "sound-effect\\without_mask\\sinhala.mp3"
elif lang == "english":
without_mask_sound = "sound-effect\\without_mask\\english.mp3"
is_sound_playing = False
def soundPlay(sound_path):
global is_sound_playing
is_sound_playing = True
playsound(sound_path)
is_sound_playing = False
def without_mask_detected(frame):
global is_sound_playing, without_mask_sound
cv2.imwrite("cache-img.jpg",frame)
current_time = datetime.today().strftime('%I:%M:%S %p')
file_name = "{}_{}.jpg".format(global_current_date,current_time.replace(":","-"))
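# upload the cached frame to the FTP server under a date/time-stamped file name, then log the event in the database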
ftp.storbinary('STOR '+"{}".format(file_name), open("cache-img.jpg", 'rb'))
sqlCode = "INSERT INTO data (auto_id, _date, _time, image_path) VALUES (%s, %s, %s, %s)"
values = ("", global_current_date,current_time,file_name)
insertCursor = mydb.cursor()
insertCursor.execute(sqlCode,values)
mydb.commit()
if is_sound_playing == False:
soundPlay(without_mask_sound)
def with_mask_detected():
global is_sound_playing
if is_sound_playing == False:
greeting = greeting_function()
if greeting == "morning":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_morning.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_morning.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_morning.mp3")
elif greeting == "afternoon":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_afternoon.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_afternoon.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_afternoon.mp3")
elif greeting == "evening":
if lang == "tamil":
soundPlay("sound-effect\\with_mask\\tamil_evening.mp3")
elif lang == "sinhala":
soundPlay("sound-effect\\with_mask\\sinhala_evening.mp3")
elif lang == "english":
soundPlay("sound-effect\\with_mask\\english_evening.mp3")
def detect_and_predict_mask(frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
#print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence for face detections
if confidence > 0.3:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# predictions
return (locs, preds)
# load our serialized face detector model from disk
prototxtPath = r"face-detector\deploy.prototxt"
weightsPath = r"face-detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model("mask-detector.model")
# initialize the video stream
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
vs = cv2.VideoCapture("video-1.mp4")
width_and_height = 850
# loop over the frames from the video stream
while True:
# grab the frame from the video source and resize it
# to a width of 850 pixels
ret,frame = vs.read()
frame = imutils.resize(frame, width = width_and_height)
copyFrame = frame.copy()
cv2.line(frame, (0 , ROI), (1200 , ROI), (0,255,255), 4) # Line
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# predictions
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(xmin, ymin, xmax, ymax) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
if mask > withoutMask:
label = "Mask"
color = (0, 255, 0)
else:
label = "No Mask"
color = (0, 0, 255)
#label = "Mask" if mask > withoutMask else "No Mask"
#color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
#label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 2, color, 2)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
mid_point = get_Center(int(x), int(y), int(w),int(h))
cv2.circle(frame, (mid_point[0], mid_point[1]), 6, color, -1)
if mid_point[1] < (ROI + offset) and mid_point[1] > (ROI - offset):
if label == "Mask":
cv2.line(frame, (0 , ROI), (1200 , ROI), (0, 255, 0), 4)
withMask_threading = threading.Thread(target = with_mask_detected)
withMask_threading.start()
elif label == "No Mask":
cv2.line(frame, (0 , ROI), (1200 , ROI), (0, 0, 255), 4)
withoutMask_threading = threading.Thread(target = without_mask_detected, args=(copyFrame,))
withoutMask_threading.start()
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.release()
ftp.quit()
# Repository: kgarg8/cs559-uic-neural-networks
# Q-learning implementation for 5*5 grid
import numpy as np, matplotlib.pyplot as plt, random, pdb
from tqdm import tqdm
#### Environment ####
# HO 21 22 23 24
# 15 16 17 18 19
# 10 11 12 13 14
# 5 6 7 8 9
# I 1 2 3 GM
# GM: Gold Mine (4)
# HO: Home (20)
# I: Initial Pos (0)
######################
# environment settings
GRID_SIZE = 5
ACTIONS = 4 # R, L, U, D
GOLD_MAX = 3 # maximum gold value
STATES = GRID_SIZE*GRID_SIZE
GOLD_MINE = GRID_SIZE-1 # location of gold mine
HOME = GRID_SIZE*(GRID_SIZE-1) # location of home
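# a state index s maps to grid row s // GRID_SIZE and column s % GRID_SIZE, e.g. state 13 sits in row 2, column 3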
# hyperparameters
num_episodes = 100
train_timesteps = 5000
test_timesteps = 500
alpha = 0.10 # learning rate
gamma = 0.9 # discount factor
prob = 0.1 # probability for exploration
# seed
seed = 112
np.random.seed(seed)
random.seed(seed)
# global parameters
Q_table = np.random.normal(size=(STATES, GOLD_MAX+1, ACTIONS)) # Initialize with Gaussian Distribution values
action_map = {0:'R', 1:'L', 2:'U', 3:'D'} # Actions
# returns (reward, next_state, new_gold_val)
def reward(state, action, gold):
# next step is GOLD MINE
if (state==GOLD_MINE-1 and action==0) or (state==GOLD_MINE+GRID_SIZE and action==3):
if gold < GOLD_MAX:
gold = gold + 1
return (0, state, gold) # zero reward, stay in the same state
# next step is HOME
elif (state==HOME+1 and action==1) or (state==HOME-GRID_SIZE and action==2):
ret = (gold, state, 0) # reward equals gold and gold gets unloaded
gold = 0
return ret # non-zero reward, stay in the same state
# RIGHT
elif action == 0:
if state%GRID_SIZE == GRID_SIZE-1:
return (0, state, gold) # no way to go
else:
return (0, state+1, gold)
# LEFT
elif action == 1:
if state%GRID_SIZE == 0:
return (0, state, gold) # no way to go
else:
return (0, state-1, gold)
# UP
elif action == 2:
if state//GRID_SIZE == GRID_SIZE-1:
return (0, state, gold) # no way to go
else:
return (0, state+GRID_SIZE, gold)
# DOWN
else:
if state//GRID_SIZE == 0:
return (0, state, gold) # no way to go
else:
return (0, state-GRID_SIZE, gold)
def plot(rewards, label):
if label == 'Test timesteps':
arr = [i for i in range(test_timesteps)]
else: # 'Episodes'
arr = [i for i in range(num_episodes)]
plt.figure()
plt.xlabel(label)
plt.ylabel('Cumulative Reward')
plt.plot(arr, rewards)
plt.title('{} vs. Cumulative Reward'.format(label))
plt.savefig('{} vs. Cumulative Reward_gamma_{}.png'.format(label, gamma))
def episodic_test(episode):
x = 0; gold = 0; cum_reward = 0 # initial state
for t in range(test_timesteps):
a = np.argmax(Q_table[(x, gold)])
r, y, gold = reward(x, a, gold)
x = y
cum_reward += r*(gamma**t)
print('Episode: {}, Cumulative Reward: {}'.format(episode, cum_reward))
return cum_reward
def train():
rewards = []
for e in range(num_episodes):
x = 0; gold = 0 # initial state
for t in range(train_timesteps):
a = random.random()
if a < prob:
a = random.randint(0, 3) # exploration
else:
a = np.argmax(Q_table[(x, gold)]) # exploitation
prev_state = (x, gold)
r, y, gold = reward(x, a, gold)
a_Qmax = np.argmax(Q_table[(y, gold)])
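# standard Q-learning update: Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))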
Q_table[prev_state][a] = (1-alpha)*Q_table[prev_state][a] + alpha*(r + gamma*Q_table[(y, gold)][a_Qmax])
# logging
# print('Cur state: {}, Action: {}, New state: {}, Reward: {}, Gold: {}'.format(x, action_map[a], y, r, gold))
# if r != 0: print(t)
x = y # save new state
episodic_reward = episodic_test(e)
rewards.append(episodic_reward)
plot(rewards, 'Episodes')
def test():
x = 0; gold = 0; cum_reward = 0 # initial state
rewards = []
for t in range(test_timesteps):
a = np.argmax(Q_table[(x, gold)])
r, y, gold = reward(x, a, gold)
# logging
print('Timestep: {}, Cur state: {}, Action: {}, New state: {}, Reward: {}, Gold: {}'.format(t+1, x, action_map[a], y, r, gold))
# if r != 0: print(t)
x = y
cum_reward += r*(gamma**t)
rewards.append(cum_reward)
plot(rewards, 'Test timesteps')
train()
test()
import itertools
from collections import Counter
the_code = "18 5 22 25 15 5 17 13 18 19 23 25 15 19 23 12 13 24 19 3 19 17 13 24 9 23 5 23 12 13 15 25 17 19 22 13 5 17 9 17 19 10 25 22 5 18 25 15 5 15 13 17 13 1 19 24 19 8 19 17 9 17 25"
dictionary_file_path = r"C:\Users\slawo\Downloads\sjp-20200205\slowa.txt"
# read dictionary file with all possible words
print ("read dictionary")
dictionary_file = open(dictionary_file_path, "r")
dictionary = dictionary_file.read()
words_in_dictionary = dictionary.replace("\n"," ").split(" ")
# initial parser of the code
print ("initialize parser")
words = [str(x).strip() for x in the_code.split(" ") if len(str(x).strip()) > 1 ]
words_in_code = dict()
for word in words:
words_in_code[word] = dict()
words_in_code[word]["text"] = word
words_in_code[word]["length"] = len(word.split(' '))
words_in_code[word]["words_a_len"] = len([x for x in words_in_dictionary if len(x) == words_in_code[word]["length"]])
words_in_code[word]["words_a"] = [x for x in words_in_dictionary if len(x) == words_in_code[word]["length"]]
# 1 - simple, check repeating characters in specific places and check matching words
print ("simple check / occurences")
for key, value in words_in_code.items():
tmp_list_of_possible_words = list(value["words_a"])
new_tmp_list_of_possible_words = []
current_word = value["text"]
list_of_occurences = Counter(current_word.split(" "))
list_of_repeating = [k for k, v in list_of_occurences.items() if int(v) > 1]
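# a code that repeats inside the word must map to the same letter at every one of its positions, so keep only dictionary words consistent with that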
if len(list_of_repeating) > 0:
iteration = 1
for code in list_of_repeating:
indexes = [i for i,n in enumerate(current_word.split(" ")) if n == code]
for possible_word in tmp_list_of_possible_words:
temp_characters = []
for index in indexes:
temp_characters.append(possible_word[index])
if len(set(temp_characters)) == 1:
new_tmp_list_of_possible_words.append(possible_word)
tmp_list_of_possible_words = new_tmp_list_of_possible_words
if iteration != len(list_of_repeating):
new_tmp_list_of_possible_words = []
iteration = iteration + 1
words_in_code[current_word]["words_b_len"] = len(new_tmp_list_of_possible_words)
words_in_code[current_word]["words_b"] = new_tmp_list_of_possible_words
else:
words_in_code[current_word]["words_b_len"] = len(tmp_list_of_possible_words)
words_in_code[current_word]["words_b"] = tmp_list_of_possible_words
# generate all possible pairs of words
print ("generate combinations")
all_words_combinations = list(set(itertools.combinations(words, 2)))
# # crack...
print ("crack")
iteration_combination = 1
for combination in all_words_combinations:
print("crack combination " + str(iteration_combination) + " of " + str(len(all_words_combinations)) + " >> " + str(combination))
words_in_code[combination[0]]["words_c_len"] = words_in_code[combination[0]]["words_b_len"]
words_in_code[combination[0]]["words_c"] = words_in_code[combination[0]]["words_b"]
words_in_code[combination[1]]["words_c_len"] = words_in_code[combination[1]]["words_b_len"]
words_in_code[combination[1]]["words_c"] = words_in_code[combination[1]]["words_b"]
list_of_combination_a = str(combination[0]).split(" ")
list_of_combination_b = str(combination[1]).split(" ")
common_codes = list(set(list_of_combination_a).intersection(list_of_combination_b))
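# codes shared between the two words must decode to the same letter in both, so keep only candidate word pairs that agree at those positions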
tmp_list_of_possible_words_a = words_in_code[combination[0]]["words_c"]
new_tmp_list_of_possible_words_a = []
tmp_list_of_possible_words_b = words_in_code[combination[1]]["words_c"]
new_tmp_list_of_possible_words_b = []
if len(common_codes) > 0:
for common_code in common_codes:
indexes_of_a = [i for i,n in enumerate(list_of_combination_a) if n == common_code]
indexes_of_b = [i for i,n in enumerate(list_of_combination_b) if n == common_code]
iteration = 1
for possible_word_a in tmp_list_of_possible_words_a:
for possible_word_b in tmp_list_of_possible_words_b:
current_values_in_given_indexes_for_possible_word_a = [n for i,n in enumerate(possible_word_a) if i in indexes_of_a]
current_values_in_given_indexes_for_possible_word_b = [n for i,n in enumerate(possible_word_b) if i in indexes_of_b]
all_current_values_from_possible_a_and_b = current_values_in_given_indexes_for_possible_word_a + current_values_in_given_indexes_for_possible_word_b
if len(set(all_current_values_from_possible_a_and_b)) == 1:
if possible_word_a not in new_tmp_list_of_possible_words_a:
new_tmp_list_of_possible_words_a.append(possible_word_a)
if possible_word_b not in new_tmp_list_of_possible_words_b:
new_tmp_list_of_possible_words_b.append(possible_word_b)
tmp_list_of_possible_words_a = new_tmp_list_of_possible_words_a
tmp_list_of_possible_words_b = new_tmp_list_of_possible_words_b
if iteration != len(common_codes):
new_tmp_list_of_possible_words_a = []
new_tmp_list_of_possible_words_b = []
iteration = iteration + 1
dadsa = 3
dsadsa = 453
words_in_code[combination[0]]["words_c_len"] = len(new_tmp_list_of_possible_words_a)
words_in_code[combination[0]]["words_c"] = new_tmp_list_of_possible_words_a
words_in_code[combination[1]]["words_c_len"] = len(new_tmp_list_of_possible_words_b)
words_in_code[combination[1]]["words_c"] = new_tmp_list_of_possible_words_b
else:
words_in_code[combination[0]]["words_c_len"] = len(tmp_list_of_possible_words_a)
words_in_code[combination[0]]["words_c"] = tmp_list_of_possible_words_a
words_in_code[combination[1]]["words_c_len"] = len(tmp_list_of_possible_words_b)
words_in_code[combination[1]]["words_c"] = tmp_list_of_possible_words_b
iteration_combination = iteration_combination + 1
for key, value in words_in_code.items():
x = str(value["text"])
y0 = str(len(words_in_dictionary))
y1 = str(value["words_a_len"])
y2 = str(value["words_b_len"])
y2 = str(value["words_c_len"])
print (x + " : " + y0 + " > " + y1 + " > " + y2 + " > " + y2) |
# File: celery_project/tools/qcloud/image_cut/cut_helper.py
# coding=utf-8
from __future__ import absolute_import, unicode_literals
import os
import time
import requests
from PIL import Image
from io import BytesIO
from requests import HTTPError
from tools.qcloud.cos_api.python_upload import UploadImage
class CutHelper:
def __init__(self):
pass
@staticmethod
def fix_http_url(url):
if url.startswith("//"):
fixed_url = "%s%s" % ("http:", url)
elif url.startswith("http"):
fixed_url = url
else:
fixed_url = url
return fixed_url
@staticmethod
def cut_and_upload(src_img_url, target_image_name):
need_retry = False
card_big_image_url = None
card_small_image_url = None
try:
r = requests.get(src_img_url)
r.raise_for_status()
if r.status_code == 200:
print("[cur_and_upload]get image successful")
# [cut]
img = Image.open(BytesIO(r.content))
x, y = img.size
y -= 80
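# "big" crop: keep the full width but drop the bottom 80 pixels of the source image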
box = (0, 0, 0 + x, 0 + y)
img_after = img.crop(box)
local_image_path = '/tmp/%s' % ('big_' + target_image_name)
img_after.save(local_image_path)
# [upload]
ui = UploadImage()
ret = ui.upload(('big_' + target_image_name), local_image_path)
# [get the card image COS URL]
if ret['code'] == 0:
card_big_image_url = ret['data']['source_url']
print("[cut_and_upload]upload successful url:%s" % card_big_image_url)
elif ret['code'] == -4018:
card_big_image_url = ret['data']['access_url']
print("[cut_and_upload]file already exists url:%s" % card_big_image_url)
else:
need_retry = True
print("[cut_and_upload]upload failure!")
# [delete file]
os.remove(local_image_path)
min_px = min(x, y)
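# "small" crop: a horizontally centred square of side min(x, y), taken from the top of the image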
box = ((x - min_px) / 2, 0, (x - min_px) / 2 + min_px, 0 + min_px)
img_after = img.crop(box)
local_image_path = '/tmp/%s' % ('small_' + target_image_name)
img_after.save(local_image_path)
# [upload]
ui = UploadImage()
ret = ui.upload(('small_' + target_image_name), local_image_path)
# [get the card image COS URL]
if ret['code'] == 0:
card_small_image_url = ret['data']['source_url']
print("[cut_and_upload]upload successful url:%s" % card_small_image_url)
elif ret['code'] == -4018:
card_small_image_url = ret['data']['access_url']
print("[cut_and_upload]file already exists url:%s" % card_small_image_url)
else:
need_retry = True
print("[cut_and_upload]upload failure!")
# [delete file]
os.remove(local_image_path)
except HTTPError:
print("[cut_helper]get image http error")
need_retry = True
except OSError: # for remove
print("[cut_helper]remove file not exists")
need_retry = True
return need_retry, card_big_image_url, card_small_image_url
if __name__ == '__main__':
def test_cut_and_upload():
url = "http://qcloud.dpfile.com/pc/mzD1Js5cY3gJZw1KDyyDkDxjSin4gm-SbusC9DQ1VxiWaWl-JNHXcz6PguBWxtDiTYGVDmosZWTLal1WbWRW3A.jpg"
a, b, c = CutHelper.cut_and_upload(url, 'heng.jpg')
print(a, b, c)
test_cut_and_upload()
from celery import shared_task
import requests as r
import yaml
import base64
import collections
import json
import time
import secrets
import string
import modules.keycloak_lib as keylib
from .exceptions import ProjectCreationException
from django.conf import settings
from .models import Flavor, Environment, Project, S3, MLFlow, ReleaseName
#
@shared_task
def create_keycloak_client_task(project_slug, username, repository):
# Create Keycloak client for project with default project role.
# The creator of the project assumes all roles by default.
print('Creating Keycloak resources.')
HOST = settings.DOMAIN
RELEASE_NAME = str(project_slug)
# This is just a dummy URL -- it doesn't go anywhere.
URL = 'https://{}/{}/{}'.format(HOST, username, RELEASE_NAME)
client_id, client_secret, res_json = keylib.keycloak_setup_base_client(URL, RELEASE_NAME, username, settings.PROJECT_ROLES, settings.PROJECT_ROLES)
if not res_json['success']:
print("ERROR: Failed to create keycloak client for project.")
else:
print('Done creating Keycloak client for project.')
def create_settings_file(project_slug):
proj_settings = dict()
proj_settings['active'] = 'stackn'
proj_settings['client_id'] = 'studio-api'
proj_settings['realm'] = settings.KC_REALM
proj_settings['active_project'] = project_slug
return yaml.dump(proj_settings)
@shared_task
def create_resources_from_template(user, project_slug, template):
from apps.models import Apps
import apps.views as appviews
# print(template)
decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
template = decoder.decode(template)
# print(template)
project = Project.objects.get(slug=project_slug)
alphabet = string.ascii_letters + string.digits
for key, item in template.items():
print(key)
if 'flavors' == key:
flavors = item
for key, item in flavors.items():
flavor = Flavor(name=key,
cpu_req=item['cpu']['requirement'],
cpu_lim=item['cpu']['limit'],
mem_req=item['mem']['requirement'],
mem_lim=item['mem']['limit'],
gpu_req=item['gpu']['requirement'],
gpu_lim=item['gpu']['limit'],
ephmem_req=item['ephmem']['requirement'],
ephmem_lim=item['ephmem']['limit'],
project=project)
flavor.save()
if 'environments' == key:
environments = item
print(item)
for key, item in environments.items():
try:
app = Apps.objects.filter(slug=item['app']).order_by('-revision')[0]
except Exception as err:
print("App for environment not found.")
print(item['app'])
print(project_slug)
print(user)
print(err)
raise
try:
environment = Environment(name=key,
project=project,
repository=item['repository'],
image=item['image'],
app=app)
environment.save()
except Exception as err:
print("Failed to create new environment: {}".format(key))
print(project)
print(item['repository'])
print(item['image'])
print(app)
print(user)
print(err)
# if 'S3' == key:
# S3 = item
# for key, item in S3.items():
# app = Apps.objects.get(slug=item['app'])
# environment = Environment(name=key,
# project=project,
# repository=item['repository'],
# image=item['image'],
# app=app)
# environment.save()
if 'apps' == key:
apps = item
for key, item in apps.items():
app_name = key
data = {
"app_name": app_name,
"app_action": "Create"
}
if 'credentials.access_key' in item:
item['credentials.access_key'] = ''.join(secrets.choice(alphabet) for i in range(8))
if 'credentials.secret_key' in item:
item['credentials.secret_key'] = ''.join(secrets.choice(alphabet) for i in range(14))
if 'credentials.username' in item:
item['credentials.username'] = 'admin'
if 'credentials.password' in item:
item['credentials.password'] = ''.join(secrets.choice(alphabet) for i in range(14))
data = {**data, **item}
print("DATA TEMPLATE")
print(data)
res = appviews.create([], user, project.slug, app_slug=item['slug'], data=data, wait=True)
if 'settings' == key:
print("PARSING SETTINGS")
if 'project-S3' in item:
print("SETTING DEFAULT S3")
s3storage=item['project-S3']
s3obj = S3.objects.get(name=s3storage, project=project)
project.s3storage = s3obj
project.save()
if 'project-MLflow' in item:
print("SETTING DEFAULT MLflow")
mlflow=item['project-MLflow']
mlflowobj = MLFlow.objects.get(name=mlflow, project=project)
project.mlflow = mlflowobj
project.save()
@shared_task
def delete_project_apps(project_slug):
project = Project.objects.get(slug=project_slug)
from apps.models import AppInstance
from apps.tasks import delete_resource
apps = AppInstance.objects.filter(project=project)
for app in apps:
delete_resource.delay(app.pk)
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 16:39:54 2018
nteseqr
Description:
Takes ribosome profiling data and annotated reference genomes to identify
N-terminal extensions (NTE)
@author: <NAME>
12.3.2019 (Concentrate Feed) Implementing improvements for MiMB publication
_x_ nteseqr, identify high likelihood NTEs and filter them from uORFseqr
04.28.2021 (Oral Solve)
_x_ Removed atis_uid hash
"""
#
#import multiprocessing
import re
import numpy as np
import pickle
import scipy.stats as stats
import argparse
import subprocess
import os
def output_handler(output):
if len(output.strip()) > 1:
print(output)
#function handles help commands
def help_dialog():
monolog=('Manual for nteseqr\n'+
'#=============================================================#\n'+
'Lead programmer: <NAME> <EMAIL>\n'+
'Release version: 1.0 \n'+
'Release date: 12.31.19 \n'+
'Description:\n\t nteseqr identifies N-terminal extensions using ribosome \n'+
'\t profiling and RNAseq data.\n'+
'\t\t Briefly, NTE-seqr attempts to identify N-terminal extensions by first \n'+
'\t finding all regions upstream of main ORF start codons and the nearest \n'+
'\t in-frame upstream stop codon. These search regions are then scanned to \n'+
'\t identify genes with large numbers of in-frame ribosomes. Search regions \n'+
'\t are also scanned for AUG and NCC start codons. We presume that the start \n'+
'\t codon most likely to function as the initiation site will have a confluence \n'+
'\t of features: higher relative start magnitude, higher relative translational \n'+
'\t efficiency, and a significant fraction of total in-frame ribosomes.'+
'Citation:'+
'Copyright MIT License - <NAME>'
'#=============================================================#\n'+
'For demonstration use:\n\t python nteseqr.py -demo\n'+
'To run an install test using defaults, use:\n\t python nteseqr.py -test\n'+
'')
print(monolog)
#
def demo():
monolog = ('\tStep 1. -load command loads and assigns reads. This will need to be done for '+
'every pair of RPF and RNA files. Here we use only two, the minimum number.\n')
print(monolog)
monolog = ('\t\tUsage:\n\tpython nteseqr.py -load -gff <path_to_gff_file> -fa <path to reference fasta file>\n'+
'\t-sample <sample_name> <path_to_RPF_bam_file> <path_to_RNA_bam_file> -o <output_prefix>'+
'\t\tExample:\n\tpython nteseqr.py -load -gff analysis/saccharomyces_cerevisiae.gff -fa data/reference_genomes/Scer_SacCer3.fa -samples Scer_A data/bam/Scer_A_RPF_10.bam data/bam/Scer_A_mRNA_10.bam -o Scer_A_nte\n')
print(monolog)
'''
python scripts/nteseqr.py -load -gff ensembl_50/saccharomyces_cerevisiae.gff \
-fa ensembl_50/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa \
-samples DGY1657_R1_nte \
/scratch/ps163/STAR_Carolina_03_18_2021/processed/RPF_DGY1657_R1.sorted.bam \
/scratch/ps163/STAR_Carolina_03_18_2021/processed/RNA_DGY1657_R1.sorted.bam \
-o nte/DGY1657_R1_nte
'''
monolog = ('\tStep 2. -load command loads and assigns reads for replicate 2.\n')
print(monolog)
monolog = ('\t\tUsage:\n\tpython nteseqr.py -load -gff <path_to_gff_file> -fa <path to reference fasta file>\n'+
'\t-sample <sample_name> <path_to_RPF_bam_file> <path_to_RNA_bam_file> -o <output_prefix>'+
'\t\tExample:\n\tpython nteseqr.py -load -gff analysis/saccharomyces_cerevisiae.gff -fa data/reference_genomes/Scer_SacCer3.fa -samples Scer_B data/bam/Scer_B_RPF_10.bam data/bam/Scer_B_mRNA_10.bam -o Scer_B_nte\n')
print(monolog)
'''
python scripts/nteseqr.py -load -gff ensembl_50/saccharomyces_cerevisiae.gff \
-fa ensembl_50/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa \
-samples DGY1657_R2_nte \
/scratch/ps163/STAR_Carolina_03_18_2021/processed/RPF_DGY1657_R2.sorted.bam \
/scratch/ps163/STAR_Carolina_03_18_2021/processed/RNA_DGY1657_R2.sorted.bam \
-o nte/DGY1657_R2_nte
'''
monolog = ('\tStep 3. -eval command generates candidate NTEs using the previously loaded samples.\n'+
'Note that the -samples values here are the output (-o) from the previous two steps.\n')
print(monolog)
monolog = ('\t\tUsage:\n\tpython nteseqr.py -eval -samples <name_of_sample_1> <name_of_sample_2> -o <output_prefix>\n'+
'\t\tExample:\n\tpython nteseqr.py -eval -samples Scer_A_nte Scer_B_nte -o scer.demo/combined\n')
print(monolog)
'''
python scripts/nteseqr.py -eval -samples nte/DGY1657_R1_nte nte/DGY1657_R2_nte -o nte/DG1657_nte
'''
monolog = ('\tStep 4. NTE candidate file.\n'+
'\tThe highest scoring alternative translation initiation site (aTIS) for each NTE event\n'+
'is output in bed file format.')
print(monolog)
#
def test():
monolog = ('=== Currently Testing nteseqr.py ===')
print(monolog)
monolog = ('\tTesting Step 1a. -load command loads and assigns reads for replicate 1.\n')
print(monolog)
bashCommand = ('python nteseqr.py -load -gff analysis/saccharomyces_cerevisiae.gff -fa data/reference_genomes/Scer_SacCer3.fa -samples Scer_A ../data/bam/Scer_A_RPF_10.bam ../data/bam/Scer_A_mRNA_10.bam -o Scer_A_nte')
print(bashCommand)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
monolog = ('\tTesting Step 1b. -load command loads and assigns reads for replicate 2.\n')
print(monolog)
bashCommand = ('python nteseqr.py -load -gff analysis/saccharomyces_cerevisiae.gff -fa data/reference_genomes/Scer_SacCer3.fa -samples Scer_B ../data/bam/Scer_B_RPF_10.bam ../data/bam/Scer_B_mRNA_10.bam -o Scer_B_nte')
print(bashCommand)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
monolog = ('\tTesting Step 3. -eval command to evaluate candidates based on loaded expression data.\n')
print(monolog)
bashCommand = ('python nteseqr.py -eval -samples Scer_A_nte Scer_B_nte -o scer.demo/combined')
print(bashCommand)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
### Argparser definitions
parser = argparse.ArgumentParser()
##nteseqr
'''
python nteseqr.py -load -gff saccharomyces_cerevisiae.gff -fa Scer_SacCer3.fa -samples starved_r2 r2_RPF.bam r2_RNA.bam -o set_r2
python nteseqr.py -load -gff saccharomyces_cerevisiae.gff -fa Scer_SacCer3.fa -samples starved_r3 r3_RPF.bam r3_RNA.bam -o set_r3
python nteseqr.py -eval -samples set_r2 set_r3 -o set_combined
---
python nteseqr.py -load -gff saccharomyces_cerevisiae.gff -fa Scer_SacCer3.fa -samples starved_r2 ./Scer_r2Starved_RPF_genome_Aligned.out.bam ./Scer_r2Starved_mRNA_genome_Aligned.out.bam -o starved_r2
python nteseqr.py -load -gff saccharomyces_cerevisiae.gff -fa Scer_SacCer3.fa -samples starved_r3 ./Scer_r3Starved_RPF_genome_Aligned.out.bam ./Scer_r3Starved_mRNA_genome_Aligned.out.bam -o starved_r3
python nteseqr.py -eval -samples starved_r2 starved_r3 -o starved_combined
'''
# help dialog arguments
parser.add_argument('-man',"--manual", action='store_true')
parser.add_argument('-demo',"--demo",action='store_true')
parser.add_argument('-test',"--test",action='store_true')
# Load reads arguments
parser.add_argument('-load',"--load_reads", action='store_true')
parser.add_argument('-i',"--input_file")
parser.add_argument('-o',"--output_file")
parser.add_argument('-fa',"--fa_file")
parser.add_argument('-gff',"--gff_file")
parser.add_argument('-samples', '--sample_list', nargs='+')
parser.add_argument('-gt',"--gene_tag")
parser.add_argument('-tl',"--transcript_leader_tag")
parser.add_argument('-3p',"--three_prime_UTR_tag")
parser.add_argument('-min_tl',"--minimum_length_transcript_leader")
parser.add_argument('-min_3p',"--minimum_length_three_prime_UTR")
parser.add_argument('-mask_tl',"--mask_length_transcript_leader")
parser.add_argument('-mask_3p',"--mask_length_three_prime_UTR")
parser.add_argument('-default',"--default_search_region_length")
#evaluate arguments
parser.add_argument('-eval',"--evaluate", action='store_true')
parser.add_argument('-min_reads',"--minimum_reads")
#
args = parser.parse_args()
###
#Common dictionaries
start_codons = ['ATG', 'TTG', 'GTG', 'CTG', 'ATC', 'ATA', 'ATT', 'ACG']
stop_codons = ['TAG', 'TAA', 'TGA']
strand_to_sign = {0:'+',1:'-'}
complement = {'A':'T','G':'C','T':'A','C':'G'}
###
if args.manual:
help_dialog()
if args.test:
test()
def parse_cigar(cigar, sequence):
"""This function calculates the offset for the read based on the match
"""
# TODO - maybe improve to handle '28M1I4M', 'TCAGGGAAATATTGATTTACCCAAAAAAAGACG'
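# only CIGARs with a single M run are processed, e.g. '3S28M2S' keeps just the 28 aligned bases (3 clipped from the left, 2 from the right of the sequence)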
if cigar.count('M') == 1:
left_cut = 0
right_cut = 0
left_list = re.split('M|S|D|I|H|N', cigar.split('M')[0])[0:-1]
M = re.split('M|S|D|I|H|N', cigar.split('M')[0])[-1]
right_list = re.split('M|S|D|I|H|N', cigar.split('M')[1])
for each in left_list:
if each:
left_cut += int(each)
for each in right_list:
if each:
right_cut -= int(each)
n_cigar = ('{}M').format(M)
if right_cut:
n_sequence = sequence[left_cut:right_cut]
else:
n_sequence = sequence[left_cut:]
#print (left_cut, right_cut, n_cigar, n_sequence)
return(True, n_cigar, n_sequence)
else:
return(False, '', '')
def unpackbits(x, num_bits=12):
xshape = list(x.shape)
x = x.reshape([-1,1])
to_and = 2**np.arange(num_bits).reshape([1,num_bits])
upb = (x & to_and).astype(bool).astype(int).reshape(xshape + [num_bits])
#0 (rp) read_paired
#1 (rmp) read_mapped_in_proper_pair
#2 (ru) read_unmapped
#3 (mu) mate_unmapped
#4 (rrs) read_reverse_strand
#5 (mrs) mate_reverse_strand
#6 (fip) first_in_pair
#7 (sip) second_in_pair
#8 (npa) not_primary_alignment
#9 (rfp) read_fails_platform
#10 (pcr) read_is_PCR_or_optical_duplicate
#11 (sa) supplementary_alignment
""" DISCORDANT definition (from samblaster)
Both side of the read pair are mapped (neither FLAG 0x4 or 0x8 is set).
The properly paired FLAG (0x2) is not set.
Note: We implemented an additional criteria to distinguish between strand re-orientations and distance issues
Strand Discordant reads must be both on the same strand.
"""
""" SPLIT READS
Identify reads that have between two and --maxSplitCount [2] primary and supplemental alignments.
Sort these alignments by their strand-normalized position along the read.
Two alignments are output as splitters if they are adjacent on the read, and meet these criteria:
each covers at least --minNonOverlap [20] base pairs of the read that the other does not.
the two alignments map to different reference sequences and/or strands.
the two alignments map to the same sequence and strand, and represent a SV that is at least --minIndelSize [50] in length,
and have at most --maxUnmappedBases [50] of un-aligned base pairs between them.
Split read alignments that are part of a duplicate read will be output unless the -e option is used.
"""
return(upb)
def os_mkdir(in_name):
if '/' in in_name:
directory_name = in_name.rsplit('/',1)[0]
if not os.path.exists(directory_name):
os.makedirs(directory_name)
###
''' Handle inputs and defaults:
users can set their own gff file to define 5'UTRs (aka. transcript leaders), 3'UTRs,
Transcription start sites, poly-a sites, and main orf coordinates.
Otherwise the standard gff for S.cerevisiae (from Spealman and Naik, Genome Research, 2017) is loaded.
'''
if args.gff_file:
gff_file = args.gff_file
else:
gff_file = '../data/reference_genomes/saccharomyces_cerevisiae.gff'
''' Given a gff and fasta identity the upstream in frame stops,
then downstream inframe starts,
then ask if reads from a sam map to those starts.
'''
if args.gene_tag:
gene_tag = args.gene_tag
else:
gene_tag = 'gene'
if args.transcript_leader_tag:
tl_tag = args.transcript_leader_tag
else:
tl_tag = 'five_prime_UTR'
if args.three_prime_UTR_tag:
tp_tag = args.three_prime_UTR_tag
else:
tp_tag = 'three_prime_UTR'
if args.minimum_length_transcript_leader:
min_tl = int(args.minimum_length_transcript_leader)
else:
min_tl = 15
# TODO - future version turns this on
#if args.minimum_length_three_prime_UTR:
# min_3p = int(args.minimum_length_three_prime_UTR)
#else:
# min_3p = 15
min_3p = 0
if args.mask_length_transcript_leader:
mask_tl = int(args.mask_length_transcript_leader)
else:
mask_tl = 3
if args.mask_length_three_prime_UTR:
mask_3p = int(args.mask_length_three_prime_UTR)
else:
mask_3p = 3
if args.default_search_region_length:
dsrl = int(args.default_search_region_length)
else:
dsrl = 0
if args.minimum_reads:
minimum_reads = int(args.minimum_reads)
else:
minimum_reads = 3
def parse_name(field):
field = field.strip()
if 'ID=' in field:
field = field.split('ID=')[1]
else:
if 'PARENT=' in field:
field = field.split('PARENT=')[1]
if ';' in field:
field = field.split(';')[0]
if '_mRNA' in field:
field = field.split('_mRNA')[0]
return(field)
def parse_gff(gff_name, gene_tag, tl_tag, tp_tag, min_utr, min_3p, mask_tl, mask_3p):
global fasta_dict
global filter_nt
coord_dict = {}
chromosome_set = set()
gff_file = open(gff_name)
#Build coord_dict
for line in gff_file:
if line[0] != '#':
name = parse_name(line.split('\t')[8])
chromo = line.split('\t')[0]
region = line.split('\t')[2]
start = int(line.split('\t')[3])-1
stop = int(line.split('\t')[4])
sign = line.split('\t')[6]
if chromo not in chromosome_set:
if 'chr' in chromo:
chromo = chromo.split('chr')[1]
chromosome_set.add(chromo)
if name not in coord_dict:
coord_dict[name] = {'chromo': chromo, 'sign': line.split('\t')[6], 'tl':'', 'gene':'', 'tp': '', 'tl_mask':'', 'tp_mask':''}
if chromo not in filter_nt:
filter_nt[chromo] = set()
if region == gene_tag:
coord_dict[name]['gene'] = (start, stop)
if region == tl_tag:
if abs(stop - start) > min_utr:
coord_dict[name]['tl'] = (start, stop)
if sign == '+':
for nt in range(stop-mask_tl, stop+1):
filter_nt[chromo].add(nt)
else:
for nt in range(start, start+mask_tl+1):
filter_nt[chromo].add(nt)
else:
coord_dict[name]['tl'] = 'too_small'
# TODO: Future version - C-terminal extensions should be a similar work flow
# if region == tp_tag:
# if abs(stop-start) > min_3p:
# coord_dict[name]['tp'] = (start, stop)
#
# if sign == '+':
# for nt in range(start, start+mask_3p+1):
# filter_nt[chromo].add(nt)
# else:
# for nt in range(stop-mask_3p, stop+1):
# filter_nt[chromo].add(nt)
# else:
# coord_dict[name]['tp'] = 'too_small'
gff_file.close()
# cycle through coord_dict, remove anything with 'too_small'
remove_set = set()
for name, region_dict in coord_dict.items():
if not region_dict['gene']:
remove_set.add(name)
else:
for each_region in ['tl','tp']:
if region_dict[each_region] == 'too_small':
remove_set.add(name)
for remove in remove_set:
_pop = coord_dict.pop(remove)
if '@' not in remove:
outline = ('Removing {} for too short of a UTR.').format(remove)
print(outline)
return(coord_dict, chromosome_set)
def parse_fasta(fasta_name):
fasta_file = open(fasta_name)
fasta_dict = {}
for line in fasta_file:
line = line.strip()
if line[0] == '>':
name = line.split('>')[1].split(' ')[0]
name = name.strip()
if name not in fasta_dict:
fasta_dict[name]=''
else:
print('Error in FASTA chromosome name, duplicate names identified.\n Names are after the caret ">" and before the space " " - Make sure each name is unique. ')
quit()
else:
fasta_dict[name]+=line
return(fasta_dict)
def rev_comp(seq):
seq = seq.upper()
seq = seq[::-1]
rev_seq = ''
for each in seq:
rev_seq+= complement[each]
return(rev_seq)
def use_dsrl(name, region, first_pass_dict, coord_dict):
chromo = first_pass_dict[name]['chromo']
sign = first_pass_dict[name]['sign']
gleast = coord_dict[name]['gene'][0]
gmost = coord_dict[name]['gene'][1]
if region == 'tl':
if sign == '+':
start = int(gleast)-dsrl
stop = int(gleast)-1
first_pass_dict[name][region] = fasta_dict[chromo][start:stop]
coord_dict[name][region]=(start, stop)
if sign == '-':
start = int(gmost)+1
stop = int(gmost)+dsrl
first_pass_dict[name][region] = fasta_dict[chromo][start:stop]
coord_dict[name][region]=(start, stop)
if region == 'tp':
if sign == '-':
start = int(gleast)-dsrl
stop = int(gleast)-1
first_pass_dict[name][region] = fasta_dict[chromo][start:stop]
coord_dict[name][region]=(start, stop)
if sign == '+':
start = int(gmost)+1
stop = int(gmost)+dsrl
first_pass_dict[name][region] = fasta_dict[chromo][start:stop]
coord_dict[name][region]=(start, stop)
return(first_pass_dict, coord_dict)
#def derive_coordinates(start, stop, triplet_step, sign, runmode):
# if (runmode == 'tl' and sign == '+') or (runmode == 'tp' and sign == '-'):
# sr_start = stop - (triplet_step*3)-3
# sr_stop = stop
# if (runmode == 'tl' and sign == '-') or (runmode == 'tp' and sign == '+'):
# sr_start = start
# sr_stop = start + (triplet_step*3)+3
#
# return(sr_start, sr_stop)
#
#def recover_sequence(triplet_step, tl_list, sign, runmode):
# if (runmode == 'tl' and sign == '+') or (runmode == 'tp' and sign == '-'):
# tl_list = tl_list[::-1]
# tl_list = tl_list[1:]
def find_stop(name, start, stop, seq, sign, runmode):
tl_list = []
sr_seq = []
#if (runmode == 'tl' and sign == '+') or (runmode == 'tp' and sign == '-'):
if (runmode == 'tl' and sign == '+'):
seq = seq[::-1]
for triplet_step in range(len(seq)//3):
triplet = seq[(3*triplet_step):(3*triplet_step)+3]
tl_list.append(triplet[::-1])
triplet_step = 0
for triplet in tl_list:
sr_seq.append(triplet)
if triplet in stop_codons:
sr_start = stop - (3*triplet_step)-3
sr_stop = stop
return(sr_start, sr_stop, sr_seq)
triplet_step += 1
return(start, stop, sr_seq)
#if (runmode == 'tl' and sign == '-') or (runmode == 'tp' and sign == '+'):
if (runmode == 'tl' and sign == '-'):
#reverse so the first codon is the one next to the start...
seq = seq[::-1]
#step out and reverse each codon to the original (negative) orientation, add to list
for triplet_step in range((len(seq))//3):
triplet = seq[(3*triplet_step):(3*triplet_step)+3]
tl_list.append(triplet[::-1])
# scan for stop, first stop send the modified sequence out for detection
triplet_step = 0
for triplet in tl_list:
sr_seq.append(triplet)
if triplet in stop_codons:
sr_start = start
sr_stop = start + (3*triplet_step)+3
return(sr_start, sr_stop, sr_seq)
triplet_step += 1
return(start, stop, sr_seq)
return(start, stop, sr_seq)
def find_starts(name, chromo, sign, start, stop, tl_list, runmode):
global atis_id_dict
global coord_dict
tl_list = tl_list[::-1]
seq = ''
if sign == '-':
for each_codon in tl_list:
each_codon = each_codon[::-1]
seq += each_codon
else:
for each_codon in tl_list:
seq += each_codon
start_coords = {'full':(start+1, stop)}
#start_coords['full']=(start+1, stop)
#if (runmode == 'tl' and sign == '+') or (runmode == 'tp' and sign == '-'):
if (runmode == 'tl' and sign == '+'):
triplet_step = 0
for triplet in tl_list:
#sr_seq += triplet
if triplet in start_codons:
sr_seq = ''
for codon in tl_list[triplet_step:]:
sr_seq += codon
atis_id = ('{}_{}_{}_{}').format(name, triplet, triplet_step, runmode)
#atis_id = hash(hash_line)
start_coords = {'atis':{},'sr':{}, 'up':{}, 'gene':[coord_dict[name]['gene'][0],coord_dict[name]['gene'][1]], 'meta':{'name':name, 'chromo':chromo, 'sign': sign, 'region':runmode, 'seq':sr_seq, 'triplet':triplet}, 'full': (start+1, stop)}
#calc aTIS
#+1 for gff format
sr_start = start + (3*triplet_step) + 1
sr_stop = sr_start + 3
start_coords['atis'] = (sr_start, sr_stop)
#calc whole region
sr_start = start + (3*triplet_step) + 1
sr_stop = stop
start_coords['sr'] = (sr_start, sr_stop)
#calc upstream
sr_start = start + 1
sr_stop = start + (3*triplet_step)
start_coords['up'] = (sr_start, sr_stop)
atis_id_dict[atis_id] = start_coords
triplet_step += 1
#if (runmode == 'tl' and sign == '-') or (runmode == 'tp' and sign == '+'):
if (runmode == 'tl' and sign == '-'):
#tl_list = tl_list[::-1]
triplet_step = 0
for triplet in tl_list:
if triplet in start_codons:
rt_step = len(tl_list)-triplet_step
sr_seq = ''
for codon in tl_list[triplet_step:]:
sr_seq += codon
atis_id = ('{}_{}_{}_{}').format(name, triplet, triplet_step, runmode)
#atis_id = hash(hash_line)
start_coords = {'atis':{},'sr':{}, 'up':{}, 'gene':[coord_dict[name]['gene'][0],coord_dict[name]['gene'][1]], 'meta':{'name':name, 'chromo':chromo, 'sign': sign, 'region':runmode, 'seq':sr_seq, 'triplet':triplet}, 'full': (start+1, stop)}
#calc atis
sr_start = start + (3*rt_step) - 2
sr_stop = start + (3*rt_step)
start_coords['atis'] = (sr_start, sr_stop)
#calc whole region
sr_start = start + 1
sr_stop = start + (3*rt_step)
start_coords['sr'] = (sr_start, sr_stop)
#calc upstream
sr_start = start + (3*rt_step) + 1
sr_stop = stop
start_coords['up'] = (sr_start, sr_stop)
atis_id_dict[atis_id] = start_coords
triplet_step += 1
return(start_coords)
def build_search_region(coord_dict, fasta_dict, dsrl):
first_pass_dict = {}
#make each, if possible
for name, deets in coord_dict.items():
chromo = deets['chromo']
if chromo in fasta_dict:
sign = deets['sign']
first_pass_dict[name] = {'chromo': chromo, 'sign': sign, 'tl':'', 'gene':'', 'tp':''}
for region in ['tl', 'gene', 'tp']:
if deets[region]:
start = int(deets[region][0])
stop = int(deets[region][1])+1
if sign == '+':
first_pass_dict[name][region] = fasta_dict[chromo][start:stop-1]
if sign == '-':
first_pass_dict[name][region] = rev_comp(fasta_dict[chromo][start:stop])
#use 'gene' and default search region length to fill in those that are absent
for name, deets in first_pass_dict.items():
chromo = deets['chromo']
sign = deets['sign']
for region in ['tl', 'tp']:
if not deets[region]:
first_pass_dict, coord_dict = use_dsrl(name, region, first_pass_dict, coord_dict)
#scan for stops and define regions
search_region_dict = {}
assign_region_dict = {'tl':{}, 'tp':{}}
flanking_region_dict = {'tl':{}, 'tp':{}}
for name, deets in first_pass_dict.items():
chromo = deets['chromo']
sign = deets['sign']
search_region_dict[name] = {'chromo': chromo, 'sign': sign, 'tl':'', 'tp':'', 'starts':{'tl':{},'tp':{}}}
for region in ['tl', 'tp']:
r_start = coord_dict[name][region][0]
r_stop = coord_dict[name][region][1]
sr_start, sr_stop, sr_seq = find_stop(name, r_start, r_stop, deets[region], sign, region)
search_region_dict[name]['starts'][region] = find_starts(name, chromo, sign, sr_start, sr_stop, sr_seq, region)
if chromo not in assign_region_dict[region]:
assign_region_dict[region][chromo]={}
if chromo not in flanking_region_dict[region]:
flanking_region_dict[region][chromo]={}
for nt in range(sr_start, sr_stop+1):
if nt not in assign_region_dict[region][chromo]:
gene_set = set()
gene_set.add(name)
assign_region_dict[region][chromo][nt] = gene_set
else:
assign_region_dict[region][chromo][nt].add(name)
for f_nt in range(r_start, r_stop+1):
if f_nt not in range(sr_start, sr_stop+1):
if f_nt not in flanking_region_dict[region][chromo]:
gene_set = set()
gene_set.add(name)
flanking_region_dict[region][chromo][f_nt] = gene_set
else:
flanking_region_dict[region][chromo][f_nt].add(name)
return(search_region_dict, assign_region_dict, flanking_region_dict)
def convert_to_sam(each_sample):
if each_sample[-4:] == '.bam':
new_name = each_sample.split('.bam')[0]+'.sam'
monolog = ('\tLoading bam file {}.\n').format(str(each_sample))
print(monolog)
bashCommand = ('samtools view -h -o {} {}').format(new_name, each_sample)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
return(new_name)
if each_sample[-4:] == '.sam':
return(each_sample)
def assign_reads(uid, chromo, start, stop, sign, runmode):
global sample_dict
global search_region_dict
global assign_region_dict
global flanking_region_dict
global filter_nt
hit_ct = 0
filter_region = filter_nt[chromo]
for region in ['tl','tp']:
for nt in range(start, stop+1):
if nt not in filter_region:
if nt in assign_region_dict[region][chromo]:
gene_set = assign_region_dict[region][chromo][nt]
for gene in gene_set:
if sign == search_region_dict[gene]['sign']:
hit_ct += 1
if gene not in sample_dict[runmode]['sr']:
uid_set = set()
uid_set.add(uid)
sample_dict[runmode]['sr'][gene] = {}
sample_dict[runmode]['sr'][gene][nt] = uid_set
else:
if nt not in sample_dict[runmode]['sr'][gene]:
uid_set = set()
uid_set.add(uid)
sample_dict[runmode]['sr'][gene][nt] = uid_set
else:
sample_dict[runmode]['sr'][gene][nt].add(uid)
else:
if nt in flanking_region_dict[region][chromo]:
gene_set = flanking_region_dict[region][chromo][nt]
for gene in gene_set:
if sign == search_region_dict[gene]['sign']:
hit_ct += 1
if gene not in sample_dict[runmode]['fl']:
uid_set = set()
uid_set.add(uid)
sample_dict[runmode]['fl'][gene] = {}
sample_dict[runmode]['fl'][gene][nt] = uid_set
else:
if nt not in sample_dict[runmode]['fl'][gene]:
uid_set = set()
uid_set.add(uid)
sample_dict[runmode]['fl'][gene][nt] = uid_set
else:
sample_dict[runmode]['fl'][gene][nt].add(uid)
return(hit_ct)
def load_reads(convert_name, chromosome_set, search_region_dict, assign_region_dict, flanking_region_dict, runmode):
global sample_bam_dict
#TODO: Future version - Basic version only extracts 28M reads, make this better
sam_file = open(convert_name)
ct = 0
hit_ct = 0
header_list = []
for line in sam_file:
if line[0] == '@':
header_list.append(line)
else:
ct += 1
cigar = line.split('\t')[5]
chromo = line.split('\t')[2]
if 'chr' in chromo:
chromo = chromo.split('chr')[1]
if chromo in chromosome_set:
uid = line.split('\t')[0]+'~'+str(ct)
flag = line.split('\t')[1]
start = int(line.split('\t')[3])
sign = strand_to_sign[unpackbits(np.array([int(flag)]))[0][4]]
sequence = line.split('\t')[9]
process, n_cigar, n_sequence = parse_cigar(cigar, sequence)
stop = start + len(n_sequence)
new_hits = assign_reads(uid, chromo, start, stop, sign, runmode)
hit_ct += new_hits
if new_hits > 0:
sample_bam_dict[runmode][uid] = line
if '28M' in cigar and runmode == 'RPF':
if chromo in chromosome_set:
if process:
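# assign a P-site for 28-mer footprints: 12 nt in from the 5' end of the read (start + 12 on '+', stop - 13 on '-')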
if sign == '-':
psite = stop - 13
p_seq = n_sequence[-13]
else:
psite = start + 12
p_seq = n_sequence[12]
mapq = line.split('\t')[4]
mid = str(line.split('\t')[6:9]).replace('[','').replace(']','').replace(',','\t').replace("'",'').replace(' ','')
qual = str(line.split('\t')[10:]).replace('[','').replace(']','').replace(',','\t').replace("'",'').replace(' ','')
new_hits = assign_reads(uid, chromo, psite, psite, sign, 'psites')
hit_ct += new_hits
new_line = ('{uid}\t{flag}\t{chromo}\t{psite}\t{mapq}\t1M\t{mid}\t{p_seq}\t{qual}\n').format(uid=uid, flag=flag, chromo=chromo,
psite=psite, mapq=mapq, mid=mid, p_seq=p_seq, qual=qual)
sample_bam_dict['psites'][uid]= new_line
sam_file.close()
sample_bam_dict['header'] = header_list
return()
def output_bam(output_dir, header_list, new_bam_dict):
file_name = ('{}.sam').format(output_dir)
new_sam_file = open(file_name, 'w')
for header in header_list:
new_sam_file.write(header)
for uid, line in new_bam_dict.items():
if uid != 'header':
new_sam_file.write(line)
new_sam_file.close()
print('\tConverting to bam file\n')
bashCommand = ('samtools view -Sb {sample_name}.sam > {sample_name}_unsorted.bam').format(sample_name=args.output_file)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
bashCommand = ('samtools sort -o {sample_name}.bam {sample_name}_unsorted.bam').format(sample_name=args.output_file)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
bashCommand = ('samtools index {sample_name}.bam').format(sample_name=args.output_file)
output_handler(subprocess.check_output([bashCommand],stderr=subprocess.STDOUT,shell=True))
def output_bed(name, atis_id, tis_scores, atis_sample_dict, outputfile):
for each_sample, atis_id_dict in atis_sample_dict.items():
score = np.median(tis_scores)
if atis_id in atis_id_dict:
check_name = atis_id_dict[atis_id]['meta']['name']
chromo = atis_id_dict[atis_id]['meta']['chromo']
sign = atis_id_dict[atis_id]['meta']['sign']
seq = atis_id_dict[atis_id]['meta']['seq']
triplet = atis_id_dict[atis_id]['meta']['triplet']
start, stop = atis_id_dict[atis_id]['sr']
if name != check_name:
print('atis name disagreement', name, check_name, atis_id)
else:
outline = ('{chromo}\t{start}\t{stop}\t{name}_{triplet}_{seq}_nte\t{score}\t{sign}\n').format(chromo=chromo, start=start, stop=stop, name=name, seq=seq, triplet=triplet, score=score, sign=sign)
outputfile.write(outline)
return()
print('not found', name, atis_id, score)
def parse_samples(sample_list):
name_dict = {}
if sample_list:
if len(sample_list)%3!= 0:
print('Error: each sample requires a Name, RPF bam file, and RNA bam file.')
else:
for i in range(int(len(sample_list)/3)):
name_dict[sample_list[i*3]]=[sample_list[(i*3)+1], sample_list[(i*3)+2]]
return(name_dict)
def eval_atis(atis_id, athird):
global atis_id_dict
global score_dict
global eval_atis_dict
global quantified_search_regions_dict
process_ct = 0
for each_sample in args.sample_list:
if atis_id in atis_id_dict[each_sample]:
if atis_id_dict[each_sample][atis_id]['meta']['region'] == 'tl':
name = atis_id_dict[each_sample][atis_id]['meta']['name']
if name in quantified_search_regions_dict[each_sample]:
print(quantified_search_regions_dict[each_sample][name].keys())
if atis_id in quantified_search_regions_dict[each_sample][name]:
if quantified_search_regions_dict[each_sample][name][atis_id]['sr']['psites'][0] >= minimum_reads:
process_ct += 1
if (process_ct/float(len(args.sample_list)) > 0.5) and process_ct >= 2:
for each_sample in args.sample_list:
atis_details = quantified_search_regions_dict[each_sample][name][atis_id]
if atis_id not in eval_atis_dict:
eval_atis_dict[atis_id] = {'rte':[], 'rsm':[], 'tis_score':[], 'pval':[], 'atis_psites':[], 'sr_psites':[], 'up_psites':[]}
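# the TIS score combines the three features described in the help text:
# rte - relative translational efficiency (RPF reads / RNA reads over the search region)
# rsm - relative start magnitude (P-sites on the candidate aTIS codon vs. upstream P-sites)
# weight - fraction of the full region's P-sites that fall on the aTIS codon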
rte = (atis_details['sr']['RPF'])/float(max(1, atis_details['sr']['RNA']))
rsm = (atis_details['atis']['psites'][0])/float(max(1, sum(atis_details['up']['psites'])))
weight = (atis_details['atis']['psites'][0]/float(atis_details['full']['psites'][0]))
eval_atis_dict[atis_id]['tis_score'].append(rte * rsm * weight)
eval_atis_dict[atis_id]['pval'].append(stats.binom_test([atis_details['sr']['psites'][0], atis_details['sr']['psites'][1] + atis_details['sr']['psites'][2]], p=athird))
if np.median(eval_atis_dict[atis_id]['pval']) <= 0.05 and ((atis_details['sr']['psites'][0] >= atis_details['sr']['psites'][1]) and (atis_details['sr']['psites'][0] >= atis_details['sr']['psites'][2])):
output_bed(name, atis_id, eval_atis_dict[atis_id]['tis_score'], atis_id_dict, nte_potential_file)
if name not in score_dict:
score_dict[name] = {}
if atis_id not in score_dict[name]:
score_dict[name][atis_id] = (eval_atis_dict[atis_id]['tis_score'])
else:
print('atis_id error', atis_id)
1/0
if __name__ == '__main__':
atis_id_dict = {}
filter_nt = {}
os_mkdir(args.output_file)
if args.load_reads:
print('Starting nteseq ... ')
name_dict = parse_samples(args.sample_list)
#2 parse fasta
print('Parsing fasta file... ')
fasta_dict = parse_fasta(args.fa_file)
#1 parse_gff
print('Parsing GFF file for genes with transcript leaders...')
coord_dict, chromosome_set = parse_gff(gff_file, gene_tag, tl_tag, tp_tag, min_tl, min_3p, mask_tl, mask_3p)
#3 for each gene get fasta of upstream, scan for first stop codon, all start codons
print('Defining NTE search regions ...')
search_region_dict, assign_region_dict, flanking_region_dict = build_search_region(coord_dict, fasta_dict, dsrl)
#4 load bam, sam file
print('Loading reads from bam/sam files into candidate regions... ')
sample_dict = {'RNA':{'sr':{}, 'fl':{}, 'uid':{}}, 'RPF':{'sr':{}, 'fl':{}, 'uid':{}}, 'psites':{'sr':{}, 'fl':{}, 'uid':{}}}
sample_bam_dict = {'RNA':{}, 'RPF':{}, 'psites':{}, 'header':[]}
#for i in range(len(name_dict)+1):
jobs = []
for each_sample, RPF_RNA_pair in name_dict.items():
print('Loading sample: ' + str(each_sample))
RPF_name, RNA_name = RPF_RNA_pair
convert_rpf_name = convert_to_sam(RPF_name)
convert_rna_name = convert_to_sam(RNA_name)
load_reads(convert_rpf_name, chromosome_set, search_region_dict, assign_region_dict, flanking_region_dict, 'RPF')
load_reads(convert_rna_name, chromosome_set, search_region_dict, assign_region_dict, flanking_region_dict, 'RNA')
#TODO: future versions multiprocess read loading
#p = multiprocessing.Process(target=load_reads, args=(convert_rpf_name, chromosome_set, search_region_dict, assign_region_dict, flanking_region_dict, 'RPF',))
#jobs.append(p)
#p = multiprocessing.Process(target=load_reads, args=(convert_rna_name, chromosome_set, search_region_dict, assign_region_dict, flanking_region_dict, 'RNA',))
#jobs.append(p)
#p.start()
#p.join()
#
print('generating P-site bam files ... ')
output_bam(args.output_file, sample_bam_dict['header'], sample_bam_dict['psites'])
print('Writing data out:')
resource_pickle_name = ('{}_sample_dict.p').format(args.output_file)
print('\t' + resource_pickle_name)
with open(resource_pickle_name, 'wb') as file:
pickle.dump(sample_dict, file)
#
print('loading reads and p-sites into candidate regions ... ')
quantified_search_regions_dict = {}
rep_psite_dict = sample_dict['psites']['sr']
rep_RPF_dict = sample_dict['RPF']['sr']
rep_RNA_dict = sample_dict['RNA']['sr']
for atis_id, etc in atis_id_dict.items():
name = etc['meta']['name']
sign = etc['meta']['sign']
if sign == '+':
gene_start_codon = etc['gene'][0]
else:
gene_start_codon = etc['gene'][1]
if name in rep_psite_dict:
sr_psites_dict = rep_psite_dict[name]
sr_RPF_dict = rep_RPF_dict.get(name, {})
sr_RNA_dict = rep_RNA_dict.get(name, {})
if name not in quantified_search_regions_dict:
quantified_search_regions_dict[name] = {}
if atis_id not in quantified_search_regions_dict[name]:
quantified_search_regions_dict[name][atis_id] = {'atis':{}, 'sr': {}, 'up':{}, 'full':{}}
for atis_region in ['atis', 'sr', 'up', 'full']:
quantified_search_regions_dict[name][atis_id][atis_region] = {'psites':{}, 'RNA':0, 'RPF':0}
quantified_search_regions_dict[name][atis_id][atis_region]['psites'] = [0,0,0]
for nt in range(etc[atis_region][0], etc[atis_region][1]+1):
if nt in sr_psites_dict:
psite_ct = len(sr_psites_dict[nt])
rpf_ct = len(sr_RPF_dict.get(nt, []))
rna_ct = len(sr_RNA_dict.get(nt, []))
if sign == '+':
reading_frame = (nt - gene_start_codon - 1) % 3
else:
reading_frame = (gene_start_codon - nt) % 3
quantified_search_regions_dict[name][atis_id][atis_region]['psites'][reading_frame] += psite_ct
quantified_search_regions_dict[name][atis_id][atis_region]['RPF'] += rpf_ct
quantified_search_regions_dict[name][atis_id][atis_region]['RNA'] += rna_ct
print('Writing data out:')
resource_pickle_name = ('{}_quantified_search_regions_dict.p').format(args.output_file)
print('\t' + resource_pickle_name)
with open(resource_pickle_name, 'wb') as file:
pickle.dump(quantified_search_regions_dict, file)
resource_pickle_name = ('{}_atis_id_dict.p').format(args.output_file)
print('\t' + resource_pickle_name)
with open(resource_pickle_name, 'wb') as file:
pickle.dump(atis_id_dict, file)
if args.evaluate:
union_set = set()
quantified_search_regions_dict = {'union':{}}
sample_dict = {}
atis_id_dict = {}
print('Loading data: ')
for each_sample in args.sample_list:
pickle_out = ('{}_quantified_search_regions_dict.p').format(each_sample)
print('\t' + pickle_out)
quantified_search_regions_dict[each_sample] = pickle.load(open(pickle_out, 'rb'))
pickle_out = ('{}_sample_dict.p').format(each_sample)
print('\t' + pickle_out)
sample_dict[each_sample] = pickle.load(open(pickle_out, 'rb'))
pickle_out = ('{}_atis_id_dict.p').format(each_sample)
print('\t' + pickle_out)
atis_id_dict[each_sample] = pickle.load(open(pickle_out, 'rb'))
for atis_id, _etc in atis_id_dict[each_sample].items():
union_set.add(atis_id)
athird = 1.0 / 3.0
eval_atis_dict = {}
score_dict = {}
nte_candidate_file_name = ('{}.bed').format(args.output_file)
nte_candidate_file = open(nte_candidate_file_name, 'w')
nte_potential_file_name = ('{}_potential.bed').format(args.output_file)
nte_potential_file = open(nte_potential_file_name, 'w')
print('Evaluating candidate NTE events... ')
for atis_id in union_set:
eval_atis(atis_id, athird)
for name, atis_scores in score_dict.items():
best_score = 0
best_set = [0,0,0]
best_atis = ''
for atis_id, atis_score in atis_scores.items():
if len(atis_score) > 0:
calc_score = sum(atis_score)
if best_atis != atis_id:
if calc_score > best_score:
best_score = calc_score
best_atis = atis_id
best_set = atis_score
if calc_score == best_score:
if np.median(atis_score) > np.median(best_set):
best_score = calc_score
best_atis = atis_id
best_set = atis_score
if best_atis != '':
output_bed(name, best_atis, best_score, atis_id_dict, nte_candidate_file)
nte_potential_file.close()
nte_candidate_file.close()
print('Completed evaluation of candidate NTE events; results saved in', nte_candidate_file_name)
# Repository: tddesjardins/stsynphot_refactor
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test catalog.py module."""
# THIRD-PARTY
import numpy as np
import pytest
# ASTROPY
from astropy import units as u
# SYNPHOT
from synphot import exceptions as synexceptions
from synphot import units
# LOCAL
from .. import catalog, exceptions
@pytest.mark.remote_data
def test_grid_to_spec():
"""Test creating spectrum from grid, and related cache."""
sp = catalog.grid_to_spec('k93models', 6440, 0, 4.3)
w = sp.waveset
w_first_50 = w[:50]
y_first_50 = units.convert_flux(w_first_50, sp(w_first_50), units.FLAM)
w_last_50 = w[-50:]
y_last_50 = units.convert_flux(w_last_50, sp(w_last_50), units.FLAM)
assert 'k93' in sp.meta['expr']
np.testing.assert_allclose(
w_first_50.value,
[90.90000153, 93.50000763, 96.09999847, 97.70000458, 99.59999847, 102,
103.80000305, 105.6000061, 107.70000458, 110.40000153, 114,
117.79999542, 121.30000305, 124.79999542, 127.09999847, 128.40000916,
130.5, 132.3999939, 133.90000916, 136.6000061, 139.80000305,
143.30000305, 147.19999695, 151, 155.20001221, 158.80000305,
162.00001526, 166, 170.30000305, 173.40000916, 176.80000305,
180.20001221, 181.69999695, 186.1000061, 191, 193.8999939,
198.40000916, 201.80000305, 205, 210.5, 216.20001221, 219.80000305,
223, 226.80000305, 230, 234, 240, 246.5, 252.3999939, 256.80001831])
np.testing.assert_array_equal(y_first_50.value, 0)
np.testing.assert_allclose(
w_last_50.value,
[83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000, 87400,
87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, 91400,
91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, 95400,
95800, 96200, 96600, 97000, 97400, 97800, 98200, 98600, 99000, 99400,
99800, 100200, 200000, 400000, 600000, 800000, 1000000, 1200000,
1400000, 1600000])
np.testing.assert_allclose(
y_last_50.value,
[2.52510792e+03, 2.47883842e+03, 2.43311637e+03, 2.38843415e+03,
2.34455095e+03, 2.30190141e+03, 2.25982266e+03, 2.21930715e+03,
2.17950029e+03, 2.14031198e+03, 2.10216378e+03, 2.06411734e+03,
2.02789000e+03, 1.99191291e+03, 1.95752853e+03, 1.92259620e+03,
1.88976666e+03, 1.85768178e+03, 1.82475330e+03, 1.79369145e+03,
1.76356796e+03, 1.73377904e+03, 1.70432192e+03, 1.67572220e+03,
1.64739969e+03, 1.61997833e+03, 1.59299008e+03, 1.56657219e+03,
1.54066436e+03, 1.51508799e+03, 1.49065412e+03, 1.46606232e+03,
1.44255637e+03, 1.41922753e+03, 1.39555249e+03, 1.37360936e+03,
1.35179525e+03, 1.33041182e+03, 1.30944458e+03, 1.28851215e+03,
1.26828580e+03, 1.24841065e+03, 8.04744247e+01, 5.03657385e+00,
9.88851448e-01, 3.10885179e-01, 1.26599425e-01, 6.07383728e-02,
3.26344365e-02, 1.90505413e-02])
# Test cache
key = list(catalog._CACHE.keys())[0]
assert key.endswith('grid/k93models/catalog.fits')
assert isinstance(catalog._CACHE[key], list)
# Reset cache
catalog.reset_cache()
assert catalog._CACHE == {}
@pytest.mark.remote_data
@pytest.mark.parametrize(
('t', 'm', 'g'),
[(3499, 0, 4.3),
(50001, 0, 4.3),
(6440, -6, 4.3),
(6440, 2, 4.3),
(6440, 0, -1),
(6440, 0, 10)])
def test_grid_to_spec_bounds_check(t, m, g):
"""Test out of bounds check."""
with pytest.raises(exceptions.ParameterOutOfBounds):
catalog.grid_to_spec('k93models', t, m, g)
@pytest.mark.remote_data
def test_phoenix_gap():
"""
https://github.com/spacetelescope/stsynphot_refactor/issues/44
"""
catalog.grid_to_spec('phoenix', 2700, -1, 5.1) # OK
with pytest.raises(exceptions.ParameterOutOfBounds):
catalog.grid_to_spec('phoenix', 2700, -0.5, 5.1)
with pytest.raises(exceptions.ParameterOutOfBounds):
catalog.grid_to_spec('phoenix', 2700, -0.501, 5.1)
def test_grid_to_spec_exceptions_1():
"""Test other exceptions."""
# Invalid catalog
with pytest.raises(synexceptions.SynphotError):
catalog.grid_to_spec('foo', 6440, 0, 4.3)
@pytest.mark.remote_data
def test_grid_to_spec_exceptions_2():
# Quantity is not acceptable for log values
with pytest.raises(synexceptions.SynphotError):
catalog.grid_to_spec(
'k93models', 6440, 0 * u.dimensionless_unscaled, 4.3)
with pytest.raises(synexceptions.SynphotError):
catalog.grid_to_spec(
'k93models', 6440, 0, 4.3 * u.dimensionless_unscaled)
# Repository: spacetelescope/jwst-fgs-countrate, file: fgscountrate/utils.py
import io
import numpy as np
import pandas as pd
import requests
def convert_to_abmag(value, name):
"""
Convert magnitude to AB magnitude
Parameters
----------
value : float
Value of the band
name : str
Name of the band as stated in the GSC column name.
Options are: 2MASS: tmassJMag, tmassHMag, tmassKsMag
SDSS: SDSSgMag, SDSSiMag, SDSSzMag
GSC: JpgMag, FpgMag, NpgMag
"""
mag_constants = {
'tmassJMag': 0.90,
'tmassHMag': 1.37,
'tmassKsMag': 1.85,
'SDSSuMag': 0.0,
'SDSSgMag': 0.0,
'SDSSrMag': 0.0,
'SDSSiMag': 0.0,
'SDSSzMag': 0.0,
'JpgMag': -0.055,
'FpgMag': 0.24,
'NpgMag': 0.48,
}
abmag = value + mag_constants[name]
return abmag
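# Illustrative example (values are hypothetical):
#   >>> convert_to_abmag(10.0, 'tmassJMag')   # 2MASS J: +0.90 offset
#   10.9
#   >>> convert_to_abmag(15.2, 'SDSSgMag')    # SDSS bands carry a zero offset
#   15.2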
def query_gsc(gs_id=None, ra=None, dec=None, cone_radius=None, minra=None, maxra=None,
mindec=None, maxdec=None, catalog=None):
"""
Query the Guide Star Catalog using one of 4 query options:
1) Guide Star ID
2) Exact RA & DEC
3) Centering RA & DEC and a cone search radius
4) Min/Max RA and Min/Max DEC for a bounding box search
Only pass in values for one of the 4 query options and pass all
required values
Parameters
----------
gs_id : str
The ID of the guide star of interest. This corresponds to the
HST ID input in the Guide Star Catalog 2
ra : float
The right ascension in degrees of the target or catalog sub-section
to be retrieved.
dec : float
The declination in degrees of the target or catalog sub-section
to be retrieved.
cone_radius : float
Cone search radius in degrees.
minra : float
Minimum right ascension in degrees for box search.
maxra : float
Maximum right ascension in degrees for box search.
mindec : float
Minimum declination in degrees for box search.
maxdec : float
Maximum declination in degrees for box search.
catalog : str
There are 5 different GSC2 versions available. Default is GSC 2.4.2
Call GSC241 to access GSC2.4.1
Call GSC242 to access GSC2.4.2
Returns
-------
data_frame : Pandas DataFrame
A pd dataframe containing the row(s) from the specified catalog
corresponding to the requested GS ID, coordinates, &/or area
"""
# Set file format and default catalog
file_format = 'CSV'
if catalog is None:
catalog = 'GSC242'
# Check only 1 coordinate specification is being used AND the coordinate specification chosen is complete
method_list = [any([gs_id]), any([ra, dec, cone_radius]), any([minra, maxra, mindec, maxdec])]
complete_list = [all([gs_id]), all([ra, dec]) or all([ra, dec, cone_radius]), all([minra, maxra, mindec, maxdec])]
if method_list.count(True) != 1:
raise ValueError("You may only specify coordinates using one method.")
if complete_list.count(True) != 1:
raise ValueError("You must specify a full set of coordinates for your chosen method.")
# Set URL
url = 'http://gsss.stsci.edu/webservices/vo/CatalogSearch.aspx?'
if gs_id is not None:
url = url + 'GSC2ID={}&'.format(gs_id)
if ra is not None:
url = url + 'RA={}&'.format(ra)
if dec is not None:
url = url + 'DEC={}&'.format(dec)
if cone_radius is not None:
url = url + 'SR={}&'.format(cone_radius)
if minra is not None:
url = url + 'BBOX={}%2c'.format(minra)
if mindec is not None:
url = url + '{}%2c'.format(mindec)
if maxra is not None:
url = url + '{}%2c'.format(maxra)
if maxdec is not None:
url = url + '{}&'.format(maxdec)
url = url + 'FORMAT={}&CAT={}'.format(file_format, catalog)
# Query data
request = requests.get(url).content
# Read data into pandas
try:
data_frame = pd.read_csv(io.StringIO(request.decode('utf-8')), skiprows=1, na_values=[' '])
data_frame.replace(np.nan, -999, regex=True, inplace=True)
except pd.errors.EmptyDataError:
raise NameError("No guide stars match these requirements in catalog {}".format(catalog))
# Update header to new capitalization if using an old GSC version
if catalog in ['GSC2412', 'GSC241']:
data_frame = data_frame.rename(columns={'tmassJmag': 'tmassJMag', 'tmassJmagErr': 'tmassJMagErr',
'tmassHmag': 'tmassHMag', 'tmassHmagErr': 'tmassHMagErr'})
return data_frame
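# Illustrative usage sketch (the coordinates and ID below are hypothetical; a live
# query requires network access to the GSC web service):
#   df = query_gsc(ra=90.9, dec=-66.5, cone_radius=0.02)               # cone search
#   df = query_gsc(gs_id='N13I000018')                                 # guide star ID lookup
#   df = query_gsc(minra=10.0, maxra=10.5, mindec=-5.0, maxdec=-4.5)   # bounding box
# Mixing query methods, or supplying an incomplete set, raises ValueError.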
def check_band_below_faint_limits(bands, mags):
"""
Check if a star's magnitude for a certain band is below the
faint limit for that band.
Parameters
----------
bands : str or list
Band(s) to check (e.g. ['SDSSgMag', 'SDSSiMag']).
mags : float or list
Magnitude(s) of the band(s) corresponding to the band(s) in the
bands variable
Returns
-------
bool : True if the band is below the faint limit. False if it is not.
"""
if isinstance(bands, str):
bands = [bands]
if isinstance(mags, float):
mags = [mags]
for band, mag in zip(bands, mags):
if 'SDSSgMag' in band and mag >= 24:
return True
elif 'SDSSrMag' in band and mag >= 24:
return True
elif 'SDSSiMag' in band and mag >= 23:
return True
elif 'SDSSzMag' in band and mag >= 22:
return True
return False
def trapezoid_sum(df, col, col2='Wavelength'):
"""
Sum across a Pandas dataframe of values
using a trapezoid method
Parameters
----------
df : Pandas Dataframe
Dataframe with columns "Wavelength" and
parameter col
col : str
Name of the column to sum over
col2 : str
Name of 2nd column to sum over; default is
"Wavelength"
"""
length = len(df) - 1
trap = np.zeros(length)
for i in range(length):
trap[i] = (df.at[df.index[i + 1], col2] -
df.at[df.index[i], col2]) * \
(df.at[df.index[i], col] +
df.at[df.index[i + 1], col]) / 2.0
return np.sum(trap)
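# The loop above is the composite trapezoid rule; assuming both columns are numeric
# and ordered, an equivalent vectorized sketch would be:
#   np.trapz(df[col].values, x=df[col2].values)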
# New source file: fault SystemVerilog simulation target
import warnings
from fault.verilog_target import VerilogTarget
from .verilog_utils import verilog_name
from .util import (is_valid_file_mode, file_mode_allows_reading,
file_mode_allows_writing)
import magma as m
from pathlib import Path
import fault.actions as actions
from fault.actions import FileOpen, FileClose, GetValue, Loop, If
from hwtypes import (BitVector, AbstractBitVectorMeta, AbstractBit,
AbstractBitVector)
import fault.value_utils as value_utils
from fault.select_path import SelectPath
from fault.wrapper import PortWrapper
from fault.subprocess_run import subprocess_run
import fault
import fault.expression as expression
from fault.real_type import RealKind
import os
from numbers import Number
src_tpl = """\
{timescale}
module {top_module};
{declarations}
{assigns}
{circuit_name} #(
{param_list}
) dut (
{port_list}
);
initial begin
{initial_body}
#20 $finish;
end
endmodule
"""
class SystemVerilogTarget(VerilogTarget):
# Language properties of SystemVerilog used in generating code blocks
BLOCK_START = 'begin'
BLOCK_END = 'end'
LOOP_VAR_TYPE = None
def __init__(self, circuit, circuit_name=None, directory="build/",
skip_compile=None, magma_output="coreir-verilog",
magma_opts=None, include_verilog_libraries=None,
simulator=None, timescale="1ns/1ns", clock_step_delay=5,
num_cycles=10000, dump_waveforms=True, dump_vcd=None,
no_warning=False, sim_env=None, ext_model_file=None,
ext_libs=None, defines=None, flags=None, inc_dirs=None,
ext_test_bench=False, top_module=None, ext_srcs=None,
use_input_wires=False, parameters=None, disp_type='on_error',
waveform_file=None, use_kratos=False):
"""
circuit: a magma circuit
circuit_name: the name of the circuit (default is circuit.name)
directory: directory to use for generating collateral, building, and
running simulator
skip_compile: (boolean) whether or not to compile the magma circuit
magma_output: Set the output parameter to m.compile
(default coreir-verilog)
magma_opts: Options dictionary for `magma.compile` command
simulator: "ncsim", "vcs", "iverilog", or "vivado"
timescale: Set the timescale for the verilog simulation
(default 1ns/1ns)
clock_step_delay: Set the number of steps to delay for each step of the
clock
sim_env: Environment variable definitions to use when running the
simulator. If not provided, the value from os.environ will
be used.
ext_model_file: If True, don't include the assumed model name in the
list of Verilog sources. The assumption is that the
user has already taken care of this via
include_verilog_libraries.
ext_libs: List of external files that should be treated as "libraries",
meaning that the simulator will look in them for module
definitions but not try to compile them otherwise.
defines: Dictionary mapping Verilog DEFINE variable names to their
values. If any value is None, that define simply defines
the variable without giving it a specific value.
flags: List of additional arguments that should be passed to the
simulator.
inc_dirs: List of "include directories" to search for the `include
statement.
ext_test_bench: If True, do not compile testbench actions into a
SystemVerilog testbench and instead simply run a
simulation of an existing (presumably manually
created) testbench. Can be used to get started
integrating legacy testbenches into the fault
framework.
top_module: Name of the top module in the design. If the value is
None and ext_test_bench is False, then top_module will
automatically be filled with the name of the module
containing the generated testbench code.
ext_srcs: Shorter alias for "include_verilog_libraries" argument.
If both "include_verilog_libraries" and ext_srcs are
specified, then the sources from include_verilog_libraries
will be processed first.
use_input_wires: If True, drive DUT inputs through wires that are in
turn assigned to a reg.
parameters: Dictionary of parameters to be defined for the DUT.
disp_type: 'on_error', 'realtime'. If 'on_error', only print if there
is an error. If 'realtime', print out STDOUT as lines come
in, then print STDERR after the process completes.
dump_waveforms: Enable tracing of internal values
waveform_file: name of file to dump waveforms (default is
"waveform.vcd" for ncsim and "waveform.vpd" for vcs)
use_kratos: If True, set the environment up for debugging in kratos
"""
# set default for list of external sources
if include_verilog_libraries is None:
include_verilog_libraries = []
if ext_srcs is None:
ext_srcs = []
include_verilog_libraries = include_verilog_libraries + ext_srcs
# set default for there being an external model file
if ext_model_file is None:
ext_model_file = ext_test_bench
# set default for whether magma compilation should happen
if skip_compile is None:
skip_compile = ext_model_file
# set default for magma compilation options
magma_opts = magma_opts if magma_opts is not None else {}
# call the super constructor
super().__init__(circuit, circuit_name, directory, skip_compile,
include_verilog_libraries, magma_output,
magma_opts, use_kratos=use_kratos)
# sanity check
if simulator is None:
raise ValueError("Must specify simulator when using system-verilog"
" target")
if simulator not in {"vcs", "ncsim", "iverilog", "vivado"}:
raise ValueError(f"Unsupported simulator {simulator}")
# save settings
self.simulator = simulator
self.timescale = timescale
self.clock_step_delay = clock_step_delay
self.num_cycles = num_cycles
self.dump_waveforms = dump_waveforms
if dump_vcd is not None:
warnings.warn("tester.compile_and_run parameter dump_vcd is "
"deprecated; use dump_waveforms instead.",
DeprecationWarning)
self.dump_waveforms = dump_vcd
self.no_warning = no_warning
self.declarations = {} # dictionary keyed by signal name
self.assigns = {} # dictionary keyed by LHS name
self.sim_env = sim_env if sim_env is not None else {}
self.sim_env.update(os.environ)
self.ext_model_file = ext_model_file
self.ext_libs = ext_libs if ext_libs is not None else []
self.defines = defines if defines is not None else {}
self.flags = flags if flags is not None else []
self.inc_dirs = inc_dirs if inc_dirs is not None else []
self.ext_test_bench = ext_test_bench
self.top_module = top_module if not use_kratos else "TOP"
self.use_input_wires = use_input_wires
self.parameters = parameters if parameters is not None else {}
self.disp_type = disp_type
self.waveform_file = waveform_file
if self.waveform_file is None and self.dump_waveforms:
if self.simulator == "vcs":
self.waveform_file = "waveforms.vpd"
elif self.simulator in {"ncsim", "iverilog", "vivado"}:
self.waveform_file = "waveforms.vcd"
else:
raise NotImplementedError(self.simulator)
self.use_kratos = use_kratos
# check to see if runtime is installed
if use_kratos:
import sys
assert sys.platform == "linux" or sys.platform == "linux2",\
"Currently only linux is supported"
if not fault.util.has_kratos_runtime():
raise ImportError("Cannot find kratos-runtime in the system. "
"Please do \"pip install kratos-runtime\" "
"to install.")
def add_decl(self, type_, name, exist_ok=False):
if str(name) in self.declarations:
if exist_ok:
pass
else:
raise Exception(f'A declaration of name {name} already exists.') # noqa
else:
# Note that order is preserved with Python 3.7 dictionary behavior
self.declarations[str(name)] = (type_, name)
def add_assign(self, lhs, rhs):
if str(lhs) in self.assigns:
raise Exception(f'The LHS signal {lhs} has already been assigned.')
else:
# Note that order is preserved with Python 3.7 dictionary behavior
self.assigns[str(lhs)] = (lhs, rhs)
def make_name(self, port):
if isinstance(port, PortWrapper):
port = port.select_path
if isinstance(port, SelectPath):
if len(port) > 2:
name = f"dut.{port.system_verilog_path}"
else:
# Top level ports assign to the external reg
name = verilog_name(port[-1].name)
elif isinstance(port, fault.WrappedVerilogInternalPort):
name = f"dut.{port.path}"
else:
name = verilog_name(port.name)
return name
def process_peek(self, value):
if isinstance(value.port, fault.WrappedVerilogInternalPort):
return f"dut.{value.port.path}"
else:
return f"{value.port.name}"
def make_var(self, i, action):
if isinstance(action._type, AbstractBitVectorMeta):
self.add_decl(f'reg [{action._type.size - 1}:0]', action.name)
return []
raise NotImplementedError(action._type)
def make_file_scan_format(self, i, action):
var_args = ", ".join(f"{var.name}" for var in action.args)
fd_var = self.fd_var(action.file)
return [f'$fscanf({fd_var}, "{action._format}", {var_args});']
def process_value(self, port, value):
if isinstance(value, BitVector):
value = f"{len(value)}'d{value.as_uint()}"
elif isinstance(port, m.SInt) and value < 0:
port_len = len(port)
value = BitVector[port_len](value).as_uint()
value = f"{port_len}'d{value}"
elif value is fault.UnknownValue:
value = "'X"
elif value is fault.HiZ:
value = "'Z"
elif isinstance(value, actions.Peek):
value = self.process_peek(value)
elif isinstance(value, PortWrapper):
value = f"dut.{value.select_path.system_verilog_path}"
elif isinstance(value, actions.FileRead):
new_value = f"{value.file.name_without_ext}_in"
value = new_value
elif isinstance(value, expression.Expression):
value = f"({self.compile_expression(value)})"
return value
def compile_expression(self, value):
if isinstance(value, expression.BinaryOp):
left = self.compile_expression(value.left)
right = self.compile_expression(value.right)
op = value.op_str
return f"({left}) {op} ({right})"
elif isinstance(value, expression.UnaryOp):
operand = self.compile_expression(value.operand)
op = value.op_str
return f"{op} ({operand})"
elif isinstance(value, PortWrapper):
return f"dut.{value.select_path.system_verilog_path}"
elif isinstance(value, actions.Peek):
return self.process_peek(value)
elif isinstance(value, actions.Var):
value = value.name
return value
def make_poke(self, i, action):
name = self.make_name(action.port)
# For now we assume that verilog can handle big ints
value = self.process_value(action.port, action.value)
# Build up the poke action, including delay
retval = []
retval += [f'{name} <= {value};']
if action.delay is not None:
retval += [f'#({action.delay}*1s);']
return retval
def make_delay(self, i, action):
return [f'#({action.time}*1s);']
def make_print(self, i, action):
# build up argument list for the $write command
args = []
args.append(f'"{action.format_str}"')
for port in action.ports:
if isinstance(port, (Number, AbstractBit, AbstractBitVector)) and \
not isinstance(port, m.Bits):
args.append(f'{port}')
else:
args.append(f'{self.make_name(port)}')
# generate the command
args = ', '.join(args)
return [f'$write({args});']
def make_loop(self, i, action):
# loop variable has to be declared outside of the loop
self.add_decl('integer', action.loop_var, exist_ok=True)
return super().make_loop(i, action)
def make_file_open(self, i, action):
# make sure the file mode is supported
if not is_valid_file_mode(action.file.mode):
raise NotImplementedError(action.file.mode)
# declare the file read variable if the file mode allows reading
if file_mode_allows_reading(action.file.mode):
bit_size = (action.file.chunk_size * 8) - 1
in_ = self.in_var(action.file)
self.add_decl(f'reg [{bit_size}:0]', in_)
# declare the file descriptor variable
fd = self.fd_var(action.file)
self.add_decl('integer', fd)
# return the command
return [f'{fd} = $fopen("{action.file.name}", "{action.file.mode}");']
def make_file_close(self, i, action):
fd = self.fd_var(action.file)
return [f'$fclose({fd});']
def make_file_read(self, i, action):
assert file_mode_allows_reading(action.file.mode), \
f'File mode "{action.file.mode}" is not compatible with reading.'
idx = '__i'
fd = self.fd_var(action.file)
in_ = self.in_var(action.file)
return self.generate_action_code(i, [
f'{in_} = 0;',
Loop(loop_var=idx,
n_iter=action.file.chunk_size,
count='down' if action.file.endianness == 'big' else 'up',
actions=[
f'{in_} |= $fgetc({fd}) << (8 * {idx});'
])
])
def write_byte(self, fd, expr):
if self.simulator == 'iverilog':
return f'$fputc({expr}, {fd});'
else:
return f'$fwrite({fd}, "%c", {expr});'
def make_file_write(self, i, action):
assert file_mode_allows_writing(action.file.mode), \
f'File mode "{action.file.mode}" is not compatible with writing.'
idx = '__i'
fd = self.fd_var(action.file)
value = self.make_name(action.value)
byte_expr = f"({value} >> (8 * {idx})) & 8'hFF"
return self.generate_action_code(i, [
Loop(loop_var=idx,
n_iter=action.file.chunk_size,
count='down' if action.file.endianness == 'big' else 'up',
actions=[
self.write_byte(fd, byte_expr)
])
])
def make_get_value(self, i, action):
fd_var = self.fd_var(self.value_file)
fmt = action.get_format()
value = self.make_name(action.port)
return [f'$fwrite({fd_var}, "{fmt}\\n", {value});']
def make_assert(self, i, action):
expr_str = self.compile_expression(action.expr)
return [f'if (!({expr_str})) $error("{expr_str} failed");']
def make_expect(self, i, action):
# don't do anything if any value is OK
if value_utils.is_any(action.value):
return []
# determine the exact name of the signal
name = self.make_name(action.port)
# TODO: add something like "make_read_name" and "make_write_name"
# so that reading inout signals has more uniform behavior across
# expect, peek, etc.
if actions.is_inout(action.port):
name = self.input_wire(name)
# determine the name of the signal for debugging purposes
if isinstance(action.port, SelectPath):
debug_name = action.port[-1].name
elif isinstance(action.port, fault.WrappedVerilogInternalPort):
debug_name = name
else:
debug_name = action.port.name
# determine the value to be checked
value = self.process_value(action.port, action.value)
# determine the condition and error body
err_hdr = ''
err_hdr += f'Failed on action={i} checking port {debug_name}'
if action.traceback is not None:
err_hdr += f' with traceback {action.traceback}'
if action.above is not None:
if action.below is not None:
# must be in range
cond = f'!(({action.above} <= {name}) && ({name} <= {action.below}))' # noqa
err_msg = 'Expected %0f to %0f, got %0f'
err_args = [action.above, action.below, name]
else:
# must be above
cond = f'!({action.above} <= {name})'
err_msg = 'Expected above %0f, got %0f'
err_args = [action.above, name]
else:
if action.below is not None:
# must be below
cond = f'!({name} <= {action.below})'
err_msg = 'Expected below %0f, got %0f'
err_args = [action.below, name]
else:
# equality comparison
if action.strict:
cond = f'!({name} === {value})'
else:
cond = f'!({name} == {value})'
err_msg = 'Expected %x, got %x'
err_args = [value, name]
# construct the body of the $error call
err_fmt_str = f'"{err_hdr}. {err_msg}."'
err_body = [err_fmt_str] + err_args
err_body = ', '.join([str(elem) for elem in err_body])
# return a snippet of verilog implementing the assertion
return self.make_if(i, If(cond, [f'$error({err_body});']))
def make_eval(self, i, action):
# Emulate eval by inserting a delay
return ['#1;']
def make_step(self, i, action):
name = verilog_name(action.clock.name)
code = []
for step in range(action.steps):
code.append(f"#{self.clock_step_delay} {name} ^= 1;")
return code
def generate_recursive_port_code(self, name, type_, power_args):
port_list = []
if issubclass(type_, m.Array):
for j in range(type_.N):
result = self.generate_port_code(
name + "_" + str(j), type_.T, power_args
)
port_list.extend(result)
elif issubclass(type_, m.Tuple):
for k, t in zip(type_.keys(), type_.types()):
result = self.generate_port_code(
name + "_" + str(k), t, power_args
)
port_list.extend(result)
return port_list
def generate_port_code(self, name, type_, power_args):
is_array_of_non_bits = issubclass(type_, m.Array) and \
not issubclass(type_.T, m.Bit)
if is_array_of_non_bits or issubclass(type_, m.Tuple):
return self.generate_recursive_port_code(name, type_, power_args)
else:
width_str = ""
connect_to = f"{name}"
if issubclass(type_, m.Array) and \
issubclass(type_.T, m.Digital):
width_str = f" [{len(type_) - 1}:0]"
if isinstance(type_, RealKind):
t = "real"
elif name in power_args.get("supply0s", []):
t = "supply0"
elif name in power_args.get("supply1s", []):
t = "supply1"
elif name in power_args.get("tris", []):
t = "tri"
elif type_.is_output():
t = "wire"
elif type_.is_inout() or \
(type_.is_input() and self.use_input_wires):
# declare a reg and assign it to a wire
# that wire will then be connected to the
# DUT pin
connect_to = self.input_wire(name)
self.add_decl(f'reg{width_str}', f'{name}')
self.add_decl(f'wire{width_str}', f'{connect_to}')
self.add_assign(f'{connect_to}', f'{name}')
# set the signal type to None to avoid re-declaring
# connect_to
t = None
elif type_.is_input():
t = "reg"
else:
raise NotImplementedError()
# declare the signal that will be connected to the pin, if needed
if t is not None:
self.add_decl(f'{t}{width_str}', f'{connect_to}')
# return the wiring statement describing how the testbench signal
# is connected to the DUT
return [f".{name}({connect_to})"]
def generate_code(self, actions, power_args):
# format the port list
port_list = []
for name, type_ in self.circuit.IO.ports.items():
result = self.generate_port_code(name, type_, power_args)
port_list.extend(result)
port_list = f',\n{2*self.TAB}'.join(port_list)
# build up the body of the initial block
initial_body = []
# set up probing
if self.dump_waveforms and self.simulator == "vcs":
initial_body += [f'$vcdplusfile("{self.waveform_file}");',
f'$vcdpluson();',
f'$vcdplusmemon();']
elif self.dump_waveforms and self.simulator in {"iverilog", "vivado"}:
# https://iverilog.fandom.com/wiki/GTKWAVE
initial_body += [f'$dumpfile("{self.waveform_file}");',
f'$dumpvars(0, dut);']
# if we're using the GetValue feature, then we need to open a file to
# which GetValue results will be written
if any(isinstance(action, GetValue) for action in actions):
actions = [FileOpen(self.value_file)] + actions
actions += [FileClose(self.value_file)]
# handle all of user-specified actions in the testbench
for i, action in enumerate(actions):
initial_body += self.generate_action_code(i, action)
# format the parameter list
param_list = [f'.{name}({value})'
for name, value in self.parameters.items()]
param_list = f',\n{2*self.TAB}'.join(param_list)
# add proper indentation and newlines to strings in the initial body
initial_body = [f'{2*self.TAB}{elem}' for elem in initial_body]
initial_body = '\n'.join(initial_body)
# format declarations
declarations = [f'{self.TAB}{type_} {name};'
for type_, name in self.declarations.values()]
declarations = '\n'.join(declarations)
# format assignments
assigns = [f'{self.TAB}assign {lhs}={rhs};'
for lhs, rhs in self.assigns.values()]
assigns = '\n'.join(assigns)
# determine the top module name
if self.top_module:
top_module = self.top_module
else:
top_module = f'{self.circuit_name}_tb'
# add timescale
timescale = f'`timescale {self.timescale}'
# fill out values in the testbench template
src = src_tpl.format(
timescale=timescale,
declarations=declarations,
assigns=assigns,
initial_body=initial_body,
port_list=port_list,
param_list=param_list,
circuit_name=self.circuit_name,
top_module=top_module
)
# return the string representing the system-verilog testbench
return src
def run(self, actions, power_args=None):
# set defaults
power_args = power_args if power_args is not None else {}
# assemble list of sources files
vlog_srcs = []
if not self.ext_test_bench:
tb_file = self.write_test_bench(actions=actions,
power_args=power_args)
vlog_srcs += [tb_file]
if not self.ext_model_file:
vlog_srcs += [self.verilog_file]
vlog_srcs += self.include_verilog_libraries
# generate simulator commands
if self.simulator == 'ncsim':
# Compile and run simulation
cmd_file = self.write_ncsim_tcl()
sim_cmd = self.ncsim_cmd(sources=vlog_srcs, cmd_file=cmd_file)
sim_err_str = None
# Skip "bin_cmd"
bin_cmd = None
bin_err_str = None
elif self.simulator == 'vivado':
# Compile and run simulation
cmd_file = self.write_vivado_tcl(sources=vlog_srcs)
sim_cmd = self.vivado_cmd(cmd_file=cmd_file)
sim_err_str = ['CRITICAL WARNING', 'ERROR', 'Fatal', 'Error']
# Skip "bin_cmd"
bin_cmd = None
bin_err_str = None
elif self.simulator == 'vcs':
# Compile simulation
# TODO: what error strings are expected at this stage?
sim_cmd, bin_file = self.vcs_cmd(sources=vlog_srcs)
sim_err_str = None
# Run simulation
bin_cmd = [bin_file]
bin_err_str = 'Error'
elif self.simulator == 'iverilog':
# Compile simulation
sim_cmd, bin_file = self.iverilog_cmd(sources=vlog_srcs)
sim_err_str = ['syntax error', 'I give up.']
# Run simulation
bin_cmd = ['vvp', '-N', bin_file]
bin_err_str = 'ERROR'
else:
raise NotImplementedError(self.simulator)
# add any extra flags
sim_cmd += self.flags
# link the library over if using kratos to debug
if self.use_kratos:
self.link_kratos_lib()
# compile the simulation
subprocess_run(sim_cmd, cwd=self.directory, env=self.sim_env,
err_str=sim_err_str, disp_type=self.disp_type)
# run the simulation binary (if applicable)
if bin_cmd is not None:
subprocess_run(bin_cmd, cwd=self.directory, env=self.sim_env,
err_str=bin_err_str, disp_type=self.disp_type)
# post-process GetValue actions
self.post_process_get_value_actions(actions)
def write_test_bench(self, actions, power_args):
# determine the path of the testbench file
tb_file = self.directory / Path(f'{self.circuit_name}_tb.sv')
tb_file = tb_file.absolute()
# generate source code of test bench
src = self.generate_code(actions, power_args)
# If there's an old test bench file, ncsim might not recompile based on
# the timestamp (1s granularity), see
# https://github.com/StanfordAHA/lassen/issues/111
# so we check if the new/old file have the same timestamp and edit them
# to force an ncsim recompile
check_timestamp = os.path.isfile(tb_file)
if check_timestamp:
check_timestamp = True
old_stat_result = os.stat(tb_file)
old_times = (old_stat_result.st_atime, old_stat_result.st_mtime)
with open(tb_file, "w") as f:
f.write(src)
if check_timestamp:
new_stat_result = os.stat(tb_file)
new_times = (new_stat_result.st_atime, new_stat_result.st_mtime)
if new_times[0] <= old_times[0] or new_times[1] <= old_times[1]:
new_times = (old_times[0] + 1, old_times[1] + 1)
os.utime(tb_file, times=new_times)
# return the path to the testbench location
return tb_file
@staticmethod
def input_wire(name):
return f'__{name}_wire'
def link_kratos_lib(self):
from kratos_runtime import get_lib_path
lib_path = get_lib_path()
dst_path = os.path.join(self.directory, os.path.basename(lib_path))
if not os.path.isfile(dst_path):
os.symlink(lib_path, dst_path)
# also add the directory to the current LD_LIBRARY_PATH
self.sim_env["LD_LIBRARY_PATH"] = os.path.abspath(self.directory)
def write_ncsim_tcl(self):
# construct the TCL commands to run the Incisive/Xcelium simulation
tcl_cmds = []
if self.dump_waveforms:
tcl_cmds += [f'database -open -vcd vcddb -into {self.waveform_file} -default -timescale ps'] # noqa
tcl_cmds += [f'probe -create -all -vcd -depth all']
tcl_cmds += [f'run {self.num_cycles}ns']
tcl_cmds += [f'quit']
# write the command file
cmd_file = Path(f'{self.circuit_name}_cmd.tcl')
with open(self.directory / cmd_file, 'w') as f:
f.write('\n'.join(tcl_cmds))
# return the path to the command file
return cmd_file
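# For reference, with the defaults (dump_waveforms=True, num_cycles=10000, ncsim) the
# generated TCL file contains:
#   database -open -vcd vcddb -into waveforms.vcd -default -timescale ps
#   probe -create -all -vcd -depth all
#   run 10000ns
#   quit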
def write_vivado_tcl(self, sources=None, proj_name='project', proj_dir=None,
proj_part=None):
# set defaults
if sources is None:
sources = []
if proj_dir is None:
proj_dir = f'{proj_name}'
# build up a list of commands to run a simulation with Vivado
tcl_cmds = []
# create the project
create_proj = f'create_project -force {proj_name} {proj_dir}'
if proj_part is not None:
create_proj += f' -part "{proj_part}"'
tcl_cmds += [create_proj]
# add source files and library files
vlog_add_files = []
vlog_add_files += [f'{src}' for src in sources]
vlog_add_files += [f'{lib}' for lib in self.ext_libs]
if len(vlog_add_files) > 0:
vlog_add_files = ' '.join(vlog_add_files)
tcl_cmds += [f'add_files "{vlog_add_files}"']
# add include file search paths
if len(self.inc_dirs) > 0:
vlog_inc_dirs = ' '.join(f'{dir_}' for dir_ in self.inc_dirs)
tcl_cmds += [f'set_property include_dirs "{vlog_inc_dirs}" [get_fileset sim_1]'] # noqa
# add verilog `defines
vlog_defs = []
for key, val in self.defines.items():
if val is not None:
vlog_defs += [f'{key}={val}']
else:
vlog_defs += [f'{key}']
if len(vlog_defs) > 0:
vlog_defs = ' '.join(vlog_defs)
tcl_cmds += [f'set_property -name "verilog_define" -value {{{vlog_defs}}} -objects [get_fileset sim_1]'] # noqa
# set the name of the top module
if self.top_module is None and not self.ext_test_bench:
top = f'{self.circuit_name}_tb'
else:
top = self.top_module
if top is not None:
tcl_cmds += [f'set_property -name top -value {top} -objects [get_fileset sim_1]'] # noqa
else:
# have Vivado pick the top module automatically if not specified
tcl_cmds += [f'update_compile_order -fileset sim_1']
# run until $finish (as opposed to running for a certain amount of time)
tcl_cmds += [f'set_property -name "xsim.simulate.runtime" -value "-all" -objects [get_fileset sim_1]'] # noqa
# run the simulation
tcl_cmds += ['launch_simulation']
# write the command file
cmd_file = Path(f'{self.circuit_name}_cmd.tcl')
with open(self.directory / cmd_file, 'w') as f:
f.write('\n'.join(tcl_cmds))
# return the path to the command file
return cmd_file
def def_args(self, prefix):
retval = []
for key, val in self.defines.items():
def_arg = f'{prefix}{key}'
if val is not None:
def_arg += f'={val}'
retval += [def_arg]
return retval
def ncsim_cmd(self, sources, cmd_file):
cmd = []
# binary name
cmd += ['irun']
# determine the name of the top module
if self.top_module is None and not self.ext_test_bench:
top = f'{self.circuit_name}_tb' if not self.use_kratos else "TOP"
else:
top = self.top_module
# send name of top module to the simulator
if top is not None:
cmd += ['-top', f'{top}']
# timescale
cmd += ['-timescale', f'{self.timescale}']
# TCL commands
cmd += ['-input', f'{cmd_file}']
# source files
cmd += [f'{src}' for src in sources]
# library files
for lib in self.ext_libs:
cmd += ['-v', f'{lib}']
# include directory search path
for dir_ in self.inc_dirs:
cmd += ['-incdir', f'{dir_}']
# define variables
cmd += self.def_args(prefix='+define+')
# misc flags
cmd += ['-access', '+rwc']
cmd += ['-notimingchecks']
if self.no_warning:
cmd += ['-neverwarn']
# kratos flags
if self.use_kratos:
from kratos_runtime import get_ncsim_flag
cmd += get_ncsim_flag().split()
# return arg list
return cmd
def vivado_cmd(self, cmd_file):
cmd = []
# binary name
cmd += ['vivado']
# run from an external script
cmd += ['-mode', 'batch']
# specify path to script
cmd += ['-source', f'{cmd_file}']
# turn off annoying output
cmd += ['-nolog']
cmd += ['-nojournal']
# return arg list
return cmd
def vcs_cmd(self, sources):
cmd = []
# binary name
cmd += ['vcs']
# timescale
cmd += [f'-timescale={self.timescale}']
# source files
cmd += [f'{src}' for src in sources]
# library files
for lib in self.ext_libs:
cmd += ['-v', f'{lib}']
# include directory search path
for dir_ in self.inc_dirs:
cmd += [f'+incdir+{dir_}']
# define variables
cmd += self.def_args(prefix='+define+')
# misc flags
cmd += ['-sverilog']
cmd += ['-full64']
cmd += ['+v2k']
cmd += ['-LDFLAGS']
cmd += ['-Wl,--no-as-needed']
# kratos flags
if self.use_kratos:
# +vpi -load libkratos-runtime.so:initialize_runtime_vpi -acc+=rw
from kratos_runtime import get_vcs_flag
cmd += get_vcs_flag().split()
if self.dump_waveforms:
cmd += ['+vcs+vcdpluson', '-debug_pp']
# return arg list and binary file location
return cmd, './simv'
def iverilog_cmd(self, sources):
cmd = []
# binary name
cmd += ['iverilog']
# output file
bin_file = f'{self.circuit_name}_tb'
cmd += [f'-o{bin_file}']
# look for *.v and *.sv files, if we're using library directories
if len(self.ext_libs) > 0:
cmd += ['-Y.v', '-Y.sv']
# Icarus verilog does not have an option like "-v" that allows
# individual files to be included, so the best we can do is gather a
# list of unique library directories
unq_lib_dirs = {}
for lib in self.ext_libs:
parent_dir = Path(lib).parent
if parent_dir not in unq_lib_dirs:
unq_lib_dirs[parent_dir] = None
cmd += [f'-y{unq_lib_dir}' for unq_lib_dir in unq_lib_dirs]
# include directory search path
for dir_ in self.inc_dirs:
cmd += [f'-I{dir_}']
# define variables
cmd += self.def_args(prefix='-D')
# misc flags
cmd += ['-g2012']
# source files
cmd += [f'{src}' for src in sources]
# return arg list and binary file location
return cmd, bin_file
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Tegra T186 pin names"""
import atexit
from Jetson import GPIO
GPIO.setmode(GPIO.TEGRA_SOC)
GPIO.setwarnings(False) # shh!
class Pin:
"""Pins dont exist in CPython so...lets make our own!"""
IN = 0
OUT = 1
LOW = 0
HIGH = 1
PULL_NONE = 0
PULL_UP = 1
PULL_DOWN = 2
id = None
_value = LOW
_mode = IN
def __init__(self, bcm_number):
self.id = bcm_number
def __repr__(self):
return str(self.id)
def __eq__(self, other):
return self.id == other
def init(self, mode=IN, pull=None):
"""Initialize the Pin"""
if mode is not None:
if mode == self.IN:
self._mode = self.IN
GPIO.setup(self.id, GPIO.IN)
elif mode == self.OUT:
self._mode = self.OUT
GPIO.setup(self.id, GPIO.OUT)
else:
raise RuntimeError("Invalid mode for pin: %s" % self.id)
if pull is not None:
if self._mode != self.IN:
raise RuntimeError("Cannot set pull resistor on output")
if pull == self.PULL_UP:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_UP)
elif pull == self.PULL_DOWN:
GPIO.setup(self.id, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
else:
raise RuntimeError("Invalid pull for pin: %s" % self.id)
def value(self, val=None):
"""Set or return the Pin Value"""
if val is not None:
if val == self.LOW:
self._value = val
GPIO.output(self.id, val)
return None
if val == self.HIGH:
self._value = val
GPIO.output(self.id, val)
return None
raise RuntimeError("Invalid value for pin")
return GPIO.input(self.id)
# pylint: disable=no-method-argument
@atexit.register
def cleanup():
"""Clean up pins"""
print("Exiting... \nCleaning up pins")
GPIO.cleanup()
# pylint: enable=no-method-argument
# Cannot be used as GPIO
SDA = Pin("GPIO_SEN9")
SCL = Pin("GPIO_SEN8")
SDA_1 = Pin("GEN1_I2C_SDA")
SCL_1 = Pin("GEN1_I2C_SCL")
# Jetson TX2 specific
J06 = Pin("GPIO_AUD1")
AA02 = Pin("CAN_GPIO2")
N06 = Pin("GPIO_CAM7")
N04 = Pin("GPIO_CAM5")
N05 = Pin("GPIO_CAM6")
N03 = Pin("GPIO_CAM4")
AA01 = Pin("CAN_GPIO1")
I05 = Pin("GPIO_PQ5")
T03 = Pin("UART1_CTS")
T02 = Pin("UART1_RTS")
P17 = Pin("GPIO_EXP_P17")
AA00 = Pin("CAN_GPIO0")
Y01 = Pin("GPIO_MDM2")
P16 = Pin("GPIO_EXP_P16")
I04 = Pin("GPIO_PQ4")
J05 = Pin("GPIO_AUD0")
# Jetson TX2 NX specific
W04 = Pin("UART3_RTS")
V01 = Pin("GPIO_SEN1")
C02 = Pin("DAP2_DOUT")
C03 = Pin("DAP2_DIN")
V04 = Pin("GPIO_SEN4")
H02 = Pin("GPIO_WAN7")
H01 = Pin("GPIO_WAN6")
V02 = Pin("GPIO_SEN2")
H00 = Pin("GPIO_WAN5")
H03 = Pin("GPIO_WAN8")
Y03 = Pin("GPIO_MDM4")
N01 = Pin("GPIO_CAM2")
EE02 = Pin("TOUCH_CLK")
U00 = Pin("GPIO_DIS0")
U05 = Pin("GPIO_DIS5")
W05 = Pin("UART3_CTS")
V03 = Pin("GPIO_SEN3")
# Shared pin
J03 = Pin("DAP1_FS")
J02 = Pin("DAP1_DIN")
J01 = Pin("DAP1_DOUT")
J00 = Pin("DAP1_SCLK")
J04 = Pin("AUD_MCLK")
i2cPorts = (
(1, SCL, SDA),
(0, SCL_1, SDA_1),
)
# ordered as spiId, sckId, mosiId, misoId
spiPorts = ((3, N03, N05, N04),)
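# Illustrative usage sketch (J05 is one of the pins defined above):
#   J05.init(mode=Pin.OUT)
#   J05.value(Pin.HIGH)                      # drive the pin high
#   J05.init(mode=Pin.IN, pull=Pin.PULL_UP)  # reconfigure as a pulled-up input
#   state = J05.value()                      # read the current level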
# Repository: gortizji/tv-graph-cnn
import numpy as np
from scipy.sparse.linalg import eigsh
from scipy.signal import square
def delta(Nv, c):
d = np.zeros([Nv, 1])
d[c] = 1
return d
def diffusion_seq(W, x, T):
N = W.shape[0]
X = np.zeros((N, T))
for t in range(T):
if t == 0:
X[:, 0] = x.T / np.max(x)
continue
X[:, t] = ((W ** t) * X[:, t - 1]).T
X[:, t] /= np.max(X)
return X
def generate_diffusion_samples(N, W, T=10, sigma=0.05):
samples = []
labels = []
Nv = W.shape[0]
for _ in range(N):
k = np.random.randint(Nv)
Wk = W ** k
c = np.random.randint(Nv)
x = Wk.dot(delta(Nv, c))
X = diffusion_seq(W, x, T) + sigma * np.random.randn(Nv, T)
samples.append(X)
labels.append(c)
return np.array(samples), np.array(labels)
def wave_seq(Wphi, b, Kstart, T, f, duty_fixed=False, signal="square"):
Nv = Wphi.shape[0]
X = np.zeros((Nv, Kstart + T))
phi = 2 * np.pi * np.random.rand()
if duty_fixed:
duty = 0.5
else:
duty = np.random.rand()
if signal == "square":
X[:, 0] = (b * 0.5 * (1 - square(phi, duty=duty))).T
for t in range(1, Kstart + T):
X[:, t] = Wphi.dot(X[:, t - 1]).T + (b * 0.5 * (1 - square(f * t + phi, duty=duty))).T
elif signal == "cosine":
X[:, 0] = (b * 0.5 * (1 - np.cos(phi))).T
for t in range(1, Kstart + T):
X[:, t] = Wphi.dot(X[:, t - 1]).T + (b * 0.5 * (1 - np.cos(f * t + phi))).T
else:
raise ValueError("Signal type is not implemented")
return X[:, Kstart:Kstart + T]
def generate_wave_samples(N, W, T=10, Kmin=10, Kmax=100, sigma=0.05, signal="square"):
samples = []
labels = []
Nv = W.shape[0]
_, U = eigsh(W)
Wphi = U.dot(U.T)
for _ in range(N):
kstart = np.random.randint(Kmin, Kmax)
c = np.random.randint(Nv)
b = delta(Nv, c)
f = np.pi * np.random.rand()
X = wave_seq(Wphi, b, kstart, T, f, signal=signal) + sigma * np.random.randn(Nv, T)
samples.append(X)
labels.append(c)
return np.array(samples), np.array(labels)
def hp_hp_sample(T, G, f_c, lambda_c, sigma=1):
"""
Generates a random high-pass signal in time and vertex
:param T: Number of time samples
:param G: Underlying graph
:param f_c: Index of cut frequency in time fourier domain
:param lambda_c: Index of cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:return: Filtered signal
"""
x = sigma * np.random.randn(G.N, T)
G.compute_fourier_basis()
xg = G.gft(x)
xgf = np.fft.fft(xg)
xgf = np.fft.fftshift(xgf)
xgf[:lambda_c, :] = 0
xgf[:, T // 2 - f_c: T // 2 + f_c + 1] = 0
xgf = np.fft.ifftshift(xgf)
xg = np.fft.ifft(xgf)
x = G.igft(xg)
return x
def lp_lp_sample(T, G, f_c, lambda_c, sigma=1):
"""
Generates a random low-pass signal in time and vertex
:param T: Number of time samples
:param G: Underlying graph
:param f_c: Index of cut frequency in time fourier domain
:param lambda_c: Index of cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:return: Filtered signal
"""
x = sigma * np.random.randn(G.N, T)
G.compute_fourier_basis()
xg = G.gft(x)
xgf = np.fft.fft(xg)
xgf = np.fft.fftshift(xgf)
lp_lp_filter = np.zeros(xgf.shape)
lp_lp_filter[:lambda_c, T // 2 - f_c: T // 2 + f_c + 1] = 1
xgf = xgf * lp_lp_filter
xgf = np.fft.ifftshift(xgf)
xg = np.fft.ifft(xgf)
x = G.igft(xg)
return x
def hp_lp_sample(T, G, f_c, lambda_c, sigma=1):
"""
Generates a random low-pass signal in time and high-pass vertex
:param T: Number of time samples
:param G: Underlying graph
:param f_c: Index of cut frequency in time fourier domain
:param lambda_c: Index of cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:return: Filtered signal
"""
x = sigma * np.random.randn(G.N, T)
G.compute_fourier_basis()
xg = G.gft(x)
xgf = np.fft.fft(xg)
xgf = np.fft.fftshift(xgf)
xgf[lambda_c:, :] = 0
xgf[:, T // 2 - f_c: T // 2 + f_c + 1] = 0
xgf = np.fft.ifftshift(xgf)
xg = np.fft.ifft(xgf)
x = G.igft(xg)
return x
def lp_hp_sample(T, G, f_c, lambda_c, sigma=1):
"""
Generates a random high-pass signal in time and low-pass vertex
:param T: Number of time samples
:param G: Underlying graph
:param f_c: Index of cut frequency in time fourier domain
:param lambda_c: Index of cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:return: Filtered signal
"""
x = sigma * np.random.randn(G.N, T)
G.compute_fourier_basis()
xg = G.gft(x)
xgf = np.fft.fft(xg)
xgf = np.fft.fftshift(xgf)
hp_lp_filter = np.zeros(xgf.shape)
hp_lp_filter[lambda_c:, T // 2 - f_c: T // 2 + f_c + 1] = 1
xgf = xgf * hp_lp_filter
xgf = np.fft.ifftshift(xgf)
xg = np.fft.ifft(xgf)
x = G.igft(xg)
return x
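# Illustrative sketch of drawing filtered samples (assumes a pygsp-style graph object G
# that exposes N, compute_fourier_basis(), gft() and igft(), e.g. pygsp.graphs.Sensor):
#   from pygsp import graphs
#   G = graphs.Sensor(64)
#   x_ll = lp_lp_sample(T=32, G=G, f_c=4, lambda_c=8)  # low-pass in vertex and time
#   x_hh = hp_hp_sample(T=32, G=G, f_c=4, lambda_c=8)  # high-pass in vertex and time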
def generate_spectral_samples(N, T, G, f_h, lambda_h, f_l, lambda_l, sigma=10, sigma_n=0):
"""
Generate dataset composed of quantized filtered hp-hp, lp-lp, hp-lp and lp-hp white noise.
:param N: Number of samples per type
:param T: Time length
:param G: Underlying graph
:param f_h: Index of hp cut frequency in time fourier domain
:param lambda_h: Index of hp cut frequency in graph fourier domain
:param f_l: Index of lp cut frequency in time fourier domain
:param lambda_l: Index of lp cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:param sigma_n: Standard deviation of noise
:return: Filtered signal
"""
dataset = []
labels = []
for _ in range(N):
x = hp_hp_sample(T, G, f_h, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(0)
for _ in range(N):
x = lp_lp_sample(T, G, f_l, lambda_l, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(1)
for _ in range(N):
x = lp_hp_sample(T, G, f_l, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(2)
for _ in range(N):
x = hp_lp_sample(T, G, f_h, lambda_l, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(3)
dataset = np.array(dataset)
dataset += sigma_n * np.random.randn(*dataset.shape)
print(dataset.shape)
labels = np.array(labels)
return dataset, labels
def generate_spectral_samples_hard(N, T, G, f_h, lambda_h, f_l, lambda_l, sigma=10, sigma_n=0):
"""
Generate dataset composed of quantized filtered hp-hp, lp-lp, hp-lp and lp-hp white noise.
:param N: Number of samples per type
:param T: Time length
:param G: Underlying graph
:param f_h: Index of hp cut frequency in time fourier domain
:param lambda_h: Index of hp cut frequency in graph fourier domain
:param f_l: Index of lp cut frequency in time fourier domain
:param lambda_l: Index of lp cut frequency in graph fourier domain
:param sigma: Standard deviation of generator
:param sigma_n: Standard deviation of noise
:return: Filtered signal
"""
dataset = []
labels = []
for _ in range(N):
x = hp_hp_sample(T, G, f_h, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(0)
for _ in range(N):
x = lp_lp_sample(T, G, f_l, lambda_l, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(1)
for _ in range(N):
x = lp_hp_sample(T, G, f_l, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(2)
for _ in range(N):
x = hp_lp_sample(T, G, f_h, lambda_l, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(3)
# Spectral Combinations
for _ in range(N):
x = lp_lp_sample(T, G, f_h, lambda_h, sigma) + hp_hp_sample(T, G, f_h, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(4)
for _ in range(N):
x = lp_hp_sample(T, G, f_h, lambda_h, sigma) + hp_lp_sample(T, G, f_h, lambda_h, sigma)
x = np.tanh(x.real)
x = np.expand_dims(x, axis=3)
dataset.append(x)
labels.append(5)
dataset = np.array(dataset)
dataset += sigma_n * np.random.randn(*dataset.shape)
print(dataset.shape)
labels = np.array(labels)
return dataset, labels
# Repository: dbirman/cs375, file: final_project/train_coco.py
"""
Final project
"""
import os
import numpy as np
import tensorflow as tf
from tfutils import base, data, model, optimizer, utils
from coco_provider import COCO
from data_provider import Combine_world
from yolo_tiny_net import YoloTinyNet
from scipy.misc import imsave
from skimage.draw import line_aa
var_dict = None
class CocoYolo():
"""
Defines the ImageNet training experiment
"""
class Config():
"""
Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters.
Please set the seed to your group number. You can also change the batch
size and n_epochs if you want but please do not change the rest.
"""
batch_size = 1
seed = 0
thres_loss = 1000
n_epochs = 90
datasets = {'imagenet': 1, 'coco': 1}
crop_size = 224
common_params = {
'image_size': 224,
'num_classes': 20,
'batch_size': 1
}
net_params = {
'boxes_per_cell': 2,
'weight_decay': 0.0005,
'cell_size': 4,
'object_scale':1,
'noobject_scale':1,
'class_scale':1,
'coord_scale':1
}
ytn = YoloTinyNet(common_params,net_params,test=False)
train_steps = 100 #ImageNetDataProvider.N_TRAIN / batch_size * n_epochs
val_steps = 1 #np.ceil(ImageNetDataProvider.N_VAL / batch_size).astype(int)
def custom_train_loop(self, sess, train_targets, **loop_params):
"""Define Custom training loop.
Args:
sess (tf.Session): Current tensorflow session.
train_targets (list): Description.
**loop_params: Optional kwargs needed to perform custom train loop.
Returns:
dict: A dictionary containing train targets evaluated by the session.
"""
# boxes = var_dict['boxes']
# boxes_val = sess.run(boxes)
# import pdb; pdb.set_trace()
max_obj = 0
for i in range(20):
# ih, iw, image, obj_count, boxes = sess.run([var_dict[k] for k in ['ih', 'iw', 'images', 'num_objects', 'boxes']]) #['images', 'labels',
ih, iw, obj_count, boxes, images = sess.run([var_dict[k] for k in ['ih', 'iw', 'num_objects', 'boxes', 'images']])
max_obj = max(max_obj, obj_count)
print(i, ih, iw, obj_count, boxes[0][:obj_count[0]])  # , image.shape, obj_count
img = np.array(images[0])
x_center, y_center, w, h = boxes[0, 0, :4]
coords = [(x_center - w/2), (x_center + w/2), (y_center-h/2), (y_center+h/2)] # x1, x2, y1, y2
x1, x2, y1, y2 = [int(c) for c in coords]
print([int(c) for c in coords])
rr, cc, val = line_aa(y1, x1, y2, x2)
img[rr, cc, 0] = val
imsave('image_{}.png'.format(i), img)
import pdb; pdb.set_trace()
train_results, p = sess.run([train_targets, var_dict['print']])
for i, result in enumerate(train_results):
print('Model {} has loss {}'.format(i, result['loss']))
return train_results
def setup_params(self):
"""
This function illustrates how to setup up the parameters for
train_from_params.
"""
params = {}
"""
train_params defines the training parameters consisting of
- the data provider that reads the data, preprocesses it and enqueues it into
the data queue
- the data queue that batches and if specified shuffles the data and provides
the input to the model
- other configuration parameters like the number of training steps
It's arguments are
data_params: defines how the data is read in.
queue_params: defines how the data is presented to the model, i.e.
if it is shuffled or not and how big of a batch size is used.
targets: the targets to be extracted and evaluated in the tensorflow session
num_steps: number of training steps
thres_loss: if the loss exceeds thres_loss the training will be stopped
validate_first: run validation before starting the training
"""
params['train_params'] = {
'data_params': {
# ImageNet data provider arguments
'func': Combine_world,
'cfg_dataset': self.Config.datasets,
'group': 'train',
'crop_size': self.Config.crop_size,
# TFRecords (super class) data provider arguments
'file_pattern': 'train*.tfrecords',
'batch_size': self.Config.batch_size,
'shuffle': False,
'shuffle_seed': self.Config.seed,
'file_grab_func': self.subselect_tfrecords,
'n_threads': 1,
},
'queue_params': {
'queue_type': 'random',
'batch_size': self.Config.batch_size,
'seed': self.Config.seed,
'capacity': self.Config.batch_size * 10,
'min_after_dequeue': self.Config.batch_size * 5,
},
'targets': {
'func': self.return_outputs,
'targets': [],
},
'num_steps': self.Config.train_steps,
'thres_loss': self.Config.thres_loss,
'validate_first': False,
'train_loop': {'func': self.custom_train_loop}
}
"""
validation_params similar to train_params defines the validation parameters.
It has the same arguments as train_params and additionally
agg_func: function that aggregates the validation results across batches,
e.g. to calculate the mean of across batch losses
online_agg_func: function that aggregates the validation results across
batches in an online manner, e.g. to calculate the RUNNING mean across
batch losses
"""
"""
Using combine worlds
'data_params': {
'func': Combine_world,
'cfg_dataset': {'imagenet': 0}
'
"""
# params['validation_params'] = {
# 'topn_val': {
# 'data_params': {
# # ImageNet data provider arguments
# 'func': COCO,
# 'group': 'val',
# # TFRecords (super class) data provider arguments
# 'batch_size': self.Config.batch_size,
# 'n_threads': 4,
# },
# 'queue_params': {
# 'queue_type': 'fifo',
# 'batch_size': self.Config.batch_size,
# 'seed': self.Config.seed,
# 'capacity': self.Config.batch_size * 10,
# 'min_after_dequeue': self.Config.batch_size * 5,
# },
# 'num_steps': self.Config.val_steps,
# 'agg_func': self.agg_mean,
# 'online_agg_func': self.online_agg_mean,
# }
# }
"""
model_params defines the model i.e. the architecture that
takes the output of the data provider as input and outputs
the prediction of the model.
You will need to EDIT alexnet_model in models.py. alexnet_model
is supposed to define a standard AlexNet model in tensorflow.
Please open models.py and fill out the missing parts in the alexnet_model
function. Once you start working with different models you will need to
switch out alexnet_model with your model function.
"""
params['model_params'] = {
'func': self.Config.ytn.inference,
}
"""
loss_params defines your training loss.
You will need to EDIT 'loss_per_case_func'.
Implement a softmax cross-entropy loss. You can use tensorflow's
tf.nn.sparse_softmax_cross_entropy_with_logits function.
Note:
1.) loss_per_case_func is called with
loss_per_case_func(inputs, outputs)
by tfutils.
2.) labels = outputs['labels']
logits = outputs['pred']
"""
def loss_wrapper(inputs, outputs):
global var_dict
var_dict = outputs
predicts = outputs['bboxes']
gt_boxes = tf.reshape(tf.cast(outputs['boxes'], tf.int32), [1, -1, 5])
num_objects = outputs['num_objects']
loss, nonsense, p = self.Config.ytn.loss(predicts, gt_boxes, num_objects)
var_dict['print'] = p
# The zero-weighted term keeps 'logits' in the graph without changing the loss value.
return loss + 0.0*tf.reduce_sum(outputs['logits'])
params['loss_params'] = {
'targets': ['boxes'],
'agg_func': tf.reduce_mean,
'loss_per_case_func': loss_wrapper,
'loss_per_case_func_params' : {'_outputs': 'outputs',
'_targets_$all': 'inputs'},
'loss_func_kwargs' : {},
}
"""
learning_rate_params defines the learning rate, decay and learning function.
You will need to EDIT this part. Replace the exponential decay
learning rate policy with a piecewise constant learning policy.
ATTENTION:
1.) 'learning_rate', 'decay_steps', 'decay_rate' and 'staircase' are not
arguments of tf.train.piecewise_constant! You will need to replace
them with the appropriate keys.
2.) 'func' passes global_step as input to your learning rate policy
function. Set the 'x' argument of tf.train.piecewise_constant to
global_step.
3.) set 'values' to [0.01, 0.005, 0.001, 0.0005] and
'boundaries' to [150000, 300000, 450000] for a batch size of 256
4.) You will need to delete all keys except for 'func' and replace them
with the input arguments to tf.train.piecewise_constant.
"""
params['learning_rate_params'] = {
'func': tf.train.exponential_decay,
'learning_rate': 0.0001,
'decay_steps': 30, #TODO: what number to put here?
'decay_rate': 0.95,
'staircase': True,
}
"""
optimizer_params defines the optimizer.
You will need to EDIT the optimizer class. Replace the Adam optimizer
with a momentum optimizer after switching the learning rate policy to
piecewise constant.
"""
params['optimizer_params'] = {
'func': optimizer.ClipOptimizer,
'optimizer_class': tf.train.AdamOptimizer,
'clip': False,
}
"""
save_params defines how, where and when your training results are saved
in the database.
You will need to EDIT this part. Set your 'host' (set it to 'localhost',
or to IP if using remote mongodb), 'port' (set it to 24444, unless you
have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'.
"""
params['save_params'] = {
'host': '192.168.127.12',
'port': 24444,
'dbname': 'final',
'collname': 'yolo',
'exp_id': 'coco',
'save_valid_freq': 10000,
'save_filters_freq': 30000,
'cache_filters_freq': 50000,
'save_metrics_freq': 200,
'save_initial_filters' : False,
'save_to_gfs': [],
}
"""
load_params defines how and if a model should be restored from the database.
You will need to EDIT this part. Set your 'host' (set it to 'localhost',
or to IP if using remote mongodb), 'port' (set it to 24444, unless you
have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'.
If you want to restore your training these parameters should be the same
as in 'save_params'.
"""
params['load_params'] = {
'host': '192.168.127.12',
'port': 24444,
'dbname': 'final',
'collname': 'yolo',
'exp_id': 'coco',
'do_restore': False,
'load_query': None,
}
return params
def agg_mean(self, x):
return {k: np.mean(v) for k, v in x.items()}
def in_top_k(self, inputs, outputs):
"""
Implements top_k loss for validation
You will need to EDIT this part. Implement the top1 and top5 functions
in the respective dictionary entry.
"""
def k_wrapper(inputs, outputs, k):
return tf.nn.in_top_k(outputs['logits'], inputs['labels'], k)
return {'top1': k_wrapper(inputs, outputs, 1),
'top5': k_wrapper(inputs, outputs, 5)}
def subselect_tfrecords(self, path):
"""
Illustrates how to subselect files for training or validation
"""
all_filenames = os.listdir(path)
rng = np.random.RandomState(seed=SEED)
rng.shuffle(all_filenames)
return [os.path.join(path, fn) for fn in all_filenames
if fn.endswith('.tfrecords')]
def return_outputs(self, inputs, outputs, targets, **kwargs):
"""
Illustrates how to extract desired targets from the model
"""
retval = {}
for target in targets:
retval[target] = outputs[target]
return retval
def online_agg_mean(self, agg_res, res, step):
"""
Appends the mean value for each key
"""
if agg_res is None:
agg_res = {k: [] for k in res}
for k, v in res.items():
agg_res[k].append(np.mean(v))
return agg_res
if __name__ == '__main__':
"""
Illustrates how to run the configured model using tfutils
"""
base.get_params()
m = CocoYolo()
params = m.setup_params()
base.train_from_params(**params)
|
#
# Copyright (c) 2016 <NAME>
# All rights reserved.
#
# This file is part of Faber. It is made available under the
# Boost Software License, Version 1.0.
# (Consult LICENSE or http://www.boost.org/LICENSE_1_0.txt)
from faber.feature import feature, incidental, map, join
from faber.action import action
from faber.artefact import artefact, notfile, always
from faber.tools import fileutils
from faber.rule import rule
from faber.utils import capture_output
from test.common import pyecho
from os.path import exists
import pytest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@pytest.mark.usefixtures('module')
def test_call():
a = action()
b = artefact('b', attrs=notfile)
c = artefact('c', attrs=notfile)
with pytest.raises(ValueError) as e:
a(b, c)
assert 'not implemented' in str(e.value)
with capture_output() as (out, err):
a = action('echo', 'echo $(<)')
a([b])
assert out.getvalue().strip() == 'test.b'
assert err.getvalue() == ''
@pytest.mark.usefixtures('module')
def test_recipe():
"""Check that an artefact's __recipe__ method is called to report
the execution of the recipe updating it."""
a = artefact('a', attrs=notfile|always)
b = artefact('b', attrs=notfile|always)
c = artefact('c', attrs=notfile|always)
a = rule(pyecho, a)
b = rule(pyecho, b, a)
c = rule(pyecho, c, b)
with patch('faber.action.action.__status__') as recipe:
assert b.update()
(_, _, _, _, output, _), kwds = recipe.call_args_list[-1]
assert output.strip() == 'b <- a'
assert c.update()
(_, _, _, _, output, _), kwds = recipe.call_args_list[-1]
assert output.strip() == 'c <- b'
@pytest.mark.usefixtures('module')
def test_variables():
"""Check that an action's variables are properly substituted."""
variable = feature('variable', attributes=incidental)
class A(action):
var = map(variable, join)
command = 'echo $(var)'
a = artefact('a', attrs=notfile|always)
b = artefact('b', attrs=notfile|always)
c = artefact('c', attrs=notfile|always)
echo = action('echo', 'echo $(variable)')
pye = action('pyecho', pyecho, ['variable'])
a = rule(A(), a, features=variable('A'))
b = rule(echo, b, a, features=variable('B'))
c = rule(pye, c, b, features=variable('C'))
with patch('faber.action.action.__status__') as recipe:
assert a.update()
(_, _, _, _, output, _), kwds = recipe.call_args_list[-1]
assert output.strip() == 'A'
assert b.update()
(_, _, _, _, output, _), kwds = recipe.call_args_list[-1]
assert output.strip() == 'B'
assert c.update()
(_, _, _, _, output, _), kwds = recipe.call_args_list[-1]
assert output.strip() == "c <- b (variable=['C'])"
@pytest.mark.usefixtures('module')
def test_compound():
"""Compound a command and a Python function into a single action."""
class C(action):
touch = fileutils.touch
@staticmethod
def command(targets, sources):
f = targets[0]._filename
if C.touch(targets, sources) and exists(f):
with open(f, 'w') as out:
out.write('something')
a = rule(C(), 'a')
a.update()
with open(a._filename, 'r') as f:
assert f.readlines() == ['something']
|
<filename>dro_training.py
""" Functions for training using the naive approach."""
import os
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import time
import data
import losses
import model
import naive_training
import optimization
import utils
class DRO_Model(model.Model):
"""Linear model with DRO constrained optimization.
Args:
feature_names: list of strings, a list of names of all feature columns.
protected_columns: list of strings, a list of the names of all protected group columns
(column should contain values of 0 or 1 representing group membership).
label_column: string, name of label column. Column should contain values of 0 or 1.
maximum_lambda_radius: float, an optional upper bound to impose on the
sum of the lambdas.
maximum_p_radius: float, an optional upper bound to impose on the
L1 norm of each row of phats - ptildes.
Raises:
ValueError: if "maximum_lambda_radius" is nonpositive.
ValueError: if "maximum_p_radius" is negative.
"""
def __init__(self, feature_names, protected_columns, label_column, phats, maximum_lambda_radius=None, maximum_p_radius=[1,1,1]):
tf.reset_default_graph()
tf.random.set_random_seed(123)
self.feature_names = feature_names
self.protected_columns = protected_columns
self.label_column = label_column
self.num_data = len(phats[0])
self.phats = phats
if (maximum_lambda_radius is not None and maximum_lambda_radius <= 0.0):
raise ValueError("maximum_lambda_radius must be strictly positive")
if (maximum_p_radius is not None and maximum_p_radius[0] < 0.0):
raise ValueError("maximum_p_radius must be non negative")
self._maximum_lambda_radius = maximum_lambda_radius
self._maximum_p_radius = maximum_p_radius
# Set up feature and label tensors.
num_features = len(self.feature_names)
self.features_placeholder = tf.placeholder(tf.float32, shape=(None, num_features), name='features_placeholder')
self.protected_placeholders = [tf.placeholder(tf.float32, shape=(None, 1), name=attribute+"_placeholder") for attribute in self.protected_columns]
self.labels_placeholder = tf.placeholder(
tf.float32, shape=(None, 1), name='labels_placeholder')
self.num_groups = len(self.protected_placeholders)
# Construct linear model.
self.predictions_tensor = tf.layers.dense(inputs=self.features_placeholder, units=1, activation=None, name="linear_model")
self.theta_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "linear_model")
def build_train_ops(self, constraint= 'tpr', learning_rate_theta=0.01, learning_rate_lambda=0.01, learning_rate_p_list=[0.01, 0.01, 0.01], constraints_slack=1.0):
"""Builds operators that take gradient steps during training.
Args:
learning_rate_theta: float, learning rate for theta parameter on descent step.
learning_rate_lambda: float, learning rate for lambda parameter on ascent step.
learning_rate_p_list: list of float, learning rate for ptilde parameters on ascent step.
constraints_slack: float, amount of slack for constraints. New constraint will be
original_constraint - constraints_slack
"""
# Hinge loss objective.
self.objective = tf.losses.hinge_loss(self.labels_placeholder, self.predictions_tensor)
# Create lagrange multiplier variables lambda.
initial_lambdas = np.zeros((self.num_groups,), dtype=np.float32)
if constraint == 'tpr_and_fpr':
initial_lambdas = np.zeros((2*self.num_groups,), dtype=np.float32)
self.lambda_variables = tf.compat.v2.Variable(
initial_lambdas,
trainable=True,
name="lambdas",
dtype=tf.float32,
constraint=self.project_lambdas)
# Create lagrange multiplier variables p.
self.p_variables_list = []
def make_projection_p(i2):
return lambda x: self.project_ptilde(x, i2)
for i in range(self.num_groups):
initial_p = np.zeros((self.num_data,), dtype=np.float32)
self.p_variables_list.append(tf.compat.v2.Variable(
initial_p,
trainable=True,
name="ptilde",
dtype=tf.float32,
constraint=make_projection_p(i)))
constraints_list = []
if constraint == 'tpr':
constraints_list = self.get_equal_tpr_constraints_dro(constraints_slack=constraints_slack)
elif constraint == 'tpr_and_fpr':
constraints_list = self.get_equal_tpr_and_fpr_constraints_dro(constraints_slack=constraints_slack)
else:
raise("constraint %s not supported for DRO." % (constraint))
self.num_constraints = len(constraints_list)
self.constraints = tf.convert_to_tensor(constraints_list)
# Lagrangian loss to minimize
lagrangian_loss = self.objective + tf.tensordot(
tf.cast(self.lambda_variables, dtype=self.constraints.dtype.base_dtype),
self.constraints, 1)
optimizer_theta = tf.train.AdamOptimizer(learning_rate_theta)
optimizer_lambda = tf.train.AdamOptimizer(learning_rate_lambda)
optimizer_p_list = []
for i in range(len(learning_rate_p_list)):
#print('create optimizer p, ', i, learning_rate_p_list[i])
optimizer_p_list.append(tf.train.AdamOptimizer(learning_rate_p_list[i]))
self.train_op_theta = optimizer_theta.minimize(lagrangian_loss, var_list=self.theta_variables)
self.train_op_lambda = optimizer_lambda.minimize(-lagrangian_loss, var_list=self.lambda_variables)
self.train_op_p_list = []
for i in range(self.num_groups):
optimizer_p = optimizer_p_list[i]
p_variable = self.p_variables_list[i]
train_op_p = optimizer_p.minimize(-lagrangian_loss, var_list=p_variable)
self.train_op_p_list.append(train_op_p)
return self.train_op_theta, self.train_op_lambda, self.train_op_p_list
def training_generator(model,
train_df,
val_df,
test_df,
minibatch_size=None,
num_iterations_per_loop=1,
num_loops=1):
tf.set_random_seed(31337)
num_rows = train_df.shape[0]
p_variables_list_all_loop = []
if minibatch_size is None:
print('minibatch is off')
minibatch_size = num_rows
else:
minibatch_size = min(minibatch_size, num_rows)
permutation = list(range(train_df.shape[0]))
random.shuffle(permutation)
session = tf.Session()
session.run((tf.global_variables_initializer(),
tf.local_variables_initializer()))
# Iterate through minibatches. Gradients are computed on each minibatch.
minibatch_start_index = 0
for n in range(num_loops):
print('start loop ', n+1, 'in loops ', num_loops)
loop_start_time = time.time()
for _ in range(num_iterations_per_loop):
minibatch_indices = []
while len(minibatch_indices) < minibatch_size:
minibatch_end_index = (
minibatch_start_index + minibatch_size - len(minibatch_indices))
if minibatch_end_index >= num_rows:
minibatch_indices += range(minibatch_start_index, num_rows)
minibatch_start_index = 0
else:
minibatch_indices += range(minibatch_start_index, minibatch_end_index)
minibatch_start_index = minibatch_end_index
minibatch_df = train_df.iloc[[permutation[ii] for ii in minibatch_indices]]
# Descent step on theta.
session.run(
model.train_op_theta,
feed_dict=model.feed_dict_helper(minibatch_df))
# Ascent step on lambda.
session.run(
model.train_op_lambda,
feed_dict=model.feed_dict_helper(minibatch_df))
# Ascent step on p.
for i in range(model.num_groups):
session.run(
model.train_op_p_list[i],
feed_dict=model.feed_dict_helper(minibatch_df))
objective = session.run(model.objective, model.feed_dict_helper(train_df))
constraints = session.run(model.constraints, model.feed_dict_helper(train_df))
train_predictions = session.run(
model.predictions_tensor,
feed_dict=model.feed_dict_helper(train_df))
val_predictions = session.run(
model.predictions_tensor,
feed_dict=model.feed_dict_helper(val_df))
test_predictions = session.run(
model.predictions_tensor,
feed_dict=model.feed_dict_helper(test_df))
lambda_variables = session.run(model.lambda_variables)
p_variables_list = session.run(model.p_variables_list)
print('finish loop ', n+1, 'in loops ', num_loops)
print('time for this loop ',time.time() - loop_start_time)
yield (objective, constraints, train_predictions, lambda_variables, p_variables_list, val_predictions, test_predictions)
def training_helper(model,
train_df,
val_df,
test_df,
protected_columns,
proxy_columns,
label_column,
train_phats, val_phats, test_phats,
constraint = 'tpr_and_fpr',
minibatch_size = None,
num_iterations_per_loop=1,
num_loops=1, maximum_p_radius = 0.5, max_num_ptilde = 20, max_diff = [0.02, 0.02]):
train_hinge_objective_vector = []
train_01_objective_vector = [] # List of T scalar values representing the 01 objective at each iteration.
#ranked by: train_01_objective_vector, s.t. train_01_proxy_Ghat_constraints_matrix
#want to change to: train_01_objective_vector, s.t. train_01_robust_constraints_matrix
train_01_true_G_constraints_matrix = [] # List of T vectors of size m, where each vector[i] is the zero-one constraint violation for group i.
# Eventually we will just pick the last vector in this list, and take the max over m entries to get the max constraint violation.
train_01_proxy_Ghat_constraints_matrix = []
train_hinge_constraints_matrix = [] # Hinge loss constraint violations on the proxy groups.
train_robust_constraints_matrix = []
lambda_variables_matrix = []
p_variables_list_matrix = []
val_01_objective_vector = []
val_01_true_G_constraints_matrix = []
val_01_proxy_Ghat_constraints_matrix = []
val_robust_constraints_matrix = []
test_01_objective_vector = []
test_01_true_G_constraints_matrix = []
test_01_proxy_Ghat_constraints_matrix = []
test_robust_constraints_matrix = []
for objective, constraints, train_predictions, lambda_variables, p_variables_list, val_predictions, test_predictions in training_generator(
model, train_df, val_df, test_df, minibatch_size, num_iterations_per_loop,
num_loops):
lambda_variables_matrix.append(lambda_variables)
p_variables_list_matrix.append(p_variables_list)
train_hinge_objective_vector.append(objective)
train_hinge_constraints_matrix.append(constraints)
train_df['predictions'] = train_predictions
train_01_objective, train_01_true_G_constraints, train_01_proxy_Ghat_constraints = losses.get_error_rate_and_constraints(train_df, protected_columns, proxy_columns, label_column, max_diff = max_diff, constraint = constraint)
train_01_objective_vector.append(train_01_objective)
train_01_true_G_constraints_matrix.append(train_01_true_G_constraints)
train_01_proxy_Ghat_constraints_matrix.append(train_01_proxy_Ghat_constraints)
train_robust_constraints = get_robust_constraints(train_df, train_phats, proxy_columns,label_column, maximum_p_radius=maximum_p_radius, max_num_ptilde = max_num_ptilde, max_diff = max_diff, constraint = constraint)
train_robust_constraints_matrix.append(train_robust_constraints)
val_df['predictions'] = val_predictions
val_01_objective, val_01_true_G_constraints, val_01_proxy_Ghat_constraints = losses.get_error_rate_and_constraints(val_df, protected_columns, proxy_columns, label_column)
val_01_objective_vector.append(val_01_objective)
val_01_true_G_constraints_matrix.append(val_01_true_G_constraints)
val_01_proxy_Ghat_constraints_matrix.append(val_01_proxy_Ghat_constraints)
val_robust_constraints = get_robust_constraints(val_df, val_phats, proxy_columns,label_column, maximum_p_radius=maximum_p_radius, max_num_ptilde = max_num_ptilde, max_diff = max_diff, constraint = constraint)
val_robust_constraints_matrix.append(val_robust_constraints)
test_df['predictions'] = test_predictions
test_01_objective, test_01_true_G_constraints, test_01_proxy_Ghat_constraints = losses.get_error_rate_and_constraints(test_df, protected_columns, proxy_columns, label_column)
test_01_objective_vector.append(test_01_objective)
test_01_true_G_constraints_matrix.append(test_01_true_G_constraints)
test_01_proxy_Ghat_constraints_matrix.append(test_01_proxy_Ghat_constraints)
test_robust_constraints = get_robust_constraints(test_df, test_phats, proxy_columns,label_column, maximum_p_radius=maximum_p_radius, max_num_ptilde = max_num_ptilde, max_diff = max_diff, constraint = constraint)
test_robust_constraints_matrix.append(test_robust_constraints)
return {'train_hinge_objective_vector': train_hinge_objective_vector,
'train_hinge_constraints_matrix': train_hinge_constraints_matrix,
'train_01_objective_vector': train_01_objective_vector,
'train_01_true_G_constraints_matrix': train_01_true_G_constraints_matrix,
'train_01_proxy_Ghat_constraints_matrix': train_01_proxy_Ghat_constraints_matrix,
'train_robust_constraints_matrix': train_robust_constraints_matrix,
'lambda_variables_matrix': lambda_variables_matrix,
'p_variables_list_matrix': p_variables_list_matrix,
'val_01_objective_vector': val_01_objective_vector,
'val_01_true_G_constraints_matrix': val_01_true_G_constraints_matrix,
'val_01_proxy_Ghat_constraints_matrix': val_01_proxy_Ghat_constraints_matrix,
'val_robust_constraints_matrix': val_robust_constraints_matrix,
'test_01_objective_vector': test_01_objective_vector,
'test_01_true_G_constraints_matrix': test_01_true_G_constraints_matrix,
'test_01_proxy_Ghat_constraints_matrix': test_01_proxy_Ghat_constraints_matrix,
'test_robust_constraints_matrix': test_robust_constraints_matrix}
def get_results_for_learning_rates(input_df,
feature_names, protected_columns, proxy_columns, label_column,
constraint = 'tpr_and_fpr',
learning_rates_theta = [0.001, 0.01, 0.1], #[0.001,0.01,0.1]
learning_rates_lambda = [0.1, 0.5, 1, 2], # [0.5, 1, 2]
learning_rate_p_lists = [[0.001, 0.001, 0.001],
[0.01, 0.01, 0.01],[0.1, 0.1, 0.1]],
num_runs=1, #10, num of splits
minibatch_size=None, #1000
num_iterations_per_loop=25, #100
num_loops=30, #30
constraints_slack=1, maximum_p_radius = [1,1,1], max_num_ptilde = 20, max_diff = [0.02, 0.02]):
#generate learning rates for p
#learning_rate_p_lists = list(itertools.product(*learning_rates_p_list))
ts = time.time()
# 10 runs with mean and stddev
results_dicts_runs = []
for i in range(num_runs):
print('Split %d of %d' % (i+1, num_runs))
t_split = time.time()
train_df, val_df, test_df = data.train_val_test_split(input_df, 0.6, 0.2, seed=88+i)
train_phats = data.compute_phats(train_df, proxy_columns)
val_phats = data.compute_phats(val_df, proxy_columns)
test_phats = data.compute_phats(test_df, proxy_columns)
val_objectives = []
val_constraints_matrix = []
results_dicts = []
learning_rates_iters_theta = []
learning_rates_iters_lambda = []
learning_rates_iters_p_list = []
for learning_rate_p_list in learning_rate_p_lists:
for learning_rate_theta in learning_rates_theta:
for learning_rate_lambda in learning_rates_lambda:
t_start_iter = time.time() - ts
print("time since start:", t_start_iter)
print("begin optimizing learning rate p list:", learning_rate_p_list)
print("begin optimizing learning rate theta: %.3f learning rate lambda: %.3f" % (learning_rate_theta, learning_rate_lambda))
model = DRO_Model(feature_names, proxy_columns, label_column, train_phats, maximum_lambda_radius=1, maximum_p_radius=maximum_p_radius)
model.build_train_ops(constraint=constraint, learning_rate_theta=learning_rate_theta, learning_rate_lambda=learning_rate_lambda,learning_rate_p_list = learning_rate_p_list, constraints_slack=constraints_slack)
# training_helper returns the list of errors and violations over each epoch.
results_dict = training_helper(
model,
train_df,
val_df,
test_df,
protected_columns,
proxy_columns,
label_column,
train_phats, val_phats, test_phats,
constraint = constraint,
minibatch_size=minibatch_size,
num_iterations_per_loop=num_iterations_per_loop,
num_loops=num_loops, maximum_p_radius=maximum_p_radius, max_num_ptilde=max_num_ptilde, max_diff = max_diff)
#find index for the best train iteration for this pair of hyper parameters
best_index_iters = utils.find_best_candidate_index(np.array(results_dict['train_01_objective_vector']),np.array(results_dict['train_robust_constraints_matrix']))
val_objectives.append(results_dict['val_01_objective_vector'][best_index_iters])
val_constraints_matrix.append(results_dict['val_robust_constraints_matrix'][best_index_iters])
results_dict_best_idx = utils.add_results_dict_best_idx_robust(results_dict, best_index_iters)
results_dicts.append(results_dict_best_idx)
learning_rates_iters_theta.append(learning_rate_theta)
learning_rates_iters_lambda.append(learning_rate_lambda)
learning_rates_iters_p_list.append(learning_rate_p_list)
print("Finished learning rate p list", learning_rate_p_list)
print("Finished optimizing learning rate theta: %.3f learning rate lambda: %.3f " % (learning_rate_theta, learning_rate_lambda))
print("Time that this run took:", time.time() - t_start_iter - ts)
#find the index of the best pair of hyper parameters
best_index = utils.find_best_candidate_index(np.array(val_objectives),np.array(val_constraints_matrix))
best_results_dict = results_dicts[best_index]
best_learning_rate_theta = learning_rates_iters_theta[best_index]
best_learning_rate_lambda = learning_rates_iters_lambda[best_index]
best_learning_rate_p_list = learning_rates_iters_p_list[best_index]
print('best_learning_rate_theta,', best_learning_rate_theta)
print('best_learning_rate_lambda', best_learning_rate_lambda)
print('best_learning_rate_p_list', best_learning_rate_p_list)
results_dicts_runs.append(best_results_dict)
print("time it took for this split", time.time() - t_split)
final_average_results_dict = utils.average_results_dict_fn(results_dicts_runs)
return final_average_results_dict
def get_robust_constraints(df, phats, proxy_columns, label_column, max_diff=[0.05, 0.05], max_num_ptilde = 20, maximum_p_radius=[1,1,1], constraint = 'tpr_and_fpr'):
"""Computes the robust fairness violations.
Args:
df: dataframe containing 'predictions' column and LABEL_COLUMN, PROXY_COLUMNS.
predictions column is not required to be thresholded.
"""
if constraint == 'tpr':
tpr_overall = losses.tpr(df, label_column)
print('tpr_overall', tpr_overall)
robust_constraints = []
for i in range(len(proxy_columns)):
robust_constraint = -5
for j in range(max_num_ptilde):
ptilde = utils.generate_rand_vec_l1_ball(phats[i], maximum_p_radius[i])
labels = np.array(df[label_column] > 0.5)
tp = np.array((df['predictions'] >= 0.0) & (df[label_column] > 0.5))
weighted_labels = np.multiply(ptilde, labels)
weighted_tp = np.multiply(ptilde, tp)
weighted_tpr = float(sum(weighted_tp)/sum(weighted_labels))
new_robust_constraint = tpr_overall - weighted_tpr - max_diff[0]
if new_robust_constraint > robust_constraint:
robust_constraint = new_robust_constraint
robust_constraints.append(robust_constraint)
elif constraint == 'tpr_and_fpr':
tpr_overall = losses.tpr(df, label_column)
print('tpr_overall', tpr_overall)
fpr_overall = losses.fpr(df, label_column)
print('fpr_overall', fpr_overall)
robust_constraints_tpr = []
robust_constraints_fpr = []
for i in range(len(proxy_columns)):
robust_constraint_tpr = -5
robust_constraint_fpr = -5
for j in range(max_num_ptilde):
ptilde = utils.generate_rand_vec_l1_ball(phats[i], maximum_p_radius[i])
labels = np.array(df[label_column] > 0.5)
tp = np.array((df['predictions'] >= 0.0) & (df[label_column] > 0.5))
weighted_labels = np.multiply(ptilde, labels)
weighted_tp = np.multiply(ptilde, tp)
weighted_tpr = float(sum(weighted_tp)/sum(weighted_labels))
new_robust_constraint_tpr = tpr_overall - weighted_tpr - max_diff[0]
if new_robust_constraint_tpr > robust_constraint_tpr:
robust_constraint_tpr = new_robust_constraint_tpr
fp = np.array((df['predictions'] >= 0.0) & (df[label_column] <= 0.5))
weighted_flipped_labels = np.multiply(ptilde, np.array(df[label_column] <= 0.5))
weighted_fp = np.multiply(ptilde, fp)
weighted_fpr = float(sum(weighted_fp)/sum(weighted_flipped_labels))
new_robust_constraint_fpr = weighted_fpr - fpr_overall - max_diff[1]
if new_robust_constraint_fpr > robust_constraint_fpr:
robust_constraint_fpr = new_robust_constraint_fpr
robust_constraints_tpr.append(robust_constraint_tpr)
robust_constraints_fpr.append(robust_constraint_fpr)
robust_constraints = robust_constraints_tpr + robust_constraints_fpr
return robust_constraints
# Expects averaged results_dict with means and standard deviations.
def plot_optimization_avg(results_dict, protected_columns, proxy_columns):
fig, axs = plt.subplots(5, figsize=(5,25))
num_iters = len(results_dict['train_hinge_objective_vector'][0])
iters = np.arange(num_iters)
axs[0].errorbar(iters, results_dict['train_hinge_objective_vector'][0], yerr=results_dict['train_hinge_objective_vector'][1])
axs[0].set_title('train_hinge_objective_vector')
for i in range(len(protected_columns)):
axs[1].errorbar(iters, results_dict['train_hinge_constraints_matrix'][0].T[i], results_dict['train_hinge_constraints_matrix'][1].T[i], label=protected_columns[i])
axs[2].errorbar(iters, results_dict['train_01_proxy_Ghat_constraints_matrix'][0].T[i], results_dict['train_01_proxy_Ghat_constraints_matrix'][1].T[i], label=proxy_columns[i])
axs[3].errorbar(iters, results_dict['train_01_true_G_constraints_matrix'][0].T[i], results_dict['train_01_true_G_constraints_matrix'][1].T[i], label=protected_columns[i])
axs[4].errorbar(iters, results_dict['train_robust_constraints_matrix'][0].T[i], results_dict['train_robust_constraints_matrix'][1].T[i], label=protected_columns[i])
axs[1].set_title('train_hinge_constraints_matrix')
axs[1].legend()
axs[2].set_title('train_01_proxy_Ghat_constraints_matrix')
axs[2].legend()
axs[3].set_title('train_01_true_G_constraints_matrix')
axs[3].legend()
axs[4].set_title('train_robust_constraints_matrix')
axs[4].legend()
plt.show()
# Expects results dicts without averaging.
def plot_optimization_dro(results_dict):
fig, axs = plt.subplots(5, figsize=(5,25))
axs[0].plot(results_dict['train_hinge_objective_vector'])
axs[0].set_title('train_hinge_objective_vector')
axs[1].plot(results_dict['train_hinge_constraints_matrix'])
axs[1].set_title('train_hinge_constraints_matrix')
axs[2].plot(results_dict['train_01_proxy_Ghat_constraints_matrix'])
axs[2].set_title('train_01_proxy_Ghat_constraints_matrix')
axs[3].plot(results_dict['train_01_true_G_constraints_matrix'])
axs[3].set_title('train_01_true_G_constraints_matrix')
axs[4].plot(results_dict['train_robust_constraints_matrix'])
axs[4].set_title('train_robust_constraints_matrix')
plt.show()
|
from __future__ import print_function, division
import os
import json
import time
from utils import command_parser
from utils.class_finder import model_class, agent_class
from main_eval import main_eval
from tqdm import tqdm
from tabulate import tabulate
from tensorboardX import SummaryWriter
os.environ["OMP_NUM_THREADS"] = "1"
def full_eval(args=None):
if args is None:
args = command_parser.parse_arguments()
create_shared_model = model_class(args.model)
init_agent = agent_class(args.agent_type)
args.phase = 'eval'
args.episode_type = 'TestValEpisode'
args.test_or_val = 'val'
start_time = time.time()
local_start_time_str = time.strftime(
'%Y_%m_%d_%H_%M_%S', time.localtime(start_time)
)
tb_log_dir = args.log_dir + "/" + args.title + '_' + args.phase + '_' + local_start_time_str
log_writer = SummaryWriter(log_dir=tb_log_dir)
# Get all valid saved_models for the given title and sort by train_ep.
checkpoints = [(f, f.split("_")) for f in os.listdir(args.save_model_dir)]
checkpoints = [
(f, int(s[-7]))  # the 7th field from the end of the file name is the training-episode count
for (f, s) in checkpoints
if len(s) >= 7 and f.startswith(args.title) and int(s[-7]) >= args.test_start_from
]
checkpoints.sort(key=lambda x: x[1])
best_model_on_val = None
best_performance_on_val = 0.0
for (f, train_ep) in tqdm(checkpoints, desc="Checkpoints."):
# break
model = os.path.join(args.save_model_dir, f)
args.load_model = model
args.present_model = f
args.test_or_val = "val"  # select on the validation split; the best model is re-evaluated on test below
main_eval(args, create_shared_model, init_agent)
# check if best on val.
with open(args.results_json, "r") as f:
results = json.load(f)
if results["success"] > best_performance_on_val:
best_model_on_val = model
best_performance_on_val = results["success"]
log_writer.add_scalar("val/success", results["success"], train_ep)
log_writer.add_scalar("val/spl", results["spl"], train_ep)
# best models
# HOZ_38909040_3300000_2021_09_16_15_12_10.dat
# TPNHOZ_29208145_2500000_2021_09_16_15_12_33.dat
args.test_or_val = "test"
args.load_model = best_model_on_val
# args.load_model = "./trained_models/HOZ_38909040_3300000_2021_09_16_15_12_10.dat"
# args.load_model = "./trained_models/TPNHOZ_29208145_2500000_2021_09_16_15_12_33.dat"
main_eval(args, create_shared_model, init_agent)
with open(args.results_json, "r") as f:
results = json.load(f)
print(
tabulate(
[
["SPL >= 1:", results["GreaterThan/1/spl"]],
["Success >= 1:", results["GreaterThan/1/success"]],
["SPL >= 5:", results["GreaterThan/5/spl"]],
["Success >= 5:", results["GreaterThan/5/success"]],
],
headers=["Metric", "Result"],
tablefmt="orgtbl",
)
)
print("Best model:", args.load_model)
if __name__ == "__main__":
full_eval()
<gh_stars>0
import cv2
import argparse
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
#Generator Network with 2 hidden layers
def generator(noise,reuse=None):
with tf.variable_scope('gen',reuse=reuse): #allows to have subsets of parameters
#Layer1
hidden1 = tf.layers.dense(inputs=noise,units=128)
hidden1 = tf.nn.leaky_relu(hidden1,alpha = 0.01) #relu
#Layer2
hidden2 = tf.layers.dense(inputs=hidden1,units=256)
hidden2 = tf.nn.leaky_relu(hidden2,alpha = 0.01) #relu
#Output
output = tf.layers.dense(inputs=hidden2,units=784,activation=tf.nn.tanh)
return output
#Discriminator Network
def discriminator(X,reuse=None):
with tf.variable_scope('disc',reuse=reuse): #allows to have subsets of parameters
#Layer1
hidden1 = tf.layers.dense(inputs=X,units=256)
hidden1 = tf.nn.leaky_relu(hidden1,alpha = 0.01) #relu
#Layer2
hidden2 = tf.layers.dense(inputs=hidden1,units=128)
hidden2 = tf.nn.leaky_relu(hidden2,alpha = 0.01) #relu
#Output
logits = tf.layers.dense(inputs=hidden2,units=1)
output = tf.sigmoid(logits)
return output, logits
#Loss Calculation
def loss_calc(logits,preds):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=preds))
def mnist_gan(mnist_dir, batch_size,epochs):
mnist = input_data.read_data_sets(mnist_dir,one_hot=True)
img_input = tf.placeholder(tf.float32,shape=[None,784])
noise_ip = tf.placeholder(tf.float32,shape=[None,100])#100 random points
Gen = generator(noise_ip)
Disc_img_pred, Disc_img_logits = discriminator(img_input) #Training Discriminator on real images
Disc_gen_pred, Disc_gen_logits = discriminator(Gen,reuse=True) #Training generator on fake images form Generator
Disc_img_loss = loss_calc(Disc_img_logits,tf.ones_like(Disc_img_logits)*0.9)#Applies Smoothing for Labels
Disc_gen_loss = loss_calc(Disc_gen_logits,tf.zeros_like(Disc_gen_logits))
Total_disc_loss = Disc_img_loss + Disc_gen_loss
Gen_loss = loss_calc(Disc_gen_logits,tf.ones_like(Disc_gen_logits))
lr = 0.001 #Start small
all_vars = tf.trainable_variables()
disc_vars = [v for v in all_vars if 'disc' in v.name]
gen_vars = [v for v in all_vars if 'gen' in v.name]
#Different optimizers for Disc and Gen
Disc_optim = tf.train.AdamOptimizer(lr).minimize(Total_disc_loss, var_list = disc_vars)
Gen_optim = tf.train.AdamOptimizer(lr).minimize(Gen_loss, var_list = gen_vars)
samples = []
init = tf.global_variables_initializer()
saver = tf.train.Saver()  # used for the periodic checkpoint saves below
with tf.Session() as sess:
sess.run(init)
for epoch in range(epochs):
n_batch = mnist.train.num_examples//batch_size
for i in range(n_batch):
batch = mnist.train.next_batch(batch_size)
disc_batch = batch[0].reshape((batch_size,784))
disc_batch = disc_batch*2-1 #rescale for tanh
gen_batch = np.random.uniform(-1,1,size = (batch_size,100))
_ =sess.run(Disc_optim, feed_dict = {img_input:disc_batch, noise_ip:gen_batch})
_ = sess.run(Gen_optim, feed_dict = {noise_ip:gen_batch})
print ("EPOCH = {}".format(epoch))
gen_ip = np.random.uniform(-1,1,size = (1,100))
gen_op = sess.run(Gen, feed_dict={noise_ip:gen_ip})  # sample from the generator graph built above instead of re-creating ops each epoch
samples.append(gen_op)
#save checkpoints here
if epoch%100==0:
saver.save(sess, 'model-{}.ckpt'.format(epoch))
print ("Saving checkpoint at epoch = {}".format(epoch))
np.save('generated_images_array.npy',samples)
#cv2.imwrite('Best_generated_sample.png',samples[-1].reshape(28,28)) #Plot image generated in final epoch
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--mnist_dir", default = 'mnist_train_dir', help="Train Image Directory.")
parser.add_argument("--batch_size",default =100,type=int, help="Default batch_size = 100.")
parser.add_argument("--epochs",default =500,type=int, help="Default epochs = 500.")
args = parser.parse_args()
mnist_gan(args.mnist_dir,args.batch_size,args.epochs)
|
<reponame>zengwbz/FaceMaskClassification<gh_stars>1-10
# from pretrainedmodels import models as pm
import pretrainedmodels  # required by the fallback branch in get_model()
from torch import nn
from torchvision import models as tm
from config import configs
# from efficientnet_pytorch import EfficientNet
from efficientnet_lite_pytorch import EfficientNet
from efficientnet_lite1_pytorch_model import EfficientnetLite1ModelFile
from efficientnet_lite2_pytorch_model import EfficientnetLite2ModelFile
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn.parameter import Parameter
# weights = {
# "efficientnet-b3":"/data/dataset/detection/pretrainedmodels/efficientnet-b3-c8376fa2.pth",
# "efficientnet-b4":"/data/dataset/detection/pretrainedmodels/efficientnet-b4-6ed6700e.pth",
# "efficientnet-b5":"/data/dataset/detection/pretrainedmodels/efficientnet-b5-b6417697.pth",
# "efficientnet-b6":"/data/dataset/detection/pretrainedmodels/efficientnet-b6-c76e70fd.pth",
# }
def gem(x, p=3, eps=1e-6):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(1./p)
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-6):
super(GeM,self).__init__()
self.p = Parameter(torch.ones(1)*p)
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'
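# Minimal usage sketch for GeM pooling (shapes are illustrative assumptions):
#   pool = GeM(p=3)
#   feats = torch.randn(2, 1280, 7, 7)   # (batch, channels, H, W) feature map
#   pooled = pool(feats)                 # -> (2, 1280, 1, 1); p=1 reduces to average pooling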
def get_model():
if configs.model_name.startswith("resnext50_32x4d"):
model = tm.resnext50_32x4d(pretrained=True)
model.avgpool = nn.AdaptiveAvgPool2d(1)
model.fc = nn.Linear(2048,configs.num_classes)
model.cuda()
elif configs.model_name.startswith("efficient"):
# efficientNet
# model_name = configs.model_name[:15]
# model = EfficientNet.from_name(model_name)
# model.load_state_dict(torch.load(weights[model_name]))
# in_features = model._fc.in_features
# model._fc = nn.Sequential(
# nn.BatchNorm1d(in_features),
# nn.Dropout(0.5),
# nn.Linear(in_features, configs.num_classes),
# )
weights_path = EfficientnetLite1ModelFile.get_model_file_path()
model = EfficientNet.from_pretrained('efficientnet-lite1', weights_path = weights_path)
in_features = model._fc.in_features
model._fc = nn.Sequential(
nn.BatchNorm1d(in_features),
nn.Dropout(0.5),
nn.Linear(in_features, configs.num_classes),
)
model.cuda()
else:
pretrained = "imagenet+5k" if configs.model_name.startswith("dpn") else "imagenet"
model = pretrainedmodels.__dict__[configs.model_name.split("-model")[0]](num_classes=1000, pretrained=pretrained)
if configs.model_name.startswith("pnasnet"):
model.last_linear = nn.Linear(4320, configs.num_classes)
model.avg_pool = nn.AdaptiveAvgPool2d(1)
elif configs.model_name.startswith("inception"):
model.last_linear = nn.Linear(1536, configs.num_classes)
model.avgpool_1a = nn.AdaptiveAvgPool2d(1)
else:
model.last_linear = nn.Linear(2048, configs.num_classes)
model.avg_pool = nn.AdaptiveAvgPool2d(1)
model.cuda()
return model
<reponame>vumichien/gama
from collections.abc import Sequence
import logging
from gama.logging.machine_logging import TOKENS, log_event
from .components import Individual
log = logging.getLogger(__name__)
class OperatorSet:
""" Provides a thin layer for ea operators for logging, callbacks and safety. """
def __init__(
self,
mutate,
mate,
create_from_population,
create_new,
compile_,
eliminate,
evaluate_callback,
max_retry=50,
completed_evaluations=None,
):
"""
:param mutate:
:param mate:
:param create:
:param create_new:
"""
self._mutate = mutate
self._mate = mate
self._create_from_population = create_from_population
self._create_new = create_new
self._compile = compile_
self._safe_compile = None
self._eliminate = eliminate
self._max_retry = max_retry
self._evaluate = None
self._evaluate_callback = evaluate_callback
self.evaluate = None
self._completed_evaluations = completed_evaluations
def wait_next(self, async_evaluator):
future = async_evaluator.wait_next()
if future.result is not None:
evaluation = future.result
if isinstance(evaluation, Sequence):
# This signature is currently only for an ASHA evaluation result
evaluation, loss, rung, full_evaluation = evaluation
if not full_evaluation:
# We don't process low-fidelity evaluations here (for now?).
return future
individual = evaluation.individual
log_event(
log,
TOKENS.EVALUATION_RESULT,
individual.fitness.start_time,
evaluation.pid,
individual.fitness.wallclock_time,
individual.fitness.process_time,
individual.fitness.values,
individual._id,
individual.pipeline_str(),
)
if self._evaluate_callback is not None:
self._evaluate_callback(evaluation)
elif future.exception is not None:
log.warning(f"Error raised during evaluation: {str(future.exception)}.")
return future
def try_until_new(self, operator, *args, **kwargs):
for _ in range(self._max_retry):
individual, log_args = operator(*args, **kwargs)
if str(individual.main_node) not in self._completed_evaluations:
return individual, log_args
else:
log.debug(f"50 iterations of {operator.__name__} did not yield new ind.")
# For progress on solving this, see #11
return individual, log_args
def mate(self, ind1: Individual, ind2: Individual, *args, **kwargs):
def mate_with_log():
new_individual1, new_individual2 = ind1.copy_as_new(), ind2.copy_as_new()
self._mate(new_individual1, new_individual2, *args, **kwargs)
log_args = [TOKENS.CROSSOVER, new_individual1._id, ind1._id, ind2._id]
return new_individual1, log_args
individual, log_args = self.try_until_new(mate_with_log)
log_event(log, *log_args)
return individual
def mutate(self, ind: Individual, *args, **kwargs):
def mutate_with_log():
new_individual = ind.copy_as_new()
mutator = self._mutate(new_individual, *args, **kwargs)
log_args = [TOKENS.MUTATION, new_individual._id, ind._id, mutator.__name__]
return new_individual, log_args
ind, log_args = self.try_until_new(mutate_with_log)
log_event(log, *log_args)
return ind
def individual(self, *args, **kwargs):
expression = self._create_new(*args, **kwargs)
if self._safe_compile is not None:
compile_ = self._safe_compile
else:
compile_ = self._compile
return Individual(expression, to_pipeline=compile_)
def create(self, *args, **kwargs):
return self._create_from_population(self, *args, **kwargs)
def eliminate(self, *args, **kwargs):
return self._eliminate(*args, **kwargs)
|
import glob
import os
from configuration.configuration_api import ConfigurationAPI
from rest_client.AuthenticationRest import AuthenticationAPI
from files_treatment_new.xls_gen_bank_patric import XlsGenBankPatric
from files_treatment_new.fasta_contigs_patric import FastaContigsPatric
from objects_new.Contigs_new import Contig
from objects_API.ContigJ import ContigJson
from objects_API.StrainJ import StrainJson
from objects_API.BacteriumJ import BacteriumJson
from objects_API.GeneJ import GeneJson
from objects_API.ProteinJ import ProteinJson
from Patric.ImportFiles import ImportFilesPatric
def createContig(contigObj, organismID):
"""
insert a Contig into a REST API
:param contigObj: Contig DBA object that you want to insert
:param organismID: ID of the organism which has this wholeDNA
:type contigObj: Contig
:type organismID: int
:return: id of the Contig inserted
:rtype int
"""
contigObjJson = ContigJson(id_db_online = contigObj.id_contig_db_outside, sequence_DNA= contigObj.sequence, fasta_head = contigObj.head, organism = organismID)
contigObjJson = contigObjJson.setContig()
return contigObjJson['id']
def createContigNew(contigObj, bacteriumId):
head_cnt = ''
if '>' not in contigObj.head:
head_cnt = '>' + contigObj.head
else:
head_cnt = contigObj.head
contigObj = Contig(id_contig_db_outside = contigObj.id_contig_db_outside, head = head_cnt, sequence = contigObj.sequence)
idContig = createContig(contigObj, bacteriumId)
return idContig
def createGene(id_bacterium, dna_sequence, start_contig, end_contig, fk_contig, id_db_online, function = None, fasta_head = None):
"""
insert a Gene into a REST API
:param id_bacterium: ID of the organisms
:param dna_sequence: DNA sequence of the gene
:param start_contig: start position of the gene in the contig
:param end_contig: end position of the gene in the contig
:param fk_contig: id of the contig
:param function: function of the gene
:param fasta_head: fasta head of the gene
:type id_bacterium: int
:type dna_sequence: str
:type start_contig: int - can be None
:type end_contig: int - can be None
:type fk_contig: int - can be None
:type function: str - can be None
:type fasta_head: str - can be None
:return: id of the gene inserted
:rtype int
"""
geneObjJson = GeneJson(sequence_DNA = dna_sequence, organism = id_bacterium, position_start_contig = start_contig, position_end_contig = end_contig, contig = fk_contig, fasta_head = fasta_head, id_db_online = id_db_online)
geneObjJson = geneObjJson.setGene()
return geneObjJson.id
def createProtein(id_db_online, fk_organism, fk_gene, sequence_aa, description):
"""
insert a Protein into a REST API
:param id_db_online: accession id of the protein in the source database
:param fk_organism: id of the organism
:param fk_gene: id of the gene
:param sequence_aa: amino-acid sequence of the protein
:param description: description/designation of the protein
:type id_db_online: str
:type fk_organism: int
:type fk_gene: int
:type sequence_aa: str
:type description: str
:return: id of the protein inserted
:rtype int
"""
proteinObjJson = ProteinJson(id_db_online = id_db_online, organism = fk_organism, gene = fk_gene, sequence_AA = sequence_aa, description = description)
proteinObjJson = proteinObjJson.setProtein()
return proteinObjJson.id
def createAndInsertElements(contig_obj, id_bacterium, xls_genbank_patric_obj):
#listProts = xls_genbank_patric_obj.get_proteins_information_in_excel()
list_proteins = xls_genbank_patric_obj.get_proteins_objects_by_contig_id(contig_obj.head)
list_proteins_ids = xls_genbank_patric_obj.get_proteins_ids_by_contig_id(contig_obj.head)
contig_id = createContigNew(contig_obj, id_bacterium)
for protein_obj in list_proteins:
gene_function = None
gene_id_db_online = None
if protein_obj.sequence_prot == None:
gene_function = protein_obj.description
gene_id_db_online = protein_obj.id_accession
fasta_head_gene = '>' + protein_obj.id_accession
id_gene = createGene(id_bacterium, protein_obj.sequence_dna, protein_obj.start_point_cnt, protein_obj.end_point_cnt, fk_contig = contig_id, function = gene_function, fasta_head = fasta_head_gene, id_db_online = gene_id_db_online)
if protein_obj.sequence_prot != None and len(protein_obj.sequence_prot) > 5:
createProtein(id_db_online = protein_obj.id_accession, fk_organism = id_bacterium, fk_gene = id_gene, sequence_aa = protein_obj.sequence_prot, description = protein_obj.designation)
def createStrain(designation, fk_specie):
#Information for the Strain
strain_obj = StrainJson(designation = designation, specie = fk_specie)
id_strain = strain_obj.setStrain()
return id_strain
def createBacterium(acc_number, person_responsible, source_data, fk_strain):
bacteriumObjJson = BacteriumJson(acc_number = acc_number, person_responsible = person_responsible, source_data = source_data, strain = fk_strain)
bacteriumObjJson = bacteriumObjJson.setBacterium()
return bacteriumObjJson.id
def manageOrganismsContent(dict_files):
for key, value in dict_files.items():
contig_path_file = value[0]
excel_path_file = value[1]
xls_genbank_patric_obj = XlsGenBankPatric(path_file= excel_path_file, sheet_name = 'Features in Pseudomonas aerugin')
contig_fasta_file_patric_obj = FastaContigsPatric(path_file = contig_path_file)
list_contigs_ids_fasta = contig_fasta_file_patric_obj.get_list_contigs_id()
list_contigs_ids_xls = xls_genbank_patric_obj.get_contigs_id_sorted()
list_diff = list(set(list_contigs_ids_fasta) - set(list_contigs_ids_xls))
print(list_diff)
#assert len(list_contigs_ids_fasta) == len(list_contigs_ids_xls)
#Strain creation -------------------
strain_designation = key.replace("Pseudomonas aeruginosa ","")
fk_specie = 417
#strain_obj = StrainJson.verifyStrainExistanceDesignationFkSpecie(strain_designation, fk_specie)
#if strain_obj == None:
strain = createStrain(strain_designation, fk_specie)
id_strain = strain.id
#else:
# id_strain = strain_obj.id
#Bacterium creation --------------------
acc_number = 'Greg_Patric_' + strain_designation
source_data = 5
person_responsible = 2
id_bacterium = createBacterium(acc_number, person_responsible, source_data, id_strain)
list_of_contigs = contig_fasta_file_patric_obj.create_contigs_from_file()
for contig_old in list_of_contigs:
createAndInsertElements(contig_old, id_bacterium, xls_genbank_patric_obj)
#Token connection
conf_obj = ConfigurationAPI()
conf_obj.load_data_from_ini()
AuthenticationAPI().createAutenthicationToken()
#End token connection
#end Test strain
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
path = dir_path + '/Patric/organisms/'
print(path)
import_files_obj = ImportFilesPatric(path, '.contigs.fasta','.xls')
dict_files = import_files_obj.getOrganismsFiles()
#Information for the bacterium
manageOrganismsContent(dict_files)
#### Test old methods (kept commented out: these names are locals of
#### manageOrganismsContent and are not defined at module scope)
# for contig_id in list_contigs_ids_fasta:
#     print(contig_id)
#     list_proteins = xls_genbank_patric_obj.get_proteins_objects_by_contig_id(contig_id)
print('fini')
"""Linear inverse problems and bayesian estimation with linear restriction."""
import numpy as np
from gnss_timeseries.stats import is_not_outlier
__docformat__ = 'reStructuredText en'
# -----------------------------------------------------------------------------
# Linear regression (one variable)
def linreg(x, y, stdev_y=None, weights=None,
no_outliers=False, outliers_conf=4):
r"""
Linear regression
Computes the coefficients :math:`a_0` and :math:`a_1` of the (weighted)
least squares linear regression.
:param x: :math:`x` values :math:`\{x_k\}_{k=1}^n`
:param y: :math:`y` values :math:`\{y_k\}_{k=1}^n`.
:param stdev_y: standard deviations of :math:`y`,
:math:`\{\sigma_k\}_{k=1}^n` (optional). This is equivalent to the
statistical weights :math:`w_k = \frac{1}{{\sigma_k}^2}`.
:param weights: statistical weights :math:`w_k = \frac{1}{{\sigma_k}^2}`.
Only used if `std_y` is `None`.
:param no_outliers: remove outliers?
:param outliers_conf: confidence level: 1, 2, 3 or 4,
which correspond to 90%, 95%, 99% and 99.9% two-tailed confidence
respectively (normal distribution). Default: 4 (99.9%)
:type x: numpy.ndarray
:type y: numpy.ndarray
:type stdev_y: numpy.ndarray or None
:type weights: numpy.ndarray or None
:type no_outliers: bool
:type outliers_conf: int
:return: :math:`a_1`, :math:`a_0` (and, when `no_outliers` is True, the boolean
mask of points kept)
:rtype: (float, float) or (float, float, numpy.ndarray)
.. math:: \min_{a_0,\,a_1} \,\,
\sum \limits_{k=1}^{n} \frac{(a_0 + a_1 x_k - y_k)^2}{{\sigma_k}^2}
.. note:: this function is faster than *scipy.stats.linregress* by a factor
between 1.07 and 5.0 (larger for smaller sizes).
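A minimal usage sketch (illustrative values; equal weights assumed)::

x = np.arange(5, dtype=float)
y = 2.0*x + 1.0
a1, a0 = linreg(x, y)  # a1 ~ 2.0 (slope), a0 ~ 1.0 (intercept)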
"""
if no_outliers:
return _linreg_no_outliers(x, y, stdev_y=stdev_y, weights=weights,
confidence=outliers_conf)
return _linreg_plain(x, y, stdev_y=stdev_y, weights=weights)
def _linreg_plain(x, y, stdev_y=None, weights=None):
x_ave, y_ave, xy_ave, x2_ave = average((x, y, x*y, x*x),
stdev=stdev_y, weights=weights)
aux = x2_ave - x_ave*x_ave
if abs(aux) < 1.e-300:
return np.nan, np.nan
else:
a1 = (xy_ave - x_ave*y_ave)/aux
return a1, y_ave - a1*x_ave
def _linreg_no_outliers(x, y, stdev_y=None, weights=None, confidence=4):
# removing outliers
wt = _weights(stdev_y, weights)
mask_ok = is_not_outlier(y, confidence=confidence)
c1 = np.nan
c0 = np.nan
for k in range(3):
c1, c0, mask_ok_new = _linreg_no_outliers_step(
x[mask_ok], y[mask_ok],
None if wt is None else wt[mask_ok], confidence)
n_new_ok = mask_ok_new.sum()
if n_new_ok == mask_ok_new.size or n_new_ok < 0.75*mask_ok_new.size:
break
mask_ok[mask_ok] = mask_ok_new
return c1, c0, mask_ok
def _linreg_no_outliers_step(x, y, weights, confidence):
c1, c0 = _linreg_plain(x, y, stdev_y=None, weights=weights)
mask_ok_new = is_not_outlier(c1 + c0*x - y, confidence=confidence)
return c1, c0, mask_ok_new
# -----------------------------------------------------------------------------
# Linear regression: pinning a point or fixing the slope
def linreg_pinned(xy_point, x, y, stdev_y=None, weights=None):
# slope of the least-squares line constrained to pass through xy_point = (x0, y0)
delta_x = (x - xy_point[0])
sx2, sxy = average((delta_x*delta_x, delta_x*(y - xy_point[1])),
stdev=stdev_y, weights=weights)
return sxy/sx2
def linreg_fixed_slope(slope, x, y, stdev_y=None, weights=None):
return average(y - slope*x, stdev=stdev_y, weights=weights)
def average(vec, stdev=None, weights=None):
is_numpy_array = isinstance(vec, np.ndarray)
w = _weights(stdev, weights)
if w is None:
if is_numpy_array:
return vec.mean()
return tuple(v.mean() for v in vec)
if is_numpy_array:
return w.dot(vec)
return tuple(w.dot(v) for v in vec)
def _weights(stdev, weights):
if stdev is None:
if weights is None:
return None
w = weights
else:
w = 1./(stdev*stdev)
return w/w.sum()
# -----------------------------------------------------------------------------
# Special Linear regression: localized
def linreg_local(x, y, x_center=None, stdev_y=None, weights=None,
damping_length=None, no_outliers=False, outliers_conf=4):
if x_center is None:
x_center = np.median(x)
wt = _weights(stdev_y, weights)
d_length = 0.1*x.ptp() if damping_length is None else damping_length
aux = (x - x_center)/d_length
aux = np.exp(-aux*aux)
wt = aux if wt is None else wt*aux
return linreg(x, y, stdev_y=None, weights=wt,
no_outliers=no_outliers,
outliers_conf=outliers_conf)
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for CoSMoMVPA dataset (http://cosmomvpa.org)"""
from mvpa2.testing.tools import assert_raises, ok_, assert_true, \
assert_equal, assert_array_equal, with_tempfile
from mvpa2.testing import skip_if_no_external
skip_if_no_external('scipy')
from scipy.io import loadmat, savemat, matlab
from mvpa2.datasets import cosmo
from mvpa2.measures.base import Measure
from mvpa2.datasets.base import Dataset
from mvpa2.mappers.fx import mean_feature
import numpy as np
arr = np.asarray
#########################
# helper functions
def _tup2obj(tuples):
# tup is a list of (key, value) tuples
# returns a numpy object array with the same data
dtypes = []
values = []
for k, v in tuples:
dtypes.append((k, 'O'))
values.append(v)
return np.array([[tuple(values)]], dtype=np.dtype(dtypes))
def _create_small_mat_dataset_dict():
'''
Generate small dataset as represented in matlab.
Equivalent is to do in matlab:
ds=struct();
ds.samples=[1 2 3; 4 5 6];
ds.a.name='input';
ds.fa.i=[3 2 1];
ds.fa.j=[1 2 2];
ds.sa.chunks=[2 2]';
ds.sa.targets=[1 2]';
ds.sa.labels={'yin','yan'};
save('simple_ds.mat','-struct','ds');
and do in python:
ds=loadmat('simple_ds.mat')
'''
samples = arr([[1, 2, 3], [4, 5, 6]])
sa = _tup2obj([('chunks', arr([[2], [2]])),
('targets', arr([[1], [2]])),
('labels', arr([arr(['yin'], dtype='O'),
arr(['yan'], dtype='O')]))])
fa = _tup2obj([('i', arr([[3., 2., 1.]])),
('j', arr([[1., 2., 2.]]))])
a = _tup2obj([('name', arr(arr(['input'], dtype='O')))])
# dictionary with these value
return dict(samples=samples, sa=sa, fa=fa, a=a)
def _build_cell(elems):
'''
Put elements in a an array compatible
with scipy's matlab cell structure.
Necessary for recent versions of numpy
'''
n = len(elems)
c = np.zeros((1, n), dtype=object)
for i, elem in enumerate(elems):
c[0, i] = elem
return c
def _create_small_mat_nbrhood_dict():
'''
Generate small neighborhood as represented in matlab.
Equivalent is to do in matlab:
nbrhood=struct();
nbrhood.neighbors={1, [1 3], [1 2 3], [2 2]};
nbrhood.fa.k=[4 3 2 1];
nbrhood.a.name='output';
save('simple_nbrhood.mat','-struct','nbrhood');
and do in python:
nbrhood=loadmat('simple_nbrhood.mat')
'''
elems = [arr([[1]]), arr([[1, 3]]), arr([[1, 2, 3]]), arr([[2, 2]])]
neighbors = _build_cell(elems)
fa = _tup2obj([('k', arr([[4., 3., 2., 1.]]))])
a = _tup2obj([('name', arr(arr(['output'], dtype='O')))])
return dict(neighbors=neighbors, fa=fa, a=a)
def _assert_ds_mat_attributes_equal(ds, m, attr_keys=('a', 'sa', 'fa')):
# ds is a Dataset object, m a matlab-like dictionary
for attr_k in attr_keys:
attr_v = getattr(ds, attr_k)
for k in attr_v.keys():
v = attr_v[k].value
assert_array_equal(m[attr_k][k][0, 0].ravel(), v)
def _assert_ds_less_or_equal(x, y):
# x and y are a Dataset; x should contain a subset of
# elements in .sa, fa, a and have the same samples as y
# Note: no support for fancy objects such as mappers
assert_array_equal(x.samples, y.samples)
for label in ('a', 'fa', 'sa'):
vx = getattr(x, label)
vy = getattr(y, label)
_assert_array_collectable_less_or_equal(vx, vy)
def _assert_ds_equal(x, y):
# test for two Dataset objects to be equal
# Note: no support for fancy objects such as mappers
_assert_ds_less_or_equal(x, y)
_assert_ds_less_or_equal(y, x)
def _assert_array_collectable_less_or_equal(x, y):
# test for the keys in x to be a subset of those in y,
# and the values corresponding to k in x being equal to those in y
assert_true(set(x.keys()).issubset(set(y.keys())))
for k in x.keys():
assert_array_equal(x[k].value, y[k].value)
def _assert_array_collectable_equal(x, y):
# test for keys and values equal in x and y
_assert_array_collectable_less_or_equal(x, y)
_assert_array_collectable_less_or_equal(y, x)
def _assert_set_equal(x, y):
# test for two sets being equal
assert_equal(set(x), set(y))
#########################
# testing functions
@with_tempfile('.mat', 'matlab_file')
def test_cosmo_dataset(fn):
skip_if_no_external('scipy', min_version='0.8')
mat = _create_small_mat_dataset_dict()
ds_mat = cosmo.from_any(mat)
savemat(fn, mat)
# test Dataset, filename, dict in matlab form, and input from loadmat
for input in (ds_mat, fn, mat, loadmat(fn)):
# check dataset creation
ds = cosmo.from_any(input)
# ensure dataset has expected values
assert_array_equal(ds.samples, mat['samples'])
_assert_set_equal(ds.sa.keys(), ['chunks', 'labels', 'targets'])
_assert_set_equal(ds.fa.keys(), ['i', 'j'])
_assert_set_equal(ds.a.keys(), ['name'])
assert_array_equal(ds.a.name, 'input')
assert_array_equal(ds.sa.chunks, [2, 2])
assert_array_equal(ds.sa.targets, [1, 2])
assert_array_equal(ds.sa.labels, ['yin', 'yan'])
assert_array_equal(ds.fa.i, [3, 2, 1])
assert_array_equal(ds.fa.j, [1, 2, 2])
# check mapping to matlab format
mat_mapped = cosmo.map2cosmo(ds)
for m in (mat, mat_mapped):
assert_array_equal(ds_mat.samples, m['samples'])
_assert_ds_mat_attributes_equal(ds_mat, m)
@with_tempfile('.mat', 'matlab_file')
def test_cosmo_queryengine(fn):
skip_if_no_external('scipy', min_version='0.8')
nbrhood_mat = _create_small_mat_nbrhood_dict()
neighbors = nbrhood_mat['neighbors']
savemat(fn, nbrhood_mat)
# test dict in matlab form, filename, and through QueryEngine loader
for input in (nbrhood_mat, fn, cosmo.CosmoQueryEngine.from_mat(neighbors)):
qe = cosmo.from_any(input)
assert_array_equal(qe.ids, [0, 1, 2, 3])
for i in qe.ids:
nbr_fids_base0 = neighbors[0, i][0] - 1
assert_array_equal(qe.query_byid(i), nbr_fids_base0)
_assert_ds_mat_attributes_equal(qe, nbrhood_mat, ('fa', 'a'))
def test_cosmo_searchlight():
ds = cosmo.from_any(_create_small_mat_dataset_dict())
sl = cosmo.CosmoSearchlight(mean_feature(),
_create_small_mat_nbrhood_dict())
ds_count = sl(ds)
dict_count = Dataset(samples=ds_count.samples,
fa=dict(k=arr([4, 3, 2, 1])),
sa=dict((k, ds.sa[k].value) for k in ds.sa.keys()),
a=dict(name=['output']))
_assert_ds_less_or_equal(dict_count, ds_count)
@with_tempfile('.h5py', 'pymvpa_file')
def test_cosmo_io_h5py(fn):
skip_if_no_external('h5py')
from mvpa2.base.hdf5 import h5save, h5load
# Dataset from cosmo
ds = cosmo.from_any(_create_small_mat_dataset_dict())
h5save(fn, ds)
ds_loaded = h5load(fn)
_assert_ds_equal(ds, ds_loaded)
# Queryengine
qe = cosmo.from_any(_create_small_mat_nbrhood_dict())
h5save(fn, qe)
qe_loaded = h5load(fn)
assert_array_equal(qe.ids, qe_loaded.ids)
_assert_array_collectable_equal(qe.a, qe_loaded.a)
_assert_array_collectable_equal(qe.fa, qe_loaded.fa)
def test_cosmo_exceptions():
m = _create_small_mat_dataset_dict()
m.pop('samples')
assert_raises(KeyError, cosmo.cosmo_dataset, m)
assert_raises(ValueError, cosmo.from_any, m)
assert_raises(ValueError, cosmo.from_any, ['illegal input'])
mapping = {1: arr([1, 2]), 2: arr([2, 0, 0])}
qe = cosmo.CosmoQueryEngine(mapping) # should be fine
assert_raises(TypeError, cosmo.CosmoQueryEngine, [])
mapping[1] = 1.5
assert_raises(TypeError, cosmo.CosmoQueryEngine, mapping)
mapping[1] = 'foo'
assert_raises(TypeError, cosmo.CosmoQueryEngine, mapping)
mapping[1] = -1
assert_raises(TypeError, cosmo.CosmoQueryEngine, mapping)
mapping[1] = arr([1.5, 2.1])
assert_raises(ValueError, cosmo.CosmoQueryEngine, mapping)
neighbors = _create_small_mat_nbrhood_dict()['neighbors']
qe = cosmo.CosmoQueryEngine.from_mat(neighbors) # should be fine
neighbors[0, 0][0] = -1
assert_raises(ValueError, cosmo.CosmoQueryEngine.from_mat, neighbors)
neighbors[0, 0] = arr(1.5)
assert_raises(ValueError, cosmo.CosmoQueryEngine.from_mat, neighbors)
for illegal_nbrhood in (['fail'], cosmo.QueryEngineInterface):
assert_raises((TypeError, ValueError),
lambda x: cosmo.CosmoSearchlight([], x),
illegal_nbrhood)
def test_cosmo_repr_and_str():
# simple smoke test for __repr__ and __str__
creators = (_create_small_mat_nbrhood_dict, _create_small_mat_dataset_dict)
for creator in creators:
obj = cosmo.from_any(creator())
for fmt in 'rs':
obj_str = (("%%%s" % fmt) % obj)
assert_true(obj.__class__.__name__ in obj_str)
|
#By bafomet
import requests
import random
import json
from module.utils import COLORS
try:
from module import local
except ImportError:
from . import local
resp_js = None
is_private = False
total_uploads = 12
def get_page(username):
global resp_js
session = requests.session()
session.headers = {'User-Agent': random.choice(local.useragent)}
# TODO: Apparently a logged-out user cannot fetch requests with `__a`;
# TODO: this still needs to be solved properly.
resp_js = session.get(
f'https://www.instagram.com/{username}/',
params={"__a": "1"},
allow_redirects=False,
).text
return resp_js
def exinfo():
def xprint(xdict, text):
if xdict:
print(f"{COLORS.GNSL} [ {COLORS.REDL}Frequently uses{COLORS.GNSL} ] {text} :")
print()
i = 0
for key, val in xdict.items():
if len(mails) == 1:
if key in mails[0]:
continue
print(f" {local.gr}{key} : {local.wh}{val}")
i += 1
if i > 4:
break
print()
else:
pass
raw = local.find(resp_js)
mails = raw['email']
tags = local.sort_list_with_counter(raw['tags'])
ment = local.sort_list_with_counter(raw['mention'])
if mails:
if len(mails) == 1:
print(f"{COLORS.GNSL} [ {COLORS.REDL}Frequently uses{COLORS.GNSL} ]: {mails[0]}")
print()
else:
print(f"{COLORS.GNSL} [ {COLORS.REDL}Frequently uses{COLORS.GNSL} ]: {len(mails)}")
print()
for mail in mails:
print(mail)
print()
xprint(tags, "Tags")
xprint(ment, "Mentions in messages or posts by the page owner")
def user_info(usrname):
global total_uploads, is_private
resp_js = get_page(usrname)
js = json.loads(resp_js)
js = js['graphql']['user']
if js['is_private'] is True:
is_private = True
if js['edge_owner_to_timeline_media']['count'] > 12:
pass
else:
total_uploads = js['edge_owner_to_timeline_media']['count']
usrinfo = {
f' {COLORS.WHSL}Username{COLORS.GNSL} ': js['username'],
f' {COLORS.REDL}Profile ID{COLORS.GNSL} ': js['id'],
f' {COLORS.WHSL}Full name{COLORS.GNSL} ': js['full_name'],
f' {COLORS.WHSL}Followers{COLORS.GNSL} ': js['edge_followed_by']['count'],
f' {COLORS.WHSL}Following{COLORS.GNSL} ': js['edge_follow']['count'],
f' {COLORS.WHSL}Image posts{COLORS.GNSL} ': js['edge_owner_to_timeline_media']['count'],
f' {COLORS.WHSL}Video posts{COLORS.GNSL} ': js['edge_felix_video_timeline']['count'],
f' {COLORS.WHSL}reels{COLORS.GNSL} ': js['highlight_reel_count'],
f' {COLORS.WHSL}bio ': js['biography'].replace('\n', ', '),
f' {COLORS.REDL}External URL ': js['external_url'],
f' {COLORS.WHSL}Private ': js['is_private'],
f' {COLORS.WHSL}Verified ': js['is_verified'],
f' {COLORS.REDL}Profile photo ': local.urlshortner(js['profile_pic_url_hd']),
f' {COLORS.WHSL}Business account ': js['is_business_account'],
#'connected to fb': js['connected_fb_page'], -- requires login
f' {COLORS.WHSL}Joined recently ': js['is_joined_recently'],
f' {COLORS.WHSL}Business category ': js['business_category_name'],
f' {COLORS.WHSL}Category ': js['category_enum'],
f' {COLORS.WHSL}has guides ': js['has_guides'],
}
print(f"{COLORS.GNSL} [ {COLORS.REDL} Information about the requested profile{COLORS.GNSL} ]")
print()
for key, val in usrinfo.items():
print(f"{key} : {val}")
print("")
exinfo()
def highlight_post_info(i):
global resp_js
postinfo = {}
total_child = 0
child_img_list = []
x = json.loads(resp_js)
js = x['graphql']['user']['edge_owner_to_timeline_media']['edges'][i]['node']
# this info is the same for every post
info = {
' comments': js['edge_media_to_comment']['count'],
' comment disable': js['comments_disabled'],
' timestamp': js['taken_at_timestamp'],
' likes': js['edge_liked_by']['count'],
' location': js['location'],
}
# if the image doesn't have a caption, this key simply does not exist (rather than being null)
try:
info['caption'] = js['edge_media_to_caption']['edges'][0]['node']['text']
except IndexError:
pass
# if the uploader has multiple images/videos in a single post, count how many child edges there are
if 'edge_sidecar_to_children' in js:
total_child = len(js['edge_sidecar_to_children']['edges'])
for child in range(total_child):
js = x['graphql']['user']['edge_owner_to_timeline_media']['edges'][i]['node']['edge_sidecar_to_children']['edges'][child]['node']
img_info = {
' typename': js['__typename'],
' id': js['id'],
' shortcode': js['shortcode'],
' dimensions': str(js['dimensions']['height'] + js['dimensions']['width']),
' image url' : js['display_url'],
' fact check overall': js['fact_check_overall_rating'],
' fact check': js['fact_check_information'],
' gating info': js['gating_info'],
' media overlay info': js['media_overlay_info'],
' is_video': js['is_video'],
' accessibility': js['accessibility_caption']
}
child_img_list.append(img_info)
postinfo['imgs'] = child_img_list
postinfo['info'] = info
else:
info = {
' comments': js['edge_media_to_comment']['count'],
' comment disable': js['comments_disabled'],
' timestamp': js['taken_at_timestamp'],
' likes': js['edge_liked_by']['count'],
' location': js['location'],
}
try:
info['caption'] = js['edge_media_to_caption']['edges'][0]['node']['text']
except IndexError:
pass
img_info = {
' typename': js['__typename'],
' id': js['id'],
' shortcode': js['shortcode'],
' dimensions': str(js['dimensions']['height'] + js['dimensions']['width']),
' image url' : js['display_url'],
' fact check overall': js['fact_check_overall_rating'],
' fact check': js['fact_check_information'],
' gating info': js['gating_info'],
' media overlay info': js['media_overlay_info'],
' is_video': js['is_video'],
' accessibility': js['accessibility_caption']
}
child_img_list.append(img_info)
postinfo['imgs'] = child_img_list
postinfo['info'] = info
return postinfo
def post_info():
if is_private is True:
print(f"{local.fa} {local.gr}cannot use -p for private accounts !\n")
exit(1)
posts = []
for x in range(total_uploads):
posts.append(highlight_post_info(x))
for x in range(len(posts)):
# get 1 item from post list
print(f"{local.su}{local.re} post %s :" % x)
for key, val in posts[x].items():
if key == 'imgs':
# how many child imgs post has
postlen = len(val)
# loop over all child img
print(f"{local.su}{local.re} contains %s media" % postlen)
for y in range(postlen):
# print k,v of all child img in loop
for xkey, xval in val[y].items():
print(f" {local.gr}{xkey} : {local.wh}{xval}")
if key == 'info':
print(f"{local.su}{local.re} info :")
for key, val in val.items():
print(f" {local.gr}{key} : {local.wh}{val}")
print("")
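# Minimal usage sketch (hypothetical profile name; requires network access and
# depends on the `?__a=1` endpoint mentioned in the TODO above still working):
if __name__ == "__main__":
    user_info("example_profile")
    post_info()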
|
import ast
import hashlib
import importlib
import numpy as np
import pandas as pd
class Utils:
"""
Utils functions
"""
@classmethod
def md5_file(cls, filename):
"""
Calculate the md5 of a file
thanks <NAME> https://www.pythoncentral.io/hashing-files-with-python/
Raise FileNotFoundError if the file does not exist
"""
blocksize = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
@classmethod
def import_from_dotted_path(cls, dotted_names):
""" import_from_dotted_path('foo.bar') -> from foo import bar; return bar
"""
module_name, class_name = dotted_names.rsplit('.', 1)
module = importlib.import_module(module_name)
handler_class = getattr(module, class_name)
return handler_class
@classmethod
def make_meshgrid(cls, x, y, h=.02):
"""
Create a mesh of points to plot in
(src, thanks : https://scikit-learn.org/stable/auto_examples/svm/plot_iris.html)
:param x: data to base x-axis meshgrid on (type numpy.ndarray)
:param y: data to base y-axis meshgrid on (type numpy.ndarray)
:param h: stepsize for meshgrid, optional
:return: xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
@classmethod
def transform_to_dict(cls, dictionary: dict, tuple_to_string=False):
"""
Transform a dictionary containing dictionary such as
{ "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
to dictionary containing the real type (tuple)
:param dictionary: dictionary containing __tuple__ values
:param tuple_to_string: if True, the tuples identified with "__type__": "__tuple__" are stored as strings in the
dictionary. If False, they are converted to tuple objects
:return dictionary containing the real type
"""
def change_one_dict_element(value):
if '__type__' in value:
if value['__type__'] == '__tuple__':
if tuple_to_string:
result_element = value['__value__']
else:
result_element = ast.literal_eval(value['__value__'])
if not isinstance(result_element, tuple):
raise TypeError(value['__value__'] + " is not a tuple")
else:
result_element = Utils.transform_to_dict(value, tuple_to_string)
else:
result_element = Utils.transform_to_dict(value, tuple_to_string)
return result_element
result = dictionary.copy()
for k, v in result.items():
if isinstance(v, dict):
result[k] = change_one_dict_element(v)
if isinstance(v, list):
result[k] = []
for e in v:
if isinstance(e, dict):
result[k].append(change_one_dict_element(e))
else:
result[k].append(e)
return result
@classmethod
def transform_to_json(cls, dictionary):
"""
Transform a dictionary containing tuple to dictionary
such as { "__type__": "__tuple__", "__value__": "(1, 2, 3)"}
:param dictionary: dictionary containing tuple
:return dictionary containing __tuple__ values
"""
def change_one_dict_element(value):
result_element = {'__type__': '__tuple__', '__value__': value.__str__()}
return result_element
result = dictionary.copy()
for k, v in result.items():
if isinstance(v, tuple):
result[k] = change_one_dict_element(v)
if isinstance(v, dict):
result[k] = Utils.transform_to_json(v)
if isinstance(v, list):
result[k] = []
for e in v:
if isinstance(e, tuple):
result[k].append(change_one_dict_element(e))
else:
if isinstance(e, dict):
result[k].append(Utils.transform_to_json(e))
else:
result[k].append(e)
return result
@classmethod
def check_dict_python_ready(cls, dictionary):
"""
Check that a dictionary (including nested dictionaries) does not contain a __type__ key,
which would mean it is not ready to be handled by python
:param dictionary: the dictionary to check
:return: False if the dictionary contains one __type__ key, True otherwise
"""
result = True
for k, v in dictionary.items():
if not isinstance(v, list):
v = [v]
for e in v:
if isinstance(e, dict):
if '__type__' in e:
result = False
else:
result = result & Utils.check_dict_python_ready(e)
return result
@classmethod
def flatten_dict(cls, dictionary, separator='_', prefix=''):
"""SRC : https://www.geeksforgeeks.org/python-convert-nested-dictionary-into-flattened-dictionary/"""
result = {prefix + separator + k if prefix else k: v
for kk, vv in dictionary.items()
for k, v in Utils.flatten_dict(vv, separator, kk).items()
} if isinstance(dictionary, dict) else {prefix: dictionary}
return result
@classmethod
def func_create_dataframe(cls, storage):
""" return the function that create a DataFrame from an array"""
if storage == 'Pandas':
return pd.DataFrame
@classmethod
def is_dataframe_empty(cls, df):
result = True
if isinstance(df, pd.DataFrame):
result = df.empty
return result
@classmethod
def str2bool(cls, v: str):
return v.lower() in ("yes", "true", "t", "1")
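# Minimal usage sketch (not part of the original module): round-trip a config
# dictionary containing tuples through the json-safe representation, then
# flatten a nested dictionary.
if __name__ == "__main__":
    cfg = {"size": (1, 2, 3), "nested": {"color": (255, 0, 0)}}
    as_json = Utils.transform_to_json(cfg)
    # e.g. {'size': {'__type__': '__tuple__', '__value__': '(1, 2, 3)'}, ...}
    restored = Utils.transform_to_dict(as_json)
    assert restored == cfg and Utils.check_dict_python_ready(restored)
    flat = Utils.flatten_dict({"a": {"b": 1, "c": 2}})  # {'a_b': 1, 'a_c': 2}
    print(as_json, restored, flat, sep="\n")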
|
'''
Created on 18.07.2012
@author: philkraf
'''
from base64 import b64encode
from .. import lib as web
from ... import db
from traceback import format_exc as traceback
from datetime import datetime, timedelta
import io
from ..auth import group, expose_for, users
import codecs
from ...tools.calibration import Calibration, CalibrationSource
from ...config import conf
from pytz import common_timezones
import cherrypy
@web.show_in_nav_for(1, icon='clipboard')
class DatasetPage:
"""
Serves the direct dataset manipulation and querying
"""
exposed = True
@expose_for(group.logger)
@web.method.get
def index(self, error=''):
"""
Returns the query page (datasetlist.html). Site logic is handled with ajax
"""
return web.render('datasetlist.html', error=error).render()
@expose_for(group.guest)
@web.method.get
def default(self, id='new', site_id=None, vt_id=None, user=None, error='', _=None):
"""
Returns the dataset view and manipulation page (dataset-edit.html).
Expects a valid dataset id, 'new' or 'last'. With 'new', a new dataset
is created; with 'last', the last chosen dataset is used
"""
if id == 'last':
# get the last viewed dataset from web-session. If there is no
# last dataset, redirect to index
id = web.cherrypy.session.get('dataset') # @UndefinedVariable
if id is None:
raise web.redirect(conf.root_url + '/dataset/')
with db.session_scope() as session:
site = session.query(db.Site).get(site_id) if site_id else None
valuetype = session.query(db.ValueType).get(
vt_id) if vt_id else None
# All projects
if user is None:
user = web.user()
user: db.Person = session.query(db.Person).get(user) if user else None
if id == 'new':
active = db.Timeseries(
id=db.newid(db.Dataset, session),
name='New Dataset',
site=site,
valuetype=valuetype,
measured_by=user,
access=user.access_level
)
else: # Else load requested dataset
active = session.query(db.Dataset).get(int(id))
if active: # save requested dataset as 'last'
web.cherrypy.session['dataset'] = id # @UndefinedVariable
else:
raise web.redirect(conf.root_url + '/dataset', error=f'No ds{id} available')
# Setting the project, for editing and ui navigation
if active.project is not None:
project = session.query(db.Project).get(int(active.project))
else:
project = None
try:
# load data for dataset-edit.html:
# similar datasets (same site and same type)
if active.valuetype and active.site:
similar_datasets = self.subset(session, valuetype=active.valuetype.id,
site=active.site.id).filter(db.Dataset.id != active.id)
else:
similar_datasets = []
# parallel dataset (same site and same time, different type)
if active.site and active.start and active.end:
parallel_datasets = session.query(db.Dataset).filter_by(site=active.site).filter(
db.Dataset.start <= active.end, db.Dataset.end >= active.start).filter(db.Dataset.id != active.id)
else:
parallel_datasets = []
datasets = {
"same type": similar_datasets,
"same time": parallel_datasets,
}
except:
# If loading fails, don't show similar datasets
datasets = {}
# Render the resulting page
return web.render(
'dataset-edit.html',
# activedataset is the current dataset (id or new)
ds_act=active, n=active.size() if active else 0,
# Render error messages
error=error,
# similar and parallel datasets
datasets=datasets,
# The project
activeproject=project,
# All available timezones
timezones=common_timezones + ['Fixed/60'],
# The title of the page
title='ds' + str(id),
# A couple of prepared queries to fill select elements
valuetypes=session.query(db.ValueType).order_by(db.ValueType.name),
persons=session.query(db.Person).order_by(db.Person.can_supervise.desc(), db.Person.surname),
sites=session.query(db.Site).order_by(db.Site.id),
quality=session.query(db.Quality).order_by(db.Quality.id),
datasources=session.query(db.Datasource),
projects=session.query(db.Project),
).render()
@expose_for(group.editor)
@web.method.post
def saveitem(self, **kwargs):
"""
Saves the changes for an edited dataset
"""
id = kwargs.get('id', '')
try:
# Get current dataset
id = web.conv(int, id, '')
except:
raise web.redirect(conf.root_url + f'/dataset/{id}', error=traceback())
# if save button has been pressed for submitting the dataset
if 'save' in kwargs:
# get database session
with db.session_scope() as session:
try:
pers = session.query(db.Person).get(kwargs.get('measured_by'))
vt = session.query(db.ValueType).get(kwargs.get('valuetype'))
q = session.query(db.Quality).get(kwargs.get('quality'))
s = session.query(db.Site).get(kwargs.get('site'))
src = session.query(db.Datasource).get(kwargs.get('source'))
# get the dataset
ds = session.query(db.Dataset).get(int(id))
if not ds:
# If no ds with id exists, create a new one
ds = db.Timeseries(id=id)
# Get properties from the keyword arguments kwargs
ds.site = s
ds.filename = kwargs.get('filename')
ds.name = kwargs.get('name')
ds.comment = kwargs.get('comment')
ds.measured_by = pers
ds.valuetype = vt
ds.quality = q
# TODO: Is it necessary to protect this
# of being modified by somebody who isn't a supervisor or higher?
if kwargs.get('project') == '0':
ds.project = None
else:
ds.project = kwargs.get('project')
ds.timezone = kwargs.get('timezone')
if src:
ds.source = src
if 'level' in kwargs:
ds.level = web.conv(float, kwargs.get('level'))
# Timeseries only arguments
if ds.is_timeseries():
if kwargs.get('start'):
ds.start = web.parsedate(kwargs['start'])
if kwargs.get('end'):
ds.end = web.parsedate(kwargs['end'])
ds.calibration_offset = web.conv(
float, kwargs.get('calibration_offset'), 0.0)
ds.calibration_slope = web.conv(
float, kwargs.get('calibration_slope'), 1.0)
ds.access = web.conv(int, kwargs.get('access'), 1)
# Transformation only arguments
if ds.is_transformed():
ds.expression = kwargs.get('expression')
ds.latex = kwargs.get('latex')
# Save changes
session.commit()
except:
# On error render the error message
raise web.redirect(conf.root_url + f'/dataset/{id}', error=traceback())
elif 'new' in kwargs:
id = 'new'
# reload page
raise web.redirect(str(id))
@expose_for()
@web.method.get
@web.mime.json
def statistics(self, id):
"""
Returns a json file holding the statistics for the dataset (is loaded by page using ajax)
"""
with db.session_scope() as session:
ds = session.query(db.Dataset).get(int(id))
if ds:
# Get statistics
mean, std, n = ds.statistics()
# Convert to json
return web.json_out(dict(mean=mean, std=std, n=n))
else:
# Return empty dataset statistics
return web.json_out(dict(mean=0, std=0, n=0))
@expose_for(group.admin)
@web.method.post_or_delete
def remove(self, dsid):
"""
Removes a dataset. Called by javascript, page reload handled by client
"""
try:
db.removedataset(dsid)
return None
except Exception as e:
return str(e)
def subset(self, session, valuetype=None, user=None,
site=None, date=None, instrument=None,
type=None, level=None, onlyaccess=False) -> db.orm.Query:
"""
A non-exposed helper function to get a subset of the available datasets using filters
"""
datasets: db.orm.Query = session.query(db.Dataset)
if user:
user = session.query(db.Person).get(user)
datasets = datasets.filter_by(measured_by=user)
if site and site!='NaN':
site = session.query(db.Site).get(web.conv(int, site))
datasets = datasets.filter_by(site=site)
if date:
date = web.parsedate(date)
datasets = datasets.filter(
db.Dataset.start <= date, db.Dataset.end >= date)
if valuetype and valuetype!='NaN':
vt = session.query(db.ValueType).get(web.conv(int, valuetype))
datasets = datasets.filter_by(valuetype=vt)
if instrument:
if instrument in ('null', 'NaN'):
source = None
else:
source = session.query(db.Datasource).get(int(instrument))
datasets = datasets.filter_by(source=source)
if type:
datasets = datasets.filter_by(type=type)
if level is not None:
datasets = datasets.filter_by(level=level)
if onlyaccess:
lvl = users.current.level # @UndefinedVariable
datasets = datasets.filter(lvl >= db.Dataset.access)
return datasets.join(db.ValueType).order_by(db.ValueType.name, db.sql.desc(db.Dataset.end))
@expose_for()
@web.method.get
@web.mime.json
def attrjson(self, attribute, valuetype=None, user=None,
site=None, date=None, instrument=None,
type=None, level=None, onlyaccess=False):
"""
Gets the attributes for a dataset filter. Returns json. Used for many filters using ajax.
e.g: Map filter, datasetlist, import etc.
TODO: This function is not very well scalable. If the number of datasets grows,
please use distinct to get the distinct sites / valuetypes etc.
"""
if not hasattr(db.Dataset, attribute):
raise AttributeError("Dataset has no attribute '%s'" % attribute)
res = ''
with db.session_scope() as session:
# Get dataset for filter
datasets = self.subset(session, valuetype, user,
site, date, instrument,
type, level, onlyaccess)
# Make a set of the attribute items and cull out None elements
items = set(getattr(ds, attribute)
for ds in datasets if ds is not None)
# Second pass to drop falsy/None elements (admittedly not the most efficient approach)
items = set(e for e in items if e)
# Convert object set to json
return web.json_out(sorted(items))
@expose_for()
@web.method.get
@web.mime.json
def attributes(self, valuetype=None, user=None, site=None, date=None, instrument=None,
type=None, level=None, onlyaccess=False):
"""
Gets, for each dataset attribute, a unique list of values fitting the filter.
Should replace multiple calls to attrjson
"""
ds_attributes = ['valuetype', 'measured_by', 'site', 'source', 'type', 'level',
'uses_dst', 'timezone', 'project', 'quality']
with db.session_scope() as session:
# Get dataset for filter
datasets = self.subset(session, valuetype, user,
site, date, instrument,
type, level, onlyaccess).all()
# For each attribute iterate all datasets and find the unique values of the dataset
result = {
attr.strip('_'): sorted(
set(
getattr(ds, attr)
for ds in datasets
),
key=lambda x: (x is not None, x)
)
for attr in ds_attributes
}
return web.json_out(result)
@expose_for()
@web.method.get
@web.mime.json
def json(self, valuetype=None, user=None, site=None,
date=None, instrument=None, type=None,
level=None, onlyaccess=False):
"""
Gets a json file of available datasets with filter
"""
with db.session_scope() as session:
return web.json_out(self.subset(
session, valuetype, user, site,
date, instrument, type, level, onlyaccess
).all())
@expose_for(group.editor)
@web.method.post
def updaterecorderror(self, dataset, records):
"""
Mark record id (records) as is_error for dataset. Called by javascript
"""
with db.session_scope() as session:
recids = set(int(r) for r in records.split())
ds = session.query(db.Dataset).get(int(dataset))
q = ds.records.filter(db.Record.id.in_(recids))
for r in q:
r.is_error = True
@expose_for(group.editor)
@web.method.post
def setsplit(self, datasetid, recordid):
"""
Splits the dataset at the given record id
"""
try:
with db.session_scope() as session:
ds = session.query(db.Dataset).get(int(datasetid))
rec = ds.records.filter_by(id=int(recordid)).first()
ds, dsnew = ds.split(rec.time)
if ds.comment:
ds.comment += '\n'
ds.comment += ('split by ' + web.user() +
' at ' + web.formatdate() +
'. New dataset is ' + str(dsnew))
if dsnew.comment:
dsnew.comment += '\n'
dsnew.comment += ('This dataset was created by a split done by ' + web.user() + ' at ' + web.formatdate() +
'. Original dataset is ' + str(ds))
return "New dataset: %s" % dsnew
except:
return traceback()
@expose_for(group.logger)
@web.method.get
@web.mime.csv
def records_csv(self, dataset, raw=False):
"""
Exports the records of the timeseries as csv
TODO: replace with export function with multiple formats using pandas
"""
with db.session_scope() as session:
ds = session.query(db.Dataset).get(dataset)
st = io.BytesIO()
st.write(codecs.BOM_UTF8)
st.write(('"Dataset","ID","time","%s","site","comment"\n' %
(ds.valuetype)).encode('utf-8'))
for r in ds.iterrecords(raw):
d = dict(c=str(r.comment).replace('\r', '').replace('\n', ' / '),
v=r.calibrated if raw else r.value,
time=web.formatdate(r.time) + ' ' +
web.formattime(r.time),
id=r.id,
ds=ds.id,
s=ds.site.id)
st.write(('%(ds)i,%(id)i,%(time)s,%(v)s,%(s)i,"%(c)s"\n' %
d).encode('utf-8'))
session.close()
return st.getvalue()
@expose_for(group.logger)
@web.method.post
def plot(self, id, start='', end='', marker='', line='-', color='k', interactive=False):
"""
Plots the dataset. Might be deleted in future. Rather use PlotPage
"""
import pylab as plt
try:
with db.session_scope() as session:
ds: db.Timeseries = session.query(db.Dataset).get(int(id))
if users.current.level < ds.access:
return f"""
<div class="alert alert-danger"><h2>No access</h2><p class="lead">
Sorry, {users.current.name}, you need higher privileges to see the content of {ds}
</p></div>
"""
if start.strip():
start = web.parsedate(start.strip())
else:
start = ds.start
if end.strip():
end = web.parsedate(end.strip())
else:
end = ds.end
data = ds.asseries(start, end)
ylabel = f'{ds.valuetype.name} [{ds.valuetype.unit}]'
title = f'{ds.site}'
fig = plt.figure(figsize=(10, 5))
ax = fig.gca()
data.plot.line(ax=ax, color=color, marker=marker, linestyle=line)
ax.grid()
plt.xticks(rotation=15)
plt.ylabel(ylabel)
plt.title(title)
bytesio = io.BytesIO()
fig.savefig(bytesio, dpi=100, format='png')
data = b64encode(bytesio.getvalue())
return b'<img src="data:image/png;base64, ' + data + b'"/>'
except Exception as e:
raise web.AJAXError(500, str(e))
@web.expose
@web.mime.json
def records_json(self, dataset,
mindate=None, maxdate=None, minvalue=None, maxvalue=None,
threshold=None, limit=None, witherror=False) -> dict:
"""
Returns the records of the dataset as JSON
"""
with db.session_scope() as session:
ds = session.query(db.Dataset).get(int(dataset))
if users.current.level < ds.access: # @UndefinedVariable
raise web.HTTPError(403, 'User privileges not sufficient to access ds:' +
str(dataset))
records = ds.records.order_by(db.Record.time)
if not witherror:
# exclude records flagged as erroneous unless they are explicitly requested
records = records.filter(~db.Record.is_error)
tstart = web.parsedate(mindate.strip(), raiseerror=False)
tend = web.parsedate(maxdate.strip(), raiseerror=False)
threshold = web.conv(float, threshold)
limit = web.conv(int, limit, 250)
try:
if threshold:
records = ds.findjumps(float(threshold), tstart, tend)
else:
if tstart:
records = records.filter(db.Record.time >= tstart)
if tend:
records = records.filter(db.Record.time <= tend)
if minvalue:
records = records.filter(
db.Record.value > float(minvalue))
if maxvalue:
records = records.filter(
db.Record.value < float(maxvalue))
records = records.limit(limit)
except:
raise web.HTTPError(500, traceback())
return web.json_out({'error': None, 'data': records.all()})
@expose_for(group.editor)
@web.method.post
def records(self, dataset, mindate, maxdate, minvalue, maxvalue,
threshold=None, limit=None, offset=None):
"""
Returns a html-table of filtered records
TODO: This method should be replaced by records_json.
Needs change in dataset-edit.html to create DOM elements using
jquery from the delivered JSON
"""
with db.session_scope() as session:
ds = session.query(db.Dataset).get(int(dataset))
records = ds.records.order_by(
db.Record.time).filter(~db.Record.is_error)
tstart = web.parsedate(mindate.strip(), raiseerror=False)
tend = web.parsedate(maxdate.strip(), raiseerror=False)
threshold = web.conv(float, threshold)
limit = web.conv(int, limit, 250)
try:
if threshold:
records = ds.findjumps(float(threshold), tstart, tend)
currentcount = None
totalcount = None
else:
if tstart:
records = records.filter(db.Record.time >= tstart)
if tend:
records = records.filter(db.Record.time <= tend)
if minvalue:
records = records.filter(db.Record.value > float(minvalue))
if maxvalue:
records = records.filter(db.Record.value < float(maxvalue))
totalcount = records.count()
if offset:
records = records.offset(offset)
if limit:
records = records.limit(limit)
currentcount = records.count()
except:
return web.literal('<div class="alert alert-danger">' + traceback() + '</div>')
return web.render('record.html', records=records, currentcount=currentcount,
totalrecords=totalcount, dataset=ds, actionname="split dataset",
action="/dataset/setsplit",
action_help=f'{conf.root_url}/download/wiki/dataset/split.wiki').render()
@expose_for(group.editor)
@web.method.get
@web.mime.png
def plot_coverage(self, siteid):
"""
Makes a bar plot (gantt-like) for the time coverage of datasets at a site
"""
st = io.BytesIO()
with db.session_scope() as session:
import matplotlib
matplotlib.use('Agg', warn=False)
import pylab as plt
import numpy as np
ds = session.query(db.Dataset).filter_by(_site=int(siteid)).order_by(
db.Dataset._source, db.Dataset._valuetype, db.Dataset.start).all()
left = plt.date2num([d.start for d in ds])
right = plt.date2num([d.end for d in ds])
btm = np.arange(-.5, -len(ds), -1)
# return 'left=' + str(left) + ' right=' + str(right) + ' btm=' + str(btm)
fig = plt.figure()
ax = fig.gca()
ax.barh(left=left, width=right - left, bottom=btm,
height=0.9, fc='0.75', ec='0.5')
for l, b, d in zip(left, btm, ds):
ax.text(l, b + .5, '#%i' % d.id, color='k', va='center')
ax.xaxis_date()
ax.set_yticks(btm + .5)
ax.set_yticklabels(
[d.source.name + '/' + d.valuetype.name for d in ds])
ax.set_position([0.3, 0.05, 0.7, 0.9])
ax.set_title('Site #' + siteid)
ax.set_ylim(-len(ds) - .5, .5)
ax.grid()
fig.savefig(st, dpi=100)
return st.getvalue()
@expose_for(group.editor)
@web.method.post
def create_transformation(self, sourceid):
"""
Creates a transformed timeseries from a timeseries.
Redirects to the new transformed timeseries
"""
id = int(sourceid)
try:
with db.session_scope() as session:
sts = session.query(db.Timeseries).get(id)
id = db.newid(db.Dataset, session)
tts = db.TransformedTimeseries(
id=id,
site=sts.site,
source=sts.source,
filename=sts.filename,
name=sts.name,
expression='x',
latex='x',
comment=sts.comment,
_measured_by=web.user(),
quality=sts.quality,
valuetype=sts.valuetype,
start=sts.start,
end=sts.end
)
session.add(tts)
tts.sources.append(sts)
except Exception as e:
raise web.AJAXError(500, str(e))
return '/dataset/%s' % id
@expose_for(group.editor)
@web.method.post
def transform_removesource(self, transid, sourceid):
"""
Remove a source from a transformed timeseries.
To be called from javascript. Client handles rendering
"""
try:
with db.session_scope() as session:
tts = session.query(db.TransformedTimeseries).get(int(transid))
sts = session.query(db.Timeseries).get(int(sourceid))
tts.sources.remove(sts)
tts.updatetime()
except Exception as e:
return str(e)
@expose_for(group.editor)
@web.method.post
def transform_addsource(self, transid, sourceid):
"""
Adds a source to a transformed timeseries.
To be called from javascript. Client handles rendering
"""
try:
with db.session_scope() as session:
tts = session.query(db.TransformedTimeseries).get(int(transid))
sts = session.query(db.Timeseries).get(int(sourceid))
tts.sources.append(sts)
tts.updatetime()
except Exception as e:
return str(e)
@expose_for(group.editor)
@web.method.get
@web.mime.json
def calibration_source_info(self, targetid, sourceid=None, limit=None, max_source_count=100):
"""
Returns the calibration properties for a calibration proposal
Parameters
----------
targetid
Dataset id, which should get calibrated
sourceid
Dataset id containing the "real" measurements
limit
Tolerated time gap between records of the target and records of the source
max_source_count: int
Do not perform calibration if there are more records in the source (to prevent long calculation times)
"""
with db.session_scope() as session:
error = ''
target = session.query(db.Dataset).get(int(targetid))
if sourceid:
sourceid = int(sourceid)
source_ds: db.Dataset = session.query(db.Dataset).get(sourceid)
unit = source_ds.valuetype.unit
else:
unit = '?'
limit = web.conv(int, limit, 3600)
day = timedelta(days=1)
count = 0
result = None
try:
if sourceid:
source = CalibrationSource(
[sourceid], target.start - day, target.end + day)
sourcerecords = source.records(session)
count = sourcerecords.count()
if count and count < web.conv(int, max_source_count, 0):
result = Calibration(target, source, limit)
if not result:
error = 'No matching records found for the given time limit'
except:
error = traceback()
return web.as_json(
targetid=targetid,
sourceid=sourceid,
error=error,
count=count,
unit=unit,
limit=limit,
result=result
).encode('utf-8')
@expose_for(group.editor)
@web.method.post
def apply_calibration(self, targetid, sourceid, slope, offset):
"""
Applies calibration to dataset.
"""
error = ''
try:
with db.session_scope() as session:
target: db.Dataset = session.query(db.Dataset).get(int(targetid))
source = session.query(db.Dataset).get(int(sourceid))
target.calibration_slope = float(slope)
target.calibration_offset = float(offset)
target.valuetype = source.valuetype
if target.comment:
target.comment += '\n'
target.comment += ("Calibrated against {} at {} by {}"
.format(source, web.formatdate(), users.current))
except:
error = traceback()
return error
|
<reponame>kleutzinger/advent-of-code-2021
import os
import sys
from collections import *
from pprint import pprint
from copy import deepcopy
from itertools import *
# change to dir of script
os.chdir(os.path.dirname(os.path.abspath(__file__)))
input_file = "input.txt"
if "s" in sys.argv:
input_file = "input_small.txt"
try:
with open(input_file) as f:
data = f.read() # entire file as string
lines = data.strip().splitlines()
except:
print("no " + input_file)
data, lines = "", []
def ans(answer):
# store answer to clipboard
from shutil import which
xclip_path = which("xclip")
if xclip_path is not None:
os.system(f'echo "{answer}"| {xclip_path} -selection clipboard -in')
print("\t", answer, "| in clipboard\n")
else:
print(f"\t {answer} | (answer)\n")
############### boilerplate ###################################################
def coords(arr2d):
# return [(x0,y0), (x1, y0), ...]
for y in range(len(arr2d)):
for x in range(len(arr2d[y])):
yield (x, y)
def rotate2d(l):
"rotate a 2d list counter_clockwise once"
nu = deepcopy(l)
return list(zip(*nu))[::-1]
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
strips = lambda l: list(map(str.strip, l))
ints = lambda l: list(map(int, l))
commas = lambda s: s.split(",")
comma_ints = lambda s: ints(strips(s.split(",")))
L, I, D, S = list, int, dict, set
P, E, R, M = print, enumerate, range, map
############### end of boilerplate ############################################
### PART 1 ###
table = {
"0": "0000",
"1": "0001",
"2": "0010",
"3": "0011",
"4": "0100",
"5": "0101",
"6": "0110",
"7": "0111",
"8": "1000",
"9": "1001",
"A": "1010",
"B": "1011",
"C": "1100",
"D": "1101",
"E": "1110",
"F": "1111",
}
example1 = "110100101111111000101000"
# ----------VVVTTTAAAAABBBBBCCCCC
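# Worked decode of example1 (the AoC 2021 day 16 literal packet "D2FE28"):
#   110 100 10111 11110 00101 000
#   VVV = 110 -> version 6
#   TTT = 100 -> type 4 (literal value)
#   groups A/B/C = 10111 11110 00101 -> payload bits 0111 1110 0101 -> literal 2021
#   the trailing 000 is padding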
def parse_input():
""" """
lines = data.strip().splitlines()
print(f"{len(lines)} lines in {input_file}\n")
line_groups = data.strip().split("\n\n") # lines split by double newlines
parsed = ""
for idx, c in enumerate(lines[0]):
parsed += table[c]
pprint(parsed)
print(f"{len(parsed)=}")
return parsed
b2i = lambda n: int(n, 2)
LITERAL = 4
def parse_literal(value):
head = 0
build = ""
groups_parsed = 0
while True:
leader = value[head]
tail = value[head + 1 : head + 5]
build += tail
groups_parsed += 1
if leader == "0":
return b2i(build), groups_parsed * 5
head += 5
def parse_packet(packet):
version = b2i(packet[:3])
type_id = b2i(packet[3:6])
rest = packet[6:]
if type_id == LITERAL:
p, jmp = parse_literal(rest)
else: # OPERATOR
lti = rest[0]
if lti == "0":
length = b2i(rest[1:16])
# the length of subpackets
pass
elif lti == "1":
num_sub_packets = b2i(rest[1:12])
"""
If the length type ID is 0, then the next 15 bits are a number that
represents the total length in bits of the sub-packets contained by
this packet.
If the length type ID is 1, then the next 11 bits are a number that
represents the number of sub-packets immediately contained by this
packet.
"""
pass
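# A minimal sketch (not part of the original solution) of a complete recursive
# parser implementing the two length-type rules quoted above; it returns the
# packet's version sum and the index just past the packet. For example2 below
# the version sum should be 1 + 6 + 2 = 9.
def parse_packet_sketch(bits, head=0):
    version = b2i(bits[head:head + 3])
    type_id = b2i(bits[head + 3:head + 6])
    head += 6
    if type_id == LITERAL:
        # skip the 5-bit literal groups; the last group starts with "0"
        while bits[head] == "1":
            head += 5
        head += 5
        return version, head
    version_sum = version
    lti = bits[head]
    head += 1
    if lti == "0":
        length = b2i(bits[head:head + 15])  # total length of sub-packets in bits
        head += 15
        end = head + length
        while head < end:
            sub_sum, head = parse_packet_sketch(bits, head)
            version_sum += sub_sum
    else:
        num_sub_packets = b2i(bits[head:head + 11])  # number of sub-packets
        head += 11
        for _ in range(num_sub_packets):
            sub_sum, head = parse_packet_sketch(bits, head)
            version_sum += sub_sum
    return version_sum, head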
example2 = "00111000000000000110111101000101001010010001001000000000"
# ----------VVVTTTILLLLLLLLLLLLLLLAAAAAAAAAAABBBBBBBBBBBBBBBB
# print(parse_packet(example1))
print(parse_packet(example2))
print(parse_packet("11010001010"))
exit()
def part12(data):
tot = 0
for idx, d in enumerate(data):
if d:
tot += 1
return tot
if __name__ == "__main__":
data = parse_input()
part12(deepcopy(data))
|
from base.base_model import BaseModel
import tensorflow as tf
import numpy as np
class ExampleModel(BaseModel):
def __init__(self, config):
super(ExampleModel, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
self.is_training = tf.placeholder(tf.bool)
self.x = tf.placeholder(tf.float32, shape=[None, 1])
self.x_uncertainty = tf.placeholder(tf.float32, shape=[None, 1])
self.y = tf.placeholder(tf.float32, shape=[None, 1])
self.y_uncertainty = tf.placeholder(tf.float32, shape=[None, 1])
# network architecture
d1 = tf.layers.dense(self.x, 10, activation=tf.nn.tanh, name="dense1")
self.predictions = tf.layers.dense(d1, 1, name="dense2")
sigma = tf.layers.dense(d1, 1, activation=tf.nn.softplus, name="dense2_uncertainty")
self.sigma_hat = tf.divide(sigma, tf.reduce_mean(sigma))
self.h_variable = tf.get_variable(name='model_width', initializer=tf.initializers.zeros, shape=[1])
with tf.name_scope("loss"):
a = tf.div(tf.losses.absolute_difference(tf.add(self.y, self.y_uncertainty),
self.predictions,
reduction=tf.losses.Reduction.NONE), self.sigma_hat)
b = tf.div(tf.losses.absolute_difference(tf.subtract(self.y, self.y_uncertainty),
self.predictions,
reduction=tf.losses.Reduction.NONE), self.sigma_hat)
self.loss = tf.math.maximum(
tf.reduce_max(a + tf.math.abs(tf.math.multiply(self.x_uncertainty, tf.gradients(a, self.x)))),
tf.reduce_max(b + tf.math.abs(tf.math.multiply(self.x_uncertainty, tf.gradients(b, self.x)))))
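# The loss above is, in effect,
#   L = max( max_i[a_i + |dx_i * da_i/dx_i|], max_i[b_i + |dx_i * db_i/dx_i|] )
# with a_i = |y_i + dy_i - f(x_i)| / sigma_hat_i and
#      b_i = |y_i - dy_i - f(x_i)| / sigma_hat_i,
# i.e. the worst-case (gradient-inflated) deviation of the prediction from the
# uncertainty band around each data point.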
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.learning_rate = tf.train.exponential_decay(self.config.learning_rate_init, self.global_step_tensor,
self.config.decay_steps, self.config.learning_rate_decay,
staircase=True)
optimiser = tf.train.AdamOptimizer(self.learning_rate)
h_loss = tf.losses.mean_squared_error(self.loss, self.h_variable[0])
self.loss += h_loss
self.train_step = optimiser.minimize(self.loss, global_step=self.global_step_tensor)
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoints.
self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
def get_predictions(self, sess, data, scaler, rescale=True):
n_datapoints = data.shape[0]
train_feed_dict = {
self.x: data[:, 0].reshape((n_datapoints, 1)),
self.is_training: False
}
predictions, sigma_hat = sess.run(
[self.predictions, self.sigma_hat], feed_dict=train_feed_dict)
data_predictions = np.zeros(shape=(n_datapoints, 4))
data_predictions[:, :] = data[:, :]
data_predictions[:, 2] = np.array(predictions).reshape((n_datapoints, 1))[:, 0]
if rescale:
data_predictions = scaler.inverse_transform(data_predictions)
model_half_width = self.model_half_width(scaler, rescale)
uncertainty_prediction = np.array(sigma_hat).reshape((n_datapoints, 1))[:, 0] * model_half_width
return (data_predictions[:, 0], data_predictions[:, 2], data_predictions[:, 2] + uncertainty_prediction,
data_predictions[:, 2] - uncertainty_prediction)
def model_half_width(self, scaler, rescale=True):
# NOTE: `self.h` is not defined in this class; it is presumably set elsewhere
# (e.g. from the trained `h_variable`) before this method is called.
return self.h
class ExampleModel_deeper(BaseModel):
def __init__(self, config):
super(ExampleModel_deeper, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
self.is_training = tf.placeholder(tf.bool)
self.x = tf.placeholder(tf.float32, shape=[None, 1])
self.x_uncertainty = tf.placeholder(tf.float32, shape=[None, 1])
self.y = tf.placeholder(tf.float32, shape=[None, 1])
self.y_uncertainty = tf.placeholder(tf.float32, shape=[None, 1])
# network architecture
d1 = tf.layers.dense(self.x, 20, activation=tf.nn.tanh, name="dense1")
d2 = tf.layers.dense(d1, 10, activation=tf.nn.tanh, name="dense3")
self.predictions = tf.layers.dense(d2, 1, name="dense2")
sigma = tf.layers.dense(d2, 1, activation=tf.nn.softplus, name="dense2_uncertainty")
self.sigma_hat = tf.divide(sigma, tf.reduce_mean(sigma))
self.h_variable = tf.get_variable(name='model_width', initializer=tf.initializers.zeros, shape=[1])
with tf.name_scope("loss"):
a = tf.div(tf.losses.absolute_difference(tf.add(self.y, self.y_uncertainty),
self.predictions,
reduction=tf.losses.Reduction.NONE), self.sigma_hat)
b = tf.div(tf.losses.absolute_difference(tf.subtract(self.y, self.y_uncertainty),
self.predictions,
reduction=tf.losses.Reduction.NONE), self.sigma_hat)
self.loss = tf.math.maximum(
tf.reduce_max(a + tf.math.abs(tf.math.multiply(self.x_uncertainty, tf.gradients(a, self.x)))),
tf.reduce_max(b + tf.math.abs(tf.math.multiply(self.x_uncertainty, tf.gradients(b, self.x)))))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.learning_rate = tf.train.exponential_decay(self.config.learning_rate_init, self.global_step_tensor,
self.config.decay_steps, self.config.learning_rate_decay,
staircase=True)
optimiser = tf.train.AdamOptimizer(self.learning_rate)
h_loss = tf.losses.mean_squared_error(self.loss, self.h_variable[0])
self.loss += h_loss
self.train_step = optimiser.minimize(self.loss, global_step=self.global_step_tensor)
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoints.
self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
def get_predictions(self, sess, data, scaler, rescale=True):
n_datapoints = data.shape[0]
train_feed_dict = {
self.x: data[:, 0].reshape((n_datapoints, 1)),
self.is_training: False
}
predictions, sigma_hat = sess.run(
[self.predictions, self.sigma_hat], feed_dict=train_feed_dict)
data_predictions = np.zeros(shape=(n_datapoints, 4))
data_predictions[:, :] = data[:, :]
data_predictions[:, 2] = np.array(predictions).reshape((n_datapoints, 1))[:, 0]
if rescale:
data_predictions = scaler.inverse_transform(data_predictions)
model_half_width = self.model_half_width(scaler, rescale)
uncertainty_prediction = np.array(sigma_hat).reshape((n_datapoints, 1))[:, 0] * model_half_width
return (data_predictions[:, 0], data_predictions[:, 2], data_predictions[:, 2] + uncertainty_prediction,
data_predictions[:, 2] - uncertainty_prediction)
def model_half_width(self, scaler, rescale=True):
return self.h
|
<filename>add_criminal.py
from tkinter import *
import tkinter as tk
import tkinter.messagebox
import tkinter.font as tkFont
from PIL import Image, ImageTk
from tkinter import filedialog
import os
import sqlite3
connection = sqlite3.connect('NCD.db')
cursor = connection.cursor()
def add2(p):
t = tk.Tk()
t.title('ADD CRIMINAL RECORD')
w, h = t.winfo_screenwidth(), t.winfo_screenheight()
t.geometry("%dx%d+0+0" % (w, h))
def image_choos():
t.filename = filedialog.askopenfilename(initialdir="/", title="Select file",
filetypes=(("jpeg files", ".jpg"), ("all files", ".*")))
print(t.filename)
t.load = Image.open(t.filename)
t.load = t.load.resize((230, 200), Image.ANTIALIAS)
t.photo = ImageTk.PhotoImage(t.load,master=t)
t.img1 = Button(t, image=t.photo, command=image_choos)
t.img1.image = t.photo
t.img1.place(x = 1000, y = 140, width=250, height=350)
def convertToBinaryData(filena):
with open(filena, 'rb') as file:
blobData = file.read()
return blobData
def back():
t.destroy()
from acp_home import acp_home
acp_home(p)
def add3():
empPhoto = convertToBinaryData(t.filename)
u = str(y.get()) + '-' + str(mth.get()) + '-' + str(d.get())
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL(CRIMINALID number PRIMARY KEY,FNAME text,MNAME text,LNAME text,DOB text,BLOODGROUP text,STATUS number,PRIORITY number,GENDER text,PHOTO BLOB)')
cursor.execute("INSERT INTO CRIMINAL VALUES(?,?,?,?,?,?,?,?,?,?)", (c_id1.get(), fname1.get(), mname1.get(), lname1.get(), u,bloodgrp1.get(), status1.get(),priority1.get(),gender1.get(),empPhoto))
connection.commit()
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL1(CRIMINALID number PRIMARY KEY,IDENTIFICATIONMARKS text)')
cursor.execute("INSERT INTO CRIMINAL1 VALUES(?,?)", (c_id1.get(), im1.get()))
connection.commit()
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL2(CRIMINALID number PRIMARY KEY,ADDRESS text)')
cursor.execute("INSERT INTO CRIMINAL2 VALUES(?,?)", (c_id1.get(), ad1.get()))
connection.commit()
cursor.execute('CREATE TABLE IF NOT EXISTS CRIMINAL3(CRIMINALID number PRIMARY KEY,CONTACT text)')
cursor.execute("INSERT INTO CRIMINAL3 VALUES(?,?)",(c_id1.get(), hd1.get()))
connection.commit()
tkinter.messagebox.showinfo('Confirmation', 'created')
t.destroy()
from acp_home import acp_home
acp_home(p)
fi = tkFont.Font(family="Times New Roman", size=16)
fih = tkFont.Font(family="Times New Roman", size=20)
fname = Label(t, text='Full Name',font=fi, borderwidth=2, relief="solid", width=15,height=2)
fname1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
mname1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
lname1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
priority = Label(t, text='Priority',font=fi, borderwidth=2, relief="solid", width=15,height=2)
c_id = Label(t, text='Criminal ID',font=fi, borderwidth=2, relief="solid", width=15,height=2)
status = Label(t, text='status',font=fi, borderwidth=2, relief="solid", width=15,height=2)
c_id1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
status1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
priority1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
gender = Label(t, text='Gender',font=fi, borderwidth=2, relief="solid", width=15,height=2)
gender1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
bloodgrp = Label(t, text='BLDGRP',font=fi, borderwidth=2, relief="solid", width=15,height=2)
bloodgrp1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
im = Label(t, text='IDENTIFICATION MARKS',font=fi, borderwidth=2, relief="solid", width=15,height=2)
im1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
ad = Label(t, text='ADDRESS',font=fi, borderwidth=2, relief="solid", width=15,height=2)
ad1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
hd = Label(t, text='CONTACT',font=fi, borderwidth=2, relief="solid", width=15,height=2)
hd1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
back_button = Button(t, text='Go Back', command=back, borderwidth=2, relief="solid", width=20, height=2).place(x=950, y=700)
dob = Label(t, text='DATE OF BIRTH',font=fi, borderwidth=2, relief="solid", width=15,height=2)
dayOptionList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,28,29, 30, 31] # check for february and invalid details
d = IntVar(t)
d.set('Day')
day = OptionMenu(t, d, *dayOptionList)
day.configure(font=fi, relief="solid")
monthOptionList = [1,2,3,4,5,6,7,8,9,10,11,12] # check for february and invalid details
mth = IntVar(t)
mth.set('Month')
month = OptionMenu(t, mth, *monthOptionList)
month.configure(font=fi, relief="solid")
yearOptionList = []
for i in range(1955, 2002):
yearOptionList.append(i)
y = IntVar(t)
y.set('Year')
year = OptionMenu(t, y, *yearOptionList)
year.configure(font=fi, relief="solid")
day.place(x=300, y=140)
month.place(x=380, y=140)
year.place(x=480, y=140)
age = Label(t, text='AGE',font=fi, borderwidth=2, relief="solid", width=15,height=2)
age1 = Entry(t, font=tkFont.Font(family="Times New Roman", size=30), borderwidth=2,
relief="solid")
submitt = Button(t, text='SUBMIT', command=add3, borderwidth=2, relief="solid", width=20,height=2)
image_button = Button(t, text='CHOOSE IMAGE FILE', command=image_choos, borderwidth=2, relief="solid", width=20,height=2)
fname.place(x=50, y=10)
fname1.place(x=300,y=10)
mname1.place(x=700,y=10)
lname1.place(x=1100,y=10)
c_id.place(x=50,y=75)
priority.place(x=50, y=530)
c_id1.place(x=300, y=75)
priority1.place(x=300, y=530)
status.place(x=50, y=595)
status1.place(x=300, y=595)
hd.place(x=50, y=465)
hd1.place(x=300,y=465)
ad.place(x=50, y=335)
ad1.place(x=300,y=335)
im.place(x=50,y=400)
im1.place(x=300,y=400)
gender.place(x=50, y=270)
gender1.place(x=300,y=270)
dob.place(x=50,y=140)
bloodgrp.place(x=50, y=205)
bloodgrp1.place(x=300,y=205)
submitt.place(x=950, y=600)
image_button.place(x = 1000, y = 140, width=250, height=350)
mainloop()
|
<gh_stars>0
from pandas.testing import assert_frame_equal
import pandas as pd
from sparkmagic.utils.utils import coerce_pandas_df_to_numeric_datetime
def test_no_coercing():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": "random", "temp_diff": "0adsf"},
]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_coercing():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": "6/1/13", "temp_diff": "0adsf"},
]
desired_df = pd.DataFrame(records)
desired_df["date"] = pd.to_datetime(desired_df["date"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_coercing_none_values():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": None, "temp_diff": "0adsf"},
]
desired_df = pd.DataFrame(records)
desired_df["date"] = pd.to_datetime(desired_df["date"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_date_none_values_and_no_coercing():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": None, "temp_diff": "0adsf"},
{"buildingID": 1, "date": "adsf", "temp_diff": "0adsf"},
]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_numeric_coercing():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": "adsf", "temp_diff": "0"},
]
desired_df = pd.DataFrame(records)
desired_df["temp_diff"] = pd.to_numeric(desired_df["temp_diff"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_numeric_coercing_none_values():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": "asdf", "temp_diff": None},
]
desired_df = pd.DataFrame(records)
desired_df["temp_diff"] = pd.to_numeric(desired_df["temp_diff"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_numeric_none_values_and_no_coercing():
records = [
{"buildingID": 0, "date": "6/1/13", "temp_diff": "12"},
{"buildingID": 1, "date": "asdf", "temp_diff": None},
{"buildingID": 1, "date": "adsf", "temp_diff": "0asdf"},
]
desired_df = pd.DataFrame(records)
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_df_dict_does_not_throw():
json_str = """
[{
"id": 580320,
"name": "<NAME>",
"results": "Fail",
"violations": "37. TOILET area.",
"words": ["37.",
"toilet",
"area."],
"features": {
"type": 0,
"size": 262144,
"indices": [0,
45,
97],
"values": [7.0,
5.0,
1.0]
},
"rawPrediction": {
"type": 1,
"values": [3.640841752791392,
-3.640841752791392]
},
"probability": {
"type": 1,
"values": [0.974440185187647,
0.025559814812352966]
},
"prediction": 0.0
}]
"""
df = pd.read_json(json_str)
coerce_pandas_df_to_numeric_datetime(df)
def test_overflow_coercing():
records = [{"_c0": "12345678901"}]
desired_df = pd.DataFrame(records)
desired_df["_c0"] = pd.to_numeric(desired_df["_c0"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
def test_all_null_columns():
records = [{"_c0": "12345", "nulla": None}, {"_c0": "12345", "nulla": None}]
desired_df = pd.DataFrame(records)
desired_df["_c0"] = pd.to_numeric(desired_df["_c0"])
df = pd.DataFrame(records)
coerce_pandas_df_to_numeric_datetime(df)
assert_frame_equal(desired_df, df)
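# Illustrative usage sketch (not part of the sparkmagic test suite): the helper mutates a
# DataFrame in place, converting a whole column to datetime or numeric only when every
# non-null value parses cleanly. The function name and column names below are made up.
def example_coercion_behavior():
    df = pd.DataFrame(
        [
            {"reading": "12.5", "taken_at": "6/1/13"},
            {"reading": "7", "taken_at": None},
        ]
    )
    coerce_pandas_df_to_numeric_datetime(df)
    # After the call, "reading" is float64 and "taken_at" is datetime64[ns];
    # a column containing any unparseable value would be left as object dtype.
    return df.dtypes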
|
# repo: bondruy/sam-tensorflow
import tensorflow as tf
def vgg_net(images, _data_format):
layer01 = tf.layers.conv2d(images, 64, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv1/conv1_1")
layer02 = tf.layers.conv2d(layer01, 64, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv1/conv1_2")
layer03 = tf.layers.max_pooling2d(layer02, 2, 2,
data_format=_data_format)
layer04 = tf.layers.conv2d(layer03, 128, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv2/conv2_1")
layer05 = tf.layers.conv2d(layer04, 128, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv2/conv2_2")
layer06 = tf.layers.max_pooling2d(layer05, 2, 2,
data_format=_data_format)
layer07 = tf.layers.conv2d(layer06, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv3/conv3_1")
layer08 = tf.layers.conv2d(layer07, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv3/conv3_2")
layer09 = tf.layers.conv2d(layer08, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv3/conv3_3")
layer10 = tf.layers.max_pooling2d(layer09, 2, 2,
data_format=_data_format)
layer11 = tf.layers.conv2d(layer10, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv4/conv4_1")
layer12 = tf.layers.conv2d(layer11, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv4/conv4_2")
layer13 = tf.layers.conv2d(layer12, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=_data_format,
name="conv4/conv4_3")
layer14 = tf.layers.max_pooling2d(layer13, 2, 1,
padding="same",
data_format=_data_format)
layer15 = tf.layers.conv2d(layer14, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=_data_format,
name="conv5/conv5_1")
layer16 = tf.layers.conv2d(layer15, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=_data_format,
name="conv5/conv5_2")
layer17 = tf.layers.conv2d(layer16, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=_data_format,
name="conv5/conv5_3")
return layer17
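# Minimal shape sanity check (an illustrative sketch, not from the original repo). It
# assumes a TF 1.x runtime, channels-last inputs and a 224x224 RGB batch; after three
# stride-2 pools (the fourth pool uses stride 1) the feature map is 28x28x512.
if __name__ == "__main__":
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    features = vgg_net(images, "channels_last")
    print(features.shape)  # expected: (?, 28, 28, 512)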
|
# import modules
# -------------
# built-in
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from progress.bar import Bar
from timeit import default_timer as timer
# user
from cogen_util import find_global_cost
import pce as pce
from pce.quad4pce import columnize
# savedata flag (set to true to save the simulation in npz format)
savedata = False
# PARAMETERS
# ----------
power_graph = True
case_graph=1
# CHP
etae = 0.33
etat = 0.4
Pmin = 600
Pmax = 1000
print("CHP data:\n"
f"---------\n"
f" * etae = {etae}\n"
f" * etat = {etat}\n"
f" * Pmin = {Pmin} (kW)\n"
f" * Pmax = {Pmax} (kW)\n")
Ptmin = Pmin / etae * etat
Ptmax = Pmax / etae * etat
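# Illustrative check with the nominal values above: Ptmin = 600/0.33*0.4 ~= 727 kW and
# Ptmax = 1000/0.33*0.4 ~= 1212 kW of recoverable heat.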
# Boiler
etab = 0.95
Bmax = 3000
print("Boiler data:\n"
f"------------\n"
f" * etab = {etab}\n"
f" * Bmax = {Bmax} (kW)\n")
# economic data
cNGd = 0.242 # cost of Natural Gas without tax (euro/SMC)
delta_tax = 0.008
cNGnd = cNGd + delta_tax # cost of Natural Gas with tax (euro/SMC)
Hi = 9.59 # Lower Heating Value (kWh/SMC)
print("Natural gas cost\n"
f" * for CHP = {cNGd} (euro/SMC)\n"
f" * for boiler = {cNGnd} (euro/SMC)\n"
f" * lower heating value = {Hi} (kWh/SMC)\n")
# interval set to 1 hour
# ----------------------
Deltat = 1
print("integration time = {} (h)\n".format(Deltat))
# read csv file containing thermal load Ut (kWt)
# ----------------------------------------------
print("Reading electricity prices from 'cs.csv'")
print("----------------------------------------")
Ut = []
with open('UtAL.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for line_count, row in enumerate(csv_reader):
if line_count == 0:
print(f' * Column names are {", ".join(row)}')
else:
Ut.append(float(row[1]))
print(f' * processed {line_count} lines.')
fmt = "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}\n" * 3 + "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}"
print("Ut = [" + fmt.format(*Ut) + "] (kW)\n")
Ut = np.array(Ut)
# read csv file containing electricity prices cs (euro/MWh)
# ---------------------------------------------------------
print("Reading electricity prices from 'cs.csv'")
cs = []
with open('cs.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for line_count, row in enumerate(csv_reader):
if line_count == 0:
print(f' * column names are {", ".join(row)}')
else:
cs.append(float(row[1]))
print(f' * processed {line_count} lines.')
fmt = "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}\n" * 3 + "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}"
print("cs = [" + fmt.format(*cs) + "] (euro/MWh)\n")
cs = np.array(cs)
# convert price from euro/MWh to euro/kWh
cskW=[]
for element in cs:
cskW.append(element/1000)
fmt = "{:.4f} {:.4f}, {:.4f} {:.4f} {:.4f}\n" * 3 + "{:.4f} {:.4f}, {:.4f} {:.4f} {:.4f}"
print("cskW = [" + fmt.format(*cskW) + "] (euro/kWh)\n\n")
cskW = np.array(cskW)
# PCE
# ---
# Wrapper
def fun(x, etae=etae, etat=etat, Pmin=Pmin, Pmax=Pmax,
Ptmin=Ptmin, Ptmax=Ptmax, etab=etab, Bmax=Bmax,cskW=cskW,
Hi=Hi, cNGd=cNGd, cNGnd=cNGnd, Deltat=Deltat, Ut=Ut):
def inner_fun(xx):
GlobalCost, _, _, _, _ = find_global_cost(etae, etat, Pmin, Pmax, Ptmin, Ptmax, etab, Bmax, Hi, xx[2]*cskW, xx[0]*cNGd, cNGnd, Deltat, xx[1]*Ut)
return GlobalCost
from joblib import Parallel, delayed
y = Parallel(n_jobs=-1, verbose=0)(map(delayed(inner_fun), x))
return np.array(y)
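# fun() expects x with shape (n_samples, 3): column 0 scales the CHP gas price cNGd,
# column 1 scales the thermal load Ut and column 2 scales the electricity price cskW.
# Quick illustrative check (all scaling factors at their nominal value of 1):
#   fun(np.ones((1, 3)))   # -> array with the deterministic global cost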
# generate PCE
orders = range(2,20,2)
index = [[1], [2], [3], [1,2], [1,3], [2,3], [1,2,3]]
S = np.zeros(( len(index), len(orders)))
kind = 'n'
if kind == 'n':
distrib = ['n', 'n', 'n']
param = [[1, 0.05],[1, 0.05],[1, 0.05]]
elif kind == 'u':
distrib = ['u', 'u', 'u']
param = [[0.9, 1.1],[0.9, 1.1],[0.9, 1.1]]
mu = []
sigma = []
t1 = timer()
print('Sobol index computation at increasing PCE order:')
with Bar(' * progress: ', max=len(orders), suffix='%(percent)d%%') as bar:
for k, order in enumerate(orders):
# generate PCE
poly = pce.PolyChaos(order, distrib, param)
# level selected according to simulation PCE vs MC
if kind == 'u':
lev = 15
elif kind == 'n':
lev = 25
# compute coefficients
poly.spectral_projection(fun, lev, verbose='n')
poly.norm_fit()
mu.append(poly.mu)
sigma.append(poly.sigma)
sobol_index = poly.sobol(index)
S[:, k] = np.array(sobol_index)
bar.next()
t2 = timer()
print(" * elapsed time {:.3f} sec\n".format(t2 - t1))
# print first line
first_line = "order & " + " & ".join([str(k) for k in orders]) + " \\\\"
print(first_line)
# print sobol index
for idx, sobol in zip(index, S):
format_str = "".join([str(ele) for ele in idx])
format_str = "S" + format_str
format_str = format_str + " & {:.4f}"*len(sobol) + " \\\\"
print(format_str.format(*sobol))
# final plots
h1 = plt.figure()
plt.plot(range(len(Ut)), Ut, 'C0-o')
plt.xlabel("hour", fontsize=14)
plt.ylabel("Ut (kW)", fontsize=14)
plt.grid()
plt.tight_layout()
h2 = plt.figure()
plt.plot(range(len(cskW)), cskW, 'C0-o')
plt.xlabel("hour", fontsize=14)
plt.ylabel("cskW (euro/kWh)", fontsize=14)
plt.grid()
plt.tight_layout()
h3 = plt.figure(figsize=(11,6))
plt.subplot(1,2,1)
plt.plot(orders, mu,'C0-o', label='PCE')
plt.xlabel('order')
plt.ylabel('mean')
plt.legend()
plt.tight_layout()
plt.subplot(1,2,2)
plt.plot(orders, sigma,'C0-o', label='PCE')
plt.xlabel('order')
plt.ylabel('standard deviation')
plt.legend()
plt.tight_layout()
plt.ion()
plt.show()
# save data related to the highest-degree PCE
if savedata:
SI = S[:, -1]
np.savez("sobol_" + kind, sobol_index=SI)
|
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as T
import numpy as np
import pickle
import argparse
import time
import os
from utils import select_network, select_optimizer
from torch._utils import _accumulate
from torch.utils.data import Subset
from datetime import datetime
parser = argparse.ArgumentParser(description='auglang parameters')
parser.add_argument('--net-type',
type=str, default='RNN',
choices=['LSTM', 'RNN', 'RNN1', 'RNN2', 'RNN3', 'EURNN', 'expRNN', 'nnRNN', 'RNN-Orth', 'RNNSN', 'expRNN2', 'nnRNN2'],
help='recurrent cell type (see choices for the full list)')
parser.add_argument('--nvar',
type=str, default='alpha',
choices=['none','1n', '1n2', 'expn', 'logn', 'log2n', 'sqrtlogn', 'sqrtlogn2', 'alpha'],
help='options: none, 1n, 1n2, expn, logn, log2n, sqrtlogn, sqrtlogn2, alpha')
parser.add_argument('--nhid', type=int,
default=512,
help='hidden size of recurrent net')
parser.add_argument('--cuda', action='store_true',
default=False, help='use cuda')
parser.add_argument('--random-seed', type=int,
default=400, help='random seed')
parser.add_argument('--permute', action='store_true',
default=False, help='permute the order of sMNIST')
parser.add_argument('--epochs', type=int, default=100,
help='upper epoch limit')
parser.add_argument('--save-freq', type=int,
default=50, help='frequency to save data')
parser.add_argument('--batch', type=int, default=100)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--lr_orth', type=float, default=2e-5)
parser.add_argument('--optimizer',type=str, default='RMSprop',
choices=['Adam', 'RMSprop'],
help='optimizer: choices Adam and RMSprop')
parser.add_argument('--alpha',type=float,
default=0.99, help='alpha value for RMSprop')
parser.add_argument('--betas',type=tuple,
default=(0.9, 0.999), help='beta values for Adam')
parser.add_argument('--bptt', type=int, default=784,
help='sequence length')
parser.add_argument('--rinit', type=str, default="henaff",
choices=['random', 'cayley', 'henaff', 'xavier'],
help='recurrent weight matrix initialization')
parser.add_argument('--iinit', type=str, default="xavier",
choices=['xavier', 'kaiming'],
help='input weight matrix initialization' )
parser.add_argument('--nonlin', type=str, default='modrelu',
choices=['none','modrelu', 'tanh', 'relu', 'sigmoid'],
help='non linearity none, relu, tanh, sigmoid')
parser.add_argument('--alam', type=float, default=0.0001,
help='decay for gamma values nnRNN')
parser.add_argument('--Tdecay', type=float,
default=0, help='weight decay on upper T')
parser.add_argument('--slen', type=int,
default=784, help='sequence length')
parser.add_argument('--name', type=str,
default='RNN', help='name of model')
args = parser.parse_args()
torch.cuda.manual_seed(args.random_seed)
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
# rng is created unconditionally: it is also used below to shuffle the indices for
# the train/validation split, even when --permute is not set.
rng = np.random.RandomState(1234)
if args.permute:
order = rng.permutation(784)
else:
order = np.arange(784)
trainset = T.datasets.MNIST(root='data/',
train=True,
download=True,
transform=T.transforms.ToTensor())
valset = T.datasets.MNIST(root='data/',
train=True,
download=True,
transform=T.transforms.ToTensor())
offset = 10000
R = rng.permutation(len(trainset))
lengths = (len(trainset) - offset, offset)
trainset,valset = [Subset(trainset, R[offset - length:offset])
for offset, length in zip(_accumulate(lengths), lengths)]
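# With len(trainset) == 60000 and offset == 10000, _accumulate(lengths) yields
# (50000, 60000), so trainset becomes Subset(R[0:50000]) and valset becomes
# Subset(R[50000:60000]) -- a shuffled 50k/10k train/validation split.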
testset = T.datasets.MNIST(root='data/',
train=False,
download=True,
transform=T.transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch,
shuffle=False,
num_workers=2)
valloader = torch.utils.data.DataLoader(valset,
batch_size=args.batch,
shuffle=False,
num_workers=2)
testloader = torch.utils.data.DataLoader(testset,
batch_size=args.batch,
num_workers=2)
class Model(nn.Module):
def __init__(self, hidden_size, rnn):
super(Model, self).__init__()
self.rnn = rnn
self.hidden_size = hidden_size
self.alpha = nn.Parameter(torch.ones(784))
self.lin = nn.Linear(hidden_size, 10)
self.loss_func = nn.CrossEntropyLoss()
def get_norm(self):
return self.rnn._norm()
def forward(self, inputs, y, order):
h = None
#print(f' input shape {inputs.shape}')
inputs = inputs[:, order]
for k, input in enumerate(torch.unbind(inputs, dim=1)):
if NVAR == 'alpha':
h = self.rnn(input.unsqueeze(1), h, k)
else:
h = self.rnn(input.unsqueeze(1), h)
out = self.lin(h)
loss = self.loss_func(out, y)
preds = torch.argmax(out, dim=1)
correct = torch.eq(preds, y).sum().item()
return loss, correct
def test_model(net, dataloader):
accuracy = 0
loss = 0
net.eval()
with torch.no_grad():
for i, data in enumerate(dataloader):
x,y = data
x = x.view(-1, 784)
if CUDA:
x = x.cuda()
y = y.cuda()
if NET_TYPE == 'LSTM':
net.rnn.init_states(x.shape[0])
loss,c = net.forward(x, y, order)
accuracy += c
accuracy /= len(dataloader.dataset)  # normalize by the size of the evaluated dataset (val or test)
return loss, accuracy
def save_checkpoint(state, fname):
filename = os.path.join(SAVEDIR, fname)
torch.save(state, filename)
def train_model(net, optimizer, num_epochs):
train_losses = []
train_accuracies = []
test_losses = []
test_accuracies = []
norm = []
alphas = []
best_test_acc = 0
for epoch in range(0, num_epochs):
s_t = time.time()
accs = []
losses = []
processed = 0
alpha_losses = []
net.train()
correct = 0
for i,data in enumerate(trainloader, 0):
inp_x, inp_y = data
inp_x = inp_x.view(-1, 784)
if CUDA:
inp_x = inp_x.cuda()
inp_y = inp_y.cuda()
if NET_TYPE == 'LSTM':
net.rnn.init_states(inp_x.shape[0])
optimizer.zero_grad()
if orthog_optimizer:
orthog_optimizer.zero_grad()
loss, c = net.forward(inp_x, inp_y, order)
correct += c
processed += inp_x.shape[0]
accs.append(correct/float(processed))
#calculate losses for orthogonal rnn and alpha blocks
if NET_TYPE == 'nnRNN' and alam > 0:
alpha_loss = net.rnn.alpha_loss(alam)
loss += alpha_loss
alpha_losses.append(alpha_loss.item())
loss.backward()
losses.append(loss.item())
if orthog_optimizer:
net.rnn.orthogonal_step(orthog_optimizer)
optimizer.step()
norm.append(net.get_norm())
alphas.append(net.rnn.get_alpha())
test_loss, test_acc = test_model(net, valloader)
test_accuracies.append(test_acc)
test_losses.append(test_loss)
if test_acc > best_test_acc:
best_test_acc = test_acc
save_checkpoint({
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch
},
'{}.pth.tar'.format('best_model')
)
print('Epoch {}, Time for Epoch: {}, Train Loss: {}, '
'Train Accuracy: {} Test Loss: {} Test Accuracy {}'
.format(epoch +1, time.time()- s_t, np.mean(losses),
np.mean(accs), test_loss, test_acc))
#print(f' model alpha params:\n {net.alpha}')
train_losses.append(np.mean(losses))
train_accuracies.append(np.mean(accs))
#save data
if epoch % SAVEFREQ == 0 or epoch==num_epochs -1:
with open(os.path.join(SAVEDIR, '{}_Train_Losses'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(train_losses, fp)
with open(os.path.join(SAVEDIR, '{}_Test_Losses'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(test_losses, fp)
with open(os.path.join(SAVEDIR, '{}_Test_Accuracy'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(test_accuracies, fp)
with open(os.path.join(SAVEDIR, '{}_Train_NORM'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(norm, fp)
with open(os.path.join(SAVEDIR, '{}_Train_Alpha'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(alphas, fp)
with open(os.path.join(SAVEDIR, '{}_Train_Accuracy'.format(NET_TYPE)), 'wb') as fp:
pickle.dump(train_accuracies, fp)
save_checkpoint({
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch
},
'{}_{}.pth.tar'.format(NET_TYPE,epoch)
)
best_state = torch.load(os.path.join(SAVEDIR, 'best_model.pth.tar'))
net.load_state_dict(best_state['state_dict'])
test_loss, test_acc = test_model(net, testloader)
with open(os.path.join(SAVEDIR, 'log_test.txt'), 'w') as fp:
fp.write('Test loss: {} Test accuracy: {}'.format(test_loss, test_acc))
return
lr = args.lr
lr_orth = args.lr_orth
random_seed = args.random_seed
NET_TYPE = args.net_type
CUDA = args.cuda
SAVEFREQ = args.save_freq
inp_size = 1
hid_size = args.nhid
alam = args.alam
Tdecay = args.Tdecay
NVAR = args.nvar
exp_time = "{0:%Y-%m-%d}_{0:%H-%M-%S}".format(datetime.now())
SAVEDIR = os.path.join('./saves',
'sMNIST',
NET_TYPE,
args.name
,
str(random_seed),
exp_time)
if not os.path.exists(SAVEDIR):
os.makedirs(SAVEDIR)
with open(os.path.join(SAVEDIR, 'hparams.txt'), 'w') as fp:
for key, val in args.__dict__.items():
fp.write('{}: {}\n'.format(key, val))
T = 784
batch_size = args.batch
out_size = 10
rnn = select_network(args, inp_size)
net = Model(hid_size,rnn)
if CUDA:
net = net.cuda()
net.rnn = net.rnn.cuda()
print('sMNIST task')
print(NET_TYPE)
print('Cuda: {}'.format(CUDA))
optimizer, orthog_optimizer = select_optimizer(net, args)
epoch = 0
num_epochs = args.epochs
train_model(net, optimizer, num_epochs)
|
# config.py - Reading and writing Git config files
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) a later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Reading and writing Git configuration files.
TODO:
* preserve formatting when updating configuration files
* treat subsection names as case-insensitive for [branch.foo] style
subsections
"""
import errno
import os
import re
try:
from collections import OrderedDict
except ImportError:
from dulwich._compat import OrderedDict
from UserDict import DictMixin
from dulwich.file import GitFile
class Config(object):
"""A Git configuration."""
def get(self, section, name):
"""Retrieve the contents of a configuration setting.
:param section: Tuple with section name and optional subsection name
:param name: Name of the setting
:return: Contents of the setting
:raise KeyError: if the value is not set
"""
raise NotImplementedError(self.get)
def get_boolean(self, section, name, default=None):
"""Retrieve a configuration setting as boolean.
:param section: Tuple with section name and optional subsection name
:param name: Name of the setting, including section and possible
subsection.
:return: Contents of the setting
:raise KeyError: if the value is not set
"""
try:
value = self.get(section, name)
except KeyError:
return default
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
raise ValueError("not a valid boolean string: %r" % value)
def set(self, section, name, value):
"""Set a configuration value.
:param name: Name of the configuration value, including section
and optional subsection
:param value: Value of the setting
"""
raise NotImplementedError(self.set)
class ConfigDict(Config, DictMixin):
"""Git configuration stored in a dictionary."""
def __init__(self, values=None):
"""Create a new ConfigDict."""
if values is None:
values = OrderedDict()
self._values = values
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._values)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
other._values == self._values)
def __getitem__(self, key):
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def keys(self):
return self._values.keys()
@classmethod
def _parse_setting(cls, name):
parts = name.split(".")
if len(parts) == 3:
return (parts[0], parts[1], parts[2])
else:
return (parts[0], None, parts[1])
def get(self, section, name):
if isinstance(section, basestring):
section = (section, )
if len(section) > 1:
try:
return self._values[section][name]
except KeyError:
pass
return self._values[(section[0],)][name]
def set(self, section, name, value):
if isinstance(section, basestring):
section = (section, )
self._values.setdefault(section, OrderedDict())[name] = value
def _format_string(value):
if (value.startswith(" ") or
value.startswith("\t") or
value.endswith(" ") or
value.endswith("\t")):
return '"%s"' % _escape_value(value)
return _escape_value(value)
def _parse_string(value):
value = value.strip()
ret = []
block = []
in_quotes = False
for c in value:
if c == "\"":
in_quotes = (not in_quotes)
ret.append(_unescape_value("".join(block)))
block = []
elif c in ("#", ";") and not in_quotes:
# the rest of the line is a comment
break
else:
block.append(c)
if in_quotes:
raise ValueError("value starts with quote but lacks end quote")
ret.append(_unescape_value("".join(block)).rstrip())
return "".join(ret)
def _unescape_value(value):
"""Unescape a value."""
def unescape(c):
return {
"\\\\": "\\",
"\\\"": "\"",
"\\n": "\n",
"\\t": "\t",
"\\b": "\b",
}[c.group(0)]
return re.sub(r"(\\.)", unescape, value)
def _escape_value(value):
"""Escape a value."""
return value.replace("\\", "\\\\").replace("\n", "\\n").replace("\t", "\\t").replace("\"", "\\\"")
def _check_variable_name(name):
for c in name:
if not c.isalnum() and c != '-':
return False
return True
def _check_section_name(name):
for c in name:
if not c.isalnum() and c not in ('-', '.'):
return False
return True
def _strip_comments(line):
line = line.split("#")[0]
line = line.split(";")[0]
return line
class ConfigFile(ConfigDict):
"""A Git configuration file, like .git/config or ~/.gitconfig.
"""
@classmethod
def from_file(cls, f):
"""Read configuration from a file-like object."""
ret = cls()
section = None
setting = None
for lineno, line in enumerate(f.readlines()):
line = line.lstrip()
if setting is None:
if len(line) > 0 and line[0] == "[":
line = _strip_comments(line).rstrip()
last = line.find("]")
if last == -1:
raise ValueError("expected trailing ]")
pts = line[1:last].split(" ", 1)
line = line[last+1:]
pts[0] = pts[0].lower()
if len(pts) == 2:
if pts[1][0] != "\"" or pts[1][-1] != "\"":
raise ValueError(
"Invalid subsection " + pts[1])
else:
pts[1] = pts[1][1:-1]
if not _check_section_name(pts[0]):
raise ValueError("invalid section name %s" %
pts[0])
section = (pts[0], pts[1])
else:
if not _check_section_name(pts[0]):
raise ValueError("invalid section name %s" %
pts[0])
pts = pts[0].split(".", 1)
if len(pts) == 2:
section = (pts[0], pts[1])
else:
section = (pts[0], )
ret._values[section] = OrderedDict()
if _strip_comments(line).strip() == "":
continue
if section is None:
raise ValueError("setting %r without section" % line)
try:
setting, value = line.split("=", 1)
except ValueError:
setting = line
value = "true"
setting = setting.strip().lower()
if not _check_variable_name(setting):
raise ValueError("invalid variable name %s" % setting)
if value.endswith("\\\n"):
value = value[:-2]
continuation = True
else:
continuation = False
value = _parse_string(value)
ret._values[section][setting] = value
if not continuation:
setting = None
else: # continuation line
if line.endswith("\\\n"):
line = line[:-2]
continuation = True
else:
continuation = False
value = _parse_string(line)
ret._values[section][setting] += value
if not continuation:
setting = None
return ret
@classmethod
def from_path(cls, path):
"""Read configuration from a file on disk."""
f = GitFile(path, 'rb')
try:
ret = cls.from_file(f)
ret.path = path
return ret
finally:
f.close()
def write_to_path(self, path=None):
"""Write configuration to a file on disk."""
if path is None:
path = self.path
f = GitFile(path, 'wb')
try:
self.write_to_file(f)
finally:
f.close()
def write_to_file(self, f):
"""Write configuration to a file-like object."""
for section, values in self._values.iteritems():
try:
section_name, subsection_name = section
except ValueError:
(section_name, ) = section
subsection_name = None
if subsection_name is None:
f.write("[%s]\n" % section_name)
else:
f.write("[%s \"%s\"]\n" % (section_name, subsection_name))
for key, value in values.iteritems():
f.write("\t%s = %s\n" % (key, _escape_value(value)))
class StackedConfig(Config):
"""Configuration which reads from multiple config files.."""
def __init__(self, backends, writable=None):
self.backends = backends
self.writable = writable
def __repr__(self):
return "<%s for %r>" % (self.__class__.__name__, self.backends)
@classmethod
def default_backends(cls):
"""Retrieve the default configuration.
This will look in the user's home directory and the system-wide
configuration.
"""
paths = []
paths.append(os.path.expanduser("~/.gitconfig"))
paths.append("/etc/gitconfig")
backends = []
for path in paths:
try:
cf = ConfigFile.from_path(path)
except (IOError, OSError), e:
if e.errno != errno.ENOENT:
raise
else:
continue
backends.append(cf)
return backends
def get(self, section, name):
for backend in self.backends:
try:
return backend.get(section, name)
except KeyError:
pass
raise KeyError(name)
def set(self, section, name, value):
if self.writable is None:
raise NotImplementedError(self.set)
return self.writable.set(section, name, value)
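# Illustrative round-trip sketch (not part of dulwich; the section names and values
# below are made up):
#
#   from StringIO import StringIO
#   cf = ConfigFile.from_file(StringIO("[core]\n\tfilemode = true\n"))
#   cf.get(("core",), "filemode")            # -> "true"
#   cf.set(("remote", "origin"), "url", "git://example.org/repo.git")
#   out = StringIO()
#   cf.write_to_file(out)                    # serializes both sections back out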
|
# repo: rky0930/yolo_v2
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""YOLOFeatureExtractor for Darknet19 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from nets import darknet
slim = tf.contrib.slim
class YOLOv2Darknet19FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""YOLO Feature Extractor using Darknet19 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
"""Darknet19 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: Not used in YOLO
min_depth: Not used in YOLO
pad_to_multiple: Not used in YOLO
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a small batch size
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(YOLOv2Darknet19FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams, batch_norm_trainable, reuse_weights)
def preprocess(self, resized_inputs):
"""Darknet19 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must be at least 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(darknet.darknet_arg_scope(is_training = self._is_training)):
#with slim.arg_scope(darknet.darknet_arg_scope()):
with tf.variable_scope('darknet_19',
reuse=self._reuse_weights) as scope:
net, end_points = darknet.darknet_19_base(preprocessed_inputs,
scope='base')
net = slim.conv2d(net, 1024, [3, 3], scope='Conv2D_19')
net = slim.conv2d(net, 1024, [3, 3], scope='Conv2D_20')
scope_name = end_points['scope_name']
conv_13 = end_points[scope_name+'/Conv2D_13']
conv_21 = slim.conv2d(conv_13, 64, [1, 1], scope='Conv2D_21')
conv_21 = tf.space_to_depth(conv_21, block_size=2)
net = tf.concat([conv_21, net], axis=-1)
net = slim.conv2d(net, 1024, [3, 3], scope='Conv2D_22')
feature_map = net
return [feature_map]
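# Illustrative note (not from the original repo): preprocess() maps uint8-style pixel
# values into [-1, 1], e.g. 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0. extract_features()
# returns a single 1280-channel map: 1024 backbone channels concatenated with the
# 256 channels produced by space_to_depth on the 64-filter conv_21 branch.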
|
# repo: medialab/bibliotools3.0
#! /usr/bin/env python
"""
Author : <NAME> (http://www.sebastian-grauwin.com/)
Copyright (C) 2012
All rights reserved.
BSD license.
"""
import os
import sys
import glob
import numpy
import argparse
## ##################################################
## ##################################################
## ##################################################
class Wosline:
def __init__(self):
self.PT = "" ## Publication Type (J=Journal; B=Book; S=Series)
self.AU = "" ## Authors
self.BA = "" ## Book Authors
self.BE = "" ## Book Editor
self.GP = "" ## Book Group Authors
self.AF = "" ## Author Full Name
self.CA = "" ## Group Authors
self.TI = "" ## Document Title
self.SO = "" ## Publication Name
self.SE = "" ## Book Series Title
self.LA = "" ## Language
self.DT = "" ## Document Type
self.CT = "" ## Conference Title
self.CY = "" ## Conference Date
self.CL = "" ## Conference Location
self.SP = "" ## Conference Sponsors
self.FO = "" ## Funding Organization
self.DE = "" ## Author Keywords
self.ID = "" ## Keywords Plus
self.AB = "" ## Abstract
self.C1 = "" ## Author Address
self.RP = "" ## Reprint Address
self.EM = "" ## E-mail Address
self.FU = "" ## Funding Agency and Grant Number
self.FX = "" ## Funding Text
self.CR = "" ## Cited References
self.NR = "" ## Cited Reference Count
self.TC = "" ## Times Cited
self.Z9 = "" ##
self.PU = "" ## Publisher
self.PI = "" ## Publisher City
self.PA = "" ## Publisher Address
self.SN = "" ## ISSN
self.BN = "" ## ISBN
self.J9 = "" ## 29-Character Source Abbreviation
self.JI = "" ## ISO Source Abbreviation
self.PD = "" ## Publication Date
self.PY = 0 ## Year Published
self.VL = "" ## Volume
self.IS = "" ## Issue
self.PN = "" ## Part Number
self.SU = "" ## Supplement
self.SI = "" ## Special Issue
self.BP = "" ## Beginning Page
self.EP = "" ## Ending Page
self.AR = "" ## Article Number
self.DI = "" ## Digital Object Identifier (DOI)
self.D2 = "" ##
self.PG = "" ## Page Count
self.P2 = "" ##
self.WC = "" ## Web of Science Category
self.SC = "" ## Subject Category
self.GA = "" ## Document Delivery Number
self.UT = "" ## Unique Article Identifier
def parse_line(self, line, defCols, numCols):
"""
parse a line of the WoS txt output file
"""
s = line.split("\t")
if len(s)==numCols:
if(s[defCols['PT']]=='J'): self.PT = 'Journal' ## Publication Type (J=Journal; B=Book; S=Series)
if(s[defCols['PT']]=='B'): self.PT = 'Book'
if(s[defCols['PT']]=='S'): self.PT = 'Series'
self.AU = s[defCols['AU']] ## Authors
self.TI = s[defCols['TI']] ## Document Title
self.SO = s[defCols['SO']] ## Publication Name
self.DT = s[defCols['DT']] ## Document Type
self.DE = s[defCols['DE']] ## Author Keywords
self.ID = s[defCols['ID']] ## Keywords Plus
self.C1 = s[defCols['C1']] ## Author Address
self.CR = s[defCols['CR']] ## Cited References
self.TC = s[defCols['TC']] ## Times Cited
self.J9 = s[defCols['J9']] ## 29-Character Source Abbreviation
self.PD = s[defCols['PD']] ## Publication Date
if s[defCols['PY']].isdigit(): self.PY = int(s[defCols['PY']])
else: self.PY = 0 ## Year Published
self.VL = s[defCols['VL']] ## Volume
self.IS = s[defCols['IS']] ## Issue
self.BP = s[defCols['BP']] ## Beginning Page
self.WC = s[defCols['WC']] ## Web of Science Category
self.UT = s[defCols['UT']] ## Unique Article Identifier
else:
print "ARG %s != %s"%(len(s),numCols)
## ##################################################
def defColumns(line):
# initialize
Cols = ['PT', 'AU', 'TI', 'SO', 'DT', 'DE', 'ID', 'C1', 'CR', 'TC', 'J9', 'PD', 'PY', 'VL', 'IS', 'BP', 'WC', 'UT'];
defCols = dict();
# match columns number in "line"
foo = line.replace('\xef\xbb\xbf','').split('\t')
for i in range(len(foo)):
if foo[i] in Cols:
defCols[foo[i]] = i
numCols = len(foo)
return (defCols, numCols)
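## Illustrative sketch (the header below is a made-up minimal example, not a real WoS
## export line): defColumns("PT\tAU\tPY") returns ({'PT': 0, 'AU': 1, 'PY': 2}, 3),
## i.e. a column-name -> index dict plus the total number of columns.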
## ##################################################
## ##################################################
## ##################################################
## ##################################################
class ArticleList:
def __init__(self):
self.articles = [] # articles list
def read_file(self,filename):
articles_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
aux = 0
for line in fd.readlines():
line = line.strip("\n") # removes \n
if (line != ""):
if (aux == 1): # do not take 1st line into account!
wline = Wosline()
wline.parse_line(line, defCols, numCols)
articles_list.append( wline )
if (aux == 0): # define columns thanks to 1st line
(defCols, numCols) = defColumns( line )
aux = 1
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.articles = articles_list
## ##################################################
## ##################################################
## ##################################################
class Article:
def __init__(self):
self.id = 0
self.firstAU = ""
self.year = 0
self.journal = ""
self.volume = ""
self.page = ""
self.doi = ""
self.pubtype = ""
self.doctype = ""
self.times_cited = ""
self.title = ""
self.uniqueID = ""
self.articles = [] # list of articles
def read_file(self,filename):
"""
Read the articles file
"""
articles_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
aux = 0
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
aline = Article()
aline.id = int(s[0])
if(len(s)>1): aline.firstAU = s[1]
if(len(s)>2): aline.year = int(s[2])
if(len(s)>3): aline.journal = s[3]
if(len(s)>4): aline.volume = s[4]
if(len(s)>5): aline.page = s[5]
if(len(s)>6): aline.doi = s[6]
if(len(s)>7): aline.pubtype = s[7]
if(len(s)>8): aline.doctype = s[8]
if(len(s)>9): aline.times_cited = s[9]
if(len(s)>10): aline.title = s[10]
if(len(s)>11): aline.uniqueID = s[11]
articles_list.append( aline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.articles = articles_list
## ##################################################
## ##################################################
## ##################################################
class Author:
def __init__(self):
self.id = 0
self.rank = 0
self.author = ""
self.authors = [] # list of authors
def read_file(self,filename):
"""
Read the authors file
"""
alines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
lncnt=0
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
aline = Author()
aline.id = int(s[0])
aline.rank = int(s[1])
if len(s)<3:
print "missing author in : %s %s"%(lncnt,s)
aline.author = "name missing"
else:
aline.author = s[2]
#print int(s[0]), int(s[1]),s[2], 'author'
alines_list.append( aline )
lncnt+=1
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.authors = alines_list
## ##################################################
## ##################################################
## ##################################################
class Country:
def __init__(self):
self.id = 0
self.rank = 0
self.country = ""
self.countries = [] # list of countries
def read_file(self,filename):
"""
Read the countries file
"""
clines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
cline = Country()
cline.id = int(s[0])
cline.rank = int(s[1])
cline.country = s[2].lower().capitalize()
clines_list.append( cline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.countries = clines_list
## ##################################################
## ##################################################
## ##################################################
class Institution:
def __init__(self):
self.id = 0
self.rank = 0
self.institution = ""
self.institutions = [] # list of institutions
def read_file(self,filename):
"""
Read the institutions file
"""
ilines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
iline = Institution()
if len(s)==3:
iline.id = int(s[0])
iline.rank = int(s[1])
iline.institution = s[2].upper()
ilines_list.append( iline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.institutions = ilines_list
## ##################################################
## ##################################################
## ##################################################
class Keyword:
def __init__(self):
self.id = 0
self.ktype = ""
self.keyword = ""
self.keywords = [] # list of keywords
def read_file(self,filename):
"""
Read the keywords file
"""
klines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
kline = Keyword()
kline.id = int(s[0])
kline.ktype = s[1]
kline.keyword = s[2].upper()
klines_list.append( kline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.keywords = klines_list
## ##################################################
## ##################################################
## ##################################################
class Ref:
def __init__(self):
self.id = 0
self.firstAU = ""
self.year = 0
self.journal = ""
self.volume = 0
self.page = 0
self.refs = [] # list of refs
def parse_ref(self, ref):
"""
parse a ref of the WoS txt format
"""
s = ref.split(', ')
if(len(s)>0):
aux1 = s[0].rfind(' ')
aux2 = len(s[0])
foo = s[0].lower().capitalize()
if aux1 > 0:
s1 = foo[aux1:aux2]
s2 = s1.upper()
foo = foo.replace(s1,s2)
foo = foo.replace('.','')
self.firstAU = foo
if(len(s)>1):
if s[1].isdigit(): self.year = int(s[1])
else: self.year = 0
if(len(s)>2): self.journal = s[2]
if(len(s)>3):
if(s[3][0]=='V'): self.volume = s[3].replace('V','')
if(len(s)>3):
if(s[3][0]=='P'): self.page = s[3].replace('P','')
if(len(s)>4):
if(s[4][0]=='P'): self.page = s[4].replace('P','')
def read_file(self,filename):
"""
Read the refs file
"""
refs_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
refline = Ref()
refline.id = int(s[0])
refline.firstAU = s[1]
refline.year = int(s[2])
refline.journal = s[3]
refline.volume = s[4]
refline.page = s[5]
refs_list.append( refline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.refs = refs_list
## ##################################################
## ##################################################
## ##################################################
class Subject:
def __init__(self):
self.id = 0
self.subject = ""
self.subjects = [] # list of subjects
def read_file(self,filename):
"""
Read the subjects file
"""
slines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
sline = Subject()
sline.id = int(s[0])
sline.subject = s[1]
slines_list.append( sline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "file does not exist"
self.subjects = slines_list
## ##################################################
## ##################################################
## ##################################################
class Labo:
def __init__(self):
self.id = 0
self.labo = ""
self.labos = [] # list of labs
def read_file(self,filename):
"""
Read the labs file
"""
llines_list = []
try:
# open
if filename != 'stdin':
fd = open(filename)
else:
fd = sys.stdin
# read
for line in fd.readlines():
line = line.strip() # removes \n
if (line != ""):
s = line.split("\t")
lline = Labo()
if len(s)==2:
lline.id = int(s[0])
lline.labo = s[1]
llines_list.append( lline )
# close
if filename != 'stdin':
fd.close()
except IOError:
print "..labos.dat file does not exist"
self.labos = llines_list
## ##################################################
## ##################################################
## ##################################################
## ##################################################
## ##################################################
## ##################################################
if __name__ == "__main__":
main()
## ##################################################
## ##################################################
## ##################################################
|
#!/usr/bin/env python
# repo: mailtokartik1/electron -- file: script/native-tests.py
import argparse
import os
import subprocess
import sys
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYYAML_LIB_DIR = os.path.join(VENDOR_DIR, 'pyyaml', 'lib')
sys.path.append(PYYAML_LIB_DIR)
import yaml #pylint: disable=wrong-import-position,wrong-import-order
class Command:
LIST = 'list'
RUN = 'run'
class Verbosity:
ALL = 'all' # stdout and stderr
ERRORS = 'errors' # stderr only
SILENT = 'silent' # no output
def parse_args():
parser = argparse.ArgumentParser(description='Run Google Test binaries')
parser.add_argument('command',
choices=[Command.LIST, Command.RUN],
help='command to execute')
parser.add_argument('-b', '--binary', nargs='+', required=False,
help='binaries to run')
parser.add_argument('-c', '--config', required=True,
help='path to a tests config')
parser.add_argument('-t', '--tests-dir', required=False,
help='path to a directory with test binaries')
parser.add_argument('-o', '--output-dir', required=False,
help='path to a folder to save tests results')
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument('-v', '--verbosity', required=False,
default=Verbosity.ALL,
choices=[
Verbosity.ALL,
Verbosity.ERRORS,
Verbosity.SILENT],
help='set verbosity level')
verbosity.add_argument('-q', '--quiet', required=False, action='store_const',
const=Verbosity.ERRORS, dest='verbosity',
help='suppress stdout from test binaries')
verbosity.add_argument('-qq', '--quiet-quiet',
# https://youtu.be/o0u4M6vppCI?t=1m18s
required=False, action='store_const',
const=Verbosity.SILENT, dest='verbosity',
help='suppress stdout and stderr from test binaries')
args = parser.parse_args()
# Additional checks.
if args.command == Command.RUN and args.tests_dir is None:
parser.error("specify a path to a dir with test binaries via --tests-dir")
# Absolutize and check paths.
# 'config' must exist and be a file.
args.config = os.path.abspath(args.config)
if not os.path.isfile(args.config):
parser.error("file '{}' doesn't exist".format(args.config))
# 'tests_dir' must exist and be a directory.
if args.tests_dir is not None:
args.tests_dir = os.path.abspath(args.tests_dir)
if not os.path.isdir(args.tests_dir):
parser.error("directory '{}' doesn't exist".format(args.tests_dir))
# 'output_dir' must exist and be a directory.
if args.output_dir is not None:
args.output_dir = os.path.abspath(args.output_dir)
if not os.path.isdir(args.output_dir):
parser.error("directory '{}' doesn't exist".format(args.output_dir))
return args
def main():
args = parse_args()
tests_list = TestsList(args.config, args.tests_dir)
if args.command == Command.LIST:
all_binaries_names = tests_list.get_names()
print '\n'.join(all_binaries_names)
return 0
if args.command == Command.RUN:
if args.binary is not None:
return tests_list.run(args.binary, args.output_dir, args.verbosity)
else:
return tests_list.run_all(args.output_dir, args.verbosity)
raise Exception("unexpected command '{}'".format(args.command))
class TestsList():
def __init__(self, config_path, tests_dir):
self.config_path = config_path
self.tests_dir = tests_dir
# A dict with binary names (e.g. 'base_unittests') as keys
# and various test data as values of dict type.
self.tests = TestsList.__get_tests_list(config_path)
def __len__(self):
return len(self.tests)
def get_names(self):
return self.tests.keys()
def run(self, binaries, output_dir=None, verbosity=Verbosity.ALL):
# Don't run anything twice.
binaries = set(binaries)
# First check that all names are present in the config.
missing = [binary_name for binary_name in binaries if binary_name not in self.tests]
if missing:
raise Exception("binary '{0}' not found in config '{1}'".format(
missing[0], self.config_path))
# TODO(alexeykuzmin): Respect the "platform" setting.
suite_returncode = sum(
[self.__run(binary, output_dir, verbosity) for binary in binaries])
return suite_returncode
def run_only(self, binary_name, output_dir=None, verbosity=Verbosity.ALL):
return self.run([binary_name], output_dir, verbosity)
def run_all(self, output_dir=None, verbosity=Verbosity.ALL):
return self.run(self.get_names(), output_dir, verbosity)
@staticmethod
def __get_tests_list(config_path):
tests_list = {}
config_data = TestsList.__get_config_data(config_path)
for data_item in config_data['tests']:
(binary_name, test_data) = TestsList.__get_test_data(data_item)
tests_list[binary_name] = test_data
return tests_list
@staticmethod
def __get_config_data(config_path):
with open(config_path, 'r') as stream:
return yaml.load(stream)
@staticmethod
def __expand_shorthand(value):
""" Treat a string as {'string_value': None}."""
if isinstance(value, dict):
return value
if isinstance(value, basestring):
return {value: None}
assert False, "unexpected shorthand type: {}".format(type(value))
@staticmethod
def __make_a_list(value):
"""Make a list if not already a list."""
if isinstance(value, list):
return value
return [value]
@staticmethod
def __get_test_data(data_item):
data_item = TestsList.__expand_shorthand(data_item)
binary_name = data_item.keys()[0]
test_data = {
'excluded_tests': None,
'platforms': None # None means all? Weird.
}
configs = data_item[binary_name]
if configs is not None:
# List of excluded tests.
if 'to_fix' in configs:
test_data['excluded_tests'] = configs['to_fix']
# TODO(alexeykuzmin): Also add to 'excluded_tests'
# those that should be permanently disabled.
# List of platforms to run the tests on.
if 'platform' in configs:
test_data['platforms'] = TestsList.__make_a_list(configs['platform'])
return (binary_name, test_data)
def __run(self, binary_name, output_dir, verbosity):
binary_path = os.path.join(self.tests_dir, binary_name)
test_binary = TestBinary(binary_path)
test_data = self.tests[binary_name]
excluded_tests = test_data['excluded_tests']
output_file_path = TestsList.__get_output_path(binary_name, output_dir)
return test_binary.run(excluded_tests=excluded_tests,
output_file_path=output_file_path,
verbosity=verbosity)
@staticmethod
def __get_output_path(binary_name, output_dir=None):
if output_dir is None:
return None
return os.path.join(output_dir, "results_{}.xml".format(binary_name))
class TestBinary():
def __init__(self, binary_path):
self.binary_path = binary_path
# Is only used when writing to a file.
self.output_format = 'xml'
def run(self, excluded_tests=None, output_file_path=None,
verbosity=Verbosity.ALL):
gtest_filter = ""
if excluded_tests is not None and len(excluded_tests) > 0:
excluded_tests_string = TestBinary.__format_excluded_tests(
excluded_tests)
gtest_filter = "--gtest_filter={}".format(excluded_tests_string)
gtest_output = ""
if output_file_path is not None:
gtest_output = "--gtest_output={0}:{1}".format(self.output_format,
output_file_path)
args = [self.binary_path, gtest_filter, gtest_output]
stdout, stderr = TestBinary.__get_stdout_and_stderr(verbosity)
returncode = subprocess.call(args, stdout=stdout, stderr=stderr)
return returncode
@staticmethod
def __format_excluded_tests(excluded_tests):
return "-" + ":".join(excluded_tests)
@staticmethod
def __get_stdout_and_stderr(verbosity):
stdout = stderr = None
if verbosity in (Verbosity.ERRORS, Verbosity.SILENT):
devnull = open(os.devnull, 'w')
stdout = devnull
if verbosity == Verbosity.SILENT:
stderr = devnull
return (stdout, stderr)
if __name__ == '__main__':
sys.exit(main())
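# Illustrative sketch of the YAML shape this script consumes (the binary and test names
# below are made up, not taken from the real Electron config):
#
# tests:
#   - base_unittests
#   - net_unittests:
#       platform: linux
#       to_fix:
#         - SomeSuite.FlakyTest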
|
from .base_requests import AnymailRequestsBackend, RequestsPayload
from ..exceptions import AnymailRequestsAPIError
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting, update_deep
class EmailBackend(AnymailRequestsBackend):
"""
SparkPost Email Backend
"""
esp_name = "SparkPost"
def __init__(self, **kwargs):
"""Init options from Django settings"""
self.api_key = get_anymail_setting('api_key', esp_name=self.esp_name,
kwargs=kwargs, allow_bare=True)
self.subaccount = get_anymail_setting('subaccount', esp_name=self.esp_name,
kwargs=kwargs, default=None)
api_url = get_anymail_setting('api_url', esp_name=self.esp_name, kwargs=kwargs,
default="https://api.sparkpost.com/api/v1/")
if not api_url.endswith("/"):
api_url += "/"
super().__init__(api_url, **kwargs)
def build_message_payload(self, message, defaults):
return SparkPostPayload(message, defaults, self)
def parse_recipient_status(self, response, payload, message):
parsed_response = self.deserialize_json_response(response, payload, message)
try:
results = parsed_response["results"]
accepted = results["total_accepted_recipients"]
rejected = results["total_rejected_recipients"]
transmission_id = results["id"]
except (KeyError, TypeError) as err:
raise AnymailRequestsAPIError("Invalid SparkPost API response format",
email_message=message, payload=payload,
response=response, backend=self) from err
# SparkPost doesn't (yet*) tell us *which* recipients were accepted or rejected.
# (* looks like undocumented 'rcpt_to_errors' might provide this info.)
# If all are one or the other, we can report a specific status;
# else just report 'unknown' for all recipients.
recipient_count = len(payload.recipients)
if accepted == recipient_count and rejected == 0:
status = 'queued'
elif rejected == recipient_count and accepted == 0:
status = 'rejected'
else: # mixed results, or wrong total
status = 'unknown'
recipient_status = AnymailRecipientStatus(message_id=transmission_id, status=status)
return {recipient.addr_spec: recipient_status for recipient in payload.recipients}
class SparkPostPayload(RequestsPayload):
def __init__(self, message, defaults, backend, *args, **kwargs):
http_headers = {
'Authorization': backend.api_key,
'Content-Type': 'application/json',
}
if backend.subaccount is not None:
http_headers['X-MSYS-SUBACCOUNT'] = backend.subaccount
self.recipients = [] # all recipients, for backend parse_recipient_status
self.cc_and_bcc = [] # for _finalize_recipients
super().__init__(message, defaults, backend, headers=http_headers, *args, **kwargs)
def get_api_endpoint(self):
return "transmissions/"
def serialize_data(self):
self._finalize_recipients()
return self.serialize_json(self.data)
def _finalize_recipients(self):
# https://www.sparkpost.com/docs/faq/cc-bcc-with-rest-api/
# self.data["recipients"] is currently a list of all to-recipients. We need to add
# all cc and bcc recipients. Exactly how depends on whether this is a batch send.
if self.is_batch():
# For batch sends, must duplicate the cc/bcc for *every* to-recipient
# (using each to-recipient's metadata and substitutions).
extra_recipients = []
for to_recipient in self.data["recipients"]:
for email in self.cc_and_bcc:
extra = to_recipient.copy() # capture "metadata" and "substitutions", if any
extra["address"] = {
"email": email.addr_spec,
"header_to": to_recipient["address"]["header_to"],
}
extra_recipients.append(extra)
self.data["recipients"].extend(extra_recipients)
else:
# For non-batch sends, we need to patch up *everyone's* displayed
# "To" header to show all the "To" recipients...
full_to_header = ", ".join(
to_recipient["address"]["header_to"]
for to_recipient in self.data["recipients"])
for recipient in self.data["recipients"]:
recipient["address"]["header_to"] = full_to_header
# ... and then simply add the cc/bcc to the end of the list.
# (There is no per-recipient data, or it would be a batch send.)
self.data["recipients"].extend(
{"address": {
"email": email.addr_spec,
"header_to": full_to_header,
}}
for email in self.cc_and_bcc)
#
# Payload construction
#
def init_payload(self):
# The JSON payload:
self.data = {
"content": {},
"recipients": [],
}
def set_from_email(self, email):
self.data["content"]["from"] = email.address
def set_to(self, emails):
if emails:
# In the recipient address, "email" is the addr spec to deliver to,
# and "header_to" is a fully-composed "To" header to display.
# (We use "header_to" rather than "name" to simplify some logic
# in _finalize_recipients; the results end up the same.)
self.data["recipients"].extend(
{"address": {
"email": email.addr_spec,
"header_to": email.address,
}}
for email in emails)
self.recipients += emails
def set_cc(self, emails):
# https://www.sparkpost.com/docs/faq/cc-bcc-with-rest-api/
if emails:
# Add the Cc header, visible to all recipients:
cc_header = ", ".join(email.address for email in emails)
self.data["content"].setdefault("headers", {})["Cc"] = cc_header
# Actual recipients are added later, in _finalize_recipients
self.cc_and_bcc += emails
self.recipients += emails
def set_bcc(self, emails):
if emails:
# Actual recipients are added later, in _finalize_recipients
self.cc_and_bcc += emails
self.recipients += emails
def set_subject(self, subject):
self.data["content"]["subject"] = subject
def set_reply_to(self, emails):
if emails:
self.data["content"]["reply_to"] = ", ".join(email.address for email in emails)
def set_extra_headers(self, headers):
if headers:
self.data["content"].setdefault("headers", {}).update(headers)
def set_text_body(self, body):
self.data["content"]["text"] = body
def set_html_body(self, body):
if "html" in self.data["content"]:
# second html body could show up through multiple alternatives, or html body + alternative
self.unsupported_feature("multiple html parts")
self.data["content"]["html"] = body
def add_alternative(self, content, mimetype):
if mimetype.lower() == "text/x-amp-html":
if "amp_html" in self.data["content"]:
self.unsupported_feature("multiple html parts")
self.data["content"]["amp_html"] = content
else:
super().add_alternative(content, mimetype)
def set_attachments(self, atts):
attachments = [{
"name": att.name or "",
"type": att.content_type,
"data": att.b64content,
} for att in atts if not att.inline]
if attachments:
self.data["content"]["attachments"] = attachments
inline_images = [{
"name": att.cid,
"type": att.mimetype,
"data": att.b64content,
} for att in atts if att.inline]
if inline_images:
self.data["content"]["inline_images"] = inline_images
# Anymail-specific payload construction
def set_envelope_sender(self, email):
self.data["return_path"] = email.addr_spec
def set_metadata(self, metadata):
self.data["metadata"] = metadata
def set_merge_metadata(self, merge_metadata):
for recipient in self.data["recipients"]:
to_email = recipient["address"]["email"]
if to_email in merge_metadata:
recipient["metadata"] = merge_metadata[to_email]
def set_send_at(self, send_at):
try:
start_time = send_at.replace(microsecond=0).isoformat()
except (AttributeError, TypeError):
start_time = send_at # assume user already formatted
self.data.setdefault("options", {})["start_time"] = start_time
def set_tags(self, tags):
if len(tags) > 0:
self.data["campaign_id"] = tags[0]
if len(tags) > 1:
self.unsupported_feature("multiple tags (%r)" % tags)
def set_track_clicks(self, track_clicks):
self.data.setdefault("options", {})["click_tracking"] = track_clicks
def set_track_opens(self, track_opens):
self.data.setdefault("options", {})["open_tracking"] = track_opens
def set_template_id(self, template_id):
self.data["content"]["template_id"] = template_id
# Must remove empty string "content" params when using stored template
for content_param in ["subject", "text", "html"]:
try:
if not self.data["content"][content_param]:
del self.data["content"][content_param]
except KeyError:
pass
def set_merge_data(self, merge_data):
for recipient in self.data["recipients"]:
to_email = recipient["address"]["email"]
if to_email in merge_data:
recipient["substitution_data"] = merge_data[to_email]
def set_merge_global_data(self, merge_global_data):
self.data["substitution_data"] = merge_global_data
# ESP-specific payload construction
def set_esp_extra(self, extra):
update_deep(self.data, extra)
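# Illustrative Django wiring (a sketch, not part of this module; the API key is a
# placeholder):
#
#   # settings.py
#   EMAIL_BACKEND = "anymail.backends.sparkpost.EmailBackend"
#   ANYMAIL = {"SPARKPOST_API_KEY": "<key>"}
#
#   # anywhere in the project
#   from django.core.mail import send_mail
#   send_mail("Subject", "Body", "from@example.com", ["to@example.com"])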
|
import sqlite3
from .User import User
def createUserFromResponse(response):
user = User(response[1])
user.ID = response[0]
user.tg_name = response[2]
user.tg_nickname = response[3]
user.fname = response[4]
user.lname = response[5]
user.name = response[6]
user.score = response[7]
user.second_score = response[8]
user.status = response[9]
user.taskid_in_progress = response[10]
return user
class UsersDB:
"""docstring for Users"""
def __init__(self, dbFile="Users.sqlite3"):
self.mainTableName = 'users'
self.dbFile = dbFile
self.conn = None
self.lastID = 0
self.maxLengthOfName = 16
def commit(self):
self.conn.commit()
def connect(self):
self.conn = sqlite3.connect(self.dbFile)
def createTable(self):
request = 'CREATE TABLE IF NOT EXISTS ' + self.mainTableName + '(' + \
'id INT NOT NULL PRIMARY KEY,' + \
'tg_id TEXT UNIQUE,' + \
'tg_name TEXT,' + \
'tg_nickname TEXT,' + \
'fname TEXT,' + \
'lname TEXT,' + \
'name TEXT,' + \
'score INT NOT NULL,' + \
'second_score INT NOT NULL,' + \
'status INT NOT NULL,' + \
'taskid_in_progress INT' + \
')'
cursor = self.conn.cursor()
cursor.execute(request)
self.commit()
def getLastID(self):
request = 'SELECT max(id) FROM ' + self.mainTableName
cursor = self.conn.cursor()
cursor.execute(request)
lastID = cursor.fetchone()[0]
if lastID is None:
lastID = 0
return lastID
def isExistsID(self, ID):
request = 'SELECT id FROM ' + self.mainTableName + ' WHERE id = ?'
cursor = self.conn.cursor()
cursor.execute(request, (ID,))
response = cursor.fetchone()
if response is None:
    return False
return response[0] == ID
def getUsersBy(self, request, value):
cursor = self.conn.cursor()
cursor.execute(request, value)
response = cursor.fetchall()
# print(response)
if len(response) == 0:
return [None]
else:
users = []
for userTuple in response:
users.append(createUserFromResponse(userTuple))
return users
def getUserById(self, ID):
request = 'SELECT * FROM ' + self.mainTableName + ' WHERE id = ?'
return self.getUsersBy(request, (ID,))[0]
def getUserByTgId(self, tg_id):
request = 'SELECT * FROM ' + self.mainTableName + ' WHERE tg_id = ?'
return self.getUsersBy(request, (tg_id,))[0]
def start(self):
self.connect()
self.createTable()
def createAndAddNewUser(self, tg_id, tg_name=None, tg_nickname=None):
tg_id = str(tg_id)
tg_name = str(tg_name)
tg_nickname = str(tg_nickname)
ID = self.getLastID() + 1
data = (ID, tg_id, tg_name, tg_nickname, 0, 0, 10)
request = 'INSERT INTO ' + self.mainTableName + \
'(id, tg_id, tg_name, tg_nickname, score, second_score, status)' + \
'VALUES(?, ?, ?, ?, ?, ?, ?)'
cursor = self.conn.cursor()
cursor.execute(request, data)
self.commit()
user = self.getUserById(ID)
return user
def _updateValue(self, ID, _valueName, newValue):
request = 'UPDATE ' + self.mainTableName + ' SET ' + _valueName + ' = ?' + \
' WHERE id = ?'
cursor = self.conn.cursor()
cursor.execute(request, (newValue, ID))
self.commit()
def updateUser(self, user):
userID = user.getID()
existingUser = self.getUserById(userID)
if existingUser is None:
return False
if existingUser.tg_name != user.tg_name:
self._updateValue(userID, 'tg_name', user.tg_name)
if existingUser.tg_nickname != user.tg_nickname:
self._updateValue(userID, 'tg_nickname', user.tg_nickname)
if existingUser.fname != user.fname:
self._updateValue(userID, 'fname', user.fname)
if existingUser.lname != user.lname:
self._updateValue(userID, 'lname', user.lname)
if existingUser.name != user.name:
self._updateValue(userID, 'name', user.name)
if existingUser.score != user.score:
self._updateValue(userID, 'score', user.score)
if existingUser.second_score != user.second_score:
self._updateValue(userID, 'second_score', user.second_score)
if existingUser.status != user.status:
self._updateValue(userID, 'status', user.status)
if existingUser.taskid_in_progress != user.taskid_in_progress:
self._updateValue(userID, 'taskid_in_progress', user.taskid_in_progress)
def updateUserStatus(self, user):
userID = user.getID()
existingUser = self.getUserById(userID)
if existingUser is None:
return False
if existingUser.status != user.status:
self._updateValue(userID, 'status', user.status)
def top10byScoreDict(self):
request = 'SELECT id, score FROM ' + self.mainTableName + ' ORDER BY score DESC LIMIT 10'
cursor = self.conn.cursor()
cursor.execute(request)
response = cursor.fetchall()
print(response)
if len(response) == 0:
return None
else:
d = {}
# id: score
for x in response:
d[x[0]] = x[1]
return d
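# Hedged usage sketch (not part of the original module): the intended call
# order is connect, create the table, insert a user, then mutate the object
# and persist the changed columns. The Telegram id and names are made up.
def _demo_users_db():
    db = UsersDB(dbFile="Users.sqlite3")
    db.start()                                     # connect() + createTable()
    user = db.createAndAddNewUser(123456, tg_name="Alice", tg_nickname="alice")
    user.score += 10                               # edit the in-memory object
    db.updateUser(user)                            # writes only the changed columns
    return db.getUserByTgId("123456")              # tg_id is stored as TEXT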
|
import torch
import torch.nn as nn
import numpy as np
import torchvision.models as models
from torch.utils.data import DataLoader
from tqdm import tqdm
import torchvision.transforms as T
import torchvision.utils as vutils
from PIL import Image
import os
from torch.utils.tensorboard import SummaryWriter
import config
from models import Generator, Discriminator, init_weights
from dataset import TrainDataset
class ContentLoss(nn.Module):
def __init__(self):
super().__init__()
self.vgg19 = models.vgg19(pretrained = True).features[:35].eval().to(config.DEVICE)
self.loss = nn.MSELoss()
for params in self.vgg19.parameters():
params.requires_grad =False
def forward(self, ip_img: torch.Tensor, target_image: torch.Tensor) -> torch.Tensor:
ip_features = self.vgg19(ip_img)
target_features = self.vgg19(target_image)
vgg_loss = self.loss(ip_features, target_features)
return vgg_loss
def gradient_penalty(disc, real, fake):
batch_size, c, h, w = real.shape
alpha = torch.rand((batch_size, 1, 1, 1)).repeat(1, c, h, w).to(config.DEVICE)
interpolated_imgs = (real * alpha) + (fake.detach() * (1 - alpha))
interpolated_imgs.requires_grad_(True)
disc_scores = disc(interpolated_imgs)
gradient = torch.autograd.grad(inputs = interpolated_imgs, outputs = disc_scores, grad_outputs = torch.ones_like(disc_scores),
create_graph = True, retain_graph = True)[0]
gradient = gradient.view(gradient.shape[0], -1)
norm = gradient.norm(2, dim = 1)
penalty = torch.mean((norm - 1) ** 2)
return penalty
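# Hedged usage sketch (not from the original training loop): how the WGAN-GP
# term above is typically combined with the critic loss. The tiny linear
# critic and the 4x3x8x8 tensor shapes are assumptions for illustration only.
def _demo_gradient_penalty():
    critic = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 1)).to(config.DEVICE)
    real = torch.rand(4, 3, 8, 8, device=config.DEVICE)
    fake = torch.rand(4, 3, 8, 8, device=config.DEVICE)
    gp = gradient_penalty(critic, real, fake)
    # Critic pushes real scores up and fake scores down; LAMBDA_GP weights the penalty.
    return -(torch.mean(critic(real)) - torch.mean(critic(fake))) + config.LAMBDA_GP * gp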
def psnrTrain(loader, gen, psnr_opt, l1_loss, mse_loss,
g_scalar, psnr_writer, psnr_step):
gen.train()
total_psnr = 0.0
loop = tqdm(loader, leave = True, position = 0)
for idx, (hr, lr) in enumerate(loop):
hr = hr.to(config.DEVICE)
lr = lr.to(config.DEVICE)
psnr_opt.zero_grad()
with torch.cuda.amp.autocast():
sr = gen(lr)
loss = l1_loss(sr, hr)
total_psnr += 10 * torch.log10(1.0 / mse_loss(sr, hr))
g_scalar.scale(loss).backward()
g_scalar.step(psnr_opt)
g_scalar.update()
psnr = total_psnr/len(loader)
psnr_writer.add_scalar('PSNR Train/PSNR', psnr, psnr_step)
psnr_writer.add_scalar('PSNR Train/Pixel_loss', loss.item(), psnr_step)
psnr_step += 1
print("\nPSNR: ", psnr)
return psnr_step
def train(loader, gen, disc, g_opt, d_opt, l1_loss, content_loss,
mse_loss, g_scalar, d_scalar, writer, step):
loop = tqdm(loader, leave = True, position = 0)
total_psnr = 0.0
for idx, (hr, lr) in enumerate(loop):
gen.train()
hr = hr.to(config.DEVICE)
lr = lr.to(config.DEVICE)
with torch.cuda.amp.autocast():
fake = gen(lr)
disc_real = disc(hr)
disc_fake = disc(fake.detach())
gp = gradient_penalty(disc, hr, fake)
disc_loss = (-(torch.mean(disc_real) - torch.mean(disc_fake)) + config.LAMBDA_GP * gp)
d_opt.zero_grad()
d_scalar.scale(disc_loss).backward()
d_scalar.step(d_opt)
d_scalar.update()
with torch.cuda.amp.autocast():
pixel_loss = l1_loss(fake, hr)
total_psnr += 10 * torch.log10(1.0 / mse_loss(fake, hr))
cont_loss = content_loss(fake, hr)
gan_loss = (-torch.mean(disc(fake)))
gen_loss = 0.01 * pixel_loss + cont_loss + 0.005 * gan_loss
g_opt.zero_grad()
g_scalar.scale(gen_loss).backward()
g_scalar.step(g_opt)
g_scalar.update()
gen.eval()
psnr = total_psnr/len(loader)
writer.add_scalar('PSNR/PSNR Score', psnr, global_step = step)
writer.add_scalar("disc_loss/Discriminator loss", disc_loss.item(), global_step = step)
writer.add_scalar("Gen_losses/Generator loss", gen_loss.item(), global_step = step)
writer.add_scalar("Gen_losses/Pixel loss", pixel_loss.item(), global_step = step)
writer.add_scalar("Gen_losses/Content loss", cont_loss.item(), global_step = step)
writer.add_scalar("Gen_losses/Adversarial loss", gan_loss.item(), global_step = step)
step += 1
return step
def main():
dataset = TrainDataset(config.TRAIN_DATA_PATH)
loader = DataLoader(dataset, batch_size = config.BATCH_SIZE, shuffle = True,
pin_memory = True, num_workers = 2)
gen = Generator(in_c = 3, out_c = 64).to(config.DEVICE)
disc = Discriminator().to(config.DEVICE)
init_weights(gen)
fixed_lr = T.Compose([T.Resize((32, 32), interpolation=T.InterpolationMode.BICUBIC), T.ToTensor()])(Image.open(config.FIXED_LR_PATH)).unsqueeze(0)
fixed_lr = fixed_lr.to(config.DEVICE)
fixed_lr1 = T.Compose([T.Resize((32, 32), interpolation=T.InterpolationMode.BICUBIC), T.ToTensor()])(Image.open(config.FIXED_LR1_PATH)).unsqueeze(0)
fixed_lr1 = fixed_lr1.to(config.DEVICE)
psnr_opt = torch.optim.Adam(gen.parameters(), lr = config.PSNR_OPT_LR, betas = (0.9, 0.999))
psnr_writer = SummaryWriter(config.PSNR_LOGS_PATH)
psnr_step = 0
writer = SummaryWriter(config.GAN_LOGS_PATH)
step = 0
g_opt = torch.optim.Adam(gen.parameters(), lr = config.LR, betas = (0.9, 0.999))
d_opt = torch.optim.Adam(disc.parameters(), lr = config.LR, betas = (0.9, 0.999))
g_scalar = torch.cuda.amp.GradScaler()
d_scalar = torch.cuda.amp.GradScaler()
l1_loss = nn.L1Loss()
content_loss = ContentLoss()
mse_loss = nn.MSELoss()
gen.train()
disc.train()
print("Starting PSNR Training...")
#Psnr Training
for epoch in range(100):
print(f"Epoch: {epoch} \n")
psnr_step = psnrTrain(loader, gen, psnr_opt, l1_loss, mse_loss, g_scalar, psnr_writer, psnr_step)
torch.save(gen.state_dict(), config.PSNR_GEN_PATH)
gen.eval()
with torch.no_grad():
sr = gen(fixed_lr)
sr1 = gen(fixed_lr1)
psnr_writer.add_image('PSNR/real', fixed_lr.squeeze(0).cpu(), global_step = psnr_step)
psnr_writer.add_image('PSNR/fake', sr.squeeze(0).cpu(), global_step = psnr_step)
psnr_writer.add_image('PSNR/real1', fixed_lr1.squeeze(0).cpu(), global_step = psnr_step)
psnr_writer.add_image('PSNR/fake1', sr1.squeeze(0).cpu(), global_step = psnr_step)
gen.load_state_dict(torch.load(config.PSNR_GEN_PATH))
#GAN Training
print("Starting GAN Training...")
for epoch in range(config.NUM_EPOCHS):
print(f"Epoch: {epoch} \n")
step = train(loader, gen, disc, g_opt, d_opt, l1_loss, content_loss,
mse_loss, g_scalar, d_scalar, writer, step)
with torch.no_grad():
gen_img = gen(fixed_lr)
gen_img1 = gen(fixed_lr1)
vutils.save_image(gen_img.detach(), open(config.SAVE_IMG1_PATH + f'img{epoch}.png', 'wb'), normalize = True)
vutils.save_image(gen_img1.detach(), open(config.SAVE_IMG2_PATH + f'img{epoch}.png', 'wb'), normalize = True)
gen.train()
writer.add_image("Real/Image1", fixed_lr.squeeze(0), global_step = step)
writer.add_image("Real/Image2", fixed_lr1.squeeze(0), global_step = step)
writer.add_image("Generated Images/image1", gen_img.squeeze(0), global_step = step)
writer.add_image("Generated Images/image2", gen_img1.squeeze(0), global_step = step)
torch.save(gen.state_dict(), config.GEN_PATH)
torch.save(disc.state_dict(), config.DISC_PATH)
if __name__ == '__main__':
main() |
<filename>main.py
import os
import sys
import time
import colorama
import pyfiglet
from colorama import Fore
import json
import subprocess
os.system(' clear ')
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
curdir = os.getcwd()
def load_animation():
load_str = "Loading DBROWSER."
ls_len = len(load_str)
time.sleep(1)
animation = "|/-\\|/-\|/-/"
anicount = 0
counttime = 0
i = 0
while (counttime != 30):
time.sleep(0.075)
load_str_list = list(load_str)
x = ord(load_str_list[i])
y = 0
if x != 32 and x != 46:
if x>90:
y = x-32
else:
y = x + 32
load_str_list[i]= chr(y)
res =''
for j in range(ls_len):
res = res + load_str_list[j]
sys.stdout.write("\r"+res + animation[anicount])
sys.stdout.flush()
load_str = res
anicount = (anicount + 1)% 4
i =(i + 1)% ls_len
counttime = counttime + 1
else:
os.system("clear")
# Driver program
if __name__ == '__main__':
load_animation()
def screen_clear():
    if os.name == 'nt':
        _ = os.system('cls')
    else:
        _ = os.system('clear')
def CS(X): # CS = clear sleep
time.sleep(X)
os.system("clear")
print(Fore.RED+"")
banner = pyfiglet.figlet_format("DBROW", font = "isometric1" )
CS(2)
print(banner)
print(Fore.RED+" WelC0me to DBROW the hackers browser ")
print(" V2.0")
print(" [1] Just browse the net ")
print(" [2] go to my index for my website ")
print(" [3] just browse google ")
print(" [4] view supported websites ")
print(" [5] see whats new for vserions 2.0 ")
print("--------------------------------------------------------------------------------------------")
# run.py = index
# rung = run google
# run1 = run duckduckgo
N = str(input(" Options ===> "))
####### BROWSE DUCKDUCKGO ###########
if '5' == N:
CS(2)
print(banner)
print("-"*40)
time.sleep(1)
print(" fixxed web bugs and index bugs ")
time.sleep(1)
print(" added netsniff-ng as a packte monitor ")
time.sleep(1)
print(" added bash scripts for setup ")
time.sleep(1)
print(" added automation ")
time.sleep(1)
print(" added json files for easier loads and time ")
time.sleep(1)
print(" added newer websites that are supported for the browser itself ")
time.sleep(1)
print(" updated CSS ")
time.sleep(1)
print(" added bash script that checks for required packages ")
time.sleep(1)
print(" added new terminal for gnome where it opens a new term for netsniff ")
time.sleep(1)
print(" added termcolor ")
time.sleep(1)
print(" added more proxies into the browser ")
time.sleep(1)
print(" added a few more lines and took away input for if proxychains is installed ")
time.sleep(1)
print(" [!] stay tunned for further updates on the browser [!] ")
restart_program()
if '4' == N:
F = open('links.json','r+',encoding='utf-8') # open encoding
data = json.load(F) #load the file
for x in data['prints']:
print(x)  # print each entry, in this case the supported-site list
time.sleep(0.1)
restart_program()
if '1' == N:
time.sleep(1)
print(" [!] running Dark browser [!] ")
time.sleep(3)
Yn = str(input(" would you like to run proxys along side Netsniff-ng Y/n? "))
time.sleep(1)
if 'n' in Yn:
time.sleep(1)
print(" alright then running browser ")
CS(2)
print(banner)
os.system(' sudo python3 run1.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
print(" would you like to view the cap file from netsniff? ")
V = str(input(" Y/n: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' clear' )
os.system(' sudo wireshark pack.pcap ')
print(" Have a ncie one ")
sys.exit()
if 'n' in V:
CS(2)
print(" have a nice one :D [!] ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
if 'Y' == Yn:
time.sleep(1)
print(" [=] alright then running browser with proxychains and tor service ")
CS(2)
print(banner)
os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
os.system(' sudo service tor start && proxychains python3 run1.py')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
print(" would you like to view the cap file from netsniff? ")
V = str(input(" Y/n: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' clear' )
os.system(' sudo wireshark pack.pcap ')
print(" Have a ncie one ")
sys.exit()
if 'n' in V:
CS(2)
print(" have a nice one :D [!] ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
##############################################
###########BROWSE GOOGLE
elif '3' == N:
time.sleep(1)
Yn = str(input(" Would you like to use proxies Y/n? "))
if 'Y' in Yn:
CS(2)
time.sleep(1)
print(banner)
print(" [=] alright then running browser [=] ")
os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
os.system(' sudo service tor start && proxychains python3 rung.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
CS(2)
print(" would you like to view the cap file from netsniff? ")
V = str(input(" Y/n: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' clear' )
os.system(' sudo wireshark pack.pcap ')
print(" Have a ncie one ")
sys.exit()
if 'n' in V:
CS(2)
print(" have a nice one :D [!] ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
if 'n' in Yn:
CS(2)
time.sleep(1)
print(banner)
os.system(' python3 rung.py ')
os.system(' clear ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
sys.exit()
###################BROWSE MTY INDEX################
elif '2' == N:
time.sleep(1)
Yn = str(input(" Would you like to use proxies Y/n? "))
print(" [!] running Dark browser [!] ")
if 'Y' in Yn:
CS(2)
print(banner)
print(" [=] alright then running script [=] ")
time.sleep(1)
os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
os.system(' sudo service tor start && proxychains python3 run.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
CS(2)
print(" would you like to view the cap file from netsniff? ")
V = str(input(" You: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" Thanks for stopping by :D [+] ")
sys.exit()
elif 'n' in V:
time.sleep(1)
print(" have a nice one :D [+] ")
sys.exit()
if 'n' == Yn:
CS(2)
print(" [=] alright then running script [=] ")
print(banner)
time.sleep(1)
os.system(' python3 run.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
else:
print(" [!] that doesnt seem to be a command ")
restart_program()
|
"""
Intro
-----
- For general context and class diagram, refer to :mod:`~sanskrit_data.schema`.
"""
import logging
import sys
from sanskrit_data.schema import common
from sanskrit_data.schema.common import JsonObject, recursively_merge_json_schemas, TYPE_FIELD, update_json_class_index
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
class UserPermission(JsonObject):
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["UserPermission"]
},
"service": {
"type": "string",
"description": "Allowable values should be predetermined regular expressions."
},
"actions": {
"type": "array",
"items": {
"type": "string",
"enum": ["read", "write", "admin"],
},
"description": "Should be an enum in the future."
},
},
}
)
@classmethod
def from_details(cls, service, actions):
obj = UserPermission()
obj.service = service
obj.actions = actions
return obj
def hash_password(plain_password):
import bcrypt
# (Using bcrypt, the salt is saved into the hash itself)
return bcrypt.hashpw(plain_password.encode(encoding='utf8'), bcrypt.gensalt()).decode(encoding='utf8')
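# Hedged illustration (not part of the original module): round-trip of the
# bcrypt helper above — hash a plaintext secret, then verify it the same way
# AuthenticationInfo.check_password does below. The password value is made up.
def _demo_password_roundtrip():
    import bcrypt
    hashed = hash_password("s3cret-example")
    return bcrypt.checkpw("s3cret-example".encode(encoding='utf8'), hashed.encode(encoding='utf8'))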
class AuthenticationInfo(JsonObject):
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["AuthenticationInfo"]
},
"auth_user_id": {
"type": "string"
},
"auth_provider": {
"type": "string",
"enum": ["google", "vedavaapi"]
},
"auth_secret_bcrypt": {
"type": "string",
"description": "This should be hashed, and merits being stored in a database."
},
"auth_secret_plain": {
"type": "string",
"description": "This should NEVER be set when stored in a database; but is good for client-server transmission purposes."
}
}
}
)
VEDAVAAPI_AUTH = "vedavaapi"
def __str__(self):
return self.auth_provider + "____" + self.auth_user_id
def check_password(self, plain_password):
# Check hashed password. Using bcrypt, the salt is saved into the hash itself
import bcrypt
return bcrypt.checkpw(plain_password.encode(encoding='utf8'), self.auth_secret_bcrypt.encode(encoding='utf8'))
@classmethod
def from_details(cls, auth_user_id, auth_provider, auth_secret_hashed=None):
obj = AuthenticationInfo()
obj.auth_user_id = auth_user_id
obj.auth_provider = auth_provider
if auth_secret_hashed:
obj.auth_secret_hashed = auth_secret_hashed
return obj
def set_bcrypt_password(self):
if getattr(self, "auth_secret_plain", None) is not None and self.auth_secret_plain != "":
# noinspection PyAttributeOutsideInit
self.auth_secret_bcrypt = hash_password(plain_password=self.auth_secret_plain)
delattr(self, "auth_secret_plain")
def validate_schema(self):
super(AuthenticationInfo, self).validate_schema()
from jsonschema import ValidationError
self.set_bcrypt_password()
if getattr(self, "auth_secret_hashed", None) is not None and (self.auth_secret_hashed == ""):
raise ValidationError(message="auth_secret_hashed should be non-empty if present.")
class User(JsonObject):
"""Represents a user of our service."""
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["User"]
},
"user_type": {
"type": "string",
"enum": ["human", "bot"]
},
"authentication_infos": {
"type": "array",
"items": AuthenticationInfo.schema,
},
"permissions": {
"type": "array",
"items": UserPermission.schema,
},
},
}
)
@classmethod
def from_details(cls, user_type, auth_infos, permissions=None):
obj = User()
obj.authentication_infos = auth_infos
obj.user_type = user_type
if permissions:
obj.permissions = permissions
return obj
def validate_schema(self):
super(User, self).validate_schema()
def check_permission(self, service, action):
def fullmatch(pattern, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
import re
return re.match("(?:" + pattern + r")\Z", string, flags=flags)
if getattr(self, "permissions", None) is not None:
for permission in self.permissions:
if fullmatch(pattern=permission.service, string=service):
for permitted_action in permission.actions:
if fullmatch(pattern=permitted_action, string=action):
return True
return False
def is_admin(self, service):
return self.check_permission(service=service, action="admin")
def is_human(self):
return getattr(self, "user_type", None) is not None and self.user_type == "human"
def get_user_ids(self):
return [str(auth_info) for auth_info in self.authentication_infos]
def get_first_user_id_or_none(self):
user_ids = self.get_user_ids()
if len(user_ids) > 0:
return user_ids[0]
else:
return None
# Essential for depickling to work.
update_json_class_index(sys.modules[__name__])
logging.debug(common.json_class_index)
|
import praw
import urllib.request
import requests
import os
import sys
from tqdm import tqdm
import argparse
from datetime import datetime
# program usage: py main.py [-u] [user/subreddit] [sort category] [# posts] ['directory']
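# For example (hypothetical names and paths, shown only to illustrate the
# argument order parsed by argparse in main() below):
#   py main.py -u spez top 25 ./downloads   -> 25 top image posts from user "spez"
#   py main.py pics new 50                  -> 50 new image posts from r/pics into the cwd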
def download(posts, num, folder):
inter = False
try:
for submission in tqdm(posts, total=num):
index = submission.rfind('/')
url = submission[index + 1:]
if submission.endswith('.gifv'):
submission = submission.replace('.gifv', '.mp4')
url = url.replace('.gifv', '.mp4')
try:
urllib.request.urlretrieve(submission, url)
except:
print('Error while downloading image. Process aborted.')
inter = True
sys.exit()
except KeyboardInterrupt:
print('Program terminated')
sys.exit()
except:
if inter:
sys.exit()
os.chdir('..')
os.rmdir(folder)
def create_folder(user, location, directory):
try:
os.chdir(directory)
except OSError:
print('Error in directory path. Default to current directory.')
if user:
path = str(datetime.today().strftime('%Y-%m-%d')) + ' user-' + str(location)
else:
path = str(datetime.today().strftime('%Y-%m-%d')) + ' subreddit-' + str(location)
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
else:
os.chdir(path)
return path
def get_posts(reddit, user, location, category):
if user:
if category == 'hot':
post = reddit.redditor(str(location)).submissions.hot(limit=1000)
elif category == 'new':
post = reddit.redditor(str(location)).submissions.new(limit=1000)
else:
post = reddit.redditor(str(location)).submissions.top(limit=1000)
else:
if category == 'hot':
post = reddit.subreddit(str(location)).hot(limit=1000)
elif category == 'new':
post = reddit.subreddit(str(location)).new(limit=1000)
else:
post = reddit.subreddit(str(location)).top(limit=1000)
return post
def validate(posts, num_posts):
formats = ('.jpg', '.png', '.gif', '.gifv')
sites = []
cnt = 0
try:
for post in posts:
if cnt == int(num_posts):
break
if post.url.endswith(formats):
sites.append(post.url)
cnt += 1
except KeyboardInterrupt:
print('Program Terminated')
sys.exit()
except:
print('User/Subreddit does not exist')
sys.exit()
return sites
def main():
owd = os.getcwd()
parser = argparse.ArgumentParser(description='Download pics')
parser.add_argument('-u', '--user', action='store_true', help='Indicates user profile')
parser.add_argument('location', help='Download location')
parser.add_argument('category', choices=['hot', 'new', 'top'], help='Sort category')
parser.add_argument('posts', type=int, help='Number of posts to download')
parser.add_argument('directory', nargs='?', default='.', help='Directory location')
args = parser.parse_args()
reddit = praw.Reddit('redditDL')
posts = get_posts(reddit, args.user, args.location, args.category)
valid_links = validate(posts, args.posts)
folder = create_folder(args.user, args.location, args.directory)
download(valid_links, args.posts, folder)
os.chdir(owd)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Program terminated')
quit()
|
<filename>pdftools.py
import subprocess
import os
import math
from queue import Queue
from pathlib import Path
from PIL import Image
def locate_mutool():
"""
Checks for the presence of mutool and if found, returns the Path
object for it. If not it returns False.
"""
f1, f2 = Path('./mutool'), Path('./mutool.exe')
if f1.exists():
return f1
elif f2.exists():
return f2
return False
def ensure_output_path(outputPath):
"""
TODO: Write documentation
"""
if not outputPath.exists():
outputPath.mkdir(parents=True)
def clean_output_dir(outputPath):
"""
TODO: Write documentation
"""
files = os.listdir(outputPath)
for name in files:
f = outputPath / name
if not f.is_file():
continue
f.unlink()
def get_pages(outputPath, spaces=4, fileType='png', useStoredImages=False):
"""
TODO: Write documentation
"""
pages = []
def _addPage(slide, notes, pacing):
nonlocal pages
nonlocal useStoredImages
if not useStoredImages:
slide = Image.open(slide)
slide.load()
if type(notes) == str:
notes = Image.open(notes)
notes.load()
pages.append({
'slide': slide,
'notes': notes,
'pacing': pacing
})
files = os.listdir(outputPath)
files.sort()
for name in files:
f = outputPath / name
if (
not f.is_file()
or f.suffix != f'.{fileType}'
or f.stem.endswith('-2')
):
continue
pacing = 120
slide = str(f)
notes = None
if f.stem.endswith('-1'):
notes = slide.replace('-1', '-2')
_addPage(slide, notes, pacing)
return pages
def convert_pdf_to_images(
inputPath, outputPath, spaces=4, fileType='png', dpi=300
):
"""
TODO: Write documentation
"""
subprocess.run([
'mutool', 'draw',
'-o', str(outputPath / f'%0{spaces}d.{fileType}'),
'-r', f'{dpi}',
str(inputPath)
], stderr=subprocess.PIPE)
def prepare_images(outputPath, fileType='png', splitInHalf=True):
"""
TODO: Write documentation
"""
if not splitInHalf:
return
files = os.listdir(outputPath)
for name in files:
f = outputPath / name
if not f.is_file() or f.suffix != f'.{fileType}':
continue
try:
im = Image.open(f)
if not splitInHalf:
continue
elif f.stem.endswith('-1') or f.stem.endswith('-2'):
# Images have already been split and are stored as files
continue
x1 = 0
y1 = 0
w = im.width
h = im.height
if w > h:
x2 = math.floor(w / 2) - 1
y2 = h
else:
x2 = w
y2 = math.floor(h / 2) - 1
box = (x1, y1, x2, y2)
part1 = im.crop(box)
if w > h:
x1 = x2 + 1
x2 = w
else:
y1 = y2 + 1
y2 = h
box = (x1, y1, x2, y2)
part2 = im.crop(box)
part1.save(outputPath.joinpath(f.stem + '-1' + f.suffix))
part2.save(outputPath.joinpath(f.stem + '-2' + f.suffix))
f.unlink()
except IOError:
print(f'Something went wrong while processing {f}')
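# Hedged end-to-end sketch (not part of the original module): typical order of
# the helpers above for a two-column slides-plus-notes PDF. 'deck.pdf' and
# './out' are made-up example paths; mutool must be available for the render step.
def _demo_pdf_pipeline():
    out = Path('./out')
    ensure_output_path(out)
    clean_output_dir(out)
    convert_pdf_to_images(Path('deck.pdf'), out, dpi=150)
    prepare_images(out, splitInHalf=True)          # wide pages become '-1'/'-2' halves
    return get_pages(out)                          # [{'slide', 'notes', 'pacing'}, ...]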
|
# Create a hash table with add/delete/lookup methods
class HashMap:
"""
Hash map inits with a list of a given size (m);
Keywords: map_size (default = 17)
"""
def __init__(self, map_size=17):
self.map_size = map_size
self.hash_map = self.create_map(map_size)
def create_map(self, map_size):
"""
Creates list "map" of given map_size
arguments: map_size
returns: list
"""
map = []
for i in range(map_size):
map.append(None)
return map
def hash(self, key):
"""
For a given key, hashes the key into an int value
Keywords: key
returns: int
"""
ascii_sum = 0
if type(key) == int:
    ascii_sum += key  # equivalent to ord(chr(key)) for valid code points
else:
for item in key:
ascii_sum += ord(item)
return ascii_sum
def get_index(self, key):
"""
For a given key, invokes hash() and returns an index value n-1
where n is the size of the Hash Map
keywords: key
returns: int
"""
return self.hash(key) % self.map_size
def insert(self, key, value):
"""
For given (key, value), inserts (key, value) tuple into map.
If no item exists, (key, value) replaces None
If a collision occurs, creates list of tuples and appends list
as needed
returns: None
"""
index = self.get_index(key)
current_value = self.hash_map[index]
if current_value is None:
self.hash_map[index] = (key, value)
elif isinstance(current_value, tuple):
# if not list, but current value, then must be single tuple
if current_value[0] == key:
# If keys and value are the same, break
if current_value[1] == value:
pass
else: # if just keys are the same, update value to new value
self.hash_map[index] = (key, value)
else: # turn tuple into a list of tuples
self.hash_map[index] = [current_value, (key, value)]
else: # if current value already exists as a list
for i in range(len(current_value)):
# if (key, value) already exist, pass
if current_value[i][0] == key and current_value[i][1] == value:
break
# if key already exists, but new value, update (key, value)
elif current_value[i][0] == key:
self.hash_map[index][i] = (key, value)
break
else:
pass
else: # else, append list with new (key, value)
self.hash_map[index].append((key, value))
def remove(self, key):
index = self.get_index(key)
delete_value = self.hash_map[index]
if delete_value is None: # if default_value is None
raise KeyError('No key: {}'.format(key))
elif isinstance(delete_value, list):
for i in range(len(delete_value)):
if delete_value[i][0] == key:
del(self.hash_map[index][i])
break
else:
raise KeyError('No key: {}'.format(key))
else: # if tuple
if self.hash_map[index][0] == key:
self.hash_map[index] = None
else:
raise KeyError('No key: {}'.format(key))
if not delete_value:
self.hash_map[index] = None
def lookup(self, key):
index = self.get_index(key)
lookup_value = self.hash_map[index]
if lookup_value is None:
raise KeyError('No key: {}'.format(key))
elif isinstance(lookup_value, list):
for each in lookup_value:
if each[0] == key:
return each[1]
else:
raise KeyError('No key: {}'.format(key))
else:
if key == lookup_value[0]:
return lookup_value[1]
else:
raise KeyError('No key: {}'.format(key))
def __str__(self):
def print_bucket_summary():
return "Hashmap with {} buckets.\n\n".format(len(self.hash_map))
def print_map():
string = ""
for i in range(len(self.hash_map)):
string += "{0} | {1}\n".format(i, self.hash_map[i])
return string
return print_bucket_summary() + print_map()
def __repr__(self):
return str(self.hash_map)
def __len__(self):
return len(self.hash_map)
def __getitem__(self, position):
return self.hash_map[position]
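# Hedged usage sketch (not part of the original module): basic insert/lookup/
# remove plus a deliberate collision — "ab" and "ba" have the same character
# sum, so the second insert turns that bucket into a list of tuples.
def _demo_hash_map():
    hm = HashMap(map_size=17)
    hm.insert("ab", 1)
    hm.insert("ba", 2)            # collides with "ab"
    assert hm.lookup("ab") == 1 and hm.lookup("ba") == 2
    hm.remove("ab")
    return str(hm)                # __str__ prints one line per bucket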
|
import json
import numpy as np
class AbstractBenchmark:
"""
Abstract template for benchmark classes
"""
def __init__(self, config_path=None):
"""
Initialize benchmark class
Parameters
-------
config_path : str
Path to load configuration from (if read from file)
"""
if config_path:
self.config_path = config_path
self.read_config_file(self.config_path)
else:
self.config = None
def get_config(self):
"""
Return current configuration
Returns
-------
dict
Current config
"""
return self.config
def save_config(self, path):
"""
Save configuration to .json
Parameters
----------
path : str
File to save config to
"""
conf = self.config.copy()
if "observation_space_type" in self.config:
conf["observation_space_type"] = f"{self.config['observation_space_type']}"
for k in self.config.keys():
if isinstance(self.config[k], np.ndarray) or isinstance(
self.config[k], list
):
if type(self.config[k][0]) == np.ndarray:
conf[k] = list(map(list, conf[k]))
for i in range(len(conf[k])):
if (
not type(conf[k][i][0]) == float
and np.inf not in conf[k][i]
and -np.inf not in conf[k][i]
):
conf[k][i] = list(map(int, conf[k][i]))
with open(path, "w") as fp:
json.dump(conf, fp)
def read_config_file(self, path):
"""
Read configuration from file
Parameters
----------
path : str
Path to config file
"""
with open(path, "r") as fp:
self.config = objdict(json.load(fp))
if "observation_space_type" in self.config:
# Types have to be numpy dtypes (for gym spaces)
if type(self.config["observation_space_type"]) == str:
typestring = self.config["observation_space_type"].split(" ")[1][:-2]
typestring = typestring.split(".")[1]
self.config["observation_space_type"] = getattr(np, typestring)
for k in self.config.keys():
if type(self.config[k]) == list:
if type(self.config[k][0]) == list:
    self.config[k] = list(map(np.array, self.config[k]))
self.config[k] = np.array(self.config[k])
def get_environment(self):
"""
Make benchmark environment
Returns
-------
env : gym.Env
Benchmark environment
"""
raise NotImplementedError
def set_seed(self, seed):
"""
Set environment seed
Parameters
----------
seed : int
New seed
"""
self.config["seed"] = seed
def set_action_space(self, kind, args):
"""
Change action space
Parameters
----------
kind : str
Name of action space class
args: list
List of arguments to pass to action space class
"""
self.config["action_space"] = kind
self.config["action_space_args"] = args
def set_observation_space(self, kind, args, data_type):
"""
Change observation_space
Parameters
----------
config : str
Name of observation space class
args : list
List of arguments to pass to observation space class
data_type : type
Data type of observation space
"""
self.config["observation_space"] = kind
self.config["observation_space_args"] = args
self.config["observation_space_type"] = data_type
# This code is taken from https://goodcode.io/articles/python-dict-object/
class objdict(dict):
"""
Modified dict to make config changes more flexible
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
|
# -*- coding: utf-8 -*-
"""A Python library that understands the TUIO protocol"""
__author__ = "<NAME>, <NAME>"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2007-2008 <NAME>, <NAME>"
__license__ = "MIT"
__url__ = "http://code.google.com/p/pytuio/"
import os
import sys
import math
import socket
import inspect
import OSC
import profiles
import observer
class CallbackError(Exception):
pass
class Tracking(object):
# Implements Tracking as a singleton
def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'self'):
cls.self = object.__new__(cls)
return cls.self
def __init__(self, host='127.0.0.1', port=3333):
self.host = host
self.port = port
self.current_frame = 0
self.last_frame = 0
self.open_socket()
self.manager = OSC.CallbackManager()
self.profiles = self.load_profiles()
self.eventManager = observer.EventManager()
def open_socket(self):
"""
Opens the socket and binds to the given host and port. Uses
SO_REUSEPORT to be as robust as possible.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind((self.host, self.port))
start = open_socket
def close_socket(self):
"""
Closes the socket connection
"""
self.socket.close()
stop = close_socket
def refreshed(self):
"""
Returns True if there was a new frame
"""
return self.current_frame >= self.last_frame
def load_profiles(self):
"""
Loads all possible TUIO profiles and returns a dictionary with the
profile addresses as keys and an instance of a profile as the value
"""
_profiles = {}
for name, klass in inspect.getmembers(profiles):
if inspect.isclass(klass) and name.endswith('Profile') and name != 'TuioProfile':
# Adding profile to the self.profiles dictionary
profile = klass()
_profiles[profile.address] = profile
# setting convenient variable to access objects of profile
try:
setattr(self, profile.list_label, profile.objs)
except AttributeError:
continue
# Mapping callback method to every profile
self.manager.add(self.callback, profile.address)
return _profiles
def get_profile(self, profile):
"""Returns a specific profile from the profile list and otherwise None"""
return self.profiles.get(profile, None)
def get_helpers(self):
"""Returns a list of helper functions that provide access to the
objects of each profile."""
return list([profile.list_label for profile in self.profiles.values()])
def update(self):
"""
Tells the connection manager to receive the next 1024 bytes of messages
to analyze.
"""
try:
self.manager.handle(self.socket.recv(1024))
except socket.error:
pass
def callback(self, *incoming):
"""
Gets called by the CallbackManager if a new message was received
"""
message = incoming[0]
if message:
address, command = message[0], message[2]
profile = self.get_profile(address)
if profile is not None:
try:
getattr(profile, command)(self, message)
if command == 'set' or command == 'add':
if str(type(profile)) == "<class 'tuio.profiles.Tuio2DcurProfile'>":
evt = observer.CursorEvent(profile, message)
else:
evt = observer.ObjectEvent(profile,message)
self.eventManager.notify_listeners(evt)
except AttributeError:
pass
#Instantiate the unique Tracking Class
tracking = Tracking()
#An infinite loop to keep updating the tracker
def mainLoop():
try:
while 1:
tracking.update()
except KeyboardInterrupt:
tracking.stop()
def getEventManager():
return tracking.eventManager
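# Hedged usage sketch (not part of the original library): one non-blocking poll
# of the tracker followed by a walk over the loaded profiles. list_label is the
# per-profile helper attribute wired up in load_profiles above.
def _demo_poll_once():
    tracking.update()                              # drain pending OSC packets, if any
    for address, profile in tracking.profiles.items():
        objs = getattr(tracking, profile.list_label, [])
        print(address, profile.list_label, len(objs))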
|
"""The script scrapes https://flagma.ua/ site. It collects contact data for
each company from the specified category. The results are saved to a CSV file.
The scraping process requires dynamic IP change, for the site has anti-scrape
protection (IP ban). Therefore the script uses free TOR proxy. In order to
make things working the TOR Windows Expert Bundle should be downloaded and
installed from here:
https://www.torproject.org/download/tor/
And then the constant TOR_EXECUTABLE_PATH in ./utils/tor_proxy.py should be
modified accordingly.
"""
import base64
import logging
import re
import os
from bs4 import BeautifulSoup
from utils.http_request import HttpRequest, PROXY_TYPE_FREE, PROXY_TYPE_TOR
from utils.scraping_utils import (
FATAL_ERROR_STR,
setup_logging,
fix_filename,
remove_umlauts,
clean_text,
save_last_page,
load_last_page,
save_items_csv,
load_items_csv,
save_items_json,
load_items_json,
)
CSV_FILENAME = 'items.csv'
JSON_FILENAME = 'items.json'
COLUMNS = [
'name',
'phones',
'link',
]
BASE_URL = 'https://flagma.ua/companies/remont-pk-i-orgtehniki-kompanii/'
PAGE_URL = BASE_URL + 'page-{}/'
ENCODED_HTML_RE = re.compile(
r'var str = "(.+)"; \$\(this\)\.parent\(\)\.html\("<em>"'
+ r'\+Base64\.decode\(str\)\+"</em>"\);'
)
PHONE_RE = re.compile(r'tel:(.+)')
setup_logging()
request = HttpRequest(proxies=PROXY_TYPE_TOR)
def get_html(url: str) -> str:
r = request.get(url)
if r is None:
return None
return r.text
def scrape_item(url: str) -> dict:
html = get_html(url)
if not html:
return None
item = {
'name': '',
'phones': '',
'link': url,
}
try:
item['name'] = clean_text(
BeautifulSoup(html, 'lxml').find('h1', itemprop='name').get_text())
except AttributeError:
logging.exception('Error while parsing company name.')
return None
phones = []
matches = re.findall(ENCODED_HTML_RE, clean_text(html))
if len(matches) > 0:
soup = BeautifulSoup(str(base64.b64decode(matches[0]), 'utf-8'),
'lxml')
for phone_link in soup.find_all('a', class_='tel'):
phone_matches = re.findall(PHONE_RE, phone_link.get('href', ''))
if len(phone_matches) > 0:
phones.append(phone_matches[0])
item['phones'] = '; '.join(phones)
return item
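# Hedged, self-contained illustration (not part of the original script) of the
# decoding step above: the site hides phone numbers as Base64-encoded HTML that
# client-side JavaScript swaps in, so scrape_item extracts the blob with
# ENCODED_HTML_RE and decodes it. The snippet fabricates such a blob to show
# the round trip; the phone number is made up.
def _demo_phone_decoding():
    hidden = base64.b64encode(b'<a class="tel" href="tel:+380001112233">call</a>').decode('ascii')
    fake_script = ('var str = "%s"; $(this).parent().html("<em>"'
                   '+Base64.decode(str)+"</em>");' % hidden)
    blob = re.findall(ENCODED_HTML_RE, fake_script)[0]
    soup = BeautifulSoup(str(base64.b64decode(blob), 'utf-8'), 'lxml')
    return [re.findall(PHONE_RE, a.get('href', ''))[0]
            for a in soup.find_all('a', class_='tel')]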
def get_page_count() -> int:
html = get_html(BASE_URL)
if not html:
return None
try:
page_count = int(
BeautifulSoup(html, 'lxml')
.find('li', class_='page notactive').span.get_text())
except (AttributeError, ValueError):
logging.exception('Error while parsing page count.')
return None
return page_count
# First page index is 1 (not 0), last page index is page count
def get_item_links(page: int) -> list:
html = get_html(PAGE_URL.format(page))
if not html:
return None
links = []
try:
item_header_divs = BeautifulSoup(html, 'lxml').find_all(
'div', class_='page-list-item-header')
for item_header_div in item_header_divs:
links.append(item_header_div.div.a['href'])
except (AttributeError, KeyError):
logging.exception('Error while parsing item links.')
return None
return links
def item_is_scraped(items: list, link: str) -> bool:
for item in items:
if item['link'] == link:
logging.info(f'The item {link} is already scraped. Skipping.')
return True
return False
# The items parameter may contain previous scraping result
def scrape_page_items(items: list = None, page: int = 1) -> list:
    if items is None:  # avoid a shared mutable default argument
        items = []
    logging.info(f'Scraping items for page {page}.')
    links = get_item_links(page)
    if links is None:
return items
for link in links:
if item_is_scraped(items, link):
continue
item = scrape_item(link)
if item is not None:
items.append(item)
if save_items_json(items, JSON_FILENAME):
result = 'OK'
else:
result = 'FAILURE'
logging.info(f'Saving intermediate results for page {page}: {result}.')
return items
def scrape_all_items() -> list:
# Anti-bot protection workaround
request.rotate_proxy()
page_count = get_page_count()
if page_count is None:
return None
logging.info(f'Total page count: {page_count}.')
if os.path.exists(JSON_FILENAME):
logging.info('Loading previous scraping result.')
items = load_items_json(JSON_FILENAME)
else:
items = []
for page in range(1, page_count + 1):
# Anti-bot protection workaround
request.rotate_proxy()
items = scrape_page_items(items, page)
return items
def main():
logging.info('Starting scraping process.')
items = scrape_all_items()
if items is None:
logging.error(FATAL_ERROR_STR)
return
logging.info('Scraping process complete. Now saving the results.')
if not save_items_csv(items, COLUMNS, CSV_FILENAME):
logging.error(FATAL_ERROR_STR)
return
logging.info('Saving complete.')
if __name__ == '__main__':
main()
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run evaluation of VCMR or inference of TVR for submission
"""
import argparse
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
import numpy as np
from tqdm import tqdm
import pprint
from apex import amp
from horovod import torch as hvd
from data import (VcmrFullEvalDataset, vcmr_full_eval_collate,
VcmrVideoOnlyFullEvalDataset,
PrefetchLoader, QueryTokLmdb,
video_collate)
from load_data import (
get_video_ids, load_video_sub_dataset,
load_video_only_dataset)
from data.loader import move_to_cuda
from model.vcmr import HeroForVcmr
from utils.logger import LOGGER
from utils.const import VFEAT_DIM, VCMR_IOU_THDS
from utils.tvr_standalone_eval import eval_retrieval
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.basic_utils import (
load_json, save_json)
from utils.tvr_eval_utils import (
find_max_triples_from_upper_triangle_product,
generate_min_max_length_mask,
get_submission_top_n, post_processing_vcmr_nms,
post_processing_svmr_nms)
import pdb
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if hvd.rank() != 0:
LOGGER.disabled = True
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(load_json(hps_file))
model_config = f'{opts.output_dir}/log/model_config.json'
# load DBs and image dirs
video_ids = get_video_ids(opts.query_txt_db)
if opts.task != "didemo_video_only":
video_db = load_video_sub_dataset(
opts.vfeat_db, opts.sub_txt_db, model_opts.vfeat_interval,
model_opts)
else:
txt_meta = load_json(
os.path.join(opts.query_txt_db, "meta.json"))
video_db = load_video_only_dataset(
opts.vfeat_db, txt_meta,
model_opts.vfeat_interval,
model_opts)
assert opts.split in opts.query_txt_db
q_txt_db = QueryTokLmdb(opts.query_txt_db, -1)
if opts.task != "didemo_video_only":
inf_dataset = VcmrFullEvalDataset
else:
inf_dataset = VcmrVideoOnlyFullEvalDataset
eval_dataset = inf_dataset(
video_ids, video_db, q_txt_db,
distributed=model_opts.distributed_eval)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
img_pos_embed_weight_key = (
"v_encoder.f_encoder.img_embeddings" +
".position_embeddings.weight")
assert img_pos_embed_weight_key in checkpoint
max_frm_seq_len = len(checkpoint[img_pos_embed_weight_key])
model = HeroForVcmr.from_pretrained(
model_config,
state_dict=checkpoint,
vfeat_dim=VFEAT_DIM,
max_frm_seq_len=max_frm_seq_len,
lw_neg_ctx=model_opts.lw_neg_ctx,
lw_neg_q=model_opts.lw_neg_q, lw_st_ed=0,
ranking_loss_type=model_opts.ranking_loss_type,
use_hard_negative=False,
hard_pool_size=model_opts.hard_pool_size,
margin=model_opts.margin,
use_all_neg=model_opts.use_all_neg,
drop_svmr_prob=model_opts.drop_svmr_prob)
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset, batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcmr_full_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
_, results = validate_full_vcmr(
model, eval_dataloader, opts.split, opts, model_opts)
result_dir = f'{opts.output_dir}/results_{opts.split}'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results_list = all_gather_list(results)
if hvd.rank() == 0:
all_results = {"video2idx": all_results_list[0]["video2idx"]}
for rank_id in range(hvd.size()):
for key, val in all_results_list[rank_id].items():
if key == "video2idx":
continue
if key not in all_results:
all_results[key] = []
all_results[key].extend(all_results_list[rank_id][key])
LOGGER.info('All results joined......')
save_json(
all_results,
f'{result_dir}/results_{opts.checkpoint}_all.json')
LOGGER.info('All results written......')
@torch.no_grad()
def validate_full_vcmr(model, val_loader, split, opts, model_opts):
n = 27 # total proposal n
nof1 = 13 # first layer proposal = nof1 - 1
nof2 = 11 # second layer proposal
nof3 = 9
nof4 = 7
LOGGER.info("start running full VCMR evaluation"
f"on {opts.task} {split} split...")
model.eval()
n_ex = 0
st = time()
val_log = {}
has_gt_target = True
val_vid2idx = val_loader.dataset.vid2idx
if split in val_vid2idx:
video2idx_global = val_vid2idx[split]
else:
video2idx_global = val_vid2idx
video_ids = sorted(list(video2idx_global.keys()))
video2idx_local = {e: i for i, e in enumerate(video_ids)}
query_data = val_loader.dataset.query_data
partial_query_data = []
total_frame_embeddings = None
video_batch, video_idx = [], []
max_clip_len = 0
for video_i, (vid, vidx) in tqdm(enumerate(video2idx_local.items()),
desc="Computing Video Embeddings",
total=len(video2idx_local)):
video_item = val_loader.dataset.video_db[vid]
video_batch.append(video_item)
video_idx.append(vidx)
if len(video_batch) == opts.vcmr_eval_video_batch_size or\
video_i == len(video2idx_local) - 1:
video_batch = move_to_cuda(video_collate(video_batch))
# Safeguard fp16
for k, item in video_batch.items():
if isinstance(item, torch.Tensor) and\
item.dtype == torch.float32:
video_batch[k] = video_batch[k].to(
dtype=next(model.parameters()).dtype)
curr_frame_embeddings,_ = model.v_encoder(video_batch, 'repr',train=False)
curr_c_attn_masks = video_batch['c_attn_masks']
curr_clip_len = curr_frame_embeddings.size(-2)
assert curr_clip_len <= model_opts.max_clip_len
if total_frame_embeddings is None:
feat_dim = curr_frame_embeddings.size(-1)
total_frame_embeddings = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len, feat_dim),
dtype=curr_frame_embeddings.dtype,
device=curr_frame_embeddings.device)
total_c_attn_masks = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len),
dtype=curr_c_attn_masks.dtype,
device=curr_frame_embeddings.device)
indices = torch.LongTensor(video_idx)
total_frame_embeddings[indices, :curr_clip_len] =\
curr_frame_embeddings
total_c_attn_masks[indices, :curr_clip_len] =\
curr_c_attn_masks
max_clip_len = max(max_clip_len, curr_clip_len)
video_batch, video_idx = [], []
total_frame_embeddings = total_frame_embeddings[:, :max_clip_len, :]
total_c_attn_masks = total_c_attn_masks[:, :max_clip_len]
svmr_st_probs_total, svmr_ed_probs_total = None, None
svmr_st_proposal_total, svmr_ed_proposal_total = None, None
vcmr_top_idx_total, proposal_st_total, proposal_ed_total = None, None, None
sorted_q2c_indices, sorted_q2c_scores = None, None
flat_st_ed_sorted_scores, flat_st_ed_scores_sorted_indices = None, None
total_qids, total_vids = [], []
for batch in tqdm(val_loader, desc="Computing q2vScores"):
qids = batch['qids']
vids = batch['vids']
targets = batch['targets']
if has_gt_target and targets.min() < 0:
has_gt_target = False
LOGGER.info(
"No GT annotations provided, only generate predictions")
del batch['targets']
del batch['qids']
del batch['vids']
total_qids.extend(qids)
total_vids.extend(vids)
for qid in qids:
partial_query_data.append(query_data[qid])
# Safeguard fp16
for k, item in batch.items():
if isinstance(item, torch.Tensor) and item.dtype == torch.float32:
batch[k] = batch[k].to(
dtype=next(model.parameters()).dtype)
# FIXME
_q2video_scores, _st_probs, _ed_probs, _score_idx, q2c_score_all, g2v_positive_scores =\
model.get_pred_from_raw_query(
total_frame_embeddings, total_c_attn_masks, **batch,
cross=True, val_gather_gpus=False)
_st_probs = F.softmax(_st_probs, dim=-1)
_ed_probs = F.softmax(_ed_probs, dim=-1)
# q2c_score_all: frame-level scores for every (query, video) pair, roughly (num queries, clip length, num videos)
n_ex += len(qids)
if "SVMR" in opts.full_eval_tasks and has_gt_target:
row_indices = torch.arange(0, len(_st_probs))
svmr_gt_vidx = torch.LongTensor(
[video2idx_local[e] for e in vids])
svmr_st_probs = _st_probs[
row_indices, svmr_gt_vidx].float().cpu().numpy()
svmr_ed_probs = _ed_probs[
row_indices, svmr_gt_vidx].float().cpu().numpy()
## Proposal Based
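# Summary of the proposal construction below: the per-frame scores are clamped
# and cumulatively summed over the clip (integ_m is a lower-triangular mask, so
# integ[:, i] is the score mass accumulated up to frame i). That monotone curve
# is divided into nof1 equally spaced mass levels (spacing W), and idx records,
# for each level, the frame whose accumulated mass is closest to it. The pair of
# adjacent level boundaries nearest the top-scoring frame is taken as the
# start/end proposal, with the end pushed forward by two frames if they coincide.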
svmr_mt = q2c_score_all[row_indices,:,svmr_gt_vidx].float().cpu()
B1,L = svmr_mt.shape
p_score = svmr_mt
p_score = torch.clamp(p_score,min=0,max=1000)
score = torch.zeros(B1,L)
origin_score = svmr_mt
integ_m = torch.zeros(B1,L,L)
for i in range(L):
integ_m[:,i,:i+1] = 1
p_score = p_score.view(B1,1,L)
p_score = p_score.repeat(1,L,1)
integ = torch.sum(integ_m*p_score,dim=2)
for j in range(B1):
score[j,:] = integ[j,:]
E,_ = torch.max(score,dim=1)
S,_ = torch.min(score,dim=1)
W = (E - S)/(nof1-1)
W = W.view(B1,1).repeat(1,nof1)
g = torch.arange(nof1).view(1,nof1).repeat(B1,1)
Wg = W*g
Wg = Wg.view(B1,1,nof1).repeat(1,L,1)
score_ = score.view(B1,L,1).repeat(1,1,nof1)
ck = score_ - Wg
ck_ = abs(-1/ck)
_,idx = torch.max(ck_,dim=1)
_,s_score_idx = torch.sort(-origin_score,dim=1)
top1_score_idx = s_score_idx[:,0]
top1_score_idx = top1_score_idx.view(B1,1).repeat(1,nof1)
sub = idx - top1_score_idx + 0.01
sub = abs(-1/sub)
_,top1_prop_idx = torch.max(sub,dim=1)
tmp = top1_prop_idx-(nof1-2)
tmp = torch.clamp(tmp,min=0,max=1)
top1_prop_idx = top1_prop_idx - tmp
top1_prop_idx = top1_prop_idx.view(B1,1).repeat(1,nof1)
st_proposal_ = torch.gather(idx,1,top1_prop_idx)
ed_proposal_ = torch.gather(idx,1,top1_prop_idx+1)
st_proposal = st_proposal_[:,0]
ed_proposal = ed_proposal_[:,0]
offset = st_proposal == ed_proposal
offset2 = offset*2
ed_proposal = ed_proposal + offset2
if svmr_st_proposal_total is None:
svmr_st_proposal_total = st_proposal.numpy()
svmr_ed_proposal_total = ed_proposal.numpy()
else:
svmr_st_proposal_total = np.concatenate((svmr_st_proposal_total, st_proposal.numpy()),axis=0)
svmr_ed_proposal_total = np.concatenate((svmr_ed_proposal_total, ed_proposal.numpy()),axis=0)
###
if svmr_st_probs_total is None:
svmr_st_probs_total = svmr_st_probs
svmr_ed_probs_total = svmr_ed_probs
else:
svmr_st_probs_total = np.concatenate(
(svmr_st_probs_total, svmr_st_probs),
axis=0)
svmr_ed_probs_total = np.concatenate(
(svmr_ed_probs_total, svmr_ed_probs),
axis=0)
if "VR" not in opts.full_eval_tasks or _q2video_scores is None:
continue
_q2video_scores = _q2video_scores.float()
# To give more importance to top scores: the higher model_opts.q2c_alpha is,
# the more weight the top-ranked videos receive
q2video_scores = torch.exp(model_opts.q2c_alpha * _q2video_scores)
_sorted_q2c_scores, _sorted_q2c_indices = \
torch.topk(q2video_scores, model_opts.max_vcmr_video,
dim=1, largest=True)
if sorted_q2c_indices is None:
sorted_q2c_indices = _sorted_q2c_indices.cpu().numpy()
sorted_q2c_scores = _sorted_q2c_scores.cpu().numpy()
else:
sorted_q2c_indices = np.concatenate(
(sorted_q2c_indices, _sorted_q2c_indices.cpu().numpy()),
axis=0)
sorted_q2c_scores = np.concatenate(
(sorted_q2c_scores, _sorted_q2c_scores.cpu().numpy()),
axis=0)
if "VCMR" not in opts.full_eval_tasks:
continue
row_indices = torch.arange(
0, len(_st_probs), device=_st_probs.device).unsqueeze(1)
_st_probs = _st_probs[
row_indices, _sorted_q2c_indices] # (_N_q, max_vcmr_video, L)
_ed_probs = _ed_probs[row_indices, _sorted_q2c_indices]
# (_N_q, max_vcmr_video, L, L)
_st_ed_scores = torch.einsum("qvm,qv,qvn->qvmn", _st_probs,
_sorted_q2c_scores, _ed_probs)
valid_prob_mask = generate_min_max_length_mask(
_st_ed_scores.shape, min_l=model_opts.min_pred_l,
max_l=model_opts.max_pred_l)
_st_ed_scores *= torch.from_numpy(
valid_prob_mask).to(
_st_ed_scores.device) # invalid location will become zero!
# sort across the top-max_n_videos videos (by flatten from the 2nd dim)
# the indices here are local indices, not global indices
_n_q = _st_ed_scores.shape[0]
_flat_st_ed_scores = _st_ed_scores.reshape(
_n_q, -1) # (N_q, max_vcmr_video*L*L)
_flat_st_ed_sorted_scores, _flat_st_ed_scores_sorted_indices = \
torch.sort(_flat_st_ed_scores, dim=1, descending=True)
if flat_st_ed_sorted_scores is None:
flat_st_ed_scores_sorted_indices =\
_flat_st_ed_scores_sorted_indices[
:, :model_opts.max_before_nms].cpu().numpy()
flat_st_ed_sorted_scores =\
_flat_st_ed_sorted_scores[
:, :model_opts.max_before_nms].cpu().numpy()
else:
flat_st_ed_scores_sorted_indices = np.concatenate(
(flat_st_ed_scores_sorted_indices,
_flat_st_ed_scores_sorted_indices[
:, :model_opts.max_before_nms].cpu().numpy()),
axis=0)
flat_st_ed_sorted_scores = np.concatenate(
(flat_st_ed_sorted_scores,
_flat_st_ed_sorted_scores[
:, :model_opts.max_before_nms].cpu().numpy()),
axis=0)
# Proposal Based
vcmr_mt = q2c_score_all[row_indices,:,_sorted_q2c_indices].float().cpu()
# vcmr_mt: (num queries, top-100 videos, clip length) frame-level scores
B1,T,L = vcmr_mt.shape
p_score = vcmr_mt
p_score = torch.clamp(p_score,min=0,max=1000)
#pdb.set_trace()
p_score = torch.exp(p_score*2.5) - 1
#p_score = torch.clamp(torch.exp(p_score),min=1.001,max=1000)
score = torch.zeros(B1,T,L)
origin_score = vcmr_mt
integ_m = torch.zeros(B1,T,L,L)
for i in range(L):
integ_m[:,:,i,:i+1] = 1
p_score = p_score.view(B1,T,1,L)
p_score = p_score.repeat(1,1,L,1)
integ = torch.sum(integ_m*p_score,dim=3)
for j in range(B1):
score[j,:,:] = integ[j,:,:]
E,_ = torch.max(score,dim=2)
S,_ = torch.min(score,dim=2)
W = (E - S)/(nof1-1)
W = W.view(B1,T,1).repeat(1,1,nof1)
g = torch.arange(nof1).view(1,1,nof1).repeat(B1,T,1)
Wg = W*g
Wg = Wg.view(B1,T,1,nof1).repeat(1,1,L,1)
score_ = score.view(B1,T,L,1).repeat(1,1,1,nof1)
ck = score_ - Wg
ck_ = abs(-1/ck)
_,idx = torch.max(ck_,dim=2)
#n_total = n-1
#n_total = n-1 + int((n-1)/2)
#proposal_score = torch.zeros(B1,100,n_total)
# Multi scale Proposal Generation
# Speed version
# Double layer
proposal_score = torch.zeros(B1,100,(n))
# DH : # of Proposal
l2s = torch.arange(nof2)
l2e = l2s + 2
l3s = torch.arange(nof3)
l3e = l3s + 4
l4s = torch.arange(nof4)
l4e = l4s + 6
proposal_st = torch.cat((idx[:,:,l2s],idx[:,:,l3s],idx[:,:,l4s]),dim=2)
proposal_ed = torch.cat((idx[:,:,l2e],idx[:,:,l3e],idx[:,:,l4e]),dim=2)
st_score = torch.gather(origin_score,2,proposal_st)
ed_score = torch.gather(origin_score,2,proposal_ed)
out_score = (st_score+ed_score)/2
proposal_score = out_score
proposal_score = proposal_score.view(B1,100*(n))
_,p_idx = torch.sort(proposal_score,descending=True,dim=1)
proposal_idx = p_idx[:,:100]
if vcmr_top_idx_total is None:
vcmr_top_idx_total = proposal_idx.numpy()
proposal_st_total = proposal_st.numpy()
proposal_ed_total = proposal_ed.numpy()
#vcmr_st_proposal_total = st_proposal.numpy()
#vcmr_ed_proposal_total = ed_proposal.numpy()
else:
vcmr_top_idx_total = np.concatenate((vcmr_top_idx_total, proposal_idx.numpy()),axis=0)
proposal_st_total = np.concatenate((proposal_st_total, proposal_st.numpy()),axis=0)
proposal_ed_total = np.concatenate((proposal_ed_total, proposal_ed.numpy()),axis=0)
#vcmr_ed_proposal_total = np.concatenate((vcmr_ed_proposal_total, ed_proposal.numpy()),axis=0)
###
####### when running a test, remove the *_proposal_total lines below
#svmr_st_proposal_total = svmr_st_proposal_total.astype(np.float32)
#svmr_ed_proposal_total = svmr_ed_proposal_total.astype(np.float32)
svmr_res, vr_res, vcmr_res = [], [], []
psvmr_res, pvr_res, pvcmr_res = [], [], []
if "SVMR" in opts.full_eval_tasks and has_gt_target:
st_ed_prob_product = np.einsum(
"bm,bn->bmn", svmr_st_probs_total,
svmr_ed_probs_total) # (B, L, L)
valid_prob_mask = generate_min_max_length_mask(
st_ed_prob_product.shape, min_l=model_opts.min_pred_l,
max_l=model_opts.max_pred_l)
# invalid location will become zero!
st_ed_prob_product *= valid_prob_mask
batched_sorted_triples =\
find_max_triples_from_upper_triangle_product(
st_ed_prob_product, top_n=model_opts.max_before_nms,
prob_thd=None)
for svmr_i, (qid, vid) in tqdm(
enumerate(zip(total_qids, total_vids)),
desc="[SVMR] Loop over queries to generate predictions",
total=len(total_qids)):
vidx = video2idx_global[vid]
_sorted_triples = batched_sorted_triples[svmr_i]
# +1 because ed_idx was redefined to lie inside the moment.
_sorted_triples[:, 1] += 1
_sorted_triples[:, :2] = (_sorted_triples[:, :2]
* model_opts.vfeat_interval)
# Proposal Based
svmr_st_proposal_total[svmr_i] = svmr_st_proposal_total[svmr_i] * model_opts.vfeat_interval
svmr_ed_proposal_total[svmr_i] = svmr_ed_proposal_total[svmr_i] * model_opts.vfeat_interval
cur_prediction = [[vidx, ] + [svmr_st_proposal_total[svmr_i],svmr_ed_proposal_total[svmr_i]]]
cur_query_prediction = dict(desc_id=int(qid),desc="",predictions=cur_prediction)
psvmr_res.append(cur_query_prediction)
####
cur_ranked_predictions = [
[vidx, ] + row for row in _sorted_triples.tolist()]
cur_query_pred = dict(desc_id=int(qid),
desc="",
predictions=cur_ranked_predictions)
svmr_res.append(cur_query_pred)
if "VR" in opts.full_eval_tasks:
for vr_i, (_sorted_q2c_scores_row, _sorted_q2c_indices_row) in tqdm(
enumerate(
zip(sorted_q2c_scores[:, :100],
sorted_q2c_indices[:, :100])),
desc="[VR] Loop over queries to generate predictions",
total=len(total_qids)):
cur_vr_redictions = []
for v_score, v_meta_idx in zip(_sorted_q2c_scores_row,
_sorted_q2c_indices_row):
video_idx = video2idx_global[video_ids[v_meta_idx]]
cur_vr_redictions.append([video_idx, 0, 0, float(v_score)])
cur_query_pred = dict(desc_id=int(total_qids[vr_i]),
desc="",
predictions=cur_vr_redictions)
vr_res.append(cur_query_pred)
pvr_res = vr_res
if "VCMR" in opts.full_eval_tasks:
for vcmr_i, _vcmr_top_idx_total in tqdm(enumerate(vcmr_top_idx_total),desc="Our measure",total=len(total_qids)):
vo = _vcmr_top_idx_total//(n)
mo = _vcmr_top_idx_total%(n)
video_meta_indices = sorted_q2c_indices[vcmr_i, vo]
moment_st_meta_indices = proposal_st_total[vcmr_i, vo, mo]
moment_ed_meta_indices = proposal_ed_total[vcmr_i, vo, mo]
tmp = moment_st_meta_indices == moment_ed_meta_indices
tmp2 = tmp*2
moment_ed_meta_indices = moment_ed_meta_indices + tmp2
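# Hedged note (added): when a proposal's start and end indices coincide, the end index
# appears to be pushed 2 frames later so the predicted moment has a non-zero length.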
pred_st_in_seconds = moment_st_meta_indices.astype(
np.float32) * model_opts.vfeat_interval
pred_ed_in_seconds = moment_ed_meta_indices.astype(
np.float32) * model_opts.vfeat_interval
mcur_vcmr_redictions = []
for j, (v_meta_idx) in enumerate(video_meta_indices): # videos
video_idx = video2idx_global[video_ids[v_meta_idx.item()]]
mcur_vcmr_redictions.append(
[video_idx, float(pred_st_in_seconds[j]),
float(pred_ed_in_seconds[j])])
mcur_query_pred = dict(
desc_id=int(total_qids[vcmr_i]),
desc="",
predictions=mcur_vcmr_redictions)
pvcmr_res.append(mcur_query_pred)
for vcmr_i, (
_flat_st_ed_scores_sorted_indices,
_flat_st_ed_sorted_scores) in tqdm(
enumerate(zip(
flat_st_ed_scores_sorted_indices,
flat_st_ed_sorted_scores)),
desc="[VCMR] Loop over queries to generate predictions",
total=len(total_qids)): # i is query_idx
# list([video_idx(int), st(float),
# ed(float), score(float)])
video_meta_indices_local, pred_st_indices, pred_ed_indices = \
np.unravel_index(
_flat_st_ed_scores_sorted_indices,
shape=(model_opts.max_vcmr_video, model_opts.max_clip_len,
model_opts.max_clip_len))
# video_meta_indices_local are indices among the top-max_vcmr_video videos;
# video_meta_indices are indices into all videos, i.e. the true (global) indices
video_meta_indices = sorted_q2c_indices[
vcmr_i, video_meta_indices_local]
pred_st_in_seconds = pred_st_indices.astype(
np.float32) * model_opts.vfeat_interval
pred_ed_in_seconds = pred_ed_indices.astype(
np.float32
) * model_opts.vfeat_interval + model_opts.vfeat_interval
cur_vcmr_redictions = []
for j, (v_meta_idx, v_score) in enumerate(
zip(video_meta_indices,
_flat_st_ed_sorted_scores)): # videos
video_idx = video2idx_global[video_ids[v_meta_idx.item()]]
cur_vcmr_redictions.append(
[video_idx, float(pred_st_in_seconds[j]),
float(pred_ed_in_seconds[j]), float(v_score)])
cur_query_pred = dict(
desc_id=int(total_qids[vcmr_i]),
desc="",
predictions=cur_vcmr_redictions)
vcmr_res.append(cur_query_pred)
#pdb.set_trace()
qn = len(psvmr_res)
for i in range(qn):
psvmr_res[i]['predictions'] = [psvmr_res[i]['predictions'][0]]*100
#eval_res = dict(SVMR=svmr_res, VCMR=vcmr_res, VR=vr_res)
#eval_res = {k: v for k, v in eval_res.items() if len(v) != 0}
# proposal
peval_res = dict(SVMR=psvmr_res, VCMR=pvcmr_res, VR=pvr_res)
peval_res = {k: v for k, v in peval_res.items() if len(v) != 0}
eval_res = peval_res
eval_res["video2idx"] = video2idx_global
#peval_res["video2idx"] = video2idx_global
eval_submission = get_submission_top_n(
eval_res, top_n=model_opts.max_after_nms)
if has_gt_target:
metrics = eval_retrieval(eval_submission, partial_query_data,
iou_thds=VCMR_IOU_THDS,
match_number=True,
verbose=False,
use_desc_type=model_opts.eval_with_query_type)
if model_opts.distributed_eval:
n_ex_per_rank = all_gather_list(n_ex)
metrics_per_rank = all_gather_list(metrics)
else:
n_ex_per_rank = [n_ex]
metrics_per_rank = [metrics]
n_ex = sum(n_ex_per_rank)
val_log = {}
gathered_metrics = {}
for task_type, task_metric in metrics.items():
gathered_metrics[task_type] = {}
for k in task_metric.keys():
if k == "desc_type_ratio":
continue
gathered_v = 0
for idx, n in enumerate(n_ex_per_rank):
gathered_v += n*metrics_per_rank[idx][task_type][k]
gathered_v = gathered_v / n_ex
gathered_metrics[task_type][k] = gathered_v
val_log[
f'valid_{split}_{task_type}/{task_type}_{k}'] = gathered_v
if "VCMR" in gathered_metrics:
LOGGER.info("metrics_no_nms_VCMR \n{}".format(pprint.pformat(
gathered_metrics["VCMR"], indent=4)))
elif "SVMR" in gathered_metrics:
LOGGER.info("metrics_no_nms_SVMR \n{}".format(pprint.pformat(
gathered_metrics["SVMR"], indent=4)))
#model_opts.nms_thd = -1
if model_opts.nms_thd != -1:
LOGGER.info(
"Performing nms with nms_thd {}".format(
model_opts.nms_thd))
metrics_nms = metrics
if model_opts.distributed_eval:
metrics_nms_per_rank = all_gather_list(metrics_nms)
else:
metrics_nms_per_rank = [metrics_nms]
gathered_metrics_nms = {}
for task_type, task_metric in metrics_nms.items():
gathered_metrics_nms[task_type] = {}
for k in task_metric.keys():
if k == "desc_type_ratio":
continue
gathered_v_nms = 0
for idx, n in enumerate(n_ex_per_rank):
gathered_v_nms += (
n*metrics_nms_per_rank[idx][task_type][k])
gathered_v_nms = gathered_v_nms / n_ex
gathered_metrics_nms[task_type][k] = gathered_v_nms
val_log[f'valid_{split}_{task_type}'
f'_nms_{model_opts.nms_thd}/'
f'{task_type}_{k}'] = gathered_v_nms
if "VCMR" in gathered_metrics_nms:
LOGGER.info("metrics_nms_VCMR \n{}".format(pprint.pformat(
gathered_metrics_nms["VCMR"], indent=4)))
elif "SVMR" in gathered_metrics_nms:
LOGGER.info("metrics_nms_SVMR \n{}".format(pprint.pformat(
gathered_metrics_nms["SVMR"], indent=4)))
tot_time = time()-st
val_log.update(
{f'valid/vcmr_{split}_ex_per_s': n_ex/tot_time})
LOGGER.info(f"validation finished in {int(tot_time)} seconds")
model.train()
return val_log, eval_submission
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--sub_txt_db",
default="/txt/tv_subtitles.db",
type=str,
help="The input video subtitle corpus. (LMDB)")
parser.add_argument("--vfeat_db",
default="/video/tv", type=str,
help="The input video frame features.")
parser.add_argument("--query_txt_db",
default="/txt/tvr_val.db",
type=str,
help="The input test query corpus. (LMDB)")
parser.add_argument("--split", choices=["val", "test_public", "test"],
default="val", type=str,
help="The input query split")
parser.add_argument("--task", choices=["tvr", "how2r", "didemo_video_sub",
"didemo_video_only"],
default="tvr", type=str,
help="The evaluation vcmr task")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model checkpoint steps")
parser.add_argument("--batch_size",
default=80, type=int,
help="number of queries in a batch")
parser.add_argument("--vcmr_eval_video_batch_size",
default=50, type=int,
help="number of videos in a batch")
parser.add_argument(
"--full_eval_tasks", type=str, nargs="+",
choices=["VCMR", "SVMR", "VR"], default=["VCMR", "SVMR", "VR"],
help="Which tasks to run."
"VCMR: Video Corpus Moment Retrieval;"
"SVMR: Single Video Moment Retrieval;"
"VR: regular Video Retrieval. "
" (will be performed automatically with VCMR)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
# options safe guard
# TODO
main(args)
|
from dd.cudd import BDD
from parity_game import parity_game, sat_to_expr
import logging
import time
logger = logging.getLogger(__name__)
debug = logger.isEnabledFor(logging.DEBUG)
#@profile
def fpj(pg: parity_game):
"""Symbolic implementation of the FPJ algorithm
:param pg: parity game instance
:type pg: parity_game
:return: BDDs representing the sets of vertices won by each player, together with their winning strategies
:rtype: (BDD, BDD, BDD, BDD)
"""
z = pg.prio_even
j = pg.bdd.false
u = unjustified(j, pg)
while u != pg.bdd.false:
if debug:
logging.debug("\n\n\nnext")
logging.debug("J: " + pg.bdd_sat(pg.bdd.quantify(j, pg.variables_, forall=False)))
logging.debug("U(J): " + pg.bdd_sat(u))
z, j = next(z, j, u, pg)
u = unjustified(j, pg)
w0 = z
w1 = pg.v & ~z
s0 = j & pg.even & w0
s1 = j & pg.odd & w1
return w0, w1, s0, s1
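# Illustrative usage (added; hypothetical — the parity_game constructor and its
# arguments depend on the surrounding project and are assumptions here):
#     pg = parity_game(...)            # build the symbolic parity game
#     w0, w1, s0, s1 = fpj(pg)         # solve it with the FPJ algorithm
#     print(pg.bdd_sat(w0))            # vertices won by Even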
#@profile
def next(z: BDD, j: BDD, u: BDD, pg: parity_game):
"""A monotonic iteration of the FPJ algorithm
:param z: BDD representing vertices estimated to be won by Even
:type z: BDD
:param j: BDD representing the justification graph
:type j: BDD
:param u: BDD representing the set of currently unjustified vertices
:type u: BDD
:param pg: parity game instance
:type pg: parity_game
:return: BDDs representing updated Z and J
:rtype: (BDD, BDD)
"""
i = 0
while (pg.p[i] & u) == pg.bdd.false:
i += 1
u = pg.p[i] & u
if debug:
logging.debug("prio: " + str(i))
logging.debug("U: " + pg.bdd_sat(u))
logging.debug("Z: " + pg.bdd_sat(z))
u_pd = xor(phi(z, pg), z) & u
if debug:
logging.debug("xor: " + pg.bdd_sat(xor(phi(z, pg), z)))
logging.debug("phi: " + pg.bdd_sat(phi(z, pg)))
logging.debug("U_pd: " + pg.bdd_sat(u_pd))
if u_pd != pg.bdd.false:
if debug:
logging.debug("found vertices to update")
r = reaches(j, u_pd, pg)
if debug:
logging.debug("reaches: " + pg.bdd_sat(r))
if i % 2 == 0:
z_r = (z & ~(r & pg.prio_odd)) & prio_lt(i, pg.p, pg)
else:
z_r = (z | (r & pg.prio_even)) & prio_lt(i, pg.p, pg)
j_t = j & ~r
if debug:
logging.debug("z_r: " + pg.bdd_sat(z_r))
logging.debug("xor(z, u_pd): " + pg.bdd_sat(xor(z, u_pd)))
z_ = (z & prio_gt(i, pg.p, pg)) | xor(z & pg.p[i], u_pd) | z_r
strat = strategy_0(z_, u_pd, pg)
j_ = j_t | strat
if debug:
logging.debug("new moves: " + pg.bdd_sat_edges(strat))
else:
z_ = z
strat = strategy_0(z_, u, pg)
j_ = j | strat
if debug:
logging.debug("found no new vertices to update")
logging.debug("new moves: " + pg.bdd_sat_edges(strat))
return z_, j_
#@profile
def strategy_0(z: BDD, u: BDD, pg: parity_game):
"""Compute winning moves from U based on estimation Z, and all moves
from a vertex in U if said vertex is lost by its owner.
:param z: BDD representing estimation of vertices won by player Z
:type z: BDD
:param u: BDD representing vertices from which new moves are computed
:type u: BDD
:param pg: parity game instance
:type pg: parity_game
:return: BDD representing the moves to be added to the justification graph.
:rtype: BDD
"""
even = u & pg.even & z
odd = u & pg.odd & ~z
losing = u & ~(even | odd) & pg.v # vertices won by the player that does not own them
z_ = pg.bdd.let(pg.substitution_list, z)
return (even & z_ & pg.e) | (odd & (~z_) & pg.e) | (losing & pg.e)
##{v ∈ V0|∃w:(v, w) ∈ E ∧ w ∈ S } ∪ { v ∈ V1| ∀w: (v, w) ∈ E ⇒ w∈S}
#@profile
def phi(z: BDD, pg: parity_game):
"""Update the estimate Z by lookahead of 1
:param z: BDD representing vertices currently estimated to be won by Even
:type z: BDD
:param pg: parity game instance
:type pg: parity_game
:return: BDD representing new estimate Z
:rtype: BDD
"""
z_ = pg.bdd.let(pg.substitution_list, z)
res = ((pg.even & pg.bdd.quantify(pg.e & z_, pg.variables_, forall=False))
| (pg.odd & pg.bdd.quantify(~(pg.e) | z_, pg.variables_, forall=True)))
return res
#@profile
def xor(a: BDD, b: BDD):
return (a & ~b) | (~a & b)
#@profile
def unjustified(j: BDD, pg: parity_game):
return pg.v & ~pg.bdd.quantify(j, pg.variables_, forall=False)
# Set of vertices from which x can be reached over edges in j
#@profile
def reaches(j: BDD, x: BDD, pg: parity_game):
"""Compute the set of vertices from which X can be reached with moves consistent with J
:param j: BDD representing justification graph J
:type j: BDD
:param x: BDD representing the originating set
:type x: BDD
:param pg: parity game instance
:type pg: parity_game
:return: BDD representing vertices from which X can be reached over J
:rtype: BDD
"""
# Preimage of v using edges in e
def preimage(v: BDD, e: BDD):
v_next = pg.bdd.let(pg.substitution_list, v)
return pg.bdd.quantify(v_next & e, pg.variables_, forall = False)
x_ = pg.bdd.false
while x_ != x:
x_ = x
x = x | preimage(x, j)
return x
#@profile
def prio_lt(prio: int, p: dict, pg: parity_game):
"""Returns a BDD representing all vertices with priority lower than _prio_
:param prio: all selected vertices should have lower priority than prio
:type prio: int
:param p: dictionary containing BDDs for vertices of each priority
:type p: dict
:param pg: parity game instance
:type pg: parity_game
:return: BDD of all vertices with priority strictly lower than prio
:rtype: BDD
"""
bdd = pg.bdd.false
for current in p.keys():
if current < prio:
bdd = bdd | p[current]
return bdd
#@profile
def prio_gt(prio: int, p: dict, pg: parity_game):
"""Returns a BDD representing all vertices with priority greater than _prio_
:param prio: all selected vertices should have greater priority than prio
:type prio: int
:param p: dictionary containing BDDs for vertices of each priority
:type p: dict
:param pg: parity game instance
:type pg: parity_game
:return: BDD of all vertices with priority strictly greater than prio
:rtype: BDD
"""
bdd = pg.bdd.false
for current in p.keys():
if current > prio:
bdd = bdd | p[current]
return bdd |
import asyncio
import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any
import aiohttp
import vdf
import vpk
from . import _typings
from ._vpk_extractor import VpkExtractor
from ._fields import FieldsCollector
from ._items import ItemsCollector
from ._sticker_kits import StickerPatchCollector
from ._cases import CasesCollector
# https://stackoverflow.com/a/287944
class BCOLORS:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
@dataclass(eq=False, repr=False)
class ResourceCollector:
RES_DIR: Path
ITEMS_GAME_URL: str
CSGO_ENGLISH_URL: str
ITEMS_GAME_CDN_URL: str
ITEMS_SCHEMA_URL: str
vpk_path: Path
categories_path: Path
phases_mapping_path: Path
def __post_init__(self):
assert self.vpk_path.exists(), "No vpk file at this path" # some validation
@staticmethod
def _keys_to_lowercase(target: dict[str, Any]) -> dict[str, Any]:
return {k.lower(): v for k, v in target.items()}
async def _fetch_data_files(self) -> list[str]:
async with aiohttp.ClientSession() as session:
tasks = (
session.get(self.ITEMS_GAME_URL),
session.get(self.CSGO_ENGLISH_URL),
session.get(self.ITEMS_GAME_CDN_URL),
session.get(self.ITEMS_SCHEMA_URL),
)
resps = await asyncio.gather(*tasks)
return [await resp.text() for resp in resps]
@classmethod
def _parse_data_files(cls, *texts: str) -> tuple:
items_game_text, csgo_english_text, items_game_cdn_text, items_schema_text = texts
items_game: _typings.ITEMS_GAME = vdf.loads(items_game_text)["items_game"]
csgo_english: _typings.CSGO_ENGLISH = cls._keys_to_lowercase(vdf.loads(csgo_english_text)["lang"]["Tokens"])
items_cdn: _typings.ITEMS_CDN = {
line.split("=")[0]: line.split("=")[1] for line in items_game_cdn_text.splitlines()[3:]
}
items_schema: _typings.ITEMS_SCHEMA = json.loads(items_schema_text)["result"]
return items_game, csgo_english, items_cdn, items_schema
def _dump_files(self, *files: tuple[str | Path, Any]):
for file_name, file in files:
with (self.RES_DIR / file_name).open("w") as f:
json.dump(file, f, sort_keys=True, indent=2)
async def collect(self):
print(f"{BCOLORS.OKCYAN}[{datetime.now()}] Start parsing data...")
with self.categories_path.open("r") as c:
categories: dict[str, str] = json.load(c)
with self.phases_mapping_path.open("r") as p:
phases_mapping: dict[str, str] = json.load(p)
texts = await self._fetch_data_files()
items_game, csgo_english, items_cdn, items_schema = self._parse_data_files(*texts)
pak = VpkExtractor(vpk.open(str(self.vpk_path)))
fields_collector = FieldsCollector(items_game, csgo_english, items_schema, categories, phases_mapping)
qualities, types, paints, rarities, origins = fields_collector()
cases_collector = CasesCollector(items_game, csgo_english, items_schema)
cases = cases_collector()
items_collector = ItemsCollector(
items_game, csgo_english, items_schema, items_cdn, paints, types, categories, cases
)
items = items_collector()
sticker_collector = StickerPatchCollector(pak, items_game, csgo_english)
stickers, patches, graffities, tints = sticker_collector()
sticker_kits = {**stickers, **patches, **graffities}
# test(pak, items_cdn, csgo_english)
to_dump = (
("qualities.json", qualities),
("types.json", types),
("paints.json", paints),
("rarities.json", rarities),
("origins.json", origins),
("cases.json", cases),
("items.json", items),
("sticker_kits.json", sticker_kits),
("tints.json", tints),
)
self._dump_files(*to_dump)
print(f"{BCOLORS.OKCYAN}[{datetime.now()}] All data parsed and saved!")
|
"""与疫情通截图相关的方法
"""
import datetime
import json
import logging
import pytz
import random
from io import BytesIO
import requests
from PIL import Image, ImageDraw, ImageFont
from conf import settings
logger = logging.getLogger('screenshot')
def generate_screenshot(
name: str,
stu_id: str,
date: str = None,
shot_time: str = None,
battery: int = None
) -> Image:
"""生成疫情通截图
:param name: 姓名
:param stu_id: 学号
:param date: 日期(%Y-%m-%d),缺省值是今天
:param shot_time: 截图时间(%H:%M),缺省值是现在
:param battery: 电量,两位数, 10-99 ,缺省值随机
:return: 生成的图片
"""
now_datetime = datetime.datetime.now(tz=pytz.timezone(settings.TIMEZONE))
# Default date: today
if date is None:
date = now_datetime.strftime('%Y-%m-%d')
# Default time: now
if shot_time is None:
shot_time = now_datetime.strftime('%H:%M')
# Default battery level: a random integer between 10 and 99
if battery is None:
battery = random.randint(10, 99)
logger.info(f'Generating screenshot for {name}, {stu_id}, {date}, battery {battery}, time {shot_time}')
# Open the template image
template_img = Image.open('./assets/template.png')
dw = ImageDraw.Draw(template_img)
# Draw the date, name and student ID
form_font = ImageFont.truetype('./assets/msyh.ttc', 43)
dw.text((92, 1279), date, fill=(20, 20, 20), font=form_font)
dw.text((92, 1604), name, fill=(20, 20, 20), font=form_font)
dw.text((92, 1939), stu_id, fill=(20, 20, 20), font=form_font)
# Draw the battery level and time
dw.text((1187, 29), str(battery), fill=(102, 102, 102), font=ImageFont.truetype('./assets/msyh.ttc', 31))
dw.text((1259, 23), shot_time, fill=(102, 102, 102), font=ImageFont.truetype('./assets/msyh.ttc', 42))
# Paste the "submitted successfully" popup
alert_img = Image.open('./assets/alert.png')
template_img.paste(alert_img, (260, 1460))
alert_img.close()
logger.info('Screenshot generated.')
return template_img
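# Illustrative usage (added; assumes the template and font assets referenced above
# exist under ./assets):
#     img = generate_screenshot('张三', '20190001', battery=85)
#     img.save('Screenshot_example.png')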
def upload_yqt_screenshot(
name: str,
stu_id: str,
dorm: str,
shot_time: datetime.datetime = None
) -> None:
"""生成并上传疫情通截图
:param name: 姓名
:param stu_id: 学号
:param dorm: 寝室号
:param shot_time: 截图时间,缺省值是今天 06:00:00 - 08:59:59 的一个随机数
:return: None
"""
logger.info(f'Preparing to upload the screenshot for {name}')
# Default screenshot time: a random time today between 06:00:00 and 08:59:59
if shot_time is None:
shot_time = datetime.datetime.now(tz=pytz.timezone(settings.TIMEZONE))
shot_time = shot_time.replace(
hour=random.randint(6, 8),
minute=random.randint(0, 59),
second=random.randint(0, 59)
)
# Generate the screenshot
screenshot_name = shot_time.strftime('Screenshot_%Y%m%d_%H%M%S_com.tencent.mm.png')
screenshot = generate_screenshot(
name,
stu_id,
date=shot_time.strftime('%Y-%m-%d'),
shot_time=shot_time.strftime('%H:%M')
)
screenshot_fp = BytesIO()
screenshot.save(screenshot_fp, format='png')
screenshot.close()
ret = requests.post(
settings.YQT_SCREENSHOT_UPLOAD_URL,
data={
'name': name,
'dorm': dorm
},
files={
'photo': (screenshot_name, screenshot_fp.getvalue(), 'image/png')
}
)
try:
ret_data = json.loads(ret.text)
if not isinstance(ret_data, dict):
ret_data = {}
except json.JSONDecodeError:
ret_data = {}
if ret.status_code == 200 and ret_data.get('code') == 0:
logger.info(f'Screenshot for {name} uploaded successfully')
else:
err_msg = f'Failed to upload the screenshot for {name}'
logger.error(err_msg)
raise RuntimeError(err_msg)
|
from transformers import pipeline
import tweepy as tw
import praw
import streamlit as st
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import random
import requests
#import pandas_datareader as pdr
from pandas import json_normalize
from alpha_vantage.timeseries import TimeSeries
# Utils
import joblib
pipe_lr = joblib.load(open("model/sentiment_classifier.pkl","rb"))
from operator import add
import altair as alt
# Fxn
def predict_emotions(docx):
results = pipe_lr.predict([docx])
return results[0]
def get_prediction_proba(docx):
results = pipe_lr.predict_proba([docx])
return results
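# Illustrative usage of the two helpers above (added; assumes the pickled model
# follows scikit-learn's predict / predict_proba interface):
#     label = predict_emotions("I love this!")       # e.g. "joy"
#     proba = get_prediction_proba("I love this!")   # class probabilities, shape (1, n_classes)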
emotions_emoji_dict = {"anger":"😠","disgust":"🤮", "fear":"😨😱", "happy":"🤗", "joy":"😂", "neutral":"😐", "sad":"😔", "sadness":"😔", "shame":"😳", "surprise":"😮"}
REDDITCLIENTID = os.environ['REDDIT_CLIENT_ID']
REDDITCLIENTSECRET = os.environ['REDDIT_CLIENT_SECRET']
USERAGENT = os.environ['USER_AGENT']
USERNAME = os.environ['USERNAME']
PASSWORD = os.environ['PASSWORD']
auth = tw.OAuthHandler(os.environ['API_KEY'], os.environ['API_KEY_SECRET'])
auth.set_access_token(os.environ['ACCESS_TOKEN'], os.environ['ACCESS_TOKEN_SECRET'])
api = tw.API(auth, wait_on_rate_limit=True)
# By default downloads the distilbert-base-uncased-finetuned-sst-2-english model
# Uses the DistilBERT architecture
classifier = pipeline('sentiment-analysis')
def home():
home_page = st.sidebar.radio('Welcome to our project!', ['Twitter', 'Reddit','AlphaVantage'])
if home_page == 'Twitter':
twitter()
elif home_page == 'Reddit':
reddit()
elif home_page == 'AlphaVantage':
alpha()
def alpha():
st.title('AlphaVantage API stock data analysis')
st.markdown('Fill the below details')
with st.form(key='form_input'):
st.write('Welcome to AlphaVantage API stock data analysis')
keyword=st.text_input('Please enter the name of the company you wish to get financial stock data for:')
date = st.date_input('Enter the date for which you would like the stock change analysis')
#date = st.date_input('Enter the date until when to fetch')
submit_button = st.form_submit_button(label = 'Fetch')
# aux = 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords='+keyword+'&apikey=<KEY>'
# if submit_button:
# av=requests.get(aux)
# data=av.json()
# st.write(data)
#confirm from
# ts = pdr.av.time_series.AVTimeSeriesReader(keyword, api_key='<KEY>')
# df = ts.read()
# df.index = pd.to_datetime(df.index, format='%Y-%m-%d')
date=str(date)
st.write(f'On Date {date}')
api_key = '<KEY>'
#date = '2021-11-22'
#confirm to
ts = TimeSeries(key = api_key, output_format= 'pandas')
data = ts.get_daily(keyword)
df = data[0]
df1 = df.loc[date]
o = df1.iloc[0]['1. open']
c = df1.iloc[0]['4. close']
percent_change = 100*(c - o)/o
st.write(f'Change in opening and closing value, in terms of percentage : {percent_change} ')
#dataframe[dataframe['Percentage'] >80]
def reddit():
st.title('Reddit Sentiment Analysis')
st.markdown('Fill the form')
with st.form(key='form_input'):
st.write('Welcome to Reddit Sentiment Analyser. Please click the Fetch button to analyse comments of the top post as of now')
#number_of_posts=st.number_input('Enter the number of latest posts(Maximum 10 posts)', min_value = 0, max_value = 10, value = 1)
submit_button = st.form_submit_button(label = 'Fetch')
#st.write('Nothing here to show. Mind your business -_-')
if submit_button:
reddit = praw.Reddit(client_id = REDDITCLIENTID, client_secret = REDDITCLIENTSECRET, user_agent = USERAGENT, username = USERNAME, password = PASSWORD)
subreddit=reddit.subreddit('wallstreetbets').hot(limit=1)
#subroutine to get the comment id
id_list=[]
for i in subreddit:
id_list.append(i.id)
post_id=id_list[0]
submission = reddit.submission(post_id)
post_title=submission.title
submission.comments.replace_more(limit=0)
comments_list=[]
for top_level_comments in submission.comments:
comments_list.append(top_level_comments.body)
#comments_list
#comments_list has 50+ comments; limit to ~25 to keep inference fast
comment_list=comments_list[1:25] #0th index is metadata, we don't want to confuse poor distilbert
emotion_list = [emotion for emotion in classifier(comment_list)]
emotion_label = [emotion['label'] for emotion in emotion_list]
emotion_score = [emotion['score'] for emotion in emotion_list]
label_list = [emotion_list[i]['label'] for i in range(len(emotion_list))]
df = pd.DataFrame(
list(zip(comment_list, emotion_label, emotion_score)),
columns =['Latest post on '+post_title, 'Sentiment', 'Score']
)
df
negative_count = (df['Sentiment'] == 'NEGATIVE').sum()
positive_count = (df['Sentiment'] == 'POSITIVE').sum()
st.write(f'Negative count : {negative_count} Positive count : {positive_count}')
count = [i for i in range(0,51,10)]
fig = plt.figure(figsize=(10,7))
sns.barplot(x='Sentiment', y='Score', data=df, order=['NEGATIVE','POSITIVE'])
st.pyplot(fig)
def twitter():
st.title('Twitter Sentiment Analysis')
st.markdown('Fill the form')
with st.form(key='form_input'):
search_word = st.text_input('Enter the word')
number_of_tweets = st.number_input('Enter the number of latest tweets(Maximum 50 tweets)', min_value = 0, max_value = 50, value = 1)
date_since = st.date_input('Enter the date until when to fetch')
submit_button = st.form_submit_button(label = 'Fetch')
if submit_button:
tweets = api.search_tweets(q=search_word, count = number_of_tweets, result_type='mixed', until = date_since, lang='en')
tweet_list = [tweet.text for tweet in tweets]
tweet_location = [tweet.user.location for tweet in tweets]
emotion_list = [emotion for emotion in classifier(tweet_list)]
sentiment_list = [predict_emotions(tweet) for tweet in tweet_list]
sentiment_proba_list = [get_prediction_proba(tweet) for tweet in tweet_list]
total_sentiment = sentiment_proba_list[0]
for i in range(1,len(sentiment_proba_list)):
total_sentiment[0] = list( map(add, total_sentiment[0], sentiment_proba_list[i][0]) )
avg_sentiment = [[sentiment/len(sentiment_proba_list) for sentiment in total_sentiment[0]]]
emotion_label = [emotion['label'] for emotion in emotion_list]
emotion_score = [emotion['score'] for emotion in emotion_list]
label_list = [emotion_list[i]['label'] for i in range(len(emotion_list))]
df = pd.DataFrame(
list(zip(tweet_list, emotion_label, emotion_score,sentiment_list)),
columns =['Latest '+str(number_of_tweets)+ ' tweets'+' on '+search_word, 'Emotion', 'Score', "Sentiment"]
)
df
negative_count = (df['Emotion'] == 'NEGATIVE').sum()
positive_count = (df['Emotion'] == 'POSITIVE').sum()
st.write(f'Negative count : {negative_count} Positive count : {positive_count}')
count = [i for i in range(0,51,10)]
fig = plt.figure(figsize=(10,7))
sns.barplot(x='Emotion', y='Score', data=df, order=['NEGATIVE','POSITIVE'])
st.pyplot(fig)
st.success("Prediction Probability")
# st.write(probability)
proba_df = pd.DataFrame(avg_sentiment,columns=pipe_lr.classes_)
# st.write(proba_df.T)
proba_df_clean = proba_df.T.reset_index()
proba_df_clean.columns = ["emotions","probability"]
fig = alt.Chart(proba_df_clean).mark_bar().encode(x='emotions',y='probability',color='emotions')
st.altair_chart(fig,use_container_width=True)
if __name__=='__main__':
home() |
#!/usr/bin/python
#
# Copyright 2008, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code sample creates a new local business ad given an existing ad
group. To create an ad group, you can run add_ad_group.py."""
import base64
import SOAPpy
# Provide AdWords login information.
email = 'INSERT_LOGIN_EMAIL_HERE'
password = '<PASSWORD>'
client_email = 'INSERT_CLIENT_LOGIN_EMAIL_HERE'
useragent = 'INSERT_COMPANY_NAME: AdWords API Python Sample Code'
developer_token = 'INSERT_DEVELOPER_TOKEN_HERE'
application_token = 'INSERT_APPLICATION_TOKEN_HERE'
# Define SOAP headers.
headers = SOAPpy.Types.headerType()
headers.email = email
headers.password = password
headers.clientEmail = client_email
headers.useragent = useragent
headers.developerToken = developer_token
headers.applicationToken = application_token
# Set up service connection. To view XML request/response, change value of
# ad_service.config.debug to 1. To send requests to production
# environment, replace "sandbox.google.com" with "adwords.google.com".
namespace = 'https://sandbox.google.com/api/adwords/v12'
ad_service = SOAPpy.SOAPProxy(namespace + '/AdService',
header=headers)
ad_service.config.debug = 0
# Find similar businesses in Local Business Center.
business_name = '<NAME>'
business_address = '89 Charlwood St, London, SW1V 4PB'
business_country_code = 'GB'
in_local_business_center = 0
# Get business from Local Business Center or find similar business.
if in_local_business_center == 1:
businesses = ad_service.getMyBusinesses()
else:
businesses = ad_service.findBusinesses(business_name,
business_address,
business_country_code)
# Convert to a list if we get back a single object.
if len(businesses) > 0 and not isinstance(businesses, list):
businesses = [businesses]
# Get business key (default to None so the check below does not fail when no business matches).
business_key = None
for business in businesses:
name = business['name']
address = business['address']
country_code = business['countryCode']
if (business_name.find(name) > -1 or business_address.find(address) > -1 or
business_country_code.find(country_code) > -1):
business_key = business['key']
# Create new local business ad structure.
if business_key is not None:
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
business_image = open('INSERT_BUSINESS_IMAGE_PATH_HERE', 'r').read()
custom_icon = open('INSERT_CUSTOM_ICON_PATH_HERE', 'r').read()
local_business_ad = {
'adGroupId': SOAPpy.Types.untypedType(ad_group_id),
'adType': SOAPpy.Types.untypedType('LocalBusinessAd'),
'businessImage':
{'data': SOAPpy.Types.untypedType(base64.encodestring(business_image))},
'businessKey': business_key,
'countryCode': SOAPpy.Types.untypedType('GB'),
'customIcon':
{'data': SOAPpy.Types.untypedType(base64.encodestring(custom_icon))},
'description1': 'Choose from our delicious range now',
'description2': 'Pre-order or delivered to your door',
'destinationUrl': 'http://www.dominos.co.uk/',
'displayUrl': 'www.dominos.co.uk'
}
# Check new ad for policy violations before adding it.
language_target = {'languages': ['en']}
geo_target = {'countryTargets': {'countries': ['GB']}}
errors = ad_service.checkAds([local_business_ad], language_target, geo_target)
# Convert to a list if we get back a single object.
if len(errors) > 0 and not isinstance(errors, list):
errors = [errors]
# Add local business ad if there are no policy violations.
if len(errors) == 0:
ads = ad_service.addAds([local_business_ad])
# Convert to a list if we get back a single object.
if len(ads) > 0 and not isinstance(ads, list):
ads = [ads]
# Display new local business ad.
for ad in ads:
print 'New local business ad with name "%s" and ' \
'id "%s" was created.' % (ad['businessName'], ad['id'])
else:
print 'New local business ad was not created due to the following policy ' \
'violations:'
for error in errors:
print ' Detail: %s\nisExemptable: %s' % \
(error['detail'], error['isExemptable'])
print
else:
print 'New local business ad was not created because business key was not ' \
'found.'
|
# -*- coding: utf-8 -*-
import proxy
import urllib
import requests
from django.conf import settings
from django.http import HttpResponse
#import xml.etree.ElementTree as ET
from lxml import etree as ET
from django.contrib.auth.models import User
from geoprisma.utils import isAuthorized
from geoprisma.models import Datastore
from django.db.models import Q
class WMSProxyFactory(object):
"""
A WMS proxy factory that returns the appropriate WMS proxy for the requested operation.
"""
WMS_OP_GETCAPABILITIES = 1
WMS_OP_GETMAP = 2
WMS_OP_GETLEGENDGRAPHIC = 3
WMS_OP_GETFEATUREINFO = 4
def getWMSProxy(self, pobjService, prequest):
"""
Return the proxy matching the requested operation
Args:
pobjService: the service object
prequest: the request
Returns:
A WMS proxy
"""
iOPType = self.getOperationFromGET(prequest)
objWMSProxy = None
if iOPType == self.WMS_OP_GETCAPABILITIES:
objWMSProxy = WMSGetCapabilityProxy(pobjService, prequest)
elif iOPType == self.WMS_OP_GETMAP:
objWMSProxy = WMSProxy(pobjService, prequest)
elif iOPType == self.WMS_OP_GETLEGENDGRAPHIC:
objWMSProxy = WMSProxy(pobjService, prequest)
elif iOPType == self.WMS_OP_GETFEATUREINFO:
objWMSProxy = WMSProxy(pobjService, prequest)
if objWMSProxy is None:
raise Exception("Proxy method not handled.")
return objWMSProxy
def getOperationFromGET(self, prequest):
"""
Extract the operation from the URL
Args:
prequest: the request containing the URL
Returns:
the operation
"""
strRequest = ''
for (strKey, strValue) in prequest.GET.iteritems():
if strKey.upper() == 'REQUEST':
strRequest = strValue
break
if strRequest == 'GetCapabilities':
return self.WMS_OP_GETCAPABILITIES
elif strRequest == 'GetMap':
return self.WMS_OP_GETMAP
elif strRequest == 'GetLegendGraphic':
return self.WMS_OP_GETLEGENDGRAPHIC
elif strRequest == 'GetFeatureInfo':
return self.WMS_OP_GETFEATUREINFO
return None
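# Illustrative usage (added; hypothetical — the service object and the Django
# request come from the surrounding view code):
#     factory = WMSProxyFactory()
#     wms_proxy = factory.getWMSProxy(service, request)
#     response = wms_proxy.process()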
class WMSProxy(proxy.Proxy):
"""
WMSProxy class, derived from the base proxy class
"""
def getAction(self):
return self.CRUD_READ
def setResources(self, pobjArrayResources):
"""
Set the proxy's resources
Args:
pobjArrayResources: the resource objects
"""
self.m_objResource = pobjArrayResources
def process(self):
"""
Build the response to return
Returns:
HttpResponse
"""
excluded_headers = ('connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'content-encoding',
'content-length',
'upgrade')
if self.m_objRequest.method == "POST":
strServiceURL = self.m_objService.source
strParams = self.getRequestParams()
requestUrl = requests.post(strServiceURL, data=strParams)
else:
strRequestURL = self.addParam(self.m_objService.source)
requestUrl = requests.get(strRequestURL)
responce = HttpResponse(requestUrl)
for header in requestUrl.headers:
if header not in excluded_headers:
responce[header] = requestUrl.headers.get(header)
return responce
def getLayers(self):
"""
Return the requested layers
Returns:
A list of layer names
"""
objArrayLayer = []
for (strKey, strValue) in self.m_objRequest.GET.iteritems():
if strKey.upper() == "LAYERS":
objArrayLayer = self.m_objRequest.GET.get(strKey).split(",")
return objArrayLayer
def getCaching(self):
pass
class WMSGetCapabilityProxy(proxy.Proxy):
"""
WMSGetCapabilityProxy class, which only handles GetCapabilities requests
"""
def getAction(self):
return self.CRUD_READ
def process(self):
"""
Fetch the XML returned by MapServer and trim it according to the user's permissions.
Each WMS version is handled separately.
Returns:
HttpResponse
"""
excluded_headers = ('connection',
'keep-alive',
'proxy-authenticate',
'proxy-authorization',
'te',
'trailers',
'transfer-encoding',
'upgrade',
'content-encoding',
'content-length')
url = self.addParam(self.m_objService.source)
requestUrl = requests.get(url)
objXml = ET.fromstring(requestUrl.text.encode("utf-8"))
docinfo = objXml.getroottree().docinfo
wmsversion = objXml.get("version")
user = User.objects.get(email=self.m_objRequest.user)
# Sandbox handling
baseUrl = ""
if hasattr(settings, 'DEBUG_APP_URL') and settings.DEBUG_APP_URL:
baseUrl = settings.DEBUG_APP_URL
onlineResourceUrl = "http://"+self.m_objRequest.get_host()+baseUrl+"/gp/proxy/"+self.m_objService.slug+""
def changeUrl(url):
"""
Rewrite a URL from the XML so that it points to the geoprisma proxy URL
"""
splitUrl = url.split("&", 1)
newUrl = onlineResourceUrl+"?"+splitUrl[1]
return newUrl
def getAndValidateRes(layer, removeList):
"""
Fetch the resources of a datastore and check the user's permissions
Args:
layer: the layer element used to locate the datastore
removeList: a list to which unauthorized layers are appended
"""
if wmsversion == "1.0.0" or wmsversion == "1.1.0" or wmsversion == "1.1.1":
layerName = layer.find('Name')
else:
layerName = layer.find("{http://www.opengis.net/wms}Name")
try:
datastore = Datastore.objects.get(service=self.m_objService, layers=layerName.text)
dataResourceList = datastore.resource_set.all()
for resource in dataResourceList:
if isAuthorized(user, resource.name, "read"):
break
else:
removeList.append(layer)
except Datastore.DoesNotExist:
removeList.append(layer)
#WMS VERSION 1.0.0
if wmsversion == "1.0.0":
for elem in objXml:
if elem.tag == "Service":
onlineRes = elem.find("OnlineResource")
onlineRes.text = onlineResourceUrl
if elem.tag == "Capability":
removeCapabilityList = list()
for capability in elem:
if capability.tag == "Request":
for request in capability:
httptag = request.find("DCPType").find("HTTP")
for method in httptag:
method.set("onlineResource", onlineResourceUrl)
if capability.tag == "Layer":
removeGroupList = list()
for layerGroup in capability:
removeList = list()
if layerGroup.tag == "Layer":
layerList = layerGroup.findall("Layer")
for layer in layerList:
if layer.tag == "Layer":
getAndValidateRes(layer, removeList)
for layer in removeList:
layerGroup.remove(layer)
filtredLayerList = layerGroup.findall("Layer")
if filtredLayerList.__len__() == 0:
getAndValidateRes(layerGroup, removeGroupList)
else:
for layer in filtredLayerList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for layer in removeGroupList:
try:
capability.remove(layer)
except ValueError:
pass
capabilityList = capability.findall("Layer")
if capabilityList.__len__() == 0:
getAndValidateRes(capability, removeCapabilityList)
else:
for layer in capabilityList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for capability in removeCapabilityList:
elem.remove(capability)
#WMS VERSION 1.1.0
if wmsversion == "1.1.0":
for elem in objXml:
if elem.tag == "Service":
onlineRes = elem.find("OnlineResource")
onlineRes.text = onlineResourceUrl
if elem.tag == "Capability":
removeCapabilityList = list()
for capability in elem:
if capability.tag == "Request":
for request in capability:
httptag = request.find("DCPType").find("HTTP")
for method in httptag:
onlineRes = method.find("OnlineResource")
onlineRes.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if capability.tag == "Layer":
removeGroupList = list()
for layerGroup in capability:
removeList = list()
if layerGroup.tag == "Layer":
layerList = layerGroup.findall("Layer")
for layer in layerList:
if layer.tag == "Layer":
getAndValidateRes(layer, removeList)
for layer in removeList:
layerGroup.remove(layer)
filtredLayerList = layerGroup.findall("Layer")
if filtredLayerList.__len__() == 0:
getAndValidateRes(layerGroup, removeGroupList)
else:
for layer in filtredLayerList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for layer in removeGroupList:
try:
capability.remove(layer)
except ValueError:
pass
capabilityList = capability.findall("Layer")
if capabilityList.__len__() == 0:
getAndValidateRes(capability, removeCapabilityList)
else:
for layer in capabilityList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for capability in removeCapabilityList:
elem.remove(capability)
# WMS VERSION 1.1.1
elif wmsversion == "1.1.1":
for elem in objXml:
if elem.tag == "Service":
onlineRes = elem.find("OnlineResource")
onlineRes.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if elem.tag == "Capability":
removeCapabilityList = list()
for capability in elem:
if capability.tag == "Request":
for request in capability:
httptag = request.find("DCPType").find("HTTP")
for method in httptag:
onlineRes = method.find("OnlineResource")
onlineRes.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if capability.tag == "Layer":
removeGroupList = list()
for layerGroup in capability:
removeList = list()
if layerGroup.tag == "Layer":
layerList = layerGroup.findall("Layer")
for layer in layerList:
if layer.tag == "Layer":
getAndValidateRes(layer, removeList)
for layer in removeList:
layerGroup.remove(layer)
filtredLayerList = layerGroup.findall("Layer")
if filtredLayerList.__len__() == 0:
getAndValidateRes(layerGroup, removeGroupList)
else:
for layer in filtredLayerList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for layer in removeGroupList:
try:
capability.remove(layer)
except ValueError:
pass
capabilityList = capability.findall("Layer")
if capabilityList.__len__() == 0:
getAndValidateRes(capability, removeCapabilityList)
else:
for layer in capabilityList:
layerStyle = layer.find("Style")
if layerStyle is not None:
legendUrl = layerStyle.find("LegendURL")
legendOnlineRes = legendUrl.find("OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for capability in removeCapabilityList:
elem.remove(capability)
#WMS VERSION 1.3.0
elif wmsversion == "1.3.0":
schemaLocation = objXml.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
schemaLocationList = schemaLocation.split(" ")
schemaLocationList[-1] = changeUrl(schemaLocationList[-1])
objXml.set("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation", " ".join(schemaLocationList))
for elem in objXml:
if elem.tag == "{http://www.opengis.net/wms}Service":
onlineRes = elem.find("{http://www.opengis.net/wms}OnlineResource")
onlineRes.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if elem.tag == "{http://www.opengis.net/wms}Capability":
removeCapabilityList = list()
for capability in elem:
if capability.tag == "{http://www.opengis.net/wms}Request":
for request in capability:
httptag = request.find("{http://www.opengis.net/wms}DCPType").find("{http://www.opengis.net/wms}HTTP")
for method in httptag:
onlineRes = method.find("{http://www.opengis.net/wms}OnlineResource")
onlineRes.set("{http://www.w3.org/1999/xlink}href", onlineResourceUrl)
if capability.tag == "{http://www.opengis.net/wms}Layer":
removeGroupList = list()
for layerGroup in capability:
removeList = list()
if layerGroup.tag == "{http://www.opengis.net/wms}Layer":
layerList = layerGroup.findall("{http://www.opengis.net/wms}Layer")
for layer in layerList:
if layer.tag == "{http://www.opengis.net/wms}Layer":
getAndValidateRes(layer, removeList)
for layer in removeList:
layerGroup.remove(layer)
filtredLayerList = layerGroup.findall("{http://www.opengis.net/wms}Layer")
if filtredLayerList.__len__() == 0:
getAndValidateRes(layerGroup, removeGroupList)
else:
for layer in filtredLayerList:
layerStyle = layer.find("{http://www.opengis.net/wms}Style")
if layerStyle is not None:
legendUrl = layerStyle.find("{http://www.opengis.net/wms}LegendURL")
legendOnlineRes = legendUrl.find("{http://www.opengis.net/wms}OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for layer in removeGroupList:
try:
capability.remove(layer)
except ValueError:
pass
capabilityList = capability.findall("{http://www.opengis.net/wms}Layer")
if capabilityList.__len__() == 0:
getAndValidateRes(capability, removeCapabilityList)
else:
for layer in capabilityList:
layerStyle = layer.find("{http://www.opengis.net/wms}Style")
if layerStyle is not None:
legendUrl = layerStyle.find("{http://www.opengis.net/wms}LegendURL")
legendOnlineRes = legendUrl.find("{http://www.opengis.net/wms}OnlineResource")
newLegendOnlineResUrl = changeUrl(legendOnlineRes.get("{http://www.w3.org/1999/xlink}href"))
legendOnlineRes.set("{http://www.w3.org/1999/xlink}href", newLegendOnlineResUrl)
for capability in removeCapabilityList:
elem.remove(capability)
responce = HttpResponse(ET.tostring(objXml, xml_declaration=True, encoding=docinfo.encoding))
#responce = HttpResponse(requestUrl)
for header in requestUrl.headers:
if header not in excluded_headers:
responce[header] = requestUrl.headers.get(header)
responce['content-type'] = "text/xml"
return responce
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# from tinymce.models import HTMLField
# from pyuploadcare.dj.models import ImageField
# from django.db.models import Avg, Max, Min
# from pyuploadcare.dj.forms import FileWidget
from django.conf import settings
import numpy as np
import datetime as dt
class Profile(models.Model):
prof_pic = models.ImageField('avatar', default='default.jpg')
bio = models.TextField(max_length=140, blank=True, default='')
user = models.OneToOneField(User,on_delete=models.CASCADE, primary_key=True)
def save_profile(self):
self.save()
@classmethod
def search_profile(cls, name):
profile = Profile.objects.filter(user__username__icontains = name)
return profile
@classmethod
def get_by_id(cls, id):
profile = Profile.objects.get(user = id)
return profile
@classmethod
def filter_by_id(cls, id):
profile = Profile.objects.filter(user = id).first()
return profile
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
class Image(models.Model):
photo = models.ImageField(upload_to='pics', height_field=None, width_field=None, max_length=None)
profile = models.ForeignKey(User, on_delete=models.CASCADE)
def save_image(self):
self.save()
@classmethod
def get_profile_images(cls, profile):
images = Image.objects.filter(profile__pk = profile)
return images
class Project(models.Model):
'''
class that contains Project properties
'''
title = models.CharField(max_length=40)
image = models.ImageField(upload_to='pics', height_field=None, width_field=None, max_length=None)
description = models.TextField()
link = models.URLField(max_length=70)
user = models.ForeignKey(
User, on_delete=models.CASCADE, default="", blank=True, null=True)
profile = models.ForeignKey(
Profile, on_delete=models.CASCADE, default="", blank=True, null=True)
rating = models.TextField()
post_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
def save_project(self):
self.save()
def update_project(self):
self.update()
def delete_project(self):
self.delete()
@classmethod
def search_project(cls, title):
project = cls.objects.filter(title__icontains=title)
return project
@classmethod
def get_posted_projects(cls):
projects = Project.objects.all()
return projects
@classmethod
def get_projects_on_profile(cls, profile):
projects = Project.objects.filter(profile__pk=profile)
return projects
@classmethod
def get_project_by_id(cls, id):
project = Project.objects.filter(user_id=id).all()
return project
def average_design(self):
total_ratings = list(
map(lambda x: x.rating, self.designrating_set.all()))
return np.mean(total_ratings)
def average_usability(self):
total_ratings = list(
map(lambda x: x.rating, self.usabilityrating_set.all()))
return np.mean(total_ratings)
def average_content(self):
total_ratings = list(
map(lambda x: x.rating, self.contentrating_set.all()))
return np.mean(total_ratings)
def __str__(self):
return self.title
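# Illustrative usage of the rating averages above (added; hypothetical data):
#     project = Project.objects.get(pk=1)
#     print(project.average_design(), project.average_usability(), project.average_content())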
class DesignRating(models.Model):
CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10')
)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
comment = models.CharField(max_length=200)
rating = models.IntegerField(choices=CHOICES, default=0)
class UsabilityRating(models.Model):
CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10')
)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
comment = models.CharField(max_length=200)
rating = models.IntegerField(choices=CHOICES, default=0)
class ContentRating(models.Model):
CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10')
)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
pub_date = models.DateTimeField(auto_now_add=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
comment = models.CharField(max_length=200)
rating = models.IntegerField(choices=CHOICES, default=0) |
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import json
import os
import pickle
from functools import partial
from pandapower.io_utils import PPJSONEncoder, to_dict_with_coord_transform, \
get_raw_data_from_pickle, transform_net_with_df_and_geo, PPJSONDecoder
from pandapower.io_utils import pp_hook, encrypt_string, decrypt_string
from pandapipes.io.convert_format import convert_format
from pandapipes.io.io_utils import isinstance_partial, FromSerializableRegistryPpipe
from pandapipes.pandapipes_net import pandapipesNet
def to_pickle(net, filename):
"""
Saves a pandapipes Network with the pickle library.
:param net: The pandapipes Network to save.
:type net: pandapipesNet
:param filename: The absolute or relative path to the output file or a writable file-like object
:type filename: str, file-object
:return: No output.
:Example:
>>> pandapipes.to_pickle(net, os.path.join("C:", "example_folder", "example1.p")) # absolute path
>>> pandapipes.to_pickle(net, "example2.p") # relative path
"""
if hasattr(filename, 'write'):
pickle.dump(dict(net), filename, protocol=2)
return
if not filename.endswith(".p"):
raise Exception("Please use .p to save pandapipes networks!")
save_net = to_dict_with_coord_transform(net, ["junction_geodata"], ["pipe_geodata"])
with open(filename, "wb") as f:
pickle.dump(save_net, f, protocol=2) # use protocol 2 for py2 / py3 compatibility
def to_json(net, filename=None, encryption_key=None):
"""
Saves a pandapipes Network in JSON format. The index columns of all pandas DataFrames will be
saved in ascending order. Net elements whose name begins with "_" (internal elements) will not
be saved. Std types will also not be saved.
:param net: The pandapipes Network to save.
:type net: pandapipesNet
:param filename: The absolute or relative path to the output file or a writable file-like \
object. If None, a JSON string is returned.
:type filename: str, file-object, default None
:param encryption_key: If given, the pandapipes network is stored as an encrypted json string
:type encryption_key: str, default None
:return: JSON string of the Network (only if filename is None)
:Example:
>>> pandapipes.to_json(net, "example.json")
"""
json_string = json.dumps(net, cls=PPJSONEncoder, indent=2, isinstance_func=isinstance_partial)
if encryption_key is not None:
json_string = encrypt_string(json_string, encryption_key)
if filename is None:
return json_string
if hasattr(filename, 'write'):
filename.write(json_string)
else:
with open(filename, "w") as fp:
fp.write(json_string)
def from_pickle(filename):
"""
Load a pandapipes format Network from pickle file.
:param filename: The absolute or relative path to the input file or file-like object
:type filename: str, file-object
:return: net - The pandapipes Network which was saved as pickle
:rtype: pandapipesNet
:Example:
>>> net1 = pandapipes.from_pickle(os.path.join("C:", "example_folder", "example1.p"))
>>> net2 = pandapipes.from_pickle("example2.p") #relative path
"""
net = pandapipesNet(get_raw_data_from_pickle(filename))
transform_net_with_df_and_geo(net, ["junction_geodata"], ["pipe_geodata"])
return net
def from_json(filename, convert=True, encryption_key=None):
"""
Load a pandapipes network from a JSON file or string.
The index of the returned network is not necessarily in the same order as the original network.
Index columns of all pandas DataFrames are sorted in ascending order.
:param filename: The absolute or relative path to the input file or file-like object
:type filename: str, file-object
:param convert: whether or not to convert the format from earlier versions
:type convert: bool
:param encryption_key: if given, key to decrypt an encrypted pandapipes network
:type encryption_key: str
:return: net - The pandapipes network that was saved as JSON
:rtype: pandapipesNet
:Example:
>>> net = pandapipes.from_json("example.json")
"""
if hasattr(filename, 'read'):
json_string = filename.read()
elif not os.path.isfile(filename):
raise UserWarning("File {} does not exist!!".format(filename))
else:
with open(filename) as fp:
json_string = fp.read()
return from_json_string(json_string, convert=convert, encryption_key=encryption_key)
def from_json_string(json_string, convert=False, encryption_key=None):
"""
Load a pandapipes network from a JSON string.
The index of the returned network is not necessarily in the same order as the original network.
Index columns of all pandas DataFrames are sorted in ascending order.
:param json_string: The JSON string representation of the network
:type json_string: str
:param convert: whether or not to convert the format from earlier versions
:type convert: bool
:param encryption_key: if given, key to decrypt an encrypted pandapipes network
:type encryption_key: str
:return: net - The pandapipes network that was contained in the JSON string
:rtype: pandapipesNet
:Example:
>>> net = pandapipes.from_json_string(json_str)
"""
if encryption_key is not None:
json_string = decrypt_string(json_string, encryption_key)
net = json.loads(json_string, cls=PPJSONDecoder, object_hook=partial(pp_hook,
registry_class=FromSerializableRegistryPpipe))
if convert:
convert_format(net)
return net
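# Hedged usage sketch (not part of the module above): an encrypted round trip using
# only the to_json / from_json_string signatures shown above; the key and the `net`
# object are placeholders.
# >>> json_str = to_json(net, filename=None, encryption_key="my-secret-key")
# >>> restored = from_json_string(json_str, convert=True, encryption_key="my-secret-key")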
|
<gh_stars>1-10
import unittest
from unittest import mock
import numpy as np
from smac.intensification.abstract_racer import RunInfoIntent
from smac.intensification.parallel_scheduling import ParallelScheduler
from smac.runhistory.runhistory import RunInfo, RunValue
from smac.tae import StatusType
def mock_ranker(sh):
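# Test stand-in for _get_intensifier_ranking: rank a successive-halving instance by
# (stage, number of launched runs); the tests below expect higher tuples to be scheduled first.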
return sh.stage, len(sh.run_tracker)
class TestParallelScheduler(unittest.TestCase):
def test_sort_instances_by_stage(self):
"""Ensures that we prioritize the more advanced stage iteration"""
scheduler = ParallelScheduler(
stats=None,
traj_logger=None,
instances=[1, 2, 3],
rng=np.random.RandomState(12345), deterministic=True,
)
scheduler._get_intensifier_ranking = mock_ranker
def add_sh_mock(stage, config_inst_pairs):
sh = mock.Mock()
sh.run_tracker = []
for i in range(config_inst_pairs):
sh.run_tracker.append((i, i, i))
sh.stage = stage
return sh
# Add more SH to make testing interesting
instances = {}
instances[0] = add_sh_mock(stage=1, config_inst_pairs=6)
instances[1] = add_sh_mock(stage=1, config_inst_pairs=2)
# We only have two instances in the same stage.
# In this case, we want to prioritize the one with more launched runs,
# that is, instance 0
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[0, 1]
)
# One more instance comparison to be extra safe
instances[2] = add_sh_mock(stage=1, config_inst_pairs=7)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[2, 0, 1]
)
# Now let us add a run in a more advanced stage
instances[3] = add_sh_mock(stage=2, config_inst_pairs=1)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[3, 2, 0, 1]
)
# Move instance 1 to the most advanced stage
instances[1] = add_sh_mock(stage=4, config_inst_pairs=1)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[1, 3, 2, 0]
)
# Add a new instance with no runs yet
instances[4] = add_sh_mock(stage=0, config_inst_pairs=0)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[1, 3, 2, 0, 4]
)
# Move instance 4 to stage 4 as well, but with fewer launched runs than instance 1
instances[4] = add_sh_mock(stage=4, config_inst_pairs=0)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[1, 4, 3, 2, 0]
)
# And lastly 0 -> stage 4
instances[0] = add_sh_mock(stage=4, config_inst_pairs=0)
self.assertEqual(
list(scheduler._sort_instances_by_stage(instances)),
[1, 0, 4, 3, 2]
)
def test_process_results(self):
"""Ensures that the results are processed by the pertinent intensifer,
based on the source id"""
scheduler = ParallelScheduler(
stats=None,
traj_logger=None,
instances=[1, 2, 3],
rng=np.random.RandomState(12345), deterministic=True,
)
scheduler.intensifier_instances = {
0: mock.Mock(),
1: mock.Mock(),
2: mock.Mock(),
}
run_info = RunInfo(
config=None,
instance=0,
instance_specific="0",
cutoff=None,
seed=0,
capped=False,
budget=0.0,
source_id=2,
)
result = RunValue(
cost=1,
time=0.5,
status=StatusType.SUCCESS,
starttime=1,
endtime=2,
additional_info={}
)
scheduler.process_results(run_info=run_info, result=result, incumbent=None,
run_history=None, time_bound=None)
self.assertIsNone(scheduler.intensifier_instances[0].process_results.call_args)
self.assertIsNone(scheduler.intensifier_instances[1].process_results.call_args)
self.assertEqual(scheduler.intensifier_instances[2].process_results.call_args[1]['run_info'],
run_info)
def test_get_next_run_wait(self):
"""Makes sure we wait if all intensifiers are busy, and no new instance got added.
This test the case that number of workers are equal to number of instances
"""
scheduler = ParallelScheduler(
stats=None,
traj_logger=None,
instances=[1, 2, 3],
rng=np.random.RandomState(12345), deterministic=True,
)
scheduler._get_intensifier_ranking = mock_ranker
scheduler.intensifier_instances = {0: mock.Mock()}
scheduler.intensifier_instances[0].get_next_run.return_value = (RunInfoIntent.WAIT, None)
scheduler.intensifier_instances[0].stage = 0
scheduler.intensifier_instances[0].run_tracker = ()
with unittest.mock.patch(
'smac.intensification.parallel_scheduling.ParallelScheduler._add_new_instance'
) as add_new_instance:
add_new_instance.return_value = False
intent, run_info = scheduler.get_next_run(
challengers=None, incumbent=None, chooser=None,
run_history=None, repeat_configs=False,
num_workers=1
)
self.assertEqual(intent, RunInfoIntent.WAIT)
def test_get_next_run_add_instance(self):
"""Makes sure we add an instance only when all other instances are waiting,
This happens when n_workers greater than the number of instances
"""
with unittest.mock.patch(
'smac.intensification.parallel_scheduling.ParallelScheduler._add_new_instance'
) as add_new_instance:
scheduler = ParallelScheduler(
stats=None,
traj_logger=None,
instances=[1, 2, 3],
rng=np.random.RandomState(12345), deterministic=True,
)
def instance_added(args):
source_id = len(scheduler.intensifier_instances)
scheduler.intensifier_instances[source_id] = mock.Mock()
scheduler.intensifier_instances[source_id].get_next_run.return_value = (
RunInfoIntent.RUN,
None
)
return True
add_new_instance.side_effect = instance_added
scheduler._get_intensifier_ranking = mock_ranker
scheduler.intensifier_instances = {0: mock.Mock()}
scheduler.intensifier_instances[0].get_next_run.return_value = (RunInfoIntent.WAIT, None)
scheduler.intensifier_instances[0].stage = 0
scheduler.intensifier_instances[0].run_tracker = ()
intent, run_info = scheduler.get_next_run(
challengers=None, incumbent=None, chooser=None,
run_history=None, repeat_configs=False,
num_workers=1
)
self.assertEqual(len(scheduler.intensifier_instances), 2)
self.assertEqual(intent, RunInfoIntent.RUN)
if __name__ == "__main__":
unittest.main()
|
wrong_input = [
"IL",
"IC",
"ID",
"IM",
"VV",
"VX",
"VL",
"VC",
"VD",
"VM",
"XD",
"XM",
"LL",
"LC",
"LD",
"LM",
"DD",
"DM",
"IIV",
"IIX",
"IIL",
"IIC",
"IID",
"IIM",
"IVI",
"IVV",
"IVX",
"IVL",
"IVC",
"IVD",
"IVM",
"IXI",
"IXV",
"IXX",
"IXL",
"IXC",
"IXD",
"IXM",
"ILI",
"ILV",
"ILX",
"ILL",
"ILC",
"ILD",
"ILM",
"ICI",
"ICV",
"ICX",
"ICL",
"ICC",
"ICD",
"ICM",
"IDI",
"IDV",
"IDX",
"IDL",
"IDC",
"IDD",
"IDM",
"IMI",
"IMV",
"IMX",
"IML",
"IMC",
"IMD",
"IMM",
"VIV",
"VIX",
"VIL",
"VIC",
"VID",
"VIM",
"VVI",
"VVV",
"VVX",
"VVL",
"VVC",
"VVD",
"VVM",
"VXI",
"VXV",
"VXX",
"VXL",
"VXC",
"VXD",
"VXM",
"VLI",
"VLV",
"VLX",
"VLL",
"VLC",
"VLD",
"VLM",
"VCI",
"VCV",
"VCX",
"VCL",
"VCC",
"VCD",
"VCM",
"VDI",
"VDV",
"VDX",
"VDL",
"VDC",
"VDD",
"VDM",
"VMI",
"VMV",
"VMX",
"VML",
"VMC",
"VMD",
"VMM",
"XIL",
"XIC",
"XID",
"XIM",
"XVV",
"XVX",
"XVL",
"XVC",
"XVD",
"XVM",
"XXL",
"XXC",
"XXD",
"XXM",
"XLX",
"XLL",
"XLC",
"XLD",
"XLM",
"XCX",
"XCL",
"XCC",
"XCD",
"XCM",
"XDI",
"XDV",
"XDX",
"XDL",
"XDC",
"XDD",
"XDM",
"XMI",
"XMV",
"XMX",
"XML",
"XMC",
"XMD",
"XMM",
"LIL",
"LIC",
"LID",
"LIM",
"LVV",
"LVX",
"LVL",
"LVC",
"LVD",
"LVM",
"LXL",
"LXC",
"LXD",
"LXM",
"LLI",
"LLV",
"LLX",
"LLL",
"LLC",
"LLD",
"LLM",
"LCI",
"LCV",
"LCX",
"LCL",
"LCC",
"LCD",
"LCM",
"LDI",
"LDV",
"LDX",
"LDL",
"LDC",
"LDD",
"LDM",
"LMI",
"LMV",
"LMX",
"LML",
"LMC",
"LMD",
"LMM",
"CIL",
"CIC",
"CID",
"CIM",
"CVV",
"CVX",
"CVL",
"CVC",
"CVD",
"CVM",
"CXD",
"CXM",
"CLL",
"CLC",
"CLD",
"CLM",
"CCD",
"CCM",
"CDC",
"CDD",
"CDM",
"CMC",
"CMD",
"CMM",
"DIL",
"DIC",
"DID",
"DIM",
"DVV",
"DVX",
"DVL",
"DVC",
"DVD",
"DVM",
"DXD",
"DXM",
"DLL",
"DLC",
"DLD",
"DLM",
"DCD",
"DCM",
"DDI",
"DDV",
"DDX",
"DDL",
"DDC",
"DDD",
"DDM",
"DMI",
"DMV",
"DMX",
"DML",
"DMC",
"DMD",
"DMM",
"MIL",
"MIC",
"MID",
"MIM",
"MVV",
"MVX",
"MVL",
"MVC",
"MVD",
"MVM",
"MXD",
"MXM",
"MLL",
"MLC",
"MLD",
"MLM",
"MDD",
"MDM",
"IIII",
"IIIV",
"IIIX",
"IIIL",
"IIIC",
"IIID",
"IIIM",
"IIVI",
"IIVV",
"IIVX",
"IIVL",
"IIVC",
"IIVD",
"IIVM",
"IIXI",
"IIXV",
"IIXX",
"IIXL",
"IIXC",
"IIXD",
"IIXM",
"IILI",
"IILV",
"IILX",
"IILL",
"IILC",
"IILD",
"IILM",
"IICI",
"IICV",
"IICX",
"IICL",
"IICC",
"IICD",
"IICM",
"IIDI",
"IIDV",
"IIDX",
"IIDL",
"IIDC",
"IIDD",
"IIDM",
"IIMI",
"IIMV",
"IIMX",
"IIML",
"IIMC",
"IIMD",
"IIMM",
"IVII",
"IVIV",
"IVIX",
"IVIL",
"IVIC",
"IVID",
"IVIM",
"IVVI",
"IVVV",
"IVVX",
"IVVL",
"IVVC",
"IVVD",
"IVVM",
"IVXI",
"IVXV",
"IVXX",
"IVXL",
"IVXC",
"IVXD",
"IVXM",
"IVLI",
"IVLV",
"IVLX",
"IVLL",
"IVLC",
"IVLD",
"IVLM",
"IVCI",
"IVCV",
"IVCX",
"IVCL",
"IVCC",
"IVCD",
"IVCM",
"IVDI",
"IVDV",
"IVDX",
"IVDL",
"IVDC",
"IVDD",
"IVDM",
"IVMI",
"IVMV",
"IVMX",
"IVML",
"IVMC",
"IVMD",
"IVMM",
"IXII",
"IXIV",
"IXIX",
"IXIL",
"IXIC",
"IXID",
"IXIM",
"IXVI",
"IXVV",
"IXVX",
"IXVL",
"IXVC",
"IXVD",
"IXVM",
"IXXI",
"IXXV",
"IXXX",
"IXXL",
"IXXC",
"IXXD",
"IXXM",
"IXLI",
"IXLV",
"IXLX",
"IXLL",
"IXLC",
"IXLD",
"IXLM",
"IXCI",
"IXCV",
"IXCX",
"IXCL",
"IXCC",
"IXCD",
"IXCM",
"IXDI",
"IXDV",
"IXDX",
"IXDL",
"IXDC",
"IXDD",
"IXDM",
"IXMI",
"IXMV",
"IXMX",
"IXML",
"IXMC",
"IXMD",
"IXMM",
"ILII",
"ILIV",
"ILIX",
"ILIL",
"ILIC",
"ILID",
"ILIM",
"ILVI",
"ILVV",
"ILVX",
"ILVL",
"ILVC",
"ILVD",
"ILVM",
"ILXI",
"ILXV",
"ILXX",
"ILXL",
"ILXC",
"ILXD",
"ILXM",
"ILLI",
"ILLV",
"ILLX",
"ILLL",
"ILLC",
"ILLD",
"ILLM",
"ILCI",
"ILCV",
"ILCX",
"ILCL",
"ILCC",
"ILCD",
"ILCM",
"ILDI",
"ILDV",
"ILDX",
"ILDL",
"ILDC",
"ILDD",
"ILDM",
"ILMI",
"ILMV",
"ILMX",
"ILML",
"ILMC",
"ILMD",
"ILMM",
"ICII",
"ICIV",
"ICIX",
"ICIL",
"ICIC",
"ICID",
"ICIM",
"ICVI",
"ICVV",
"ICVX",
"ICVL",
"ICVC",
"ICVD",
"ICVM",
"ICXI",
"ICXV",
"ICXX",
"ICXL",
"ICXC",
"ICXD",
"ICXM",
"ICLI",
"ICLV",
"ICLX",
"ICLL",
"ICLC",
"ICLD",
"ICLM",
"ICCI",
"ICCV",
"ICCX",
"ICCL",
"ICCC",
"ICCD",
"ICCM",
"ICDI",
"ICDV",
"ICDX",
"ICDL",
"ICDC",
"ICDD",
"ICDM",
"ICMI",
"ICMV",
"ICMX",
"ICML",
"ICMC",
"ICMD",
"ICMM",
"IDII",
"IDIV",
"IDIX",
"IDIL",
"IDIC",
"IDID",
"IDIM",
"IDVI",
"IDVV",
"IDVX",
"IDVL",
"IDVC",
"IDVD",
"IDVM",
"IDXI",
"IDXV",
"IDXX",
"IDXL",
"IDXC",
"IDXD",
"IDXM",
"IDLI",
"IDLV",
"IDLX",
"IDLL",
"IDLC",
"IDLD",
"IDLM",
"IDCI",
"IDCV",
"IDCX",
"IDCL",
"IDCC",
"IDCD",
"IDCM",
"IDDI",
"IDDV",
"IDDX",
"IDDL",
"IDDC",
"IDDD",
"IDDM",
"IDMI",
"IDMV",
"IDMX",
"IDML",
"IDMC",
"IDMD",
"IDMM",
"IMII",
"IMIV",
"IMIX",
"IMIL",
"IMIC",
"IMID",
"IMIM",
"IMVI",
"IMVV",
"IMVX",
"IMVL",
"IMVC",
"IMVD",
"IMVM",
"IMXI",
"IMXV",
"IMXX",
"IMXL",
"IMXC",
"IMXD",
"IMXM",
"IMLI",
"IMLV",
"IMLX",
"IMLL",
"IMLC",
"IMLD",
"IMLM",
"IMCI",
"IMCV",
"IMCX",
"IMCL",
"IMCC",
"IMCD",
"IMCM",
"IMDI",
"IMDV",
"IMDX",
"IMDL",
"IMDC",
"IMDD",
"IMDM",
"IMMI",
"IMMV",
"IMMX",
"IMML",
"IMMC",
"IMMD",
"IMMM",
"VIIV",
"VIIX",
"VIIL",
"VIIC",
"VIID",
"VIIM",
"VIVI",
"VIVV",
"VIVX",
"VIVL",
"VIVC",
"VIVD",
"VIVM",
"VIXI",
"VIXV",
"VIXX",
"VIXL",
"VIXC",
"VIXD",
"VIXM",
"VILI",
"VILV",
"VILX",
"VILL",
"VILC",
"VILD",
"VILM",
"VICI",
"VICV",
"VICX",
"VICL",
"VICC",
"VICD",
"VICM",
"VIDI",
"VIDV",
"VIDX",
"VIDL",
"VIDC",
"VIDD",
"VIDM",
"VIMI",
"VIMV",
"VIMX",
"VIML",
"VIMC",
"VIMD",
"VIMM",
"VVII",
"VVIV",
"VVIX",
"VVIL",
"VVIC",
"VVID",
"VVIM",
"VVVI",
"VVVV",
"VVVX",
"VVVL",
"VVVC",
"VVVD",
"VVVM",
"VVXI",
"VVXV",
"VVXX",
"VVXL",
"VVXC",
"VVXD",
"VVXM",
"VVLI",
"VVLV",
"VVLX",
"VVLL",
"VVLC",
"VVLD",
"VVLM",
"VVCI",
"VVCV",
"VVCX",
"VVCL",
"VVCC",
"VVCD",
"VVCM",
"VVDI",
"VVDV",
"VVDX",
"VVDL",
"VVDC",
"VVDD",
"VVDM",
"VVMI",
"VVMV",
"VVMX",
"VVML",
"VVMC",
"VVMD",
"VVMM",
"VXII",
"VXIV",
"VXIX",
"VXIL",
"VXIC",
"VXID",
"VXIM",
"VXVI",
"VXVV",
"VXVX",
"VXVL",
"VXVC",
"VXVD",
"VXVM",
"VXXI",
"VXXV",
"VXXX",
"VXXL",
"VXXC",
"VXXD",
"VXXM",
"VXLI",
"VXLV",
"VXLX",
"VXLL",
"VXLC",
"VXLD",
"VXLM",
"VXCI",
"VXCV",
"VXCX",
"VXCL",
"VXCC",
"VXCD",
"VXCM",
"VXDI",
"VXDV",
"VXDX",
"VXDL",
"VXDC",
"VXDD",
"VXDM",
"VXMI",
"VXMV",
"VXMX",
"VXML",
"VXMC",
"VXMD",
"VXMM",
"VLII",
"VLIV",
"VLIX",
"VLIL",
"VLIC",
"VLID",
"VLIM",
"VLVI",
"VLVV",
"VLVX",
"VLVL",
"VLVC",
"VLVD",
"VLVM",
"VLXI",
"VLXV",
"VLXX",
"VLXL",
"VLXC",
"VLXD",
"VLXM",
"VLLI",
"VLLV",
"VLLX",
"VLLL",
"VLLC",
"VLLD",
"VLLM",
"VLCI",
"VLCV",
"VLCX",
"VLCL",
"VLCC",
"VLCD",
"VLCM",
"VLDI",
"VLDV",
"VLDX",
"VLDL",
"VLDC",
"VLDD",
"VLDM",
"VLMI",
"VLMV",
"VLMX",
"VLML",
"VLMC",
"VLMD",
"VLMM",
"VCII",
"VCIV",
"VCIX",
"VCIL",
"VCIC",
"VCID",
"VCIM",
"VCVI",
"VCVV",
"VCVX",
"VCVL",
"VCVC",
"VCVD",
"VCVM",
"VCXI",
"VCXV",
"VCXX",
"VCXL",
"VCXC",
"VCXD",
"VCXM",
"VCLI",
"VCLV",
"VCLX",
"VCLL",
"VCLC",
"VCLD",
"VCLM",
"VCCI",
"VCCV",
"VCCX",
"VCCL",
"VCCC",
"VCCD",
"VCCM",
"VCDI",
"VCDV",
"VCDX",
"VCDL",
"VCDC",
"VCDD",
"VCDM",
"VCMI",
"VCMV",
"VCMX",
"VCML",
"VCMC",
"VCMD",
"VCMM",
"VDII",
"VDIV",
"VDIX",
"VDIL",
"VDIC",
"VDID",
"VDIM",
"VDVI",
"VDVV",
"VDVX",
"VDVL",
"VDVC",
"VDVD",
"VDVM",
"VDXI",
"VDXV",
"VDXX",
"VDXL",
"VDXC",
"VDXD",
"VDXM",
"VDLI",
"VDLV",
"VDLX",
"VDLL",
"VDLC",
"VDLD",
"VDLM",
"VDCI",
"VDCV",
"VDCX",
"VDCL",
"VDCC",
"VDCD",
"VDCM",
"VDDI",
"VDDV",
"VDDX",
"VDDL",
"VDDC",
"VDDD",
"VDDM",
"VDMI",
"VDMV",
"VDMX",
"VDML",
"VDMC",
"VDMD",
"VDMM",
"VMII",
"VMIV",
"VMIX",
"VMIL",
"VMIC",
"VMID",
"VMIM",
"VMVI",
"VMVV",
"VMVX",
"VMVL",
"VMVC",
"VMVD",
"VMVM",
"VMXI",
"VMXV",
"VMXX",
"VMXL",
"VMXC",
"VMXD",
"VMXM",
"VMLI",
"VMLV",
"VMLX",
"VMLL",
"VMLC",
"VMLD",
"VMLM",
"VMCI",
"VMCV",
"VMCX",
"VMCL",
"VMCC",
"VMCD",
"VMCM",
"VMDI",
"VMDV",
"VMDX",
"VMDL",
"VMDC",
"VMDD",
"VMDM",
"VMMI",
"VMMV",
"VMMX",
"VMML",
"VMMC",
"VMMD",
"VMMM",
"XIIV",
"XIIX",
"XIIL",
"XIIC",
"XIID",
"XIIM",
"XIVI",
"XIVV",
"XIVX",
"XIVL",
"XIVC",
"XIVD",
"XIVM",
"XIXI",
"XIXV",
"XIXX",
"XIXL",
"XIXC",
"XIXD",
"XIXM",
"XILI",
"XILV",
"XILX",
"XILL",
"XILC",
"XILD",
"XILM",
"XICI",
"XICV",
"XICX",
"XICL",
"XICC",
"XICD",
"XICM",
"XIDI",
"XIDV",
"XIDX",
"XIDL",
"XIDC",
"XIDD",
"XIDM",
"XIMI",
"XIMV",
"XIMX",
"XIML",
"XIMC",
"XIMD",
"XIMM",
"XVIV",
"XVIX",
"XVIL",
"XVIC",
"XVID",
"XVIM",
"XVVI",
"XVVV",
"XVVX",
"XVVL",
"XVVC",
"XVVD",
"XVVM",
"XVXI",
"XVXV",
"XVXX",
"XVXL",
"XVXC",
"XVXD",
"XVXM",
"XVLI",
"XVLV",
"XVLX",
"XVLL",
"XVLC",
"XVLD",
"XVLM",
"XVCI",
"XVCV",
"XVCX",
"XVCL",
"XVCC",
"XVCD",
"XVCM",
"XVDI",
"XVDV",
"XVDX",
"XVDL",
"XVDC",
"XVDD",
"XVDM",
"XVMI",
"XVMV",
"XVMX",
"XVML",
"XVMC",
"XVMD",
"XVMM",
"XXIL",
"XXIC",
"XXID",
"XXIM",
"XXVV",
"XXVX",
"XXVL",
"XXVC",
"XXVD",
"XXVM",
"XXXX",
"XXXL",
"XXXC",
"XXXD",
"XXXM",
"XXLI",
"XXLV",
"XXLX",
"XXLL",
"XXLC",
"XXLD",
"XXLM",
"XXCI",
"XXCV",
"XXCX",
"XXCL",
"XXCC",
"XXCD",
"XXCM",
"XXDI",
"XXDV",
"XXDX",
"XXDL",
"XXDC",
"XXDD",
"XXDM",
"XXMI",
"XXMV",
"XXMX",
"XXML",
"XXMC",
"XXMD",
"XXMM",
"XLIL",
"XLIC",
"XLID",
"XLIM",
"XLVV",
"XLVX",
"XLVL",
"XLVC",
"XLVD",
"XLVM",
"XLXI",
"XLXV",
"XLXX",
"XLXL",
"XLXC",
"XLXD",
"XLXM",
"XLLI",
"XLLV",
"XLLX",
"XLLL",
"XLLC",
"XLLD",
"XLLM",
"XLCI",
"XLCV",
"XLCX",
"XLCL",
"XLCC",
"XLCD",
"XLCM",
"XLDI",
"XLDV",
"XLDX",
"XLDL",
"XLDC",
"XLDD",
"XLDM",
"XLMI",
"XLMV",
"XLMX",
"XLML",
"XLMC",
"XLMD",
"XLMM",
"XCIL",
"XCIC",
"XCID",
"XCIM",
"XCVV",
"XCVX",
"XCVL",
"XCVC",
"XCVD",
"XCVM",
"XCXI",
"XCXV",
"XCXX",
"XCXL",
"XCXC",
"XCXD",
"XCXM",
"XCLI",
"XCLV",
"XCLX",
"XCLL",
"XCLC",
"XCLD",
"XCLM",
"XCCI",
"XCCV",
"XCCX",
"XCCL",
"XCCC",
"XCCD",
"XCCM",
"XCDI",
"XCDV",
"XCDX",
"XCDL",
"XCDC",
"XCDD",
"XCDM",
"XCMI",
"XCMV",
"XCMX",
"XCML",
"XCMC",
"XCMD",
"XCMM",
"XDII",
"XDIV",
"XDIX",
"XDIL",
"XDIC",
"XDID",
"XDIM",
"XDVI",
"XDVV",
"XDVX",
"XDVL",
"XDVC",
"XDVD",
"XDVM",
"XDXI",
"XDXV",
"XDXX",
"XDXL",
"XDXC",
"XDXD",
"XDXM",
"XDLI",
"XDLV",
"XDLX",
"XDLL",
"XDLC",
"XDLD",
"XDLM",
"XDCI",
"XDCV",
"XDCX",
"XDCL",
"XDCC",
"XDCD",
"XDCM",
"XDDI",
"XDDV",
"XDDX",
"XDDL",
"XDDC",
"XDDD",
"XDDM",
"XDMI",
"XDMV",
"XDMX",
"XDML",
"XDMC",
"XDMD",
"XDMM",
"XMII",
"XMIV",
"XMIX",
"XMIL",
"XMIC",
"XMID",
"XMIM",
"XMVI",
"XMVV",
"XMVX",
"XMVL",
"XMVC",
"XMVD",
"XMVM",
"XMXI",
"XMXV",
"XMXX",
"XMXL",
"XMXC",
"XMXD",
"XMXM",
"XMLI",
"XMLV",
"XMLX",
"XMLL",
"XMLC",
"XMLD",
"XMLM",
"XMCI",
"XMCV",
"XMCX",
"XMCL",
"XMCC",
"XMCD",
"XMCM",
"XMDI",
"XMDV",
"XMDX",
"XMDL",
"XMDC",
"XMDD",
"XMDM",
"XMMI",
"XMMV",
"XMMX",
"XMML",
"XMMC",
"XMMD",
"XMMM",
"LIIV",
"LIIX",
"LIIL",
"LIIC",
"LIID",
"LIIM",
"LIVI",
"LIVV",
"LIVX",
"LIVL",
"LIVC",
"LIVD",
"LIVM",
"LIXI",
"LIXV",
"LIXX",
"LIXL",
"LIXC",
"LIXD",
"LIXM",
"LILI",
"LILV",
"LILX",
"LILL",
"LILC",
"LILD",
"LILM",
"LICI",
"LICV",
"LICX",
"LICL",
"LICC",
"LICD",
"LICM",
"LIDI",
"LIDV",
"LIDX",
"LIDL",
"LIDC",
"LIDD",
"LIDM",
"LIMI",
"LIMV",
"LIMX",
"LIML",
"LIMC",
"LIMD",
"LIMM",
"LVIV",
"LVIX",
"LVIL",
"LVIC",
"LVID",
"LVIM",
"LVVI",
"LVVV",
"LVVX",
"LVVL",
"LVVC",
"LVVD",
"LVVM",
"LVXI",
"LVXV",
"LVXX",
"LVXL",
"LVXC",
"LVXD",
"LVXM",
"LVLI",
"LVLV",
"LVLX",
"LVLL",
"LVLC",
"LVLD",
"LVLM",
"LVCI",
"LVCV",
"LVCX",
"LVCL",
"LVCC",
"LVCD",
"LVCM",
"LVDI",
"LVDV",
"LVDX",
"LVDL",
"LVDC",
"LVDD",
"LVDM",
"LVMI",
"LVMV",
"LVMX",
"LVML",
"LVMC",
"LVMD",
"LVMM",
"LXIL",
"LXIC",
"LXID",
"LXIM",
"LXVV",
"LXVX",
"LXVL",
"LXVC",
"LXVD",
"LXVM",
"LXXL",
"LXXC",
"LXXD",
"LXXM",
"LXLI",
"LXLV",
"LXLX",
"LXLL",
"LXLC",
"LXLD",
"LXLM",
"LXCI",
"LXCV",
"LXCX",
"LXCL",
"LXCC",
"LXCD",
"LXCM",
"LXDI",
"LXDV",
"LXDX",
"LXDL",
"LXDC",
"LXDD",
"LXDM",
"LXMI",
"LXMV",
"LXMX",
"LXML",
"LXMC",
"LXMD",
"LXMM",
"LLII",
"LLIV",
"LLIX",
"LLIL",
"LLIC",
"LLID",
"LLIM",
"LLVI",
"LLVV",
"LLVX",
"LLVL",
"LLVC",
"LLVD",
"LLVM",
"LLXI",
"LLXV",
"LLXX",
"LLXL",
"LLXC",
"LLXD",
"LLXM",
"LLLI",
"LLLV",
"LLLX",
"LLLL",
"LLLC",
"LLLD",
"LLLM",
"LLCI",
"LLCV",
"LLCX",
"LLCL",
"LLCC",
"LLCD",
"LLCM",
"LLDI",
"LLDV",
"LLDX",
"LLDL",
"LLDC",
"LLDD",
"LLDM",
"LLMI",
"LLMV",
"LLMX",
"LLML",
"LLMC",
"LLMD",
"LLMM",
"LCII",
"LCIV",
"LCIX",
"LCIL",
"LCIC",
"LCID",
"LCIM",
"LCVI",
"LCVV",
"LCVX",
"LCVL",
"LCVC",
"LCVD",
"LCVM",
"LCXI",
"LCXV",
"LCXX",
"LCXL",
"LCXC",
"LCXD",
"LCXM",
"LCLI",
"LCLV",
"LCLX",
"LCLL",
"LCLC",
"LCLD",
"LCLM",
"LCCI",
"LCCV",
"LCCX",
"LCCL",
"LCCC",
"LCCD",
"LCCM",
"LCDI",
"LCDV",
"LCDX",
"LCDL",
"LCDC",
"LCDD",
"LCDM",
"LCMI",
"LCMV",
"LCMX",
"LCML",
"LCMC",
"LCMD",
"LCMM",
"LDII",
"LDIV",
"LDIX",
"LDIL",
"LDIC",
"LDID",
"LDIM",
"LDVI",
"LDVV",
"LDVX",
"LDVL",
"LDVC",
"LDVD",
"LDVM",
"LDXI",
"LDXV",
"LDXX",
"LDXL",
"LDXC",
"LDXD",
"LDXM",
"LDLI",
"LDLV",
"LDLX",
"LDLL",
"LDLC",
"LDLD",
"LDLM",
"LDCI",
"LDCV",
"LDCX",
"LDCL",
"LDCC",
"LDCD",
"LDCM",
"LDDI",
"LDDV",
"LDDX",
"LDDL",
"LDDC",
"LDDD",
"LDDM",
"LDMI",
"LDMV",
"LDMX",
"LDML",
"LDMC",
"LDMD",
"LDMM",
"LMII",
"LMIV",
"LMIX",
"LMIL",
"LMIC",
"LMID",
"LMIM",
"LMVI",
"LMVV",
"LMVX",
"LMVL",
"LMVC",
"LMVD",
"LMVM",
"LMXI",
"LMXV",
"LMXX",
"LMXL",
"LMXC",
"LMXD",
"LMXM",
"LMLI",
"LMLV",
"LMLX",
"LMLL",
"LMLC",
"LMLD",
"LMLM",
"LMCI",
"LMCV",
"LMCX",
"LMCL",
"LMCC",
"LMCD",
"LMCM",
"LMDI",
"LMDV",
"LMDX",
"LMDL",
"LMDC",
"LMDD",
"LMDM",
"LMMI",
"LMMV",
"LMMX",
"LMML",
"LMMC",
"LMMD",
"LMMM",
"CIIV",
"CIIX",
"CIIL",
"CIIC",
"CIID",
"CIIM",
"CIVI",
"CIVV",
"CIVX",
"CIVL",
"CIVC",
"CIVD",
"CIVM",
"CIXI",
"CIXV",
"CIXX",
"CIXL",
"CIXC",
"CIXD",
"CIXM",
"CILI",
"CILV",
"CILX",
"CILL",
"CILC",
"CILD",
"CILM",
"CICI",
"CICV",
"CICX",
"CICL",
"CICC",
"CICD",
"CICM",
"CIDI",
"CIDV",
"CIDX",
"CIDL",
"CIDC",
"CIDD",
"CIDM",
"CIMI",
"CIMV",
"CIMX",
"CIML",
"CIMC",
"CIMD",
"CIMM",
"CVIV",
"CVIX",
"CVIL",
"CVIC",
"CVID",
"CVIM",
"CVVI",
"CVVV",
"CVVX",
"CVVL",
"CVVC",
"CVVD",
"CVVM",
"CVXI",
"CVXV",
"CVXX",
"CVXL",
"CVXC",
"CVXD",
"CVXM",
"CVLI",
"CVLV",
"CVLX",
"CVLL",
"CVLC",
"CVLD",
"CVLM",
"CVCI",
"CVCV",
"CVCX",
"CVCL",
"CVCC",
"CVCD",
"CVCM",
"CVDI",
"CVDV",
"CVDX",
"CVDL",
"CVDC",
"CVDD",
"CVDM",
"CVMI",
"CVMV",
"CVMX",
"CVML",
"CVMC",
"CVMD",
"CVMM",
"CXIL",
"CXIC",
"CXID",
"CXIM",
"CXVV",
"CXVX",
"CXVL",
"CXVC",
"CXVD",
"CXVM",
"CXXL",
"CXXC",
"CXXD",
"CXXM",
"CXLX",
"CXLL",
"CXLC",
"CXLD",
"CXLM",
"CXCX",
"CXCL",
"CXCC",
"CXCD",
"CXCM",
"CXDI",
"CXDV",
"CXDX",
"CXDL",
"CXDC",
"CXDD",
"CXDM",
"CXMI",
"CXMV",
"CXMX",
"CXML",
"CXMC",
"CXMD",
"CXMM",
"CLIL",
"CLIC",
"CLID",
"CLIM",
"CLVV",
"CLVX",
"CLVL",
"CLVC",
"CLVD",
"CLVM",
"CLXL",
"CLXC",
"CLXD",
"CLXM",
"CLLI",
"CLLV",
"CLLX",
"CLLL",
"CLLC",
"CLLD",
"CLLM",
"CLCI",
"CLCV",
"CLCX",
"CLCL",
"CLCC",
"CLCD",
"CLCM",
"CLDI",
"CLDV",
"CLDX",
"CLDL",
"CLDC",
"CLDD",
"CLDM",
"CLMI",
"CLMV",
"CLMX",
"CLML",
"CLMC",
"CLMD",
"CLMM",
"CCIL",
"CCIC",
"CCID",
"CCIM",
"CCVV",
"CCVX",
"CCVL",
"CCVC",
"CCVD",
"CCVM",
"CCXD",
"CCXM",
"CCLL",
"CCLC",
"CCLD",
"CCLM",
"CCCC",
"CCCD",
"CCCM",
"CCDI",
"CCDV",
"CCDX",
"CCDL",
"CCDC",
"CCDD",
"CCDM",
"CCMI",
"CCMV",
"CCMX",
"CCML",
"CCMC",
"CCMD",
"CCMM",
"CDIL",
"CDIC",
"CDID",
"CDIM",
"CDVV",
"CDVX",
"CDVL",
"CDVC",
"CDVD",
"CDVM",
"CDXD",
"CDXM",
"CDLL",
"CDLC",
"CDLD",
"CDLM",
"CDCI",
"CDCV",
"CDCX",
"CDCL",
"CDCC",
"CDCD",
"CDCM",
"CDDI",
"CDDV",
"CDDX",
"CDDL",
"CDDC",
"CDDD",
"CDDM",
"CDMI",
"CDMV",
"CDMX",
"CDML",
"CDMC",
"CDMD",
"CDMM",
"CMIL",
"CMIC",
"CMID",
"CMIM",
"CMVV",
"CMVX",
"CMVL",
"CMVC",
"CMVD",
"CMVM",
"CMXD",
"CMXM",
"CMLL",
"CMLC",
"CMLD",
"CMLM",
"CMCI",
"CMCV",
"CMCX",
"CMCL",
"CMCC",
"CMCD",
"CMCM",
"CMDI",
"CMDV",
"CMDX",
"CMDL",
"CMDC",
"CMDD",
"CMDM",
"CMMI",
"CMMV",
"CMMX",
"CMML",
"CMMC",
"CMMD",
"CMMM",
"DIIV",
"DIIX",
"DIIL",
"DIIC",
"DIID",
"DIIM",
"DIVI",
"DIVV",
"DIVX",
"DIVL",
"DIVC",
"DIVD",
"DIVM",
"DIXI",
"DIXV",
"DIXX",
"DIXL",
"DIXC",
"DIXD",
"DIXM",
"DILI",
"DILV",
"DILX",
"DILL",
"DILC",
"DILD",
"DILM",
"DICI",
"DICV",
"DICX",
"DICL",
"DICC",
"DICD",
"DICM",
"DIDI",
"DIDV",
"DIDX",
"DIDL",
"DIDC",
"DIDD",
"DIDM",
"DIMI",
"DIMV",
"DIMX",
"DIML",
"DIMC",
"DIMD",
"DIMM",
"DVIV",
"DVIX",
"DVIL",
"DVIC",
"DVID",
"DVIM",
"DVVI",
"DVVV",
"DVVX",
"DVVL",
"DVVC",
"DVVD",
"DVVM",
"DVXI",
"DVXV",
"DVXX",
"DVXL",
"DVXC",
"DVXD",
"DVXM",
"DVLI",
"DVLV",
"DVLX",
"DVLL",
"DVLC",
"DVLD",
"DVLM",
"DVCI",
"DVCV",
"DVCX",
"DVCL",
"DVCC",
"DVCD",
"DVCM",
"DVDI",
"DVDV",
"DVDX",
"DVDL",
"DVDC",
"DVDD",
"DVDM",
"DVMI",
"DVMV",
"DVMX",
"DVML",
"DVMC",
"DVMD",
"DVMM",
"DXIL",
"DXIC",
"DXID",
"DXIM",
"DXVV",
"DXVX",
"DXVL",
"DXVC",
"DXVD",
"DXVM",
"DXXL",
"DXXC",
"DXXD",
"DXXM",
"DXLX",
"DXLL",
"DXLC",
"DXLD",
"DXLM",
"DXCX",
"DXCL",
"DXCC",
"DXCD",
"DXCM",
"DXDI",
"DXDV",
"DXDX",
"DXDL",
"DXDC",
"DXDD",
"DXDM",
"DXMI",
"DXMV",
"DXMX",
"DXML",
"DXMC",
"DXMD",
"DXMM",
"DLIL",
"DLIC",
"DLID",
"DLIM",
"DLVV",
"DLVX",
"DLVL",
"DLVC",
"DLVD",
"DLVM",
"DLXL",
"DLXC",
"DLXD",
"DLXM",
"DLLI",
"DLLV",
"DLLX",
"DLLL",
"DLLC",
"DLLD",
"DLLM",
"DLCI",
"DLCV",
"DLCX",
"DLCL",
"DLCC",
"DLCD",
"DLCM",
"DLDI",
"DLDV",
"DLDX",
"DLDL",
"DLDC",
"DLDD",
"DLDM",
"DLMI",
"DLMV",
"DLMX",
"DLML",
"DLMC",
"DLMD",
"DLMM",
"DCIL",
"DCIC",
"DCID",
"DCIM",
"DCVV",
"DCVX",
"DCVL",
"DCVC",
"DCVD",
"DCVM",
"DCXD",
"DCXM",
"DCLL",
"DCLC",
"DCLD",
"DCLM",
"DCCD",
"DCCM",
"DCDI",
"DCDV",
"DCDX",
"DCDL",
"DCDC",
"DCDD",
"DCDM",
"DCMI",
"DCMV",
"DCMX",
"DCML",
"DCMC",
"DCMD",
"DCMM",
"DDII",
"DDIV",
"DDIX",
"DDIL",
"DDIC",
"DDID",
"DDIM",
"DDVI",
"DDVV",
"DDVX",
"DDVL",
"DDVC",
"DDVD",
"DDVM",
"DDXI",
"DDXV",
"DDXX",
"DDXL",
"DDXC",
"DDXD",
"DDXM",
"DDLI",
"DDLV",
"DDLX",
"DDLL",
"DDLC",
"DDLD",
"DDLM",
"DDCI",
"DDCV",
"DDCX",
"DDCL",
"DDCC",
"DDCD",
"DDCM",
"DDDI",
"DDDV",
"DDDX",
"DDDL",
"DDDC",
"DDDD",
"DDDM",
"DDMI",
"DDMV",
"DDMX",
"DDML",
"DDMC",
"DDMD",
"DDMM",
"DMII",
"DMIV",
"DMIX",
"DMIL",
"DMIC",
"DMID",
"DMIM",
"DMVI",
"DMVV",
"DMVX",
"DMVL",
"DMVC",
"DMVD",
"DMVM",
"DMXI",
"DMXV",
"DMXX",
"DMXL",
"DMXC",
"DMXD",
"DMXM",
"DMLI",
"DMLV",
"DMLX",
"DMLL",
"DMLC",
"DMLD",
"DMLM",
"DMCI",
"DMCV",
"DMCX",
"DMCL",
"DMCC",
"DMCD",
"DMCM",
"DMDI",
"DMDV",
"DMDX",
"DMDL",
"DMDC",
"DMDD",
"DMDM",
"DMMI",
"DMMV",
"DMMX",
"DMML",
"DMMC",
"DMMD",
"DMMM",
"MIIV",
"MIIX",
"MIIL",
"MIIC",
"MIID",
"MIIM",
"MIVI",
"MIVV",
"MIVX",
"MIVL",
"MIVC",
"MIVD",
"MIVM",
"MIXI",
"MIXV",
"MIXX",
"MIXL",
"MIXC",
"MIXD",
"MIXM",
"MILI",
"MILV",
"MILX",
"MILL",
"MILC",
"MILD",
"MILM",
"MICI",
"MICV",
"MICX",
"MICL",
"MICC",
"MICD",
"MICM",
"MIDI",
"MIDV",
"MIDX",
"MIDL",
"MIDC",
"MIDD",
"MIDM",
"MIMI",
"MIMV",
"MIMX",
"MIML",
"MIMC",
"MIMD",
"MIMM",
"MVIV",
"MVIX",
"MVIL",
"MVIC",
"MVID",
"MVIM",
"MVVI",
"MVVV",
"MVVX",
"MVVL",
"MVVC",
"MVVD",
"MVVM",
"MVXI",
"MVXV",
"MVXX",
"MVXL",
"MVXC",
"MVXD",
"MVXM",
"MVLI",
"MVLV",
"MVLX",
"MVLL",
"MVLC",
"MVLD",
"MVLM",
"MVCI",
"MVCV",
"MVCX",
"MVCL",
"MVCC",
"MVCD",
"MVCM",
"MVDI",
"MVDV",
"MVDX",
"MVDL",
"MVDC",
"MVDD",
"MVDM",
"MVMI",
"MVMV",
"MVMX",
"MVML",
"MVMC",
"MVMD",
"MVMM",
"MXIL",
"MXIC",
"MXID",
"MXIM",
"MXVV",
"MXVX",
"MXVL",
"MXVC",
"MXVD",
"MXVM",
"MXXL",
"MXXC",
"MXXD",
"MXXM",
"MXLX",
"MXLL",
"MXLC",
"MXLD",
"MXLM",
"MXCX",
"MXCL",
"MXCC",
"MXCD",
"MXCM",
"MXDI",
"MXDV",
"MXDX",
"MXDL",
"MXDC",
"MXDD",
"MXDM",
"MXMI",
"MXMV",
"MXMX",
"MXML",
"MXMC",
"MXMD",
"MXMM",
"MLIL",
"MLIC",
"MLID",
"MLIM",
"MLVV",
"MLVX",
"MLVL",
"MLVC",
"MLVD",
"MLVM",
"MLXL",
"MLXC",
"MLXD",
"MLXM",
"MLLI",
"MLLV",
"MLLX",
"MLLL",
"MLLC",
"MLLD",
"MLLM",
"MLCI",
"MLCV",
"MLCX",
"MLCL",
"MLCC",
"MLCD",
"MLCM",
"MLDI",
"MLDV",
"MLDX",
"MLDL",
"MLDC",
"MLDD",
"MLDM",
"MLMI",
"MLMV",
"MLMX",
"MLML",
"MLMC",
"MLMD",
"MLMM",
"MCIL",
"MCIC",
"MCID",
"MCIM",
"MCVV",
"MCVX",
"MCVL",
"MCVC",
"MCVD",
"MCVM",
"MCXD",
"MCXM",
"MCLL",
"MCLC",
"MCLD",
"MCLM",
"MCCD",
"MCCM",
"MCDC",
"MCDD",
"MCDM",
"MCMC",
"MCMD",
"MCMM",
"MDIL",
"MDIC",
"MDID",
"MDIM",
"MDVV",
"MDVX",
"MDVL",
"MDVC",
"MDVD",
"MDVM",
"MDXD",
"MDXM",
"MDLL",
"MDLC",
"MDLD",
"MDLM",
"MDCD",
"MDCM",
"MDDI",
"MDDV",
"MDDX",
"MDDL",
"MDDC",
"MDDD",
"MDDM",
"MDMI",
"MDMV",
"MDMX",
"MDML",
"MDMC",
"MDMD",
"MDMM",
"MMIL",
"MMIC",
"MMID",
"MMIM",
"MMVV",
"MMVX",
"MMVL",
"MMVC",
"MMVD",
"MMVM",
"MMXD",
"MMXM",
"MMLL",
"MMLC",
"MMLD",
"MMLM",
"MMDD",
"MMDM",
]
|
<reponame>balintmaci/drone_intro_exercises
from math import pi, cos, asin, sqrt, sin
import numpy as np
from mercator.utm import utmconv
from gps.data_helper import *
from shapely.geometry import LineString
import time
def convert_to_meters(line):
uc = utmconv()
(hemisphere1, zone1, letter1, e1, n1) = uc.geodetic_to_utm(float(line[1]),float(line[2]))
return [float(line[0]), e1, n1, float(line[3])]
def convert_all_to_meters(lines):
points = []
for line in lines:
points.append(convert_to_meters(line))
return points
def calculate_distance(p1, p2):
return sqrt((p1[1]-p2[1])*(p1[1]-p2[1]) + (p1[2]-p2[2])*(p1[2]-p2[2]) + (p1[3]-p2[3])*(p1[3]-p2[3]))
def calculate_distance_errors(points):
dist_0_1 = calculate_distance(points[0], points[1])
dist_1_2 = calculate_distance(points[1], points[2])
real_distance = dist_0_1 + dist_1_2
simplified_distance = calculate_distance(points[0], points[2])
error = abs(real_distance - simplified_distance)
return error
def enabled(criteria):
return criteria >= 0
def simplify_track(input_path, output_path, config):
"""
Parameters
----------
config : tuple
must contain the following:
max_distance_diff : float
max_angular_diff : float
a value of -1 disables that criterion
"""
print("simplifying track for " + input_path)
print("parameters:")
maxdist = config[0]
maxang = config[1]
print("maximum distance error\t"+("disabled" if maxdist < 0 else str(maxdist)))
print("maximum angular error \t"+("disabled" if maxang < 0 else str(maxang)))
incount = 0
outcount = 0
lines = []
with open(input_path, 'r') as input_file:
with open(output_path, 'w') as output_file:
while True:
new_line = get_data(input_file)
incount = incount + 1
if empty_line(new_line):
break
lines.append(new_line)
if len(lines) < 3:
continue
# we have 3 points, check if second can be removed or should be kept
points = convert_all_to_meters(lines)
if enabled(maxdist):
distance_error = calculate_distance_errors(points)
if distance_error > maxdist:
# cannot remove middle point
first_line = lines.pop(0)
write_data(output_file, first_line)
outcount += 1
continue
# if we get to this part, point can be removed
lines.pop(1)
# write remaining points
for line in lines:
write_data(output_file, line)
outcount += 1
print("removed " + str(incount - outcount) + " points")
print("simplyfying finished: " + output_path)
def simplify_track_rdp(input_path, output_path, treshold):
print("simplifying track with rdp for " + input_path)
print("threshold\t"+str(treshold))
incount = 0
outcount = 0
hemisphere = 'N'
zone = 0.0
points = []
uc = utmconv()
with open(input_path, 'r') as input_file:
with open(output_path, 'w') as output_file:
# load all points in format for shapely
while True:
new_line = get_data(input_file)
incount = incount + 1
if empty_line(new_line):
break
converted = convert_to_meters(new_line)
point = (converted[1], converted[2], converted[3])
points.append(point)
(hemisphere, zone, letter, e, n) = uc.geodetic_to_utm(float(new_line[1]),float(new_line[2]))
# call shapely function
line = LineString(points)
line = line.simplify(treshold, preserve_topology=False)
simp_points = list(line.coords)
outcount = len(simp_points)
for p in simp_points:
(lat, lon) = uc.utm_to_geodetic(hemisphere, zone, p[0], p[1])
formatted = [0, lat, lon, p[2]]
write_data(output_file, formatted)
print("removed " + str(incount - outcount) + " points")
print("simplyfying with rdp finished: " + output_path)
if __name__ == "__main__":
# test functions
config = (0.1, -1)
simplify_track("Week 11 - ex6/input/gps_data_1_clean.txt", "Week 11 - ex6/output/gps_data_1_simp.txt", config)
simplify_track("Week 11 - ex6/input/gps_data_2_clean.txt", "Week 11 - ex6/output/gps_data_2_simp.txt", config)
treshold = 0.1
simplify_track_rdp("Week 11 - ex6/input/gps_data_1_clean.txt", "Week 11 - ex6/output/gps_data_1_simp_rdp.txt", treshold)
simplify_track_rdp("Week 11 - ex6/input/gps_data_2_clean.txt", "Week 11 - ex6/output/gps_data_2_simp_rdp.txt", treshold) |
#We used the Recipe Puppy API :)
api_key = "01c7a5dd19fe082baf1a1bd40c11bd9f"
#We also used these three modules
import webbrowser
import json
import urllib.request
def foodFind():
x = input("Would you like to search by 1)Keywords or by 2)Ingredients? ")
if x == "1":
return searchRecipes()
elif x == "2":
return searchRecipesByIngredients()
else:
print("Please enter 1 or 2")
foodFind()
def searchRecipes():
x = input("Search: ")
x = x.replace(" ", "%20")
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?q=" + str(x) + "&p=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?q=" + str(x) + "&p=1")
def searchRecipesByIngredients():
def enter_ingredients():
ingredients = []
input_ingredients = input("Enter Ingredients: ")
while input_ingredients != "":
input_ingredients = input_ingredients.replace(' ',"%20")
ingredients.append(input_ingredients)
input_ingredients = input("Any More? ")
return ingredients
def get_recipes_from_ingredients1(a):
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + str(a) + "&page=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?i=" + str(a) + "&page=1")
def get_recipes_from_ingredients2(a, b):
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "&page=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "&page=1")
def get_recipes_from_ingredients3(a, b, c):
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "&page=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "&page=1")
def get_recipes_from_ingredients4(a, b, c, d):
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "," + str(d) + "&page=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "," + str(d) + "&page=1")
def get_recipes_from_ingredients5(a, b, c, d, e):
f = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "," + str(d) + str(e) + "&page=1")
mydict = json.loads(f.read())
smalldict = mydict['results']
i = 0
for dict in smalldict:
smallerdict = smalldict[i]
evensmallerdict = smallerdict['href']
otherdict = smallerdict['title']
print(otherdict + ": " + evensmallerdict)
i += 1
#return webbrowser.open_new("http://www.recipepuppy.com/api/?i=" + str(a) + "," + str(b) + "," + str(c) + "," + str(d) + str(e) + "&page=1")
def number_of_ingredients(x):
length_of_ingredients = len(x)
if length_of_ingredients == 1:
return get_recipes_from_ingredients1(x[0])
elif length_of_ingredients == 2:
return get_recipes_from_ingredients2(x[0], x[1])
elif length_of_ingredients == 3:
return get_recipes_from_ingredients3(x[0], x[1], x[2])
elif length_of_ingredients == 4:
return get_recipes_from_ingredients4(x[0], x[1], x[2], x[3])
elif length_of_ingredients == 5:
return get_recipes_from_ingredients5(x[0], x[1], x[2], x[3], x[4])
return number_of_ingredients(enter_ingredients())
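# Hedged refactoring sketch (not part of the original script): the five
# get_recipes_from_ingredientsN helpers above differ only in how many ingredient
# terms they join into the query string, so a single hypothetical helper like the
# one below could serve any number of ingredients.
#
# def get_recipes_from_ingredients(ingredients):
#     query = ",".join(ingredients)
#     response = urllib.request.urlopen("http://www.recipepuppy.com/api/?i=" + query + "&page=1")
#     for result in json.loads(response.read())['results']:
#         print(result['title'] + ": " + result['href'])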
# Run Function:
foodFind() |
#!/usr/bin/env python2
import json
import argparse
import os
import os.path
import sys
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
import cElementTree as ElementTree
# Put shared python modules in path
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
"modules"))
import ssgcommon
script_desc = \
"Obtains and displays XCCDF profile statistics. Namely number " + \
"of rules in the profile, how many of these rules have their OVAL " + \
"check implemented, how many have a remediation available, ..."
xccdf_ns = ssgcommon.XCCDF11_NS
oval_ns = ssgcommon.oval_namespace
bash_rem_system = ssgcommon.bash_system
ansible_rem_system = ssgcommon.ansible_system
puppet_rem_system = ssgcommon.puppet_system
anaconda_rem_system = ssgcommon.anaconda_system
cce_system = ssgcommon.cce_system
ssg_version_uri = ssgcommon.ssg_version_uri
stig_ns = ssgcommon.stig_ns
console_width = 80
class RuleStats(object):
def __init__(self, rid=None, roval=None,
rbash_fix=None, ransible_fix=None,
rpuppet_fix=None, ranaconda_fix=None,
rcce=None, stig_id=None):
self.dict = {
'id': rid,
'oval': roval,
'bash_fix': rbash_fix,
'ansible_fix': ransible_fix,
'puppet_fix': rpuppet_fix,
'anaconda_fix': ranaconda_fix,
'cce': rcce,
'stig_id': stig_id,
}
class XCCDFBenchmark(object):
def __init__(self, filepath):
self.tree = None
try:
with open(filepath, 'r') as xccdf_file:
file_string = xccdf_file.read()
tree = ElementTree.fromstring(file_string)
self.tree = tree
except IOError as ioerr:
print("%s" % ioerr)
sys.exit(1)
self.indexed_rules = {}
for rule in self.tree.findall(".//{%s}Rule" % (xccdf_ns)):
rule_id = rule.get("id")
if rule_id is None:
raise RuntimeError("Can't index a rule with no id attribute!")
assert(rule_id not in self.indexed_rules)
self.indexed_rules[rule_id] = rule
def get_profile_stats(self, profile):
"""Obtain statistics for the profile"""
# Holds the intermediary statistics for profile
profile_stats = {
'profile_id': None,
'ssg_version': 0,
'rules_count': 0,
'implemented_ovals': [],
'implemented_ovals_pct': 0,
'missing_ovals': [],
'implemented_bash_fixes': [],
'implemented_bash_fixes_pct': 0,
'implemented_ansible_fixes': [],
'implemented_ansible_fixes_pct': 0,
'implemented_puppet_fixes': [],
'implemented_puppet_fixes_pct': 0,
'implemented_anaconda_fixes': [],
'implemented_anaconda_fixes_pct': 0,
'missing_bash_fixes': [],
'missing_ansible_fixes': [],
'missing_puppet_fixes': [],
'missing_anaconda_fixes': [],
'assigned_cces': [],
'assigned_cces_pct': 0,
'missing_cces': [],
'missing_stig_ids': [],
}
rule_stats = []
ssg_version_elem = self.tree.find("./{%s}version[@update=\"%s\"]" %
(xccdf_ns, ssg_version_uri))
rules = []
if profile == "all":
# "all" is a virtual profile that selects all rules
rules = self.indexed_rules.values()
else:
xccdf_profile = self.tree.find("./{%s}Profile[@id=\"%s\"]" %
(xccdf_ns, profile))
if xccdf_profile is None:
print("No such profile \"%s\" found in the benchmark!"
% profile)
print("* Available profiles:")
profiles_avail = self.tree.findall("./{%s}Profile" % (xccdf_ns))
for profile in profiles_avail:
print("** %s" % profile.get('id'))
sys.exit(1)
# This will only work with SSG where the (default) profile has zero
# selected rules. If you want to reuse this for custom content, you
# need to change this to look into Rule/@selected
selects = xccdf_profile.findall("./{%s}select[@selected=\"true\"]" %
xccdf_ns)
for select in selects:
rule_id = select.get('idref')
xccdf_rule = self.indexed_rules.get(rule_id)
if xccdf_rule is not None:
# it could also be a Group
rules.append(xccdf_rule)
for rule in rules:
if rule is not None:
oval = rule.find("./{%s}check[@system=\"%s\"]" %
(xccdf_ns, oval_ns))
bash_fix = rule.find("./{%s}fix[@system=\"%s\"]" %
(xccdf_ns, bash_rem_system))
ansible_fix = rule.find("./{%s}fix[@system=\"%s\"]" %
(xccdf_ns, ansible_rem_system))
puppet_fix = rule.find("./{%s}fix[@system=\"%s\"]" %
(xccdf_ns, puppet_rem_system))
anaconda_fix = rule.find("./{%s}fix[@system=\"%s\"]" %
(xccdf_ns, anaconda_rem_system))
cce = rule.find("./{%s}ident[@system=\"%s\"]" %
(xccdf_ns, cce_system))
stig_id = rule.find("./{%s}reference[@href=\"%s\"]" %
(xccdf_ns, stig_ns))
rule_stats.append(
RuleStats(rule.get("id"), oval,
bash_fix, ansible_fix, puppet_fix, anaconda_fix,
cce, stig_id)
)
if not rule_stats:
print('Unable to retrieve statistics for %s profile' % profile)
sys.exit(1)
rule_stats.sort(key=lambda r: r.dict['id'])
profile_stats['profile_id'] = profile
if ssg_version_elem is not None:
profile_stats['ssg_version'] = \
'SCAP Security Guide %s' % ssg_version_elem.text
profile_stats['rules_count'] = len(rule_stats)
profile_stats['implemented_ovals'] = \
[x.dict['id'] for x in rule_stats if x.dict['oval'] is not None]
profile_stats['implemented_ovals_pct'] = \
float(len(profile_stats['implemented_ovals'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_ovals'] = \
[x.dict['id'] for x in rule_stats if x.dict['oval'] is None]
profile_stats['implemented_bash_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['bash_fix'] is not None]
profile_stats['implemented_bash_fixes_pct'] = \
float(len(profile_stats['implemented_bash_fixes'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_bash_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['bash_fix'] is None]
profile_stats['implemented_ansible_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['ansible_fix'] is not None]
profile_stats['implemented_ansible_fixes_pct'] = \
float(len(profile_stats['implemented_ansible_fixes'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_ansible_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['ansible_fix'] is None]
profile_stats['implemented_puppet_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['puppet_fix'] is not None]
profile_stats['implemented_puppet_fixes_pct'] = \
float(len(profile_stats['implemented_puppet_fixes'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_puppet_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['puppet_fix'] is None]
profile_stats['implemented_anaconda_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['anaconda_fix'] is not None]
profile_stats['missing_stig_ids'] = []
if 'stig' in profile_stats['profile_id']:
profile_stats['missing_stig_ids'] = \
[x.dict['id'] for x in rule_stats if x.dict['stig_id'] is None]
profile_stats['implemented_anaconda_fixes_pct'] = \
float(len(profile_stats['implemented_anaconda_fixes'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_anaconda_fixes'] = \
[x.dict['id'] for x in rule_stats if x.dict['anaconda_fix'] is None]
profile_stats['assigned_cces'] = \
[x.dict['id'] for x in rule_stats if x.dict['cce'] is not None]
profile_stats['assigned_cces_pct'] = \
float(len(profile_stats['assigned_cces'])) / \
profile_stats['rules_count'] * 100
profile_stats['missing_cces'] = \
[x.dict['id'] for x in rule_stats if x.dict['cce'] is None]
return profile_stats
def show_profile_stats(self, profile, options):
"""Displays statistics for specific profile"""
profile_stats = self.get_profile_stats(profile)
rules_count = profile_stats['rules_count']
impl_ovals_count = len(profile_stats['implemented_ovals'])
impl_bash_fixes_count = len(profile_stats['implemented_bash_fixes'])
impl_ansible_fixes_count = len(profile_stats['implemented_ansible_fixes'])
impl_puppet_fixes_count = len(profile_stats['implemented_puppet_fixes'])
impl_anaconda_fixes_count = len(profile_stats['implemented_anaconda_fixes'])
missing_stig_ids_count = len(profile_stats['missing_stig_ids'])
impl_cces_count = len(profile_stats['assigned_cces'])
if options.format == "plain":
print("\nProfile %s:" % profile)
print("* rules: %d" % rules_count)
print("* checks (OVAL): %d\t[%d%% complete]" %
(impl_ovals_count,
profile_stats['implemented_ovals_pct']))
print("* fixes (bash): %d\t[%d%% complete]" %
(impl_bash_fixes_count,
profile_stats['implemented_bash_fixes_pct']))
print("* fixes (ansible): %d\t[%d%% complete]" %
(impl_ansible_fixes_count,
profile_stats['implemented_ansible_fixes_pct']))
print("* fixes (puppet): %d\t[%d%% complete]" %
(impl_puppet_fixes_count,
profile_stats['implemented_puppet_fixes_pct']))
print("* fixes (anaconda): %d\t[%d%% complete]" %
(impl_anaconda_fixes_count,
profile_stats['implemented_anaconda_fixes_pct']))
print("* CCEs: %d\t[%d%% complete]" %
(impl_cces_count,
profile_stats['assigned_cces_pct']))
if options.implemented_ovals and \
profile_stats['implemented_ovals']:
print("** Rules of '%s' " % profile +
"profile having OVAL check: %d of %d [%d%% complete]" %
(impl_ovals_count, rules_count,
profile_stats['implemented_ovals_pct']))
self.console_print(profile_stats['implemented_ovals'],
console_width)
if options.implemented_fixes:
if profile_stats['implemented_bash_fixes']:
print("*** Rules of '%s' profile having "
"a bash fix script: %d of %d [%d%% complete]"
% (profile, impl_bash_fixes_count, rules_count,
profile_stats['implemented_bash_fixes_pct']))
self.console_print(profile_stats['implemented_bash_fixes'],
console_width)
if profile_stats['implemented_ansible_fixes']:
print("*** Rules of '%s' profile having "
"a ansible fix script: %d of %d [%d%% complete]"
% (profile, impl_ansible_fixes_count, rules_count,
profile_stats['implemented_ansible_fixes_pct']))
self.console_print(
profile_stats['implemented_ansible_fixes'],
console_width)
if profile_stats['implemented_puppet_fixes']:
print("*** Rules of '%s' profile having "
"a puppet fix script: %d of %d [%d%% complete]"
% (profile, impl_puppet_fixes_count, rules_count,
profile_stats['implemented_puppet_fixes_pct']))
self.console_print(
profile_stats['implemented_puppet_fixes'],
console_width)
if profile_stats['implemented_anaconda_fixes']:
print("*** Rules of '%s' profile having "
"a anaconda fix script: %d of %d [%d%% complete]"
% (profile, impl_anaconda_fixes_count, rules_count,
profile_stats['implemented_anaconda_fixes_pct']))
self.console_print(
profile_stats['implemented_anaconda_fixes'],
console_width)
if options.assigned_cces and \
profile_stats['assigned_cces']:
print("*** Rules of '%s' " % profile +
"profile having CCE assigned: %d of %d [%d%% complete]" %
(impl_cces_count, rules_count,
profile_stats['assigned_cces_pct']))
self.console_print(profile_stats['assigned_cces'],
console_width)
if options.missing_ovals and profile_stats['missing_ovals']:
print("*** Rules of '%s' " % profile + "profile missing " +
"OVAL: %d of %d [%d%% complete]" %
(rules_count - impl_ovals_count, rules_count,
profile_stats['implemented_ovals_pct']))
self.console_print(profile_stats['missing_ovals'],
console_width)
if options.missing_fixes:
if profile_stats['missing_bash_fixes']:
print("*** rules of '%s' profile missing "
"a bash fix script: %d of %d [%d%% complete]"
% (profile, rules_count - impl_bash_fixes_count,
rules_count,
profile_stats['implemented_bash_fixes_pct']))
self.console_print(profile_stats['missing_bash_fixes'],
console_width)
if profile_stats['missing_ansible_fixes']:
print("*** rules of '%s' profile missing "
"a ansible fix script: %d of %d [%d%% complete]"
% (profile, rules_count - impl_ansible_fixes_count,
rules_count,
profile_stats['implemented_ansible_fixes_pct']))
self.console_print(profile_stats['missing_ansible_fixes'],
console_width)
if profile_stats['missing_puppet_fixes']:
print("*** rules of '%s' profile missing "
"a puppet fix script: %d of %d [%d%% complete]"
% (profile, rules_count - impl_puppet_fixes_count,
rules_count,
profile_stats['implemented_puppet_fixes_pct']))
self.console_print(profile_stats['missing_puppet_fixes'],
console_width)
if profile_stats['missing_anaconda_fixes']:
print("*** rules of '%s' profile missing "
"a anaconda fix script: %d of %d [%d%% complete]"
% (profile, rules_count - impl_anaconda_fixes_count,
rules_count,
profile_stats['implemented_anaconda_fixes_pct']))
self.console_print(profile_stats['missing_anaconda_fixes'],
console_width)
if options.missing_stig_ids and profile_stats['missing_stig_ids']:
print("*** rules of '%s' profile missing "
"STIG IDs: %d of %d have them [%d%% missing]"
% (profile, rules_count - missing_stig_ids_count,
rules_count,
(100.0 * missing_stig_ids_count / rules_count)))
self.console_print(profile_stats['missing_stig_ids'],
console_width)
if options.missing_cces and profile_stats['missing_cces']:
print("***Rules of '%s' " % profile + "profile missing " +
"CCE identifier: %d of %d [%d%% complete]" %
(rules_count - impl_cces_count, rules_count,
profile_stats['assigned_cces_pct']))
self.console_print(profile_stats['missing_cces'],
console_width)
else:
# First delete the not requested information
if not options.missing_ovals:
del profile_stats['missing_ovals']
if not options.missing_fixes:
del profile_stats['missing_bash_fixes']
del profile_stats['missing_ansible_fixes']
del profile_stats['missing_puppet_fixes']
del profile_stats['missing_anaconda_fixes']
del profile_stats['missing_stig_ids']
if not options.missing_cces:
del profile_stats['missing_cces']
if not options.implemented_ovals:
del profile_stats['implemented_ovals']
if not options.implemented_fixes:
del profile_stats['implemented_bash_fixes']
del profile_stats['implemented_ansible_fixes']
del profile_stats['implemented_puppet_fixes']
del profile_stats['implemented_anaconda_fixes']
if not options.assigned_cces:
del profile_stats['assigned_cces']
return profile_stats
def console_print(self, content, width):
"""Prints the 'content' array left aligned, each time 45 characters
long, each row 'width' characters wide"""
msg = ''
for x in content:
if len(msg) + len(x) < width - 6:
msg += ' ' + "%-45s" % x
else:
print("%s" % msg)
msg = ' ' + "%-45s" % x
if msg != '':
print("%s" % msg)
def main():
parser = argparse.ArgumentParser(description=script_desc)
parser.add_argument("--profile", "-p",
action="store",
help="Show statistics for this XCCDF Profile only. If "
"not provided the script will show stats for all "
"available profiles.")
parser.add_argument("--benchmark", "-b", required=True,
action="store",
help="Specify XCCDF file to act on. Must be a plain "
"XCCDF file, doesn't work on source datastreams yet!")
parser.add_argument("--implemented-ovals", default=False,
action="store_true", dest="implemented_ovals",
help="Show IDs of implemented OVAL checks.")
parser.add_argument("--missing-stig-ids", default=False,
action="store_true", dest="missing_stig_ids",
help="Show rules in STIG profiles that don't have STIG IDs.")
parser.add_argument("--missing-ovals", default=False,
action="store_true", dest="missing_ovals",
help="Show IDs of unimplemented OVAL checks.")
parser.add_argument("--implemented-fixes", default=False,
action="store_true", dest="implemented_fixes",
help="Show IDs of implemented remediations.")
parser.add_argument("--missing-fixes", default=False,
action="store_true", dest="missing_fixes",
help="Show IDs of unimplemented remediations.")
parser.add_argument("--assigned-cces", default=False,
action="store_true", dest="assigned_cces",
help="Show IDs of rules having CCE assigned.")
parser.add_argument("--missing-cces", default=False,
action="store_true", dest="missing_cces",
help="Show IDs of rules missing CCE element.")
parser.add_argument("--implemented", default=False,
action="store_true",
help="Equivalent of --implemented-ovals, "
"--implemented_fixes and --assigned-cves "
"all being set.")
parser.add_argument("--missing", default=False,
action="store_true",
help="Equivalent of --missing-ovals, --missing-fixes"
" and --missing-cces all being set.")
parser.add_argument("--all", default=False,
action="store_true", dest="all",
help="Show all available statistics.")
parser.add_argument("--format", default="plain",
choices=["plain", "json", "csv"],
help="Which format to use for output.")
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write(
"Unknown positional arguments " + ",".join(unknown) + ".\n"
)
sys.exit(1)
if args.all:
args.implemented = True
args.missing = True
if args.implemented:
args.implemented_ovals = True
args.implemented_fixes = True
args.assigned_cces = True
if args.missing:
args.missing_ovals = True
args.missing_fixes = True
args.missing_cces = True
args.missing_stig_ids = True
benchmark = XCCDFBenchmark(args.benchmark)
ret = []
if args.profile:
ret.append(benchmark.show_profile_stats(args.profile, args))
else:
all_profile_elems = benchmark.tree.findall("./{%s}Profile" % (xccdf_ns))
ret = []
for elem in all_profile_elems:
profile = elem.get('id')
if profile is not None:
ret.append(benchmark.show_profile_stats(profile, args))
if args.format == "json":
print(json.dumps(ret, indent=4))
elif args.format == "csv":
# we can assume ret has at least one element
# CSV header
print(",".join(ret[0].keys()))
for line in ret:
print(",".join([str(value) for value in line.values()]))
if __name__ == '__main__':
main()
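# Example invocation (file name is hypothetical):
#   python2 profile_stats.py --benchmark ssg-rhel7-xccdf.xml --profile stig --all --format json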
|
import argparse
import pickle
import operator
import pprint
import fileinput
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Check correlations with different alpha')
parser.add_argument('-m', '--model', help='input pickle file for the model', required=True)
parser.add_argument('-t', '--test_files', nargs='+', help='files to check correlation with', required=True)
parser.add_argument('-l', '--linear_search', action='store_true', default=False,
help='toggle linear grid search, false by default')
parser.add_argument('-r', '--range', type=int, nargs=2, default=[1, 1000],
help='the range of values to search for an optimal alpha')
parser.add_argument('-n', '--nvals', type=int, default=10, help='the number of values to evaluate')
parser.add_argument('--nthreads', help='number of threads to use', default=4, type=int)
parser.add_argument('--multi_pass', help='toggle multiple pass search', default=False, action='store_true')
# parser.add_argument('-n', help='name of the model', default='hmm_model')
# parser.add_argument('-p', help='print/pickle location', default='hmm_models')
args = parser.parse_args()
def testAlpha(a, m, testdata):
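# For each spectrum in the test files: pair the experimental y-ion intensities with the
# model's predicted fragment probabilities, take the Pearson correlation between the two
# columns, and return this alpha together with the mean correlation over all spectra.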
model = m.finalizeModel(alpha=a)
corrs = []
with fileinput.input(files=testdata) as line_iter:
for line in line_iter:
try:
tokens = line.rstrip('\r\n').split('\t')
z, seq, score, y_ions, y_ints, b_ions, b_ints, y_frac = tokens
if len(y_ints) == 0:
continue
if int(z) == 1:
continue
except ValueError as e:
print(f"Unexpected number of tokens found on line: {line}")
e.args += (line,)
continue
y_ints = [float(i) for i in y_ints.split(' ')]
y_ions = y_ions.split(' ')
# b_ints = [float(i) for i in b_ints.split(' ')]
# b_ions = b_ions.split(' ')
ions, probs = model.calc_fragments(charge=int(z), seq=seq, ion_type='y', use_yfrac=False)
d = {'exp': {i: ii for i, ii in zip(y_ions, y_ints)},
'model': {ion: p for ion, p in zip(ions, probs)}}
df = pd.DataFrame.from_dict(d)
p = df.corr(method='pearson').iat[0, 1]
corrs.append(p)
corrs = np.asarray(corrs)
mean_corr = np.nanmean(corrs)
return a, mean_corr
def estimateForAlphas(alphas):
#testdata = itertools.chain.from_iterable(file_gen)
from multiprocessing import Pool as ThreadPool
from functools import partial
with open(args.model, 'rb') as pickle_f, ThreadPool(args.nthreads) as pool:
m = pickle.load(pickle_f)
corrs = pool.map(partial(testAlpha, m=m, testdata=args.test_files), alphas)
return corrs
def main(low, high, iter_counter):
search_space = np.linspace(low, high, args.nvals, dtype=int) if args.linear_search else np.geomspace(low, high, args.nvals, dtype=int)
iter_counter += 1
res = estimateForAlphas(search_space)
sorted_res = sorted(res, key=operator.itemgetter(1), reverse=True)
print(f"Iteration {iter_counter}:")
pprint.pprint(dict(sorted_res))
if args.multi_pass:
# Re-run between the two neighbors of the best value; the assumption is that the optimal alpha lies in that interval
best = sorted_res[0][0]
ind = list(search_space).index(best)
x = search_space[ind-1]
y = search_space[ind+1]
assert not ((best < x and best < y) or (best > x and best > y)), f"best val={best}, sec={x}, third={y}\nsearch_space={search_space}"
if abs(x-y) < 5:
print(f"Optimal alpha is between {x}-{y}")
elif x < y:
main(x, y, iter_counter)
else:
main(y, x, iter_counter)
else:
print(f"Top alpha value is {sorted_res[0][0]} with an average corr {sorted_res[0][1]}")
print(f"For comparison: the second best alpha was {sorted_res[1][0]} with an average corr {sorted_res[1][1]}")
if __name__ == "__main__":
low, high = args.range
main(low, high, 0)
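# Example invocation (file names are hypothetical):
#   python check_alpha.py -m hmm_model.pkl -t test_psms_1.tsv test_psms_2.tsv -r 1 1000 -n 10 --multi_pass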
|
<reponame>sedlakovi/mafTools
##################################################
# Copyright (C) 2013 by
# <NAME> (<EMAIL>, <EMAIL>)
# ... and other members of the Reconstruction Team of <NAME>'s
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##################################################
import os
import random
import sys
import unittest
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../lib/')))
import mafToolsTest as mtt
g_headers = ['''##maf version=1 scoring=tba.v8
# tba.v8 (((human chimp) baboon) (mouse rat))
''',
'''##maf version=1 scoring=tba.v8
# tba.v8 (((human chimp) baboon) (mouse rat))
''']
g_duplicateBlocks = [('''a score=0
#dup block 1, name4 is duplicate
s target.chr0 38 13 + 158545518 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 gcagctgaaaaca
s name.chr1 0 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name4.chrA 50 13 + 100 gcagctgaaaacT
''',
'''a score=0
#dup block 1, name4 is duplicate
s target.chr0 38 13 + 158545518 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 gcagctgaaaaca
s name.chr1 0 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
''',),
('''a score=0
#dup block 2, target is duplicate
s name 0 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s target.chr0 158545457 13 - 158545518 gcagctgaaaacT
s target.chr1 158545457 13 - 158545518 gcagctgaaaaca
''',
'''a score=0
#dup block 2, target is duplicate
s name 0 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s target.chr1 158545457 13 - 158545518 gcagctgaaaaca
''',),
('''a score=0
#dup block 3, panTro1 and baboon are duplicates
s name 10 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 Acagctgaatact
s target.chr0 62 9 + 158545518 gca---gaa-aca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca
s panTro1.chr7 28869787 13 + 161576975 gcagctgaatact
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA
''',
'''a score=0
#dup block 3, panTro1 and baboon are duplicates
s name 10 13 + 100 gcagctgaaaaca
s name2.chr1 50 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s target.chr0 62 9 + 158545518 gca---gaa-aca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca
s panTro1.chr7 28869787 13 + 161576975 gcagctgaatact
s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA
''',),
('''a score=0
#dup block 4, name, panTro1 and baboon are duplicates
s name 10 13 + 100 gcagctgaaaaca
s name.chr1 50 13 + 100 gcagctgaaaact
s name.chr2 50 13 + 100 gcagctgaaaact
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 Acagctgaatact
s target.chr0 62 9 + 158545518 gca---gaa-aca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca
s panTro1.chr7 28869787 13 + 161576975 gcagctgaatacT
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA
''',
'''a score=0
#dup block 4, name, panTro1 and baboon are duplicates
s name 10 13 + 100 gcagctgaaaaca
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name5 50 13 + 100 gcagctgaaaaca
s target.chr0 62 9 + 158545518 gca---gaa-aca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s hg16.chr7 27707221 13 + 158545518 gcagctgaaaaca
s panTro1.chr7 28869787 13 + 161576975 gcagctgaatacT
s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA
''',),
('''a score=0
#dup block 1, name4 is duplicate
s target.chr0 38 13 + 158545518 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 gcagctgannnnn
s name.chr1 0 13 + 100 gcagctgaaaacN
s name2.chr1 50 13 + 100 gcagctgaaaacN
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
s name4.chrA 50 13 + 100 gcagctgaaaacT
''',
'''a score=0
#dup block 1, name4 is duplicate
s target.chr0 38 13 + 158545518 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 gcagctgannnnn
s name.chr1 0 13 + 100 gcagctgaaaacN
s name2.chr1 50 13 + 100 gcagctgaaaacN
s name3.chr9 50 13 + 100 gcagctgaaaaca
s name4.chr& 50 13 + 100 gcagctgaaaaca
''',),
]
g_nonDuplicateBlocks = ['''a score=23262.0
#non-dup block 1
s hg18.chr7 27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG
s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG
s baboon 116834 38 + 4622798 AAA-GGGAATGTTAACCAAATGA---GTTGTCTCTTATGGTG
s mm4.chr6 53215344 38 + 151104725 -AATGGGAATGTTAAGCAAACGA---ATTGTCTCTCAGTGTG
s rn3.chr4 81344243 40 + 187371129 -AA-GGGGATGCTAAGCCAATGAGTTGTTGTCTCTCAATGTG
''',
'''a score=5062.0
#non-dup block 2
s hg18.chr7 27699739 6 + 158545518 TAAAGA
s panTro1.chr6 28862317 6 + 161576975 TAAAGA
s baboon 241163 6 + 4622798 TAAAGA
# ignore this comment line
s mm4.chr6 53303881 6 + 151104725 TAAAGA
s rn3.chr4 81444246 6 + 187371129 taagga
q i dont remember what q lines do, but we should be ignoring them.
''',
'''a score=6636.0
#non-dup block 3
# this comment line should not screw anything up
s hg18.chr7 27707221 13 + 158545518 gcagctgaaaaca
s panTro1.chr6 28869787 13 + 161576975 gcagctgaaaaca
s baboon 249182 13 + 4622798 gcagctgaaaaca
s mm4.chr6 53310102 13 + 151104725 ACAGCTGAAAATA
# nor should this comment line.
''',
]
def mafIsFiltered(maf, blockList):
f = open(maf)
lastLine = mtt.processHeader(f)
for i in xrange(0, len(blockList)):
# walk through the maf, assessing the equivalence to the blockList items
b = mtt.extractBlockStr(f, lastLine)
lastLine = None
if b != blockList[i]:
print 'dang'
print 'observed:'
print b
print '!='
print 'expected:'
print blockList[i]
return False
return True
class DuplicationFilterTest(unittest.TestCase):
def testFilter(self):
""" mafDuplicateFilter should filter out duplicates in blocks according to sequence similarity to the consensus.
"""
mtt.makeTempDirParent()
for i in xrange(0, 10):
shuffledBlocks = []
expectedOutput = []
tmpDir = os.path.abspath(mtt.makeTempDir('filter'))
order = [1] * len(g_duplicateBlocks) + [0] * len(g_nonDuplicateBlocks)
random.shuffle(order)
random.shuffle(g_duplicateBlocks)
random.shuffle(g_nonDuplicateBlocks)
j, k = 0, 0
for dupBlock in order:
if dupBlock:
shuffledBlocks.append(g_duplicateBlocks[j][0])
expectedOutput.append(g_duplicateBlocks[j][1])
j += 1
else:
shuffledBlocks.append(g_nonDuplicateBlocks[k])
expectedOutput.append(g_nonDuplicateBlocks[k])
k += 1
testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')),
''.join(shuffledBlocks), g_headers)
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')),
'--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))]
outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))]
mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)
mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)
self.assertTrue(mafIsFiltered(os.path.join(tmpDir, 'filtered.maf'), expectedOutput))
mtt.removeDir(tmpDir)
def testNonFilter(self):
""" mafDuplicateFilter should not filter out any sequences from blocks when there are no duplicates.
"""
mtt.makeTempDirParent()
for i in xrange(0, 10):
tmpDir = os.path.abspath(mtt.makeTempDir('nonFilter'))
random.shuffle(g_nonDuplicateBlocks)
testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')),
''.join(g_nonDuplicateBlocks), g_headers)
expectedOutput = g_nonDuplicateBlocks
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')),
'--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))]
outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))]
mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)
mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)
self.assertTrue(mafIsFiltered(os.path.join(tmpDir, 'filtered.maf'), expectedOutput))
mtt.removeDir(tmpDir)
def testMemory1(self):
""" If valgrind is installed on the system, check for memory related errors (1).
"""
mtt.makeTempDirParent()
valgrind = mtt.which('valgrind')
if valgrind is None:
return
for i in xrange(0, 10):
shuffledBlocks = []
expectedOutput = []
tmpDir = os.path.abspath(mtt.makeTempDir('memory1'))
order = [1] * len(g_duplicateBlocks) + [0] * len(g_nonDuplicateBlocks)
random.shuffle(order)
random.shuffle(g_duplicateBlocks)
random.shuffle(g_nonDuplicateBlocks)
j, k = 0, 0
for dupBlock in order:
if dupBlock:
shuffledBlocks.append(g_duplicateBlocks[j][0])
expectedOutput.append(g_duplicateBlocks[j][1])
j += 1
else:
shuffledBlocks.append(g_nonDuplicateBlocks[k])
expectedOutput.append(g_nonDuplicateBlocks[k])
k += 1
testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')),
''.join(shuffledBlocks), g_headers)
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cmd = mtt.genericValgrind(tmpDir)
cmd += [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')),
'--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))]
outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))]
mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)
mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)
self.assertTrue(mtt.noMemoryErrors(os.path.join(tmpDir, 'valgrind.xml')))
mtt.removeDir(tmpDir)
def testMemory2(self):
""" If valgrind is installed on the system, check for memory related errors (2).
"""
mtt.makeTempDirParent()
valgrind = mtt.which('valgrind')
if valgrind is None:
return
for i in xrange(0, 10):
tmpDir = os.path.abspath(mtt.makeTempDir('memory2'))
random.shuffle(g_nonDuplicateBlocks)
testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')),
''.join(g_nonDuplicateBlocks), g_headers)
expectedOutput = g_nonDuplicateBlocks
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cmd = mtt.genericValgrind(tmpDir)
cmd += [os.path.abspath(os.path.join(parent, 'test', 'mafDuplicateFilter')),
'--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf'))]
outpipes = [os.path.abspath(os.path.join(tmpDir, 'filtered.maf'))]
mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)
mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)
self.assertTrue(mtt.noMemoryErrors(os.path.join(tmpDir, 'valgrind.xml')))
mtt.removeDir(tmpDir)
if __name__ == '__main__':
unittest.main()
|
import chainer
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.datasets import fetch_20newsgroups
from nlp_utils import tokenize
def moving_window_window_iterator(sentences, window):
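    # Added note: yields every contiguous window of `window` tokens from each
    # sentence, e.g. window=3 over [a, b, c, d] gives [a, b, c] and [b, c, d];
    # sentences shorter than the window size yield nothing.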
for sentence in sentences:
for i in range(0, len(sentence) - window + 1):
yield sentence[i:i + window]
def calc_pmi(X):
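    # Added explanatory comments: X is expected to be a (window, vocab) 0/1
    # occurrence matrix (the caller fills its data with ones), so Y = X.T.dot(X)
    # counts word co-occurrences within windows and its diagonal holds per-word
    # window counts. The two divisions by the diagonal plus the scaling by
    # X.shape[0] (the number of windows) turn entry (i, j) into
    # count(i, j) * N / (count(i) * count(j)), an estimate of P(i, j) / (P(i) P(j));
    # taking the log and clamping at zero gives positive PMI.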
Y = X.T.dot(X).astype(np.float32)
Y_diag = Y.diagonal()
Y.data /= Y_diag[Y.indices]
Y.data *= X.shape[0]
for col in range(Y.shape[1]):
Y.data[Y.indptr[col]:Y.indptr[col + 1]] /= Y_diag[col]
Y.data = np.maximum(0., np.log(Y.data))
return Y
def create_text_adjacency_matrix(texts):
"""Create adjacency matrix from texts
Arguments:
        texts (list of list of str): List of documents, each given as a
            tokenized list of words.
    Returns:
        adj (scipy.sparse.coo_matrix): (Node, Node) shaped adjacency matrix
            with self-connections on the diagonal; normalization is applied
            later by the caller.
"""
    # The authors removed words occurring fewer than 5 times. That is not directly
    # expressible via min_df, so a slightly smaller value is used here.
transformer = sklearn.feature_extraction.text.TfidfVectorizer(
max_df=1.0, ngram_range=(1, 1), min_df=3, analyzer='word',
preprocessor=lambda x: x, tokenizer=lambda x: x,
norm=None, smooth_idf=False
)
freq_doc = transformer.fit_transform(texts)
freq_window = transformer.transform(
moving_window_window_iterator(texts, 20))
freq_window.data.fill(1)
mat_pmi = calc_pmi(freq_window)
adj = sp.bmat([[None, freq_doc], [freq_doc.T, mat_pmi]])
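    # Block layout note (added): the first len(texts) rows/columns correspond to
    # document nodes and the remaining ones to vocabulary words; the off-diagonal
    # blocks hold document-word tf-idf weights and the lower-right block holds the
    # word-word PMI scores.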
adj.setdiag(np.ones([adj.shape[0]], dtype=adj.dtype))
adj.eliminate_zeros()
    # it should already be COO, but the return format of bmat is not well documented,
    # so convert explicitly to be safe
adj = adj.tocoo()
return adj
def load_20newsgroups(validation_ratio, normalization):
"""Load text network (20 news group)
Arguments:
validation_ratio (float): Ratio of validation split
normalization (str): Variant of normalization method to use.
Returns:
adj (chainer.utils.sparse.CooMatrix): (Node, Node) shape
            normalized adjacency matrix.
        labels (np.ndarray): (Node, ) shape labels array
        idx_train (np.ndarray): Indices of the training nodes
        idx_val (np.ndarray): Indices of the validation nodes
        idx_test (np.ndarray): Indices of the test nodes
"""
train = fetch_20newsgroups(subset='train')
test = fetch_20newsgroups(subset='test')
adj = create_text_adjacency_matrix(
[tokenize(t) for t in (train['data'] + test['data'])])
if normalization == 'gcn':
adj = normalize(adj)
else:
adj = normalize_pygcn(adj)
n_train = int(len(train['data']) * (1.0 - validation_ratio))
n_all = len(train['data']) + len(test['data'])
idx_train = np.array(list(range(n_train)), np.int32)
idx_val = np.array(list(range(n_train, len(train['data']))), np.int32)
idx_test = np.array(list(range(len(train['data']), n_all)), np.int32)
labels = np.concatenate(
(train['target'], test['target'], np.full([adj.shape[0] - n_all], -1)))
labels = labels.astype(np.int32)
adj = to_chainer_sparse_variable(adj)
return adj, labels, idx_train, idx_val, idx_test
def normalize_pygcn(a):
""" normalize adjacency matrix with normalization-trick. This variant
is proposed in https://github.com/tkipf/pygcn .
Refer https://github.com/tkipf/pygcn/issues/11 for the author's comment.
Arguments:
        a (scipy.sparse.coo_matrix): Unnormalized adjacency matrix
Returns:
scipy.sparse.coo_matrix: Normalized adjacency matrix
"""
# no need to add identity matrix because self connection has already been added
# a += sp.eye(a.shape[0])
rowsum = np.array(a.sum(1))
rowsum_inv = np.power(rowsum, -1).flatten()
rowsum_inv[np.isinf(rowsum_inv)] = 0.
# ~D in the GCN paper
d_tilde = sp.diags(rowsum_inv)
return d_tilde.dot(a)
def normalize(adj):
""" normalize adjacency matrix with normalization-trick that is faithful to
the original paper.
Arguments:
        adj (scipy.sparse.coo_matrix): Unnormalized adjacency matrix
Returns:
scipy.sparse.coo_matrix: Normalized adjacency matrix
"""
# no need to add identity matrix because self connection has already been added
# a += sp.eye(a.shape[0])
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
# ~D in the GCN paper
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)
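# A minimal, self-contained sketch (added for illustration, not part of the
# original module): applies the same symmetric D^-1/2 A D^-1/2 renormalization
# to a tiny hand-written adjacency matrix. The 3-node graph below is an
# assumption used only for demonstration.
def _example_normalize():
    a = sp.coo_matrix(np.array([[1., 1., 0.],
                                [1., 1., 1.],
                                [0., 1., 1.]]))
    a_hat = normalize(a)
    # Each entry (i, j) is now a[i, j] / sqrt(deg(i) * deg(j)).
    print(a_hat.toarray())
    return a_hat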
def to_chainer_sparse_variable(mat):
mat = mat.tocoo().astype(np.float32)
ind = np.argsort(mat.row)
data = mat.data[ind]
row = mat.row[ind]
col = mat.col[ind]
shape = mat.shape
# check that adj's row indices are sorted
assert np.all(np.diff(row) >= 0)
return chainer.utils.CooMatrix(data, row, col, shape, order='C')
|
# Plug-ins/PlexSportsAgent.bundle/Contents/Code/Schedules/ESPNAPIScheduleAdapter.py
import json
import re
import threading
import Queue
from Constants import *
from Hashes import *
from StringUtils import *
from TimeZoneUtils import *
from Vectors import *
from ..Data.ESPNAPIDownloader import *
from ScheduleEvent import *
espnapi_abbreviation_corrections = {
LEAGUE_NBA: {
"GS": "GSW",
"NO": "NOP",
"NY": "NYK",
"SA": "SAS",
"UTAH": "UTA",
"WSH": "WAS",
},
LEAGUE_NFL: {
"WSH": "WAS",
}
}
ESPN_SUBSEASON_FLAG_PRESEASON = 1
ESPN_SUBSEASON_FLAG_REGULAR_SEASON = 2
ESPN_SUBSEASON_FLAG_POSTSEASON = 3
ESPN_SUBSEASON_FLAG_OFFSEASON = 4
espn_subseason_flags_by_league = {
LEAGUE_MLB: {
ESPN_SUBSEASON_FLAG_PRESEASON: MLB_SUBSEASON_FLAG_PRESEASON,
ESPN_SUBSEASON_FLAG_REGULAR_SEASON: MLB_SUBSEASON_FLAG_REGULAR_SEASON,
ESPN_SUBSEASON_FLAG_POSTSEASON: MLB_SUBSEASON_FLAG_POSTSEASON,
},
LEAGUE_NBA: {
ESPN_SUBSEASON_FLAG_PRESEASON: NBA_SUBSEASON_FLAG_PRESEASON,
ESPN_SUBSEASON_FLAG_REGULAR_SEASON: NBA_SUBSEASON_FLAG_REGULAR_SEASON,
ESPN_SUBSEASON_FLAG_POSTSEASON: NBA_SUBSEASON_FLAG_POSTSEASON,
},
LEAGUE_NFL: {
ESPN_SUBSEASON_FLAG_PRESEASON: NFL_SUBSEASON_FLAG_PRESEASON,
ESPN_SUBSEASON_FLAG_REGULAR_SEASON: NFL_SUBSEASON_FLAG_REGULAR_SEASON,
ESPN_SUBSEASON_FLAG_POSTSEASON: NFL_SUBSEASON_FLAG_POSTSEASON,
},
LEAGUE_NHL: {
ESPN_SUBSEASON_FLAG_PRESEASON: NHL_SUBSEASON_FLAG_PRESEASON,
ESPN_SUBSEASON_FLAG_REGULAR_SEASON: NHL_SUBSEASON_FLAG_REGULAR_SEASON,
ESPN_SUBSEASON_FLAG_POSTSEASON: NHL_SUBSEASON_FLAG_POSTSEASON,
},
}
__cached_schedule_dates = dict() # [league][yyyy][mm][dd] = True
def GetSchedule(sched, navigator, sport, league, season):
	# Retrieve data from the ESPN API
processing = True
def monitor():
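		# Added explanatory comments: this nested function drains the date queue,
		# spawning one worker thread per queued date while the bounded semaphore
		# limits concurrent downloads to 25; it repeats until a pass finds no
		# pending work, then waits for the remaining workers to finish.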
while True: # processing:
threadpool = []
while q.unfinished_tasks: # len(q.queue) > 0:
date = q.get()
semaphore.acquire()
t = threading.Thread(target=process_date, kwargs={"date": date})
threadpool.append(t)
t.start()
if not threadpool: break
for t in threadpool:
t.join()
	# The ESPN scoreboard API only returns events for a single date per request,
	# so the schedule has to be assembled by downloading each calendar date
	# separately; the worker threads above parallelize those per-date downloads.
def process_date(date):
try:
downloadedJson = DownloadScheduleForLeagueAndDate(league, date)
process_json(downloadedJson)
finally:
semaphore.release()
q.task_done()
def process_json(downloadedJson):
if downloadedJson:
try: espnApiSchedule = json.loads(downloadedJson)
except ValueError: espnApiSchedule = None
if espnApiSchedule:
if espnApiSchedule.get("events"):
for schedEvent in espnApiSchedule["events"]:
if schedEvent and schedEvent.get("competitions"):
seasonType = schedEvent["season"]["type"]
subseason = __get_subseason(league, seasonType)
for competition in schedEvent["competitions"]:
id = deunicode(competition["id"])
# Errant data
if id in ["170501031", "170429031"] : continue
date = __hashedDateParse(deunicode(competition["date"]))
if id in ["250326006"]:
date = date.replace(tzinfo=EasternTime).astimezone(tz=UTC)
elif id in ["231030025"]: # 00:30Z, it's actually 00:30 EST
date = date.replace(tzinfo=JapanStandardTime, month=10, day=30, hour=19).astimezone(tz=UTC)
elif id in ["231031012"]: # 17:00Z, it's actually 22:00 EST (12:00 JST)
date = date.replace(tzinfo=JapanStandardTime, month=11, day=1, hour=12).astimezone(tz=UTC)
title = None
altTitle = None
altDescription = None
for note in competition["notes"]:
if note.get("type") == "event":
if not altTitle: altTitle = deunicode(note["headline"])
elif not title: title = deunicode(note["headline"])
else: break
if altTitle == "*": altTitle = None
elif altTitle == "FINA": altTitle = None
elif altTitle == "PPD": altTitle = None
elif altTitle == "IF NECESSARY": altTitle = None
elif altTitle and unicode(altTitle).isnumeric(): altTitle = None
elif altTitle and type(date) == datetime.datetime and altTitle.upper() == ("%s%s" % (date.astimezone(tz=EasternTime).strftime("%A, %b. "), date.astimezone(tz=EasternTime).day)).upper(): altTitle = None
elif altTitle and altTitle.upper().find("<NAME>") >= 0:
altDescription = altTitle[0:]
altTitle = None
elif altTitle and altTitle.upper().find("NFL PRESEASON") >= 0:
altDescription = altTitle[0:]
altTitle = None
teams = dict()
for competitor in competition["competitors"]:
key = deunicode(competitor["homeAway"])
teams.setdefault(key, {"fullName": None, "abbrev": None})
abbrev = deunicode(competitor["team"].get("abbreviation"))
if league in espnapi_abbreviation_corrections.keys() and abbrev in espnapi_abbreviation_corrections[league].keys():
abbrev = espnapi_abbreviation_corrections[league][abbrev]
teams[key]["abbrev"] = abbrev
teams[key]["fullName"] = deunicode(competitor["team"]["displayName"])
homeTeamKey = None
awayTeamKey = None
homeTeamName = teams["home"]["fullName"]
awayTeamName = teams["away"]["fullName"]
homeTeam = navigator.GetTeam(season, homeTeamName, abbreviation=teams["home"]["abbrev"])
awayTeam = navigator.GetTeam(season, awayTeamName, abbreviation=teams["away"]["abbrev"])
if homeTeam:
homeTeamKey = homeTeam.key
homeTeamName = homeTeam.fullName
if awayTeam:
awayTeamKey = awayTeam.key
awayTeamName = awayTeam.fullName
(xsubseason, playoffRound, eventIndicator, xaltTitle) = __get_playoffRound(league, subseason, title, altTitle, competition)
if xaltTitle and xaltTitle != altTitle: altTitle = xaltTitle
gameNumber = None
if altTitle:
foundGame = False
for expr in [r"(?:^|\b)(?:Game\s+(?P<game_number>\d+))(?:\b|$)"]:
if foundGame: break
m = re.search(expr, altTitle, re.IGNORECASE)
if m:
gameNumber = int(m.group("game_number"))
foundGame = True
break
ysubseason = None
week = None
if league == LEAGUE_NFL:
(ysubseason, week) = __get_nfl_week(league, date, seasonType, calendar)
if ysubseason != None and ysubseason != xsubseason: xsubseason = ysubseason
if not gameNumber and \
id not in ["400899377"] and \
league in [LEAGUE_MLB, LEAGUE_NBA, LEAGUE_NHL]:
if competition.get("series") and competition["series"].get("type") == "playoff":
if not xsubseason:
if league == LEAGUE_MLB:
xsubseason = MLB_SUBSEASON_FLAG_POSTSEASON
elif league == LEAGUE_NBA:
xsubseason = NBA_SUBSEASON_FLAG_POSTSEASON
elif league == LEAGUE_NHL:
xsubseason = NHL_SUBSEASON_FLAG_POSTSEASON
seriesSummary = competition["series"]["summary"]
mss = re.search(r"(?:^|\b)(?P<wins>\d+)\s*[\-]\s*(?P<losses>\d+)(?:\b|$)", seriesSummary, re.IGNORECASE)
if mss:
gameNumber = int(mss.group("wins")) + int(mss.group("losses"))
description = None
if competition.get("headlines"):
for headline in competition["headlines"]:
if headline["type"] == "Recap":
if headline.get("description"):
description = deunicode(normalize(headline["description"]))
elif headline.get("shortLinkText"):
description = deunicode(normalize(headline["shortLinkText"]))
# TODO Date strings as headlines
while description and description[:1] == '\u2014': description = description[1:]
while description and description[:3] == "\xe2\x80\x94": description = description[3:]
if description: description = description.strip()
networks = []
if competition.get("broadcasts"):
for broadcast in competition["broadcasts"]:
networks += broadcast["names"]
kwargs = {
"sport": sport,
"league": league,
"season": season,
"date": date,
"ESPNAPIID": id,
"eventTitle": title,
"altTitle": altTitle,
"description": description,
"altDescription": altDescription,
"homeTeam": homeTeamKey,
"homeTeamName": homeTeamName if not homeTeamKey else None,
"awayTeam": awayTeamKey,
"awayTeamName": awayTeamName if not awayTeamKey else None,
"subseason": xsubseason,
"week": week,
"playoffround": playoffRound,
"eventindicator": eventIndicator,
"game": gameNumber,
"networks": networks,
"vs": "%s vs. %s" % (homeTeamName, awayTeamName)
}
event = ScheduleEvent(**kwargs)
if gameNumber != None and event.game == None: print("FAILED TO SET GAME FROM '%s'" % altTitle)
event = AddOrAugmentEvent(sched, event)
if gameNumber != None and event.game == None: print("FAILED TO SET GAME FROM '%s'" % altTitle)
q = Queue.Queue()
semaphore = threading.BoundedSemaphore(value = 25)
datesToProcess = []
calendarsToProcess = []
# Verify Calendar matches season requested
year = int(season)
calendar = __process_calendar(league, season)
if calendar:
# shouldIncrementYear = False
# if calendar["dates"] and calendar["dates"][0].year < year:
# shouldIncrementYear = True
# elif calendar.get("startDate") and calendar["startDate"].year < year:
# shouldIncrementYear = True
# if shouldIncrementYear:
# xyear += 1
# calendar = __process_calendar(league, str(xyear), True)
calendarsToProcess.append(calendar)
for calendar in calendarsToProcess:
if calendar and calendar["dates"]:
for date in calendar["dates"]:
datesToProcess.append(date)
monitorThread = threading.Thread(target=monitor)
monitorThread.daemon = True
monitorThread.start()
now = datetime.datetime.utcnow().date()
for date in sorted(set(datesToProcess)):
if date > now: continue
q.put(date)
q.join()
__calendar_parse_hashes.clear()
__calendar_label_hashes.clear()
pass
def __process_calendar(league, season, isWhitelist = False):
def project_dates(obj):
dates = []
startDate = obj.get("startDate")
endDate = obj.get("endDate")
if startDate and endDate:
startDate = startDate.astimezone(tz=EasternTime).date()
endDate = endDate.astimezone(tz=EasternTime).date()
if endDate < startDate:
x = startDate
startDate = endDate
endDate = x
current = startDate
while current <= endDate:
dates.append(current)
current = current + datetime.timedelta(days=1)
dates = list(sorted(set(dates)))
return dates
apiScores = None
calendarJson = DownloadCalendarForLeagueAndSeason(league, season, isWhitelist)
if calendarJson:
try: apiScores = json.loads(calendarJson)
except ValueError: apiScores = None
apiLeague = {}
apiCalendar = []
apiCalendarObj = None
if apiScores and apiScores.get("leagues"):
apiLeague = apiScores["leagues"][0]
apiCalendar = apiLeague.get("calendar") or []
if apiCalendar: apiCalendarObj = apiCalendar[0]
calendar = {
"subseasons": [],
"dates": [],
"startDate": ParseISO8601Date(apiLeague["calendarStartDate"]) if apiLeague.get("calendarStartDate") else None,
"endDate": ParseISO8601Date(apiLeague["calendarEndDate"]) if apiLeague.get("calendarEndDate") else None,
}
	# apiLeague["calendarType"] is either 'list' or 'day'
if apiLeague.get("calendarIsWhitelist") == False and apiLeague.get("calendarType") == "day":
dates = project_dates(calendar)
blacklist = []
for x in apiCalendar:
blacklist.append(ParseISO8601Date(x).date())
if blacklist:
bl = list(sorted(set(blacklist)))
for i in range(len(dates)-1, -1, -1):
if not bl: break
if dates[i] == bl[-1]:
del(dates[i])
del(bl[-1])
calendar["dates"] = dates
else:
for x in apiCalendar:
# x could be a date string
if isinstance(x, basestring):
calendar["dates"].append(ParseISO8601Date(x).date())
continue
apiSubseasonObj = x
if apiSubseasonObj.get("label") == "Off Season": continue
subseasonObj = dict()
subseasonObj["label"] = deunicode(apiSubseasonObj["label"])
subseasonObj["value"] = int(apiSubseasonObj["value"])
subseasonObj["startDate"] = ParseISO8601Date(apiSubseasonObj["startDate"])
subseasonObj["endDate"] = ParseISO8601Date(apiSubseasonObj["endDate"])
dates = []
entries = []
subseasonObj["entries"] = entries
if apiSubseasonObj.get("entries"):
for apiEntry in apiSubseasonObj["entries"]:
entry = dict()
					# Careful here: MLB appears to format calendar entries differently
					# (sometimes plain date strings instead of entry objects).
if isinstance(apiEntry, basestring):
entry["label"] = deunicode(apiEntry)
dates += project_dates(subseasonObj)
else:
entry["label"] = deunicode(apiEntry["label"])
entry["alternateLabel"] = deunicode(apiEntry["alternateLabel"])
entry["value"] = apiEntry["value"]
if apiEntry.get("startDate"): entry["startDate"] = ParseISO8601Date(apiEntry["startDate"])
if apiEntry.get("endDate"): entry["endDate"] = ParseISO8601Date(apiEntry["endDate"])
dates += project_dates(entry)
entries.append(entry)
dates = list(set(dates))
subseasonObj["dates"] = sorted(dates)
calendar["dates"] = calendar["dates"] + dates
calendar["subseasons"].append(subseasonObj)
if not calendar["dates"]:
calendar["dates"] = project_dates(calendar)
calendar["dates"] = list(sorted(set(calendar["dates"])))
# TODO: if today in range, Project dates from greatest date prior to today, up to and including today (account for august gaps in MLB)
return calendar
def __get_subseason(league, seasonType):
subseason = None
if league in espn_subseason_flags_by_league.keys():
subseason = espn_subseason_flags_by_league[league].get(seasonType)
return subseason
def __get_playoffRound(league, subseason, title, altTitle, competition):
"""League-specific analysis."""
playoffRound = None
eventIndicator = None
subseason = subseason or 0
title = title or ""
altTitle = altTitle or ""
typeAbbrev = competition["type"]["abbreviation"] if competition.get("type") and competition["type"].get("abbreviation") else None
if league == LEAGUE_MLB and subseason == MLB_SUBSEASON_FLAG_POSTSEASON:
pass # TODO
elif league == LEAGUE_NBA:
if subseason == NBA_SUBSEASON_FLAG_POSTSEASON:
if typeAbbrev == "FINAL":
subseason = NBA_SUBSEASON_FLAG_POSTSEASON
playoffRound = NBA_PLAYOFF_ROUND_FINALS
elif typeAbbrev == "SEMI":
subseason = NBA_SUBSEASON_FLAG_POSTSEASON
playoffRound = NBA_PLAYOFF_ROUND_SEMIFINALS
elif typeAbbrev == "QTR":
subseason = NBA_SUBSEASON_FLAG_POSTSEASON
playoffRound = NBA_PLAYOFF_ROUND_QUARTERFINALS
elif typeAbbrev == "RD16":
subseason = NBA_SUBSEASON_FLAG_POSTSEASON
playoffRound = NBA_PLAYOFF_1ST_ROUND
elif subseason == NBA_SUBSEASON_FLAG_REGULAR_SEASON:
if altTitle and altTitle.upper().find("ALL-STAR GAME") >= 0:
eventIndicator = NBA_EVENT_FLAG_ALL_STAR_GAME
elif competition["type"]["id"] == 4 or competition["type"].get("abbreviation") == "ALLSTAR":
eventIndicator = NBA_EVENT_FLAG_ALL_STAR_GAME
elif altTitle and altTitle.upper() == "RISING STARS":
eventIndicator = NBA_EVENT_FLAG_RISING_STARS_GAME
elif league == LEAGUE_NFL:
if indexOf(altTitle.lower(), "hall of fame") >= 0:
subseason = NFL_SUBSEASON_FLAG_PRESEASON
eventIndicator = NFL_EVENT_FLAG_HALL_OF_FAME
elif indexOf(altTitle.lower(), "wild card") >= 0 or indexOf(altTitle.lower(), "wildcard") >= 0 or typeAbbrev == "RD16":
subseason = NFL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NFL_PLAYOFF_ROUND_WILDCARD
elif indexOf(altTitle.lower(), "division") >= 0 or typeAbbrev == "QTR":
subseason = NFL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NFL_PLAYOFF_ROUND_DIVISION
elif indexOf(altTitle.lower(), "championship") >= 0 or indexOf(altTitle.lower(), "conference") >= 0 or typeAbbrev == "SEMI":
subseason = NFL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NFL_PLAYOFF_ROUND_CHAMPIONSHIP
elif indexOf(altTitle.lower(), "super") >= 0 or typeAbbrev == "FINAL":
subseason = NFL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NFL_PLAYOFF_ROUND_SUPERBOWL
eventIndicator = NFL_EVENT_FLAG_SUPERBOWL
altTitle = altTitle.upper().replace("SUPER BOWL", "SUPERBOWL")
elif indexOf(altTitle.lower(), "pro bowl") >= 0 or typeAbbrev == "ALLSTAR":
eventIndicator = NFL_EVENT_FLAG_PRO_BOWL
elif league == LEAGUE_NHL:
if subseason == NHL_SUBSEASON_FLAG_POSTSEASON:
if indexOf(altTitle.lower(), "1st round") >= 0:
subseason = NHL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NHL_PLAYOFF_ROUND_1
elif indexOf(altTitle.lower(), "2nd round") >= 0:
subseason = NHL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NHL_PLAYOFF_ROUND_2
elif indexOf(altTitle.lower(), "stanley cup final") >= 0:
subseason = NHL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NHL_PLAYOFF_ROUND_3
elif indexOf(altTitle.lower(), " finals") >= 0:
subseason = NHL_SUBSEASON_FLAG_POSTSEASON
playoffRound = NHL_PLAYOFF_ROUND_3
else:
if altTitle and altTitle.find("ALL-STAR") >= 0:
if altTitle.find("SEMIFINAL") >= 0:
eventIndicator = NHL_EVENT_FLAG_ALL_STAR_SEMIFINAL
elif altTitle.find("FINAL") >= 0:
eventIndicator = NHL_EVENT_FLAG_ALL_STAR_GAME
else:
eventIndicator = NHL_EVENT_FLAG_ALL_STAR_GAME
elif competition.get("type") and competition["type"].get("id") == "4":
eventIndicator = NHL_EVENT_FLAG_ALL_STAR_GAME
return (subseason, playoffRound, eventIndicator, altTitle)
__nfl_week_title_expr = re.compile(r"(?P<preseason>(?:Preseason)\s)?Week\s(?P<week>\d+)", re.IGNORECASE)
__calendar_parse_hashes = dict() # [dateStr] = date()
__calendar_label_hashes = dict() # [label] = (subseason, week)
def __hashedDateParse(str):
if str in __calendar_parse_hashes.keys():
return __calendar_parse_hashes[str]
	# Parse as a timezone-aware datetime in Zulu (UTC) time
date = ParseISO8601Date(str)
if date: date = date.astimezone(tz=UTC)
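	# Added note: 05:00 UTC corresponds to midnight US Eastern Standard Time,
	# which the feed appears to use as a placeholder for date-only entries,
	# so such values are collapsed to a plain date below.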
if date.time() == datetime.time(5,0,0):
date = date.date()
__calendar_parse_hashes[str] = date
return date
def __get_nfl_week(league, date, seasonType, calendar):
subseason = None
week = None
if calendar.get("subseasons"):
for subseasonObj in calendar["subseasons"]:
xseasonType = int(subseasonObj["value"])
if subseasonObj.get("entries"):
for entry in subseasonObj["entries"]:
fromDate = entry["startDate"]
toDate = entry["endDate"]
if __is_date_in_range(date, fromDate, toDate):
weekStr = deunicode(entry["label"])
key = weekStr.lower()
if key in __calendar_label_hashes.keys():
subseason = __calendar_label_hashes[key][0]
week = __calendar_label_hashes[key][1]
else:
m = __nfl_week_title_expr.match(weekStr)
if not m:
__calendar_label_hashes.setdefault(key, (subseason, None))
__calendar_label_hashes[key] = (__calendar_label_hashes[key][0], None)
else:
gd = m.groupdict()
week = int(m.group("week"))
subseason = __get_subseason(league, xseasonType)
__calendar_label_hashes[key] = (subseason, week)
return (subseason, week)
def __is_date_in_range(date, fromDate, toDate):
    """Does the specified date lie between the ends of the given date range?"""
    x = date
    if type(date) == datetime.date:
        x = datetime.datetime(date.year, date.month, date.day, tzinfo=UTC)
    # toDate is up-to-the-minute (inclusive)
    return x >= fromDate and x <= toDate
|
from __future__ import print_function
'''
R-1.1 Write a short Python function, is_multiple(n, m), that takes two integer values and
returns True if n is a multiple of m, that is, n = mi for some integer i, and False otherwise
'''
def is_multiple(n, m):
try:
return n % m == 0
except ZeroDivisionError:
return True if n == 0 else False
'''
R-1.2 Write a short Python function, is_even(k), that takes an integer value and
returns True if k is even, and False otherwise.
However, your function cannot use the multiplication, modulo, or division operators.
'''
def is_even(k):
return bin(k).endswith('0')
'''
R-1.3 Write a short Python function, minmax(data), that takes a sequence of one or more numbers, and
returns the smallest and largest numbers, in the form of a tuple of length two.
Do not use the built-in functions min or max in implementing your solution.
'''
def minmax(data):
result = sorted(data)
return result[0], result[-1]
'''
R-1.4 Write a short Python function that takes a positive integer n and
returns the sum of the squares of all the positive integers smaller than n.
'''
def sumofsquares(n):
result = 0
i = 1
while i < n:
result += i * i
i += 1
return result
'''
R-1.5 Give a single command that computes the sum from Exercise R-1.4,
relying on Python's comprehension syntax and the built-in sum function.
'''
def sumofsquares2(n):
return sum(k * k for k in range(n))
'''
R-1.6 Write a short Python function that takes a positive integer n and
returns the sum of the squares of all the odd positive integers smaller than n.
'''
def sumofoddsquares(n):
result = 0
i = 1
while i < n:
if i % 2 == 1:
result += i * i
i += 1
return result
'''
R-1.7 Give a single command that computes the sum from Exercise R-1.6,
relying on Python's comprehension syntax and the built-in sum function.
'''
def sumofoddsquares2(n):
return sum(k * k for k in range(n) if k % 2 == 1)
'''
R-1.8 Python allows negative integers to be used as indices into a sequence, such as a string.
If string s has length n, and expression s[k] is used for index -n <= k < 0,
what is the equivalent index j >= 0 such that s[j] references the same element?
Answer: j = n + k
'''
'''
R-1.9 What parameters should be sent to the range constructor,
to produce a range with values 50, 60, 70, 80?
Answer: range(50, 90, 10)
'''
'''
R-1.10 What parameters should be sent to the range constructor,
to produce a range with values 8, 6, 4, 2, 0, -2, -4, -6, -8?
Answer: range(8, -10, -2)
'''
'''
R-1.11 Demonstrate how to use Python's list comprehension syntax
to produce the list [1, 2, 4, 8, 16, 32, 64, 128, 256]
Answer: list(pow(2, k) for k in range(9))
'''
'''
R-1.12 Python's random module includes a function choice(data) that returns a random element
from a non-empty sequence. The random module includes a more basic function randrange, with
parameterization similar to the built-in range function, that return a random choice from
the given range. Using only the randrange function, implement your own version of the choice function.
'''
import random
def mychoice(data):
return data[random.randrange(0, len(data))]
'''
C-1.13 Write a pseudo-code description of a function that reverses a list of n integers, so that
the numbers are listed in the opposite order than they were before, and compare this method to an
equivalent Python function for doing the same thing.
Answer:
current index is 0
mid is the ceiling of n / 2
while current index is smaller than mid
exchange list[current index] and list[n - 1 - current index]
current index increases by 1
'''
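# A short Python equivalent of the pseudo-code above (added sketch): it swaps
# symmetric positions in place, touching each element once, just like the
# description. The helper name reverse_in_place is an assumption, not from the text.
def reverse_in_place(data):
    n = len(data)
    for i in range(n // 2):
        data[i], data[n - 1 - i] = data[n - 1 - i], data[i]
    return data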
'''
C-1.14 Write a short Python function that takes a sequence of integer values and
determines if there is a distinct pair of numbers in the sequence whose product is odd.
'''
def is_product_odd_pair_in(data):
return len([k for k in data if k % 2 == 1]) > 1
'''
C-1.15 Write a Python function that takes a sequence of numbers and determines
if all the numbers are different from each other(that is, they are distinct).
'''
def is_sequence_elements_distinct(data):
    i = 0
    while i < len(data):
        i += 1
        # compare the element just passed against everything that follows it
        if data[i - 1] in data[i:]:
            return False
    return True
'''
C-1.16 In our implementation of the scale function (page 25), the body of the loop executes the command
data[j] *= factor. We have discussed that numeric types are immutable, and that use of the *= operator
in this context causes the creation of a new instance (not the mutation of an existing instance).
How is it still possible, then, that our implementation of scale changes the actual parameter sent by
the caller?
Answer: It's only possible if the sequence, that is data in this case, is mutable.
'''
'''
C-1.17 Had we implemented the scale function (page 25) as follows, does it work properly?
def scale(data, factor):
for val in data:
val *= factor
Explain why or why not.
Answer: No, it doesn't, because val is only a local name bound to the current element,
and rebinding it won't affect the actual element in the list.
'''
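# Added illustration for C-1.16/C-1.17: indexed assignment mutates the caller's
# list, while rebinding the loop variable does not. The function names below are
# assumptions used only for demonstration.
def scale_in_place(data, factor):
    for j in range(len(data)):
        data[j] *= factor  # rebinds slot j inside the (mutable) list
def scale_no_effect(data, factor):
    for val in data:
        val *= factor  # rebinds only the local name val; the list is untouched
# Example: after scale_in_place(nums, 2) the caller's nums is doubled;
# after scale_no_effect(nums, 2) it is unchanged.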
'''
C-1.18 Demonstrate how to use Python's list comprehension syntax
to produce the list [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
Answer: list(k * (k + 1) for k in range(10))
'''
'''
C-1.19 Demonstrate how to use Python's list comprehension syntax to produce the list
['a', 'b', 'c', ..., 'z'], but without having to type all 26 such characters literally.
Answer: list(chr(k + ord('a')) for k in range(26))
'''
'''
C-1.20 Python's random module includes a function shuffle(data) that accepts a list of elements
and randomly reorders the elements so that each possible order occurs with equal probability.
The random module includes a more basic function randint(a, b) that returns a uniformly random
integer from a to b (including both endpoints). Using only the randint function, implement your
own version of the shuffle function.
'''
def my_shuffle(data):
import random
index = 0
while index < len(data):
target = data[random.randint(index, len(data) - 1)]
data.remove(target)
data.insert(0, target)
index += 1
return data
'''
C-1.21 Write a Python program that repeatedly reads lines from standard input until an EOFError
is raised, and then outputs those lines in reverse order (a user can indicate end of input by
typing Ctrl-D).
'''
def main121():
lines = []
try:
while True:
lines.insert(0, input())
except EOFError:
pass
for line in lines:
print(line)
'''
C-1.22 Write a short Python program that takes two arrays a and b of length n storing int values,
and returns the dot product of a and b. That is, it returns an array c of length n such that
c[i] = a[i] * b[i], for i = 0, ..., n - 1.
'''
def array_dot_product(a, b):
return [a[i] * b[i] for i in range(len(a))]
'''
C-1.23 Give an example of a Python code fragment that attempts to write an element to a list based
on an index that may be out of bounds. If that index is out of bounds, the program should catch the
exception that results, and print the following error message:
"Don't try buffer overflow attacks in Python!"
'''
def try_write_out_of_bound():
some_list = []
try:
some_list[1] = 3
except IndexError:
print("Don't try buffer overflow attacks in Python!")
'''
C-1.24 Write a short Python function that counts the number of vowels in a given character string.
'''
def count_vowels(s):
return len([c for c in list(s) if c in ['a', 'e', 'i', 'o', 'u']])
'''
C-1.25 Write a short Python function that takes a string s, representing a sentence, and
returns a copy of the string with all punctuation removed. For example, if given the string
"Let's try, Mike.", this function would return "Let's try Mike"
'''
def remove_punc(s):
    # keep letters, spaces and apostrophes so "Let's try, Mike." -> "Let's try Mike"
    return ''.join(c for c in s if c.isalpha() or c in " '")
'''
C-1.26 Write a short program that takes as input three integers, a, b, and c, from the console and
determines if they can be used in a correct arithmetic formula (in the given order), like
"a + b = c", "a = b - c", or "a * b = c".
'''
def main2():
a = int(input())
b = int(input())
c = int(input())
import operator
arithmetics = [
operator.add,
operator.sub,
operator.mul,
operator.floordiv
]
try:
return c in [op(a, b) for op in arithmetics]
except ZeroDivisionError:
return False
'''
C-1.27 In section 1.8, we provided three different implementations of a generator that
computes factors of a given integer. The third of those implementations, from page 41,
was the most efficient, but we noted that it did not yield the factors in increasing order.
Modify the generator so that it reports factors in increasing order, while maintaining
its general performance advantages.
'''
def factors(n):
    k = 1
    bigger_half = []
    while k * k <= n:
        if n % k == 0:
            yield k
            if k != n // k:  # don't report the square root of a perfect square twice
                bigger_half.insert(0, n // k)
        k += 1
    for k in bigger_half:
        yield k
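# Added usage note: list(factors(100)) should yield the divisors of 100 in
# increasing order: [1, 2, 4, 5, 10, 20, 25, 50, 100].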
'''
C-1.28 The p-norm of a vector v = (v1, v2, ..., vn) in n-dimensional space is defined as
||v|| = p√(v1 ** p + v2 ** p + ... + vn ** p)
For the special case of p = 2, this results in the traditional Euclidean norm, which represents
the length of the vector. For example, the Euclidean norm of a two-dimensional vector with
coordinates (4, 3) has Euclidean norm of √(4 ** 2 + 3 ** 2) = √(16 + 9) = √25 = 5.
Give an implementation of a function named norm such that norm(v, p) returns the p-norm value of v
and norm(v) returns the Euclidean norm of v. You may assume that v is a list of numbers.
'''
def norm(v, p = 2):
import math
return math.pow(sum(math.pow(k, p) for k in v), 1 / p)
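# Added usage note: norm([4, 3]) -> 5.0 (the Euclidean length from the example),
# and norm([1, 1, 1], 1) -> 3.0 (the 1-norm is just the sum for non-negative entries).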
'''
P-1.29 Write a Python program that outputs all possible strings formed by using the characters
'c', 'a', 't', 'd', 'o', and 'g' exactly once
'''
def main129():
results = ['c']
for k in ['a', 't', 'd', 'o', 'g']:
temp = []
for result in results:
temp += [(result[:i] + k + result[i:]) for i in range(len(result) + 1)]
results = temp
return results
'''
P-1.30 Write a Python program that can take a positive integer greater than 2 as input and
write out the number of times one must repeatedly divide this number by 2
before getting a value less than 2.
'''
import sys
import argparse
import math
def main130():
    parser = argparse.ArgumentParser()
    parser.add_argument('input', action = GreaterThanTwoAction, metavar = 'N', type = int, nargs = 1)
    args = parser.parse_args()
    # floor(log2(n)) halvings are needed before the value drops below 2
    print(math.floor(math.log2(args.input[0])))
class GreaterThanTwoAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string = None):
if values[0] <= 2:
raise ValueError("N must be bigger than 2.")
setattr(namespace, self.dest, values)
'''
P-1.31 Write a Python program that can "make change." Your program should take two numbers as input,
one that is a monetary amount charged and the other that is a monetary amount given. It should
then return the number of each kind of bill and coin to give back as change for the difference
between the amount given and the amount charged. The values assigned to the bills and coins can be
based on the monetary system of any current or former government. Try to design your program
so that it returns as few bills and coins as possible.
The monetary system of PRC:
Banknotes: 100, 50, 20, 10, 5, 2, 1
Coins: 0.5, 0.1
'''
def main131():
parser = argparse.ArgumentParser()
parser.add_argument('charged', action = PositiveAction, metavar = 'charged', type = float, nargs = 1)
parser.add_argument('given', action = PositiveAction, metavar = 'given', type = float, nargs = 1)
args = parser.parse_args()
charged = args.charged[0]
given = args.given[0]
diff = given - charged
if diff < 0:
return print('Insufficient fund...')
print('Change is: ' + str(diff))
bills = [100, 50, 20, 10, 5, 2, 1, 0.5, 0.1]
change = [0] * len(bills)
for i in range(len(bills)):
while diff >= bills[i] or math.isclose(diff, bills[i], rel_tol=1e-1):
change[i] += 1
diff -= bills[i]
print('Banknotes: 100: {}, 50: {}, 20: {}, 10: {}, 5: {}, 2: {}, 1: {} Coins: 0.5: {}, 0.1: {}'.format(*change))
class PositiveAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string = None):
if values[0] <= 0:
raise ValueError("The input must be positive.")
setattr(namespace, self.dest, values)
'''
P-1.32 Write a Python program that can simulate a simple calculator, using the console
as the exclusive input and output device. That is, each input to the calculator, be it a number,
like 12.34 or 1034, or an operator, like + or =, can be done on a separate line. After
each such input, you should output to the Python console what would be displayed on your calculator.
'''
'''
P-1.33 Write a Python program that simulates a handheld calculator. Your program should
process input from the Python console representing buttons that are "pushed," and then output
the contents of the screen after each operation is performed. Minimally, your calculator should
be able to process the basic arithmetic operations and a reset/clear operation.
'''
import operator
def main_calculator():
user_input = ''
op_dict = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv, '%': operator.mod}
current = None
op = None
operand = None
try:
while True:
user_input = input()
if user_input.replace('.', '', 1).isdigit():
if op == None:
current = int(user_input) if is_int(user_input) else float(user_input)
else:
operand = int(user_input) if is_int(user_input) else float(user_input)
current = op_dict[op](current, operand)
op = None
operand = None
print('=' + str(current))
elif user_input in '+-*/%' and operand == None:
op = user_input
if current == None:
current = 0
elif user_input in 'cC':
current = None
op = None
operand = None
except EOFError:
pass
def is_int(k):
try:
int(k)
return True
except ValueError:
return False
'''
P-1.34 A common punishment for school children is to write out a sentence multiple times. Write
a Python stand-alone program that will write out the following sentence one hundred times:
"I will never spam my friends again." Your program should number each of the sentences and
it should make eight different random-looking typos.
'''
def main134():
homework = "I will never spam my friends again."
typo = [
lambda s: s.replace('will', 'vill'),
lambda s: s.replace('never', 'navr'),
lambda s: s.replace('spam', 'span'),
lambda s: s.replace('friends', 'friend'),
lambda s: s.replace('again', 'agin'),
lambda s: s.replace('friends', 'frinds'),
lambda s: s.replace('will', 'wil'),
lambda s: s.replace('spam my', 'spammy')
]
random_lines = set()
while len(random_lines) < 8:
random_lines.add(random.randint(0, 99))
for i in range(100):
if i in random_lines:
random_typo = typo[random.randint(0, len(typo) - 1)]
print('{}: {}'.format(i + 1, random_typo(homework)))
typo.remove(random_typo)
else:
print('{}: {}'.format(i + 1, homework))
'''
P-1.35 The birthday paradox says that the probability that two people in a room will have the same
birthday is more than half, provided that n, the number of people in the room, is more than 23. This
property is not really a paradox, but many people find it surprising. Design a Python program that
can test this paradox by a series of experiments on randomly generated birthdays, testing it
for n = 5, 10, 15, 20, ..., 100.
'''
def main135():
    n = range(5, 105, 5)
    # the probability that every person in the room has a unique birthday
    for i in n:
        p_unique = math.prod(day / 365 for day in range(365, 365 - i, -1))
        print('The probability of a shared birthday for n = {} is {}'.format(i, 1 - p_unique))
'''
P-1.36 Write a Python program that inputs a list of words, separated by whitespace, and outputs
how many times each word appears in the list. You need not worry about efficiency at this point,
however, as this topic is something that will be addressed later in this book.
'''
def main136():
    src = 'path of some file'
    word_count = {}
    with open(src, "r") as fp:
        for line in fp:
            # split on any whitespace so words separated by tabs or newlines count too
            for word in line.split():
                if word in word_count:
                    word_count[word] += 1
                else:
                    word_count[word] = 1
    for word, count in word_count.items():
        print('{}: {}'.format(word, count))
if __name__ == "__main__":
    # No single entry point is defined; run an individual exercise instead,
    # e.g. main136() or main_calculator().
    pass
|
# control_de_flujo.py
"""Store in the list `naturales` the first 100 natural numbers (starting from 1),
using a while loop.
"""
naturales = []
n = 0
while n < 100:
n +=1
naturales.append(n)
#print(naturales)
"""Guarde en `acumulado` una lista con el siguiente patrón:
['1','1 2','1 2 3','1 2 3 4','1 2 3 4 5',...,'...47 48 49 50']
Hasta el número 50.
"""
rango = list(range(1,51))
p = ''
acumulado = []
#print(rango)
for n in (rango):
p = p+' '+str(n)
p = p.lstrip()
#print(p)
acumulado.append(p)
#print(acumulado)
"""Guarde en `suma100` el entero de la suma de todos los números entre 1 y 100:
"""
#lista = [1,2,3,4,5,6,7]
lista = list(range(1,101))
suma100 = 0
for elemento in lista:
#print(elemento)
suma100 += elemento
#print(suma100)
"""Guarde en `tabla100` un string con los primeros 10 múltiplos del número 134,
separados por coma, así:
'134,268,...'
"""
tabla100 = ''
cant = 0
for i in range(1,134*11):
#print('num',i)
#print('residuo', i % 134)
if i % 134 == 0 and cant < 10:
#tabla100 = multi + 'x '
tabla100 += str(i) + ','
#print('tabla:',tabla100)
#print('cant:',cant)
cant += 1
tabla100 = tabla100.rstrip(",")
#print(tabla100)
#print(cant)
"""Guardar en `multiplos3` la cantidad de números que son múltiplos de 3 y
menores o iguales a 300 en la lista `lista1` que se define a continuación (la lista
está ordenada).
"""
lista1 = [12, 15, 20, 27, 32, 39, 42, 48, 55, 66, 75, 82, 89, 91, 93, 105, 123, 132, 150, 180, 201, 203, 231, 250, 260, 267, 300, 304, 310, 312, 321, 326]
# multiplos3 = [if el %3 == 0 else False for el in lista1]
# print(multiplos3)
n = 0
multiplos3 = 0
for el in lista1:
    # print(el)
    n = el % 3
    #print(n)
    if n == 0 and el <= 300:
        multiplos3 += 1
#print(cant)
"""Guardar en `regresivo50` una lista con la cuenta regresiva desde el número
50 hasta el 1, así:
[
'50 49 48 47...',
'49 48 47 46...',
...
'5 4 3 2 1',
'4 3 2 1',
'3 2 1',
'2 1',
'1'
]
"""
regresivo50 = []
inicio = 50
while inicio > 0:
val = ''
for i in range(inicio,0,-1):
val += str(i) +' '
inicio = inicio -1
val = val.rstrip(" ")
regresivo50.append(val)
#print('lista: ',regresivo50)
"""Invierta la siguiente lista usando el bucle for y guarde el resultado en
`invertido` (sin hacer uso de la función `reversed` ni del método `reverse`)
"""
lista2 = list(range(1, 70, 5))
#print(lista2)
long = len(lista2)
invertido = []
for i in lista2:
#print(long)
long = long -1
invertido.append(lista2[long])
#print(invertido)
"""Guardar en `primos` una lista con todos los números primos desde el 37 al 300
Nota: Un número primo es un número entero que no se puede calcular multiplicando
otros números enteros.
"""
lista = list(range(37,301))
primos = []
noprimos = []
x = ''
for el in lista:
for i in range(2,301):
if el % i != 0 or el == i:
primos.append(el)
else:
noprimos.append(el)
primos = set(primos)
noprimos = set(noprimos)
primos = primos.difference(noprimos)
primos = sorted(list(primos))
#print(len(primos))
"""Guardar en `fibonacci` una lista con los primeros 60 términos de la serie de
Fibonacci.
Nota: En la serie de Fibonacci, los 2 primeros términos son 0 y 1, y a partir
del segundo cada uno se calcula sumando los dos anteriores términos de la serie.
[0, 1, 1, 2, 3, 5, 8, ...]
"""
fibonacci = [0,1]
n = 2
while n < 60:
long = len(fibonacci) -1
long2 = len(fibonacci) -2
fibonacci.append(fibonacci[long]+fibonacci[long2])
n = n+1
#print(fibonacci)
"""Guardar en `factorial` el factorial de 30
El factorial (símbolo:!) Significa multiplicar todos los números enteros desde
el 1 hasta el número elegido.
Por ejemplo, el factorial de 5 se calcula así:
5! = 5 × 4 × 3 × 2 × 1 = 120
"""
factorial = 1
num = list(range(30,0,-1))
for i in num:
factorial = factorial * i
#print(factorial)
"""Guarde en lista `pares` los elementos de la siguiente lista que esten
presentes en posiciones pares, pero solo hasta la posición 80.
"""
lista3 = [941, 149, 672, 208, 99, 562, 749, 947, 251, 750, 889, 596, 836, 742, 512, 19, 674, 142, 272, 773, 859, 598, 898, 930, 119, 107, 798, 447, 348, 402, 33, 678, 460, 144, 168, 290, 929, 254, 233, 563, 48, 249, 890, 871, 484, 265, 831, 694, 366, 499, 271, 123, 870, 986, 449, 894, 347, 346, 519, 969, 242, 57, 985, 250, 490, 93, 999, 373, 355, 466, 416, 937, 214, 707, 834, 126, 698, 268, 217, 406, 334, 285, 429, 130, 393, 396, 936, 572, 688, 765, 404, 970, 159, 98, 545, 412, 629, 361, 70, 602]
rango = list(range(0,81,2))
pares = []
for el in rango:
pares.append(lista3[el])
#print(pares)
"""Guarde en lista `cubos` el cubo (potencia elevada a la 3) de los números del
1 al 100.
"""
rango = list(range(1,101))
cubos = []
for el in rango:
cubos.append(el ** 3)
#print(cubos)
"""Encuentre la suma de la serie 2 +22 + 222 + 2222 + .. hasta sumar 10 términos
y guardar resultado en variable `suma_2s`
"""
n = 0
num = ''
suma_2s = 0
while n < 10:
num = int(str(num) +'2')
#print(num)
suma_2s = suma_2s + num
n = n+1
#print(suma_2s)
"""Guardar en un string llamado `patron` el siguiente patrón llegando a una
cantidad máxima de asteriscos de 30.
*
**
***
****
*****
******
*******
********
*********
********
*******
******
*****
****
***
**
*
"""
n = 0
patron = ''
p = ''
x = ''
for el in range(0,30):
#print(el)
p = '*' * el
#print(p)
patron += p + '\n'
for i in range(30,0,-1):
#print('i',i)
x = '*' * i
#print(x)
patron += x + '\n'
patron = patron.lstrip('\n')
patron = patron.rstrip('\n')
#print(patron)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: cosmos/distribution/v1beta1/distribution.proto, cosmos/distribution/v1beta1/genesis.proto, cosmos/distribution/v1beta1/query.proto, cosmos/distribution/v1beta1/tx.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List, Optional
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
@dataclass(eq=False, repr=False)
class MsgSetWithdrawAddress(betterproto.Message):
"""
MsgSetWithdrawAddress sets the withdraw address for a delegator (or
validator self-delegation).
"""
delegator_address: str = betterproto.string_field(1)
withdraw_address: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class MsgSetWithdrawAddressResponse(betterproto.Message):
"""
MsgSetWithdrawAddressResponse defines the Msg/SetWithdrawAddress response
type.
"""
pass
@dataclass(eq=False, repr=False)
class MsgWithdrawDelegatorReward(betterproto.Message):
"""
MsgWithdrawDelegatorReward represents delegation withdrawal to a delegator
from a single validator.
"""
delegator_address: str = betterproto.string_field(1)
validator_address: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class MsgWithdrawDelegatorRewardResponse(betterproto.Message):
"""
MsgWithdrawDelegatorRewardResponse defines the Msg/WithdrawDelegatorReward
response type.
"""
pass
@dataclass(eq=False, repr=False)
class MsgWithdrawValidatorCommission(betterproto.Message):
"""
MsgWithdrawValidatorCommission withdraws the full commission to the
validator address.
"""
validator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class MsgWithdrawValidatorCommissionResponse(betterproto.Message):
"""
MsgWithdrawValidatorCommissionResponse defines the
Msg/WithdrawValidatorCommission response type.
"""
pass
@dataclass(eq=False, repr=False)
class MsgFundCommunityPool(betterproto.Message):
"""
MsgFundCommunityPool allows an account to directly fund the community pool.
"""
amount: List["__base_v1_beta1__.Coin"] = betterproto.message_field(1)
depositor: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class MsgFundCommunityPoolResponse(betterproto.Message):
"""
MsgFundCommunityPoolResponse defines the Msg/FundCommunityPool response
type.
"""
pass
@dataclass(eq=False, repr=False)
class Params(betterproto.Message):
"""Params defines the set of params for the distribution module."""
community_tax: str = betterproto.string_field(1)
base_proposer_reward: str = betterproto.string_field(2)
bonus_proposer_reward: str = betterproto.string_field(3)
withdraw_addr_enabled: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class ValidatorHistoricalRewards(betterproto.Message):
"""
ValidatorHistoricalRewards represents historical rewards for a validator.
Height is implicit within the store key. Cumulative reward ratio is the sum
from the zeroeth period until this period of rewards / tokens, per the
spec. The reference count indicates the number of objects which might need
to reference this historical entry at any point. ReferenceCount = number
of outstanding delegations which ended the associated period (and might
need to read that record) + number of slashes which ended the associated
period (and might need to read that record) + one per validator for the
zeroeth period, set on initialization
"""
cumulative_reward_ratio: List[
"__base_v1_beta1__.DecCoin"
] = betterproto.message_field(1)
reference_count: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class ValidatorCurrentRewards(betterproto.Message):
"""
ValidatorCurrentRewards represents current rewards and current period for a
validator kept as a running counter and incremented each block as long as
the validator's tokens remain constant.
"""
rewards: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
period: int = betterproto.uint64_field(2)
@dataclass(eq=False, repr=False)
class ValidatorAccumulatedCommission(betterproto.Message):
"""
ValidatorAccumulatedCommission represents accumulated commission for a
validator kept as a running counter, can be withdrawn at any time.
"""
commission: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class ValidatorOutstandingRewards(betterproto.Message):
"""
    ValidatorOutstandingRewards represents outstanding (un-withdrawn) rewards
    for a validator; it is inexpensive to track and allows simple sanity checks.
"""
rewards: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class ValidatorSlashEvent(betterproto.Message):
"""
ValidatorSlashEvent represents a validator slash event. Height is implicit
within the store key. This is needed to calculate appropriate amount of
staking tokens for delegations which are withdrawn after a slash has
occurred.
"""
validator_period: int = betterproto.uint64_field(1)
fraction: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class ValidatorSlashEvents(betterproto.Message):
"""
ValidatorSlashEvents is a collection of ValidatorSlashEvent messages.
"""
validator_slash_events: List["ValidatorSlashEvent"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class FeePool(betterproto.Message):
"""FeePool is the global fee pool for distribution."""
community_pool: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class CommunityPoolSpendProposal(betterproto.Message):
"""
CommunityPoolSpendProposal details a proposal for use of community funds,
together with how many coins are proposed to be spent, and to which
recipient account.
"""
title: str = betterproto.string_field(1)
description: str = betterproto.string_field(2)
recipient: str = betterproto.string_field(3)
amount: List["__base_v1_beta1__.Coin"] = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class DelegatorStartingInfo(betterproto.Message):
"""
DelegatorStartingInfo represents the starting info for a delegator reward
period. It tracks the previous validator period, the delegation's amount of
staking token, and the creation height (to check later on if any slashes
have occurred). NOTE: Even though validators are slashed to whole staking
tokens, the delegators within the validator may be left with less than a
full token, thus sdk.Dec is used.
"""
previous_period: int = betterproto.uint64_field(1)
stake: str = betterproto.string_field(2)
height: int = betterproto.uint64_field(3)
@dataclass(eq=False, repr=False)
class DelegationDelegatorReward(betterproto.Message):
"""
DelegationDelegatorReward represents the properties of a delegator's
delegation reward.
"""
validator_address: str = betterproto.string_field(1)
reward: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CommunityPoolSpendProposalWithDeposit(betterproto.Message):
"""
CommunityPoolSpendProposalWithDeposit defines a CommunityPoolSpendProposal
with a deposit
"""
title: str = betterproto.string_field(1)
description: str = betterproto.string_field(2)
recipient: str = betterproto.string_field(3)
amount: str = betterproto.string_field(4)
deposit: str = betterproto.string_field(5)
@dataclass(eq=False, repr=False)
class DelegatorWithdrawInfo(betterproto.Message):
"""
    DelegatorWithdrawInfo is the address for where distribution rewards are
    withdrawn to by default. This struct is only used at genesis to feed in
    default withdraw addresses.
"""
# delegator_address is the address of the delegator.
delegator_address: str = betterproto.string_field(1)
# withdraw_address is the address to withdraw the delegation rewards to.
withdraw_address: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class ValidatorOutstandingRewardsRecord(betterproto.Message):
"""
ValidatorOutstandingRewardsRecord is used for import/export via genesis
json.
"""
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(1)
    # outstanding_rewards represents the outstanding rewards of a validator.
outstanding_rewards: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(
2
)
@dataclass(eq=False, repr=False)
class ValidatorAccumulatedCommissionRecord(betterproto.Message):
"""
ValidatorAccumulatedCommissionRecord is used for import / export via
genesis json.
"""
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(1)
# accumulated is the accumulated commission of a validator.
accumulated: "ValidatorAccumulatedCommission" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class ValidatorHistoricalRewardsRecord(betterproto.Message):
"""
ValidatorHistoricalRewardsRecord is used for import / export via genesis
json.
"""
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(1)
# period defines the period the historical rewards apply to.
period: int = betterproto.uint64_field(2)
# rewards defines the historical rewards of a validator.
rewards: "ValidatorHistoricalRewards" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class ValidatorCurrentRewardsRecord(betterproto.Message):
"""
ValidatorCurrentRewardsRecord is used for import / export via genesis json.
"""
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(1)
# rewards defines the current rewards of a validator.
rewards: "ValidatorCurrentRewards" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class DelegatorStartingInfoRecord(betterproto.Message):
"""
DelegatorStartingInfoRecord used for import / export via genesis json.
"""
# delegator_address is the address of the delegator.
delegator_address: str = betterproto.string_field(1)
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(2)
# starting_info defines the starting info of a delegator.
starting_info: "DelegatorStartingInfo" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class ValidatorSlashEventRecord(betterproto.Message):
"""
ValidatorSlashEventRecord is used for import / export via genesis json.
"""
# validator_address is the address of the validator.
validator_address: str = betterproto.string_field(1)
    # height defines the block height at which the slash event occurred.
height: int = betterproto.uint64_field(2)
# period is the period of the slash event.
period: int = betterproto.uint64_field(3)
# validator_slash_event describes the slash event.
validator_slash_event: "ValidatorSlashEvent" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class GenesisState(betterproto.Message):
"""GenesisState defines the distribution module's genesis state."""
    # params defines all the parameters of the module.
params: "Params" = betterproto.message_field(1)
# fee_pool defines the fee pool at genesis.
fee_pool: "FeePool" = betterproto.message_field(2)
    # delegator_withdraw_infos defines the delegator withdraw infos at genesis.
delegator_withdraw_infos: List["DelegatorWithdrawInfo"] = betterproto.message_field(
3
)
    # previous_proposer defines the previous proposer at genesis.
previous_proposer: str = betterproto.string_field(4)
    # outstanding_rewards defines the outstanding rewards of all validators at genesis.
outstanding_rewards: List[
"ValidatorOutstandingRewardsRecord"
] = betterproto.message_field(5)
    # validator_accumulated_commissions defines the accumulated commissions of all validators at genesis.
validator_accumulated_commissions: List[
"ValidatorAccumulatedCommissionRecord"
] = betterproto.message_field(6)
    # validator_historical_rewards defines the historical rewards of all validators at genesis.
validator_historical_rewards: List[
"ValidatorHistoricalRewardsRecord"
] = betterproto.message_field(7)
    # validator_current_rewards defines the current rewards of all validators at genesis.
validator_current_rewards: List[
"ValidatorCurrentRewardsRecord"
] = betterproto.message_field(8)
    # delegator_starting_infos defines the delegator starting infos at genesis.
delegator_starting_infos: List[
"DelegatorStartingInfoRecord"
] = betterproto.message_field(9)
    # validator_slash_events defines the validator slash events at genesis.
validator_slash_events: List[
"ValidatorSlashEventRecord"
] = betterproto.message_field(10)
@dataclass(eq=False, repr=False)
class QueryParamsRequest(betterproto.Message):
"""
QueryParamsRequest is the request type for the Query/Params RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryParamsResponse(betterproto.Message):
"""
QueryParamsResponse is the response type for the Query/Params RPC method.
"""
# params defines the parameters of the module.
params: "Params" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryValidatorOutstandingRewardsRequest(betterproto.Message):
"""
QueryValidatorOutstandingRewardsRequest is the request type for the
Query/ValidatorOutstandingRewards RPC method.
"""
# validator_address defines the validator address to query for.
validator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryValidatorOutstandingRewardsResponse(betterproto.Message):
"""
QueryValidatorOutstandingRewardsResponse is the response type for the
Query/ValidatorOutstandingRewards RPC method.
"""
rewards: "ValidatorOutstandingRewards" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryValidatorCommissionRequest(betterproto.Message):
"""
QueryValidatorCommissionRequest is the request type for the
Query/ValidatorCommission RPC method
"""
# validator_address defines the validator address to query for.
validator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryValidatorCommissionResponse(betterproto.Message):
"""
QueryValidatorCommissionResponse is the response type for the
Query/ValidatorCommission RPC method
"""
    # commission defines the commission the validator received.
commission: "ValidatorAccumulatedCommission" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryValidatorSlashesRequest(betterproto.Message):
"""
QueryValidatorSlashesRequest is the request type for the
Query/ValidatorSlashes RPC method
"""
# validator_address defines the validator address to query for.
validator_address: str = betterproto.string_field(1)
# starting_height defines the optional starting height to query the slashes.
starting_height: int = betterproto.uint64_field(2)
    # ending_height defines the optional ending height to query the slashes.
ending_height: int = betterproto.uint64_field(3)
# pagination defines an optional pagination for the request.
pagination: "__base_query_v1_beta1__.PageRequest" = betterproto.message_field(4)
@dataclass(eq=False, repr=False)
class QueryValidatorSlashesResponse(betterproto.Message):
"""
QueryValidatorSlashesResponse is the response type for the
Query/ValidatorSlashes RPC method.
"""
# slashes defines the slashes the validator received.
slashes: List["ValidatorSlashEvent"] = betterproto.message_field(1)
# pagination defines the pagination in the response.
pagination: "__base_query_v1_beta1__.PageResponse" = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class QueryDelegationRewardsRequest(betterproto.Message):
"""
QueryDelegationRewardsRequest is the request type for the
Query/DelegationRewards RPC method.
"""
# delegator_address defines the delegator address to query for.
delegator_address: str = betterproto.string_field(1)
# validator_address defines the validator address to query for.
validator_address: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class QueryDelegationRewardsResponse(betterproto.Message):
"""
QueryDelegationRewardsResponse is the response type for the
Query/DelegationRewards RPC method.
"""
# rewards defines the rewards accrued by a delegation.
rewards: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryDelegationTotalRewardsRequest(betterproto.Message):
"""
QueryDelegationTotalRewardsRequest is the request type for the
Query/DelegationTotalRewards RPC method.
"""
# delegator_address defines the delegator address to query for.
delegator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryDelegationTotalRewardsResponse(betterproto.Message):
"""
QueryDelegationTotalRewardsResponse is the response type for the
Query/DelegationTotalRewards RPC method.
"""
# rewards defines all the rewards accrued by a delegator.
rewards: List["DelegationDelegatorReward"] = betterproto.message_field(1)
# total defines the sum of all the rewards.
total: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class QueryDelegatorValidatorsRequest(betterproto.Message):
"""
QueryDelegatorValidatorsRequest is the request type for the
Query/DelegatorValidators RPC method.
"""
# delegator_address defines the delegator address to query for.
delegator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryDelegatorValidatorsResponse(betterproto.Message):
"""
QueryDelegatorValidatorsResponse is the response type for the
Query/DelegatorValidators RPC method.
"""
# validators defines the validators a delegator is delegating for.
validators: List[str] = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryDelegatorWithdrawAddressRequest(betterproto.Message):
"""
QueryDelegatorWithdrawAddressRequest is the request type for the
Query/DelegatorWithdrawAddress RPC method.
"""
# delegator_address defines the delegator address to query for.
delegator_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryDelegatorWithdrawAddressResponse(betterproto.Message):
"""
QueryDelegatorWithdrawAddressResponse is the response type for the
Query/DelegatorWithdrawAddress RPC method.
"""
    # withdraw_address defines the address the delegator's rewards are withdrawn to.
withdraw_address: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryCommunityPoolRequest(betterproto.Message):
"""
QueryCommunityPoolRequest is the request type for the Query/CommunityPool
RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryCommunityPoolResponse(betterproto.Message):
"""
QueryCommunityPoolResponse is the response type for the Query/CommunityPool
RPC method.
"""
# pool defines community pool's coins.
pool: List["__base_v1_beta1__.DecCoin"] = betterproto.message_field(1)
class MsgStub(betterproto.ServiceStub):
async def set_withdraw_address(
self, *, delegator_address: str = "", withdraw_address: str = ""
) -> "MsgSetWithdrawAddressResponse":
request = MsgSetWithdrawAddress()
request.delegator_address = delegator_address
request.withdraw_address = withdraw_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Msg/SetWithdrawAddress",
request,
MsgSetWithdrawAddressResponse,
)
async def withdraw_delegator_reward(
self, *, delegator_address: str = "", validator_address: str = ""
) -> "MsgWithdrawDelegatorRewardResponse":
request = MsgWithdrawDelegatorReward()
request.delegator_address = delegator_address
request.validator_address = validator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Msg/WithdrawDelegatorReward",
request,
MsgWithdrawDelegatorRewardResponse,
)
async def withdraw_validator_commission(
self, *, validator_address: str = ""
) -> "MsgWithdrawValidatorCommissionResponse":
request = MsgWithdrawValidatorCommission()
request.validator_address = validator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Msg/WithdrawValidatorCommission",
request,
MsgWithdrawValidatorCommissionResponse,
)
async def fund_community_pool(
self,
*,
amount: Optional[List["__base_v1_beta1__.Coin"]] = None,
depositor: str = "",
) -> "MsgFundCommunityPoolResponse":
amount = amount or []
request = MsgFundCommunityPool()
if amount is not None:
request.amount = amount
request.depositor = depositor
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Msg/FundCommunityPool",
request,
MsgFundCommunityPoolResponse,
)
class QueryStub(betterproto.ServiceStub):
async def params(self) -> "QueryParamsResponse":
request = QueryParamsRequest()
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/Params", request, QueryParamsResponse
)
async def validator_outstanding_rewards(
self, *, validator_address: str = ""
) -> "QueryValidatorOutstandingRewardsResponse":
request = QueryValidatorOutstandingRewardsRequest()
request.validator_address = validator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/ValidatorOutstandingRewards",
request,
QueryValidatorOutstandingRewardsResponse,
)
async def validator_commission(
self, *, validator_address: str = ""
) -> "QueryValidatorCommissionResponse":
request = QueryValidatorCommissionRequest()
request.validator_address = validator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/ValidatorCommission",
request,
QueryValidatorCommissionResponse,
)
async def validator_slashes(
self,
*,
validator_address: str = "",
starting_height: int = 0,
ending_height: int = 0,
pagination: "__base_query_v1_beta1__.PageRequest" = None,
) -> "QueryValidatorSlashesResponse":
request = QueryValidatorSlashesRequest()
request.validator_address = validator_address
request.starting_height = starting_height
request.ending_height = ending_height
if pagination is not None:
request.pagination = pagination
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/ValidatorSlashes",
request,
QueryValidatorSlashesResponse,
)
async def delegation_rewards(
self, *, delegator_address: str = "", validator_address: str = ""
) -> "QueryDelegationRewardsResponse":
request = QueryDelegationRewardsRequest()
request.delegator_address = delegator_address
request.validator_address = validator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/DelegationRewards",
request,
QueryDelegationRewardsResponse,
)
async def delegation_total_rewards(
self, *, delegator_address: str = ""
) -> "QueryDelegationTotalRewardsResponse":
request = QueryDelegationTotalRewardsRequest()
request.delegator_address = delegator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/DelegationTotalRewards",
request,
QueryDelegationTotalRewardsResponse,
)
async def delegator_validators(
self, *, delegator_address: str = ""
) -> "QueryDelegatorValidatorsResponse":
request = QueryDelegatorValidatorsRequest()
request.delegator_address = delegator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/DelegatorValidators",
request,
QueryDelegatorValidatorsResponse,
)
async def delegator_withdraw_address(
self, *, delegator_address: str = ""
) -> "QueryDelegatorWithdrawAddressResponse":
request = QueryDelegatorWithdrawAddressRequest()
request.delegator_address = delegator_address
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/DelegatorWithdrawAddress",
request,
QueryDelegatorWithdrawAddressResponse,
)
async def community_pool(self) -> "QueryCommunityPoolResponse":
request = QueryCommunityPoolRequest()
return await self._unary_unary(
"/cosmos.distribution.v1beta1.Query/CommunityPool",
request,
QueryCommunityPoolResponse,
)
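# --- Hedged usage sketch (editor addition, not generated code) ---------------
# QueryStub above is a betterproto client stub driven over a grpclib channel.
# Host, port and the bech32 addresses are illustrative assumptions; Cosmos SDK
# nodes commonly expose gRPC on port 9090.
async def _example_query_delegation_rewards():
    from grpclib.client import Channel  # transport expected by betterproto stubs

    channel = Channel(host="localhost", port=9090)  # assumed gRPC endpoint
    try:
        query = QueryStub(channel)
        resp = await query.delegation_rewards(
            delegator_address="cosmos1exampledelegator",         # placeholder
            validator_address="cosmosvaloper1examplevalidator",  # placeholder
        )
        return resp.rewards  # list of DecCoin accrued by the delegation
    finally:
        channel.close()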
class MsgBase(ServiceBase):
async def set_withdraw_address(
self, delegator_address: str, withdraw_address: str
) -> "MsgSetWithdrawAddressResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def withdraw_delegator_reward(
self, delegator_address: str, validator_address: str
) -> "MsgWithdrawDelegatorRewardResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def withdraw_validator_commission(
self, validator_address: str
) -> "MsgWithdrawValidatorCommissionResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def fund_community_pool(
self, amount: Optional[List["__base_v1_beta1__.Coin"]], depositor: str
) -> "MsgFundCommunityPoolResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_set_withdraw_address(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
"withdraw_address": request.withdraw_address,
}
response = await self.set_withdraw_address(**request_kwargs)
await stream.send_message(response)
async def __rpc_withdraw_delegator_reward(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
"validator_address": request.validator_address,
}
response = await self.withdraw_delegator_reward(**request_kwargs)
await stream.send_message(response)
async def __rpc_withdraw_validator_commission(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"validator_address": request.validator_address,
}
response = await self.withdraw_validator_commission(**request_kwargs)
await stream.send_message(response)
async def __rpc_fund_community_pool(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"amount": request.amount,
"depositor": request.depositor,
}
response = await self.fund_community_pool(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/cosmos.distribution.v1beta1.Msg/SetWithdrawAddress": grpclib.const.Handler(
self.__rpc_set_withdraw_address,
grpclib.const.Cardinality.UNARY_UNARY,
MsgSetWithdrawAddress,
MsgSetWithdrawAddressResponse,
),
"/cosmos.distribution.v1beta1.Msg/WithdrawDelegatorReward": grpclib.const.Handler(
self.__rpc_withdraw_delegator_reward,
grpclib.const.Cardinality.UNARY_UNARY,
MsgWithdrawDelegatorReward,
MsgWithdrawDelegatorRewardResponse,
),
"/cosmos.distribution.v1beta1.Msg/WithdrawValidatorCommission": grpclib.const.Handler(
self.__rpc_withdraw_validator_commission,
grpclib.const.Cardinality.UNARY_UNARY,
MsgWithdrawValidatorCommission,
MsgWithdrawValidatorCommissionResponse,
),
"/cosmos.distribution.v1beta1.Msg/FundCommunityPool": grpclib.const.Handler(
self.__rpc_fund_community_pool,
grpclib.const.Cardinality.UNARY_UNARY,
MsgFundCommunityPool,
MsgFundCommunityPoolResponse,
),
}
class QueryBase(ServiceBase):
async def params(self) -> "QueryParamsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def validator_outstanding_rewards(
self, validator_address: str
) -> "QueryValidatorOutstandingRewardsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def validator_commission(
self, validator_address: str
) -> "QueryValidatorCommissionResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def validator_slashes(
self,
validator_address: str,
starting_height: int,
ending_height: int,
pagination: "__base_query_v1_beta1__.PageRequest",
) -> "QueryValidatorSlashesResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delegation_rewards(
self, delegator_address: str, validator_address: str
) -> "QueryDelegationRewardsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delegation_total_rewards(
self, delegator_address: str
) -> "QueryDelegationTotalRewardsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delegator_validators(
self, delegator_address: str
) -> "QueryDelegatorValidatorsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def delegator_withdraw_address(
self, delegator_address: str
) -> "QueryDelegatorWithdrawAddressResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def community_pool(self) -> "QueryCommunityPoolResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_params(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.params(**request_kwargs)
await stream.send_message(response)
async def __rpc_validator_outstanding_rewards(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"validator_address": request.validator_address,
}
response = await self.validator_outstanding_rewards(**request_kwargs)
await stream.send_message(response)
async def __rpc_validator_commission(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"validator_address": request.validator_address,
}
response = await self.validator_commission(**request_kwargs)
await stream.send_message(response)
async def __rpc_validator_slashes(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"validator_address": request.validator_address,
"starting_height": request.starting_height,
"ending_height": request.ending_height,
"pagination": request.pagination,
}
response = await self.validator_slashes(**request_kwargs)
await stream.send_message(response)
async def __rpc_delegation_rewards(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
"validator_address": request.validator_address,
}
response = await self.delegation_rewards(**request_kwargs)
await stream.send_message(response)
async def __rpc_delegation_total_rewards(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
}
response = await self.delegation_total_rewards(**request_kwargs)
await stream.send_message(response)
async def __rpc_delegator_validators(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
}
response = await self.delegator_validators(**request_kwargs)
await stream.send_message(response)
async def __rpc_delegator_withdraw_address(
self, stream: grpclib.server.Stream
) -> None:
request = await stream.recv_message()
request_kwargs = {
"delegator_address": request.delegator_address,
}
response = await self.delegator_withdraw_address(**request_kwargs)
await stream.send_message(response)
async def __rpc_community_pool(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.community_pool(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/cosmos.distribution.v1beta1.Query/Params": grpclib.const.Handler(
self.__rpc_params,
grpclib.const.Cardinality.UNARY_UNARY,
QueryParamsRequest,
QueryParamsResponse,
),
"/cosmos.distribution.v1beta1.Query/ValidatorOutstandingRewards": grpclib.const.Handler(
self.__rpc_validator_outstanding_rewards,
grpclib.const.Cardinality.UNARY_UNARY,
QueryValidatorOutstandingRewardsRequest,
QueryValidatorOutstandingRewardsResponse,
),
"/cosmos.distribution.v1beta1.Query/ValidatorCommission": grpclib.const.Handler(
self.__rpc_validator_commission,
grpclib.const.Cardinality.UNARY_UNARY,
QueryValidatorCommissionRequest,
QueryValidatorCommissionResponse,
),
"/cosmos.distribution.v1beta1.Query/ValidatorSlashes": grpclib.const.Handler(
self.__rpc_validator_slashes,
grpclib.const.Cardinality.UNARY_UNARY,
QueryValidatorSlashesRequest,
QueryValidatorSlashesResponse,
),
"/cosmos.distribution.v1beta1.Query/DelegationRewards": grpclib.const.Handler(
self.__rpc_delegation_rewards,
grpclib.const.Cardinality.UNARY_UNARY,
QueryDelegationRewardsRequest,
QueryDelegationRewardsResponse,
),
"/cosmos.distribution.v1beta1.Query/DelegationTotalRewards": grpclib.const.Handler(
self.__rpc_delegation_total_rewards,
grpclib.const.Cardinality.UNARY_UNARY,
QueryDelegationTotalRewardsRequest,
QueryDelegationTotalRewardsResponse,
),
"/cosmos.distribution.v1beta1.Query/DelegatorValidators": grpclib.const.Handler(
self.__rpc_delegator_validators,
grpclib.const.Cardinality.UNARY_UNARY,
QueryDelegatorValidatorsRequest,
QueryDelegatorValidatorsResponse,
),
"/cosmos.distribution.v1beta1.Query/DelegatorWithdrawAddress": grpclib.const.Handler(
self.__rpc_delegator_withdraw_address,
grpclib.const.Cardinality.UNARY_UNARY,
QueryDelegatorWithdrawAddressRequest,
QueryDelegatorWithdrawAddressResponse,
),
"/cosmos.distribution.v1beta1.Query/CommunityPool": grpclib.const.Handler(
self.__rpc_community_pool,
grpclib.const.Cardinality.UNARY_UNARY,
QueryCommunityPoolRequest,
QueryCommunityPoolResponse,
),
}
from ...base import v1beta1 as __base_v1_beta1__
from ...base.query import v1beta1 as __base_query_v1_beta1__
<reponame>benjiec/pychemy
# Tools for grouping proteins into non-conflicting sets for
# multiplexed screening of homologues
import re
from pychemy.peptides import mass_from_sequence
import numpy as np
import csv
import os
# Tryptic-style peptide pattern: a run of non-K/R residues terminated by R or K.
AA = re.compile('[ACDEFGHILMNPQSTVWY]+[RK]')
# Any single one of the 20 standard amino-acid residues (used for validation).
AA_check = re.compile('[ACDEFGHILKMNPQRSTVWY]')
def check_AA(test_string):
"""Check that all entries in sequence represent valid amino acids"""
if len(AA_check.findall(test_string)) == len(test_string):
return True
else:
raise Exception("Invalid amino acid in seq: " + test_string)
def check_proteins(proteins):
"""
Check that all proteins in list contain only valid amino acids
input:
proteins: list of protein sequences as strings ['protein_seq', ...]
"""
for p in proteins:
check_AA(p)
return True
def get_peptides(protein_seq):
"""
Creates list of peptides from protein sequence
input:
protein_seq: string containing protein sequence of amino acids
output:
list of strings containing peptide sequences from protein sequence
['peptide_seq', ...]
"""
check_AA(protein_seq)
return AA.findall(protein_seq)
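# Hedged example (editor addition, not part of the original module): a tiny
# demonstration of the tryptic-style digestion performed by get_peptides().
# The sequence is a made-up placeholder, not a real protein.
def _example_get_peptides():
    # Runs of non-R/K residues terminated by R or K become the peptides.
    return get_peptides('MAGRLLKPEPTIDEK')  # -> ['MAGR', 'LLK', 'PEPTIDEK']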
def all_peptides(proteins):
"""
Creates list of all peptides from protein set
inputs:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
list of strings containing peptide sequences from all protein
sequences ['peptide_seq', ...]
"""
return [p for peptides in [get_peptides(ps)
for ps in proteins] for p in peptides]
def distinct_peptides(proteins):
"""
Creates list of all distinct peptides from protein set
inputs:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
list of strings containing distinct peptide sequences from all
protein sequences ['peptide_seq', ...]
"""
return list(set(all_peptides(proteins)))
def unique_identifiers(proteins):
"""
Creates list of unique identifiers for each protein from set
inputs:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
list of lists containing protein sequence and a unique identifying
peptide sequence [['protein_seq', 'peptide_seq'], ...]
"""
all_pep = [[ps[0], pep] for ps in enumerate(proteins)
for pep in get_peptides(ps[1])]
dp = distinct_peptides(proteins)
ui = []
    for seq in dp:
        # indices of the proteins that contain this peptide
        matching = [item[0] for item in all_pep if item[1] == seq]
        if len(matching) == 1:
            ui.append([proteins[matching[0]], seq])
return ui
def peptides_per_protein(ui):
"""
Creates list of unique identifier peptides grouped by protein
input:
ui: a set of unique identifiers such as that produced by
unique_identifiers([proteins])
[['protein_seq', 'peptide_seq'],.....]
output:
list of unique identifier peptides grouped by their protein
[['protein_seq', ['peptide_seq', 'peptide_seq', ...]],.....]
"""
proteins = list(set([item[0] for item in ui]))
return [[p, [item[1] for item in ui if item[0] == p]] for p in proteins]
def min_ui_count(proteins):
"""
Counts the minimum number of unique identifier peptides across all proteins
in a set
input:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
minimum number of unique identifier peptides across all proteins in
a set
"""
temp = []
for p in peptides_per_protein(unique_identifiers(proteins)):
temp.append(len(p[1]))
if len(proteins) > len(temp):
return 0
else:
return min(temp)
def balanced_sets(proteins, max_set_size=10, unique=1):
"""
Creates balanced sets of proteins based on unique peptides subject to
    constraints
inputs:
proteins: list of protein sequences as strings
['protein_seq', ...]
max_set_size: maximum number of proteins permitted in a set
unique: minimum number of unique peptides required per protein
output:
sets of protein sequences that meet the set size and uniqueness
requirements [['protein_seq', ...], ...]
"""
check_proteins(proteins)
sets = []
for p in proteins:
placed = False
for idx, s in enumerate(sets):
if len(s) < max_set_size and not placed:
if min_ui_count(s + [p]) >= unique:
placed = True
sets[idx] += [p]
if not placed:
sets.append([p])
return sets
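# Hedged usage sketch (editor addition): driving balanced_sets() on a few
# made-up homologue sequences; real inputs would be full protein sequences.
def _example_balanced_sets():
    proteins = ['MAGRLLKPEPTIDEK',
                'MTGRLLKPEPTIDEK',
                'MAGRILKPEPTIDEK']
    # Keep at most two proteins per set, each retaining >= 1 unique peptide.
    return balanced_sets(proteins, max_set_size=2, unique=1)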
##############################################################################
# Calculate peptide ionizability based on the method used by STEPP from PNNL #
##############################################################################
class peptide_flyability():
def __init__(self):
self.NUM_FEATURES = 35
self.AA_ORDER = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dir, "resources/STEPP/STEPP_NormFactor_mean.txt")) as tsv:
self.STEPP_mean = np.array([float(line[0])
for line in csv.reader(tsv, delimiter="\t")])
with open(os.path.join(dir, "resources/STEPP/STEPP_NormFactor_std.txt")) as tsv:
self.STEPP_std = np.array([float(line[0])
for line in csv.reader(tsv, delimiter="\t")])
with open(os.path.join(dir, "resources/STEPP/STEPP_Weights.txt")) as tsv:
self.STEPP_weights = np.array([float(line[0])
for line in csv.reader(tsv,
delimiter="\t")])
self.non_polar_hydrophobic = set(['A', 'F', 'G', 'I', 'L',
'M', 'P', 'V', 'W', 'Y'])
self.polar_hydrophillic = set(['C', 'D', 'E', 'H', 'K',
'N', 'Q', 'R', 'S', 'T'])
self.uncharged_polar_hydrophillic = set(['C', 'N', 'Q', 'S', 'T'])
self.charged_polar_hydrophillic = set(['D', 'E', 'H', 'K', 'R'])
self.postive_polar_hydrophillic = set(['R', 'H', 'K'])
self.negative_polar_hydrophillic = set(['D', 'E'])
self.eisenberg_hydrophobicity = {'A': 0.620, 'C': 0.290, 'D': -0.900,
'E': -0.740, 'F': 1.190, 'G': 0.480,
'H': -0.400, 'I': 1.380, 'K': -1.500,
'L': 1.060, 'M': 0.640, 'N': -0.780,
'P': 0.120, 'Q': -0.850, 'R': -2.530,
'S': -0.180, 'T': -0.050, 'V': 1.080,
'W': 0.810, 'Y': 0.260}
self.hopp_woods_hydrophobicity = {'A': -0.500, 'C': -1.000, 'D': 3.000,
'E': 3.000, 'F': -2.500, 'G': 0.000,
'H': -0.500, 'I': -1.800, 'K': 3.000,
'L': -1.800, 'M': -1.300, 'N': 0.200,
'P': 0.000, 'Q': 0.200, 'R': 3.000,
'S': 0.300, 'T': -0.400, 'V': -1.500,
'W': -3.400, 'Y': -2.300}
self.kyte_doolittle_hydrophobicity = {'A': 1.800, 'C': 2.500,
'D': -3.500, 'E': -3.500,
'F': 2.800, 'G': -0.400,
'H': -3.200, 'I': 4.500,
'K': -3.900, 'L': 3.800,
'M': 1.900, 'N': -3.500,
'P': -1.600, 'Q': -3.500,
'R': -4.500, 'S': -0.800,
'T': -0.700, 'V': 4.200,
'W': -0.900, 'Y': -1.300}
self.roseman_hydropathicity = {'A': 0.390, 'C': 0.250, 'D': -3.810,
'E': -2.910, 'F': 2.270, 'G': 0.000,
'H': -0.640, 'I': 1.820, 'K': -2.770,
'L': 1.820, 'M': 0.960, 'N': -1.910,
'P': 0.990, 'Q': -1.300, 'R': -3.950,
'S': -1.240, 'T': -1.000, 'V': 1.300,
'W': 2.130, 'Y': 1.470}
self.grantham_polarity = {'A': 8.100, 'C': 5.500, 'D': 13.000,
'E': 12.300, 'F': 5.200, 'G': 9.000,
'H': 10.400, 'I': 5.200, 'K': 11.300,
'L': 4.900, 'M': 5.700, 'N': 11.600,
'P': 8.000, 'Q': 10.500, 'R': 10.500,
'S': 9.200, 'T': 8.600, 'V': 5.900,
'W': 5.400, 'Y': 6.200}
self.zimmerman_polarity = {'A': 0.000, 'C': 1.480, 'D': 49.700,
'E': 49.900, 'F': 0.350, 'G': 0.000,
'H': 51.600, 'I': 0.130, 'K': 49.500,
'L': 0.130, 'M': 1.430, 'N': 3.380,
'P': 1.580, 'Q': 3.530, 'R': 52.000,
'S': 1.670, 'T': 1.660, 'V': 0.130,
'W': 2.100, 'Y': 1.610}
self.zimmerman_bulkiness = {'A': 11.500, 'C': 13.460, 'D': 11.680,
'E': 13.570, 'F': 19.800, 'G': 3.400,
'H': 13.690, 'I': 21.400, 'K': 15.710,
'L': 21.400, 'M': 16.250, 'N': 12.820,
'P': 17.430, 'Q': 14.450, 'R': 14.280,
'V': 21.570, 'S': 9.470, 'T': 15.770,
'W': 21.670, 'Y': 18.030}
self.zeta_pos = -0.3821 # K_pos
self.sigma_pos = 0.3831 # scale_pos
self.mu_pos = 0.0739 # location_pos
self.zeta_neg = -0.1945 # K_neg
self.sigma_neg = 0.3860 # scale_neg
self.mu_neg = -0.4283 # location_neg
def SVM_props(self, seq=''):
"""
Creates feature vector containing properties used in SVM model
inputs:
seq: string containing amino acid sequence
output:
numpy.array feature vector containing 35 properties of amino acid
sequence
FEATURE VECTOR
1 Length
2 Molecular weight
3 Number of non-polar hydrophobic residues
4 Number of polar hydrophilic residues
5 Number of uncharged polar hydrophilic residues
6 Number of charged polar hydrophilic residues
7 Number of positively charged polar hydrophilic residues
8 Number of negatively charged polar hydrophilic residues
9 Hydrophobicity-Eisenberg scale (Eisenberg et al., 1984)
10 Hydrophilicity-Hopp-Woods scale (Hopp and Woods, 1981)
11 Hydrophobicity-Kyte-Doolittle (Kyte and Doolittle, 1982)
12 Hydropathicity-Roseman scale (Roseman, 1988)
13 Polarity-Grantham scale (Grantham, 1974)
14 Polarity-Zimmerman scale (Zimmerman et al., 1968)
15 Bulkiness (Zimmerman et al., 1968)
16-35 Amino acid singlet counts in order: ACDEFGHIKLMNPQRSTVWY
"""
if seq:
props = [float(len(seq)),
mass_from_sequence(seq),
sum([1.0 for aa in seq if aa in self.non_polar_hydrophobic]),
sum([1.0 for aa in seq if aa in self.polar_hydrophillic]),
sum([1.0 for aa in seq
if aa in self.uncharged_polar_hydrophillic]),
sum([1.0 for aa in seq
if aa in self.charged_polar_hydrophillic]),
sum([1.0 for aa in seq
if aa in self.postive_polar_hydrophillic]),
sum([1.0 for aa in seq
if aa in self.negative_polar_hydrophillic]),
float(sum(map(lambda x: self.eisenberg_hydrophobicity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.hopp_woods_hydrophobicity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.kyte_doolittle_hydrophobicity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.roseman_hydropathicity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.grantham_polarity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.zimmerman_polarity[x],
seq)))/float(len(seq)),
float(sum(map(lambda x: self.zimmerman_bulkiness[x],
seq)))/float(len(seq))]
props += [sum([1 for aa in seq if aa == AA]) for AA in self.AA_ORDER]
return np.array(props)
else:
return np.array([])
def SVM_score(self, fv=np.array([])):
"""
Calculates SVM score of a feature vector
inputs:
fv = numpy.array feature vector containing 35 properties of amino
acid sequence
output:
SVM score
"""
if len(fv) == self.NUM_FEATURES:
dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dir, "resources/STEPP/STEPP_SupportVectors.txt")) as tsv:
sv_idx = 0
svm_score = 0
for line in csv.reader(tsv, delimiter="\t"):
sv = np.array([float(i) for i in line])
                    # Normalized quadratic kernel:
                    #   ((fv . sv) / sqrt((fv . fv) * (sv . sv)) + 10) ** 2,
                    # weighted by the support vector's STEPP weight.
                    kxy = np.dot(fv, sv)
                    kxx = np.dot(fv, fv)
                    kyy = np.dot(sv, sv)
                    kxy /= np.sqrt(kxx * kyy)
                    kxy += 10
                    kxy = kxy ** 2
                    kxy *= self.STEPP_weights[sv_idx]
svm_score += kxy
sv_idx += 1
return svm_score
else:
return None
def ionization_prob(self, svm_score):
"""
Calculates ionization probability based on SVM score
inputs:
svm_score = SVM score output of calc_svm_score()
output:
ionization probability
"""
        def gevcdf(x, mu, sigma, zeta):
            # Generalized extreme value (GEV) CDF with location mu, scale sigma
            # and shape parameter zeta, evaluated at x.
            z = (x - mu) / sigma
            t = np.real((1 + z * zeta + 0j) ** (-1 / zeta))
            return np.exp(-t)
def positive_probability(x):
val = gevcdf(x, self.mu_pos, self.sigma_pos, self.zeta_pos)
if np.isnan(val) or np.isinf(val):
return 1
else:
return val
def negative_probability(x):
val = gevcdf(x, self.mu_neg, self.sigma_neg, self.zeta_neg)
if np.isnan(val) or np.isinf(val):
return 1
else:
return 1 - val
pos_prob = positive_probability(svm_score)
neg_prob = negative_probability(svm_score)
divisor = pos_prob + neg_prob
prob = pos_prob / divisor
return prob if prob > 0 else 0
def ionization_probs(self, peptides=[]):
"""
Calculates ionization probability and SVM score for all peptides in a set
inputs:
peptides = array of peptide sequence strings
['SAMPLE', 'SAMPLE',...]
output:
array of dictionaries
[{'seq': string, 'prob': float, 'svm_score': float}, ...]
"""
# Normalize vector based on baseline mean and stdev
norm_props = [np.divide(self.SVM_props(p) - self.STEPP_mean,
self.STEPP_std)
for p in peptides]
out = []
for idx, norm_prop in enumerate(norm_props):
svm_score = self.SVM_score(norm_prop)
if svm_score:
out += [{'seq': peptides[idx],
'prob': self.ionization_prob(svm_score),
'svm_score': svm_score}]
else:
out += [{'seq': peptides[idx],
'prob': 0,
'svm_score': None}]
return out
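# Hedged usage sketch (editor addition): scoring peptide detectability with the
# STEPP-based model above. Assumes the resources/STEPP/*.txt files loaded in
# __init__ ship alongside this module.
def _example_flyability_scores():
    pf = peptide_flyability()
    # Each result carries the sequence, its SVM score and ionization probability.
    return pf.ionization_probs(['MAGR', 'LLK', 'PEPTIDEK'])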
<filename>t/ehos/instances_test.py<gh_stars>1-10
import pytest
from unittest.mock import Mock, patch
import ehos.instances as I
import ehos
#import ehos.tyt
import sys
print(sys.modules['ehos.instances'] )
print( I )
db_name = 'ehos_testing'
url = "postgresql://ehos:ehos@127.0.0.1:5432/{db_name}".format( db_name=db_name )
def test_init():
i = I.Instances()
assert i._nodes == {}
def test_connect_disconnect():
i = I.Instances()
i.connect( url )
i.disconnect()
def test_add_cloud():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
def test_add_cloud_002():
i = I.Instances()
i.connect( url )
i.add_cloud(name='cph', instance='12345')
def test_add_cloud_dup():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
with pytest.raises( RuntimeError ):
i.add_cloud(name='cph', instance='12345')
def test_get_cloud():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
i.add_cloud(name='osl', instance='56789')
c = i.get_cloud('cph')
assert c == '12345'
def test_add_clouds():
i = I.Instances()
i.add_clouds({'a':123, 'b':456})
assert i.clouds() == {'a':123, 'b':456}
def test_get_cloud_unknown():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
i.add_cloud(name='osl', instance='56789')
with pytest.raises( RuntimeError ):
i.get_cloud('bgn')
def test_get_cloud_names():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
i.add_cloud(name='osl', instance='56789')
names = i.cloud_names()
assert names == ['cph', 'osl']
def test_get_cloud_names_empty():
i = I.Instances()
names = i.cloud_names()
assert names == []
def test_get_clouds():
i = I.Instances()
i.add_cloud(name='cph', instance='12345')
i.add_cloud(name='osl', instance='56789')
names = i.clouds()
assert names == {'cph':'12345', 'osl':'56789'}
def test_add_node():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_node( '123', name = 'qwerty', cloud='tyt')
assert i._nodes[ '123'][ 'name'] == 'qwerty'
assert i._nodes[ '123'][ 'cloud'] == 'tyt'
assert i._nodes[ '123'][ 'node_state'] == 'node_starting'
def test_add_node_002():
i = I.Instances()
i.connect( url )
i.add_cloud(name='tyt', instance='12345')
i.add_node( '123', name = 'qwerty', cloud='tyt')
assert i._nodes[ '123'][ 'name'] == 'qwerty'
assert i._nodes[ '123'][ 'cloud'] == 'tyt'
assert i._nodes[ '123'][ 'node_state'] == 'node_starting'
def test_add_node_duplicate_id():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_node( '123', name = 'qw<PASSWORD>2', cloud='tyt')
with pytest.raises( RuntimeError ):
i.add_node( '123', name = 'qwerty2', cloud='tyt')
def test_add_node_duplicate_name():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_node( '1234', name = 'qwerty2', cloud='tyt')
with pytest.raises( RuntimeError ):
i.add_node( '123', name = 'qwerty2', cloud='tyt')
def test_add_node_unknown_cloud():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_node( '1234', name = 'qwerty2', cloud='tyt')
with pytest.raises( RuntimeError ):
i.add_node( '123', name = 'qwerty2', cloud='tytss')
def test_add_node_unknown_illegal_state():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
with pytest.raises( RuntimeError ):
i.add_node( '123', name = 'qwerty2', cloud='tyt', vm_state='bla')
def test_add_node_unknown_illegal_node_state():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
with pytest.raises( RuntimeError ):
i.add_node( '123', name = 'qwerty2', cloud='tyt', node_state='bla')
def test_get_node():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_node( '123_1', name = 'qwerty1', cloud='tyt', node_state='node_idle')
i.add_node( '123_2', name = 'qwerty2', cloud='tyt')
i.add_node( '123_3', name = 'qwerty3', cloud='tyt2')
node = i.get_node('123_1')
print( node )
assert node == {'id':'123_1', 'name': 'qwerty1', 'cloud':'tyt', 'node_state':'node_idle', 'vm_state':'vm_booting'}
def test_get_node_ids():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_1', name = 'qwerty11', cloud='tyt1', node_state="node_busy")
i.add_node( '123_2', name = 'qwerty12', cloud='tyt2', node_state="node_busy")
i.add_node( '123_3', name = 'qwerty13', cloud='tyt3', node_state="node_busy")
nodes = i.get_node_ids()
assert nodes == ['123_1','123_2','123_3']
def test_get_node_names():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_node( '123_1', name = 'qwerty1', cloud='tyt')
i.add_node( '123_2', name = 'qwerty2', cloud='tyt')
i.add_node( '123_3', name = 'qwerty3', cloud='tyt2')
nodes = i.get_node_names()
assert nodes == ['qwerty1','qwerty2','qwerty3']
def test_get_node_names_by_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_names(vm_state='vm_booting')
assert nodes == ['qwerty11']
def test_get_node_names_by_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_names(node_state='node_idle')
assert nodes == ['qwerty11']
def test_get_node_names_by_cloud():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_names(cloud='tyt2')
assert nodes == ['qwerty12']
def test_get_node_unknown_id():
i = I.Instances()
i.add_cloud(name='tyt', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_node( '1234', name = 'qwerty2', cloud='tyt')
i.add_node( '123_2', name = 'qwerty22', cloud='tyt')
i.add_node( '123_3', name = 'qwerty32', cloud='tyt2')
with pytest.raises( RuntimeError ):
node = i.get_node('123_10')
def test_get_nodes_all():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', node_state="node_suspended")
nodes = i.get_nodes()
assert nodes == [{'cloud': 'tyt1', 'id': '123_11', 'name': 'qwerty11', 'vm_state': 'vm_booting', 'node_state': 'node_idle'},
{'cloud': 'tyt2', 'id': '123_12', 'name': 'qwerty12', 'vm_state': 'vm_booting', 'node_state': 'node_busy'},
{'cloud': 'tyt3', 'id': '123_13', 'name': 'qwerty13', 'vm_state': 'vm_booting', 'node_state': 'node_suspended'}]
def test_get_nodes_by_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_nodes(vm_state=['vm_booting'])
assert nodes == [{'cloud': 'tyt1', 'id': '123_11', 'name': 'qwerty11', 'vm_state': 'vm_booting', 'node_state': 'node_idle'},]
def test_get_nodes_illegal_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
with pytest.raises( RuntimeError ):
nodes = i.get_nodes(vm_state=['bootings'])
def test_get_nodes_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_nodes(node_state=['node_idle'])
assert nodes == [{'cloud': 'tyt1', 'id': '123_11', 'name': 'qwerty11', 'vm_state': 'vm_booting', 'node_state': 'node_idle'}]
def test_get_nodes_illegal_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
with pytest.raises( RuntimeError ):
nodes = i.get_nodes(node_state=['nay'])
def test_get_nodes_cloud():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_nodes(cloud=['tyt2'])
assert nodes == [{'cloud': 'tyt2', 'id': '123_12', 'name': 'qwerty12', 'vm_state': 'vm_active', 'node_state': 'node_busy'}]
def test_get_nodes_unknown_cloud():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
with pytest.raises( RuntimeError ):
nodes = i.get_nodes(cloud=['aws'])
def test_get_nodes_names():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_nodes(node_state=['node_idle'])
assert nodes == [{'cloud': 'tyt1', 'id': '123_11', 'name': 'qwerty11', 'vm_state': 'vm_booting', 'node_state': 'node_idle'}]
def test_get_node_ids_by_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_ids(vm_state='vm_booting')
assert nodes == ['123_11']
def test_get_node_ids_by_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_ids(node_state='node_idle')
assert nodes == ['123_11']
def test_get_node_ids_by_cloud():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.get_node_ids(cloud='tyt2')
assert nodes == ['123_12']
def test_get_node_ids_filtered_empty():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
nodes = i.get_node_ids(node_state='node_retiring')
assert nodes == []
def test_node_name2id():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.vm_name2id('qwerty21') == '123_21'
def test_node_name2id_unknown():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
with pytest.raises( RuntimeError ):
i.vm_name2id('qwerty213')
def test_get_node_id2name():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.vm_id2name('123_21') == 'qwerty21'
def test_node_id2name_unknown():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
with pytest.raises( RuntimeError ):
        i.vm_id2name('123_91')
def test_nodes_in_cloud():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
print( i.nodes_in_cloud('tyt3') )
assert sorted(i.nodes_in_cloud('tyt3')) == sorted(['123_23', '123_13'])
def test_nodes_in_cloud_empty():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.nodes_in_cloud('tyt4') == []
def test_node_state_counts( ):
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle", vm_state='vm_active')
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_busy", vm_state='vm_active')
i.add_node( '123_23', name = 'qwerty23', cloud='tyt2', node_state="node_benchmarking", vm_state='vm_active')
i.add_node( '123_24', name = 'qwerty24', cloud='tyt1', node_state="node_starting", vm_state='vm_booting')
i.add_node( '123_25', name = 'qwerty25', cloud='tyt1', node_state="node_vacating", vm_state='vm_unknown')
i.add_node( '123_26', name = 'qwerty26', cloud='tyt1', node_state="node_lost", vm_state='vm_booting')
assert i.node_state_counts()['all'] == {'node_idle': 1, 'node_busy': 3, 'node_other': 1, 'node_total': 4}
assert i.node_state_counts() == {'all': {'node_busy': 3, 'node_idle': 1, 'node_other': 1, 'node_total': 4},
'tyt1': {'node_busy': 1, 'node_idle': 1, 'node_other': 1, 'node_total': 2},
'tyt2': {'node_busy': 2, 'node_idle': 0, 'node_other': 0, 'node_total': 2}}
def test_set_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt1', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt1', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.set_vm_state('123_12', 'vm_deleted')
assert i._nodes[ '123_12']['vm_state'] == 'vm_deleted'
def test_set_state_unknown():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt1', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt1', vm_state='vm_suspended', node_state="node_suspended")
with pytest.raises( RuntimeError ):
nodes = i.set_vm_state('123_12', 'bla')
def test_set_state_unknown_node():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt1', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt1', vm_state='vm_suspended', node_state="node_suspended")
with pytest.raises( RuntimeError ):
nodes = i.set_vm_state('12ss3_12', 'bladsfsdf')
def test_set_state_same():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt1', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt1', vm_state='vm_suspended', node_state="node_suspended")
nodes = i.set_vm_state('123_12', 'vm_active')
assert i._nodes[ '123_12']['vm_state'] == 'vm_active'
def test_get_vm_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
i.add_node( '123_12', name = 'qwerty12', cloud='tyt1', vm_state='vm_active', node_state="node_busy")
i.add_node( '123_13', name = 'qwerty13', cloud='tyt1', vm_state='vm_suspended', node_state="node_suspended")
assert i.vm_state('123_12') == 'vm_active'
def test_get_state_unknown_node():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1', vm_state='vm_booting', node_state="node_idle")
with pytest.raises( RuntimeError ):
i.vm_state('123d_11')
def test_set_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
i.set_node_state( '123_22', 'node_busy')
assert i._nodes[ '123_22']['node_state'] == 'node_busy'
def test_set_node_state_unknown_node():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
with pytest.raises( RuntimeError ):
i.set_node_state( '123_23', 'yt')
def test_set_node_state_illegal_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
with pytest.raises( RuntimeError ):
i.set_node_state( '123_11', 'yt')
def test_get_node_state():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
i.set_node_state( '123_23','node_idle')
assert i.get_node_state('123_23') == 'node_idle'
def test_get_node_state_unknown():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
with pytest.raises( RuntimeError ):
i.get_node_state( '123_23dd')
def test_find_id():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.find( id='123_23') == {'cloud': 'tyt3',
'id': '123_23',
'name': 'qwerty23',
'vm_state': 'vm_booting',
'node_state': 'node_idle'}
def test_find_name():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
assert i.find( name='not-there') == None
def test_find_name_002():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
print( i.find( name='qwerty23') )
assert i.find( name='qwerty23') == {'cloud': 'tyt3',
'id': '123_23',
'name': 'qwerty23',
'vm_state': 'vm_booting',
'node_state': 'node_idle'}
def test_find_no_arguments():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
assert i.find( ) == None
def test_find_id_wrong():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.find( id='does_not_exist') == None
def test_find_name_wrong():
i = I.Instances()
i.add_cloud(name='tyt1', instance='12345')
i.add_cloud(name='tyt2', instance='12345')
i.add_cloud(name='tyt3', instance='12345')
i.add_node( '123_11', name = 'qwerty11', cloud='tyt1')
i.add_node( '123_12', name = 'qwerty12', cloud='tyt2')
i.add_node( '123_13', name = 'qwerty13', cloud='tyt3')
i.add_node( '123_21', name = 'qwerty21', cloud='tyt1', node_state="node_idle")
i.add_node( '123_22', name = 'qwerty22', cloud='tyt2', node_state="node_idle")
i.add_node( '123_23', name = 'qwerty23', cloud='tyt3', node_state="node_idle")
assert i.find( id='does_not_exist') == None
def fake_vm_list(clouds):
'''
node_idle = 1
node_starting = 2
node_busy = 3
node_suspended = 4
node_vacating = 5
node_killing = 6
node_benchmarking = 7
node_retiring = 8
node_lost = 9
vm_booting = 1
vm_active = 2
vm_suspended = 3
vm_restarting = 4
vm_stopping = 5
vm_deleted = 6
vm_unknown = 7
'''
vms = { 'uuid_1':{'name':'n1', 'cloud_name':'bgo', 'id':'uuid_1', 'vm_state':'vm_booting', 'node_state':'node_idle'},
'uuid_2':{'name':'n2', 'cloud_name':'bgo', 'id':'uuid_2', 'vm_state':'vm_active', 'node_state':'node_starting'},
'uuid_3':{'name':'n3', 'cloud_name':'osl', 'id':'uuid_3', 'vm_state':'vm_suspended', 'node_state':'node_idle'},
'uuid_4':{'name':'n4', 'cloud_name':'osl', 'id':'uuid_4', 'vm_state':'vm_restarting', 'node_state':'node_idle'},
'uuid_5':{'name':'n5', 'cloud_name':'osl', 'id':'uuid_5', 'vm_state':'vm_stopping', 'node_state':'node_idle'},
'uuid_6':{'name':'n6', 'cloud_name':'osl', 'id':'uuid_6', 'vm_state':'vm_deleted', 'node_state':'node_idle'},
'uuid_7':{'name':'n7', 'cloud_name':'osl', 'id':'uuid_7', 'vm_state':'vm_unknown', 'node_state':'node_idle'},
'uuid_8':{'name':'n8', 'cloud_name':'osl', 'id':'uuid_8', 'vm_state':'vm_unknown', 'node_state':'node_idle'},
}
return vms
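# The update tests below patch ehos.vm_list with fake_vm_list, so Instances.update()
# reconciles its bookkeeping against this fixed set of fake VMs instead of a real cloud.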
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_000():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_2', name = 'n2', cloud='bgo', vm_state='vm_active', node_state='node_idle')
nodes = {'not_ehos':'node_bla bla bla'}
i.update(nodes = nodes)
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_2',
'name': 'n2',
'node_state': 'node_idle',
'vm_state': 'vm_active'}]
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_001():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_2', name = 'n2', cloud='bgo', vm_state='vm_active', node_state='node_idle')
nodes = {'n2':"node_busy"}
i.update(nodes = nodes)
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_2',
'name': 'n2',
'node_state': 'node_busy',
'vm_state': 'vm_active'}]
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_002():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_99', name = 'n99', cloud='bgo', vm_state='vm_active', node_state='node_idle')
nodes = {'n2':"node_busy"}
i.update(nodes = nodes)
print( i.get_nodes())
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_99',
'name': 'n99',
'node_state': 'node_lost',
'vm_state': 'vm_deleted'},
{'cloud': 'bgo',
'id': 'uuid_2',
'name': 'n2',
'node_state': 'node_busy',
'vm_state': 'vm_active'},
]
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_003():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_1', name = 'n1', cloud='bgo', vm_state='vm_active', node_state='node_idle')
nodes = {'n1':"node_busy"}
print( i._nodes)
i.update(nodes = nodes)
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_1',
'name': 'n1',
'node_state': 'node_busy',
'vm_state': 'vm_active'}]
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_004():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_1', name = 'n1', cloud='bgo', vm_state='vm_active', node_state='node_idle')
nodes = {'n1':"node_suspended"}
print( i._nodes)
i.update(nodes = nodes)
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_1',
'name': 'n1',
'node_state': 'node_lost',
'vm_state': 'vm_deleted'}]
@patch.object(ehos, 'vm_list', fake_vm_list)
def test_update_005():
i = I.Instances()
i.add_cloud(name='bgo', instance='12345')
i.add_node( id='uuid_1', name = 'n1', cloud='bgo', vm_state='vm_booting', node_state='node_starting')
nodes = {'n1':"node_idle"}
print( i._nodes)
i.update(nodes = nodes)
assert i.get_nodes() == [{'cloud': 'bgo',
'id': 'uuid_1',
'name': 'n1',
'node_state': 'node_idle',
'vm_state': 'vm_active'}]
|
<reponame>Vishal324140/ElainaRobot<gh_stars>1-10
# MIT License
# Copyright (c) 2022 <NAME>™
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file is part of YuiGBot (Telegram Bot)
# I give credit for this module to YuiGBot.
from elaina import telethn
from elaina.events import register
TMP_DOWNLOAD_DIRECTORY = "./"
import os
from datetime import datetime
from PIL import Image
from telegraph import Telegraph, exceptions, upload_file
wibu = "Elaina"
telegraph = Telegraph()
r = telegraph.create_account(short_name=wibu)
auth_url = r["auth_url"]
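# /tgm (replying to a media message) uploads the file to Telegraph and replies with a
# telegra.ph link; /tgt builds a Telegraph text page from the replied message instead.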
@register(pattern="^/t(gm|gt) ?(.*)")
async def _(event):
if event.fwd_from:
return
optional_title = event.pattern_match.group(2)
if event.reply_to_msg_id:
start = datetime.now()
r_message = await event.get_reply_message()
input_str = event.pattern_match.group(1)
if input_str == "gm":
downloaded_file_name = await telethn.download_media(
r_message, TMP_DOWNLOAD_DIRECTORY
)
end = datetime.now()
ms = (end - start).seconds
h = await event.reply(
"Downloaded to {} in {} seconds.".format(downloaded_file_name, ms)
)
if downloaded_file_name.endswith((".webp")):
resize_image(downloaded_file_name)
try:
start = datetime.now()
media_urls = upload_file(downloaded_file_name)
except exceptions.TelegraphException as exc:
await h.edit("ERROR: " + str(exc))
os.remove(downloaded_file_name)
else:
end = datetime.now()
ms_two = (end - start).seconds
os.remove(downloaded_file_name)
await h.edit(
"Uploaded to https://telegra.ph{}".format(
media_urls[0], (ms + ms_two)
),
link_preview=True,
)
elif input_str == "gt":
user_object = await telethn.get_entity(r_message.sender_id)
title_of_page = user_object.first_name # + " " + user_object.last_name
# apparently, all Users do not have last_name field
if optional_title:
title_of_page = optional_title
page_content = r_message.message
if r_message.media:
if page_content != "":
title_of_page = page_content
downloaded_file_name = await telethn.download_media(
r_message, TMP_DOWNLOAD_DIRECTORY
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
for m in m_list:
page_content += m.decode("UTF-8") + "\n"
os.remove(downloaded_file_name)
page_content = page_content.replace("\n", "<br>")
response = telegraph.create_page(title_of_page, html_content=page_content)
end = datetime.now()
ms = (end - start).seconds
await event.reply(
"Pasted to https://telegra.ph/{}".format(response["path"], ms),
link_preview=True,
)
else:
await event.reply("Reply to a message to get a permanent telegra.ph link.")
def resize_image(image):
im = Image.open(image)
im.save(image, "PNG")
file_help = os.path.basename(__file__)
file_help = file_help.replace(".py", "")
file_helpo = file_help.replace("_", " ")
__mod_name__ = "Telegraph"
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import itertools
from enum import Enum
from typing import Union, List, Dict
import numpy as np
from rl_coach.agents.agent_interface import AgentInterface
from rl_coach.base_parameters import AgentParameters, VisualizationParameters
from rl_coach.core_types import ActionInfo, EnvResponse, ActionType, RunPhase
from rl_coach.filters.observation.observation_crop_filter import ObservationCropFilter
from rl_coach.saver import SaverCollection
from rl_coach.spaces import ActionSpace
from rl_coach.spaces import AgentSelection, AttentionActionSpace, SpacesDefinition
from rl_coach.utils import short_dynamic_import
class DecisionPolicy(object):
def choose_action(self, actions_info: Dict[str, ActionInfo]) -> ActionInfo:
"""
Given a list of actions from multiple agents, decide on a single action to take.
:param actions_info: a dictionary of agent names and their corresponding
ActionInfo instances containing information for each agents action
:return: a single action and the corresponding action info
"""
raise NotImplementedError("")
class SingleDecider(DecisionPolicy):
"""
A decision policy that chooses the action according to the agent that is currently in control.
"""
def __init__(self, default_decision_maker: str):
super().__init__()
self._decision_maker = default_decision_maker
@property
def decision_maker(self):
"""
Get the decision maker that was set by the upper level control.
"""
return self._decision_maker
@decision_maker.setter
def decision_maker(self, decision_maker: str):
"""
Set the decision maker by the upper level control.
:param action: the incoming action from the upper level control.
"""
self._decision_maker = decision_maker
def choose_action(self, actions_info: Dict[str, ActionInfo]) -> ActionInfo:
"""
Given a list of actions from multiple agents, take the action of the current decision maker
:param actions_info: a list of ActionInfo instances containing the information for each agents action
:return: a single action
"""
if self.decision_maker not in actions_info.keys():
raise ValueError("The current decision maker ({}) does not exist in the given actions ({})"
.format(self.decision_maker, actions_info.keys()))
return actions_info[self.decision_maker]
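# A minimal usage sketch (the agent names below are hypothetical, not part of this module):
#
#   policy = SingleDecider(default_decision_maker="driver")
#   policy.decision_maker = "navigator"        # normally set by the upper level control
#   chosen = policy.choose_action({"driver": ActionInfo(0), "navigator": ActionInfo(1)})
#   # chosen is the navigator's ActionInfo(1)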
class RoundRobin(DecisionPolicy):
"""
A decision policy that chooses the action according to agents selected in a circular order.
"""
def __init__(self, num_agents: int):
super().__init__()
self.round_robin = itertools.cycle(range(num_agents))
def choose_action(self, actions_info: Dict[str, ActionInfo]) -> ActionInfo:
"""
Given a list of actions from multiple agents, take the action of the current decision maker, which is set in a
circular order
:param actions_info: a list of ActionInfo instances containing the information for each agents action
:return: a single action
"""
decision_maker = next(self.round_robin)
if decision_maker not in range(len(actions_info)):
raise ValueError("The size of actions_info does not match the number of agents set for the RoundRobin"
" decision policy.")
# dict views are not indexable in Python 3, so convert the values to a list before indexing
return list(actions_info.values())[decision_maker]
class MajorityVote(DecisionPolicy):
"""
A decision policy that chooses the action that most of the agents chose.
This policy is only useful for discrete control.
"""
def __init__(self):
super().__init__()
def choose_action(self, actions_info: Dict[str, ActionInfo]) -> ActionInfo:
"""
Given a list of actions from multiple agents, take the action that most agents agree on
:param actions_info: a list of ActionInfo instances containing the information for each agents action
:return: a single action
"""
# TODO: enforce discrete action spaces
if len(actions_info.keys()) == 0:
raise ValueError("The given list of actions is empty")
vote_count = np.bincount([action_info.action for action_info in actions_info.values()])
majority_vote = int(np.argmax(vote_count))
# majority_vote is the winning action value (not an agent index) and dict views are not
# indexable, so wrap the winning action in a fresh ActionInfo, mirroring MeanDecision below
return ActionInfo(majority_vote)
class MeanDecision(DecisionPolicy):
"""
A decision policy that takes the mean action given the actions of all the agents.
This policy is only useful for continuous control.
"""
def __init__(self):
super().__init__()
def choose_action(self, actions_info: Dict[str, ActionInfo]) -> ActionInfo:
"""
Given a list of actions from multiple agents, take the mean action
:param actions_info: a list of ActionInfo instances containing the information for each agents action
:return: a single action
"""
# TODO: enforce continuous action spaces
if len(actions_info.keys()) == 0:
raise ValueError("The given list of actions is empty")
mean = np.mean([action_info.action for action_info in actions_info.values()], axis=0)
return ActionInfo(mean)
class RewardPolicy(Enum):
ReachingGoal = 0
NativeEnvironmentReward = 1
AccumulatedEnvironmentRewards = 2
class CompositeAgent(AgentInterface):
"""
A CompositeAgent is a group of agents in the same hierarchy level.
In a CompositeAgent, each agent may take the role of either a controller or an observer.
Each agent that is defined as observer, gets observations from the environment.
Each agent that is defined as controller, can potentially also control the environment, in addition to observing it.
There are several ways to decide on the action from different controller agents:
1. Ensemble -
- Take the majority vote (discrete controls)
- Take the mean action (continuous controls)
- Round robin between the agents (discrete/continuous)
2. Skills -
- At each step a single agent decides (chosen by the upper hierarchy controlling agent)
A CompositeAgent can be controlled using one of the following methods (ActionSpaces):
1. Goals (in terms of measurements, observation, embedding or a change in those values)
2. Agent Selection (skills) / Discrete action space.
3. Attention (a subset of the real environment observation / action space)
"""
def __init__(self,
agents_parameters: Union[AgentParameters, Dict[str, AgentParameters]],
visualization_parameters: VisualizationParameters,
decision_policy: DecisionPolicy,
out_action_space: ActionSpace,
in_action_space: Union[None, ActionSpace]=None,
decision_makers: Union[bool, Dict[str, bool]]=True,
reward_policy: RewardPolicy=RewardPolicy.NativeEnvironmentReward,
name="CompositeAgent"):
"""
Construct an agent group
:param agents_parameters: a list of presets describing each one of the agents in the group
:param decision_policy: the decision policy of the group which describes how actions are consolidated
:param out_action_space: the type of action space that is used by this composite agent in order to control the
underlying environment
:param in_action_space: the type of action space that is used by the upper level agent in order to control this
group
:param decision_makers: a list of booleans representing for each corresponding agent if it has a decision
privilege or if it is just an observer
:param reward_policy: the type of the reward that the group receives
"""
super().__init__()
if isinstance(agents_parameters, AgentParameters):
decision_makers = {agents_parameters.name: True}
agents_parameters = {agents_parameters.name: agents_parameters}
self.agents_parameters = agents_parameters
self.visualization_parameters = visualization_parameters
self.decision_makers = decision_makers
self.decision_policy = decision_policy
self.in_action_space = in_action_space
self.out_action_space = out_action_space # TODO: this is not being used
self.reward_policy = reward_policy
self.full_name_id = self.name = name
self.current_decision_maker = 0
self.environment = None
self.agents = {} # key = agent_name, value = agent
self.incoming_action = None
self.last_state = None
self._phase = RunPhase.HEATUP
self.last_action_info = None
self.current_episode = 0
self.parent_level_manager = None
# environment spaces
self.spaces = None
# counters for logging
self.total_steps_counter = 0
self.current_episode_steps_counter = 0
self.total_reward_in_current_episode = 0
# validate input
if set(self.decision_makers) != set(self.agents_parameters):
raise ValueError("The decision_makers dictionary keys does not match the names of the given agents")
if sum(self.decision_makers.values()) > 1 and type(self.decision_policy) == SingleDecider \
and type(self.in_action_space) != AgentSelection:
raise ValueError("When the control policy is set to single decider, the master policy should control the"
"agent group via agent selection (ControlType.AgentSelection)")
@property
def parent(self):
"""
Get the parent class of the composite agent
:return: the parent of the composite agent
"""
return self._parent
@parent.setter
def parent(self, val):
"""
Change the parent class of the composite agent.
Additionally, updates the full name of the agent
:param val: the new parent
:return: None
"""
self._parent = val
if not hasattr(self._parent, 'name'):
raise ValueError("The parent of a composite agent must have a name")
self.full_name_id = "{}/{}".format(self._parent.name, self.name)
def create_agents(self):
for agent_name, agent_parameters in self.agents_parameters.items():
agent_parameters.name = agent_name
# create agent
self.agents[agent_parameters.name] = short_dynamic_import(agent_parameters.path)(agent_parameters,
parent=self)
self.agents[agent_parameters.name].parent_level_manager = self.parent_level_manager
# TODO: this is a bit too specific to be defined here
# add an attention cropping filter if the incoming directives are attention boxes
if isinstance(self.in_action_space, AttentionActionSpace):
attention_size = self.in_action_space.forced_attention_size
for agent in self.agents.values():
agent.input_filter.observation_filters['attention'] = \
ObservationCropFilter(crop_low=np.zeros_like(attention_size), crop_high=attention_size)
agent.input_filter.observation_filters.move_to_end('attention', last=False) # add the cropping at the beginning
def setup_logger(self) -> None:
"""
Setup the logger for all the agents in the composite agent
:return: None
"""
[agent.setup_logger() for agent in self.agents.values()]
def set_session(self, sess) -> None:
"""
Set the deep learning framework session for all the agents in the composite agent
:return: None
"""
[agent.set_session(sess) for agent in self.agents.values()]
def set_environment_parameters(self, spaces: SpacesDefinition):
"""
Sets the parameters that are environment dependent. As a side effect, initializes all the components that are
dependent on those values, by calling init_environment_dependent_modules
:param spaces: the definitions of all the spaces of the environment
:return: None
"""
self.spaces = copy.deepcopy(spaces)
[agent.set_environment_parameters(self.spaces) for agent in self.agents.values()]
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, val: RunPhase) -> None:
"""
Change the current phase of all the agents in the group
:param phase: the new phase
:return: None
"""
self._phase = val
for agent in self.agents.values():
agent.phase = val
def handle_episode_ended(self) -> None:
"""
Make any changes needed when each episode is ended.
This includes incrementing counters, updating full episode dependent values, updating logs, etc.
This function is called right after each episode is ended.
:return: None
"""
self.current_episode += 1
[agent.handle_episode_ended() for agent in self.agents.values()]
def reset_internal_state(self) -> None:
"""
Reset the episode for all the agents in the group
:return: None
"""
# update counters
self.total_steps_counter = 0
self.current_episode_steps_counter = 0
self.total_reward_in_current_episode = 0
# reset all sub modules
[agent.reset_internal_state() for agent in self.agents.values()]
def train(self) -> Union[float, List]:
"""
Make a single training step for all the agents of the group
:return: a list of loss values from the training step
"""
return [agent.train() for agent in self.agents.values()]
def act(self) -> ActionInfo:
"""
Get the actions from all the agents in the group. Then use the decision policy in order to
extract a single action out of the list of actions.
:return: the chosen action and its corresponding information
"""
# update counters
self.total_steps_counter += 1
self.current_episode_steps_counter += 1
# get the actions info from all the agents
actions_info = {}
for agent_name, agent in self.agents.items():
action_info = agent.act()
actions_info[agent_name] = action_info
# decide on a single action to apply to the environment
action_info = self.decision_policy.choose_action(actions_info)
# TODO: make the last action info a property?
# pass the action info to all the observers
for agent_name, is_decision_maker in self.decision_makers.items():
if not is_decision_maker:
self.agents[agent_name].last_action_info = action_info
self.last_action_info = action_info
return self.last_action_info
def observe(self, env_response: EnvResponse) -> bool:
"""
Given a response from the environment as an env_response, filter it and pass it to the agents.
This method has two main jobs:
1. Wrap the previous transition, ending with the new observation coming from EnvResponse.
2. Save the next_state as the current_state to take action upon for the next call to act().
:param env_response: the response received from the environment
:return: True if the episode has ended (game over or an agent reached its goal)
"""
# accumulate the unfiltered rewards for visualization
self.total_reward_in_current_episode += env_response.reward
episode_ended = env_response.game_over
# pass the env_response to all the sub-agents
# TODO: what if one agent decides to end the episode but the others don't? who decides?
for agent_name, agent in self.agents.items():
goal_reached = agent.observe(env_response)
episode_ended = episode_ended or goal_reached
# TODO: unlike for a single agent, here we also treat a game over by the environment.
# probably better to only return the agents' goal_reached decisions.
return episode_ended
def save_checkpoint(self, checkpoint_prefix: str) -> None:
[agent.save_checkpoint(checkpoint_prefix) for agent in self.agents.values()]
def restore_checkpoint(self, checkpoint_dir: str) -> None:
[agent.restore_checkpoint(checkpoint_dir) for agent in self.agents.values()]
def set_incoming_directive(self, action: ActionType) -> None:
self.incoming_action = action
if isinstance(self.decision_policy, SingleDecider) and isinstance(self.in_action_space, AgentSelection):
self.decision_policy.decision_maker = list(self.agents.keys())[action]
if isinstance(self.in_action_space, AttentionActionSpace):
# TODO: redesign to be more modular
for agent in self.agents.values():
agent.input_filter.observation_filters['attention'].crop_low = action[0]
agent.input_filter.observation_filters['attention'].crop_high = action[1]
agent.output_filter.action_filters['masking'].set_masking(action[0], action[1])
# TODO rethink this scheme. we don't want so many if else clauses lying around here.
# TODO - for incoming actions which do not involve setting the acting agent we should change the
# observation_space, goal to pursue, etc accordingly to the incoming action.
def sync(self) -> None:
"""
Sync the agent networks with the global network
:return:
"""
[agent.sync() for agent in self.agents.values()]
def collect_savers(self, parent_path_suffix: str) -> SaverCollection:
"""
Collect all of agent's network savers
:param parent_path_suffix: path suffix of the parent of the agent
(could be name of level manager or composite agent)
:return: collection of all agent savers
"""
savers = SaverCollection()
for agent in self.agents.values():
savers.update(agent.collect_savers(
parent_path_suffix="{}.{}".format(parent_path_suffix, self.name)))
return savers
|
import logging
from typing import Dict, Type
from discord.ext import commands
from miyu_bot.bot import models
import miyu_bot.bot.bot
from miyu_bot.bot.models import PreferenceScope, all_preferences
class Preferences(commands.Cog):
bot: 'miyu_bot.bot.bot.MiyuBot'
def __init__(self, bot):
self.bot = bot
self.logger = logging.getLogger(__name__)
from miyu_bot.commands.master_filter.localization_manager import LocalizationManager
self.l10n = LocalizationManager(self.bot.fluent_loader, 'preferences.ftl')
@commands.command(name='setpref',
description='',
help='')
async def setpref(self, ctx: commands.Context, scope: str, name: str, value: str):
scope = preference_scope_aliases.get(scope)
if not scope:
await ctx.send(f'Invalid scope.')
return
if name not in scope.preferences:
await ctx.send(f'Invalid preference.')
return
preference = scope.preferences[name]
if not (await ctx.bot.is_owner(ctx.author) or (scope.has_permissions(ctx) and not preference.is_privileged)):
await ctx.send(f'Insufficient permissions.')
return
if error_message := preference.validate_or_get_error_message(value):
await ctx.send(f'Invalid value: {error_message}')
return
entry = await scope.get_from_context(ctx)
if not entry:
await ctx.send(f'Scope not available in current channel.')
return
entry.set_preference(name, value)
await entry.save()
await ctx.send(f'Preference updated.')
@commands.command(name='getpref',
description='',
help='')
async def getpref(self, ctx: commands.Context, scope: str, name: str = ''):
scope = preference_scope_aliases.get(scope)
if not scope:
await ctx.send(f'Invalid scope.')
return
entry = await scope.get_from_context(ctx)
if not entry:
await ctx.send(f'Scope not available in current channel.')
return
if name:
if name not in scope.preferences:
await ctx.send(f'Invalid preference.')
return
await ctx.send(str(getattr(entry, scope.preferences[name].attribute_name) or None))
else:
await ctx.send('\n'.join(f'{name}: {getattr(entry, pref.attribute_name)}'
for name, pref in scope.preferences.items()
if not pref.is_privileged))
@commands.command(name='clearpref',
description='',
help='')
async def clearpref(self, ctx: commands.Context, scope: str, name: str = ''):
scope = preference_scope_aliases.get(scope)
if not scope:
await ctx.send(f'Invalid scope.')
return
if name not in scope.preferences:
await ctx.send(f'Invalid preference.')
return
preference = scope.preferences[name]
if not (await ctx.bot.is_owner(ctx.author) or (scope.has_permissions(ctx) and not preference.is_privileged)):
await ctx.send(f'Insufficient permissions.')
return
entry = await scope.get_from_context(ctx)
if not entry:
await ctx.send(f'Scope not available in current channel.')
return
entry.clear_preference(name)
await entry.save()
await ctx.send(f'Successfully cleared preference.')
preference_scope_aliases: Dict[str, Type[PreferenceScope]] = {
'user': models.User,
'self': models.User,
'channel': models.Channel,
'server': models.Guild,
'guild': models.Guild,
}
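# Hypothetical command usage (the command prefix and preference names depend on the bot's
# configuration; they are illustrative only):
#   !setpref user language en       -> set a user-scoped preference
#   !getpref server                 -> list non-privileged server preferences
#   !clearpref channel timezone     -> clear a channel-scoped preference
# Values are checked with Preference.validate_or_get_error_message before being stored.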
async def get_preferences(ctx: commands.Context, toggle_user_prefs: bool = False):
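# Resolution order: guild, then channel, then user preferences override earlier sources;
# anything still unset falls back to the preference's default_value.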
sources = []
if guild_prefs := ctx.guild and await models.Guild.get_or_none(id=ctx.guild.id):
sources.append(guild_prefs)
if channel_prefs := await models.Channel.get_or_none(id=ctx.channel.id):
sources.append(channel_prefs)
if user_prefs := await models.User.get_or_none(id=ctx.author.id):
if not toggle_user_prefs:
sources.append(user_prefs)
preference_values = {}
for source in sources:
for k, v in source.preferences.items():
if source.preference_set(k):
preference_values[v.name] = source.get_preference(k)
for v in all_preferences.values():
if v.name not in preference_values:
preference_values[v.name] = v.default_value
return preference_values
def setup(bot):
bot.add_cog(Preferences(bot))
|
# -*- coding: utf-8 -*-
"""
Shows a headphones symbol with a tick or a cross depending on whether the Bose headphones are connected
"""
import subprocess
class Py3status:
def __init__(self):
headphones_icon = "Blackout"
# self.format_connected = "🎧 ✔"
# self.format_connected = headphones_icon + " ✔"
# self.format_disconnected = headphones_icon + " ✘"
self.format_connected = headphones_icon + " ON"
self.format_disconnected = headphones_icon + " OFF"
self.format_default = "N/A"
self.format_output = headphones_icon + " "
# self.format_output = headphones_icon + " ✘"
self.check_blackout = "bash /home/alessap/dotfiles/scripts/is_blackout_connected"
self.cached_until = 1
output = subprocess.check_output(self.check_blackout.split())
if output == b"Yes\n":
self.format_output=self.format_connected
elif output == b"No\n":
self.format_output=self.format_disconnected
else:
self.format_output=self.format_default
def click_info(self):
return {
'full_text': self.format_output,
# 'cached_until': self.py3.CACHE_FOREVER
'cached_until': self.py3.time_in(self.cached_until) # time in seconds
}
def on_click(self, event):
button = event['button']
if button == 1 or button == 3:
output = subprocess.check_output(self.check_blackout.split())
if output == b"Yes\n":
self.format_output=self.format_connected
elif output == b"No\n":
self.format_output=self.format_disconnected
else:
self.format_output=self.format_default
return {
'full_text': self.format_output,
'cached_until': self.py3.time_in(self.cached_until) # time in seconds
}
output = subprocess.check_output(self.check_blackout.split())
if output == b"No\n":
bt_on = subprocess.check_output("bluetoothctl power on".split())
connect_blackout = subprocess.check_output("bluetoothctl connect 04:52:C7:FF:8D:B5".split())
if output == b"Yes\n":
disconnect_blackout = subprocess.check_output("bluetoothctl disconnect 04:52:C7:FF:8D:B5".split())
output = subprocess.check_output(self.check_blackout.split())
if output == b"Yes\n":
self.format_output=self.format_connected
elif output == b"No\n":
self.format_output=self.format_disconnected
else:
self.format_output=self.format_default
return {
'full_text': self.format_output,
'cached_until': self.py3.time_in(self.cached_until) # time in seconds
}
# def is_blackout_connected(self):
# output = subprocess.check_output(self.check_blackout.split())
# if output == b"Yes\n":
# self.format_output=self.format_connected
# elif output == b"No\n":
# self.format_output=self.format_disconnected
# else:
# self.format_output=self.format_default
# # return {
# # 'full_text': self.format_output,
# # 'cached_until': self.py3.time_in(59) # time in seconds
# # # 'cached_until': self.py3.CACHE_FOREVER
# # }
# class Py3status:
#
# def __init__(self):
# self.full_text = 'Click me'
#
# def click_info(self):
# return {
# 'full_text': self.full_text,
# 'cached_until': self.py3.CACHE_FOREVER
# }
#
# def on_click(self, event):
# """
# event will be a dict like
# {'y': 13, 'x': 1737, 'button': 1, 'name': 'example', 'instance': 'first'}
# """
# button = event['button']
# # update our output (self.full_text)
# format_string = 'You pressed button {button}'
# data = {'button': button}
# self.full_text = self.py3.safe_format(format_string, data)
# # Our modules update methods will get called automatically.
|
#SPDX-License-Identifier: MIT
import os, subprocess
from datetime import datetime
import logging
from workers.worker_git_integration import WorkerGitInterfaceable
import requests
import json
from urllib.parse import quote
from multiprocessing import Process, Queue
import traceback
import pandas as pd
import sqlalchemy as s
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import MetaData
from workers.worker_base import Worker
# from workers.deps_worker import dependancy_calculator as dep_calc
from libyear_utils import get_deps_libyear_data
class DepsLibyearWorker(WorkerGitInterfaceable):
def __init__(self, config={}):
worker_type = "deps_libyear_worker"
# Define what this worker can be given and know how to interpret
given = [['git_url']]
models = ['deps_libyear']
# Define the tables needed to insert, update, or delete on
data_tables = ['repo_deps_libyear']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
self.config.update({
'repo_directory': self.augur_config.get_value('Workers', 'facade_worker')['repo_directory']
})
self.tool_source = 'Deps Libyear Worker'
self.tool_version = '1.0.0'
self.data_source = 'Augur Repository Data'
def deps_libyear_model(self, entry_info, repo_id):
""" Data collection and storage method
"""
self.logger.info(f"This is the entry info: {entry_info}.")
self.logger.info(f"This is the repo id: {repo_id}")
repo_path_sql = s.sql.text("""
SELECT repo_id, CONCAT(repo_group_id || chr(47) || repo_path || repo_name) AS path
FROM repo
WHERE repo_id = :repo_id
""")
relative_repo_path = self.db.execute(repo_path_sql, {'repo_id': repo_id}).fetchone()[1]
absolute_repo_path = self.config['repo_directory'] + relative_repo_path
try:
self.generate_deps_libyear_data(repo_id, absolute_repo_path)
except Exception as e:
self.print_traceback("Deps_libyear_worker: generate_deps_libyear_data", e, True)
self.register_task_completion(entry_info, repo_id, "deps_libyear")
def generate_deps_libyear_data(self, repo_id, path):
"""Scans for package files and calculates libyear
:param repo_id: Repository ID
:param path: Absolute path of the Repository
"""
self.logger.info('Searching for deps in repo')
self.logger.info(f'Repo ID: {repo_id}, Path: {path}')
deps = get_deps_libyear_data(path)
try:
for dep in deps:
repo_deps = {
'repo_id': repo_id,
'name' : dep['name'],
'requirement' : dep['requirement'],
'type' : dep['type'],
'package_manager' : dep['package'],
'current_verion' : dep['current_version'],
'latest_version' : dep['latest_version'],
'current_release_date' : dep['current_release_date'],
'latest_release_date' : dep['latest_release_date'],
'libyear' : dep['libyear'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source,
'data_collection_date': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
}
result = self.db.execute(self.repo_deps_libyear_table.insert().values(repo_deps))
self.logger.info(f"Added dep: {result.inserted_primary_key}")
except Exception as e:
self.print_traceback("Deps_libyear_worker: generating and inserting data", e, True)
|
<gh_stars>10-100
# Random RGB Sticklet by @PhycoNinja13b
# Exclusive for my personal repo
# This plugin is very resource hungry ("Kumbhkaran ki aulad" -- Hindi, roughly "a real heavyweight")
# Currently loads 74 font options
# Dare to edit this part and you will be torn apart!
import io
import textwrap
import random
from telethon import events
from PIL import Image, ImageDraw, ImageFont
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="plet (.*)"))
async def sticklet(event):
R = random.randint(0,256)
G = random.randint(0,256)
B = random.randint(0,256)
FC = random.randint(1, 74)  # randint is inclusive and there are only 74 fonts; 75 would leave FONT_FILE unset
sticktext = event.pattern_match.group(1)
if not sticktext:
await event.edit("`I need text to sticklet!`")
return
await event.delete()
sticktext = textwrap.wrap(sticktext, width=10)
sticktext = '\n'.join(sticktext)
image = Image.new("RGBA", (512, 512), (255, 255, 255, 0))
draw = ImageDraw.Draw(image)
fontsize = 230
# the 74 font files, indexed by FC; replaces the original 74-branch if-chain without changing behaviour
FONT_FILES = (
"Fonts/Aksana-8KnB.ttf",
"Fonts/AlChevrolaPersonaluseonly-Ea47r.ttf",
"Fonts/AlChevrolaPersonaluseonly-axY2a.otf",
"Fonts/AlLeporschePersonaluseonly-Eaqln.otf",
"Fonts/AlLeporschePersonaluseonly-OVZ24.ttf",
"Fonts/Alberto-yx2q.ttf",
"Fonts/AlexBrush-Regular.ttf",
"Fonts/Alienzmonkey-ze7l.ttf",
"Fonts/Allura-Regular.otf",
"Fonts/ArchitectsDaughter.ttf",
"Fonts/Arizonia-Regular.ttf",
"Fonts/BetterCaramel-ajrK.otf",
"Fonts/BetterCaramelSans-EjRW.otf",
"Fonts/BetterCaramelSansBold-Oj76.otf",
"Fonts/BetterCaramelSansHollow-Zjl3.otf",
"Fonts/BetterCaramelSerif-37Kp.otf",
"Fonts/BetterCaramelSerifBold-x0Ym.otf",
"Fonts/BetterCaramelSerifHollow-pm0d.otf",
"Fonts/BlackthornsDemoBlack-L2GE.ttf",
"Fonts/BlackthornsDemoRegular-X0MZ.ttf",
"Fonts/Bulgatti-xgMV.ttf",
"Fonts/ChampagneAndLimousines-7KRB.ttf",
"Fonts/ChampagneAndLimousinesBold-myr2.ttf",
"Fonts/ChampagneAndLimousinesBoldItalic-dqex.ttf",
"Fonts/ChampagneAndLimousinesItalic-PlRZ.ttf",
"Fonts/Entreaty-5Y7V.ttf",
"Fonts/Fancy-GxRO.otf",
"Fonts/Gardenparty-p0MD.ttf",
"Fonts/GreatVibes-Regular.otf",
"Fonts/Jolly-OOw6.ttf",
"Fonts/JollyBold-ZGW3.ttf",
"Fonts/JollyBoldItalic-3wmp.ttf",
"Fonts/JollyItalic-xxjm.ttf",
"Fonts/KaushanScript-Regular.otf",
"Fonts/LitleSimpleSt-2lZ3.ttf",
"Fonts/LobsterTwo-Bold.otf",
"Fonts/LobsterTwo-BoldItalic.otf",
"Fonts/LobsterTwo-Italic.otf",
"Fonts/LobsterTwo-Regular.otf",
"Fonts/LordZeddLjStudios-4YzB.ttf",
"Fonts/LuisSmartTx-rW6y.ttf",
"Fonts/MountainsofChristmas.ttf",
"Fonts/NightmarePills-BV2w.ttf",
"Fonts/Pacifico.ttf",
"Fonts/PierceRegular-6OWY.ttf",
"Fonts/Pierceregular-BgeV.otf",
"Fonts/Pixeboy-z8XGD.ttf",
"Fonts/PussyCat-Dy69.ttf",
"Fonts/RaconteurNf-LOlE.ttf",
"Fonts/Rolande-8Ydg.ttf",
"Fonts/RolandeBold-YLaO.ttf",
"Fonts/Sail-Regular.otf",
"Fonts/Skarpalt-qx4V.ttf",
"Fonts/Sofia-Regular.otf",
"Fonts/Tangerine_Bold.ttf",
"Fonts/Tangerine_Regular.ttf",
"Fonts/ThechampDemo-2OvqK.ttf",
"Fonts/ThechampDemoGradient-K7V2p.ttf",
"Fonts/ThechampDemoItalic-MVAwr.ttf",
"Fonts/ThechampDemoStroke-vmneO.ttf",
"Fonts/Timeburner-xJB8.ttf",
"Fonts/TimeburnerBold-peGR.ttf",
"Fonts/Windsong.ttf",
"Fonts/Zexo-0Myd.ttf",
"Fonts/Zexo-4wl4.otf",
"Fonts/blackjack.otf",
"Fonts/1942.ttf",
"Fonts/AguafinaScript-Regular.ttf",
"Fonts/AirAmerica-Regular.otf",
"Fonts/Airstream.ttf",
"Fonts/Amadeus.ttf",
"Fonts/berkshireswash-regular.ttf",
"Fonts/DEFTONE.ttf",
"Fonts/FontleroyBrown.ttf",
)
FONT_FILE = FONT_FILES[FC - 1]
font = ImageFont.truetype(FONT_FILE, size=fontsize)
width, height = draw.multiline_textsize(sticktext, font=font)
while width > 512 or height > 512:  # shrink the font until the text fits the 512x512 canvas
fontsize -= 3
font = ImageFont.truetype(FONT_FILE, size=fontsize)
width, height = draw.multiline_textsize(sticktext, font=font)
draw.multiline_text(((512-width)/2,(512-height)/2), sticktext, font=font, fill=(R, G, B))
image_stream = io.BytesIO()
image_stream.name = "sticker.webp"
image.save(image_stream, "WebP")
image_stream.seek(0)
await event.client.send_file(event.chat_id, image_stream, reply_to=event.message.reply_to_msg_id)
await event.delete()
|
"""Toplevel parser script that can run wikipedia search."""
import logging
from pathlib import Path
from time import sleep
from typing import Union
from wiki_music.constants.colors import CYAN, GREEN, RESET
from wiki_music.utilities import (Action, exception, flatten_set, to_bool,
we_are_frozen)
from .process_page import WikipediaParser
log = logging.getLogger(__name__)
class WikipediaRunner(WikipediaParser):
r"""Toplevel Wikipedia Parser class.
Inherits all other parser subclasses. This is the class that is intended
for user interaction. Its methods know how to run the parser in order to
produce meaningful results.
Warnings
--------
This is the only parser class that is meant for user interaction. Calling
its subclasses directly might result in unexpected behaviour.
Parameters
----------
album: str
album name
albumartist: str
band name
work_dir: str
directory with music files
with_log: bool
If parser should output its progress to logger, only for CLI mode
GUI: bool
if True - assume app is running in GUI mode\n
if False - assume app is running in CLI mode
protected_vars: bool
whether to initialize protected variables or not
multi_threaded: bool
whether to run some parts of code in threads
"""
def __init__(self, album: str = "", band: str = "",
work_dir: Union[str, Path] = "", with_log: bool = False,
GUI: bool = True, protected_vars: bool = True,
offline_debug: bool = False, write_json: bool = False,
multi_threaded: bool = True) -> None:
log.debug("init parser runner")
super().__init__(protected_vars=protected_vars, GUI=GUI)
self._GUI = GUI
self.with_log = with_log
self.ALBUM = album
self.ALBUMARTIST = band
self.work_dir = Path(work_dir)
self.offline_debug = offline_debug
self.write_json = write_json
self.multi_threaded = multi_threaded
log.debug("init parser runner done")
@exception(log)
def run_wiki(self):
"""Runs the whole wikipedia search, together with lyrics finding."""
if self._GUI:
self._run_wiki_gui()
else:
self._run_wiki_nogui()
def _run_wiki_gui(self):
"""Runs wikipedia search with specifics of the GUI mode."""
# download wikipedia page and track progress
for message in self._get_preload_progress():
self._log.info(message)
# get error messages
error_msg = self.get_wiki()
if error_msg:
self._log.exception(error_msg)
return
if not we_are_frozen():
# basic html textout for debug
self.basic_out()
# find release date
self._log.info(f"Found release date: {self.get_release_date()}")
# find list of genres
self._log.info(f"Found genre(s): {', '.join(self.get_genres())}")
# download cover art from wikipedia
self._log.info("Downloading cover art")
self.get_cover_art(in_thread=True)
# print out page contents
self._log.info(f"Found page contents: "
f"{', '.join(self.get_contents())}")
# extract track list
self._log.info("Extracting tracks")
self.get_tracks()
# extract personnel names
self._log.info("Extracting additional personnel")
self.get_personnel()
# extract writers, composers
self._log.info("Extracting composers")
self.get_composers()
if not we_are_frozen():
# save to files
self._log.info("Writing to disc")
self.disk_write()
# select genre
self._log.info("Select genre")
if not self.GENRE:
if len(self.genres) == 1:
msg = "Input genre"
else:
msg = "Select genre"
self.GENRE = Action("genres", msg, options=self.genres).response
# decide what to do with artists
self._log.info("Assign artists to composers")
a = Action("composers", "Do you want to copy artists to composers?",
load=True)
if a.response:
self.merge_artist_composers()
# decide if you want to find lyrics
self._log.info("Searching for Lyrics")
a = Action("lyrics", "Do you want to find lyrics?")
self.save_lyrics(a.response)
Action("load", load=True)
self._log.info("Done")
def _run_wiki_nogui(self):
"""Runs wikipedia search with specifics of the CLI mode."""
# start wikipedia page download
self._log_print(msg_WHITE="Accessing Wikipedia...")
# download wikipedia page and track progress
for message in self._get_preload_progress():
if "Searching for" in message:
print(f"Searching for: {GREEN}{self.ALBUM}{RESET} by "
f"{GREEN}{self.ALBUMARTIST}")
elif "Using offline" in message:
self._log_print(msg_GREEN="Using offline cached page insted "
"of web page")
elif "Found at" in message:
self._log_print(msg_GREEN="Found at: ", msg_WHITE=self.url)
else:
self._log_print(msg_WHITE=message)
# get error messages
error_msg = self.get_wiki()
if error_msg:
self._log_print(msg_GREEN=error_msg)
return
if not we_are_frozen():
# basic html textout for debug
self.basic_out()
# find release date
self._log_print(msg_GREEN="Found release date:",
msg_WHITE=self.get_release_date())
# find list of genres
self._log_print(msg_GREEN="Found genre(s)",
msg_WHITE="\n".join(self.get_genres()))
# get and print out page contents
self._log_print(msg_GREEN="Found page contents",
msg_WHITE="\n".join(self.get_contents()))
# extract track list
self.get_tracks()
# extract personnel names
self._log_print(msg_GREEN="Found additional personnel")
self.get_personnel()
if not we_are_frozen():
print(self.personnel_2_str())
# extract writers, composers
self._log_print(msg_GREEN="Found composers",
msg_WHITE="\n".join(flatten_set(self.get_composers())))
if not we_are_frozen():
# save to files
self._log_print(msg_WHITE="Writing to disk")
self.disk_write()
# print out found tracklist
self._log_print(msg_GREEN="Found Track list(s)")
self.print_tracklist()
# select genre
if not self.GENRE:
if not self.genres:
print(CYAN + "Input genre:", end="")
self.genre = input()
else:
print(CYAN + "Specify which genre you want to write: [1.]")
for i, gen in enumerate(self.genres, 1):
print(f"{i}. {gen}")
print("Input number:", CYAN, end="")
index = input()
try:
index = int(index) - 1
except ValueError:
index = 0
self.GENRE = self.genres[index]
# decide what to do with artists
print(CYAN + "Do you want to assign artists to composers? ([y]/n)",
RESET, end=" ")
if to_bool(input()):
self.merge_artist_composers()
# decide if you want to find lyrics
print(CYAN + "\nDo you want to find and save lyrics? ([y]/n): " +
RESET, end="")
# download lyrics
self.save_lyrics(to_bool(input()))
print(CYAN + "Write data to ID3 tags? ([y]/n): " + RESET, end="")
if to_bool(input()):
if not self.write_tags():
self._log_print(
msg_WHITE="Cannot write tags because there are no "
"coresponding files")
else:
self._log_print(msg_GREEN="Done")
@exception(log)
def run_lyrics(self):
"""Runs only the lyrics search."""
if self._GUI:
self._run_lyrics_gui()
else:
self._run_lyrics_nogui()
def _run_lyrics_gui(self):
"""Runs only lyrics search with specifics of the GUI mode."""
self._log.info("Searching for lyrics")
self.save_lyrics(find=True)
Action("load", load=True)
self._log.info("Done")
def _run_lyrics_nogui(self):
"""Runs only lyrics search with specifics of the CLI mode."""
self.read_files()
# find lyrics
self._log_print(msg_GREEN="Searching for lyrics")
self.save_lyrics()
if not self.write_tags():
self._log_print(msg_WHITE="Cannot write tags because there are no "
"coresponding files")
else:
self._log_print(msg_GREEN="Done")
def _log_print(self, msg_GREEN: str = "", msg_WHITE: str = "",
level: str = "INFO"):
"""Redirects the input to sandard print function and to logger.
Parameters
----------
msg_GREEN: str
message that should be highlighted in green in the print output
msg_WHITE: str
message that should be left with the default font color
level: str
logger level for output message
"""
if msg_GREEN != "":
print(GREEN + "\n" + msg_GREEN)
if msg_WHITE != "":
print(msg_WHITE)
if self.with_log:
msg_GREEN = msg_GREEN + msg_WHITE
if level == "INFO":
log.info(msg_GREEN)
if level == "WARN":
log.warning(msg_GREEN)
|
<gh_stars>0
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import atexit
from threading import Lock, Thread
from .remote_agent import RemoteAgent, RemoteAgentException
class RemoteAgentBuffer:
def __init__(self, buffer_size=1):
self._log = logging.getLogger(self.__class__.__name__)
self._buffer_size = buffer_size
self._quiescing = False
self._agent_buffer = []
self._agent_buffer_lock = Lock()
self._replenish_thread = None
self._start_replenish_thread()
atexit.register(self.__del__)
def __del__(self):
self.destroy()
def destroy(self):
self._quiescing = True
if self._replenish_thread_is_running():
self._replenish_thread.join() # wait for the replenisher to finish
with self._agent_buffer_lock:
for remote_agent in self._agent_buffer:
remote_agent.terminate()
def _replenish_thread_is_running(self):
return self._replenish_thread is not None and self._replenish_thread.is_alive()
def _replenish_agents(self):
# For high-availability, we allow for the possibility that we may create
# more than `buffer_size` number of agents in the buffer under certain race conditions.
#
# To prevent this we would have to put the `RemoteAgent()` creation calls inside the lock
# context, which would slow down the `acquire_remote_agent` code path.
fresh_procs = []
for _ in range(self._buffer_size - len(self._agent_buffer)):
if self._quiescing:
return # early out if we happened to be shutting down midway through replenishment
try:
fresh_procs.append(RemoteAgent())
except RemoteAgentException:
self._log.error("Failed to initialize remote agent")
with self._agent_buffer_lock:
self._agent_buffer.extend(fresh_procs)
def _start_replenish_thread(self):
if self._quiescing:
# not starting thread since we are shutting down
pass
elif self._replenish_thread is not None and self._replenish_thread.is_alive():
# not starting thread since there's already one running
pass
else:
# otherwise start the replenishment thread
self._replenish_thread = Thread(target=self._replenish_agents, daemon=True)
self._replenish_thread.start()
def acquire_remote_agent(self) -> RemoteAgent:
with self._agent_buffer_lock:
if len(self._agent_buffer) > 0:
remote_agent = self._agent_buffer.pop()
else:
remote_agent = None
self._start_replenish_thread()
if remote_agent is None:
# Do this here instead of in the else branch above to avoid holding the lock
# while the RemoteAgent() is being created
remote_agent = RemoteAgent()
return remote_agent
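# Usage sketch (illustrative only, not part of the original module; it assumes a
# working RemoteAgent implementation from .remote_agent and shows the intended
# acquire/replenish/destroy lifecycle):
#
#     buffer = RemoteAgentBuffer(buffer_size=4)   # pre-warms agents in the background
#     agent = buffer.acquire_remote_agent()       # pops a warm agent, triggers replenishment
#     ...                                         # use the agent
#     agent.terminate()
#     buffer.destroy()                            # joins the replenisher, terminates buffered agents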
|
<filename>code/imgaug/augmenters/flip.py
"""
Augmenters that apply mirroring/flipping operations to images.
Do not import directly from this file, as the categorization is not final.
Use instead
`from imgaug import augmenters as iaa`
and then e.g. ::
seq = iaa.Sequential([
iaa.Fliplr((0.0, 1.0)),
iaa.Flipud((0.0, 1.0))
])
List of augmenters:
* Fliplr
* Flipud
"""
from __future__ import print_function, division, absolute_import
from .. import imgaug as ia
# TODO replace these imports with iap.XYZ
from ..parameters import StochasticParameter, Deterministic, Binomial, Choice, DiscreteUniform, Normal, Uniform, FromLowerResolution
from .. import parameters as iap
from abc import ABCMeta, abstractmethod
import random
import numpy as np
import copy as copy_module
import re
import math
from scipy import misc, ndimage
from skimage import transform as tf, segmentation, measure
import itertools
import cv2
import six
import six.moves as sm
import types
import warnings
from .meta import Augmenter
class Fliplr(Augmenter):
"""
Flip/mirror input images horizontally.
Parameters
----------
p : int or float or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Fliplr(0.5)
would horizontally flip/mirror 50 percent of all input images.
>>> aug = iaa.Fliplr(1.0)
would horizontally flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
if ia.is_single_number(p):
self.p = Binomial(p)
elif isinstance(p, StochasticParameter):
self.p = p
else:
raise Exception("Expected p to be int or float or StochasticParameter, got %s." % (type(p),))
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.fliplr(images[i])
return images
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
width = keypoints_on_image.shape[1]
for keypoint in keypoints_on_image.keypoints:
keypoint.x = (width - 1) - keypoint.x
return keypoints_on_images
def get_parameters(self):
return [self.p]
class Flipud(Augmenter):
"""
Flip/mirror input images vertically.
Parameters
----------
p : int or float or StochasticParameter, optional(default=0)
Probability of each image to get flipped.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Flipud(0.5)
would vertically flip/mirror 50 percent of all input images.
>>> aug = iaa.Flipud(1.0)
would vertically flip/mirror all input images.
"""
def __init__(self, p=0, name=None, deterministic=False, random_state=None):
super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
if ia.is_single_number(p):
self.p = Binomial(p)
elif isinstance(p, StochasticParameter):
self.p = p
else:
raise Exception("Expected p to be int or float or StochasticParameter, got %s." % (type(p),))
def _augment_images(self, images, random_state, parents, hooks):
nb_images = len(images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i in sm.xrange(nb_images):
if samples[i] == 1:
images[i] = np.flipud(images[i])
return images
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
nb_images = len(keypoints_on_images)
samples = self.p.draw_samples((nb_images,), random_state=random_state)
for i, keypoints_on_image in enumerate(keypoints_on_images):
if samples[i] == 1:
height = keypoints_on_image.shape[0]
for keypoint in keypoints_on_image.keypoints:
keypoint.y = (height - 1) - keypoint.y
return keypoints_on_images
def get_parameters(self):
return [self.p]
class Rettangolo(Augmenter):
def __init__(self, x,y,p_x,p_y, name=None, deterministic=False, random_state=None):
super(Rettangolo, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.x = x
self.y = y
self.p_x = p_x
self.p_y = p_y
def _augment_images(self, images, random_state, parents, hooks):
# x and y are fractions (values from 0 to 1), but the limit should be 0.5;
# p_x and p_y are the point the rectangle starts from (they bound the
# rectangle's extent along x and y)
p_x = self.p_x
p_y = self.p_y
nb_images = len(images)
for i in sm.xrange(nb_images):
image = images[i]
# convert the fractional size into a percentage of the image dimensions
x = self.x * 100
y = self.y * 100
lato_x = image.shape[1]  # image width, e.g. 224
lato_y = image.shape[0]  # image height, e.g. 256
x_lunghezza = int(round((x * lato_x) / 100.0))
y_lunghezza = int(round((y * lato_y) / 100.0))
# clip the rectangle so it does not run past the image borders
if (p_x + x_lunghezza) >= lato_x:
x_lunghezza = x_lunghezza - ((p_x + x_lunghezza) - lato_x)
if (p_y + y_lunghezza) >= lato_y:
y_lunghezza = y_lunghezza - ((p_y + y_lunghezza) - lato_y)
# fill the rectangle with the per-channel mean of the whole image
media_0 = np.mean(image[:, :, 0])
media_1 = np.mean(image[:, :, 1])
media_2 = np.mean(image[:, :, 2])
for ii in range(0, x_lunghezza):
for j in range(0, y_lunghezza):
image[p_y + j][p_x + ii][0] = media_0
image[p_y + j][p_x + ii][1] = media_1
image[p_y + j][p_x + ii][2] = media_2
return images
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
# the rectangle fill does not move any pixels, so keypoints pass through unchanged
return keypoints_on_images
def get_parameters(self):
return [self.x, self.y, self.p_x, self.p_y]
|
"""
split.py - Word Splitting
Nice blog post on the complexity/corner cases/differing intuition of splitting
strings:
https://chriszetter.com/blog/2017/10/29/splitting-strings/
python-dev doesn't want to touch it anymore!
Other possible splitters:
- AwkSplitter -- how does this compare to awk -F?
- RegexSplitter
- CsvSplitter
- TSV2Splitter -- Data is transformed because of # \u0065 in JSON. So it's not
a pure slice, but neither is IFS splitting because of backslashes.
- Perl?
- does perl have a split context?
with SPLIT_REGEX = / digit+ / {
echo $#
echo $len(argv)
echo $1 $2
echo @argv
}
"""
from _devbuild.gen import runtime_asdl
from _devbuild.gen.runtime_asdl import value_e, span_e
from core import util
from core.util import log
# Enums for the state machine
CH = runtime_asdl.char_kind_e
EMIT = runtime_asdl.emit_e
ST = runtime_asdl.state_e
DEFAULT_IFS = ' \t\n'
def _SpansToParts(s, spans):
"""Helper for SplitForWordEval."""
parts = []
start_index = 0
# If the last span was black, and we get a backslash, set join_next to merge
# two black spans.
join_next = False
last_span_was_black = False
for span_type, end_index in spans:
if span_type == span_e.Black:
if parts and join_next:
parts[-1] += s[start_index:end_index]
join_next = False
else:
parts.append(s[start_index:end_index])
last_span_was_black = True
elif span_type == span_e.Backslash:
if last_span_was_black:
join_next = True
last_span_was_black = False
else:
last_span_was_black = False
start_index = end_index
return parts
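# Worked example (illustrative, not executed): for s = 'one:two' with IFS=':',
# the splitter produces spans [(span_e.Black, 3), (span_e.Delim, 4), (span_e.Black, 7)],
# and _SpansToParts(s, spans) returns ['one', 'two'] -- the colon span is ignored.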
class SplitContext(object):
""" A polymorphic interface to field splitting.
It respects a STACK of IFS values, for example:
echo $x # uses default shell IFS
IFS=':' myfunc # new splitter
echo $x # uses default shell IFS again.
"""
def __init__(self, mem):
self.mem = mem
# Split into (ifs_whitespace, ifs_other)
self.splitters = {} # IFS value -> splitter instance
def _GetSplitter(self):
"""Based on the current stack frame, get the splitter."""
val = self.mem.GetVar('IFS')
if val.tag == value_e.Undef:
ifs = DEFAULT_IFS
elif val.tag == value_e.Str:
ifs = val.s
else:
# TODO: Raise proper error
raise AssertionError("IFS shouldn't be an array")
try:
sp = self.splitters[ifs]
except KeyError:
# Figure out what kind of splitter we should instantiate.
ifs_whitespace = ''
ifs_other = ''
for c in ifs:
if c in ' \t\n': # Happens to be the same as DEFAULT_IFS
ifs_whitespace += c
else:
ifs_other += c
sp = IfsSplitter(ifs_whitespace, ifs_other)
# NOTE: Technically, we could make the key more precise. IFS=$' \t' is
# the same as IFS=$'\t '. But most programs probably don't do that, and
# everything should work in any case.
self.splitters[ifs] = sp
return sp
def GetJoinChar(self):
"""
For decaying arrays by joining, eg. "$@" -> $@.
array
"""
# https://www.gnu.org/software/bash/manual/bashref.html#Special-Parameters
# http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_05_02
# "When the expansion occurs within a double-quoted string (see
# Double-Quotes), it shall expand to a single field with the value of
# each parameter separated by the first character of the IFS variable, or
# by a <space> if IFS is unset. If IFS is set to a null string, this is
# not equivalent to unsetting it; its first character does not exist, so
# the parameter values are concatenated."
val = self.mem.GetVar('IFS')
if val.tag == value_e.Undef:
return ''
elif val.tag == value_e.Str:
return val.s[0]
else:
# TODO: Raise proper error
raise AssertionError("IFS shouldn't be an array")
def Escape(self, s):
"""Escape IFS chars."""
sp = self._GetSplitter()
return sp.Escape(s)
def SplitForWordEval(self, s):
"""Split the string into slices, some of which are marked ignored.
IGNORED can be used for two reasons:
1. The slice is a delimiter.
2. The slice is a a backslash escape.
Example: If you have one\:two, then there are four slices. Only the
backslash one is ignored. In 'one:two', then you have three slices. The
colon is ignored.
Args:
allow_escape, whether \ can escape IFS characters and newlines.
Returns:
Array of (ignored Bool, start_index Int) tuples.
"""
sp = self._GetSplitter()
spans = sp.Split(s, True)
if 0:
for span in spans:
log('SPAN %s', span)
return _SpansToParts(s, spans)
def SplitForRead(self, line, allow_escape):
sp = self._GetSplitter()
return sp.Split(line, allow_escape)
class _BaseSplitter(object):
def __init__(self, escape_chars):
self.escape_chars = escape_chars + '\\' # Backslash is always escaped
def Escape(self, s):
# Note the characters here are DYNAMIC, unlike other usages of
# BackslashEscape().
return util.BackslashEscape(s, self.escape_chars)
# TODO: Use this when IFS='' or IFS isn't set? This is the fast path for Oil!
class NullSplitter(_BaseSplitter):
def __init__(self, ifs_whitespace):
_BaseSplitter.__init__(self, ifs_whitespace)
self.ifs_whitespace = ifs_whitespace
def Split(self, s, allow_escape):
raise NotImplementedError
# IFS splitting is complicated in general. We handle it with three concepts:
#
# - CH.* - Kinds of characters (edge labels)
# - ST.* - States (node labels)
# - EMIT.* Actions
#
# The Split() loop below classifies characters, follows state transitions, and
# emits spans. A span is a (ignored Bool, end_index Int) pair.
# As an example, consider this string:
# 'a _ b'
#
# The character classes are:
#
# a ' ' _ ' ' b
# Black DE_White DE_Gray DE_White Black
#
# The states are:
#
# a ' ' _ ' ' b
# Black DE_White1 DE_Gray DE_White2 Black
#
# DE_White2 is whitespace that follows a "gray" non-whitespace IFS character.
#
# The spans emitted are:
#
# (part 'a', ignored ' _ ', part 'b')
# SplitForRead() will check if the last two spans are a \ and \\n. Easy.
TRANSITIONS = {
# Whitespace should have been stripped
(ST.Start, CH.DE_White): (ST.Invalid, EMIT.Nothing), # ' '
(ST.Start, CH.DE_Gray): (ST.DE_Gray, EMIT.Empty), # '_'
(ST.Start, CH.Black): (ST.Black, EMIT.Nothing), # 'a'
(ST.Start, CH.Backslash): (ST.Backslash, EMIT.Nothing), # '\'
(ST.DE_White1, CH.DE_White): (ST.DE_White1, EMIT.Nothing), # ' '
(ST.DE_White1, CH.DE_Gray): (ST.DE_Gray, EMIT.Nothing), # ' _'
(ST.DE_White1, CH.Black): (ST.Black, EMIT.Delim), # ' a'
(ST.DE_White1, CH.Backslash): (ST.Backslash, EMIT.Delim), # ' \'
(ST.DE_Gray, CH.DE_White): (ST.DE_White2, EMIT.Nothing), # '_ '
(ST.DE_Gray, CH.DE_Gray): (ST.DE_Gray, EMIT.Empty), # '__'
(ST.DE_Gray, CH.Black): (ST.Black, EMIT.Delim), # '_a'
(ST.DE_Gray, CH.Backslash): (ST.Black, EMIT.Delim), # '_\'
(ST.DE_White2, CH.DE_White): (ST.DE_White2, EMIT.Nothing), # '_ '
(ST.DE_White2, CH.DE_Gray): (ST.DE_Gray, EMIT.Empty), # '_ _'
(ST.DE_White2, CH.Black): (ST.Black, EMIT.Delim), # '_ a'
(ST.DE_White2, CH.Backslash): (ST.Backslash, EMIT.Delim), # '_ \'
(ST.Black, CH.DE_White): (ST.DE_White1, EMIT.Part), # 'a '
(ST.Black, CH.DE_Gray): (ST.DE_Gray, EMIT.Part), # 'a_'
(ST.Black, CH.Black): (ST.Black, EMIT.Nothing), # 'aa'
(ST.Black, CH.Backslash): (ST.Backslash, EMIT.Part), # 'a\'
# Here we emit an ignored \ and the second character as well.
# We're emitting TWO spans here; we don't wait until the subsequent
# character. That is OK.
#
# Problem: if '\ ' is the last one, we don't want to emit a trailing span?
# In all other cases we do.
(ST.Backslash, CH.DE_White): (ST.Black, EMIT.Escape), # '\ '
(ST.Backslash, CH.DE_Gray): (ST.Black, EMIT.Escape), # '\_'
(ST.Backslash, CH.Black): (ST.Black, EMIT.Escape), # '\a'
# NOTE: second character is a backslash, but new state is ST.Black!
(ST.Backslash, CH.Backslash): (ST.Black, EMIT.Escape), # '\\'
}
LAST_SPAN_ACTION = {
ST.Black: EMIT.Part,
ST.Backslash: EMIT.Escape,
# Ignore trailing IFS whitespace too. This is necessary for the case:
# IFS=':' ; read x y z <<< 'a : b : c :'.
ST.DE_White1: EMIT.Nothing,
ST.DE_Gray: EMIT.Delim,
ST.DE_White2: EMIT.Delim,
}
class IfsSplitter(_BaseSplitter):
"""Split a string when IFS has non-whitespace characters."""
def __init__(self, ifs_whitespace, ifs_other):
_BaseSplitter.__init__(self, ifs_whitespace + ifs_other)
self.ifs_whitespace = ifs_whitespace
self.ifs_other = ifs_other
def Split(self, s, allow_escape):
"""
Args:
s: string to split
allow_escape: False for read -r; this means \ doesn't do anything.
Returns:
List of (runtime.span, end_index) pairs
TODO: This should be (frag, do_split) pairs, to avoid IFS='\'
double-escaping issue.
"""
ws_chars = self.ifs_whitespace
other_chars = self.ifs_other
n = len(s)
spans = [] # NOTE: in C, could reserve() this to len(s)
if n == 0:
return spans # empty
# Ad hoc rule from POSIX: ignore leading whitespace.
# "IFS white space shall be ignored at the beginning and end of the input"
# This can't really be handled by the state machine.
i = 0
while i < n and s[i] in self.ifs_whitespace:
i += 1
# Append an ignored span.
if i != 0:
spans.append((span_e.Delim, i))
# String is ONLY whitespace. We want to skip the last span after the
# while loop.
if i == n:
return spans
state = ST.Start
while i < n:
c = s[i]
if c in ws_chars:
ch = CH.DE_White
elif c in other_chars:
ch = CH.DE_Gray
elif allow_escape and c == '\\':
ch = CH.Backslash
else:
ch = CH.Black
new_state, action = TRANSITIONS[state, ch]
if new_state == ST.Invalid:
raise AssertionError(
'Invalid transition from %r with %r' % (state, ch))
if 0:
log('i %d c %r ch %s current: %s next: %s %s',
i, c, ch, state, new_state, action)
if action == EMIT.Part:
spans.append((span_e.Black, i))
elif action == EMIT.Delim:
spans.append((span_e.Delim, i)) # ignored delimiter
elif action == EMIT.Empty:
spans.append((span_e.Delim, i)) # ignored delimiter
spans.append((span_e.Black, i)) # EMPTY part that is NOT ignored
elif action == EMIT.Escape:
spans.append((span_e.Backslash, i)) # \
elif action == EMIT.Nothing:
pass
else:
raise AssertionError
state = new_state
i += 1
last_action = LAST_SPAN_ACTION[state]
#log('n %d state %s last_action %s', n, state, last_action)
if last_action == EMIT.Part:
spans.append((span_e.Black, n))
elif last_action == EMIT.Delim:
spans.append((span_e.Delim, n))
elif last_action == EMIT.Escape:
spans.append((span_e.Backslash, n))
elif last_action == EMIT.Nothing:
pass
else:
raise AssertionError
return spans
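# Quick sanity sketch (illustrative only, not part of the module): splitting on a
# whitespace-plus-colon IFS with escapes enabled.
#
#     sp = IfsSplitter(' \t\n', ':')
#     spans = sp.Split('a : b', allow_escape=True)
#     # -> [(span_e.Black, 1), (span_e.Delim, 4), (span_e.Black, 5)]
#     _SpansToParts('a : b', spans)
#     # -> ['a', 'b']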
|
"""Generic BEL relation API methods."""
import re
import cgi
import requests
import xmltodict
from enum import Enum
from collections import namedtuple, defaultdict, Counter
from copy import deepcopy
from math import ceil
from typing import List, Optional, Dict, Set, NamedTuple, Any, Union, Tuple
from flask import request
from graphviz import Digraph
from ebel import Bel
from ebel.manager.orientdb.odb_structure import get_columns, get_node_view_labels
from ebel.web.api.ebel.v1 import _get_pagination, DataType, OrientDbSqlOperator
PathLengthDict = Dict[int, List[Dict[str, list]]]
PathLength = int
EdgeInfo = dict
NodeInfo = dict
Rid = str
RidList = List[str]
EdgePathsByLength = Dict[PathLength, RidList]
ErrorMessage = Dict[str, str]
class EnumExtension(Enum):
@classmethod
def has_value(cls, value):
return any([x.value==value for x in cls])
@classmethod
def has_name(cls, name):
return any([x.name==name for x in cls])
BELishEdge = namedtuple('Edge', ['name', 'direction', 'params_str'])
edge_colours = {
'increases': "limegreen",
'directly_increases': "springgreen4",
'decreases': "orangered",
'directly_decreases': "red3",
'rate_limiting_step_of': "lightslateblue",
'regulates': "lightblue3",
'causes_no_change': "yellow2",
'positive_correlation': "darkolivegreen4",
'negative_correlation': "coral3",
}
node_colours = {
'protein': "lightblue1",
'complex': "khaki1",
'component': "aquamarine",
'rna': "goldenrod1",
'gene': "lightslateblue",
'activity': "palegreen3",
'abundance': 'darksalmon',
'pathology': "palegreen",
'drug_db': "yellow1",
'biological_process': "snow"
}
class Column:
"""Column definition class."""
def __init__(self,
form_name: str,
column: str,
sql_operator: OrientDbSqlOperator = OrientDbSqlOperator.EQUALS,
data_type: DataType = DataType.STRING,
value: str = None,
show_in_results: bool = True,
switch_where_terms=False):
"""Init method for column.
Parameters
----------
form_name
column
sql_operator
data_type
"""
self.column = column
self.display_column = column + ".asString()" if "@" in column else column
self.sql_operator = sql_operator
self.form_name = form_name
self.data_type = data_type
self.value = value
self.show_in_results = show_in_results
self.switch_where_terms = switch_where_terms
def set_search_term(self, value: str):
"""Return value for a given search term."""
if value:
self.value = value.strip()
def get_sql(self):
"""Build the SQL query."""
column = f"{self.column}.asString()" if "@" in self.column else self.column
return f"{column} as {self.form_name}"
def __str__(self):
return f"<Column: form_name={self.form_name}; column={self.column}; sql_operator={self.sql_operator}; data_type={self.data_type}>"
bel_relation_default_columns: List[Column] = [
Column('subject_rid', 'out.@rid'),
Column('subject_node_class', 'out.@class'),
Column('subject_namespace', 'out.namespace'),
Column('subject_name', 'out.name', OrientDbSqlOperator.LIKE),
Column('subject_bel', 'out.bel', OrientDbSqlOperator.LIKE),
Column('subject_gene_symbol_involved_in', 'out.involved_genes', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('subject_other_involved_in', 'out.involved_other', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('relation_rid', '@rid'),
Column('relation', '@class'),
Column('evidence', 'evidence', OrientDbSqlOperator.LIKE),
Column('citation_full_journal_name', 'citation.full_journal_name', OrientDbSqlOperator.LIKE),
Column('citation_pub_date', 'citation.pub_date'),
Column('citation_pub_year', 'citation.pub_year'),
Column('citation_last_author', 'citation.last_author', OrientDbSqlOperator.LIKE),
Column('citation_type', 'citation.type'),
Column('author_in_author_list', 'citation.author_list', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('title', 'citation.title', OrientDbSqlOperator.LIKE),
Column('doi', 'citation.doi'),
Column('object_rid', 'in.@rid'),
Column('object_node_class', 'in.@class'),
Column('object_namespace', 'in.namespace'),
Column('object_name', 'in.name', OrientDbSqlOperator.LIKE),
Column('object_bel', 'in.bel', OrientDbSqlOperator.LIKE),
Column('object_gene_symbol_involved_in', 'in.involved_genes', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('object_other_involved_in', 'in.involved_other', OrientDbSqlOperator.IN, DataType.LIST_STRING),
]
Pagination = namedtuple('Pagination', ['page', 'page_size', 'skip'])
class Query:
"""Generic class for creating a SQL query."""
def __init__(self, odb_class: str, columns: List[Column]):
"""Init method for hte Query class."""
self.odb_class: str = odb_class
self.columns: List[Column] = columns
self.ebel = Bel()
self.where = self.get_where()
@staticmethod
def get_pagination() -> Pagination:
"""Separate results into pages of a specific length."""
page_size = request.args.get('page_size', '10')
page_size = int(page_size) if re.search(r"^\d+$", page_size) else 10
page_size = 10 if page_size >= 100 else page_size
page = request.args.get('page', '1')
page = int(page) if re.search(r"^\d+$", page) else 1
skip = (page - 1) * page_size
return Pagination(page=page, page_size=page_size, skip=skip)
def get_where(self):
"""Generic filter execution method."""
where = ''
wheres = []
for col in self.columns:
if col.value:
if col.column.endswith('@rid'):
if "," in col.value:
rids = [x.strip() for x in col.value.split(",") if re.search(r"^#\d+:\d+$", x.strip())]
rids_str = "[" + ','.join(rids) + "]"
wheres.append(f"{col.column} in {rids_str}")
else:
rid = col.value.strip()
if rid:
wheres.append(f"{col.column} = {rid}")
elif col.column != "@class":
if col.data_type in [DataType.STRING, DataType.LIST_STRING]:
value = f'"{col.value}"'
else:
value = col.value
if col.data_type in [DataType.LIST_STRING, DataType.LIST_NUMBER, DataType.LIST_INTEGER]:
wheres.append(f'{value} {col.sql_operator.value} {col.column}')
else:
if col.switch_where_terms:
wheres.append(f'{value} {col.sql_operator.value} {col.column}')
else:
wheres.append(f'{col.column} {col.sql_operator.value} {value}')
if wheres:
where = " WHERE " + ' AND '.join(wheres)
return where
@property
def sql(self):
"""Generic sql execution method."""
select = "SELECT "
select += ', '.join([f"{sw.display_column} as {sw.form_name}" for sw in self.columns if sw.show_in_results])
select += " FROM " + self.odb_class
sql = select + self.where
return sql
def get_number_of_results(self):
"""Count number of results."""
sql = "SELECT count(*) FROM " + self.odb_class + self.where
return self.ebel.query_get_dict(sql)[0]['count']
def get_result(self, pagination: Optional[Pagination] = None):
"""Return total number of results."""
if pagination:
p = pagination
else:
p = self.get_pagination()
if not (p.page and p.page_size):
return {'error': "Please add page and page_size to your method."}
number_of_results = self.get_number_of_results()
pages = ceil(number_of_results / p.page_size)
sql_paginated = self.sql + f" skip {p.skip} limit {p.page_size}"
# print(sql_paginated)
return {
'page': p.page,
'page_size': p.page_size,
'number_of_results': number_of_results,
'pages': pages,
'results': [x for x in self.ebel.query_get_dict(sql_paginated)]
}
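# Usage sketch (hypothetical values; requires a reachable OrientDB instance behind
# Bel() and either a Flask request context or an explicit Pagination):
#
#     columns = deepcopy(bel_relation_default_columns)
#     columns[3].set_search_term('APP%')          # subject_name LIKE 'APP%'
#     query = Query('bel_relation', columns)
#     page = query.get_result(Pagination(page=1, page_size=10, skip=0))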
def _get_where_by_how(column: str, value: str, how_to_search: str):
how_to_search = how_to_search if SearchTpye.has_value(how_to_search) else SearchTpye.EXACT.value
value_by_how = {
SearchTpye.EXACT.value: f" = '{value}'",
SearchTpye.CONTAINS.value: f" like '%{value}%'",
SearchTpye.CASE_SENSITIVE.value: f" like '{value}'",
SearchTpye.STARTS_WITH.value: f" like '{value}%'",
SearchTpye.ENDS_WITH.value: f" like '%{value}'",
SearchTpye.GREATER_THAN.value: f" > {value}",
SearchTpye.GREATER_OR_EQUALS_THAN.value: f" >= {value}",
SearchTpye.SMALLER_THAN.value: f" < {value}",
SearchTpye.SMALLER_OR_EQUALS_THAN.value: f" <= {value}",
}
return column + value_by_how[how_to_search]
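# Examples (illustrative):
#     _get_where_by_how('name', 'APP', 'contains')       -> "name like '%APP%'"
#     _get_where_by_how('pmid', '12345', 'greater_than') -> "pmid > 12345"
# Unknown values for how_to_search fall back to an exact match ("name = 'APP'").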
def get_node_class_bel_name_ns():
default_args = ('bel', 'node_name', 'namespace', 'node_class', 'how_bel', 'how_name')
args = {x: '' for x in default_args}
filtered_request_args = {k: v for k, v in request.args.items() if k in default_args}
args.update(filtered_request_args)
namespaces = _get_node_namespace_list(**args)
node_classes = _get_node_class_list(**args)
suggested_node_names = _get_suggested_node_names(**args)
suggested_bels = _get_suggested_bels(**args)
return {
'namespaces': namespaces,
'node_classes': node_classes,
'suggested_node_names': suggested_node_names,
'suggested_bels': suggested_bels
}
def _get_suggested_bels(bel: str, node_name: str, node_class: str, namespace: str, how_name: str, how_bel: str):
node_class = node_class if node_class else 'bel'
sql = f"Select bel from {node_class}"
where = []
if bel:
where.append(_get_where_by_how(column='bel', value=bel, how_to_search=how_bel))
if namespace:
where.append(f"namespace = '{namespace}'")
if node_name:
where.append(_get_where_by_how('name', node_name, how_name))
if where:
sql += " where " + ' and '.join(where)
sql += " order by bel limit 30"
print(sql)
return [y for y in [x.oRecordData.get('bel') for x in Bel().execute(sql)] if y is not None]
def _get_suggested_node_names(bel: str, node_name: str, node_class: str, namespace: str, how_name: str, how_bel: str):
node_class = node_class if node_class else 'bel'
sql = f"Select name from {node_class} where "
where = []
where.append(_get_where_by_how(column='name', value=node_name, how_to_search=how_name))
if namespace:
where.append(f"namespace = '{namespace}'")
if bel:
where.append(_get_where_by_how('bel', bel, how_bel))
sql += ' and '.join(where) + " group by name order by name limit 30"
# print(sql)
return [x.oRecordData['name'] for x in Bel().execute(sql)]
def _get_node_namespace_list(bel: str, node_name: str, namespace: str, node_class: str, how_name: str, how_bel: str):
"""Get first names from BEL nodes (by namespace and node_class)"""
if not namespace:
node_class = node_class if node_class else 'bel'
sql = f"Select namespace from {node_class} where namespace is not null "
if node_name:
sql += " and " + _get_where_by_how(column='name',
value=node_name,
how_to_search=how_name)
if bel:
sql += " and " + _get_where_by_how(column='bel',
value=bel,
how_to_search=how_bel)
sql += " group by namespace order by namespace"
# print(sql)
return [x.oRecordData['namespace'] for x in Bel().execute(sql)]
else:
return [namespace]
def _get_node_class_list(bel: str, node_name: str, node_class: str, namespace: str, how_name: str, how_bel):
if not node_class:
sql = f"Select @class.asString() as node_class from bel"
where = []
if node_name or namespace or bel:
if node_name:
where.append(_get_where_by_how(column='name',
value=node_name,
how_to_search=how_name))
if namespace:
where.append(f"namespace = '{namespace}'")
if bel:
where.append(_get_where_by_how(column='bel',
value=bel,
how_to_search=how_bel))
sql += " where " + ' and '.join(where)
sql += " group by @class order by @class"
# print(sql)
return [x.oRecordData['node_class'] for x in Bel().execute(sql)]
else:
return [node_class]
def get_namespaces():
"""Get ordered list of namespaces"""
sql = "Select distinct(namespace) as namespace from bel where namespace is not null order by namespace"
print(sql)
return [x.oRecordData['namespace'] for x in Bel().execute(sql)]
def get_node_classes():
"""Get ordered list of node classes"""
sql = "Select distinct(@class) as node_class from bel order by node_class"
print(sql)
return [x.oRecordData['node_class'] for x in Bel().execute(sql)]
def get_bel_relations_by_pmid():
columns: List[Column] = [
Column('subject_rid', 'out.@rid'),
Column('subject_node_class', 'out.@class'),
Column('subject_namespace', 'out.namespace'),
Column('subject_name', 'out.name', OrientDbSqlOperator.LIKE),
Column('subject_bel', 'out.bel', OrientDbSqlOperator.LIKE),
Column('subject_gene_symbol_involved_in', 'out.involved_genes', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('subject_other_involved_in', 'out.involved_other', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('relation_rid', '@rid'),
Column('relation', '@class'),
Column('evidence', 'evidence', OrientDbSqlOperator.LIKE),
Column('object_rid', 'in.@rid'),
Column('object_node_class', 'in.@class'),
Column('object_namespace', 'in.namespace'),
Column('object_name', 'in.name', OrientDbSqlOperator.LIKE),
Column('object_bel', 'in.bel', OrientDbSqlOperator.LIKE),
Column('object_gene_symbol_involved_in', 'in.involved_genes', OrientDbSqlOperator.IN, DataType.LIST_STRING),
Column('object_other_involved_in', 'in.involved_other', OrientDbSqlOperator.IN, DataType.LIST_STRING),
]
column_pmid = Column('pmid', 'pmid', data_type=DataType.INTEGER, value=request.args.get('pmid'))
columns.append(column_pmid)
sql_builder = Query('bel_relation', columns)
return sql_builder.get_result(Pagination(1, 1000, 0))
def get_edge_by_annotation() -> list:
"""Return list of edges with a given annotation."""
columns = deepcopy(bel_relation_default_columns)
annotation_key = request.args.get('annotation_key')
annotation_term = request.args.get('annotation_term')
if annotation_key and annotation_term:
column = Column('annotation_key', f"annotation['{annotation_key}']",
sql_operator=OrientDbSqlOperator.IN, value=annotation_term, switch_where_terms=True)
columns.append(column)
sql_builder = Query('bel_relation', columns)
return sql_builder.get_result()
def get_edge_rids():
"""Get edge OrientDB rids."""
subject_rid = request.args.get('subject_rid')
relation_rid = request.args.get('relation_rid')
object_rid = request.args.get('object_rid')
document_rid = request.args.get('document_rid')
columns = [
Column(form_name='subject_rid', column='in.@rid', value=subject_rid),
Column(form_name='relation_rid', column='@rid', value=relation_rid),
Column(form_name='object_rid', column='out.@rid', value=object_rid),
Column(form_name='document_rid', column='document.@rid', value=document_rid),
]
sql_builder = Query('bel_relation', columns)
return sql_builder.get_result()
def get_annotation_keys():
sql = """Select value as annotation_key, count(*) as number_of_edges from
(Select expand(annotation.keys()) as mesh from bel_relation
where annotation.mesh is not null) group by value order by number_of_edges desc"""
return [x.oRecordData for x in Bel().execute(sql)]
def get_mesh_terms_statistics_by_node_rid():
rid = request.args.get('node_rid')
direction = request.args.get('direction')
limit = request.args.get('limit')
sql = f"Select list(annotation.mesh) as mesh_terms from (traverse {direction}E() FROM {rid} MAXDEPTH 1) where @rid!={rid} and annotation.mesh is not null"
res = Bel().query_get_dict(sql)
if 'mesh_terms' in res[0]:
res_dict = Counter(res[0]['mesh_terms'])
mesh_counter_list = [{'mesh_term': x[0], 'count':x[1]}
for x in sorted(res_dict.items(), key=lambda item: item[1], reverse=True)]
return mesh_counter_list[:int(limit)] if limit else mesh_counter_list
return []
def get_annotation_terms():
annotation_key = request.args.get('annotation_key')
if annotation_key:
sql = "Select value as annotation_term, count(*) as number_of_edges from " \
f"(Select expand(annotation['{annotation_key}']) as mesh from bel_relation " \
"where annotation.mesh is not null) group by value order by number_of_edges desc"
return [x.oRecordData for x in Bel().execute(sql)]
def get_edges():
"""Return data for a BEL relation edge."""
columns = deepcopy(bel_relation_default_columns)
for column in columns:
column.set_search_term(request.args.get(column.form_name))
relation = request.args.get('relation', 'bel_relation')
sql_builder = Query(relation, columns)
return sql_builder.get_result()
def get_nodes() -> dict:
"""Return list of nodes with a given namespace."""
b = Bel()
where_list: List[str] = []
params = {k: v for k, v in request.args.items() if k in ['namespace', 'name'] and v}
if request.args.get('pure') == 'true':
params.update(pure=True)
conn2bel_rel = request.args.get('connected_to_bel_relation')
if conn2bel_rel:
conn2bel_rel_dir = request.args.get('connected_to_bel_relation_direction', 'both')
where_list.append(f"{conn2bel_rel_dir}('{conn2bel_rel}').size()>0")
conn2ebel_rel = request.args.get('connected_to_ebel_relation')
if conn2ebel_rel:
conn2ebel_rel_dir = request.args.get('connected_to_ebel_relation_direction', 'both')
where_list.append(f"{conn2ebel_rel_dir}('{conn2ebel_rel}').size()>0")
node_class = request.args.get('node_class')
p = _get_pagination()
number_of_results = b.query_class(class_name=node_class,
columns=['count(*)'],
with_rid=False,
**params)[0]['count']
pages = ceil(number_of_results / p.page_size)
results = b.query_class(class_name=node_class,
columns=['namespace', 'name', 'bel', 'pure', 'involved_genes', 'involved_other'],
skip=p.skip,
limit=p.page_size,
where_list=tuple(where_list),
print_sql=True,
**params)
return {
'page': p.page,
'page_size': p.page_size,
'number_of_results': number_of_results,
'pages': pages,
'results': results
}
def _get_rid() -> Optional[str]:
"""Get rID."""
rid = request.args.get('rid')
if rid:
rid = rid.strip()
if re.search(r'#\d+:\d+', rid):
return rid
def get_edge_statistics_by_rid():
rid = request.args.get('rid')
direction = request.args.get('direction', 'both') # in, out or both
sql = "Select @class, count(*) from (traverse {dir}E() FROM {rid} MAXDEPTH 1) where @rid!={rid} group by @class"
res = Bel().query_get_dict(sql.format(dir=direction, rid=rid))
return res
def get_by_rid() -> Optional[str]:
"""Return BEL node by rid."""
result_dict = {}
rid = _get_rid()
if rid:
b = Bel()
for key, value in b.client.record_load(rid).oRecordData.items():
if isinstance(value, (str, int, float, dict)):
result_dict[key] = value
elif isinstance(value, list):
if all([isinstance(x, (str, int, float)) for x in value]):
result_dict[key] = value
return result_dict
def get_adjacent_nodes_by_rid() -> list:
"""Return neighboring nodes of given rID."""
# d := direction
# od := opposite direction
rid = _get_rid()
relation = request.args.get('relation', 'bel_relation')
sql_temp = "Select '{d}' as direction, @rid.asString() as edge_rid, @class.asString() " \
"as edge_class, {d}.<EMAIL>() as node_rid, {d}.<EMAIL>()as node_class , " \
"{d}.bel as bel, {d}.name as name, {d}.namespace as namespace, {d}.involved_genes as "\
f"involved_genes, {{d}}.involved_other as involved_other from {relation} " \
f"where {{od}}.@rid = {rid}"
direction = request.args.get('direction', 'both')
if rid:
sql_in = sql_temp.format(d='in', od='out')
sql_out = sql_temp.format(d='out', od='in')
if direction == 'in':
sql = sql_in
elif direction == 'out':
sql = sql_out
else:
sql = f"select expand($c) let $a = ({sql_in}), $b = ({sql_out}), $c = unionAll( $a, $b )"
return [x.oRecordData for x in Bel().execute(sql)]
def get_number_of_edges() -> int:
"""Return the number of edges."""
b = Bel()
relation = request.args.get('relation', 'E')
r = b.execute(f"Select count(*) as number_of_edges from {relation} limit 1")
return r[0].oRecordData['number_of_edges']
def get_citation_by_pmid() -> dict:
"""Return the number of edges."""
b = Bel()
pmid = request.args.get('pmid')
r = b.execute(f"Select citation from bel_relation where pmid = {pmid} limit 1")
return r[0].oRecordData['citation']
def get_abstract_by_pmid():
pmid = request.args.get('pmid')
url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={pmid}&retmode=XML&rettype=abstract"
r = requests.get(url.format(pmid=pmid))
d = xmltodict.parse(r.text)
return d['PubmedArticleSet']['PubmedArticle']['MedlineCitation']['Article']['Abstract']['AbstractText']
def get_number_of_nodes() -> int:
"""Return the number of edges."""
b = Bel()
node_class = request.args.get('node_class', 'V')
pure = request.args.get('pure')
where_pure = "where pure = true" if pure else ''
sql = f"Select count(*) as number_of_nodes from {node_class} {where_pure} limit 1"
r = b.execute(sql)
return r[0].oRecordData['number_of_nodes']
def get_pure_rid() -> Optional[str]:
"""Return None or the rID from the node class."""
b = Bel()
node_class = request.args.get('node_class', 'protein')
namespace = request.args.get('namespace', 'HGNC')
name = request.args.get('name')
if name:
sql = f"Select @rid.asString() as rid from {node_class} " \
f"where name='{name}' and pure=true and namespace='{namespace}' limit 1"
return b.execute(sql)[0].oRecordData['rid']
class Position(EnumExtension):
"""Why is there a class for defining constants."""
FIRST = "first"
LAST = "last"
INSIDE = "inside"
class SearchTpye(EnumExtension):
EXACT = 'exact'
CONTAINS = 'contains'
CASE_SENSITIVE = 'case_insensitive'
STARTS_WITH = 'starts_with'
ENDS_WITH = 'ends_with'
GREATER_THAN = 'greater_than'
GREATER_OR_EQUALS_THAN = 'greater_or_equals_than'
SMALLER_THAN = 'smaller_than'
SMALLER_OR_EQUALS_THAN = 'smaller_or_equals_than'
class MatchEdge:
"""Class to construct the edge portions of a MATCH query."""
def __init__(self, edge_class: str, multiple_edge_classes: str, mesh_terms: List[str], pmids: List[int]):
"""Init method."""
self.position: Optional[Position] = None
self.node_class = 'bel'
self.edge_class = edge_class
self.pmids = pmids
self.mesh_terms = mesh_terms
self.multiple_edge_classes = multiple_edge_classes
def set_last(self, node_class: Optional[str]):
"""Set object to the LAST position."""
self.position = Position.LAST
if node_class:
self.node_class = node_class
def get_edge(self, alias_number: int) -> str:
"""Return edge based on alias number."""
mesh_or = ''
if self.mesh_terms:
mesh_or = " OR ".join(["'" + x.replace("'", '') + "' in annotation.mesh" for x in self.mesh_terms])
mesh_or = f"({mesh_or})"
pmids_in = ''
if self.pmids:
if len(self.pmids) == 1:
pmids_in = f"pmid = {self.pmids[0]}"
else:
pmids_in = "pmid in " + str(self.pmids)
pmids_in = f"{pmids_in}"
where = ' AND '.join([x for x in [mesh_or, pmids_in] if x])
where_str = f"where:({where}), " if any([mesh_or, pmids_in]) else ''
e_class_multi, e_class_single = self.get_edge_classes()
return f".outE({e_class_multi}){{{e_class_single}{where_str}as:e{alias_number}}}.inV()"
def get_edge_classes(self):
"""Return edge classes of match query."""
""""""
e_class_single = ''
e_class_multi = ''
if isinstance(self.multiple_edge_classes, str) and self.multiple_edge_classes.strip():
edge_classes = [x.strip() for x in self.multiple_edge_classes.split(',') if x.strip()]
if len(edge_classes) == 1:
e_class_single = f"class:{edge_classes[0]}, "
elif len(edge_classes) > 1:
edge_classes = ["'" + re.sub(r'\W+', "", x) + "'" for x in edge_classes]
e_class_multi = ",".join(edge_classes)
elif isinstance(self.edge_class, str) and self.edge_class.strip():
e_class_single = f"class:{self.edge_class}, "
return e_class_multi, e_class_single
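# Example of the generated edge fragment (illustrative): with edge_class='increases'
# and no mesh terms or PMIDs, get_edge(1) yields
#     .outE(){class:increases, as:e1}.inV()
# With multiple_edge_classes='increases,decreases' the classes move into outE():
#     .outE('increases','decreases'){as:e1}.inV()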
class MatchNode:
"""Class to construct the node portions of a MATCH query."""
def __init__(self):
"""Init method."""
self.name: Optional[str] = None
self.position: Optional[Position] = None
self.node_class: Optional[str] = None
self.namespace: Optional[str] = None
self.bel: Optional[str] = None
self.gene_path: bool = False
self.how_name = None
self.how_bel = None
def set_outside(self,
position: Position,
name: Optional[str] = None,
node_class: Optional[str] = None,
namespace: Optional[str] = None,
bel: Optional[str] = None,
how_name: Optional[str] = SearchTpye.EXACT.value,
how_bel: Optional[str] = SearchTpye.EXACT.value):
"""Assign attributes to the "outside" position."""
self.position = position
if name:
self.name = name
if node_class:
self.node_class = node_class
if namespace:
self.namespace = namespace
if bel:
self.bel = bel
if how_name:
self.how_name = how_name if SearchTpye.has_value(how_name) else SearchTpye.EXACT.value
print("how_name:", how_name, self.how_name)
if how_bel:
self.how_bel = how_bel if SearchTpye.has_value(how_bel) else SearchTpye.EXACT.value
print("how_bel:", how_bel, self.how_bel)
def set_inside(self, gene_path: bool, node_class: None):
"""Assign attributes to the "inside" position."""
self.gene_path = gene_path
self.position = Position.INSIDE
self.node_class = node_class
def get_node(self, alias_number: int) -> str:
"""Return node based on alias number."""
print(f"hows: \n\tposition{self.position} \n\thow_name: {self.how_name}, \n\thow_bel: {self.how_bel}")
namespace = f"namespace='{self.namespace}'"
name = _get_where_by_how('name', self.name, self.how_name)
bel = _get_where_by_how('bel', self.bel, self.how_bel)
name_involved = f"('{self.name}' in involved_genes OR '{self.name}' in involved_other)"
involved_genes = "involved_genes.size()>0"
not_like_node = "$matched.n{}!=$currentMatch"
where_inside_list = []
where_str = ''
if self.position in (Position.FIRST, Position.LAST):
where_first_last = []
if self.bel:
where_first_last.append(bel)
if self.namespace and not self.name:
where_first_last.append(namespace)
elif self.name and not self.namespace:
if self.node_class in ['gene', 'rna', 'protein']:
where_first_last.append(name)
else:
where_first_last.append(name_involved)
elif self.name and self.namespace:
where_first_last.append(f'{name} AND {namespace}')
where_str = " AND ".join(where_first_last)
if self.position == Position.LAST:
where_inside_list.append(not_like_node.format(1))
if self.gene_path and not any([self.node_class, self.name, self.namespace]):
where_inside_list.append(involved_genes)
where_str = ' AND '.join([x for x in ([where_str] + where_inside_list) if x])
alias = f"as:n{alias_number}"
where = f"where:({where_str})" if where_str else ''
node_class = self.get_node_class()
node_query_str = ', '.join([x for x in [node_class, where, alias] if x])
return "{" + node_query_str + "}"
def get_node_class(self):
"""Return node class of match query."""
node_class = ''
if self.node_class:
node_class = f"class:{self.node_class}"
return node_class
class GraphType(EnumExtension):
"""Not sure why there is a class for defining constants."""
NODES = 'nodes'
EDGES = 'edges'
class PathsResult(NamedTuple):
"""Class to build results of path query."""
edge_paths_by_length: EdgePathsByLength
unique_edges: Dict[Rid, EdgeInfo]
unique_nodes: Dict[Rid, NodeInfo]
class PathQuery:
"""Class for constructing a path-based query."""
def __init__(self,
start_name: str,
end_name: str,
min_length: int,
max_length: int,
start_how_name: Optional[str] = None,
end_how_name: Optional[str] = None,
start_class: Optional[str] = None,
end_class: Optional[str] = None,
start_ns: Optional[str] = None,
end_ns: Optional[str] = None,
start_bel: Optional[str] = None,
start_how_bel: Optional[str] = None,
end_bel: Optional[str] = None,
end_how_bel: Optional[str] = None,
gene_path: bool = False,
edge_class: Optional[str] = None,
multiple_edge_classes: Optional[str] = None,
inside_node_class: Optional[str] = None,
mesh_term: Optional[str] = None,
pmids: str = '',
belish: Optional[str] = None,
limit: int = 0,
skip: int = 0):
"""Init method."""
self.multiple_edge_classes = multiple_edge_classes
self.limit = limit
if isinstance(self.limit, str) and self.limit.isnumeric():
self.limit = int(self.limit)
self.skip = skip
if isinstance(self.skip, str) and self.skip.isnumeric():
self.skip = int(self.skip)
self.pmids = [int(x.strip()) for x in pmids.split(',') if x.strip().isdigit()]
self.mesh_terms = [x.strip() for x in mesh_term.split(';') if x.strip()]
self.execute = Bel().execute
self.min_length = min_length
self.max_length = max_length
self.edge_class = edge_class
self.belish = belish
self.max_paths = 100000
self.max_unique_edges = 1000
self.nodes = [MatchNode() for _ in range(self.max_length + 1)]
self.nodes[0].set_outside(
position=Position.FIRST,
name=start_name,
node_class=start_class,
namespace=start_ns,
bel=start_bel,
how_name=start_how_name,
how_bel=start_how_bel)
self.nodes[-1].set_outside(
position=Position.LAST,
name=end_name,
node_class=end_class,
namespace=end_ns,
bel=end_bel,
how_name=end_how_name,
how_bel=end_how_bel)
self.edges = [MatchEdge(self.edge_class,
self.multiple_edge_classes,
self.mesh_terms,
self.pmids) for _ in range(self.max_length)]
self.edges[-1].set_last(end_class)
# TODO: why not use the normal edge_class?
for node in self.nodes[1:-1]:
node.set_inside(gene_path, inside_node_class)
self.too_many_paths = "With the path length of {} we found already more than " \
f"{self.max_paths} pathways. Please specify you query (or set limit) and run again."
self.too_many_edges = f"We found too many unique edges ({{}}, max allowed={self.max_unique_edges} ) with an " \
f"allowed maximum of {self.max_paths} paths. Please specify you query and run again. " \
"Decrease max path length, use limit or state start- and end-node more precisely."
def get_query_str(self, number_of_edges):
"""Create query string by number of edges."""
query = "match " + self.nodes[0].get_node(1)
edges = self.edges[-1 * number_of_edges:]
nodes = self.nodes[-1 * (number_of_edges + 1):]
for i in range(1, number_of_edges + 1):
query += edges[i - 1].get_edge(alias_number=i) + nodes[i].get_node(alias_number=i + 1)
query += self.get_match_return(number_of_edges)
return query
@staticmethod
def _get_unique_rids(graph_type: GraphType, path_length_dict: PathLengthDict) -> Set[Rid]:
"""Get unique node or edge rid set."""
rids = {w for z in [[x for y in [en[graph_type.value] for en in v] for x in y] for _, v in
path_length_dict.items()] for w in z}
return rids
def get_unique_edge_list(self, path_length_dict: PathLengthDict) -> Dict[Rid, EdgeInfo]:
"""Get unique list of edges."""
edge_rids = self._get_unique_rids(GraphType.EDGES, path_length_dict)
return {rid: self.get_edge_info(rid) for rid in edge_rids}
def get_unique_node_list(self, path_length_dict: PathLengthDict) -> Dict[Rid, NodeInfo]:
"""Get unique list of nodes."""
node_rids = self._get_unique_rids(GraphType.NODES, path_length_dict)
return {rid: self.get_node_info(rid) for rid in node_rids}
def get_edge_info(self, rid: Rid):
"""Get edge metadata by given rID."""
sql = f"Select <EMAIL>() as subject_rid, in.<EMAIL>() as object_rid, " \
f"out.bel as subject_bel, in.bel as object_bel," \
f"@class.asString() as class, citation, evidence, pmid, annotation.mesh from {rid}"
return self.execute(sql)[0].oRecordData
def get_node_info(self, rid: Rid):
"""Get node metadata by given rID."""
sql = f"Select @class.asString() as class, * from {rid}"
data = self.execute(sql)[0].oRecordData
serializable_columns = get_columns(data['class'], exclude_non_serializable=True) + ['class']
serializable_data = {k: v for k, v in data.items() if k in serializable_columns}
return serializable_data
@property
def allowed_edges(self) -> List[str]:
"""Return the number of allowed edges, includes both eBEL and BEL edge types."""
bel_relations = list(get_bel_relation_types().keys())
ebel_relations = list(get_ebel_relation_types().keys())
return list(bel_relations + ebel_relations)
def get_match_return(self, number_of_edges):
"""Standard way to return node and edges."""
edges = [f"e{i}.@rid.asString()" for i in range(1, number_of_edges + 1)]
edges_join = ','.join(edges)
nodes = [f"n{i}.@rid.asString()" for i in range(1, number_of_edges + 2)]
nodes_join = ','.join(nodes)
if self.limit and self.limit <= (self.max_paths + 1):
limit = self.limit
else:
limit = self.max_paths + 1
skip = f" skip {self.skip} " if self.skip else ''
return f" return [{edges_join}] as edges, [{nodes_join}] as nodes {skip} limit {limit}"
def get_query_str_belish_num_edges(self):
"""Find the number of edges that match the BELish query string."""
node_strings, edges = [], []
if self.belish:
node_strings, edges = self.get_belish_nodes_edges()
if len(node_strings) == len(edges) + 1:
match_str = self.get_belish_match_str(edges, node_strings)
return match_str, len(edges)
def get_belish_match_str(self, edges, node_strings):
"""Build the MATCH string based on the BELish query."""
# ALERT: if a where clause is given in the following node, multiple edge classes are ignored
edge_direction = {
'->': {'one_class': ".outE(){{class:{},as:e{}{}}}.inV()", 'multi_class': ".outE({}){{as:e{}{}}}.inV()"},
'<-': {'one_class': ".inE(){{class:{},as:e{}{}}}.outV()", 'multi_class': ".inE({}){{as:e{}{}}}.outV()"},
}
re_node_in_box = re.compile(
r'^\[\s*(?P<class_name>\w+)(?P<params>(\s+\w+(\.\w+)?(!=|=|>|<|~|\*)(\d+|\d+\.\d+|[\w%]+|"[^"]+"))*)\s*\]$')
match_str = 'match '
for i in range(len(node_strings)):
found_node_in_box = re_node_in_box.search(node_strings[i])
if found_node_in_box:
node_where = ''
node_groups = found_node_in_box.groupdict()
if node_groups['params']:
node_where = self.get_where_list_by_params(node_groups['params'])
match_str += f"{{class:{node_groups['class_name']} {node_where}, as:n{i + 1}}}"
else:
match_str += f"{{class:bel, where:(bel like '{node_strings[i]}'), as:n{i + 1}}}"
if i <= len(edges) - 1:
edge_temp = edge_direction[edges[i].direction]
edge_class_names = [x.strip() for x in edges[i].name.split(',') if x.strip()]
edge_where = ''
if edges[i].params_str:
edge_where = self.get_where_list_by_params(edges[i].params_str)
if len(edge_class_names) == 1:
match_str += edge_temp['one_class'].format(edge_class_names[0], i + 1, edge_where)
else:
edge_class_names_joined = ','.join([f'"{x}"' for x in edge_class_names])
match_str += edge_temp['multi_class'].format(edge_class_names_joined, i + 1, edge_where)
match_str += self.get_match_return(len(edges))
return match_str
@staticmethod
def get_where_list_by_params(params_str):
"""Build WHERE section of query based on the passed parameters."""
where_list = []
re_params_in_box = re.compile(r'(\w+(\.\w+)?)(!=|=|>|<|~|\*)(\d+|\d+\.\d+|[\w%]+|"[^"]+")')
for param, sub_param, operator, value in re_params_in_box.findall(params_str):
operator = 'like' if operator == '~' else operator
operator = 'in' if operator == '*' else operator
equals_or_in_and_number = operator in ['=', 'in'] and re.search(r'^\d+(\.\d+)?$', value)
quotes_surrounded = re.search('^".*"$', value)
if not (operator in ['>', '<'] or equals_or_in_and_number or quotes_surrounded):
value = f'"{value}"'
if operator == 'in':
where_list.append(f"{value} {operator} {param}")
else:
where_list.append(f"{param} {operator} {value}")
where = ", where:(" + ' AND '.join(where_list) + ")"
return where
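# Example (illustrative): get_where_list_by_params('pmid=12345 evidence~amyloid')
# returns ', where:(pmid = 12345 AND evidence like "amyloid")' -- '~' maps to LIKE,
# '*' maps to IN, and non-numeric values are quoted automatically.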
def get_belish_nodes_edges(self) -> Tuple[List[str], List[BELishEdge]]:
"""Return all BELish nodes and edges."""
r = re.split(r"\s+(-(([a-z_0-9,]+)(\s+.*?)?)(->)|(<-)(([a-z_0-9,]+)(\s+.*?)?)-|-(->)|(<-)-)\s+", self.belish)
nodes: List[str] = r[::12]
edge_zip = zip(r[8::12], r[3::12], r[1::12], r[5::12], r[6::12], r[10::12], r[11::12], r[4::12])
edges: List[BELishEdge] = [
BELishEdge(x[0] or x[1] or '', x[3] or x[4] or x[5] or x[6], x[7]) for x in edge_zip
]
return nodes, edges
def get_paths(self) -> Union[PathsResult, Dict]:
"""Get paths by query."""
self.max_paths = 100000
if self.edge_class and not (self.edge_class in self.allowed_edges or self.edge_class == 'E'):
return {'error': "Unknown relation type."}
path_length_dict: PathLengthDict = {}
edge_paths_by_length: EdgePathsByLength = {}
for number_of_edges in range(self.min_length, self.max_length + 1):
query_str = self.get_query_str(number_of_edges)
print(query_str)
paths: List[Dict[str, Any]] = [x.oRecordData for x in self.execute(query_str)]
if len(paths) > self.max_paths:
return {'error': self.too_many_paths.format(number_of_edges)}
path_length_dict[number_of_edges] = paths
edge_paths_by_length[number_of_edges] = [x['edges'] for x in paths]
unique_edges: Dict[Rid, EdgeInfo] = self.get_unique_edge_list(path_length_dict)
if len(unique_edges) > self.max_unique_edges:
return {'error': self.too_many_edges.format(len(unique_edges))}
unique_nodes: Dict[Rid, NodeInfo] = self.get_unique_node_list(path_length_dict)
paths_results = PathsResult(edge_paths_by_length=edge_paths_by_length,
unique_edges=unique_edges,
unique_nodes=unique_nodes)
return paths_results
def get_paths_by_belish(self):
"""Get paths by BELish query."""
self.max_paths = 100
path_length_dict: PathLengthDict = {}
edge_paths_by_length: EdgePathsByLength = {}
query_str_belish_num_edges = self.get_query_str_belish_num_edges()
print(query_str_belish_num_edges)
if query_str_belish_num_edges:
query_str, number_of_edges = query_str_belish_num_edges
paths: List[Dict[str, Any]] = [x.oRecordData for x in self.execute(query_str)]
if len(paths) > self.max_paths:
return {'error': self.too_many_paths.format(number_of_edges)}
path_length_dict[number_of_edges] = paths
edge_paths_by_length[number_of_edges] = [x['edges'] for x in paths]
unique_edges: Dict[Rid, EdgeInfo] = self.get_unique_edge_list(path_length_dict)
if len(unique_edges) > self.max_unique_edges:
return {'error': self.too_many_edges.format(len(unique_edges))}
unique_nodes: Dict[Rid, NodeInfo] = self.get_unique_node_list(path_length_dict)
paths_results = PathsResult(edge_paths_by_length=edge_paths_by_length,
unique_edges=unique_edges,
unique_nodes=unique_nodes)
return paths_results
def _get_number(number_string: str, default_value: int, min_value: int = None, max_value: int = None) -> int:
"""Parse the number string.
Check if the number_string is numeric and >= min_value or <= max_value, otherwise assign min_value
respectively max_value.
"""
return_value: Optional[int] = None
if number_string is not None and number_string.isnumeric():
number = int(number_string)
if min_value:
return_value = number if number >= min_value else min_value
elif max_value:
return_value = number if number <= max_value else max_value
else:
# no bound given: return the parsed number itself
return_value = number
else:
# not a number (or missing): fall back to the default
return_value = default_value
return return_value
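# Illustrative behaviour: _get_number('5', 3, max_value=10) -> 5, _get_number('50', 3, max_value=10) -> 10,
# _get_number(None, 3, max_value=10) -> 3, _get_number('abc', 1, min_value=1) -> 1.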
def _get_path_query() -> Union[PathQuery, ErrorMessage]:
"""Return paths found for query.
Raises
------
ErrorMessage
"""
start_name = request.args.get('start_node_name')
end_name = request.args.get('end_node_name')
start_how_name = request.args.get('start_how_node_name')
end_how_name = request.args.get('end_how_node_name')
start_class = request.args.get('start_node_class', 'bel')
end_class = request.args.get('end_node_class', 'bel')
start_ns = request.args.get('start_node_namespace')
end_ns = request.args.get('end_node_namespace')
start_bel = request.args.get('start_bel')
start_how_bel = request.args.get('start_how_bel')
end_bel = request.args.get('end_bel')
end_how_bel = request.args.get('end_how_bel')
edge_class = request.args.get('connecting_relation')
multiple_edge_classes = request.args.get('multiple_connecting_relations', '')
inside_node_class = request.args.get('connecting_node_class')
gene_path = request.args.get('only_gene_related_nodes_on_path')
pmid = request.args.get('pmid', '')
mesh_term = request.args.get('mesh_term', '')
gene_path = gene_path == 'true'
limit = request.args.get('limit', '')
limit = int(limit) if limit.isnumeric() else 0
belish = request.args.get('belish', '')
min_value = 1
max_value = 10
min_str = request.args.get('min_path_length')
min_length = _get_number(min_str, 1, min_value=min_value)
max_str = request.args.get('max_path_length')
max_length = _get_number(max_str, 3, max_value=max_value)
if pmid:
edge_class = 'bel_relation'
end_class = 'bel'
max_length = 1
path_query = PathQuery(
start_name=start_name,
end_name=end_name,
min_length=min_length,
max_length=max_length,
start_how_name=start_how_name,
end_how_name=end_how_name,
start_class=start_class,
end_class=end_class,
start_ns=start_ns,
end_ns=end_ns,
start_bel=start_bel,
start_how_bel=start_how_bel,
end_bel=end_bel,
end_how_bel=end_how_bel,
gene_path=gene_path,
edge_class=edge_class,
multiple_edge_classes=multiple_edge_classes,
inside_node_class=inside_node_class,
mesh_term=mesh_term,
pmids=pmid,
belish=belish,
limit=limit)
return path_query
def get_paths() -> Union[dict, PathQuery]:
"""Return paths found for query."""
print('get_paths:\n\n', request.args)
path_query = _get_path_query()
if isinstance(path_query, PathQuery):
paths = path_query.get_paths()
if isinstance(paths, dict):
return paths
else:
return paths._asdict()
else:
return path_query
def get_paths_by_belish() -> Union[dict, PathQuery]:
"""Find all paths from given BELish query and return results as dictionary."""
path_query = _get_path_query()
if isinstance(path_query, PathQuery):
paths_by_belish = path_query.get_paths_by_belish()
print('path_query:', type(paths_by_belish))
if isinstance(paths_by_belish, dict):
return paths_by_belish
else:
return paths_by_belish._asdict()
else:
return path_query
def get_paths_as_dot():
"""Execute protected method."""
path_query = _get_path_query()
return _get_paths_as_dot(path_query.get_paths())
def get_paths_by_belish_as_dot():
"""Find all paths from given BELish query and return results as DOT."""
path_query = _get_path_query()
return _get_paths_as_dot(path_query.get_paths_by_belish())
def _get_paths_as_dot(paths):
"""Find all paths between specified query and return results as DOT."""
if isinstance(paths, PathsResult):
edges = defaultdict(int)
d = Digraph()
d.attr('graph', fontname="helvetica")
d.attr('node', shape='note')
if len(paths.unique_nodes) == 0:
row_template = '<B><FONT POINT-SIZE="6">{}:</FONT></B>' \
'<FONT POINT-SIZE="6">{}</FONT>'
key_value_dict = {}
for k, v in request.args.items():
key_value_dict[k] = cgi.html.escape(v).encode('ascii', 'xmlcharrefreplace').decode("utf-8")
legend_rows = '<BR/>'.join([row_template.format(k, v) for k, v in key_value_dict.items()])
d.node('legend', f'<<FONT POINT-SIZE="16">NO PATHS FOUND!</FONT><BR/>{legend_rows}>')
d.attr('node', shape='box')
d.attr('node', style='filled')
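# The node and edge labels below are Graphviz HTML-like labels (wrapped in <...>), so literal
# angle brackets coming from BEL strings have to be escaped as &lt; / &gt;.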
for rid, v in paths.unique_nodes.items():
node_id = rid.replace(':', '.')
d.attr('node', fillcolor=node_colours.get(v['class'], 'grey'))
view_labels = get_node_view_labels(v['class'])
sub_label = view_labels['sub_label']
if 'involved_genes' in v or 'involved_other' in v:
involved = ','.join(
v['involved_genes'] + v['involved_other']
).replace('<', '&lt;').replace('>', '&gt;')
bel_str = v["bel"].replace('<', '&lt;').replace('>', '&gt;')
node_label = f'<<FONT POINT-SIZE="10">{v["class"]}</FONT><BR/><FONT POINT-SIZE="16">{involved}</FONT>'\
f'<BR/><FONT POINT-SIZE="6">{bel_str}</FONT>>'
else:
label_col_list = view_labels['label']
label_value = ''
if len(label_col_list) == 1:
label_value = v[label_col_list[0]]
elif len(label_col_list) > 1:
label_value = '; '.join([str(v.get(x, '')) for x in label_col_list])
sub_label_value = ''
if sub_label and v.get(sub_label[0]):
sub_label_value = '</FONT><BR/><FONT POINT-SIZE="6">' + v[sub_label[0]]
node_label = f'<<FONT POINT-SIZE="10">{v["class"]}</FONT><BR/>' \
f'<FONT POINT-SIZE="16">{label_value}{sub_label_value}</FONT>>'
d.node(node_id, node_label)
for v in paths.unique_edges.values():
s_rid = v['subject_rid'].replace(':', '.')
o_rid = v['object_rid'].replace(':', '.')
edges[(s_rid, o_rid, v['class'])] += 1
for edge, number_of_edge in edges.items():
s_rid, o_rid, label = edge
d.attr('edge', color=edge_colours.get(label, 'grey'))
d.edge(s_rid, o_rid, f'<<FONT POINT-SIZE="8">{label} [{number_of_edge}]</FONT>>')
return d.source
else:
return paths
def get_publication_year_statistics():
"""Return publication counts by year derived from BEL edges."""
sql = "Select year, count(*) as number_of_edges from (Select citation.pub_date.left(4) as year " \
"from bel_relation) where year!='' group by year order by year desc"
return [x.oRecordData for x in Bel().execute(sql)]
def get_class_infos():
"""Return node or edge class metadata."""
b = Bel()
sql = "SELECT name, superClass, abstract, properties FROM (select expand(classes) " \
"FROM metadata:schema) WHERE NOT (name LIKE 'O%' OR name like '_%')"
class_name_dict = {}
parent_dict = defaultdict(list)
in_out_dict = {}
in_edge_class_dict = defaultdict(list)
out_edge_class_dict = defaultdict(list)
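# First pass over the schema: record each class and its direct parent, and for edge classes the
# linked 'in'/'out' node classes, so the full inheritance path and the relations allowed on each
# node class can be derived below.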
for row in b.execute(sql):
r = dict(row.oRecordData)
in_out = {p['name']: p['linkedClass'] for p in r['properties'] if
'linkedClass' in p and p['name'] in ['in', 'out']}
if in_out:
in_out_dict[r['name']] = in_out
in_edge_class_dict[in_out['in']].append(r['name'])
out_edge_class_dict[in_out['out']].append(r['name'])
r.pop('properties')
class_name_dict[r['name']] = r
if r.get('superClass'): # all except roots
parent_dict[r['superClass']].append({'name': r['name'], 'abstract': r['abstract']})
results = {}
for class_name in class_name_dict:
cnd = class_name_dict[class_name]
result = {'abstract': cnd['abstract'],
'parents_path': [class_name],
'children': parent_dict[class_name],
}
check4parent = True
while check4parent:
last_parent = result['parents_path'][-1]
if last_parent not in class_name_dict:
break
parent = class_name_dict[last_parent].get('superClass')
if parent:
result['parents_path'].append(parent)
else:
check4parent = False
results[class_name] = result
# get in_out from parents
for parent in result['parents_path']:
if parent in in_out_dict:
results[class_name]['in_out'] = in_out_dict[parent]
break
if class_name in in_edge_class_dict:
results[class_name]['in_relations'] = in_edge_class_dict[class_name]
if class_name in out_edge_class_dict:
results[class_name]['out_relations'] = out_edge_class_dict[class_name]
return results
def get_class_info_by_name():
"""Return node type by given name."""
results = get_class_infos()
return results.get(request.args.get('name'))
def get_class_infos_by_parent_name(childs_of) -> dict:
"""Get node or edge class information as DOT."""
results = get_class_infos()
return {k: v for k, v in results.items() if childs_of in v['parents_path']}
def _get_class_info_as_dot(get_class_method):
"""Get node or edge class information as DOT."""
classes = get_class_method()
graph = Digraph()
graph.graph_attr['rankdir'] = 'LR'
graph.node_attr['shape'] = 'plaintext'
for node_name, v in classes.items():
for child in [x['name'] for x in v['children']]:
graph.edge(node_name, child)
return graph.source
def get_bel_node_types():
"""Return BEL nodes and their metadata."""
return get_class_infos_by_parent_name('bel')
def get_all_node_types():
"""Return BEL nodes and their metadata."""
bel_node_types = get_class_infos_by_parent_name('bel')
bel_node_types.update(get_class_infos_by_parent_name('ebel'))
return bel_node_types
def get_bel_node_types_as_dot():
"""Return BEL nodes as DOT."""
return _get_class_info_as_dot(get_bel_node_types)
def get_ebel_node_types():
"""Return eBEL added nodes and their metadata."""
return get_class_infos_by_parent_name('ebel')
def get_ebel_node_types_as_dot():
"""Return eBEL added nodes as DOT."""
return _get_class_info_as_dot(get_ebel_node_types)
def get_bel_relation_types():
"""Return BEL edges and their metadata."""
return get_class_infos_by_parent_name('bel_relation')
def get_bel_relation_types_as_dot():
"""Return BEL edges as DOT."""
return _get_class_info_as_dot(get_bel_relation_types)
def get_ebel_relation_types():
"""Return eBEL added edges and their metadata."""
return get_class_infos_by_parent_name('ebel_relation')
def get_ebel_relation_types_as_dot():
"""Return eBEL added edges as DOT."""
return _get_class_info_as_dot(get_ebel_relation_types)
# ApiResult = namedtuple('ApiResult', ['results', 'number_of_results', 'page', 'pages','page_size'])
def get_documents():
"""Return a list of documents that were imported to compile BEL graph."""
sql = """Select
@rid.asString() as rid,
description,
contact_info,
version,
licence,
date.uploaded as uploaded,
copyright,
keywords.label as keywords,
file.last_modified as file_last_modified,
name as file_name,
git_info.origin_url as git_origin_url,
git_info.hexsha as git_hexsha,
git_info.repo_path as git_repo_path,
authors
from bel_document"""
results = [x.oRecordData for x in Bel().execute(sql)]
len_results = len(results)
return {'results': results, 'number_of_results': len_results, 'page': 1, 'pages': 1, 'page_size': len_results}
def get_pmids():
"""Return all PMIDs and their counts from BEL edges."""
sql = "Select pmid, count(*) as number_of_edges, citation" \
" from bel_relation " \
"where pmid!=0 group by pmid order by number_of_edges desc"
return [x.oRecordData for x in Bel().execute(sql)]
##### Imports #####
from discord.ext import commands
from discord.ext import tasks
from datetime import time
from collections import Counter
import discord
import random
import asyncio
import aiohttp
import json
import os
import subprocess
import sys
import datetime
import time
import ast
import re
import zlib
import io
import execjs
import requests
import xml.etree.ElementTree as ET
##### Settings #####
no = '👎'
ok = '👍'
left = '⏪'
right = '⏩'
yl = "⬅"
yr = "➡"
counts = 0
col = random.randint(0, 0xFFFFFF)
role = discord.Role
dicenum = random.randint(0, 6)
token = "<PASSWORD>"
ver = "1.9β"
release = "0.1"
status = "Beta"
updateinfos = "・コマンド追加"
##### Initial definitions #####
bot = commands.Bot(command_prefix="y>",activity=discord.Game(name="YuMe bot"))
##### Settings 2 #####
bot.remove_command('help')
bot.load_extension("jishaku")
##### Startup handling #####
@bot.event
async def on_ready():
print("ログインに成功しました")
await bot.change_presence(activity = discord.Game(name="起動しています…|y>help|YuMe Project"),status =discord.Status.idle)
print(bot.user.name)
print(bot.user.id)
print("起動時の情報を送信しています… / Owner")
channel = bot.get_channel(675906231394762762)
e = discord.Embed(title="起動成功 - 詳細情報", description="起動処理が正常に終了しました。")
e.add_field(name="バージョン情報", value=f"Ver:{ver}\nRelease:{release}\nStatus:{status}")
e.add_field(name="更新情報", value=f"```\n{updateinfos}```")
e.add_field(name="導入サーバー数", value=len(bot.guilds), inline=False)
pingtime = bot.latency * 1000
e.add_field(name="応答速度", value=pingtime)
await channel.send(embed=e)
print("起動時の情報を送信しています… / User")
for ready_channel in bot.get_all_channels():
if ready_channel.name == "yui_ready":
e = discord.Embed(title="起動成功", description="起動処理が正常に終了しました。")
await ready_channel.send(embed=e)
elif ready_channel.name == "yui_advance_ready":
e = discord.Embed(title="起動成功 - 詳細情報", description="起動処理が正常に終了しました。")
e.add_field(name="バージョン情報", value=f"Ver:{ver}\nRelease:{release}\nStatus:{status}")
e.add_field(name="更新情報", value=f"```\n{updateinfos}```")
print("最終処理を実行しています…")
await bot.change_presence(activity = discord.Game(name=f"y>help|Ver:{ver}|Release:{release}|{len(bot.guilds)}Guilds & {len(bot.users)}Users|discord.py rewrite"),status =discord.Status.online)
print("Debug Console.")
for allguild in bot.guilds:
print(allguild)
print("正常に起動しました。")
##### Global chat #####
@bot.event
async def on_message(message):
await bot.process_commands(message)
if message.author.bot:return
if message.channel.name == "yume-chat":
for ch in bot.get_all_channels():
if isinstance(ch,discord.TextChannel):
if ch.name == "yume-chat":
e = discord.Embed(description=message.content).set_footer(text=message.guild).set_author(name=message.author,icon_url=message.author.avatar_url)
await ch.send(embed=e)
await message.channel.send(embed=e)
##### Debug commands #####
@bot.command(name="eval",description="Pythonのソースを評価するよ!\n一部の人だけ使用できるね!")
async def eval_(ctx, *, cmd):
if ctx.author.id in[584008752005513216,539787492711464960,631786733511376916,563172752555638794,561000119495819290]:
try:
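# Wrap the submitted snippet in an async function definition, compile and exec it into `env`,
# then await the resulting coroutine so `await` can be used inside the evaluated code.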
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
body = f"async def {fn_name}():\n{cmd}"
parsed = ast.parse(body)
env = {
"client": bot,
"discord": discord,
"commands": commands,
"ctx": ctx,
"__import__": __import__,
"bot": bot,
"_message": ctx.message,
"_guild": ctx.guild,
"_author": ctx.author,
"_channel": ctx.channel,
"_msg": ctx.message,
"_mes": ctx.message,
"tasks": tasks,
"re": re,
"os": os,
"subprocess": subprocess,
"asyncio": asyncio
}
exec(compile(parsed, filename="<ast>", mode="exec"), env)
await eval(f"{fn_name}()", env)
if ctx.message is not None:await ctx.message.add_reaction("🆗")
except Exception as e:
await ctx.send([e])
if ctx.message is not None:await ctx.message.add_reaction("🆖")
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(description="JavaScriptのソースを評価するよ!")
async def evalnode(ctx, *, code):
cmd = code.strip("")
default = execjs.get()
try:
result = default.eval(cmd)
except Exception as er:
e = discord.Embed(title="Eval JavaScript Code",color=ctx.author.color)
e.add_field(name="入力",value=f'```js\n{str(cmd)}\n```',inline=False)
e.add_field(name="出力", value=f'```js\n{str(er).replace(token,"Token(Hide)")}\n```',inline=False)
await ctx.send(embed=e)
try:await ctx.message.add_reaction("🆖")
except:return
else:
e = discord.Embed(title="Eval JavaScript",color=ctx.author.color)
e.add_field(name="入力",value=f'```js\n{str(cmd)}\n```',inline=False)
e.add_field(name="出力", value=f'```js\n{str(result).replace(token,"Token(Hide)")}\n```',inline=False)
await ctx.send(embed=e)
try:await ctx.message.add_reaction("🆗")
except:return
@bot.command(description="コマンドプロンプトのコマンドを実行するよ!\n製作者しか使えないね!")
async def cmd(ctx, *, command):
try:
if ctx.author.id == 584008752005513216:
os.system(command)
e = discord.Embed(title="Command", description="操作は正常に終了しました。")
await ctx.send(embed=e)
else:
e = discord.Embed(title="Command", description="あなたはこのコマンドを実行する権限を持っていません。")
await ctx.send(embed=e)
except Exception as error:
e = discord.Embed(title="Command", description=f"Error\n```\n{error}\n```")
await ctx.send(embed=e)
@bot.command(aliases=["end","shutdown","close"],description="BOTをシャットダウンするよ!\n製作者しか使えないね!")
async def down(ctx):
if ctx.message.author.id == <PASSWORD>:
await ctx.send(embed=discord.Embed(title="シャットダウン", description="BOTをシャットダウンするよ~!", color=ctx.author.color))
await bot.close()
else:
await ctx.send(embed=discord.Embed(title="終了できないよ?", description="君霜月君なの~?", color=0xff0000))
@bot.command(aliases=["restart","run","reload"],description="BOTを再起動するよ!\n制作者しか使えないね!\n※何故か使えません。")
async def reboot(ctx):
if ctx.message.author.id == <PASSWORD>:
e = discord.Embed(title="再起動", description="BOTを再起動するよ~!", color=ctx.author.color)
await ctx.send(embed=e)
os.system("python YuMe.py")
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["changeact","cact"],description="BOTのアクティビティを変更するよ!\n制作者しか使えないね!")
async def changeactivity(ctx, status):
if ctx.message.author.id == <PASSWORD>:
await bot.change_presence(activity = discord.Game(name=f"{status}"),status=discord.Status.online)
e = discord.Embed(title="操作成功", description=f"アクティビティを変更したよ~\n現在のアクテビティ:{status}", color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["resetact","ract"],description="アクティビティをリセットするよ!\n制作者しか使えないね!")
async def resetactivity(ctx):
if ctx.message.author.id == <PASSWORD>:
await bot.change_presence(activity = discord.Game(name=f"y>help|Ver:{ver}|Release:{release}|{len(bot.guilds)}Guilds & {len(bot.users)}Users|discord.py rewrite"),status=discord.Status.online)
e = discord.Embed(title="操作成功", description="アクティビティをデフォルトに戻したよ~", color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["changesto","csto"],description="BOTのステータスをオンラインにするよ!\n制作者しか使えないね!")
async def chengestatusonline(ctx):
if ctx.message.author.id == <PASSWORD>:
await bot.change_presence(activity = discord.Game(name=f"y>help|Ver:{ver}|Release:{release}|{len(bot.guilds)}Guilds & {len(bot.users)}Users|discord.py rewrite"),status=discord.Status.online)
e = discord.Embed(title="操作成功", description="ステータスをオンラインにしたよ~", color=0x5eff00)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["changesti","csti"],description="BOTのステータスを退席中にするよ!\n制作者しか使えないね!")
async def changestatusidle(ctx):
if ctx.message.author.id == <PASSWORD>:
await bot.change_presence(activity = discord.Game(name=f"y>help|Ver:{ver}|Release:{release}|{len(bot.guilds)}Guilds & {len(bot.users)}Users|discord.py rewrite"),status=discord.Status.idle)
e = discord.Embed(title="操作成功", description="ステータスを退席中にしたよ~", color=0xff9500)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["changestd","cstd"],description="BOTのステータスを取り込み中にするよ!\n制作者しか使えないね!")
async def changestatusdnd(ctx):
if ctx.message.author.id == <PASSWORD>:
await bot.change_presence(activity = discord.Game(name=f"y>help|Ver:{ver}|Release:{release}|{len(bot.guilds)}Guilds & {len(bot.users)}Users|discord.py rewrite"),status=discord.Status.dnd)
e = discord.Embed(title="操作成功", description="ステータスを取り込み中にしたよ~", color=0xff0000)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー", description="あなたはこのコマンドを実行する権限を持っていません", color=ctx.author.color)
await ctx.send(embed=e)
##### BAN&KICK #####
@bot.command(description="指定したユーザーをBANするよ!\nユーザーをKICK出来る人のみ!")
async def kick(self, user: discord.User=None):
no = '👎'
ok = '👍'
# Validate the user argument before dereferencing user.id; the original checked roles first and crashed when no user was given.
if user is None:
e = discord.Embed(title="実行エラー",description="名前を指定してね~",color=0xff0000)
return await self.send(embed=e)
if self.guild.get_member(user.id).top_role < self.author.top_role and self.author.guild_permissions.kick_members:
embeds = discord.Embed(
title=f"**「@{user.name}」KICKしちゃう?**",color=0xC41415)
msg = await self.send(embed=embeds)
await msg.add_reaction(no)
await msg.add_reaction(ok)
try:
def predicate1(message,author):
def check(reaction,users):
if reaction.message.id != message.id or users == self.bot.user or author != users:
return False
if reaction.emoji == ok or reaction.emoji == no:
return True
return False
return check
react = await self.bot.wait_for('reaction_add',timeout=20,check=predicate1(msg,self.message.author))
if react[0].emoji == ok:
await self.guild.kick(user)
print(f"{user.name}が{self.message.author.name}によってKICKされたよ~。")
embed = discord.Embed(title=f"{user.name}はKICKされたよ~。",color=0xC41415)
embed.add_field(name="-------------------------", value=f"名前: **{user.name}**\nID: **{user.id}**", inline=False)
return await self.send(embed=embed)
elif react[0].emoji == no:
embeds = discord.Embed(
title=f"{user.name}はKICKされなかったよ~。",color=0x10cfee)
return await self.send(embed=embeds)
except asyncio.TimeoutError:
embeds = discord.Embed(
title=f"{user.name}はKICKされなかったよ~。",color=0x10cfee)
return await self.send(embed=embeds)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await self.send(embed=e)
@bot.command(description="指定したユーザーをBANするよ!\nユーザーをBAN出来る人のみ!")
async def ban(self, user: discord.User=None):
no = '👎'
ok = '👍'
# Same guard as in kick: validate the user argument before using user.id.
if user is None:
e = discord.Embed(title="実行エラー",description="名前を指定してね~",color=0xff0000)
return await self.send(embed=e)
if self.guild.get_member(user.id).top_role < self.author.top_role and self.author.guild_permissions.ban_members:
embeds = discord.Embed(
title=f"**「@{user.name}」BANしちゃう?**",color=0xC41415)
msg = await self.send(embed=embeds)
await msg.add_reaction(no)
await msg.add_reaction(ok)
try:
def predicate1(message,author):
def check(reaction,users):
if reaction.message.id != message.id or users == self.bot.user or author != users:
return False
if reaction.emoji == ok or reaction.emoji == no:
return True
return False
return check
react = await self.bot.wait_for('reaction_add',timeout=20,check=predicate1(msg,self.message.author))
if react[0].emoji == ok:
await self.guild.ban(user)
print(f"{user.name}が{self.message.author.name}によってBANされたよ~。")
embed = discord.Embed(title=f"{user.name}はBANされたよ~。",color=0xC41415)
embed.add_field(name="-------------------------", value=f"名前: **{user.name}**\nID: **{user.id}**", inline=False)
return await self.send(embed=embed)
elif react[0].emoji == no:
embeds = discord.Embed(
title=f"{user.name}はBANされなかったよ~。",color=0x10cfee)
return await self.send(embed=embeds)
except asyncio.TimeoutError:
embeds = discord.Embed(
title=f"{user.name}はBANされなかったよ~。",color=0x10cfee)
return await self.send(embed=embeds)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await self.send(embed=e)
##### Role commands #####
@bot.command(aliases=["radd"],description="指定したユーザーに役職を付与するよ!\n役職を管理できる人のみ!")
async def roleadd(ctx, member: discord.Member, role: discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
await member.add_roles(role)
e = discord.Embed(title="操作成功", description=f'{member.mention}さんに{role.mention}を付与したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["rre"],description="指定したユーザーから役職を削除するよ!\n役職を管理できる人のみ!")
async def roleremove(ctx, member: discord.Member, role: discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
await member.remove_roles(role)
e = discord.Embed(title="操作成功", description=f'{member.mention}さんから{role.mention}を剥奪したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["rdel"],description="役職を削除するよ!\n役職を管理できる人のみ!")
async def roledelete(ctx, role: discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
await role.delete()
e = discord.Embed(title="操作成功", description=f'{role.name}を削除したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["rcr"],description="役職を作成するよ!\n役職を管理できる人のみ!")
async def rolecreate(ctx, rolename):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
role = await ctx.guild.create_role(name=rolename)
e = discord.Embed(title="操作成功", description=f'{role.mention}を作成したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私は役職を作成する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["rusers","ru"],description="役職を持つメンバー一覧を表示するよ!")
async def roleusers(ctx,role:discord.Role):
e = discord.Embed(title=f"{role}を持つメンバー一覧",description=f"{role.members}",color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["rcol"],description="役職の色を変更するよ!\n役職を管理できる人のみ!\n※未実装")
async def rolecolor(ctx,role:discord.Role,color):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
await role.edit(color)
e = discord.Embed(title="操作成功", description=f'{role.mention}の色を変更したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私は役職の色を変更する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["roleallmemadd","rama"],description="指定した役職を全メンバーに付与するよ!\n役職を管理できる人のみ!\n※BOT含む")
async def roleallmembersadd(ctx, role:discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
embed = discord.Embed(title="操作開始", description=f"全員に{role}を付与するよ~", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
embed.set_footer(icon_url=ctx.author.avatar_url,text=ctx.author.name)
await ctx.send(embed=embed)
[await member.add_roles(role) for member in ctx.guild.members]
embed = discord.Embed(title="操作成功", description=f"{role}を全員に付与したよ~", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
embed.set_footer(icon_url=ctx.author.avatar_url,text=ctx.author.name)
await ctx.send(embed=embed)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["roleallmemremove","roleallmemr","ramr"],description="指定した役職を全メンバーから削除するよ!\n役職を管理できる人のみ!\n※BOT含む")
async def roleallmembersremove(ctx, role:discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
embed = discord.Embed(title="操作開始", description=f"全員から{role}を剥奪するよ~", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
embed.set_footer(icon_url=ctx.author.avatar_url,text=ctx.author.name)
await ctx.send(embed=embed)
[await member.remove_roles(role) for member in ctx.guild.members]
embed = discord.Embed(title="操作成功", description=f"{role}を全員から剥奪したよ~", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
embed.set_footer(icon_url=ctx.author.avatar_url,text=ctx.author.name)
await ctx.send(embed=embed)
else:
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
##### Channel & category commands #####
@bot.command(aliases=["textchannelcr","textchcr","tchc"],description="指定した名前のテキストチャンネルを作成するよ!\nチャンネルを管理できる人のみ!")
async def textchannelcreate(ctx,channel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
channel = await ctx.channel.category.create_text_channel(name=channel)
e = discord.Embed(title="操作成功", description=f'テキストチャンネル:{channel.mention}を作成したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はチャンネルを作成する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["textchanneldel","textchdel","tchd"],description="指定した名前のチャンネルを削除するよ!\nチャンネルを管理できる人のみ!")
async def textchanneldelete(ctx,channel:discord.TextChannel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
await channel.delete()
e = discord.Embed(title="操作成功", description=f'テキストチャンネル:{channel.name}を削除したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はチャンネルを削除する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["voicechannelcr","voicechcr","vchc"],description="指定した名前のボイスチャンネルを作成するよ!\nチャンネルを管理できる人のみ!")
async def voicechannelcreate(ctx,channel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
channel = await ctx.channel.category.create_voice_channel(name=channel)
e = discord.Embed(title="操作成功", description=f'ボイスチャンネル:{channel.name}を作成したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はチャンネルを作成する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["voicechanneldel","voicechdel","vchd"],description="指定した名前のボイスチャンネルを作成するよ!\nチャンネルを管理できる人のみ!")
async def voicechanneldelete(ctx,channel:discord.VoiceChannel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
await channel.delete()
e = discord.Embed(title="操作成功", description=f'ボイスチャンネル:{channel.name}を削除したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はチャンネルを削除する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["categorycr","ctc"],description="指定した名前のカテゴリーを作成するよ!\nチャンネルを管理できる人のみ!")
async def categorycreate(ctx,category):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
category = await ctx.guild.create_category(name=category)
e = discord.Embed(title="操作成功", description=f'カテゴリー:{category}を作成したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はカテゴリーを作成する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["categorydel","ctd"],description="指定した名前のカテゴリーを削除するよ!\nチャンネルを管理できる人のみ!")
async def categorydelete(ctx,category:discord.CategoryChannel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
await category.delete()
e = discord.Embed(title="操作成功", description=f'カテゴリー:{category}を削除したよ~',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はカテゴリーを削除する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["chedit","che"],description="コマンドを実行したチャンネル名を変更するよ!\nチャンネルを管理できる人のみ!")
async def channeledit(ctx,channelname):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
await ctx.channel.edit(name=f"{channelname}")
e = discord.Embed(title="操作成功", description=f'チャンネル名を変更したよ~\n現在のチャンネル名:{channelname}',color=ctx.author.color)
await ctx.send(embed=e)
else:
e = discord.Embed(title="実行エラー",description="私はチャンネル名を変更する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
##### Message commands #####
@bot.command(aliases=["cl","clean","purge"],description="指定した件数のメッセージを削除するよ!\nメッセージを管理できる人のみ!")
async def clear(ctx, num:int):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_messages) or ctx.guild.owner == ctx.author:
try:
await ctx.channel.purge(limit=num)
e = discord.Embed(title="メッセージ削除", description=f"{num}件のメッセージを削除したよ~",color=ctx.author.color)
l = await ctx.send(embed=e)
await asyncio.sleep(3)
await l.delete()
except IndexError:
e = discord.Embed(title="メッセージ削除", description="引数が不正です。",color=0xff0000)
await ctx.send(embed=e)
else:
e= discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["acl","allclean","allpurge","apu"],description="チャンネル内のメッセージを全て削除するよ!\nメッセージを管理できる人のみ!\n※誤爆注意")
async def allclear(ctx):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_messages) or ctx.guild.owner == ctx.author:
await ctx.channel.purge(limit=None)  # limit=None purges the full message history
e = discord.Embed(title="全メッセージ削除", description="チャンネルのメッセージを全て削除したよ~",color=ctx.author.color)
l = await ctx.send(embed=e)
await asyncio.sleep(3)
await l.delete()
else:
e= discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await ctx.send(embed=e)
@bot.command(aliases=["messagehis","mhis"],description="指定した数のメッセージの履歴を表示するよ!")
async def messagehistory(ctx, num:int):
async for i in ctx.channel.history(limit=num):
await ctx.send(f"{i.author.name}#{i.author.discriminator}: {i.content}")
##### Information commands #####
@bot.command(description="BOTの情報を表示するよ!")
async def info(ctx):
supporters = [345342072045174795,586157827400400907,631786733511376916,561000119495819290]
embed = discord.Embed(title=f"{bot.user.name}の情報", description="", color=ctx.author.color)
embed.set_thumbnail(url=ctx.bot.user.avatar_url)
embed.add_field(name="作成者", value=f"{bot.get_user(584008752005513216).name}", inline=False)
embed.add_field(name="サポーター", value="\n".join(bot.get_user(s).name for s in supporters), inline=False)
embed.add_field(name="導入サーバー数", value=f"{len(bot.guilds)}", inline=False)
embed.add_field(name="利用ユーザー数", value=f"{len(bot.users)}", inline=False)
embed.add_field(name="言語", value="Python", inline=False)
embed.add_field(name="バージョン情報", value=f"Ver:{ver}\nRelese:{release}\nStatus:{status}", inline=False)
embed.add_field(name="現在合計コマンド数", value=f"{len(bot.commands)}", inline=False)
embed.add_field(name="BOTを導入する", value=f"[結芽を導入](https://discordapp.com/api/oauth2/authorize?client_id=657936162966601740&permissions=8&scope=bot)|[参照BOT「{bot.get_user(641121614129266729).name}」](https://discordapp.com/oauth2/authorize?client_id=641121614129266729&permissions=2146958847&scope=bot)|[{bot.get_user(553841194699063319).name}](https://discordapp.com/oauth2/authorize?client_id=553841194699063319&scope=bot&permissions=775286087)", inline=False)
embed.add_field(name="参考サイト", value="[APIリファレンス](https://discordpy.readthedocs.io/en/latest/api.html)|[Python-izm 基礎編](https://www.python-izm.com/basic/)", inline=False)
await ctx.send(embed=embed)
@bot.command(aliases=["rolei","ri"],description="指定した役職の情報を表示するよ!\n※役職IDでやるのがいいよ!")
async def roleinfo(ctx, role:discord.Role):
e = discord.Embed(title="役職情報", description="",color=ctx.author.color)
e.add_field(name="名前", value=role.name)
e.add_field(name="ID", value=role.id)
e.add_field(name="所属サーバー", value=role.guild.name+f"({role.guild.id})")
e.add_field(name="他のメンバーと別に表示するか?", value=role.hoist)
e.add_field(name="その他サービスによって管理されているか?", value=role.managed)
e.add_field(name="メンション可能か?", value=role.mentionable)
e.add_field(name="役職順位(一番下を0としたとき)", value=role.position)
e.add_field(name="役職の色", value=role.color)
e.add_field(name="役職作成日(UTC)", value=role.created_at)
await ctx.send(embed=e)
@bot.command(aliases=["chinfo","chi","ci"],description="指定したチャンネルの情報を表示するよ!")
async def channelinfo(ctx, channelid=None):
if channelid == None:
e = discord.Embed(title="チャンネル情報", description="")
e.add_field(name="チャンネル名", value=ctx.channel.name)
e.add_field(name="チャンネルID", value=ctx.channel.id)
e.add_field(name="所属サーバー", value=ctx.channel.guild.name+f"({ctx.channel.guild.id})")
e.add_field(name="トピック", value=ctx.channel.topic)
await ctx.send(embed=e)
else:
try:
await bot.wait_until_ready()
channel = bot.get_channel(channelid)
e = discord.Embed(title="チャンネル情報", description="")
e.add_field(name="チャンネル名", value=channel.name)
e.add_field(name="チャンネルID", value=channel.id)
e.add_field(name="所属サーバー", value=channel.guild.name+f"({channel.guild.id})")
e.add_field(name="トピック", value=channel.topic)
await ctx.send(embed=e)
except Exception:
try:
await bot.wait_until_ready()
channel = await bot.fetch_channel(channelid)
e = discord.Embed(title="チャンネル情報", description="")
e.add_field(name="チャンネル名", value=channel.name)
e.add_field(name="チャンネルID", value=channel.id)
e.add_field(name="所属サーバー", value=channel.guild.name+f"({channel.guild.id})")
e.add_field(name="トピック", value=channel.topic)
await ctx.send(embed=e)
except discord.NotFound:
e = discord.Embed(title="チャンネル情報", description="指定されたチャンネルは存在しません。")
await ctx.send(embed=e)
except discord.Forbidden:
e = discord.Embed(title="チャンネル情報", description="指定されたチャンネルへアクセスできませんでした。")
await ctx.send(embed=e)
@bot.command(aliases=["userse","use"],description="指定したユーザーの情報を表示するよ!\nサーバーに居ない人の情報も検索できるね!\nでもID限定、表示できる情報がuserinfoより少ないよ")
async def userserch(ctx, user_id=""):
try:user = await bot.fetch_user(int(user_id))
except:await ctx.send(embed=discord.Embed(description="ユーザーが見つかりませんでした…。",color=ctx.author.color))
else:
member = discord.utils.get(bot.get_all_members(),id=int(user_id))
g_m = discord.utils.get(ctx.guild.members, id=int(user_id))
embed = discord.Embed(title=f"{user.name}さんの情報",color=col)
embed.set_thumbnail(url=f'{user.avatar_url_as(static_format="png")}')
embed.add_field(name="名前#タグ",value=f"{user}", inline=False)
embed.add_field(name="ID",value=f"{user.id}", inline=False)
embed.add_field(name="BOT?",value=f"{user.bot}", inline=False)
if g_m is not None:embed.add_field(name="サーバー上の名前",value=f"{member.nick}", inline=False)
if member is not None:
embed.add_field(name="アクティビティ",value=f"{member.activity}", inline=False)
embed.add_field(name="ステータス",value=f"{member.status}", inline=False)
embed.add_field(name="アカウント作成日",value=f"{user.created_at}", inline=False)
await ctx.send(embed=embed)
@bot.command(aliases=["useri","ui"],description="指定したユーザーの情報を表示するよ!")
async def userinfo(ctx, user: discord.Member):
embed = discord.Embed(title=f"{user.name}さんの情報",color=ctx.author.color)
embed.set_thumbnail(url=f'{user.avatar_url_as(static_format="png")}')
embed.add_field(name="名前#タグ",value=f"{user}")
embed.add_field(name="ID",value=f"{user.id}")
embed.add_field(name="ステータス",value=f"{user.status}")
embed.add_field(name="BOT?",value=f"{user.bot}")
if user.activity != None:embed.add_field(name="アクティビティ", value=user.activity.name)
embed.add_field(name="サーバー上の名前",value=f"{user.nick}")
embed.add_field(name="サーバー参加時間",value=f"{user.joined_at}")
embed.add_field(name="アカウント作成日",value=f"{user.created_at}")
embed.add_field(name="権限",value=f'`{",".join([row[0] for row in list(user.guild_permissions) if row[1]])}`', inline=False)
await ctx.send(embed=embed)
@bot.command(aliases=["serveri","si"],description="指定したサーバーの情報を表示するよ!\n※サーバーIDでやってね!")
async def serverinfo(ctx,guild_id=None):
if guild_id == None:
guild = ctx.guild
else:
guild = bot.get_guild(int(guild_id))
ch_tcount =len(guild.text_channels)
ch_vcount =len(guild.voice_channels)
ch_count =len(guild.channels)
kt_count =len(guild.categories)
embed = discord.Embed(title=f"{guild.name}の情報",color=ctx.author.color)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.add_field(name="名前",value=f"{guild.name}",inline=False)
embed.add_field(name="ID",value=f"{guild.id}",inline=False)
embed.add_field(name="サーバー地域",value=f"{guild.region}",inline=False)
embed.add_field(name="作成日",value=f"{guild.created_at}",inline=False)
embed.add_field(name="オーナー",value=f"{guild.owner.name}",inline=False)
embed.add_field(name="テキストチャンネル数",value=f"{ch_tcount}")
embed.add_field(name="ボイスチャンネル数",value=f"{ch_vcount}")
embed.add_field(name="カテゴリー数",value=f"{kt_count}")
embed.add_field(name="合計チャンネル数(カテゴリー含む)",value=f"{ch_count}")
embed.add_field(name="サーバー承認レベル",value=f"{guild.mfa_level}")
embed.add_field(name="サーバー検証レベル",value=f"{guild.verification_level}")
embed.add_field(name="サーバーブーストレベル",value=f"{guild.premium_tier}")
embed.add_field(name="サーバーをブーストしたユーザー数",value=f"{guild.premium_subscription_count}")
embed.add_field(name="サーバーは大きい?",value=f"{guild.large}")
embed.set_footer(text="サーバー大きさ基準:250人以上")
await ctx.send(embed=embed)
@bot.command(aliases=["joinserverl","joins"],description="Botが導入されているサーバーを表示するよ!")
async def joinserverlist(ctx):
await ctx.send(embed=discord.Embed(description=",".join([guild.name for guild in bot.guilds])))
##### General user commands #####
@bot.command(description="BOTの反応速度を測定するよ!")
async def ping(ctx):
before = time.monotonic()
msg = await ctx.send(
embed=discord.Embed(
title="結芽BOTの反応速度", description="計測中・・・", color=0x0080FF
)
)
return await msg.edit(
embed=discord.Embed(
title="結芽BOTの反応速度", description=f"Pingを取得したよ~\nPong!`{int((time.monotonic() - before) * 1000)}ms`", color=ctx.author.color
)
)
##### Messaging #####
@bot.command(description="指定したユーザーに結芽からDMを送信するよ!")
async def senddm(ctx, userid, title, desc):
try:
user = await bot.fetch_user(userid)
e = discord.Embed(title=title, description=desc)
e.set_author(name=ctx.author.name)
await user.send(embed=e)
c = discord.Embed(title="Senddm", description=f"{user.mention}にDMを送信しました。")
await ctx.send(embed=c)
except discord.NotFound:
e = discord.Embed(title="Senddm", description="指定されたユーザーは存在しません")
except discord.Forbidden:
e = discord.Embed(title="Senddm", description="指定されたユーザーにDMを送信できませんでした。")
await ctx.send(embed=e)
@bot.command(description="指定した文を送信するよ!")
async def say(ctx, message=""):
await ctx.send(message)
await ctx.message.delete()
@bot.command(description="指定したチャンネルに文を送信するよ!")
async def send(ctx, ch:discord.TextChannel, txt):
try:
await ch.send(txt)
e = discord.Embed(title="Send", description=f"{ch.mention}に{txt}を送信しました。")
await ctx.send(embed=e)
except discord.NotFound:
e = discord.Embed(title="Send", description="指定されたチャンネルが存在しません")
await ctx.send(embed=e)
except discord.Forbidden:
e = discord.Embed(title="Send", description="指定されたチャンネルにアクセスできません")
await ctx.send(embed=e)
##### Calculation commands #####
@bot.command(description="足し算をするよ!")
async def plus(ctx, tasi1, tasi2):
keisantyuu1 = int(tasi1)
keisantyuu2 = int(tasi2)
kekkadayo = keisantyuu1 + keisantyuu2
await ctx.send(kekkadayo)
@bot.command(description="引き算をするよ!")
async def minus(ctx, tasi1, tasi2):
keisantyuu1 = int(tasi1)
keisantyuu2 = int(tasi2)
kekkadayo = keisantyuu1 - keisantyuu2
await ctx.send(kekkadayo)
@bot.command(description="割り算をするよ!")
async def dby(ctx, tasi1, tasi2):
keisantyuu1 = int(tasi1)
keisantyuu2 = int(tasi2)
kekkadayo = keisantyuu1 / keisantyuu2
await ctx.send(kekkadayo)
@bot.command(description="掛け算をするよ!")
async def times(ctx, tasi1, tasi2):
keisantyuu1 = int(tasi1)
keisantyuu2 = int(tasi2)
kekkadayo = keisantyuu1 * keisantyuu2
await ctx.send(kekkadayo)
##### Fun commands #####
@bot.command(aliases=["mkembed"],description="embed(埋め込み表示)を作成するよ!")
async def makeembed(ctx, title, *, word):
e = discord.Embed(title=title, description=word, color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(aliases=["randomnum","rnum"],description="ランダムな数(乱数)を出すよ!")
async def randomnumber(ctx, startnum:int, endnum:int):
randomnumgen = random.randint(startnum, endnum)
await ctx.send(randomnumgen)
@bot.command(description="サイコロを振るよ!")
async def dice(ctx):
dicenum = random.randint(0, 6)
await ctx.send(dicenum)
@bot.command(name="time",description="現在時刻を表示するよ!")
async def time_(ctx):
import locale
locale.setlocale(locale.LC_CTYPE, "English_United States.932")
await ctx.send(datetime.datetime.now().strftime("%Y年%m月%d日 %H時%M分%S秒"))
@bot.command(description="おみくじを引くよ!")
async def omikuji(ctx):
embed = discord.Embed(title="おみくじ", description=f"{ctx.author.mention}さんの今日の運勢は!\nジャカジャカジャカジャカジャカ…ジャン!",color=0x5dc7fc)
embed.set_thumbnail(url=ctx.author.avatar_url)
embed.add_field(name="[運勢] ", value=random.choice(('福沢諭吉\nお~!福沢ゆきt・・・え?(笑)','大吉!\nすごいね!大吉だよ?!', '吉\nいいね~!', '凶\nそんなこともあるさ!', '大凶\nあ、ありゃりゃ・・・')), inline=False)
await ctx.send(embed=embed)
@bot.command(description="投票を作成するよ!")
async def poll(ctx,*content):
if len(content) == 1:
msg = await ctx.send(content[:1][0])
[await msg.add_reaction(emoji) for emoji in ["👍","👎"]]
elif len(content) > 1:
title = content[:1][0]
answers = content[1:]
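# chr(127462) is U+1F1E6 (regional indicator 'A'), so this yields 🇦, 🇧, 🇨, ... one emoji per answer.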
emojis = [chr(127462 + i) for i in range(len(answers))]
answer = "\n".join(emoji + answer for emoji,answer in zip(emojis,answers))
col = random.randint(0, 0xFFFFFF)
embed = discord.Embed(title=title,description=answer,color=col,timestamp=datetime.datetime.utcnow())
embed.set_footer(icon_url=ctx.author.avatar_url,text=ctx.author.name)
msg = await ctx.send(embed=embed)
[await msg.add_reaction(emoji) for emoji in emojis]
##### Reporting commands #####
@bot.command(description="BOTの感想を送るよ!")
async def feedback(ctx, text):
await bot.wait_until_ready()
ch = bot.get_channel(675971969816068107)
r = discord.Embed(title="FeedBack", description=text)
r.set_author(name=f"{ctx.author.name}#{ctx.author.discriminator} / {ctx.author.id}", icon_url=ctx.author.avatar_url)
await ch.send(embed=r)
e = discord.Embed(title="FeedBack", description="Botの感想を送信しました!ご利用ありがとうございます!", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(description="BOTのバグを報告するよ!")
async def report(ctx, text):
await bot.wait_until_ready()
ch = bot.get_channel(675972021166931968)
r = discord.Embed(title="Report", description=text)
r.set_author(name=f"{ctx.author.name}#{ctx.author.discriminator} / {ctx.author.id}", icon_url=ctx.author.avatar_url)
await ch.send(embed=r)
e = discord.Embed(title="Report", description="Botのバグを報告しました。", color=ctx.author.color)
await ctx.send(embed=e)
@bot.command(description="BOTのリクエストを送るよ!")
async def request(ctx, text):
await bot.wait_until_ready()
ch = bot.get_channel(675972112636444682)
r = discord.Embed(title="Request", description=text)
r.set_author(name=f"{ctx.author.name}#{ctx.author.discriminator} / {ctx.author.id}", icon_url=ctx.author.avatar_url)
await ch.send(embed=r)
e = discord.Embed(title="Request", description="リクエストを送信しました。Botの開発者が詳細を訪ねるため、DMに行く可能性があります。", color=ctx.author.color)
await ctx.send(embed=e)
##### Logging #####
@bot.event
async def on_member_join(member):
ch = bot.get_channel(675930097328324667)
e = discord.Embed(title="入室",description=f"{member}さんが、{member.guild}に参加しました。",color=col,timestamp=datetime.datetime.utcnow())
e.set_thumbnail(url=f'{member.avatar_url_as(static_format="png")}')
await ch.send(embed=e)
print(f'{member}さんが{member.guild}に参加しました。')
@bot.event
async def on_member_remove(member):
ch = bot.get_channel(675930097328324667)
e = discord.Embed(title="退出",description=f"{member}さんが、{member.guild}から退出しました。",color=col,timestamp=datetime.datetime.utcnow())
e.set_thumbnail(url=f'{member.avatar_url_as(static_format="png")}')
await ch.send(embed=e)
print(f'{member}さんが{member.guild}から退出しました。')
##### Error handling #####
@bot.event
async def on_command_error(context,exception):
if isinstance(exception, commands.CommandNotFound):
word = context.message.content.split(" ")[0][len("y>"):]  # drop the "y>" prefix instead of strip(), which also removes leading/trailing 'y' and '>'
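# Suggest existing commands whose name contains, or is contained in, the mistyped word.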
des = ",".join(c.name for c in bot.commands if word in c.name or c.name in word)
embed = discord.Embed(title="コマンドエラー",description=f"{context.author.name}さん!`{context.message.content}`っていうコマンドは無いよ!\n`y>help`で確認してね!\nもしかして:`{des}`", color=0xff0000)
await context.send(embed=embed)
elif isinstance(exception, commands.MissingRequiredArgument):
e = discord.Embed(title="コマンドエラー",description="パラメーターが不足してるみたい・・・", color=0xff0000)
await context.send(embed=e)
elif isinstance(exception,commands.NotOwner):
e = discord.Embed(title="実行エラー",description="君はコマンドを実行する権限を持ってないよ~",color=0xff0000)
await context.send(embed=e)
else:
e = discord.Embed(title="例外発生", description=f"例外が発生しました。\n```{exception}```\n", color=0xff0000)
print (f"{exception}")
await context.send(embed=e)
ch = 684612890489257984
embed = discord.Embed(title="エラー情報", description=f"\n```{exception}```", color=0xff0000)
embed.add_field(name="発生サーバー名", value=context.guild.name)
embed.add_field(name="発生ユーザー名", value=context.author.name)
embed.add_field(name="発生コマンド", value=context.message.content)
await bot.get_channel(ch).send(embed=embed)
##### Miscellaneous #####
@bot.command(description="最近の地震情報を表示するよ!")
async def jishin(ctx):
er = e()
embed = discord.Embed(title='**地震情報**', description='', color=er['color'])
embed.set_thumbnail(url=er['icon'])
embed.add_field(name='発生時刻', value=er['time'], inline=True)
embed.add_field(name='震源地', value=er['epicenter'], inline=True)
embed.add_field(name='最大震度', value=er['intensity'], inline=True)
embed.add_field(name='マグニチュード', value=er['magnitude'], inline=True)
embed.add_field(name='震度1以上を観測した地域', value=er['e_1'], inline=False)
embed.set_image(url=er['map'])
await ctx.channel.send(embed=embed)
def e():
xml_data_module = requests.get('https://www3.nhk.or.jp/sokuho/jishin/data/JishinReport.xml')
xml_data_module.encoding = "Shift_JIS"
root = ET.fromstring(xml_data_module.text)
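# JishinReport.xml is an index of individual report URLs; take the first <item> (presumably the latest report).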
for item in root.iter('item'):
deta_url = (item.attrib['url'])
break
deta = requests.get(deta_url)
deta.encoding = "Shift_JIS"
root = ET.fromstring(deta.text)
e_1 = ''
for Earthquake in root.iter('Earthquake'):
time = (Earthquake.attrib['Time'])
Intensity = (Earthquake.attrib['Intensity'])
Epicenter = (Earthquake.attrib['Epicenter'])
Magnitude = (Earthquake.attrib['Magnitude'])
Depth = (Earthquake.attrib['Depth'])
map_url = 'https://www3.nhk.or.jp/sokuho/jishin/'
count = 1
for Area in root.iter('Area'):
e_1 += '\n' + Area.attrib['Name']
if count == 10:
e_1 += '\n他'
break
count = count + 1
for Detail in root.iter('Detail'):
map = map_url + Detail.text
edic = {'time': time, 'epicenter': Epicenter, "intensity": Intensity, "depth": Depth, "magnitude": Magnitude, "map": map, "icon": eicon(Intensity), "color": eicolor(Intensity), 'e_1': e_1}
return edic
def eicon(i):
if i == '1':
return('https://i.imgur.com/yalXlue.png')
elif i == '2':
return('https://i.imgur.com/zPSFvj6.png')
elif i == '3':
return('https://i.imgur.com/1DVoItF.png')
elif i == '4':
return("https://i.imgur.com/NqC3CE0.png")
elif i == '5-':
return("https://i.imgur.com/UlFLa3G.png")
elif i == '5+':
return("https://i.imgur.com/hExQwf2.png")
elif i == '6-':
return("https://i.imgur.com/p9RrO96.png")
elif i == '6+':
return("https://i.imgur.com/pNaFJ2Y.png")
elif i == '7':
return("https://i.imgur.com/ZoOhL4v.png")
def eicolor(i):
if i == '1':
return(0x51b3fc)
elif i == '2':
return(0x7dd45a)
elif i == '3':
return(0xf0ed7e)
elif i == '4':
return(0xfa782c)
elif i == '5-':
return(0xb30f20)
elif i == '5+':
return(0xb30f20)
elif i == '6-':
return(0xffcdde)
elif i == '6+':
return(0xffcdde)
elif i == '7':
return(0xffff6c)
##### Help #####
@bot.command(description="制作者用コマンドヘルプを表示するよ!")
async def helpowner(ctx):
e = discord.Embed(title="Command Help Owner - コマンドヘルプ",description="コマンドの先頭には、必ず`y>`がいるよ~!",color=0x5dc7fc)
e.add_field(name="Debug commands/デバッグコマンド",value="`reboot`,`down`,`cmd`,`jsk`",inline=False)
e.add_field(name="Status&Activity commands/ステータス&アクテビティコマンド",value="`changeactivity`,`resetactivity`,`changestatusonline`,`changestatusidle`,`changestatusdnd`",inline=False)
await ctx.send(embed=e)
@bot.command(description="コマンドヘルプを表示するよ!\n引数はあってもなくてもOK!")
async def help(ctx,name=None):
if name is not None:
if [c for c in bot.commands if c.name == name or name in c.aliases]:
command = [c for c in bot.commands if c.name == name or name in c.aliases][0]
embed = discord.Embed(title=f"Command Help - 『{command.name}』",description=command.description,color=0x5dc7fc)
embed.add_field(name="使い方",value=f"y>{command.name} {((' '.join(f'[{c}]' for c in command.clean_params.keys())) if len(command.clean_params) > 0 else '')}")
if command.aliases:embed.add_field(name="エイリアスor短縮形",value=",".join(c for c in command.aliases))
else:embed.add_field(name="エイリアスor短縮形",value="エイリアスはないよ")
await ctx.send(embed=embed)
else:
e = discord.Embed(title="Command Help - コマンドヘルプ",description="コマンドの先頭には、必ず`y>`がいるよ~!",color=0x5dc7fc)
e.add_field(name="Bot information commands/ボット情報系コマンド",value="`info`,`help`,`ping`,`joinserverlist`",inline=False)
e.add_field(name="Normal user commands/一般ユーザー向けコマンド",value="Message:`makeembed`,`say`,`send`,`senddm`\nVote:`poll`\nInfo:`time`\nPlay:`randomnumber`,`dice`,`omikuji`\nCalculation:`plus`,`minus`,`times`,`dby`",inline=False)
e.add_field(name="Report commands/報告コマンド",value="`feedback`,`report`,`request`",inline=False)
e.add_field(name="Information commands/情報コマンド",value="`userinfo`,`userserch`,`serverinfo`,`roleinfo`,`channelinfo`",inline=False)
e.add_field(name="Role commands/役職コマンド",value="`rolecreat`,`roledelete`,`roleadd`,`roleremove`,~~`roleusers`~~,~~`rolecolor`~~,`roleallmembersadd`,`roleallmembersremove`",inline=False)
e.add_field(name="Server management commands/サーバー管理コマンド",value="User:`ban`,`kick`\nMessage:`clear`,`allclear`,`messagehistory`\nChannel:`textchannelcreate`,`textchanneldelete`,`voicechannelcreate`,`voicechanneldelete`,`categorycreate`,`categorydelete`,`channeledit`",inline=False)
e.add_field(name="Other commands/その他のコマンド",value=f"`jishin`,`eval`,`evalnode`\n\n[導入はこちら](https://discordapp.com/api/oauth2/authorize?client_id=657936162966601740&permissions=8&scope=bot)|[ヘルプ参照BOT {bot.get_user(553841194699063319).name}](https://discordapp.com/oauth2/authorize?client_id=553841194699063319&scope=bot&permissions=775286087)",inline=False)
e.add_field(name="各コマンドヘルプのやり方",value="helpの後に引数としてコマンド名を入力すると、コマンドヘルプが表示されます。\n例:`y>help info`")
await ctx.send(embed=e)
bot.run("token")
# Repository: wesleyegberto/machine-learning-courses
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning using Logistic Regression
#
# Classifier model that estimates an applicant's probability of admission based on the scores from two exams.
# In[1]:
import pandas as pd
import numpy as np
# import pandas_profiling
import seaborn as sns
import matplotlib.pyplot as plt
# In[2]:
data = pd.read_csv('ex2data1.txt', names = ['score1', 'score2', 'admitted'])
data = data.astype('float128')
print('Profiling Data')
print(data.info())
print(data.head())
# profile = pandas_profiling.ProfileReport(df)
# profile.to_file(outputfile="output.html")
# In[3]:
x = np.array(data[['score1', 'score2']]) # training set
y = np.array(data['admitted']) # labels
[m, n] = np.shape(x)
# ## ==================== Part 1: Plotting ====================
# In[4]:
print('Plotting data with "x" indicating (y = 1) examples and "o" indicating (y = 0) examples.')
sns.scatterplot('score1', 'score2', hue='admitted', style='admitted', data=data)
# ## =========== Part 2: Mapping Features ============
#
# One way to fit the data better is to create more features from each data point. We will map the features into all polynomial terms of $x_1$ and $x_2$ up to the sixth power.
#
# As a result of this mapping, our vector of two features (the two exam scores) has been transformed into a 28-dimensional vector. A logistic regression classifier trained on this higher-dimensional feature vector will have a more complex decision boundary and will appear nonlinear when drawn in our 2-dimensional plot.
#
# While the feature mapping allows us to build a more expressive classifier, it is also more susceptible to overfitting. To avoid that, we will implement regularized logistic regression to fit the data and combat the overfitting problem.
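# The mapping itself is not defined at this point in the notebook. A minimal sketch of such a helper
# (hypothetical name `mapFeature`, bias column included, not used by the cells below) could look like:
def mapFeature(x1, x2, degree=6):
    """Map two feature vectors to all polynomial terms x1^(i-j) * x2^j up to `degree` (28 columns for degree 6)."""
    x1 = np.atleast_1d(x1).astype(float)
    x2 = np.atleast_1d(x2).astype(float)
    out = [np.ones(x1.shape[0])]  # bias / intercept column
    for i in range(1, degree + 1):
        for j in range(i + 1):
            out.append((x1 ** (i - j)) * (x2 ** j))
    return np.column_stack(out)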
# In[5]:
"""
Normalizes the features in X
returns a normalized version of X where the mean value of each feature is 0 and the standard deviation is 1.
This is often a good preprocessing step to do when working with learning algorithms.
First, for each feature dimension, compute the mean of the feature and subtract it from the dataset,
storing the mean value in mu. Next, compute the standard deviation of each feature and divide
each feature by its standard deviation, storing the standard deviation in sigma.
"""
def featureNormalize(X):
X_norm = np.zeros(np.shape(X))
qty_features = np.shape(X)[1]
mu = np.zeros(qty_features)
sigma = np.zeros(qty_features)
for i in range(qty_features):
mu[i] = np.mean(X[:,i])
X_norm[:,i] = X[:,i] - mu[i]
# by default np.std calculates the population std (ddof=0); here we want the sample std, as done in Octave
sigma[i] = np.std(X_norm[:,i], ddof=1)
X_norm[:,i] = X_norm[:,i] / sigma[i]
return X_norm, mu, sigma
# In[6]:
# Testing Feature Normalization
X_ = np.array([
[2104, 3],
[1600, 3],
[2400, 3],
[1416, 2],
[3000, 4]
])
X_n, mu_, sigma_ = featureNormalize(X_)
print("X Norm", X_n)
print("mu", mu_) # [2104. 3.]
print("sigma", sigma_) # [635.96226303 0.70710678]
# In[7]:
# Scale features and set them to zero mean
print('Normalizing Features ...\n')
# We normalize the features so the sigmoid does not saturate and produce log(0) in the cost
X, mu, sigma = featureNormalize(x)
print('Normalized data')
X[:5]
# In[8]:
# Add a column of ones to X to facilitate the manipulation
X = np.column_stack((np.ones(m), X))
X[:5]
# ## ============ Part 3: Compute Cost and Gradient ============
# ### Activation Function
# We will use the sigmoid function as our activation function.
#
# $g(z) = \frac{1}{1 + e^{-z}}$
#
# When:
#
# $z = 0$ then $g = 0.5$
#
# $z \rightarrow +\infty$ then $g \rightarrow +1$
#
# $z \rightarrow -\infty$ then $g \rightarrow 0$
#
# In[9]:
def sigmoid(z):
# return np.divide(np.float128(1.0), (np.float128(1.0) + np.exp(-z)), dtype=np.float128)
return 1 / (1 + np.exp(-z))
# In[10]:
print(sigmoid(-5)) # ~= 0.0066929
print(sigmoid(0)) # ~= 0.5
print(sigmoid(5)) # ~= 0.99331
print(sigmoid(np.array([4, 5, 6]))) # ~= [0.98201 0.99331 0.99753]
print(sigmoid(np.array([-1, 0, 1]))) # ~= [0.26894 0.50000 0.73106]
print(sigmoid(np.array([[4, 5, 6], [-1, 0, 1]])))
# Add a column of ones to X to facilitate the manipulation.
#
# Each row is a input with the following format:
#
# $X[0] = [ x_0, x_1, x_2 ]$ where $x_0 = 1$
# ### Hypothesis Function
# Function that defines our logistic model.
#
# Definition:
#
# $h_\theta(x) = g(\theta_0 + \theta_1 * x_1 + \theta_2 * x_2)$
#
# Vectorial form:
#
# $h_\theta(x) = g(\theta^{T} * x)$
#
# where:
#
# $g$ is the sigmoid function; $x = [x_0, x_1, x_2]$; $x_0 = 1$ and $\theta = [\theta_0, \theta_1, \theta_2]$
# In[11]:
def hypothesis(X, theta):
# z = np.array([np.dot(xi, theta) for xi in X])
z = X.dot(theta)
return sigmoid(z)
# ### Logistic Cost Function
# Computes the logistic cost of using theta as the parameter for logistic regression to fit the data points in X and y.
#
# Function cost:
#
# $ J(\theta) = \frac{1}{m} \sum_{i=1}^{m} [ -y^{(i)} log(h_\theta(x^{(i)})) - (1 - y^{(i)}) log(1 - h_\theta(x^{(i)})) ]$
#
# Vectorial form:
#
# $ J(\theta) = \frac{1}{m} * [-\vec{y}^{T} \cdot log(h_\theta(\vec{x})) - (1 - \vec{y})^{T} \cdot log(1 - h_\theta(\vec{x}))] $
#
# If at any point $h_\theta(x^{(i)})$ saturates at 0 or 1, one of the $log$ terms blows up (log of 0), which is a sign that we need to normalize the features.
# In[12]:
"""
Inputs:
X = [
[ x_0, x_1, ..., x_n ]
]
y = [
[ y_0 ]
...
[ y_m ]
]
theta = [ theta_0, ..., theta_n ]
"""
def computeLogisticCostIterative(X, y, theta):
m = len(y)
errorSum = 0 # total error
for i in range(m):
h = hypothesis(X[i], theta)
errorSum = errorSum + (-y[i] * np.log(h) - (1 - y[i]) * np.log(1 - h))
return errorSum / m
# Better way using Matrix/Vectors
def computeLogisticCostMatrix(X, y, theta):
m = len(y)
h = hypothesis(X, theta)
return (1 / m) * (-y.T.dot(np.log(h)) - (1 - y).T.dot(np.log(1 - h)))
# In[13]:
print('Testing cost function')
X_ = np.array([
[1, 8, 1, 6],
[1, 3, 5, 7],
[1, 4, 9, 2]
]);
y_ = np.array([1, 0, 1]);
theta_ = np.array([-2, -1, 1, 2]);
print('J ~= 4.6832 ->', computeLogisticCostIterative(X_, y_, theta_))
print('J ~= 4.6832 ->', computeLogisticCostMatrix(X_, y_, theta_))
# In[14]:
# Initialize fitting parameters
initial_theta = np.zeros([n + 1])
#initial_theta = np.array([0.1, 12.00921659, 11.26284221], dtype=np.float128)
cost = computeLogisticCostMatrix(X, y, initial_theta)
print('Cost at initial theta (zeros): %f', cost)
print('Expected cost (approx): 0.693')
# ### Running Gradient Descent
# Performs gradient descent to learn $\theta$ parameters.
#
# It returns an array with $\theta$ containing the values found by taking num_iters gradient steps with learning rate alpha.
#
# It also returns an array with the history of $J(\theta)$ to be plotted.
#
# Step to update each parameter:
#
# $\theta_j := \theta_j - \alpha * \frac{\partial J}{\partial \theta_j} $
#
# Where:
#
# $\frac{\partial J}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} [( h_\theta(x^{(i)}) - y^{(i)}) * x^{(i)}]$
#
# Matrix form:
#
# $ \frac{\partial J}{\partial \theta} = \frac{1}{m} X^{T} ( h_\theta(X) - \vec{y}) $
# In[15]:
def logisticGradientDescent(X, y, theta, alpha, num_iters):
m = len(y)
J_history = np.zeros(num_iters)
for i in range(num_iters):
h_theta = hypothesis(X, theta)
# gradient of our cost function
nabla = (1 / m) * X.T.dot(h_theta - y)
# print (nabla) # first iteration: [ 0.31722, 0.87232, 1.64812, 2.23787 ]
theta = theta - alpha * nabla
# Save the cost J in every iteration
J_history[i] = computeLogisticCostIterative(X, y, theta)
return theta, J_history
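# Optional sanity check (not part of the original notebook): compare the analytic gradient used in
# logisticGradientDescent with a centered finite-difference approximation of the cost. The helper
# name `num_grad_check` is illustrative only.
def num_grad_check(X, y, theta, eps=1e-4):
    m = len(y)
    analytic = (1 / m) * X.T.dot(hypothesis(X, theta) - y)
    numeric = np.zeros_like(theta, dtype=float)
    for j in range(len(theta)):
        t_plus = theta.astype(float)
        t_minus = theta.astype(float)
        t_plus[j] += eps
        t_minus[j] -= eps
        numeric[j] = (computeLogisticCostMatrix(X, y, t_plus) - computeLogisticCostMatrix(X, y, t_minus)) / (2 * eps)
    return analytic, numeric
# Both vectors should agree to several decimal places on the small test data defined earlier, e.g.:
# print(num_grad_check(X_, y_, theta_))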
# In[16]:
# ~= [-2, -1, 1, 2], [4.6832]
logisticGradientDescent(X_, y_, theta_, 0, 1)
# In[17]:
num_iters = 50; # with alpha = 0.01 we should raise the number of iterations
alphas = [0.01, 0.03, 0.1, 0.3, 1, 3, 10]
colors = ['b', 'r', 'y', 'black', 'brown', 'gray', 'purple']; # one color per alpha so zip does not drop the last alpha
# In[18]:
# To plot the J(theta) using different alphas
fig, ax = plt.subplots()
iterations = range(num_iters)
print('Running gradient descent ...\n')
for alpha, color in zip(alphas, colors):
theta = np.zeros([n + 1]) # reset the theta to the current alpha
theta, J_history = logisticGradientDescent(X, y, theta, alpha, num_iters)
# print('alpha ', alpha, ' found theta ', theta)
plt.plot(iterations, J_history, color=color, label='alpha %.2f' % alpha)
plt.legend(loc='best', ncol=2)
# In[19]:
# Display gradient descent's result
print('Theta computed from gradient descent:');
# Expected ~= [ 1.426919, 3.391232, 3.142802]
print(theta);
# In[20]:
predict = [45, 85]
# normalize and add the x_0 = 1
predict_norm = np.column_stack((np.ones(1), [((predict - mu) / sigma)]))
prob = hypothesis(predict_norm, theta)
print('For a student with scores 45 and 85, we predict an admission probability of', prob)
print('Expected value: 0.732');
# Compute accuracy on our training set
p = hypothesis(X, theta)
print('Train Accuracy: ', ((p >= 0.5) == y).mean() * 100) # fraction of correct predictions, as a percentage
print('Expected accuracy (approx): 89.0\n')
# ## ==================== Part 4: Plotting the Decision Boundary ====================
# In[21]:
# Calculate the decision boundary
score1_min = x[:,0].min()
score1_max = x[:,0].max()
score2_min = x[:,1].min()
score2_max = x[:,1].max()
xgrid = np.arange(score1_min, score1_max, (score1_max - score1_min) / 100)
ygrid = np.arange(score2_min, score2_max, (score2_max - score2_min) / 100)
X,Y = np.meshgrid(xgrid, ygrid) # grid of points
gridPoints = np.c_[X.ravel(), Y.ravel()]
predictions = np.zeros(len(gridPoints))
for i in range(len(gridPoints)):
point = gridPoints[i]
predict_norm = np.column_stack((np.ones(1), [((point - mu) / sigma)]))
prob = hypothesis(predict_norm, theta)
predictions[i] = 1 if prob >= 0.5 else 0
predictions = predictions.reshape(X.shape)
# In[22]:
fig, ax = plt.subplots()
# setup plot for decision boundary (0 = Refused, 1 = Admitted)
plt.contourf(X, Y, predictions, alpha=0.3)
sns.scatterplot('score1', 'score2', hue='admitted', style='admitted', style_order=[1,0], data=data)
plt.legend()
# plt.show()
# In[ ]:
|
<gh_stars>10-100
# Implementing TF-IDF for predictive analytics on unstructured texts
#---------------------------------------
# Run the command below to have nltk and its pre-trained tokenizer models installed on your machine.
# sudo python3 -c "import nltk; nltk.download('all')"
import tensorflow as tf # TensorFlow
import matplotlib.pyplot as plt # matplotlib for plotting
import csv # For parsing and preprocessing the CSV file
import numpy as np # For the NumPy array
import os # For the regular OS support
import string # For string manipulation
import requests # For handling HTTP request
import io # For the I/O operation
import nltk # For nltk and its pre-trained tokenizer models
from nltk.corpus import stopwords # For removing stop-words
from nltk import word_tokenize,sent_tokenize # For nltk and its pre-trained tokenizer models
from zipfile import ZipFile # For handling zipped files
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer # TF_IDF
from tensorflow.python.framework import ops # For the TensorFlow Python framework
import warnings
import random
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph() # reset the default graph setting
# Start a graph session
sess = tf.Session()
batch_size = 200
max_features = 1000
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = 'temp/temp_spam_data.csv'
# Make sure the temp directory exists before reading or writing the cached CSV
os.makedirs('temp', exist_ok=True)
if os.path.isfile(save_file_name):
text_data = []
with open(save_file_name, 'r') as temp_output_file:
reader = csv.reader(temp_output_file)
for row in reader:
text_data.append(row)
else:
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
# And write to csv
with open(save_file_name, 'w') as temp_output_file:
writer = csv.writer(temp_output_file)
writer.writerows(text_data)
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Relabel 'spam' as 1, 'ham' as 0
target = [1. if x=='spam' else 0. for x in target]
# Normalize text
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
# Define tokenizer
def tokenizer(text):
words = nltk.word_tokenize(text)
return words
# Create TF-IDF of texts
tfidf = TfidfVectorizer(tokenizer=tokenizer, stop_words='english', max_features=max_features)
sparse_tfidf_texts = tfidf.fit_transform(texts)
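# Optional inspection (not part of the original script): confirm the TF-IDF matrix shape and peek at a
# few learned vocabulary terms. `vocabulary_` is the standard fitted attribute of TfidfVectorizer.
print('TF-IDF matrix shape (documents x features):', sparse_tfidf_texts.shape)
print('Sample vocabulary terms:', sorted(tfidf.vocabulary_.keys())[:10])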
# Split up data set into train/test
train_indices = np.random.choice(sparse_tfidf_texts.shape[0], round(0.75*sparse_tfidf_texts.shape[0]), replace=False)
test_indices = np.array(list(set(range(sparse_tfidf_texts.shape[0])) - set(train_indices)))
texts_train = sparse_tfidf_texts[train_indices]
texts_test = sparse_tfidf_texts[test_indices]
target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])
target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[max_features,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[None, max_features], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Declare logistic model (sigmoid in loss function)
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Declare optimizer
train_op = tf.train.GradientDescentOptimizer(0.01)
# train_op = tf.train.AdamOptimizer(0.01)
train_step = train_op.minimize(loss)
# Initialize Variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Start Logistic Regression
train_loss = []
test_loss = []
train_acc = []
test_acc = []
i_data = []
for i in range(10000):
rand_index = np.random.choice(texts_train.shape[0], size=batch_size)
rand_x = texts_train[rand_index].todense()
rand_y = np.transpose([target_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
# Only record loss and accuracy every 100 generations
if (i+1)%100==0:
i_data.append(i+1)
train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
train_loss.append(train_loss_temp)
test_loss_temp = sess.run(loss, feed_dict={x_data: texts_test.todense(), y_target: target_test.reshape(-1,1)})
test_loss.append(test_loss_temp)
train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y})
train_acc.append(train_acc_temp)
test_acc_temp = sess.run(accuracy, feed_dict={x_data: texts_test.todense(), y_target: target_test.reshape(-1,1)})
test_acc.append(test_acc_temp)
if (i+1)%500==0:
acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
acc_and_loss = [np.round(x,2) for x in acc_and_loss]
print('Iteration # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
print('\nOverall accuracy on the training set (%): {}'.format(np.mean(train_acc)*100.0))
print('Overall accuracy on the test set (%): {}'.format(np.mean(test_acc)*100.0))
# Plot loss over time
plt.plot(i_data, train_loss, 'k-', label='Training loss')
plt.plot(i_data, test_loss, 'r--', label='Test loss', linewidth=4)
plt.title('Cross entropy loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('Cross entropy loss')
plt.legend(loc='upper right')
plt.show()
# Plot train and test accuracy
plt.plot(i_data, train_acc, 'k-', label='Accuracy on the training set')
plt.plot(i_data, test_acc, 'r--', label='Accuracy on the test set', linewidth=4)
plt.title('Accuracy on the train and test set')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
|
<filename>challenges/linux_misc/linuxadv/answer.py
#!/usr/bin/python3
import os
import sys
import datetime
import stat
import signal
class COLORS:
PURPLE = '\033[95m%s\033[0m'
BLUE = '\033[94m%s\033[0m'
GREEN = '\033[92m%s\033[0m'
YELLOW = '\033[93m%s\033[0m'
EMPHASIS = '\033[1m\033[4m'
FLAG="ECS{R3D_GR33N_8LU3_A8E848A2EF5876D0CEC2779A8EBC75A7}"
class Challenge:
def __init__(self,handler,secret):
self.handler=handler
self.secret=secret
def solved(self):
return os.access("/tmp/qaframework/%s"%self.secret,os.F_OK)
class SimpleChallenge(Challenge):
def __init__(self,question,answer,secret):
self.question=question
self.answer=answer
self.secret=secret
def handler(self):
while True:
print(COLORS.BLUE%self.question)
useranswer=input().strip()
if useranswer==self.answer:
return
print(COLORS.YELLOW%"Sorry that's wrong, try again!")
class SignalChallenge(Challenge):
def __init__(self,question,sig,secret):
self.question=question
self.sig=sig
self.secret=secret
def handler(self):
wokenup=False
def sighandler(sig, frame):
nonlocal wokenup
if sig==self.sig:
print(COLORS.GREEN%"You did it! Press enter to continue")
wokenup=True
print(COLORS.BLUE%self.question)
signal.signal(self.sig, sighandler)
while not wokenup:
pass
def q4handler():
print(COLORS.BLUE%"Redirect my standard input to be the contents of /home/user/in.txt")
if os.fstat(0).st_ino==os.stat("/home/user/in.txt").st_ino:
return
print(COLORS.YELLOW%"Sorry, that didn't work. Try again.")
sys.exit(1)
def q5handler():
print(COLORS.BLUE%"Redirect my standard error into /dev/null.")
if os.fstat(2).st_ino==os.stat("/dev/null").st_ino:
return
if not sys.stdout.isatty(): #then they probably did >/dev/null instead of 2>/dev/null, print helpful msg
print(COLORS.YELLOW%"You probably redirected stdout instead of stderr. Check your command again!",file=sys.stderr)
sys.exit(1)
print(COLORS.YELLOW%"Sorry, that didn't work. Try again.")
sys.exit(1)
def q6handler():
print(COLORS.BLUE%"Cat out the contents of /home/user/in.txt, grep for the lines that contain the lowercase letter 'a', and pipe the result into me.")
if sys.stdin.isatty():
print(COLORS.YELLOW%"Sorry, that didn't work. Try again.")
sys.exit(1)
try:
with open("/home/user/in.txt","r") as f:
for l in f.readlines():
if "a" in l:
if l.strip()!=input():
print(COLORS.YELLOW%"Sorry, the input isn't what I'd expect. Double check your command?")
sys.exit(1)
except EOFError:
print(COLORS.YELLOW%"Sorry, the input isn't what I'd expect. Double check your command?")
sys.exit(1)
try:
s=input()
except EOFError:
return
print(COLORS.YELLOW%"Sorry, the input isn't what I'd expect. Double check your command?")
def q7handler():
print(COLORS.BLUE%"Run `. spawner` to solve this challenge.")
sys.exit(1)
CONFIG={
"1": SignalChallenge("Try backgrounding this process and foregrounding it again.",
signal.SIGCONT,
"de69c0d1ec6680008751436f97e11d99"),
"2": SimpleChallenge("What's my PID? (You're free to background this process, figure out the answer, then foreground it again to answer)",
str(os.getpid()),
"01db71f7e59479c8998647faf703be66"),
"3": SignalChallenge("Send me a SIGUSR1 signal.",
signal.SIGUSR1,
"6706038ae52a5ce18d39a9540e568ba7"),
"4": Challenge(q4handler,"348b6f0a3cbe991b5df15d3c36d83c57"),
"5": Challenge(q5handler,"25d112dd48c09d3aedc2aa2e7d610a13"),
"6": Challenge(q6handler,"59b4156f08c83f8e437f4ab7baa3ca9a"),
"7": Challenge(q7handler,"45fd98f0038fcdd95dc182a3e5d37f36"),
}
def main():
if len(sys.argv)!=2 or sys.argv[1] not in CONFIG:
print("To use this tool: Run `answer x` to answer question x.\nThere are {0} questions in total, from 1 to {0}.".format(len(CONFIG)))
sys.exit(1)
challenge=CONFIG[sys.argv[1]]
if not challenge.solved():
challenge.handler()
os.mknod("/tmp/qaframework/%s"%challenge.secret)
else:
print(COLORS.YELLOW%"You already solved this challenge!")
unsolved=[]
for k,v in CONFIG.items():
if not v.solved():
unsolved.append(k)
if len(unsolved)==0:
print(COLORS.GREEN%("You did it! The flag is "+COLORS.EMPHASIS+FLAG+"."))
else:
print(COLORS.PURPLE%("You solved {0} out of {1} challenges! The challenge{3} {2} remain.".format(
len(CONFIG)-len(unsolved),len(CONFIG),", ".join(unsolved),"" if len(unsolved)==1 else "s")))
if __name__=="__main__":
main()
|
from aiohttp import web
from aiohttp_validate import validate
from . import schemas, utils
from ..auth.mixins import TokenRequiredMixin
from ..users.utils import user_exists
class SendMessageView(TokenRequiredMixin, web.View):
"""View to send message to one user."""
@validate(**schemas.send_message_schema)
async def post(data, request):
"""Create `message` entry.
Request parameters:
* user_id (int) - ID of recipient user
* message (str) - message
* token (str) - API token
"""
errors = []
if not await user_exists(user_id=data['user_id']):
errors.append({
'user_id': 'User does not exist'
})
if not errors:
await utils.send_message_to_user(
sender_id=request.user_id,
recipient_id=data['user_id'],
message=data['message']
)
if errors:
return web.json_response({'errors': errors}, status=400)
return web.Response()
class SendMessageToAllView(TokenRequiredMixin, web.View):
"""View to send message to all users."""
@validate(**schemas.send_message_to_all_schema)
async def post(data, request):
"""Create multiple `message` entries.
Request parameters:
* message (str) - message
* token (str) - API token
"""
await utils.send_message_to_all_users(
sender_id=request.user_id,
message=data['message']
)
return web.Response()
class AllChatsView(TokenRequiredMixin, web.View):
"""View for getting all chats."""
async def get(self):
"""Get list of all chats."""
return web.json_response({
'chats': await utils.get_chats_for_user(self.request.user_id)
})
class UserChatView(TokenRequiredMixin, web.View):
"""View for specific user chat."""
async def get(self):
"""Get messages from chat with one user."""
errors = []
user_id = self.request.match_info.get('user_id')
if not user_id or not user_id.isdigit():
errors.append({
'user_id': 'User id is not specified or incorrect'
})
elif not await user_exists(user_id=int(user_id)):
errors.append({
'user_id': 'User with that id does not exist'
})
if errors:
return web.json_response({'errors': errors}, status=400)
return web.json_response({
'messages': await utils.get_messages(
current_user_id=self.request.user_id,
user_id=int(user_id)
)
})
class MarkAsReadView(TokenRequiredMixin, web.View):
"""View to mark messages as read."""
async def post(self):
"""Mark messages from chat with one user as read.
Note that this view DOES NOT mark all messages as read. Only those
messages where the recipient is the current user.
"""
errors = []
user_id = self.request.match_info.get('user_id')
if not user_id or not user_id.isdigit():
errors.append({
'user_id': 'User id is not specified or incorrect'
})
elif not await user_exists(user_id=int(user_id)):
errors.append({
'user_id': 'User with that id does not exist'
})
if errors:
return web.json_response({'errors': errors}, status=400)
async with self.request.app['pool'].acquire() as connection:
await connection.execute('''
UPDATE messages SET read = TRUE
WHERE recipient_id = $1 AND sender_id = $2 AND read = FALSE
''', self.request.user_id, int(user_id))
return web.Response()
|
<filename>sfft/utils/StampGenerator.py
import numpy as np
import os.path as pa
from astropy.io import fits
from astropy.wcs import WCS
from tempfile import mkdtemp
from astropy.nddata.utils import Cutout2D
__author__ = "<NAME> <<EMAIL>>"
__version__ = "v1.0"
"""
# MeLOn Notes
# @Stamp Generator
# * Remarks on the 2 basic Coordinate System
# A. Fortran & FITS ( ds9 & SEx & WCS ) version
# @ Matrix Index start from 1. Thus the first pixel of a matrix is < r_F, c_F > = < 1, 1 >
# @ [Basic Consistence Rule in Fortran]
# Corresponding xy-Coordinate System: Pixel < r_F, c_F > has Center with xy-coordinate ( x_F, y_F ) = ( r_F, c_F )
# NOTE the origin of this system is located at the center of the pixel < r_F, c_F > = < 0, 0 >,
# which does not physically exist.
# @ [Basic inclusion Rule in Fortran]
# Given Coordinate is enclosed in which pixel ?
# < r_F, c_F > = < int(x_F + 0.5), int(y_F + 0.5) >
#
# B. Numpy & C version
# @ Matrix Index start from 0. Thus the first pixel of a matrix is < r_C, c_C > = < 0, 0 >
# @ [Basic Consistence Rule in C]
# Corresponding xy-Coordinate System: Pixel < r_C, c_C > has Center with xy-coordinate ( x_C, y_C ) = ( r_C, c_C )
# NOTE the origin of this system is located at the center of the pixel < r_C, c_C > = < 0, 0 >,
# which is just the first pixel.
# @ [Basic inclusion Rule in C]
# Given Coordinate is enclosed in which pixel ?
# Hold Pixel < r_C, c_C > = < int(x_C + 0.5), int(y_C + 0.5) >
# (Hold Pixel has solid boundary at bottom and left side, and dashed boundary at upper and right side.)
#
# NOTE < r_F, c_F > = < r_C + 1, c_C + 1 >
# NOTE < x_F, y_F > = < x_C + 1, y_C + 1 >
#
# * Our Convention
# We generally use C Matrix Index System with Fortran Coordinate System !
# henceforth, let r, c mean r_C, c_C and x, y indicate x_F, y_F.
#
# @ [Basic Consistence Rule in Our Convention]
# Pixel < r, c > has Center with xy-coordinate (x, y) = (r + 1.0, c + 1.0)
# @ [Basic Inclusion Rule in Our Convention]
# Hold Pixel < r, c > = < int(x - 0.5), int(y - 0.5) >
# @ Image Center has xy-coordinate (x, y) = (NX/2 + 0.5, NY/2 + 0.5)
# @ Image valid domain can be defined as,
# Without boundary: 0 <= r, c < NX, NY
# 0.5 <= x, y < NX + 0.5, NY + 0.5
# With boundary: NBX, NBY <= r, c < NX - NBX, NY - NBY
# NBX + 0.5, NBY + 0.5 <= x, y < NX - NBX + 0.5, NY - NBY + 0.5
#
# * The conversion functions in Astropy
# Both w.all_pix2world and w.all_world2pix has an argument called origin
# origin = 1, Fortran xy-coordinate | This is what we generally use !
# origin = 0, C xy-coordinate
#
# * Stamp-Procedure for digital image
# Given (x, y) ----- Enclosed in Pixel < r, c > = < ⌊x - 0.5⌋, ⌊y - 0.5⌋ >
# Stamp(PixA, x, y, P, Q) = PixA[r - ⌊P/2⌋: r + ⌈P/2⌉, c - ⌊Q/2⌋: c + ⌈Q/2⌉]
#
# * Equivalent Stamp-Procedure for given row & column range
# Given RowRange = (r0, r1) and ColRange = (c0, c1)
# Let P = r1 - r0 and Q = c1 - c0, then
# PixA[r0: r1, c0: c1] = Stamp(PixA, x, y, P, Q)
# where x = r0 + ⌊P/2⌋ + 1.0 and y = c0 + ⌊Q/2⌋ + 1.0
#
# * Might be right, but haven't been Checked yet.
# PixA[E: -E, F: -F] = Stamp(PixA, U/2, V/2, U-2*E, V-2*F), where U, V = PixA.shape
# Stamp(PixA, x, y, P, Q)[E: -E, F: -F] = Stamp(PixA, x, y, P-E, Q-F)
#
# * Additional Remarks
# a. Only single-extension FITS files are supported currently.
# b. The boundary pixels can be filled with np.nan | median value | a given value
# c. Here we only care about the essential cases with CoorType='Image' since the other 'World' cases can be transformed.
# d. The Cutout2D size parameter has the form (ny, nx), however the required stamp with StampImgSize has the form (NSX, NSY)
#    We require the input of Cutout2D to be < r, c >, even though this function actually supports float values as input.
# e. We should note that NAXIS in the header will be automatically updated to adjust the image size.
# f. For general purposes, we do use the astropy-generated wcs for the stamps.
#    Alternatively, a more naive method is employed with correction of CRPIX1 & CRPIX2,
#    which is reasonable at least for TPV and SIP transformations, where the equations
#    only depend on the relative pixel coordinate, that is, the pixel-vector.
# Ref https://fits.gsfc.nasa.gov/registry/tpvwcs/tpv.html
# Ref https://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf
"""
class Stamp_Generator:
@staticmethod
def SG(FITS_obj=None, StampImgSize=None, Coordinates=None, \
CoorType='Image', AutoFill='Nan', MDIR=None):
NSX, NSY = StampImgSize
NCoors = Coordinates.shape[0]
hdl = fits.open(FITS_obj)
hdr = hdl[0].header
data = hdl[0].data
# * Convert Coordinates to Image-Type
positions = (Coordinates - 0.5).astype(int)
if CoorType == 'World':
w_obj = WCS(hdr, hdl)
positions = (w_obj.all_world2pix(Coordinates, 1) - 0.5).astype(int)
hdl.close()
# * Determine the auto fill value for the boundary case
fill_value = AutoFill
if AutoFill == 'Median':
fill_value = np.nanmedian(data)
if AutoFill == 'Nan':
fill_value = -65536 # FIXME In most cases, It works
# * Make stamps with function Cutout2D
# FIXME collection of stamp arrays can be memory-consuming
PixA_StpLst = []
Rsize = (NSY, NSX)
for i in range(NCoors):
try: PixA_Stp = Cutout2D(data, positions[i], Rsize, mode='partial', fill_value=fill_value).data.T.astype(float)
except: PixA_Stp = fill_value * np.ones((StampImgSize[0], StampImgSize[1])).astype(float)
if AutoFill == 'Nan': PixA_Stp[PixA_Stp == -65536] = np.nan
PixA_StpLst.append(PixA_Stp)
# * Make header for the stamps with modification of CRPIX1 CRPIX2 offset then save them
# NOTE I have checked the correctness of this operation.
FITS_StpLst = []
if MDIR is not None:
FNAME = pa.basename(FITS_obj)
TDIR = mkdtemp(suffix=None, prefix='SG_', dir=MDIR)
hdr_StpLst = []
for i in range(NCoors):
hdr_Stp = hdr.copy()
row_stpcent, col_stpcent = positions[i]
row_stpo, col_stpo = row_stpcent - int(NSX/2), col_stpcent - int(NSY/2)
try:
hdr_Stp['CRPIX1'] = float(hdr_Stp['CRPIX1']) - row_stpo
hdr_Stp['CRPIX2'] = float(hdr_Stp['CRPIX2']) - col_stpo
except: pass
hdr_Stp['COMMENT'] = 'Make Stamp from %s ' %FNAME + \
'with row_stpcent %d col_stpcent %d ' %(row_stpcent, col_stpcent) + \
'with stamp image size %d %d' %(NSX, NSY)
hdr_StpLst.append(hdr_Stp)
for i in range(NCoors):
PixA_Stp, hdr_Stp = PixA_StpLst[i], hdr_StpLst[i]
FITS_Stp = pa.join(TDIR, '%s.Stp%d.fits' %(FNAME[:-5], i))
fits.HDUList([fits.PrimaryHDU(PixA_Stp.T, header=hdr_Stp)]).writeto(FITS_Stp, overwrite=True)
FITS_StpLst.append(FITS_Stp)
return PixA_StpLst, FITS_StpLst
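# Minimal usage sketch (not part of the module): cut two 64x64 stamps around image (x, y)
# coordinates, following the Fortran-style coordinate convention documented above.
# The file name 'example.fits' and the coordinates are placeholders.
if __name__ == '__main__':
    coords = np.array([[128.0, 256.0], [300.5, 410.0]])
    stamps, _ = Stamp_Generator.SG(FITS_obj='example.fits', StampImgSize=(64, 64),
                                   Coordinates=coords, CoorType='Image', AutoFill='Nan', MDIR=None)
    print([s.shape for s in stamps])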
|
# coding: utf-8
# In[1]:
# get_ipython().magic(u'matplotlib inline')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy.random as npr
from sklearn.cluster import KMeans
from scipy.stats import invgamma
from scipy import sparse, stats
# plt.style.use('ggplot')
# In[2]:
# import seaborn as sns
# sns.set_style("white")
# sns.set_context("paper")
# color_names = ["red",
# "windows blue",
# "medium green",
# "dusty purple",
# "orange",
# "amber",
# "clay",
# "pink",
# "greyish",
# "light cyan",
# "steel blue",
# "forest green",
# "pastel purple",
# "mint",
# "salmon",
# "dark brown"]
# colors = sns.xkcd_palette(color_names)
# In[3]:
DATA_DIR = '../dat/raw/Webscope_R3'
# In[4]:
OUT_DATA_DIR = '../dat/proc/R3_wg'
# ## R3
# In[5]:
tr_vd_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-train.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
test_data = pd.read_csv(os.path.join(DATA_DIR, 'ydata-ymusic-rating-study-v1_0-test.txt'), sep="\t", header=None,
names=['userId', 'songId', 'rating'],engine="python")
# In[6]:
tr_vd_data.head(), tr_vd_data.shape
# In[7]:
test_data.head(), test_data.shape
# In[8]:
def split_train_test_proportion(data, uid, test_prop=0.5, random_seed=0):
data_grouped_by_user = data.groupby(uid)
tr_list, te_list = list(), list()
np.random.seed(random_seed)
for u, (_, group) in enumerate(data_grouped_by_user):
n_items_u = len(group)
if n_items_u >= 5:
idx = np.zeros(n_items_u, dtype='bool')
idx[np.random.choice(n_items_u, size=int(test_prop * n_items_u), replace=False).astype('int64')] = True
tr_list.append(group[np.logical_not(idx)])
te_list.append(group[idx])
else:
tr_list.append(group)
if u % 5000 == 0:
print("%d users sampled" % u)
sys.stdout.flush()
data_tr = pd.concat(tr_list)
data_te = pd.concat(te_list)
return data_tr, data_te
# In[9]:
def get_count(tp, id):
playcount_groupbyid = tp[[id]].groupby(id, as_index=False)
count = playcount_groupbyid.size()
return count
# In[10]:
user_activity = get_count(tr_vd_data, 'userId')
item_popularity = get_count(tr_vd_data, 'songId')
# In[11]:
unique_uid = user_activity.index
unique_sid = item_popularity.index
# In[12]:
n_users = len(unique_uid)
n_items = len(unique_sid)
# In[13]:
n_users, n_items
# In[14]:
song2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))
user2id = dict((uid, i) for (i, uid) in enumerate(unique_uid))
# In[15]:
# for the test set, only keep the users/items from the training set
test_data = test_data.loc[test_data['userId'].isin(unique_uid)]
test_data = test_data.loc[test_data['songId'].isin(unique_sid)]
# In[16]:
with open(os.path.join(OUT_DATA_DIR, 'unique_uid.txt'), 'w') as f:
for uid in unique_uid:
f.write('%s\n' % uid)
with open(os.path.join(OUT_DATA_DIR, 'unique_sid.txt'), 'w') as f:
for sid in unique_sid:
f.write('%s\n' % sid)
# # Turn userId and songId to 0-based index
# In[17]:
def numerize(tp):
uid = list(map(lambda x: user2id[x], tp['userId']))
sid = list(map(lambda x: song2id[x], tp['songId']))
tp.loc[:, 'uid'] = uid
tp.loc[:, 'sid'] = sid
return tp[['uid', 'sid', 'rating']]
# In[18]:
tr_vd_data = numerize(tr_vd_data)
test_data = numerize(test_data)
# In[19]:
train_data, vad_data = split_train_test_proportion(tr_vd_data, 'uid', test_prop=0.6, random_seed=12345)
obs_test_data, vad_data = split_train_test_proportion(vad_data, 'uid', test_prop=0.5, random_seed=12345)
# In[20]:
print("There are a total of %d unique users in the training set and %d unique users in the entire dataset" % (len(pd.unique(train_data['uid'])), len(unique_uid)))
# In[21]:
print("There are a total of %d unique items in the training set and %d unique items in the entire dataset" % (len(pd.unique(train_data['sid'])), len(unique_sid)))
# In[22]:
def move_to_fill(part_data_1, part_data_2, unique_id, key):
# move the data from part_data_2 to part_data_1 so that part_data_1 has the same number of unique "key" as unique_id
part_id = set(pd.unique(part_data_1[key]))
left_id = list()
for i, _id in enumerate(unique_id):
if _id not in part_id:
left_id.append(_id)
move_idx = part_data_2[key].isin(left_id)
part_data_1 = part_data_1.append(part_data_2[move_idx])
part_data_2 = part_data_2[~move_idx]
return part_data_1, part_data_2
# In[23]:
train_data, vad_data = move_to_fill(train_data, vad_data, np.arange(n_items), 'sid')
train_data, obs_test_data = move_to_fill(train_data, obs_test_data, np.arange(n_items), 'sid')
# In[24]:
print("There are a total of %d unique items in the training set and %d unique items in the entire dataset" % (len(pd.unique(train_data['sid'])), len(unique_sid)))
# In[25]:
train_data.to_csv(os.path.join(OUT_DATA_DIR, 'train.csv'), index=False)
vad_data.to_csv(os.path.join(OUT_DATA_DIR, 'validation.csv'), index=False)
tr_vd_data.to_csv(os.path.join(OUT_DATA_DIR, 'train_full.csv'), index=False)
# In[26]:
obs_test_data.to_csv(os.path.join(OUT_DATA_DIR, 'obs_test_full.csv'), index=False)
test_data.to_csv(os.path.join(OUT_DATA_DIR, 'test_full.csv'), index=False)
# # Load the data
# In[27]:
unique_uid = list()
with open(os.path.join(OUT_DATA_DIR, 'unique_uid.txt'), 'r') as f:
for line in f:
unique_uid.append(line.strip())
unique_sid = list()
with open(os.path.join(OUT_DATA_DIR, 'unique_sid.txt'), 'r') as f:
for line in f:
unique_sid.append(line.strip())
# In[28]:
n_items = len(unique_sid)
n_users = len(unique_uid)
print(n_users, n_items)
# In[29]:
def load_data(csv_file, shape=(n_users, n_items)):
tp = pd.read_csv(csv_file)
rows, cols, vals = np.array(tp['uid']), np.array(tp['sid']), np.array(tp['rating'])
data = sparse.csr_matrix((vals, (rows, cols)), dtype=np.float32, shape=shape)
return data
# In[30]:
def binarize_rating(data, cutoff=3, eps=1e-6):
data.data[data.data < cutoff] = eps # small value so that it will not be treated as 0 in sparse matrix
data.data[data.data >= cutoff] = 1
return data
# In[31]:
def exp_to_imp(data, cutoff=0.5):
# turn data (explicit feedback) to implicit with cutoff
data_imp = data.copy()
data_imp.data[data_imp.data < cutoff] = 0
data_imp.data[data_imp.data >= cutoff] = 1
data_imp.data = data_imp.data.astype('int32')
data_imp.eliminate_zeros()
return data_imp
# In[32]:
def binarize_spmat(spmat):
spmat_binary = spmat.copy()
spmat_binary.data = np.ones_like(spmat_binary.data)
return spmat_binary
# In[33]:
def subsample_negatives(data, full_data=None, random_state=0, verbose=False):
# roughly subsample the same number of negatives as positives in `data` for each user
# `full_data` is all the positives we *are supposed to* know
n_users, n_items = data.shape
if full_data is None:
full_data = data
rows_neg, cols_neg = [], []
np.random.seed(random_state)
for u in range(n_users): # range (not xrange) so this also runs under Python 3
p = np.ones(n_items, dtype='float32')
p[full_data[u].nonzero()[1]] = 0
p /= p.sum()
neg_items = np.random.choice(n_items, size=data[u].nnz, replace=False, p=p)
rows_neg.append([u] * data[u].nnz)
cols_neg.append(neg_items)
if verbose and u % 5000 == 0:
print("%d users sampled" % u)
sys.stdout.flush()
rows_neg = np.hstack(rows_neg)
cols_neg = np.hstack(cols_neg)
return rows_neg, cols_neg
# In[34]:
train_data = load_data(os.path.join(OUT_DATA_DIR, 'train_full.csv'))
# In[35]:
# bins = np.histogram(train_data.data, bins=5)[0]
# plt.bar(np.arange(1, 6), bins)
# pass
# In[36]:
test_data = load_data(os.path.join(OUT_DATA_DIR, 'test_full.csv'))
vad_data = load_data(os.path.join(OUT_DATA_DIR, 'validation.csv'))
# In[37]:
# bins = np.histogram(test_data.data, bins=5)[0]
# plt.bar(np.arange(1, 6), bins)
# pass
# In[38]:
# bins = np.histogram(vad_data.data, bins=5)[0]
# plt.bar(np.arange(1, 6), bins)
# pass
|
"""
Ingest data from a genomic center manifest CSV file which dropped by biobank.
"""
import os
import csv
import datetime
import logging
import collections
from rdr_service import clock
from rdr_service import config
from rdr_service.api_util import list_blobs, open_cloud_file
from rdr_service.config import GENOMIC_GENOTYPING_SAMPLE_MANIFEST_FOLDER_NAME
from rdr_service.dao.genomics_dao import GenomicSetMemberDao
from rdr_service.genomic.genomic_set_file_handler import DataError
_MAX_INPUT_AGE = datetime.timedelta(hours=24)
BIOBANK_ID_PREFIX = 'T'
def process_genotyping_manifest_files():
bucket_names = config.getSettingList(config.GENOMIC_CENTER_BUCKET_NAME)
genotyping_folder_name = config.getSetting(GENOMIC_GENOTYPING_SAMPLE_MANIFEST_FOLDER_NAME)
for bucket_name in bucket_names:
process_genotyping_manifest_file_from_bucket(bucket_name, genotyping_folder_name)
def process_genotyping_manifest_file_from_bucket(bucket_name, genotyping_folder_name):
bucket_stat_list = list_blobs(bucket_name)
if not bucket_stat_list:
logging.info('No files in cloud bucket %r.' % bucket_name)
return None
bucket_stat_list = [s for s in bucket_stat_list if s.name.lower().endswith('.csv')
and '%s' % genotyping_folder_name in s.name]
if not bucket_stat_list:
logging.info(
'No CSVs in cloud bucket %r folder %r (all files: %s).' % (bucket_name,
genotyping_folder_name,
bucket_stat_list))
return None
bucket_stat_list.sort(key=lambda s: s.updated)
path = os.path.normpath(bucket_name + '/' + bucket_stat_list[-1].name)
timestamp = bucket_stat_list[-1].updated.replace(tzinfo=None)
logging.info('Opening latest genotyping manifest CSV in %r: %r.', bucket_name + '/'
+ genotyping_folder_name, path)
now = clock.CLOCK.now()
if now - timestamp > _MAX_INPUT_AGE:
logging.info('Input %r (timestamp %s UTC) is > 24h old (relative to %s UTC), not processing.'
% (path, timestamp, now))
return None
with open_cloud_file(path) as csv_file:
update_sample_info_from_genotyping_manifest_file(csv_file)
class CsvColumns(object):
PACKAGE_ID = 'Package Id'
SAMPLE_ID = 'Sample Id'
BIOBANK_ID = 'Biobank Id'
SAMPLE_TYPE = 'Sample Type'
TEST_NAME = 'Test Name'
REQUIRED_COLS = (PACKAGE_ID, SAMPLE_ID, BIOBANK_ID, SAMPLE_TYPE, TEST_NAME)
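# Illustrative manifest layout (hypothetical values) matching REQUIRED_COLS above; the delivered
# files may carry extra columns, which are simply ignored by the reader below:
#   Package Id,Sample Id,Biobank Id,Sample Type,Test Name
#   PKG-2020-0001,10001,T123456789,DNA,aou_wgs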
def update_sample_info_from_genotyping_manifest_file(csv_file):
csv_reader = csv.DictReader(csv_file, delimiter=',')
if not set(CsvColumns.REQUIRED_COLS).issubset(set(csv_reader.fieldnames)):
raise DataError(
'CSV is missing columns %s, had columns %s.' %
(CsvColumns.REQUIRED_COLS, csv_reader.fieldnames))
genotyping_data = collections.namedtuple('genotypingData', [
'biobank_id',
'genome_type',
'sample_id',
'sample_type',
])
update_queue = collections.deque()
dao = GenomicSetMemberDao()
try:
rows = list(csv_reader)
for row in rows:
if row[CsvColumns.BIOBANK_ID] and row[CsvColumns.SAMPLE_ID] and row[CsvColumns.SAMPLE_TYPE] \
and row[CsvColumns.TEST_NAME]:
biobank_id = row[CsvColumns.BIOBANK_ID][len(BIOBANK_ID_PREFIX):] \
if row[CsvColumns.BIOBANK_ID].startswith(BIOBANK_ID_PREFIX) \
else row[CsvColumns.BIOBANK_ID]
update_queue.append(genotyping_data(
biobank_id,
row[CsvColumns.TEST_NAME],
row[CsvColumns.SAMPLE_ID],
row[CsvColumns.SAMPLE_TYPE]
))
dao.bulk_update_genotyping_sample_manifest_data(update_queue)
except ValueError as e:
raise DataError(e)
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for parsing substitutions."""
import os
import re
from typing import Text
from ament_index_python.packages import get_package_share_directory
from lark import Lark
from lark import Token
from lark import Transformer
from .expose import instantiate_substitution
from ..substitutions import TextSubstitution
from ..utilities.type_utils import NormalizedValueType
from ..utilities.type_utils import StrSomeValueType
def replace_escaped_characters(data: Text) -> Text:
"""Search escaped characters and replace them."""
return re.sub(r'\\(.)', r'\1', data)
class ExtractSubstitution(Transformer):
"""Extract a substitution."""
def part(self, content):
assert(len(content) == 1)
content = content[0]
if isinstance(content, Token):
assert content.type.endswith('_RSTRING')
return TextSubstitution(text=replace_escaped_characters(content.value))
return content
single_quoted_part = part
double_quoted_part = part
def value(self, parts):
if len(parts) == 1 and isinstance(parts[0], list):
# Deal with single and double quoted templates
return parts[0]
return parts
single_quoted_value = value
double_quoted_value = value
def arguments(self, values):
if len(values) > 1:
# Deal with tail recursive argument parsing
return [*values[0], values[1]]
return values
single_quoted_arguments = arguments
double_quoted_arguments = arguments
def substitution(self, args):
assert len(args) >= 1
name = args[0]
assert isinstance(name, Token)
assert name.type == 'IDENTIFIER'
return instantiate_substitution(name.value, *args[1:])
single_quoted_substitution = substitution
double_quoted_substitution = substitution
def fragment(self, content):
assert len(content) == 1
content = content[0]
if isinstance(content, Token):
assert content.type.endswith('_STRING')
return TextSubstitution(text=replace_escaped_characters(content.value))
return content
single_quoted_fragment = fragment
double_quoted_fragment = fragment
def template(self, fragments):
return fragments
single_quoted_template = template
double_quoted_template = template
def get_grammar_path():
return os.path.join(
get_package_share_directory('launch'), 'frontend', 'grammar.lark')
_parser = None
def parse_substitution(string_value):
global _parser
if not string_value:
# Grammar cannot deal with zero-width expressions.
return [TextSubstitution(text=string_value)]
if _parser is None:
with open(get_grammar_path(), 'r') as h:
_parser = Lark(h, start='template')
tree = _parser.parse(string_value)
transformer = ExtractSubstitution()
return transformer.transform(tree)
def parse_if_substitutions(
value: StrSomeValueType
) -> NormalizedValueType:
"""
Parse substitutions in `value`, if there are any, and return a normalized value type.
If `value` is a `str`, substitutions will be interpolated in it.
If `value` is any other scalar type, it will be returned as-is.
If `value` is a list, the two rules above will be applied to each item.
When interpolating substitutions in a string, `TextSubstitution` instances are resolved
and the original `str` is left.
:raise: `ValueError` if the result cannot be parsed into a valid type.
"""
data_types = set()
def _parse_if(value):
if isinstance(value, str):
output = parse_substitution(value)
if len(output) == 1 and isinstance(output[0], TextSubstitution):
data_types.add(str)
return output[0].text
return output
data_types.add(type(value))
return value
if isinstance(value, list):
output = [_parse_if(x) for x in value]
else:
output = _parse_if(value)
if len(data_types) > 1:
raise ValueError('The result is a non-uniform list')
return output
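# Illustrative usage (hedged): requires a sourced ROS 2 environment where the 'launch' package and its
# grammar file are installed, and where the referenced substitutions (e.g. 'env') are registered.
if __name__ == '__main__':
    print(parse_if_substitutions('plain text'))        # a plain str comes back unchanged
    print(parse_if_substitutions(['42', 'hello']))     # each list item is handled independently
    print(parse_if_substitutions('$(env HOME)/logs'))  # returns a list of Substitution instances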
|
<gh_stars>1-10
#!/usr/bin/env python
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
#
#The xGDS platform is licensed under the Apache License, Version 2.0
#(the "License"); you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0.
#
#Unless required by applicable law or agreed to in writing, software distributed
#under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#CONDITIONS OF ANY KIND, either express or implied. See the License for the
#specific language governing permissions and limitations under the License.
# __END_LICENSE__
import logging
import datetime
import json
from geocamUtil.datetimeJsonEncoder import DatetimeJsonEncoder
from zmq.eventloop import ioloop
ioloop.install()
import gevent
from gevent import socket
from gevent.queue import Queue
from geocamUtil.zmqUtil.publisher import ZmqPublisher
from geocamUtil.zmqUtil.util import zmqLoop
import os
from django.core.cache import caches
from xgds_status_board.util import *
DEFAULT_HOST = '10.10.91.5' # this is for in the field
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 30000 # this is for in the field
DEFAULT_PORT = 50000
DATA_DELIVERY_PROTOCOL = "UDP"
cache = caches['default']
def socketListenTcp(opts, q):
logging.info('constructing socket')
s = socket.socket()
logging.info('connecting to server at host %s port %s',
opts.host, opts.port)
s.connect((opts.host, opts.port))
logging.info('connection established')
buf = ''
while True:
buf += s.recv(4096)
while '\n' in buf:
line, buf = buf.split('\n', 1)
q.put(line)
def socketListenUdp(opts, q):
logging.info('constructing socket')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("", int(opts.port)))
logging.info('Listening for UDP on port %s', opts.port)
logging.info('connection established')
buf = ''
while True:
data, addr = s.recvfrom(1024)
data = data.rstrip()
q.put(data)
def socketListen(opts, q):
if opts.proto == "UDP":
socketListenUdp(opts, q)
if opts.proto == "TCP":
socketListenTcp(opts, q)
def zmqPublish(opts, q):
p = ZmqPublisher(**ZmqPublisher.getOptionValues(opts))
p.start()
for line in q:
msg = '%s:%s:%s:' % (opts.dataTopic, opts.evaNumber, opts.trackName) + line
logging.debug('publishing: %s', msg)
updateStatus(opts.evaNumber)
p.pubStream.send(msg)
def updateStatus(evaNumber):
'''
update the status in memcache so the status board knows we are listening
'''
myKey = "trackListenerEV%s" % str(evaNumber)
status = {'name': myKey,
'displayName': 'Track Listener EV%s' % str(evaNumber),
'statusColor': OKAY_COLOR,
"refreshRate": 1,
'lastUpdated': datetime.datetime.utcnow().isoformat()}
cache.set(myKey, json.dumps(status, cls=DatetimeJsonEncoder))
def evaTrackListener(opts):
q = Queue()
jobs = []
try:
jobs.append(gevent.spawn(socketListen, opts, q))
jobs.append(gevent.spawn(zmqPublish, opts, q))
jobs.append(gevent.spawn(zmqLoop))
timer = ioloop.PeriodicCallback(lambda: gevent.sleep(0.1), 0.1)
timer.start()
gevent.joinall(jobs)
finally:
gevent.killall(jobs)
def main():
import optparse
parser = optparse.OptionParser('usage: %prog')
ZmqPublisher.addOptions(parser, 'tracLinkListener')
parser.add_option('-p', '--port',
default=DEFAULT_PORT,
help='TCP or UDP port where EVA track server listens [%default]')
parser.add_option('-o', '--host',
default=DEFAULT_HOST,
help='TCP host where EVA track server listens [%default]')
parser.add_option('--proto',
default=DATA_DELIVERY_PROTOCOL,
help='UDP or TCP. Use default of UDP in field. [%default]')
parser.add_option('-n', '--evaNumber',
default=1,
help=\
'EVA identifier for multi-EVA ops. e.g. 1,2... [%default]')
parser.add_option('-t', '--trackName',
default="",
help=\
'Track name to store GPS points. If blank will use active flight then EVA #')
parser.add_option('-d', '--dataTopic',
default="gpsposition",
help=\
'ZMQ topic to publish data record under. Compass and GPS are on separate topics')
opts, _args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
if not opts.host:
opts.host = DEFAULT_HOST
print 'host is %s' % opts.host
if not opts.port:
opts.port = DEFAULT_PORT
print 'port is %d' % opts.port
evaTrackListener(opts)
if __name__ == '__main__':
main()
|
<gh_stars>0
import asyncio
import structlog
import argparse
import random
import time
import socket
from hubtraf.user import User, OperationError
from hubtraf.auth.dummy import login_dummy
from functools import partial
async def simulate_user(
hub_url, username, password, delay_seconds,
exec_seconds, code_output=None, port=None, kernel=None):
if code_output is None:
code_output = ("5 * 4", "20")
code, output = code_output
await asyncio.sleep(delay_seconds)
async with User(
username, hub_url, partial(login_dummy, password=password),
port=port, kernel=kernel) as u:
try:
await u.login()
await u.ensure_server()
await u.start_kernel()
await u.assert_code_output(code, output, 5, exec_seconds)
except OperationError:
pass
finally:
try:
if u.state == User.States.KERNEL_STARTED:
await u.stop_kernel()
except OperationError:
# We'll try to stop the server anyway
pass
try:
if u.state == User.States.SERVER_STARTED:
await u.stop_server()
except OperationError:
# Nothing to do
pass
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'hub_url',
help='Hub URL to send traffic to (without a trailing /)'
)
argparser.add_argument(
'user_count',
type=int,
help='Number of users to simulate'
)
argparser.add_argument(
'--user-prefix',
default=socket.gethostname(),
help='Prefix to use when generating user names'
)
argparser.add_argument(
'--user-session-min-runtime',
default=60,
type=int,
help='Min seconds user is active for'
)
argparser.add_argument(
'--user-session-max-runtime',
default=300,
type=int,
help='Max seconds user is active for'
)
argparser.add_argument(
'--user-session-max-start-delay',
default=60,
type=int,
help='Max seconds by which all users should have logged in'
)
argparser.add_argument(
'--port',
default=None,
type=int,
help='Port for jupyterhub server'
)
argparser.add_argument(
'--json',
action='store_true',
help='True if output should be JSON formatted'
)
argparser.add_argument(
'--code',
default="5 * 4",
type=str,
help='Code for users to execute'
)
argparser.add_argument(
'--output',
default="20",
type=str,
help='Expected result of `--code`'
)
argparser.add_argument(
'--kernel',
default=None,
type=str,
help='Kernel to run code with (e.g. bash; defaults to server default)'
)
args = argparser.parse_args()
processors=[structlog.processors.TimeStamper(fmt="ISO")]
if args.json:
processors.append(structlog.processors.JSONRenderer())
else:
processors.append(structlog.dev.ConsoleRenderer())
structlog.configure(processors=processors)
awaits = []
for i in range(args.user_count):
awaits.append(simulate_user(
args.hub_url,
f'{args.user_prefix}-' + str(i),
'hello',
int(random.uniform(0, args.user_session_max_start_delay)),
int(random.uniform(args.user_session_min_runtime, args.user_session_max_runtime)),
code_output=(args.code, args.output),
port=args.port,
kernel=args.kernel
))
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*awaits))
if __name__ == '__main__':
main()
|
import pytz
from settings import GBE_TABLE_FORMAT
from django.db.models import(
CASCADE,
CharField,
ForeignKey,
OneToOneField,
TextField,
URLField,
)
from django.core.exceptions import (
NON_FIELD_ERRORS,
ValidationError,
)
from gbe.models import (
Biddable,
Conference,
Performer,
TechInfo,
)
from gbetext import (
acceptance_states,
act_not_unique,
video_options,
)
from scheduler.idd import get_schedule
class Act (Biddable):
'''
A performance, either scheduled or proposed.
Until approved, an Act is simply a proposal.
'''
performer = ForeignKey(Performer,
on_delete=CASCADE,
related_name='acts',
blank=True,
null=True)
tech = OneToOneField(TechInfo, on_delete=CASCADE, blank=True)
video_link = URLField(blank=True)
video_choice = CharField(max_length=2,
choices=video_options,
blank=True)
shows_preferences = TextField(blank=True)
other_performance = TextField(blank=True)
why_you = TextField(blank=True)
def clone(self):
act = Act(
performer=self.performer,
tech=self.tech.clone(),
video_link=self.video_link,
video_choice=self.video_choice,
other_performance=self.other_performance,
why_you=self.why_you,
b_title=self.b_title,
b_description=self.b_description,
submitted=False,
accepted=False,
b_conference=Conference.objects.filter(
status="upcoming").first()
)
act.save()
return act
def get_performer_profiles(self):
'''
Gets all of the performers involved in the act.
'''
return self.performer.get_profiles()
@property
def bid_review_header(self):
return (['Performer',
'Act Title',
'Last Update',
'State',
'Show', ])
@property
def bid_review_summary(self):
castings = ""
cast_shows = []
for item in get_schedule(commitment=self,
roles=["Performer", "Waitlisted"]
).schedule_items:
if item.event.event_type_name == "Show" and (
item.event.eventitem.pk not in cast_shows):
if len(castings) > 0:
castings += ", %s" % str(item.event.eventitem)
else:
castings += str(item.event.eventitem)
if item.commitment.role and len(item.commitment.role) > 0:
castings += ' - %s' % item.commitment.role
cast_shows += [item.event.eventitem.pk]
return [self.performer.name,
self.b_title,
self.updated_at.strftime(GBE_TABLE_FORMAT),
acceptance_states[self.accepted][1],
castings]
@property
def is_complete(self):
if self.tech.is_complete:
if self.tech.confirm_no_rehearsal:
return True
for item in get_schedule(commitment=self).schedule_items:
if item.event.event_type_name == 'GenericEvent':
return True
return False
def validate_unique(self, *args, **kwargs):
# conference, title and performer contact should all be unique before
# the act is saved.
super(Act, self).validate_unique(*args, **kwargs)
if self.performer is None or not self.performer.contact:
raise ValidationError({'performer': "Performer is not valid"})
if Act.objects.filter(
b_conference=self.b_conference,
b_title=self.b_title,
performer__contact=self.performer.contact
).exclude(pk=self.pk).exists():
raise ValidationError({
NON_FIELD_ERRORS: [act_not_unique, ]
})
@property
def profile(self):
return self.performer.contact
class Meta:
app_label = "gbe"
permissions = [
("assign_act",
"Coordinate acts - assign status, book, and create for others"),
("review_act", "Can read other's acts and create reviews."),
]
|
<reponame>thepabloaguilar/argocd-client
# coding: utf-8
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argocd_client.configuration import Configuration
class SessionGetUserInfoResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'groups': 'list[str]',
'iss': 'str',
'logged_in': 'bool',
'username': 'str'
}
attribute_map = {
'groups': 'groups',
'iss': 'iss',
'logged_in': 'loggedIn',
'username': 'username'
}
def __init__(self, groups=None, iss=None, logged_in=None, username=None, local_vars_configuration=None): # noqa: E501
"""SessionGetUserInfoResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._groups = None
self._iss = None
self._logged_in = None
self._username = None
self.discriminator = None
if groups is not None:
self.groups = groups
if iss is not None:
self.iss = iss
if logged_in is not None:
self.logged_in = logged_in
if username is not None:
self.username = username
@property
def groups(self):
"""Gets the groups of this SessionGetUserInfoResponse. # noqa: E501
:return: The groups of this SessionGetUserInfoResponse. # noqa: E501
:rtype: list[str]
"""
return self._groups
@groups.setter
def groups(self, groups):
"""Sets the groups of this SessionGetUserInfoResponse.
:param groups: The groups of this SessionGetUserInfoResponse. # noqa: E501
:type: list[str]
"""
self._groups = groups
@property
def iss(self):
"""Gets the iss of this SessionGetUserInfoResponse. # noqa: E501
:return: The iss of this SessionGetUserInfoResponse. # noqa: E501
:rtype: str
"""
return self._iss
@iss.setter
def iss(self, iss):
"""Sets the iss of this SessionGetUserInfoResponse.
:param iss: The iss of this SessionGetUserInfoResponse. # noqa: E501
:type: str
"""
self._iss = iss
@property
def logged_in(self):
"""Gets the logged_in of this SessionGetUserInfoResponse. # noqa: E501
:return: The logged_in of this SessionGetUserInfoResponse. # noqa: E501
:rtype: bool
"""
return self._logged_in
@logged_in.setter
def logged_in(self, logged_in):
"""Sets the logged_in of this SessionGetUserInfoResponse.
:param logged_in: The logged_in of this SessionGetUserInfoResponse. # noqa: E501
:type: bool
"""
self._logged_in = logged_in
@property
def username(self):
"""Gets the username of this SessionGetUserInfoResponse. # noqa: E501
:return: The username of this SessionGetUserInfoResponse. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this SessionGetUserInfoResponse.
:param username: The username of this SessionGetUserInfoResponse. # noqa: E501
:type: str
"""
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SessionGetUserInfoResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SessionGetUserInfoResponse):
return True
return self.to_dict() != other.to_dict()
|
'''
Compares folders of clean and dirty audio
Calculates STOI and the mir_eval BSS source-separation metrics (SDR, SIR, SAR); PESQ is TBD
'''
import argparse
import pathlib
import typing
from typing import Union
from dataclasses import dataclass, fields
import logging
import csv
from tqdm import tqdm # type: ignore
import soundfile # type: ignore
from mir_eval.separation import bss_eval_sources # type: ignore
from pystoi.stoi import stoi # type: ignore
@dataclass
class Metrics:
""" dataclass for metrics for each file
"""
sdr: float
sir: float
sar: float
stoi: float
def evaluate_metrics(dirty_file: Union[str, typing.BinaryIO],
clean_file: Union[str, typing.BinaryIO]) -> Metrics:
"""Evaluate metrics for a dirty/clean file pair
Args:
dirty_file (Union[str, typing.BinaryIO]): A dirty/noisy audio file
clean_file (Union[str, typing.BinaryIO]): The original clean file
Raises:
ValueError: If sample rates do not match
Returns:
Metrics: A Metrics dataclass
"""
dirty, dirty_fs = soundfile.read(dirty_file)
clean, clean_fs = soundfile.read(clean_file)
if dirty_fs != clean_fs:
raise ValueError("Files have different sample rates!")
if (dirty.ndim > 1) or (clean.ndim > 1):
raise ValueError("Files are not mono!")
# HACK: Reduce length
if len(dirty) > len(clean):
logging.warning("File %s is different length %d from clean file %d",
dirty_file, len(dirty), len(clean))
dirty = dirty[:len(clean)]
elif len(clean) > len(dirty):
logging.warning("File %s is different length %d from clean file %d",
dirty_file, len(dirty), len(clean))
clean = clean[:len(dirty)]
# Calculate STOI, original version
d = stoi(clean, dirty, dirty_fs, extended=False)
# Calculate BSS statistics
# Use compute_permutation as this is what bss did
[sdr, sir, sar, _] = bss_eval_sources(
clean, dirty, compute_permutation=True)
# Flatten out from numpy array as mono so single element
sdr = sdr.item()
sir = sir.item()
sar = sar.item()
# Return value
return Metrics(sdr=sdr, sir=sir, sar=sar, stoi=d)
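# Example usage (file names are illustrative): given a noisy recording and its clean
# reference (same sample rate, both mono), e.g.
#   m = evaluate_metrics("dirty/abc.n2.wav", "clean/abc.wav")
#   print(m.stoi, m.sdr, m.sir, m.sar)
# the returned Metrics dataclass holds the STOI score and the BSS eval values.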
def matching_clean_file(dirty_filename, clean_folder: Union[str, pathlib.Path])\
-> pathlib.Path:
""" Find matching clean filepath given dirty filename/path
Args:
dirty_filename (Union[str, pathlib.Path]): Name of the dirty file
        clean_folder (Union[str, pathlib.Path]): The folder of clean files
    Returns:
        pathlib.Path: Path to the clean file
"""
# Cast to Path immediately
dirty_filename = pathlib.Path(dirty_filename)
clean_folder = pathlib.Path(clean_folder)
# Get first part of name, e.g. "/workspace/dirty/abc.n2.wav" -> "abc"
name = dirty_filename.stem.split('.')[0]
# Find wav file in clean folder matching
clean_file = clean_folder.glob(f"{name}.wav")
clean_file = list(clean_file) # Convert to list from generator
if len(clean_file) != 1:
raise FileNotFoundError(
f"Could not find single {name} in {clean_folder}")
return clean_file[0]
def compare_folder(clean_folder, dirty_folder, print_progress=False) -> typing.Dict[str, Metrics]:
""" Calculates metrics for a folder of clean and dirty audio
Args:
        clean_folder (Union[str, pathlib.Path]): A folder (path or string) of the original audio
        dirty_folder (Union[str, pathlib.Path]): A folder of the dirty/recovered audio to compare
Returns:
typing.Dict[str,Metrics]: A dictionary of clean filename : Metrics
"""
clean_folder = pathlib.Path(clean_folder)
dirty_folder = pathlib.Path(dirty_folder)
# Find all wav files in the folder. List so get len() for progress
dirty_files = list(dirty_folder.glob("*.wav"))
metrics = dict()
for dirty_file in tqdm(dirty_files, unit='files',disable=not print_progress):
clean_file = matching_clean_file(dirty_file, clean_folder)
file_metrics = evaluate_metrics(str(dirty_file), str(clean_file))
name = dirty_file.name # Name excluding folder etc
metrics[name] = file_metrics
return metrics
def write_metrics(metrics_dict: typing.Dict[str, Metrics],
metrics_file: Union[str, pathlib.Path]) -> None:
""" Write a dictionary of metrics to a specified file path """
with open(metrics_file, 'w', newline='') as csvfile:
# Generate header from dataclass key names
metrics_fields = [field.name for field in fields(Metrics)]
fieldnames = ['name'] + metrics_fields
# Create DictWriter and write header
writer = csv.DictWriter(csvfile, fieldnames, dialect='excel')
writer.writeheader()
for name, m in metrics_dict.items():
row = {'name': name}
row.update(m.__dict__) # Convert metrics to dict and add
writer.writerow(row)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--clean_dir', type=str, required=True)
parser.add_argument('--dirty_dir', type=str, required=True)
parser.add_argument('--output_file', type=str,
required=False, default="metrics.csv")
parser.add_argument('--show_names', action='store_true', default=False)
parser.add_argument(
'-d', '--debug',
help="Show all debug information",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-q', '--quiet',
help="Show only errors",
action="store_const", dest="loglevel", const=logging.ERROR,
)
args = parser.parse_args()
# Set logging verbosity
logging.basicConfig(level=args.loglevel)
# Check directories exist
if not pathlib.Path(args.clean_dir).is_dir():
raise FileNotFoundError("--clean_dir must be valid directory")
if not pathlib.Path(args.dirty_dir).is_dir():
raise FileNotFoundError("--dirty_dir must be valid directory")
# Calculate metrics
metrics = compare_folder(args.clean_dir, args.dirty_dir,
print_progress=True)
# Output to file
write_metrics(metrics, args.output_file)
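# Example invocation (script and folder names are illustrative):
#   python compare_metrics.py --clean_dir ./clean --dirty_dir ./denoised --output_file metrics.csv
# This writes one CSV row per dirty file with the columns: name, sdr, sir, sar, stoi.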
|
import os
import random
import pickle
import pytrec_eval
from eval.eval_bm25_coliee2021 import read_label_file
from analysis.ttest import measure_per_query
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib
from analysis.compare_bm25_dpr import read_in_run_from_pickle, remove_query_from_ranked_list
from analysis.diff_bm25_dpr import first_diff_analysis, write_diff_cases
def ranking_eval(qrels, run, output_dir, measurements, output_file= 'eval_bm25_aggregate_overlap.txt'):
# trec eval
evaluator = pytrec_eval.RelevanceEvaluator(qrels, measurements)
#{'recall_1', 'recall_2', 'recall_3', 'recall_4', 'recall_5', 'recall_6', 'recall_7', 'recall_8',
#'recall_9', 'recall_10','recall_11', 'recall_12', 'recall_13', 'recall_14', 'recall_15', 'recall_16', 'recall_17', 'recall_18',
#'recall_19', 'recall_20','P_1', 'P_2', 'P_3', 'P_4', 'P_5', 'P_6', 'P_7', 'P_8', 'P_9', 'P_10',
#'P_11', 'P_12', 'P_13', 'P_14', 'P_15', 'P_16', 'P_17', 'P_18', 'P_19', 'P_20'}) # {'recall_100', 'recall_200', 'recall_300', 'recall_500', 'recall_1000'})
results = evaluator.evaluate(run)
def print_line(measure, scope, value):
print('{:25s}{:8s}{:.4f}'.format(measure, scope, value))
def write_line(measure, scope, value):
return '{:25s}{:8s}{:.4f}'.format(measure, scope, value)
per_query = {}
for query_id, query_measures in sorted(results.items()):
for measure, value in sorted(query_measures.items()):
if per_query.get(query_id):
per_query.get(query_id).update({measure : value})
else:
per_query.update({query_id:{}})
per_query.get(query_id).update({measure: value})
#print_line(measure, query_id, value)
#for measure in sorted(query_measures.keys()):
# print_line(
# measure,
# 'all',
# pytrec_eval.compute_aggregated_measure(
# measure,
# [query_measures[measure]
# for query_measures in results.values()]))
with open(os.path.join(output_dir, output_file), 'w') as output:
for measure in sorted(query_measures.keys()):
output.write(write_line(
measure,
'all',
pytrec_eval.compute_aggregated_measure(
measure,
[query_measures[measure]
for query_measures in results.values()])) + '\n')
return per_query
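# Example (illustrative data): with qrels = {'q1': {'d1': 1}} and
# run = {'q1': {'d1': 2.3, 'd2': 1.1}}, calling
#   ranking_eval(qrels, run, '.', {'recall_5'}, 'eval.txt')
# writes the aggregated recall_5 to ./eval.txt and returns {'q1': {'recall_5': 1.0}}.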
def get_diff_per_query(per_query_baseline, per_query_legalbert_doc, measure):
diff_per_query = {}
for query, measurements in per_query_legalbert_doc.items():
diff_per_query.update({query: (measurements.get(measure) - per_query_baseline.get(query).get(measure))})
return diff_per_query
def get_performance_per_query(per_query_baseline, measure):
diff_per_query = {}
for query, measurements in per_query_baseline.items():
diff_per_query.update({query: measurements.get(measure)})
return diff_per_query
def plot_wins_losses(diff_legalbert_doc_r, diff_legalbert_doc_p, measure1, measure2, output_dir, m1_min=-1, m1_max=1, m1_step=1, m2_min=-0.05, m2_max=0.05, m2_step=0.05, color='purple', sort='sort_recall'):
# now plot them
x = np.array(list(range(len(diff_legalbert_doc_r.keys()))))
custom_params = {"axes.spines.right": False, "axes.spines.top": False}
sns.set_theme(style="ticks", rc=custom_params)
# The below code will create two plots. The parameters that .subplot take are (row, column, no. of plots).
plt.subplot(2, 1, 1)
#plt.tight_layout()
# This will create the bar graph for population
#sns.set_style('darkgrid')
#sns.set_style("whitegrid")
custom_params = {"axes.spines.right": False, "axes.spines.top": False}
sns.set_theme(style="ticks", rc=custom_params)
ax = sns.barplot(x, np.array(list(diff_legalbert_doc_r.values())),color=color, edgecolor = color)
#bar1 = plt.bar(x, diff_legalbert_doc_r.values(),color='purple', edgecolor = 'purple')
#ax = plt.gca()
plt.ylim(m1_min, m1_max)
#ax.set_facecolor('white')
ax.yaxis.set_ticks(np.arange(m1_min, m1_max+m1_step/2, m1_step))
plt.ylabel('{}'.format(measure1))
plt.xticks([], [])
ax.axhline(y=0, color='black')
ax.spines['bottom'].set_color('none')
plt.subplots_adjust(left=0.15) #, right=0.96, top=0.96)
# The below code will create the second plot.
plt.subplot(2, 1, 2)
# This will create the bar graph for gdp i.e gdppercapita divided by population.
#sns.set_style('darkgrid')
#sns.set_style("whitegrid")
custom_params = {"axes.spines.right": False, "axes.spines.top": False}
sns.set_theme(style="ticks", rc=custom_params)
ax = sns.barplot(x, np.array(list(diff_legalbert_doc_p.values())),color=color, edgecolor = color)
#bar2 = plt.bar(x, diff_legalbert_doc_p.values(),color='purple', edgecolor = 'purple')
#ax = plt.gca()
ax.set_ylim([m2_min, m2_max])
#plt.ylim(-0.05, 0.05)
ax.yaxis.set_ticks(np.arange(m2_min, m2_max+m2_step/2, m2_step))
ax.axhline(y=0, color='black')
ax.spines['bottom'].set_color('none')
#ax.set_facecolor('white')
plt.ylabel('{}'.format(measure2))
plt.xticks([], [])
if sort=='sort_bm25':
plt.xlabel('easy -> hard (by BM25 {})'.format(measure1))
elif sort=='sort_recall':
plt.xlabel('sorted by {}'.format(measure1))
else:
plt.xlabel(sort)
plt.subplots_adjust(left=0.15)
plt.savefig(os.path.join(output_dir, '{}_{}_wins_losses_{}.svg'.format(measure1, measure2, sort)))
plt.figure().clear()
plt.close()
plt.cla()
plt.clf()
def sort_diff(diff_legalbert_doc_r, diff_legalbert_doc_p, per_query_baseline, sort):
    if sort == 'sort_bm25':
        # note: relies on the module-level `measure1` defined in the __main__ block below
        sort_baseline = get_performance_per_query(per_query_baseline, measure1)
sort_baseline = {k: v for k, v in sorted(sort_baseline.items(), key=lambda item: item[1], reverse=True)}
# now sort by recall of bm25!
diff_legalbert_doc_r = {k: v for k, v in sorted(diff_legalbert_doc_r.items(),
key=lambda pair: list(sort_baseline.keys()).index(pair[0]))}
print(diff_legalbert_doc_r)
# sort the same way as diff_legalbert_doc
diff_legalbert_doc_p = {k: v for k, v in sorted(diff_legalbert_doc_p.items(),
key=lambda pair: list(diff_legalbert_doc_r.keys()).index(pair[0]))}
print(diff_legalbert_doc_p)
else:
# sort by recall increasing order
diff_legalbert_doc_r = {k: v for k, v in sorted(diff_legalbert_doc_r.items(), key=lambda item: item[1])}
# sort the same way as diff_legalbert_doc
diff_legalbert_doc_p = {k: v for k, v in sorted(diff_legalbert_doc_p.items(), key=lambda pair: list(diff_legalbert_doc_r.keys()).index(pair[0]))}
return diff_legalbert_doc_r, diff_legalbert_doc_p
if __name__ == "__main__":
# coliee data
# load bm25 runs
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/bm25/aggregate/test/separately_para_w_summ_intro/'
with open(os.path.join(output_dir, 'run_bm25_aggregated_test_whole_doc_overlap_docs.pickle'), 'rb') as f:
run_bm25_doc = pickle.load(f)
with open(os.path.join(output_dir, 'run_aggregated_test_rrf_overlap_ranks.pickle'), 'rb') as f:
run_bm25_parm = pickle.load(f)
# load bert para based runs
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/dpr/legal_task2/bert/aggregate/test/'
with open(os.path.join(output_dir, 'run_aggregated_test_separate_para_rrf.pickle'), 'rb') as f:
run_bert_parm_rrf = pickle.load(f)
with open(os.path.join(output_dir, 'run_aggregated_test_vrrf.pickle'), 'rb') as f:
run_bert_parm_vrrf = pickle.load(f)
# load legalbert para based runs
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/dpr/legal_task2/legalbert/aggregate/test/'
with open(os.path.join(output_dir, 'run_aggregated_test_rrf_overlap_ranks.pickle'), 'rb') as f:
run_legbert_para_rrf = pickle.load(f)
with open(os.path.join(output_dir, 'run_aggregated_test_vrrf.pickle'), 'rb') as f:
run_legbert_para_vrrf = pickle.load(f)
# load legalbert doc based runs
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/dpr/legal_task1/legalbert/eval/test/'
with open(os.path.join(output_dir, 'run_aggregated_test_separate_para_rrf.pickle'), 'rb') as f:
run_legbert_doc_rrf = pickle.load(f)
with open(os.path.join(output_dir, 'run_aggregated_test_vrrf_legalbert_doc.pickle'), 'rb') as f:
run_legbert_doc_vrrf = pickle.load(f)
qrels = read_label_file(
'/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/test/task1_test_labels_2021.json')
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/analysis_low_precision'
# 1. evaluate ndcg@100/500/1k
measurements = {'recall_100', 'recall_200', 'recall_300', 'recall_500', 'recall_1000', 'ndcg_cut_10', 'ndcg_cut_100', 'ndcg_cut_500', 'ndcg_cut_1000', 'P_10', 'P_100', 'P_500', 'P_1000'}
#measurements = {'recall_1', 'recall_2', 'recall_3', 'recall_4', 'recall_5', 'recall_6', 'recall_7', 'recall_8',
#'recall_9', 'recall_10','recall_11', 'recall_12', 'recall_13', 'recall_14', 'recall_15', 'recall_16', 'recall_17', 'recall_18',
#'recall_19', 'recall_20','P_1', 'P_2', 'P_3', 'P_4', 'P_5', 'P_6', 'P_7', 'P_8', 'P_9', 'P_10',
#'P_11', 'P_12', 'P_13', 'P_14', 'P_15', 'P_16', 'P_17', 'P_18', 'P_19', 'P_20'}
per_query = ranking_eval(qrels, run_bm25_parm, output_dir, measurements, 'eval_bm25_parm_ncdg.txt')
# 2. compare recall, precision @500 per query, and then also the delta r, delta p to baseline (bm25 doc) per query
# is there a correlation
per_query_baseline = ranking_eval(qrels, run_bm25_doc, output_dir, measurements, 'eval_bm25_whole_doc_ncdg.txt')
per_query_legalbert_doc = ranking_eval(qrels, run_legbert_doc_vrrf, output_dir, measurements,
'eval_legalbert_doc_vrrf_ncdg.txt')
per_query_legalbert_para = ranking_eval(qrels, run_legbert_para_vrrf, output_dir, measurements,
'eval_legalbert_para_vrrf_ncdg.txt')
per_query_bm25_parm = ranking_eval(qrels, run_bm25_parm, output_dir, measurements, 'eval_bm25_parm_ncdg.txt')
    # use for comparison of bm25 doc and bm25 parm rrf
#per_query_legalbert_doc = per_query_bm25_parm
n = 1000
sort = 'sort_recall'
# difference plots
measure1 = 'recall_{}'.format(n)
diff_legalbert_doc_r = get_diff_per_query(per_query_baseline, per_query_legalbert_doc, measure1)
measure2 = 'P_{}'.format(n)
diff_legalbert_doc_p = get_diff_per_query(per_query_baseline, per_query_legalbert_doc, measure2)
diff_legalbert_doc_r, diff_legalbert_doc_p = sort_diff(diff_legalbert_doc_r, diff_legalbert_doc_p, per_query_baseline, sort)
if n==100:
color = 'violet'
elif n==500:
color = 'purple'
elif n==1000:
color = 'indigo'
plot_wins_losses(diff_legalbert_doc_r, diff_legalbert_doc_p, measure1, measure2, output_dir, -1, 1, 1, -0.05, 0.05, 0.05, color, sort)
# now plots where you have the bm25 performance vs legalbertdoc performance in terms of 1 measure
n = 100
one_measure = 'P_{}'.format(n)
if n==100:
color = 'violet'
elif n==500:
color = 'purple'
elif n==1000:
color = 'indigo'
elif n==10:
color = 'pink'
# get the performance
per_query_baseline_measure = get_performance_per_query(per_query_baseline, one_measure)
per_query_legalbert_doc_measure = get_performance_per_query(per_query_legalbert_doc, one_measure)
# sorted after baseline
per_query_baseline_measure, per_query_legalbert_doc_measure = sort_diff(per_query_baseline_measure, per_query_legalbert_doc_measure, per_query_baseline, 'sort_recall')
plot_wins_losses(per_query_baseline_measure, per_query_legalbert_doc_measure, 'BM25', 'LegalBERT Doc', output_dir, -0.5, 0.5, 0.5, -0.5, 0.5, 0.5,
color, one_measure)
#
    # now qualitative analysis of the biggest wins and losses of the cases, write the cases to an output folder
#
dpr_dict_doc = remove_query_from_ranked_list(run_legbert_doc_vrrf)
bm25_dict_doc = remove_query_from_ranked_list(run_bm25_doc)
# legalbert doc vs bm25 doc
dpr_dict_doc_rel, bm25_dict_doc_rel, query_diff_doc, query_diff_length_doc = first_diff_analysis(dpr_dict_doc,
bm25_dict_doc,
qrels)
# then i only want to write the cases where the wins/losses are high!
corpus_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/corpus'
pickle_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/pickle_files'
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task1/analysis_low_precision/qual/output/legalbert_doc'
write_diff_cases(query_diff_length_doc, query_diff_doc, pickle_dir, output_dir, corpus_dir)
# table: per query: recall, precision@n and delta recall and delta precision@n, analyze in pandas dataframe? |
'''
Tic-tac-toe is played by two players A and B on a 3 x 3 grid.
Here are the rules of Tic-Tac-Toe:
Players take turns placing characters into empty squares (" ").
The first player A always places "X" characters, while the second player B always places "O" characters.
"X" and "O" characters are always placed into empty squares, never on filled ones.
The game ends when there are 3 of the same (non-empty) character filling any row, column, or diagonal.
The game also ends if all squares are non-empty.
No more moves can be played if the game is over.
Given an array moves where each element is another array of size 2 corresponding to the row and column of the grid where they mark their respective character in the order in which A and B play.
Return the winner of the game if it exists (A or B), in case the game ends in a draw return "Draw", if there are still movements to play return "Pending".
You can assume that moves is valid (It follows the rules of Tic-Tac-Toe), the grid is initially empty and A will play first.
'''
# I spent 4 hours on this because I did not know how to implement it with helper functions - but eventually I did it!
# Feel proud!
from typing import List
class Solution:
def tictactoe(self, moves: List[List[int]]) -> str:
        def check_empty(self, grid):  # returns True when the board has no empty squares left
for i in range(3):
for j in range(3):
if grid[i][j] == '':
return False
return True
def check_diag(self,grid):
diag1 = [grid[0][0], grid[1][1], grid[2][2]]
if diag1 == ['X', 'X', 'X'] :
#print('diag1')
print('player a wins ')
return 'A'
if diag1 == ['0', '0', '0']:
#print('diag1')
#print('player b wins')
return 'B'
diag2 = [grid[0][2], grid[1][1], grid[2][0] ]
if diag2 == ['X', 'X', 'X']:
#print('diag2')
print('player a wins')
return 'A'
if diag2 == ['0', '0', '0']:
#print('diag2')
#print(' player b wins')
return 'B'
#print('no diag detected')
return 'c'
def check_rows(self,grid):
for i in range(3):
row = grid[i]
if row == ['X', 'X', 'X']:
#print('row X %d player a win' % i)
return 'A'
elif row == ['0', '0', '0']:
#print('row o %d player b win' % i)
return 'B'
#print('no win row detected')
return 'c'
def check_columns(self,grid):
for i in range(3):
x = 0
y = 0
for j in range(3):
if grid[j][i] == 'X':
x+=1
elif grid[j][i] == '0':
y+=1
if x == 3:
#print('column X win detected')
return 'A'
elif y == 3:
#print('column O win detected')
return 'B'
#print('no win columns')
return 'c'
def check_all(self,grid):
cols = check_columns(self,grid)
rows = check_rows(self,grid)
diags = check_diag(self,grid)
if cols != 'c':
#print('cols not win')
return cols
elif rows != 'c':
#print('rows not win')
return rows
elif diags!= 'c':
#print('diags not win')
return diags
def check_draw_pending(self,grid):
if check_empty(self,grid):
print('Draw')
return 'Draw'
else:
print('Pending')
return 'Pending'
def print_grid(self,grid):
#print('--------------------------------------------')
for i in range(3):
for j in range(3):
print(grid[i][j], end = ' ')
print()
grid = []
for i in range(3):
grid.append([''] * 3)
a_player = True
for move in moves:
x = move[0]
y = move[1]
if grid[x][y] == '':
if a_player:
grid[x][y] = 'X'
a_player = False
if check_all(self,grid) == 'A':
#print('A')
#print_grid(grid)
return 'A'
elif not a_player:
grid[x][y] = '0'
a_player = True
if check_all(self,grid) == 'B':
#print('B')
#print_grid(grid)
return 'B'
#print_grid(grid)
#print('checking draw pending')
#print_grid(grid)
return check_draw_pending(self,grid)
'''
def helper(self, x):
t = [x]*3
print(t)
return t
helper(self,moves[0][0])
'''
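# Example (illustrative values): Solution().tictactoe([[0, 0], [2, 0], [1, 1], [2, 1], [2, 2]])
# returns "A" (A completes the main diagonal), while a shorter, unfinished game returns "Pending".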
|
import numpy as np
import scipy.sparse as sp
import warnings
import properties
from .base import BaseRegularization, BaseComboRegularization
from .. import Utils
class BaseSparse(BaseRegularization):
"""
Base class for building up the components of the Sparse Regularization
"""
def __init__(self, mesh, **kwargs):
self._stashedR = None
super(BaseSparse, self).__init__(mesh=mesh, **kwargs)
model = properties.Array(
"current model", dtype=float
)
epsilon = properties.Float(
"Threshold value for the model norm", default=1e-3,
required=True
)
norm = properties.Array(
"norm used", dtype=float
)
    space = properties.String(
        "By default inherit the objective", default='linear'
)
gradientType = properties.String(
"type of gradient", default='components'
)
    scale = properties.Array(
        "General knob for scaling", dtype=float,
)
# Give the option to scale or not
scaledIRLS = properties.Bool(
"Scale the gradients of the IRLS norms",
default=True
)
@properties.validator('scale')
def _validate_scale(self, change):
if change['value'] is not None:
# todo: residual size? we need to know the expected end shape
if self._nC_residual != '*':
assert len(change['value']) == self._nC_residual, (
'scale must be length {} not {}'.format(
self._nC_residual, len(change['value'])
)
)
@property
def stashedR(self):
return self._stashedR
@stashedR.setter
def stashedR(self, value):
self._stashedR = value
class SparseSmall(BaseSparse):
"""
Sparse smallness regularization
**Inputs**
:param int norm: norm on the smallness
"""
_multiplier_pair = 'alpha_s'
def __init__(self, mesh, **kwargs):
super(SparseSmall, self).__init__(
mesh=mesh, **kwargs
)
# Give the option to scale or not
scaledIRLS = properties.Bool(
"Scale the gradients of the IRLS norms",
default=True
)
@property
def f_m(self):
return self.mapping * self._delta_m(self.model)
@property
def W(self):
if getattr(self, 'model', None) is None:
R = Utils.speye(self.mapping.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.cell_weights is not None:
return Utils.sdiag((self.scale *
self.cell_weights)**0.5) * R
else:
return Utils.sdiag((self.scale * self.regmesh.vol)**0.5) * R
def R(self, f_m):
# if R is stashed, return that instead
if getattr(self, 'stashedR') is not None:
return self.stashedR
# Default
eta = np.ones_like(f_m)
if self.scaledIRLS:
# Eta scaling is important for mix-norms...do not mess with it
# Scale on l2-norm gradient: f_m.max()
maxVal = np.ones_like(f_m) * np.abs(f_m).max()
            # Compute theoretical maximum gradients for p < 1
maxVal[self.norm < 1] = (
self.epsilon / np.sqrt(1.-self.norm[self.norm < 1])
)
maxGrad = (
maxVal /
(maxVal**2. + self.epsilon**2.)**(1.-self.norm/2.)
)
# Scaling factor
eta[maxGrad != 0] = np.abs(f_m).max()/maxGrad[maxGrad != 0]
# Scaled IRLS weights
r = (eta / (f_m**2. + self.epsilon**2.)**(1.-self.norm/2.))**0.5
self.stashedR = r # stash on the first calculation
return r
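    # Note on the weight above: (R f_m)^T (R f_m) = sum_i eta_i * f_m_i**2 / (f_m_i**2 + epsilon**2)**(1 - p_i/2),
    # which behaves like eta_i * |f_m_i|**p_i when |f_m_i| >> epsilon (an approximate lp measure,
    # i.e. a smoothed l0/l1 for p < 2) and stays finite as f_m_i -> 0; eta_i is the scaledIRLS factor.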
@Utils.timeIt
def deriv(self, m):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the derivative is straightforward:
        .. math::
            \\frac{\\partial R(m)}{\\partial m} = \mathbf{W^\\top W (m-m_\\text{ref})}
"""
mD = self.mapping.deriv(self._delta_m(m))
r = self.W * (self.mapping * (self._delta_m(m)))
return mD.T * (self.W.T * r)
class SparseDeriv(BaseSparse):
"""
Base Class for sparse regularization on first spatial derivatives
"""
def __init__(self, mesh, orientation='x', **kwargs):
self.orientation = orientation
super(SparseDeriv, self).__init__(mesh=mesh, **kwargs)
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=False
)
# Give the option to scale or not
scaledIRLS = properties.Bool(
"Scale the gradients of the IRLS norms",
default=True
)
@Utils.timeIt
def __call__(self, m):
"""
We use a weighted 2-norm objective function
.. math::
r(m) = \\frac{1}{2}
"""
if self.mrefInSmooth:
f_m = self._delta_m(m)
else:
f_m = m
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.space == 'spherical':
Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))
if getattr(self, 'model', None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.cell_weights is not None:
W = (
Utils.sdiag(
(Ave*(self.scale * self.cell_weights))**0.5
) *
R
)
else:
W = Utils.sdiag(
(Ave * (self.scale * self.regmesh.vol))**0.5
) * R
theta = self.cellDiffStencil * (self.mapping * f_m)
dmdx = Utils.matutils.coterminal(theta)
r = W * dmdx
else:
r = self.W * (self.mapping * f_m)
return 0.5 * r.dot(r)
def R(self, f_m):
# if R is stashed, return that instead
if getattr(self, 'stashedR') is not None:
return self.stashedR
# Default
eta = np.ones_like(f_m)
if self.scaledIRLS:
# Eta scaling is important for mix-norms...do not mess with it
# Scale on l2-norm gradient: f_m.max()
maxVal = np.ones_like(f_m) * np.abs(f_m).max()
            # Compute theoretical maximum gradients for p < 1
maxVal[self.norm < 1] = (
self.epsilon / np.sqrt(1.-self.norm[self.norm < 1])
)
maxGrad = (
maxVal /
(maxVal**2. + self.epsilon**2.)**(1.-self.norm/2.)
)
# Scaling Factor
eta[maxGrad != 0] = np.abs(f_m).max()/maxGrad[maxGrad != 0]
# Scaled-IRLS weights
r = (eta / (f_m**2. + self.epsilon**2.)**(1.-self.norm/2.))**0.5
self.stashedR = r # stash on the first calculation
return r
@Utils.timeIt
def deriv(self, m):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the derivative is straightforward:
        .. math::
            \\frac{\\partial R(m)}{\\partial m} = \mathbf{W^\\top W (m-m_\\text{ref})}
"""
if self.mrefInSmooth:
model = self._delta_m(m)
else:
model = m
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.space == 'spherical':
Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))
if getattr(self, 'model', None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.cell_weights is not None:
W = (
Utils.sdiag(
((Ave * (self.scale * self.cell_weights)))**0.5
) *
R
)
else:
W = Utils.sdiag(
(Ave * (self.scale * self.regmesh.vol))**0.5
) * R
theta = self.cellDiffStencil * (self.mapping * model)
dmdx = Utils.matutils.coterminal(theta)
r = W * dmdx
else:
r = self.W * (self.mapping * model)
mD = self.mapping.deriv(model)
return mD.T * (self.W.T * r)
@property
def _multiplier_pair(self):
return 'alpha_{orientation}'.format(orientation=self.orientation)
@property
def f_m(self):
if self.mrefInSmooth:
f_m = self._delta_m(self.model)
else:
f_m = self.model
if self.space == 'spherical':
theta = self.cellDiffStencil * (self.mapping * f_m)
dmdx = Utils.matutils.coterminal(theta)
else:
if self.gradientType == 'total':
Ave = getattr(
self.regmesh,
'aveCC2F{}'.format(self.orientation)
)
dmdx = np.abs(self.regmesh.aveFx2CC *
self.regmesh.cellDiffxStencil *
(self.mapping * f_m)
)
if self.regmesh.dim > 1:
dmdx += np.abs(self.regmesh.aveFy2CC *
self.regmesh.cellDiffyStencil *
(self.mapping * f_m)
)
if self.regmesh.dim > 2:
dmdx += np.abs(self.regmesh.aveFz2CC *
self.regmesh.cellDiffzStencil *
(self.mapping * f_m)
)
dmdx = Ave * dmdx
else:
dmdx = self.cellDiffStencil * (self.mapping * f_m)
return dmdx
@property
def cellDiffStencil(self):
return getattr(
self.regmesh, 'cellDiff{}Stencil'.format(self.orientation)
)
@property
def W(self):
Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))
if getattr(self, 'model', None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.cell_weights is not None:
return (
Utils.sdiag(
(Ave*(self.scale * self.cell_weights))**0.5
) *
R * self.cellDiffStencil
)
else:
return Utils.sdiag(
(Ave*(self.scale * self.regmesh.vol))**0.5
) * R * self.cellDiffStencil
class Sparse(BaseComboRegularization):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\mathbf{(m-m_\\text{ref})^\\top W^\\top R^\\top R
W(m-m_\\text{ref})}
    where the IRLS weight (as computed in ``R()``) is
    .. math::
        R_{ii} = \\left(\\frac{\\eta_i}{\\big(f_{m,i}^2 + \\epsilon^2\\big)^{1 - p_i/2}}\\right)^{1/2}
    with :math:`p_i` the requested norm and :math:`\\eta_i` the scaled-IRLS factor.
    So the derivative is straightforward:
    .. math::
        \\frac{\\partial R(m)}{\\partial m} = \mathbf{W^\\top R^\\top R W (m-m_\\text{ref})}
The IRLS weights are recomputed after each beta solves.
It is strongly recommended to do a few Gauss-Newton iterations
before updating.
"""
def __init__(
self, mesh,
alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0,
**kwargs
):
objfcts = [
SparseSmall(mesh=mesh, **kwargs),
SparseDeriv(mesh=mesh, orientation='x', **kwargs)
]
if mesh.dim > 1:
objfcts.append(SparseDeriv(mesh=mesh, orientation='y', **kwargs))
if mesh.dim > 2:
objfcts.append(SparseDeriv(mesh=mesh, orientation='z', **kwargs))
super(Sparse, self).__init__(
mesh=mesh, objfcts=objfcts,
alpha_s=alpha_s, alpha_x=alpha_x, alpha_y=alpha_y, alpha_z=alpha_z,
**kwargs
)
# Utils.setKwargs(self, **kwargs)
# Properties
norms = properties.Array(
"Norms used to create the sparse regularization",
default=np.c_[2., 2., 2., 2.], shape={('*', '*')}
)
eps_p = properties.Float(
"Threshold value for the model norm", required=True
)
eps_q = properties.Float(
"Threshold value for the model gradient norm", required=True
)
model = properties.Array("current model", dtype=float)
space = properties.String(
"type of model", default='linear'
)
gradientType = properties.String(
"type of gradient", default='components'
)
    scales = properties.Array(
        "General knob for scaling",
default=np.c_[1., 1., 1., 1.], shape={('*', '*')}
)
# Give the option to scale or not
scaledIRLS = properties.Bool(
"Scale the gradients of the IRLS norms",
default=True
)
# Save the l2 result during the IRLS
l2model = None
@properties.validator('norms')
def _validate_norms(self, change):
if change['value'].shape[0] == 1:
change['value'] = np.kron(
np.ones((self.regmesh.Pac.shape[1], 1)), change['value']
)
elif change['value'].shape[0] > 1:
assert change['value'].shape[0] == self.regmesh.Pac.shape[1], (
"Vector of norms must be the size"
" of active model parameters ({})"
"The provided vector has length "
"{}".format(
self.regmesh.Pac.shape[0], len(change['value'])
)
)
# Observers
@properties.observer('norms')
def _mirror_norms_to_objfcts(self, change):
self.objfcts[0].norm = change['value'][:, 0]
for i, objfct in enumerate(self.objfcts[1:]):
Ave = getattr(objfct.regmesh, 'aveCC2F{}'.format(objfct.orientation))
objfct.norm = Ave*change['value'][:, i+1]
@properties.observer('model')
def _mirror_model_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.model = change['value']
@properties.observer('eps_p')
def _mirror_eps_p_to_smallness(self, change):
for objfct in self.objfcts:
if isinstance(objfct, SparseSmall):
objfct.epsilon = change['value']
@properties.observer('eps_q')
def _mirror_eps_q_to_derivs(self, change):
for objfct in self.objfcts:
if isinstance(objfct, SparseDeriv):
objfct.epsilon = change['value']
@properties.observer('space')
def _mirror_space_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.space = change['value']
@properties.observer('gradientType')
def _mirror_gradientType_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.gradientType = change['value']
@properties.observer('scaledIRLS')
def _mirror_scaledIRLS_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.scaledIRLS = change['value']
@properties.validator('scales')
def _validate_scales(self, change):
if change['value'].shape[0] == 1:
change['value'] = np.kron(
np.ones((self.regmesh.Pac.shape[1], 1)), change['value']
)
elif change['value'].shape[0] > 1:
assert change['value'].shape[0] == self.regmesh.Pac.shape[1], (
"Vector of scales must be the size"
" of active model parameters ({})"
"The provided vector has length "
"{}".format(
self.regmesh.Pac.shape[0], len(change['value'])
)
)
# Observers
@properties.observer('scales')
def _mirror_scale_to_objfcts(self, change):
for i, objfct in enumerate(self.objfcts):
objfct.scale = change['value'][:, i]
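# Minimal usage sketch (illustrative, not part of this module): given a SimPEG tensor
# mesh `mesh`, a sparse regularization approximating an l0 norm on the smallness term
# and l1 norms on the spatial gradients could be configured as
#   reg = Sparse(mesh)
#   reg.norms = np.c_[0., 1., 1., 1.]
#   reg.eps_p, reg.eps_q = 1e-2, 1e-2
# The IRLS weights are then recomputed after each beta solve, as noted in the class docstring.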
|
import numpy as np
_DEFAULT_ALPHA = 2.
_MASS_UNIT = "Msol h**-1"
def Okamoto_Mc_fn():
from seren3 import config
from scipy import interpolate
# from seren3.analysis.interpolate import extrap1d
fname = "%s/Mc_Okamoto08.txt" % config.get('data', 'data_dir')
data = np.loadtxt(fname)
ok_a, ok_z, ok_Mc = data.T
#ok_1_plus_z, ok_Mc = data.T
#ok_z = ok_1_plus_z - 1.
fn = interpolate.interp1d(ok_z, np.log10(ok_Mc), fill_value="extrapolate")
return lambda z: 10**fn(z)
def interp_Okamoto_Mc(z):
fn = Okamoto_Mc_fn()
return fn(z)
# fn = interpolate.InterpolatedUnivariateSpline(ok_z, np.log10(ok_Mc), k=1) # interpolate on log mass
# return 10**fn(z)
def compute_fb(context, return_stats=False, mass_unit="Msol h**-1"):
'''
Computes the baryon fraction for this container
'''
part_dset = context.p[["id", "mass", "epoch"]].flatten()
ix_dm = np.where(np.logical_and( part_dset["id"] > 0., part_dset["epoch"] == 0 )) # index of dm particles
ix_stars = np.where( np.logical_and( part_dset["id"] > 0., part_dset["epoch"] != 0 ) ) # index of star particles
gas_dset = context.g["mass"].flatten()
part_mass_tot = part_dset["mass"].in_units(mass_unit).sum()
star_mass_tot = part_dset["mass"].in_units(mass_unit)[ix_stars].sum()
gas_mass_tot = gas_dset["mass"].in_units(mass_unit).sum()
tot_mass = part_mass_tot + gas_mass_tot
fb = (gas_mass_tot + star_mass_tot)/tot_mass
if return_stats:
return fb, tot_mass, len(gas_dset["mass"]), len(part_dset["mass"][ix_dm])
return fb, tot_mass
def gnedin_fitting_func(Mh, Mc, alpha=_DEFAULT_ALPHA, **cosmo):
f_bar = cosmo["omega_b_0"] / cosmo["omega_M_0"]
return f_bar * (1 + (2**(alpha/3.) - 1) * (Mh/Mc)**(-alpha))**(-3./alpha)
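# Note: at Mh == Mc the fitting function above reduces to f_bar / 2, i.e. Mc is the halo
# mass at which the baryon fraction drops to half the cosmic mean; the Mc_guess heuristic
# in fit() below relies on exactly this property.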
def lmfit_gnedin_fitting_func(params, mass, data, **cosmo):
# For use with the lmfit module
Mc = params["Mc"].value
alpha = params["alpha"].value
f_bar = cosmo["omega_b_0"] / cosmo["omega_M_0"]
model = f_bar * (1 + (2**(alpha/3.) - 1) * (mass/Mc)**(-alpha))**(-3./alpha)
return model - data # what we want to minimise
def fit(mass, fb, fix_alpha, use_lmfit=True, alpha=_DEFAULT_ALPHA, **cosmo):
import scipy.optimize as optimization
# Make an initial guess at Mc
cosmic_mean_b = cosmo["omega_b_0"] / cosmo["omega_M_0"]
fb_cosmic_mean = fb/cosmic_mean_b
idx_Mc_guess = np.abs( fb_cosmic_mean - 0.5 ).argmin()
Mc_guess = mass[idx_Mc_guess]
p0 = [Mc_guess]
if fix_alpha is False:
alpha_guess = alpha
p0.append(alpha_guess)
if use_lmfit:
# Alternative least-squares fitting routine
from lmfit import minimize, Parameters
fit_params = Parameters()
fit_params.add("Mc", value=p0[0], min=0.)
fit_params.add("alpha", value=alpha, vary=np.logical_not(fix_alpha), min=0.)
# print fit_params
result = minimize( lmfit_gnedin_fitting_func, fit_params, args=(mass, fb), kws=cosmo)
if result.success:
Mc_res = result.params['Mc']
alpha_res = result.params['alpha']
return {"Mc" : {"fit" : Mc_res.value, "stderr" : Mc_res.stderr},\
"alpha" : {"fit" : alpha_res.value, "stderr" : alpha_res.stderr}}
else:
raise Exception("Could not fit params: %s" % result.message)
else:
# Curve fit
fn = lambda *args: gnedin_fitting_func(*args, **cosmo)
popt, pcov = optimization.curve_fit( fn, mass, fb, p0=p0, maxfev=1000 )
# Fit
Mc_fit = popt[0]
# Errors
sigma_Mc = np.sqrt(pcov[0,0])
        print("Mc = ", Mc_fit)
if fix_alpha:
return {"Mc" : {"fit" : Mc_fit, "sigma" : sigma_Mc},\
"alpha" : {"fit" : alpha, "sigma" : None}}
else:
alpha_fit = popt[1]
            print("alpha = ", alpha_fit)
sigma_alpha = np.sqrt(pcov[1,1])
# correlation between Mc and alpha
corr = pcov[0,1] / (sigma_Mc * sigma_alpha)
# print 'corr = ', corr
return {"Mc" : {"fit" : Mc_fit, "sigma" : sigma_Mc},\
"alpha" : {"fit" : alpha_fit, "sigma" : sigma_alpha},\
"corr" : corr}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
import time
pygame.init()
screen = pygame.display.set_mode((1000, 1000))
level_number = 1
number_of_levels = 2
font = pygame.font.SysFont("comicsans", 60)
small_font = pygame.font.SysFont("comicsans", 30)
boss_defeated_msg = font.render("Congratulations! You Won!", False, (0, 0, 0))
treasure_found_msg = font.render("Yeah! You just found a treasure!", False, (0, 0, 0))
level_number_msg = font.render("Level : {}".format(level_number), False, (255, 255, 255))
finished = False
leaving_button = False
treasure_found = False
boss_fight_started = False
boss_life_points = 20
boss_alive = True
player_position_x = 65
player_position_y = 650
opponent_position_x = 770
opponent_position_y = 600
opponent_moving_right = True
opponent_moving_left = False
treasure_position_x = 450
treasure_position_y = 560
boss_defeated_msg_position_x = 250
boss_defeated_msg_position_y = 500
level_reached_number_msg_position_x =250
level_reached_number_msg_position_y = 600
treasure_found_msg_position_x = 250
treasure_found_msg_position_y = 300
player_profil_img = pygame.image.load("player.png")
player_profil_img = pygame.transform.scale(player_profil_img, (80, 80)) #scale it
player_profil_img = player_profil_img.convert_alpha()
opponent_profil_img = pygame.image.load("opponent.png")
opponent_profil_img = pygame.transform.scale(opponent_profil_img, (80, 80))
opponent_profil_img = opponent_profil_img.convert_alpha()
treasure_img = pygame.image.load("treasure.jpeg")
treasure_img = pygame.transform.scale(treasure_img, (60 ,60))
treasure_img = treasure_img.convert_alpha()
background_img = pygame.image.load("background.jpeg")
background_img = pygame.transform.scale(background_img, (1000, 1000))
screen.blit(background_img, (0, 0))
screen.blit(treasure_img, (treasure_position_x, treasure_position_y))
white = (255, 255, 255)
frame = pygame.time.Clock()
def put_player_back():
global player_position_x
global player_position_y
player_position_x = 65
player_position_y = 650
def update_screen():
# Background
screen.blit(background_img, (0, 0))
# Level Number Message
screen.blit(level_number_msg, (0, 0))
while not leaving_button and not finished:
for event in pygame.event.get():
if event.type == pygame.QUIT:
leaving_button = True
pressed_keys = pygame.key.get_pressed()
if pressed_keys[pygame.K_ESCAPE] == 1:
leaving_button = True
if pressed_keys[pygame.K_UP] == 1:
player_position_y -= 5
if pressed_keys[pygame.K_DOWN] == 1:
player_position_y += 5
if pressed_keys[pygame.K_LEFT] == 1:
player_position_x -= 5
if pressed_keys[pygame.K_RIGHT] == 1:
player_position_x += 5
# Boss move
if opponent_position_x >= 770:
opponent_moving_right = True
opponent_moving_left = False
if opponent_position_x <= 730:
opponent_moving_left = True
opponent_moving_right = False
if opponent_moving_right:
if level_number == 1:
opponent_position_x -= 1
elif level_number == 2:
opponent_position_x -= 2
elif level_number == 3:
opponent_position_x -= 3
if opponent_moving_left:
if level_number == 1:
opponent_position_x += 1
elif level_number == 2:
opponent_position_x += 2
elif level_number == 3:
opponent_position_x += 3
# Treasure
if abs(player_position_x - treasure_position_x) < 60 and abs(player_position_y - treasure_position_y) < 60 and not treasure_found:
print("Player found a treasure !")
screen.blit(treasure_found_msg, (treasure_found_msg_position_x, treasure_found_msg_position_y))
pygame.display.flip()
frame.tick(1)
treasure_found = True
    if abs(player_position_x - treasure_position_x) > 60 or abs(player_position_y - treasure_position_y) > 60: # square hitbox check: more than 60 px from the treasure on either axis
print("Distance x : {}, Distance y : {}".format(player_position_x - treasure_position_x, player_position_y - treasure_position_y))
# Boss
    if abs(player_position_x - opponent_position_x) < 81 and abs(player_position_y - opponent_position_y) < 81: # square hitbox check: within 81 px of the boss on both axes
print("Player is fighting the Boss !")
boss_life_points -= 1
print("Boss life : {}".format(boss_life_points))
# Make hero takes damages
player_position_x -= 10
player_position_y -= 10
if boss_life_points <= 0:
boss_alive = False
update_screen()
if not treasure_found:
screen.blit(treasure_img, (treasure_position_x, treasure_position_y))
if boss_alive:
screen.blit(opponent_profil_img, (opponent_position_x, opponent_position_y))
screen.blit(player_profil_img, (player_position_x, player_position_y))
if not boss_alive:
put_player_back()
treasure_found = False
boss_alive = True
boss_life_points = 20
level_number += 1
level_number_msg = font.render("Level : {}".format(level_number), False, (255, 255, 255))
level_reached_number_msg = font.render("You've reached the level number {}".format(level_number), False, (0, 0, 0))
screen.fill(white)
screen.blit(boss_defeated_msg, (boss_defeated_msg_position_x, boss_defeated_msg_position_y))
screen.blit(level_reached_number_msg, (level_reached_number_msg_position_x, level_reached_number_msg_position_y))
pygame.display.flip()
frame.tick(5)
time.sleep(3)
if level_number > number_of_levels:
finished = True
# Update the display
pygame.display.flip()
# 1/30 of a frame
frame.tick(30)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2015 Juniper Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, eNovance.
import os
import re
import urllib
from collections import OrderedDict
import sys
import cgitb
import cStringIO
import logging
# Masking of password from openstack/common/log.py
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', '<PASSWORD>']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : '<PASSWORD>'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : '<PASSWORD>'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "<PASSWORD>"')
'"password" : "***"'
>>> mask_password("'original_password' : '<PASSWORD>'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'<PASSWORD>'")
"u'original_password' : u'***'"
"""
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
# end mask_password
def cgitb_hook(info=None, **kwargs):
buf = sys.stdout
if 'file' in kwargs:
buf = kwargs['file']
local_buf = cStringIO.StringIO()
kwargs['file'] = local_buf
cgitb.Hook(**kwargs).handle(info or sys.exc_info())
doc = local_buf.getvalue()
local_buf.close()
buf.write(mask_password(doc))
buf.flush()
# end cgitb_hook
def detailed_traceback():
buf = cStringIO.StringIO()
cgitb_hook(format="text", file=buf)
tb_txt = buf.getvalue()
buf.close()
return tb_txt
# end detailed_traceback
def encode_string(enc_str, encoding='utf-8'):
"""Encode the string using urllib.quote_plus
Eg. @input:
        enc_str = 'netéù'
type - 'unicode' or 'str'
@retval
enc_str = 'net%C3%A9%C3%B9'
type - str
"""
try:
enc_str.encode()
except (UnicodeDecodeError, UnicodeEncodeError):
if type(enc_str) is unicode:
enc_str = enc_str.encode(encoding)
enc_str = urllib.quote_plus(enc_str)
except Exception:
pass
return enc_str
def decode_string(dec_str, encoding='utf-8'):
"""Decode the string previously encoded using urllib.quote_plus.
Eg. If dec_str = 'net%C3%A9%C3%B9'
type - 'unicode' or 'str'
@retval
        ret_dec_str = 'netéù'
type - unicode
"""
ret_dec_str = dec_str
try:
if type(ret_dec_str) is unicode:
ret_dec_str = str(ret_dec_str)
ret_dec_str = urllib.unquote_plus(ret_dec_str)
return ret_dec_str.decode(encoding)
except Exception:
return dec_str
class CacheContainer(object):
def __init__(self, size):
self.container_size = size
self.dictionary = OrderedDict()
def __getitem__(self, key, default=None):
value = self.dictionary[key]
# item accessed - put it in the front
del self.dictionary[key]
self.dictionary[key] = value
return value
def __setitem__(self, key, value):
self.dictionary[key] = value
if len(self.dictionary.keys()) > self.container_size:
# container is full, loose the least used item
self.dictionary.popitem(last=False)
def __contains__(self, key):
return key in self.dictionary
def __repr__(self):
return str(self.dictionary)
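# Illustrative behaviour of CacheContainer (values are examples): it acts as a small
# LRU cache backed by an OrderedDict.
#   cache = CacheContainer(2)
#   cache['a'] = 1; cache['b'] = 2
#   _ = cache['a']      # 'a' becomes the most recently used entry
#   cache['c'] = 3      # evicts 'b', the least recently used entry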
def CamelCase(input):
words = input.replace('_', '-').split('-')
name = ''
for w in words:
name += w.capitalize()
return name
# end CamelCase
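# e.g. CamelCase('virtual_network') and CamelCase('virtual-network') both return
# 'VirtualNetwork' (input values are illustrative).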
def str_to_class(class_name, module_name):
try:
return reduce(getattr, class_name.split("."), sys.modules[module_name])
except Exception as e:
logger = logging.getLogger(module_name)
logger.warn("Exception: %s", str(e))
return None
# end str_to_class
def obj_type_to_vnc_class(obj_type, module_name):
return str_to_class(CamelCase(obj_type), module_name)
# end obj_type_to_vnc_class
def getCertKeyCaBundle(bundle, certs):
if os.path.isfile(bundle):
# Check if bundle needs to be replaced if
# constituent files were updated
bundle_is_stale = False
bundle_mod_time = os.path.getmtime(bundle)
for cert in certs:
if os.path.getmtime(cert) > bundle_mod_time:
bundle_is_stale = True
break
if not bundle_is_stale:
return bundle
with open(bundle, 'w') as ofile:
for cert in certs:
with open(cert) as ifile:
for line in ifile:
ofile.write(line)
os.chmod(bundle,0o777)
return bundle
# end CreateCertKeyCaBundle
|