code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os
import time
import requests
from tweepy.parsers import JSONParser
from tweepy.error import TweepError, RateLimitError, is_rate_limit_error_message
from tweepy.models import Status
MEDIA_ENDPOINT_URL = 'https://upload.twitter.com/1.1/media/upload.json'
POST_TWEET_URL = 'https://api.twitter.com/1.1/statuses/update.json'
class VideoTweet(object):
    """Upload a video in chunks and publish it as a tweet via Twitter's v1.1 API."""

    def __init__(self, api):
        # Reuse the tweepy API object's OAuth handler for the raw HTTP requests
        # this class makes against the media-upload endpoint.
        self.api = api
        self.oauth = self.api.auth.apply_auth()
def post(self, url, data=None, json=None, **kwargs):
    """POST to the Twitter API with OAuth, raising tweepy errors on non-2xx replies.

    Returns the ``requests`` response on success (2xx or no status code).
    """
    kwargs['auth'] = self.oauth
    resp = requests.post(url=url, data=data, json=json, **kwargs)
    status = resp.status_code
    # Success path first (guard clause): no status code, or 2xx.
    if not status or 200 <= status < 300:
        return resp
    # Failure: try to extract Twitter's structured error payload.
    try:
        error_msg, api_error_code = JSONParser().parse_error(resp.text)
    except Exception:
        error_msg = "Twitter error response: status code = %s" % status
        api_error_code = None
    if is_rate_limit_error_message(error_msg):
        raise RateLimitError(error_msg, resp)
    raise TweepError(error_msg, resp, api_code=api_error_code)
def upload_init(self, file_path):
    """Start a chunked media-upload session; return the media_id Twitter assigns.

    :param file_path: local path of the MP4 video to upload
    """
    payload = {
        'command': 'INIT',
        'media_type': 'video/mp4',
        'total_bytes': os.path.getsize(file_path),
        'media_category': 'tweet_video',
    }
    response = self.post(url=MEDIA_ENDPOINT_URL, data=payload)
    return response.json()['media_id']
def upload_append(self, file_path, media_id):
    """Send the video in <=4 MiB APPEND chunks for the given upload session.

    :param file_path: local path of the video file
    :param media_id: session id returned by :meth:`upload_init`
    """
    segment_id = 0
    # BUG FIX: use a context manager so the file handle is closed even if a
    # chunk upload raises -- the original opened the file and never closed it.
    with open(file_path, 'rb') as video:
        while True:
            chunk = video.read(4 * 1024 * 1024)
            if not chunk:
                break
            request_data = {
                'command': 'APPEND',
                'media_id': media_id,
                'segment_index': segment_id,
            }
            self.post(url=MEDIA_ENDPOINT_URL, data=request_data,
                      files={'media': chunk})
            segment_id += 1
def check_status(self, media_id, processing_info):
    """Poll Twitter until async video processing succeeds, or raise on failure.

    :param processing_info: the 'processing_info' dict from the last response,
        or None when no server-side processing is pending.
    :raises TweepError: if Twitter reports the upload as failed.
    """
    # Iterative form of the original tail recursion: loop until the server
    # reports a terminal state or stops sending processing_info.
    while processing_info is not None:
        state = processing_info['state']
        if state == u'succeeded':
            return
        if state == u'failed':
            raise TweepError("Uploading video has failed.")
        # Twitter tells us how long to wait before asking again.
        time.sleep(processing_info['check_after_secs'])
        params = {'command': 'STATUS', 'media_id': media_id}
        response = requests.get(url=MEDIA_ENDPOINT_URL, params=params,
                                auth=self.oauth)
        processing_info = response.json().get('processing_info', None)
def upload_finalize(self, media_id):
    """Close the upload session and wait for any server-side processing to finish."""
    response = self.post(url=MEDIA_ENDPOINT_URL,
                         data={'command': 'FINALIZE', 'media_id': media_id})
    self.check_status(media_id, response.json().get('processing_info', None))
def post_tweet(self, media_id, status, in_reply_to_status_id):
    """Publish a status with the uploaded media attached.

    :returns: the tweepy ``Status`` model parsed from the API response.
    """
    fields = {
        'status': status,
        'media_ids': media_id,
        'in_reply_to_status_id': in_reply_to_status_id,
    }
    # Drop unset optional fields instead of sending nulls.
    payload = {}
    for key, value in fields.items():
        if value is not None:
            payload[key] = value
    response = self.post(url=POST_TWEET_URL, data=payload)
    return Status.parse(self.api, response.json())
def tweet(self, file_path, status, in_reply_to_status_id=None):
media_id = self.upload_init(file_path)
self.upload_append(file_path, media_id)
self.upload_finalize(media_id)
return self.post_tweet(media_id, status, in_reply_to_status_id) | docker/cpdpbot/cpdpbot/video_tweet.py | import os
import time
import requests
from tweepy.parsers import JSONParser
from tweepy.error import TweepError, RateLimitError, is_rate_limit_error_message
from tweepy.models import Status
MEDIA_ENDPOINT_URL = 'https://upload.twitter.com/1.1/media/upload.json'
POST_TWEET_URL = 'https://api.twitter.com/1.1/statuses/update.json'
class VideoTweet(object):
def __init__(self, api):
self.api = api
self.oauth = self.api.auth.apply_auth()
def post(self, url, data=None, json=None, **kwargs):
kwargs['auth'] = self.oauth
response = requests.post(url=url, data=data, json=json, **kwargs)
if response.status_code and not 200 <= response.status_code < 300:
try:
error_msg, api_error_code = \
JSONParser().parse_error(response.text)
except Exception:
error_msg = "Twitter error response: status code = %s" % response.status_code
api_error_code = None
if is_rate_limit_error_message(error_msg):
raise RateLimitError(error_msg, response)
else:
raise TweepError(error_msg, response, api_code=api_error_code)
return response
def upload_init(self, file_path):
total_bytes = os.path.getsize(file_path)
request_data = {
'command': 'INIT',
'media_type': 'video/mp4',
'total_bytes': total_bytes,
'media_category': 'tweet_video'
}
req = self.post(url=MEDIA_ENDPOINT_URL, data=request_data)
media_id = req.json()['media_id']
return media_id
def upload_append(self, file_path, media_id):
    """Send the video in <=4 MiB APPEND chunks for the given upload session.

    :param file_path: local path of the video file
    :param media_id: session id returned by :meth:`upload_init`
    """
    segment_id = 0
    # BUG FIX: use a context manager so the file handle is closed even if a
    # chunk upload raises -- the original opened the file and never closed it.
    with open(file_path, 'rb') as video:
        while True:
            chunk = video.read(4 * 1024 * 1024)
            if not chunk:
                break
            request_data = {
                'command': 'APPEND',
                'media_id': media_id,
                'segment_index': segment_id,
            }
            self.post(url=MEDIA_ENDPOINT_URL, data=request_data,
                      files={'media': chunk})
            segment_id += 1
def check_status(self, media_id, processing_info):
if processing_info is None:
return
state = processing_info['state']
if state == u'succeeded':
return
if state == u'failed':
raise TweepError("Uploading video has failed.")
check_after_secs = processing_info['check_after_secs']
time.sleep(check_after_secs)
request_params = {
'command': 'STATUS',
'media_id': media_id
}
req = requests.get(url=MEDIA_ENDPOINT_URL, params=request_params, auth=self.oauth)
processing_info = req.json().get('processing_info', None)
self.check_status(media_id, processing_info)
def upload_finalize(self, media_id):
request_data = {
'command': 'FINALIZE',
'media_id': media_id
}
req = self.post(url=MEDIA_ENDPOINT_URL, data=request_data)
processing_info = req.json().get('processing_info', None)
self.check_status(media_id, processing_info)
def post_tweet(self, media_id, status, in_reply_to_status_id):
request_data = {
'status': status,
'media_ids': media_id,
'in_reply_to_status_id': in_reply_to_status_id
}
req = self.post(
url=POST_TWEET_URL,
data={
key: val for key, val in request_data.items()
if val is not None
})
return Status.parse(self.api, req.json())
def tweet(self, file_path, status, in_reply_to_status_id=None):
media_id = self.upload_init(file_path)
self.upload_append(file_path, media_id)
self.upload_finalize(media_id)
return self.post_tweet(media_id, status, in_reply_to_status_id) | 0.282988 | 0.086787 |
from pyfiglet import Figlet
f = Figlet(font='slant')
print('Script Created by : ')
print(f.renderText('gaurav'))
sound = int(input("Do you want sound : 1 for yes , 2 for no : "))
sound = 0 if sound == 2 else 1
print('****************************************************')
if sound :
print("Sound is set to on ! \nOne beep each second will be played regularly \n3 beeps per second will be played as soon as vaccine for your age group is available")
else :
print("As no sound will be played, you need to manually keep observing the table ;)")
input("press Enter to continue")
#Imports
import requests
import time
from prettytable import PrettyTable
from playsound import playsound
# Code to clear screen
from os import system, name
from time import sleep
def clear():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
# Script starts here
# Do not change headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
res = 1
#Fetch a list of states
try:
    res = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/states', headers=headers)
except requests.exceptions.RequestException:
    # BUG FIX: a bare ``except`` also swallowed KeyboardInterrupt and real
    # programming errors; only network/HTTP failures should trigger this exit.
    print("Check your internet connection and try again !")
    exit()
# Load the state data
states =res.json()['states']
# Show a list of states along with index to user
print('ID : Name of state')
for i in states:
print(str(i['state_id']) + ' : '+ str(i['state_name']))
# ask the user to enter the index of state he wants
state_id = input('Enter the serial number of your state : ')
#Fetch a list of districts in that state
try:
    res = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/districts/' + state_id,
                       headers=headers)
except requests.exceptions.RequestException:
    # BUG FIX: narrow the bare ``except`` to network/HTTP failures only.
    print("Check your internet connection and try again !")
    exit()
# Load the districts data
districts = res.json()['districts']
# Show a list of districts to the users
for i in range(len(districts)):
print(str(i+1) + ' : ' +districts[i]['district_name'])
# Ask the user to enter the district he is in
district_id = districts[int(input('Enter the serial number of your district : ')) - 1]['district_id']
print('****************************************************')
month = input('Enter the current month in number, eg 5 for May : ')
print('****************************************************')
date = input('Enter the date of the month that you want to book : ')
# append neccessary zeros before single digits
if len(str(date)) == 1:
date = '0' + date
if len(str(month)) == 1:
month = '0' + month
# Input users age group
print('What age group you belong to : ')
print('1. 18-44')
print('2. 45+')
age_group = input('Enter your choice :')
age_group = int(age_group)
age_group = 2 if age_group == 1 else 1
show_all_info = int(input('Do you want to display info for just your age group(press 1) or all age groups(press 2) : ')) -1
def yes_or_no(inp):
    """Render a truthy value as 'YESSSS' and a falsy one as 'NO' for the table."""
    return "YESSSS" if inp else "NO"
aa = 1
# Poll the CoWIN calendar endpoint forever, redrawing the availability table
# and beeping when a slot for the user's age group opens up.
while True:
    uri = ('https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict'
           '?district_id=' + str(district_id) + '&date=' + str(date) + '-' + str(month) + '-2021')
    print(uri)
    res = requests.get(uri, headers=headers)
    if res.status_code != 200:
        print("Failed to fetch details !")
        print("Please check your Internet connectivity. If the script does not work for you email me the screenshot on <EMAIL>")
        # BUG FIX: back off before retrying -- the original `continue`
        # retried immediately, hammering the API in a tight loop.
        time.sleep(5)
        continue
    centers = res.json()['centers']
    table = PrettyTable()
    table.field_names = ['Center name', 'Number of doses available', '18+ dose available ? ', '45+ dose available ?', 'min age limit']
    play_sound = 0
    for center in centers:
        session = center['sessions'][0]
        min_age_limit = session['min_age_limit']
        available_capacity = session['available_capacity']
        vaccine_above_18 = available_capacity > 0 and min_age_limit == 18
        vaccine_above_45 = available_capacity > 0 and min_age_limit == 45
        if (vaccine_above_18 and age_group == 2) or (vaccine_above_45 and age_group == 1):
            play_sound = 1
        # BUG FIX: add one row per center. The original had two independent
        # `if ... or show_all_info` statements, so with "all age groups"
        # selected every center was listed twice.
        matches_user = ((min_age_limit == 18 and age_group == 2) or
                        (min_age_limit == 45 and age_group == 1))
        if matches_user or show_all_info:
            table.add_row([center['name'], available_capacity,
                           yes_or_no(vaccine_above_18), yes_or_no(vaccine_above_45),
                           min_age_limit])
    if sound == 1 and play_sound == 1:
        # Extra beeps signal availability for the user's age group.
        playsound('beep.mp3')
        playsound('beep.mp3')
    if sound:
        playsound('beep.mp3')
    time.sleep(0.5)
    clear()
    print(table)
#print(str(i['name']) + ' has '+ str(i['sessions'][0]['available_capacity']) + ' with minimum age limit of '+ str(i['sessions'][0]['min_age_limit'])) | script.py | from pyfiglet import Figlet
f = Figlet(font='slant')
print('Script Created by : ')
print(f.renderText('gaurav'))
sound = int(input("Do you want sound : 1 for yes , 2 for no : "))
sound = 0 if sound == 2 else 1
print('****************************************************')
if sound :
print("Sound is set to on ! \nOne beep each second will be played regularly \n3 beeps per second will be played as soon as vaccine for your age group is available")
else :
print("As no sound will be played, you need to manually keep observing the table ;)")
input("press Enter to continue")
#Imports
import requests
import time
from prettytable import PrettyTable
from playsound import playsound
# Code to clear screen
from os import system, name
from time import sleep
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
# Script starts here
# Do not change headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
res = 1
#Fetch a list of states
try :
res = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/states', headers=headers)
except :
# If request fails, stop executing the script
print("Check your internet connection and try again !")
exit()
# Load the state data
states =res.json()['states']
# Show a list of states along with index to user
print('ID : Name of state')
for i in states:
print(str(i['state_id']) + ' : '+ str(i['state_name']))
# ask the user to enter the index of state he wants
state_id = input('Enter the serial number of your state : ')
#Fetch a list of districts in that state
try :
res = requests.get('https://cdn-api.co-vin.in/api/v2/admin/location/districts/' + state_id
, headers=headers)
except :
# If request fails, stop executing the script
print("Check your internet connection and try again !")
exit()
# Load the districts data
districts = res.json()['districts']
# Show a list of districts to the users
for i in range(len(districts)):
print(str(i+1) + ' : ' +districts[i]['district_name'])
# Ask the user to enter the district he is in
district_id = districts[int(input('Enter the serial number of your district : ')) - 1]['district_id']
print('****************************************************')
month = input('Enter the current month in number, eg 5 for May : ')
print('****************************************************')
date = input('Enter the date of the month that you want to book : ')
# append neccessary zeros before single digits
if len(str(date)) == 1:
date = '0' + date
if len(str(month)) == 1:
month = '0' + month
# Input users age group
print('What age group you belong to : ')
print('1. 18-44')
print('2. 45+')
age_group = input('Enter your choice :')
age_group = int(age_group)
age_group = 2 if age_group == 1 else 1
show_all_info = int(input('Do you want to display info for just your age group(press 1) or all age groups(press 2) : ')) -1
def yes_or_no(inp):
if inp:
return "YESSSS"
else :
return "NO"
aa = 1
# Poll the CoWIN calendar endpoint forever, redrawing the availability table
# and beeping when a slot for the user's age group opens up.
while True:
    uri = ('https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict'
           '?district_id=' + str(district_id) + '&date=' + str(date) + '-' + str(month) + '-2021')
    print(uri)
    res = requests.get(uri, headers=headers)
    if res.status_code != 200:
        print("Failed to fetch details !")
        print("Please check your Internet connectivity. If the script does not work for you email me the screenshot on <EMAIL>")
        # BUG FIX: back off before retrying -- the original `continue`
        # retried immediately, hammering the API in a tight loop.
        time.sleep(5)
        continue
    centers = res.json()['centers']
    table = PrettyTable()
    table.field_names = ['Center name', 'Number of doses available', '18+ dose available ? ', '45+ dose available ?', 'min age limit']
    play_sound = 0
    for center in centers:
        session = center['sessions'][0]
        min_age_limit = session['min_age_limit']
        available_capacity = session['available_capacity']
        vaccine_above_18 = available_capacity > 0 and min_age_limit == 18
        vaccine_above_45 = available_capacity > 0 and min_age_limit == 45
        if (vaccine_above_18 and age_group == 2) or (vaccine_above_45 and age_group == 1):
            play_sound = 1
        # BUG FIX: add one row per center. The original had two independent
        # `if ... or show_all_info` statements, so with "all age groups"
        # selected every center was listed twice.
        matches_user = ((min_age_limit == 18 and age_group == 2) or
                        (min_age_limit == 45 and age_group == 1))
        if matches_user or show_all_info:
            table.add_row([center['name'], available_capacity,
                           yes_or_no(vaccine_above_18), yes_or_no(vaccine_above_45),
                           min_age_limit])
    if sound == 1 and play_sound == 1:
        # Extra beeps signal availability for the user's age group.
        playsound('beep.mp3')
        playsound('beep.mp3')
    if sound:
        playsound('beep.mp3')
    time.sleep(0.5)
    clear()
    print(table)
#print(str(i['name']) + ' has '+ str(i['sessions'][0]['available_capacity']) + ' with minimum age limit of '+ str(i['sessions'][0]['min_age_limit'])) | 0.257205 | 0.219118 |
from abc import ABC, abstractmethod
import tensorflow as tf
from cvnn import logger
import sys
from typing import Union
from cvnn.layers import t_layers_shape
class Optimizer(ABC):
    """Interface that concrete CVNN optimizers implement."""

    def __init__(self):
        pass

    def compile(self, shape: t_layers_shape) -> None:
        """Allocate per-variable optimizer state for the given model layers."""
        pass

    def optimize(self, variables, gradients):
        """Apply one update step to ``variables`` using ``gradients``."""
        pass

    def summary(self) -> str:
        """
        :returns: A one line short string with the description of the optimizer
        """
        pass

    def __deepcopy__(self, memodict=None):
        pass
class SGD(Optimizer):
    def __init__(self, learning_rate: float = 0.01, momentum: float = 0.0, name: str = 'SGD'):
        """
        Gradient descent (with momentum) optimizer.
        :param learning_rate: The learning rate. Defaults to 0.01.
        :param momentum: float hyperparameter between [0, 1) that accelerates gradient descent in the relevant
            direction and dampens oscillations. Defaults to 0, i.e., vanilla gradient descent.
        :param name: Optional name for the operations created when applying gradients. Defaults to "SGD".
        """
        self.name = name
        self.learning_rate = learning_rate
        if momentum > 1 or momentum < 0:
            logger.error("momentum must be between 1 and 0. {} was given".format(momentum))
            sys.exit(-1)
        self.momentum = momentum
        self.velocity = []       # one tf.Variable per trainable variable
        self.first_time = True   # velocity buffers not yet seeded from gradients
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument shared
        # across calls (the copy itself never uses it, but the pattern is unsafe).
        if memodict is None:
            memodict = {}
        return SGD(learning_rate=self.learning_rate, momentum=self.momentum, name=self.name)

    def summary(self) -> str:
        # BUG FIX: label previously read "SDG optimizer".
        return "SGD optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + \
               "; momentum = " + str(self.momentum) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate a zeroed velocity buffer per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.velocity.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one (momentum) SGD step: v <- m*v + (1-m)*g ; w <- w - lr*v."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                if self.first_time:
                    # Seed the velocity from the first gradients seen.
                    self.velocity.append(tf.Variable((1 - self.momentum) * gradients[i]))
                else:
                    self.velocity[i].assign(self.momentum * self.velocity[i]
                                            + (1 - self.momentum) * gradients[i])
                val.assign(val - self.learning_rate * self.velocity[i])
            self.first_time = False
class RMSprop(Optimizer):
    def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, name="RMSprop"):
        """
        Optimizer that implements the RMSprop algorithm.
        Reference: http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
        The gist of RMSprop is to:
        - Maintain a moving (discounted) average of the square of gradients
        - Divide the gradient by the root of this average
        - This implementation of RMSprop uses plain momentum, not Nesterov momentum.
        :param learning_rate: The learning rate. Defaults to 0.001.
        :param rho: Discounting factor for the history/coming gradient. Defaults to 0.9.
        :param momentum: Momentum factor applied to the update. Defaults to 0.
        :param epsilon: A small constant for numerical stability. Default 1e-07.
        :param name: Optional name for the operations created when applying gradients. Defaults to "RMSprop".
        """
        self.name = name
        self.learning_rate = learning_rate
        if rho > 1 or rho < 0:
            logger.error("rho must be between 1 and 0. {} was given".format(rho))
            sys.exit(-1)
        # BUG FIX: this second check previously re-validated ``rho``, so an
        # out-of-range momentum slipped through unchecked.
        if momentum > 1 or momentum < 0:
            logger.error("momentum must be between 1 and 0. {} was given".format(momentum))
            sys.exit(-1)
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.vdw = []   # momentum buffer per variable
        self.sdw = []   # running average of squared gradients per variable
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument.
        if memodict is None:
            memodict = {}
        return RMSprop(learning_rate=self.learning_rate, rho=self.rho, momentum=self.momentum,
                       epsilon=self.epsilon, name=self.name)

    def summary(self) -> str:
        return "RMSprop optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + " rho = " + str(self.rho) + \
               "; momentum = " + str(self.momentum) + "; epsilon = " + str(self.epsilon) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate zeroed momentum and squared-gradient buffers per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.vdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))
                self.sdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one RMSprop step to each variable."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                self.vdw[i].assign(self.momentum * self.vdw[i] + (1 - self.momentum) * gradients[i])
                self.sdw[i].assign(self.rho * self.sdw[i] + (1 - self.rho) * tf.math.square(gradients[i]))
                val.assign(val - self.learning_rate * self.vdw[i] / tf.math.sqrt(self.sdw[i] + self.epsilon))
class Adam(Optimizer):
    def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999,
                 epsilon: float = 1e-07, name="Adam"):
        """
        Optimizer that implements the Adam algorithm.
        Reference: https://arxiv.org/abs/1412.6980
        Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of
        first-order and second-order moments.
        :param learning_rate: The learning rate. Defaults to 0.001.
        :param beta_1: The exponential decay rate for the 1st moment estimates. Defaults to 0.9.
        :param beta_2: The exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
        :param epsilon: A small constant for numerical stability. Default 1e-07.
        :param name: Optional name for the operations created when applying gradients. Defaults to "Adam".
        """
        self.name = name
        self.learning_rate = learning_rate
        if beta_1 >= 1 or beta_1 < 0:
            logger.error("beta_1 must be between [0, 1). {} was given".format(beta_1))
            sys.exit(-1)
        if beta_2 >= 1 or beta_2 < 0:
            logger.error("beta_2 must be between [0, 1). {} was given".format(beta_2))
            sys.exit(-1)
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.vdw = []    # first-moment (mean) estimate per variable
        self.sdw = []    # second-moment (uncentered variance) estimate per variable
        self.iter = 1    # time step t for bias correction
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument.
        if memodict is None:
            memodict = {}
        return Adam(learning_rate=self.learning_rate, beta_1=self.beta_1, beta_2=self.beta_2,
                    epsilon=self.epsilon, name=self.name)

    def summary(self) -> str:
        # BUG FIX: label previously read "RMSprop optimizer".
        return "Adam optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + " beta_1 = " + str(self.beta_1) + \
               "; beta_2 = " + str(self.beta_2) + "; epsilon = " + str(self.epsilon) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate zeroed first/second-moment buffers per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.vdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))
                self.sdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one bias-corrected Adam step to each variable."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                self.vdw[i].assign(tf.add(
                    tf.scalar_mul(self.beta_1, self.vdw[i]),
                    tf.scalar_mul(1 - self.beta_1, gradients[i])))
                self.sdw[i].assign(tf.add(
                    tf.scalar_mul(self.beta_2, self.sdw[i]),
                    tf.scalar_mul(1 - self.beta_2, tf.math.square(gradients[i]))))
                # BUG FIX: the bias-correction denominator is 1 - beta^t
                # (Kingma & Ba, Algorithm 1), not (1 - beta)^t as before --
                # the old form divided by a vanishingly small number and
                # blew up the corrected moments.
                vdw_corr = tf.math.divide(self.vdw[i], 1 - tf.math.pow(self.beta_1, self.iter))
                sdw_corr = tf.math.divide(self.sdw[i], 1 - tf.math.pow(self.beta_2, self.iter))
                val.assign(val - self.learning_rate * vdw_corr / (tf.math.sqrt(sdw_corr) + self.epsilon))
            self.iter += 1
t_optimizer = Union[str, Optimizer]
def get_optimizer(optimizer: t_optimizer) -> Optimizer:
    """Return ``optimizer`` itself if it is already an Optimizer, else look up a name.

    :param optimizer: an Optimizer instance or a case-insensitive name such as 'adam'
    :raises SystemExit: if the string is not a known optimizer name
    """
    if isinstance(optimizer, Optimizer):
        return optimizer
    elif isinstance(optimizer, str):
        try:
            # TODO: For the moment is not possible to give parameters to constructors
            return opt_dispatcher[optimizer.lower()]
        except KeyError:
            # BUG FIX: the original passed a bare generator expression into
            # logger.warning, so the generator object -- not the optimizer
            # names -- was logged.
            logger.warning(str(optimizer) + " is not a known optimizer. Known optimizers: "
                           + ", ".join(opt_dispatcher.keys()))
            sys.exit(-1)
opt_dispatcher = {
'sgd': SGD(),
'rmsprop': RMSprop(),
'adam': Adam(),
} | cvnn/optimizers.py | from abc import ABC, abstractmethod
import tensorflow as tf
from cvnn import logger
import sys
from typing import Union
from cvnn.layers import t_layers_shape
class Optimizer(ABC):
def __init__(self):
pass
def compile(self, shape: t_layers_shape) -> None:
pass
def optimize(self, variables, gradients):
pass
def summary(self) -> str:
"""
:returns: A one line short string with the description of the optimizer
"""
pass
def __deepcopy__(self, memodict=None):
pass
class SGD(Optimizer):
    def __init__(self, learning_rate: float = 0.01, momentum: float = 0.0, name: str = 'SGD'):
        """
        Gradient descent (with momentum) optimizer.
        :param learning_rate: The learning rate. Defaults to 0.01.
        :param momentum: float hyperparameter between [0, 1) that accelerates gradient descent in the relevant
            direction and dampens oscillations. Defaults to 0, i.e., vanilla gradient descent.
        :param name: Optional name for the operations created when applying gradients. Defaults to "SGD".
        """
        self.name = name
        self.learning_rate = learning_rate
        if momentum > 1 or momentum < 0:
            logger.error("momentum must be between 1 and 0. {} was given".format(momentum))
            sys.exit(-1)
        self.momentum = momentum
        self.velocity = []       # one tf.Variable per trainable variable
        self.first_time = True   # velocity buffers not yet seeded from gradients
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument shared
        # across calls (the copy itself never uses it, but the pattern is unsafe).
        if memodict is None:
            memodict = {}
        return SGD(learning_rate=self.learning_rate, momentum=self.momentum, name=self.name)

    def summary(self) -> str:
        # BUG FIX: label previously read "SDG optimizer".
        return "SGD optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + \
               "; momentum = " + str(self.momentum) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate a zeroed velocity buffer per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.velocity.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one (momentum) SGD step: v <- m*v + (1-m)*g ; w <- w - lr*v."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                if self.first_time:
                    # Seed the velocity from the first gradients seen.
                    self.velocity.append(tf.Variable((1 - self.momentum) * gradients[i]))
                else:
                    self.velocity[i].assign(self.momentum * self.velocity[i]
                                            + (1 - self.momentum) * gradients[i])
                val.assign(val - self.learning_rate * self.velocity[i])
            self.first_time = False
class RMSprop(Optimizer):
    def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.0, epsilon=1e-07, name="RMSprop"):
        """
        Optimizer that implements the RMSprop algorithm.
        Reference: http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
        The gist of RMSprop is to:
        - Maintain a moving (discounted) average of the square of gradients
        - Divide the gradient by the root of this average
        - This implementation of RMSprop uses plain momentum, not Nesterov momentum.
        :param learning_rate: The learning rate. Defaults to 0.001.
        :param rho: Discounting factor for the history/coming gradient. Defaults to 0.9.
        :param momentum: Momentum factor applied to the update. Defaults to 0.
        :param epsilon: A small constant for numerical stability. Default 1e-07.
        :param name: Optional name for the operations created when applying gradients. Defaults to "RMSprop".
        """
        self.name = name
        self.learning_rate = learning_rate
        if rho > 1 or rho < 0:
            logger.error("rho must be between 1 and 0. {} was given".format(rho))
            sys.exit(-1)
        # BUG FIX: this second check previously re-validated ``rho``, so an
        # out-of-range momentum slipped through unchecked.
        if momentum > 1 or momentum < 0:
            logger.error("momentum must be between 1 and 0. {} was given".format(momentum))
            sys.exit(-1)
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.vdw = []   # momentum buffer per variable
        self.sdw = []   # running average of squared gradients per variable
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument.
        if memodict is None:
            memodict = {}
        return RMSprop(learning_rate=self.learning_rate, rho=self.rho, momentum=self.momentum,
                       epsilon=self.epsilon, name=self.name)

    def summary(self) -> str:
        return "RMSprop optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + " rho = " + str(self.rho) + \
               "; momentum = " + str(self.momentum) + "; epsilon = " + str(self.epsilon) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate zeroed momentum and squared-gradient buffers per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.vdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))
                self.sdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one RMSprop step to each variable."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                self.vdw[i].assign(self.momentum * self.vdw[i] + (1 - self.momentum) * gradients[i])
                self.sdw[i].assign(self.rho * self.sdw[i] + (1 - self.rho) * tf.math.square(gradients[i]))
                val.assign(val - self.learning_rate * self.vdw[i] / tf.math.sqrt(self.sdw[i] + self.epsilon))
class Adam(Optimizer):
    def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999,
                 epsilon: float = 1e-07, name="Adam"):
        """
        Optimizer that implements the Adam algorithm.
        Reference: https://arxiv.org/abs/1412.6980
        Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of
        first-order and second-order moments.
        :param learning_rate: The learning rate. Defaults to 0.001.
        :param beta_1: The exponential decay rate for the 1st moment estimates. Defaults to 0.9.
        :param beta_2: The exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
        :param epsilon: A small constant for numerical stability. Default 1e-07.
        :param name: Optional name for the operations created when applying gradients. Defaults to "Adam".
        """
        self.name = name
        self.learning_rate = learning_rate
        if beta_1 >= 1 or beta_1 < 0:
            logger.error("beta_1 must be between [0, 1). {} was given".format(beta_1))
            sys.exit(-1)
        if beta_2 >= 1 or beta_2 < 0:
            logger.error("beta_2 must be between [0, 1). {} was given".format(beta_2))
            sys.exit(-1)
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.vdw = []    # first-moment (mean) estimate per variable
        self.sdw = []    # second-moment (uncentered variance) estimate per variable
        self.iter = 1    # time step t for bias correction
        super().__init__()

    def __deepcopy__(self, memodict=None):
        # BUG FIX: was ``memodict={}`` -- a mutable default argument.
        if memodict is None:
            memodict = {}
        return Adam(learning_rate=self.learning_rate, beta_1=self.beta_1, beta_2=self.beta_2,
                    epsilon=self.epsilon, name=self.name)

    def summary(self) -> str:
        # BUG FIX: label previously read "RMSprop optimizer".
        return "Adam optimizer " + self.name + \
               ": learning rate = " + str(self.learning_rate) + " beta_1 = " + str(self.beta_1) + \
               "; beta_2 = " + str(self.beta_2) + "; epsilon = " + str(self.epsilon) + "\n"

    def compile(self, shape: t_layers_shape) -> None:
        """Pre-allocate zeroed first/second-moment buffers per trainable variable."""
        for layer in shape:
            for elem in layer.trainable_variables():
                self.vdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))
                self.sdw.append(tf.Variable(tf.zeros(elem.shape, dtype=layer.get_input_dtype())))

    def optimize(self, variables, gradients):
        """Apply one bias-corrected Adam step to each variable."""
        with tf.name_scope(self.name):
            for i, val in enumerate(variables):
                self.vdw[i].assign(tf.add(
                    tf.scalar_mul(self.beta_1, self.vdw[i]),
                    tf.scalar_mul(1 - self.beta_1, gradients[i])))
                self.sdw[i].assign(tf.add(
                    tf.scalar_mul(self.beta_2, self.sdw[i]),
                    tf.scalar_mul(1 - self.beta_2, tf.math.square(gradients[i]))))
                # BUG FIX: the bias-correction denominator is 1 - beta^t
                # (Kingma & Ba, Algorithm 1), not (1 - beta)^t as before --
                # the old form divided by a vanishingly small number and
                # blew up the corrected moments.
                vdw_corr = tf.math.divide(self.vdw[i], 1 - tf.math.pow(self.beta_1, self.iter))
                sdw_corr = tf.math.divide(self.sdw[i], 1 - tf.math.pow(self.beta_2, self.iter))
                val.assign(val - self.learning_rate * vdw_corr / (tf.math.sqrt(sdw_corr) + self.epsilon))
            self.iter += 1
t_optimizer = Union[str, Optimizer]
def get_optimizer(optimizer: t_optimizer) -> Optimizer:
    """Return ``optimizer`` itself if it is already an Optimizer, else look up a name.

    :param optimizer: an Optimizer instance or a case-insensitive name such as 'adam'
    :raises SystemExit: if the string is not a known optimizer name
    """
    if isinstance(optimizer, Optimizer):
        return optimizer
    elif isinstance(optimizer, str):
        try:
            # TODO: For the moment is not possible to give parameters to constructors
            return opt_dispatcher[optimizer.lower()]
        except KeyError:
            # BUG FIX: the original passed a bare generator expression into
            # logger.warning, so the generator object -- not the optimizer
            # names -- was logged.
            logger.warning(str(optimizer) + " is not a known optimizer. Known optimizers: "
                           + ", ".join(opt_dispatcher.keys()))
            sys.exit(-1)
opt_dispatcher = {
'sgd': SGD(),
'rmsprop': RMSprop(),
'adam': Adam(),
} | 0.834171 | 0.468243 |
from smbus2 import SMBus, i2c_msg
_ADS1X15_DEFAULT_ADDRESS = 0x48
_ADS1X15_POINTER_CONVERSION = 0x00
_ADS1X15_POINTER_CONFIG = 0x01
_ADS1X15_CONFIG_OS_SINGLE = 0x8000
_ADS1X15_CONFIG_MUX_OFFSET = 12
_ADS1X15_CONFIG_COMP_QUE_DISABLE = 0x0003
_ADS1X15_CONFIG_GAIN = {
2 / 3: 0x0000,
1: 0x0200,
2: 0x0400,
4: 0x0600,
8: 0x0800,
16: 0x0A00
}
class Mode:
"""An enum-like class representing possible ADC operating modes."""
# See datasheet "Operating Modes" section
# values here are masks for setting MODE bit in Config Register
CONTINUOUS = 0x0000
SINGLE = 0x0100
class ADS1x15(object):
"""Base functionality for ADS1x15 analog to digital converters."""
def __init__(self, address=_ADS1X15_DEFAULT_ADDRESS,
gain=1,
data_rate=None,
mode=Mode.SINGLE
):
self._last_pin_read = None
self.buf = bytearray(3)
self._data_rate = self._gain = self._mode = None
self.gain = gain
self.data_rate = self._data_rate_default() if data_rate is None else data_rate
self.mode = mode
self.address = address
# -----Open I2C interface:
# self.bus = SMBus(0) # Rev 1 Pi uses 0
self.bus = SMBus(1) # Rev 2 Pi uses 1
@property
def data_rate(self):
"""The data rate for ADC conversion in samples per second."""
return self._data_rate
@data_rate.setter
def data_rate(self, rate):
possible_rates = self.rates
if rate not in possible_rates:
raise ValueError("Data rate must be one of: {}".format(possible_rates))
self._data_rate = rate
@property
def rates(self):
"""Possible data rate settings."""
raise NotImplementedError('Subclass must implement rates property.')
@property
def rate_config(self):
"""Rate configuration masks."""
raise NotImplementedError('Subclass must implement rate_config property.')
@property
def gain(self):
"""The ADC gain."""
return self._gain
@gain.setter
def gain(self, gain):
possible_gains = self.gains
if gain not in possible_gains:
raise ValueError("Gain must be one of: {}".format(possible_gains))
self._gain = gain
@property
def gains(self):
"""Possible gain settings."""
g = list(_ADS1X15_CONFIG_GAIN.keys())
g.sort()
return g
@property
def mode(self):
"""The ADC conversion mode."""
return self._mode
@mode.setter
def mode(self, mode):
if mode != Mode.CONTINUOUS and mode != Mode.SINGLE:
raise ValueError("Unsupported mode.")
self._mode = mode
def read(self, pin, is_differential=False):
"""I2C Interface for ADS1x15-based ADCs reads.
params:
:param pin: individual or differential pin.
:param bool is_differential: single-ended or differential read.
"""
pin = pin if is_differential else pin + 0x04
return self._read(pin)
def _data_rate_default(self):
"""Retrieve the default data rate for this ADC (in samples per second).
Should be implemented by subclasses.
"""
raise NotImplementedError('Subclasses must implement _data_rate_default!')
def _conversion_value(self, raw_adc):
"""Subclasses should override this function that takes the 16 raw ADC
values of a conversion result and returns a signed integer value.
"""
raise NotImplementedError('Subclass must implement _conversion_value function!')
def _read(self, pin):
"""Perform an ADC read. Returns the signed integer result of the read."""
if self.mode == Mode.CONTINUOUS and self._last_pin_read == pin:
return self._conversion_value(self.get_last_result(True))
else:
self._last_pin_read = pin
config = _ADS1X15_CONFIG_OS_SINGLE
config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET
config |= _ADS1X15_CONFIG_GAIN[self.gain]
config |= self.mode
config |= self.rate_config[self.data_rate]
config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE
self._write_register(_ADS1X15_POINTER_CONFIG, config)
if self.mode == Mode.SINGLE:
while not self._conversion_complete():
pass
return self._conversion_value(self.get_last_result(False))
def _conversion_complete(self):
"""Return status of ADC conversion."""
# OS is bit 15
# OS = 0: Device is currently performing a conversion
# OS = 1: Device is not currently performing a conversion
return self._read_register(_ADS1X15_POINTER_CONFIG) & 0x8000
def get_last_result(self, fast=False):
"""Read the last conversion result when in continuous conversion mode.
Will return a signed integer value. If fast is True, the register
pointer is not updated as part of the read. This reduces I2C traffic
and increases possible read rate.
"""
return self._read_register(_ADS1X15_POINTER_CONVERSION, fast)
def _write_register(self, reg, value):
"""Write 16 bit value to register."""
self.buf[0] = reg
self.buf[1] = (value >> 8) & 0xFF
self.buf[2] = value & 0xFF
# Write some bytes to address
msg = i2c_msg.write(self.address, [self.buf[0], self.buf[1], self.buf[2]])
self.bus.i2c_rdwr(msg)
def _read_register(self, reg, fast=False):
"""Read 16 bit register value. If fast is True, the pointer register
is not updated.
"""
if fast:
self.buf = self.bus.read_i2c_block_data(80, 0, 2) # read 16 bit (2 byte of data)
else:
write = i2c_msg.write(self.address, [reg])
read = i2c_msg.read(self.address, 2)
self.bus.i2c_rdwr(write, read)
return ord(read.buf[0]) << 8 | ord(read.buf[1]) | components/ADC_LCD/ads1115/ads1x15.py | from smbus2 import SMBus, i2c_msg
_ADS1X15_DEFAULT_ADDRESS = 0x48
_ADS1X15_POINTER_CONVERSION = 0x00
_ADS1X15_POINTER_CONFIG = 0x01
_ADS1X15_CONFIG_OS_SINGLE = 0x8000
_ADS1X15_CONFIG_MUX_OFFSET = 12
_ADS1X15_CONFIG_COMP_QUE_DISABLE = 0x0003
_ADS1X15_CONFIG_GAIN = {
2 / 3: 0x0000,
1: 0x0200,
2: 0x0400,
4: 0x0600,
8: 0x0800,
16: 0x0A00
}
class Mode:
"""An enum-like class representing possible ADC operating modes."""
# See datasheet "Operating Modes" section
# values here are masks for setting MODE bit in Config Register
CONTINUOUS = 0x0000
SINGLE = 0x0100
class ADS1x15(object):
"""Base functionality for ADS1x15 analog to digital converters."""
def __init__(self, address=_ADS1X15_DEFAULT_ADDRESS,
gain=1,
data_rate=None,
mode=Mode.SINGLE
):
self._last_pin_read = None
self.buf = bytearray(3)
self._data_rate = self._gain = self._mode = None
self.gain = gain
self.data_rate = self._data_rate_default() if data_rate is None else data_rate
self.mode = mode
self.address = address
# -----Open I2C interface:
# self.bus = SMBus(0) # Rev 1 Pi uses 0
self.bus = SMBus(1) # Rev 2 Pi uses 1
@property
def data_rate(self):
"""The data rate for ADC conversion in samples per second."""
return self._data_rate
@data_rate.setter
def data_rate(self, rate):
possible_rates = self.rates
if rate not in possible_rates:
raise ValueError("Data rate must be one of: {}".format(possible_rates))
self._data_rate = rate
@property
def rates(self):
"""Possible data rate settings."""
raise NotImplementedError('Subclass must implement rates property.')
@property
def rate_config(self):
"""Rate configuration masks."""
raise NotImplementedError('Subclass must implement rate_config property.')
@property
def gain(self):
"""The ADC gain."""
return self._gain
@gain.setter
def gain(self, gain):
possible_gains = self.gains
if gain not in possible_gains:
raise ValueError("Gain must be one of: {}".format(possible_gains))
self._gain = gain
@property
def gains(self):
"""Possible gain settings."""
g = list(_ADS1X15_CONFIG_GAIN.keys())
g.sort()
return g
@property
def mode(self):
"""The ADC conversion mode."""
return self._mode
@mode.setter
def mode(self, mode):
if mode != Mode.CONTINUOUS and mode != Mode.SINGLE:
raise ValueError("Unsupported mode.")
self._mode = mode
def read(self, pin, is_differential=False):
"""I2C Interface for ADS1x15-based ADCs reads.
params:
:param pin: individual or differential pin.
:param bool is_differential: single-ended or differential read.
"""
pin = pin if is_differential else pin + 0x04
return self._read(pin)
def _data_rate_default(self):
"""Retrieve the default data rate for this ADC (in samples per second).
Should be implemented by subclasses.
"""
raise NotImplementedError('Subclasses must implement _data_rate_default!')
def _conversion_value(self, raw_adc):
"""Subclasses should override this function that takes the 16 raw ADC
values of a conversion result and returns a signed integer value.
"""
raise NotImplementedError('Subclass must implement _conversion_value function!')
def _read(self, pin):
"""Perform an ADC read. Returns the signed integer result of the read."""
if self.mode == Mode.CONTINUOUS and self._last_pin_read == pin:
return self._conversion_value(self.get_last_result(True))
else:
self._last_pin_read = pin
config = _ADS1X15_CONFIG_OS_SINGLE
config |= (pin & 0x07) << _ADS1X15_CONFIG_MUX_OFFSET
config |= _ADS1X15_CONFIG_GAIN[self.gain]
config |= self.mode
config |= self.rate_config[self.data_rate]
config |= _ADS1X15_CONFIG_COMP_QUE_DISABLE
self._write_register(_ADS1X15_POINTER_CONFIG, config)
if self.mode == Mode.SINGLE:
while not self._conversion_complete():
pass
return self._conversion_value(self.get_last_result(False))
def _conversion_complete(self):
"""Return status of ADC conversion."""
# OS is bit 15
# OS = 0: Device is currently performing a conversion
# OS = 1: Device is not currently performing a conversion
return self._read_register(_ADS1X15_POINTER_CONFIG) & 0x8000
def get_last_result(self, fast=False):
"""Read the last conversion result when in continuous conversion mode.
Will return a signed integer value. If fast is True, the register
pointer is not updated as part of the read. This reduces I2C traffic
and increases possible read rate.
"""
return self._read_register(_ADS1X15_POINTER_CONVERSION, fast)
def _write_register(self, reg, value):
"""Write 16 bit value to register."""
self.buf[0] = reg
self.buf[1] = (value >> 8) & 0xFF
self.buf[2] = value & 0xFF
# Write some bytes to address
msg = i2c_msg.write(self.address, [self.buf[0], self.buf[1], self.buf[2]])
self.bus.i2c_rdwr(msg)
def _read_register(self, reg, fast=False):
"""Read 16 bit register value. If fast is True, the pointer register
is not updated.
"""
if fast:
self.buf = self.bus.read_i2c_block_data(80, 0, 2) # read 16 bit (2 byte of data)
else:
write = i2c_msg.write(self.address, [reg])
read = i2c_msg.read(self.address, 2)
self.bus.i2c_rdwr(write, read)
return ord(read.buf[0]) << 8 | ord(read.buf[1]) | 0.766031 | 0.36923 |
from urllib.parse import quote_plus
from requests import get
import os
import globals
token = os.environ['TOKEN']
url = f'https://api.telegram.org/bot{token}/'
__all__ = [
'chunks',
'copy_file_name',
'delete',
'download_file',
'escape_md',
'get_reply',
'send',
'send_up',
'send_photo',
'url',
]
def send(chat, msg, markdown = 2, preview = False):
u = url + f'sendMessage?chat_id={chat}&text={quote_plus(msg)}'
if markdown == 2:
u += '&parse_mode=markdownv2'
elif markdown == 1:
u += '&parse_mode=markdown'
if not preview:
u += '&disable_web_page_preview=True'
res = get(u).json()
if not res['ok']:
raise SyntaxError(f'\n---\nFailed to send message {msg}.\n{res}\n---\n')
if chat == -1001533648966:
globals.messages.append(res['result']['message_id'])
return res
def send_up(update, msg, *args, **kwargs):
send(update.message.chat.id, msg, *args, **kwargs)
def send_photo(chat, photo, msg = None, markdown = None, preview = False):
u = url + f'sendPhoto?chat_id={chat}&photo={photo}'
if msg:
u += f'&caption={msg}'
if markdown == 2:
u += '&parse_mode=markdownv2'
elif markdown == 1:
u += '&parse_mode=markdown'
if not preview:
u += '&disable_web_page_preview=True'
res = get(u).json()
if not res['ok']:
print('\n\n', res, '\n\n')
if chat == -1001533648966:
globals.messages.append(res['result']['message_id'])
return res
def delete(chat_id, msg_id):
get(url + f'deleteMessage?chat_id={chat_id}&message_id={msg_id}')
def download_file(file_id, mb = 20):
if not (req := get(url + f'getFile?file_id={file_id}').json())['ok']:
return
req = req['result']
if req['file_size'] > 1048576 * mb:
return
req = req['file_path']
with open(req.split('/')[-1], 'wb') as f:
f.write(get(f'https://api.telegram.org/file/bot{token}/{req}', allow_redirects = True).content)
return req.split('/')[-1]
def get_reply(update):
reply_msg = update.to_dict()['message']['reply_to_message']
msg_id = reply_msg['message_id']
try:
doc = reply_msg['audio']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id']
except:
try:
doc = reply_msg['voice']
file_name = doc['file_unique_id'] + '.ogg'
except:
try:
doc = reply_msg['video']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id'] + '.mp4'
except:
doc = reply_msg['document']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id']
file_id = doc['file_id']
return msg_id, file_name, file_id
def copy_file_name(file_name):
return '.'.join(file_name.split('.')[:-1]) + '_copy.' + file_name.split('.')[-1]
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def escape_md(text):
chars = '_-~' + '*+=>' + '({[]})' + '|!#`.'
for i in chars:
text = text.replace(i, f'\\{i}')
return text | functions/utils.py | from urllib.parse import quote_plus
from requests import get
import os
import globals
token = os.environ['TOKEN']
url = f'https://api.telegram.org/bot{token}/'
__all__ = [
'chunks',
'copy_file_name',
'delete',
'download_file',
'escape_md',
'get_reply',
'send',
'send_up',
'send_photo',
'url',
]
def send(chat, msg, markdown = 2, preview = False):
u = url + f'sendMessage?chat_id={chat}&text={quote_plus(msg)}'
if markdown == 2:
u += '&parse_mode=markdownv2'
elif markdown == 1:
u += '&parse_mode=markdown'
if not preview:
u += '&disable_web_page_preview=True'
res = get(u).json()
if not res['ok']:
raise SyntaxError(f'\n---\nFailed to send message {msg}.\n{res}\n---\n')
if chat == -1001533648966:
globals.messages.append(res['result']['message_id'])
return res
def send_up(update, msg, *args, **kwargs):
send(update.message.chat.id, msg, *args, **kwargs)
def send_photo(chat, photo, msg = None, markdown = None, preview = False):
u = url + f'sendPhoto?chat_id={chat}&photo={photo}'
if msg:
u += f'&caption={msg}'
if markdown == 2:
u += '&parse_mode=markdownv2'
elif markdown == 1:
u += '&parse_mode=markdown'
if not preview:
u += '&disable_web_page_preview=True'
res = get(u).json()
if not res['ok']:
print('\n\n', res, '\n\n')
if chat == -1001533648966:
globals.messages.append(res['result']['message_id'])
return res
def delete(chat_id, msg_id):
get(url + f'deleteMessage?chat_id={chat_id}&message_id={msg_id}')
def download_file(file_id, mb = 20):
if not (req := get(url + f'getFile?file_id={file_id}').json())['ok']:
return
req = req['result']
if req['file_size'] > 1048576 * mb:
return
req = req['file_path']
with open(req.split('/')[-1], 'wb') as f:
f.write(get(f'https://api.telegram.org/file/bot{token}/{req}', allow_redirects = True).content)
return req.split('/')[-1]
def get_reply(update):
reply_msg = update.to_dict()['message']['reply_to_message']
msg_id = reply_msg['message_id']
try:
doc = reply_msg['audio']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id']
except:
try:
doc = reply_msg['voice']
file_name = doc['file_unique_id'] + '.ogg'
except:
try:
doc = reply_msg['video']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id'] + '.mp4'
except:
doc = reply_msg['document']
try:
file_name = doc['file_name']
except:
file_name = doc['file_unique_id']
file_id = doc['file_id']
return msg_id, file_name, file_id
def copy_file_name(file_name):
return '.'.join(file_name.split('.')[:-1]) + '_copy.' + file_name.split('.')[-1]
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def escape_md(text):
chars = '_-~' + '*+=>' + '({[]})' + '|!#`.'
for i in chars:
text = text.replace(i, f'\\{i}')
return text | 0.148417 | 0.073663 |
import numpy as np
from scipy.linalg import pinv
def distance_vec_rep_of_fibers(fi):
'''This function calculates the distance of each point on the fiber fr m th first point
Input:
fi - a (n,3) np.ndarray of a single fiber. n is the number of points that represent the fiber
Output:
dist_vec - a (,n) column vec of distance represntation of the fiber'''
p1 = fi[0,:]
dist_vec = np.zeros(fi.shape[0])
for pi,i in zip(fi,range(fi.shape[0])):
disti = np.linalg.norm(p1-pi)
dist_vec[i] = disti
return dist_vec
def distance_powered_matrix(dist_vec, degree):
'''This function calculates the matrix to interpolate polynomial function for X,Y & Z of each fiber.
it takes the distance representation vector and power it according to the chosen degree.
Input:
dist_vec - a (,n) column vec of distance represntation of the fiber
degree - the polynomial degree wanted
Output:
dist_mat - a (n, degree+1) np.ndarray of fiber points an their calculated powers'''
dist_mat = np.zeros([len(dist_vec), degree+1])
for i in range(degree+1):
dist_mat[:,i] = dist_vec.T**i
return dist_mat
def least_squares_poly_rep(fi,comp,dist_mat):
'''This function calculates the least square polynomial function for a single component of the fiber
Calculates the follow Eq: poly_vec = (dist_mat.T * dist_mat).pinv * dist_mat.T * fi[:,comp]
Input:
fi - a (n,3) np.ndarray of a single fiber. n is the number of points that represent the fiber
comp - {'X','Y','Z'} is the current component for polynomial calculation
dist_mat - a (n, degree+1) np.ndarray of fiber points an their calculated powers
Output:
poly_vec - a (,degree+1) vec representation of the polynomial parameters
'''
if comp == 'X':
ax = 0
elif comp == 'Y':
ax = 1
elif comp == 'Z':
ax = 2
dup_mat = np.matmul(dist_mat.T, dist_mat)
inv_dup_mat = pinv(dup_mat)
poly_vec = np.matmul(np.matmul(inv_dup_mat, dist_mat.T), fi[:,ax])
return poly_vec
def poly_xyz_vec_calc(fi, degree=3):
''''''
dist_vec = distance_vec_rep_of_fibers(fi)
dist_mat = distance_powered_matrix(dist_vec,degree)
poly_vec_x = least_squares_poly_rep(fi,'X',dist_mat)
poly_vec_y = least_squares_poly_rep(fi,'Y',dist_mat)
poly_vec_z = least_squares_poly_rep(fi,'Z',dist_mat)
poly_xyz = np.concatenate([poly_vec_x,poly_vec_y,poly_vec_z],0)
return poly_xyz | clustering/poly_representaion_fibers.py | import numpy as np
from scipy.linalg import pinv
def distance_vec_rep_of_fibers(fi):
'''This function calculates the distance of each point on the fiber fr m th first point
Input:
fi - a (n,3) np.ndarray of a single fiber. n is the number of points that represent the fiber
Output:
dist_vec - a (,n) column vec of distance represntation of the fiber'''
p1 = fi[0,:]
dist_vec = np.zeros(fi.shape[0])
for pi,i in zip(fi,range(fi.shape[0])):
disti = np.linalg.norm(p1-pi)
dist_vec[i] = disti
return dist_vec
def distance_powered_matrix(dist_vec, degree):
'''This function calculates the matrix to interpolate polynomial function for X,Y & Z of each fiber.
it takes the distance representation vector and power it according to the chosen degree.
Input:
dist_vec - a (,n) column vec of distance represntation of the fiber
degree - the polynomial degree wanted
Output:
dist_mat - a (n, degree+1) np.ndarray of fiber points an their calculated powers'''
dist_mat = np.zeros([len(dist_vec), degree+1])
for i in range(degree+1):
dist_mat[:,i] = dist_vec.T**i
return dist_mat
def least_squares_poly_rep(fi,comp,dist_mat):
'''This function calculates the least square polynomial function for a single component of the fiber
Calculates the follow Eq: poly_vec = (dist_mat.T * dist_mat).pinv * dist_mat.T * fi[:,comp]
Input:
fi - a (n,3) np.ndarray of a single fiber. n is the number of points that represent the fiber
comp - {'X','Y','Z'} is the current component for polynomial calculation
dist_mat - a (n, degree+1) np.ndarray of fiber points an their calculated powers
Output:
poly_vec - a (,degree+1) vec representation of the polynomial parameters
'''
if comp == 'X':
ax = 0
elif comp == 'Y':
ax = 1
elif comp == 'Z':
ax = 2
dup_mat = np.matmul(dist_mat.T, dist_mat)
inv_dup_mat = pinv(dup_mat)
poly_vec = np.matmul(np.matmul(inv_dup_mat, dist_mat.T), fi[:,ax])
return poly_vec
def poly_xyz_vec_calc(fi, degree=3):
''''''
dist_vec = distance_vec_rep_of_fibers(fi)
dist_mat = distance_powered_matrix(dist_vec,degree)
poly_vec_x = least_squares_poly_rep(fi,'X',dist_mat)
poly_vec_y = least_squares_poly_rep(fi,'Y',dist_mat)
poly_vec_z = least_squares_poly_rep(fi,'Z',dist_mat)
poly_xyz = np.concatenate([poly_vec_x,poly_vec_y,poly_vec_z],0)
return poly_xyz | 0.822759 | 0.810329 |
import os
from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from applications.alumniprofile.models import Profile
from applications.events_news.models import Event
from applications.gallery.models import Album
def upload_photo(instance, filename):
name, extension = os.path.splitext(filename)
return 'Chapter_Walls/' + str(instance.name) + ".jpg"
class Constants:
POST = (
('President', 'President'),
('Hon. Secretary', 'Hon. Secretary'),
('Treasurer', 'Treasurer'),
('Other', 'Other')
)
class Chapters(models.Model):
name = models.CharField(max_length=100)
description = RichTextUploadingField(blank=True, null=True)
wall_picture = models.ImageField(null=True, blank=True, upload_to=upload_photo)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class ChapterTeam(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.PROTECT)
user = models.ForeignKey(User, on_delete=models.PROTECT)
post = models.CharField(choices=Constants.POST, max_length=50)
other_post = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' User: ' + str(self.user) + ' Post: ' + str(self.post)
class ChapterEvent(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'event'),)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' Event: ' + str(self.event)
class ChapterAlbum(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
album = models.ForeignKey(Album, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'album'),)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' Event: ' + str(self.album) | applications/chapter/models.py | import os
from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from applications.alumniprofile.models import Profile
from applications.events_news.models import Event
from applications.gallery.models import Album
def upload_photo(instance, filename):
name, extension = os.path.splitext(filename)
return 'Chapter_Walls/' + str(instance.name) + ".jpg"
class Constants:
POST = (
('President', 'President'),
('Hon. Secretary', 'Hon. Secretary'),
('Treasurer', 'Treasurer'),
('Other', 'Other')
)
class Chapters(models.Model):
name = models.CharField(max_length=100)
description = RichTextUploadingField(blank=True, null=True)
wall_picture = models.ImageField(null=True, blank=True, upload_to=upload_photo)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class ChapterTeam(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.PROTECT)
user = models.ForeignKey(User, on_delete=models.PROTECT)
post = models.CharField(choices=Constants.POST, max_length=50)
other_post = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' User: ' + str(self.user) + ' Post: ' + str(self.post)
class ChapterEvent(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'event'),)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' Event: ' + str(self.event)
class ChapterAlbum(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
album = models.ForeignKey(Album, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'album'),)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' Event: ' + str(self.album) | 0.444806 | 0.113973 |
import os
import torch
import numpy as np
from utils.datasets import DeepFashionDataset
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
from config.deep_fashion import DeepFashionConfig as cfg
from torch.utils.data import DataLoader
from torch.utils.data import Subset
from network.resnet import ResidualEmbNetwork
from os.path import join
# utils
from utils import extract_embeddings
from utils.plot_deep_fashion import plot_embeddings
# Search tree
from tqdm import tqdm
from annoy import AnnoyIndex
# matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
# take the input args
import sys
exp_folder = sys.argv[1]
print("Experiment result folder:", exp_folder)
# Mdoels
emb_net = ResidualEmbNetwork()
emb_net.load_state_dict(torch.load(join(exp_folder, "_emb_net_20.pth")))
# Dataset
trans = Compose([
Resize(cfg.sizes), ToTensor(),
Normalize(cfg.mean, cfg.std)
])
train_ds = DeepFashionDataset(cfg.root_dir, 'val', transform=trans)
rnd_state = np.random.RandomState(200)
samples = rnd_state.choice(len(train_ds), 5000, replace=False)
train_ds = Subset(train_ds, samples)
# Extract embedding vectors
load_kwargs = {
'batch_size': 128,
'num_workers': os.cpu_count()
}
# test_embs, _ = extract_embeddings(emb_net, DataLoader(test_ds, **load_kwargs))
embs, labels = extract_embeddings(emb_net, DataLoader(train_ds, **load_kwargs))
# translate them to cpu + numpy
embs = embs.cpu().numpy()
labels = labels.cpu().numpy()
# -----------------------------------------------------------------------------
print("Plotting T-sne....")
from cuml.manifold import TSNE
tsne = TSNE(n_iter=1000, metric="euclidean")
projected_emb = tsne.fit_transform(embs)
fig = plot_embeddings(projected_emb, labels)
png_fname = join(exp_folder, 't-sne.png')
fig.savefig(png_fname, bbox_inches='tight')
pdf_fname = join(exp_folder, 't-sne.pdf')
fig.savefig(pdf_fname, bbox_inches='tight')
# -----------------------------------------------------------------------------
print("Plotting PCA....")
from cuml import PCA
pca_float = PCA(n_components=2)
cudf = pca_float.fit_transform(embs)
projected_emb = cudf.to_pandas().to_numpy()
fig = plot_embeddings(projected_emb, labels)
png_fname = join(exp_folder, 'pca.png')
fig.savefig(png_fname, bbox_inches='tight')
pdf_fname = join(exp_folder, 't-sne.pdf')
fig.savefig(pdf_fname, bbox_inches='tight') | plt_emb.py | import os
import torch
import numpy as np
from utils.datasets import DeepFashionDataset
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
from config.deep_fashion import DeepFashionConfig as cfg
from torch.utils.data import DataLoader
from torch.utils.data import Subset
from network.resnet import ResidualEmbNetwork
from os.path import join
# utils
from utils import extract_embeddings
from utils.plot_deep_fashion import plot_embeddings
# Search tree
from tqdm import tqdm
from annoy import AnnoyIndex
# matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
# take the input args
import sys
exp_folder = sys.argv[1]
print("Experiment result folder:", exp_folder)
# Mdoels
emb_net = ResidualEmbNetwork()
emb_net.load_state_dict(torch.load(join(exp_folder, "_emb_net_20.pth")))
# Dataset
trans = Compose([
Resize(cfg.sizes), ToTensor(),
Normalize(cfg.mean, cfg.std)
])
train_ds = DeepFashionDataset(cfg.root_dir, 'val', transform=trans)
rnd_state = np.random.RandomState(200)
samples = rnd_state.choice(len(train_ds), 5000, replace=False)
train_ds = Subset(train_ds, samples)
# Extract embedding vectors
load_kwargs = {
'batch_size': 128,
'num_workers': os.cpu_count()
}
# test_embs, _ = extract_embeddings(emb_net, DataLoader(test_ds, **load_kwargs))
embs, labels = extract_embeddings(emb_net, DataLoader(train_ds, **load_kwargs))
# translate them to cpu + numpy
embs = embs.cpu().numpy()
labels = labels.cpu().numpy()
# -----------------------------------------------------------------------------
print("Plotting T-sne....")
from cuml.manifold import TSNE
tsne = TSNE(n_iter=1000, metric="euclidean")
projected_emb = tsne.fit_transform(embs)
fig = plot_embeddings(projected_emb, labels)
png_fname = join(exp_folder, 't-sne.png')
fig.savefig(png_fname, bbox_inches='tight')
pdf_fname = join(exp_folder, 't-sne.pdf')
fig.savefig(pdf_fname, bbox_inches='tight')
# -----------------------------------------------------------------------------
print("Plotting PCA....")
from cuml import PCA
pca_float = PCA(n_components=2)
cudf = pca_float.fit_transform(embs)
projected_emb = cudf.to_pandas().to_numpy()
fig = plot_embeddings(projected_emb, labels)
png_fname = join(exp_folder, 'pca.png')
fig.savefig(png_fname, bbox_inches='tight')
pdf_fname = join(exp_folder, 't-sne.pdf')
fig.savefig(pdf_fname, bbox_inches='tight') | 0.529507 | 0.54468 |
import importlib
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseSettings, Field, root_validator, validator
from pydantic.types import Path
DEFAULT_CONFIG_FILE_PATH = str(Path.home().joinpath(".emmet.json"))
class EmmetSettings(BaseSettings):
"""
Settings for the emmet- packages
The default way to modify these is to modify ~/.emmet.json or set the environment variable
EMMET_CONFIG_FILE to point to the json with emmet settings
"""
config_file: str = Field(
DEFAULT_CONFIG_FILE_PATH, description="File to load alternative defaults from"
)
LTOL: float = Field(
0.2, description="Fractional length tolerance for structure matching"
)
STOL: float = Field(
0.3,
description="Site tolerance for structure matching. Defined as the fraction of the"
" average free length per atom = ( V / Nsites ) ** (1/3)",
)
SYMPREC: float = Field(
0.1, description="Symmetry precision for spglib symmetry finding"
)
ANGLE_TOL: float = Field(
5, description="Angle tolerance for structure matching in degrees."
)
MAX_PIEZO_MILLER: int = Field(
10,
description="Maximum miller allowed for computing strain direction for maximal piezo response",
)
TAGS_TO_SANDBOXES: Optional[Dict[str, List[str]]] = Field(
None,
description="Mapping of calcuation tags to sandboxes: Dict[sandbox, list of tags]."
" Any calculation without these tags will be kept as core.",
)
VASP_SPECIAL_TAGS: List[str] = Field(
["LASPH"], description="Special tags to prioritize for VASP Task Documents"
)
VASP_QUALITY_SCORES: Dict[str, int] = Field(
{"SCAN": 3, "GGA+U": 2, "GGA": 1},
description="Dictionary Mapping VASP calculation run types to rung level for VASP materials builders",
)
VASP_KPTS_TOLERANCE: float = Field(
0.9,
description="Relative tolerance for kpt density to still be a valid task document",
)
VASP_DEFAULT_INPUT_SETS: Dict = Field(
{
"GGA Structure Optimization": "pymatgen.io.vasp.sets.MPRelaxSet",
"GGA+U Structure Optimization": "pymatgen.io.vasp.sets.MPRelaxSet",
},
description="Default input sets for task validation",
)
VASP_CHECKED_LDAU_FIELDS: List[str] = Field(
["LDAUU", "LDAUJ", "LDAUL"], description="LDAU fields to validate for tasks"
)
class Config:
env_prefix = "emmet_"
extra = "ignore"
@root_validator(pre=True)
def load_default_settings(cls, values):
"""
Loads settings from a root file if available and uses that as defaults in
place of built in defaults
"""
config_file_path: str = values.get("config_file", DEFAULT_CONFIG_FILE_PATH)
new_values = {}
if config_file_path.startswith("http"):
new_values = requests.get(config_file_path).json()
elif Path(config_file_path).exists():
with open(config_file_path) as f:
new_values = json.load(f)
new_values.update(values)
return new_values
@validator("VASP_DEFAULT_INPUT_SETS", pre=True)
def load_input_sets(cls, values):
input_sets = {}
for name, inp_set in values.items():
if isinstance(inp_set, str):
_module = ".".join(inp_set.split(".")[:-1])
_class = inp_set.split(".")[-1]
input_sets[name] = getattr(importlib.import_module(_module), _class)
elif isinstance(inp_set, type):
input_sets[name] = inp_set
return input_sets | emmet-core/emmet/core/settings.py | import importlib
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseSettings, Field, root_validator, validator
from pydantic.types import Path
DEFAULT_CONFIG_FILE_PATH = str(Path.home().joinpath(".emmet.json"))
class EmmetSettings(BaseSettings):
"""
Settings for the emmet- packages
The default way to modify these is to modify ~/.emmet.json or set the environment variable
EMMET_CONFIG_FILE to point to the json with emmet settings
"""
config_file: str = Field(
DEFAULT_CONFIG_FILE_PATH, description="File to load alternative defaults from"
)
LTOL: float = Field(
0.2, description="Fractional length tolerance for structure matching"
)
STOL: float = Field(
0.3,
description="Site tolerance for structure matching. Defined as the fraction of the"
" average free length per atom = ( V / Nsites ) ** (1/3)",
)
SYMPREC: float = Field(
0.1, description="Symmetry precision for spglib symmetry finding"
)
ANGLE_TOL: float = Field(
5, description="Angle tolerance for structure matching in degrees."
)
MAX_PIEZO_MILLER: int = Field(
10,
description="Maximum miller allowed for computing strain direction for maximal piezo response",
)
TAGS_TO_SANDBOXES: Optional[Dict[str, List[str]]] = Field(
None,
description="Mapping of calcuation tags to sandboxes: Dict[sandbox, list of tags]."
" Any calculation without these tags will be kept as core.",
)
VASP_SPECIAL_TAGS: List[str] = Field(
["LASPH"], description="Special tags to prioritize for VASP Task Documents"
)
VASP_QUALITY_SCORES: Dict[str, int] = Field(
{"SCAN": 3, "GGA+U": 2, "GGA": 1},
description="Dictionary Mapping VASP calculation run types to rung level for VASP materials builders",
)
VASP_KPTS_TOLERANCE: float = Field(
0.9,
description="Relative tolerance for kpt density to still be a valid task document",
)
VASP_DEFAULT_INPUT_SETS: Dict = Field(
{
"GGA Structure Optimization": "pymatgen.io.vasp.sets.MPRelaxSet",
"GGA+U Structure Optimization": "pymatgen.io.vasp.sets.MPRelaxSet",
},
description="Default input sets for task validation",
)
VASP_CHECKED_LDAU_FIELDS: List[str] = Field(
["LDAUU", "LDAUJ", "LDAUL"], description="LDAU fields to validate for tasks"
)
class Config:
env_prefix = "emmet_"
extra = "ignore"
@root_validator(pre=True)
def load_default_settings(cls, values):
"""
Loads settings from a root file if available and uses that as defaults in
place of built in defaults
"""
config_file_path: str = values.get("config_file", DEFAULT_CONFIG_FILE_PATH)
new_values = {}
if config_file_path.startswith("http"):
new_values = requests.get(config_file_path).json()
elif Path(config_file_path).exists():
with open(config_file_path) as f:
new_values = json.load(f)
new_values.update(values)
return new_values
@validator("VASP_DEFAULT_INPUT_SETS", pre=True)
def load_input_sets(cls, values):
input_sets = {}
for name, inp_set in values.items():
if isinstance(inp_set, str):
_module = ".".join(inp_set.split(".")[:-1])
_class = inp_set.split(".")[-1]
input_sets[name] = getattr(importlib.import_module(_module), _class)
elif isinstance(inp_set, type):
input_sets[name] = inp_set
return input_sets | 0.797004 | 0.362997 |
import random
import sys
import multiprocessing
from collections import namedtuple
from wicked21st.graph import load_graph, Graph, save_graph, Cascades
import graphviz
DEBUG = False
rand = random.Random(42)
if len(sys.argv) > 1:
graph_file = sys.argv[1]
else:
import config
graph_file = config.GRAPH
graph_def, _ = load_graph(graph_file)
node_list = list()
cats = sorted(Graph.CATEGORIES, key=lambda x: x[0])
for _, catid in cats:
ncat = sorted(graph_def.node_classes[catid], key=lambda x: graph_def.ordering[x])
node_list = node_list + ncat
node_to_idx = {n: idx for idx, n in enumerate(node_list)}
num_nodes = len(node_list)
node_to_code = dict()
if len(next(iter(graph_def.node_names.keys()))) == 3:
node_to_code = {x: x for x in graph_def.node_names}
code_to_node = node_to_code
else:
for catid in graph_def.node_classes:
nodes = graph_def.node_classes[catid]
for node in sorted(nodes, key=lambda x: graph_def.ordering[x]):
name = graph_def.node_names[node].upper()
if name[0] == "*":
name
if name.startswith("LACK OF"):
name = name[len("LACK OF ") :]
code = name[:3]
if code in node_to_code.values():
code = name.split(" ")[1][:3]
if code in node_to_code.values():
raise Error(graph_def.node_names[node] + " " + str(node_to_code))
node_to_code[node] = code
code_to_node = {c: n for n, c in node_to_code.items()}
def reachable(node, path):
if node in path:
return list()
path = path + [node]
result = [(node, path)]
for outlink in sorted(graph_def.outlinks[node]):
result = result + reachable(outlink, path)
return result
cascade = dict()
buckets = dict()
for cnode in node_list:
in_reach = reachable(cnode, [])
shortest = dict()
for node, path in in_reach:
if node in shortest:
existing = shortest[node]
if len(existing[0]) > len(path):
# replace
shortest[node] = [path]
elif len(existing[0]) == len(path):
shortest[node].append(path)
else:
shortest[node] = [path]
# sort by distance
by_distance = dict() # int -> list of (node, (list previous nodes) )
for node, paths in shortest.items():
dist = len(paths[0])
if dist not in by_distance:
by_distance[dist] = list()
prev = [path[-1] for path in paths]
by_distance[dist].append((node, prev))
# randomize
for dist in by_distance:
rand.shuffle(by_distance[dist])
# print
dist_buckts = list()
for dist in sorted(by_distance.keys()):
if dist > 2:
dist_buckts.append(list(map(lambda x: x[0], by_distance[dist])))
flatten = [n for bckt in dist_buckts for n in bckt]
cascade[cnode] = flatten
buckets[cnode] = dist_buckts
if len(sys.argv) > 2:
cascades = Cascades(graph_def)
cascades.cascade = cascade
save_graph(sys.argv[2], graph_def, cascades)
else:
with open("cascading.tsv", "w") as tsv:
for cnode in node_list:
dist_buckts = buckets[cnode]
print(
"{} ({}): {}".format(
graph_def.node_names[cnode],
node_to_code[cnode],
"; ".join(
map(
lambda bckt: ", ".join(
map(lambda x: node_to_code[x], bckt)
),
dist_buckts,
)
),
)
)
flatten = cascade[cnode]
tsv.write("{}\t{}\n".format(cnode, "\t".join(flatten))) | graph_to_cascades.py |
import random
import sys
import multiprocessing
from collections import namedtuple
from wicked21st.graph import load_graph, Graph, save_graph, Cascades
import graphviz
DEBUG = False
rand = random.Random(42)
if len(sys.argv) > 1:
graph_file = sys.argv[1]
else:
import config
graph_file = config.GRAPH
graph_def, _ = load_graph(graph_file)
node_list = list()
cats = sorted(Graph.CATEGORIES, key=lambda x: x[0])
for _, catid in cats:
ncat = sorted(graph_def.node_classes[catid], key=lambda x: graph_def.ordering[x])
node_list = node_list + ncat
node_to_idx = {n: idx for idx, n in enumerate(node_list)}
num_nodes = len(node_list)
node_to_code = dict()
if len(next(iter(graph_def.node_names.keys()))) == 3:
node_to_code = {x: x for x in graph_def.node_names}
code_to_node = node_to_code
else:
for catid in graph_def.node_classes:
nodes = graph_def.node_classes[catid]
for node in sorted(nodes, key=lambda x: graph_def.ordering[x]):
name = graph_def.node_names[node].upper()
if name[0] == "*":
name
if name.startswith("LACK OF"):
name = name[len("LACK OF ") :]
code = name[:3]
if code in node_to_code.values():
code = name.split(" ")[1][:3]
if code in node_to_code.values():
raise Error(graph_def.node_names[node] + " " + str(node_to_code))
node_to_code[node] = code
code_to_node = {c: n for n, c in node_to_code.items()}
def reachable(node, path):
if node in path:
return list()
path = path + [node]
result = [(node, path)]
for outlink in sorted(graph_def.outlinks[node]):
result = result + reachable(outlink, path)
return result
cascade = dict()
buckets = dict()
for cnode in node_list:
in_reach = reachable(cnode, [])
shortest = dict()
for node, path in in_reach:
if node in shortest:
existing = shortest[node]
if len(existing[0]) > len(path):
# replace
shortest[node] = [path]
elif len(existing[0]) == len(path):
shortest[node].append(path)
else:
shortest[node] = [path]
# sort by distance
by_distance = dict() # int -> list of (node, (list previous nodes) )
for node, paths in shortest.items():
dist = len(paths[0])
if dist not in by_distance:
by_distance[dist] = list()
prev = [path[-1] for path in paths]
by_distance[dist].append((node, prev))
# randomize
for dist in by_distance:
rand.shuffle(by_distance[dist])
# print
dist_buckts = list()
for dist in sorted(by_distance.keys()):
if dist > 2:
dist_buckts.append(list(map(lambda x: x[0], by_distance[dist])))
flatten = [n for bckt in dist_buckts for n in bckt]
cascade[cnode] = flatten
buckets[cnode] = dist_buckts
if len(sys.argv) > 2:
cascades = Cascades(graph_def)
cascades.cascade = cascade
save_graph(sys.argv[2], graph_def, cascades)
else:
with open("cascading.tsv", "w") as tsv:
for cnode in node_list:
dist_buckts = buckets[cnode]
print(
"{} ({}): {}".format(
graph_def.node_names[cnode],
node_to_code[cnode],
"; ".join(
map(
lambda bckt: ", ".join(
map(lambda x: node_to_code[x], bckt)
),
dist_buckts,
)
),
)
)
flatten = cascade[cnode]
tsv.write("{}\t{}\n".format(cnode, "\t".join(flatten))) | 0.192388 | 0.291813 |
# template file: justice_py_sdk_codegen/__main__.py
# justice-iam-service (5.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from .utils import randomize
from ..api.iam.models import AccountCreateTestUserRequestV4
from ..api.iam.models import AccountCreateUserRequestV4
from ..api.iam.models import AccountCreateUserResponseV4
from ..api.iam.models import AccountUpgradeHeadlessAccountRequestV4
from ..api.iam.models import AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4
from ..api.iam.models import AccountUserActiveBanResponseV4
from ..api.iam.models import AccountUserPermissionsResponseV4
from ..api.iam.models import AccountUserResponseV4
from ..api.iam.models import AccountcommonBan
from ..api.iam.models import AccountcommonBanReason
from ..api.iam.models import AccountcommonBanReasonV3
from ..api.iam.models import AccountcommonBanReasons
from ..api.iam.models import AccountcommonBanReasonsV3
from ..api.iam.models import AccountcommonBanV3
from ..api.iam.models import AccountcommonBannedByV3
from ..api.iam.models import AccountcommonBans
from ..api.iam.models import AccountcommonBansV3
from ..api.iam.models import AccountcommonClientPermission
from ..api.iam.models import AccountcommonClientPermissionV3
from ..api.iam.models import AccountcommonClientPermissions
from ..api.iam.models import AccountcommonClientPermissionsV3
from ..api.iam.models import AccountcommonConflictedUserPlatformAccounts
from ..api.iam.models import AccountcommonCountryAgeRestriction
from ..api.iam.models import AccountcommonDescription
from ..api.iam.models import AccountcommonDistinctLinkedPlatformV3
from ..api.iam.models import AccountcommonDistinctPlatformResponseV3
from ..api.iam.models import AccountcommonInputValidationDescription
from ..api.iam.models import AccountcommonJWTBanV3
from ..api.iam.models import AccountcommonListUsersWithPlatformAccountsResponse
from ..api.iam.models import AccountcommonNamespaceRole
from ..api.iam.models import AccountcommonNetflixCertificates
from ..api.iam.models import AccountcommonPagination
from ..api.iam.models import AccountcommonPaginationV3
from ..api.iam.models import AccountcommonPermission
from ..api.iam.models import AccountcommonPermissionV3
from ..api.iam.models import AccountcommonPermissions
from ..api.iam.models import AccountcommonPermissionsV3
from ..api.iam.models import AccountcommonPlatformAccount
from ..api.iam.models import AccountcommonRegisteredDomain
from ..api.iam.models import AccountcommonRole
from ..api.iam.models import AccountcommonRoleManager
from ..api.iam.models import AccountcommonRoleManagerV3
from ..api.iam.models import AccountcommonRoleMember
from ..api.iam.models import AccountcommonRoleMemberV3
from ..api.iam.models import AccountcommonRoleV3
from ..api.iam.models import AccountcommonSimpleUserPlatformInfoV3
from ..api.iam.models import AccountcommonUserLinkedPlatform
from ..api.iam.models import AccountcommonUserLinkedPlatformV3
from ..api.iam.models import AccountcommonUserLinkedPlatformsResponseV3
from ..api.iam.models import AccountcommonUserPlatformInfo
from ..api.iam.models import AccountcommonUserPlatforms
from ..api.iam.models import AccountcommonUserSearchByPlatformIDResult
from ..api.iam.models import AccountcommonUserSearchResult
from ..api.iam.models import AccountcommonUserWithLinkedPlatformAccounts
from ..api.iam.models import AccountcommonUserWithPlatformAccounts
from ..api.iam.models import BannedBy
from ..api.iam.models import BloomFilterJSON
from ..api.iam.models import ClientmodelClientCreateRequest
from ..api.iam.models import ClientmodelClientCreationResponse
from ..api.iam.models import ClientmodelClientCreationV3Request
from ..api.iam.models import ClientmodelClientResponse
from ..api.iam.models import ClientmodelClientUpdateRequest
from ..api.iam.models import ClientmodelClientUpdateSecretRequest
from ..api.iam.models import ClientmodelClientUpdateV3Request
from ..api.iam.models import ClientmodelClientV3Response
from ..api.iam.models import ClientmodelClientsV3Response
from ..api.iam.models import LegalAcceptedPoliciesRequest
from ..api.iam.models import ModelAddUserRoleV4Request
from ..api.iam.models import ModelAgeRestrictionRequest
from ..api.iam.models import ModelAgeRestrictionRequestV3
from ..api.iam.models import ModelAgeRestrictionResponse
from ..api.iam.models import ModelAgeRestrictionResponseV3
from ..api.iam.models import ModelAssignUserV4Request
from ..api.iam.models import ModelAssignedUserV4Response
from ..api.iam.models import ModelAuthenticatorKeyResponseV4
from ..api.iam.models import ModelBackupCodesResponseV4
from ..api.iam.models import ModelBanCreateRequest
from ..api.iam.models import ModelBanUpdateRequest
from ..api.iam.models import ModelCheckValidUserIDRequestV4
from ..api.iam.models import ModelCountry
from ..api.iam.models import ModelCountryAgeRestrictionRequest
from ..api.iam.models import ModelCountryAgeRestrictionV3Request
from ..api.iam.models import ModelCountryV3Response
from ..api.iam.models import ModelCreateJusticeUserResponse
from ..api.iam.models import ModelDisableUserRequest
from ..api.iam.models import ModelEmailUpdateRequestV4
from ..api.iam.models import ModelEnabledFactorsResponseV4
from ..api.iam.models import ModelForgotPasswordRequestV3
from ..api.iam.models import ModelGetAdminUsersResponse
from ..api.iam.models import ModelGetPublisherUserResponse
from ..api.iam.models import ModelGetUserBanV3Response
from ..api.iam.models import ModelGetUserJusticePlatformAccountResponse
from ..api.iam.models import ModelGetUserMapping
from ..api.iam.models import ModelGetUsersResponseWithPaginationV3
from ..api.iam.models import ModelInputValidationData
from ..api.iam.models import ModelInputValidationDataPublic
from ..api.iam.models import ModelInputValidationUpdatePayload
from ..api.iam.models import ModelInputValidationsPublicResponse
from ..api.iam.models import ModelInputValidationsResponse
from ..api.iam.models import ModelInviteUserRequestV3
from ..api.iam.models import ModelInviteUserRequestV4
from ..api.iam.models import ModelInviteUserResponseV3
from ..api.iam.models import ModelLinkPlatformAccountRequest
from ..api.iam.models import ModelLinkPlatformAccountWithProgressionRequest
from ..api.iam.models import ModelLinkRequest
from ..api.iam.models import ModelListAssignedUsersV4Response
from ..api.iam.models import ModelListBulkUserResponse
from ..api.iam.models import ModelListEmailAddressRequest
from ..api.iam.models import ModelListRoleV4Response
from ..api.iam.models import ModelListUserInformationResult
from ..api.iam.models import ModelListUserResponseV3
from ..api.iam.models import ModelListUserRolesV4Response
from ..api.iam.models import ModelListValidUserIDResponseV4
from ..api.iam.models import ModelLoginHistoriesResponse
from ..api.iam.models import ModelNamespaceRoleRequest
from ..api.iam.models import ModelPermissionDeleteRequest
from ..api.iam.models import ModelPlatformDomainDeleteRequest
from ..api.iam.models import ModelPlatformDomainResponse
from ..api.iam.models import ModelPlatformDomainUpdateRequest
from ..api.iam.models import ModelPlatformUserIDRequest
from ..api.iam.models import ModelPlatformUserInformation
from ..api.iam.models import ModelPublicThirdPartyPlatformInfo
from ..api.iam.models import ModelPublicUserInformationResponseV3
from ..api.iam.models import ModelPublicUserInformationV3
from ..api.iam.models import ModelPublicUserResponse
from ..api.iam.models import ModelPublicUserResponseV3
from ..api.iam.models import ModelPublicUsersResponse
from ..api.iam.models import ModelRemoveUserRoleV4Request
from ..api.iam.models import ModelResetPasswordRequest
from ..api.iam.models import ModelResetPasswordRequestV3
from ..api.iam.models import ModelRevokeUserV4Request
from ..api.iam.models import ModelRoleAdminStatusResponse
from ..api.iam.models import ModelRoleAdminStatusResponseV3
from ..api.iam.models import ModelRoleCreateRequest
from ..api.iam.models import ModelRoleCreateV3Request
from ..api.iam.models import ModelRoleManagersRequest
from ..api.iam.models import ModelRoleManagersRequestV3
from ..api.iam.models import ModelRoleManagersResponse
from ..api.iam.models import ModelRoleManagersResponsesV3
from ..api.iam.models import ModelRoleMembersRequest
from ..api.iam.models import ModelRoleMembersRequestV3
from ..api.iam.models import ModelRoleMembersResponse
from ..api.iam.models import ModelRoleMembersResponseV3
from ..api.iam.models import ModelRoleNamesResponseV3
from ..api.iam.models import ModelRoleResponse
from ..api.iam.models import ModelRoleResponseV3
from ..api.iam.models import ModelRoleResponseWithManagers
from ..api.iam.models import ModelRoleResponseWithManagersAndPaginationV3
from ..api.iam.models import ModelRoleResponseWithManagersV3
from ..api.iam.models import ModelRoleUpdateRequest
from ..api.iam.models import ModelRoleUpdateRequestV3
from ..api.iam.models import ModelRoleV4Request
from ..api.iam.models import ModelRoleV4Response
from ..api.iam.models import ModelSSOPlatformCredentialRequest
from ..api.iam.models import ModelSSOPlatformCredentialResponse
from ..api.iam.models import ModelSearchUsersByPlatformIDResponse
from ..api.iam.models import ModelSearchUsersResponse
from ..api.iam.models import ModelSearchUsersResponseWithPaginationV3
from ..api.iam.models import ModelSendRegisterVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequestV3
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialRequest
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialResponse
from ..api.iam.models import ModelUnlinkUserPlatformRequest
from ..api.iam.models import ModelUpdatePermissionScheduleRequest
from ..api.iam.models import ModelUpdateUserDeletionStatusRequest
from ..api.iam.models import ModelUpdateUserStatusRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountV3Request
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3
from ..api.iam.models import ModelUserActiveBanResponse
from ..api.iam.models import ModelUserActiveBanResponseV3
from ..api.iam.models import ModelUserBanResponse
from ..api.iam.models import ModelUserBanResponseV3
from ..api.iam.models import ModelUserBaseInfo
from ..api.iam.models import ModelUserCreateFromInvitationRequestV3
from ..api.iam.models import ModelUserCreateFromInvitationRequestV4
from ..api.iam.models import ModelUserCreateRequest
from ..api.iam.models import ModelUserCreateRequestV3
from ..api.iam.models import ModelUserCreateResponse
from ..api.iam.models import ModelUserCreateResponseV3
from ..api.iam.models import ModelUserDeletionStatusResponse
from ..api.iam.models import ModelUserIDsRequest
from ..api.iam.models import ModelUserInfoResponse
from ..api.iam.models import ModelUserInformation
from ..api.iam.models import ModelUserInvitationV3
from ..api.iam.models import ModelUserLoginHistoryResponse
from ..api.iam.models import ModelUserPasswordUpdateRequest
from ..api.iam.models import ModelUserPasswordUpdateV3Request
from ..api.iam.models import ModelUserPermissionsResponseV3
from ..api.iam.models import ModelUserResponse
from ..api.iam.models import ModelUserResponseV3
from ..api.iam.models import ModelUserRolesV4Response
from ..api.iam.models import ModelUserUpdateRequest
from ..api.iam.models import ModelUserUpdateRequestV3
from ..api.iam.models import ModelUserVerificationRequest
from ..api.iam.models import ModelUserVerificationRequestV3
from ..api.iam.models import ModelValidUserIDResponseV4
from ..api.iam.models import ModelValidationDetail
from ..api.iam.models import ModelValidationDetailPublic
from ..api.iam.models import ModelVerificationCodeResponse
from ..api.iam.models import ModelVerifyRegistrationCode
from ..api.iam.models import ModelWebLinkingResponse
from ..api.iam.models import OauthapiRevocationList
from ..api.iam.models import OauthcommonJWKKey
from ..api.iam.models import OauthcommonJWKSet
from ..api.iam.models import OauthcommonUserRevocationListRecord
from ..api.iam.models import OauthmodelCountryLocationResponse
from ..api.iam.models import OauthmodelErrorResponse
from ..api.iam.models import OauthmodelTokenIntrospectResponse
from ..api.iam.models import OauthmodelTokenResponse
from ..api.iam.models import OauthmodelTokenResponseV3
from ..api.iam.models import OauthmodelTokenThirdPartyResponse
from ..api.iam.models import RestErrorResponse
from ..api.iam.models import RestapiErrorResponse
from ..api.iam.models import Validation
from ..api.iam.models import ValidationDescription
def create_account_create_test_user_request_v4_example() -> AccountCreateTestUserRequestV4:
    """Build an AccountCreateTestUserRequestV4 populated with randomized field values."""
    example = AccountCreateTestUserRequestV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.username = randomize("slug")
    example.verified = randomize("bool")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    return example
def create_account_create_user_request_v4_example() -> AccountCreateUserRequestV4:
    """Build an AccountCreateUserRequestV4 populated with randomized field values."""
    example = AccountCreateUserRequestV4()
    example.auth_type = randomize()
    example.code = randomize()
    example.country = randomize("country")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    example.date_of_birth = randomize()
    return example
def create_account_create_user_response_v4_example() -> AccountCreateUserResponseV4:
    """Build an AccountCreateUserResponseV4 populated with randomized field values."""
    example = AccountCreateUserResponseV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize("adult_birthdate")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.username = randomize("slug")
    return example
def create_account_upgrade_headless_account_request_v4_example() -> AccountUpgradeHeadlessAccountRequestV4:
    """Build an AccountUpgradeHeadlessAccountRequestV4 populated with randomized field values."""
    example = AccountUpgradeHeadlessAccountRequestV4()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.username = randomize("slug")
    return example
def create_account_upgrade_headless_account_with_verification_code_request_v4_example() -> AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4:
    """Build an AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4 populated with randomized field values."""
    example = AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4()
    example.code = randomize()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.validate_only = randomize("bool")
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    return example
def create_account_user_active_ban_response_v4_example() -> AccountUserActiveBanResponseV4:
    """Build an AccountUserActiveBanResponseV4 populated with randomized field values."""
    example = AccountUserActiveBanResponseV4()
    example.ban = randomize()
    example.ban_id = randomize()
    example.end_date = randomize("date")
    return example
def create_account_user_permissions_response_v4_example() -> AccountUserPermissionsResponseV4:
    """Build an AccountUserPermissionsResponseV4 populated with randomized field values."""
    example = AccountUserPermissionsResponseV4()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_account_user_response_v4_example() -> AccountUserResponseV4:
    """Build an AccountUserResponseV4 populated with randomized field values."""
    example = AccountUserResponseV4()
    example.auth_type = randomize()
    example.bans = [create_account_user_active_ban_response_v4_example()]
    example.country = randomize("country")
    example.created_at = randomize("date")
    example.date_of_birth = randomize("adult_birthdate")
    example.deletion_status = randomize("bool")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.email_verified = randomize("bool")
    example.enabled = randomize("bool")
    example.last_date_of_birth_changed_time = randomize("date")
    example.last_enabled_changed_time = randomize("date")
    example.namespace = randomize("slug")
    example.old_email_address = randomize()
    example.permissions = [create_account_user_permissions_response_v4_example()]
    example.phone_verified = randomize("bool")
    example.roles = [randomize()]
    example.user_id = randomize("uid")
    example.new_email_address = randomize()
    example.phone_number = randomize()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.username = randomize("slug")
    return example
def create_accountcommon_ban_example() -> AccountcommonBan:
    """Build an AccountcommonBan populated with randomized field values."""
    example = AccountcommonBan()
    example.ban = randomize()
    example.description = randomize()
    return example
def create_accountcommon_ban_reason_example() -> AccountcommonBanReason:
    """Build an AccountcommonBanReason populated with randomized field values."""
    example = AccountcommonBanReason()
    example.description = randomize()
    example.reason = randomize()
    return example
def create_accountcommon_ban_reason_v3_example() -> AccountcommonBanReasonV3:
    """Build an AccountcommonBanReasonV3 populated with randomized field values."""
    example = AccountcommonBanReasonV3()
    example.description = randomize()
    example.reason = randomize()
    return example
def create_accountcommon_ban_reasons_example() -> AccountcommonBanReasons:
    """Build an AccountcommonBanReasons wrapping one randomized ban reason."""
    example = AccountcommonBanReasons()
    example.reasons = [create_accountcommon_ban_reason_example()]
    return example
def create_accountcommon_ban_reasons_v3_example() -> AccountcommonBanReasonsV3:
    """Build an AccountcommonBanReasonsV3 wrapping one randomized ban reason."""
    example = AccountcommonBanReasonsV3()
    example.reasons = [create_accountcommon_ban_reason_v3_example()]
    return example
def create_accountcommon_ban_v3_example() -> AccountcommonBanV3:
    """Build an AccountcommonBanV3 populated with randomized field values."""
    example = AccountcommonBanV3()
    example.ban = randomize()
    example.type_ = randomize()
    example.description = randomize()
    example.descriptions = create_accountcommon_description_example()
    return example
def create_accountcommon_banned_by_v3_example() -> AccountcommonBannedByV3:
    """Build an AccountcommonBannedByV3 populated with randomized field values."""
    example = AccountcommonBannedByV3()
    example.display_name = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_bans_example() -> AccountcommonBans:
    """Build an AccountcommonBans wrapping one randomized ban."""
    example = AccountcommonBans()
    example.bans = [create_accountcommon_ban_example()]
    return example
def create_accountcommon_bans_v3_example() -> AccountcommonBansV3:
    """Build an AccountcommonBansV3 wrapping one randomized ban."""
    example = AccountcommonBansV3()
    example.bans = [create_accountcommon_ban_v3_example()]
    return example
def create_accountcommon_client_permission_example() -> AccountcommonClientPermission:
    """Build an AccountcommonClientPermission populated with randomized field values."""
    example = AccountcommonClientPermission()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example
def create_accountcommon_client_permission_v3_example() -> AccountcommonClientPermissionV3:
    """Build an AccountcommonClientPermissionV3 populated with randomized field values."""
    example = AccountcommonClientPermissionV3()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example
def create_accountcommon_client_permissions_example() -> AccountcommonClientPermissions:
    """Build an AccountcommonClientPermissions wrapping one randomized permission."""
    example = AccountcommonClientPermissions()
    example.permissions = [create_accountcommon_client_permission_example()]
    return example
def create_accountcommon_client_permissions_v3_example() -> AccountcommonClientPermissionsV3:
    """Build an AccountcommonClientPermissionsV3 wrapping one randomized permission."""
    example = AccountcommonClientPermissionsV3()
    example.permissions = [create_accountcommon_client_permission_v3_example()]
    return example
def create_accountcommon_conflicted_user_platform_accounts_example() -> AccountcommonConflictedUserPlatformAccounts:
    """Build an AccountcommonConflictedUserPlatformAccounts populated with randomized field values."""
    example = AccountcommonConflictedUserPlatformAccounts()
    example.platform_user_id = randomize()
    example.publisher_accounts = [create_accountcommon_user_with_linked_platform_accounts_example()]
    return example
def create_accountcommon_country_age_restriction_example() -> AccountcommonCountryAgeRestriction:
    """Build an AccountcommonCountryAgeRestriction populated with randomized field values."""
    example = AccountcommonCountryAgeRestriction()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.country_code = randomize()
    example.country_name = randomize()
    example.enable = randomize("bool")
    return example
def create_accountcommon_description_example() -> AccountcommonDescription:
    """Build an AccountcommonDescription populated with randomized field values."""
    example = AccountcommonDescription()
    example.en_us = randomize()
    example.zh_cn = randomize()
    return example
def create_accountcommon_distinct_linked_platform_v3_example() -> AccountcommonDistinctLinkedPlatformV3:
    """Build an AccountcommonDistinctLinkedPlatformV3 populated with randomized field values."""
    example = AccountcommonDistinctLinkedPlatformV3()
    example.details = [create_accountcommon_simple_user_platform_info_v3_example()]
    example.linked_at = randomize()
    example.platform_name = randomize()
    example.platform_user_id = randomize()
    return example
def create_accountcommon_distinct_platform_response_v3_example() -> AccountcommonDistinctPlatformResponseV3:
    """Build an AccountcommonDistinctPlatformResponseV3 wrapping one randomized linked platform."""
    example = AccountcommonDistinctPlatformResponseV3()
    example.platforms = [create_accountcommon_distinct_linked_platform_v3_example()]
    return example
def create_accountcommon_input_validation_description_example() -> AccountcommonInputValidationDescription:
    """Build an AccountcommonInputValidationDescription populated with randomized field values."""
    example = AccountcommonInputValidationDescription()
    example.language = randomize()
    example.message = [randomize()]
    return example
def create_accountcommon_jwt_ban_v3_example() -> AccountcommonJWTBanV3:
    """Build an AccountcommonJWTBanV3 populated with randomized field values."""
    example = AccountcommonJWTBanV3()
    example.ban = randomize()
    example.enabled = randomize("bool")
    example.end_date = randomize("date")
    example.targeted_namespace = randomize("slug")
    example.disabled_date = randomize("date")
    return example
def create_accountcommon_list_users_with_platform_accounts_response_example() -> AccountcommonListUsersWithPlatformAccountsResponse:
    """Build an AccountcommonListUsersWithPlatformAccountsResponse populated with randomized field values."""
    example = AccountcommonListUsersWithPlatformAccountsResponse()
    example.data = [create_accountcommon_user_with_platform_accounts_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    example.total_data = randomize("int", min_val=1, max_val=1000)
    return example
def create_accountcommon_namespace_role_example() -> AccountcommonNamespaceRole:
    """Build an AccountcommonNamespaceRole populated with randomized field values."""
    example = AccountcommonNamespaceRole()
    example.namespace = randomize("slug")
    example.role_id = randomize("uid")
    return example
def create_accountcommon_netflix_certificates_example() -> AccountcommonNetflixCertificates:
    """Build an AccountcommonNetflixCertificates populated with randomized field values."""
    example = AccountcommonNetflixCertificates()
    example.encrypted_private_key = randomize()
    example.public_certificate = randomize()
    example.root_certificate = randomize()
    return example
def create_accountcommon_pagination_example() -> AccountcommonPagination:
instance = AccountcommonPagination()
instance.first = randomize()
instance.last = randomize()
instance.next_ = randomize()
instance.previous = randomize()
return instance
def create_accountcommon_pagination_v3_example() -> AccountcommonPaginationV3:
instance = AccountcommonPaginationV3()
instance.first = randomize()
instance.last = randomize()
instance.next_ = randomize()
instance.previous = randomize()
return instance
def create_accountcommon_permission_example() -> AccountcommonPermission:
    """Return an AccountcommonPermission example with randomized field values."""
    example = AccountcommonPermission()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_accountcommon_permission_v3_example() -> AccountcommonPermissionV3:
    """Return an AccountcommonPermissionV3 example with randomized field values."""
    example = AccountcommonPermissionV3()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_accountcommon_permissions_example() -> AccountcommonPermissions:
    """Return an AccountcommonPermissions example with randomized field values."""
    example = AccountcommonPermissions()
    example.permissions = [create_accountcommon_permission_example()]
    return example
def create_accountcommon_permissions_v3_example() -> AccountcommonPermissionsV3:
    """Return an AccountcommonPermissionsV3 example with randomized field values."""
    example = AccountcommonPermissionsV3()
    example.permissions = [create_accountcommon_permission_v3_example()]
    return example
def create_accountcommon_platform_account_example() -> AccountcommonPlatformAccount:
    """Return an AccountcommonPlatformAccount example with randomized field values."""
    example = AccountcommonPlatformAccount()
    example.namespace = randomize("slug")
    example.platform_user_id = randomize()
    return example
def create_accountcommon_registered_domain_example() -> AccountcommonRegisteredDomain:
    """Return an AccountcommonRegisteredDomain example with randomized field values."""
    example = AccountcommonRegisteredDomain()
    example.affected_client_i_ds = [randomize()]
    example.domain = randomize()
    example.namespaces = [randomize()]
    example.role_id = randomize("uid")
    return example
def create_accountcommon_role_example() -> AccountcommonRole:
    """Return an AccountcommonRole example with randomized field values."""
    example = AccountcommonRole()
    example.admin_role = randomize("bool")
    example.deletable = randomize("bool")
    example.is_wildcard = randomize("bool")
    example.managers = [create_accountcommon_role_manager_example()]
    example.members = [create_accountcommon_role_member_example()]
    example.permissions = [create_accountcommon_permission_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_accountcommon_role_manager_example() -> AccountcommonRoleManager:
    """Return an AccountcommonRoleManager example with randomized field values."""
    example = AccountcommonRoleManager()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_manager_v3_example() -> AccountcommonRoleManagerV3:
    """Return an AccountcommonRoleManagerV3 example with randomized field values."""
    example = AccountcommonRoleManagerV3()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_member_example() -> AccountcommonRoleMember:
    """Return an AccountcommonRoleMember example with randomized field values."""
    example = AccountcommonRoleMember()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_member_v3_example() -> AccountcommonRoleMemberV3:
    """Return an AccountcommonRoleMemberV3 example with randomized field values."""
    example = AccountcommonRoleMemberV3()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_v3_example() -> AccountcommonRoleV3:
    """Return an AccountcommonRoleV3 example with randomized field values."""
    example = AccountcommonRoleV3()
    example.admin_role = randomize("bool")
    example.is_wildcard = randomize("bool")
    example.managers = [create_accountcommon_role_manager_v3_example()]
    example.members = [create_accountcommon_role_member_v3_example()]
    example.permissions = [create_accountcommon_permission_v3_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_accountcommon_simple_user_platform_info_v3_example() -> AccountcommonSimpleUserPlatformInfoV3:
    """Return an AccountcommonSimpleUserPlatformInfoV3 example with randomized field values."""
    example = AccountcommonSimpleUserPlatformInfoV3()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.display_name = randomize("slug")
    example.platform_id = randomize()
    return example
def create_accountcommon_user_linked_platform_example() -> AccountcommonUserLinkedPlatform:
    """Return an AccountcommonUserLinkedPlatform example with randomized field values."""
    example = AccountcommonUserLinkedPlatform()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.xuid = randomize()
    return example
def create_accountcommon_user_linked_platform_v3_example() -> AccountcommonUserLinkedPlatformV3:
    """Return an AccountcommonUserLinkedPlatformV3 example with randomized field values."""
    example = AccountcommonUserLinkedPlatformV3()
    example.account_group = randomize()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    return example
def create_accountcommon_user_linked_platforms_response_v3_example() -> AccountcommonUserLinkedPlatformsResponseV3:
    """Return an AccountcommonUserLinkedPlatformsResponseV3 example with randomized field values."""
    example = AccountcommonUserLinkedPlatformsResponseV3()
    example.data = [create_accountcommon_user_linked_platform_v3_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_accountcommon_user_platform_info_example() -> AccountcommonUserPlatformInfo:
    """Return an AccountcommonUserPlatformInfo example with randomized field values."""
    example = AccountcommonUserPlatformInfo()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_platforms_example() -> AccountcommonUserPlatforms:
    """Return an AccountcommonUserPlatforms example with randomized field values."""
    example = AccountcommonUserPlatforms()
    example.user_id_platforms = [create_accountcommon_user_platform_info_example()]
    return example
def create_accountcommon_user_search_by_platform_id_result_example() -> AccountcommonUserSearchByPlatformIDResult:
    """Return an AccountcommonUserSearchByPlatformIDResult example with randomized field values."""
    example = AccountcommonUserSearchByPlatformIDResult()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    example.phone_number = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_search_result_example() -> AccountcommonUserSearchResult:
    """Return an AccountcommonUserSearchResult example with randomized field values."""
    example = AccountcommonUserSearchResult()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    example.phone_number = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_with_linked_platform_accounts_example() -> AccountcommonUserWithLinkedPlatformAccounts:
    """Return an AccountcommonUserWithLinkedPlatformAccounts example with randomized field values."""
    example = AccountcommonUserWithLinkedPlatformAccounts()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_platform_account_example()]
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_with_platform_accounts_example() -> AccountcommonUserWithPlatformAccounts:
    """Return an AccountcommonUserWithPlatformAccounts example with randomized field values."""
    example = AccountcommonUserWithPlatformAccounts()
    example.linked_platforms = [create_accountcommon_platform_account_example()]
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_banned_by_example() -> BannedBy:
    """Return a BannedBy example with randomized field values."""
    example = BannedBy()
    example.display_name = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_bloom_filter_json_example() -> BloomFilterJSON:
    """Return a BloomFilterJSON example with randomized field values."""
    example = BloomFilterJSON()
    example.bits = [randomize("int", min_val=1, max_val=1000)]
    example.k = randomize("int", min_val=1, max_val=1000)
    example.m = randomize("int", min_val=1, max_val=1000)
    return example
def create_clientmodel_client_create_request_example() -> ClientmodelClientCreateRequest:
    """Return a ClientmodelClientCreateRequest example with randomized field values."""
    example = ClientmodelClientCreateRequest()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_example()]
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    example.secret = randomize()
    return example
def create_clientmodel_client_creation_response_example() -> ClientmodelClientCreationResponse:
    """Return a ClientmodelClientCreationResponse example with randomized field values."""
    example = ClientmodelClientCreationResponse()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_example()]
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    return example
def create_clientmodel_client_creation_v3_request_example() -> ClientmodelClientCreationV3Request:
    """Return a ClientmodelClientCreationV3Request example with randomized field values."""
    example = ClientmodelClientCreationV3Request()
    example.audiences = [randomize()]
    example.base_uri = randomize()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_v3_example()]
    example.client_platform = randomize()
    example.namespace = randomize("slug")
    example.oauth_client_type = randomize()
    example.redirect_uri = randomize()
    example.secret = randomize()
    example.deletable = randomize("bool")
    return example
def create_clientmodel_client_response_example() -> ClientmodelClientResponse:
    """Return a ClientmodelClientResponse example with randomized field values."""
    example = ClientmodelClientResponse()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_example()]
    example.created_at = randomize("date")
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    return example
def create_clientmodel_client_update_request_example() -> ClientmodelClientUpdateRequest:
    """Return a ClientmodelClientUpdateRequest example with randomized field values."""
    example = ClientmodelClientUpdateRequest()
    example.client_name = randomize()
    example.redirect_uri = randomize()
    return example
def create_clientmodel_client_update_secret_request_example() -> ClientmodelClientUpdateSecretRequest:
    """Return a ClientmodelClientUpdateSecretRequest example with randomized field values."""
    example = ClientmodelClientUpdateSecretRequest()
    example.new_secret = randomize()
    return example
def create_clientmodel_client_update_v3_request_example() -> ClientmodelClientUpdateV3Request:
    """Return a ClientmodelClientUpdateV3Request example with randomized field values."""
    example = ClientmodelClientUpdateV3Request()
    example.client_platform = randomize()
    example.audiences = [randomize()]
    example.base_uri = randomize()
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_v3_example()]
    example.deletable = randomize("bool")
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    return example
def create_clientmodel_client_v3_response_example() -> ClientmodelClientV3Response:
    """Return a ClientmodelClientV3Response example with randomized field values."""
    example = ClientmodelClientV3Response()
    example.audiences = [randomize()]
    example.base_uri = randomize()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_v3_example()]
    example.client_platform = randomize()
    example.created_at = randomize("date")
    example.modified_at = randomize("date")
    example.namespace = randomize("slug")
    example.oauth_client_type = randomize()
    example.redirect_uri = randomize()
    example.scopes = [randomize()]
    return example
def create_clientmodel_clients_v3_response_example() -> ClientmodelClientsV3Response:
    """Return a ClientmodelClientsV3Response example with randomized field values."""
    example = ClientmodelClientsV3Response()
    example.data = [create_clientmodel_client_v3_response_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_legal_accepted_policies_request_example() -> LegalAcceptedPoliciesRequest:
    """Return a LegalAcceptedPoliciesRequest example with randomized field values."""
    example = LegalAcceptedPoliciesRequest()
    example.is_accepted = randomize("bool")
    example.localized_policy_version_id = randomize()
    example.policy_id = randomize()
    example.policy_version_id = randomize()
    return example
def create_model_add_user_role_v4_request_example() -> ModelAddUserRoleV4Request:
    """Return a ModelAddUserRoleV4Request example with randomized field values."""
    example = ModelAddUserRoleV4Request()
    example.assigned_namespaces = [randomize()]
    example.role_id = randomize("uid")
    return example
def create_model_age_restriction_request_example() -> ModelAgeRestrictionRequest:
    """Return a ModelAgeRestrictionRequest example with randomized field values."""
    example = ModelAgeRestrictionRequest()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.enable = randomize("bool")
    return example
def create_model_age_restriction_request_v3_example() -> ModelAgeRestrictionRequestV3:
    """Return a ModelAgeRestrictionRequestV3 example with randomized field values."""
    example = ModelAgeRestrictionRequestV3()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.enable = randomize("bool")
    return example
def create_model_age_restriction_response_example() -> ModelAgeRestrictionResponse:
    """Return a ModelAgeRestrictionResponse example with randomized field values."""
    example = ModelAgeRestrictionResponse()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.enable = randomize("bool")
    return example
def create_model_age_restriction_response_v3_example() -> ModelAgeRestrictionResponseV3:
    """Return a ModelAgeRestrictionResponseV3 example with randomized field values."""
    example = ModelAgeRestrictionResponseV3()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.enable = randomize("bool")
    return example
def create_model_assign_user_v4_request_example() -> ModelAssignUserV4Request:
    """Return a ModelAssignUserV4Request example with randomized field values."""
    example = ModelAssignUserV4Request()
    example.assigned_namespaces = [randomize()]
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_assigned_user_v4_response_example() -> ModelAssignedUserV4Response:
    """Return a ModelAssignedUserV4Response example with randomized field values."""
    example = ModelAssignedUserV4Response()
    example.assigned_namespaces = [randomize()]
    example.display_name = randomize("slug")
    example.email = randomize("email")
    example.role_id = randomize("uid")
    example.user_id = randomize("uid")
    return example
def create_model_authenticator_key_response_v4_example() -> ModelAuthenticatorKeyResponseV4:
    """Return a ModelAuthenticatorKeyResponseV4 example with randomized field values."""
    example = ModelAuthenticatorKeyResponseV4()
    example.secret_key = randomize()
    example.uri = randomize()
    return example
def create_model_backup_codes_response_v4_example() -> ModelBackupCodesResponseV4:
    """Return a ModelBackupCodesResponseV4 example with randomized field values."""
    example = ModelBackupCodesResponseV4()
    example.generated_at = randomize("int", min_val=1, max_val=1000)
    example.invalid_codes = [randomize()]
    example.valid_codes = [randomize()]
    return example
def create_model_ban_create_request_example() -> ModelBanCreateRequest:
    """Return a ModelBanCreateRequest example with randomized field values."""
    example = ModelBanCreateRequest()
    example.ban = randomize()
    example.comment = randomize()
    example.end_date = randomize()
    example.reason = randomize()
    example.skip_notif = randomize("bool")
    return example
def create_model_ban_update_request_example() -> ModelBanUpdateRequest:
    """Return a ModelBanUpdateRequest example with randomized field values."""
    example = ModelBanUpdateRequest()
    example.enabled = randomize("bool")
    example.skip_notif = randomize("bool")
    return example
def create_model_check_valid_user_id_request_v4_example() -> ModelCheckValidUserIDRequestV4:
    """Return a ModelCheckValidUserIDRequestV4 example with randomized field values."""
    example = ModelCheckValidUserIDRequestV4()
    example.user_ids = [randomize()]
    return example
def create_model_country_example() -> ModelCountry:
    """Return a ModelCountry example with randomized field values."""
    example = ModelCountry()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.country_code = randomize()
    example.country_name = randomize()
    example.enable = randomize("bool")
    return example
def create_model_country_age_restriction_request_example() -> ModelCountryAgeRestrictionRequest:
    """Return a ModelCountryAgeRestrictionRequest example with randomized field values."""
    example = ModelCountryAgeRestrictionRequest()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    return example
def create_model_country_age_restriction_v3_request_example() -> ModelCountryAgeRestrictionV3Request:
    """Return a ModelCountryAgeRestrictionV3Request example with randomized field values."""
    example = ModelCountryAgeRestrictionV3Request()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    return example
def create_model_country_v3_response_example() -> ModelCountryV3Response:
    """Return a ModelCountryV3Response example with randomized field values."""
    example = ModelCountryV3Response()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.country_code = randomize()
    example.country_name = randomize()
    example.enable = randomize("bool")
    return example
def create_model_create_justice_user_response_example() -> ModelCreateJusticeUserResponse:
    """Return a ModelCreateJusticeUserResponse example with randomized field values."""
    example = ModelCreateJusticeUserResponse()
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_disable_user_request_example() -> ModelDisableUserRequest:
    """Return a ModelDisableUserRequest example with randomized field values."""
    example = ModelDisableUserRequest()
    example.reason = randomize()
    return example
def create_model_email_update_request_v4_example() -> ModelEmailUpdateRequestV4:
    """Return a ModelEmailUpdateRequestV4 example with randomized field values."""
    example = ModelEmailUpdateRequestV4()
    example.code = randomize()
    example.email_address = randomize("email")
    return example
def create_model_enabled_factors_response_v4_example() -> ModelEnabledFactorsResponseV4:
    """Return a ModelEnabledFactorsResponseV4 example with randomized field values."""
    example = ModelEnabledFactorsResponseV4()
    example.default = randomize()
    example.enabled = [randomize()]
    return example
def create_model_forgot_password_request_v3_example() -> ModelForgotPasswordRequestV3:
    """Return a ModelForgotPasswordRequestV3 example with randomized field values."""
    example = ModelForgotPasswordRequestV3()
    example.email_address = randomize("email")
    example.language_tag = randomize()
    return example
def create_model_get_admin_users_response_example() -> ModelGetAdminUsersResponse:
    """Return a ModelGetAdminUsersResponse example with randomized field values."""
    example = ModelGetAdminUsersResponse()
    example.data = [create_model_user_response_example()]
    example.paging = create_accountcommon_pagination_example()
    return example
def create_model_get_publisher_user_response_example() -> ModelGetPublisherUserResponse:
    """Return a ModelGetPublisherUserResponse example with randomized field values."""
    example = ModelGetPublisherUserResponse()
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_get_user_ban_v3_response_example() -> ModelGetUserBanV3Response:
    """Return a ModelGetUserBanV3Response example with randomized field values."""
    example = ModelGetUserBanV3Response()
    example.data = [create_model_user_ban_response_v3_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_get_user_justice_platform_account_response_example() -> ModelGetUserJusticePlatformAccountResponse:
    """Return a ModelGetUserJusticePlatformAccountResponse example with randomized field values."""
    example = ModelGetUserJusticePlatformAccountResponse()
    example.designated_namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_get_user_mapping_example() -> ModelGetUserMapping:
    """Return a ModelGetUserMapping example with randomized field values."""
    example = ModelGetUserMapping()
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_get_users_response_with_pagination_v3_example() -> ModelGetUsersResponseWithPaginationV3:
    """Return a ModelGetUsersResponseWithPaginationV3 example with randomized field values."""
    example = ModelGetUsersResponseWithPaginationV3()
    example.data = [create_model_user_response_v3_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_input_validation_data_example() -> ModelInputValidationData:
    """Return a ModelInputValidationData example with randomized field values."""
    example = ModelInputValidationData()
    example.field = randomize()
    example.validation = create_model_validation_detail_example()
    return example
def create_model_input_validation_data_public_example() -> ModelInputValidationDataPublic:
    """Return a ModelInputValidationDataPublic example with randomized field values."""
    example = ModelInputValidationDataPublic()
    example.field = randomize()
    example.validation = create_model_validation_detail_public_example()
    return example
def create_model_input_validation_update_payload_example() -> ModelInputValidationUpdatePayload:
    """Return a ModelInputValidationUpdatePayload example with randomized field values."""
    example = ModelInputValidationUpdatePayload()
    example.field = randomize()
    example.validation = create_validation_example()
    return example
def create_model_input_validations_public_response_example() -> ModelInputValidationsPublicResponse:
    """Return a ModelInputValidationsPublicResponse example with randomized field values."""
    example = ModelInputValidationsPublicResponse()
    example.data = [create_model_input_validation_data_public_example()]
    example.version = randomize("int", min_val=1, max_val=1000)
    return example
def create_model_input_validations_response_example() -> ModelInputValidationsResponse:
    """Return a ModelInputValidationsResponse example with randomized field values."""
    example = ModelInputValidationsResponse()
    example.data = [create_model_input_validation_data_example()]
    example.version = randomize("int", min_val=1, max_val=1000)
    return example
def create_model_invite_user_request_v3_example() -> ModelInviteUserRequestV3:
    """Return a ModelInviteUserRequestV3 example with randomized field values."""
    example = ModelInviteUserRequestV3()
    example.email_addresses = [randomize()]
    example.is_admin = randomize("bool")
    example.roles = [randomize()]
    return example
def create_model_invite_user_request_v4_example() -> ModelInviteUserRequestV4:
    """Return a ModelInviteUserRequestV4 example with randomized field values."""
    example = ModelInviteUserRequestV4()
    example.assigned_namespaces = [randomize()]
    example.email_addresses = [randomize()]
    example.is_admin = randomize("bool")
    example.role_id = randomize("uid")
    return example
def create_model_invite_user_response_v3_example() -> ModelInviteUserResponseV3:
    """Return a ModelInviteUserResponseV3 example with randomized field values."""
    example = ModelInviteUserResponseV3()
    example.data = [create_model_user_invitation_v3_example()]
    return example
def create_model_link_platform_account_request_example() -> ModelLinkPlatformAccountRequest:
    """Return a ModelLinkPlatformAccountRequest example with randomized field values."""
    example = ModelLinkPlatformAccountRequest()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    return example
def create_model_link_platform_account_with_progression_request_example() -> ModelLinkPlatformAccountWithProgressionRequest:
    """Return a ModelLinkPlatformAccountWithProgressionRequest example with randomized field values."""
    example = ModelLinkPlatformAccountWithProgressionRequest()
    example.chosen_namespaces = [randomize()]
    example.request_id = randomize()
    return example
def create_model_link_request_example() -> ModelLinkRequest:
    """Return a ModelLinkRequest example with randomized field values."""
    example = ModelLinkRequest()
    example.client_id = randomize("uid")
    example.namespace = randomize("slug")
    example.operation_name = randomize()
    example.payload = {randomize(): randomize()}
    example.redirect_uri = randomize()
    example.request_id = randomize()
    example.status = randomize()
    example.conflict_publisher_user_id = randomize()
    example.conflict_user_linked_games = [randomize()]
    example.current_user_linked_games = [randomize()]
    example.error = create_rest_error_response_example()
    example.expiration = randomize("int", min_val=1, max_val=1000)
    example.platform_display_name = randomize()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    return example
def create_model_list_assigned_users_v4_response_example() -> ModelListAssignedUsersV4Response:
    """Return a ModelListAssignedUsersV4Response example with randomized field values."""
    example = ModelListAssignedUsersV4Response()
    example.data = [create_model_assigned_user_v4_response_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_list_bulk_user_response_example() -> ModelListBulkUserResponse:
    """Return a ModelListBulkUserResponse example with randomized field values."""
    example = ModelListBulkUserResponse()
    example.data = [create_model_user_base_info_example()]
    return example
def create_model_list_email_address_request_example() -> ModelListEmailAddressRequest:
    """Return a ModelListEmailAddressRequest example with randomized field values."""
    example = ModelListEmailAddressRequest()
    example.list_email_address_request = [randomize()]
    return example
def create_model_list_role_v4_response_example() -> ModelListRoleV4Response:
    """Return a ModelListRoleV4Response example with randomized field values."""
    example = ModelListRoleV4Response()
    example.data = [create_model_role_v4_response_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_list_user_information_result_example() -> ModelListUserInformationResult:
    """Return a ModelListUserInformationResult example with randomized field values."""
    example = ModelListUserInformationResult()
    example.data = [create_model_user_info_response_example()]
    return example
def create_model_list_user_response_v3_example() -> ModelListUserResponseV3:
    """Return a ModelListUserResponseV3 example with randomized field values."""
    example = ModelListUserResponseV3()
    example.data = [create_model_user_response_v3_example()]
    return example
def create_model_list_user_roles_v4_response_example() -> ModelListUserRolesV4Response:
    """Return a ModelListUserRolesV4Response example with randomized field values."""
    example = ModelListUserRolesV4Response()
    example.data = [create_model_user_roles_v4_response_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_list_valid_user_id_response_v4_example() -> ModelListValidUserIDResponseV4:
    """Return a ModelListValidUserIDResponseV4 example with randomized field values."""
    example = ModelListValidUserIDResponseV4()
    example.data = [create_model_valid_user_id_response_v4_example()]
    return example
def create_model_login_histories_response_example() -> ModelLoginHistoriesResponse:
    """Return a ModelLoginHistoriesResponse example with randomized field values."""
    example = ModelLoginHistoriesResponse()
    example.data = [create_model_user_login_history_response_example()]
    example.paging = create_accountcommon_pagination_example()
    return example
def create_model_namespace_role_request_example() -> ModelNamespaceRoleRequest:
    """Return a ModelNamespaceRoleRequest example with randomized field values."""
    example = ModelNamespaceRoleRequest()
    example.namespace = randomize("slug")
    example.role_id = randomize("uid")
    return example
def create_model_permission_delete_request_example() -> ModelPermissionDeleteRequest:
    """Return a ModelPermissionDeleteRequest example with randomized field values."""
    example = ModelPermissionDeleteRequest()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example
def create_model_platform_domain_delete_request_example() -> ModelPlatformDomainDeleteRequest:
    """Return a ModelPlatformDomainDeleteRequest example with randomized field values."""
    example = ModelPlatformDomainDeleteRequest()
    example.domain = randomize()
    return example
def create_model_platform_domain_response_example() -> ModelPlatformDomainResponse:
    """Return a ModelPlatformDomainResponse example with randomized field values."""
    example = ModelPlatformDomainResponse()
    example.registered_domains = [create_accountcommon_registered_domain_example()]
    return example
def create_model_platform_domain_update_request_example() -> ModelPlatformDomainUpdateRequest:
    """Return a ModelPlatformDomainUpdateRequest example with randomized field values."""
    example = ModelPlatformDomainUpdateRequest()
    example.affected_client_i_ds = [randomize()]
    example.assigned_namespaces = [randomize()]
    example.domain = randomize()
    example.role_id = randomize("uid")
    return example
def create_model_platform_user_id_request_example() -> ModelPlatformUserIDRequest:
    """Return a ModelPlatformUserIDRequest example with randomized field values."""
    example = ModelPlatformUserIDRequest()
    example.platform_user_ids = [randomize()]
    return example
def create_model_platform_user_information_example() -> ModelPlatformUserInformation:
    """Return a ModelPlatformUserInformation example with randomized field values."""
    example = ModelPlatformUserInformation()
    example.display_name = randomize("slug")
    example.linked_at = randomize("date")
    example.namespace = randomize("slug")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.email_address = randomize("email")
    example.xuid = randomize()
    return example
def create_model_public_third_party_platform_info_example() -> ModelPublicThirdPartyPlatformInfo:
    """Return a ModelPublicThirdPartyPlatformInfo example with randomized field values."""
    example = ModelPublicThirdPartyPlatformInfo()
    example.app_id = randomize("uid")
    example.client_id = randomize("uid")
    example.environment = randomize()
    example.is_active = randomize("bool")
    example.platform_id = randomize()
    return example
def create_model_public_user_information_response_v3_example() -> ModelPublicUserInformationResponseV3:
    """Return a ModelPublicUserInformationResponseV3 example with randomized field values."""
    example = ModelPublicUserInformationResponseV3()
    example.data = [create_model_public_user_information_v3_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_public_user_information_v3_example() -> ModelPublicUserInformationV3:
    """Return a ModelPublicUserInformationV3 example with randomized field values."""
    example = ModelPublicUserInformationV3()
    example.created_at = randomize("date")
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.user_name = randomize("slug")
    return example
def create_model_public_user_response_example() -> ModelPublicUserResponse:
    """Return a ModelPublicUserResponse example with randomized field values."""
    example = ModelPublicUserResponse()
    example.auth_type = randomize()
    example.bans = [create_model_user_active_ban_response_example()]
    example.created_at = randomize("date")
    example.deletion_status = randomize("bool")
    example.display_name = randomize("slug")
    example.email_verified = randomize("bool")
    example.enabled = randomize("bool")
    example.last_enabled_changed_time = randomize("date")
    example.login_id = randomize()
    example.namespace = randomize("slug")
    example.namespace_roles = [create_accountcommon_namespace_role_example()]
    example.permissions = [create_accountcommon_permission_example()]
    example.phone_verified = randomize("bool")
    example.roles = [randomize()]
    example.user_id = randomize("uid")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.username = randomize("slug")
    example.xuid = randomize()
    return example
def create_model_public_user_response_v3_example() -> ModelPublicUserResponseV3:
    """Return a ModelPublicUserResponseV3 example with randomized field values."""
    example = ModelPublicUserResponseV3()
    example.auth_type = randomize()
    example.bans = [create_model_user_active_ban_response_v3_example()]
    example.created_at = randomize("date")
    example.deletion_status = randomize("bool")
    example.display_name = randomize("slug")
    example.email_verified = randomize("bool")
    example.enabled = randomize("bool")
    example.last_date_of_birth_changed_time = randomize("date")
    example.last_enabled_changed_time = randomize("date")
    example.namespace = randomize("slug")
    example.namespace_roles = [create_accountcommon_namespace_role_example()]
    example.permissions = [create_model_user_permissions_response_v3_example()]
    example.phone_verified = randomize("bool")
    example.roles = [randomize()]
    example.user_id = randomize("uid")
    example.avatar_url = randomize("url")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.user_name = randomize("slug")
    return example
def create_model_public_users_response_example() -> ModelPublicUsersResponse:
    """Return a ModelPublicUsersResponse example with randomized field values."""
    example = ModelPublicUsersResponse()
    example.users = [create_model_public_user_response_example()]
    return example
def create_model_remove_user_role_v4_request_example() -> ModelRemoveUserRoleV4Request:
    """Return a ModelRemoveUserRoleV4Request example with randomized field values."""
    example = ModelRemoveUserRoleV4Request()
    example.assigned_namespaces = [randomize()]
    example.role_id = randomize("uid")
    return example
def create_model_reset_password_request_example() -> ModelResetPasswordRequest:
    """Return a ModelResetPasswordRequest example with randomized field values."""
    example = ModelResetPasswordRequest()
    example.code = randomize()
    example.login_id = randomize()
    example.new_password = randomize()
    return example
def create_model_reset_password_request_v3_example() -> ModelResetPasswordRequestV3:
    """Return a ModelResetPasswordRequestV3 example with randomized field values."""
    example = ModelResetPasswordRequestV3()
    example.code = randomize()
    example.email_address = randomize("email")
    example.new_password = randomize()
    return example
def create_model_revoke_user_v4_request_example() -> ModelRevokeUserV4Request:
    """Return a ModelRevokeUserV4Request example with randomized field values."""
    example = ModelRevokeUserV4Request()
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_model_role_admin_status_response_example() -> ModelRoleAdminStatusResponse:
    """Return a ModelRoleAdminStatusResponse example with randomized field values."""
    example = ModelRoleAdminStatusResponse()
    example.admin_role = randomize("bool")
    return example
def create_model_role_admin_status_response_v3_example() -> ModelRoleAdminStatusResponseV3:
    """Return a ModelRoleAdminStatusResponseV3 example with randomized field values."""
    example = ModelRoleAdminStatusResponseV3()
    example.admin_role = randomize("bool")
    return example
def create_model_role_create_request_example() -> ModelRoleCreateRequest:
instance = ModelRoleCreateRequest()
instance.admin_role = randomize("bool")
instance.managers = [create_accountcommon_role_manager_example()]
instance.members = [create_accountcommon_role_member_example()]
instance.permissions = [create_accountcommon_permission_example()]
instance.role_name = randomize()
return instance
def create_model_role_create_v3_request_example() -> ModelRoleCreateV3Request:
instance = ModelRoleCreateV3Request()
instance.admin_role = randomize("bool")
instance.is_wildcard = randomize("bool")
instance.managers = [create_accountcommon_role_manager_v3_example()]
instance.members = [create_accountcommon_role_member_v3_example()]
instance.permissions = [create_accountcommon_permission_v3_example()]
instance.role_name = randomize()
instance.deletable = randomize("bool")
return instance
def create_model_role_managers_request_example() -> ModelRoleManagersRequest:
instance = ModelRoleManagersRequest()
instance.managers = [create_accountcommon_role_manager_example()]
return instance
def create_model_role_managers_request_v3_example() -> ModelRoleManagersRequestV3:
instance = ModelRoleManagersRequestV3()
instance.managers = [create_accountcommon_role_manager_v3_example()]
return instance
def create_model_role_managers_response_example() -> ModelRoleManagersResponse:
instance = ModelRoleManagersResponse()
instance.managers = [create_accountcommon_role_manager_example()]
return instance
def create_model_role_managers_responses_v3_example() -> ModelRoleManagersResponsesV3:
instance = ModelRoleManagersResponsesV3()
instance.data = [create_accountcommon_role_manager_v3_example()]
instance.paging = create_accountcommon_pagination_v3_example()
return instance
def create_model_role_members_request_example() -> ModelRoleMembersRequest:
instance = ModelRoleMembersRequest()
instance.members = [create_accountcommon_role_member_example()]
return instance
def create_model_role_members_request_v3_example() -> ModelRoleMembersRequestV3:
instance = ModelRoleMembersRequestV3()
instance.members = [create_accountcommon_role_member_v3_example()]
return instance
def create_model_role_members_response_example() -> ModelRoleMembersResponse:
instance = ModelRoleMembersResponse()
instance.members = [create_accountcommon_role_member_example()]
return instance
def create_model_role_members_response_v3_example() -> ModelRoleMembersResponseV3:
instance = ModelRoleMembersResponseV3()
instance.data = [create_accountcommon_role_member_v3_example()]
instance.paging = create_accountcommon_pagination_v3_example()
return instance
def create_model_role_names_response_v3_example() -> ModelRoleNamesResponseV3:
    """Build a ModelRoleNamesResponseV3 populated with randomized example data."""
    example = ModelRoleNamesResponseV3()
    example.data = [randomize()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_model_role_response_example() -> ModelRoleResponse:
    """Build a ModelRoleResponse populated with randomized example data."""
    example = ModelRoleResponse()
    example.is_wildcard = randomize("bool")
    example.permissions = [create_accountcommon_permission_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_model_role_response_v3_example() -> ModelRoleResponseV3:
    """Build a ModelRoleResponseV3 populated with randomized example data."""
    example = ModelRoleResponseV3()
    example.admin_role = randomize("bool")
    example.is_wildcard = randomize("bool")
    example.permissions = [create_accountcommon_permission_v3_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_model_role_response_with_managers_example() -> ModelRoleResponseWithManagers:
instance = ModelRoleResponseWithManagers()
instance.is_wildcard = randomize("bool")
instance.managers = [create_accountcommon_role_manager_example()]
instance.permissions = [create_accountcommon_permission_example()]
instance.role_id = randomize("uid")
instance.role_name = randomize()
return instance
def create_model_role_response_with_managers_and_pagination_v3_example() -> ModelRoleResponseWithManagersAndPaginationV3:
instance = ModelRoleResponseWithManagersAndPaginationV3()
instance.data = [create_model_role_response_with_managers_v3_example()]
instance.paging = create_accountcommon_pagination_v3_example()
return instance
def create_model_role_response_with_managers_v3_example() -> ModelRoleResponseWithManagersV3:
instance = ModelRoleResponseWithManagersV3()
instance.admin_role = randomize("bool")
instance.is_wildcard = randomize("bool")
instance.managers = [create_accountcommon_role_manager_v3_example()]
instance.permissions = [create_accountcommon_permission_v3_example()]
instance.role_id = randomize("uid")
instance.role_name = randomize()
return instance
def create_model_role_update_request_example() -> ModelRoleUpdateRequest:
instance = ModelRoleUpdateRequest()
instance.role_name = randomize()
return instance
def create_model_role_update_request_v3_example() -> ModelRoleUpdateRequestV3:
instance = ModelRoleUpdateRequestV3()
instance.is_wildcard = randomize("bool")
instance.role_name = randomize()
instance.deletable = randomize("bool")
return instance
def create_model_role_v4_request_example() -> ModelRoleV4Request:
instance = ModelRoleV4Request()
instance.admin_role = randomize("bool")
instance.is_wildcard = randomize("bool")
instance.role_name = randomize()
instance.deletable = randomize("bool")
return instance
def create_model_role_v4_response_example() -> ModelRoleV4Response:
instance = ModelRoleV4Response()
instance.admin_role = randomize("bool")
instance.is_wildcard = randomize("bool")
instance.permissions = [create_accountcommon_permission_v3_example()]
instance.role_id = randomize("uid")
instance.role_name = randomize()
return instance
def create_model_search_users_by_platform_id_response_example() -> ModelSearchUsersByPlatformIDResponse:
instance = ModelSearchUsersByPlatformIDResponse()
instance.data = [create_accountcommon_user_search_by_platform_id_result_example()]
instance.paging = create_accountcommon_pagination_example()
return instance
def create_model_search_users_response_example() -> ModelSearchUsersResponse:
instance = ModelSearchUsersResponse()
instance.data = [create_accountcommon_user_search_result_example()]
return instance
def create_model_search_users_response_with_pagination_v3_example() -> ModelSearchUsersResponseWithPaginationV3:
instance = ModelSearchUsersResponseWithPaginationV3()
instance.data = [create_model_user_response_v3_example()]
instance.paging = create_accountcommon_pagination_v3_example()
instance.total_data = randomize("int", min_val=1, max_val=1000)
return instance
def create_model_send_register_verification_code_request_example() -> ModelSendRegisterVerificationCodeRequest:
instance = ModelSendRegisterVerificationCodeRequest()
instance.email_address = randomize("email")
instance.language_tag = randomize()
return instance
def create_model_send_verification_code_request_example() -> ModelSendVerificationCodeRequest:
instance = ModelSendVerificationCodeRequest()
instance.language_tag = randomize()
instance.login_id = randomize()
instance.context = randomize()
return instance
def create_model_send_verification_code_request_v3_example() -> ModelSendVerificationCodeRequestV3:
instance = ModelSendVerificationCodeRequestV3()
instance.email_address = randomize("email")
instance.context = randomize()
instance.language_tag = randomize()
return instance
def create_model_sso_platform_credential_request_example() -> ModelSSOPlatformCredentialRequest:
instance = ModelSSOPlatformCredentialRequest()
instance.acs_url = randomize("url")
instance.api_key = randomize()
instance.app_id = randomize("uid")
instance.federation_metadata_url = randomize("url")
instance.is_active = randomize("bool")
instance.redirect_uri = randomize()
instance.secret = randomize()
instance.sso_url = randomize("url")
return instance
def create_model_sso_platform_credential_response_example() -> ModelSSOPlatformCredentialResponse:
instance = ModelSSOPlatformCredentialResponse()
instance.acs_url = randomize("url")
instance.app_id = randomize("uid")
instance.federation_metadata_url = randomize("url")
instance.is_active = randomize("bool")
instance.namespace = randomize("slug")
instance.platform_id = randomize()
instance.redirect_uri = randomize()
instance.secret = randomize()
instance.sso_url = randomize("url")
instance.truncated_api_key = randomize()
return instance
def create_model_third_party_login_platform_credential_request_example() -> ModelThirdPartyLoginPlatformCredentialRequest:
instance = ModelThirdPartyLoginPlatformCredentialRequest()
instance.acsurl = randomize()
instance.app_id = randomize("uid")
instance.aws_cognito_region = randomize()
instance.aws_cognito_user_pool = randomize()
instance.client_id = randomize("uid")
instance.environment = randomize()
instance.federation_metadata_url = randomize("url")
instance.generic_oauth_flow = randomize("bool")
instance.is_active = randomize("bool")
instance.issuer = randomize()
instance.jwks_endpoint = randomize()
instance.key_id = randomize()
instance.netflix_certificates = create_accountcommon_netflix_certificates_example()
instance.organization_id = randomize()
instance.platform_name = randomize()
instance.redirect_uri = randomize()
instance.secret = randomize()
instance.team_id = randomize()
instance.token_authentication_type = randomize()
instance.token_claims_mapping = {randomize(): randomize()}
return instance
def create_model_third_party_login_platform_credential_response_example() -> ModelThirdPartyLoginPlatformCredentialResponse:
instance = ModelThirdPartyLoginPlatformCredentialResponse()
instance.acsurl = randomize()
instance.app_id = randomize("uid")
instance.aws_cognito_region = randomize()
instance.aws_cognito_user_pool = randomize()
instance.client_id = randomize("uid")
instance.environment = randomize()
instance.federation_metadata_url = randomize("url")
instance.generic_oauth_flow = randomize("bool")
instance.is_active = randomize("bool")
instance.issuer = randomize()
instance.jwks_endpoint = randomize()
instance.key_id = randomize()
instance.namespace = randomize("slug")
instance.organization_id = randomize()
instance.platform_id = randomize()
instance.platform_name = randomize()
instance.redirect_uri = randomize()
instance.registered_domains = [create_accountcommon_registered_domain_example()]
instance.secret = randomize()
instance.team_id = randomize()
instance.token_authentication_type = randomize()
instance.token_claims_mapping = {randomize(): randomize()}
instance.netflix_certificates = create_accountcommon_netflix_certificates_example()
return instance
def create_model_unlink_user_platform_request_example() -> ModelUnlinkUserPlatformRequest:
instance = ModelUnlinkUserPlatformRequest()
instance.platform_namespace = randomize("slug")
return instance
def create_model_update_permission_schedule_request_example() -> ModelUpdatePermissionScheduleRequest:
instance = ModelUpdatePermissionScheduleRequest()
instance.sched_action = randomize("int", min_val=1, max_val=1000)
instance.sched_cron = randomize()
instance.sched_range = [randomize()]
return instance
def create_model_update_user_deletion_status_request_example() -> ModelUpdateUserDeletionStatusRequest:
instance = ModelUpdateUserDeletionStatusRequest()
instance.enabled = randomize("bool")
return instance
def create_model_update_user_status_request_example() -> ModelUpdateUserStatusRequest:
instance = ModelUpdateUserStatusRequest()
instance.enabled = randomize("bool")
instance.reason = randomize()
return instance
def create_model_upgrade_headless_account_request_example() -> ModelUpgradeHeadlessAccountRequest:
instance = ModelUpgradeHeadlessAccountRequest()
instance.login_id = randomize()
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_v3_request_example() -> ModelUpgradeHeadlessAccountV3Request:
instance = ModelUpgradeHeadlessAccountV3Request()
instance.email_address = randomize("email")
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_with_verification_code_request_example() -> ModelUpgradeHeadlessAccountWithVerificationCodeRequest:
instance = ModelUpgradeHeadlessAccountWithVerificationCodeRequest()
instance.code = randomize()
instance.login_id = randomize()
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_with_verification_code_request_v3_example() -> ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3:
instance = ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3()
instance.code = randomize()
instance.email_address = randomize("email")
instance.password = randomize("password")
instance.validate_only = randomize("bool")
instance.country = randomize("country")
instance.date_of_birth = randomize()
instance.display_name = randomize("slug")
return instance
def create_model_user_active_ban_response_example() -> ModelUserActiveBanResponse:
instance = ModelUserActiveBanResponse()
instance.ban = randomize()
instance.ban_id = randomize()
instance.end_date = randomize("date")
return instance
def create_model_user_active_ban_response_v3_example() -> ModelUserActiveBanResponseV3:
instance = ModelUserActiveBanResponseV3()
instance.ban = randomize()
instance.ban_id = randomize()
instance.end_date = randomize("date")
return instance
def create_model_user_ban_response_example() -> ModelUserBanResponse:
instance = ModelUserBanResponse()
instance.ban = randomize()
instance.ban_id = randomize()
instance.banned_by = create_banned_by_example()
instance.comment = randomize()
instance.created_at = randomize("date")
instance.enabled = randomize("bool")
instance.end_date = randomize("date")
instance.namespace = randomize("slug")
instance.reason = randomize()
instance.user_id = randomize("uid")
instance.disabled_date = randomize("date")
return instance
def create_model_user_ban_response_v3_example() -> ModelUserBanResponseV3:
instance = ModelUserBanResponseV3()
instance.ban = randomize()
instance.ban_id = randomize()
instance.banned_by = create_accountcommon_banned_by_v3_example()
instance.comment = randomize()
instance.created_at = randomize("date")
instance.disabled_date = randomize("date")
instance.enabled = randomize("bool")
instance.end_date = randomize("date")
instance.namespace = randomize("slug")
instance.reason = randomize()
instance.user_id = randomize("uid")
return instance
def create_model_user_base_info_example() -> ModelUserBaseInfo:
instance = ModelUserBaseInfo()
instance.avatar_url = randomize("url")
instance.display_name = randomize("slug")
instance.platform_user_ids = {randomize(): randomize()}
instance.user_id = randomize("uid")
return instance
def create_model_user_create_from_invitation_request_v3_example() -> ModelUserCreateFromInvitationRequestV3:
instance = ModelUserCreateFromInvitationRequestV3()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
return instance
def create_model_user_create_from_invitation_request_v4_example() -> ModelUserCreateFromInvitationRequestV4:
instance = ModelUserCreateFromInvitationRequestV4()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.username = randomize("slug")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
return instance
def create_model_user_create_request_example() -> ModelUserCreateRequest:
instance = ModelUserCreateRequest()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.login_id = randomize()
instance.password = randomize("password")
instance.password_md5_sum = randomize()
return instance
def create_model_user_create_request_v3_example() -> ModelUserCreateRequestV3:
instance = ModelUserCreateRequestV3()
instance.auth_type = randomize()
instance.code = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
instance.password_md5_sum = randomize()
return instance
def create_model_user_create_response_example() -> ModelUserCreateResponse:
instance = ModelUserCreateResponse()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.date_of_birth = randomize("adult_birthdate")
instance.display_name = randomize("slug")
instance.login_id = randomize()
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_create_response_v3_example() -> ModelUserCreateResponseV3:
instance = ModelUserCreateResponseV3()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.date_of_birth = randomize("adult_birthdate")
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_deletion_status_response_example() -> ModelUserDeletionStatusResponse:
    """Build a ModelUserDeletionStatusResponse populated with randomized example data."""
    example = ModelUserDeletionStatusResponse()
    example.deletion_status = randomize("bool")
    return example
def create_model_user_i_ds_request_example() -> ModelUserIDsRequest:
    """Build a ModelUserIDsRequest populated with randomized example data."""
    example = ModelUserIDsRequest()
    example.user_ids = [randomize()]
    return example
def create_model_user_info_response_example() -> ModelUserInfoResponse:
instance = ModelUserInfoResponse()
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_information_example() -> ModelUserInformation:
instance = ModelUserInformation()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.email_addresses = [randomize()]
instance.linked_platform_accounts = [create_model_platform_user_information_example()]
instance.phone_number = randomize()
instance.username = randomize("slug")
instance.xuid = randomize()
return instance
def create_model_user_invitation_v3_example() -> ModelUserInvitationV3:
instance = ModelUserInvitationV3()
instance.email = randomize("email")
instance.expired_at = randomize("date")
instance.roles = [create_accountcommon_namespace_role_example()]
instance.id_ = randomize()
return instance
def create_model_user_login_history_response_example() -> ModelUserLoginHistoryResponse:
instance = ModelUserLoginHistoryResponse()
instance.application_name = randomize()
instance.city = randomize()
instance.country = randomize("country")
instance.device_id = randomize()
instance.device_name = randomize()
instance.state = randomize()
instance.timestamp = randomize("int", min_val=1, max_val=1000)
return instance
def create_model_user_password_update_request_example() -> ModelUserPasswordUpdateRequest:
    """Return a ModelUserPasswordUpdateRequest filled with randomized example data.

    NOTE(review): the original ``old_password`` line contained a ``<PASSWORD>()``
    anonymization placeholder (not valid Python). Restored to
    ``randomize("password")`` to match every other password field in this
    module (e.g. the upgrade-headless-account factories) — confirm against the
    upstream codegen template.
    """
    instance = ModelUserPasswordUpdateRequest()
    instance.language_tag = randomize()
    instance.new_password = randomize()
    instance.old_password = randomize("password")
    return instance
def create_model_user_password_update_v3_request_example() -> ModelUserPasswordUpdateV3Request:
    """Return a ModelUserPasswordUpdateV3Request filled with randomized example data.

    NOTE(review): the original ``new_password`` and ``old_password`` lines both
    contained ``<PASSWORD>()`` anonymization placeholders (not valid Python).
    Restored to ``randomize("password")`` to match every other password field
    in this module — confirm against the upstream codegen template.
    """
    instance = ModelUserPasswordUpdateV3Request()
    instance.language_tag = randomize()
    instance.new_password = randomize("password")
    instance.old_password = randomize("password")
    return instance
def create_model_user_permissions_response_v3_example() -> ModelUserPermissionsResponseV3:
instance = ModelUserPermissionsResponseV3()
instance.action = randomize("int", min_val=1, max_val=1000)
instance.resource = randomize()
instance.sched_action = randomize("int", min_val=1, max_val=1000)
instance.sched_cron = randomize()
instance.sched_range = [randomize()]
return instance
def create_model_user_response_example() -> ModelUserResponse:
instance = ModelUserResponse()
instance.auth_type = randomize()
instance.bans = [create_model_user_active_ban_response_example()]
instance.country = randomize("country")
instance.created_at = randomize("date")
instance.date_of_birth = randomize("adult_birthdate")
instance.deletion_status = randomize("bool")
instance.display_name = randomize("slug")
instance.email_verified = randomize("bool")
instance.enabled = randomize("bool")
instance.last_date_of_birth_changed_time = randomize("date")
instance.last_enabled_changed_time = randomize("date")
instance.login_id = randomize()
instance.namespace = randomize("slug")
instance.namespace_roles = [create_accountcommon_namespace_role_example()]
instance.old_email_address = randomize()
instance.permissions = [create_accountcommon_permission_example()]
instance.phone_verified = randomize("bool")
instance.roles = [randomize()]
instance.user_id = randomize("uid")
instance.avatar_url = randomize("url")
instance.email_address = randomize("email")
instance.new_email_address = randomize()
instance.phone_number = randomize()
instance.platform_id = randomize()
instance.platform_user_id = randomize()
instance.username = randomize("slug")
instance.xuid = randomize()
return instance
def create_model_user_response_v3_example() -> ModelUserResponseV3:
instance = ModelUserResponseV3()
instance.auth_type = randomize()
instance.bans = [create_model_user_active_ban_response_v3_example()]
instance.country = randomize("country")
instance.created_at = randomize("date")
instance.date_of_birth = randomize("adult_birthdate")
instance.deletion_status = randomize("bool")
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.email_verified = randomize("bool")
instance.enabled = randomize("bool")
instance.last_date_of_birth_changed_time = randomize("date")
instance.last_enabled_changed_time = randomize("date")
instance.namespace = randomize("slug")
instance.namespace_roles = [create_accountcommon_namespace_role_example()]
instance.old_email_address = randomize()
instance.permissions = [create_model_user_permissions_response_v3_example()]
instance.phone_verified = randomize("bool")
instance.roles = [randomize()]
instance.user_id = randomize("uid")
instance.avatar_url = randomize("url")
instance.new_email_address = randomize()
instance.phone_number = randomize()
instance.platform_avatar_url = randomize("url")
instance.platform_display_name = randomize()
instance.platform_id = randomize()
instance.platform_user_id = randomize()
instance.user_name = randomize("slug")
return instance
def create_model_user_roles_v4_response_example() -> ModelUserRolesV4Response:
instance = ModelUserRolesV4Response()
instance.assigned_namespaces = [randomize()]
instance.role_id = randomize("uid")
instance.role_name = randomize()
return instance
def create_model_user_update_request_example() -> ModelUserUpdateRequest:
instance = ModelUserUpdateRequest()
instance.country = randomize("country")
instance.date_of_birth = randomize()
instance.display_name = randomize("slug")
instance.language_tag = randomize()
return instance
def create_model_user_update_request_v3_example() -> ModelUserUpdateRequestV3:
instance = ModelUserUpdateRequestV3()
instance.avatar_url = randomize("url")
instance.country = randomize("country")
instance.date_of_birth = randomize()
instance.display_name = randomize("slug")
instance.language_tag = randomize()
instance.user_name = randomize("slug")
return instance
def create_model_user_verification_request_example() -> ModelUserVerificationRequest:
instance = ModelUserVerificationRequest()
instance.code = randomize()
instance.contact_type = randomize()
instance.language_tag = randomize()
instance.validate_only = randomize("bool")
return instance
def create_model_user_verification_request_v3_example() -> ModelUserVerificationRequestV3:
instance = ModelUserVerificationRequestV3()
instance.code = randomize()
instance.contact_type = randomize()
instance.language_tag = randomize()
instance.validate_only = randomize("bool")
return instance
def create_model_valid_user_id_response_v4_example() -> ModelValidUserIDResponseV4:
instance = ModelValidUserIDResponseV4()
instance.exists = randomize("bool")
instance.user_id = randomize("uid")
return instance
def create_model_validation_detail_example() -> ModelValidationDetail:
instance = ModelValidationDetail()
instance.allow_digit = randomize("bool")
instance.allow_letter = randomize("bool")
instance.allow_space = randomize("bool")
instance.allow_unicode = randomize("bool")
instance.description = [create_accountcommon_input_validation_description_example()]
instance.is_custom_regex = randomize("bool")
instance.letter_case = randomize()
instance.max_length = randomize("int", min_val=1, max_val=1000)
instance.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
instance.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
instance.min_char_type = randomize("int", min_val=1, max_val=1000)
instance.min_length = randomize("int", min_val=1, max_val=1000)
instance.regex = randomize()
instance.special_character_location = randomize()
instance.special_characters = [randomize()]
return instance
def create_model_validation_detail_public_example() -> ModelValidationDetailPublic:
instance = ModelValidationDetailPublic()
instance.allow_digit = randomize("bool")
instance.allow_letter = randomize("bool")
instance.allow_space = randomize("bool")
instance.allow_unicode = randomize("bool")
instance.description = create_accountcommon_input_validation_description_example()
instance.is_custom_regex = randomize("bool")
instance.letter_case = randomize()
instance.max_length = randomize("int", min_val=1, max_val=1000)
instance.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
instance.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
instance.min_char_type = randomize("int", min_val=1, max_val=1000)
instance.min_length = randomize("int", min_val=1, max_val=1000)
instance.regex = randomize()
instance.special_character_location = randomize()
instance.special_characters = [randomize()]
return instance
def create_model_verification_code_response_example() -> ModelVerificationCodeResponse:
instance = ModelVerificationCodeResponse()
instance.account_registration = randomize()
instance.account_upgrade = randomize()
instance.password_reset = randomize()
instance.update_email = randomize()
return instance
def create_model_verify_registration_code_example() -> ModelVerifyRegistrationCode:
    """Build a ModelVerifyRegistrationCode populated with randomized example data."""
    example = ModelVerifyRegistrationCode()
    example.code = randomize()
    example.email_address = randomize("email")
    return example
def create_model_web_linking_response_example() -> ModelWebLinkingResponse:
    """Build a ModelWebLinkingResponse populated with randomized example data."""
    example = ModelWebLinkingResponse()
    example.third_party_url = randomize("url")
    return example
def create_oauthapi_revocation_list_example() -> OauthapiRevocationList:
instance = OauthapiRevocationList()
instance.revoked_tokens = create_bloom_filter_json_example()
instance.revoked_users = [create_oauthcommon_user_revocation_list_record_example()]
return instance
def create_oauthcommon_jwk_key_example() -> OauthcommonJWKKey:
instance = OauthcommonJWKKey()
instance.kty = randomize()
instance.alg = randomize()
instance.e = randomize()
instance.kid = randomize()
instance.n = randomize()
instance.use = randomize()
return instance
def create_oauthcommon_jwk_set_example() -> OauthcommonJWKSet:
instance = OauthcommonJWKSet()
instance.keys = [create_oauthcommon_jwk_key_example()]
return instance
def create_oauthcommon_user_revocation_list_record_example() -> OauthcommonUserRevocationListRecord:
instance = OauthcommonUserRevocationListRecord()
instance.id_ = randomize()
instance.revoked_at = randomize("date")
return instance
def create_oauthmodel_country_location_response_example() -> OauthmodelCountryLocationResponse:
instance = OauthmodelCountryLocationResponse()
instance.city = randomize()
instance.country_code = randomize()
instance.country_name = randomize()
instance.state = randomize()
return instance
def create_oauthmodel_error_response_example() -> OauthmodelErrorResponse:
instance = OauthmodelErrorResponse()
instance.error = randomize()
instance.client_id = randomize("uid")
instance.default_factor = randomize()
instance.error_description = randomize()
instance.error_uri = randomize()
instance.factors = [randomize()]
instance.linking_token = randomize()
instance.mfa_token = randomize()
instance.platform_id = randomize()
return instance
def create_oauthmodel_token_introspect_response_example() -> OauthmodelTokenIntrospectResponse:
instance = OauthmodelTokenIntrospectResponse()
instance.active = randomize("bool")
instance.aud = randomize()
instance.client_id = randomize("uid")
instance.exp = randomize("int", min_val=1, max_val=1000)
instance.iat = randomize("int", min_val=1, max_val=1000)
instance.scope = randomize()
instance.sub = randomize()
return instance
def create_oauthmodel_token_response_example() -> OauthmodelTokenResponse:
instance = OauthmodelTokenResponse()
instance.access_token = randomize()
instance.bans = [create_accountcommon_jwt_ban_v3_example()]
instance.display_name = randomize("slug")
instance.expires_in = randomize("int", min_val=1, max_val=1000)
instance.namespace = randomize("slug")
instance.namespace_roles = [create_accountcommon_namespace_role_example()]
instance.permissions = [create_accountcommon_permission_example()]
instance.refresh_token = randomize()
instance.roles = [randomize()]
instance.token_type = randomize()
instance.user_id = randomize("uid")
instance.is_comply = randomize("bool")
instance.jflgs = randomize("int", min_val=1, max_val=1000)
instance.platform_id = randomize()
instance.platform_user_id = randomize()
instance.refresh_expires_in = randomize("int", min_val=1, max_val=1000)
return instance
def create_oauthmodel_token_response_v3_example() -> OauthmodelTokenResponseV3:
instance = OauthmodelTokenResponseV3()
instance.access_token = randomize()
instance.bans = [create_accountcommon_jwt_ban_v3_example()]
instance.display_name = randomize("slug")
instance.expires_in = randomize("int", min_val=1, max_val=1000)
instance.namespace = randomize("slug")
instance.namespace_roles = [create_accountcommon_namespace_role_example()]
instance.permissions = [create_accountcommon_permission_v3_example()]
instance.refresh_expires_in = randomize("int", min_val=1, max_val=1000)
instance.refresh_token = randomize()
instance.roles = [randomize()]
instance.scope = randomize()
instance.token_type = randomize()
instance.user_id = randomize("uid")
instance.xuid = randomize()
instance.is_comply = randomize("bool")
instance.jflgs = randomize("int", min_val=1, max_val=1000)
instance.platform_id = randomize()
instance.platform_user_id = randomize()
return instance
def create_oauthmodel_token_third_party_response_example() -> OauthmodelTokenThirdPartyResponse:
instance = OauthmodelTokenThirdPartyResponse()
instance.platform_token = randomize()
instance.sand_box_id = randomize()
return instance
def create_rest_error_response_example() -> RestErrorResponse:
instance = RestErrorResponse()
instance.error_code = randomize("int", min_val=1, max_val=1000)
instance.error_message = randomize()
instance.message_variables = create_accountcommon_conflicted_user_platform_accounts_example()
return instance
def create_restapi_error_response_example() -> RestapiErrorResponse:
    """Build a RestapiErrorResponse populated with randomized example data."""
    example = RestapiErrorResponse()
    example.message = randomize()
    example.code = randomize("int", min_val=1, max_val=1000)
    return example
def create_validation_example() -> Validation:
    """Build a Validation populated with randomized example data."""
    example = Validation()
    example.allow_digit = randomize("bool")
    example.allow_letter = randomize("bool")
    example.allow_space = randomize("bool")
    example.allow_unicode = randomize("bool")
    example.description = [create_validation_description_example()]
    example.is_custom_regex = randomize("bool")
    example.letter_case = randomize()
    example.max_length = randomize("int", min_val=1, max_val=1000)
    example.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
    example.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
    example.min_char_type = randomize("int", min_val=1, max_val=1000)
    example.min_length = randomize("int", min_val=1, max_val=1000)
    example.regex = randomize()
    example.special_character_location = randomize()
    example.special_characters = [randomize()]
    return example
def create_validation_description_example() -> ValidationDescription:
    """Return a ValidationDescription filled with randomized example data."""
    instance = ValidationDescription()
    instance.language = randomize()
    instance.message = [randomize()]
    # Stray " | accelbyte_py_sdk/ext/iam.py |" extraction residue removed from
    # the return line — it made this statement a syntax error.
    return instance
# template file: justice_py_sdk_codegen/__main__.py
# justice-iam-service (5.10.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from .utils import randomize
from ..api.iam.models import AccountCreateTestUserRequestV4
from ..api.iam.models import AccountCreateUserRequestV4
from ..api.iam.models import AccountCreateUserResponseV4
from ..api.iam.models import AccountUpgradeHeadlessAccountRequestV4
from ..api.iam.models import AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4
from ..api.iam.models import AccountUserActiveBanResponseV4
from ..api.iam.models import AccountUserPermissionsResponseV4
from ..api.iam.models import AccountUserResponseV4
from ..api.iam.models import AccountcommonBan
from ..api.iam.models import AccountcommonBanReason
from ..api.iam.models import AccountcommonBanReasonV3
from ..api.iam.models import AccountcommonBanReasons
from ..api.iam.models import AccountcommonBanReasonsV3
from ..api.iam.models import AccountcommonBanV3
from ..api.iam.models import AccountcommonBannedByV3
from ..api.iam.models import AccountcommonBans
from ..api.iam.models import AccountcommonBansV3
from ..api.iam.models import AccountcommonClientPermission
from ..api.iam.models import AccountcommonClientPermissionV3
from ..api.iam.models import AccountcommonClientPermissions
from ..api.iam.models import AccountcommonClientPermissionsV3
from ..api.iam.models import AccountcommonConflictedUserPlatformAccounts
from ..api.iam.models import AccountcommonCountryAgeRestriction
from ..api.iam.models import AccountcommonDescription
from ..api.iam.models import AccountcommonDistinctLinkedPlatformV3
from ..api.iam.models import AccountcommonDistinctPlatformResponseV3
from ..api.iam.models import AccountcommonInputValidationDescription
from ..api.iam.models import AccountcommonJWTBanV3
from ..api.iam.models import AccountcommonListUsersWithPlatformAccountsResponse
from ..api.iam.models import AccountcommonNamespaceRole
from ..api.iam.models import AccountcommonNetflixCertificates
from ..api.iam.models import AccountcommonPagination
from ..api.iam.models import AccountcommonPaginationV3
from ..api.iam.models import AccountcommonPermission
from ..api.iam.models import AccountcommonPermissionV3
from ..api.iam.models import AccountcommonPermissions
from ..api.iam.models import AccountcommonPermissionsV3
from ..api.iam.models import AccountcommonPlatformAccount
from ..api.iam.models import AccountcommonRegisteredDomain
from ..api.iam.models import AccountcommonRole
from ..api.iam.models import AccountcommonRoleManager
from ..api.iam.models import AccountcommonRoleManagerV3
from ..api.iam.models import AccountcommonRoleMember
from ..api.iam.models import AccountcommonRoleMemberV3
from ..api.iam.models import AccountcommonRoleV3
from ..api.iam.models import AccountcommonSimpleUserPlatformInfoV3
from ..api.iam.models import AccountcommonUserLinkedPlatform
from ..api.iam.models import AccountcommonUserLinkedPlatformV3
from ..api.iam.models import AccountcommonUserLinkedPlatformsResponseV3
from ..api.iam.models import AccountcommonUserPlatformInfo
from ..api.iam.models import AccountcommonUserPlatforms
from ..api.iam.models import AccountcommonUserSearchByPlatformIDResult
from ..api.iam.models import AccountcommonUserSearchResult
from ..api.iam.models import AccountcommonUserWithLinkedPlatformAccounts
from ..api.iam.models import AccountcommonUserWithPlatformAccounts
from ..api.iam.models import BannedBy
from ..api.iam.models import BloomFilterJSON
from ..api.iam.models import ClientmodelClientCreateRequest
from ..api.iam.models import ClientmodelClientCreationResponse
from ..api.iam.models import ClientmodelClientCreationV3Request
from ..api.iam.models import ClientmodelClientResponse
from ..api.iam.models import ClientmodelClientUpdateRequest
from ..api.iam.models import ClientmodelClientUpdateSecretRequest
from ..api.iam.models import ClientmodelClientUpdateV3Request
from ..api.iam.models import ClientmodelClientV3Response
from ..api.iam.models import ClientmodelClientsV3Response
from ..api.iam.models import LegalAcceptedPoliciesRequest
from ..api.iam.models import ModelAddUserRoleV4Request
from ..api.iam.models import ModelAgeRestrictionRequest
from ..api.iam.models import ModelAgeRestrictionRequestV3
from ..api.iam.models import ModelAgeRestrictionResponse
from ..api.iam.models import ModelAgeRestrictionResponseV3
from ..api.iam.models import ModelAssignUserV4Request
from ..api.iam.models import ModelAssignedUserV4Response
from ..api.iam.models import ModelAuthenticatorKeyResponseV4
from ..api.iam.models import ModelBackupCodesResponseV4
from ..api.iam.models import ModelBanCreateRequest
from ..api.iam.models import ModelBanUpdateRequest
from ..api.iam.models import ModelCheckValidUserIDRequestV4
from ..api.iam.models import ModelCountry
from ..api.iam.models import ModelCountryAgeRestrictionRequest
from ..api.iam.models import ModelCountryAgeRestrictionV3Request
from ..api.iam.models import ModelCountryV3Response
from ..api.iam.models import ModelCreateJusticeUserResponse
from ..api.iam.models import ModelDisableUserRequest
from ..api.iam.models import ModelEmailUpdateRequestV4
from ..api.iam.models import ModelEnabledFactorsResponseV4
from ..api.iam.models import ModelForgotPasswordRequestV3
from ..api.iam.models import ModelGetAdminUsersResponse
from ..api.iam.models import ModelGetPublisherUserResponse
from ..api.iam.models import ModelGetUserBanV3Response
from ..api.iam.models import ModelGetUserJusticePlatformAccountResponse
from ..api.iam.models import ModelGetUserMapping
from ..api.iam.models import ModelGetUsersResponseWithPaginationV3
from ..api.iam.models import ModelInputValidationData
from ..api.iam.models import ModelInputValidationDataPublic
from ..api.iam.models import ModelInputValidationUpdatePayload
from ..api.iam.models import ModelInputValidationsPublicResponse
from ..api.iam.models import ModelInputValidationsResponse
from ..api.iam.models import ModelInviteUserRequestV3
from ..api.iam.models import ModelInviteUserRequestV4
from ..api.iam.models import ModelInviteUserResponseV3
from ..api.iam.models import ModelLinkPlatformAccountRequest
from ..api.iam.models import ModelLinkPlatformAccountWithProgressionRequest
from ..api.iam.models import ModelLinkRequest
from ..api.iam.models import ModelListAssignedUsersV4Response
from ..api.iam.models import ModelListBulkUserResponse
from ..api.iam.models import ModelListEmailAddressRequest
from ..api.iam.models import ModelListRoleV4Response
from ..api.iam.models import ModelListUserInformationResult
from ..api.iam.models import ModelListUserResponseV3
from ..api.iam.models import ModelListUserRolesV4Response
from ..api.iam.models import ModelListValidUserIDResponseV4
from ..api.iam.models import ModelLoginHistoriesResponse
from ..api.iam.models import ModelNamespaceRoleRequest
from ..api.iam.models import ModelPermissionDeleteRequest
from ..api.iam.models import ModelPlatformDomainDeleteRequest
from ..api.iam.models import ModelPlatformDomainResponse
from ..api.iam.models import ModelPlatformDomainUpdateRequest
from ..api.iam.models import ModelPlatformUserIDRequest
from ..api.iam.models import ModelPlatformUserInformation
from ..api.iam.models import ModelPublicThirdPartyPlatformInfo
from ..api.iam.models import ModelPublicUserInformationResponseV3
from ..api.iam.models import ModelPublicUserInformationV3
from ..api.iam.models import ModelPublicUserResponse
from ..api.iam.models import ModelPublicUserResponseV3
from ..api.iam.models import ModelPublicUsersResponse
from ..api.iam.models import ModelRemoveUserRoleV4Request
from ..api.iam.models import ModelResetPasswordRequest
from ..api.iam.models import ModelResetPasswordRequestV3
from ..api.iam.models import ModelRevokeUserV4Request
from ..api.iam.models import ModelRoleAdminStatusResponse
from ..api.iam.models import ModelRoleAdminStatusResponseV3
from ..api.iam.models import ModelRoleCreateRequest
from ..api.iam.models import ModelRoleCreateV3Request
from ..api.iam.models import ModelRoleManagersRequest
from ..api.iam.models import ModelRoleManagersRequestV3
from ..api.iam.models import ModelRoleManagersResponse
from ..api.iam.models import ModelRoleManagersResponsesV3
from ..api.iam.models import ModelRoleMembersRequest
from ..api.iam.models import ModelRoleMembersRequestV3
from ..api.iam.models import ModelRoleMembersResponse
from ..api.iam.models import ModelRoleMembersResponseV3
from ..api.iam.models import ModelRoleNamesResponseV3
from ..api.iam.models import ModelRoleResponse
from ..api.iam.models import ModelRoleResponseV3
from ..api.iam.models import ModelRoleResponseWithManagers
from ..api.iam.models import ModelRoleResponseWithManagersAndPaginationV3
from ..api.iam.models import ModelRoleResponseWithManagersV3
from ..api.iam.models import ModelRoleUpdateRequest
from ..api.iam.models import ModelRoleUpdateRequestV3
from ..api.iam.models import ModelRoleV4Request
from ..api.iam.models import ModelRoleV4Response
from ..api.iam.models import ModelSSOPlatformCredentialRequest
from ..api.iam.models import ModelSSOPlatformCredentialResponse
from ..api.iam.models import ModelSearchUsersByPlatformIDResponse
from ..api.iam.models import ModelSearchUsersResponse
from ..api.iam.models import ModelSearchUsersResponseWithPaginationV3
from ..api.iam.models import ModelSendRegisterVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequest
from ..api.iam.models import ModelSendVerificationCodeRequestV3
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialRequest
from ..api.iam.models import ModelThirdPartyLoginPlatformCredentialResponse
from ..api.iam.models import ModelUnlinkUserPlatformRequest
from ..api.iam.models import ModelUpdatePermissionScheduleRequest
from ..api.iam.models import ModelUpdateUserDeletionStatusRequest
from ..api.iam.models import ModelUpdateUserStatusRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountV3Request
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequest
from ..api.iam.models import ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3
from ..api.iam.models import ModelUserActiveBanResponse
from ..api.iam.models import ModelUserActiveBanResponseV3
from ..api.iam.models import ModelUserBanResponse
from ..api.iam.models import ModelUserBanResponseV3
from ..api.iam.models import ModelUserBaseInfo
from ..api.iam.models import ModelUserCreateFromInvitationRequestV3
from ..api.iam.models import ModelUserCreateFromInvitationRequestV4
from ..api.iam.models import ModelUserCreateRequest
from ..api.iam.models import ModelUserCreateRequestV3
from ..api.iam.models import ModelUserCreateResponse
from ..api.iam.models import ModelUserCreateResponseV3
from ..api.iam.models import ModelUserDeletionStatusResponse
from ..api.iam.models import ModelUserIDsRequest
from ..api.iam.models import ModelUserInfoResponse
from ..api.iam.models import ModelUserInformation
from ..api.iam.models import ModelUserInvitationV3
from ..api.iam.models import ModelUserLoginHistoryResponse
from ..api.iam.models import ModelUserPasswordUpdateRequest
from ..api.iam.models import ModelUserPasswordUpdateV3Request
from ..api.iam.models import ModelUserPermissionsResponseV3
from ..api.iam.models import ModelUserResponse
from ..api.iam.models import ModelUserResponseV3
from ..api.iam.models import ModelUserRolesV4Response
from ..api.iam.models import ModelUserUpdateRequest
from ..api.iam.models import ModelUserUpdateRequestV3
from ..api.iam.models import ModelUserVerificationRequest
from ..api.iam.models import ModelUserVerificationRequestV3
from ..api.iam.models import ModelValidUserIDResponseV4
from ..api.iam.models import ModelValidationDetail
from ..api.iam.models import ModelValidationDetailPublic
from ..api.iam.models import ModelVerificationCodeResponse
from ..api.iam.models import ModelVerifyRegistrationCode
from ..api.iam.models import ModelWebLinkingResponse
from ..api.iam.models import OauthapiRevocationList
from ..api.iam.models import OauthcommonJWKKey
from ..api.iam.models import OauthcommonJWKSet
from ..api.iam.models import OauthcommonUserRevocationListRecord
from ..api.iam.models import OauthmodelCountryLocationResponse
from ..api.iam.models import OauthmodelErrorResponse
from ..api.iam.models import OauthmodelTokenIntrospectResponse
from ..api.iam.models import OauthmodelTokenResponse
from ..api.iam.models import OauthmodelTokenResponseV3
from ..api.iam.models import OauthmodelTokenThirdPartyResponse
from ..api.iam.models import RestErrorResponse
from ..api.iam.models import RestapiErrorResponse
from ..api.iam.models import Validation
from ..api.iam.models import ValidationDescription
def create_account_create_test_user_request_v4_example() -> AccountCreateTestUserRequestV4:
    """Return an AccountCreateTestUserRequestV4 filled with randomized example data."""
    example = AccountCreateTestUserRequestV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.username = randomize("slug")
    example.verified = randomize("bool")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    return example
def create_account_create_user_request_v4_example() -> AccountCreateUserRequestV4:
    """Return an AccountCreateUserRequestV4 filled with randomized example data."""
    example = AccountCreateUserRequestV4()
    example.auth_type = randomize()
    example.code = randomize()
    example.country = randomize("country")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.password_md5_sum = randomize()
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.accepted_policies = [create_legal_accepted_policies_request_example()]
    example.date_of_birth = randomize()
    return example
def create_account_create_user_response_v4_example() -> AccountCreateUserResponseV4:
    """Return an AccountCreateUserResponseV4 filled with randomized example data."""
    example = AccountCreateUserResponseV4()
    example.auth_type = randomize()
    example.country = randomize("country")
    example.date_of_birth = randomize("adult_birthdate")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.username = randomize("slug")
    return example
def create_account_upgrade_headless_account_request_v4_example() -> AccountUpgradeHeadlessAccountRequestV4:
    """Return an AccountUpgradeHeadlessAccountRequestV4 filled with randomized example data."""
    example = AccountUpgradeHeadlessAccountRequestV4()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.username = randomize("slug")
    return example
def create_account_upgrade_headless_account_with_verification_code_request_v4_example() -> AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4:
    """Return an AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4 filled with randomized example data."""
    example = AccountUpgradeHeadlessAccountWithVerificationCodeRequestV4()
    example.code = randomize()
    example.email_address = randomize("email")
    example.password = randomize("password")
    example.reach_minimum_age = randomize("bool")
    example.username = randomize("slug")
    example.validate_only = randomize("bool")
    example.country = randomize("country")
    example.date_of_birth = randomize()
    example.display_name = randomize("slug")
    return example
def create_account_user_active_ban_response_v4_example() -> AccountUserActiveBanResponseV4:
    """Return an AccountUserActiveBanResponseV4 filled with randomized example data."""
    example = AccountUserActiveBanResponseV4()
    example.ban = randomize()
    example.ban_id = randomize()
    example.end_date = randomize("date")
    return example
def create_account_user_permissions_response_v4_example() -> AccountUserPermissionsResponseV4:
    """Return an AccountUserPermissionsResponseV4 filled with randomized example data."""
    example = AccountUserPermissionsResponseV4()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_account_user_response_v4_example() -> AccountUserResponseV4:
    """Return an AccountUserResponseV4 filled with randomized example data."""
    example = AccountUserResponseV4()
    example.auth_type = randomize()
    example.bans = [create_account_user_active_ban_response_v4_example()]
    example.country = randomize("country")
    example.created_at = randomize("date")
    example.date_of_birth = randomize("adult_birthdate")
    example.deletion_status = randomize("bool")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.email_verified = randomize("bool")
    example.enabled = randomize("bool")
    example.last_date_of_birth_changed_time = randomize("date")
    example.last_enabled_changed_time = randomize("date")
    example.namespace = randomize("slug")
    example.old_email_address = randomize()
    example.permissions = [create_account_user_permissions_response_v4_example()]
    example.phone_verified = randomize("bool")
    example.roles = [randomize()]
    example.user_id = randomize("uid")
    example.new_email_address = randomize()
    example.phone_number = randomize()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.username = randomize("slug")
    return example
def create_accountcommon_ban_example() -> AccountcommonBan:
    """Return an AccountcommonBan filled with randomized example data."""
    example = AccountcommonBan()
    example.ban = randomize()
    example.description = randomize()
    return example
def create_accountcommon_ban_reason_example() -> AccountcommonBanReason:
    """Return an AccountcommonBanReason filled with randomized example data."""
    example = AccountcommonBanReason()
    example.description = randomize()
    example.reason = randomize()
    return example
def create_accountcommon_ban_reason_v3_example() -> AccountcommonBanReasonV3:
    """Return an AccountcommonBanReasonV3 filled with randomized example data."""
    example = AccountcommonBanReasonV3()
    example.description = randomize()
    example.reason = randomize()
    return example
def create_accountcommon_ban_reasons_example() -> AccountcommonBanReasons:
    """Return an AccountcommonBanReasons filled with randomized example data."""
    example = AccountcommonBanReasons()
    example.reasons = [create_accountcommon_ban_reason_example()]
    return example
def create_accountcommon_ban_reasons_v3_example() -> AccountcommonBanReasonsV3:
    """Return an AccountcommonBanReasonsV3 filled with randomized example data."""
    example = AccountcommonBanReasonsV3()
    example.reasons = [create_accountcommon_ban_reason_v3_example()]
    return example
def create_accountcommon_ban_v3_example() -> AccountcommonBanV3:
    """Return an AccountcommonBanV3 filled with randomized example data."""
    example = AccountcommonBanV3()
    example.ban = randomize()
    example.type_ = randomize()
    example.description = randomize()
    example.descriptions = create_accountcommon_description_example()
    return example
def create_accountcommon_banned_by_v3_example() -> AccountcommonBannedByV3:
    """Return an AccountcommonBannedByV3 filled with randomized example data."""
    example = AccountcommonBannedByV3()
    example.display_name = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_bans_example() -> AccountcommonBans:
    """Return an AccountcommonBans filled with randomized example data."""
    example = AccountcommonBans()
    example.bans = [create_accountcommon_ban_example()]
    return example
def create_accountcommon_bans_v3_example() -> AccountcommonBansV3:
    """Return an AccountcommonBansV3 filled with randomized example data."""
    example = AccountcommonBansV3()
    example.bans = [create_accountcommon_ban_v3_example()]
    return example
def create_accountcommon_client_permission_example() -> AccountcommonClientPermission:
    """Return an AccountcommonClientPermission filled with randomized example data."""
    example = AccountcommonClientPermission()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example
def create_accountcommon_client_permission_v3_example() -> AccountcommonClientPermissionV3:
    """Return an AccountcommonClientPermissionV3 filled with randomized example data."""
    example = AccountcommonClientPermissionV3()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    return example
def create_accountcommon_client_permissions_example() -> AccountcommonClientPermissions:
    """Return an AccountcommonClientPermissions filled with randomized example data."""
    example = AccountcommonClientPermissions()
    example.permissions = [create_accountcommon_client_permission_example()]
    return example
def create_accountcommon_client_permissions_v3_example() -> AccountcommonClientPermissionsV3:
    """Return an AccountcommonClientPermissionsV3 filled with randomized example data."""
    example = AccountcommonClientPermissionsV3()
    example.permissions = [create_accountcommon_client_permission_v3_example()]
    return example
def create_accountcommon_conflicted_user_platform_accounts_example() -> AccountcommonConflictedUserPlatformAccounts:
    """Return an AccountcommonConflictedUserPlatformAccounts filled with randomized example data."""
    example = AccountcommonConflictedUserPlatformAccounts()
    example.platform_user_id = randomize()
    example.publisher_accounts = [create_accountcommon_user_with_linked_platform_accounts_example()]
    return example
def create_accountcommon_country_age_restriction_example() -> AccountcommonCountryAgeRestriction:
    """Return an AccountcommonCountryAgeRestriction filled with randomized example data."""
    example = AccountcommonCountryAgeRestriction()
    example.age_restriction = randomize("int", min_val=1, max_val=1000)
    example.country_code = randomize()
    example.country_name = randomize()
    example.enable = randomize("bool")
    return example
def create_accountcommon_description_example() -> AccountcommonDescription:
    """Return an AccountcommonDescription filled with randomized example data."""
    example = AccountcommonDescription()
    example.en_us = randomize()
    example.zh_cn = randomize()
    return example
def create_accountcommon_distinct_linked_platform_v3_example() -> AccountcommonDistinctLinkedPlatformV3:
    """Return an AccountcommonDistinctLinkedPlatformV3 filled with randomized example data."""
    example = AccountcommonDistinctLinkedPlatformV3()
    example.details = [create_accountcommon_simple_user_platform_info_v3_example()]
    example.linked_at = randomize()
    example.platform_name = randomize()
    example.platform_user_id = randomize()
    return example
def create_accountcommon_distinct_platform_response_v3_example() -> AccountcommonDistinctPlatformResponseV3:
    """Return an AccountcommonDistinctPlatformResponseV3 filled with randomized example data."""
    example = AccountcommonDistinctPlatformResponseV3()
    example.platforms = [create_accountcommon_distinct_linked_platform_v3_example()]
    return example
def create_accountcommon_input_validation_description_example() -> AccountcommonInputValidationDescription:
    """Return an AccountcommonInputValidationDescription filled with randomized example data."""
    example = AccountcommonInputValidationDescription()
    example.language = randomize()
    example.message = [randomize()]
    return example
def create_accountcommon_jwt_ban_v3_example() -> AccountcommonJWTBanV3:
    """Return an AccountcommonJWTBanV3 filled with randomized example data."""
    example = AccountcommonJWTBanV3()
    example.ban = randomize()
    example.enabled = randomize("bool")
    example.end_date = randomize("date")
    example.targeted_namespace = randomize("slug")
    example.disabled_date = randomize("date")
    return example
def create_accountcommon_list_users_with_platform_accounts_response_example() -> AccountcommonListUsersWithPlatformAccountsResponse:
    """Return an AccountcommonListUsersWithPlatformAccountsResponse filled with randomized example data."""
    example = AccountcommonListUsersWithPlatformAccountsResponse()
    example.data = [create_accountcommon_user_with_platform_accounts_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    example.total_data = randomize("int", min_val=1, max_val=1000)
    return example
def create_accountcommon_namespace_role_example() -> AccountcommonNamespaceRole:
    """Return an AccountcommonNamespaceRole filled with randomized example data."""
    example = AccountcommonNamespaceRole()
    example.namespace = randomize("slug")
    example.role_id = randomize("uid")
    return example
def create_accountcommon_netflix_certificates_example() -> AccountcommonNetflixCertificates:
    """Return an AccountcommonNetflixCertificates filled with randomized example data."""
    example = AccountcommonNetflixCertificates()
    example.encrypted_private_key = randomize()
    example.public_certificate = randomize()
    example.root_certificate = randomize()
    return example
def create_accountcommon_pagination_example() -> AccountcommonPagination:
    """Return an AccountcommonPagination filled with randomized example data."""
    example = AccountcommonPagination()
    example.first = randomize()
    example.last = randomize()
    example.next_ = randomize()
    example.previous = randomize()
    return example
def create_accountcommon_pagination_v3_example() -> AccountcommonPaginationV3:
    """Return an AccountcommonPaginationV3 filled with randomized example data."""
    example = AccountcommonPaginationV3()
    example.first = randomize()
    example.last = randomize()
    example.next_ = randomize()
    example.previous = randomize()
    return example
def create_accountcommon_permission_example() -> AccountcommonPermission:
    """Return an AccountcommonPermission filled with randomized example data."""
    example = AccountcommonPermission()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_accountcommon_permission_v3_example() -> AccountcommonPermissionV3:
    """Return an AccountcommonPermissionV3 filled with randomized example data."""
    example = AccountcommonPermissionV3()
    example.action = randomize("int", min_val=1, max_val=1000)
    example.resource = randomize()
    example.sched_action = randomize("int", min_val=1, max_val=1000)
    example.sched_cron = randomize()
    example.sched_range = [randomize()]
    return example
def create_accountcommon_permissions_example() -> AccountcommonPermissions:
    """Return an AccountcommonPermissions filled with randomized example data."""
    example = AccountcommonPermissions()
    example.permissions = [create_accountcommon_permission_example()]
    return example
def create_accountcommon_permissions_v3_example() -> AccountcommonPermissionsV3:
    """Return an AccountcommonPermissionsV3 filled with randomized example data."""
    example = AccountcommonPermissionsV3()
    example.permissions = [create_accountcommon_permission_v3_example()]
    return example
def create_accountcommon_platform_account_example() -> AccountcommonPlatformAccount:
    """Return an AccountcommonPlatformAccount filled with randomized example data."""
    example = AccountcommonPlatformAccount()
    example.namespace = randomize("slug")
    example.platform_user_id = randomize()
    return example
def create_accountcommon_registered_domain_example() -> AccountcommonRegisteredDomain:
    """Return an AccountcommonRegisteredDomain filled with randomized example data."""
    example = AccountcommonRegisteredDomain()
    example.affected_client_i_ds = [randomize()]
    example.domain = randomize()
    example.namespaces = [randomize()]
    example.role_id = randomize("uid")
    return example
def create_accountcommon_role_example() -> AccountcommonRole:
    """Return an AccountcommonRole filled with randomized example data."""
    example = AccountcommonRole()
    example.admin_role = randomize("bool")
    example.deletable = randomize("bool")
    example.is_wildcard = randomize("bool")
    example.managers = [create_accountcommon_role_manager_example()]
    example.members = [create_accountcommon_role_member_example()]
    example.permissions = [create_accountcommon_permission_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_accountcommon_role_manager_example() -> AccountcommonRoleManager:
    """Return an AccountcommonRoleManager filled with randomized example data."""
    example = AccountcommonRoleManager()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_manager_v3_example() -> AccountcommonRoleManagerV3:
    """Return an AccountcommonRoleManagerV3 filled with randomized example data."""
    example = AccountcommonRoleManagerV3()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_member_example() -> AccountcommonRoleMember:
    """Return an AccountcommonRoleMember filled with randomized example data."""
    example = AccountcommonRoleMember()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_member_v3_example() -> AccountcommonRoleMemberV3:
    """Return an AccountcommonRoleMemberV3 filled with randomized example data."""
    example = AccountcommonRoleMemberV3()
    example.display_name = randomize("slug")
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_role_v3_example() -> AccountcommonRoleV3:
    """Return an AccountcommonRoleV3 filled with randomized example data."""
    example = AccountcommonRoleV3()
    example.admin_role = randomize("bool")
    example.is_wildcard = randomize("bool")
    example.managers = [create_accountcommon_role_manager_v3_example()]
    example.members = [create_accountcommon_role_member_v3_example()]
    example.permissions = [create_accountcommon_permission_v3_example()]
    example.role_id = randomize("uid")
    example.role_name = randomize()
    return example
def create_accountcommon_simple_user_platform_info_v3_example() -> AccountcommonSimpleUserPlatformInfoV3:
    """Return an AccountcommonSimpleUserPlatformInfoV3 filled with randomized example data."""
    example = AccountcommonSimpleUserPlatformInfoV3()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.display_name = randomize("slug")
    example.platform_id = randomize()
    return example
def create_accountcommon_user_linked_platform_example() -> AccountcommonUserLinkedPlatform:
    """Return an AccountcommonUserLinkedPlatform filled with randomized example data."""
    example = AccountcommonUserLinkedPlatform()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.xuid = randomize()
    return example
def create_accountcommon_user_linked_platform_v3_example() -> AccountcommonUserLinkedPlatformV3:
    """Return an AccountcommonUserLinkedPlatformV3 filled with randomized example data."""
    example = AccountcommonUserLinkedPlatformV3()
    example.account_group = randomize()
    example.linked_at = randomize()
    example.namespace = randomize("slug")
    example.origin_namespace = randomize("slug")
    example.user_id = randomize("uid")
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    return example
def create_accountcommon_user_linked_platforms_response_v3_example() -> AccountcommonUserLinkedPlatformsResponseV3:
    """Return an AccountcommonUserLinkedPlatformsResponseV3 filled with randomized example data."""
    example = AccountcommonUserLinkedPlatformsResponseV3()
    example.data = [create_accountcommon_user_linked_platform_v3_example()]
    example.paging = create_accountcommon_pagination_v3_example()
    return example
def create_accountcommon_user_platform_info_example() -> AccountcommonUserPlatformInfo:
    """Return an AccountcommonUserPlatformInfo filled with randomized example data."""
    example = AccountcommonUserPlatformInfo()
    example.platform_id = randomize()
    example.platform_user_id = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_platforms_example() -> AccountcommonUserPlatforms:
    """Return an AccountcommonUserPlatforms filled with randomized example data."""
    example = AccountcommonUserPlatforms()
    example.user_id_platforms = [create_accountcommon_user_platform_info_example()]
    return example
def create_accountcommon_user_search_by_platform_id_result_example() -> AccountcommonUserSearchByPlatformIDResult:
    """Return an AccountcommonUserSearchByPlatformIDResult filled with randomized example data."""
    example = AccountcommonUserSearchByPlatformIDResult()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    example.phone_number = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_search_result_example() -> AccountcommonUserSearchResult:
    """Return an AccountcommonUserSearchResult filled with randomized example data."""
    example = AccountcommonUserSearchResult()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_user_linked_platform_example()]
    example.phone_number = randomize()
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_with_linked_platform_accounts_example() -> AccountcommonUserWithLinkedPlatformAccounts:
    """Return an AccountcommonUserWithLinkedPlatformAccounts filled with randomized example data."""
    example = AccountcommonUserWithLinkedPlatformAccounts()
    example.display_name = randomize("slug")
    example.email_address = randomize("email")
    example.linked_platforms = [create_accountcommon_platform_account_example()]
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_accountcommon_user_with_platform_accounts_example() -> AccountcommonUserWithPlatformAccounts:
    """Return an AccountcommonUserWithPlatformAccounts filled with randomized example data."""
    example = AccountcommonUserWithPlatformAccounts()
    example.linked_platforms = [create_accountcommon_platform_account_example()]
    example.namespace = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_banned_by_example() -> BannedBy:
    """Return a BannedBy filled with randomized example data."""
    example = BannedBy()
    example.display_name = randomize("slug")
    example.user_id = randomize("uid")
    return example
def create_bloom_filter_json_example() -> BloomFilterJSON:
    """Return a BloomFilterJSON filled with randomized example data."""
    example = BloomFilterJSON()
    example.bits = [randomize("int", min_val=1, max_val=1000)]
    example.k = randomize("int", min_val=1, max_val=1000)
    example.m = randomize("int", min_val=1, max_val=1000)
    return example
def create_clientmodel_client_create_request_example() -> ClientmodelClientCreateRequest:
    """Return a ClientmodelClientCreateRequest filled with randomized example data."""
    example = ClientmodelClientCreateRequest()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_example()]
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    example.secret = randomize()
    return example
def create_clientmodel_client_creation_response_example() -> ClientmodelClientCreationResponse:
    """Return a ClientmodelClientCreationResponse filled with randomized example data."""
    example = ClientmodelClientCreationResponse()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_example()]
    example.namespace = randomize("slug")
    example.redirect_uri = randomize()
    return example
def create_clientmodel_client_creation_v3_request_example() -> ClientmodelClientCreationV3Request:
    """Return a ClientmodelClientCreationV3Request filled with randomized example data."""
    example = ClientmodelClientCreationV3Request()
    example.audiences = [randomize()]
    example.base_uri = randomize()
    example.client_id = randomize("uid")
    example.client_name = randomize()
    example.client_permissions = [create_accountcommon_permission_v3_example()]
    example.client_platform = randomize()
    example.namespace = randomize("slug")
    example.oauth_client_type = randomize()
    example.redirect_uri = randomize()
    example.secret = randomize()
    example.deletable = randomize("bool")
    return example
def create_clientmodel_client_response_example() -> ClientmodelClientResponse:
    """Return a ClientmodelClientResponse populated with randomized example values."""
    instance = ClientmodelClientResponse()
    instance.client_id = randomize("uid")
    instance.client_name = randomize()
    instance.client_permissions = [create_accountcommon_permission_example()]
    instance.created_at = randomize("date")
    instance.namespace = randomize("slug")
    instance.redirect_uri = randomize()
    return instance
def create_clientmodel_client_update_request_example() -> ClientmodelClientUpdateRequest:
    """Return a ClientmodelClientUpdateRequest populated with randomized example values."""
    instance = ClientmodelClientUpdateRequest()
    instance.client_name = randomize()
    instance.redirect_uri = randomize()
    return instance
def create_clientmodel_client_update_secret_request_example() -> ClientmodelClientUpdateSecretRequest:
    """Return a ClientmodelClientUpdateSecretRequest populated with randomized example values."""
    instance = ClientmodelClientUpdateSecretRequest()
    instance.new_secret = randomize()
    return instance
def create_clientmodel_client_update_v3_request_example() -> ClientmodelClientUpdateV3Request:
    """Return a ClientmodelClientUpdateV3Request populated with randomized example values."""
    instance = ClientmodelClientUpdateV3Request()
    instance.client_platform = randomize()
    instance.audiences = [randomize()]
    instance.base_uri = randomize()
    instance.client_name = randomize()
    instance.client_permissions = [create_accountcommon_permission_v3_example()]
    instance.deletable = randomize("bool")
    instance.namespace = randomize("slug")
    instance.redirect_uri = randomize()
    return instance
def create_clientmodel_client_v3_response_example() -> ClientmodelClientV3Response:
    """Return a ClientmodelClientV3Response populated with randomized example values."""
    instance = ClientmodelClientV3Response()
    instance.audiences = [randomize()]
    instance.base_uri = randomize()
    instance.client_id = randomize("uid")
    instance.client_name = randomize()
    instance.client_permissions = [create_accountcommon_permission_v3_example()]
    instance.client_platform = randomize()
    instance.created_at = randomize("date")
    instance.modified_at = randomize("date")
    instance.namespace = randomize("slug")
    instance.oauth_client_type = randomize()
    instance.redirect_uri = randomize()
    instance.scopes = [randomize()]
    return instance
def create_clientmodel_clients_v3_response_example() -> ClientmodelClientsV3Response:
    """Return a ClientmodelClientsV3Response populated with randomized example values."""
    instance = ClientmodelClientsV3Response()
    instance.data = [create_clientmodel_client_v3_response_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_legal_accepted_policies_request_example() -> LegalAcceptedPoliciesRequest:
    """Return a LegalAcceptedPoliciesRequest populated with randomized example values."""
    instance = LegalAcceptedPoliciesRequest()
    instance.is_accepted = randomize("bool")
    instance.localized_policy_version_id = randomize()
    instance.policy_id = randomize()
    instance.policy_version_id = randomize()
    return instance
def create_model_add_user_role_v4_request_example() -> ModelAddUserRoleV4Request:
    """Return a ModelAddUserRoleV4Request populated with randomized example values."""
    instance = ModelAddUserRoleV4Request()
    instance.assigned_namespaces = [randomize()]
    instance.role_id = randomize("uid")
    return instance
def create_model_age_restriction_request_example() -> ModelAgeRestrictionRequest:
    """Return a ModelAgeRestrictionRequest populated with randomized example values."""
    instance = ModelAgeRestrictionRequest()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.enable = randomize("bool")
    return instance
def create_model_age_restriction_request_v3_example() -> ModelAgeRestrictionRequestV3:
    """Return a ModelAgeRestrictionRequestV3 populated with randomized example values."""
    instance = ModelAgeRestrictionRequestV3()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.enable = randomize("bool")
    return instance
def create_model_age_restriction_response_example() -> ModelAgeRestrictionResponse:
    """Return a ModelAgeRestrictionResponse populated with randomized example values."""
    instance = ModelAgeRestrictionResponse()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.enable = randomize("bool")
    return instance
def create_model_age_restriction_response_v3_example() -> ModelAgeRestrictionResponseV3:
    """Return a ModelAgeRestrictionResponseV3 populated with randomized example values."""
    instance = ModelAgeRestrictionResponseV3()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.enable = randomize("bool")
    return instance
def create_model_assign_user_v4_request_example() -> ModelAssignUserV4Request:
    """Return a ModelAssignUserV4Request populated with randomized example values."""
    instance = ModelAssignUserV4Request()
    instance.assigned_namespaces = [randomize()]
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_assigned_user_v4_response_example() -> ModelAssignedUserV4Response:
    """Return a ModelAssignedUserV4Response populated with randomized example values."""
    instance = ModelAssignedUserV4Response()
    instance.assigned_namespaces = [randomize()]
    instance.display_name = randomize("slug")
    instance.email = randomize("email")
    instance.role_id = randomize("uid")
    instance.user_id = randomize("uid")
    return instance
def create_model_authenticator_key_response_v4_example() -> ModelAuthenticatorKeyResponseV4:
    """Return a ModelAuthenticatorKeyResponseV4 populated with randomized example values."""
    instance = ModelAuthenticatorKeyResponseV4()
    instance.secret_key = randomize()
    instance.uri = randomize()
    return instance
def create_model_backup_codes_response_v4_example() -> ModelBackupCodesResponseV4:
    """Return a ModelBackupCodesResponseV4 populated with randomized example values."""
    instance = ModelBackupCodesResponseV4()
    instance.generated_at = randomize("int", min_val=1, max_val=1000)
    instance.invalid_codes = [randomize()]
    instance.valid_codes = [randomize()]
    return instance
def create_model_ban_create_request_example() -> ModelBanCreateRequest:
    """Return a ModelBanCreateRequest populated with randomized example values."""
    instance = ModelBanCreateRequest()
    instance.ban = randomize()
    instance.comment = randomize()
    instance.end_date = randomize()
    instance.reason = randomize()
    instance.skip_notif = randomize("bool")
    return instance
def create_model_ban_update_request_example() -> ModelBanUpdateRequest:
    """Return a ModelBanUpdateRequest populated with randomized example values."""
    instance = ModelBanUpdateRequest()
    instance.enabled = randomize("bool")
    instance.skip_notif = randomize("bool")
    return instance
def create_model_check_valid_user_id_request_v4_example() -> ModelCheckValidUserIDRequestV4:
    """Return a ModelCheckValidUserIDRequestV4 populated with randomized example values."""
    instance = ModelCheckValidUserIDRequestV4()
    instance.user_ids = [randomize()]
    return instance
def create_model_country_example() -> ModelCountry:
    """Return a ModelCountry populated with randomized example values."""
    instance = ModelCountry()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.country_code = randomize()
    instance.country_name = randomize()
    instance.enable = randomize("bool")
    return instance
def create_model_country_age_restriction_request_example() -> ModelCountryAgeRestrictionRequest:
    """Return a ModelCountryAgeRestrictionRequest populated with randomized example values."""
    instance = ModelCountryAgeRestrictionRequest()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    return instance
def create_model_country_age_restriction_v3_request_example() -> ModelCountryAgeRestrictionV3Request:
    """Return a ModelCountryAgeRestrictionV3Request populated with randomized example values."""
    instance = ModelCountryAgeRestrictionV3Request()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    return instance
def create_model_country_v3_response_example() -> ModelCountryV3Response:
    """Return a ModelCountryV3Response populated with randomized example values."""
    instance = ModelCountryV3Response()
    instance.age_restriction = randomize("int", min_val=1, max_val=1000)
    instance.country_code = randomize()
    instance.country_name = randomize()
    instance.enable = randomize("bool")
    return instance
def create_model_create_justice_user_response_example() -> ModelCreateJusticeUserResponse:
    """Return a ModelCreateJusticeUserResponse populated with randomized example values."""
    instance = ModelCreateJusticeUserResponse()
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_disable_user_request_example() -> ModelDisableUserRequest:
    """Return a ModelDisableUserRequest populated with randomized example values."""
    instance = ModelDisableUserRequest()
    instance.reason = randomize()
    return instance
def create_model_email_update_request_v4_example() -> ModelEmailUpdateRequestV4:
    """Return a ModelEmailUpdateRequestV4 populated with randomized example values."""
    instance = ModelEmailUpdateRequestV4()
    instance.code = randomize()
    instance.email_address = randomize("email")
    return instance
def create_model_enabled_factors_response_v4_example() -> ModelEnabledFactorsResponseV4:
    """Return a ModelEnabledFactorsResponseV4 populated with randomized example values."""
    instance = ModelEnabledFactorsResponseV4()
    instance.default = randomize()
    instance.enabled = [randomize()]
    return instance
def create_model_forgot_password_request_v3_example() -> ModelForgotPasswordRequestV3:
    """Return a ModelForgotPasswordRequestV3 populated with randomized example values."""
    instance = ModelForgotPasswordRequestV3()
    instance.email_address = randomize("email")
    instance.language_tag = randomize()
    return instance
def create_model_get_admin_users_response_example() -> ModelGetAdminUsersResponse:
    """Return a ModelGetAdminUsersResponse populated with randomized example values."""
    instance = ModelGetAdminUsersResponse()
    instance.data = [create_model_user_response_example()]
    instance.paging = create_accountcommon_pagination_example()
    return instance
def create_model_get_publisher_user_response_example() -> ModelGetPublisherUserResponse:
    """Return a ModelGetPublisherUserResponse populated with randomized example values."""
    instance = ModelGetPublisherUserResponse()
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_get_user_ban_v3_response_example() -> ModelGetUserBanV3Response:
    """Return a ModelGetUserBanV3Response populated with randomized example values."""
    instance = ModelGetUserBanV3Response()
    instance.data = [create_model_user_ban_response_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_get_user_justice_platform_account_response_example() -> ModelGetUserJusticePlatformAccountResponse:
    """Return a ModelGetUserJusticePlatformAccountResponse populated with randomized example values."""
    instance = ModelGetUserJusticePlatformAccountResponse()
    instance.designated_namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_get_user_mapping_example() -> ModelGetUserMapping:
    """Return a ModelGetUserMapping populated with randomized example values."""
    instance = ModelGetUserMapping()
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_get_users_response_with_pagination_v3_example() -> ModelGetUsersResponseWithPaginationV3:
    """Return a ModelGetUsersResponseWithPaginationV3 populated with randomized example values."""
    instance = ModelGetUsersResponseWithPaginationV3()
    instance.data = [create_model_user_response_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_input_validation_data_example() -> ModelInputValidationData:
    """Return a ModelInputValidationData populated with randomized example values."""
    instance = ModelInputValidationData()
    instance.field = randomize()
    instance.validation = create_model_validation_detail_example()
    return instance
def create_model_input_validation_data_public_example() -> ModelInputValidationDataPublic:
    """Return a ModelInputValidationDataPublic populated with randomized example values."""
    instance = ModelInputValidationDataPublic()
    instance.field = randomize()
    instance.validation = create_model_validation_detail_public_example()
    return instance
def create_model_input_validation_update_payload_example() -> ModelInputValidationUpdatePayload:
    """Return a ModelInputValidationUpdatePayload populated with randomized example values."""
    instance = ModelInputValidationUpdatePayload()
    instance.field = randomize()
    instance.validation = create_validation_example()
    return instance
def create_model_input_validations_public_response_example() -> ModelInputValidationsPublicResponse:
    """Return a ModelInputValidationsPublicResponse populated with randomized example values."""
    instance = ModelInputValidationsPublicResponse()
    instance.data = [create_model_input_validation_data_public_example()]
    instance.version = randomize("int", min_val=1, max_val=1000)
    return instance
def create_model_input_validations_response_example() -> ModelInputValidationsResponse:
    """Return a ModelInputValidationsResponse populated with randomized example values."""
    instance = ModelInputValidationsResponse()
    instance.data = [create_model_input_validation_data_example()]
    instance.version = randomize("int", min_val=1, max_val=1000)
    return instance
def create_model_invite_user_request_v3_example() -> ModelInviteUserRequestV3:
    """Return a ModelInviteUserRequestV3 populated with randomized example values."""
    instance = ModelInviteUserRequestV3()
    instance.email_addresses = [randomize()]
    instance.is_admin = randomize("bool")
    instance.roles = [randomize()]
    return instance
def create_model_invite_user_request_v4_example() -> ModelInviteUserRequestV4:
    """Return a ModelInviteUserRequestV4 populated with randomized example values."""
    instance = ModelInviteUserRequestV4()
    instance.assigned_namespaces = [randomize()]
    instance.email_addresses = [randomize()]
    instance.is_admin = randomize("bool")
    instance.role_id = randomize("uid")
    return instance
def create_model_invite_user_response_v3_example() -> ModelInviteUserResponseV3:
    """Return a ModelInviteUserResponseV3 populated with randomized example values."""
    instance = ModelInviteUserResponseV3()
    instance.data = [create_model_user_invitation_v3_example()]
    return instance
def create_model_link_platform_account_request_example() -> ModelLinkPlatformAccountRequest:
    """Return a ModelLinkPlatformAccountRequest populated with randomized example values."""
    instance = ModelLinkPlatformAccountRequest()
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    return instance
def create_model_link_platform_account_with_progression_request_example() -> ModelLinkPlatformAccountWithProgressionRequest:
    """Return a ModelLinkPlatformAccountWithProgressionRequest populated with randomized example values."""
    instance = ModelLinkPlatformAccountWithProgressionRequest()
    instance.chosen_namespaces = [randomize()]
    instance.request_id = randomize()
    return instance
def create_model_link_request_example() -> ModelLinkRequest:
    """Return a ModelLinkRequest populated with randomized example values."""
    instance = ModelLinkRequest()
    instance.client_id = randomize("uid")
    instance.namespace = randomize("slug")
    instance.operation_name = randomize()
    instance.payload = {randomize(): randomize()}
    instance.redirect_uri = randomize()
    instance.request_id = randomize()
    instance.status = randomize()
    instance.conflict_publisher_user_id = randomize()
    instance.conflict_user_linked_games = [randomize()]
    instance.current_user_linked_games = [randomize()]
    instance.error = create_rest_error_response_example()
    instance.expiration = randomize("int", min_val=1, max_val=1000)
    instance.platform_display_name = randomize()
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    return instance
def create_model_list_assigned_users_v4_response_example() -> ModelListAssignedUsersV4Response:
    """Return a ModelListAssignedUsersV4Response populated with randomized example values."""
    instance = ModelListAssignedUsersV4Response()
    instance.data = [create_model_assigned_user_v4_response_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_list_bulk_user_response_example() -> ModelListBulkUserResponse:
    """Return a ModelListBulkUserResponse populated with randomized example values."""
    instance = ModelListBulkUserResponse()
    instance.data = [create_model_user_base_info_example()]
    return instance
def create_model_list_email_address_request_example() -> ModelListEmailAddressRequest:
    """Return a ModelListEmailAddressRequest populated with randomized example values."""
    instance = ModelListEmailAddressRequest()
    instance.list_email_address_request = [randomize()]
    return instance
def create_model_list_role_v4_response_example() -> ModelListRoleV4Response:
    """Return a ModelListRoleV4Response populated with randomized example values."""
    instance = ModelListRoleV4Response()
    instance.data = [create_model_role_v4_response_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_list_user_information_result_example() -> ModelListUserInformationResult:
    """Return a ModelListUserInformationResult populated with randomized example values."""
    instance = ModelListUserInformationResult()
    instance.data = [create_model_user_info_response_example()]
    return instance
def create_model_list_user_response_v3_example() -> ModelListUserResponseV3:
    """Return a ModelListUserResponseV3 populated with randomized example values."""
    instance = ModelListUserResponseV3()
    instance.data = [create_model_user_response_v3_example()]
    return instance
def create_model_list_user_roles_v4_response_example() -> ModelListUserRolesV4Response:
    """Return a ModelListUserRolesV4Response populated with randomized example values."""
    instance = ModelListUserRolesV4Response()
    instance.data = [create_model_user_roles_v4_response_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_list_valid_user_id_response_v4_example() -> ModelListValidUserIDResponseV4:
    """Return a ModelListValidUserIDResponseV4 populated with randomized example values."""
    instance = ModelListValidUserIDResponseV4()
    instance.data = [create_model_valid_user_id_response_v4_example()]
    return instance
def create_model_login_histories_response_example() -> ModelLoginHistoriesResponse:
    """Return a ModelLoginHistoriesResponse populated with randomized example values."""
    instance = ModelLoginHistoriesResponse()
    instance.data = [create_model_user_login_history_response_example()]
    instance.paging = create_accountcommon_pagination_example()
    return instance
def create_model_namespace_role_request_example() -> ModelNamespaceRoleRequest:
    """Return a ModelNamespaceRoleRequest populated with randomized example values."""
    instance = ModelNamespaceRoleRequest()
    instance.namespace = randomize("slug")
    instance.role_id = randomize("uid")
    return instance
def create_model_permission_delete_request_example() -> ModelPermissionDeleteRequest:
    """Return a ModelPermissionDeleteRequest populated with randomized example values."""
    instance = ModelPermissionDeleteRequest()
    instance.action = randomize("int", min_val=1, max_val=1000)
    instance.resource = randomize()
    return instance
def create_model_platform_domain_delete_request_example() -> ModelPlatformDomainDeleteRequest:
    """Return a ModelPlatformDomainDeleteRequest populated with randomized example values."""
    instance = ModelPlatformDomainDeleteRequest()
    instance.domain = randomize()
    return instance
def create_model_platform_domain_response_example() -> ModelPlatformDomainResponse:
    """Return a ModelPlatformDomainResponse populated with randomized example values."""
    instance = ModelPlatformDomainResponse()
    instance.registered_domains = [create_accountcommon_registered_domain_example()]
    return instance
def create_model_platform_domain_update_request_example() -> ModelPlatformDomainUpdateRequest:
    """Return a ModelPlatformDomainUpdateRequest populated with randomized example values."""
    instance = ModelPlatformDomainUpdateRequest()
    instance.affected_client_i_ds = [randomize()]
    instance.assigned_namespaces = [randomize()]
    instance.domain = randomize()
    instance.role_id = randomize("uid")
    return instance
def create_model_platform_user_id_request_example() -> ModelPlatformUserIDRequest:
    """Return a ModelPlatformUserIDRequest populated with randomized example values."""
    instance = ModelPlatformUserIDRequest()
    instance.platform_user_ids = [randomize()]
    return instance
def create_model_platform_user_information_example() -> ModelPlatformUserInformation:
    """Return a ModelPlatformUserInformation populated with randomized example values."""
    instance = ModelPlatformUserInformation()
    instance.display_name = randomize("slug")
    instance.linked_at = randomize("date")
    instance.namespace = randomize("slug")
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.email_address = randomize("email")
    instance.xuid = randomize()
    return instance
def create_model_public_third_party_platform_info_example() -> ModelPublicThirdPartyPlatformInfo:
    """Return a ModelPublicThirdPartyPlatformInfo populated with randomized example values."""
    instance = ModelPublicThirdPartyPlatformInfo()
    instance.app_id = randomize("uid")
    instance.client_id = randomize("uid")
    instance.environment = randomize()
    instance.is_active = randomize("bool")
    instance.platform_id = randomize()
    return instance
def create_model_public_user_information_response_v3_example() -> ModelPublicUserInformationResponseV3:
    """Return a ModelPublicUserInformationResponseV3 populated with randomized example values."""
    instance = ModelPublicUserInformationResponseV3()
    instance.data = [create_model_public_user_information_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_public_user_information_v3_example() -> ModelPublicUserInformationV3:
    """Return a ModelPublicUserInformationV3 populated with randomized example values."""
    instance = ModelPublicUserInformationV3()
    instance.created_at = randomize("date")
    instance.display_name = randomize("slug")
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    instance.user_name = randomize("slug")
    return instance
def create_model_public_user_response_example() -> ModelPublicUserResponse:
    """Return a ModelPublicUserResponse populated with randomized example values."""
    instance = ModelPublicUserResponse()
    instance.auth_type = randomize()
    instance.bans = [create_model_user_active_ban_response_example()]
    instance.created_at = randomize("date")
    instance.deletion_status = randomize("bool")
    instance.display_name = randomize("slug")
    instance.email_verified = randomize("bool")
    instance.enabled = randomize("bool")
    instance.last_enabled_changed_time = randomize("date")
    instance.login_id = randomize()
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.permissions = [create_accountcommon_permission_example()]
    instance.phone_verified = randomize("bool")
    instance.roles = [randomize()]
    instance.user_id = randomize("uid")
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.username = randomize("slug")
    instance.xuid = randomize()
    return instance
def create_model_public_user_response_v3_example() -> ModelPublicUserResponseV3:
    """Return a ModelPublicUserResponseV3 populated with randomized example values."""
    instance = ModelPublicUserResponseV3()
    instance.auth_type = randomize()
    instance.bans = [create_model_user_active_ban_response_v3_example()]
    instance.created_at = randomize("date")
    instance.deletion_status = randomize("bool")
    instance.display_name = randomize("slug")
    instance.email_verified = randomize("bool")
    instance.enabled = randomize("bool")
    instance.last_date_of_birth_changed_time = randomize("date")
    instance.last_enabled_changed_time = randomize("date")
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.permissions = [create_model_user_permissions_response_v3_example()]
    instance.phone_verified = randomize("bool")
    instance.roles = [randomize()]
    instance.user_id = randomize("uid")
    instance.avatar_url = randomize("url")
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.user_name = randomize("slug")
    return instance
def create_model_public_users_response_example() -> ModelPublicUsersResponse:
    """Return a ModelPublicUsersResponse populated with randomized example values."""
    instance = ModelPublicUsersResponse()
    instance.users = [create_model_public_user_response_example()]
    return instance
def create_model_remove_user_role_v4_request_example() -> ModelRemoveUserRoleV4Request:
    """Return a ModelRemoveUserRoleV4Request populated with randomized example values."""
    instance = ModelRemoveUserRoleV4Request()
    instance.assigned_namespaces = [randomize()]
    instance.role_id = randomize("uid")
    return instance
def create_model_reset_password_request_example() -> ModelResetPasswordRequest:
    """Return a ModelResetPasswordRequest populated with randomized example values."""
    instance = ModelResetPasswordRequest()
    instance.code = randomize()
    instance.login_id = randomize()
    instance.new_password = randomize()
    return instance
def create_model_reset_password_request_v3_example() -> ModelResetPasswordRequestV3:
    """Return a ModelResetPasswordRequestV3 populated with randomized example values."""
    instance = ModelResetPasswordRequestV3()
    instance.code = randomize()
    instance.email_address = randomize("email")
    instance.new_password = randomize()
    return instance
def create_model_revoke_user_v4_request_example() -> ModelRevokeUserV4Request:
    """Return a ModelRevokeUserV4Request populated with randomized example values."""
    instance = ModelRevokeUserV4Request()
    instance.namespace = randomize("slug")
    instance.user_id = randomize("uid")
    return instance
def create_model_role_admin_status_response_example() -> ModelRoleAdminStatusResponse:
    """Return a ModelRoleAdminStatusResponse populated with randomized example values."""
    instance = ModelRoleAdminStatusResponse()
    instance.admin_role = randomize("bool")
    return instance
def create_model_role_admin_status_response_v3_example() -> ModelRoleAdminStatusResponseV3:
    """Return a ModelRoleAdminStatusResponseV3 populated with randomized example values."""
    instance = ModelRoleAdminStatusResponseV3()
    instance.admin_role = randomize("bool")
    return instance
def create_model_role_create_request_example() -> ModelRoleCreateRequest:
    """Return a ModelRoleCreateRequest populated with randomized example values."""
    instance = ModelRoleCreateRequest()
    instance.admin_role = randomize("bool")
    instance.managers = [create_accountcommon_role_manager_example()]
    instance.members = [create_accountcommon_role_member_example()]
    instance.permissions = [create_accountcommon_permission_example()]
    instance.role_name = randomize()
    return instance
def create_model_role_create_v3_request_example() -> ModelRoleCreateV3Request:
    """Return a ModelRoleCreateV3Request populated with randomized example values."""
    instance = ModelRoleCreateV3Request()
    instance.admin_role = randomize("bool")
    instance.is_wildcard = randomize("bool")
    instance.managers = [create_accountcommon_role_manager_v3_example()]
    instance.members = [create_accountcommon_role_member_v3_example()]
    instance.permissions = [create_accountcommon_permission_v3_example()]
    instance.role_name = randomize()
    instance.deletable = randomize("bool")
    return instance
def create_model_role_managers_request_example() -> ModelRoleManagersRequest:
    """Return a ModelRoleManagersRequest populated with randomized example values."""
    instance = ModelRoleManagersRequest()
    instance.managers = [create_accountcommon_role_manager_example()]
    return instance
def create_model_role_managers_request_v3_example() -> ModelRoleManagersRequestV3:
    """Return a ModelRoleManagersRequestV3 populated with randomized example values."""
    instance = ModelRoleManagersRequestV3()
    instance.managers = [create_accountcommon_role_manager_v3_example()]
    return instance
def create_model_role_managers_response_example() -> ModelRoleManagersResponse:
    """Return a ModelRoleManagersResponse populated with randomized example values."""
    instance = ModelRoleManagersResponse()
    instance.managers = [create_accountcommon_role_manager_example()]
    return instance
def create_model_role_managers_responses_v3_example() -> ModelRoleManagersResponsesV3:
    """Return a ModelRoleManagersResponsesV3 populated with randomized example values."""
    instance = ModelRoleManagersResponsesV3()
    instance.data = [create_accountcommon_role_manager_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_role_members_request_example() -> ModelRoleMembersRequest:
    """Return a ModelRoleMembersRequest populated with randomized example values."""
    instance = ModelRoleMembersRequest()
    instance.members = [create_accountcommon_role_member_example()]
    return instance
def create_model_role_members_request_v3_example() -> ModelRoleMembersRequestV3:
    """Return a ModelRoleMembersRequestV3 populated with randomized example values."""
    instance = ModelRoleMembersRequestV3()
    instance.members = [create_accountcommon_role_member_v3_example()]
    return instance
def create_model_role_members_response_example() -> ModelRoleMembersResponse:
    """Return a ModelRoleMembersResponse populated with randomized example values."""
    instance = ModelRoleMembersResponse()
    instance.members = [create_accountcommon_role_member_example()]
    return instance
def create_model_role_members_response_v3_example() -> ModelRoleMembersResponseV3:
    """Return a ModelRoleMembersResponseV3 populated with randomized example values."""
    instance = ModelRoleMembersResponseV3()
    instance.data = [create_accountcommon_role_member_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_role_names_response_v3_example() -> ModelRoleNamesResponseV3:
    """Return a ModelRoleNamesResponseV3 populated with randomized example values."""
    instance = ModelRoleNamesResponseV3()
    instance.data = [randomize()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_role_response_example() -> ModelRoleResponse:
    """Return a ModelRoleResponse populated with randomized example values."""
    instance = ModelRoleResponse()
    instance.is_wildcard = randomize("bool")
    instance.permissions = [create_accountcommon_permission_example()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_role_response_v3_example() -> ModelRoleResponseV3:
    """Return a ModelRoleResponseV3 populated with randomized example values."""
    instance = ModelRoleResponseV3()
    instance.admin_role = randomize("bool")
    instance.is_wildcard = randomize("bool")
    instance.permissions = [create_accountcommon_permission_v3_example()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_role_response_with_managers_example() -> ModelRoleResponseWithManagers:
    """Return a ModelRoleResponseWithManagers populated with randomized example values."""
    instance = ModelRoleResponseWithManagers()
    instance.is_wildcard = randomize("bool")
    instance.managers = [create_accountcommon_role_manager_example()]
    instance.permissions = [create_accountcommon_permission_example()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_role_response_with_managers_and_pagination_v3_example() -> ModelRoleResponseWithManagersAndPaginationV3:
    """Return a ModelRoleResponseWithManagersAndPaginationV3 populated with randomized example values."""
    instance = ModelRoleResponseWithManagersAndPaginationV3()
    instance.data = [create_model_role_response_with_managers_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    return instance
def create_model_role_response_with_managers_v3_example() -> ModelRoleResponseWithManagersV3:
    """Return a ModelRoleResponseWithManagersV3 populated with randomized example values."""
    instance = ModelRoleResponseWithManagersV3()
    instance.admin_role = randomize("bool")
    instance.is_wildcard = randomize("bool")
    instance.managers = [create_accountcommon_role_manager_v3_example()]
    instance.permissions = [create_accountcommon_permission_v3_example()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_role_update_request_example() -> ModelRoleUpdateRequest:
    """Return a ModelRoleUpdateRequest populated with randomized example values."""
    instance = ModelRoleUpdateRequest()
    instance.role_name = randomize()
    return instance
def create_model_role_update_request_v3_example() -> ModelRoleUpdateRequestV3:
    """Return a ModelRoleUpdateRequestV3 populated with randomized example values."""
    instance = ModelRoleUpdateRequestV3()
    instance.is_wildcard = randomize("bool")
    instance.role_name = randomize()
    instance.deletable = randomize("bool")
    return instance
def create_model_role_v4_request_example() -> ModelRoleV4Request:
    """Return a ModelRoleV4Request populated with randomized example values."""
    instance = ModelRoleV4Request()
    instance.admin_role = randomize("bool")
    instance.is_wildcard = randomize("bool")
    instance.role_name = randomize()
    instance.deletable = randomize("bool")
    return instance
def create_model_role_v4_response_example() -> ModelRoleV4Response:
    """Return a ModelRoleV4Response populated with randomized example values."""
    instance = ModelRoleV4Response()
    instance.admin_role = randomize("bool")
    instance.is_wildcard = randomize("bool")
    instance.permissions = [create_accountcommon_permission_v3_example()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_search_users_by_platform_id_response_example() -> ModelSearchUsersByPlatformIDResponse:
    """Return a ModelSearchUsersByPlatformIDResponse populated with randomized example values."""
    instance = ModelSearchUsersByPlatformIDResponse()
    instance.data = [create_accountcommon_user_search_by_platform_id_result_example()]
    instance.paging = create_accountcommon_pagination_example()
    return instance
def create_model_search_users_response_example() -> ModelSearchUsersResponse:
    """Return a ModelSearchUsersResponse populated with randomized example values."""
    instance = ModelSearchUsersResponse()
    instance.data = [create_accountcommon_user_search_result_example()]
    return instance
def create_model_search_users_response_with_pagination_v3_example() -> ModelSearchUsersResponseWithPaginationV3:
    """Return a ModelSearchUsersResponseWithPaginationV3 populated with randomized example values."""
    instance = ModelSearchUsersResponseWithPaginationV3()
    instance.data = [create_model_user_response_v3_example()]
    instance.paging = create_accountcommon_pagination_v3_example()
    instance.total_data = randomize("int", min_val=1, max_val=1000)
    return instance
def create_model_send_register_verification_code_request_example() -> ModelSendRegisterVerificationCodeRequest:
    """Return a ModelSendRegisterVerificationCodeRequest populated with randomized example values."""
    instance = ModelSendRegisterVerificationCodeRequest()
    instance.email_address = randomize("email")
    instance.language_tag = randomize()
    return instance
def create_model_send_verification_code_request_example() -> ModelSendVerificationCodeRequest:
    """Return a ModelSendVerificationCodeRequest populated with randomized example values."""
    instance = ModelSendVerificationCodeRequest()
    instance.language_tag = randomize()
    instance.login_id = randomize()
    instance.context = randomize()
    return instance
def create_model_send_verification_code_request_v3_example() -> ModelSendVerificationCodeRequestV3:
    """Return a ModelSendVerificationCodeRequestV3 populated with randomized example values."""
    instance = ModelSendVerificationCodeRequestV3()
    instance.email_address = randomize("email")
    instance.context = randomize()
    instance.language_tag = randomize()
    return instance
def create_model_sso_platform_credential_request_example() -> ModelSSOPlatformCredentialRequest:
    """Return a ModelSSOPlatformCredentialRequest populated with randomized example values."""
    instance = ModelSSOPlatformCredentialRequest()
    instance.acs_url = randomize("url")
    instance.api_key = randomize()
    instance.app_id = randomize("uid")
    instance.federation_metadata_url = randomize("url")
    instance.is_active = randomize("bool")
    instance.redirect_uri = randomize()
    instance.secret = randomize()
    instance.sso_url = randomize("url")
    return instance
def create_model_sso_platform_credential_response_example() -> ModelSSOPlatformCredentialResponse:
    """Return a ModelSSOPlatformCredentialResponse populated with randomized example values."""
    instance = ModelSSOPlatformCredentialResponse()
    instance.acs_url = randomize("url")
    instance.app_id = randomize("uid")
    instance.federation_metadata_url = randomize("url")
    instance.is_active = randomize("bool")
    instance.namespace = randomize("slug")
    instance.platform_id = randomize()
    instance.redirect_uri = randomize()
    instance.secret = randomize()
    instance.sso_url = randomize("url")
    instance.truncated_api_key = randomize()
    return instance
def create_model_third_party_login_platform_credential_request_example() -> ModelThirdPartyLoginPlatformCredentialRequest:
    """Return a ModelThirdPartyLoginPlatformCredentialRequest populated with randomized example values."""
    instance = ModelThirdPartyLoginPlatformCredentialRequest()
    instance.acsurl = randomize()
    instance.app_id = randomize("uid")
    instance.aws_cognito_region = randomize()
    instance.aws_cognito_user_pool = randomize()
    instance.client_id = randomize("uid")
    instance.environment = randomize()
    instance.federation_metadata_url = randomize("url")
    instance.generic_oauth_flow = randomize("bool")
    instance.is_active = randomize("bool")
    instance.issuer = randomize()
    instance.jwks_endpoint = randomize()
    instance.key_id = randomize()
    instance.netflix_certificates = create_accountcommon_netflix_certificates_example()
    instance.organization_id = randomize()
    instance.platform_name = randomize()
    instance.redirect_uri = randomize()
    instance.secret = randomize()
    instance.team_id = randomize()
    instance.token_authentication_type = randomize()
    instance.token_claims_mapping = {randomize(): randomize()}
    return instance
def create_model_third_party_login_platform_credential_response_example() -> ModelThirdPartyLoginPlatformCredentialResponse:
    """Return a ModelThirdPartyLoginPlatformCredentialResponse populated with randomized example values."""
    instance = ModelThirdPartyLoginPlatformCredentialResponse()
    instance.acsurl = randomize()
    instance.app_id = randomize("uid")
    instance.aws_cognito_region = randomize()
    instance.aws_cognito_user_pool = randomize()
    instance.client_id = randomize("uid")
    instance.environment = randomize()
    instance.federation_metadata_url = randomize("url")
    instance.generic_oauth_flow = randomize("bool")
    instance.is_active = randomize("bool")
    instance.issuer = randomize()
    instance.jwks_endpoint = randomize()
    instance.key_id = randomize()
    instance.namespace = randomize("slug")
    instance.organization_id = randomize()
    instance.platform_id = randomize()
    instance.platform_name = randomize()
    instance.redirect_uri = randomize()
    instance.registered_domains = [create_accountcommon_registered_domain_example()]
    instance.secret = randomize()
    instance.team_id = randomize()
    instance.token_authentication_type = randomize()
    instance.token_claims_mapping = {randomize(): randomize()}
    instance.netflix_certificates = create_accountcommon_netflix_certificates_example()
    return instance
def create_model_unlink_user_platform_request_example() -> ModelUnlinkUserPlatformRequest:
    """Return a ModelUnlinkUserPlatformRequest populated with randomized example values."""
    instance = ModelUnlinkUserPlatformRequest()
    instance.platform_namespace = randomize("slug")
    return instance
def create_model_update_permission_schedule_request_example() -> ModelUpdatePermissionScheduleRequest:
    """Return a ModelUpdatePermissionScheduleRequest populated with randomized example values."""
    instance = ModelUpdatePermissionScheduleRequest()
    instance.sched_action = randomize("int", min_val=1, max_val=1000)
    instance.sched_cron = randomize()
    instance.sched_range = [randomize()]
    return instance
def create_model_update_user_deletion_status_request_example() -> ModelUpdateUserDeletionStatusRequest:
    """Return a ModelUpdateUserDeletionStatusRequest populated with randomized example values."""
    instance = ModelUpdateUserDeletionStatusRequest()
    instance.enabled = randomize("bool")
    return instance
def create_model_update_user_status_request_example() -> ModelUpdateUserStatusRequest:
instance = ModelUpdateUserStatusRequest()
instance.enabled = randomize("bool")
instance.reason = randomize()
return instance
def create_model_upgrade_headless_account_request_example() -> ModelUpgradeHeadlessAccountRequest:
instance = ModelUpgradeHeadlessAccountRequest()
instance.login_id = randomize()
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_v3_request_example() -> ModelUpgradeHeadlessAccountV3Request:
instance = ModelUpgradeHeadlessAccountV3Request()
instance.email_address = randomize("email")
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_with_verification_code_request_example() -> ModelUpgradeHeadlessAccountWithVerificationCodeRequest:
instance = ModelUpgradeHeadlessAccountWithVerificationCodeRequest()
instance.code = randomize()
instance.login_id = randomize()
instance.password = randomize("password")
return instance
def create_model_upgrade_headless_account_with_verification_code_request_v3_example() -> ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3:
instance = ModelUpgradeHeadlessAccountWithVerificationCodeRequestV3()
instance.code = randomize()
instance.email_address = randomize("email")
instance.password = randomize("password")
instance.validate_only = randomize("bool")
instance.country = randomize("country")
instance.date_of_birth = randomize()
instance.display_name = randomize("slug")
return instance
def create_model_user_active_ban_response_example() -> ModelUserActiveBanResponse:
instance = ModelUserActiveBanResponse()
instance.ban = randomize()
instance.ban_id = randomize()
instance.end_date = randomize("date")
return instance
def create_model_user_active_ban_response_v3_example() -> ModelUserActiveBanResponseV3:
instance = ModelUserActiveBanResponseV3()
instance.ban = randomize()
instance.ban_id = randomize()
instance.end_date = randomize("date")
return instance
def create_model_user_ban_response_example() -> ModelUserBanResponse:
instance = ModelUserBanResponse()
instance.ban = randomize()
instance.ban_id = randomize()
instance.banned_by = create_banned_by_example()
instance.comment = randomize()
instance.created_at = randomize("date")
instance.enabled = randomize("bool")
instance.end_date = randomize("date")
instance.namespace = randomize("slug")
instance.reason = randomize()
instance.user_id = randomize("uid")
instance.disabled_date = randomize("date")
return instance
def create_model_user_ban_response_v3_example() -> ModelUserBanResponseV3:
instance = ModelUserBanResponseV3()
instance.ban = randomize()
instance.ban_id = randomize()
instance.banned_by = create_accountcommon_banned_by_v3_example()
instance.comment = randomize()
instance.created_at = randomize("date")
instance.disabled_date = randomize("date")
instance.enabled = randomize("bool")
instance.end_date = randomize("date")
instance.namespace = randomize("slug")
instance.reason = randomize()
instance.user_id = randomize("uid")
return instance
def create_model_user_base_info_example() -> ModelUserBaseInfo:
instance = ModelUserBaseInfo()
instance.avatar_url = randomize("url")
instance.display_name = randomize("slug")
instance.platform_user_ids = {randomize(): randomize()}
instance.user_id = randomize("uid")
return instance
def create_model_user_create_from_invitation_request_v3_example() -> ModelUserCreateFromInvitationRequestV3:
instance = ModelUserCreateFromInvitationRequestV3()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
return instance
def create_model_user_create_from_invitation_request_v4_example() -> ModelUserCreateFromInvitationRequestV4:
instance = ModelUserCreateFromInvitationRequestV4()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.username = randomize("slug")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
return instance
def create_model_user_create_request_example() -> ModelUserCreateRequest:
instance = ModelUserCreateRequest()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.login_id = randomize()
instance.password = randomize("password")
instance.password_md5_sum = randomize()
return instance
def create_model_user_create_request_v3_example() -> ModelUserCreateRequestV3:
instance = ModelUserCreateRequestV3()
instance.auth_type = randomize()
instance.code = randomize()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.password = randomize("password")
instance.reach_minimum_age = randomize("bool")
instance.accepted_policies = [create_legal_accepted_policies_request_example()]
instance.date_of_birth = randomize()
instance.password_md5_sum = randomize()
return instance
def create_model_user_create_response_example() -> ModelUserCreateResponse:
instance = ModelUserCreateResponse()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.date_of_birth = randomize("adult_birthdate")
instance.display_name = randomize("slug")
instance.login_id = randomize()
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_create_response_v3_example() -> ModelUserCreateResponseV3:
instance = ModelUserCreateResponseV3()
instance.auth_type = randomize()
instance.country = randomize("country")
instance.date_of_birth = randomize("adult_birthdate")
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_deletion_status_response_example() -> ModelUserDeletionStatusResponse:
instance = ModelUserDeletionStatusResponse()
instance.deletion_status = randomize("bool")
return instance
def create_model_user_i_ds_request_example() -> ModelUserIDsRequest:
instance = ModelUserIDsRequest()
instance.user_ids = [randomize()]
return instance
def create_model_user_info_response_example() -> ModelUserInfoResponse:
instance = ModelUserInfoResponse()
instance.display_name = randomize("slug")
instance.email_address = randomize("email")
instance.namespace = randomize("slug")
instance.user_id = randomize("uid")
return instance
def create_model_user_information_example() -> ModelUserInformation:
instance = ModelUserInformation()
instance.country = randomize("country")
instance.display_name = randomize("slug")
instance.email_addresses = [randomize()]
instance.linked_platform_accounts = [create_model_platform_user_information_example()]
instance.phone_number = randomize()
instance.username = randomize("slug")
instance.xuid = randomize()
return instance
def create_model_user_invitation_v3_example() -> ModelUserInvitationV3:
instance = ModelUserInvitationV3()
instance.email = randomize("email")
instance.expired_at = randomize("date")
instance.roles = [create_accountcommon_namespace_role_example()]
instance.id_ = randomize()
return instance
def create_model_user_login_history_response_example() -> ModelUserLoginHistoryResponse:
instance = ModelUserLoginHistoryResponse()
instance.application_name = randomize()
instance.city = randomize()
instance.country = randomize("country")
instance.device_id = randomize()
instance.device_name = randomize()
instance.state = randomize()
instance.timestamp = randomize("int", min_val=1, max_val=1000)
return instance
def create_model_user_password_update_request_example() -> ModelUserPasswordUpdateRequest:
    """Build a ModelUserPasswordUpdateRequest populated with randomized sample data."""
    instance = ModelUserPasswordUpdateRequest()
    instance.language_tag = randomize()
    # Use the dedicated "password" randomizer for both password fields, matching
    # every other password field in this module (a corrupted "<PASSWORD>"
    # placeholder previously made the old_password line a syntax error).
    instance.new_password = randomize("password")
    instance.old_password = randomize("password")
    return instance
def create_model_user_password_update_v3_request_example() -> ModelUserPasswordUpdateV3Request:
    """Build a ModelUserPasswordUpdateV3Request populated with randomized sample data."""
    instance = ModelUserPasswordUpdateV3Request()
    instance.language_tag = randomize()
    # Corrupted "<PASSWORD>" placeholders previously made both password lines
    # syntax errors; restore the "password" randomizer used module-wide.
    instance.new_password = randomize("password")
    instance.old_password = randomize("password")
    return instance
# Auto-generated example factories (continued): each instantiates the model
# class and fills every field with randomize()-generated sample data.
def create_model_user_permissions_response_v3_example() -> ModelUserPermissionsResponseV3:
    instance = ModelUserPermissionsResponseV3()
    instance.action = randomize("int", min_val=1, max_val=1000)
    instance.resource = randomize()
    instance.sched_action = randomize("int", min_val=1, max_val=1000)
    instance.sched_cron = randomize()
    instance.sched_range = [randomize()]
    return instance
def create_model_user_response_example() -> ModelUserResponse:
    instance = ModelUserResponse()
    instance.auth_type = randomize()
    instance.bans = [create_model_user_active_ban_response_example()]
    instance.country = randomize("country")
    instance.created_at = randomize("date")
    instance.date_of_birth = randomize("adult_birthdate")
    instance.deletion_status = randomize("bool")
    instance.display_name = randomize("slug")
    instance.email_verified = randomize("bool")
    instance.enabled = randomize("bool")
    instance.last_date_of_birth_changed_time = randomize("date")
    instance.last_enabled_changed_time = randomize("date")
    instance.login_id = randomize()
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.old_email_address = randomize()
    instance.permissions = [create_accountcommon_permission_example()]
    instance.phone_verified = randomize("bool")
    instance.roles = [randomize()]
    instance.user_id = randomize("uid")
    instance.avatar_url = randomize("url")
    instance.email_address = randomize("email")
    instance.new_email_address = randomize()
    instance.phone_number = randomize()
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.username = randomize("slug")
    instance.xuid = randomize()
    return instance
def create_model_user_response_v3_example() -> ModelUserResponseV3:
    instance = ModelUserResponseV3()
    instance.auth_type = randomize()
    instance.bans = [create_model_user_active_ban_response_v3_example()]
    instance.country = randomize("country")
    instance.created_at = randomize("date")
    instance.date_of_birth = randomize("adult_birthdate")
    instance.deletion_status = randomize("bool")
    instance.display_name = randomize("slug")
    instance.email_address = randomize("email")
    instance.email_verified = randomize("bool")
    instance.enabled = randomize("bool")
    instance.last_date_of_birth_changed_time = randomize("date")
    instance.last_enabled_changed_time = randomize("date")
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.old_email_address = randomize()
    instance.permissions = [create_model_user_permissions_response_v3_example()]
    instance.phone_verified = randomize("bool")
    instance.roles = [randomize()]
    instance.user_id = randomize("uid")
    instance.avatar_url = randomize("url")
    instance.new_email_address = randomize()
    instance.phone_number = randomize()
    instance.platform_avatar_url = randomize("url")
    instance.platform_display_name = randomize()
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.user_name = randomize("slug")
    return instance
def create_model_user_roles_v4_response_example() -> ModelUserRolesV4Response:
    instance = ModelUserRolesV4Response()
    instance.assigned_namespaces = [randomize()]
    instance.role_id = randomize("uid")
    instance.role_name = randomize()
    return instance
def create_model_user_update_request_example() -> ModelUserUpdateRequest:
    instance = ModelUserUpdateRequest()
    instance.country = randomize("country")
    instance.date_of_birth = randomize()
    instance.display_name = randomize("slug")
    instance.language_tag = randomize()
    return instance
def create_model_user_update_request_v3_example() -> ModelUserUpdateRequestV3:
    instance = ModelUserUpdateRequestV3()
    instance.avatar_url = randomize("url")
    instance.country = randomize("country")
    instance.date_of_birth = randomize()
    instance.display_name = randomize("slug")
    instance.language_tag = randomize()
    instance.user_name = randomize("slug")
    return instance
def create_model_user_verification_request_example() -> ModelUserVerificationRequest:
    instance = ModelUserVerificationRequest()
    instance.code = randomize()
    instance.contact_type = randomize()
    instance.language_tag = randomize()
    instance.validate_only = randomize("bool")
    return instance
def create_model_user_verification_request_v3_example() -> ModelUserVerificationRequestV3:
    instance = ModelUserVerificationRequestV3()
    instance.code = randomize()
    instance.contact_type = randomize()
    instance.language_tag = randomize()
    instance.validate_only = randomize("bool")
    return instance
def create_model_valid_user_id_response_v4_example() -> ModelValidUserIDResponseV4:
    instance = ModelValidUserIDResponseV4()
    instance.exists = randomize("bool")
    instance.user_id = randomize("uid")
    return instance
def create_model_validation_detail_example() -> ModelValidationDetail:
    instance = ModelValidationDetail()
    instance.allow_digit = randomize("bool")
    instance.allow_letter = randomize("bool")
    instance.allow_space = randomize("bool")
    instance.allow_unicode = randomize("bool")
    instance.description = [create_accountcommon_input_validation_description_example()]
    instance.is_custom_regex = randomize("bool")
    instance.letter_case = randomize()
    instance.max_length = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
    instance.min_char_type = randomize("int", min_val=1, max_val=1000)
    instance.min_length = randomize("int", min_val=1, max_val=1000)
    instance.regex = randomize()
    instance.special_character_location = randomize()
    instance.special_characters = [randomize()]
    return instance
def create_model_validation_detail_public_example() -> ModelValidationDetailPublic:
    instance = ModelValidationDetailPublic()
    instance.allow_digit = randomize("bool")
    instance.allow_letter = randomize("bool")
    instance.allow_space = randomize("bool")
    instance.allow_unicode = randomize("bool")
    instance.description = create_accountcommon_input_validation_description_example()
    instance.is_custom_regex = randomize("bool")
    instance.letter_case = randomize()
    instance.max_length = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
    instance.min_char_type = randomize("int", min_val=1, max_val=1000)
    instance.min_length = randomize("int", min_val=1, max_val=1000)
    instance.regex = randomize()
    instance.special_character_location = randomize()
    instance.special_characters = [randomize()]
    return instance
def create_model_verification_code_response_example() -> ModelVerificationCodeResponse:
    instance = ModelVerificationCodeResponse()
    instance.account_registration = randomize()
    instance.account_upgrade = randomize()
    instance.password_reset = randomize()
    instance.update_email = randomize()
    return instance
def create_model_verify_registration_code_example() -> ModelVerifyRegistrationCode:
    instance = ModelVerifyRegistrationCode()
    instance.code = randomize()
    instance.email_address = randomize("email")
    return instance
def create_model_web_linking_response_example() -> ModelWebLinkingResponse:
    instance = ModelWebLinkingResponse()
    instance.third_party_url = randomize("url")
    return instance
def create_oauthapi_revocation_list_example() -> OauthapiRevocationList:
    instance = OauthapiRevocationList()
    instance.revoked_tokens = create_bloom_filter_json_example()
    instance.revoked_users = [create_oauthcommon_user_revocation_list_record_example()]
    return instance
def create_oauthcommon_jwk_key_example() -> OauthcommonJWKKey:
    instance = OauthcommonJWKKey()
    instance.kty = randomize()
    instance.alg = randomize()
    instance.e = randomize()
    instance.kid = randomize()
    instance.n = randomize()
    instance.use = randomize()
    return instance
def create_oauthcommon_jwk_set_example() -> OauthcommonJWKSet:
    instance = OauthcommonJWKSet()
    instance.keys = [create_oauthcommon_jwk_key_example()]
    return instance
def create_oauthcommon_user_revocation_list_record_example() -> OauthcommonUserRevocationListRecord:
    instance = OauthcommonUserRevocationListRecord()
    instance.id_ = randomize()
    instance.revoked_at = randomize("date")
    return instance
def create_oauthmodel_country_location_response_example() -> OauthmodelCountryLocationResponse:
    instance = OauthmodelCountryLocationResponse()
    instance.city = randomize()
    instance.country_code = randomize()
    instance.country_name = randomize()
    instance.state = randomize()
    return instance
def create_oauthmodel_error_response_example() -> OauthmodelErrorResponse:
    instance = OauthmodelErrorResponse()
    instance.error = randomize()
    instance.client_id = randomize("uid")
    instance.default_factor = randomize()
    instance.error_description = randomize()
    instance.error_uri = randomize()
    instance.factors = [randomize()]
    instance.linking_token = randomize()
    instance.mfa_token = randomize()
    instance.platform_id = randomize()
    return instance
def create_oauthmodel_token_introspect_response_example() -> OauthmodelTokenIntrospectResponse:
    instance = OauthmodelTokenIntrospectResponse()
    instance.active = randomize("bool")
    instance.aud = randomize()
    instance.client_id = randomize("uid")
    instance.exp = randomize("int", min_val=1, max_val=1000)
    instance.iat = randomize("int", min_val=1, max_val=1000)
    instance.scope = randomize()
    instance.sub = randomize()
    return instance
def create_oauthmodel_token_response_example() -> OauthmodelTokenResponse:
    instance = OauthmodelTokenResponse()
    instance.access_token = randomize()
    instance.bans = [create_accountcommon_jwt_ban_v3_example()]
    instance.display_name = randomize("slug")
    instance.expires_in = randomize("int", min_val=1, max_val=1000)
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.permissions = [create_accountcommon_permission_example()]
    instance.refresh_token = randomize()
    instance.roles = [randomize()]
    instance.token_type = randomize()
    instance.user_id = randomize("uid")
    instance.is_comply = randomize("bool")
    instance.jflgs = randomize("int", min_val=1, max_val=1000)
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    instance.refresh_expires_in = randomize("int", min_val=1, max_val=1000)
    return instance
def create_oauthmodel_token_response_v3_example() -> OauthmodelTokenResponseV3:
    instance = OauthmodelTokenResponseV3()
    instance.access_token = randomize()
    instance.bans = [create_accountcommon_jwt_ban_v3_example()]
    instance.display_name = randomize("slug")
    instance.expires_in = randomize("int", min_val=1, max_val=1000)
    instance.namespace = randomize("slug")
    instance.namespace_roles = [create_accountcommon_namespace_role_example()]
    instance.permissions = [create_accountcommon_permission_v3_example()]
    instance.refresh_expires_in = randomize("int", min_val=1, max_val=1000)
    instance.refresh_token = randomize()
    instance.roles = [randomize()]
    instance.scope = randomize()
    instance.token_type = randomize()
    instance.user_id = randomize("uid")
    instance.xuid = randomize()
    instance.is_comply = randomize("bool")
    instance.jflgs = randomize("int", min_val=1, max_val=1000)
    instance.platform_id = randomize()
    instance.platform_user_id = randomize()
    return instance
def create_oauthmodel_token_third_party_response_example() -> OauthmodelTokenThirdPartyResponse:
    instance = OauthmodelTokenThirdPartyResponse()
    instance.platform_token = randomize()
    instance.sand_box_id = randomize()
    return instance
def create_rest_error_response_example() -> RestErrorResponse:
    instance = RestErrorResponse()
    instance.error_code = randomize("int", min_val=1, max_val=1000)
    instance.error_message = randomize()
    instance.message_variables = create_accountcommon_conflicted_user_platform_accounts_example()
    return instance
def create_restapi_error_response_example() -> RestapiErrorResponse:
    instance = RestapiErrorResponse()
    instance.message = randomize()
    instance.code = randomize("int", min_val=1, max_val=1000)
    return instance
def create_validation_example() -> Validation:
    instance = Validation()
    instance.allow_digit = randomize("bool")
    instance.allow_letter = randomize("bool")
    instance.allow_space = randomize("bool")
    instance.allow_unicode = randomize("bool")
    instance.description = [create_validation_description_example()]
    instance.is_custom_regex = randomize("bool")
    instance.letter_case = randomize()
    instance.max_length = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_alpha_num = randomize("int", min_val=1, max_val=1000)
    instance.max_repeating_special_character = randomize("int", min_val=1, max_val=1000)
    instance.min_char_type = randomize("int", min_val=1, max_val=1000)
    instance.min_length = randomize("int", min_val=1, max_val=1000)
    instance.regex = randomize()
    instance.special_character_location = randomize()
    instance.special_characters = [randomize()]
    return instance
def create_validation_description_example() -> ValidationDescription:
    instance = ValidationDescription()
    instance.language = randomize()
    instance.message = [randomize()]
    return instance
import os
import gzip
import logging
import numpy as np
import unidecode
from transformers import AutoTokenizer
from probing.data.base_data import BaseDataset, DataFields
class WLSTMFields(DataFields):
    """Field schema for word-level LSTM probing samples.
    `input`/`input_len` alias the probe target so generic training code can
    address every dataset uniformly.
    """
    _fields = (
        'probe_target', 'label', 'probe_target_len', 'target_idx',
        'raw_idx', 'raw_target', 'raw_sentence',)
    _alias = {
        'input': 'probe_target',
        'input_len': 'probe_target_len',
    }
    # Fields that require vocabulary lookup / sequence padding during to_idx.
    needs_vocab = ('probe_target', 'label')
    needs_padding = ('probe_target', )
class Word2vecProberFields(DataFields):
    """Field schema for probing static word embeddings (word2vec/fastText)."""
    _fields = (
        'sentence', 'probe_target', 'probe_target_idx', 'label')
    _alias = {
        'input': 'probe_target',
    }
    # Only labels need indexing; inputs become raw embedding vectors.
    needs_vocab = ('label',)
class TokenInSequenceProberFields(DataFields):
    """Field schema for probing one token inside a subword-tokenized sentence."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx', 'label',
        'subword_tokens', 'input_len', 'probe_target', 'token_starts',
        'probe_target_idx',
    )
    _alias = {
        'input': 'subword_tokens'
    }
    needs_vocab = ('subword_tokens', 'label')
    needs_padding = ('subword_tokens', )
    # BOS/EOS constants are wrapped around the subword sequence.
    needs_constants = ('subword_tokens', )
class SLSTMFields(DataFields):
    """Field schema for sentence-level LSTM probing (character or subword input)."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx',
        'input', 'input_len', 'target_idx', 'label',
    )
    needs_vocab = ('input', 'label', )
    # BOS/EOS constants are wrapped around the input sequence.
    needs_constants = ('input', )
    needs_padding = ('input', )
class SequenceClassificationWithSubwordsDataFields(DataFields):
    """Field schema for per-token sequence labeling over subword tokenization.
    `token_starts` records, per sentence, the subword index at which each
    original token begins.
    """
    _fields = (
        'raw_sentence', 'labels',
        'sentence_len', 'tokens', 'sentence_subword_len', 'token_starts',
    )
    _alias = {
        'input': 'tokens',
        'input_len': 'sentence_subword_len',
        'label': 'labels',
    }
    needs_vocab = ('tokens', 'labels')
    needs_padding = ('tokens', )
    # BOS/EOS constants are wrapped around the subword sequence.
    needs_constants = ('tokens', )
class Embedding:
    """Word-embedding matrix loaded from a word2vec/fastText-style text file.

    Each line is "<word> <v1> <v2> ...".  A header line with exactly two
    fields (vocab size and dimension) is skipped.  Lookups of unknown words
    fall back to the first stored vector.
    """
    def __init__(self, embedding_file, filter=None):
        """Load embeddings from `embedding_file` (gzip-compressed when the
        name ends in .gz), keeping only words contained in `filter` if given.
        """
        # `filter` shadows the builtin but the name is part of the public
        # interface (callers pass filter=...), so it is kept.
        self.filter_ = filter
        opener = gzip.open if embedding_file.endswith('.gz') else open
        # Embedding files are conventionally UTF-8; do not rely on the
        # platform default encoding (which broke loading on some locales).
        with opener(embedding_file, 'rt', encoding='utf-8') as f:
            self.load_stream(f)
    def load_stream(self, stream):
        """Populate self.vocab (word -> row) and self.mtx from an open stream."""
        self.mtx = []
        self.vocab = {}
        for line in stream:
            fd = line.strip().split(" ")
            if len(fd) == 2:
                # word2vec-style "<count> <dim>" header line: skip it.
                continue
            word = fd[0]
            if self.filter_ and word not in self.filter_:
                continue
            self.vocab[word] = len(self.mtx)
            self.mtx.append(list(map(float, fd[1:])))
        self.mtx = np.array(self.mtx)
    def __len__(self):
        return self.mtx.shape[0]
    def __getitem__(self, key):
        # Unknown words fall back to the first row rather than raising.
        if key not in self.vocab:
            return self.mtx[0]
        return self.mtx[self.vocab[key]]
    @property
    def embedding_dim(self):
        return self.mtx.shape[1]
class Word2vecProberDataset(BaseDataset):
    """Probing dataset over static word embeddings (fastText-style).
    Samples are TSV lines: sentence, target word, target index, optional label.
    """
    datafield_class = Word2vecProberFields
    def to_idx(self):
        """Vectorize: map each probe target to its embedding vector and each
        label to its vocabulary id (None for unlabeled samples)."""
        # Restrict the (large) embedding file to words actually present.
        vocab = set(r.probe_target for r in self.raw)
        if self.config.embedding == 'discover':
            # Infer the language from the train-file path layout
            # (.../<language>/<file>) and load
            # $HOME/resources/fasttext/<language>/common.vec.
            language = self.config.train_file.split("/")[-2]
            emb_fn = os.path.join(os.environ['HOME'], 'resources',
                                  'fasttext', language, 'common.vec')
            # Cache the resolved path back into the config so later splits
            # reuse it instead of re-discovering.
            self.config.embedding = emb_fn
        else:
            emb_fn = self.config.embedding
        self.embedding = Embedding(emb_fn, filter=vocab)
        self.embedding_size = self.embedding.embedding_dim
        word_vecs = []
        labels = []
        for r in self.raw:
            word_vecs.append(self.embedding[r.probe_target])
            if r.label:
                labels.append(self.vocabs.label[r.label])
            else:
                # Unlabeled sample (e.g. inference-time input).
                labels.append(None)
        self.mtx = self.datafield_class(
            probe_target=word_vecs,
            label=labels
        )
    def extract_sample_from_line(self, line):
        """Parse one TSV line into a sample; the label column is optional."""
        fd = line.rstrip("\n").split("\t")
        sent, target, idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        return self.datafield_class(
            sentence=sent,
            probe_target=target,
            probe_target_idx=int(idx),
            label=label
        )
    def print_sample(self, sample, stream):
        # Emit in the same TSV layout that extract_sample_from_line reads.
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.sentence, sample.probe_target,
            sample.probe_target_idx, sample.label
        ))
    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class WLSTMDataset(BaseDataset):
    """Word-level probing dataset: the target word is split into characters
    (or into subwords when an external tokenizer is configured) for an LSTM.
    """
    datafield_class = WLSTMFields
    def __init__(self, config, stream_or_file, **kwargs):
        if config.external_tokenizer:
            # NOTE(review): the flag tested is `external_tokenizer` but the
            # tokenizer is loaded from `model_name` (SLSTMDataset loads from
            # `external_tokenizer` instead) -- confirm this asymmetry is intended.
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)
    def extract_sample_from_line(self, line):
        """Parse a TSV line (sentence, target, index[, label]) into a sample."""
        fd = line.rstrip("\n").split("\t")
        if len(fd) > 3:
            sent, target, idx, label = fd[:4]
        else:
            sent, target, idx = fd[:3]
            label = None
        idx = int(idx)
        if self.tokenizer:
            tokens = self.tokenizer.tokenize(target)
        else:
            # Default: character-level tokenization of the target word.
            tokens = list(target)
        # Probe either the first or the last token of the target word.
        if self.config.probe_first:
            target_idx = 0
        else:
            target_idx = len(tokens) - 1
        return self.datafield_class(
            raw_sentence=sent,
            probe_target=tokens,
            target_idx=target_idx,
            raw_idx=idx,
            raw_target=target,
            input_len=len(tokens),
            label=label,
        )
    def print_sample(self, sample, stream):
        # Emit in the same TSV layout that extract_sample_from_line reads.
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))
    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class SLSTMDataset(BaseDataset):
    """Sentence-level probing dataset for an LSTM, with character-level input
    by default or subword input when `config.external_tokenizer` is set.
    """
    datafield_class = SLSTMFields
    def __init__(self, config, stream_or_file, **kwargs):
        if config.external_tokenizer:
            lower = 'uncased' in config.external_tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.external_tokenizer, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)
    def extract_sample_from_line(self, line):
        """Parse a TSV line (sentence, target, word index[, label]) and locate
        the probe position inside the tokenized input sequence."""
        fd = line.rstrip("\n").split("\t")
        raw_sent, raw_target, raw_idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        raw_idx = int(raw_idx)
        if self.tokenizer:
            # Subword input: target_idx is the first (probe_first) or last
            # subword of word number raw_idx.
            # NOTE(review): if raw_idx >= len(words), target_idx is never
            # assigned and this raises UnboundLocalError -- confirm inputs
            # are always validated upstream.
            words = raw_sent.split(' ')
            subwords = []
            for idx, word in enumerate(words):
                if self.config.probe_first:
                    if idx == raw_idx:
                        target_idx = len(subwords)
                    subwords.extend(self.tokenizer.tokenize(word))
                else:
                    subwords.extend(self.tokenizer.tokenize(word))
                    if idx == raw_idx:
                        target_idx = len(subwords) - 1
            input = subwords
        else:
            # Character input: compute the character offset of the target.
            # The `+ raw_idx` term accounts for the single spaces between
            # words (assumes words are separated by exactly one space).
            input = list(raw_sent)
            words = raw_sent.split(' ')
            if self.config.probe_first:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx
            else:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx + len(raw_target) - 1
        return self.datafield_class(
            raw_sentence=raw_sent,
            raw_target=raw_target,
            raw_idx=raw_idx,
            input=input,
            input_len=len(input),
            target_idx=target_idx,
            label=label
        )
    def to_idx(self):
        """Vectorize, then shift indices for the BOS/EOS symbols that wrap
        the input: +1 to the probe position, +2 to the sequence length."""
        super().to_idx()
        self.mtx.target_idx = np.array(self.mtx.target_idx) + 1
        self.mtx.input_len = np.array(self.mtx.input_len) + 2
    def decode(self, model_output):
        """Overwrite each raw sample's label with the argmax prediction."""
        for i, sample in enumerate(self.raw):
            output = np.argmax(model_output[i])
            self.raw[i].label = self.vocabs.label.inv_lookup(output)
    def print_sample(self, sample, stream):
        # Emit in the same TSV layout that extract_sample_from_line reads.
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))
class SequenceClassificationWithSubwords(BaseDataset):
    """Per-token sequence labeling dataset over a pretrained subword tokenizer.
    Input files are CoNLL-style: one "token[\\ttag]" per line, sentences
    separated by blank lines.
    """
    datafield_class = SequenceClassificationWithSubwordsDataFields
    def __init__(self, config, stream_or_file, max_samples=None,
                 share_vocabs_with=None, is_unlabeled=False):
        # Cache one tokenizer per model name in globals() so repeated dataset
        # instantiations (train/dev/test splits) reuse the same object.
        global_key = f'{config.model_name}_tokenizer'
        if global_key in globals():
            self.tokenizer = globals()[global_key]
        else:
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
            globals()[global_key] = self.tokenizer
        super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)
    def load_or_create_vocabs(self):
        """Create vocabs via the base class, then sync the token vocab with
        the pretrained tokenizer and freeze it (no new entries allowed)."""
        super().load_or_create_vocabs()
        self.vocabs.tokens.vocab = self.tokenizer.get_vocab()
        self.vocabs.tokens.pad_token = self.tokenizer.pad_token
        self.vocabs.tokens.bos_token = self.tokenizer.cls_token
        self.vocabs.tokens.eos_token = self.tokenizer.sep_token
        self.vocabs.tokens.unk_token = self.tokenizer.unk_token
        self.vocabs.tokens.frozen = True
    def load_stream(self, stream):
        """Read CoNLL-style input: accumulate lines until a blank line, then
        build one sentence sample; stop at max_samples if set."""
        self.raw = []
        sent = []
        for line in stream:
            if not line.strip():
                # Blank line = sentence boundary.
                if sent:
                    sample = self.create_sentence_from_lines(sent)
                    if not self.ignore_sample(sample):
                        self.raw.append(sample)
                    if self.max_samples and len(self.raw) >= self.max_samples:
                        break
                sent = []
            else:
                sent.append(line.rstrip("\n"))
        # Flush a trailing sentence not terminated by a blank line.
        if sent:
            if self.max_samples is None or len(self.raw) < self.max_samples:
                sample = self.create_sentence_from_lines(sent)
                if not self.ignore_sample(sample):
                    self.raw.append(sample)
    def create_sentence_from_lines(self, lines):
        """Tokenize each word into subwords, recording where each word starts.
        `token_starts` gets one entry per word plus a final end-of-sentence
        entry equal to the total subword count."""
        sent = []
        labels = []
        token_starts = []
        subwords = []
        for line in lines:
            fd = line.rstrip("\n").split("\t")
            sent.append(fd[0])
            if len(fd) > 1:
                labels.append(fd[1])
            token_starts.append(len(subwords))
            token = fd[0]
            if self.config.remove_diacritics:
                # Strip diacritics before subword tokenization.
                token = unidecode.unidecode(token)
            pieces = self.tokenizer.tokenize(token)
            subwords.extend(pieces)
        token_starts.append(len(subwords))
        if len(labels) == 0:
            # Fully unlabeled sentence.
            labels = None
        return self.datafield_class(
            raw_sentence=sent, labels=labels,
            sentence_len=len(sent),
            tokens=subwords,
            sentence_subword_len=len(subwords),
            token_starts=token_starts,
        )
    def ignore_sample(self, sample):
        # Drop overlong sentences (presumably to stay under the model's
        # 512-token limit with room for CLS/SEP -- TODO confirm).
        return sample.sentence_subword_len > 500
    def to_idx(self):
        """Vectorize, then shift token_starts for the BOS symbol and append
        a start entry of 0 (BOS) and an end entry past the last subword."""
        super().to_idx()
        prefixed_token_starts = []
        for ti, tokstarts in enumerate(self.mtx.token_starts):
            tokstarts = [t+1 for t in tokstarts]
            # NOTE(review): the sibling SentenceProberDataset.to_idx appends
            # len(...) - 1 here instead of len(...) + 1 -- one of the two is
            # likely off; confirm against BaseDataset.to_idx.
            token_starts = [0] + tokstarts + [len(self.mtx.tokens[ti]) + 1]
            prefixed_token_starts.append(token_starts)
        self.mtx.token_starts = prefixed_token_starts
    def batched_iter(self, batch_size):
        """Yield base-class batches with token_starts padded to a rectangular
        numpy array (pad sentinel 1000) and labels flattened per batch."""
        for batch in super().batched_iter(batch_size):
            padded_token_starts = []
            maxlen = max(len(t) for t in batch.token_starts)
            pad = 1000
            for sample in batch.token_starts:
                padded = sample + [pad] * (maxlen - len(sample))
                padded_token_starts.append(padded)
            batch.token_starts = np.array(padded_token_starts)
            if batch.labels:
                # Flatten per-sentence label lists into one array so the loss
                # can be computed over all tokens in the batch.
                batch.labels = np.concatenate(batch.labels)
            yield batch
    def decode(self, model_output):
        """Map flattened per-token predictions back onto the raw sentences."""
        offset = 0
        for si, sample in enumerate(self.raw):
            labels = []
            for ti in range(sample.sentence_len):
                label_idx = model_output[offset + ti].argmax()
                labels.append(self.vocabs.labels.inv_lookup(label_idx))
            sample.labels = labels
            offset += sample.sentence_len
    def print_sample(self, sample, stream):
        # One "token\tlabel" line per token, then a terminating newline.
        stream.write("\n".join(
            "{}\t{}".format(sample.raw_sentence[i], sample.labels[i])
            for i in range(sample.sentence_len)
        ))
        stream.write("\n")
    def print_raw(self, stream):
        # Blank line between sentences (CoNLL convention), none after the last.
        for si, sample in enumerate(self.raw):
            self.print_sample(sample, stream)
            if si < len(self.raw) - 1:
                stream.write("\n")
class SentenceProberDataset(BaseDataset):
datafield_class = TokenInSequenceProberFields
def __init__(self, config, stream_or_file, max_samples=None,
share_vocabs_with=None, is_unlabeled=False):
global_key = f'{config.model_name}_tokenizer'
if global_key in globals():
self.tokenizer = globals()[global_key]
else:
lower = 'uncased' in config.model_name
self.tokenizer = AutoTokenizer.from_pretrained(
config.model_name, do_lower_case=lower)
globals()[global_key] = self.tokenizer
self.MASK = self.tokenizer.mask_token
self.mask_positions = set(config.mask_positions)
if config.use_character_tokenization:
if not config.model_name.startswith('bert-'):
raise ValueError("Character tokenization is only "
"supported for BERT models.")
logging.info("Using character tokenization.")
super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)
def load_or_create_vocabs(self):
super().load_or_create_vocabs()
self.vocabs.subword_tokens.vocab = self.tokenizer.get_vocab()
self.vocabs.subword_tokens.pad_token = self.tokenizer.pad_token
self.vocabs.subword_tokens.bos_token = self.tokenizer.cls_token
self.vocabs.subword_tokens.eos_token = self.tokenizer.sep_token
self.vocabs.subword_tokens.unk_token = self.tokenizer.unk_token
self.vocabs.subword_tokens.frozen = True
def to_idx(self):
super().to_idx()
prefixed_token_starts = []
for ti, tokstarts in enumerate(self.mtx.token_starts):
tokstarts = [t+1 for t in tokstarts]
token_starts = [0] + tokstarts + [len(self.mtx.subword_tokens[ti]) - 1]
prefixed_token_starts.append(token_starts)
self.mtx.token_starts = prefixed_token_starts
self.mtx.probe_target_idx = np.array(self.mtx.probe_target_idx) + 1
self.mtx.input_len = np.array(self.mtx.input_len) + 2
def batched_iter(self, batch_size):
for batch in super().batched_iter(batch_size):
padded_token_starts = []
maxlen = max(len(t) for t in batch.token_starts)
pad = 1000
for sample in batch.token_starts:
padded = sample + [pad] * (maxlen - len(sample))
padded_token_starts.append(padded)
batch.token_starts = np.array(padded_token_starts)
yield batch
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
raw_sent, raw_target, raw_idx = fd[:3]
if len(fd) > 3:
label = fd[3]
else:
label = None
raw_idx = int(raw_idx)
# Only include the target from the sentence.
if self.config.target_only:
if self.config.remove_diacritics:
target = unidecode.unidecode(raw_target)
else:
target = raw_target
tokenized = [self.tokenizer.tokenize(target)]
target_idx = 0
# Build a list-of-lists from the tokenized words.
# This allows shuffling it later.
else:
tokenized = []
for ti, token in enumerate(raw_sent.split(" ")):
if ti - raw_idx in self.mask_positions:
pieces = [self.MASK]
else:
if self.config.remove_diacritics:
token = unidecode.unidecode(token)
if self.config.use_character_tokenization == 'full':
pieces = [token[0]]
pieces.extend(f'##{c}' for c in token[1:])
elif self.config.use_character_tokenization == 'target_only':
if ti == raw_idx:
pieces = [token[0]]
pieces.extend(f'##{c}' for c in token[1:])
else:
pieces = self.tokenizer.tokenize(token)
else:
pieces = self.tokenizer.tokenize(token)
tokenized.append(pieces)
# Add [SEP] token start.
# Perform BOW.
if self.config.bow:
all_idx = np.arange(len(tokenized))
np.random.shuffle(all_idx)
tokenized = [tokenized[i] for i in all_idx]
target_map = np.argsort(all_idx)
target_idx = target_map[raw_idx]
else:
target_idx = raw_idx
merged = []
token_starts = []
for pieces in tokenized:
token_starts.append(len(merged))
merged.extend(pieces)
return self.datafield_class(
raw_sentence=raw_sent,
raw_target=raw_target,
raw_idx=raw_idx,
probe_target_idx=target_idx,
subword_tokens=merged,
input_len=len(merged),
token_starts=token_starts,
label=label,
)
def ignore_sample(self, sample):
return False
if self.config.exclude_short_sentences is False or self.is_unlabeled:
return False
sent_len = len(sample.raw_sentence.split(" "))
for pi in self.mask_positions:
if sample.raw_idx + pi < 0:
return True
if sample.raw_idx + pi >= sent_len:
return True
return False
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
)) | probing/data/sentence_probe_data.py |
import os
import gzip
import logging
import numpy as np
import unidecode
from transformers import AutoTokenizer
from probing.data.base_data import BaseDataset, DataFields
class WLSTMFields(DataFields):
    """Fields for WLSTMDataset: the probe input is the tokenized target
    word. 'input'/'input_len' alias the probe_target fields so generic
    training code can address them uniformly across field classes."""
    _fields = (
        'probe_target', 'label', 'probe_target_len', 'target_idx',
        'raw_idx', 'raw_target', 'raw_sentence',)
    _alias = {
        'input': 'probe_target',
        'input_len': 'probe_target_len',
    }
    # Fields that get a vocabulary, and fields padded to batch length.
    needs_vocab = ('probe_target', 'label')
    needs_padding = ('probe_target', )
class Word2vecProberFields(DataFields):
    """Fields for Word2vecProberDataset: the input is a static word
    vector, so only the label needs a vocabulary."""
    _fields = (
        'sentence', 'probe_target', 'probe_target_idx', 'label')
    _alias = {
        'input': 'probe_target',
    }
    needs_vocab = ('label',)
class TokenInSequenceProberFields(DataFields):
    """Fields for SentenceProberDataset: a subword-tokenized sentence plus
    the index of the probed token within it."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx', 'label',
        'subword_tokens', 'input_len', 'probe_target', 'token_starts',
        'probe_target_idx',
    )
    _alias = {
        'input': 'subword_tokens'
    }
    needs_vocab = ('subword_tokens', 'label')
    needs_padding = ('subword_tokens', )
    # Sequence constants are added around subword_tokens during
    # vectorization -- presumably BOS/EOS, given that
    # SentenceProberDataset.to_idx shifts indices by 1 and lengths by 2.
    needs_constants = ('subword_tokens', )
class SLSTMFields(DataFields):
    """Fields for SLSTMDataset: character- or subword-level sentence input
    with the position (target_idx) of the probed token."""
    _fields = (
        'raw_sentence', 'raw_target', 'raw_idx',
        'input', 'input_len', 'target_idx', 'label',
    )
    needs_vocab = ('input', 'label', )
    needs_constants = ('input', )
    needs_padding = ('input', )
class SequenceClassificationWithSubwordsDataFields(DataFields):
    """Fields for SequenceClassificationWithSubwords: one label per word,
    subword tokens as model input, and per-word subword start offsets."""
    _fields = (
        'raw_sentence', 'labels',
        'sentence_len', 'tokens', 'sentence_subword_len', 'token_starts',
    )
    _alias = {
        'input': 'tokens',
        'input_len': 'sentence_subword_len',
        'label': 'labels',
    }
    needs_vocab = ('tokens', 'labels')
    needs_padding = ('tokens', )
    needs_constants = ('tokens', )
class Embedding:
    """In-memory word-embedding table loaded from a (possibly gzipped)
    whitespace-separated text file in word2vec/fasttext format."""

    def __init__(self, embedding_file, filter=None):
        self.filter_ = filter
        opener = gzip.open if embedding_file.endswith('.gz') else open
        with opener(embedding_file, 'rt') as stream:
            self.load_stream(stream)

    def load_stream(self, stream):
        """Parse vectors from *stream*; a two-field line (the
        "<count> <dim>" header) is skipped, as is any word not in the
        optional filter set."""
        vocab = {}
        rows = []
        for line in stream:
            fields = line.strip().split(" ")
            if len(fields) == 2:
                continue
            word = fields[0]
            if self.filter_ and word not in self.filter_:
                continue
            vocab[word] = len(rows)
            rows.append([float(value) for value in fields[1:]])
        self.vocab = vocab
        self.mtx = np.array(rows)

    def __len__(self):
        return self.mtx.shape[0]

    def __getitem__(self, key):
        # Unknown words fall back to the first row of the matrix.
        row = self.vocab.get(key)
        return self.mtx[0] if row is None else self.mtx[row]

    @property
    def embedding_dim(self):
        return self.mtx.shape[1]
class Word2vecProberDataset(BaseDataset):
    """Probing dataset that represents each target word by a static
    (fasttext-style) word vector instead of contextual subwords."""
    datafield_class = Word2vecProberFields

    def to_idx(self):
        """Build the input matrix: one embedding row per sample plus label ids."""
        vocab = set(r.probe_target for r in self.raw)
        if self.config.embedding == 'discover':
            # Infer the language from the train-file path:
            # .../<language>/<file> -- TODO confirm this layout holds.
            language = self.config.train_file.split("/")[-2]
            emb_fn = os.path.join(os.environ['HOME'], 'resources',
                                  'fasttext', language, 'common.vec')
            self.config.embedding = emb_fn
        else:
            emb_fn = self.config.embedding
        # Only load vectors for words that actually occur in the data.
        self.embedding = Embedding(emb_fn, filter=vocab)
        self.embedding_size = self.embedding.embedding_dim
        word_vecs = []
        labels = []
        for r in self.raw:
            word_vecs.append(self.embedding[r.probe_target])
            # Explicit None check: a falsy label string (e.g. "0") is
            # still a valid label and must be indexed. (`if r.label:`
            # silently mapped such labels to None.)
            if r.label is not None:
                labels.append(self.vocabs.label[r.label])
            else:
                labels.append(None)
        self.mtx = self.datafield_class(
            probe_target=word_vecs,
            label=labels
        )

    def extract_sample_from_line(self, line):
        """Parse one TSV line: sentence, target word, target index[, label]."""
        fd = line.rstrip("\n").split("\t")
        sent, target, idx = fd[:3]
        label = fd[3] if len(fd) > 3 else None
        return self.datafield_class(
            sentence=sent,
            probe_target=target,
            probe_target_idx=int(idx),
            label=label
        )

    def print_sample(self, sample, stream):
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.sentence, sample.probe_target,
            sample.probe_target_idx, sample.label
        ))

    def decode(self, model_output):
        """Write predicted labels back onto the raw samples."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class WLSTMDataset(BaseDataset):
    """Word-level LSTM probe: the input is only the target word, split
    into subwords (external tokenizer) or characters."""
    datafield_class = WLSTMFields

    def __init__(self, config, stream_or_file, **kwargs):
        if config.external_tokenizer:
            # Load the tokenizer that was actually configured. This
            # previously read config.model_name, silently ignoring
            # config.external_tokenizer; SLSTMDataset already uses
            # external_tokenizer for both checks -- confirm no config
            # relies on the old behavior.
            lower = 'uncased' in config.external_tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.external_tokenizer, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)

    def extract_sample_from_line(self, line):
        """Parse one TSV line (sentence, target, index[, label]) and
        tokenize the target word."""
        fd = line.rstrip("\n").split("\t")
        if len(fd) > 3:
            sent, target, idx, label = fd[:4]
        else:
            sent, target, idx = fd[:3]
            label = None
        idx = int(idx)
        if self.tokenizer:
            tokens = self.tokenizer.tokenize(target)
        else:
            # No external tokenizer: fall back to characters.
            tokens = list(target)
        # Probe either the first or the last token of the target word.
        if self.config.probe_first:
            target_idx = 0
        else:
            target_idx = len(tokens) - 1
        return self.datafield_class(
            raw_sentence=sent,
            probe_target=tokens,
            target_idx=target_idx,
            raw_idx=idx,
            raw_target=target,
            input_len=len(tokens),
            label=label,
        )

    def print_sample(self, sample, stream):
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))

    def decode(self, model_output):
        """Write predicted labels back onto the raw samples."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)
class SLSTMDataset(BaseDataset):
    """Sentence-level LSTM probe: the input is the whole sentence
    (characters, or subwords from an external tokenizer) and the probe
    reads the hidden state at the target word's position."""
    datafield_class = SLSTMFields

    def __init__(self, config, stream_or_file, **kwargs):
        # config.external_tokenizer holds a pretrained tokenizer name;
        # when unset, the sentence is split into characters instead.
        if config.external_tokenizer:
            lower = 'uncased' in config.external_tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.external_tokenizer, do_lower_case=lower)
        else:
            self.tokenizer = None
        super().__init__(config, stream_or_file, **kwargs)

    def extract_sample_from_line(self, line):
        """Parse one TSV line (sentence, target, index[, label]) and
        compute target_idx, the position within the input to probe."""
        fd = line.rstrip("\n").split("\t")
        raw_sent, raw_target, raw_idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        raw_idx = int(raw_idx)
        if self.tokenizer:
            words = raw_sent.split(' ')
            subwords = []
            for idx, word in enumerate(words):
                if self.config.probe_first:
                    # Probe the first subword of the target word.
                    if idx == raw_idx:
                        target_idx = len(subwords)
                    subwords.extend(self.tokenizer.tokenize(word))
                else:
                    # Probe the last subword of the target word.
                    subwords.extend(self.tokenizer.tokenize(word))
                    if idx == raw_idx:
                        target_idx = len(subwords) - 1
            input = subwords
        else:
            # Character-level input keeps the spaces, so a word's offset
            # is the total length of preceding words plus one space per
            # preceding word (raw_idx spaces).
            input = list(raw_sent)
            words = raw_sent.split(' ')
            if self.config.probe_first:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx
            else:
                target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx + len(raw_target) - 1
        return self.datafield_class(
            raw_sentence=raw_sent,
            raw_target=raw_target,
            raw_idx=raw_idx,
            input=input,
            input_len=len(input),
            target_idx=target_idx,
            label=label
        )

    def to_idx(self):
        """Shift indices for the sequence constants added in vectorization."""
        super().to_idx()
        # +1 on the probed position for the prepended start symbol, +2 on
        # length for the start/end symbols (input is in needs_constants).
        self.mtx.target_idx = np.array(self.mtx.target_idx) + 1
        self.mtx.input_len = np.array(self.mtx.input_len) + 2

    def decode(self, model_output):
        """Write predicted labels back onto the raw samples."""
        for i, sample in enumerate(self.raw):
            output = np.argmax(model_output[i])
            self.raw[i].label = self.vocabs.label.inv_lookup(output)

    def print_sample(self, sample, stream):
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))
class SequenceClassificationWithSubwords(BaseDataset):
    """Word-level sequence labeling over a pretrained subword tokenizer.

    Sentences are read as CoNLL-style blocks (one "token<TAB>label" line
    per word, blocks separated by empty lines). Each word is split into
    subwords and the subword offset where every word starts is recorded so
    per-word predictions can be gathered from subword-level model output.
    """
    datafield_class = SequenceClassificationWithSubwordsDataFields

    def __init__(self, config, stream_or_file, max_samples=None,
                 share_vocabs_with=None, is_unlabeled=False):
        # Cache the tokenizer in globals() so repeated dataset
        # instantiations (train/dev/test splits) reuse one instance.
        global_key = f'{config.model_name}_tokenizer'
        if global_key in globals():
            self.tokenizer = globals()[global_key]
        else:
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
            globals()[global_key] = self.tokenizer
        super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)

    def load_or_create_vocabs(self):
        """Freeze the token vocabulary to the pretrained tokenizer's vocab."""
        super().load_or_create_vocabs()
        self.vocabs.tokens.vocab = self.tokenizer.get_vocab()
        self.vocabs.tokens.pad_token = self.tokenizer.pad_token
        self.vocabs.tokens.bos_token = self.tokenizer.cls_token
        self.vocabs.tokens.eos_token = self.tokenizer.sep_token
        self.vocabs.tokens.unk_token = self.tokenizer.unk_token
        self.vocabs.tokens.frozen = True

    def load_stream(self, stream):
        """Read empty-line-separated sentence blocks from *stream*."""
        self.raw = []
        sent = []
        for line in stream:
            if not line.strip():
                if sent:
                    sample = self.create_sentence_from_lines(sent)
                    if not self.ignore_sample(sample):
                        self.raw.append(sample)
                    if self.max_samples and len(self.raw) >= self.max_samples:
                        break
                sent = []
            else:
                sent.append(line.rstrip("\n"))
        # Flush a trailing sentence that has no terminating empty line.
        if sent:
            if self.max_samples is None or len(self.raw) < self.max_samples:
                sample = self.create_sentence_from_lines(sent)
                if not self.ignore_sample(sample):
                    self.raw.append(sample)

    def create_sentence_from_lines(self, lines):
        """Build one sample from a block of "token<TAB>label" lines."""
        sent = []
        labels = []
        token_starts = []
        subwords = []
        for line in lines:
            fd = line.rstrip("\n").split("\t")
            sent.append(fd[0])
            if len(fd) > 1:
                labels.append(fd[1])
            # Subword offset at which this word starts.
            token_starts.append(len(subwords))
            token = fd[0]
            if self.config.remove_diacritics:
                token = unidecode.unidecode(token)
            pieces = self.tokenizer.tokenize(token)
            subwords.extend(pieces)
        # Sentinel: one past the last word's subwords.
        token_starts.append(len(subwords))
        if len(labels) == 0:
            labels = None
        return self.datafield_class(
            raw_sentence=sent, labels=labels,
            sentence_len=len(sent),
            tokens=subwords,
            sentence_subword_len=len(subwords),
            token_starts=token_starts,
        )

    def ignore_sample(self, sample):
        # Drop very long sentences (transformer input-length limit).
        return sample.sentence_subword_len > 500

    def to_idx(self):
        """Shift token_starts to account for constants added in vectorization."""
        super().to_idx()
        prefixed_token_starts = []
        for ti, tokstarts in enumerate(self.mtx.token_starts):
            # +1 for the prepended BOS; prepend position 0 (BOS) and
            # append a final position -- presumably the EOS slot; TODO
            # confirm whether mtx.tokens already includes BOS/EOS here.
            tokstarts = [t+1 for t in tokstarts]
            token_starts = [0] + tokstarts + [len(self.mtx.tokens[ti]) + 1]
            prefixed_token_starts.append(token_starts)
        self.mtx.token_starts = prefixed_token_starts

    def batched_iter(self, batch_size):
        """Yield batches with token_starts padded to a rectangular array."""
        for batch in super().batched_iter(batch_size):
            padded_token_starts = []
            maxlen = max(len(t) for t in batch.token_starts)
            # Pad value 1000: presumably an out-of-range position that
            # downstream gathering ignores -- verify against the model.
            pad = 1000
            for sample in batch.token_starts:
                padded = sample + [pad] * (maxlen - len(sample))
                padded_token_starts.append(padded)
            batch.token_starts = np.array(padded_token_starts)
            if batch.labels:
                # Flatten per-sentence labels to match decode()'s offsets.
                batch.labels = np.concatenate(batch.labels)
            yield batch

    def decode(self, model_output):
        """Map flat per-word model output back to labels on each sample."""
        offset = 0
        for si, sample in enumerate(self.raw):
            labels = []
            for ti in range(sample.sentence_len):
                label_idx = model_output[offset + ti].argmax()
                labels.append(self.vocabs.labels.inv_lookup(label_idx))
            sample.labels = labels
            offset += sample.sentence_len

    def print_sample(self, sample, stream):
        stream.write("\n".join(
            "{}\t{}".format(sample.raw_sentence[i], sample.labels[i])
            for i in range(sample.sentence_len)
        ))
        stream.write("\n")

    def print_raw(self, stream):
        for si, sample in enumerate(self.raw):
            self.print_sample(sample, stream)
            if si < len(self.raw) - 1:
                stream.write("\n")
class SentenceProberDataset(BaseDataset):
    """Probes a single token inside a subword-tokenized sentence.

    Input lines are TSV: sentence, target word, target index[, label].
    Supports masking positions relative to the target, BERT-style
    character tokenization ('##' continuation pieces), bag-of-words
    shuffling, and target-only inputs.
    """
    datafield_class = TokenInSequenceProberFields

    def __init__(self, config, stream_or_file, max_samples=None,
                 share_vocabs_with=None, is_unlabeled=False):
        # Cache the tokenizer in globals() so all splits share one instance.
        global_key = f'{config.model_name}_tokenizer'
        if global_key in globals():
            self.tokenizer = globals()[global_key]
        else:
            lower = 'uncased' in config.model_name
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name, do_lower_case=lower)
            globals()[global_key] = self.tokenizer
        self.MASK = self.tokenizer.mask_token
        # Offsets (relative to the target index) that get replaced by MASK.
        self.mask_positions = set(config.mask_positions)
        if config.use_character_tokenization:
            # The '##' continuation prefix used below is WordPiece-specific.
            if not config.model_name.startswith('bert-'):
                raise ValueError("Character tokenization is only "
                                 "supported for BERT models.")
            logging.info("Using character tokenization.")
        super().__init__(config, stream_or_file, max_samples, share_vocabs_with, is_unlabeled)

    def load_or_create_vocabs(self):
        """Freeze the subword vocabulary to the pretrained tokenizer's vocab."""
        super().load_or_create_vocabs()
        self.vocabs.subword_tokens.vocab = self.tokenizer.get_vocab()
        self.vocabs.subword_tokens.pad_token = self.tokenizer.pad_token
        self.vocabs.subword_tokens.bos_token = self.tokenizer.cls_token
        self.vocabs.subword_tokens.eos_token = self.tokenizer.sep_token
        self.vocabs.subword_tokens.unk_token = self.tokenizer.unk_token
        self.vocabs.subword_tokens.frozen = True

    def to_idx(self):
        """Shift indices to account for BOS/EOS added in vectorization."""
        super().to_idx()
        prefixed_token_starts = []
        for ti, tokstarts in enumerate(self.mtx.token_starts):
            # +1 for the prepended BOS; prepend position 0 (BOS) and
            # append the final position (assumed [SEP]) -- TODO confirm
            # subword_tokens already includes both constants here.
            tokstarts = [t + 1 for t in tokstarts]
            token_starts = [0] + tokstarts + [len(self.mtx.subword_tokens[ti]) - 1]
            prefixed_token_starts.append(token_starts)
        self.mtx.token_starts = prefixed_token_starts
        self.mtx.probe_target_idx = np.array(self.mtx.probe_target_idx) + 1
        self.mtx.input_len = np.array(self.mtx.input_len) + 2

    def batched_iter(self, batch_size):
        """Yield batches with token_starts padded to a rectangular array."""
        for batch in super().batched_iter(batch_size):
            padded_token_starts = []
            maxlen = max(len(t) for t in batch.token_starts)
            # Pad value 1000: presumably an out-of-range position that
            # downstream gathering ignores -- verify against the model.
            pad = 1000
            for sample in batch.token_starts:
                padded = sample + [pad] * (maxlen - len(sample))
                padded_token_starts.append(padded)
            batch.token_starts = np.array(padded_token_starts)
            yield batch

    def extract_sample_from_line(self, line):
        """Parse one TSV line and tokenize the sentence (or the target only)."""
        fd = line.rstrip("\n").split("\t")
        raw_sent, raw_target, raw_idx = fd[:3]
        if len(fd) > 3:
            label = fd[3]
        else:
            label = None
        raw_idx = int(raw_idx)
        if self.config.target_only:
            # Only include the target word in the input.
            if self.config.remove_diacritics:
                target = unidecode.unidecode(raw_target)
            else:
                target = raw_target
            tokenized = [self.tokenizer.tokenize(target)]
            target_idx = 0
        else:
            # Build a list-of-lists from the tokenized words.
            # This allows shuffling it later.
            tokenized = []
            for ti, token in enumerate(raw_sent.split(" ")):
                if ti - raw_idx in self.mask_positions:
                    pieces = [self.MASK]
                else:
                    if self.config.remove_diacritics:
                        token = unidecode.unidecode(token)
                    if self.config.use_character_tokenization == 'full':
                        # Character pieces with WordPiece continuations.
                        pieces = [token[0]]
                        pieces.extend(f'##{c}' for c in token[1:])
                    elif self.config.use_character_tokenization == 'target_only':
                        if ti == raw_idx:
                            pieces = [token[0]]
                            pieces.extend(f'##{c}' for c in token[1:])
                        else:
                            pieces = self.tokenizer.tokenize(token)
                    else:
                        pieces = self.tokenizer.tokenize(token)
                tokenized.append(pieces)
            # Perform BOW: shuffle word order, then track where the
            # target word ended up.
            if self.config.bow:
                all_idx = np.arange(len(tokenized))
                np.random.shuffle(all_idx)
                tokenized = [tokenized[i] for i in all_idx]
                target_map = np.argsort(all_idx)
                target_idx = target_map[raw_idx]
            else:
                target_idx = raw_idx
        # Flatten the per-word piece lists, recording each word's start.
        merged = []
        token_starts = []
        for pieces in tokenized:
            token_starts.append(len(merged))
            merged.extend(pieces)
        return self.datafield_class(
            raw_sentence=raw_sent,
            raw_target=raw_target,
            raw_idx=raw_idx,
            probe_target_idx=target_idx,
            subword_tokens=merged,
            input_len=len(merged),
            token_starts=token_starts,
            label=label,
        )

    def ignore_sample(self, sample):
        """Drop samples where a masked position would fall outside the
        sentence. (An unconditional `return False` previously shadowed
        this logic, making exclude_short_sentences a silent no-op.)"""
        if self.config.exclude_short_sentences is False or self.is_unlabeled:
            return False
        sent_len = len(sample.raw_sentence.split(" "))
        for pi in self.mask_positions:
            if sample.raw_idx + pi < 0:
                return True
            if sample.raw_idx + pi >= sent_len:
                return True
        return False

    def decode(self, model_output):
        """Write predicted labels back onto the raw samples."""
        for i, sample in enumerate(self.raw):
            output = model_output[i].argmax().item()
            sample.label = self.vocabs.label.inv_lookup(output)

    def print_sample(self, sample, stream):
        stream.write("{}\t{}\t{}\t{}\n".format(
            sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
        ))
from os.path import abspath
from os.path import dirname
from os.path import join
from glob import glob
import subprocess
from Bio import SeqIO
from click.testing import CliRunner
import click
import pandas as pd
import pytest
from click_demultiplex import cli
from click_demultiplex import commands
# Fixture paths: paired FASTQ reads plus the barcode table used by every test.
ROOT = abspath(dirname(__file__))
TEST_R1 = join(ROOT, 'data', 'test_R1.fastq')
TEST_R2 = join(ROOT, 'data', 'test_R2.fastq')
TEST_BARCODES = join(ROOT, 'data', 'test_barcodes.txt')
def test_cli(tmpdir):
    """Smoke-test the CLI entry point with the bundled fixtures."""
    args = [
        "--r1", TEST_R1,
        "--r2", TEST_R2,
        "--outdir", tmpdir.strpath,
        "--barcodes", TEST_BARCODES,
    ]
    runner = CliRunner()
    outcome = runner.invoke(cli.main, args)
    assert outcome.exit_code == 0
def test_defaults(tmpdir):
    """Demultiplex with default options and check every output artifact."""
    params = dict(
        barcodes_path=TEST_BARCODES,
        max_mismatches=1,
        not_trim=False,
        output_dir=tmpdir.strpath,
        overwrite=False,
        prefix='',
        r1_path=TEST_R1,
        r2_path=TEST_R2,
    )
    # NOTE(review): the positional call relies on dict insertion order
    # matching the demultiplex() signature -- confirm before reordering.
    commands.demultiplex(*params.values())
    assert_output(**params)
def test_trim(tmpdir):
    """With not_trim=True the barcode must stay on the output reads."""
    params = dict(
        barcodes_path=TEST_BARCODES,
        max_mismatches=1,
        not_trim=True,
        output_dir=tmpdir.strpath,
        overwrite=True,
        prefix='',
        r1_path=TEST_R1,
        r2_path=TEST_R2,
    )
    commands.demultiplex(*params.values())
    assert_output(**params)
def test_prefix(tmpdir):
    """A custom prefix must appear on every output file name."""
    params = dict(
        barcodes_path=TEST_BARCODES,
        max_mismatches=1,
        not_trim=True,
        output_dir=tmpdir.strpath,
        overwrite=True,
        prefix='my_weird_prefix',
        r1_path=TEST_R1,
        r2_path=TEST_R2,
    )
    commands.demultiplex(*params.values())
    assert_output(**params)
def test_max_mismatches(tmpdir):
    """Re-run with increasing mismatch tolerance; outputs stay consistent."""
    params = dict(
        barcodes_path=TEST_BARCODES,
        max_mismatches=0,
        not_trim=True,
        output_dir=tmpdir.strpath,
        overwrite=True,
        prefix='my_weird_prefix',
        r1_path=TEST_R1,
        r2_path=TEST_R2,
    )
    # Initial run at zero tolerance (output checked on the later runs).
    commands.demultiplex(*params.values())
    for mismatches in (1, 2, 3):
        params['max_mismatches'] = mismatches
        commands.demultiplex(*params.values())
        assert_output(**params)
def test_overwrite(tmpdir):
    """A repeat run needs overwrite=True; without it a UsageError is raised."""
    params = dict(
        barcodes_path=TEST_BARCODES,
        max_mismatches=1,
        not_trim=False,
        output_dir=tmpdir.strpath,
        overwrite=True,
        prefix='',
        r1_path=TEST_R1,
        r2_path=TEST_R2,
    )
    commands.demultiplex(*params.values())
    commands.demultiplex(*params.values())
    params['overwrite'] = False
    with pytest.raises(click.UsageError) as excinfo:
        commands.demultiplex(*params.values())
    assert 'pass --overwrite as an option' in excinfo.value.message
def assert_output(
        r1_path,
        r2_path,
        barcodes_path,
        output_dir,
        overwrite,
        prefix,
        not_trim,
        max_mismatches):
    """Validate every artifact produced by demultiplex().

    Checks that the stats file agrees with the per-barcode FASTQ outputs,
    that R1/R2 records stay paired, and that barcodes were trimmed from
    the reads unless not_trim is set.
    """
    # Length of an original (untrimmed) read, taken from the first record.
    multiplexed_r1 = SeqIO.parse(r1_path, 'fastq')
    original_sequence_length = len(next(multiplexed_r1))
    barcodes = commands.get_barcodes(barcodes_path)
    stats_file = join(output_dir, f'{prefix}result_stats.txt')
    output_files = glob(join(output_dir, '*.fastq'))
    # The stats table is TSV with one header line above the column row and
    # a summary footer line.
    stats = pd.read_csv(
        stats_file,
        sep='\t',
        skiprows=1,
        skipfooter=1,
        engine='python',
        index_col=False
    )
    stats_dict = stats.set_index('Name').to_dict('index')
    # One stats row and one R1/R2 file pair per barcode.
    assert len(barcodes.keys()) == len(stats)
    assert len(barcodes.keys()) * 2 == len(output_files)
    for name, barcode in barcodes.items():
        r1_filename = f'{prefix}{name}_R1.fastq'
        r2_filename = f'{prefix}{name}_R2.fastq'
        # Distinct names for the per-barcode outputs (the original code
        # shadowed the r1_path/r2_path parameters here).
        out_r1_path = join(output_dir, r1_filename)
        out_r2_path = join(output_dir, r2_filename)
        with open(out_r1_path, 'rt') as fr1, open(out_r2_path, 'rt') as fr2:
            records_r1 = list(SeqIO.parse(fr1, 'fastq'))
            records_r2 = list(SeqIO.parse(fr2, 'fastq'))
        assert len(records_r1) == len(records_r2)
        assert stats_dict[name]['Barcode'] == barcode
        assert stats_dict[name]['Count'] == len(records_r1)
        assert r1_filename in stats_dict[name]['Output R1 file']
        assert r2_filename in stats_dict[name]['Output R2 file']
        # The barcode is trimmed off both mates unless not_trim is set.
        expected_sequence_length = original_sequence_length - (
            0 if not_trim else len(barcode)
        )
        for index in range(len(records_r1)):
            assert records_r1[index].id == records_r2[index].id
            assert len(records_r1[index].seq) == expected_sequence_length
            assert len(records_r2[index].seq) == expected_sequence_length
    # Loop-invariant check, hoisted so it runs once instead of per barcode.
    assert_quantity_of_filtered_sequences(stats_file, max_mismatches)
def assert_quantity_of_filtered_sequences(stats_file, max_mismatches):
    """Check the summary footer's filtered-read count for a mismatch level.

    Reads the last line of the stats file directly instead of shelling out
    to `tail`, which keeps the test portable (no external binary needed).
    """
    with open(stats_file, 'rt') as f:
        result_line = f.readlines()[-1]
    results_filtered_count = int(result_line.split()[3])
    # Expected counts for max_mismatches = 0..6 with the bundled fixtures.
    expected_filtered_count = [35, 36, 60, 158, 250, 250, 250]
    assert expected_filtered_count[max_mismatches] == results_filtered_count
from os.path import abspath
from os.path import dirname
from os.path import join
from glob import glob
import subprocess
from Bio import SeqIO
from click.testing import CliRunner
import click
import pandas as pd
import pytest
from click_demultiplex import cli
from click_demultiplex import commands
ROOT = abspath(dirname(__file__))
TEST_R1 = join(ROOT, 'data', 'test_R1.fastq')
TEST_R2 = join(ROOT, 'data', 'test_R2.fastq')
TEST_BARCODES = join(ROOT, 'data', 'test_barcodes.txt')
def test_cli(tmpdir):
params = [
"--r1", TEST_R1,
"--r2", TEST_R2,
"--outdir", tmpdir.strpath,
"--barcodes", TEST_BARCODES,
]
result = CliRunner().invoke(cli.main, params)
assert result.exit_code == 0
def test_defaults(tmpdir):
params = {
'barcodes_path': TEST_BARCODES,
'max_mismatches': 1,
'not_trim': False,
'output_dir': tmpdir.strpath,
'overwrite': False,
'prefix': '',
'r1_path': TEST_R1,
'r2_path': TEST_R2,
}
commands.demultiplex(*params.values())
assert_output(**params)
def test_trim(tmpdir):
params = {
'barcodes_path': TEST_BARCODES,
'max_mismatches': 1,
'not_trim': True,
'output_dir': tmpdir.strpath,
'overwrite': True,
'prefix': '',
'r1_path': TEST_R1,
'r2_path': TEST_R2,
}
commands.demultiplex(*params.values())
assert_output(**params)
def test_prefix(tmpdir):
params = {
'barcodes_path': TEST_BARCODES,
'max_mismatches': 1,
'not_trim': True,
'output_dir': tmpdir.strpath,
'overwrite': True,
'prefix': 'my_weird_prefix',
'r1_path': TEST_R1,
'r2_path': TEST_R2,
}
commands.demultiplex(*params.values())
assert_output(**params)
def test_max_mismatches(tmpdir):
params = {
'barcodes_path': TEST_BARCODES,
'max_mismatches': 0,
'not_trim': True,
'output_dir': tmpdir.strpath,
'overwrite': True,
'prefix': 'my_weird_prefix',
'r1_path': TEST_R1,
'r2_path': TEST_R2,
}
commands.demultiplex(*params.values())
params['max_mismatches'] = 1
commands.demultiplex(*params.values())
assert_output(**params)
params['max_mismatches'] = 2
commands.demultiplex(*params.values())
assert_output(**params)
params['max_mismatches'] = 3
commands.demultiplex(*params.values())
assert_output(**params)
def test_overwrite(tmpdir):
params = {
'barcodes_path': TEST_BARCODES,
'max_mismatches': 1,
'not_trim': False,
'output_dir': tmpdir.strpath,
'overwrite': True,
'prefix': '',
'r1_path': TEST_R1,
'r2_path': TEST_R2,
}
commands.demultiplex(*params.values())
commands.demultiplex(*params.values())
params['overwrite'] = False
with pytest.raises(click.UsageError) as excinfo:
commands.demultiplex(*params.values())
assert 'pass --overwrite as an option' in excinfo.value.message
def assert_output(
r1_path,
r2_path,
barcodes_path,
output_dir,
overwrite,
prefix,
not_trim,
max_mismatches):
# Parse r1 and r2
multiplexed_r1 = SeqIO.parse(r1_path, 'fastq')
original_sequence_length = len(next(multiplexed_r1))
# Parse barcodes
barcodes = commands.get_barcodes(barcodes_path)
# Output files
stats_file = join(output_dir, f'{prefix}result_stats.txt')
output_files = glob(join(output_dir, '*.fastq'))
# Parse Result stats file
stats = pd.read_csv(
stats_file,
sep='\t',
skiprows=1,
skipfooter=1,
engine='python',
index_col=False
)
stats_dict = stats.set_index('Name').to_dict('index')
# Assert number of output files
assert len(barcodes.keys()) == len(stats)
assert len(barcodes.keys()) * 2 == len(output_files)
for name, barcode in barcodes.items():
r1_filename = f'{prefix}{name}_R1.fastq'
r2_filename = f'{prefix}{name}_R2.fastq'
r1_path = join(output_dir, r1_filename)
r2_path = join(output_dir, r2_filename)
with open(r1_path, 'rt') as fr1, open(r2_path, 'rt') as fr2:
records_r1 = list(SeqIO.parse(fr1, 'fastq'))
records_r2 = list(SeqIO.parse(fr2, 'fastq'))
assert len(records_r1) == len(records_r2)
assert stats_dict[name]['Barcode'] == barcode
assert stats_dict[name]['Count'] == len(records_r1)
assert r1_filename in stats_dict[name]['Output R1 file']
assert r2_filename in stats_dict[name]['Output R2 file']
assert_quantity_of_filtered_sequences(stats_file, max_mismatches)
for index in range(len(records_r1)):
expected_sequence_length = original_sequence_length - (
0 if not_trim else len(barcode)
)
assert records_r1[index].id == records_r2[index].id
assert len(records_r1[index].seq) == expected_sequence_length
assert len(records_r2[index].seq) == expected_sequence_length
def assert_quantity_of_filtered_sequences(stats_file, max_mismatches):
result_line = subprocess.check_output(['tail', '-1', stats_file])
results_filtered_count = int(result_line.split()[3])
expected_filtered_count = [35, 36, 60, 158, 250, 250, 250]
assert expected_filtered_count[max_mismatches] == results_filtered_count | 0.413596 | 0.372619 |
"""Entry point: parse hyperparameters, load the dataset, run training."""
import argparse

import numpy as np

from data_loader import load_data
from train import train

np.random.seed(555)

parser = argparse.ArgumentParser()

# Default hyperparameters: the 'movie' dataset.
parser.add_argument('--dataset', type=str, default='movie', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=20, help='the number of epochs')
parser.add_argument('--dim', type=int, default=8, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=1, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=4096, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=0.02, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=0.01, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=3, help='training interval of KGE task')

# Alternative hyperparameter presets, kept commented out for easy switching.
'''
# book
parser.add_argument('--dataset', type=str, default='book', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=10, help='the number of epochs')
parser.add_argument('--dim', type=int, default=8, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=1, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=2e-4, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=2e-5, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=2, help='training interval of KGE task')
'''
'''
# music
parser.add_argument('--dataset', type=str, default='music', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=10, help='the number of epochs')
parser.add_argument('--dim', type=int, default=4, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=2, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=1e-3, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=2e-4, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=2, help='training interval of KGE task')
'''

show_loss = False
show_topk = False

if __name__ == '__main__':
    # Guard so importing this module (e.g. from a test) does not parse
    # sys.argv or start a training run as a side effect.
    args = parser.parse_args()
    data = load_data(args)
    train(args, data, show_loss, show_topk)
import numpy as np
from data_loader import load_data
from train import train
np.random.seed(555)
parser = argparse.ArgumentParser()
# movie
parser.add_argument('--dataset', type=str, default='movie', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=20, help='the number of epochs')
parser.add_argument('--dim', type=int, default=8, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=1, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=4096, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=0.02, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=0.01, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=3, help='training interval of KGE task')
'''
# book
parser.add_argument('--dataset', type=str, default='book', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=10, help='the number of epochs')
parser.add_argument('--dim', type=int, default=8, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=1, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=2e-4, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=2e-5, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=2, help='training interval of KGE task')
'''
'''
# music
parser.add_argument('--dataset', type=str, default='music', help='which dataset to use')
parser.add_argument('--n_epochs', type=int, default=10, help='the number of epochs')
parser.add_argument('--dim', type=int, default=4, help='dimension of user and entity embeddings')
parser.add_argument('--L', type=int, default=2, help='number of low layers')
parser.add_argument('--H', type=int, default=1, help='number of high layers')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of l2 regularization')
parser.add_argument('--lr_rs', type=float, default=1e-3, help='learning rate of RS task')
parser.add_argument('--lr_kge', type=float, default=2e-4, help='learning rate of KGE task')
parser.add_argument('--kge_interval', type=int, default=2, help='training interval of KGE task')
'''
show_loss = False
show_topk = False
args = parser.parse_args()
data = load_data(args)
train(args, data, show_loss, show_topk) | 0.564939 | 0.099558 |
import unittest
from actionlib.simple_action_client import SimpleActionClient
import rospy
from actionlib_msgs.msg import GoalStatus
from std_msgs.msg import Int32
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from actionlib_tutorials.msg import (FibonacciAction, FibonacciGoal, FibonacciResult,
FibonacciFeedback)
from ros_bt_py_msgs.msg import FindBestExecutorAction, FindBestExecutorGoal
from ros_bt_py.nodes.action import Action
from ros_bt_py.nodes.service import Service
from ros_bt_py.nodes.topic import TopicSubscriber
from ros_bt_py.nodes.sequence import Sequence
PKG = 'ros_bt_py'
class TestRosLeafUtility(unittest.TestCase):
def setUp(self):
self.ac = SimpleActionClient('find_best_executor', FindBestExecutorAction)
# If find_best_executor isn't available within 2 seconds, fail
# the test
self.assertTrue(self.ac.wait_for_server(timeout=rospy.Duration(2.0)))
self.topic = TopicSubscriber(options={
'topic_type': Int32,
'topic_name': 'numbers_out'})
self.topic_2 = TopicSubscriber(
name='Topic2',
options={
'topic_type': Int32,
'topic_name': 'foo'})
self.action = Action(options={
'action_type': FibonacciAction,
'goal_type': FibonacciGoal,
'result_type': FibonacciResult,
'feedback_type': FibonacciFeedback,
'action_name': 'fibonacci',
'wait_for_action_server_seconds': 1.0,
'timeout_seconds': 1.0})
self.service = Service(options={
'service_type': SetBool,
'request_type': SetBoolRequest,
'response_type': SetBoolResponse,
'service_name': 'delay_1s_if_true',
'wait_for_service_seconds': 1.0,
'wait_for_response_seconds': 1.0})
def call_find_best_exec_with_node(self, node):
goal_state = self.ac.send_goal_and_wait(
node_to_goal(node),
execute_timeout=rospy.Duration(2.0))
self.assertEqual(goal_state, GoalStatus.SUCCEEDED)
return self.ac.get_result()
def testBestExecForSingleNodes(self):
for node in [self.topic, self.action, self.service]:
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(node)
.best_executor_namespace),
rospy.resolve_name('has_stuff/good_slot/'),
msg='Wrong namespace for node %s' % node.name)
# Just to be sure, test one node that should be executed in
# the other namespace
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(self.topic_2)
.best_executor_namespace),
rospy.resolve_name('no_stuff/bad_slot/'))
def testBestExecForSequence(self):
seq = Sequence()\
.add_child(self.topic)\
.add_child(self.action)\
.add_child(self.service)
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(seq)
.best_executor_namespace),
rospy.resolve_name('has_stuff/good_slot/'))
def node_to_goal(node):
goal = FindBestExecutorGoal()
goal.tree = node.get_subtree_msg()[0]
return goal
if __name__ == '__main__':
rospy.init_node('test_action_leaf')
import rostest
import sys
import os
os.environ['COVERAGE_FILE'] = '%s.%s.coverage' % (PKG, 'test_ros_leaf_utility')
rostest.rosrun(PKG, 'test_action_leaf', TestRosLeafUtility,
sysargs=sys.argv + ['--cov']) | ros_bt_py/test/rostest/test_ros_leaf_utility.py |
import unittest
from actionlib.simple_action_client import SimpleActionClient
import rospy
from actionlib_msgs.msg import GoalStatus
from std_msgs.msg import Int32
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from actionlib_tutorials.msg import (FibonacciAction, FibonacciGoal, FibonacciResult,
FibonacciFeedback)
from ros_bt_py_msgs.msg import FindBestExecutorAction, FindBestExecutorGoal
from ros_bt_py.nodes.action import Action
from ros_bt_py.nodes.service import Service
from ros_bt_py.nodes.topic import TopicSubscriber
from ros_bt_py.nodes.sequence import Sequence
PKG = 'ros_bt_py'
class TestRosLeafUtility(unittest.TestCase):
def setUp(self):
self.ac = SimpleActionClient('find_best_executor', FindBestExecutorAction)
# If find_best_executor isn't available within 2 seconds, fail
# the test
self.assertTrue(self.ac.wait_for_server(timeout=rospy.Duration(2.0)))
self.topic = TopicSubscriber(options={
'topic_type': Int32,
'topic_name': 'numbers_out'})
self.topic_2 = TopicSubscriber(
name='Topic2',
options={
'topic_type': Int32,
'topic_name': 'foo'})
self.action = Action(options={
'action_type': FibonacciAction,
'goal_type': FibonacciGoal,
'result_type': FibonacciResult,
'feedback_type': FibonacciFeedback,
'action_name': 'fibonacci',
'wait_for_action_server_seconds': 1.0,
'timeout_seconds': 1.0})
self.service = Service(options={
'service_type': SetBool,
'request_type': SetBoolRequest,
'response_type': SetBoolResponse,
'service_name': 'delay_1s_if_true',
'wait_for_service_seconds': 1.0,
'wait_for_response_seconds': 1.0})
def call_find_best_exec_with_node(self, node):
goal_state = self.ac.send_goal_and_wait(
node_to_goal(node),
execute_timeout=rospy.Duration(2.0))
self.assertEqual(goal_state, GoalStatus.SUCCEEDED)
return self.ac.get_result()
def testBestExecForSingleNodes(self):
for node in [self.topic, self.action, self.service]:
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(node)
.best_executor_namespace),
rospy.resolve_name('has_stuff/good_slot/'),
msg='Wrong namespace for node %s' % node.name)
# Just to be sure, test one node that should be executed in
# the other namespace
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(self.topic_2)
.best_executor_namespace),
rospy.resolve_name('no_stuff/bad_slot/'))
def testBestExecForSequence(self):
seq = Sequence()\
.add_child(self.topic)\
.add_child(self.action)\
.add_child(self.service)
self.assertEqual(
rospy.resolve_name(self.call_find_best_exec_with_node(seq)
.best_executor_namespace),
rospy.resolve_name('has_stuff/good_slot/'))
def node_to_goal(node):
goal = FindBestExecutorGoal()
goal.tree = node.get_subtree_msg()[0]
return goal
if __name__ == '__main__':
rospy.init_node('test_action_leaf')
import rostest
import sys
import os
os.environ['COVERAGE_FILE'] = '%s.%s.coverage' % (PKG, 'test_ros_leaf_utility')
rostest.rosrun(PKG, 'test_action_leaf', TestRosLeafUtility,
sysargs=sys.argv + ['--cov']) | 0.508788 | 0.249527 |
from dataclasses import dataclass, field
from datetime import datetime
from notion.client import NotionClient
from notion.collection import CollectionView, CollectionRowBlock, NotionDate
from utils import MetaSingleton
from typing import Dict, List, Set
import logging
logger = logging.getLogger(__name__)
def check_attr(func):
def wrapper(obj, attr):
if hasattr(obj, attr):
return func(obj, attr)
return
return wrapper
class NBotClient(NotionClient, metaclass=MetaSingleton):
def __init__(self, token=None):
super().__init__(token_v2=token)
def connect(self, link):
return self.get_collection_view(link)
class NBotElement:
notion_types: Dict
def parse(self):
for i in list(filter(lambda x: self.notion_types[x] == 'multi_select', self.notion_types.keys())):
self.parse_attr(i)
for i in list(filter(lambda x: self.notion_types[x] == 'date', self.notion_types.keys())):
self.parse_date(i)
for i in list(filter(lambda x: self.notion_types[x] == 'number', self.notion_types.keys())):
self.parse_number(i)
@check_attr
def parse_number(self, attr):
attribute = self.__getattribute__(attr)
try:
self.__setattr__(attr, float(attribute))
except ValueError:
logger.warning("Unable to parse {} setting to 0".format(attr))
self.__setattr__(attr, float(0))
@check_attr
def parse_attr(self, attr):
attribute = self.__getattribute__(attr)
self.__setattr__(attr, [i.strip() for i in attribute.split(',')])
@check_attr
def parse_date(self, attr):
attribute = self.__getattribute__(attr)
try:
self.__setattr__(attr, NotionDate(datetime.strptime(attribute, '%d %b %Y').date()))
except ValueError:
logger.error("Unable to parse date: {}".format(attr))
@dataclass
class NBotCategory:
name: str = ""
domains: set[str] = field(default_factory=lambda: set())
status: str = "To Do"
def __str__(self):
return self.name
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.name == other.name
@property
def json(self):
return dict(
name=self.name,
domains=list(self.domains),
status=self.status,
)
@json.setter
def json(self, body):
self.name = body['name']
self.domains.update(body.get('domains'), set())
self.status = body.get('status', 'To Do')
class NBotCV(object):
cv: CollectionView
props: List
_db_type = ""
_notion_link = ""
_categories: Set[NBotCategory]
def __init__(self):
self.notion_client = NBotClient()
def connect(self):
if not self.connected:
self.cv = self.notion_client.connect(self._notion_link)
self.props = [prop['id'] for prop in self.cv.collection.get_schema_properties()]
def save(self, link, status="To Do") -> str:
raise NotImplementedError()
def get_status_by_category(self, name) -> (str, None):
c = self.get_category_by_name(name, return_none=True)
if c:
return c.status
return 'To Do'
def get_category_by_domain(self, domain) -> (str, None):
for category in self._categories:
if domain in category.domains:
return category.name
return None
def get_domains(self, category: str) -> (List, None):
c = self.get_category_by_name(category, return_none=True)
if c:
return c.domains
return None
def get_category_by_name(self, name, return_none=False):
res = [c for c in self._categories if (name == c.name)]
if not res:
if return_none:
return None
return NBotCategory()
return res[0]
def save_item(self, item: NBotElement, row: CollectionRowBlock):
for id_, value in item.__dict__.items():
if id_.lower() not in self.props:
try:
self.cv.collection.update_schema_properties({
id_.lower(): dict(name=id_.lower(), type=item.notion_types.get(id_, "text"))
})
except Exception:
logger.error("Unable to update collection with id={}, value={}".format(id_, value), exc_info=True)
continue
try:
setattr(row, id_.lower(), value)
except Exception:
logger.error("Could not save {}".format(id_), exc_info=True)
@property
def categories(self) -> List[str]:
return [k.name for k in self._categories]
@categories.setter
def categories(self, items: List[Dict]):
logger.info("Current state {} update with {}".format(self._categories, items))
# TODO merge dicts...
# cat = self._categories.get(value.popitem()[0], None)
for item in items:
category = self.get_category_by_name(item['name'])
category.json = item
self._categories.add(category)
logger.info("Current state {}".format(self._categories))
@property
def db_type(self):
return self._db_type
@db_type.setter
def db_type(self, value):
self._db_type = value
@property
def notion_link(self):
return self._notion_link
@notion_link.setter
def notion_link(self, value):
self._notion_link = value
@property
def connected(self):
return hasattr(self, 'cv')
@property
def row(self) -> CollectionRowBlock:
return self.cv.collection.add_row()
@property
def json(self):
return dict(
link=self._notion_link,
db_type=self._db_type,
categories=[i.json for i in self._categories],
)
@json.setter
def json(self, body):
self._notion_link = body['link']
self._db_type = body['db_type']
self.categories = body['categories'] | nbot/clients/notion_db.py | from dataclasses import dataclass, field
from datetime import datetime
from notion.client import NotionClient
from notion.collection import CollectionView, CollectionRowBlock, NotionDate
from utils import MetaSingleton
from typing import Dict, List, Set
import logging
logger = logging.getLogger(__name__)
def check_attr(func):
def wrapper(obj, attr):
if hasattr(obj, attr):
return func(obj, attr)
return
return wrapper
class NBotClient(NotionClient, metaclass=MetaSingleton):
def __init__(self, token=None):
super().__init__(token_v2=token)
def connect(self, link):
return self.get_collection_view(link)
class NBotElement:
notion_types: Dict
def parse(self):
for i in list(filter(lambda x: self.notion_types[x] == 'multi_select', self.notion_types.keys())):
self.parse_attr(i)
for i in list(filter(lambda x: self.notion_types[x] == 'date', self.notion_types.keys())):
self.parse_date(i)
for i in list(filter(lambda x: self.notion_types[x] == 'number', self.notion_types.keys())):
self.parse_number(i)
@check_attr
def parse_number(self, attr):
attribute = self.__getattribute__(attr)
try:
self.__setattr__(attr, float(attribute))
except ValueError:
logger.warning("Unable to parse {} setting to 0".format(attr))
self.__setattr__(attr, float(0))
@check_attr
def parse_attr(self, attr):
attribute = self.__getattribute__(attr)
self.__setattr__(attr, [i.strip() for i in attribute.split(',')])
@check_attr
def parse_date(self, attr):
attribute = self.__getattribute__(attr)
try:
self.__setattr__(attr, NotionDate(datetime.strptime(attribute, '%d %b %Y').date()))
except ValueError:
logger.error("Unable to parse date: {}".format(attr))
@dataclass
class NBotCategory:
name: str = ""
domains: set[str] = field(default_factory=lambda: set())
status: str = "To Do"
def __str__(self):
return self.name
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.name == other.name
@property
def json(self):
return dict(
name=self.name,
domains=list(self.domains),
status=self.status,
)
@json.setter
def json(self, body):
self.name = body['name']
self.domains.update(body.get('domains'), set())
self.status = body.get('status', 'To Do')
class NBotCV(object):
cv: CollectionView
props: List
_db_type = ""
_notion_link = ""
_categories: Set[NBotCategory]
def __init__(self):
self.notion_client = NBotClient()
def connect(self):
if not self.connected:
self.cv = self.notion_client.connect(self._notion_link)
self.props = [prop['id'] for prop in self.cv.collection.get_schema_properties()]
def save(self, link, status="To Do") -> str:
raise NotImplementedError()
def get_status_by_category(self, name) -> (str, None):
c = self.get_category_by_name(name, return_none=True)
if c:
return c.status
return 'To Do'
def get_category_by_domain(self, domain) -> (str, None):
for category in self._categories:
if domain in category.domains:
return category.name
return None
def get_domains(self, category: str) -> (List, None):
c = self.get_category_by_name(category, return_none=True)
if c:
return c.domains
return None
def get_category_by_name(self, name, return_none=False):
res = [c for c in self._categories if (name == c.name)]
if not res:
if return_none:
return None
return NBotCategory()
return res[0]
def save_item(self, item: NBotElement, row: CollectionRowBlock):
for id_, value in item.__dict__.items():
if id_.lower() not in self.props:
try:
self.cv.collection.update_schema_properties({
id_.lower(): dict(name=id_.lower(), type=item.notion_types.get(id_, "text"))
})
except Exception:
logger.error("Unable to update collection with id={}, value={}".format(id_, value), exc_info=True)
continue
try:
setattr(row, id_.lower(), value)
except Exception:
logger.error("Could not save {}".format(id_), exc_info=True)
@property
def categories(self) -> List[str]:
return [k.name for k in self._categories]
@categories.setter
def categories(self, items: List[Dict]):
logger.info("Current state {} update with {}".format(self._categories, items))
# TODO merge dicts...
# cat = self._categories.get(value.popitem()[0], None)
for item in items:
category = self.get_category_by_name(item['name'])
category.json = item
self._categories.add(category)
logger.info("Current state {}".format(self._categories))
@property
def db_type(self):
return self._db_type
@db_type.setter
def db_type(self, value):
self._db_type = value
@property
def notion_link(self):
return self._notion_link
@notion_link.setter
def notion_link(self, value):
self._notion_link = value
@property
def connected(self):
return hasattr(self, 'cv')
@property
def row(self) -> CollectionRowBlock:
return self.cv.collection.add_row()
@property
def json(self):
return dict(
link=self._notion_link,
db_type=self._db_type,
categories=[i.json for i in self._categories],
)
@json.setter
def json(self, body):
self._notion_link = body['link']
self._db_type = body['db_type']
self.categories = body['categories'] | 0.66769 | 0.100481 |
__author__ = "<NAME>"
__maintainer__ = __author__
import cython
import numpy as np
from .general import AbstractDetector
class FKMDetector(AbstractDetector):
"""Rainflow detector as described in FKM non linear.
The algorithm has been published by Clormann & Seeger 1985 and has
been cited heavily since.
.. jupyter-execute::
from pylife.stress.timesignal import TimeSignalGenerator
import pylife.stress.rainflow as RF
ts = TimeSignalGenerator(10, {
'number': 50,
'amplitude_median': 1.0, 'amplitude_std_dev': 0.5,
'frequency_median': 4, 'frequency_std_dev': 3,
'offset_median': 0, 'offset_std_dev': 0.4}, None, None).query(10000)
rfc = RF.FKMDetector(recorder=RF.LoopValueRecorder())
rfc.process(ts)
rfc.recorder.collective
Alternatively you can ask the recorder for a histogram matrix:
.. jupyter-execute::
rfc.recorder.matrix_series(bins=16)
Note
----
This detector **does not** report the loop index.
"""
def __init__(self, recorder):
"""Instantiate a FKMDetector.
Parameters
----------
recorder : subclass of :class:`.AbstractRecorder`
The recorder that the detector will report to.
"""
super().__init__(recorder)
self._ir = 1
self._residuals = []
self._max_turn = 0.0
@cython.locals(
turns=cython.double[:],
iz=cython.int, ir=cython.int,
last0=cython.double, last1=cython.double,
loop_assumed=cython.int,
max_turn=cython.double)
def process(self, samples):
"""Process a sample chunk.
Parameters
----------
samples : array_like, shape (N, )
The samples to be processed
Returns
-------
self : FKMDetector
The ``self`` object so that processing can be chained
"""
ir = self._ir
max_turn = self._max_turn
turns_index, turns = self._new_turns(samples)
for current in turns:
loop_assumed = True
while loop_assumed:
iz = len(self._residuals)
if iz < ir:
break
loop_assumed = False
if iz > ir:
last0 = self._residuals[-1]
last1 = self._residuals[-2]
if np.abs(current-last0) >= np.abs(last0-last1):
self._recorder.record_values(last1, last0)
self._residuals.pop()
self._residuals.pop()
if np.abs(last0) < max_turn and np.abs(last1) < max_turn:
loop_assumed = True
continue
if np.abs(current) > max_turn:
ir += 1
max_turn = max(np.abs(current), max_turn)
self._residuals.append(current)
self._ir = ir
self._max_turn = max_turn
return self | src/pylife/stress/rainflow/fkm.py |
__author__ = "<NAME>"
__maintainer__ = __author__
import cython
import numpy as np
from .general import AbstractDetector
class FKMDetector(AbstractDetector):
"""Rainflow detector as described in FKM non linear.
The algorithm has been published by Clormann & Seeger 1985 and has
been cited heavily since.
.. jupyter-execute::
from pylife.stress.timesignal import TimeSignalGenerator
import pylife.stress.rainflow as RF
ts = TimeSignalGenerator(10, {
'number': 50,
'amplitude_median': 1.0, 'amplitude_std_dev': 0.5,
'frequency_median': 4, 'frequency_std_dev': 3,
'offset_median': 0, 'offset_std_dev': 0.4}, None, None).query(10000)
rfc = RF.FKMDetector(recorder=RF.LoopValueRecorder())
rfc.process(ts)
rfc.recorder.collective
Alternatively you can ask the recorder for a histogram matrix:
.. jupyter-execute::
rfc.recorder.matrix_series(bins=16)
Note
----
This detector **does not** report the loop index.
"""
def __init__(self, recorder):
"""Instantiate a FKMDetector.
Parameters
----------
recorder : subclass of :class:`.AbstractRecorder`
The recorder that the detector will report to.
"""
super().__init__(recorder)
self._ir = 1
self._residuals = []
self._max_turn = 0.0
@cython.locals(
turns=cython.double[:],
iz=cython.int, ir=cython.int,
last0=cython.double, last1=cython.double,
loop_assumed=cython.int,
max_turn=cython.double)
def process(self, samples):
"""Process a sample chunk.
Parameters
----------
samples : array_like, shape (N, )
The samples to be processed
Returns
-------
self : FKMDetector
The ``self`` object so that processing can be chained
"""
ir = self._ir
max_turn = self._max_turn
turns_index, turns = self._new_turns(samples)
for current in turns:
loop_assumed = True
while loop_assumed:
iz = len(self._residuals)
if iz < ir:
break
loop_assumed = False
if iz > ir:
last0 = self._residuals[-1]
last1 = self._residuals[-2]
if np.abs(current-last0) >= np.abs(last0-last1):
self._recorder.record_values(last1, last0)
self._residuals.pop()
self._residuals.pop()
if np.abs(last0) < max_turn and np.abs(last1) < max_turn:
loop_assumed = True
continue
if np.abs(current) > max_turn:
ir += 1
max_turn = max(np.abs(current), max_turn)
self._residuals.append(current)
self._ir = ir
self._max_turn = max_turn
return self | 0.850748 | 0.421076 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('category', models.CharField(max_length=140, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Difficulty',
fields=[
('difficulty', models.CharField(max_length=32, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=280, unique=True)),
],
),
migrations.CreateModel(
name='MultipleChoice',
fields=[
('question', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='trivia.Question')),
('correct_answer', models.CharField(max_length=140)),
('incorrect_b', models.CharField(max_length=140)),
('incorrect_c', models.CharField(max_length=140)),
('incorrect_d', models.CharField(max_length=140)),
],
),
migrations.CreateModel(
name='TrueFalse',
fields=[
('question', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='trivia.Question')),
('correct_answer', models.BooleanField()),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime_start', models.DateTimeField(auto_now_add=True)),
('datetime_end', models.DateTimeField(blank=True, null=True)),
('questions_correct', models.IntegerField(blank=True, null=True)),
('total_questions', models.IntegerField(blank=True, null=True)),
('difficulty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trivia.Difficulty')),
],
),
] | studenthub/games/trivia/migrations/0001_initial.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('category', models.CharField(max_length=140, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Difficulty',
fields=[
('difficulty', models.CharField(max_length=32, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=280, unique=True)),
],
),
migrations.CreateModel(
name='MultipleChoice',
fields=[
('question', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='trivia.Question')),
('correct_answer', models.CharField(max_length=140)),
('incorrect_b', models.CharField(max_length=140)),
('incorrect_c', models.CharField(max_length=140)),
('incorrect_d', models.CharField(max_length=140)),
],
),
migrations.CreateModel(
name='TrueFalse',
fields=[
('question', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='trivia.Question')),
('correct_answer', models.BooleanField()),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime_start', models.DateTimeField(auto_now_add=True)),
('datetime_end', models.DateTimeField(blank=True, null=True)),
('questions_correct', models.IntegerField(blank=True, null=True)),
('total_questions', models.IntegerField(blank=True, null=True)),
('difficulty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trivia.Difficulty')),
],
),
] | 0.61057 | 0.188511 |
import argparse
import logging
import numpy as np
from cv2 import resize
from lib.scene import Pose
from lib.homography import getFrameFlattening, getFramePxlsInMeter
import lib.conventions
from lib.iterateScenes import iterateCamerasPoses
def makePitchAndSizeMaps(camera_id, pose_id, dry_run=False):
''' Generates maps of pitch and pxls_in_meter for each point in every map. '''
DEFAULT_HEIGHT = 8.5
DOWNSCALE = 4 # For speed up and smoothness, compute on downscaled image.
pose = Pose(camera_id=camera_id, pose_id=pose_id)
if 'H_pose_to_map' not in pose:
raise Exception('No homography for camera %d, pose %d' % (camera_id, pose_id))
H = np.asarray(pose['H_pose_to_map']).reshape((3,3))
# For each point get a flattening.
Y = pose.camera['cam_dims']['height']
X = pose.camera['cam_dims']['width']
flattening_map = np.zeros((Y // DOWNSCALE, X // DOWNSCALE), dtype=float)
size_map = np.zeros((Y // DOWNSCALE, X // DOWNSCALE), dtype=float)
for y in range(Y // DOWNSCALE):
for x in range(X // DOWNSCALE):
y_sc = y * DOWNSCALE
x_sc = x * DOWNSCALE
flattening_map[y, x] = getFrameFlattening(H, y_sc, x_sc)
size_map[y, x] = getFramePxlsInMeter(H, pose.map['pxls_in_meter'], y_sc, x_sc)
logging.info('flattening_map min %.2f, max %.2f' %
(np.min(flattening_map), np.max(flattening_map)))
logging.info('size_map min %.2f, max %.2f' %
(np.min(size_map), np.max(size_map)))
# Top-down is 90 degrees, at the horizon is 0 degrees (consistent with CAD).
pitch_map = np.arcsin(flattening_map)
pitch_map = resize((pitch_map * 255.).astype(np.uint8), (X, Y)).astype(float) / 255.
size_map = resize(size_map.astype(np.uint8), (X, Y)).astype(float)
pitch_path = lib.conventions.get_pose_pitchmap_path(pose.get_pose_dir())
size_path = lib.conventions.get_pose_sizemap_path(pose.get_pose_dir())
if not dry_run:
lib.conventions.write_pitch_image(pitch_path, pitch_map)
lib.conventions.write_size_image(size_path, size_map)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Make pitch and size maps, for one camera-pose or everything.')
parser.add_argument('--camera_id', type=int, help='if not given, all cameras.')
parser.add_argument('--pose_id', type=int, help='if not given, all poses.')
parser.add_argument('--logging', type=int, default=20, choices=[10,20,30,40])
parser.add_argument('--dry_run', action='store_true')
args = parser.parse_args()
logging.basicConfig(level=args.logging, format='%(levelname)s: %(message)s')
if args.camera_id is not None and args.pose_id is not None:
makePitchAndSizeMaps(args.camera_id, args.pose_id, dry_run=args.dry_run)
elif args.camera_id is None and args.pose_id is None:
for camera_id, pose_id in iterateCamerasPoses():
makePitchAndSizeMaps(camera_id, pose_id, dry_run=args.dry_run)
else:
raise Exception('Either specify both camera_id and pose_id, or none of them.') | shuffler/lib/scenes/MakePitchAndSizeMaps.py |
import argparse
import logging
import numpy as np
from cv2 import resize
from lib.scene import Pose
from lib.homography import getFrameFlattening, getFramePxlsInMeter
import lib.conventions
from lib.iterateScenes import iterateCamerasPoses
def makePitchAndSizeMaps(camera_id, pose_id, dry_run=False):
''' Generates maps of pitch and pxls_in_meter for each point in every map. '''
DEFAULT_HEIGHT = 8.5
DOWNSCALE = 4 # For speed up and smoothness, compute on downscaled image.
pose = Pose(camera_id=camera_id, pose_id=pose_id)
if 'H_pose_to_map' not in pose:
raise Exception('No homography for camera %d, pose %d' % (camera_id, pose_id))
H = np.asarray(pose['H_pose_to_map']).reshape((3,3))
# For each point get a flattening.
Y = pose.camera['cam_dims']['height']
X = pose.camera['cam_dims']['width']
flattening_map = np.zeros((Y // DOWNSCALE, X // DOWNSCALE), dtype=float)
size_map = np.zeros((Y // DOWNSCALE, X // DOWNSCALE), dtype=float)
for y in range(Y // DOWNSCALE):
for x in range(X // DOWNSCALE):
y_sc = y * DOWNSCALE
x_sc = x * DOWNSCALE
flattening_map[y, x] = getFrameFlattening(H, y_sc, x_sc)
size_map[y, x] = getFramePxlsInMeter(H, pose.map['pxls_in_meter'], y_sc, x_sc)
logging.info('flattening_map min %.2f, max %.2f' %
(np.min(flattening_map), np.max(flattening_map)))
logging.info('size_map min %.2f, max %.2f' %
(np.min(size_map), np.max(size_map)))
# Top-down is 90 degrees, at the horizon is 0 degrees (consistent with CAD).
pitch_map = np.arcsin(flattening_map)
pitch_map = resize((pitch_map * 255.).astype(np.uint8), (X, Y)).astype(float) / 255.
size_map = resize(size_map.astype(np.uint8), (X, Y)).astype(float)
pitch_path = lib.conventions.get_pose_pitchmap_path(pose.get_pose_dir())
size_path = lib.conventions.get_pose_sizemap_path(pose.get_pose_dir())
if not dry_run:
lib.conventions.write_pitch_image(pitch_path, pitch_map)
lib.conventions.write_size_image(size_path, size_map)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Make pitch and size maps, for one camera-pose or everything.')
parser.add_argument('--camera_id', type=int, help='if not given, all cameras.')
parser.add_argument('--pose_id', type=int, help='if not given, all poses.')
parser.add_argument('--logging', type=int, default=20, choices=[10,20,30,40])
parser.add_argument('--dry_run', action='store_true')
args = parser.parse_args()
logging.basicConfig(level=args.logging, format='%(levelname)s: %(message)s')
if args.camera_id is not None and args.pose_id is not None:
makePitchAndSizeMaps(args.camera_id, args.pose_id, dry_run=args.dry_run)
elif args.camera_id is None and args.pose_id is None:
for camera_id, pose_id in iterateCamerasPoses():
makePitchAndSizeMaps(camera_id, pose_id, dry_run=args.dry_run)
else:
raise Exception('Either specify both camera_id and pose_id, or none of them.') | 0.620852 | 0.263762 |
import unittest
import numpy as np
import matplotlib.pyplot as plt
import lmfit
from pycqed.analysis.tools.plotting import SI_prefix_and_scale_factor
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from pycqed.analysis.tools.plotting import format_lmfit_par, plot_lmfit_res
class Test_SI_prefix_scale_factor(unittest.TestCase):
def test_non_SI(self):
unit = 'arb.unit.'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(unit, post_unit)
def test_SI_scale_factors(self):
unit = 'V'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(''+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5000,
unit=unit)
self.assertEqual(scale_factor, 1/1000)
self.assertEqual('k'+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=0.05,
unit=unit)
self.assertEqual(scale_factor, 1000)
self.assertEqual('m'+unit, post_unit)
class test_SI_unit_aware_labels(unittest.TestCase):
def test_label_scaling(self):
"""
This test creates a dummy plot and checks if the tick labels are
rescaled correctly
"""
f, ax = plt.subplots()
x = np.linspace(-6, 6, 101)
y = np.cos(x)
ax.plot(x*1000, y/1e5)
set_xlabel(ax, 'Distance', 'm')
set_ylabel(ax, 'Amplitude', 'V')
xlab = ax.get_xlabel()
ylab = ax.get_ylabel()
self.assertEqual(xlab, 'Distance (km)')
self.assertEqual(ylab, 'Amplitude (μV)')
def test_SI_val_to_msg_str(self):
val, unit = SI_val_to_msg_str(1030, 'm')
self.assertEqual(val, str(1.03))
self.assertEqual(unit, 'km')
class test_format_lmfit_par(unittest.TestCase):
def test_format_lmfit_par(self):
p = lmfit.Parameter('p')
p.value = 5.12
p.stderr = 0.024
test_str = format_lmfit_par('test_par', p, end_char='\n')
self.assertEqual(test_str, 'test_par: 5.1200$\\pm$0.0240\n')
def test_format_lmfit_par_missing_stderr(self):
p = lmfit.Parameter('p')
p.value = 5.12
test_str = format_lmfit_par('test_par', p, end_char='')
self.assertEqual(test_str, 'test_par: 5.1200$\\pm$NaN')
class test_plot_lmfit_res(unittest.TestCase):
def test_plot_model_result(self):
def line(a, b, x):
return a*x+b
a = .1
b = 5
x = np.linspace(0, 20, 31)
y = line(a, b, x)
line_model = lmfit.Model(line, independent_vars='x')
line_model.set_param_hint('a', value=a)
line_model.set_param_hint('b', value=b)
params = line_model.make_params()
fit_res = line_model.fit(y, x=x, params=params)
f, ax = plt.subplots()
plot_lmfit_res(fit_res, ax=ax, plot_kws={'color': 'C1'},
plot_init=True, plot_init_kws={'ls': '--'}) | pycqed/tests/analysis/test_tools_plotting.py | import unittest
import numpy as np
import matplotlib.pyplot as plt
import lmfit
from pycqed.analysis.tools.plotting import SI_prefix_and_scale_factor
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
from pycqed.analysis.tools.plotting import format_lmfit_par, plot_lmfit_res
class Test_SI_prefix_scale_factor(unittest.TestCase):
def test_non_SI(self):
unit = 'arb.unit.'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(unit, post_unit)
def test_SI_scale_factors(self):
unit = 'V'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(''+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5000,
unit=unit)
self.assertEqual(scale_factor, 1/1000)
self.assertEqual('k'+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=0.05,
unit=unit)
self.assertEqual(scale_factor, 1000)
self.assertEqual('m'+unit, post_unit)
class test_SI_unit_aware_labels(unittest.TestCase):
def test_label_scaling(self):
"""
This test creates a dummy plot and checks if the tick labels are
rescaled correctly
"""
f, ax = plt.subplots()
x = np.linspace(-6, 6, 101)
y = np.cos(x)
ax.plot(x*1000, y/1e5)
set_xlabel(ax, 'Distance', 'm')
set_ylabel(ax, 'Amplitude', 'V')
xlab = ax.get_xlabel()
ylab = ax.get_ylabel()
self.assertEqual(xlab, 'Distance (km)')
self.assertEqual(ylab, 'Amplitude (μV)')
def test_SI_val_to_msg_str(self):
val, unit = SI_val_to_msg_str(1030, 'm')
self.assertEqual(val, str(1.03))
self.assertEqual(unit, 'km')
class test_format_lmfit_par(unittest.TestCase):
def test_format_lmfit_par(self):
p = lmfit.Parameter('p')
p.value = 5.12
p.stderr = 0.024
test_str = format_lmfit_par('test_par', p, end_char='\n')
self.assertEqual(test_str, 'test_par: 5.1200$\\pm$0.0240\n')
def test_format_lmfit_par_missing_stderr(self):
p = lmfit.Parameter('p')
p.value = 5.12
test_str = format_lmfit_par('test_par', p, end_char='')
self.assertEqual(test_str, 'test_par: 5.1200$\\pm$NaN')
class test_plot_lmfit_res(unittest.TestCase):
def test_plot_model_result(self):
def line(a, b, x):
return a*x+b
a = .1
b = 5
x = np.linspace(0, 20, 31)
y = line(a, b, x)
line_model = lmfit.Model(line, independent_vars='x')
line_model.set_param_hint('a', value=a)
line_model.set_param_hint('b', value=b)
params = line_model.make_params()
fit_res = line_model.fit(y, x=x, params=params)
f, ax = plt.subplots()
plot_lmfit_res(fit_res, ax=ax, plot_kws={'color': 'C1'},
plot_init=True, plot_init_kws={'ls': '--'}) | 0.752104 | 0.529385 |
import time
import praw
__all__ = ['PrawOAuth2Mini']
REDIRECT_URL = 'http://127.0.0.1:9999/authorize_callback'
SCOPES = ['identity', 'read']
EXPIRY_DURATION = 3500
class PrawOAuth2Mini:
"""
Creates a `PrawOAuth2Mini` instance. `PrawOAuth2Mini` meant to be
used in the bot and it needs valid `access_token` and `refresh_token`
to operate. Once the `access_token` is expired, it will be refreshed
using the `refresh_token`
:param reddit_client: An Instance of praw
:param app_key: App Secret (or also known as Client Id) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param app_secret: App Key (or also known as Client Secret) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param access_token: Once you have authorized your Reddit account with
the app/bot/script using `PrawOAuth2Server`, you get a valid
`access_token` (which expires after 60 minutes).
:param refresh_token: Once you have authorized your Reddit account with
the app/bot/script using `PrawOAuth2Server`, you get a valid
`refresh_token`.
:param scopes: List of scopes for OAuth. Default is `['identity', 'read']`.
https://praw.readthedocs.org/en/latest/pages/oauth.html#oauth-scopes
:param redirect_url: Redirect URL used in authorization process using
`PrawOAuth2Server`. Default is `http://127.0.0.1:9999/authorize_callback`
(which is recommended by praw).
Make sure you provide same `scopes` and `redirect_url` which you used
with `PrawOAuth2Server`.
"""
def __init__(self, reddit_client, app_key,
app_secret, access_token,
refresh_token, scopes=SCOPES,
redirect_url=REDIRECT_URL):
self.reddit_client = reddit_client
self.app_key = app_key
self.app_secret = app_secret
self.access_token = access_token
self.refresh_token = refresh_token
self.scopes = set(scopes)
self.redirect_url = redirect_url
self.validity = 0
self._set_app_info()
self._set_access_credentials_first_time()
def _set_validity(self):
self.validity = time.time() + EXPIRY_DURATION
def _is_token_expired(self):
return time.time() > self.validity
def _set_app_info(self):
self.reddit_client.set_oauth_app_info(client_id=self.app_key,
client_secret=self.app_secret,
redirect_uri=self.redirect_url)
def _set_access_credentials(self):
self.reddit_client.set_access_credentials(
scope=self.scopes, access_token=self.access_token,
refresh_token=self.refresh_token)
self._set_validity()
def _set_access_credentials_first_time(self):
try:
self._set_access_credentials()
except praw.errors.OAuthInvalidToken:
self.refresh()
def _get_refresh_access(self):
return self.reddit_client.refresh_access_information(
refresh_token=self.refresh_token)
def refresh(self, force=False):
"""Refreshes the `access_token` and sets the praw instance `reddit_client`
with a valid one.
:param force: Boolean. Refresh will be done only when last refresh was
done before `EXPIRY_DURATION`, which is 3500 seconds. However
passing `force` will overrides this and refresh operation will be
done everytime.
"""
if self._is_token_expired() or force:
tokens = self._get_refresh_access()
self.access_token = tokens['access_token']
self.refresh_token = tokens['refresh_token']
self._set_access_credentials()
def get_access_codes(self):
"""Returns the `access_token` and `refresh_token`.
:returns: A dictionary containing `access_token` and `refresh_token`.
"""
return {'access_token': self.access_token,
'refresh_token': self.refresh_token} | prawoauth2/PrawOAuth2Mini.py |
import time
import praw
__all__ = ['PrawOAuth2Mini']
REDIRECT_URL = 'http://127.0.0.1:9999/authorize_callback'
SCOPES = ['identity', 'read']
EXPIRY_DURATION = 3500
class PrawOAuth2Mini:
"""
Creates a `PrawOAuth2Mini` instance. `PrawOAuth2Mini` meant to be
used in the bot and it needs valid `access_token` and `refresh_token`
to operate. Once the `access_token` is expired, it will be refreshed
using the `refresh_token`
:param reddit_client: An Instance of praw
:param app_key: App Secret (or also known as Client Id) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param app_secret: App Key (or also known as Client Secret) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param access_token: Once you have authorized your Reddit account with
the app/bot/script using `PrawOAuth2Server`, you get a valid
`access_token` (which expires after 60 minutes).
:param refresh_token: Once you have authorized your Reddit account with
the app/bot/script using `PrawOAuth2Server`, you get a valid
`refresh_token`.
:param scopes: List of scopes for OAuth. Default is `['identity', 'read']`.
https://praw.readthedocs.org/en/latest/pages/oauth.html#oauth-scopes
:param redirect_url: Redirect URL used in authorization process using
`PrawOAuth2Server`. Default is `http://127.0.0.1:9999/authorize_callback`
(which is recommended by praw).
Make sure you provide same `scopes` and `redirect_url` which you used
with `PrawOAuth2Server`.
"""
def __init__(self, reddit_client, app_key,
app_secret, access_token,
refresh_token, scopes=SCOPES,
redirect_url=REDIRECT_URL):
self.reddit_client = reddit_client
self.app_key = app_key
self.app_secret = app_secret
self.access_token = access_token
self.refresh_token = refresh_token
self.scopes = set(scopes)
self.redirect_url = redirect_url
self.validity = 0
self._set_app_info()
self._set_access_credentials_first_time()
def _set_validity(self):
self.validity = time.time() + EXPIRY_DURATION
def _is_token_expired(self):
return time.time() > self.validity
def _set_app_info(self):
self.reddit_client.set_oauth_app_info(client_id=self.app_key,
client_secret=self.app_secret,
redirect_uri=self.redirect_url)
def _set_access_credentials(self):
self.reddit_client.set_access_credentials(
scope=self.scopes, access_token=self.access_token,
refresh_token=self.refresh_token)
self._set_validity()
def _set_access_credentials_first_time(self):
try:
self._set_access_credentials()
except praw.errors.OAuthInvalidToken:
self.refresh()
def _get_refresh_access(self):
return self.reddit_client.refresh_access_information(
refresh_token=self.refresh_token)
def refresh(self, force=False):
"""Refreshes the `access_token` and sets the praw instance `reddit_client`
with a valid one.
:param force: Boolean. Refresh will be done only when last refresh was
done before `EXPIRY_DURATION`, which is 3500 seconds. However
passing `force` will overrides this and refresh operation will be
done everytime.
"""
if self._is_token_expired() or force:
tokens = self._get_refresh_access()
self.access_token = tokens['access_token']
self.refresh_token = tokens['refresh_token']
self._set_access_credentials()
def get_access_codes(self):
"""Returns the `access_token` and `refresh_token`.
:returns: A dictionary containing `access_token` and `refresh_token`.
"""
return {'access_token': self.access_token,
'refresh_token': self.refresh_token} | 0.63375 | 0.257187 |
import random
class FewshotSampleBase:
'''
Abstract Class
DO NOT USE
Build your own Sample class and inherit from this class
'''
def __init__(self):
self.class_count = {}
def get_class_count(self):
'''
return a dictionary of {class_name:count} in format {any : int}
'''
return self.class_count
class FewshotSampler:
'''
sample one support set and one query set
'''
def __init__(self, N, K, Q, samples, classes=None, random_state=0):
'''
N: int, how many types in each set
K: int, how many instances for each type in support set
Q: int, how many instances for each type in query set
samples: List[Sample], Sample class must have `get_class_count` attribute
classes[Optional]: List[any], all unique classes in samples. If not given, the classes will be got from samples.get_class_count()
random_state[Optional]: int, the random seed
'''
self.K = K
self.N = N
self.Q = Q
self.samples = samples
self.__check__() # check if samples have correct types
if classes:
self.classes = classes
else:
self.classes = self.__get_all_classes__()
random.seed(random_state)
def __get_all_classes__(self):
classes = []
for sample in self.samples:
classes += list(sample.get_class_count().keys())
return list(set(classes))
def __check__(self):
for idx, sample in enumerate(self.samples):
if not hasattr(sample,'get_class_count'):
print('[ERROR] samples in self.samples expected to have `get_class_count` attribute, but self.samples[{idx}] does not')
raise ValueError
def __additem__(self, index, set_class):
class_count = self.samples[index].get_class_count()
for class_name in class_count:
if class_name in set_class:
set_class[class_name] += class_count[class_name]
else:
set_class[class_name] = class_count[class_name]
def __valid_sample__(self, sample, set_class, target_classes):
threshold = 2 * set_class['k']
class_count = sample.get_class_count()
if not class_count:
return False
isvalid = False
for class_name in class_count:
if class_name not in target_classes:
isvalid = False
elif class_name not in set_class:
isvalid = True
elif set_class[class_name] + class_count[class_name] > threshold:
isvalid = False
elif set_class[class_name] < set_class['k']:
isvalid = True
return isvalid
def __finish__(self, set_class):
if len(set_class) < self.N+1:
return False
for k in set_class:
if set_class[k] < set_class['k']:
return False
return True
def __get_candidates__(self, target_classes):
return [idx for idx, sample in enumerate(self.samples) if sample.valid(target_classes)]
def __next__(self):
'''
randomly sample one support set and one query set
return:
target_classes: List[any]
support_idx: List[int], sample index in support set in samples list
support_idx: List[int], sample index in query set in samples list
'''
support_class = {'k':self.K}
support_idx = []
query_class = {'k':self.Q}
query_idx = []
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
while not candidates:
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
# greedy search for support set
while not self.__finish__(support_class):
index = random.choice(candidates)
if index not in support_idx:
if self.__valid_sample__(self.samples[index], support_class, target_classes):
self.__additem__(index, support_class)
support_idx.append(index)
# same for query set
while not self.__finish__(query_class):
index = random.choice(candidates)
if index not in query_idx and index not in support_idx:
if self.__valid_sample__(self.samples[index], query_class, target_classes):
self.__additem__(index, query_class)
query_idx.append(index)
return target_classes, support_idx, query_idx
def __iter__(self):
return self | src/fewnerd/fewnerd/util/fewshotsampler.py | import random
class FewshotSampleBase:
'''
Abstract Class
DO NOT USE
Build your own Sample class and inherit from this class
'''
def __init__(self):
self.class_count = {}
def get_class_count(self):
'''
return a dictionary of {class_name:count} in format {any : int}
'''
return self.class_count
class FewshotSampler:
'''
sample one support set and one query set
'''
def __init__(self, N, K, Q, samples, classes=None, random_state=0):
'''
N: int, how many types in each set
K: int, how many instances for each type in support set
Q: int, how many instances for each type in query set
samples: List[Sample], Sample class must have `get_class_count` attribute
classes[Optional]: List[any], all unique classes in samples. If not given, the classes will be got from samples.get_class_count()
random_state[Optional]: int, the random seed
'''
self.K = K
self.N = N
self.Q = Q
self.samples = samples
self.__check__() # check if samples have correct types
if classes:
self.classes = classes
else:
self.classes = self.__get_all_classes__()
random.seed(random_state)
def __get_all_classes__(self):
classes = []
for sample in self.samples:
classes += list(sample.get_class_count().keys())
return list(set(classes))
def __check__(self):
for idx, sample in enumerate(self.samples):
if not hasattr(sample,'get_class_count'):
print('[ERROR] samples in self.samples expected to have `get_class_count` attribute, but self.samples[{idx}] does not')
raise ValueError
def __additem__(self, index, set_class):
class_count = self.samples[index].get_class_count()
for class_name in class_count:
if class_name in set_class:
set_class[class_name] += class_count[class_name]
else:
set_class[class_name] = class_count[class_name]
def __valid_sample__(self, sample, set_class, target_classes):
threshold = 2 * set_class['k']
class_count = sample.get_class_count()
if not class_count:
return False
isvalid = False
for class_name in class_count:
if class_name not in target_classes:
isvalid = False
elif class_name not in set_class:
isvalid = True
elif set_class[class_name] + class_count[class_name] > threshold:
isvalid = False
elif set_class[class_name] < set_class['k']:
isvalid = True
return isvalid
def __finish__(self, set_class):
if len(set_class) < self.N+1:
return False
for k in set_class:
if set_class[k] < set_class['k']:
return False
return True
def __get_candidates__(self, target_classes):
return [idx for idx, sample in enumerate(self.samples) if sample.valid(target_classes)]
def __next__(self):
'''
randomly sample one support set and one query set
return:
target_classes: List[any]
support_idx: List[int], sample index in support set in samples list
support_idx: List[int], sample index in query set in samples list
'''
support_class = {'k':self.K}
support_idx = []
query_class = {'k':self.Q}
query_idx = []
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
while not candidates:
target_classes = random.sample(self.classes, self.N)
candidates = self.__get_candidates__(target_classes)
# greedy search for support set
while not self.__finish__(support_class):
index = random.choice(candidates)
if index not in support_idx:
if self.__valid_sample__(self.samples[index], support_class, target_classes):
self.__additem__(index, support_class)
support_idx.append(index)
# same for query set
while not self.__finish__(query_class):
index = random.choice(candidates)
if index not in query_idx and index not in support_idx:
if self.__valid_sample__(self.samples[index], query_class, target_classes):
self.__additem__(index, query_class)
query_idx.append(index)
return target_classes, support_idx, query_idx
def __iter__(self):
return self | 0.68742 | 0.320901 |
import matplotlib.pyplot as plt
import numpy as np
from misc.ansi_color_codes import ACC
def gen_plot(timeline, filename, title):
if not isinstance(timeline, list):
timeline = [timeline]
plt.figure(10000)
plt.clf()
for i in range(len(timeline)): plt.plot(timeline[i])
plt.title(title)
plt.savefig(filename)
def print_dbl_line(num=150):
print("=" * num)
def print_trace(trace_num, mode_seq, po, observations, sc_mode_seq, sc_po, sc_observations, max_steps=20):
_, num_steps, _ = observations.shape
print_line()
for s in range(np.minimum(max_steps, num_steps)):
print("\t| ", mode_seq[trace_num, s], " ", ACC.OkBlue, "[", sc_mode_seq[trace_num, s], "] \t", ACC.End, end='')
print("-> [", *print_array(po[trace_num, s]), "] ", ACC.OkBlue, "[", *print_array(sc_po[trace_num, s]), "]", ACC.End, end='')
print("-> [", *print_array(observations[trace_num, s]), "] ")
[print("\t" * 2 + " ." + "\t" * 9 + "." + "\t" * 10 + " ." + "\t" * 9 + " .") for _ in range(3)]
def print_array(a, format_string ='{0:+.8f}'):
return [format_string.format(v,i) for i,v in enumerate(a)]
def print_line(text=None, num=150):
print("-" * num)
if text is not None:
print("--- ", end='')
print(text, end='')
print(" ", "-" * (num - 6 - len(text)))
def print_mat_statistics(id, a):
# extract, eigenvalues and eigenvectors
np.set_printoptions(formatter={'float': '{: 0.5f}'.format})
evals, evecs = np.linalg.eig(a)
# sort the values
sorting = evals.argsort()[::-1]
evals = evals[sorting]
evecs = evecs[:, sorting]
# calc condition number
cond_num = np.abs(evals[0]) / np.abs(evals[-1])
# do the printing
print_line()
print("Matrix [{}]".format(id))
print_line("Properties")
print("\t| COND_NUM = {:0.6f}".format(cond_num))
print("\t| EIG = \t", end='')
for i in range(len(evals)):
print(evals[i])
if i < len(evals) - 1:
print("\t\t\t\t", end='')
# print matrix
print_line("Matrix")
[print(a[i]) for i in range(len(a))]
# print eigen vector basis
print_line("U")
[print("u{} -> {} -> {}".format(i, evals[i], evecs[:,i])) for i in range(len(a))] | misc/output.py |
import matplotlib.pyplot as plt
import numpy as np
from misc.ansi_color_codes import ACC
def gen_plot(timeline, filename, title):
if not isinstance(timeline, list):
timeline = [timeline]
plt.figure(10000)
plt.clf()
for i in range(len(timeline)): plt.plot(timeline[i])
plt.title(title)
plt.savefig(filename)
def print_dbl_line(num=150):
print("=" * num)
def print_trace(trace_num, mode_seq, po, observations, sc_mode_seq, sc_po, sc_observations, max_steps=20):
_, num_steps, _ = observations.shape
print_line()
for s in range(np.minimum(max_steps, num_steps)):
print("\t| ", mode_seq[trace_num, s], " ", ACC.OkBlue, "[", sc_mode_seq[trace_num, s], "] \t", ACC.End, end='')
print("-> [", *print_array(po[trace_num, s]), "] ", ACC.OkBlue, "[", *print_array(sc_po[trace_num, s]), "]", ACC.End, end='')
print("-> [", *print_array(observations[trace_num, s]), "] ")
[print("\t" * 2 + " ." + "\t" * 9 + "." + "\t" * 10 + " ." + "\t" * 9 + " .") for _ in range(3)]
def print_array(a, format_string ='{0:+.8f}'):
return [format_string.format(v,i) for i,v in enumerate(a)]
def print_line(text=None, num=150):
print("-" * num)
if text is not None:
print("--- ", end='')
print(text, end='')
print(" ", "-" * (num - 6 - len(text)))
def print_mat_statistics(id, a):
# extract, eigenvalues and eigenvectors
np.set_printoptions(formatter={'float': '{: 0.5f}'.format})
evals, evecs = np.linalg.eig(a)
# sort the values
sorting = evals.argsort()[::-1]
evals = evals[sorting]
evecs = evecs[:, sorting]
# calc condition number
cond_num = np.abs(evals[0]) / np.abs(evals[-1])
# do the printing
print_line()
print("Matrix [{}]".format(id))
print_line("Properties")
print("\t| COND_NUM = {:0.6f}".format(cond_num))
print("\t| EIG = \t", end='')
for i in range(len(evals)):
print(evals[i])
if i < len(evals) - 1:
print("\t\t\t\t", end='')
# print matrix
print_line("Matrix")
[print(a[i]) for i in range(len(a))]
# print eigen vector basis
print_line("U")
[print("u{} -> {} -> {}".format(i, evals[i], evecs[:,i])) for i in range(len(a))] | 0.423458 | 0.447702 |
import os
import shutil
import subprocess
print " "
print "==============================="
print "| Frostfall Release Builder |"
print "| \/ |"
print "| _\_\/\/_/_ |"
print "| _\_\/_/_ |"
print "| __/_/\_\__ |"
print "| / /\/\ \ |"
print "| /\ |"
print "==============================="
print " "
user_input = raw_input("Enter the release version: ")
os.chdir("..\\")
# Build the temp directory
print "Creating temp directories..."
tempdir = ".\\tmp\\Data\\"
if os.path.isdir(tempdir):
print "Removing old temp directory..."
shutil.rmtree(".\\tmp")
os.makedirs('./tmp/Data/readmes')
os.makedirs('./tmp/Data/Interface/frostfall')
os.makedirs('./tmp/Data/Interface/exported/widgets/frostfall')
os.makedirs('./tmp/Data/Interface/Translations')
os.makedirs('./tmp/Data/meshes/frostfall')
os.makedirs('./tmp/Data/Scripts/Source')
os.makedirs('./tmp/Data/sound/fx/frostfall')
os.makedirs('./tmp/Data/textures/frostfall')
# Copy the project files
print "Copying project files..."
with open("./Campfire/FrostfallArchiveManifest.txt") as manifest:
lines = manifest.readlines()
for line in lines:
shutil.copy(".\\Campfire\\" + line.rstrip('\n'), tempdir + line.rstrip('\n'))
# Build the directories
dirname = "./Frostfall " + user_input + " Release"
if not os.path.isdir(dirname):
print "Creating new build..."
os.mkdir(dirname)
else:
print "Removing old build of same version..."
shutil.rmtree(dirname)
os.mkdir(dirname)
os.makedirs(dirname + "/Frostfall/readmes")
os.makedirs(dirname + "/Frostfall/SKSE/Plugins/FrostfallData")
os.makedirs(dirname + "/SkyUI51AddOn/readmes")
os.makedirs(dirname + "/SkyUI51AddOn/SKSE/Plugins/FrostfallData")
os.makedirs(dirname + "/SkyUI51AddOn/Interface/Translations")
os.makedirs(dirname + "/SkyUI51AddOn/Interface/skyui")
os.makedirs(dirname + "/fomod")
# Generate BSA archive
print "Generating BSA archive..."
shutil.copy('./Campfire/Archive.exe', './tmp/Archive.exe')
shutil.copy('./Campfire/FrostfallArchiveBuilder.txt', './tmp/FrostfallArchiveBuilder.txt')
shutil.copy('./Campfire/FrostfallArchiveManifest.txt', './tmp/FrostfallArchiveManifest.txt')
os.chdir("./tmp")
subprocess.call(['./Archive.exe', './FrostfallArchiveBuilder.txt'])
os.chdir("..\\")
# Copy files - Mod
shutil.copyfile("./Campfire/Frostfall.esp", dirname + "/Frostfall/Frostfall.esp")
shutil.copyfile("./tmp/Frostfall.bsa", dirname + "/Frostfall/Frostfall.bsa")
shutil.copyfile("./Campfire/SKSE/Plugins/FrostfallData/READ_THIS_PLEASE_AND_DO_NOT_DELETE.txt", dirname + "/Frostfall/SKSE/Plugins/FrostfallData/READ_THIS_PLEASE_AND_DO_NOT_DELETE.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_readme.txt", dirname + "/Frostfall/readmes/Frostfall_readme.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_license.txt", dirname + "/Frostfall/readmes/Frostfall_license.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_changelog.txt", dirname + "/Frostfall/readmes/Frostfall_changelog.txt")
# Copy files - add-on
shutil.copyfile("./Campfire/readmes/Frostfall_SkyUI_AddOn_readme.txt", dirname + "/SkyUI51AddOn/readmes/Frostfall_SkyUI_AddOn_readme.txt")
shutil.copyfile("./Campfire/SKSE/Plugins/FrostfallData/interface_package_version.json", dirname + "/SkyUI51AddOn/SKSE/Plugins/FrostfallData/interface_package_version.json")
shutil.copyfile("./Campfire/Interface/bartermenu.swf", dirname + "/SkyUI51AddOn/Interface/bartermenu.swf")
shutil.copyfile("./Campfire/Interface/containermenu.swf", dirname + "/SkyUI51AddOn/Interface/containermenu.swf")
shutil.copyfile("./Campfire/Interface/craftingmenu.swf", dirname + "/SkyUI51AddOn/Interface/craftingmenu.swf")
shutil.copyfile("./Campfire/Interface/inventorymenu.swf", dirname + "/SkyUI51AddOn/Interface/inventorymenu.swf")
shutil.copyfile("./Campfire/Interface/skyui/bottombar.swf", dirname + "/SkyUI51AddOn/Interface/skyui/bottombar.swf")
shutil.copyfile("./Campfire/Interface/skyui/itemcard.swf", dirname + "/SkyUI51AddOn/Interface/skyui/itemcard.swf")
shutil.copyfile("./Campfire/Interface/Translations/skyui_czech.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_czech.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_english.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_english.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_french.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_french.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_german.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_german.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_italian.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_italian.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_japanese.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_japanese.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_polish.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_polish.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_russian.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_russian.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_spanish.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_spanish.txt")
# Copy files - Installer
shutil.copyfile("./Campfire/Installers/Frostfall/InstallSplash1.jpg", dirname + "/InstallSplash1.jpg")
shutil.copyfile("./Campfire/Installers/Frostfall/InstallSplash2.jpg", dirname + "/InstallSplash2.jpg")
shutil.copyfile("./Campfire/Installers/Frostfall/fomod/info.xml", dirname + "/fomod/info.xml")
shutil.copyfile("./Campfire/Installers/Frostfall/fomod/ModuleConfig.xml", dirname + "/fomod/ModuleConfig.xml")
# Create release zip
zip_name_ver = user_input.replace(".", "_")
shutil.make_archive("./Frostfall_" + zip_name_ver + "_Release", format="zip", root_dir=dirname)
shutil.move("./Frostfall_" + zip_name_ver + "_Release.zip", dirname + "/Frostfall_" + zip_name_ver + "_Release.zip")
print "Created " + dirname + "/Frostfall_" + zip_name_ver + "_Release.zip"
# Clean Up
print "Removing temp files..."
shutil.rmtree("./tmp")
print "Done!" | Frostfall_BuildRelease.py | import os
import shutil
import subprocess
print " "
print "==============================="
print "| Frostfall Release Builder |"
print "| \/ |"
print "| _\_\/\/_/_ |"
print "| _\_\/_/_ |"
print "| __/_/\_\__ |"
print "| / /\/\ \ |"
print "| /\ |"
print "==============================="
print " "
user_input = raw_input("Enter the release version: ")
os.chdir("..\\")
# Build the temp directory
print "Creating temp directories..."
tempdir = ".\\tmp\\Data\\"
if os.path.isdir(tempdir):
print "Removing old temp directory..."
shutil.rmtree(".\\tmp")
os.makedirs('./tmp/Data/readmes')
os.makedirs('./tmp/Data/Interface/frostfall')
os.makedirs('./tmp/Data/Interface/exported/widgets/frostfall')
os.makedirs('./tmp/Data/Interface/Translations')
os.makedirs('./tmp/Data/meshes/frostfall')
os.makedirs('./tmp/Data/Scripts/Source')
os.makedirs('./tmp/Data/sound/fx/frostfall')
os.makedirs('./tmp/Data/textures/frostfall')
# Copy the project files
print "Copying project files..."
with open("./Campfire/FrostfallArchiveManifest.txt") as manifest:
lines = manifest.readlines()
for line in lines:
shutil.copy(".\\Campfire\\" + line.rstrip('\n'), tempdir + line.rstrip('\n'))
# Build the directories
dirname = "./Frostfall " + user_input + " Release"
if not os.path.isdir(dirname):
print "Creating new build..."
os.mkdir(dirname)
else:
print "Removing old build of same version..."
shutil.rmtree(dirname)
os.mkdir(dirname)
os.makedirs(dirname + "/Frostfall/readmes")
os.makedirs(dirname + "/Frostfall/SKSE/Plugins/FrostfallData")
os.makedirs(dirname + "/SkyUI51AddOn/readmes")
os.makedirs(dirname + "/SkyUI51AddOn/SKSE/Plugins/FrostfallData")
os.makedirs(dirname + "/SkyUI51AddOn/Interface/Translations")
os.makedirs(dirname + "/SkyUI51AddOn/Interface/skyui")
os.makedirs(dirname + "/fomod")
# Generate BSA archive
print "Generating BSA archive..."
shutil.copy('./Campfire/Archive.exe', './tmp/Archive.exe')
shutil.copy('./Campfire/FrostfallArchiveBuilder.txt', './tmp/FrostfallArchiveBuilder.txt')
shutil.copy('./Campfire/FrostfallArchiveManifest.txt', './tmp/FrostfallArchiveManifest.txt')
os.chdir("./tmp")
subprocess.call(['./Archive.exe', './FrostfallArchiveBuilder.txt'])
os.chdir("..\\")
# Copy files - Mod
shutil.copyfile("./Campfire/Frostfall.esp", dirname + "/Frostfall/Frostfall.esp")
shutil.copyfile("./tmp/Frostfall.bsa", dirname + "/Frostfall/Frostfall.bsa")
shutil.copyfile("./Campfire/SKSE/Plugins/FrostfallData/READ_THIS_PLEASE_AND_DO_NOT_DELETE.txt", dirname + "/Frostfall/SKSE/Plugins/FrostfallData/READ_THIS_PLEASE_AND_DO_NOT_DELETE.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_readme.txt", dirname + "/Frostfall/readmes/Frostfall_readme.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_license.txt", dirname + "/Frostfall/readmes/Frostfall_license.txt")
shutil.copyfile("./Campfire/readmes/Frostfall_changelog.txt", dirname + "/Frostfall/readmes/Frostfall_changelog.txt")
# Copy files - add-on
shutil.copyfile("./Campfire/readmes/Frostfall_SkyUI_AddOn_readme.txt", dirname + "/SkyUI51AddOn/readmes/Frostfall_SkyUI_AddOn_readme.txt")
shutil.copyfile("./Campfire/SKSE/Plugins/FrostfallData/interface_package_version.json", dirname + "/SkyUI51AddOn/SKSE/Plugins/FrostfallData/interface_package_version.json")
shutil.copyfile("./Campfire/Interface/bartermenu.swf", dirname + "/SkyUI51AddOn/Interface/bartermenu.swf")
shutil.copyfile("./Campfire/Interface/containermenu.swf", dirname + "/SkyUI51AddOn/Interface/containermenu.swf")
shutil.copyfile("./Campfire/Interface/craftingmenu.swf", dirname + "/SkyUI51AddOn/Interface/craftingmenu.swf")
shutil.copyfile("./Campfire/Interface/inventorymenu.swf", dirname + "/SkyUI51AddOn/Interface/inventorymenu.swf")
shutil.copyfile("./Campfire/Interface/skyui/bottombar.swf", dirname + "/SkyUI51AddOn/Interface/skyui/bottombar.swf")
shutil.copyfile("./Campfire/Interface/skyui/itemcard.swf", dirname + "/SkyUI51AddOn/Interface/skyui/itemcard.swf")
shutil.copyfile("./Campfire/Interface/Translations/skyui_czech.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_czech.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_english.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_english.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_french.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_french.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_german.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_german.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_italian.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_italian.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_japanese.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_japanese.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_polish.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_polish.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_russian.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_russian.txt")
shutil.copyfile("./Campfire/Interface/Translations/skyui_spanish.txt", dirname + "/SkyUI51AddOn/Interface/Translations/skyui_spanish.txt")
# Copy files - Installer
shutil.copyfile("./Campfire/Installers/Frostfall/InstallSplash1.jpg", dirname + "/InstallSplash1.jpg")
shutil.copyfile("./Campfire/Installers/Frostfall/InstallSplash2.jpg", dirname + "/InstallSplash2.jpg")
shutil.copyfile("./Campfire/Installers/Frostfall/fomod/info.xml", dirname + "/fomod/info.xml")
shutil.copyfile("./Campfire/Installers/Frostfall/fomod/ModuleConfig.xml", dirname + "/fomod/ModuleConfig.xml")
# Create release zip
zip_name_ver = user_input.replace(".", "_")
shutil.make_archive("./Frostfall_" + zip_name_ver + "_Release", format="zip", root_dir=dirname)
shutil.move("./Frostfall_" + zip_name_ver + "_Release.zip", dirname + "/Frostfall_" + zip_name_ver + "_Release.zip")
print "Created " + dirname + "/Frostfall_" + zip_name_ver + "_Release.zip"
# Clean Up
print "Removing temp files..."
shutil.rmtree("./tmp")
print "Done!" | 0.096025 | 0.033812 |
import logging
from makobot.utils import reaction_to_int
logger = logging.getLogger(__name__)
class Plugin(object):
@property
def enabled(self):
"""
REturns true if the plugin has been enabled or false if not.
Typically this will check if the necessary environment variables are
set.
:returns: True if enabled, False if disabled
:rtype: boolean
"""
return False
def activate(self):
"""
Handles the activation of the plugin, typically this would be
instantiating a client or something similar.
"""
raise NotImplementedError('Plugin activate method not implemented')
def extract(self, message):
"""
Extracts the relevant values from a message for use when generating a
report. Values are expected to be stored as an attribute of the Plugin
class.
"""
raise NotImplementedError('Plugin extract method not implemented')
def report(self, message, active=True):
"""
Reports any potential vulnerabilities via Slackbot. If active then the
expected response is a reply, if not active (passive) then send only
messages that meet a certain threshold to reduce noise.
"""
self.retrieve()
logger.debug('Found %s reports' %
len([r for r in self.reports.values() if r]))
for subject, report in self.reports.items():
if report:
logger.debug('Have a report for %s: %s' % (subject, report))
if active:
logger.debug('Bot is active, reporting...')
message.reply(self.format(subject, report))
elif self.threshold_met(report):
logger.debug(
'Bot passive, but threshold met, reporting...')
message.send(self.format(subject, report))
else:
logger.debug(
'Bot passive and threshold not met, skipping...')
else:
logger.debug('No report for %s, skipping...' % subject)
# TODO: Move this to a wrapper class
reaction = self.react()
self.promote_reaction(message, reaction)
def retrieve(self):
"""
Retrieves reports from the configured reporting service and populates
the reports dict accordingly. This method should work in concert with
the extract method.
"""
raise NotImplementedError('Plugin retrieve method not implemented')
def format(self, subject, report):
"""
Formats a report in some easily and quickly consumed format. This is
typically called via the plugin's report method.
"""
raise NotImplementedError('Plugin format method not implemented')
def threshold_met(self, report):
"""
Determine if a threshold has been met for a report before sending a
message to an entire channel. This method should return a boolean,
where True is to send the message.
:returns: True if threshold met, False if not
:rtype: boolean
"""
return False
def react(self):
"""
Reacts to a report with an an emoticon of some kind. Typically a
weather-based icon representing the severity of the risk is the most
clearly understood.
"""
pass
# FIXME: A more elegant solution for ensuring only one reaction.
def promote_reaction(self, message, reaction):
"""
Determines if the latest reaction is more severe than the current one.
Only the most severe reaction should be used.
"""
if reaction:
current = reaction_to_int(getattr(message, 'mako_reaction', 'fog'))
latest = reaction_to_int(reaction)
if latest > current:
message.mako_reaction = reaction | makobot/plugins/base.py |
import logging
from makobot.utils import reaction_to_int
logger = logging.getLogger(__name__)
class Plugin(object):
@property
def enabled(self):
"""
REturns true if the plugin has been enabled or false if not.
Typically this will check if the necessary environment variables are
set.
:returns: True if enabled, False if disabled
:rtype: boolean
"""
return False
def activate(self):
"""
Handles the activation of the plugin, typically this would be
instantiating a client or something similar.
"""
raise NotImplementedError('Plugin activate method not implemented')
def extract(self, message):
"""
Extracts the relevant values from a message for use when generating a
report. Values are expected to be stored as an attribute of the Plugin
class.
"""
raise NotImplementedError('Plugin extract method not implemented')
def report(self, message, active=True):
"""
Reports any potential vulnerabilities via Slackbot. If active then the
expected response is a reply, if not active (passive) then send only
messages that meet a certain threshold to reduce noise.
"""
self.retrieve()
logger.debug('Found %s reports' %
len([r for r in self.reports.values() if r]))
for subject, report in self.reports.items():
if report:
logger.debug('Have a report for %s: %s' % (subject, report))
if active:
logger.debug('Bot is active, reporting...')
message.reply(self.format(subject, report))
elif self.threshold_met(report):
logger.debug(
'Bot passive, but threshold met, reporting...')
message.send(self.format(subject, report))
else:
logger.debug(
'Bot passive and threshold not met, skipping...')
else:
logger.debug('No report for %s, skipping...' % subject)
# TODO: Move this to a wrapper class
reaction = self.react()
self.promote_reaction(message, reaction)
def retrieve(self):
"""
Retrieves reports from the configured reporting service and populates
the reports dict accordingly. This method should work in concert with
the extract method.
"""
raise NotImplementedError('Plugin retrieve method not implemented')
def format(self, subject, report):
"""
Formats a report in some easily and quickly consumed format. This is
typically called via the plugin's report method.
"""
raise NotImplementedError('Plugin format method not implemented')
def threshold_met(self, report):
"""
Determine if a threshold has been met for a report before sending a
message to an entire channel. This method should return a boolean,
where True is to send the message.
:returns: True if threshold met, False if not
:rtype: boolean
"""
return False
def react(self):
"""
Reacts to a report with an an emoticon of some kind. Typically a
weather-based icon representing the severity of the risk is the most
clearly understood.
"""
pass
# FIXME: A more elegant solution for ensuring only one reaction.
def promote_reaction(self, message, reaction):
"""
Determines if the latest reaction is more severe than the current one.
Only the most severe reaction should be used.
"""
if reaction:
current = reaction_to_int(getattr(message, 'mako_reaction', 'fog'))
latest = reaction_to_int(reaction)
if latest > current:
message.mako_reaction = reaction | 0.550124 | 0.351172 |
import datetime
from django.urls import reverse
from systori.lib.testing import ClientTestCase
from ..project.factories import ProjectFactory
from .factories import JobFactory, GroupFactory, TaskFactory, LineItemFactory
from .models import Task, Job, ProgressReport, ExpendReport
from .views import JobCopy, JobPaste
class JobViewsTest(ClientTestCase):
def test_create(self):
project = ProjectFactory()
self.assertEqual(Job.objects.count(), 0)
response = self.client.post(
reverse("job.create", args=[project.pk]), data={"name": "New Job"}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Job.objects.count(), 1)
def test_get_editor(self):
job = JobFactory(
name="job name", description="new job description", project=ProjectFactory()
) # type: Job
self.assertEqual(
reverse("job.editor", args=[job.project.pk, job.pk]), job.get_absolute_url()
)
response = self.client.get(job.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertIn(b"new job description", response.content)
self.assertEqual(response.context["job"], job)
def test_delete(self):
job = JobFactory(project=ProjectFactory())
self.assertEqual(Job.objects.count(), 1)
response = self.client.post(reverse("job.delete", args=[job.pk]))
self.assertEqual(response.status_code, 302)
self.assertEqual(Job.objects.count(), 0)
class JobProgressTest(ClientTestCase):
def setUp(self):
super().setUp()
self.job = JobFactory(
name="job name", description="new job description", project=ProjectFactory()
) # type: Job
def test_get_form(self):
self.client.get(
reverse("job.progress", args=[self.job.project.pk, self.job.pk])
)
def test_status_complete_happy_path(self):
self.assertEqual(self.job.status, Job.DRAFT)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{"status_complete": "true"},
)
self.assertEqual(response.status_code, 302)
self.job.refresh_from_db()
self.assertEqual(self.job.status, Job.COMPLETED)
def test_change_task_progress(self):
task = TaskFactory(group=self.job, qty=10, price=5, total=50)
job = Job.objects.get()
self.assertEqual(job.progress_percent, 0)
self.assertEqual(ProgressReport.objects.count(), 0)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_onehundred": "true",
"progress_date": "01/01/2001",
"comment": "default comment",
"task-{}-complete".format(task.id): 10,
"task-{}-worker".format(task.id): self.worker.id,
"task-{}-comment".format(task.id): "specific comment",
},
)
self.assertEqual(response.status_code, 302)
job = Job.objects.get()
self.assertEqual(job.status, Job.DRAFT)
self.assertEqual(job.progress_percent, 100)
progress = ProgressReport.objects.get()
self.assertEqual(progress.task, task)
self.assertEqual(progress.complete, 10)
self.assertEqual(progress.comment, "specific comment")
self.assertEqual(progress.worker, self.worker)
def test_change_task_progress_default_comment(self):
task = TaskFactory(group=self.job, qty=10, price=5, total=50)
self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_date": "01/01/2001",
"comment": "default comment",
"task-{}-complete".format(task.id): 10,
"task-{}-worker".format(task.id): self.worker.id,
"task-{}-comment".format(task.id): "",
},
)
progress = ProgressReport.objects.get()
self.assertEqual(progress.comment, "default comment")
def test_change_lineitem_progress(self):
task = TaskFactory(group=self.job, qty=None, price=5, total=50)
lineitem = LineItemFactory(task=task, qty=10, price=5, total=50)
job = Job.objects.get()
self.assertEqual(job.progress_percent, 0)
self.assertEqual(ExpendReport.objects.count(), 0)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_date": "01/01/2001",
"comment": "default comment",
"li-{}-complete".format(lineitem.id): 10,
"li-{}-worker".format(lineitem.id): self.worker.id,
"li-{}-comment".format(lineitem.id): "specific comment",
},
)
self.assertEqual(response.status_code, 302)
job = Job.objects.get()
self.assertEqual(job.status, Job.DRAFT)
self.assertEqual(job.progress_percent, 100)
expend = ExpendReport.objects.get()
self.assertEqual(expend.lineitem, lineitem)
self.assertEqual(expend.expended, 10)
self.assertEqual(expend.comment, "specific comment")
self.assertEqual(expend.worker, self.worker)
class JobCopyPasteTest(ClientTestCase):
def copy(self, job):
self.client.get(reverse("job.copy", args=[job.project.pk, job.pk]))
def test_paste_job_010101(self):
project = ProjectFactory()
job = JobFactory(
name="job name", description="new job description", project=project
) # type: Job
group = GroupFactory(name="my group", parent=job)
task = TaskFactory(
group=group,
name="<NAME>",
qty=7,
complete=7,
status=Task.RUNNING,
started_on=datetime.date.today(),
completed_on=datetime.date.today(),
)
LineItemFactory(task=task)
self.copy(job)
response = self.client.get(reverse("job.paste", args=[project.pk]))
form = response.context["form"]
self.assertEqual(form["name"].value(), job.name)
self.assertEqual(form["job_template"].value(), job.pk)
response = self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(project.jobs.count(), 2)
new_job = project.jobs.exclude(pk=job.pk).get()
self.assertIsNotNone(new_job.account)
self.assertEqual(new_job.name, "job name changed")
self.assertEqual(
job.groups.first().tasks.first().name,
new_job.groups.first().tasks.first().name,
)
self.assertEqual(
job.groups.first().tasks.first().lineitems.first().name,
new_job.groups.first().tasks.first().lineitems.first().name,
)
def test_copy_job_0101(self):
project = ProjectFactory(structure="01.01")
job = JobFactory(
name="job name", description="new job description", project=project
) # type: Job
task = TaskFactory(
group=job,
name="some task",
qty=7,
complete=7,
status=Task.RUNNING,
started_on=datetime.date.today(),
completed_on=datetime.date.today(),
)
LineItemFactory(task=task)
self.copy(job)
self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
new_job = project.jobs.exclude(pk=job.pk).get()
self.assertEqual(project.jobs.count(), 2)
self.assertEqual(job.tasks.first().name, new_job.tasks.first().name)
def test_error_on_incompatible_structure(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.copy(job)
project2 = ProjectFactory(structure="01.001")
self.client.post(
reverse("job.paste", args=[project2.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
) # fails because of incompatible project.structure
self.assertEqual(project2.jobs.count(), 0)
def test_finish_and_cancel_job_copy(self):
project = ProjectFactory()
job = JobFactory(project=project)
# first check is to finish a copy paste operation
self.copy(job)
self.assertTrue(JobCopy.SESSION_KEY in self.client.session)
self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
self.assertTrue(JobPaste.SESSION_KEY in self.client.session)
self.client.get(reverse("project.view", args=[project.pk]))
self.assertFalse(
JobCopy.SESSION_KEY in self.client.session
and JobPaste.SESSION_KEY in self.client.session
)
# second check is to sucessfully cancel an paste operatio
self.copy(job)
self.client.get(reverse("job.cancel-paste"))
self.assertFalse(JobCopy.SESSION_KEY in self.client.session)
class JobLockTest(ClientTestCase):
def test_job_lock(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.assertFalse(job.is_locked)
self.client.get(reverse("job.toggle_lock", args=[job.pk]))
job = Job.objects.get(id=job.pk)
self.assertTrue(job.is_locked)
def test_disabled_editor(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.client.get(reverse("job.toggle_lock", args=[job.pk]))
response = self.client.get(reverse("job.editor", args=[job.pk]))
self.assertContains(response, 'contenteditable="False"')
self.assertNotContains(
response, '<script src="/static/dart/build/job_editor.dart.js"></script>'
)
def test_render_project_detail_job_btn(self):
project = ProjectFactory()
JobFactory(project=project)
response = self.client.get(reverse("project.view", args=[project.pk]))
self.assertEqual(response.status_code, 200) | systori/apps/task/test_views.py | import datetime
from django.urls import reverse
from systori.lib.testing import ClientTestCase
from ..project.factories import ProjectFactory
from .factories import JobFactory, GroupFactory, TaskFactory, LineItemFactory
from .models import Task, Job, ProgressReport, ExpendReport
from .views import JobCopy, JobPaste
class JobViewsTest(ClientTestCase):
def test_create(self):
project = ProjectFactory()
self.assertEqual(Job.objects.count(), 0)
response = self.client.post(
reverse("job.create", args=[project.pk]), data={"name": "New Job"}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Job.objects.count(), 1)
def test_get_editor(self):
job = JobFactory(
name="job name", description="new job description", project=ProjectFactory()
) # type: Job
self.assertEqual(
reverse("job.editor", args=[job.project.pk, job.pk]), job.get_absolute_url()
)
response = self.client.get(job.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertIn(b"new job description", response.content)
self.assertEqual(response.context["job"], job)
def test_delete(self):
job = JobFactory(project=ProjectFactory())
self.assertEqual(Job.objects.count(), 1)
response = self.client.post(reverse("job.delete", args=[job.pk]))
self.assertEqual(response.status_code, 302)
self.assertEqual(Job.objects.count(), 0)
class JobProgressTest(ClientTestCase):
def setUp(self):
super().setUp()
self.job = JobFactory(
name="job name", description="new job description", project=ProjectFactory()
) # type: Job
def test_get_form(self):
self.client.get(
reverse("job.progress", args=[self.job.project.pk, self.job.pk])
)
def test_status_complete_happy_path(self):
self.assertEqual(self.job.status, Job.DRAFT)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{"status_complete": "true"},
)
self.assertEqual(response.status_code, 302)
self.job.refresh_from_db()
self.assertEqual(self.job.status, Job.COMPLETED)
def test_change_task_progress(self):
task = TaskFactory(group=self.job, qty=10, price=5, total=50)
job = Job.objects.get()
self.assertEqual(job.progress_percent, 0)
self.assertEqual(ProgressReport.objects.count(), 0)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_onehundred": "true",
"progress_date": "01/01/2001",
"comment": "default comment",
"task-{}-complete".format(task.id): 10,
"task-{}-worker".format(task.id): self.worker.id,
"task-{}-comment".format(task.id): "specific comment",
},
)
self.assertEqual(response.status_code, 302)
job = Job.objects.get()
self.assertEqual(job.status, Job.DRAFT)
self.assertEqual(job.progress_percent, 100)
progress = ProgressReport.objects.get()
self.assertEqual(progress.task, task)
self.assertEqual(progress.complete, 10)
self.assertEqual(progress.comment, "specific comment")
self.assertEqual(progress.worker, self.worker)
def test_change_task_progress_default_comment(self):
task = TaskFactory(group=self.job, qty=10, price=5, total=50)
self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_date": "01/01/2001",
"comment": "default comment",
"task-{}-complete".format(task.id): 10,
"task-{}-worker".format(task.id): self.worker.id,
"task-{}-comment".format(task.id): "",
},
)
progress = ProgressReport.objects.get()
self.assertEqual(progress.comment, "default comment")
def test_change_lineitem_progress(self):
task = TaskFactory(group=self.job, qty=None, price=5, total=50)
lineitem = LineItemFactory(task=task, qty=10, price=5, total=50)
job = Job.objects.get()
self.assertEqual(job.progress_percent, 0)
self.assertEqual(ExpendReport.objects.count(), 0)
response = self.client.post(
reverse("job.progress", args=[self.job.project.pk, self.job.pk]),
{
"progress_date": "01/01/2001",
"comment": "default comment",
"li-{}-complete".format(lineitem.id): 10,
"li-{}-worker".format(lineitem.id): self.worker.id,
"li-{}-comment".format(lineitem.id): "specific comment",
},
)
self.assertEqual(response.status_code, 302)
job = Job.objects.get()
self.assertEqual(job.status, Job.DRAFT)
self.assertEqual(job.progress_percent, 100)
expend = ExpendReport.objects.get()
self.assertEqual(expend.lineitem, lineitem)
self.assertEqual(expend.expended, 10)
self.assertEqual(expend.comment, "specific comment")
self.assertEqual(expend.worker, self.worker)
class JobCopyPasteTest(ClientTestCase):
def copy(self, job):
self.client.get(reverse("job.copy", args=[job.project.pk, job.pk]))
def test_paste_job_010101(self):
project = ProjectFactory()
job = JobFactory(
name="job name", description="new job description", project=project
) # type: Job
group = GroupFactory(name="my group", parent=job)
task = TaskFactory(
group=group,
name="<NAME>",
qty=7,
complete=7,
status=Task.RUNNING,
started_on=datetime.date.today(),
completed_on=datetime.date.today(),
)
LineItemFactory(task=task)
self.copy(job)
response = self.client.get(reverse("job.paste", args=[project.pk]))
form = response.context["form"]
self.assertEqual(form["name"].value(), job.name)
self.assertEqual(form["job_template"].value(), job.pk)
response = self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(project.jobs.count(), 2)
new_job = project.jobs.exclude(pk=job.pk).get()
self.assertIsNotNone(new_job.account)
self.assertEqual(new_job.name, "job name changed")
self.assertEqual(
job.groups.first().tasks.first().name,
new_job.groups.first().tasks.first().name,
)
self.assertEqual(
job.groups.first().tasks.first().lineitems.first().name,
new_job.groups.first().tasks.first().lineitems.first().name,
)
def test_copy_job_0101(self):
project = ProjectFactory(structure="01.01")
job = JobFactory(
name="job name", description="new job description", project=project
) # type: Job
task = TaskFactory(
group=job,
name="some task",
qty=7,
complete=7,
status=Task.RUNNING,
started_on=datetime.date.today(),
completed_on=datetime.date.today(),
)
LineItemFactory(task=task)
self.copy(job)
self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
new_job = project.jobs.exclude(pk=job.pk).get()
self.assertEqual(project.jobs.count(), 2)
self.assertEqual(job.tasks.first().name, new_job.tasks.first().name)
def test_error_on_incompatible_structure(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.copy(job)
project2 = ProjectFactory(structure="01.001")
self.client.post(
reverse("job.paste", args=[project2.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
) # fails because of incompatible project.structure
self.assertEqual(project2.jobs.count(), 0)
def test_finish_and_cancel_job_copy(self):
project = ProjectFactory()
job = JobFactory(project=project)
# first check is to finish a copy paste operation
self.copy(job)
self.assertTrue(JobCopy.SESSION_KEY in self.client.session)
self.client.post(
reverse("job.paste", args=[project.pk]),
{
"name": "job name changed",
"description": "job description",
"job_template": job.pk,
},
)
self.assertTrue(JobPaste.SESSION_KEY in self.client.session)
self.client.get(reverse("project.view", args=[project.pk]))
self.assertFalse(
JobCopy.SESSION_KEY in self.client.session
and JobPaste.SESSION_KEY in self.client.session
)
# second check is to sucessfully cancel an paste operatio
self.copy(job)
self.client.get(reverse("job.cancel-paste"))
self.assertFalse(JobCopy.SESSION_KEY in self.client.session)
class JobLockTest(ClientTestCase):
def test_job_lock(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.assertFalse(job.is_locked)
self.client.get(reverse("job.toggle_lock", args=[job.pk]))
job = Job.objects.get(id=job.pk)
self.assertTrue(job.is_locked)
def test_disabled_editor(self):
project = ProjectFactory()
job = JobFactory(project=project)
self.client.get(reverse("job.toggle_lock", args=[job.pk]))
response = self.client.get(reverse("job.editor", args=[job.pk]))
self.assertContains(response, 'contenteditable="False"')
self.assertNotContains(
response, '<script src="/static/dart/build/job_editor.dart.js"></script>'
)
def test_render_project_detail_job_btn(self):
project = ProjectFactory()
JobFactory(project=project)
response = self.client.get(reverse("project.view", args=[project.pk]))
self.assertEqual(response.status_code, 200) | 0.427516 | 0.226891 |
#%% Imports
import numpy as np
from calib_main import calib_main
from load_pickle import load_pickle
#%% Version number
version_num = 'V9'
#%% data path
directory = 'F:\\Arbeit und Uni\\MasterArbeit\\'
# path to the pupil capture data
data_directory = directory + 'Pupil_VR_Recordings\\'
# path to the calibration data from the stimulus script
time_directory = directory + 'HTC_Vive_Recs\\Data\\'
#%% Configurations
disp_plots = 1
# 1. uncalibrated data; 2. GT after calibration
disp_what = [1, 1, 0]
# atm calculated data can't be saved
save_data = 0
# forst check the save directory for the plots
save_plots = 0
#%% choose data set
choose_dataset = 0
if choose_dataset == 0:
# specify the recording you want to calibrate
subj_name = 'olbe'
file_date = '2018_11_20'
file_num = '001'
# capture frequency in Hz
set_fps = 120
# left; right; both
use_eye = 'both'
#%% load calibration times from pickle file
mask_ind_cal,mask_ind_val = load_pickle(time_directory,subj_name,file_date,file_num)
#%% extract calibration grid
gt_px = mask_ind_cal[:,3:5]
#%% specify dots for calibration and validation
cal_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
val_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
#%% choose coefficents for design matrix
choose_coeff = 1
if choose_coeff == 1:
coeff_num_all = 6
cal_form_all_x = [['1','x','y','x^2','y^2','x*y']]
cal_form_all_y = [['1','x','y','x^2','y^2','x*y']]
cal_form_all = [cal_form_all_x, cal_form_all_y]
#%% screen resolutions
screen_width = np.nan
screen_height = np.nan
screen_dist = 1
#%% shorten input data and configs
class CalibConfig(object):
def __init__(self, disp_plots, disp_what, save_data, save_plots):
self.disp_plots = disp_plots
self.disp_what = disp_what
self.save_data = save_data
self.save_plots = save_plots
fct_cfg = CalibConfig(disp_plots, disp_what, save_data, save_plots)
class CalibInputValue(object):
def __init__(self, coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye):
self.coeff_num_all = coeff_num_all
self.cal_form_all = cal_form_all
self.version_num = version_num
self.data_directory = data_directory
self.time_directory = time_directory
self.subj_name = subj_name
self.file_date = file_date
self.file_num = file_num
self.mask_ind_cal = mask_ind_cal
self.mask_ind_val = mask_ind_val
self.cal_dots = cal_dots
self.val_dots = val_dots
self.gt_px = gt_px
self.set_fps = set_fps
self.use_eye = use_eye
fct_in = CalibInputValue(coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye)
class ScreenConfig(object):
def __init__(self, screen_width, screen_height, screen_dist):
self.screen_width = screen_width
self.screen_height = screen_height
self.screen_dist = screen_dist
screen_cfg = ScreenConfig(screen_width, screen_height, screen_dist)
#%% Output
fct_out = calib_main(fct_cfg,fct_in,screen_cfg) | Calib_Tools/calib_start.py | #%% Imports
import numpy as np
from calib_main import calib_main
from load_pickle import load_pickle
#%% Version number
version_num = 'V9'
#%% data path
directory = 'F:\\Arbeit und Uni\\MasterArbeit\\'
# path to the pupil capture data
data_directory = directory + 'Pupil_VR_Recordings\\'
# path to the calibration data from the stimulus script
time_directory = directory + 'HTC_Vive_Recs\\Data\\'
#%% Configurations
disp_plots = 1
# 1. uncalibrated data; 2. GT after calibration
disp_what = [1, 1, 0]
# atm calculated data can't be saved
save_data = 0
# forst check the save directory for the plots
save_plots = 0
#%% choose data set
choose_dataset = 0
if choose_dataset == 0:
# specify the recording you want to calibrate
subj_name = 'olbe'
file_date = '2018_11_20'
file_num = '001'
# capture frequency in Hz
set_fps = 120
# left; right; both
use_eye = 'both'
#%% load calibration times from pickle file
mask_ind_cal,mask_ind_val = load_pickle(time_directory,subj_name,file_date,file_num)
#%% extract calibration grid
gt_px = mask_ind_cal[:,3:5]
#%% specify dots for calibration and validation
cal_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
val_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
#%% choose coefficents for design matrix
choose_coeff = 1
if choose_coeff == 1:
coeff_num_all = 6
cal_form_all_x = [['1','x','y','x^2','y^2','x*y']]
cal_form_all_y = [['1','x','y','x^2','y^2','x*y']]
cal_form_all = [cal_form_all_x, cal_form_all_y]
#%% screen resolutions
screen_width = np.nan
screen_height = np.nan
screen_dist = 1
#%% shorten input data and configs
class CalibConfig(object):
def __init__(self, disp_plots, disp_what, save_data, save_plots):
self.disp_plots = disp_plots
self.disp_what = disp_what
self.save_data = save_data
self.save_plots = save_plots
fct_cfg = CalibConfig(disp_plots, disp_what, save_data, save_plots)
class CalibInputValue(object):
def __init__(self, coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye):
self.coeff_num_all = coeff_num_all
self.cal_form_all = cal_form_all
self.version_num = version_num
self.data_directory = data_directory
self.time_directory = time_directory
self.subj_name = subj_name
self.file_date = file_date
self.file_num = file_num
self.mask_ind_cal = mask_ind_cal
self.mask_ind_val = mask_ind_val
self.cal_dots = cal_dots
self.val_dots = val_dots
self.gt_px = gt_px
self.set_fps = set_fps
self.use_eye = use_eye
fct_in = CalibInputValue(coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye)
class ScreenConfig(object):
def __init__(self, screen_width, screen_height, screen_dist):
self.screen_width = screen_width
self.screen_height = screen_height
self.screen_dist = screen_dist
screen_cfg = ScreenConfig(screen_width, screen_height, screen_dist)
#%% Output
fct_out = calib_main(fct_cfg,fct_in,screen_cfg) | 0.378804 | 0.171685 |
import sympy as sy
import numpy as np
from scipy.signal import cont2discrete
class CostModel(object):
def __init__(self, NX=None, NU=None):
assert NX != None
assert NU != None
self.NX = NX
self.NU = NU
self.Lqq, self.Luu, self.Luq, \
self.Lq, self.Lu, self.L,\
self.Vqq, self.Vq, self.V = self.get_lambdified()
def gen_cost_sympy_function(self):
"""
returns stage and terminal cost function in sympy
"""
raise NotImplementedError
def get_lambdified(self):
q = sy.symbols('q:{0}'.format(self.NX))
u = sy.symbols('u:{0}'.format(self.NU))
L, V = self.gen_cost_sympy_function()
Lq = L.jacobian(q)
Lu = L.jacobian(u)
Lqq = sy.derive_by_array(Lq, q)
Luu = sy.derive_by_array(Lu, u)
Luq = sy.derive_by_array(Lu, q)
Vq = V.jacobian(q)
Vqq = sy.derive_by_array(Vq, q)
return (*[sy.lambdify([q,u], F, ["numpy"]) for F in [np.squeeze(Lqq), np.squeeze(Luu), np.squeeze(Luq), Lq, Lu, L]],
*[sy.lambdify([q], F, ["numpy"]) for F in [np.squeeze(Vqq), Vq, V]])
class quadraticCostModel(CostModel):
def __init__(self, Q=None, R=None, q=None, r=None,
Q_term=None, q_term=None,
x_ref=None, NX=None, NU=None):
assert NX != None
assert NU != None
assert Q.ndim==2 and Q.shape[0]==NX and Q.shape[1]==NX
assert q.ndim==1 and q.shape[0]==NX
assert R.ndim==2 and R.shape[0]==NU and R.shape[1]==NU
assert r.ndim==1 and r.shape[0]==NU
assert Q_term.ndim==2 and Q_term.shape[0]==NX and Q_term.shape[1]==NX
assert q_term.ndim==1 and q_term.shape[0]==NX
self.NX = NX
self.NU = NU
self.Q = Q
self.q = q
self.R = R
self.r = r
self.Qf = Q_term
self.qf = q_term
if x_ref is None:
self.x_ref = np.zeros(NX)
else:
self.x_ref = x_ref
super().__init__(NX=NX, NU=NU)
def gen_cost_sympy_function(self):
q = sy.symbols('q:{0}'.format(self.NX))
u = sy.symbols('u:{0}'.format(self.NU))
q_vec = sy.Matrix([e-self.x_ref[i] for i,e in enumerate(q)])
u_vec = sy.Matrix([_ for _ in u])
Q_weight = sy.Matrix(self.Q)
R_weight = sy.Matrix(self.R)
q_weight = sy.Matrix(self.q)
r_weight = sy.Matrix(self.r)
Qf_weight = sy.Matrix(self.Qf)
qf_weight = sy.Matrix(self.qf)
L = q_vec.transpose()*Q_weight*q_vec + u_vec.transpose()*R_weight*u_vec\
+ q_weight.transpose()*q_vec + r_weight.transpose()*u_vec
V = q_vec.transpose()*Qf_weight*q_vec + q_weight.transpose()*q_vec
return L, V | classic_gym/cost/__init__.py | import sympy as sy
import numpy as np
from scipy.signal import cont2discrete
class CostModel(object):
    """Base class that lambdifies a sympy stage cost L(q, u) and terminal cost
    V(q) together with their first and second derivatives.

    Subclasses implement gen_cost_sympy_function(); the constructor then builds
    numpy-backed callables for the costs and their derivatives.
    """

    def __init__(self, NX=None, NU=None):
        # NX: state dimension, NU: input dimension (both required).
        assert NX != None
        assert NU != None
        self.NX = NX
        self.NU = NU
        # Callable stage-cost derivatives (args: q, u) and terminal-cost
        # derivatives (arg: q), produced once at construction time.
        self.Lqq, self.Luu, self.Luq, \
        self.Lq, self.Lu, self.L,\
        self.Vqq, self.Vq, self.V = self.get_lambdified()

    def gen_cost_sympy_function(self):
        """
        returns stage and terminal cost function in sympy
        """
        raise NotImplementedError

    def get_lambdified(self):
        """Build numpy-backed callables for L, V and their derivatives.

        Returns (Lqq, Luu, Luq, Lq, Lu, L, Vqq, Vq, V); the first six take
        (q, u), the last three take (q).
        """
        q = sy.symbols('q:{0}'.format(self.NX))
        u = sy.symbols('u:{0}'.format(self.NU))
        L, V = self.gen_cost_sympy_function()
        Lq = L.jacobian(q)
        Lu = L.jacobian(u)
        Lqq = sy.derive_by_array(Lq, q)   # second derivative wrt state
        Luu = sy.derive_by_array(Lu, u)   # second derivative wrt input
        Luq = sy.derive_by_array(Lu, q)   # mixed second derivative
        Vq = V.jacobian(q)
        Vqq = sy.derive_by_array(Vq, q)
        # np.squeeze drops the singleton axes introduced by derive_by_array
        # before the expressions are lambdified.
        return (*[sy.lambdify([q,u], F, ["numpy"]) for F in [np.squeeze(Lqq), np.squeeze(Luu), np.squeeze(Luq), Lq, Lu, L]],
                *[sy.lambdify([q], F, ["numpy"]) for F in [np.squeeze(Vqq), Vq, V]])
class quadraticCostModel(CostModel):
    """Quadratic cost model.

    Stage cost:    L(q, u) = (q - x_ref)^T Q (q - x_ref) + u^T R u
                             + q_lin^T (q - x_ref) + r^T u
    Terminal cost: V(q)    = (q - x_ref)^T Q_term (q - x_ref)
                             + q_term^T (q - x_ref)
    """

    def __init__(self, Q=None, R=None, q=None, r=None,
                 Q_term=None, q_term=None,
                 x_ref=None, NX=None, NU=None):
        """Validate weight shapes against NX/NU and build the lambdified costs.

        Q, q:            quadratic/linear state weights (NX x NX, NX)
        R, r:            quadratic/linear input weights (NU x NU, NU)
        Q_term, q_term:  terminal quadratic/linear state weights
        x_ref:           state reference; defaults to the origin
        """
        assert NX is not None
        assert NU is not None
        assert Q.ndim == 2 and Q.shape == (NX, NX)
        assert q.ndim == 1 and q.shape[0] == NX
        assert R.ndim == 2 and R.shape == (NU, NU)
        assert r.ndim == 1 and r.shape[0] == NU
        assert Q_term.ndim == 2 and Q_term.shape == (NX, NX)
        assert q_term.ndim == 1 and q_term.shape[0] == NX
        self.NX = NX
        self.NU = NU
        self.Q = Q          # quadratic state weight
        self.q = q          # linear state weight
        self.R = R          # quadratic input weight
        self.r = r          # linear input weight
        self.Qf = Q_term    # terminal quadratic state weight
        self.qf = q_term    # terminal linear state weight
        self.x_ref = np.zeros(NX) if x_ref is None else x_ref
        # Base class lambdifies the costs and their derivatives.
        super().__init__(NX=NX, NU=NU)

    def gen_cost_sympy_function(self):
        """Return (L, V) as sympy 1x1 matrices in symbols q0..q{NX-1}, u0..u{NU-1}."""
        q = sy.symbols('q:{0}'.format(self.NX))
        u = sy.symbols('u:{0}'.format(self.NU))
        q_vec = sy.Matrix([e - self.x_ref[i] for i, e in enumerate(q)])  # state error
        u_vec = sy.Matrix(list(u))
        Q_weight = sy.Matrix(self.Q)
        R_weight = sy.Matrix(self.R)
        q_weight = sy.Matrix(self.q)
        r_weight = sy.Matrix(self.r)
        Qf_weight = sy.Matrix(self.Qf)
        qf_weight = sy.Matrix(self.qf)
        L = (q_vec.transpose() * Q_weight * q_vec
             + u_vec.transpose() * R_weight * u_vec
             + q_weight.transpose() * q_vec
             + r_weight.transpose() * u_vec)
        # BUG FIX: the terminal linear term previously reused q_weight, which
        # left the (required, shape-checked) q_term argument entirely unused.
        V = q_vec.transpose() * Qf_weight * q_vec + qf_weight.transpose() * q_vec
        return L, V
"""Tests for the file-like object implementation using the SleuthKit (TSK)."""
import os
import unittest
from dfvfs.file_io import tsk_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests.file_io import test_lib
class TSKFileTestExt2(test_lib.Ext2ImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on ext2."""

  # Inode numbers of the fixture files inside the ext2.raw test image.
  _IDENTIFIER_ANOTHER_FILE = 15
  _IDENTIFIER_PASSWORDS_TXT = 14

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestExt2, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['ext2.raw'])
    self._SkipIfPathNotExists(test_path)
    # Build the OS path spec -> RAW storage media image path spec chain.
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an inode."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK,
        inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 116)

    # TODO: add a failing scenario.

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 116)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      file_object.Open()

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._IDENTIFIER_ANOTHER_FILE,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 22)

    file_object.seek(10)
    self.assertEqual(file_object.read(5), b'other')
    self.assertEqual(file_object.get_offset(), 15)

    file_object.seek(-10, os.SEEK_END)
    self.assertEqual(file_object.read(5), b'her f')

    file_object.seek(2, os.SEEK_CUR)
    self.assertEqual(file_object.read(2), b'e.')

    # Conforming to the POSIX seek the offset can exceed the file size
    # but reading will result in no data being returned.
    file_object.seek(300, os.SEEK_SET)
    self.assertEqual(file_object.get_offset(), 300)
    self.assertEqual(file_object.read(2), b'')

    with self.assertRaises(IOError):
      file_object.seek(-10, os.SEEK_SET)

    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)

    with self.assertRaises(IOError):
      # 5 is not a valid whence value (os.SEEK_* are 0, 1 and 2).
      file_object.seek(10, 5)

    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    read_buffer = file_object.read()

    expected_buffer = (
        b'place,user,password\n'
        b'bank,joesmith,superrich\n'
        b'alarm system,-,1234\n'
        b'treasure chest,-,1111\n'
        b'uber secret laire,admin,admin\n')

    self.assertEqual(read_buffer, expected_buffer)

    # TODO: add boundary scenarios.
class TSKFileTestHFS(test_lib.HFSImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on HFS."""

  # Identifiers (passed as inode) of the fixture files inside hfsplus.raw.
  _IDENTIFIER_ANOTHER_FILE = 21
  _IDENTIFIER_PASSWORDS_TXT = 20

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestHFS, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['hfsplus.raw'])
    self._SkipIfPathNotExists(test_path)
    # Build the OS path spec -> RAW storage media image path spec chain.
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an identifier."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    # The actual assertions live in the shared test_lib helper.
    self._TestOpenCloseIdentifier(file_object)

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseLocation(file_object)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      self._TestOpenCloseLocation(file_object)

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestSeek(file_object)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestRead(file_object)

  def testReadResourceFork(self):
    """Test the read functionality on a resource fork."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, data_stream='rsrc', inode=25,
        location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestReadResourceFork(file_object)
class TSKFileTestNTFS(test_lib.NTFSImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on NTFS."""

  # NOTE(review): unlike the ext2/HFS cases, setUp neither creates
  # self._resolver_context nor defines a tearDown, yet the test methods pass
  # self._resolver_context to TSKFile — presumably NTFSImageFileTestCase
  # provides it; verify.

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestNTFS, self).setUp()
    test_path = self._GetTestFilePath(['ntfs.raw'])
    self._SkipIfPathNotExists(test_path)
    # NTFS path specs here chain directly off the OS path spec (no RAW layer).
    self._os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)

  def testOpenCloseMFTEntry(self):
    """Test the open and close functionality using a MFT entry."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, inode=self._MFT_ENTRY_PASSWORDS_TXT,
        parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    # The actual assertions live in the shared test_lib helper.
    self._TestOpenCloseMFTEntry(file_object)

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseLocation(file_object)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      self._TestOpenCloseLocation(file_object)

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._MFT_ENTRY_ANOTHER_FILE, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestSeek(file_object)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._MFT_ENTRY_PASSWORDS_TXT, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestRead(file_object)

  def testReadADS(self):
    """Test the read functionality on an alternate data stream (ADS)."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, data_stream='$SDS', location='/$Secure',
        inode=9, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestReadADS(file_object)
if __name__ == '__main__':
unittest.main() | tests/file_io/tsk_file_io.py | """Tests for the file-like object implementation using the SleuthKit (TSK)."""
import os
import unittest
from dfvfs.file_io import tsk_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests.file_io import test_lib
# NOTE(review): this region of the dump duplicates the three TSK test classes
# that appear earlier in the file (dataset code/parsed_code duplication).
class TSKFileTestExt2(test_lib.Ext2ImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on ext2."""

  # Inode numbers of the fixture files inside the ext2.raw test image.
  _IDENTIFIER_ANOTHER_FILE = 15
  _IDENTIFIER_PASSWORDS_TXT = 14

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestExt2, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['ext2.raw'])
    self._SkipIfPathNotExists(test_path)
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an inode."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK,
        inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 116)

    # TODO: add a failing scenario.

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 116)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      file_object.Open()

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._IDENTIFIER_ANOTHER_FILE,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    self.assertEqual(file_object.get_size(), 22)

    file_object.seek(10)
    self.assertEqual(file_object.read(5), b'other')
    self.assertEqual(file_object.get_offset(), 15)

    file_object.seek(-10, os.SEEK_END)
    self.assertEqual(file_object.read(5), b'her f')

    file_object.seek(2, os.SEEK_CUR)
    self.assertEqual(file_object.read(2), b'e.')

    # Conforming to the POSIX seek the offset can exceed the file size
    # but reading will result in no data being returned.
    file_object.seek(300, os.SEEK_SET)
    self.assertEqual(file_object.get_offset(), 300)
    self.assertEqual(file_object.read(2), b'')

    with self.assertRaises(IOError):
      file_object.seek(-10, os.SEEK_SET)

    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)

    with self.assertRaises(IOError):
      # 5 is not a valid whence value (os.SEEK_* are 0, 1 and 2).
      file_object.seek(10, 5)

    # On error the offset should not change.
    self.assertEqual(file_object.get_offset(), 300)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)
    file_object.Open()

    read_buffer = file_object.read()

    expected_buffer = (
        b'place,user,password\n'
        b'bank,joesmith,superrich\n'
        b'alarm system,-,1234\n'
        b'treasure chest,-,1111\n'
        b'uber secret laire,admin,admin\n')

    self.assertEqual(read_buffer, expected_buffer)

    # TODO: add boundary scenarios.


class TSKFileTestHFS(test_lib.HFSImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on HFS."""

  # Identifiers (passed as inode) of the fixture files inside hfsplus.raw.
  _IDENTIFIER_ANOTHER_FILE = 21
  _IDENTIFIER_PASSWORDS_TXT = 20

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestHFS, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['hfsplus.raw'])
    self._SkipIfPathNotExists(test_path)
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an identifier."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, inode=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseIdentifier(file_object)

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseLocation(file_object)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      self._TestOpenCloseLocation(file_object)

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestSeek(file_object)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._IDENTIFIER_PASSWORDS_TXT, parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestRead(file_object)

  def testReadResourceFork(self):
    """Test the read functionality on a resource fork."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, data_stream='rsrc', inode=25,
        location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestReadResourceFork(file_object)


class TSKFileTestNTFS(test_lib.NTFSImageFileTestCase):
  """Tests the SleuthKit (TSK) file-like object on NTFS."""

  # NOTE(review): setUp neither creates self._resolver_context nor defines a
  # tearDown, yet the test methods use self._resolver_context — presumably
  # NTFSImageFileTestCase provides it; verify.

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(TSKFileTestNTFS, self).setUp()
    test_path = self._GetTestFilePath(['ntfs.raw'])
    self._SkipIfPathNotExists(test_path)
    self._os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)

  def testOpenCloseMFTEntry(self):
    """Test the open and close functionality using a MFT entry."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, inode=self._MFT_ENTRY_PASSWORDS_TXT,
        parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseMFTEntry(file_object)

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestOpenCloseLocation(file_object)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    with self.assertRaises(errors.PathSpecError):
      self._TestOpenCloseLocation(file_object)

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',
        inode=self._MFT_ENTRY_ANOTHER_FILE, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestSeek(file_object)

  def testRead(self):
    """Test the read functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location='/passwords.txt',
        inode=self._MFT_ENTRY_PASSWORDS_TXT, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestRead(file_object)

  def testReadADS(self):
    """Test the read functionality on an alternate data stream (ADS)."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, data_stream='$SDS', location='/$Secure',
        inode=9, parent=self._os_path_spec)
    file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)

    self._TestReadADS(file_object)
if __name__ == '__main__':
unittest.main() | 0.563858 | 0.344774 |
import numpy as np
def normalize(np_values, _=None):
    """Shift the series by the integer-truncated mean.

    The unused second argument keeps the uniform (values, time_stamps)
    preprocessing signature.
    """
    return np_values - int(np.mean(np_values))
def subtract_min(np_values, _=None):
    """Shift the series so its integer-truncated minimum becomes (about) zero.

    The unused second argument keeps the uniform (values, time_stamps)
    preprocessing signature.
    """
    return np_values - int(np.min(np_values))
def none(np_values, _=None):
    # Identity preprocessing step; keeps the pipeline interface uniform.
    return np_values
def subtract_min_and_normalize(np_values, _=None):
    """Apply subtract_min first, then normalize the shifted series."""
    shifted = subtract_min(np_values)
    return normalize(shifted)
def normalize_and_subtract_min(np_values, _=None):
    """Apply normalize first, then subtract_min from the result."""
    centered = normalize(np_values)
    return subtract_min(centered)
def diff(np_array, _=None):
    """Return consecutive differences as an int array ([0] for inputs with
    fewer than two elements).

    The unused second argument keeps the uniform (values, time_stamps)
    preprocessing signature.
    """
    # Renamed the local: it previously shadowed the function's own name.
    deltas = np.diff(np_array).astype(int)
    if len(deltas) == 0:
        # BUG FIX: previously returned a plain Python list here while every
        # other path returned an ndarray; keep the return type consistent.
        return np.zeros(1, dtype=int)
    return deltas
def standardize(np_values, _=None):
    """Divide the series by its (population) standard deviation.

    The unused second argument keeps the uniform (values, time_stamps)
    preprocessing signature.
    """
    return np_values / np.std(np_values)
def std_interpolate(np_values, np_time_stamps):
    """Resample onto the fixed interpolation grid, then standardize."""
    interpolated = custom_left_neighbour_interpolate(np_values, np_time_stamps)
    return standardize(interpolated)
# This is a modified left neighbour interpolation that guarantees that each value in the
# input time series is also present in the interpolation at least once, regardless of time stamps
def custom_left_neighbour_interpolate(np_values, np_time_stamps):
    """Resample onto 45 evenly spaced time steps with left-neighbour
    interpolation.

    Every input value appears at least once in the output, and the first/last
    output values equal the first/last input values (checked by the asserts).
    Removed the commented-out debug prints that cluttered the original body.
    """
    # Equally spaced grid spanning the full time range.
    x_steps = np.linspace(np_time_stamps[0], np_time_stamps[-1], 45, endpoint=True)
    interpolation = left_nearest_modified(np_values, np_time_stamps, x_steps)
    assert interpolation[0] == np_values[0]
    assert interpolation[-1] == np_values[-1]
    return np.array(interpolation)
def standard_interpolate(np_values, np_time_stamps, x_steps):
    """Linearly interpolate the series onto x_steps (thin np.interp wrapper)."""
    return np.interp(x_steps, np_time_stamps, np_values)
def right_nearest_modified(np_values, np_time_stamps, x_steps):
    """Right-neighbour resampling onto x_steps.

    Each grid step emits the value whose time stamp the step has crossed;
    every emitted position contributes at least one output element.
    """
    last_idx = len(np_time_stamps) - 1
    result = []
    cursor = 0
    for step in x_steps:
        advanced = False
        # Walk the cursor forward past every source stamp this grid step has
        # crossed, emitting each crossed value so none is skipped.
        while cursor < last_idx and step > np_time_stamps[cursor]:
            cursor += 1
            result.append(np_values[cursor])
            advanced = True
        if not advanced:
            # Emit the current value at least once for this grid step.
            result.append(np_values[cursor])
    return result
def left_nearest_modified(np_values, np_time_stamps, x_steps):
    """Left-neighbour resampling onto x_steps.

    Every input value is emitted at least once; a value is additionally
    repeated for each grid step its time stamp has passed.
    """
    result = []
    step_idx = 0
    n_steps = len(x_steps)
    for value_idx, stamp in enumerate(np_time_stamps):
        value = np_values[value_idx]
        result.append(value)  # every input value appears at least once
        # Repeat the value for each grid step already passed by this stamp.
        while step_idx < n_steps - 1 and stamp > x_steps[step_idx]:
            result.append(value)
            step_idx += 1
    return result
import numpy as np
# NOTE(review): this region of the dump duplicates the preprocessing
# functions that appear earlier in the file (dataset code/parsed_code
# duplication).
def normalize(np_values, _=None):
    # Shift the series by the integer-truncated mean; the unused second
    # argument keeps the uniform (values, time_stamps) signature.
    mean = np.mean(np_values)
    normalized = np_values - int(mean)
    return normalized

def subtract_min(np_values, _=None):
    # Shift so the integer-truncated minimum becomes (approximately) zero.
    minimum = np.min(np_values)
    result = np_values - int(minimum)
    return result

def none(np_values, _=None):
    # Identity preprocessing step.
    return np_values

def subtract_min_and_normalize(np_values, _=None):
    return normalize(subtract_min(np_values))

def normalize_and_subtract_min(np_values, _=None):
    return subtract_min(normalize(np_values))

def diff(np_array, _=None):
    # NOTE(review): the local name shadows the function itself, and the empty
    # case returns a plain list while the other path returns an ndarray.
    diff = np.diff(np_array).astype(int)
    if len(diff) == 0:
        return [0]
    return diff

def standardize(np_values, _=None):
    # NOTE(review): divides by zero (NaN/inf result) for a constant series.
    stddev = np.std(np_values)
    return np_values / stddev

def std_interpolate(np_values, np_time_stamps):
    # Resample onto the fixed grid, then standardize.
    return standardize(custom_left_neighbour_interpolate(np_values, np_time_stamps))

# This is a modified left neighbour interpolation that guarantees that each value in the
# input time series is also present in the interpolation at least once, regardless of time stamps
def custom_left_neighbour_interpolate(np_values, np_time_stamps):
    """Resample onto 45 evenly spaced steps; first/last values are preserved
    (checked by the asserts below)."""
    last_time_stamp = np_time_stamps[len(np_time_stamps) - 1]
    x_steps = np.linspace(np_time_stamps[0], last_time_stamp, 45, endpoint=True) # create equally spaced timesteps
    interpolation = left_nearest_modified(np_values, np_time_stamps, x_steps)
    assert (interpolation[0] == np_values[0])
    assert (interpolation[len(interpolation) - 1] == np_values[len(np_values) - 1])
    return np.array(interpolation)

def standard_interpolate(np_values, np_time_stamps, x_steps):
    """Plain linear interpolation wrapper around np.interp."""
    return np.interp(x_steps, np_time_stamps, np_values, period=None)

def right_nearest_modified(np_values, np_time_stamps, x_steps):
    """Right-neighbour resampling; every emitted position contributes at
    least one output element."""
    last_time_stamp_index = len(np_time_stamps) - 1
    interpolation = []
    pos = 0
    for time_stamp in x_steps:
        new_time_stamp = False
        # Advance past every source stamp this grid step has crossed.
        while pos < last_time_stamp_index and time_stamp > np_time_stamps[pos]:
            pos += 1
            interpolation.append(np_values[pos])
            new_time_stamp = True
        if not new_time_stamp: # add value at least once
            interpolation.append(np_values[pos])
    return interpolation

def left_nearest_modified(np_values, np_time_stamps, x_steps):
    """Left-neighbour resampling; every input value is emitted at least once."""
    interpolation = []
    pos = 0
    step_cnt = 0
    for time_stamp in np_time_stamps:
        interpolation.append(np_values[pos])
        # Repeat the value for each grid step already passed by this stamp.
        while step_cnt < len(x_steps) - 1 and time_stamp > x_steps[step_cnt]:
            interpolation.append(np_values[pos])
            step_cnt += 1
        pos += 1
return interpolation | 0.463201 | 0.745445 |
from common import date_utils
from datetime import datetime
from collections import namedtuple
import unittest
Range = namedtuple('Range', ['start', 'end'])
"""
From root directory TeamUp: python3 -m test.test_date_utils
"""
class TestDaysOverlap(unittest.TestCase):
    """Exercises date_utils.hours_overlap for an 18:00-06:00 night-shift range.

    NOTE(review): several methods are named test_no_overlap_N but actually
    assert a non-zero overlap; consider renaming them to match the expectation.
    """

    def test_no_overlap(self):
        # Day range ends exactly when the night range starts: 0 hours overlap.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 6, 0, 0), datetime(2021, 1, 1, 18, 0, 0))), 0)

    def test_no_overlap_2(self):
        # Second range starts exactly when the night range ends: 0 hours.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 2, 6, 0, 0), datetime(2021, 1, 2, 10, 0, 0))), 0)

    def test_no_overlap_3(self):
        # NOTE(review): despite the name, 12:00-20:00 overlaps 18:00-20:00 -> 2.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 12, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 2)

    def test_no_overlap_4(self):
        # Shared 18:00 start; overlap is 18:00-20:00 -> 2.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 2)

    def test_no_overlap_5(self):
        # 19:00-20:00 lies fully inside the night range -> 1.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 1)

    def test_no_overlap_6(self):
        # 19:00-05:00 (next day) lies fully inside -> 10.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 5, 0, 0))), 10)

    def test_no_overlap_7(self):
        # 19:00 up to the night range's 06:00 end -> 11.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 6, 0, 0))), 11)

    def test_no_overlap_8(self):
        # Extends past 06:00; the overlap is clipped at the range end -> 11.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 8, 0, 0))), 11)

    def test_no_overlap_9(self):
        # Different dates; second range ends exactly at the range start -> 0.
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2022, 2, 5, 18, 0, 0), datetime(2022, 2, 6, 6, 0, 0)),
                                                  Range(datetime(2022, 2, 5, 12, 0, 0), datetime(2022, 2, 5, 18, 0, 0))), 0)
if __name__ == '__main__':
unittest.main() | test/test_date_utils.py | from common import date_utils
from datetime import datetime
from collections import namedtuple
import unittest
Range = namedtuple('Range', ['start', 'end'])
"""
From root directory TeamUp: python3 -m test.test_date_utils
"""
# NOTE(review): this class duplicates the TestDaysOverlap defined earlier in
# the dump (dataset code/parsed_code duplication).
class TestDaysOverlap(unittest.TestCase):
    """Exercises date_utils.hours_overlap for an 18:00-06:00 night-shift range.

    NOTE(review): methods named test_no_overlap_3..8 actually assert a
    non-zero overlap; consider renaming them to match the expectation.
    """

    def test_no_overlap(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 6, 0, 0), datetime(2021, 1, 1, 18, 0, 0))), 0)

    def test_no_overlap_2(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 2, 6, 0, 0), datetime(2021, 1, 2, 10, 0, 0))), 0)

    def test_no_overlap_3(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 12, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 2)

    def test_no_overlap_4(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 2)

    def test_no_overlap_5(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 1, 20, 0, 0))), 1)

    def test_no_overlap_6(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 5, 0, 0))), 10)

    def test_no_overlap_7(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 6, 0, 0))), 11)

    def test_no_overlap_8(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2021, 1, 1, 18, 0, 0), datetime(2021, 1, 2, 6, 0, 0)),
                                                  Range(datetime(2021, 1, 1, 19, 0, 0), datetime(2021, 1, 2, 8, 0, 0))), 11)

    def test_no_overlap_9(self):
        self.longMessage = True
        self.assertEqual(date_utils.hours_overlap(Range(datetime(2022, 2, 5, 18, 0, 0), datetime(2022, 2, 6, 6, 0, 0)),
                                                  Range(datetime(2022, 2, 5, 12, 0, 0), datetime(2022, 2, 5, 18, 0, 0))), 0)
if __name__ == '__main__':
unittest.main() | 0.515132 | 0.703155 |
import logging
import sys
from os.path import dirname, realpath, isdir, isfile, join
from abstract_data import abstract_info_list
LOGGER = logging.getLogger(__name__)
class AbstractHelp(object):
    """Renders a Dataverse dataset description from a fixed template plus a
    per-dataset description file (Python 2 module)."""

    def __init__(self, title, year, dataset_doi, desc_filename):
        self.title = title                  # dataset title
        self.year = year                    # publication year (unused here)
        self.dataset_doi = dataset_doi      # DOI/URL of the dataset
        self.desc_filename = desc_filename  # file in ./descriptions holding the body text

    def get_description(self):
        """Return the fully formatted description block for this dataset."""
        desc = """
=================================
## Title
=================================
{2} (BARI)
=================================
## Description
=================================
{0}
<br /><br />Provided by the Boston Area Research Initiative (BARI): <a href="https://www.northeastern.edu/csshresearch/bostonarearesearchinitiative/">https://www.northeastern.edu/csshresearch/bostonarearesearchinitiative/</a>
<br /><br />This file, along with other administrative geographies utilized by the City of Boston, may be found in the <a href="{1}">Dataverse repository</a>:
<br /><br />
<ul>
<li>Data: <a href="{1}">{1}</a></li>
<li>Documentation: <a href="http://dx.doi.org/10.7910/DVN/C5IULB">http://dx.doi.org/10.7910/DVN/C5IULB</a></li>
</ul>
=================================
## Purpose
=================================
To provide geocoding services to data within Dataverse, https://dataverse.harvard.edu/
=================================
## Test url
=================================
http://worldmap.harvard.edu/data/geonode:LAYER_NAME
""".format(self.get_description_from_file(), self.dataset_doi, self.title)
        return desc

    def get_description_from_file(self):
        """Read this dataset's description text.

        Returns '' (after logging an error) when the descriptions directory
        or the description file is missing.
        """
        current_dir = dirname(realpath(__file__))
        desc_directory = join(current_dir, 'descriptions')
        if not isdir(desc_directory):
            LOGGER.error('Directory does not exist %s' % desc_directory)
            return ''
        desc_file = join(desc_directory, self.desc_filename)
        if not isfile(desc_file):
            LOGGER.error('Directory file not found %s' % desc_file)
            return ''
        # BUG FIX: the file handle was previously opened and never closed;
        # use a context manager so it is always released.
        with open(desc_file, 'r') as file_handle:
            return file_handle.read()
def show_abstracts(info_list):
    """Print a numbered menu of the available abstracts.

    Args:
        info_list: iterable of (title, year, dataset_doi, desc_filename)
            tuples, one per abstract.
    """
    # FIX: print() with a single argument behaves identically under
    # Python 2 and 3; the original print statements are a SyntaxError on 3.
    print('\n' + ('-' * 30))
    print('Please choose one:\n')
    for idx, info in enumerate(info_list):
        ab_help = AbstractHelp(*info)
        print('(%d) %s' % (idx + 1, ab_help.title))
    print('\n' + ('-' * 30))
def format_selected_abstract(selected_idx, info_list):
    """Print the description for the 1-based menu choice ``selected_idx``.

    Re-prints the menu when the choice is out of range.

    Args:
        selected_idx: string holding the user's 1-based choice (must be digits).
        info_list: iterable of (title, year, dataset_doi, desc_filename) tuples.
    """
    assert selected_idx.isdigit(), "selected_idx must be a digit. Not: %s" % selected_idx
    for idx, info in enumerate(info_list):
        if (idx + 1) == int(selected_idx):
            ab_help = AbstractHelp(*info)
            # FIX: parenthesized print works under both Python 2 and 3.
            print(ab_help.get_description())
            return
    print('Sorry! [%s] not found! Please select a number from the list: ' % selected_idx)
    show_abstracts(info_list)
if __name__ == '__main__':
    # Usage: pass a 1-based abstract number to print its description, or
    # pass nothing to list the available abstracts.
    if len(sys.argv) == 2:
        format_selected_abstract(sys.argv[1], abstract_info_list)
    else:
        show_abstracts(abstract_info_list)
import sys
from os.path import dirname, realpath, isdir, isfile, join
from abstract_data import abstract_info_list
LOGGER = logging.getLogger(__name__)
class AbstractHelp(object):
    """Render a formatted BARI dataset description for one abstract.

    Combines a long-form description read from a file under ``descriptions/``
    with the dataset title and its Dataverse DOI link.
    """
    def __init__(self, title, year, dataset_doi, desc_filename):
        """Store the abstract's metadata.

        Args:
            title: dataset title shown in the heading.
            year: publication year (not used by the template).
            dataset_doi: Dataverse DOI URL inserted into the template.
            desc_filename: name of the text file under ``descriptions/``.
        """
        self.title = title
        self.year = year
        self.dataset_doi = dataset_doi
        self.desc_filename = desc_filename
    def get_description(self):
        """Return the full description text with title/DOI substituted."""
        desc = """
=================================
## Title
=================================
{2} (BARI)
=================================
## Description
=================================
{0}
<br /><br />Provided by the Boston Area Research Initiative (BARI): <a href="https://www.northeastern.edu/csshresearch/bostonarearesearchinitiative/">https://www.northeastern.edu/csshresearch/bostonarearesearchinitiative/</a>
<br /><br />This file, along with other administrative geographies utilized by the City of Boston, may be found in the <a href="{1}">Dataverse repository</a>:
<br /><br />
<ul>
<li>Data: <a href="{1}">{1}</a></li>
<li>Documentation: <a href="http://dx.doi.org/10.7910/DVN/C5IULB">http://dx.doi.org/10.7910/DVN/C5IULB</a></li>
</ul>
=================================
## Purpose
=================================
To provide geocoding services to data within Dataverse, https://dataverse.harvard.edu/
=================================
## Test url
=================================
http://worldmap.harvard.edu/data/geonode:LAYER_NAME
""".format(self.get_description_from_file(), self.dataset_doi, self.title)
        return desc
    def get_description_from_file(self):
        """Read the raw description text; return '' (and log) if missing."""
        CURRENT_DIR = dirname(realpath(__file__))
        DESC_DIRECTORY = join(CURRENT_DIR, 'descriptions')
        if not isdir(DESC_DIRECTORY):
            LOGGER.error('Directory does not exist %s' % DESC_DIRECTORY)
            return ''
        desc_file = join(DESC_DIRECTORY, self.desc_filename)
        if not isfile(desc_file):
            LOGGER.error('Directory file not found %s' % desc_file)
            return ''
        # BUG FIX: use a context manager so the file handle is always
        # closed; the original left the handle open.
        with open(desc_file, 'r') as fh:
            return fh.read()
def show_abstracts(info_list):
    """Print a numbered menu of the available abstracts.

    Args:
        info_list: iterable of (title, year, dataset_doi, desc_filename)
            tuples, one per abstract.
    """
    # FIX: print() with a single argument behaves identically under
    # Python 2 and 3; the original print statements are a SyntaxError on 3.
    print('\n' + ('-' * 30))
    print('Please choose one:\n')
    for idx, info in enumerate(info_list):
        ab_help = AbstractHelp(*info)
        print('(%d) %s' % (idx + 1, ab_help.title))
    print('\n' + ('-' * 30))
def format_selected_abstract(selected_idx, info_list):
    """Print the description for the 1-based menu choice ``selected_idx``.

    Re-prints the menu when the choice is out of range.

    Args:
        selected_idx: string holding the user's 1-based choice (must be digits).
        info_list: iterable of (title, year, dataset_doi, desc_filename) tuples.
    """
    assert selected_idx.isdigit(), "selected_idx must be a digit. Not: %s" % selected_idx
    for idx, info in enumerate(info_list):
        if (idx + 1) == int(selected_idx):
            ab_help = AbstractHelp(*info)
            # FIX: parenthesized print works under both Python 2 and 3.
            print(ab_help.get_description())
            return
    print('Sorry! [%s] not found! Please select a number from the list: ' % selected_idx)
    show_abstracts(info_list)
if __name__ == '__main__':
    # Usage: pass a 1-based abstract number to print its description, or
    # pass nothing to list the available abstracts.
    if len(sys.argv) == 2:
        format_selected_abstract(sys.argv[1], abstract_info_list)
    else:
        show_abstracts(abstract_info_list)
from sqlalchemy import Column, Integer, String
from . import Base
class Timetable(Base):
    """ORM mapping for the ``timetable`` table.

    Columns:
        - **timetable_id**: Integer, primary_key.
        - **open_hour**: Integer, not null.
        - **open_minute**: Integer, not null.
        - **close_hour**: Integer, not null.
        - **close_minute**: Integer, not null.
        - **timezone**: String(100), not null.
    """
    __tablename__ = "timetable"
    timetable_id = Column(Integer, primary_key = True)
    open_hour = Column(Integer, nullable = False)
    open_minute = Column(Integer, nullable = False)
    close_hour = Column(Integer, nullable = False)
    close_minute = Column(Integer, nullable = False)
    timezone = Column(String(100), nullable = False)
    def __init__(self, open_hour, open_minute, close_hour, close_minute, timezone):
        """Constructor.

        Args:
            - open_hour (int): Opening hour.
            - open_minute (int): Opening minute.
            - close_hour (int): Closing hour.
            - close_minute (int): Closing minute.
            - timezone (str): Timezone name, e.g. ``US/Eastern``.
        """
        self.open_hour = open_hour
        self.open_minute = open_minute
        self.close_hour = close_hour
        self.close_minute = close_minute
        self.timezone = timezone
    def __repr__(self):
        # Debug representation mirroring the constructor arguments.
        fields = (self.open_hour, self.open_minute, self.close_hour,
                  self.close_minute, self.timezone)
        return ("<Timetable(open_hour={}, open_minute={}, close_hour={}, "
                "close_minute={}, timezone={})>").format(*fields)
from . import Base
class Timetable(Base):
    """ORM mapping for the ``timetable`` table.

    Columns:
        - **timetable_id**: Integer, primary_key.
        - **open_hour**: Integer, not null.
        - **open_minute**: Integer, not null.
        - **close_hour**: Integer, not null.
        - **close_minute**: Integer, not null.
        - **timezone**: String(100), not null.
    """
    __tablename__ = "timetable"
    timetable_id = Column(Integer, primary_key = True)
    open_hour = Column(Integer, nullable = False)
    open_minute = Column(Integer, nullable = False)
    close_hour = Column(Integer, nullable = False)
    close_minute = Column(Integer, nullable = False)
    timezone = Column(String(100), nullable = False)
    def __init__(self, open_hour, open_minute, close_hour, close_minute, timezone):
        """Constructor.

        Args:
            - open_hour (int): Opening hour.
            - open_minute (int): Opening minute.
            - close_hour (int): Closing hour.
            - close_minute (int): Closing minute.
            - timezone (str): Timezone name, e.g. ``US/Eastern``.
        """
        self.open_hour = open_hour
        self.open_minute = open_minute
        self.close_hour = close_hour
        self.close_minute = close_minute
        self.timezone = timezone
    def __repr__(self):
        # Debug representation mirroring the constructor arguments.
        fields = (self.open_hour, self.open_minute, self.close_hour,
                  self.close_minute, self.timezone)
        return ("<Timetable(open_hour={}, open_minute={}, close_hour={}, "
                "close_minute={}, timezone={})>").format(*fields)
from typing import Dict, Optional
from overrides import overrides
import torch
from allennlp.models.model import Model
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.training.metrics import Average, Auc
from torch import Tensor
from transformers import BertModel, BertForSequenceClassification
@Model.register("hatefulmememodel")
class HatefulMemeModel(Model):
    """Text-only hateful-memes classifier wrapping a BERT sequence classifier.

    The visual inputs (box features / coordinates / mask) are accepted for
    dataloader compatibility but are not used by this model.
    """
    def __init__(self, vocab: Vocabulary, text_model_name: str):
        super().__init__(vocab)
        # Pretrained BERT with a sequence-classification head.
        self._text_model = BertForSequenceClassification.from_pretrained(text_model_name)
        self._num_labels = vocab.get_vocab_size()
        self._accuracy = Average()
        self._auc = Auc()
        self._softmax = torch.nn.Softmax(dim=1)
    def forward(
        self,
        source_tokens: TextFieldTensors,
        box_features: Optional[Tensor] = None,
        box_coordinates: Optional[Tensor] = None,
        box_mask: Optional[Tensor] = None,
        label: Optional[Tensor] = None,
        metadata: Optional[Dict] = None,
    ) -> Dict[str, torch.Tensor]:
        """Run BERT on the tokenized text; update metrics when labels exist."""
        tokens = source_tokens["tokens"]
        outputs = self._text_model(
            input_ids=tokens["token_ids"],
            attention_mask=tokens["mask"],
            token_type_ids=tokens["type_ids"],
            return_dict=True,
            labels=label,
        )
        if label is not None:
            # Hard class decisions from the softmaxed logits.
            predictions = torch.argmax(self._softmax(outputs.logits), dim=-1)
            for pred, gold in zip(predictions, label):
                self._accuracy(int(pred == gold))
            # NOTE(review): AUC is fed hard predictions rather than class
            # probabilities here -- confirm this is intended.
            self._auc(predictions, label)
        return outputs
    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Report accuracy/AUC during evaluation; empty dict while training."""
        if self.training:
            return {}
        return {
            "accuracy": self._accuracy.get_metric(reset=reset),
            "auc": self._auc.get_metric(reset=reset),
        }
from overrides import overrides
import torch
from allennlp.models.model import Model
from allennlp.data import Vocabulary, TextFieldTensors
from allennlp.training.metrics import Average, Auc
from torch import Tensor
from transformers import BertModel, BertForSequenceClassification
@Model.register("hatefulmememodel")
class HatefulMemeModel(Model):
    """Text-only hateful-memes classifier wrapping a BERT sequence classifier.

    The visual inputs (box features / coordinates / mask) are accepted for
    dataloader compatibility but are not used by this model.
    """
    def __init__(self, vocab: Vocabulary, text_model_name: str):
        super().__init__(vocab)
        # Pretrained BERT with a sequence-classification head.
        self._text_model = BertForSequenceClassification.from_pretrained(text_model_name)
        self._num_labels = vocab.get_vocab_size()
        self._accuracy = Average()
        self._auc = Auc()
        self._softmax = torch.nn.Softmax(dim=1)
    def forward(
        self,
        source_tokens: TextFieldTensors,
        box_features: Optional[Tensor] = None,
        box_coordinates: Optional[Tensor] = None,
        box_mask: Optional[Tensor] = None,
        label: Optional[Tensor] = None,
        metadata: Optional[Dict] = None,
    ) -> Dict[str, torch.Tensor]:
        """Run BERT on the tokenized text; update metrics when labels exist."""
        tokens = source_tokens["tokens"]
        outputs = self._text_model(
            input_ids=tokens["token_ids"],
            attention_mask=tokens["mask"],
            token_type_ids=tokens["type_ids"],
            return_dict=True,
            labels=label,
        )
        if label is not None:
            # Hard class decisions from the softmaxed logits.
            predictions = torch.argmax(self._softmax(outputs.logits), dim=-1)
            for pred, gold in zip(predictions, label):
                self._accuracy(int(pred == gold))
            # NOTE(review): AUC is fed hard predictions rather than class
            # probabilities here -- confirm this is intended.
            self._auc(predictions, label)
        return outputs
    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Report accuracy/AUC during evaluation; empty dict while training."""
        if self.training:
            return {}
        return {
            "accuracy": self._accuracy.get_metric(reset=reset),
            "auc": self._auc.get_metric(reset=reset),
        }
import pandas as pd
import numpy as np
import json
import os
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def kmeans_relevance(similarity_matrix):
    """Convert cosine similarities into 3-level relevance labels via k-means.

    :similarity_matrix: DataFrame, rows -> queries, columns -> document ids
    :returns: DataFrame with the same index/columns whose cells hold a
        relevance level (0 = non-relevant, 1 = partially relevant, 2 = high)
    """
    # BUG FIX: the original did `result_df = similarity_matrix`, aliasing the
    # input so the caller's similarity matrix was overwritten in place.
    result_df = similarity_matrix.copy()
    for index, row in similarity_matrix.iterrows():
        # NOTE(review): the query number mapping (2*i + 1) presumably mirrors
        # the source data's query numbering -- confirm.
        query_number = index * 2 + 1
        query_similarities = row.to_numpy().reshape(-1, 1)
        plt.plot(query_similarities)
        # Cluster the similarity values into 3 groups.
        query_k_means = KMeans(n_clusters=3)
        query_k_means.fit_predict(query_similarities)
        # Thresholds lie halfway between adjacent (sorted) cluster centers.
        query_centers = np.sort(query_k_means.cluster_centers_.reshape(-1))
        upper_bound = (query_centers[1] + query_centers[2]) / 2
        lower_bound = (query_centers[0] + query_centers[1]) / 2
        plt.hlines(upper_bound, 0, len(query_similarities), colors="k", zorder=10)
        plt.hlines(lower_bound, 0, len(query_similarities), colors="k", zorder=10)
        # Partition the documents by threshold.
        high_relevance = query_similarities >= upper_bound
        partially_relevance = (query_similarities < upper_bound) * (query_similarities > lower_bound)
        non_relevance = query_similarities <= lower_bound
        high_relevance_docid = similarity_matrix.columns[high_relevance.reshape(-1)]
        partially_relevance_docid = similarity_matrix.columns[partially_relevance.reshape(-1)]
        non_relevance_docid = similarity_matrix.columns[non_relevance.reshape(-1)]
        plt.savefig("./models/kmeans/cos_sim_q{}.png".format(query_number))
        result_df.loc[index, high_relevance_docid] = 2
        result_df.loc[index, partially_relevance_docid] = 1
        result_df.loc[index, non_relevance_docid] = 0
        plt.clf()
    return result_df
def main():
    """Load the cosine-similarity matrix, label it with k-means, save a CSV."""
    similarity_matrix = pd.read_csv("./preprocessing/cosine_similarity_matrix.csv", index_col=0)
    # FIX: makedirs(exist_ok=True) replaces the bare `try/except: pass`
    # around mkdir, which silently swallowed ALL errors (e.g. permission
    # denied), not just "directory already exists".
    os.makedirs("./models/kmeans", exist_ok=True)
    kmeans_predictions = kmeans_relevance(similarity_matrix)
    kmeans_predictions.to_csv("./models/kmeans/kmeans_predictions.csv")
if __name__=="__main__":
    # Entry point: run the full tf-idf -> k-means labelling pipeline.
    main()
import numpy as np
import json
import os
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def kmeans_relevance(similarity_matrix):
    """Convert cosine similarities into 3-level relevance labels via k-means.

    :similarity_matrix: DataFrame, rows -> queries, columns -> document ids
    :returns: DataFrame with the same index/columns whose cells hold a
        relevance level (0 = non-relevant, 1 = partially relevant, 2 = high)
    """
    # BUG FIX: the original did `result_df = similarity_matrix`, aliasing the
    # input so the caller's similarity matrix was overwritten in place.
    result_df = similarity_matrix.copy()
    for index, row in similarity_matrix.iterrows():
        # NOTE(review): the query number mapping (2*i + 1) presumably mirrors
        # the source data's query numbering -- confirm.
        query_number = index * 2 + 1
        query_similarities = row.to_numpy().reshape(-1, 1)
        plt.plot(query_similarities)
        # Cluster the similarity values into 3 groups.
        query_k_means = KMeans(n_clusters=3)
        query_k_means.fit_predict(query_similarities)
        # Thresholds lie halfway between adjacent (sorted) cluster centers.
        query_centers = np.sort(query_k_means.cluster_centers_.reshape(-1))
        upper_bound = (query_centers[1] + query_centers[2]) / 2
        lower_bound = (query_centers[0] + query_centers[1]) / 2
        plt.hlines(upper_bound, 0, len(query_similarities), colors="k", zorder=10)
        plt.hlines(lower_bound, 0, len(query_similarities), colors="k", zorder=10)
        # Partition the documents by threshold.
        high_relevance = query_similarities >= upper_bound
        partially_relevance = (query_similarities < upper_bound) * (query_similarities > lower_bound)
        non_relevance = query_similarities <= lower_bound
        high_relevance_docid = similarity_matrix.columns[high_relevance.reshape(-1)]
        partially_relevance_docid = similarity_matrix.columns[partially_relevance.reshape(-1)]
        non_relevance_docid = similarity_matrix.columns[non_relevance.reshape(-1)]
        plt.savefig("./models/kmeans/cos_sim_q{}.png".format(query_number))
        result_df.loc[index, high_relevance_docid] = 2
        result_df.loc[index, partially_relevance_docid] = 1
        result_df.loc[index, non_relevance_docid] = 0
        plt.clf()
    return result_df
def main():
    """Load the cosine-similarity matrix, label it with k-means, save a CSV."""
    similarity_matrix = pd.read_csv("./preprocessing/cosine_similarity_matrix.csv", index_col=0)
    # FIX: makedirs(exist_ok=True) replaces the bare `try/except: pass`
    # around mkdir, which silently swallowed ALL errors (e.g. permission
    # denied), not just "directory already exists".
    os.makedirs("./models/kmeans", exist_ok=True)
    kmeans_predictions = kmeans_relevance(similarity_matrix)
    kmeans_predictions.to_csv("./models/kmeans/kmeans_predictions.csv")
if __name__=="__main__":
    # Entry point: run the full tf-idf -> k-means labelling pipeline.
    main()
from div_free_interpolation import *
from discrete_shell_potential import *
import datetime
import numpy as np
import os
from shape_utils import *
from base_tools import *
from param import *
import matplotlib.pyplot as plt
import scipy.io
def np_to_torch(m, long=False):
    """Convert an array-like to a tensor on the global ``device``.

    ``long=True`` yields a torch.long tensor, otherwise float32.
    """
    dtype = torch.long if long else torch.float32
    return torch.as_tensor(m, dtype=dtype, device=device)
def get_file_array(dataset):
    """Return the list of sequence-file indices for ``dataset``.

    Both supported datasets currently use indices 0..89.

    Raises:
        ValueError: for an unknown dataset name (the original fell through
            and crashed with UnboundLocalError instead).
    """
    if dataset in ("FAUST", "FAUST_sub"):
        return list(range(90))
    raise ValueError("Unknown dataset: %s" % dataset)
def save_curve(m_array, file_name):
    """Save a list of curve arrays into a single .mat file under ``curves/``.

    Each entry of ``m_array`` is stored under the key ``curve_<i>``.
    """
    folder_name = data_folder_out + "curves/"
    # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir check-then-act
    # race of the original.
    os.makedirs(folder_name, exist_ok=True)
    dict_out = {"curve_" + str(i_m): m for i_m, m in enumerate(m_array)}
    scipy.io.savemat(folder_name + file_name, dict_out)
def load_seq_file(folder_name, i_file):
    """Load one interpolation sequence file ``seq_<i>.mat``.

    Returns (shape_x, shape_y, vert_sequence, time_elapsed); time_elapsed
    is -1 when the file does not record it.
    """
    file_name = "seq_" + str(i_file).zfill(3) + ".mat"
    mat_dict = scipy.io.loadmat(folder_name + file_name)
    vert_sequence = np_to_torch(mat_dict["vert_sequence"])
    # BUG FIX: the original tested `"time_elapsed" in mat_dict.values()`,
    # which compares against the VALUES and never matches the key, so
    # time_elapsed was always -1.
    if "time_elapsed" in mat_dict:
        time_elapsed = mat_dict["time_elapsed"]
    else:
        time_elapsed = -1
    # Triangle indices are 1-based in the MATLAB files; shift to 0-based.
    shape_x = Shape(np_to_torch(mat_dict["vert_x"]), np_to_torch(mat_dict["triv_x"], long=True)-1)
    shape_y = Shape(np_to_torch(mat_dict["vert_y"]), np_to_torch(mat_dict["triv_y"], long=True)-1)
    return shape_x, shape_y, vert_sequence, time_elapsed
def plot_curves(m_array, title=None, logarithmic=False):
    """Plot cumulative curves of the sorted values of each tensor in m_array.

    Each tensor is flattened, sorted, subsampled to ``num_plot`` points
    (log-spaced when ``logarithmic``) and plotted against its quantile.
    Returns a list of (num_plot, 2) float64 arrays of the plotted coords.
    """
    num_plot = 200
    coords_array = []
    for m in m_array:
        m_stacked = m.view(-1).cpu()
        m_sort, _ = torch.sort(m_stacked)
        if logarithmic:
            # Log-spaced sample positions along the sorted values.
            plot_select = np.linspace(0, np.log(m_sort.shape[0] - 1), num_plot, dtype=np.float64)
            plot_select = np.exp(plot_select)
            plot_select = torch.as_tensor(plot_select, device=device_cpu, dtype=torch.long)
            m_sort = m_sort[plot_select]
            y = np.linspace(0, 1, m_sort.shape[0], dtype=np.float64)
            m_sort = m_sort.detach().cpu().numpy()
            plt.semilogx(m_sort, y)
        else:
            # FIX: np.long was removed in NumPy >= 1.24; np.int64 is the
            # equivalent integer dtype for index arrays.
            plot_select = np.linspace(0, m_sort.shape[0] - 1, num_plot, dtype=np.int64)
            plot_select = torch.as_tensor(plot_select, device=device_cpu)
            m_sort = m_sort[plot_select]
            y = np.linspace(0, 1, m_sort.shape[0], dtype=np.float64)
            m_sort = m_sort.detach().cpu().numpy()
            plt.plot(m_sort, y)
        coords_array_curr = np.zeros([y.shape[0], 2], dtype=np.float64)
        coords_array_curr[:, 0] = m_sort
        coords_array_curr[:, 1] = y
        coords_array.append(coords_array_curr)
    if title is not None:
        plt.title(title)
    plt.ylim(0, 1)
    plt.grid()
    plt.show()
    return coords_array
def eval_volume_change(method, dataset, folder_idx, plot=False):
    """Per-file, per-timestep volume-change measure for one method/dataset.

    Returns a (num_files, num_t) float64 tensor whose entries are
    v/v0 + v0/v - 2: zero when volume is preserved, positive otherwise.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    volume_diff = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        num_t = vert_sequence.shape[0]
        volume_diff_curr = my_zeros([num_t]).to(dtype=torch.float64)
        # Reference volume of the undeformed source shape.
        volume_ref = shape_x.compute_volume().to(dtype=torch.float64)
        shape_x_new = deepcopy(shape_x)
        shape_x_new.vert = vert_sequence[num_t-1, ...]
        for t in range(num_t):
            volume_curr = shape_x.compute_volume_shifted(vert_sequence[t, ...]).to(dtype=torch.float64)
            # Symmetric ratio penalty: zero iff volume_curr == volume_ref.
            volume_diff_curr[t] = volume_curr / volume_ref + volume_ref / volume_curr - 2
        volume_diff.append(volume_diff_curr)
        if plot:
            scatter_shape_pair(shape_x_new, shape_y, title="Mean volume change: " + str(volume_diff_curr.mean()))
    # NOTE(review): num_t here carries over from the LAST file; this assumes
    # all sequences share the same timestep count -- confirm.
    volume_diff_tens = my_zeros([num_files, num_t]).to(dtype=torch.float64)
    for i_file in range(len(volume_diff)):
        volume_diff_tens[i_file, :] = volume_diff[i_file]
    print("Mean volume change: ", volume_diff_tens.mean())
    return volume_diff_tens
def compute_chamfer(shape_x, shape_y, vert_sequence, num_eval=10000):
    """One-directional chamfer distances from the final deformed shape to shape_y.

    Returns ``num_eval`` randomly sampled nearest-neighbour distances.
    """
    num_t = vert_sequence.shape[0]
    shape_x_new = deepcopy(shape_x)
    shape_x_new.vert = vert_sequence[num_t - 1, ...]
    samples = knn(shape_y.vert.to(device_cpu), shape_x_new.vert.to(device_cpu), k=1).to(device)
    chamfer_curr = (shape_x_new.vert[samples[0, :], :] - shape_y.vert[samples[1, :], :]).norm(dim=1)
    # BUG FIX: the sample count was hard-coded to 10000, silently ignoring
    # the num_eval parameter.
    idx_eval = torch.zeros([num_eval], device=device, dtype=torch.long).random_(0, chamfer_curr.shape[0])
    chamfer_curr = chamfer_curr[idx_eval]
    return chamfer_curr
def eval_chamfer(method, dataset, folder_idx, plot=False):
    """Chamfer-distance samples for every sequence of one method/dataset.

    Returns a (num_files, num_eval) tensor of sampled nearest-neighbour
    distances between each final deformed shape and its target.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    num_eval = 10000
    chamfer_array = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        chamfer_curr = compute_chamfer(shape_x, shape_y, vert_sequence, num_eval=num_eval)
        chamfer_array.append(chamfer_curr)
        if plot:
            # BUG FIX: shape_x_new was referenced here but never defined in
            # this scope (NameError whenever plot=True); rebuild the final
            # deformed shape explicitly.
            shape_x_new = deepcopy(shape_x)
            shape_x_new.vert = vert_sequence[vert_sequence.shape[0] - 1, ...]
            scatter_shape_pair(shape_x_new, shape_y, title="Mean chamfer dist: " + str(chamfer_curr.mean()))
    chamfer_tens = my_zeros([num_files, num_eval])
    for i_file in range(len(chamfer_array)):
        chamfer_tens[i_file, :] = chamfer_array[i_file]
    print("Mean chamfer dist: ", chamfer_tens.mean())
    return chamfer_tens
def compute_distortion(shape_x, shape_y, vert_sequence, num_eval=10000):
    """Per-timestep conformal-distortion samples for one sequence.

    Returns a (num_t, num_eval) tensor of |trace(A^T A)/det(A) - 3| values,
    clamped to [0, dist_max].
    """
    dist_max = 10
    num_t = vert_sequence.shape[0]
    num_triv = shape_x.triv.shape[0]
    dist_curr = my_zeros([num_t, num_eval])
    # (Dropped the unused deep copy of shape_x the original built here.)
    for t in range(num_t):
        normal_0, _, area_0, _, _, edge_t, edge_proj_0 = discrete_shell_energy_pre(vert_sequence[t, ...], shape_x.vert,
                                                                                   shape_x.triv)
        _, a_membrane_n = membrane_transformation(edge_t, area_0, normal_0, edge_proj_0)
        distortion_curr = (batch_trace(torch.bmm(a_membrane_n.transpose(1, 2), a_membrane_n)).squeeze()) / \
                          (torch.det(a_membrane_n) + 1e-6) - 3
        distortion_curr = torch.abs(distortion_curr)
        # Clamp to dist_max via: dist_max - relu(dist_max - x).
        distortion_curr = dist_max - torch.relu(dist_max - distortion_curr)
        # BUG FIX: sample count was hard-coded to 10000; honor num_eval so
        # the assignment into the preallocated dist_curr row always matches.
        idx_eval = torch.zeros([num_eval], device=device, dtype=torch.long).random_(0, num_triv)
        distortion_curr = distortion_curr[idx_eval]
        dist_curr[t, :] = distortion_curr
    return dist_curr
def eval_distortion(method, dataset, folder_idx, plot=False):
    """Conformal-distortion samples for every sequence of one method/dataset.

    Returns a (num_files, num_t, num_eval) tensor.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    num_eval = 10000
    distortion_array = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        num_t = vert_sequence.shape[0]
        dist_curr = compute_distortion(shape_x, shape_y, vert_sequence, num_eval=num_eval)
        distortion_array.append(dist_curr)
        if plot:
            # BUG FIX: shape_x_new was never defined in this scope
            # (NameError whenever plot=True); rebuild the final shape.
            shape_x_new = deepcopy(shape_x)
            shape_x_new.vert = vert_sequence[num_t - 1, ...]
            scatter_shape_pair(shape_x_new, shape_y, title="Mean distortion: " + str(dist_curr.mean()))
    # NOTE(review): num_t carries over from the LAST file; assumes all
    # sequences share the same timestep count -- confirm.
    distortion_tens = my_zeros([num_files, num_t, num_eval])
    for i_file in range(len(distortion_array)):
        distortion_tens[i_file, ...] = distortion_array[i_file]
    print("Mean distortion: ", distortion_tens.mean())
    return distortion_tens
def get_folder_idx(method, dataset):
    """Return the result-folder index for ``dataset``.

    ``method`` is currently unused but kept for interface compatibility.

    Raises:
        ValueError: for an unknown dataset name (the original fell through
            and crashed with UnboundLocalError instead).
    """
    if dataset in ("FAUST", "FAUST_sub"):
        return 1
    raise ValueError("Unknown dataset: %s" % dataset)
def eval_all(dataset, save_results=False):
    """Evaluate every method on ``dataset``, plot the three metric curves,
    and optionally save them as .mat files.

    Methods whose result folders are missing/broken are skipped: the broad
    except is a deliberate best-effort that prints the error and continues.
    """
    print("Evaluate ", dataset, "...")
    distortion_array = []
    volume_array = []
    chamfer_dist_array = []
    for method in ["ham", "div"]:
        print("Method: ", method, "...")
        folder_idx = get_folder_idx(method, dataset)
        folder_idx = str(folder_idx).zfill(3)
        try:
            distortion = eval_distortion(method, dataset, folder_idx)
            volume_change = eval_volume_change(method, dataset, folder_idx)
            chamfer_dist = eval_chamfer(method, dataset, folder_idx)
            distortion_array.append(distortion)
            volume_array.append(volume_change)
            chamfer_dist_array.append(chamfer_dist)
        except Exception as e:
            # Best-effort: report and move on to the next method.
            print("Skipping method ", method, "...")
            print(type(e))
            print(e.args)
            print(e)
    # One curve per successfully evaluated method.
    coords_conf_dist = plot_curves(distortion_array, 'Conformal distortion')
    coords_volume = plot_curves(volume_array, 'Volume change', logarithmic=True)
    coords_chamfer = plot_curves(chamfer_dist_array, 'Chamfer distance')
    if save_results:
        save_curve(coords_conf_dist, dataset + "_conf_dist.mat")
        save_curve(coords_volume, dataset + "_volume_change.mat")
        save_curve(coords_chamfer, dataset + "_chamfer_dist.mat")
    return 0
def eval_single(dataset, method):
    """Evaluate one method on one dataset and plot each metric's curve."""
    folder_idx = str(get_folder_idx(method, dataset)).zfill(3)
    conformal = eval_distortion(method, dataset, folder_idx)
    volume = eval_volume_change(method, dataset, folder_idx)
    chamfer = eval_chamfer(method, dataset, folder_idx)
    plot_curves([conformal], 'Conformal distortion')
    plot_curves([volume], 'Volume change', logarithmic=True)
    plot_curves([chamfer], 'Chamfer distance')
if __name__ == "__main__":
    # Choose the dataset to evaluate.
    dataset = "FAUST"
    # dataset = "FAUST_sub"
    # Choose the interpolation method to evaluate.
    method = "ham"
    eval_single(dataset, method)
from discrete_shell_potential import *
import datetime
import numpy as np
import os
from shape_utils import *
from base_tools import *
from param import *
import matplotlib.pyplot as plt
import scipy.io
def np_to_torch(m, long=False):
    """Convert an array-like to a tensor on the global ``device``.

    ``long=True`` yields a torch.long tensor, otherwise float32.
    """
    dtype = torch.long if long else torch.float32
    return torch.as_tensor(m, dtype=dtype, device=device)
def get_file_array(dataset):
    """Return the list of sequence-file indices for ``dataset``.

    Both supported datasets currently use indices 0..89.

    Raises:
        ValueError: for an unknown dataset name (the original fell through
            and crashed with UnboundLocalError instead).
    """
    if dataset in ("FAUST", "FAUST_sub"):
        return list(range(90))
    raise ValueError("Unknown dataset: %s" % dataset)
def save_curve(m_array, file_name):
    """Save a list of curve arrays into a single .mat file under ``curves/``.

    Each entry of ``m_array`` is stored under the key ``curve_<i>``.
    """
    folder_name = data_folder_out + "curves/"
    # FIX: makedirs(exist_ok=True) avoids the isdir/mkdir check-then-act
    # race of the original.
    os.makedirs(folder_name, exist_ok=True)
    dict_out = {"curve_" + str(i_m): m for i_m, m in enumerate(m_array)}
    scipy.io.savemat(folder_name + file_name, dict_out)
def load_seq_file(folder_name, i_file):
    """Load one interpolation sequence file ``seq_<i>.mat``.

    Returns (shape_x, shape_y, vert_sequence, time_elapsed); time_elapsed
    is -1 when the file does not record it.
    """
    file_name = "seq_" + str(i_file).zfill(3) + ".mat"
    mat_dict = scipy.io.loadmat(folder_name + file_name)
    vert_sequence = np_to_torch(mat_dict["vert_sequence"])
    # BUG FIX: the original tested `"time_elapsed" in mat_dict.values()`,
    # which compares against the VALUES and never matches the key, so
    # time_elapsed was always -1.
    if "time_elapsed" in mat_dict:
        time_elapsed = mat_dict["time_elapsed"]
    else:
        time_elapsed = -1
    # Triangle indices are 1-based in the MATLAB files; shift to 0-based.
    shape_x = Shape(np_to_torch(mat_dict["vert_x"]), np_to_torch(mat_dict["triv_x"], long=True)-1)
    shape_y = Shape(np_to_torch(mat_dict["vert_y"]), np_to_torch(mat_dict["triv_y"], long=True)-1)
    return shape_x, shape_y, vert_sequence, time_elapsed
def plot_curves(m_array, title=None, logarithmic=False):
    """Plot cumulative curves of the sorted values of each tensor in m_array.

    Each tensor is flattened, sorted, subsampled to ``num_plot`` points
    (log-spaced when ``logarithmic``) and plotted against its quantile.
    Returns a list of (num_plot, 2) float64 arrays of the plotted coords.
    """
    num_plot = 200
    coords_array = []
    for m in m_array:
        m_stacked = m.view(-1).cpu()
        m_sort, _ = torch.sort(m_stacked)
        if logarithmic:
            # Log-spaced sample positions along the sorted values.
            plot_select = np.linspace(0, np.log(m_sort.shape[0] - 1), num_plot, dtype=np.float64)
            plot_select = np.exp(plot_select)
            plot_select = torch.as_tensor(plot_select, device=device_cpu, dtype=torch.long)
            m_sort = m_sort[plot_select]
            y = np.linspace(0, 1, m_sort.shape[0], dtype=np.float64)
            m_sort = m_sort.detach().cpu().numpy()
            plt.semilogx(m_sort, y)
        else:
            # FIX: np.long was removed in NumPy >= 1.24; np.int64 is the
            # equivalent integer dtype for index arrays.
            plot_select = np.linspace(0, m_sort.shape[0] - 1, num_plot, dtype=np.int64)
            plot_select = torch.as_tensor(plot_select, device=device_cpu)
            m_sort = m_sort[plot_select]
            y = np.linspace(0, 1, m_sort.shape[0], dtype=np.float64)
            m_sort = m_sort.detach().cpu().numpy()
            plt.plot(m_sort, y)
        coords_array_curr = np.zeros([y.shape[0], 2], dtype=np.float64)
        coords_array_curr[:, 0] = m_sort
        coords_array_curr[:, 1] = y
        coords_array.append(coords_array_curr)
    if title is not None:
        plt.title(title)
    plt.ylim(0, 1)
    plt.grid()
    plt.show()
    return coords_array
def eval_volume_change(method, dataset, folder_idx, plot=False):
    """Per-file, per-timestep volume-change measure for one method/dataset.

    Returns a (num_files, num_t) float64 tensor whose entries are
    v/v0 + v0/v - 2: zero when volume is preserved, positive otherwise.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    volume_diff = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        num_t = vert_sequence.shape[0]
        volume_diff_curr = my_zeros([num_t]).to(dtype=torch.float64)
        # Reference volume of the undeformed source shape.
        volume_ref = shape_x.compute_volume().to(dtype=torch.float64)
        shape_x_new = deepcopy(shape_x)
        shape_x_new.vert = vert_sequence[num_t-1, ...]
        for t in range(num_t):
            volume_curr = shape_x.compute_volume_shifted(vert_sequence[t, ...]).to(dtype=torch.float64)
            # Symmetric ratio penalty: zero iff volume_curr == volume_ref.
            volume_diff_curr[t] = volume_curr / volume_ref + volume_ref / volume_curr - 2
        volume_diff.append(volume_diff_curr)
        if plot:
            scatter_shape_pair(shape_x_new, shape_y, title="Mean volume change: " + str(volume_diff_curr.mean()))
    # NOTE(review): num_t here carries over from the LAST file; this assumes
    # all sequences share the same timestep count -- confirm.
    volume_diff_tens = my_zeros([num_files, num_t]).to(dtype=torch.float64)
    for i_file in range(len(volume_diff)):
        volume_diff_tens[i_file, :] = volume_diff[i_file]
    print("Mean volume change: ", volume_diff_tens.mean())
    return volume_diff_tens
def compute_chamfer(shape_x, shape_y, vert_sequence, num_eval=10000):
    """One-directional chamfer distances from the final deformed shape to shape_y.

    Returns ``num_eval`` randomly sampled nearest-neighbour distances.
    """
    num_t = vert_sequence.shape[0]
    shape_x_new = deepcopy(shape_x)
    shape_x_new.vert = vert_sequence[num_t - 1, ...]
    samples = knn(shape_y.vert.to(device_cpu), shape_x_new.vert.to(device_cpu), k=1).to(device)
    chamfer_curr = (shape_x_new.vert[samples[0, :], :] - shape_y.vert[samples[1, :], :]).norm(dim=1)
    # BUG FIX: the sample count was hard-coded to 10000, silently ignoring
    # the num_eval parameter.
    idx_eval = torch.zeros([num_eval], device=device, dtype=torch.long).random_(0, chamfer_curr.shape[0])
    chamfer_curr = chamfer_curr[idx_eval]
    return chamfer_curr
def eval_chamfer(method, dataset, folder_idx, plot=False):
    """Chamfer-distance samples for every sequence of one method/dataset.

    Returns a (num_files, num_eval) tensor of sampled nearest-neighbour
    distances between each final deformed shape and its target.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    num_eval = 10000
    chamfer_array = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        chamfer_curr = compute_chamfer(shape_x, shape_y, vert_sequence, num_eval=num_eval)
        chamfer_array.append(chamfer_curr)
        if plot:
            # BUG FIX: shape_x_new was referenced here but never defined in
            # this scope (NameError whenever plot=True); rebuild the final
            # deformed shape explicitly.
            shape_x_new = deepcopy(shape_x)
            shape_x_new.vert = vert_sequence[vert_sequence.shape[0] - 1, ...]
            scatter_shape_pair(shape_x_new, shape_y, title="Mean chamfer dist: " + str(chamfer_curr.mean()))
    chamfer_tens = my_zeros([num_files, num_eval])
    for i_file in range(len(chamfer_array)):
        chamfer_tens[i_file, :] = chamfer_array[i_file]
    print("Mean chamfer dist: ", chamfer_tens.mean())
    return chamfer_tens
def compute_distortion(shape_x, shape_y, vert_sequence, num_eval=10000):
    """Per-timestep conformal-distortion samples for one sequence.

    Returns a (num_t, num_eval) tensor of |trace(A^T A)/det(A) - 3| values,
    clamped to [0, dist_max].
    """
    dist_max = 10
    num_t = vert_sequence.shape[0]
    num_triv = shape_x.triv.shape[0]
    dist_curr = my_zeros([num_t, num_eval])
    # (Dropped the unused deep copy of shape_x the original built here.)
    for t in range(num_t):
        normal_0, _, area_0, _, _, edge_t, edge_proj_0 = discrete_shell_energy_pre(vert_sequence[t, ...], shape_x.vert,
                                                                                   shape_x.triv)
        _, a_membrane_n = membrane_transformation(edge_t, area_0, normal_0, edge_proj_0)
        distortion_curr = (batch_trace(torch.bmm(a_membrane_n.transpose(1, 2), a_membrane_n)).squeeze()) / \
                          (torch.det(a_membrane_n) + 1e-6) - 3
        distortion_curr = torch.abs(distortion_curr)
        # Clamp to dist_max via: dist_max - relu(dist_max - x).
        distortion_curr = dist_max - torch.relu(dist_max - distortion_curr)
        # BUG FIX: sample count was hard-coded to 10000; honor num_eval so
        # the assignment into the preallocated dist_curr row always matches.
        idx_eval = torch.zeros([num_eval], device=device, dtype=torch.long).random_(0, num_triv)
        distortion_curr = distortion_curr[idx_eval]
        dist_curr[t, :] = distortion_curr
    return dist_curr
return dist_curr
def eval_distortion(method, dataset, folder_idx, plot=False):
    """Conformal-distortion samples for every sequence of one method/dataset.

    Returns a (num_files, num_t, num_eval) tensor.
    """
    folder_name = data_folder_out + method + "/" + dataset + "_" + str(folder_idx) + "/"
    file_array = get_file_array(dataset)
    num_files = len(file_array)
    num_eval = 10000
    distortion_array = []
    for i_file in file_array:
        shape_x, shape_y, vert_sequence, time_elapsed = load_seq_file(folder_name, i_file)
        num_t = vert_sequence.shape[0]
        dist_curr = compute_distortion(shape_x, shape_y, vert_sequence, num_eval=num_eval)
        distortion_array.append(dist_curr)
        if plot:
            # BUG FIX: shape_x_new was never defined in this scope
            # (NameError whenever plot=True); rebuild the final shape.
            shape_x_new = deepcopy(shape_x)
            shape_x_new.vert = vert_sequence[num_t - 1, ...]
            scatter_shape_pair(shape_x_new, shape_y, title="Mean distortion: " + str(dist_curr.mean()))
    # NOTE(review): num_t carries over from the LAST file; assumes all
    # sequences share the same timestep count -- confirm.
    distortion_tens = my_zeros([num_files, num_t, num_eval])
    for i_file in range(len(distortion_array)):
        distortion_tens[i_file, ...] = distortion_array[i_file]
    print("Mean distortion: ", distortion_tens.mean())
    return distortion_tens
def get_folder_idx(method, dataset):
    """Return the result-folder index for ``dataset``.

    ``method`` is currently unused but kept for interface compatibility.

    Raises:
        ValueError: for an unknown dataset name (the original fell through
            and crashed with UnboundLocalError instead).
    """
    if dataset in ("FAUST", "FAUST_sub"):
        return 1
    raise ValueError("Unknown dataset: %s" % dataset)
def eval_all(dataset, save_results=False):
    """Evaluate every method on ``dataset``, plot the three metric curves,
    and optionally save them as .mat files.

    Methods whose result folders are missing/broken are skipped: the broad
    except is a deliberate best-effort that prints the error and continues.
    """
    print("Evaluate ", dataset, "...")
    distortion_array = []
    volume_array = []
    chamfer_dist_array = []
    for method in ["ham", "div"]:
        print("Method: ", method, "...")
        folder_idx = get_folder_idx(method, dataset)
        folder_idx = str(folder_idx).zfill(3)
        try:
            distortion = eval_distortion(method, dataset, folder_idx)
            volume_change = eval_volume_change(method, dataset, folder_idx)
            chamfer_dist = eval_chamfer(method, dataset, folder_idx)
            distortion_array.append(distortion)
            volume_array.append(volume_change)
            chamfer_dist_array.append(chamfer_dist)
        except Exception as e:
            # Best-effort: report and move on to the next method.
            print("Skipping method ", method, "...")
            print(type(e))
            print(e.args)
            print(e)
    # One curve per successfully evaluated method.
    coords_conf_dist = plot_curves(distortion_array, 'Conformal distortion')
    coords_volume = plot_curves(volume_array, 'Volume change', logarithmic=True)
    coords_chamfer = plot_curves(chamfer_dist_array, 'Chamfer distance')
    if save_results:
        save_curve(coords_conf_dist, dataset + "_conf_dist.mat")
        save_curve(coords_volume, dataset + "_volume_change.mat")
        save_curve(coords_chamfer, dataset + "_chamfer_dist.mat")
    return 0
def eval_single(dataset, method):
    """Evaluate one method on one dataset and plot each metric's curve."""
    folder_idx = str(get_folder_idx(method, dataset)).zfill(3)
    conformal = eval_distortion(method, dataset, folder_idx)
    volume = eval_volume_change(method, dataset, folder_idx)
    chamfer = eval_chamfer(method, dataset, folder_idx)
    plot_curves([conformal], 'Conformal distortion')
    plot_curves([volume], 'Volume change', logarithmic=True)
    plot_curves([chamfer], 'Chamfer distance')
if __name__ == "__main__":
    # Choose the dataset to evaluate.
    dataset = "FAUST"
    # dataset = "FAUST_sub"
    # Choose the interpolation method to evaluate.
    method = "ham"
    eval_single(dataset, method)
from modules.whos_on_first_common import ButtonPosition
# "Who's on First" step-1 table: the display text determines which of the
# six buttons' labels must be read next.
SCREEN_TO_BUTTON_TO_READ = {
    "YES": ButtonPosition.middle_left,
    "FIRST": ButtonPosition.top_right,
    "DISPLAY": ButtonPosition.bottom_right,
    "OKAY": ButtonPosition.top_right,
    "SAYS": ButtonPosition.bottom_right,
    "NOTHING": ButtonPosition.middle_left,
    "": ButtonPosition.bottom_left,
    "BLANK": ButtonPosition.middle_right,
    "NO": ButtonPosition.bottom_right,
    "LED": ButtonPosition.middle_left,
    "LEAD": ButtonPosition.bottom_right,
    "READ": ButtonPosition.middle_right,
    "RED": ButtonPosition.middle_right,
    "REED": ButtonPosition.bottom_left,
    "LEED": ButtonPosition.bottom_left,
    "HOLDON": ButtonPosition.bottom_right,
    "YOU": ButtonPosition.middle_right,
    "YOUARE": ButtonPosition.bottom_right,
    "YOUR": ButtonPosition.middle_right,
    "YOU'RE": ButtonPosition.middle_right,
    "UR": ButtonPosition.top_left,
    "THERE": ButtonPosition.bottom_right,
    "THEY'RE": ButtonPosition.bottom_left,
    "THEIR": ButtonPosition.middle_right,
    "THEYARE": ButtonPosition.middle_left,
    "SEE": ButtonPosition.bottom_right,
    "C": ButtonPosition.top_right,
    "CEE": ButtonPosition.bottom_right,
}
# Step-2 table: the label read in step 1 selects a priority-ordered word
# list; the first list entry that appears on any button is the one to press.
#
# FIX: the "YOU" and "YOU ARE" rows contained adjacent string literals
# ("YOU" "ARE", "UH" "HUH", "UH" "UH"), which Python implicitly concatenates
# into "YOUARE"/"UHHUH"/"UHUH" — words that never appear on a button, so the
# lookup could never match them. They are now proper two-word strings,
# consistent with every other row.
BUTTON_TEXT_TO_WORD_LIST = {
    "READY": ["YES", "OKAY", "WHAT", "MIDDLE", "LEFT", "PRESS", "RIGHT", "BLANK", "READY", "NO", "FIRST", "UHHH", "NOTHING", "WAIT"],
    "FIRST": ["LEFT", "OKAY", "YES", "MIDDLE", "NO", "RIGHT", "NOTHING", "UHHH", "WAIT", "READY", "BLANK", "WHAT", "PRESS", "FIRST"],
    "NO": ["BLANK", "UHHH", "WAIT", "FIRST", "WHAT", "READY", "RIGHT", "YES", "NOTHING", "LEFT", "PRESS", "OKAY", "NO", "MIDDLE"],
    "BLANK": ["WAIT", "RIGHT", "OKAY", "MIDDLE", "BLANK", "PRESS", "READY", "NOTHING", "NO", "WHAT", "LEFT", "UHHH", "YES", "FIRST"],
    "NOTHING": ["UHHH", "RIGHT", "OKAY", "MIDDLE", "YES", "BLANK", "NO", "PRESS", "LEFT", "WHAT", "WAIT", "FIRST", "NOTHING", "READY"],
    "YES": ["OKAY", "RIGHT", "UHHH", "MIDDLE", "FIRST", "WHAT", "PRESS", "READY", "NOTHING", "YES", "LEFT", "BLANK", "NO", "WAIT"],
    "WHAT": ["UHHH", "WHAT", "LEFT", "NOTHING", "READY", "BLANK", "MIDDLE", "NO", "OKAY", "FIRST", "WAIT", "YES", "PRESS", "RIGHT"],
    "UHHH": ["READY", "NOTHING", "LEFT", "WHAT", "OKAY", "YES", "RIGHT", "NO", "PRESS", "BLANK", "UHHH", "MIDDLE", "WAIT", "FIRST"],
    "LEFT": ["RIGHT", "LEFT", "FIRST", "NO", "MIDDLE", "YES", "BLANK", "WHAT", "UHHH", "WAIT", "PRESS", "READY", "OKAY", "NOTHING"],
    "RIGHT": ["YES", "NOTHING", "READY", "PRESS", "NO", "WAIT", "WHAT", "RIGHT", "MIDDLE", "LEFT", "UHHH", "BLANK", "OKAY", "FIRST"],
    "MIDDLE": ["BLANK", "READY", "OKAY", "WHAT", "NOTHING", "PRESS", "NO", "WAIT", "LEFT", "MIDDLE", "RIGHT", "FIRST", "UHHH", "YES"],
    "OKAY": ["MIDDLE", "NO", "FIRST", "YES", "UHHH", "NOTHING", "WAIT", "OKAY", "LEFT", "READY", "BLANK", "PRESS", "WHAT", "RIGHT"],
    "WAIT": ["UHHH", "NO", "BLANK", "OKAY", "YES", "LEFT", "FIRST", "PRESS", "WHAT", "WAIT", "NOTHING", "READY", "RIGHT", "MIDDLE"],
    "PRESS": ["RIGHT", "MIDDLE", "YES", "READY", "PRESS", "OKAY", "NOTHING", "UHHH", "BLANK", "LEFT", "FIRST", "WHAT", "NO", "WAIT"],
    "YOU": ["SURE", "YOU ARE", "YOUR", "YOU'RE", "NEXT", "UH HUH", "UR", "HOLD", "WHAT?", "YOU", "UH UH", "LIKE", "DONE", "U"],
    "YOU ARE": ["YOUR", "NEXT", "LIKE", "UH HUH", "WHAT?", "DONE", "UH UH", "HOLD", "YOU", "U", "YOU'RE", "SURE", "UR", "YOU ARE"],
    "YOUR": ["UH UH", "YOU ARE", "UH HUH", "YOUR", "NEXT", "UR", "SURE", "U", "YOU'RE", "YOU", "WHAT?", "HOLD", "LIKE", "DONE"],
    "YOU'RE": ["YOU", "YOU'RE", "UR", "NEXT", "UH UH", "YOU ARE", "U", "YOUR", "WHAT?", "UH HUH", "SURE", "DONE", "LIKE", "HOLD"],
    "UR": ["DONE", "U", "UR", "UH HUH", "WHAT?", "SURE", "YOUR", "HOLD", "YOU'RE", "LIKE", "NEXT", "UH UH", "YOU ARE", "YOU"],
    "U": ["UH HUH", "SURE", "NEXT", "WHAT?", "YOU'RE", "UR", "UH UH", "DONE", "U", "YOU", "LIKE", "HOLD", "YOU ARE", "YOUR"],
    "UH HUH": ["UH HUH", "YOUR", "YOU ARE", "YOU", "DONE", "HOLD", "UH UH", "NEXT", "SURE", "LIKE", "YOU'RE", "UR", "U", "WHAT?"],
    "UH UH": ["UR", "U", "YOU ARE", "YOU'RE", "NEXT", "UH UH", "DONE", "YOU", "UH HUH", "LIKE", "YOUR", "SURE", "HOLD", "WHAT?"],
    "WHAT?": ["YOU", "HOLD", "YOU'RE", "YOUR", "U", "DONE", "UH UH", "LIKE", "YOU ARE", "UH HUH", "UR", "NEXT", "WHAT?", "SURE"],
    "DONE": ["SURE", "UH HUH", "NEXT", "WHAT?", "YOUR", "UR", "YOU'RE", "HOLD", "LIKE", "YOU", "U", "YOU ARE", "UH UH", "DONE"],
    "NEXT": ["WHAT?", "UH HUH", "UH UH", "YOUR", "HOLD", "SURE", "NEXT", "LIKE", "DONE", "YOU ARE", "UR", "YOU'RE", "U", "YOU"],
    "HOLD": ["YOU ARE", "U", "DONE", "UH UH", "YOU", "UR", "SURE", "WHAT?", "YOU'RE", "NEXT", "HOLD", "UH HUH", "YOUR", "LIKE"],
    "SURE": ["YOU ARE", "DONE", "LIKE", "YOU'RE", "YOU", "HOLD", "UH HUH", "UR", "SURE", "U", "WHAT?", "NEXT", "YOUR", "UH UH"],
    "LIKE": ["YOU'RE", "NEXT", "U", "UR", "HOLD", "DONE", "UH UH", "WHAT?", "UH HUH", "YOU", "LIKE", "SURE", "YOU ARE", "YOUR"],
}
def button_to_press(screen_text, buttons):
    """
    Takes in the screen text and a list of button texts. Returns the ButtonPosition to press.
    """
    # Invert the button layout: word shown on a button -> its position.
    word_to_position = {}
    for position in ButtonPosition:
        word_to_position[buttons[position.value]] = position
    # Step 1: the display text tells us which button's label to read.
    button_to_read = SCREEN_TO_BUTTON_TO_READ[screen_text]
    print "Reading", button_to_read
    button_text = buttons[button_to_read.value]
    # Step 2: that label selects a priority-ordered word list; press the
    # first word from the list that appears on any button.
    word_list = BUTTON_TEXT_TO_WORD_LIST[button_text]
    for word in word_list:
        if word in word_to_position:
            return word_to_position[word]
assert False, "Couldn't find button in word list" | src/modules/whos_on_first_solution.py | from modules.whos_on_first_common import ButtonPosition
# "Who's on First" step-1 table: the display text determines which of the
# six buttons' labels must be read next.
SCREEN_TO_BUTTON_TO_READ = {
    "YES": ButtonPosition.middle_left,
    "FIRST": ButtonPosition.top_right,
    "DISPLAY": ButtonPosition.bottom_right,
    "OKAY": ButtonPosition.top_right,
    "SAYS": ButtonPosition.bottom_right,
    "NOTHING": ButtonPosition.middle_left,
    "": ButtonPosition.bottom_left,
    "BLANK": ButtonPosition.middle_right,
    "NO": ButtonPosition.bottom_right,
    "LED": ButtonPosition.middle_left,
    "LEAD": ButtonPosition.bottom_right,
    "READ": ButtonPosition.middle_right,
    "RED": ButtonPosition.middle_right,
    "REED": ButtonPosition.bottom_left,
    "LEED": ButtonPosition.bottom_left,
    "HOLDON": ButtonPosition.bottom_right,
    "YOU": ButtonPosition.middle_right,
    "YOUARE": ButtonPosition.bottom_right,
    "YOUR": ButtonPosition.middle_right,
    "YOU'RE": ButtonPosition.middle_right,
    "UR": ButtonPosition.top_left,
    "THERE": ButtonPosition.bottom_right,
    "THEY'RE": ButtonPosition.bottom_left,
    "THEIR": ButtonPosition.middle_right,
    "THEYARE": ButtonPosition.middle_left,
    "SEE": ButtonPosition.bottom_right,
    "C": ButtonPosition.top_right,
    "CEE": ButtonPosition.bottom_right,
}
# Step-2 table: the label read in step 1 selects a priority-ordered word
# list; the first list entry that appears on any button is the one to press.
#
# FIX: the "YOU" and "YOU ARE" rows contained adjacent string literals
# ("YOU" "ARE", "UH" "HUH", "UH" "UH"), which Python implicitly concatenates
# into "YOUARE"/"UHHUH"/"UHUH" — words that never appear on a button, so the
# lookup could never match them. They are now proper two-word strings,
# consistent with every other row.
BUTTON_TEXT_TO_WORD_LIST = {
    "READY": ["YES", "OKAY", "WHAT", "MIDDLE", "LEFT", "PRESS", "RIGHT", "BLANK", "READY", "NO", "FIRST", "UHHH", "NOTHING", "WAIT"],
    "FIRST": ["LEFT", "OKAY", "YES", "MIDDLE", "NO", "RIGHT", "NOTHING", "UHHH", "WAIT", "READY", "BLANK", "WHAT", "PRESS", "FIRST"],
    "NO": ["BLANK", "UHHH", "WAIT", "FIRST", "WHAT", "READY", "RIGHT", "YES", "NOTHING", "LEFT", "PRESS", "OKAY", "NO", "MIDDLE"],
    "BLANK": ["WAIT", "RIGHT", "OKAY", "MIDDLE", "BLANK", "PRESS", "READY", "NOTHING", "NO", "WHAT", "LEFT", "UHHH", "YES", "FIRST"],
    "NOTHING": ["UHHH", "RIGHT", "OKAY", "MIDDLE", "YES", "BLANK", "NO", "PRESS", "LEFT", "WHAT", "WAIT", "FIRST", "NOTHING", "READY"],
    "YES": ["OKAY", "RIGHT", "UHHH", "MIDDLE", "FIRST", "WHAT", "PRESS", "READY", "NOTHING", "YES", "LEFT", "BLANK", "NO", "WAIT"],
    "WHAT": ["UHHH", "WHAT", "LEFT", "NOTHING", "READY", "BLANK", "MIDDLE", "NO", "OKAY", "FIRST", "WAIT", "YES", "PRESS", "RIGHT"],
    "UHHH": ["READY", "NOTHING", "LEFT", "WHAT", "OKAY", "YES", "RIGHT", "NO", "PRESS", "BLANK", "UHHH", "MIDDLE", "WAIT", "FIRST"],
    "LEFT": ["RIGHT", "LEFT", "FIRST", "NO", "MIDDLE", "YES", "BLANK", "WHAT", "UHHH", "WAIT", "PRESS", "READY", "OKAY", "NOTHING"],
    "RIGHT": ["YES", "NOTHING", "READY", "PRESS", "NO", "WAIT", "WHAT", "RIGHT", "MIDDLE", "LEFT", "UHHH", "BLANK", "OKAY", "FIRST"],
    "MIDDLE": ["BLANK", "READY", "OKAY", "WHAT", "NOTHING", "PRESS", "NO", "WAIT", "LEFT", "MIDDLE", "RIGHT", "FIRST", "UHHH", "YES"],
    "OKAY": ["MIDDLE", "NO", "FIRST", "YES", "UHHH", "NOTHING", "WAIT", "OKAY", "LEFT", "READY", "BLANK", "PRESS", "WHAT", "RIGHT"],
    "WAIT": ["UHHH", "NO", "BLANK", "OKAY", "YES", "LEFT", "FIRST", "PRESS", "WHAT", "WAIT", "NOTHING", "READY", "RIGHT", "MIDDLE"],
    "PRESS": ["RIGHT", "MIDDLE", "YES", "READY", "PRESS", "OKAY", "NOTHING", "UHHH", "BLANK", "LEFT", "FIRST", "WHAT", "NO", "WAIT"],
    "YOU": ["SURE", "YOU ARE", "YOUR", "YOU'RE", "NEXT", "UH HUH", "UR", "HOLD", "WHAT?", "YOU", "UH UH", "LIKE", "DONE", "U"],
    "YOU ARE": ["YOUR", "NEXT", "LIKE", "UH HUH", "WHAT?", "DONE", "UH UH", "HOLD", "YOU", "U", "YOU'RE", "SURE", "UR", "YOU ARE"],
    "YOUR": ["UH UH", "YOU ARE", "UH HUH", "YOUR", "NEXT", "UR", "SURE", "U", "YOU'RE", "YOU", "WHAT?", "HOLD", "LIKE", "DONE"],
    "YOU'RE": ["YOU", "YOU'RE", "UR", "NEXT", "UH UH", "YOU ARE", "U", "YOUR", "WHAT?", "UH HUH", "SURE", "DONE", "LIKE", "HOLD"],
    "UR": ["DONE", "U", "UR", "UH HUH", "WHAT?", "SURE", "YOUR", "HOLD", "YOU'RE", "LIKE", "NEXT", "UH UH", "YOU ARE", "YOU"],
    "U": ["UH HUH", "SURE", "NEXT", "WHAT?", "YOU'RE", "UR", "UH UH", "DONE", "U", "YOU", "LIKE", "HOLD", "YOU ARE", "YOUR"],
    "UH HUH": ["UH HUH", "YOUR", "YOU ARE", "YOU", "DONE", "HOLD", "UH UH", "NEXT", "SURE", "LIKE", "YOU'RE", "UR", "U", "WHAT?"],
    "UH UH": ["UR", "U", "YOU ARE", "YOU'RE", "NEXT", "UH UH", "DONE", "YOU", "UH HUH", "LIKE", "YOUR", "SURE", "HOLD", "WHAT?"],
    "WHAT?": ["YOU", "HOLD", "YOU'RE", "YOUR", "U", "DONE", "UH UH", "LIKE", "YOU ARE", "UH HUH", "UR", "NEXT", "WHAT?", "SURE"],
    "DONE": ["SURE", "UH HUH", "NEXT", "WHAT?", "YOUR", "UR", "YOU'RE", "HOLD", "LIKE", "YOU", "U", "YOU ARE", "UH UH", "DONE"],
    "NEXT": ["WHAT?", "UH HUH", "UH UH", "YOUR", "HOLD", "SURE", "NEXT", "LIKE", "DONE", "YOU ARE", "UR", "YOU'RE", "U", "YOU"],
    "HOLD": ["YOU ARE", "U", "DONE", "UH UH", "YOU", "UR", "SURE", "WHAT?", "YOU'RE", "NEXT", "HOLD", "UH HUH", "YOUR", "LIKE"],
    "SURE": ["YOU ARE", "DONE", "LIKE", "YOU'RE", "YOU", "HOLD", "UH HUH", "UR", "SURE", "U", "WHAT?", "NEXT", "YOUR", "UH UH"],
    "LIKE": ["YOU'RE", "NEXT", "U", "UR", "HOLD", "DONE", "UH UH", "WHAT?", "UH HUH", "YOU", "LIKE", "SURE", "YOU ARE", "YOUR"],
}
def button_to_press(screen_text, buttons):
    """
    Takes in the screen text and a list of button texts. Returns the ButtonPosition to press.
    """
    # Invert the button layout: word shown on a button -> its position.
    word_to_position = {}
    for position in ButtonPosition:
        word_to_position[buttons[position.value]] = position
    # Step 1: the display text tells us which button's label to read.
    button_to_read = SCREEN_TO_BUTTON_TO_READ[screen_text]
    print "Reading", button_to_read
    button_text = buttons[button_to_read.value]
    # Step 2: that label selects a priority-ordered word list; press the
    # first word from the list that appears on any button.
    word_list = BUTTON_TEXT_TO_WORD_LIST[button_text]
    for word in word_list:
        if word in word_to_position:
            return word_to_position[word]
assert False, "Couldn't find button in word list" | 0.266166 | 0.435241 |
import os
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import xml.etree.ElementTree as et
import mujoco_py
class PusherEnv3DofEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """3-DoF pusher MuJoCo environment with XML-level domain randomization.

    Friction-loss and damping of the pushed object (and optionally gripper
    geometry) are re-randomized by rewriting the reference XML and
    re-creating the simulation. Observations are 64x64 RGB renderings.
    """

    def __init__(self, **kwargs):
        utils.EzPickle.__init__(self)
        self.reference_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           'assets/pusher_3dof.xml')
        mujoco_env.MujocoEnv.__init__(self, self.reference_path, frame_skip=5)
        self.model.stat.extent = 10
        # randomization
        self.reference_xml = et.parse(self.reference_path)
        self.config_file = kwargs.get('config')
        # NOTE(review): `dimensions` is read by the _randomize_* helpers but
        # never filled here — presumably populated by external code; confirm.
        self.dimensions = []
        self._locate_randomize_parameters()
        # self.checkMy = False

    def _locate_randomize_parameters(self):
        """Cache the XML nodes whose attributes get rewritten during randomization."""
        self.root = self.reference_xml.getroot()
        end_effector = self.root.find(".//body[@name='distal_4']")
        self.wrist = end_effector.findall("./geom[@type='capsule']")
        self.tips = end_effector.findall(".//body[@name='tips_arm']/geom")
        self.object_body = self.root.find(".//body[@name='object']/geom")
        self.object_joints = self.root.findall(".//body[@name='object']/joint")

    def _update_randomized_params(self):
        """Regenerate the XML with the current randomization values and rebuild the sim."""
        xml = self._create_xml()
        self._re_init(xml)

    def _re_init(self, xml):
        """Recreate model/sim/data from an XML string and re-sync the viewer."""
        self.model = mujoco_py.load_model_from_xml(xml)
        self.sim = mujoco_py.MjSim(self.model)
        self.data = self.sim.data
        self.init_qpos = self.data.qpos.ravel().copy()
        self.init_qvel = self.data.qvel.ravel().copy()
        # Step once with zero controls to sanity-check the rebuilt sim.
        observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
        assert not done
        if self.viewer:
            self.viewer.update_sim(self.sim)

    def _create_xml(self):
        # TODO: I might speed this up, but I think is insignificant w.r.t to the model/sim creation...
        self._randomize_friction()
        self._randomize_damping()
        # self._randomize_size()
        return et.tostring(self.root, encoding='unicode', method='xml')

    # TODO: I'm making an assumption here that 3 places after the comma are good enough, are they?
    def _randomize_friction(self):
        """Write the current friction-loss value into every object joint."""
        frictionloss = self.dimensions[0].current_value
        for joint in self.object_joints:
            joint.set('frictionloss', '{:3f}'.format(frictionloss))

    def _randomize_damping(self):
        """Write the current damping value into every object joint."""
        damping = self.dimensions[1].current_value
        for joint in self.object_joints:
            joint.set('damping', '{:3f}'.format(damping))

    def _randomize_size(self):
        """Resize the gripper geometry (currently disabled in _create_xml)."""
        size = self.dimensions[2].current_value
        # grabber
        grabber_width = size * 2
        self.wrist[0].set('fromto', '0 -{:3f} 0. 0.0 +{:3f} 0'.format(grabber_width, grabber_width))
        self.wrist[1].set('fromto', '0 -{:3f} 0. {:3f} -{:3f} 0'.format(grabber_width, grabber_width, grabber_width))
        self.wrist[2].set('fromto', '0 +{:3f} 0. {:3f} +{:3f} 0'.format(grabber_width, grabber_width, grabber_width))
        self.tips[0].set('pos', '{:3f} -{:3f} 0.'.format(grabber_width, grabber_width))
        self.tips[1].set('pos', '{:3f} {:3f} 0.'.format(grabber_width, grabber_width))

    def step(self, action):
        """Advance the sim; reward is the negative object-to-goal distance.

        Note: both distances are measured *before* the simulation step.
        """
        arm_dist = np.linalg.norm(self.get_body_com("object")[:2] - self.get_body_com("tips_arm")[:2])
        goal_dist = np.linalg.norm(self.get_body_com("object")[:2] - self.get_body_com("goal")[:2])
        # Reward from Soft Q Learning
        # action_cost = np.square(action).sum()
        reward = -goal_dist
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        done = False
        return ob, reward, done, {'arm_dist': arm_dist, 'goal_dist': goal_dist}

    def viewer_setup(self):
        """Position the camera for a fixed view of the work surface."""
        # coords = [.7, -.5, 0]
        coords = [0.15, -0, -1000]
        for i in range(3):
            self.viewer.cam.lookat[i] = coords[i]
        # self.viewer.cam.trackbodyid = -1
        # self.viewer.cam.distance = self.model.stat.extent * 1.0
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.25
        self.viewer.cam.lookat[2] = -0.2
        self.viewer.cam.elevation = -60
        print (self.viewer.cam.distance, self.viewer.cam.lookat,self.viewer.cam.elevation )
        # checkMy = True

    def reset_model(self):
        """Randomize arm pose; sample object/goal positions at least 0.45 apart."""
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        # Original
        # object_ = np.random.uniform(low=[.3,-1.0], high=[1.2,-0.4])
        # goal = np.random.uniform(low=[.8,-1.2], high=[1.2,-0.8])
        while True:
            # NOW RUNNING: "HARDER*"
            object_ = np.random.uniform(low=[.4,-1.0], high=[1.2,-0.5])
            # object_ = np.random.uniform(low=[.5,-1.0], high=[1.2,-0.6])
            goal = np.random.uniform(low=[.8,-1.2], high=[1.2,-0.8])
            if np.linalg.norm(object_ - goal) > 0.45:
                break
        self.object = np.array(object_)
        self.goal = np.array(goal)
        qpos[-4:-2] = self.object
        qpos[-2:] = self.goal
        # NOTE(review): `qvel` aliases `init_qvel` (no copy), so the in-place
        # write below mutates init_qvel as well — confirm this is intended.
        qvel = self.init_qvel
        qvel[-4:] = 0
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        """Return a 64x64 RGB rendering as the observation."""
        # print (self.get_body_com("distal_4"))
        height, width = 64, 64
        camera_id = 0
        self._get_viewer('rgb_array').render(width, height)
        data = self._get_viewer('rgb_array').read_pixels(width, height, depth=False)
return data | envs/pusher3dof.py | import os
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import xml.etree.ElementTree as et
import mujoco_py
class PusherEnv3DofEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """3-DoF pusher MuJoCo environment with XML-level domain randomization.

    Friction-loss and damping of the pushed object (and optionally gripper
    geometry) are re-randomized by rewriting the reference XML and
    re-creating the simulation. Observations are 64x64 RGB renderings.
    """

    def __init__(self, **kwargs):
        utils.EzPickle.__init__(self)
        self.reference_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                           'assets/pusher_3dof.xml')
        mujoco_env.MujocoEnv.__init__(self, self.reference_path, frame_skip=5)
        self.model.stat.extent = 10
        # randomization
        self.reference_xml = et.parse(self.reference_path)
        self.config_file = kwargs.get('config')
        # NOTE(review): `dimensions` is read by the _randomize_* helpers but
        # never filled here — presumably populated by external code; confirm.
        self.dimensions = []
        self._locate_randomize_parameters()
        # self.checkMy = False

    def _locate_randomize_parameters(self):
        """Cache the XML nodes whose attributes get rewritten during randomization."""
        self.root = self.reference_xml.getroot()
        end_effector = self.root.find(".//body[@name='distal_4']")
        self.wrist = end_effector.findall("./geom[@type='capsule']")
        self.tips = end_effector.findall(".//body[@name='tips_arm']/geom")
        self.object_body = self.root.find(".//body[@name='object']/geom")
        self.object_joints = self.root.findall(".//body[@name='object']/joint")

    def _update_randomized_params(self):
        """Regenerate the XML with the current randomization values and rebuild the sim."""
        xml = self._create_xml()
        self._re_init(xml)

    def _re_init(self, xml):
        """Recreate model/sim/data from an XML string and re-sync the viewer."""
        self.model = mujoco_py.load_model_from_xml(xml)
        self.sim = mujoco_py.MjSim(self.model)
        self.data = self.sim.data
        self.init_qpos = self.data.qpos.ravel().copy()
        self.init_qvel = self.data.qvel.ravel().copy()
        # Step once with zero controls to sanity-check the rebuilt sim.
        observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
        assert not done
        if self.viewer:
            self.viewer.update_sim(self.sim)

    def _create_xml(self):
        # TODO: I might speed this up, but I think is insignificant w.r.t to the model/sim creation...
        self._randomize_friction()
        self._randomize_damping()
        # self._randomize_size()
        return et.tostring(self.root, encoding='unicode', method='xml')

    # TODO: I'm making an assumption here that 3 places after the comma are good enough, are they?
    def _randomize_friction(self):
        """Write the current friction-loss value into every object joint."""
        frictionloss = self.dimensions[0].current_value
        for joint in self.object_joints:
            joint.set('frictionloss', '{:3f}'.format(frictionloss))

    def _randomize_damping(self):
        """Write the current damping value into every object joint."""
        damping = self.dimensions[1].current_value
        for joint in self.object_joints:
            joint.set('damping', '{:3f}'.format(damping))

    def _randomize_size(self):
        """Resize the gripper geometry (currently disabled in _create_xml)."""
        size = self.dimensions[2].current_value
        # grabber
        grabber_width = size * 2
        self.wrist[0].set('fromto', '0 -{:3f} 0. 0.0 +{:3f} 0'.format(grabber_width, grabber_width))
        self.wrist[1].set('fromto', '0 -{:3f} 0. {:3f} -{:3f} 0'.format(grabber_width, grabber_width, grabber_width))
        self.wrist[2].set('fromto', '0 +{:3f} 0. {:3f} +{:3f} 0'.format(grabber_width, grabber_width, grabber_width))
        self.tips[0].set('pos', '{:3f} -{:3f} 0.'.format(grabber_width, grabber_width))
        self.tips[1].set('pos', '{:3f} {:3f} 0.'.format(grabber_width, grabber_width))

    def step(self, action):
        """Advance the sim; reward is the negative object-to-goal distance.

        Note: both distances are measured *before* the simulation step.
        """
        arm_dist = np.linalg.norm(self.get_body_com("object")[:2] - self.get_body_com("tips_arm")[:2])
        goal_dist = np.linalg.norm(self.get_body_com("object")[:2] - self.get_body_com("goal")[:2])
        # Reward from Soft Q Learning
        # action_cost = np.square(action).sum()
        reward = -goal_dist
        self.do_simulation(action, self.frame_skip)
        ob = self._get_obs()
        done = False
        return ob, reward, done, {'arm_dist': arm_dist, 'goal_dist': goal_dist}

    def viewer_setup(self):
        """Position the camera for a fixed view of the work surface."""
        # coords = [.7, -.5, 0]
        coords = [0.15, -0, -1000]
        for i in range(3):
            self.viewer.cam.lookat[i] = coords[i]
        # self.viewer.cam.trackbodyid = -1
        # self.viewer.cam.distance = self.model.stat.extent * 1.0
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.25
        self.viewer.cam.lookat[2] = -0.2
        self.viewer.cam.elevation = -60
        print (self.viewer.cam.distance, self.viewer.cam.lookat,self.viewer.cam.elevation )
        # checkMy = True

    def reset_model(self):
        """Randomize arm pose; sample object/goal positions at least 0.45 apart."""
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        # Original
        # object_ = np.random.uniform(low=[.3,-1.0], high=[1.2,-0.4])
        # goal = np.random.uniform(low=[.8,-1.2], high=[1.2,-0.8])
        while True:
            # NOW RUNNING: "HARDER*"
            object_ = np.random.uniform(low=[.4,-1.0], high=[1.2,-0.5])
            # object_ = np.random.uniform(low=[.5,-1.0], high=[1.2,-0.6])
            goal = np.random.uniform(low=[.8,-1.2], high=[1.2,-0.8])
            if np.linalg.norm(object_ - goal) > 0.45:
                break
        self.object = np.array(object_)
        self.goal = np.array(goal)
        qpos[-4:-2] = self.object
        qpos[-2:] = self.goal
        # NOTE(review): `qvel` aliases `init_qvel` (no copy), so the in-place
        # write below mutates init_qvel as well — confirm this is intended.
        qvel = self.init_qvel
        qvel[-4:] = 0
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        """Return a 64x64 RGB rendering as the observation."""
        # print (self.get_body_com("distal_4"))
        height, width = 64, 64
        camera_id = 0
        self._get_viewer('rgb_array').render(width, height)
        data = self._get_viewer('rgb_array').read_pixels(width, height, depth=False)
return data | 0.390243 | 0.239305 |
import pandas as pd
from argparse import ArgumentParser
import glob
import shutil
import os
import uuid
import random
def main(target_folder, csv_path, out_folder, dry_run=False, train_test_split=0.8):
    """Pseudonymize a folder of ``<id>_<age>_<imageno>.jpg`` images.

    Each image is copied to *out_folder* under a UUID-based name (one UUID
    per original subject id), a CSV mapping is written to *csv_path*, and
    every subject is assigned to the train or test split.

    Args:
        target_folder: folder containing the source ``*.jpg`` files.
        csv_path: path of the mapping CSV to create.
        out_folder: destination folder for the renamed copies.
        dry_run: if True, no files are copied (the CSV is still written).
        train_test_split: fraction of subjects kept in the training set.
    """
    filenames = glob.glob(os.path.join(target_folder, "*.jpg"))
    ids = []
    ages = []
    imagenos = []
    uuids = []
    new_filenames = []
    in_test_set = []
    copy_count = 0
    id_to_uuid = dict()
    id_to_test_set_status = dict()
    for f in filenames:
        # Fixed: loop variable renamed from `id` (shadowed the builtin).
        subject_id, age, imageno = os.path.basename(f).replace(".jpg", "").split("_")
        ids.append(subject_id)
        ages.append(float(age))
        imagenos.append(int(imageno))
        if subject_id not in id_to_uuid:
            id_to_uuid[subject_id] = uuid.uuid4()
            # The split decision is made once per subject, so all images of
            # one subject land in the same split.
            id_to_test_set_status[subject_id] = random.random() > train_test_split
        new_id = id_to_uuid[subject_id]
        cur_id_in_test_set = id_to_test_set_status[subject_id]
        uuids.append(new_id)
        in_test_set.append(cur_id_in_test_set)
        new_filename = f"{new_id}_{age}_{imageno}.jpg"
        new_filenames.append(new_filename)
        full_new_path = os.path.join(out_folder, new_filename)
        if dry_run is False:
            print(f"Copying {f} to {full_new_path}")
            if not os.path.isfile(full_new_path):
                shutil.copyfile(f, full_new_path)
            else:
                print(f"Skipping {f} (already present)")
            copy_count = copy_count + 1
        else:
            print(f"DRYRUN: Pretending to copy {f} to {full_new_path}")
    # NOTE(review): the CSV still contains the original ids next to the new
    # UUIDs — confirm the mapping file is meant to be kept private.
    df = pd.DataFrame({"id": ids, "age": ages, "imageno": imagenos, "uuids": uuids,
                       "filename": new_filenames, "testset": in_test_set})
    df.to_csv(csv_path, index=False)
    print(f"{copy_count} file(s) were copied to {out_folder}, details in {csv_path}")
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the pseudonymizer.
    parser = ArgumentParser()
    parser.add_argument("--target", type=str, help="Folder with image set", required=True)
    parser.add_argument("--out_csv", type=str, help="Name of CSV to create with results", default="dataset.csv", required=False)
    parser.add_argument("--out_folder", type=str, help="Where to put pseudonmized images", required=True)
    parser.add_argument("--dryrun", action="store_true", help="Use for a dummy run (no files copied)", required=False, default=False)
    args = parser.parse_args()
main(args.target, args.out_csv, args.out_folder, args.dryrun) | src/pseudonomize.py | import pandas as pd
from argparse import ArgumentParser
import glob
import shutil
import os
import uuid
import random
def main(target_folder, csv_path, out_folder, dry_run=False, train_test_split=0.8):
    """Pseudonymize a folder of ``<id>_<age>_<imageno>.jpg`` images.

    Each image is copied to *out_folder* under a UUID-based name (one UUID
    per original subject id), a CSV mapping is written to *csv_path*, and
    every subject is assigned to the train or test split.

    Args:
        target_folder: folder containing the source ``*.jpg`` files.
        csv_path: path of the mapping CSV to create.
        out_folder: destination folder for the renamed copies.
        dry_run: if True, no files are copied (the CSV is still written).
        train_test_split: fraction of subjects kept in the training set.
    """
    filenames = glob.glob(os.path.join(target_folder, "*.jpg"))
    ids = []
    ages = []
    imagenos = []
    uuids = []
    new_filenames = []
    in_test_set = []
    copy_count = 0
    id_to_uuid = dict()
    id_to_test_set_status = dict()
    for f in filenames:
        # Fixed: loop variable renamed from `id` (shadowed the builtin).
        subject_id, age, imageno = os.path.basename(f).replace(".jpg", "").split("_")
        ids.append(subject_id)
        ages.append(float(age))
        imagenos.append(int(imageno))
        if subject_id not in id_to_uuid:
            id_to_uuid[subject_id] = uuid.uuid4()
            # The split decision is made once per subject, so all images of
            # one subject land in the same split.
            id_to_test_set_status[subject_id] = random.random() > train_test_split
        new_id = id_to_uuid[subject_id]
        cur_id_in_test_set = id_to_test_set_status[subject_id]
        uuids.append(new_id)
        in_test_set.append(cur_id_in_test_set)
        new_filename = f"{new_id}_{age}_{imageno}.jpg"
        new_filenames.append(new_filename)
        full_new_path = os.path.join(out_folder, new_filename)
        if dry_run is False:
            print(f"Copying {f} to {full_new_path}")
            if not os.path.isfile(full_new_path):
                shutil.copyfile(f, full_new_path)
            else:
                print(f"Skipping {f} (already present)")
            copy_count = copy_count + 1
        else:
            print(f"DRYRUN: Pretending to copy {f} to {full_new_path}")
    # NOTE(review): the CSV still contains the original ids next to the new
    # UUIDs — confirm the mapping file is meant to be kept private.
    df = pd.DataFrame({"id": ids, "age": ages, "imageno": imagenos, "uuids": uuids,
                       "filename": new_filenames, "testset": in_test_set})
    df.to_csv(csv_path, index=False)
    print(f"{copy_count} file(s) were copied to {out_folder}, details in {csv_path}")
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the pseudonymizer.
    parser = ArgumentParser()
    parser.add_argument("--target", type=str, help="Folder with image set", required=True)
    parser.add_argument("--out_csv", type=str, help="Name of CSV to create with results", default="dataset.csv", required=False)
    parser.add_argument("--out_folder", type=str, help="Where to put pseudonmized images", required=True)
    parser.add_argument("--dryrun", action="store_true", help="Use for a dummy run (no files copied)", required=False, default=False)
    args = parser.parse_args()
main(args.target, args.out_csv, args.out_folder, args.dryrun) | 0.228673 | 0.143818 |
from nonebot import on_command
from nonebot.adapters import Bot
from nonebot.adapters.cqhttp import GROUP, GroupMessageEvent, Message, MessageSegment
from nonebot.typing import T_State
from modules.user_info import UserInfo
from utils.log import logger
from .accident import random_accident
from .data_source import *
from .sentence import *
from nonebot.plugin import export
from ..sign_in.config import LUCKY_MAX
# Register plugin metadata through NoneBot's export mechanism; the usage
# string is also echoed back to users who send a malformed command.
export = export()
export.plugin_name = '俄罗斯轮盘'
export.plugin_usage = '''俄罗斯轮盘帮助:
开启游戏:装弹[金额][at](指定决斗对象,为空则所有群友都可接受决斗)
示例:装弹10
接受对决:接受对决/拒绝决斗
开始对决:开枪(轮流开枪,60秒未开枪另一方可通过该命令进行结算)
'''
# Group-only command matchers: start a duel, accept it, refuse it, fire.
russian_roulette = on_command('俄罗斯轮盘', aliases={'装弹', '俄罗斯转盘'}, permission=GROUP, priority=5, block=True)
_accept = on_command('接受', aliases={'接受决斗', '接受挑战'}, permission=GROUP, priority=5, block=True)
_refuse = on_command('拒绝', aliases={'拒绝决斗', '拒绝挑战'}, permission=GROUP, priority=5, block=True)
_shot = on_command('开枪', aliases={'咔', '嘭', '嘣'}, permission=GROUP, priority=5, block=True)
@russian_roulette.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Start a new duel: parse the wager and optional @-target, check funds."""
    group_id = event.group_id
    player1_id = event.sender.user_id
    # Fetch the most recent duel in this group.
    latest_duel = get_latest_duel(group_id)
    if latest_duel is not None and latest_duel.can_be_handle():
        # Terminate the previous duel once it has timed out.
        if latest_duel.expired():
            logger.debug(f'终止超时的决斗: {latest_duel}')
            duel_end(latest_duel)
            del latest_duel
        # If the duel has not timed out, notify the sender and stop here.
        elif latest_duel.player1_id == player1_id:
            await russian_roulette.finish('请先完成当前决斗')
            return
        else:
            await russian_roulette.finish('请勿打扰别人神圣的决斗,丨')
            return
    message = event.message
    if len(message) < 1:
        await russian_roulette.finish(f'请按照格式: {export.plugin_usage}')
        return
    # The first argument after the command must be a number: the wager.
    gold = 0
    gold_message = message[0]
    # NOTE(review): `is_text` is referenced without parentheses — if it is a
    # method, this condition is always truthy; confirm against the adapter API.
    if gold_message.is_text:
        message_text = str(gold_message).strip()
        try:
            gold = int(message_text)
        except Exception:
            pass
    if gold == 0:
        await russian_roulette.finish('请输入赌注,子弹也是要钱的')
        return
    elif gold < 0:
        await russian_roulette.finish('咋地,决斗完还想倒吸钱啊?')
        return
    # The first @-mentioned user becomes the challenged player.
    player2_id = -1
    for item in message:
        if item.type == 'at':
            player2_id = int(item.data.get('qq', -1))
            break
    # A player cannot duel themselves.
    if player2_id == player1_id:
        await russian_roulette.finish('珍爱生命,不要自残', at_sender=True)
        return
    # Check that the challenger can afford the wager.
    player1_gold = await UserInfo.get_gold(player1_id, group_id)
    logger.debug(f'开始一场新的决斗:\n'
                 f'挑战者: {player1_id}\n'
                 f'挑战者拥有金币: {player1_gold}\n'
                 f'赌注: {gold}')
    if player1_gold < gold:
        await russian_roulette.finish('请出门左转打工挣够钱再来')
        return
    # If a target was specified, check their balance too.
    if player2_id != -1:
        player2_gold = await UserInfo.get_gold(player2_id, group_id)
        if player2_gold < gold:
            logger.debug(f'被挑战者{player2_id}所拥有金币不足以支付决斗')
            await russian_roulette.finish('你的对手太穷了,他不配和你对战')
            return
        logger.debug(f'被挑战者: {player2_id}\n'
                     f'被挑战者拥有金币: {player2_gold}')
    else:
        logger.debug('未指定被挑战者')
    # Without a designated target, any group member may respond to the duel.
    if player2_id == -1:
        # Insert the new duel record.
        insert_duel(group_id, player1_id, player2_id, gold)
        await russian_roulette.finish(random_sentence(group_challenge))
    else:
        # Insert the new duel record.
        insert_duel(group_id, player1_id, player2_id, gold)
        # Send an @-message to the challenged player.
        message = Message(f'{MessageSegment.at(player2_id)}{random_sentence(challenge)}')
        await russian_roulette.finish(message)
@_accept.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Handle a player accepting the pending duel in this group."""
    group_id = event.group_id
    # Fetch the most recent duel that can still be interacted with.
    latest_duel = get_latest_can_handle_duel(group_id)
    # The duel may have been cancelled by timeout (or none ever happened).
    if latest_duel is None:
        logger.debug(f'当前无可被接受挑战的决斗: {latest_duel}')
        await _accept.finish('当前无任何可接受的决斗,你接受个什么劲儿')
        return
    # If the duel timed out, update its state and stop.
    if latest_duel.expired():
        logger.debug(f'决斗已超时,不能被接受了: {latest_duel}')
        duel_end(latest_duel)
        await _accept.finish('决斗已经超时,请重新发起')
        return
    accept_id = event.user_id
    player1_id = latest_duel.player1_id
    if player1_id == accept_id:
        await _accept.finish('珍爱生命,不要自残', at_sender=True)
        return
    player2_id = latest_duel.player2_id
    # FIX: this debug call was missing the f-prefix, so it logged the
    # literal text "{latest_duel}" instead of the duel object.
    logger.debug(f'[接受]当前决斗: {latest_duel}')
    # Eligibility: the duel is open to anyone, or the accepter is the target.
    if player2_id == -1 or player2_id == accept_id:
        player2_id = accept_id
        latest_duel.player2_id = player2_id
        player2_gold = await UserInfo.get_gold(player2_id, group_id)
        if player2_gold < latest_duel.wager:
            logger.debug(f'接受决斗者无足够金币: {player2_gold}')
            await _accept.finish('你的金币不足以支付决斗费用,请去打工再来')
            return
        # Move the duel to the next stage.
        duel_accept(latest_duel)
        logger.debug(f'当前决斗被接受,进入下一阶段: {latest_duel}')
        random_s = random_sentence(accept)
        message = Message(f'{MessageSegment.at(player2_id)}{random_s}{MessageSegment.at(player1_id)}。'
                          f'{MessageSegment.at(player1_id)}请通过[开枪]来把握自己的命运')
        await _accept.finish(message)
    else:
        await _accept.finish('和你无关,一边玩泥巴去!')
@_refuse.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Handle the challenged player refusing the pending duel."""
    group_id = event.group_id
    # Fetch the most recent duel that can still be interacted with.
    latest_duel = get_latest_can_handle_duel(group_id)
    # The duel may have been cancelled by timeout (or none ever happened).
    if latest_duel is None:
        logger.debug(f'当前无可被拒绝挑战的决斗: {latest_duel}')
        await _refuse.finish('当前无任何可拒绝的决斗,你怂个啥哦')
        return
    # If the duel timed out, update its state and stop.
    if latest_duel.expired():
        logger.debug(f'决斗已超时,不能被拒绝了: {latest_duel}')
        duel_end(latest_duel)
        await _refuse.finish('决斗已经超时了,挺起腰板吧')
        return
    refuse_id = event.user_id
    player1_id = latest_duel.player1_id
    if player1_id == refuse_id:
        # FIX: this reply previously went through the `_accept` matcher;
        # inside the refuse handler it must use `_refuse` so the finish
        # semantics apply to the correct matcher.
        await _refuse.finish('你不能拒绝自己的决斗', at_sender=True)
        return
    player2_id = latest_duel.player2_id
    logger.debug(f'[拒绝]当前决斗: {latest_duel}')
    if player2_id == -1:
        await _refuse.finish('这场决斗面向所有人,不用站出来认怂')
        return
    if player2_id == refuse_id:
        logger.debug(f'用户{player2_id}拒绝了决斗,更新其状态')
        # Update the duel state to "denied".
        duel_denied(latest_duel)
        message = Message(f'卑微的{MessageSegment.at(player2_id)}拒绝了应用的{MessageSegment.at(player1_id)}')
        await _refuse.finish(message)
    else:
        await _refuse.finish('吃瓜群众一边去')
@_shot.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Handle a trigger pull: timeout settlement, luck accidents, the shot itself."""
    group_id = event.group_id
    latest_duel = get_latest_can_shot_duel(group_id)
    # No duel in progress (or none in the firing stage): just notify the user.
    if latest_duel is None:
        logger.debug(f'[开枪]当前无进行中的决斗: {latest_duel}')
        await _shot.finish('射射射,你射个啥呢,现在没有任何决斗!')
        return
    shot_player_id = event.user_id
    another_player_id = latest_duel.another
    logger.debug(f'[开枪{shot_player_id}]当前决斗: {latest_duel}')
    # Expired duel: settlement is triggered by the *other* player "firing".
    if shot_player_id == another_player_id and latest_duel.expired():
        duel_end(latest_duel)
        # Enter settlement.
        winner, loser = latest_duel.clearing()
        message = await _end_of_game(event, latest_duel, winner, loser)
        logger.debug(f'决斗超时,由另一方发起结算: {another_player_id}')
        await _shot.finish(message)
        return
    # The command sender must be the player currently holding the gun.
    if shot_player_id != latest_duel.in_turn:
        await _shot.finish('枪不在你手上,别捣乱')
        return
    # The shooter's daily luck may trigger a random accident event.
    user_fortune = await UserInfo.get_lucky(shot_player_id, group_id)
    if user_fortune is None:
        user_fortune = 0
    # Overall chance is scaled to 8x the maximum luck value (this is
    # tightly coupled to the sign-in plugin's LUCKY_MAX).
    t = random.randint(0, LUCKY_MAX * 8)
    if t < user_fortune:
        # Accident triggered: the current bullet may change hands directly.
        message, shot, end, winner, loser = random_accident(shot_player_id, another_player_id)
        logger.debug(f'用户触发意外事件:\n'
                     f'终结消息: {message}\n,'
                     f'子弹是否射出: {shot}\n,'
                     f'是否结束事件: {end}\n'
                     f'胜者: {winner}\n'
                     f'败者: {loser}')
        # Should the duel end right now?
        if end:
            end_message = await _end_of_game(event, latest_duel, winner, loser)
            duel_end(latest_duel)
            await _shot.send('幸运事件: ' + message)
            await _shot.finish(end_message)
            return
        # Has the current bullet been fired (vs. just swapping turns)?
        if shot:
            duel_shot(latest_duel)
        else:
            duel_switch(latest_duel)
        await _shot.finish('幸运事件: ' + message)
        return
    if latest_duel.finish:
        message = MessageSegment.text('子弹打光了,这场决斗无人胜利~\n'
                                      f'子弹: {latest_duel.visual_bullet}')
        await _shot.finish(message)
        return
    get_shot = duel_shot(latest_duel)
    if get_shot:
        logger.debug(f'用户{shot_player_id}中弹,进入结算')
        duel_end(latest_duel)
        # The hit player loses; settle the duel.
        await _shot.send(random_sentence(died))
        message = await _end_of_game(event, latest_duel, another_player_id, shot_player_id)
        await _shot.finish(message)
    else:
        message = Message(f'{random_sentence(miss)}。枪交到了{MessageSegment.at(another_player_id)}手上')
        await _shot.finish(message)
async def _end_of_game(event: GroupMessageEvent, duel: DuelHistory, winner: int, loser: int) -> Message:
    """Settle a finished duel: transfer the wager and build the summary.

    The wager is credited to *winner* and debited from *loser* within the
    event's group, then an @-mentioning summary Message is returned for
    the caller to send.
    """
    group_id = event.group_id
    wager = duel.wager
    await UserInfo.change_gold(winner, group_id, wager)
    await UserInfo.change_gold(loser, group_id, -wager)
    return Message(
        f'胜者{MessageSegment.at(winner)}赢得了{wager}枚金币\n'
        f'败者{MessageSegment.at(loser)}被丢进了海里喂鱼\n'
        f'子弹: {duel.visual_bullet}')
from nonebot.adapters import Bot
from nonebot.adapters.cqhttp import GROUP, GroupMessageEvent, Message, MessageSegment
from nonebot.typing import T_State
from modules.user_info import UserInfo
from utils.log import logger
from .accident import random_accident
from .data_source import *
from .sentence import *
from nonebot.plugin import export
from ..sign_in.config import LUCKY_MAX
# NOTE(review): this rebinds the imported ``export`` factory with the
# plugin's export object — a common nonebot idiom, but it shadows the name.
export = export()
export.plugin_name = '俄罗斯轮盘'
export.plugin_usage = '''俄罗斯轮盘帮助:
开启游戏:装弹[金额][at](指定决斗对象,为空则所有群友都可接受决斗)
示例:装弹10
接受对决:接受对决/拒绝决斗
开始对决:开枪(轮流开枪,60秒未开枪另一方可通过该命令进行结算)
'''
# Group-only command matchers: start / accept / refuse / shoot.
russian_roulette = on_command('俄罗斯轮盘', aliases={'装弹', '俄罗斯转盘'}, permission=GROUP, priority=5, block=True)
_accept = on_command('接受', aliases={'接受决斗', '接受挑战'}, permission=GROUP, priority=5, block=True)
_refuse = on_command('拒绝', aliases={'拒绝决斗', '拒绝挑战'}, permission=GROUP, priority=5, block=True)
_shot = on_command('开枪', aliases={'咔', '嘭', '嘣'}, permission=GROUP, priority=5, block=True)
@russian_roulette.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Start a new duel: parse the wager and optional @-target, validate
    both players' gold, then record the duel and announce it.
    """
    group_id = event.group_id
    # Fix: use event.user_id for consistency with the other handlers
    # (event.sender.user_id is Optional in the cqhttp adapter and may be
    # unset, while event.user_id is always an int).
    player1_id = event.user_id
    # Fetch the most recent duel of this group.
    latest_duel = get_latest_duel(group_id)
    if latest_duel is not None and latest_duel.can_be_handle():
        # Terminate the previous duel if it has expired.
        if latest_duel.expired():
            logger.debug(f'终止超时的决斗: {latest_duel}')
            duel_end(latest_duel)
            del latest_duel
        # If it has not expired, notify the sender and stop here.
        elif latest_duel.player1_id == player1_id:
            await russian_roulette.finish('请先完成当前决斗')
            return
        else:
            await russian_roulette.finish('请勿打扰别人神圣的决斗,丨')
            return
    message = event.message
    if len(message) < 1:
        await russian_roulette.finish(f'请按照格式: {export.plugin_usage}')
        return
    # The first argument after the command must be a number: the wager.
    gold = 0
    gold_message = message[0]
    # Fix: MessageSegment.is_text is a method; the bare attribute access
    # was always truthy, so non-text segments slipped into the int() path.
    if gold_message.is_text():
        message_text = str(gold_message).strip()
        try:
            gold = int(message_text)
        except Exception:
            pass
    if gold == 0:
        await russian_roulette.finish('请输入赌注,子弹也是要钱的')
        return
    elif gold < 0:
        await russian_roulette.finish('咋地,决斗完还想倒吸钱啊?')
        return
    # The first @-mentioned user becomes the challenged player (-1 = open).
    player2_id = -1
    for item in message:
        if item.type == 'at':
            player2_id = int(item.data.get('qq', -1))
            break
    # A player cannot duel themselves.
    if player2_id == player1_id:
        await russian_roulette.finish('珍爱生命,不要自残', at_sender=True)
        return
    # The challenger must be able to cover the wager.
    player1_gold = await UserInfo.get_gold(player1_id, group_id)
    logger.debug(f'开始一场新的决斗:\n'
                 f'挑战者: {player1_id}\n'
                 f'挑战者拥有金币: {player1_gold}\n'
                 f'赌注: {gold}')
    if player1_gold < gold:
        await russian_roulette.finish('请出门左转打工挣够钱再来')
        return
    # If a target was specified, they must be able to cover the wager too.
    if player2_id != -1:
        player2_gold = await UserInfo.get_gold(player2_id, group_id)
        if player2_gold < gold:
            logger.debug(f'被挑战者{player2_id}所拥有金币不足以支付决斗')
            await russian_roulette.finish('你的对手太穷了,他不配和你对战')
            return
        logger.debug(f'被挑战者: {player2_id}\n'
                     f'被挑战者拥有金币: {player2_gold}')
    else:
        logger.debug('未指定被挑战者')
    # Record the duel (both branches below previously duplicated this call).
    insert_duel(group_id, player1_id, player2_id, gold)
    if player2_id == -1:
        # Open duel: any group member may accept it.
        await russian_roulette.finish(random_sentence(group_challenge))
    else:
        # Notify the challenged player with an @-message.
        message = Message(f'{MessageSegment.at(player2_id)}{random_sentence(challenge)}')
        await russian_roulette.finish(message)
@_accept.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Accept the group's pending duel if the sender is eligible."""
    group_id = event.group_id
    # Fetch the most recent duel still waiting to be handled.
    latest_duel = get_latest_can_handle_duel(group_id)
    # There may be none (no duel started, or it was already cancelled).
    if latest_duel is None:
        logger.debug(f'当前无可被接受挑战的决斗: {latest_duel}')
        await _accept.finish('当前无任何可接受的决斗,你接受个什么劲儿')
        return
    # An expired duel can no longer be accepted; mark it finished.
    if latest_duel.expired():
        logger.debug(f'决斗已超时,不能被接受了: {latest_duel}')
        duel_end(latest_duel)
        await _accept.finish('决斗已经超时,请重新发起')
        return
    accept_id = event.user_id
    player1_id = latest_duel.player1_id
    if player1_id == accept_id:
        await _accept.finish('珍爱生命,不要自残', at_sender=True)
        return
    player2_id = latest_duel.player2_id
    # Fix: the original call lacked the f-prefix and logged the literal
    # placeholder text instead of the duel.
    logger.debug(f'[接受]当前决斗: {latest_duel}')
    # The sender is eligible when the duel is open to everyone (-1) or
    # when they are the designated opponent.
    if player2_id == -1 or player2_id == accept_id:
        player2_id = accept_id
        latest_duel.player2_id = player2_id
        player2_gold = await UserInfo.get_gold(player2_id, group_id)
        if player2_gold < latest_duel.wager:
            logger.debug(f'接受决斗者无足够金币: {player2_gold}')
            await _accept.finish('你的金币不足以支付决斗费用,请去打工再来')
            return
        # Advance the duel to the shooting stage.
        duel_accept(latest_duel)
        logger.debug(f'当前决斗被接受,进入下一阶段: {latest_duel}')
        random_s = random_sentence(accept)
        message = Message(f'{MessageSegment.at(player2_id)}{random_s}{MessageSegment.at(player1_id)}。'
                          f'{MessageSegment.at(player1_id)}请通过[开枪]来把握自己的命运')
        await _accept.finish(message)
    else:
        await _accept.finish('和你无关,一边玩泥巴去!')
@_refuse.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Decline the pending duel if the sender is the designated opponent."""
    group_id = event.group_id
    # Fetch the most recent duel still waiting to be handled.
    latest_duel = get_latest_can_handle_duel(group_id)
    # There may be none (no duel started, or it was already cancelled).
    if latest_duel is None:
        logger.debug(f'当前无可被拒绝挑战的决斗: {latest_duel}')
        await _refuse.finish('当前无任何可拒绝的决斗,你怂个啥哦')
        return
    # An expired duel can no longer be refused; mark it finished.
    if latest_duel.expired():
        logger.debug(f'决斗已超时,不能被拒绝了: {latest_duel}')
        duel_end(latest_duel)
        await _refuse.finish('决斗已经超时了,挺起腰板吧')
        return
    refuse_id = event.user_id
    player1_id = latest_duel.player1_id
    if player1_id == refuse_id:
        # Fix: this branch previously called _accept.finish, aborting the
        # wrong matcher; a refusal must finish the _refuse matcher.
        await _refuse.finish('你不能拒绝自己的决斗', at_sender=True)
        return
    player2_id = latest_duel.player2_id
    logger.debug(f'[拒绝]当前决斗: {latest_duel}')
    if player2_id == -1:
        await _refuse.finish('这场决斗面向所有人,不用站出来认怂')
        return
    if player2_id == refuse_id:
        logger.debug(f'用户{player2_id}拒绝了决斗,更新其状态')
        # Record the refusal on the duel.
        duel_denied(latest_duel)
        message = Message(f'卑微的{MessageSegment.at(player2_id)}拒绝了应用的{MessageSegment.at(player1_id)}')
        await _refuse.finish(message)
    else:
        await _refuse.finish('吃瓜群众一边去')
@_shot.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Fire the gun in the active duel: enforces turn order, settles
    timed-out duels, applies luck-based accidents, and performs the
    end-of-game settlement.
    """
    group_id = event.group_id
    latest_duel = get_latest_can_shot_duel(group_id)
    # No duel in the shooting stage: just notify the sender.
    if latest_duel is None:
        logger.debug(f'[开枪]当前无进行中的决斗: {latest_duel}')
        await _shot.finish('射射射,你射个啥呢,现在没有任何决斗!')
        return
    shot_player_id = event.user_id
    another_player_id = latest_duel.another
    logger.debug(f'[开枪{shot_player_id}]当前决斗: {latest_duel}')
    # A timed-out duel goes to settlement, but only the waiting player
    # (the one NOT holding the gun) may trigger it.
    if shot_player_id == another_player_id and latest_duel.expired():
        duel_end(latest_duel)
        # Determine winner/loser for the settlement.
        winner, loser = latest_duel.clearing()
        message = await _end_of_game(event, latest_duel, winner, loser)
        logger.debug(f'决斗超时,由另一方发起结算: {another_player_id}')
        await _shot.finish(message)
        return
    # Only the player whose turn it is may shoot.
    if shot_player_id != latest_duel.in_turn:
        await _shot.finish('枪不在你手上,别捣乱')
        return
    # The shooter's daily luck may trigger an extra random event.
    user_fortune = await UserInfo.get_lucky(shot_player_id, group_id)
    if user_fortune is None:
        user_fortune = 0
    # Trigger chance is user_fortune out of LUCKY_MAX * 8 (strongly
    # coupled to the sign-in plugin's maximum luck value).
    t = random.randint(0, LUCKY_MAX * 8)
    if t < user_fortune:
        # Accident triggered: the current bullet may change hands.
        message, shot, end, winner, loser = random_accident(shot_player_id, another_player_id)
        logger.debug(f'用户触发意外事件:\n'
                     f'终结消息: {message}\n,'
                     f'子弹是否射出: {shot}\n,'
                     f'是否结束事件: {end}\n'
                     f'胜者: {winner}\n'
                     f'败者: {loser}')
        # Does the accident end the duel outright?
        if end:
            end_message = await _end_of_game(event, latest_duel, winner, loser)
            duel_end(latest_duel)
            await _shot.send('幸运事件: ' + message)
            await _shot.finish(end_message)
            return
        # Did the accident fire the current chamber, or just swap turns?
        if shot:
            duel_shot(latest_duel)
        else:
            duel_switch(latest_duel)
        await _shot.finish('幸运事件: ' + message)
        return
    # All chambers empty: the duel ends with no winner.
    if latest_duel.finish:
        message = MessageSegment.text('子弹打光了,这场决斗无人胜利~\n'
                                      f'子弹: {latest_duel.visual_bullet}')
        await _shot.finish(message)
        return
    get_shot = duel_shot(latest_duel)
    if get_shot:
        logger.debug(f'用户{shot_player_id}中弹,进入结算')
        duel_end(latest_duel)
        # The shooter hit themselves: settle in favour of the other player.
        await _shot.send(random_sentence(died))
        message = await _end_of_game(event, latest_duel, another_player_id, shot_player_id)
        await _shot.finish(message)
    else:
        message = Message(f'{random_sentence(miss)}。枪交到了{MessageSegment.at(another_player_id)}手上')
        await _shot.finish(message)
async def _end_of_game(event: GroupMessageEvent, duel: DuelHistory, winner: int, loser: int) -> Message:
    """Settle a finished duel: transfer the wager and build the summary.

    The wager is credited to *winner* and debited from *loser* within the
    event's group, then an @-mentioning summary Message is returned for
    the caller to send.
    """
    group_id = event.group_id
    wager = duel.wager
    await UserInfo.change_gold(winner, group_id, wager)
    await UserInfo.change_gold(loser, group_id, -wager)
    return Message(
        f'胜者{MessageSegment.at(winner)}赢得了{wager}枚金币\n'
        f'败者{MessageSegment.at(loser)}被丢进了海里喂鱼\n'
        f'子弹: {duel.visual_bullet}')
import gzip
from diskcache import FanoutCache, Disk
from diskcache.core import BytesType, MODE_BINARY, BytesIO
from pathlib import Path
from .logconf import logging
log = logging.getLogger(__name__)
# Fix: only the last setLevel call takes effect; the redundant WARN and
# INFO calls were dead code and have been removed. The effective level
# remains DEBUG, as before.
log.setLevel(logging.DEBUG)

# Cache directory. Currently used on Kaggle.
cache_dir = '/kaggle/working'
class GzipDisk(Disk):
    """diskcache Disk subclass that gzip-compresses binary values on
    store and decompresses them on fetch. Non-binary values pass through
    to the base class unchanged.
    """

    def store(self, value, read, key=None):
        """
        Override from base class diskcache.Disk: gzip the value first.

        Chunking is due to needing to work on pythons < 2.7.13:

        - Issue #27130: In the "zlib" module, fix handling of large buffers
          (typically 2 or 4 GiB).  Previously, inputs were limited to 2 GiB, and
          compression and decompression operations did not properly handle results of
          2 or 4 GiB.

        :param value: value to convert
        :param bool read: True when value is file-like object
        :param key: accepted for API compatibility.
            NOTE(review): ``key`` is not forwarded to super().store() —
            confirm against the installed diskcache version's signature.
        :return: (size, mode, filename, value) tuple for Cache table
        """
        # pylint: disable=unidiomatic-typecheck
        if type(value) is BytesType:
            if read:
                # Materialize the file-like object so it can be compressed.
                value = value.read()
                read = False
            str_io = BytesIO()
            # compresslevel=1 favours speed over ratio.
            gz_file = gzip.GzipFile(mode='wb', compresslevel=1, fileobj=str_io)
            # Feed the data in 1 GiB chunks (see docstring for why).
            for offset in range(0, len(value), 2**30):
                gz_file.write(value[offset:offset+2**30])
            gz_file.close()
            value = str_io.getvalue()
        return super(GzipDisk, self).store(value, read)

    def fetch(self, mode, filename, value, read):
        """
        Override from base class diskcache.Disk: gunzip binary values.

        Chunking is due to needing to work on pythons < 2.7.13:

        - Issue #27130: In the "zlib" module, fix handling of large buffers
          (typically 2 or 4 GiB).  Previously, inputs were limited to 2 GiB, and
          compression and decompression operations did not properly handle results of
          2 or 4 GiB.

        :param int mode: value mode raw, binary, text, or pickle
        :param str filename: filename of corresponding value
        :param value: database value
        :param bool read: when True, return an open file handle
        :return: corresponding Python value
        """
        value = super(GzipDisk, self).fetch(mode, filename, value, read)
        # Only binary-mode values were compressed by store().
        if mode == MODE_BINARY:
            str_io = BytesIO(value)
            gz_file = gzip.GzipFile(mode='rb', fileobj=str_io)
            read_csio = BytesIO()
            # Decompress in 1 GiB chunks until the stream is exhausted.
            while True:
                uncompressed_data = gz_file.read(2**30)
                if uncompressed_data:
                    read_csio.write(uncompressed_data)
                else:
                    break
            value = read_csio.getvalue()
        return value
def getCache(scope_str):
    """Return a gzip-backed FanoutCache rooted at {cache_dir}/cache/{scope_str}.

    shards=64 spreads writes across SQLite files to reduce lock
    contention; timeout=1 bounds blocking on a busy shard to one second;
    size_limit is 3e11 bytes (~300 GB) across all shards.
    """
    return FanoutCache(f'{cache_dir}/cache/' + scope_str,
                       disk=GzipDisk,
                       shards=64,
                       timeout=1,
                       size_limit=3e11,
                       # disk_min_file_size=2**20,
                       )
from diskcache import FanoutCache, Disk
from diskcache.core import BytesType, MODE_BINARY, BytesIO
from pathlib import Path
from .logconf import logging
log = logging.getLogger(__name__)
log.setLevel(logging.WARN)
log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
# Cache Directory
# Currently using on kaggle.
cache_dir = '/kaggle/working'
class GzipDisk(Disk):
def store(self, value, read, key=None):
"""
Override from base class diskcache.Disk.
Chunking is due to needing to work on pythons < 2.7.13:
- Issue #27130: In the "zlib" module, fix handling of large buffers
(typically 2 or 4 GiB). Previously, inputs were limited to 2 GiB, and
compression and decompression operations did not properly handle results of
2 or 4 GiB.
:param value: value to convert
:param bool read: True when value is file-like object
:return: (size, mode, filename, value) tuple for Cache table
"""
# pylint: disable=unidiomatic-typecheck
if type(value) is BytesType:
if read:
value = value.read()
read = False
str_io = BytesIO()
gz_file = gzip.GzipFile(mode='wb', compresslevel=1, fileobj=str_io)
for offset in range(0, len(value), 2**30):
gz_file.write(value[offset:offset+2**30])
gz_file.close()
value = str_io.getvalue()
return super(GzipDisk, self).store(value, read)
def fetch(self, mode, filename, value, read):
"""
Override from base class diskcache.Disk.
Chunking is due to needing to work on pythons < 2.7.13:
- Issue #27130: In the "zlib" module, fix handling of large buffers
(typically 2 or 4 GiB). Previously, inputs were limited to 2 GiB, and
compression and decompression operations did not properly handle results of
2 or 4 GiB.
:param int mode: value mode raw, binary, text, or pickle
:param str filename: filename of corresponding value
:param value: database value
:param bool read: when True, return an open file handle
:return: corresponding Python value
"""
value = super(GzipDisk, self).fetch(mode, filename, value, read)
if mode == MODE_BINARY:
str_io = BytesIO(value)
gz_file = gzip.GzipFile(mode='rb', fileobj=str_io)
read_csio = BytesIO()
while True:
uncompressed_data = gz_file.read(2**30)
if uncompressed_data:
read_csio.write(uncompressed_data)
else:
break
value = read_csio.getvalue()
return value
def getCache(scope_str):
return FanoutCache(f'{cache_dir}/cache/' + scope_str,
disk=GzipDisk,
shards=64,
timeout=1,
size_limit=3e11,
# disk_min_file_size=2**20,
) | 0.615781 | 0.216012 |
import argparse
import json
import os
import pickle
import sys
import stanfordnlp
from tqdm import tqdm
from utils import (
WORD_MAP_FILENAME,
decode_caption,
get_caption_without_special_tokens,
IMAGES_META_FILENAME,
DATA_CAPTIONS,
DATA_COCO_SPLIT,
POS_TAGGED_CAPTIONS_FILENAME,
)
# stanfordnlp.download('en', confirm_if_exists=True)
def count_adjective_noun_pairs(preprocessed_data_folder):
    """POS-tag every caption of every image and pickle the results.

    NOTE(review): despite the name, this function does not count
    adjective-noun pairs. It decodes each image's encoded captions with
    the word map, runs them through the stanfordnlp pipeline, and stores
    the tagged first sentence per caption, keyed by COCO id.

    :param preprocessed_data_folder: folder containing the images-meta
        JSON and word-map JSON; the pickled output is written here too.
    """
    nlp_pipeline = stanfordnlp.Pipeline()
    # Per-COCO-id metadata: encoded captions and the train/val split.
    with open(
        os.path.join(preprocessed_data_folder, IMAGES_META_FILENAME), "r"
    ) as json_file:
        images_meta = json.load(json_file)
    word_map_path = os.path.join(preprocessed_data_folder, WORD_MAP_FILENAME)
    with open(word_map_path, "r") as json_file:
        word_map = json.load(json_file)
    data = {}
    for coco_id, image_meta in tqdm(images_meta.items()):
        encoded_captions = image_meta[DATA_CAPTIONS]
        # Decode token ids back to plain-text captions, dropping the
        # special (start/end/pad) tokens first.
        decoded_captions = [
            " ".join(
                decode_caption(
                    get_caption_without_special_tokens(caption, word_map), word_map
                )
            )
            for caption in encoded_captions
        ]
        data[coco_id] = {}
        data[coco_id][DATA_COCO_SPLIT] = image_meta[DATA_COCO_SPLIT]
        data[coco_id]["pos_tagged_captions"] = []
        for caption in decoded_captions:
            doc = nlp_pipeline(caption)
            # Only the first sentence is kept — presumably captions are
            # single sentences; TODO confirm.
            sentence = doc.sentences[0]
            data[coco_id]["pos_tagged_captions"].append(sentence)
    data_path = os.path.join(preprocessed_data_folder, POS_TAGGED_CAPTIONS_FILENAME)
    print("\nSaving results to {}".format(data_path))
    # stanfordnlp sentence objects are pickled as-is.
    with open(data_path, "wb") as pickle_file:
        pickle.dump(data, pickle_file)
def check_args(args):
    """Parse command-line arguments, echo the parsed namespace and return it.

    :param args: argument list (e.g. ``sys.argv[1:]``)
    :return: the parsed ``argparse.Namespace``
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--preprocessed-data-folder",
        default="../datasets/coco2014_preprocessed/",
        help="Folder where the preprocessed data is located",
    )
    parsed = arg_parser.parse_args(args)
    print(parsed)
    return parsed
if __name__ == "__main__":
    # Parse CLI args and tag all captions in the given folder.
    parsed_args = check_args(sys.argv[1:])
    count_adjective_noun_pairs(parsed_args.preprocessed_data_folder)
import argparse
import json
import os
import pickle
import sys
import stanfordnlp
from tqdm import tqdm
from utils import (
WORD_MAP_FILENAME,
decode_caption,
get_caption_without_special_tokens,
IMAGES_META_FILENAME,
DATA_CAPTIONS,
DATA_COCO_SPLIT,
POS_TAGGED_CAPTIONS_FILENAME,
)
# stanfordnlp.download('en', confirm_if_exists=True)
def count_adjective_noun_pairs(preprocessed_data_folder):
nlp_pipeline = stanfordnlp.Pipeline()
with open(
os.path.join(preprocessed_data_folder, IMAGES_META_FILENAME), "r"
) as json_file:
images_meta = json.load(json_file)
word_map_path = os.path.join(preprocessed_data_folder, WORD_MAP_FILENAME)
with open(word_map_path, "r") as json_file:
word_map = json.load(json_file)
data = {}
for coco_id, image_meta in tqdm(images_meta.items()):
encoded_captions = image_meta[DATA_CAPTIONS]
decoded_captions = [
" ".join(
decode_caption(
get_caption_without_special_tokens(caption, word_map), word_map
)
)
for caption in encoded_captions
]
data[coco_id] = {}
data[coco_id][DATA_COCO_SPLIT] = image_meta[DATA_COCO_SPLIT]
data[coco_id]["pos_tagged_captions"] = []
for caption in decoded_captions:
doc = nlp_pipeline(caption)
sentence = doc.sentences[0]
data[coco_id]["pos_tagged_captions"].append(sentence)
data_path = os.path.join(preprocessed_data_folder, POS_TAGGED_CAPTIONS_FILENAME)
print("\nSaving results to {}".format(data_path))
with open(data_path, "wb") as pickle_file:
pickle.dump(data, pickle_file)
def check_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--preprocessed-data-folder",
help="Folder where the preprocessed data is located",
default="../datasets/coco2014_preprocessed/",
)
parsed_args = parser.parse_args(args)
print(parsed_args)
return parsed_args
if __name__ == "__main__":
parsed_args = check_args(sys.argv[1:])
count_adjective_noun_pairs(parsed_args.preprocessed_data_folder) | 0.299003 | 0.12787 |
from functools import wraps
import logging
import types
from selenium.common import exceptions as selenium_ex
LOGGER = logging.getLogger(__name__)
class FreshWebElement(object):
    """
    Selenium WebElement proxy/wrapper watching over errors
    due to element staleness.
    """
    # Number of refresh attempts before giving up.
    __ATTEMPTS = 5
    __STALE_ELEM_MSG = "Detected stale element '%s=%s', refreshing (#%s)..."

    def __init__(self, element, by, value):
        """
        Parameters:
            element (WebElement): page element
            by (str): location method
            value (str): locator value
        """
        self._by = by
        self._value = value
        self._elem = element

    def __dir__(self):
        return list(self.__dict__.keys()) + dir(self._elem)

    def __refresh_element(self):
        """Find the element on the page again."""
        driver = self._elem.parent
        self._elem = driver.find_element(by=self._by,
                                         value=self._value,
                                         auto_refresh=False)

    def __getattr__(self, name):
        """
        Delegates all attribute lookups and method calls to the original
        WebElement and watches for StaleElementReferenceException.

        If caught, the WebElement is "refreshed", i.e., it's looked up
        on the page again and the attribute lookup or (decorated) method call
        is executed again on the "fresh" element.
        """
        for attempt in range(1, self.__ATTEMPTS + 1):
            try:
                attr = getattr(self._elem, name)
                break
            except selenium_ex.StaleElementReferenceException:
                LOGGER.debug(self.__STALE_ELEM_MSG, self._by,
                             self._value, attempt)
                self.__refresh_element()
        else:
            # Fix: exhausting every attempt previously left ``attr`` unbound
            # and raised UnboundLocalError. Make one final lookup and let the
            # StaleElementReferenceException propagate to the caller instead.
            attr = getattr(self._elem, name)
        if isinstance(attr, types.MethodType):
            @wraps(attr)
            def safe_elem_method(*args, **kwargs):
                """Retrying wrapper around the element method."""
                for attempt in range(1, self.__ATTEMPTS + 1):
                    try:
                        fresh_attr = getattr(self._elem, name)
                        return fresh_attr(*args, **kwargs)
                    except selenium_ex.StaleElementReferenceException:
                        LOGGER.debug(self.__STALE_ELEM_MSG, self._by,
                                     self._value, attempt)
                        self.__refresh_element()
                # Fix: previously returned None after exhausting all
                # attempts; make the final call and let the exception
                # propagate.
                fresh_attr = getattr(self._elem, name)
                return fresh_attr(*args, **kwargs)
            return safe_elem_method
        return attr
from functools import wraps
import logging
import types
from selenium.common import exceptions as selenium_ex
LOGGER = logging.getLogger(__name__)
class FreshWebElement(object):
"""
Selenium WebElement proxy/wrapper watching over errors
due to element staleness.
"""
__ATTEMPTS = 5
__STALE_ELEM_MSG = "Detected stale element '%s=%s', refreshing (#%s)..."
def __init__(self, element, by, value):
"""
Parameters:
element (WebElement): page element
by (str): location method
value (str): locator value
"""
self._by = by
self._value = value
self._elem = element
def __dir__(self):
return list(self.__dict__.keys()) + dir(self._elem)
def __refresh_element(self):
"""Find the element on the page again."""
driver = self._elem.parent
self._elem = driver.find_element(by=self._by,
value=self._value,
auto_refresh=False)
def __getattr__(self, name):
"""
Delegates all attribute lookups and method calls to the original
WebElement and watches for StaleElementReferenceException.
If caught, the WebElement is "refreshed", i.e., it's looked up
on the page again and the attribute lookup or (decorated) method call
is executed again on the "fresh" element.
"""
for attempt in range(1, self.__ATTEMPTS + 1):
try:
attr = getattr(self._elem, name)
break
except selenium_ex.StaleElementReferenceException:
LOGGER.debug(self.__STALE_ELEM_MSG, self._by,
self._value, attempt)
self.__refresh_element()
if isinstance(attr, types.MethodType):
@wraps(attr)
def safe_elem_method(*args, **kwargs):
""" safe element """
for attempt in range(1, self.__ATTEMPTS + 1):
try:
attr = getattr(self._elem, name)
return attr(*args, **kwargs)
except selenium_ex.StaleElementReferenceException:
LOGGER.debug(self.__STALE_ELEM_MSG, self._by,
self._value, attempt)
self.__refresh_element()
return safe_elem_method
return attr | 0.712732 | 0.085061 |
import ast
import glob
import os
import re
import shlex
import shutil
import signal
import sys
import termios
import threading
import tty
from utils import _utils
# Custom dictionary path, relative to the sources dir; created on first run.
CUSTOM_DIC_PATH = "docs/common/custom_dic"
# hunspell invocation; "-p <dictionary>" is appended lazily by
# start_hunspell_process().
HUNSPELL_CMD = [
    "hunspell",
    "-a",  # Pipe mode
    "-d",
    "en_GB",  # Graphcore uses en_GB for documentation
    "-i",
    "utf-8",  # Encoding: suitable for linux and osx
    "-mode=none"
]  # Use raw text
# Terminal stream used by getChar() for raw single-key input.
TERM_STDIN = sys.stdin
def getChar():
    """Read a single raw character from the terminal.

    The terminal is switched to raw mode for the read and its previous
    settings are restored afterwards, even if the read fails.
    """
    # Fix: take the backup BEFORE entering try — if tcgetattr itself
    # failed, the old finally block crashed on an unbound ``old_attr``.
    # Back up the settings or the terminal will break on closing.
    old_attr = termios.tcgetattr(TERM_STDIN.fileno())
    try:
        tty.setraw(TERM_STDIN.fileno())
        char = TERM_STDIN.read(1)
    finally:
        # Fix: tcsetattr's second argument must be a TCSA* "when"
        # constant. TCIFLUSH is a tcflush() queue selector that only
        # worked here because its value happens to equal TCSANOW.
        # TCSADRAIN waits for pending output before restoring.
        termios.tcsetattr(TERM_STDIN.fileno(), termios.TCSADRAIN, old_attr)
    return char
class DocStr():
    """Immutable pairing of a docstring with its source file and line number."""

    def __init__(self, doc_str, source_file, line_num):
        self._doc_str = doc_str
        self._source_file = source_file
        self._line_num = line_num

    @property
    def doc_str(self):
        """The raw docstring text."""
        return self._doc_str

    @property
    def line_num(self):
        """Line number on which the docstring starts."""
        return self._line_num

    @property
    def source_file(self):
        """Path of the file the docstring was extracted from."""
        return self._source_file

    def __str__(self):
        return f"{self._line_num}:{self._doc_str}"
def start_hunspell_process():
    """Spawn a hunspell pipe-mode process and return it with its output list.

    :return: dict with 'proc' (the running _utils.Process) and 'out'
        (the list that the stdout handler appends hunspell lines to).
    """
    # Add custom dictionary first time only: the module-level HUNSPELL_CMD
    # list is mutated in place, so subsequent calls skip this branch.
    if "-p" not in HUNSPELL_CMD:
        custom_dic_path = os.path.join(_utils.sources_dir(), CUSTOM_DIC_PATH)
        if not os.path.exists(custom_dic_path):
            # Create an empty dictionary file so hunspell can open it.
            open(custom_dic_path, 'a').close()
        HUNSPELL_CMD.append("-p")
        HUNSPELL_CMD.append(shlex.quote(custom_dic_path))
    hunspell_output = []

    def out_handler(line):
        # NOTE(review): presumably invoked on the Process's reader
        # thread — confirm against _utils.Process.
        hunspell_output.append(line)

    # subprocess.Popen fails to pass the filename correctly without this when
    # shell=True. shlex.quote will handle any spaces correctly.
    cmd = " ".join(HUNSPELL_CMD)
    hunspell_proc = _utils.Process(cmd,
                                   env=None,
                                   redirect_stderr=True,
                                   stdout_handler=out_handler,
                                   bufsize=0)
    # Busy-wait for hunspell's banner; the first output line is just a
    # version string and is discarded below.
    while len(hunspell_output) < 1:
        assert hunspell_proc.is_running()
    hunspell_output.clear()
    return {'proc': hunspell_proc, 'out': hunspell_output}
# A ReST code-block directive, its blank line, and its indented body.
CODE_BLOCK = re.compile(r"\.\. code-block::[^\n]+\n\n.*?\n\n", flags=re.DOTALL)


def strip_code_blocks(s):
    """Blank out ReST code-block directives, preserving line structure.

    Every character of a matched code block except newlines becomes a
    space, so line offsets in the result still align with ``s``.
    """
    def _blank(match):
        return "".join(c if c == "\n" else " " for c in match.group(0))

    return CODE_BLOCK.sub(_blank, s)
def should_skip(line):
    """Return True for doctest lines (">>>" or "...") that must not be spell-checked."""
    return line.strip().startswith((">>>", "..."))
# Markup spans that hunspell should never see: field-list markers,
# ticket/tensor-style identifiers, inline literals and type fields.
ALL_EXCLUSIONS = (
    re.compile(r":param [^:]+:"),
    re.compile(r"p[0-9]+[^0-9]"),
    re.compile(r":py:[^:]+:"),
    re.compile(r"T[0-9]+[^0-9]"),
    re.compile(r"`+[^`]+`+"),
    re.compile(r":r?type.*"),
)


def remove_exclusions(line):
    """Strip non-prose markup from *line* before spell-checking."""
    for pattern in ALL_EXCLUSIONS:
        line = pattern.sub("", line)
    return line.replace(".. seealso::", "")
def get_doc_str_line_number(element):
    """Best-effort line number where *element*'s docstring starts.

    Handles defs whose signature spans many lines (lots of parameters)
    by locating the docstring expression itself and subtracting its
    newline count.

    NOTE(review): the subtraction assumes the string node's ``lineno``
    is the *last* line of a multi-line string, which was true before
    Python 3.8; on 3.8+ ``lineno`` is the first line, so this may be off
    for multi-line docstrings — confirm the target interpreter.
    """
    # The docstring, when present, is the first statement: an Expr
    # wrapping a string constant. NOTE(review): ast.Str is deprecated
    # since Python 3.8 in favour of ast.Constant.
    if isinstance(element.body[0], ast.Expr):
        if isinstance(element.body[0].value, ast.Str):
            end_line_no = element.body[0].value.lineno
            doc_str_lines = element.body[0].value.s.count("\n")
            return end_line_no - doc_str_lines
    # If the string lookup fails, fall back to the definition's own line.
    return element.lineno
# AST node types that may carry a docstring.
DOC_STR_ELEMENTS = (ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef,
                    ast.Module)


def recursive_add_doc_str(source_file, element, doc_str_list):
    """Walk the AST below *element*, appending a DocStr for every docstring.

    :param source_file: path recorded on each DocStr
    :param element: AST node whose body is scanned
    :param doc_str_list: output list mutated in place
    """
    for child in element.body:
        if isinstance(child, DOC_STR_ELEMENTS):
            child_doc = ast.get_docstring(child)
            if child_doc is not None:
                doc_str_list.append(
                    DocStr(child_doc, source_file,
                           get_doc_str_line_number(child)))
        # Recurse into anything with a body (loops, ifs, nested defs, ...).
        if hasattr(child, "body"):
            recursive_add_doc_str(source_file, child, doc_str_list)
# ANSI escape sequences used to highlight the misspelling in context.
BLACK_ON_WHITE = "\033[30;107m"
RESET_COLOR = "\033[39;49m"
UNDERLINE = "\033[4m"
NOT_UNDERLINE = "\033[24m"


def print_context(doc_str, line_offset, unknown_spelling):
    """Print the whole docstring with the misspelled word underlined.

    :param doc_str: the DocStr being checked
    :param line_offset: 0-based line within the docstring containing
        ``unknown_spelling``
    :param unknown_spelling: the word hunspell flagged
    """
    print(BLACK_ON_WHITE, end='')
    all_lines = doc_str.doc_str.split("\n")
    for line_num, line in enumerate(all_lines):
        if line_num == line_offset:
            # Anchor on a non-letter suffix so a word that is a prefix of
            # another word is not underlined by mistake.
            # NOTE(review): ``unknown_spelling`` is interpolated into the
            # regex unescaped — a word containing regex metacharacters
            # would misbehave; consider re.escape.
            pattern = unknown_spelling + r"[^a-z]"
            match_start = re.search(pattern, line + " ").start()
            before = line[:match_start]
            print(before, end='')
            print(UNDERLINE, end='')
            print(unknown_spelling, end='')
            print(NOT_UNDERLINE, end='')
            after = line[match_start + len(unknown_spelling):]
            print(after, end='')
        else:
            print(line, end='')
        # Newline between lines, but not after the last one.
        if line_num + 1 != len(all_lines):
            print()
    print(RESET_COLOR + "\n")
def process_incorrect_word(hunspell, result, doc_str, line_offset):
    """Interactively handle one misspelling reported by hunspell.

    :param hunspell: dict from start_hunspell_process() ('proc'/'out')
    :param result: one hunspell pipe-mode output line (str), e.g.
        "& word 3 0: sug1, sug2, sug3" or "# word 0"
    :param doc_str: the DocStr being checked
    :param line_offset: 0-based line within the docstring
    """
    result = result.split(" ")
    symbol = result[0]
    if symbol not in ("&", "#"):
        raise RuntimeError("Invalid symbol")
    unknown_spelling = result[1]
    line_num = doc_str.line_num + line_offset
    while True:
        print_context(doc_str, line_offset, unknown_spelling)
        print(f"Unknown spelling, '{unknown_spelling}' on line {line_num}" +
              f" ({doc_str.source_file}).")
        # Fix: ``result`` comes from str.split, so its items are str. The
        # old comparison against b"&" was always False, silently hiding
        # the suggestions (and its dead body would have crashed on
        # str.decode had it ever run).
        if symbol == "&":
            # Comma separated list of suggestions (fields after "offset:").
            suggestions = result[4:]
            print("Suggestions: " + " ".join(suggestions))
        print("(space): continue, (a)dd to dictionary, (q)uit")
        c = getChar()
        if c == ' ':
            break
        if c == 'a':
            # "*word" adds to the personal dictionary; "#" saves it.
            hunspell['proc'].write(b"*")
            hunspell['proc'].write(unknown_spelling.encode("utf-8"))
            hunspell['proc'].write(b"\n")
            hunspell['proc'].write(b"#\n")
            break
        # Ctrl+c and ctrl+z are intercepted by raw mode, handle manually.
        if c in ('q', '\x03', '\x04'):  # ^C and ^D
            sys.exit(0)
        if c == '\x1a':  # ^Z
            signal.pthread_kill(threading.get_ident(), signal.SIGSTOP)
    print("\n\n\n\n")
def process_doc_str(hunspell, doc_str):
    """Feed one docstring to hunspell line by line and handle its findings."""
    all_doc_str = doc_str.doc_str
    # Blank out code blocks so their contents are not spell-checked while
    # line offsets stay aligned with the original docstring.
    all_doc_str = strip_code_blocks(all_doc_str)
    all_lines = all_doc_str.split("\n")
    for line_offset, line in enumerate(all_lines):
        if should_skip(line):
            continue
        line = remove_exclusions(line)
        # '^' prefixes the line so hunspell treats it as data, never as a
        # pipe-mode command.
        full_line = b"^"  # Escape any commands
        full_line += line.encode('utf-8') + b"\n"
        hunspell['proc'].write(full_line)
        # Drain hunspell's responses; an empty line terminates the output
        # for the submitted input line.
        while True:
            if len(hunspell['out']) == 0:
                # Busy-wait until the reader delivers more output.
                assert hunspell['proc'].is_running()
                continue
            next_token = hunspell['out'].pop(0)
            if next_token == "":
                break
            # '*' = correct word, '-' = compound, '+' = root found: all OK.
            if (next_token == "*" or next_token == "-"
                    or next_token[0] == "+"):
                continue
            process_incorrect_word(hunspell, next_token, doc_str, line_offset)
def check_source_file(source_dir, source_file):
    """Spell-check every docstring in one Python source file.

    Parses the file, collects all docstrings, then runs them through a
    fresh hunspell process.
    """
    # Report the path relative to source_dir.
    source_file_without_root = source_file[len(source_dir) + 1:]
    print(f"Checking {source_file_without_root}\n")
    with open(source_file, 'r') as f:
        source = f.read()
    ast_module = ast.parse(source, source_file)
    all_doc_str = []
    recursive_add_doc_str(source_file_without_root, ast_module, all_doc_str)
    hunspell = start_hunspell_process()
    for doc_str in all_doc_str:
        process_doc_str(hunspell, doc_str)
    # Close stdin and wait for hunspell to exit cleanly.
    hunspell['proc'].eof()
    hunspell['proc'].wait()
if __name__ == "__main__":
    # The raw-terminal handling (termios/tty) is POSIX-specific; this
    # script only supports Linux.
    if _utils.get_os_type() != _utils.OsType.Linux:
        print("Not running on linux.")
        sys.exit(1)
    # hunspell must be installed and on PATH.
    if shutil.which(HUNSPELL_CMD[0]) is None:
        print(f"Please install {HUNSPELL_CMD[0]}.")
        sys.exit(1)
    # Check every Python file directly under <sources>/python.
    source_dir = os.path.join(_utils.sources_dir(), "python")
    for source_file in glob.glob(os.path.join(source_dir, "*.py")):
        check_source_file(source_dir, source_file)
import ast
import glob
import os
import re
import shlex
import shutil
import signal
import sys
import termios
import threading
import tty
from utils import _utils
CUSTOM_DIC_PATH = "docs/common/custom_dic"
HUNSPELL_CMD = [
"hunspell",
"-a", # Pipe mode
"-d",
"en_GB", # Graphcore uses en_GB for documentation
"-i",
"utf-8", # Encoding: suitable for linux and osx
"-mode=none"
] # Use raw text
TERM_STDIN = sys.stdin
def getChar():
try:
# Backup this or the terminal will break on closing
old_attr = termios.tcgetattr(TERM_STDIN.fileno())
tty.setraw(TERM_STDIN.fileno())
char = TERM_STDIN.read(1)
finally:
# Reset the terminal
termios.tcsetattr(TERM_STDIN.fileno(), termios.TCIFLUSH, old_attr)
return char
class DocStr():
def __init__(self, doc_str, source_file, line_num):
self._doc_str = doc_str
self._source_file = source_file
self._line_num = line_num
@property
def doc_str(self):
return self._doc_str
@property
def line_num(self):
return self._line_num
@property
def source_file(self):
return self._source_file
def __str__(self):
s = f"{self._line_num}:" + self._doc_str
return s
def start_hunspell_process():
# Add custom dictionary first time only
if "-p" not in HUNSPELL_CMD:
custom_dic_path = os.path.join(_utils.sources_dir(), CUSTOM_DIC_PATH)
if not os.path.exists(custom_dic_path):
open(custom_dic_path, 'a').close()
HUNSPELL_CMD.append("-p")
HUNSPELL_CMD.append(shlex.quote(custom_dic_path))
hunspell_output = []
def out_handler(line):
hunspell_output.append(line)
# subprocess.Popen fails to pass the filename correctly without this when
# shell=True. shlex.quote will handle any spaces correctly.
cmd = " ".join(HUNSPELL_CMD)
hunspell_proc = _utils.Process(cmd,
env=None,
redirect_stderr=True,
stdout_handler=out_handler,
bufsize=0)
# First line is just a version
while len(hunspell_output) < 1:
assert hunspell_proc.is_running()
hunspell_output.clear()
return {'proc': hunspell_proc, 'out': hunspell_output}
CODE_BLOCK = re.compile(r"\.\. code-block::[^\n]+\n\n.*?\n\n", flags=re.DOTALL)
def strip_code_blocks(s):
s_list = list(s)
for match in CODE_BLOCK.finditer(s):
for pos in range(match.start(), match.end()):
# Preserve lines by replacing everything except new lines with
# spaces
if s_list[pos] != "\n":
s_list[pos] = " "
return "".join(s_list)
def should_skip(line):
stripped_line = line.strip()
if stripped_line.startswith(">>>"):
return True
if stripped_line.startswith("..."):
return True
return False
ALL_EXCLUSIONS = (re.compile(r":param [^:]+:"), re.compile(r"p[0-9]+[^0-9]"),
re.compile(r":py:[^:]+:"), re.compile(r"T[0-9]+[^0-9]"),
re.compile(r"`+[^`]+`+"), re.compile(r":r?type.*"))
def remove_exclusions(line):
for exclusion in ALL_EXCLUSIONS:
line = exclusion.sub("", line)
line = line.replace(".. seealso::", "")
return line
def get_doc_str_line_number(element):
# Handle the case of lots of parameters etc
if isinstance(element.body[0], ast.Expr):
if isinstance(element.body[0].value, ast.Str):
end_line_no = element.body[0].value.lineno
doc_str_lines = element.body[0].value.s.count("\n")
return end_line_no - doc_str_lines
# If the string lookup fails
return element.lineno
DOC_STR_ELEMENTS = (ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef,
ast.Module)
def recursive_add_doc_str(source_file, element, doc_str_list):
for sub_element in element.body:
if isinstance(sub_element, DOC_STR_ELEMENTS):
doc_str = ast.get_docstring(sub_element)
if doc_str is not None:
doc_str_list.append(
DocStr(doc_str, source_file,
get_doc_str_line_number(sub_element)))
if hasattr(sub_element, "body"):
recursive_add_doc_str(source_file, sub_element, doc_str_list)
BLACK_ON_WHITE = "\033[30;107m"
RESET_COLOR = "\033[39;49m"
UNDERLINE = "\033[4m"
NOT_UNDERLINE = "\033[24m"
def print_context(doc_str, line_offset, unknown_spelling):
print(BLACK_ON_WHITE, end='')
all_lines = doc_str.doc_str.split("\n")
for line_num, line in enumerate(all_lines):
if line_num == line_offset:
# Make sure we find the right incident of spelling
pattern = unknown_spelling + r"[^a-z]"
match_start = re.search(pattern, line + " ").start()
before = line[:match_start]
print(before, end='')
print(UNDERLINE, end='')
print(unknown_spelling, end='')
print(NOT_UNDERLINE, end='')
after = line[match_start + len(unknown_spelling):]
print(after, end='')
else:
print(line, end='')
if line_num + 1 != len(all_lines):
print()
print(RESET_COLOR + "\n")
def process_incorrect_word(hunspell, result, doc_str, line_offset):
result = result.split(" ")
symbol = result[0]
if symbol not in ("&", "#"):
raise RuntimeError("Invalid symbol")
unknown_spelling = result[1]
line_num = doc_str.line_num + line_offset
while True:
print_context(doc_str, line_offset, unknown_spelling)
print(f"Unknown spelling, '{unknown_spelling}' on line {line_num}" +
f" ({doc_str.source_file}).")
if symbol == b"&":
# Comma seprated list of suggestions
suggestions = [r.decode("utf-8") for r in result[4:]]
print("Suggestions: " + " ".join(suggestions))
print("(space): continue, (a)dd to dictionary, (q)uit")
c = getChar()
if c == ' ':
break
if c == 'a':
# Add to dictionary and save
hunspell['proc'].write(b"*")
hunspell['proc'].write(unknown_spelling.encode("utf-8"))
hunspell['proc'].write(b"\n")
hunspell['proc'].write(b"#\n")
break
# Ctrl+c and ctrl+z are intercepted
if c in ('q', '\x03', '\x04'): # ^C and ^D
sys.exit(0)
if c == '\x1a': # ^Z
signal.pthread_kill(threading.get_ident(), signal.SIGSTOP)
print("\n\n\n\n")
def process_doc_str(hunspell, doc_str):
all_doc_str = doc_str.doc_str
all_doc_str = strip_code_blocks(all_doc_str)
all_lines = all_doc_str.split("\n")
for line_offset, line in enumerate(all_lines):
if should_skip(line):
continue
line = remove_exclusions(line)
full_line = b"^" # Escape any commands
full_line += line.encode('utf-8') + b"\n"
hunspell['proc'].write(full_line)
while True:
if len(hunspell['out']) == 0:
assert hunspell['proc'].is_running()
continue
next_token = hunspell['out'].pop(0)
if next_token == "":
break
if (next_token == "*" or next_token == "-"
or next_token[0] == "+"):
continue
process_incorrect_word(hunspell, next_token, doc_str, line_offset)
def check_source_file(source_dir, source_file):
source_file_without_root = source_file[len(source_dir) + 1:]
print(f"Checking {source_file_without_root}\n")
with open(source_file, 'r') as f:
source = f.read()
ast_module = ast.parse(source, source_file)
all_doc_str = []
recursive_add_doc_str(source_file_without_root, ast_module, all_doc_str)
hunspell = start_hunspell_process()
for doc_str in all_doc_str:
process_doc_str(hunspell, doc_str)
hunspell['proc'].eof()
hunspell['proc'].wait()
if __name__ == "__main__":
if _utils.get_os_type() != _utils.OsType.Linux:
print("Not running on linux.")
sys.exit(1)
if shutil.which(HUNSPELL_CMD[0]) is None:
print(f"Please install {HUNSPELL_CMD[0]}.")
sys.exit(1)
source_dir = os.path.join(_utils.sources_dir(), "python")
for source_file in glob.glob(os.path.join(source_dir, "*.py")):
check_source_file(source_dir, source_file) | 0.386416 | 0.14851 |
# GrovePi + Rotary Angle Sensor (Potentiometer) + LED
# http://www.seeedstudio.com/wiki/Grove_-_Rotary_Angle_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
from grovepi import *
# Connect the LED to digital port D5
button = 3
led = 4
pinMode(button, "INPUT")
pinMode(led,"OUTPUT")
analogWrite(led, 0)
print("입출력 프로그램을 시작합니다. Ctrl + C를 눌러 종료할 수 있습니다.")
print("1초마다 버튼이 눌렸는지 안 눌렸는지 검사합니다.")
while True:
try:
button_status = digitalRead(button)
if button_status:
digitalWrite(led, 1)
print("버튼이 눌렸습니다. LED ON")
else:
digitalWrite(led, 0)
print("버튼이 눌려있지 않습니다. LED OFF")
time.sleep(1)
except KeyboardInterrupt:
digitalWrite(led, 0)
break
except IOError:
print("Error") | 02_iot-raspbian/04_button-led.py |
# GrovePi + Rotary Angle Sensor (Potentiometer) + LED
# http://www.seeedstudio.com/wiki/Grove_-_Rotary_Angle_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
from grovepi import *
# Connect the LED to digital port D5
button = 3
led = 4
pinMode(button, "INPUT")
pinMode(led,"OUTPUT")
analogWrite(led, 0)
print("입출력 프로그램을 시작합니다. Ctrl + C를 눌러 종료할 수 있습니다.")
print("1초마다 버튼이 눌렸는지 안 눌렸는지 검사합니다.")
while True:
try:
button_status = digitalRead(button)
if button_status:
digitalWrite(led, 1)
print("버튼이 눌렸습니다. LED ON")
else:
digitalWrite(led, 0)
print("버튼이 눌려있지 않습니다. LED OFF")
time.sleep(1)
except KeyboardInterrupt:
digitalWrite(led, 0)
break
except IOError:
print("Error") | 0.723212 | 0.298019 |
import datetime as dt
import re
from data import store as store
from utils import ui
_logger = ui.get_logger()
class Option:
def __init__(self, ticker: str, product: str, strike: str, expiry: dt.datetime):
# Specified
self.ticker = ticker
self.product = product
self.strike = strike
self.expiry = expiry
self.spot = 0.0
# Calculated
self.calc_price = 0.0
self.calc_volatility = 0.0
self.time_to_maturity = 0.0
self.rate = 0.0
self.delta = 0.0
self.gamma = 0.0
self.theta = 0.0
self.vega = 0.0
self.rho = 0.0
# Fetched online with YFinance
self.contract = ''
self.last_trade_date = ''
self.last_price = 0.0
self.bid = 0.0
self.ask = 0.0
self.change = 0.0
self.percent_change = 0.0
self.volume = 0.0
self.open_interest = 0.0
self.implied_volatility = 0.0
self.itm = False
self.contract_size = ''
self.currency = ''
def __str__(self):
name = self.contract if self.contract else 'No contract selected'
return f'Contract:{name}\n'\
f'Ticker: {self.ticker}\n'\
f'Product: {self.product.title()}\n'\
f'Expiry: {self.expiry:%Y-%m-%d} ({self.time_to_maturity*365:.0f}/{self.time_to_maturity:.5f})\n'\
f'Strike: {self.strike:.2f}\n'\
f'Spot: {self.spot:.2f}\n'\
f'Rate: {self.rate:.3f}\n'\
f'Last Trade: {self.last_trade_date}\n'\
f'Calc Price: {self.calc_price:.2f}\n'\
f'Last Price: {self.last_price:.2f}\n'\
f'Bid: {self.bid:.2f}\n'\
f'Ask: {self.ask:.2f}\n'\
f'Change: {self.change}\n'\
f'Change%: {self.percent_change}\n'\
f'Volume: {self.volume}\n'\
f'Open Interest: {self.open_interest}\n'\
f'Calc Volitility: {self.calc_volatility:.4f}\n'\
f'Impl Volitility: {self.implied_volatility:.4f}\n'\
f'ITM: {self.itm}\n'\
f'Size: {self.contract_size}\n'\
f'Currency: {self.currency}\n'\
f'Delta: {self.delta:.5f}\n'\
f'Gamma: {self.gamma:.5f}\n'\
f'Theta: {self.theta:.5f}\n'\
f'Vega: {self.vega:.5f}\n'\
f'Rho: {self.rho:.5f}'
def load_contract(self, contract_name: str) -> bool:
ret = True
parsed = _parse_contract_name(contract_name)
self.ticker = parsed['ticker']
self.product = parsed['product']
self.expiry = dt.datetime.strptime(parsed['expiry'], '%Y-%m-%d')
self.strike = parsed['strike']
contract = _get_contract(contract_name)
if contract is not None:
self.contract = contract['contractSymbol']
self.last_trade_date = contract['lastTradeDate']
self.strike = contract['strike']
self.last_price = contract['lastPrice']
self.bid = contract['bid']
self.ask = contract['ask']
self.change = contract['change']
self.percent_change = contract['percentChange']
self.volume = contract['volume']
self.open_interest = contract['openInterest']
self.implied_volatility = contract['impliedVolatility']
self.itm = contract['inTheMoney']
self.contract_size = contract['contractSize']
self.currency = contract['currency']
_logger.info(f'{__name__}: Loaded contract {contract_name}')
if self.last_price > 0.0:
diff = self.calc_price / self.last_price
if diff > 1.25 or diff < 0.75:
_logger.info(f'{__name__}: The calculated price is significantly different than the last traded price')
else:
ret = False
return ret
def _get_contract(contract_name: str) -> str:
parsed = _parse_contract_name(contract_name)
ticker = parsed['ticker']
product = parsed['product']
expiry = parsed['expiry']
try:
if product == 'call':
chain = store.get_option_chain(ticker, uselast=True)(expiry).calls
else:
chain = store.get_option_chain(ticker, uselast=True)(expiry).puts
contract = chain.loc[chain['contractSymbol'] == contract_name]
return contract.iloc[0]
except Exception as e:
print(str(e))
return ''
def _parse_contract_name(contract_name: str) -> dict:
# ex: MSFT210305C00237500
regex = r'([\d]{6})([PC])'
parsed = re.split(regex, contract_name)
ticker = parsed[0]
expiry = f'20{parsed[1][:2]}-{parsed[1][2:4]}-{parsed[1][4:]}'
product = 'call' if 'C' in parsed[2].upper() else 'put'
strike = float(parsed[3][:5]) + (float(parsed[3][5:]) / 1000.0)
return {'ticker': ticker, 'expiry': expiry, 'product': product, 'strike': strike} | options/option.py | import datetime as dt
import re
from data import store as store
from utils import ui
_logger = ui.get_logger()
class Option:
def __init__(self, ticker: str, product: str, strike: str, expiry: dt.datetime):
# Specified
self.ticker = ticker
self.product = product
self.strike = strike
self.expiry = expiry
self.spot = 0.0
# Calculated
self.calc_price = 0.0
self.calc_volatility = 0.0
self.time_to_maturity = 0.0
self.rate = 0.0
self.delta = 0.0
self.gamma = 0.0
self.theta = 0.0
self.vega = 0.0
self.rho = 0.0
# Fetched online with YFinance
self.contract = ''
self.last_trade_date = ''
self.last_price = 0.0
self.bid = 0.0
self.ask = 0.0
self.change = 0.0
self.percent_change = 0.0
self.volume = 0.0
self.open_interest = 0.0
self.implied_volatility = 0.0
self.itm = False
self.contract_size = ''
self.currency = ''
def __str__(self):
name = self.contract if self.contract else 'No contract selected'
return f'Contract:{name}\n'\
f'Ticker: {self.ticker}\n'\
f'Product: {self.product.title()}\n'\
f'Expiry: {self.expiry:%Y-%m-%d} ({self.time_to_maturity*365:.0f}/{self.time_to_maturity:.5f})\n'\
f'Strike: {self.strike:.2f}\n'\
f'Spot: {self.spot:.2f}\n'\
f'Rate: {self.rate:.3f}\n'\
f'Last Trade: {self.last_trade_date}\n'\
f'Calc Price: {self.calc_price:.2f}\n'\
f'Last Price: {self.last_price:.2f}\n'\
f'Bid: {self.bid:.2f}\n'\
f'Ask: {self.ask:.2f}\n'\
f'Change: {self.change}\n'\
f'Change%: {self.percent_change}\n'\
f'Volume: {self.volume}\n'\
f'Open Interest: {self.open_interest}\n'\
f'Calc Volitility: {self.calc_volatility:.4f}\n'\
f'Impl Volitility: {self.implied_volatility:.4f}\n'\
f'ITM: {self.itm}\n'\
f'Size: {self.contract_size}\n'\
f'Currency: {self.currency}\n'\
f'Delta: {self.delta:.5f}\n'\
f'Gamma: {self.gamma:.5f}\n'\
f'Theta: {self.theta:.5f}\n'\
f'Vega: {self.vega:.5f}\n'\
f'Rho: {self.rho:.5f}'
def load_contract(self, contract_name: str) -> bool:
ret = True
parsed = _parse_contract_name(contract_name)
self.ticker = parsed['ticker']
self.product = parsed['product']
self.expiry = dt.datetime.strptime(parsed['expiry'], '%Y-%m-%d')
self.strike = parsed['strike']
contract = _get_contract(contract_name)
if contract is not None:
self.contract = contract['contractSymbol']
self.last_trade_date = contract['lastTradeDate']
self.strike = contract['strike']
self.last_price = contract['lastPrice']
self.bid = contract['bid']
self.ask = contract['ask']
self.change = contract['change']
self.percent_change = contract['percentChange']
self.volume = contract['volume']
self.open_interest = contract['openInterest']
self.implied_volatility = contract['impliedVolatility']
self.itm = contract['inTheMoney']
self.contract_size = contract['contractSize']
self.currency = contract['currency']
_logger.info(f'{__name__}: Loaded contract {contract_name}')
if self.last_price > 0.0:
diff = self.calc_price / self.last_price
if diff > 1.25 or diff < 0.75:
_logger.info(f'{__name__}: The calculated price is significantly different than the last traded price')
else:
ret = False
return ret
def _get_contract(contract_name: str) -> str:
parsed = _parse_contract_name(contract_name)
ticker = parsed['ticker']
product = parsed['product']
expiry = parsed['expiry']
try:
if product == 'call':
chain = store.get_option_chain(ticker, uselast=True)(expiry).calls
else:
chain = store.get_option_chain(ticker, uselast=True)(expiry).puts
contract = chain.loc[chain['contractSymbol'] == contract_name]
return contract.iloc[0]
except Exception as e:
print(str(e))
return ''
def _parse_contract_name(contract_name: str) -> dict:
# ex: MSFT210305C00237500
regex = r'([\d]{6})([PC])'
parsed = re.split(regex, contract_name)
ticker = parsed[0]
expiry = f'20{parsed[1][:2]}-{parsed[1][2:4]}-{parsed[1][4:]}'
product = 'call' if 'C' in parsed[2].upper() else 'put'
strike = float(parsed[3][:5]) + (float(parsed[3][5:]) / 1000.0)
return {'ticker': ticker, 'expiry': expiry, 'product': product, 'strike': strike} | 0.545286 | 0.20454 |
from collections import OrderedDict
import tensorflow as tf
from ..tfcompat import variables_initializer, global_variables
__all__ = [
'ensure_default_session', 'get_variable_values',
'get_variable_values_as_dict', 'get_uninitialized_variables',
'ensure_variables_initialized',
]
def ensure_default_session():
"""Ensure that a default TensorFlow session exists, otherwise raise error.
Returns
-------
tf.Session
The default TensorFlow session.
Raises
------
RuntimeError
If the default session does not exist.
"""
sess = tf.get_default_session()
if sess is None:
raise RuntimeError('No default session has been open.')
return sess
def get_variable_values(var_or_vars):
"""Get the values of specified TensorFlow variables.
Parameters
----------
var_or_vars : tf.Variable | collections.Iterable[tf.Variable]
A TensorFlow variable, or a list of TensorFlow variables.
Returns
-------
any | tuple[any]
If one single variable is queried, returns its value.
If a tuple of variables are queried, return their values in tuple.
"""
if isinstance(var_or_vars, tf.Variable):
return ensure_default_session().run(var_or_vars)
else:
var_or_vars = list(var_or_vars)
if not var_or_vars:
return ()
return tuple(ensure_default_session().run(var_or_vars))
def get_variable_values_as_dict(var_or_vars):
"""Get the values of specified TensorFlow variables as dict.
Parameters
----------
var_or_vars : tf.Variable | tuple[tf.Variable]
A TensorFlow variable, or a tuple of TensorFlow variables.
Returns
-------
OrderedDict[tf.Variable, any]
Dict from the variable instances to their fetched values.
"""
if isinstance(var_or_vars, tf.Variable):
var_or_vars = [var_or_vars]
else:
var_or_vars = list(var_or_vars)
values = get_variable_values(var_or_vars)
return OrderedDict((var, val) for var, val in zip(var_or_vars, values))
def get_uninitialized_variables(variables=None):
"""Get uninitialized variables as a list.
Parameters
----------
variables : collections.Iterable[tf.Variable]
Return only uninitialized variables within this collection.
If not specified, will return all uninitialized variables.
Returns
-------
list[tf.Variable]
"""
sess = ensure_default_session()
if variables is None:
variables = global_variables()
else:
variables = list(variables)
init_flag = sess.run(
tf.pack([tf.is_variable_initialized(v) for v in variables]))
return [v for v, f in zip(variables, init_flag) if not f]
def ensure_variables_initialized(variables=None):
"""Ensure all variables are initialized.
Parameters
----------
variables : collections.Iterable[tf.Variable]
Ensure only these variables to be initialized.
If not specified, will ensure all variables initialized.
"""
uninitialized = get_uninitialized_variables(variables)
if uninitialized:
ensure_default_session().run(variables_initializer(uninitialized)) | madoka/utils/tfhelper/session.py | from collections import OrderedDict
import tensorflow as tf
from ..tfcompat import variables_initializer, global_variables
__all__ = [
'ensure_default_session', 'get_variable_values',
'get_variable_values_as_dict', 'get_uninitialized_variables',
'ensure_variables_initialized',
]
def ensure_default_session():
"""Ensure that a default TensorFlow session exists, otherwise raise error.
Returns
-------
tf.Session
The default TensorFlow session.
Raises
------
RuntimeError
If the default session does not exist.
"""
sess = tf.get_default_session()
if sess is None:
raise RuntimeError('No default session has been open.')
return sess
def get_variable_values(var_or_vars):
"""Get the values of specified TensorFlow variables.
Parameters
----------
var_or_vars : tf.Variable | collections.Iterable[tf.Variable]
A TensorFlow variable, or a list of TensorFlow variables.
Returns
-------
any | tuple[any]
If one single variable is queried, returns its value.
If a tuple of variables are queried, return their values in tuple.
"""
if isinstance(var_or_vars, tf.Variable):
return ensure_default_session().run(var_or_vars)
else:
var_or_vars = list(var_or_vars)
if not var_or_vars:
return ()
return tuple(ensure_default_session().run(var_or_vars))
def get_variable_values_as_dict(var_or_vars):
"""Get the values of specified TensorFlow variables as dict.
Parameters
----------
var_or_vars : tf.Variable | tuple[tf.Variable]
A TensorFlow variable, or a tuple of TensorFlow variables.
Returns
-------
OrderedDict[tf.Variable, any]
Dict from the variable instances to their fetched values.
"""
if isinstance(var_or_vars, tf.Variable):
var_or_vars = [var_or_vars]
else:
var_or_vars = list(var_or_vars)
values = get_variable_values(var_or_vars)
return OrderedDict((var, val) for var, val in zip(var_or_vars, values))
def get_uninitialized_variables(variables=None):
"""Get uninitialized variables as a list.
Parameters
----------
variables : collections.Iterable[tf.Variable]
Return only uninitialized variables within this collection.
If not specified, will return all uninitialized variables.
Returns
-------
list[tf.Variable]
"""
sess = ensure_default_session()
if variables is None:
variables = global_variables()
else:
variables = list(variables)
init_flag = sess.run(
tf.pack([tf.is_variable_initialized(v) for v in variables]))
return [v for v, f in zip(variables, init_flag) if not f]
def ensure_variables_initialized(variables=None):
"""Ensure all variables are initialized.
Parameters
----------
variables : collections.Iterable[tf.Variable]
Ensure only these variables to be initialized.
If not specified, will ensure all variables initialized.
"""
uninitialized = get_uninitialized_variables(variables)
if uninitialized:
ensure_default_session().run(variables_initializer(uninitialized)) | 0.891271 | 0.442034 |
from unittest.mock import mock_open, patch
import pytest
from satosa.metadata_creation.description import ContactPersonDesc, UIInfoDesc, OrganizationDesc, MetadataDescription
class TestContactPersonDesc(object):
def test_to_dict(self):
desc = ContactPersonDesc()
desc.contact_type = "test"
desc.given_name = "First"
desc.sur_name = "Tester"
desc.add_email_address("<EMAIL>")
serialized = desc.to_dict()
assert serialized["contact_type"] == "test"
assert serialized["given_name"] == "First"
assert serialized["sur_name"] == "Tester"
assert serialized["email_address"] == ["<EMAIL>"]
class TestUIInfoDesc(object):
def test_to_dict(self):
desc = UIInfoDesc()
desc.add_description("test", "en")
desc.add_display_name("my company", "en")
desc.add_logo("logo.jpg", 80, 80, "en")
serialized = desc.to_dict()
ui_info = serialized["service"]["idp"]["ui_info"]
assert ui_info["description"] == [{"text": "test", "lang": "en"}]
assert ui_info["display_name"] == [{"text": "my company", "lang": "en"}]
assert ui_info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80, "lang": "en"}]
def test_to_dict_for_logo_without_lang(self):
desc = UIInfoDesc()
desc.add_logo("logo.jpg", 80, 80, None)
serialized = desc.to_dict()
ui_info = serialized["service"]["idp"]["ui_info"]
assert ui_info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80}]
def test_to_dict_with_empty(self):
desc = UIInfoDesc()
assert desc.to_dict() == {}
class TestOrganizationDesc(object):
def test_to_dict(self):
desc = OrganizationDesc()
desc.add_display_name("Foo Testing", "en")
desc.add_name("Testing Co.", "en")
desc.add_url("https://test.example.com", "en")
serialized = desc.to_dict()
org_info = serialized["organization"]
assert org_info["display_name"] == [("Foo Testing", "en")]
assert org_info["name"] == [("Testing Co.", "en")]
assert org_info["url"] == [("https://test.example.com", "en")]
def test_to_dict_with_empty(self):
desc = OrganizationDesc()
assert desc.to_dict() == {}
class TestMetadataDescription(object):
def test_to_dict(self):
org_desc = OrganizationDesc()
org_desc.add_display_name("Foo Testing", "en")
org_desc.add_name("Testing Co.", "en")
org_desc.add_url("https://test.example.com", "en")
contact_desc = ContactPersonDesc()
contact_desc.contact_type = "test"
contact_desc.given_name = "First"
contact_desc.sur_name = "Tester"
contact_desc.add_email_address("<EMAIL>")
ui_desc = UIInfoDesc()
ui_desc.add_description("test", "en")
ui_desc.add_display_name("my company", "en")
ui_desc.add_logo("http://example.com/logo.jpg", 80, 80, "en")
desc = MetadataDescription("my_entity")
desc.organization = org_desc
desc.add_contact_person(contact_desc)
desc.ui_info = ui_desc
serialized = desc.to_dict()
assert serialized["entityid"] == "my_entity"
assert serialized["organization"]
assert serialized["contact_person"]
assert serialized["service"]["idp"]["ui_info"]
def test_set_organization_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.organization = "bad input"
def test_add_contact_person_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.add_contact_person("bad input")
def test_set_ui_info_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.ui_info = "bad input" | tests/satosa/metadata_creation/test_description.py | from unittest.mock import mock_open, patch
import pytest
from satosa.metadata_creation.description import ContactPersonDesc, UIInfoDesc, OrganizationDesc, MetadataDescription
class TestContactPersonDesc(object):
def test_to_dict(self):
desc = ContactPersonDesc()
desc.contact_type = "test"
desc.given_name = "First"
desc.sur_name = "Tester"
desc.add_email_address("<EMAIL>")
serialized = desc.to_dict()
assert serialized["contact_type"] == "test"
assert serialized["given_name"] == "First"
assert serialized["sur_name"] == "Tester"
assert serialized["email_address"] == ["<EMAIL>"]
class TestUIInfoDesc(object):
def test_to_dict(self):
desc = UIInfoDesc()
desc.add_description("test", "en")
desc.add_display_name("my company", "en")
desc.add_logo("logo.jpg", 80, 80, "en")
serialized = desc.to_dict()
ui_info = serialized["service"]["idp"]["ui_info"]
assert ui_info["description"] == [{"text": "test", "lang": "en"}]
assert ui_info["display_name"] == [{"text": "my company", "lang": "en"}]
assert ui_info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80, "lang": "en"}]
def test_to_dict_for_logo_without_lang(self):
desc = UIInfoDesc()
desc.add_logo("logo.jpg", 80, 80, None)
serialized = desc.to_dict()
ui_info = serialized["service"]["idp"]["ui_info"]
assert ui_info["logo"] == [{"text": "logo.jpg", "width": 80, "height": 80}]
def test_to_dict_with_empty(self):
desc = UIInfoDesc()
assert desc.to_dict() == {}
class TestOrganizationDesc(object):
def test_to_dict(self):
desc = OrganizationDesc()
desc.add_display_name("Foo Testing", "en")
desc.add_name("Testing Co.", "en")
desc.add_url("https://test.example.com", "en")
serialized = desc.to_dict()
org_info = serialized["organization"]
assert org_info["display_name"] == [("Foo Testing", "en")]
assert org_info["name"] == [("Testing Co.", "en")]
assert org_info["url"] == [("https://test.example.com", "en")]
def test_to_dict_with_empty(self):
desc = OrganizationDesc()
assert desc.to_dict() == {}
class TestMetadataDescription(object):
def test_to_dict(self):
org_desc = OrganizationDesc()
org_desc.add_display_name("Foo Testing", "en")
org_desc.add_name("Testing Co.", "en")
org_desc.add_url("https://test.example.com", "en")
contact_desc = ContactPersonDesc()
contact_desc.contact_type = "test"
contact_desc.given_name = "First"
contact_desc.sur_name = "Tester"
contact_desc.add_email_address("<EMAIL>")
ui_desc = UIInfoDesc()
ui_desc.add_description("test", "en")
ui_desc.add_display_name("my company", "en")
ui_desc.add_logo("http://example.com/logo.jpg", 80, 80, "en")
desc = MetadataDescription("my_entity")
desc.organization = org_desc
desc.add_contact_person(contact_desc)
desc.ui_info = ui_desc
serialized = desc.to_dict()
assert serialized["entityid"] == "my_entity"
assert serialized["organization"]
assert serialized["contact_person"]
assert serialized["service"]["idp"]["ui_info"]
def test_set_organization_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.organization = "bad input"
def test_add_contact_person_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.add_contact_person("bad input")
def test_set_ui_info_rejects_bad_input(self):
desc = MetadataDescription("my_entity")
with pytest.raises(TypeError):
desc.ui_info = "bad input" | 0.627951 | 0.477798 |
import re
from ..message_server import Message
from ..util import app_logging
log = app_logging.getLogger('Log Utils')
code = re.compile('%CODE')
class FlowModInfo(object):
"""
All we need to retrieve FlowMod from Database.
"""
def __init__(self, entry):
dpid, flow_mod = entry
self.dpid = dpid
self.match = flow_mod.match
self.actions = flow_mod.actions
self.command = flow_mod.command
self.priority = flow_mod.priority
def __str__(self):
return (str(self.dpid) + '\n' + str(self.match) +
'\n' + str(self.actions))
class RuleInfo(object):
def __init__(self, entry):
dpid, rule = entry
self.dpid = dpid
self.match = rule.match
self.actions = rule.actions
self.priority = rule.priority
class MessageInfo(object):
"""
Utility for a single error message.
Has multiple text <parts> with call stacks between them.
Each call stack is associated with QueryID(qid) and FlowModInfo.
Traces how many unanswered queries are remaining.
"""
def __init__(self, infos, qids, indices, event):
if len(infos) != len(qids) or len(qids) != len(indices):
raise Exception('Wrong length')
self.code = {} # qid -> code
self.infos = {} # qid -> info
self.qids = qids
self.indices = indices
self.indices_to_qids = {}
self.unanswered = len(qids)
self.query_count = {}
for i, qid in enumerate(qids):
self.infos[qid] = infos[i]
self.code[qid] = None
self.query_count[qid] = 0
self.event = event
def set_code(self, qid, code):
"""
Set call stack for qid.
"""
if self.code[qid] is None:
self.unanswered -= self.qids.count(qid)
self.code[qid] = code
def filled(self):
"""
Have we received all the necessary information for this message?
"""
filled = (self.unanswered == 0)
if filled:
for i, qid in enumerate(self.qids):
self.indices_to_qids[self.indices[i]] = qid
return filled
def get_info_and_code(self, index_pair):
qid = self.indices_to_qids[index_pair]
return (self.infos[qid], self.code[qid])
def change_qid(self, old_qid, new_qid):
"""
Remove <old_qid>, insert <new_qid> instead.
Used for repeated queries with the same FlowMod.
"""
count = 0
for i, v in enumerate(self.qids):
if v == old_qid:
count += 1
self.qids[i] = new_qid
if count == 0:
return
self.code[new_qid] = self.code[old_qid]
del self.code[old_qid]
self.infos[new_qid] = self.infos[old_qid]
del self.infos[old_qid]
self.query_count[new_qid] = self.query_count[old_qid]
del self.query_count[old_qid]
def __str__(self):
"""
Construct textual log message.
"""
self.parts = code.split(self.event.log())
text = ""
for part, qid in zip(self.parts, self.qids):
c = ""
res = self.code[qid]
for entry in res:
c += entry[0] + '\n'
if isinstance(entry[1], basestring):
c += ' ' + str(entry[1]) + '\n'
elif isinstance(entry[1], tuple) or isinstance(entry[1], list):
c += '\n'.join([' ' + str(r) for r in entry[1]]) + '\n'
text += part + c
text += self.parts[-1]
return text
def get_data(self):
"""
Return [(info, code),...]
"""
res = []
for qid in self.qids:
res.append((self.infos[qid], self.code[qid]))
return res
def get_query_count(self, qid):
return self.query_count[qid]
def inc_query_count(self, qid):
self.query_count[qid] += 1
class ReQuery(Message):
"""
Send specific query later.
"""
def __init__(self, info, qid):
self.info = info
self.qid = qid | adapters/pox/ext/debugger/elt/logger/util.py | import re
from ..message_server import Message
from ..util import app_logging
log = app_logging.getLogger('Log Utils')
code = re.compile('%CODE')
class FlowModInfo(object):
"""
All we need to retrieve FlowMod from Database.
"""
def __init__(self, entry):
dpid, flow_mod = entry
self.dpid = dpid
self.match = flow_mod.match
self.actions = flow_mod.actions
self.command = flow_mod.command
self.priority = flow_mod.priority
def __str__(self):
return (str(self.dpid) + '\n' + str(self.match) +
'\n' + str(self.actions))
class RuleInfo(object):
def __init__(self, entry):
dpid, rule = entry
self.dpid = dpid
self.match = rule.match
self.actions = rule.actions
self.priority = rule.priority
class MessageInfo(object):
    """
    Utility for a single error message.
    Has multiple text <parts> with call stacks between them.
    Each call stack is associated with QueryID(qid) and FlowModInfo.
    Traces how many unanswered queries are remaining.
    """
    def __init__(self, infos, qids, indices, event):
        # infos, qids and indices are parallel lists, one entry per
        # query referenced by this message.
        if len(infos) != len(qids) or len(qids) != len(indices):
            raise Exception('Wrong length')
        self.code = {} # qid -> code
        self.infos = {} # qid -> info
        self.qids = qids
        self.indices = indices
        self.indices_to_qids = {}
        # Queries still waiting for their call stack via set_code().
        self.unanswered = len(qids)
        self.query_count = {}
        for i, qid in enumerate(qids):
            self.infos[qid] = infos[i]
            self.code[qid] = None
            self.query_count[qid] = 0
        self.event = event
    def set_code(self, qid, code):
        """
        Set call stack for qid.
        """
        # Only the first answer decrements the outstanding count;
        # duplicate occurrences of qid are all satisfied at once.
        if self.code[qid] is None:
            self.unanswered -= self.qids.count(qid)
        self.code[qid] = code
    def filled(self):
        """
        Have we received all the necessary information for this message?
        """
        filled = (self.unanswered == 0)
        if filled:
            # Build the index-pair -> qid lookup once everything arrived;
            # get_info_and_code() relies on this map.
            for i, qid in enumerate(self.qids):
                self.indices_to_qids[self.indices[i]] = qid
        return filled
    def get_info_and_code(self, index_pair):
        # Valid only after filled() has returned True (it populates
        # indices_to_qids); raises KeyError otherwise.
        qid = self.indices_to_qids[index_pair]
        return (self.infos[qid], self.code[qid])
    def change_qid(self, old_qid, new_qid):
        """
        Remove <old_qid>, insert <new_qid> instead.
        Used for repeated queries with the same FlowMod.
        """
        count = 0
        for i, v in enumerate(self.qids):
            if v == old_qid:
                count += 1
                self.qids[i] = new_qid
        if count == 0:
            return
        # Re-key all per-qid state under the new qid.
        self.code[new_qid] = self.code[old_qid]
        del self.code[old_qid]
        self.infos[new_qid] = self.infos[old_qid]
        del self.infos[old_qid]
        self.query_count[new_qid] = self.query_count[old_qid]
        del self.query_count[old_qid]
    def __str__(self):
        """
        Construct textual log message.
        """
        # NOTE(review): `code` is not defined in this scope -- this line
        # raises NameError as written. Possibly `self.code` or a
        # module-level helper was intended; confirm against callers.
        self.parts = code.split(self.event.log())
        text = ""
        for part, qid in zip(self.parts, self.qids):
            c = ""
            res = self.code[qid]
            for entry in res:
                c += entry[0] + '\n'
                # NOTE(review): `basestring` exists only in Python 2;
                # this module appears to target Python 2.
                if isinstance(entry[1], basestring):
                    c += ' ' + str(entry[1]) + '\n'
                elif isinstance(entry[1], tuple) or isinstance(entry[1], list):
                    c += '\n'.join([' ' + str(r) for r in entry[1]]) + '\n'
            text += part + c
        text += self.parts[-1]
        return text
    def get_data(self):
        """
        Return [(info, code),...]
        """
        res = []
        for qid in self.qids:
            res.append((self.infos[qid], self.code[qid]))
        return res
    def get_query_count(self, qid):
        # Number of times this query was (re)issued; see inc_query_count().
        return self.query_count[qid]
    def inc_query_count(self, qid):
        self.query_count[qid] += 1
class ReQuery(Message):
    """
    Send specific query later.
    """
    def __init__(self, info, qid):
        # Query id and its associated FlowModInfo to re-issue.
        self.qid = qid
        self.info = info
from os import PathLike
from pathlib import Path
from typing import (
Any,
Callable,
Container,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
overload,
)
import click
import joblib
import numpy as np
import tqdm
import yaml
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import get_scorer
from sklearn.model_selection import (
BaseCrossValidator,
GroupKFold,
GroupShuffleSplit,
LeaveOneGroupOut,
LeaveOneOut,
StratifiedKFold,
StratifiedShuffleSplit,
)
from sklearn.model_selection._split import _BaseKFold
from sklearn.model_selection._validation import _score
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import check_array
PathOrStr = Union[PathLike, str]
# Class adapted from user394430's answer here:
# https://stackoverflow.com/a/61900501/10044861
# Licensed under CC BY-SA 4.0
class TqdmParallel(joblib.Parallel):
    """Drop-in replacement for ``joblib.Parallel`` that reports task
    completion through a tqdm progress bar.
    """

    def __init__(
        self,
        total: int = 1,
        desc: str = "",
        unit: str = "it",
        leave: bool = True,
        **kwargs,
    ):
        self.total = total
        self.tqdm_args = dict(desc=desc, unit=unit, leave=leave, disable=None)
        # Silence joblib's own console output; tqdm does the reporting.
        kwargs["verbose"] = 0
        super().__init__(**kwargs)

    def __call__(self, iterable):
        with tqdm.tqdm(total=self.total, **self.tqdm_args) as self.pbar:
            return super().__call__(iterable)

    def print_progress(self):
        # joblib calls this after each completed task; sync the bar
        # instead of printing.
        self.pbar.n = self.n_completed_tasks
        self.pbar.refresh()
class PathlibPath(click.Path):
    """`click.Path` variant whose converted value is a `pathlib.Path`
    instead of a string.
    """

    def convert(self, value, param, ctx) -> Path:
        converted = super().convert(value, param, ctx)
        return Path(converted)
# Generic type parameters: T1 is the element type consumed by a mapped
# callable, T2 the type it produces (see itmap below).
T1 = TypeVar("T1")
T2 = TypeVar("T2")
def itmap(s: Callable[[T1], T2]):
    """Lift ``s`` into a map function that preserves container shape.

    The returned function applies ``s`` element-wise to a list or tuple
    (returning the same container type) and directly to any other value.
    """

    @overload
    def _map(x: T1) -> T2:
        ...

    @overload
    def _map(x: List[T1]) -> List[T2]:
        ...

    @overload
    def _map(x: Tuple[T1, ...]) -> Tuple[T2, ...]:
        ...

    def _map(x):
        if not isinstance(x, (list, tuple)):
            return s(x)
        return type(x)(s(item) for item in x)

    return _map
def ordered_intersect(a: Iterable, b: Container) -> List:
    """Return the elements of `a` that are also in `b`, preserving the
    order they appear in `a`.
    """
    return list(filter(b.__contains__, a))
def filter_kwargs(kwargs: Dict[str, Any], method: Callable) -> Dict[str, Any]:
    """Removes incompatible keyword arguments. This ignores any **kwargs
    catchall in method signature, and only returns args specifically
    present as keywords in the method signature which are also not
    positional only.

    Args:
    -----
    kwargs: dict
        Keyword arguments to pass to method.
    method: callable
        The method for which to check valid parameters.

    Returns:
    --------
    kwargs: dict
        Filtered keyword arguments.
    """
    import inspect

    meth_params = inspect.signature(method).parameters
    # Keep only names that are real (non positional-only) parameters of
    # `method`; a VAR_KEYWORD catchall does not whitelist arbitrary keys.
    return {
        key: val
        for key, val in kwargs.items()
        if key in meth_params
        and meth_params[key].kind != inspect.Parameter.POSITIONAL_ONLY
    }
def get_arg_mapping_multi(s: str) -> Dict[str, List[Any]]:
    """Given a string mapping from the command-line, returns a dict
    representing that mapping.

    The string form of the mapping is:
        key:value[,key:value]+
    Duplicate keys will be mapped to a list of values.

    Args:
    -----
    s: str
        String representing the mapping. It cannot contain spaces or
        shell symbols (unless escaped).

    Returns:
    --------
    mapping: dict
        A dictionary mapping keys to lists of values from the string.
    """
    mapping: Dict[str, List[str]] = {}
    for item in s.split(","):
        key, val = item.split(":")
        mapping.setdefault(key, []).append(val)
    return mapping
def get_arg_mapping(s: Union[Path, str]) -> Dict[str, Any]:
    """Given a mapping on the command-line, returns a dict representing
    that mapping. Mapping can be a string or a more complex YAML file.

    The string form of the mapping is:
        key:value[,key:value]+

    Args:
    -----
    s: PathLike or str
        String representing the mapping or path to YAML containing
        mapping. If a string, it cannot contain spaces or shell symbols
        (unless escaped).

    Returns:
    --------
    mapping: dict
        A dictionary mapping keys to values from the string.
    """
    if isinstance(s, Path) or Path(s).exists():
        # YAML file on disk; an empty document yields an empty mapping.
        with open(s) as fid:
            return yaml.safe_load(fid) or {}
    multi = get_arg_mapping_multi(s)
    # Collapse singleton lists so unique keys map to a plain value.
    return {key: vals[0] if len(vals) == 1 else vals for key, vals in multi.items()}
def flat_to_inst(x: np.ndarray, slices: Union[np.ndarray, List[int]]) -> np.ndarray:
    """Takes a concatenated 2D data array and converts it to either a
    contiguous 2D/3D array or a variable-length 3D array, with one
    feature vector/matrix per instance.
    """
    n_inst = len(slices)
    if len(x) == n_inst:
        # One row per instance: already a 2-D contiguous array.
        return x
    if all(length == slices[0] for length in slices):
        # Equal-length instances: reshape into a contiguous 3-D array.
        assert len(x) % n_inst == 0
        return x.reshape(n_inst, len(x) // n_inst, x[0].shape[-1])
    # Unequal lengths: split into a 1-D object array of 2-D arrays.
    boundaries = np.cumsum(slices)[:-1]
    return np.array(np.split(x, boundaries, axis=0), dtype=object)
def inst_to_flat(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """The inverse of flat_to_inst(). Takes an instance matrix and
    converts to a "flattened" 2D matrix plus per-instance lengths.
    """
    if len(x.shape) == 2:
        # Already flat: each instance occupies exactly one row.
        return x, np.ones(len(x), dtype=int)
    slices = np.array([len(inst) for inst in x])
    if len(x.shape) == 3:
        flat = x.reshape(sum(slices), x.shape[2])
    else:
        # Object array of variable-length instances.
        flat = np.concatenate(x)
    assert sum(slices) == len(flat)
    return flat, slices
def check_3d(arrays: Union[Sequence[np.ndarray], np.ndarray]):
    """Checks that the input is 3-D overall: either a contiguous 3-D
    array or a collection of 2-D arrays. Raises ValueError otherwise.
    """
    for arr in arrays:
        if len(arr.shape) != 2:
            raise ValueError("arrays must be 3D (contiguous or vlen).")
def frame_arrays(
    arrays: Union[List[np.ndarray], np.ndarray],
    frame_size: int = 640,
    frame_shift: int = 160,
    num_frames: Optional[int] = None,
):
    """Creates sequences of frames from the given arrays. Each input
    array is a 1-D or L x 1 time domain signal. Each corresponding
    output array is a 2-D array of frames of shape (num_frames,
    frame_size).
    """
    # TODO: Make option for vlen output
    if num_frames is None:
        # Enough frames to cover the longest signal.
        longest = max(len(x) for x in arrays)
        num_frames = (longest - frame_size) // frame_shift + 1
    framed = []
    for signal in arrays:
        signal = np.squeeze(signal)
        # Zero-filled so short signals / partial frames are zero-padded.
        frames = np.zeros((num_frames, frame_size), dtype=np.float32)
        for frame_idx in range(num_frames):
            start = frame_idx * frame_shift
            if start >= len(signal):
                break
            valid = min(len(signal) - start, frame_size)
            frames[frame_idx, :valid] = signal[start : start + frame_size]
        framed.append(frames)
    out = np.array(framed)
    assert tuple(out.shape) == (len(arrays), num_frames, frame_size)
    return out
def pad_arrays(arrays: Union[List[np.ndarray], np.ndarray], pad: int = 32):
    """Pads each array to the nearest multiple of `pad` greater than the
    array size. Assumes axis 0 of each sub-array, or axis 1 of x, is
    the time axis.
    """

    def _extra(n: int) -> int:
        # Zero-padding needed to round n up to a multiple of `pad`.
        return int(np.ceil(n / pad)) * pad - n

    if isinstance(arrays, np.ndarray) and len(arrays.shape) > 1:
        # Contiguous batch: pad along the time axis (axis 1) only.
        widths = ((0, 0), (0, _extra(arrays.shape[1])))
        widths += tuple((0, 0) for _ in arrays.shape[2:])
        return np.pad(arrays, widths)
    padded = [np.pad(a, ((0, _extra(a.shape[0])), (0, 0))) for a in arrays]
    if isinstance(arrays, np.ndarray):
        if all(a.shape == padded[0].shape for a in padded):
            return np.array(padded)
        return np.array(padded, dtype=object)
    return padded
def clip_arrays(
    arrays: Union[List[np.ndarray], np.ndarray], length: int, copy: bool = True
):
    """Clips each array to the specified maximum length."""

    def _clip(a: np.ndarray) -> np.ndarray:
        clipped = a[:length]
        return clipped.copy() if copy else clipped

    if not isinstance(arrays, np.ndarray):
        return [_clip(a) for a in arrays]
    if len(arrays.shape) > 1:
        # Contiguous batch: clip along the time axis directly.
        clipped = arrays[:, :length, ...]
        return clipped.copy() if copy else clipped
    new_arrays = [_clip(a) for a in arrays]
    if all(a.shape == new_arrays[0].shape for a in new_arrays):
        # All equal length after clipping: return a contiguous array.
        return np.stack(new_arrays)
    return np.array(new_arrays, dtype=object)
def transpose_time(arrays: Union[List[np.ndarray], np.ndarray]):
    """Transpose the time and feature axis of each array. Requires each
    array be 2-D.

    NOTE: This function modifies the arrays in-place.
    """
    # Same validation as check_3d(): 3-D contiguous or collection of 2-D.
    if any(len(a.shape) != 2 for a in arrays):
        raise ValueError("arrays must be 3D (contiguous or vlen).")
    if isinstance(arrays, np.ndarray) and len(arrays.shape) == 3:
        arrays = arrays.transpose(0, 2, 1)
    else:
        for i, arr in enumerate(arrays):
            arrays[i] = arr.transpose()
    assert all(a.shape[0] == arrays[0].shape[0] for a in arrays)
    return arrays
def shuffle_multiple(*arrays: Union[np.ndarray, Sequence], numpy_indexing: bool = True):
    """Shuffles multiple arrays or lists in sync. Useful for shuffling the data
    and labels in a dataset separately while keeping them synchronised.

    Parameters:
    -----------
    arrays, iterable of array-like
        The arrays to shuffle. They must all have the same size of first
        dimension.
    numpy_indexing: bool, default = True
        Whether to use NumPy-style indexing or list comprehension.

    Returns:
    shuffled_arrays: iterable of array-like
        The shuffled arrays.
    """
    first_len = len(arrays[0])
    if any(len(a) != first_len for a in arrays):
        raise ValueError("Not all arrays have equal first dimension.")
    # One permutation shared by all arrays keeps them synchronised.
    perm = np.random.default_rng().permutation(first_len)
    if numpy_indexing:
        return [a[perm] for a in arrays]
    return [[a[i] for i in perm] for a in arrays]
def batch_arrays(
    arrays_x: Union[np.ndarray, List[np.ndarray]],
    y: np.ndarray,
    batch_size: int = 32,
    shuffle: bool = True,
    uniform_batch_size: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    """Batches a list of arrays of different sizes, grouping them by
    size. This is designed for use with variable length sequences. Each
    batch will have a maximum of batch_size arrays, but may have less if
    there are fewer arrays of the same length. It is recommended to use
    the pad_arrays() method of the LabelledDataset instance before using
    this function, in order to quantise the lengths.

    Parameters:
    -----
    arrays_x: list of ndarray
        A list of N-D arrays, possibly of different lengths, to batch.
        The assumption is that all the arrays have the same rank and
        only axis 0 differs in length.
    y: ndarray
        The labels for each of the arrays in arrays_x.
    batch_size: int
        Arrays will be grouped together by size, up to a maximum of
        batch_size, after which a new batch will be created. Thus each
        batch produced will have between 1 and batch_size items.
    shuffle: bool, default = True
        Whether to shuffle array order in a batch.
    uniform_batch_size: bool, default = False
        Whether to keep all batches the same size, batch_size, and pad
        with zeros if necessary, or have batches of different sizes if
        there aren't enough sequences to group together.

    Returns:
    --------
    x_list: ndarray,
        The batched arrays. x_list[i] is the i'th
        batch, having between 1 and batch_size items, each of length
        lengths[i].
    y_list: ndarray
        The batched labels corresponding to sequences in x_list.
        y_list[i] has the same length as x_list[i].
    """
    if isinstance(arrays_x, list):
        # Object array so variable-length sub-arrays survive intact.
        arrays_x = np.array(arrays_x, dtype=object)
    if shuffle:
        arrays_x, y = shuffle_multiple(arrays_x, y, numpy_indexing=False)
    # Per-instance shape beyond the (variable) time axis 0.
    fixed_shape = arrays_x[0].shape[1:]
    lengths = [x.shape[0] for x in arrays_x]
    unique_len = np.unique(lengths)
    x_dtype = arrays_x[0].dtype
    y_dtype = y.dtype
    xlist = []
    ylist = []
    for length in unique_len:
        # `length` is a NumPy scalar, so `lengths == length` broadcasts
        # over the Python list and yields a boolean mask.
        idx = np.nonzero(lengths == length)[0]
        for b in range(0, len(idx), batch_size):
            batch_idx = idx[b : b + batch_size]
            size = batch_size if uniform_batch_size else len(batch_idx)
            # Zero-filled so a short final batch is zero-padded when
            # uniform_batch_size is set.
            _x = np.zeros((size, length) + fixed_shape, dtype=x_dtype)
            _y = np.zeros(size, dtype=y_dtype)
            # NOTE(review): when uniform_batch_size is True and the last
            # batch is short, this assigns a (len(batch_idx),) array into
            # a (batch_size,) slice and raises ValueError -- likely
            # should be `_y[:len(batch_idx)]`. Confirm intended usage.
            _y[:size] = y[batch_idx]
            for i, j in enumerate(batch_idx):
                _x[i, ...] = arrays_x[j]
            xlist.append(_x)
            ylist.append(_y)
    x_batch = np.array(xlist, dtype=object)
    y_batch = np.array(ylist, dtype=y_dtype if uniform_batch_size else object)
    return x_batch, y_batch
class TrainValidation(BaseCrossValidator):
    """Validation method that uses the training set as validation set."""

    def split(self, X, y, groups):
        # A single "fold" where train and validation indices coincide.
        indices = np.arange(len(X))
        yield indices, indices.copy()

    def get_n_splits(self, X, y, groups):
        return 1
class ShuffleGroupKFold(_BaseKFold):
    """Like GroupKFold but with random combinations of groups instead of
    deterministic combinations based on group size. This is most useful
    if you have groups of near equal size, and you want group k-fold CV,
    where k divides n_groups.

    Note: If shuffle=False, this does not behave identical to
    GroupKFold, but rather splits groups in sorted order (as returned by
    `numpy.unique()`).
    """

    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)

    def _iter_test_indices(self, X, y, groups):
        # Yields, per fold, the sample indices whose group falls in that
        # fold. Groups are partitioned into n_splits contiguous chunks of
        # (near-)equal group count.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        # Map each sample's group label to an integer in [0, n_groups).
        unique_groups, groups = np.unique(groups, return_inverse=True)
        n_groups = len(unique_groups)
        if self.n_splits > n_groups:
            raise ValueError(
                "Cannot have number of splits n_splits=%d greater"
                " than the number of groups: %d." % (self.n_splits, n_groups)
            )
        # Pairs of start,end indices of groups each of n folds
        fold_idx = np.linspace(0, n_groups, self.n_splits + 1, dtype=int)
        group_order = np.arange(n_groups)
        if self.shuffle:
            # Shuffle order groups appear in folds
            group_order = np.random.default_rng(self.random_state).permutation(
                group_order
            )
        # Mapping from group index to fold index
        group_to_fold = np.zeros(n_groups)
        for fold, (g1, g2) in enumerate(zip(fold_idx[:-1], fold_idx[1:])):
            group_to_fold[group_order[g1:g2]] = fold
        # Fold assignment per sample, via its group index.
        indices = group_to_fold[groups]
        for f in range(self.n_splits):
            yield np.where(indices == f)[0]
class ValidationSplit(BaseCrossValidator):
    """Validation method that uses a pre-defined validation set."""

    def __init__(self, valid_idx: Union[List[int], np.ndarray]):
        # Indices of the instances reserved for validation.
        self.valid_idx = valid_idx

    def split(self, X, y, groups):
        all_idx = np.arange(len(X))
        # Train on everything not in the validation set.
        train_idx = all_idx[~np.isin(all_idx, self.valid_idx)]
        yield train_idx, self.valid_idx

    def get_n_splits(self, X, y, groups):
        return 1
def get_cv_splitter(
    group: bool,
    k: int,
    test_size: float = 0.2,
    shuffle: bool = False,
    random_state: int = None,
) -> BaseCrossValidator:
    """Gets an appropriate cross-validation splitter for the given
    number of folds and groups, or a single random split.

    Parameters:
    -----------
    group: bool
        Whether to split over pre-defined groups of instances.
    k: int
        If k > 1 then do k-fold CV. If k == 1 then do one random
        split. If k = -1 then do leave-one-out. If k == 0 then use the
        whole train set as validation split.
    test_size: float
        The size of the test set when k == 1 (one random split).
    shuffle: bool
        Whether to shuffle when using k-fold for k > 1.
    random_state: int, optional
        The random state to set for splitters with shuffling behaviour.

    Returns:
    --------
    splitter: BaseCrossValidator
        Cross-validation splitter that has `split()` and
        `get_n_splits()` methods.
    """
    # TODO: Leave-|k|-out for k < 0?
    if k == 0:
        # Degenerate case: validate on the training data itself.
        return TrainValidation()
    if k == 1:
        # One random train/test split.
        split_cls = GroupShuffleSplit if group else StratifiedShuffleSplit
        return split_cls(1, test_size=test_size, random_state=random_state)
    if k > 1:
        # k-fold CV: grouped or stratified.
        if not group:
            return StratifiedKFold(k, shuffle=shuffle, random_state=random_state)
        if shuffle:
            return ShuffleGroupKFold(k, shuffle=shuffle, random_state=random_state)
        return GroupKFold(k)
    # k < 0: leave-one-out (or one group out).
    return LeaveOneGroupOut() if group else LeaveOneOut()
def group_transform(
    x: np.ndarray,
    groups: np.ndarray,
    transform: TransformerMixin,
    *,
    inplace: bool = False,
    **fit_params,
):
    """Per-group (offline) transformation (e.g. standardisation).

    Args:
    -----
    x: np.ndarray
        The data matrix to transform. Each x[i] must be an instance.
    groups: np.ndarray
        Groups assignment for each instance. It must be the case that
        len(groups) == len(x).
    transform:
        The transformation to apply. Must implement fit_transform().
    inplace: bool
        Whether to modify x in-place. Default is False so that a copy is
        made.
    **fit_params:
        Other keyword arguments to pass to the transform.fit() method.

    Returns:
    --------
    x: np.ndarray
        The modified data matrix with transformations applied to each
        group individually.
    """
    if not inplace:
        x = x.copy()
    unique_groups = np.unique(groups)
    for g_id in unique_groups:
        # Flatten this group's instances to 2-D, fit+apply the
        # transform, then restore the original instance structure.
        flat, slices = inst_to_flat(x[groups == g_id])
        flat = transform.fit_transform(flat, y=None, **fit_params)
        if len(x.shape) == 1 and len(slices) == 1:
            # Special case to avoid issues for vlen arrays
            # (assigning one 2-D array into a single object slot would
            # otherwise be broadcast element-wise by NumPy).
            _arr = np.empty(1, dtype=object)
            _arr[0] = flat
            x[groups == g_id] = _arr
            continue
        x[groups == g_id] = flat_to_inst(flat, slices)
    return x
def instance_transform(
    x: np.ndarray, transform: TransformerMixin, *, inplace: bool = False, **fit_params
):
    """Per-instance transformation (e.g. standardisation).

    Args:
    -----
    x: np.ndarray
        The data matrix to transform. Each x[i] must be a 2D instance.
    transform:
        The transformation to apply. Must implement fit_transform().
    inplace: bool
        Whether to modify x in-place. Default is False so that a copy is
        made.
    **fit_params:
        Other keyword arguments to pass to the transform.fit() method.

    Returns:
    --------
    x: np.ndarray
        The modified data matrix with transformations applied to each
        instance individually.
    """
    # Treat every instance as its own group and delegate.
    groups = np.arange(len(x))
    return group_transform(x, groups, transform, inplace=inplace, **fit_params)
# Type alias for a scoring callable taking two label arrays (argument
# order depends on the caller) and returning a float score.
ScoreFunction = Callable[[np.ndarray, np.ndarray], float]
def get_scores(
    scoring: Union[str, List[str], Dict[str, ScoreFunction], Callable[..., float]],
    y_pred: np.ndarray,
    y_true: np.ndarray,
) -> Dict[str, Any]:
    """Get dictionary of scores for predictions.

    Parameters:
    -----------
    scoring: str, list, dict or callable
        Score(s) to calculate. This takes the same form as for
        scikit-learn's cross_val_* methods.
    y_pred: array-like
        Predictions.
    y_true: array-like
        Ground truth.

    Returns:
    --------
    scores: dict
        A dictionary mapping score names to corresponding score(s).
    """

    class _FixedPredictor:
        """Estimator stand-in returning precomputed predictions, so
        scorers don't trigger repeated real predict() calls.
        """

        def __init__(self, y_pred):
            self.y_pred = y_pred

        def predict(self, x, **kwargs):
            return self.y_pred

        # Scorers may request probabilities or decision values; all are
        # served from the same precomputed array.
        predict_proba = predict
        decision_function = predict

    y_pred = np.array(y_pred)
    y_true = np.array(y_true)
    # Normalise all scoring specs to a name -> scorer dict.
    if isinstance(scoring, str):
        scoring = {"score": get_scorer(scoring)}
    elif isinstance(scoring, list):
        scoring = {name: get_scorer(name) for name in scoring}
    elif callable(scoring):
        scoring = {"score": scoring}
    return _score(_FixedPredictor(y_pred), None, y_true, scoring)
def get_pipeline_params(params: Dict[str, Any], pipeline: Pipeline):
    """Modifies parameter names to pass to a Pipeline instance's `fit()`
    method.

    Parameters:
    -----------
    params: dict
        Parameters to pass to Pipeline.fit(). All parameters are passed
        to all estimators in the pipeline so long as they are valid.
    pipeline: Pipeline
        The pipeline instance.

    Returns:
    --------
    new_params: dict
        Parameters filtered and prepended with pipeline step names and
        double underscore (e.g. groups -> clf__groups).
    """
    new_params = {}
    for step_name, estimator in pipeline.named_steps.items():
        if estimator is None or estimator == "passthrough":
            # Disabled steps accept no fit parameters.
            continue
        valid = filter_kwargs(params, estimator.fit)
        new_params.update({f"{step_name}__{key}": val for key, val in valid.items()})
    return new_params
class GroupTransformWrapper(TransformerMixin, BaseEstimator):
    """Transform that modifies groups independently without storing
    parameters.
    """

    def __init__(self, transformer: TransformerMixin) -> None:
        # Stateless transform re-fitted on every group in transform().
        self.transformer = transformer

    def fit(self, X, y=None, **fit_params):
        # Nothing to learn; per-group fitting happens at transform time.
        return self

    def transform(self, X, groups=None, **fit_params):
        return group_transform(X, groups, self.transformer, inplace=False, **fit_params)
class InstanceTransformWrapper(TransformerMixin, BaseEstimator):
    """Transform that modifies instances independently without storing
    parameters.
    """

    def __init__(self, transformer: TransformerMixin) -> None:
        # Stateless transform re-fitted on every instance in transform().
        self.transformer = transformer

    def fit(self, X, y=None, **fit_params):
        # Nothing to learn; per-instance fitting happens at transform time.
        return self

    def transform(self, X, **fit_params):
        # BUG FIX: was `raise instance_transform(...)`, which made every
        # transform() call raise (TypeError: exceptions must derive from
        # BaseException) instead of returning the transformed data.
        return instance_transform(X, self.transformer, inplace=False, **fit_params)
class SequenceTransform(TransformerMixin, BaseEstimator):
    """Transform designed to process sequences of vectors."""

    # Marker base class: concrete sequence transforms subclass this.
    pass
class SequenceTransformWrapper(SequenceTransform):
    """Wrapper around a scikit-learn transform that can process
    sequences of vectors.

    Args:
    -----
    transformer:
        An object which implements the fit_transform() method on a
        collection of 1D vectors.
    method: str
        The method to manipuate the sequence into 1D vectors, one of
        {"feature", "global"}. If "feature" then each feature column of
        the concatenated (2D) input is transformed independently. If
        "global" then the transformer is fitted over the whole input
        including all feature columns.
    """

    def __init__(self, transformer: TransformerMixin, method: str):
        VALID_METHODS = {"feature", "global"}
        self.transformer = transformer
        if method not in VALID_METHODS:
            raise ValueError(f"method '{method}' not in {VALID_METHODS}.")
        self.method = method

    def _to_2d(self, flat_x):
        # "global" treats every value as one big single-feature column;
        # "feature" keeps the feature columns as-is.
        if self.method == "global":
            return flat_x.reshape((-1, 1))
        return flat_x

    def fit(self, X, y=None, **fit_params):
        flat_x, _ = inst_to_flat(X)
        self.transformer.fit(self._to_2d(flat_x), y=y, **fit_params)
        return self

    def transform(self, X, **fit_params):
        flat_x, slices = inst_to_flat(X)
        out = self.transformer.transform(self._to_2d(flat_x), **fit_params)
        if self.method == "global":
            # Undo the single-column view before restoring instances.
            out = out.reshape(flat_x.shape)
        return flat_to_inst(out, slices)
from os import PathLike
from pathlib import Path
from typing import (
Any,
Callable,
Container,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
overload,
)
import click
import joblib
import numpy as np
import tqdm
import yaml
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import get_scorer
from sklearn.model_selection import (
BaseCrossValidator,
GroupKFold,
GroupShuffleSplit,
LeaveOneGroupOut,
LeaveOneOut,
StratifiedKFold,
StratifiedShuffleSplit,
)
from sklearn.model_selection._split import _BaseKFold
from sklearn.model_selection._validation import _score
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import check_array
PathOrStr = Union[PathLike, str]
# Class adapted from user394430's answer here:
# https://stackoverflow.com/a/61900501/10044861
# Licensed under CC BY-SA 4.0
class TqdmParallel(joblib.Parallel):
    """Drop-in replacement for ``joblib.Parallel`` that reports task
    completion through a tqdm progress bar.
    """

    def __init__(
        self,
        total: int = 1,
        desc: str = "",
        unit: str = "it",
        leave: bool = True,
        **kwargs,
    ):
        self.total = total
        self.tqdm_args = dict(desc=desc, unit=unit, leave=leave, disable=None)
        # Silence joblib's own console output; tqdm does the reporting.
        kwargs["verbose"] = 0
        super().__init__(**kwargs)

    def __call__(self, iterable):
        with tqdm.tqdm(total=self.total, **self.tqdm_args) as self.pbar:
            return super().__call__(iterable)

    def print_progress(self):
        # joblib calls this after each completed task; sync the bar
        # instead of printing.
        self.pbar.n = self.n_completed_tasks
        self.pbar.refresh()
class PathlibPath(click.Path):
    """`click.Path` variant whose converted value is a `pathlib.Path`
    instead of a string.
    """

    def convert(self, value, param, ctx) -> Path:
        converted = super().convert(value, param, ctx)
        return Path(converted)
# Generic type parameters: T1 is the element type consumed by a mapped
# callable, T2 the type it produces (see itmap below).
T1 = TypeVar("T1")
T2 = TypeVar("T2")
def itmap(s: Callable[[T1], T2]):
    """Lift ``s`` into a map function that preserves container shape.

    The returned function applies ``s`` element-wise to a list or tuple
    (returning the same container type) and directly to any other value.
    """

    @overload
    def _map(x: T1) -> T2:
        ...

    @overload
    def _map(x: List[T1]) -> List[T2]:
        ...

    @overload
    def _map(x: Tuple[T1, ...]) -> Tuple[T2, ...]:
        ...

    def _map(x):
        if not isinstance(x, (list, tuple)):
            return s(x)
        return type(x)(s(item) for item in x)

    return _map
def ordered_intersect(a: Iterable, b: Container) -> List:
    """Return the elements of `a` that are also in `b`, preserving the
    order they appear in `a`.
    """
    return list(filter(b.__contains__, a))
def filter_kwargs(kwargs: Dict[str, Any], method: Callable) -> Dict[str, Any]:
    """Removes incompatible keyword arguments. This ignores any **kwargs
    catchall in method signature, and only returns args specifically
    present as keywords in the method signature which are also not
    positional only.

    Args:
    -----
    kwargs: dict
        Keyword arguments to pass to method.
    method: callable
        The method for which to check valid parameters.

    Returns:
    --------
    kwargs: dict
        Filtered keyword arguments.
    """
    import inspect

    meth_params = inspect.signature(method).parameters
    # Keep only names that are real (non positional-only) parameters of
    # `method`; a VAR_KEYWORD catchall does not whitelist arbitrary keys.
    return {
        key: val
        for key, val in kwargs.items()
        if key in meth_params
        and meth_params[key].kind != inspect.Parameter.POSITIONAL_ONLY
    }
def get_arg_mapping_multi(s: str) -> Dict[str, List[Any]]:
    """Given a string mapping from the command-line, returns a dict
    representing that mapping.

    The string form of the mapping is:
        key:value[,key:value]+
    Duplicate keys will be mapped to a list of values.

    Args:
    -----
    s: str
        String representing the mapping. It cannot contain spaces or
        shell symbols (unless escaped).

    Returns:
    --------
    mapping: dict
        A dictionary mapping keys to lists of values from the string.
    """
    mapping: Dict[str, List[str]] = {}
    for item in s.split(","):
        key, val = item.split(":")
        mapping.setdefault(key, []).append(val)
    return mapping
def get_arg_mapping(s: Union[Path, str]) -> Dict[str, Any]:
    """Given a mapping on the command-line, returns a dict representing
    that mapping. Mapping can be a string or a more complex YAML file.

    The string form of the mapping is:
        key:value[,key:value]+

    Args:
    -----
    s: PathLike or str
        String representing the mapping or path to YAML containing
        mapping. If a string, it cannot contain spaces or shell symbols
        (unless escaped).

    Returns:
    --------
    mapping: dict
        A dictionary mapping keys to values from the string.
    """
    if isinstance(s, Path) or Path(s).exists():
        # YAML file on disk; an empty document yields an empty mapping.
        with open(s) as fid:
            return yaml.safe_load(fid) or {}
    multi = get_arg_mapping_multi(s)
    # Collapse singleton lists so unique keys map to a plain value.
    return {key: vals[0] if len(vals) == 1 else vals for key, vals in multi.items()}
def flat_to_inst(x: np.ndarray, slices: Union[np.ndarray, List[int]]) -> np.ndarray:
    """Takes a concatenated 2D data array and converts it to either a
    contiguous 2D/3D array or a variable-length 3D array, with one
    feature vector/matrix per instance.
    """
    n_inst = len(slices)
    if len(x) == n_inst:
        # One row per instance: already a 2-D contiguous array.
        return x
    if all(length == slices[0] for length in slices):
        # Equal-length instances: reshape into a contiguous 3-D array.
        assert len(x) % n_inst == 0
        return x.reshape(n_inst, len(x) // n_inst, x[0].shape[-1])
    # Unequal lengths: split into a 1-D object array of 2-D arrays.
    boundaries = np.cumsum(slices)[:-1]
    return np.array(np.split(x, boundaries, axis=0), dtype=object)
def inst_to_flat(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """The inverse of flat_to_inst(). Takes an instance matrix and
    converts to a "flattened" 2D matrix plus per-instance lengths.
    """
    if len(x.shape) == 2:
        # Already flat: each instance occupies exactly one row.
        return x, np.ones(len(x), dtype=int)
    slices = np.array([len(inst) for inst in x])
    if len(x.shape) == 3:
        flat = x.reshape(sum(slices), x.shape[2])
    else:
        # Object array of variable-length instances.
        flat = np.concatenate(x)
    assert sum(slices) == len(flat)
    return flat, slices
def check_3d(arrays: Union[Sequence[np.ndarray], np.ndarray]):
    """Checks that the input is 3-D overall: either a contiguous 3-D
    array or a collection of 2-D arrays. Raises ValueError otherwise.
    """
    for arr in arrays:
        if len(arr.shape) != 2:
            raise ValueError("arrays must be 3D (contiguous or vlen).")
def frame_arrays(
    arrays: Union[List[np.ndarray], np.ndarray],
    frame_size: int = 640,
    frame_shift: int = 160,
    num_frames: Optional[int] = None,
):
    """Creates sequences of frames from the given arrays. Each input
    array is a 1-D or L x 1 time domain signal. Each corresponding
    output array is a 2-D array of frames of shape (num_frames,
    frame_size).
    """
    # TODO: Make option for vlen output
    if num_frames is None:
        # Enough frames to cover the longest signal.
        longest = max(len(x) for x in arrays)
        num_frames = (longest - frame_size) // frame_shift + 1
    framed = []
    for signal in arrays:
        signal = np.squeeze(signal)
        # Zero-filled so short signals / partial frames are zero-padded.
        frames = np.zeros((num_frames, frame_size), dtype=np.float32)
        for frame_idx in range(num_frames):
            start = frame_idx * frame_shift
            if start >= len(signal):
                break
            valid = min(len(signal) - start, frame_size)
            frames[frame_idx, :valid] = signal[start : start + frame_size]
        framed.append(frames)
    out = np.array(framed)
    assert tuple(out.shape) == (len(arrays), num_frames, frame_size)
    return out
def pad_arrays(arrays: Union[List[np.ndarray], np.ndarray], pad: int = 32):
    """Pads each array to the nearest multiple of `pad` greater than the
    array size. Assumes axis 0 of each sub-array, or axis 1 of x, is
    the time axis.
    """

    def _extra(n: int) -> int:
        # Zero-padding needed to round n up to a multiple of `pad`.
        return int(np.ceil(n / pad)) * pad - n

    if isinstance(arrays, np.ndarray) and len(arrays.shape) > 1:
        # Contiguous batch: pad along the time axis (axis 1) only.
        widths = ((0, 0), (0, _extra(arrays.shape[1])))
        widths += tuple((0, 0) for _ in arrays.shape[2:])
        return np.pad(arrays, widths)
    padded = [np.pad(a, ((0, _extra(a.shape[0])), (0, 0))) for a in arrays]
    if isinstance(arrays, np.ndarray):
        if all(a.shape == padded[0].shape for a in padded):
            return np.array(padded)
        return np.array(padded, dtype=object)
    return padded
def clip_arrays(
    arrays: Union[List[np.ndarray], np.ndarray], length: int, copy: bool = True
):
    """Clips each array to the specified maximum length."""

    def _clip(a: np.ndarray) -> np.ndarray:
        clipped = a[:length]
        return clipped.copy() if copy else clipped

    if not isinstance(arrays, np.ndarray):
        return [_clip(a) for a in arrays]
    if len(arrays.shape) > 1:
        # Contiguous batch: clip along the time axis directly.
        clipped = arrays[:, :length, ...]
        return clipped.copy() if copy else clipped
    new_arrays = [_clip(a) for a in arrays]
    if all(a.shape == new_arrays[0].shape for a in new_arrays):
        # All equal length after clipping: return a contiguous array.
        return np.stack(new_arrays)
    return np.array(new_arrays, dtype=object)
def transpose_time(arrays: Union[List[np.ndarray], np.ndarray]):
    """Transpose the time and feature axis of each array. Requires each
    array be 2-D.

    NOTE: This function modifies the arrays in-place.
    """
    # Same validation as check_3d(): 3-D contiguous or collection of 2-D.
    if any(len(a.shape) != 2 for a in arrays):
        raise ValueError("arrays must be 3D (contiguous or vlen).")
    if isinstance(arrays, np.ndarray) and len(arrays.shape) == 3:
        arrays = arrays.transpose(0, 2, 1)
    else:
        for i, arr in enumerate(arrays):
            arrays[i] = arr.transpose()
    assert all(a.shape[0] == arrays[0].shape[0] for a in arrays)
    return arrays
def shuffle_multiple(*arrays: Union[np.ndarray, Sequence], numpy_indexing: bool = True):
    """Shuffles multiple arrays or lists in sync. Useful for shuffling the data
    and labels in a dataset separately while keeping them synchronised.

    Parameters:
    -----------
    arrays, iterable of array-like
        The arrays to shuffle. They must all have the same size of first
        dimension.
    numpy_indexing: bool, default = True
        Whether to use NumPy-style indexing or list comprehension.

    Returns:
    shuffled_arrays: iterable of array-like
        The shuffled arrays.
    """
    first_len = len(arrays[0])
    if any(len(a) != first_len for a in arrays):
        raise ValueError("Not all arrays have equal first dimension.")
    # One permutation shared by all arrays keeps them synchronised.
    perm = np.random.default_rng().permutation(first_len)
    if numpy_indexing:
        return [a[perm] for a in arrays]
    return [[a[i] for i in perm] for a in arrays]
def batch_arrays(
arrays_x: Union[np.ndarray, List[np.ndarray]],
y: np.ndarray,
batch_size: int = 32,
shuffle: bool = True,
uniform_batch_size: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Batches a list of arrays of different sizes, grouping them by
size. This is designed for use with variable length sequences. Each
batch will have a maximum of batch_size arrays, but may have less if
there are fewer arrays of the same length. It is recommended to use
the pad_arrays() method of the LabelledDataset instance before using
this function, in order to quantise the lengths.
Parameters:
-----
arrays_x: list of ndarray
A list of N-D arrays, possibly of different lengths, to batch.
The assumption is that all the arrays have the same rank and
only axis 0 differs in length.
y: ndarray
The labels for each of the arrays in arrays_x.
batch_size: int
Arrays will be grouped together by size, up to a maximum of
batch_size, after which a new batch will be created. Thus each
batch produced will have between 1 and batch_size items.
shuffle: bool, default = True
Whether to shuffle array order in a batch.
uniform_batch_size: bool, default = False
Whether to keep all batches the same size, batch_size, and pad
with zeros if necessary, or have batches of different sizes if
there aren't enough sequences to group together.
Returns:
--------
x_list: ndarray,
The batched arrays. x_list[i] is the i'th
batch, having between 1 and batch_size items, each of length
lengths[i].
y_list: ndarray
The batched labels corresponding to sequences in x_list.
y_list[i] has the same length as x_list[i].
"""
if isinstance(arrays_x, list):
arrays_x = np.array(arrays_x, dtype=object)
if shuffle:
arrays_x, y = shuffle_multiple(arrays_x, y, numpy_indexing=False)
fixed_shape = arrays_x[0].shape[1:]
lengths = [x.shape[0] for x in arrays_x]
unique_len = np.unique(lengths)
x_dtype = arrays_x[0].dtype
y_dtype = y.dtype
xlist = []
ylist = []
for length in unique_len:
idx = np.nonzero(lengths == length)[0]
for b in range(0, len(idx), batch_size):
batch_idx = idx[b : b + batch_size]
size = batch_size if uniform_batch_size else len(batch_idx)
_x = np.zeros((size, length) + fixed_shape, dtype=x_dtype)
_y = np.zeros(size, dtype=y_dtype)
_y[:size] = y[batch_idx]
for i, j in enumerate(batch_idx):
_x[i, ...] = arrays_x[j]
xlist.append(_x)
ylist.append(_y)
x_batch = np.array(xlist, dtype=object)
y_batch = np.array(ylist, dtype=y_dtype if uniform_batch_size else object)
return x_batch, y_batch
class TrainValidation(BaseCrossValidator):
    """Validation method that uses the training set as validation set."""

    def split(self, X, y, groups):
        # One "fold": train and validation are both the full index range
        # (two separate arrays so callers may mutate them independently).
        n = len(X)
        yield np.arange(n), np.arange(n)

    def get_n_splits(self, X, y, groups):
        return 1
class ShuffleGroupKFold(_BaseKFold):
    """Like GroupKFold but with random combinations of groups instead of
    deterministic combinations based on group size. This is most useful
    if you have groups of near equal size, and you want group k-fold CV,
    where k divides n_groups.
    Note: If shuffle=False, this does not behave identical to
    GroupKFold, but rather splits groups in sorted order (as returned by
    `numpy.unique()`).
    """
    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
    def _iter_test_indices(self, X, y, groups):
        # Hook invoked by sklearn's split() machinery; yields the test
        # indices for each of the n_splits folds in turn.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        # Re-encode arbitrary group labels as integers 0..n_groups-1.
        unique_groups, groups = np.unique(groups, return_inverse=True)
        n_groups = len(unique_groups)
        if self.n_splits > n_groups:
            raise ValueError(
                "Cannot have number of splits n_splits=%d greater"
                " than the number of groups: %d." % (self.n_splits, n_groups)
            )
        # Pairs of start,end indices of groups each of n folds
        fold_idx = np.linspace(0, n_groups, self.n_splits + 1, dtype=int)
        group_order = np.arange(n_groups)
        if self.shuffle:
            # Shuffle order groups appear in folds
            group_order = np.random.default_rng(self.random_state).permutation(
                group_order
            )
        # Mapping from group index to fold index
        group_to_fold = np.zeros(n_groups)
        for fold, (g1, g2) in enumerate(zip(fold_idx[:-1], fold_idx[1:])):
            group_to_fold[group_order[g1:g2]] = fold
        # Fold assignment per instance, via each instance's group.
        indices = group_to_fold[groups]
        for f in range(self.n_splits):
            yield np.where(indices == f)[0]
class ValidationSplit(BaseCrossValidator):
    """Validation method that uses a pre-defined validation set.

    `valid_idx` holds the indices of the validation instances; every
    other index goes to the training set.
    """

    def __init__(self, valid_idx: Union[List[int], np.ndarray]):
        self.valid_idx = valid_idx

    def split(self, X, y, groups):
        all_idx = np.arange(len(X))
        is_valid = np.isin(all_idx, self.valid_idx)
        yield all_idx[~is_valid], self.valid_idx

    def get_n_splits(self, X, y, groups):
        return 1
def get_cv_splitter(
    group: bool,
    k: int,
    test_size: float = 0.2,
    shuffle: bool = False,
    random_state: int = None,
) -> BaseCrossValidator:
    """Gets an appropriate cross-validation splitter for the given
    number of folds and groups, or a single random split.

    Parameters:
    -----------
    group: bool
        Whether to split over pre-defined groups of instances.
    k: int
        If k > 1 then do k-fold CV. If k == 1 then do one random
        split. If k = -1 then do leave-one-out. If k == 0 then use the
        whole train set as validation split.
    test_size: float
        The size of the test set when k == 1 (one random split).
    shuffle: bool
        Whether to shuffle when using k-fold for k > 1.
    random_state: int, optional
        The random state to set for splitters with shuffling behaviour.

    Returns:
    --------
    splitter: BaseCrossValidator
        Cross-validation splitter that has `split()` and
        `get_n_splits()` methods.
    """
    # TODO: Leave-|k|-out for k < 0?
    if k == 0:
        return TrainValidation()
    if k > 1:
        # k-fold CV, grouped or stratified.
        if not group:
            return StratifiedKFold(k, shuffle=shuffle, random_state=random_state)
        if shuffle:
            return ShuffleGroupKFold(k, shuffle=shuffle, random_state=random_state)
        return GroupKFold(k)
    if k == 1:
        # One random train/test split.
        cls = GroupShuffleSplit if group else StratifiedShuffleSplit
        return cls(1, test_size=test_size, random_state=random_state)
    # k < 0: leave-one-(group-)out.
    return LeaveOneGroupOut() if group else LeaveOneOut()
def group_transform(
    x: np.ndarray,
    groups: np.ndarray,
    transform: TransformerMixin,
    *,
    inplace: bool = False,
    **fit_params,
):
    """Per-group (offline) transformation (e.g. standardisation).
    Args:
    -----
    x: np.ndarray
        The data matrix to transform. Each x[i] must be an instance.
    groups: np.ndarray
        Groups assignment for each instance. It must be the case that
        len(groups) == len(x).
    transform:
        The transformation to apply. Must implement fit_transform().
    inplace: bool
        Whether to modify x in-place. Default is False so that a copy is
        made.
    **fit_params:
        Other keyword arguments to pass to the transform.fit() method.
    Returns:
    --------
    x: np.ndarray
        The modified data matrix with transformations applied to each
        group individually.
    """
    if not inplace:
        x = x.copy()
    unique_groups = np.unique(groups)
    for g_id in unique_groups:
        # Flatten this group's instances into one matrix so the transform
        # is fitted over all of the group's data at once, then re-split.
        # (inst_to_flat/flat_to_inst are defined elsewhere in this file.)
        flat, slices = inst_to_flat(x[groups == g_id])
        flat = transform.fit_transform(flat, y=None, **fit_params)
        if len(x.shape) == 1 and len(slices) == 1:
            # Special case to avoid issues for vlen arrays
            # (assigning a single flat array into an object array would
            # otherwise be broadcast instead of stored as one element).
            _arr = np.empty(1, dtype=object)
            _arr[0] = flat
            x[groups == g_id] = _arr
            continue
        x[groups == g_id] = flat_to_inst(flat, slices)
    return x
def instance_transform(
    x: np.ndarray, transform: TransformerMixin, *, inplace: bool = False, **fit_params
):
    """Per-instance transformation (e.g. standardisation).

    Args:
    -----
    x: np.ndarray
        The data matrix to transform. Each x[i] must be a 2D instance.
    transform:
        The transformation to apply. Must implement fit_transform().
    inplace: bool
        Whether to modify x in-place. Default is False so that a copy is
        made.
    **fit_params:
        Other keyword arguments to pass to the transform.fit() method.

    Returns:
    --------
    x: np.ndarray
        The modified data matrix with transformations applied to each
        instance individually.
    """
    # Giving every instance its own group reduces this to group_transform.
    per_instance_groups = np.arange(len(x))
    return group_transform(
        x, per_instance_groups, transform, inplace=inplace, **fit_params
    )
# Signature of a scoring callable as used by get_scores(); presumably
# (y_true, y_pred) -> float to match sklearn scorers — TODO confirm order.
ScoreFunction = Callable[[np.ndarray, np.ndarray], float]
def get_scores(
    scoring: Union[str, List[str], Dict[str, ScoreFunction], Callable[..., float]],
    y_pred: np.ndarray,
    y_true: np.ndarray,
) -> Dict[str, Any]:
    """Get dictionary of scores for predictions.
    Parameters:
    -----------
    scoring: str, list, dict or callable
        Score(s) to calculate. This takes the same form as for
        scikit-learn's cross_val_* methods.
    y_pred: array-like
        Predictions.
    y_true: array-like
        Ground truth.
    Returns:
    --------
    scores: dict
        A dictionary mapping score names to corresponding score(s).
    """
    class DummyEstimator:
        """Class that implements a dummy estimator for scoring, to avoid
        repeated invocations of `predict()` etc.
        """
        def __init__(self, y_pred):
            self.y_pred = y_pred
        # All three access paths return the cached predictions, so any
        # scorer (label-, probability- or margin-based) gets the same data.
        def predict(self, x, **kwargs):
            return self.y_pred
        def predict_proba(self, x, **kwargs):
            return self.y_pred
        def decision_function(self, x, **kwargs):
            return self.y_pred
    y_pred = np.array(y_pred)
    y_true = np.array(y_true)
    dummy = DummyEstimator(y_pred)
    # Normalise the `scoring` argument into a {name: scorer} dict.
    if isinstance(scoring, str):
        scoring = {"score": get_scorer(scoring)}
    elif callable(scoring):
        scoring = {"score": scoring}
    elif isinstance(scoring, list):
        scoring = {x: get_scorer(x) for x in scoring}
    # NOTE(review): _score is a private sklearn helper whose signature has
    # changed between versions — confirm against the pinned sklearn release.
    return _score(dummy, None, y_true, scoring)
def get_pipeline_params(params: Dict[str, Any], pipeline: Pipeline):
    """Modifies parameter names to pass to a Pipeline instance's `fit()`
    method.

    Parameters:
    -----------
    params: dict
        Parameters to pass to Pipeline.fit(). All parameters are passed
        to all estimators in the pipeline so long as they are valid.
    pipeline: Pipeline
        The pipeline instance.

    Returns:
    --------
    new_params: dict
        Parameters filtered and prepended with pipeline step names and
        double underscore (e.g. groups -> clf__groups).
    """
    new_params: Dict[str, Any] = {}
    for step_name, estimator in pipeline.named_steps.items():
        # Skip disabled steps.
        if estimator is None or estimator == "passthrough":
            continue
        # Keep only the params that this step's fit() actually accepts.
        accepted = filter_kwargs(params, estimator.fit)
        for key, value in accepted.items():
            new_params[f"{step_name}__{key}"] = value
    return new_params
class GroupTransformWrapper(TransformerMixin, BaseEstimator):
    """Transform that modifies groups independently without storing
    parameters.
    """

    def __init__(self, transformer: TransformerMixin) -> None:
        self.transformer = transformer

    def fit(self, X, y=None, **fit_params):
        # Stateless wrapper: fitting happens per-group inside transform().
        return self

    def transform(self, X, groups=None, **fit_params):
        return group_transform(
            X, groups, self.transformer, inplace=False, **fit_params
        )
class InstanceTransformWrapper(TransformerMixin, BaseEstimator):
    """Transform that modifies instances independently without storing
    parameters.
    """
    def __init__(self, transformer: TransformerMixin) -> None:
        self.transformer = transformer
    def fit(self, X, y=None, **fit_params):
        # Stateless wrapper: fitting happens per-instance in transform().
        return self
    def transform(self, X, **fit_params):
        # BUG FIX: was `raise instance_transform(...)` — attempting to
        # raise the transformed array as an exception (TypeError at
        # runtime). The sibling GroupTransformWrapper correctly returns.
        return instance_transform(X, self.transformer, inplace=False, **fit_params)
class SequenceTransform(TransformerMixin, BaseEstimator):
    """Transform designed to process sequences of vectors."""
    # Marker base class: concrete subclasses implement fit()/transform()
    # for variable-length sequence input.
    pass
class SequenceTransformWrapper(SequenceTransform):
    """Wrapper around a scikit-learn transform that can process
    sequences of vectors.
    Args:
    -----
    transformer:
        An object which implements the fit_transform() method on a
        collection of 1D vectors.
    method: str
        The method to manipuate the sequence into 1D vectors, one of
        {"feature", "global"}. If "feature" then each feature column of
        the concatenated (2D) input is transformed independently. If
        "global" then the transformer is fitted over the whole input
        including all feature columns.
    """
    def __init__(self, transformer: TransformerMixin, method: str):
        VALID_METHODS = {"feature", "global"}
        self.transformer = transformer
        if method not in VALID_METHODS:
            raise ValueError(f"method '{method}' not in {VALID_METHODS}.")
        self.method = method
    def fit(self, X, y=None, **fit_params):
        # Concatenate all sequences into one flat matrix before fitting
        # (inst_to_flat is defined elsewhere in this file).
        flat_x, _ = inst_to_flat(X)
        if self.method == "feature":
            self.transformer.fit(flat_x, y=y, **fit_params)
        elif self.method == "global":
            # Single-column reshape: fit over every scalar value at once.
            self.transformer.fit(flat_x.reshape((-1, 1)), y=y, **fit_params)
        return self
    def transform(self, X, **fit_params):
        flat_x, slices = inst_to_flat(X)
        if self.method == "feature":
            flat_x = self.transformer.transform(flat_x, **fit_params)
        elif self.method == "global":
            # Transform column-wise-flattened data, then restore shape.
            flat_shape = flat_x.shape
            flat_x = self.transformer.transform(
                flat_x.reshape((-1, 1)), **fit_params
            ).reshape(flat_shape)
        # Re-split the flat matrix back into per-sequence arrays.
        return flat_to_inst(flat_x, slices)
__doc__ = """
Script to use jellyfish to get kmer information
Input: fasta/fastq file
Output: kmer information, one of:
1. hash: binary hash of counts
2. stats: summary stats
3. dump: profile (kmer seq - count)
4. histo: histogram (count - abundance)
5. histo ranked: count, abundance, count*abundance, reverse-sum(abundance), reverse-sum(count*abundance), ratio-to-largest"""
import sys, os, glob, string, random, math, re
import itertools, subprocess
from optparse import OptionParser
from Bio import SeqIO
# Matches FASTA header lines ('>name') — used to detect record boundaries.
fa_re = re.compile('^>')
BUFFER = 2 * math.pow(1024, 3) # 2 Gb seq buffer
TYPES = ['fasta', 'fastq', 'hash']  # accepted values for --type
FORMATS = ['hash', 'stats', 'dump', 'histo']  # accepted values for --format
def run_cmd(cmd):
    """Run an external command, returning (stdout, stderr).

    Raises IOError containing the command line and the captured stderr
    when the process exits with a non-zero status.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode == 0:
        return stdout, stderr
    raise IOError("%s\n%s"%(" ".join(cmd), stderr))
def random_str(size=6):
    """Return a random alphanumeric string of the given length."""
    alphabet = string.ascii_letters + string.digits
    picks = [random.choice(alphabet) for _ in range(size)]
    return ''.join(picks)
def split_fasta(in_file, fhdl_set, max_size):
    """Stream a FASTA file into the open handles in fhdl_set, moving to
    the next handle whenever the current output would exceed max_size
    bytes. Splits only occur at record boundaries (header lines).
    """
    curr_size = 0   # bytes already flushed to the current output file
    curr_buff = 0   # bytes held in strbuffer, not yet flushed
    curr_file = 0   # index of the output handle currently in use
    strbuffer = ''
    for line in open(in_file):
        head = fa_re.match(line)
        # At a record boundary, roll over to the next output file once
        # the current one has reached its size budget.
        if head and ((curr_size + curr_buff) >= max_size):
            fhdl_set[curr_file].write(strbuffer)
            curr_size = 0
            curr_buff = 0
            curr_file += 1
            strbuffer = ''
        # Flush the in-memory buffer at record boundaries once it grows
        # past BUFFER, to bound memory use on large records/files.
        if head and (curr_buff > BUFFER):
            fhdl_set[curr_file].write(strbuffer)
            curr_size += curr_buff
            curr_buff = 0
            strbuffer = ''
        strbuffer += line
        curr_buff += len(line)
    # Flush whatever remains of the final record(s).
    if strbuffer != '':
        fhdl_set[curr_file].write(strbuffer)
def split_fastq(in_file, fhdl_set, max_size):
    """Stream a FASTQ file into the open handles in fhdl_set, moving to
    the next handle whenever the current output would exceed max_size
    bytes. Records (groups of 4 lines) are never split across files.
    """
    # BUG FIX: itertools.izip_longest exists only on Python 2; it was
    # renamed zip_longest in Python 3. Resolve whichever is available so
    # the script runs under either interpreter.
    zip_longest = getattr(itertools, 'izip_longest', None) or itertools.zip_longest
    curr_size = 0   # bytes already flushed to the current output file
    curr_buff = 0   # bytes held in strbuffer, not yet flushed
    curr_file = 0   # index of the output handle currently in use
    strbuffer = ''
    with open(in_file) as f:
        # Consume the file 4 lines at a time: one FASTQ record.
        for lines in zip_longest(*[f]*4):
            # Roll over to the next output file once the current one has
            # reached its size budget.
            if (curr_size + curr_buff) >= max_size:
                fhdl_set[curr_file].write(strbuffer)
                curr_size = 0
                curr_buff = 0
                curr_file += 1
                strbuffer = ''
            # Flush the in-memory buffer once it grows past BUFFER.
            if curr_buff > BUFFER:
                fhdl_set[curr_file].write(strbuffer)
                curr_size += curr_buff
                curr_buff = 0
                strbuffer = ''
            rec_str = ''.join(lines)
            strbuffer += rec_str
            curr_buff += len(rec_str)
    # Flush whatever remains of the final record(s).
    if strbuffer != '':
        fhdl_set[curr_file].write(strbuffer)
def split_seq_file(seq_file, max_size, seq_type, tmpdir):
    """Split a sequence file into pieces of at most max_size bytes.

    Returns the list of piece paths; if the file already fits within
    max_size, returns [seq_file] unchanged.
    """
    split_num = int(os.path.getsize(seq_file) / max_size) + 1
    if split_num == 1:
        return [seq_file]
    file_base = os.path.join(tmpdir, "%s.%s"%(random_str(), seq_type))
    # BUG FIX: these were bare map() calls; under Python 3 map() returns a
    # lazy iterator, which breaks the indexed access fhdl_set[curr_file]
    # in split_fasta/split_fastq and exhausts file_set on first traversal.
    file_set = ["%s.%d"%(file_base, i+1) for i in range(split_num)]
    fhdl_set = [open(path, 'w') for path in file_set]
    if seq_type == 'fasta':
        split_fasta(seq_file, fhdl_set, max_size)
    elif seq_type == 'fastq':
        split_fastq(seq_file, fhdl_set, max_size)
    for h in fhdl_set:
        h.close()
    return file_set
def merge_hash_set(hash_set, tmpdir):
    """Merge multiple jellyfish hash files into one via `jellyfish merge`.

    The input part files are deleted afterwards. A single-element set is
    returned as-is (no merge needed). Exits the process when the merge
    produced no output file.
    """
    if len(hash_set) == 1:
        return hash_set[0]
    merge_file = os.path.join(tmpdir, random_str()+'.js')
    merge_cmd = ['jellyfish', 'merge', '-o', merge_file]
    merge_cmd.extend(hash_set)
    _sout, _serr = run_cmd(merge_cmd)
    # The parts are no longer needed once merged.
    for h in hash_set:
        os.remove(h)
    if not os.path.isfile(merge_file):
        # NOTE(review): exits with status 0 on failure — confirm callers
        # rely on this; a non-zero exit code would be more conventional.
        sys.stderr.write("[error] jellyfish count returned no results")
        sys.stderr.write(_serr)
        sys.exit(0)
    return merge_file
def ranked_histo(data_str):
    """Augment a jellyfish histogram with ranked-plot columns.

    Input is the raw 'count abundance' text, one pair per line. Each
    output row is [count, abundance, count*abundance,
    reverse-cumsum(abundance), reverse-cumsum(count*abundance),
    ratio-of-cumulative-mass-to-total ("%.4f" string)], in the original
    line order.
    """
    cum_abund = 0
    cum_mass = 0
    matrix = []
    # Walk the histogram bottom-up so the running sums are reverse
    # cumulative totals.
    for row in reversed(data_str.strip().split("\n")):
        num, count = row.strip().split()
        mass = int(num) * int(count)
        cum_abund += int(count)
        cum_mass += mass
        matrix.append([num, count, mass, cum_abund, cum_mass])
    total_mass = cum_mass
    for entry in matrix:
        entry.append("%.4f" % (entry[4] * 1.0 / total_mass))
    # Restore the original (top-down) ordering.
    matrix.reverse()
    return matrix
def kmer_count(input, procs, length, size, count, tmpdir):
    """Run `jellyfish count` on `input` and merge the resulting hash
    parts into a single hash file, whose path is returned.
    (-C requests canonical counting per the jellyfish CLI.)
    """
    jf_base = os.path.join(tmpdir, random_str()+'.js.part')
    jf_cmd = ['jellyfish', 'count', '-C', '-t', str(procs), '-m', str(length), '-c', str(count), '-s', size, '-o', jf_base, input]
    _sout, _serr = run_cmd(jf_cmd)
    # jellyfish may emit multiple part files: <base>_0, <base>_1, ...
    parts = glob.glob(jf_base+'_*')
    return merge_hash_set(parts, tmpdir)
def main(args):
    """Command-line driver: parse options, build the kmer count hash
    (splitting oversized inputs first), then emit it in the requested
    output format. Returns a process exit code (0 success, 1 failure).
    """
    usage = "usage: %prog [options] -i <input file> -o <output file>"
    parser = OptionParser(usage)
    parser.add_option("-i", "--input", dest="input", default=None, help="Input file, sequence (fasta/fastq) or binary count hash.")
    parser.add_option("-o", "--output", dest="output", default=None, help="Output file.")
    parser.add_option("-t", "--type", dest="type", default='fasta', help="Input file type, one of: %s [default 'fasta']"%(", ".join(TYPES)))
    parser.add_option("-m", "--max", dest="max", default=10.0, type="float", help="Maximum size (in Gb) to count, files larger are split [default 10.0].")
    parser.add_option("-p", "--procs", dest="procs", default=4, type="int", help="Number of processors to use [default 4].")
    parser.add_option("-l", "--length", dest="length", default=None, type="int", help="Length of kmer to use.")
    parser.add_option("-s", "--size", dest="size", default="1G", help="Size of hash to use, number of unique kmers [default '1G']")
    parser.add_option("-c", "--count", dest="count", default=12, type="int", help="Count size in bits [default '12']")
    parser.add_option("-f", "--format", dest="format", default='histo', help="Output format, one of: %s [default 'histo']"%(", ".join(FORMATS)))
    parser.add_option("--histo_max", dest="histo_max", default=10000000, type="int", help="Max count value for histogram [default 10000000]")
    parser.add_option("-r", "--ranked", dest="ranked", action="store_true", default=False, help="histo output includes additional transformations for ranked plot")
    parser.add_option("-d", "--tmpdir", dest="tmpdir", default=None, help="Dir to store intermediate files [default is dir of output file]")
    (opts, args) = parser.parse_args()
    # --- option validation: parser.error() exits the process ---
    if not (opts.input and os.path.isfile(opts.input) and opts.output):
        parser.error("[error] missing input/output files")
    if not (opts.type and (opts.type in TYPES)):
        parser.error("[error] missing input type, use one of: %s"%(", ".join(TYPES)))
    if not (opts.format and (opts.format in FORMATS)):
        parser.error("[error] missing output format, use one of: %s"%(", ".join(FORMATS)))
    if (opts.type != 'hash') and (not opts.length or (opts.length < 2)):
        parser.error("[error] missing / invalid kmer length")
    if (opts.type == 'hash') and (opts.format == 'hash'):
        parser.error("[error] both input and output is binary hash")
    # Clamp nonsensical values rather than erroring.
    if opts.procs < 1: opts.procs = 1
    if opts.count < 2: opts.count = 2
    if not opts.tmpdir: opts.tmpdir = os.path.dirname(opts.output)
    # get kmer count hash
    if opts.type == 'hash':
        jf_hash = opts.input
    else:
        # check file size, split if too large
        max_size = opts.max * math.pow(1024, 3)
        input_set = split_seq_file(opts.input, max_size, opts.type, opts.tmpdir)
        # get hash set
        hash_set = []
        for ifile in input_set:
            if (os.path.getsize(ifile) > 0) and os.path.isfile(ifile):
                hash_set.append( kmer_count(ifile, opts.procs, opts.length, opts.size, opts.count, opts.tmpdir) )
        jf_hash = merge_hash_set(hash_set, opts.tmpdir)
        # cleanup
        if len(input_set) > 1:
            for f in input_set:
                os.remove(f)
    if opts.format == 'hash':
        # Binary output: the merged hash file is the final product.
        os.rename(jf_hash, opts.output)
        return 0
    # Emit the hash in the requested textual format via jellyfish.
    output_cmd = ['jellyfish', opts.format]
    if opts.format == 'histo':
        output_cmd.extend(['-t', str(opts.procs), '-h', str(opts.histo_max)])
    elif opts.format == 'dump':
        output_cmd.extend(['-c', '-t'])
    output_cmd.append(jf_hash)
    sout, serr = run_cmd(output_cmd)
    # NOTE(review): under Python 3, run_cmd() returns bytes while ohdl is
    # opened in text mode; this script appears to target Python 2 (cf.
    # itertools.izip_longest in split_fastq) — confirm before porting.
    ohdl = open(opts.output, 'w')
    if opts.ranked:
        extra_data = ranked_histo(sout)
        for row in extra_data:
            line = "\t".join( map(lambda x: str(x), row) ) + "\n"
            ohdl.write(line)
    else:
        ohdl.write(sout)
    ohdl.close()
    if opts.type != 'hash':
        os.remove(jf_hash)
    if not os.path.isfile(opts.output):
        sys.stderr.write("[error] jellyfish %s returned no results"%(opts.format))
        sys.stderr.write(serr)
        return 1
    return 0
if __name__ == "__main__":
    # argv is passed through but unused: main() re-parses sys.argv itself
    # via OptionParser.
    sys.exit(main(sys.argv))
Script to use jellyfish to get kmer information
Input: fasta/fastq file
Output: kmer information, one of:
1. hash: binary hash of counts
2. stats: summary stats
3. dump: profile (kmer seq - count)
4. histo: histogram (count - abundance)
5. histo ranked: count, abundance, count*abundance, reverse-sum(abundance), reverse-sum(count*abundance), ratio-to-largest"""
import sys, os, glob, string, random, math, re
import itertools, subprocess
from optparse import OptionParser
from Bio import SeqIO
fa_re = re.compile('^>')
BUFFER = 2 * math.pow(1024, 3) # 2 Gb seq buffer
TYPES = ['fasta', 'fastq', 'hash']
FORMATS = ['hash', 'stats', 'dump', 'histo']
def run_cmd(cmd):
proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise IOError("%s\n%s"%(" ".join(cmd), stderr))
return stdout, stderr
def random_str(size=6):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for x in range(size))
def split_fasta(in_file, fhdl_set, max_size):
curr_size = 0
curr_buff = 0
curr_file = 0
strbuffer = ''
for line in open(in_file):
head = fa_re.match(line)
if head and ((curr_size + curr_buff) >= max_size):
fhdl_set[curr_file].write(strbuffer)
curr_size = 0
curr_buff = 0
curr_file += 1
strbuffer = ''
if head and (curr_buff > BUFFER):
fhdl_set[curr_file].write(strbuffer)
curr_size += curr_buff
curr_buff = 0
strbuffer = ''
strbuffer += line
curr_buff += len(line)
if strbuffer != '':
fhdl_set[curr_file].write(strbuffer)
def split_fastq(in_file, fhdl_set, max_size):
curr_size = 0
curr_buff = 0
curr_file = 0
strbuffer = ''
with open(in_file) as f:
for lines in itertools.izip_longest(*[f]*4):
if (curr_size + curr_buff) >= max_size:
fhdl_set[curr_file].write(strbuffer)
curr_size = 0
curr_buff = 0
curr_file += 1
strbuffer = ''
if curr_buff > BUFFER:
fhdl_set[curr_file].write(strbuffer)
curr_size += curr_buff
curr_buff = 0
strbuffer = ''
rec_str = ''.join(lines)
strbuffer += rec_str
curr_buff += len(rec_str)
if strbuffer != '':
fhdl_set[curr_file].write(strbuffer)
def split_seq_file(seq_file, max_size, seq_type, tmpdir):
split_num = int(os.path.getsize(seq_file) / max_size) + 1
if split_num == 1:
return [seq_file]
file_base = os.path.join(tmpdir, "%s.%s"%(random_str(), seq_type))
file_set = map(lambda x: "%s.%d"%(file_base, x+1), range(split_num))
fhdl_set = map(lambda x: open(x, 'w'), file_set)
if seq_type == 'fasta':
split_fasta(seq_file, fhdl_set, max_size)
elif seq_type == 'fastq':
split_fastq(seq_file, fhdl_set, max_size)
for h in fhdl_set:
h.close()
return file_set
def merge_hash_set(hash_set, tmpdir):
if len(hash_set) == 1:
return hash_set[0]
merge_file = os.path.join(tmpdir, random_str()+'.js')
merge_cmd = ['jellyfish', 'merge', '-o', merge_file]
merge_cmd.extend(hash_set)
_sout, _serr = run_cmd(merge_cmd)
for h in hash_set:
os.remove(h)
if not os.path.isfile(merge_file):
sys.stderr.write("[error] jellyfish count returned no results")
sys.stderr.write(_serr)
sys.exit(0)
return merge_file
def ranked_histo(data_str):
sum_col_1 = 0
sum_col_2 = 0
data_matrix = []
for rrow in reversed(data_str.strip().split("\n")):
num, count = rrow.strip().split()
product_0_1 = int(num) * int(count)
sum_col_1 += int(count)
sum_col_2 += product_0_1
data_matrix.append([ num, count, product_0_1, sum_col_1, sum_col_2 ])
for i in range(len(data_matrix)):
ratio = data_matrix[i][4] * 1.0 / sum_col_2
data_matrix[i].append("%.4f"%ratio)
data_matrix.reverse()
return data_matrix
def kmer_count(input, procs, length, size, count, tmpdir):
jf_base = os.path.join(tmpdir, random_str()+'.js.part')
jf_cmd = ['jellyfish', 'count', '-C', '-t', str(procs), '-m', str(length), '-c', str(count), '-s', size, '-o', jf_base, input]
_sout, _serr = run_cmd(jf_cmd)
parts = glob.glob(jf_base+'_*')
return merge_hash_set(parts, tmpdir)
def main(args):
usage = "usage: %prog [options] -i <input file> -o <output file>"
parser = OptionParser(usage)
parser.add_option("-i", "--input", dest="input", default=None, help="Input file, sequence (fasta/fastq) or binary count hash.")
parser.add_option("-o", "--output", dest="output", default=None, help="Output file.")
parser.add_option("-t", "--type", dest="type", default='fasta', help="Input file type, one of: %s [default 'fasta']"%(", ".join(TYPES)))
parser.add_option("-m", "--max", dest="max", default=10.0, type="float", help="Maximum size (in Gb) to count, files larger are split [default 10.0].")
parser.add_option("-p", "--procs", dest="procs", default=4, type="int", help="Number of processors to use [default 4].")
parser.add_option("-l", "--length", dest="length", default=None, type="int", help="Length of kmer to use.")
parser.add_option("-s", "--size", dest="size", default="1G", help="Size of hash to use, number of unique kmers [default '1G']")
parser.add_option("-c", "--count", dest="count", default=12, type="int", help="Count size in bits [default '12']")
parser.add_option("-f", "--format", dest="format", default='histo', help="Output format, one of: %s [default 'histo']"%(", ".join(FORMATS)))
parser.add_option("--histo_max", dest="histo_max", default=10000000, type="int", help="Max count value for histogram [default 10000000]")
parser.add_option("-r", "--ranked", dest="ranked", action="store_true", default=False, help="histo output includes additional transformations for ranked plot")
parser.add_option("-d", "--tmpdir", dest="tmpdir", default=None, help="Dir to store intermediate files [default is dir of output file]")
(opts, args) = parser.parse_args()
if not (opts.input and os.path.isfile(opts.input) and opts.output):
parser.error("[error] missing input/output files")
if not (opts.type and (opts.type in TYPES)):
parser.error("[error] missing input type, use one of: %s"%(", ".join(TYPES)))
if not (opts.format and (opts.format in FORMATS)):
parser.error("[error] missing output format, use one of: %s"%(", ".join(FORMATS)))
if (opts.type != 'hash') and (not opts.length or (opts.length < 2)):
parser.error("[error] missing / invalid kmer length")
if (opts.type == 'hash') and (opts.format == 'hash'):
parser.error("[error] both input and output is binary hash")
if opts.procs < 1: opts.procs = 1
if opts.count < 2: opts.count = 2
if not opts.tmpdir: opts.tmpdir = os.path.dirname(opts.output)
# get kmer count hash
if opts.type == 'hash':
jf_hash = opts.input
else:
# check file size, split if too large
max_size = opts.max * math.pow(1024, 3)
input_set = split_seq_file(opts.input, max_size, opts.type, opts.tmpdir)
# get hash set
hash_set = []
for ifile in input_set:
if (os.path.getsize(ifile) > 0) and os.path.isfile(ifile):
hash_set.append( kmer_count(ifile, opts.procs, opts.length, opts.size, opts.count, opts.tmpdir) )
jf_hash = merge_hash_set(hash_set, opts.tmpdir)
# cleanup
if len(input_set) > 1:
for f in input_set:
os.remove(f)
if opts.format == 'hash':
os.rename(jf_hash, opts.output)
return 0
output_cmd = ['jellyfish', opts.format]
if opts.format == 'histo':
output_cmd.extend(['-t', str(opts.procs), '-h', str(opts.histo_max)])
elif opts.format == 'dump':
output_cmd.extend(['-c', '-t'])
output_cmd.append(jf_hash)
sout, serr = run_cmd(output_cmd)
ohdl = open(opts.output, 'w')
if opts.ranked:
extra_data = ranked_histo(sout)
for row in extra_data:
line = "\t".join( map(lambda x: str(x), row) ) + "\n"
ohdl.write(line)
else:
ohdl.write(sout)
ohdl.close()
if opts.type != 'hash':
os.remove(jf_hash)
if not os.path.isfile(opts.output):
sys.stderr.write("[error] jellyfish %s returned no results"%(opts.format))
sys.stderr.write(serr)
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv)) | 0.282295 | 0.211539 |
# instead of using yearly performance (return and volatility)
# use monthly data
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
# matplotlib inline
import scipy.optimize as sco
# load data for portfolio
# One ticker symbol per line in the portfolio file.
mixed_tickers = []
with open('./data/mixed_portfolio.txt') as file:
    for line in file:
        mixed_tickers.append(line.rstrip())
stocks = mixed_tickers
combined = pd.read_csv('./data/stock_pool_data.csv', index_col=0)
# Re-index the long-format price table by parsed timestamps.
combined['time'] = pd.Index(pd.to_datetime(combined.index))
combined = combined.set_index('time')
# calculate stock returns
# Pivot long format (one row per ticker per date) into a wide
# date x ticker matrix of adjusted close prices.
data_raw = combined[['ticker', 'adjclose']]
data = data_raw.pivot_table(index=data_raw.index, columns='ticker', values=['adjclose'])
# flatten columns multi-index, `date` will become the dataframe index
data.columns = [col[1] for col in data.columns.values]
pf_data = data[mixed_tickers]
num_stocks = len(stocks)
# Daily simple returns; the functions below scale these to monthly.
returns = pf_data.pct_change()
mean_returns = returns.mean()
cov_matrix = returns.cov()
# Simulation size and the (monthly) risk-free rate used for Sharpe ratios.
num_portfolios = 100000
risk_free_rate = 0.01136
def portfolio_Monthly_performance(weights, mean_returns, cov_matrix):
    """Return (monthly volatility, monthly return) for a weight vector.

    Daily statistics are scaled to one trading month of 21 days: returns
    scale linearly with time, volatility with sqrt(time).
    """
    trading_days = 21
    monthly_return = np.sum(mean_returns * weights) * trading_days
    daily_variance = np.dot(weights.T, np.dot(cov_matrix, weights))
    monthly_std = np.sqrt(daily_variance) * np.sqrt(trading_days)
    return monthly_std, monthly_return
def random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate):
    """Simulate random long-only, fully-invested portfolios.

    Returns a (3, num_portfolios) array whose rows are [volatility,
    return, Sharpe ratio], plus the list of weight vectors used.
    NOTE: draws weight dimension from the module-level `stocks` list.
    """
    results = np.zeros((3, num_portfolios))
    weights_record = []
    for col in range(num_portfolios):
        # Uniform random weights, normalised to sum to 1.
        w = np.random.random(len(stocks))
        w /= np.sum(w)
        weights_record.append(w)
        vol, ret = portfolio_Monthly_performance(w, mean_returns, cov_matrix)
        sharpe = (ret - risk_free_rate) / vol
        results[:, col] = (vol, ret, sharpe)
    return results, weights_record
def neg_sharpe_ratio(weights, mean_returns, cov_matrix, risk_free_rate):
    """Negated Sharpe ratio — minimization objective for SLSQP."""
    volatility, expected = portfolio_Monthly_performance(weights, mean_returns, cov_matrix)
    sharpe = (expected - risk_free_rate) / volatility
    return -sharpe
def max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate):
    """Find the long-only, fully-invested portfolio maximizing the
    Sharpe ratio (by minimizing its negation with SLSQP)."""
    n = len(mean_returns)
    fully_invested = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}
    return sco.minimize(
        neg_sharpe_ratio,
        np.repeat(1.0 / n, n),  # start from equal weights
        args=(mean_returns, cov_matrix, risk_free_rate),
        method='SLSQP',
        bounds=[(0.0, 1.0)] * n,
        constraints=fully_invested,
    )
def portfolio_volatility(weights, mean_returns, cov_matrix):
    """Monthly volatility only — objective for minimum-variance search."""
    volatility, _ = portfolio_Monthly_performance(weights, mean_returns, cov_matrix)
    return volatility
def min_variance(mean_returns, cov_matrix):
    """Find the long-only, fully-invested minimum-volatility portfolio."""
    n = len(mean_returns)
    fully_invested = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}
    return sco.minimize(
        portfolio_volatility,
        np.repeat(1.0 / n, n),  # start from equal weights
        args=(mean_returns, cov_matrix),
        method='SLSQP',
        bounds=[(0.0, 1.0)] * n,
        constraints=fully_invested,
    )
def portfolio_return(weights):
    """Monthly return of `weights`.

    NOTE: unlike the other helpers, this reads the module-level
    mean_returns / cov_matrix globals rather than taking them as args.
    """
    _, monthly_ret = portfolio_Monthly_performance(weights, mean_returns, cov_matrix)
    return monthly_ret
def efficient_return(mean_returns, cov_matrix, target):
    """Minimum-volatility, fully-invested portfolio achieving the target
    monthly return.

    NOTE(review): the target constraint goes through portfolio_return(),
    which reads the module-level mean_returns/cov_matrix instead of this
    function's arguments — confirm that is intended.
    """
    n = len(mean_returns)
    constraints = (
        {'type': 'eq', 'fun': lambda x: portfolio_return(x) - target},
        {'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
    )
    return sco.minimize(
        portfolio_volatility,
        np.repeat(1.0 / n, n),  # start from equal weights
        args=(mean_returns, cov_matrix),
        method='SLSQP',
        bounds=[(0, 1)] * n,
        constraints=constraints,
    )
def efficient_frontier(mean_returns, cov_matrix, returns_range):
    """Trace the efficient frontier: one optimization per target return."""
    return [
        efficient_return(mean_returns, cov_matrix, target)
        for target in returns_range
    ]
def display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
results, weights = random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate)
max_sharpe_idx = np.argmax(results[2])
sdp, rp = results[0, max_sharpe_idx], results[1, max_sharpe_idx]
max_sharpe_allocation = pd.DataFrame(weights[max_sharpe_idx], index=pf_data.columns, columns=['allocation'])
max_sharpe_allocation.allocation = [round(i * 100, 2) for i in max_sharpe_allocation.allocation]
max_sharpe_allocation = max_sharpe_allocation.T
min_vol_idx = np.argmin(results[0])
sdp_min, rp_min = results[0, min_vol_idx], results[1, min_vol_idx]
min_vol_allocation = pd.DataFrame(weights[min_vol_idx], index=pf_data.columns, columns=['allocation'])
min_vol_allocation.allocation = [round(i * 100, 2) for i in min_vol_allocation.allocation]
min_vol_allocation = min_vol_allocation.T
print("-" * 80)
print("Maximum Sharpe Ratio Portfolio Allocation\n")
print("Monthly Return:", round(rp, 2))
print("Monthly Volatility:", round(sdp, 2))
print("\n")
print(max_sharpe_allocation)
print("-" * 80)
print("Minimum Volatility Portfolio Allocation\n")
print("Monthly Return:", round(rp_min, 2))
print("Monthly Volatility:", round(sdp_min, 2))
print("\n")
print(min_vol_allocation)
max_sharpe_allocation.to_csv('./data/max_sharpe_allocation.csv', index=True)
min_vol_allocation.to_csv('./data/min_vol_allocation.csv', index=True)
plt.figure(figsize=(10, 7))
plt.scatter(results[0, :], results[1, :], c=results[2, :], cmap='YlGnBu', marker='o', s=10, alpha=0.3)
plt.colorbar()
plt.scatter(sdp, rp, marker='*', color='r', s=500, label='Maximum Sharpe ratio')
plt.scatter(sdp_min, rp_min, marker='*', color='g', s=500, label='Minimum volatility')
plt.title('Simulated Portfolio Optimization based on Efficient Frontier')
plt.xlabel('Monthly Volatility')
plt.ylabel('Monthly Returns')
plt.legend(labelspacing=0.8)
plt.savefig('./data/efficient_frontier_without_line.jpg')
plt.show()
def display_calculated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
results, _ = random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate)
max_sharpe = max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate)
sdp, rp = portfolio_Monthly_performance(max_sharpe['x'], mean_returns, cov_matrix)
max_sharpe_allocation = pd.DataFrame(max_sharpe.x, index=pf_data.columns, columns=['allocation'])
max_sharpe_allocation.allocation = [round(i * 100, 2) for i in max_sharpe_allocation.allocation]
max_sharpe_allocation = max_sharpe_allocation.T
min_vol = min_variance(mean_returns, cov_matrix)
sdp_min, rp_min = portfolio_Monthly_performance(min_vol['x'], mean_returns, cov_matrix)
min_vol_allocation = pd.DataFrame(min_vol.x, index=pf_data.columns, columns=['allocation'])
min_vol_allocation.allocation = [round(i * 100, 2) for i in min_vol_allocation.allocation]
min_vol_allocation = min_vol_allocation.T
print("-" * 80)
print("Maximum Sharpe Ratio Portfolio Allocation\n")
print("Monthly Return:", round(rp, 2))
print("Monthly Volatility:", round(sdp, 2))
print("\n")
print(max_sharpe_allocation)
print("-" * 80)
print("Minimum Volatility Portfolio Allocation\n")
print("Monthly Return:", round(rp_min, 2))
print("Monthly Volatility:", round(sdp_min, 2))
print("\n")
print(min_vol_allocation)
max_sharpe_allocation.to_csv('./data/max_sharpe_allocation.csv', index=True)
min_vol_allocation.to_csv('./data/min_vol_allocation.csv', index=True)
plt.figure(figsize=(10, 7))
plt.scatter(results[0, :], results[1, :], c=results[2, :], cmap='YlGnBu', marker='o', s=10, alpha=0.3)
plt.colorbar()
plt.scatter(sdp, rp, marker='*', color='r', s=500, label='Maximum Sharpe ratio')
plt.scatter(sdp_min, rp_min, marker='*', color='g', s=500, label='Minimum volatility')
target = np.linspace(rp_min, 0.038, 50)
efficient_portfolios = efficient_frontier(mean_returns, cov_matrix, target)
plt.plot([p['fun'] for p in efficient_portfolios], target, linestyle='-.', color='black',
label='efficient frontier')
plt.title('Calculated Portfolio Optimization based on Efficient Frontier')
plt.xlabel('Monthly Volatility')
plt.ylabel('Monthly Returns')
plt.legend(labelspacing=0.8)
plt.savefig('./data/efficient_frontier_with_line.jpg')
plt.show()
if __name__ == "__main__":
# display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate)
display_calculated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate) | stock_selection/optimization.py |
# instead of using yearly performance (return and volatility)
# use monthly data
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
# matplotlib inline
import scipy.optimize as sco
# load data for portfolio
# NOTE(review): reads ticker list + pooled price history from ./data -- the
# module executes this at import time, so the files must exist.
mixed_tickers = []
with open('./data/mixed_portfolio.txt') as file:
    for line in file:
        # one ticker symbol per line
        mixed_tickers.append(line.rstrip())
stocks = mixed_tickers
combined = pd.read_csv('./data/stock_pool_data.csv', index_col=0)
# re-index by parsed timestamps
combined['time'] = pd.Index(pd.to_datetime(combined.index))
combined = combined.set_index('time')
# calculate stock returns
data_raw = combined[['ticker', 'adjclose']]
# wide table: one adjusted-close column per ticker
data = data_raw.pivot_table(index=data_raw.index, columns='ticker', values=['adjclose'])
# flatten columns multi-index, `date` will become the dataframe index
data.columns = [col[1] for col in data.columns.values]
pf_data = data[mixed_tickers]
num_stocks = len(stocks)
returns = pf_data.pct_change()
mean_returns = returns.mean()  # per-ticker mean of daily returns
cov_matrix = returns.cov()  # daily return covariance matrix
num_portfolios = 100000  # Monte-Carlo sample size
risk_free_rate = 0.01136  # presumably a monthly risk-free rate -- TODO confirm units
def portfolio_Monthly_performance(weights, mean_returns, cov_matrix):
    """Return (monthly volatility, monthly return) of a weighted portfolio.

    Daily mean returns are scaled by 21 trading days per month; the daily
    covariance volatility is scaled by sqrt(21).
    """
    monthly_return = 21 * np.sum(weights * mean_returns)
    daily_variance = np.dot(weights, np.dot(cov_matrix, weights))
    monthly_std = np.sqrt(21.0 * daily_variance)
    return monthly_std, monthly_return
def random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate):
    """Simulate random long-only portfolios.

    Returns
    -------
    results : (3, num_portfolios) ndarray of [std dev, return, Sharpe ratio]
    weights_record : list of the weight vectors behind each column
    """
    # Fix: derive the asset count from mean_returns instead of the
    # module-level `stocks` list, consistent with the sibling optimizers
    # (max_sharpe_ratio, min_variance) and safe for any aligned inputs.
    num_assets = len(mean_returns)
    results = np.zeros((3, num_portfolios))
    weights_record = []
    for i in range(num_portfolios):
        # random long-only weights normalised to sum to 1
        weights = np.random.random(num_assets)
        weights /= np.sum(weights)
        weights_record.append(weights)
        portfolio_std_dev, portfolio_return = portfolio_Monthly_performance(
            weights, mean_returns, cov_matrix)
        results[0, i] = portfolio_std_dev
        results[1, i] = portfolio_return
        # Sharpe ratio versus the risk-free rate
        results[2, i] = (portfolio_return - risk_free_rate) / portfolio_std_dev
    return results, weights_record
def neg_sharpe_ratio(weights, mean_returns, cov_matrix, risk_free_rate):
    """Negative Sharpe ratio of the portfolio (objective for minimisation)."""
    # portfolio_Monthly_performance returns (std dev, return)
    p_std, p_ret = portfolio_Monthly_performance(weights, mean_returns, cov_matrix)
    sharpe = (p_ret - risk_free_rate) / p_std
    return -sharpe
def max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate):
    """Long-only weights maximising the Sharpe ratio.

    Minimises `neg_sharpe_ratio` with SLSQP under a full-investment
    constraint (weights sum to 1) and per-asset bounds [0, 1].
    """
    num_assets = len(mean_returns)
    initial_guess = np.full(num_assets, 1.0 / num_assets)
    full_investment = {'type': 'eq', 'fun': lambda w: np.sum(w) - 1}
    bounds = [(0.0, 1.0)] * num_assets
    return sco.minimize(
        neg_sharpe_ratio,
        initial_guess,
        args=(mean_returns, cov_matrix, risk_free_rate),
        method='SLSQP',
        bounds=bounds,
        constraints=full_investment,
    )
def portfolio_volatility(weights, mean_returns, cov_matrix):
    """Monthly volatility of the portfolio (objective for minimisation)."""
    std, _ = portfolio_Monthly_performance(weights, mean_returns, cov_matrix)
    return std
def min_variance(mean_returns, cov_matrix):
    """Long-only weights with minimum monthly volatility (SLSQP)."""
    num_assets = len(mean_returns)
    initial_guess = np.full(num_assets, 1.0 / num_assets)
    full_investment = {'type': 'eq', 'fun': lambda w: np.sum(w) - 1}
    bounds = [(0.0, 1.0)] * num_assets
    return sco.minimize(
        portfolio_volatility,
        initial_guess,
        args=(mean_returns, cov_matrix),
        method='SLSQP',
        bounds=bounds,
        constraints=full_investment,
    )
def portfolio_return(weights):
    # NOTE(review): closes over the module-level `mean_returns` and
    # `cov_matrix`; it is used as the target-return constraint inside
    # `efficient_return`, which is why it takes only `weights`.
    return portfolio_Monthly_performance(weights, mean_returns, cov_matrix)[1]
def efficient_return(mean_returns, cov_matrix, target):
    """Minimum-volatility long-only portfolio achieving `target` monthly return."""
    num_assets = len(mean_returns)
    initial_guess = num_assets * [1.0 / num_assets]
    constraints = (
        # hit the requested monthly return exactly
        {'type': 'eq', 'fun': lambda x: portfolio_return(x) - target},
        # fully invested
        {'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
    )
    bounds = tuple((0, 1) for _ in range(num_assets))
    return sco.minimize(portfolio_volatility, initial_guess,
                        args=(mean_returns, cov_matrix), method='SLSQP',
                        bounds=bounds, constraints=constraints)
def efficient_frontier(mean_returns, cov_matrix, returns_range):
    """Solve `efficient_return` for every target in `returns_range`."""
    return [efficient_return(mean_returns, cov_matrix, target)
            for target in returns_range]
def display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
    """Monte-Carlo view of the efficient frontier.

    Simulates random portfolios, prints the max-Sharpe and min-volatility
    candidates, writes their allocations to ./data/*.csv and saves a scatter
    plot to ./data/efficient_frontier_without_line.jpg.  Reads the
    module-level `pf_data` for ticker labels.
    """
    results, weights = random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate)
    # best simulated Sharpe ratio (results row 2)
    max_sharpe_idx = np.argmax(results[2])
    sdp, rp = results[0, max_sharpe_idx], results[1, max_sharpe_idx]
    max_sharpe_allocation = pd.DataFrame(weights[max_sharpe_idx], index=pf_data.columns, columns=['allocation'])
    # express weights as percentages
    max_sharpe_allocation.allocation = [round(i * 100, 2) for i in max_sharpe_allocation.allocation]
    max_sharpe_allocation = max_sharpe_allocation.T
    # lowest simulated volatility (results row 0)
    min_vol_idx = np.argmin(results[0])
    sdp_min, rp_min = results[0, min_vol_idx], results[1, min_vol_idx]
    min_vol_allocation = pd.DataFrame(weights[min_vol_idx], index=pf_data.columns, columns=['allocation'])
    min_vol_allocation.allocation = [round(i * 100, 2) for i in min_vol_allocation.allocation]
    min_vol_allocation = min_vol_allocation.T
    print("-" * 80)
    print("Maximum Sharpe Ratio Portfolio Allocation\n")
    print("Monthly Return:", round(rp, 2))
    print("Monthly Volatility:", round(sdp, 2))
    print("\n")
    print(max_sharpe_allocation)
    print("-" * 80)
    print("Minimum Volatility Portfolio Allocation\n")
    print("Monthly Return:", round(rp_min, 2))
    print("Monthly Volatility:", round(sdp_min, 2))
    print("\n")
    print(min_vol_allocation)
    max_sharpe_allocation.to_csv('./data/max_sharpe_allocation.csv', index=True)
    min_vol_allocation.to_csv('./data/min_vol_allocation.csv', index=True)
    # scatter of all simulated portfolios, coloured by Sharpe ratio
    plt.figure(figsize=(10, 7))
    plt.scatter(results[0, :], results[1, :], c=results[2, :], cmap='YlGnBu', marker='o', s=10, alpha=0.3)
    plt.colorbar()
    plt.scatter(sdp, rp, marker='*', color='r', s=500, label='Maximum Sharpe ratio')
    plt.scatter(sdp_min, rp_min, marker='*', color='g', s=500, label='Minimum volatility')
    plt.title('Simulated Portfolio Optimization based on Efficient Frontier')
    plt.xlabel('Monthly Volatility')
    plt.ylabel('Monthly Returns')
    plt.legend(labelspacing=0.8)
    plt.savefig('./data/efficient_frontier_without_line.jpg')
    plt.show()
def display_calculated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate):
    """Optimised efficient frontier over a random-portfolio background.

    Solves the max-Sharpe and min-volatility portfolios with SLSQP, prints
    and saves their allocations, then overlays the calculated frontier line
    on a scatter of simulated portfolios
    (./data/efficient_frontier_with_line.jpg).
    """
    results, _ = random_portfolios(num_portfolios, mean_returns, cov_matrix, risk_free_rate)
    max_sharpe = max_sharpe_ratio(mean_returns, cov_matrix, risk_free_rate)
    sdp, rp = portfolio_Monthly_performance(max_sharpe['x'], mean_returns, cov_matrix)
    max_sharpe_allocation = pd.DataFrame(max_sharpe.x, index=pf_data.columns, columns=['allocation'])
    # express weights as percentages
    max_sharpe_allocation.allocation = [round(i * 100, 2) for i in max_sharpe_allocation.allocation]
    max_sharpe_allocation = max_sharpe_allocation.T
    min_vol = min_variance(mean_returns, cov_matrix)
    sdp_min, rp_min = portfolio_Monthly_performance(min_vol['x'], mean_returns, cov_matrix)
    min_vol_allocation = pd.DataFrame(min_vol.x, index=pf_data.columns, columns=['allocation'])
    min_vol_allocation.allocation = [round(i * 100, 2) for i in min_vol_allocation.allocation]
    min_vol_allocation = min_vol_allocation.T
    print("-" * 80)
    print("Maximum Sharpe Ratio Portfolio Allocation\n")
    print("Monthly Return:", round(rp, 2))
    print("Monthly Volatility:", round(sdp, 2))
    print("\n")
    print(max_sharpe_allocation)
    print("-" * 80)
    print("Minimum Volatility Portfolio Allocation\n")
    print("Monthly Return:", round(rp_min, 2))
    print("Monthly Volatility:", round(sdp_min, 2))
    print("\n")
    print(min_vol_allocation)
    # NOTE(review): same CSV paths as display_simulated_ef_with_random --
    # running both overwrites the earlier files.
    max_sharpe_allocation.to_csv('./data/max_sharpe_allocation.csv', index=True)
    min_vol_allocation.to_csv('./data/min_vol_allocation.csv', index=True)
    plt.figure(figsize=(10, 7))
    plt.scatter(results[0, :], results[1, :], c=results[2, :], cmap='YlGnBu', marker='o', s=10, alpha=0.3)
    plt.colorbar()
    plt.scatter(sdp, rp, marker='*', color='r', s=500, label='Maximum Sharpe ratio')
    plt.scatter(sdp_min, rp_min, marker='*', color='g', s=500, label='Minimum volatility')
    # frontier targets from the min-vol return up to a hard-coded cap of
    # 0.038 monthly return -- presumably tuned to this data set; TODO confirm
    target = np.linspace(rp_min, 0.038, 50)
    efficient_portfolios = efficient_frontier(mean_returns, cov_matrix, target)
    # each solution's objective value ('fun') is the frontier volatility
    plt.plot([p['fun'] for p in efficient_portfolios], target, linestyle='-.', color='black',
             label='efficient frontier')
    plt.title('Calculated Portfolio Optimization based on Efficient Frontier')
    plt.xlabel('Monthly Volatility')
    plt.ylabel('Monthly Returns')
    plt.legend(labelspacing=0.8)
    plt.savefig('./data/efficient_frontier_with_line.jpg')
    plt.show()
if __name__ == "__main__":
# display_simulated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate)
display_calculated_ef_with_random(mean_returns, cov_matrix, num_portfolios, risk_free_rate) | 0.709925 | 0.680507 |
import requests
from bs4 import BeautifulSoup
import datetime
import pandas as pd
URL = "http://www1.river.go.jp"
DAT_HEAD_ROWS = 9
class _DataPage(object):
    """Base scraper for www1.river.go.jp observation-data pages.

    Subclasses set `_url_base`, `_kind` and `station_id` (plus
    `begin_date`/`end_date` for KIND=1 archive pages) and call
    `_process()`, which downloads the page, locates the auto-generated
    .dat file and parses it into `self.df` (columns: "datetime", "data").
    """

    def __init__(self):
        self._url_base = ""
        self._kind = 1
        self.begin_date = ""
        self.end_date = ""
        self.station_id = 0

    def _gen_url(self):
        # KIND=1 (hourly archive) pages additionally require a date range
        self._url = self._url_base + "KIND={}&ID={}".format(self._kind, self.station_id)
        if self._kind == 1:
            self._url += "&BGNDATE={}&ENDDATE={}".format(self.begin_date, self.end_date)

    def _grab_html(self):
        response = requests.get(self._url)
        if response.status_code == 200:
            self._html = response.content
        else:
            raise ConnectionError

    def _gen_soup(self):
        self._soup = BeautifulSoup(self._html, "html.parser")

    # we use the .dat file that the website generated automatically
    def _get_dat_url(self):
        self._dat_url = URL + self._soup.find("img", src="/img/download.gif").parent["href"]

    def _download_dat(self):
        dat = requests.get(self._dat_url)
        # the site serves Shift_JIS-encoded text
        dat.encoding = "Shift_JIS"
        self._dat_text = dat.text

    def _dat_2_dataframe(self):
        # Fix: collect parsed rows in a list and build the DataFrame once.
        # DataFrame.append was deprecated and removed in pandas 2.0, and
        # appending row-by-row is quadratic in the number of rows.
        records = []
        for row in self._dat_text.split("\r\n")[DAT_HEAD_ROWS:]:
            # footer/annotation rows end with '#' or '$'
            if row.endswith("#") or row.endswith("$") or row == "":
                continue
            time, data = _row_text_2_datetime_data(row)
            if data is not None and time is not None:
                records.append((time, data))
        self.df = pd.DataFrame(records, columns=["datetime", "data"])

    def _process(self):
        self._gen_url()
        self._grab_html()
        self._gen_soup()
        self._get_dat_url()
        self._download_dat()
        self._dat_2_dataframe()
class _WaterLevelDataPage(_DataPage):
    """Water-level page (DspWaterData.exe) for a single station."""

    def __init__(self, station_id):
        super().__init__()
        self.station_id = station_id
        self._url_base = "http://www1.river.go.jp/cgi-bin/DspWaterData.exe?"
class WaterLevelByHourDataPage(_WaterLevelDataPage):
    """Hourly water-level archive (KIND=1) between two dates; fetches on init."""

    def __init__(self, station_id, begin_date, end_date):
        super().__init__(station_id)
        self._kind = 1
        self.begin_date, self.end_date = begin_date, end_date
        self._process()
class WaterLevelRealTimeDataPage(_WaterLevelDataPage):
    """Real-time water-level page (KIND=9, no date range); fetches on init."""

    def __init__(self, station_id):
        super().__init__(station_id)
        self._kind = 9
        self._process()
class _RainDataPage(_DataPage):
    """Rainfall page (DspRainData.exe) for a single station."""

    def __init__(self, station_id):
        super().__init__()
        self.station_id = station_id
        self._url_base = "http://www1.river.go.jp/cgi-bin/DspRainData.exe?"
class RainByHourDataPage(_RainDataPage):
    """Hourly rainfall archive (KIND=1) between two dates; fetches on init."""

    def __init__(self, station_id, begin_date, end_date):
        super().__init__(station_id)
        self._kind = 1
        self.begin_date, self.end_date = begin_date, end_date
        self._process()
class RainRealTimeDataPage(_RainDataPage):
    """Real-time rainfall page (KIND=9, no date range); fetches on init."""

    def __init__(self, station_id):
        super().__init__(station_id)
        self._kind = 9
        self._process()
def _row_text_2_datetime_data(row: str):
    """Parse one .dat row ("YYYY/MM/DD,HH:MM,value,...") into (datetime, float).

    Either element is None when that field cannot be parsed, letting the
    caller skip malformed rows.
    """
    tmp = row.split(",")
    # Fix: a short/malformed row used to raise IndexError on tmp[1]/tmp[2];
    # report it as unparseable instead.
    if len(tmp) < 3:
        return None, None
    _fix_24_hour(tmp)
    try:
        time = datetime.datetime.strptime("{} {}".format(tmp[0], tmp[1]), "%Y/%m/%d %H:%M")
    except ValueError:
        time = None
    try:
        data = float(tmp[2])
    except ValueError:
        data = None
    return time, data
# datetime library doesn't support time like 24:00 so we have to fix it
# datetime library doesn't support time like 24:00 so we have to fix it
def _fix_24_hour(row: list):
    """Rewrite a "24:00" timestamp as "00:00" with an adjusted date.

    Mutates `row` ([date, time, ...]) in place; no-op for other times.
    """
    if row[1] != "24:00":
        return
    row[1] = "00:00"
    row[0] = _fix_date(row[0])
def _fix_date(date_str: str):
date = datetime.datetime.strptime(date_str, "%Y/%m/%d") - datetime.timedelta(days=1)
return date.strftime("%Y/%m/%d") | mlit/data_page.py | import requests
from bs4 import BeautifulSoup
import datetime
import pandas as pd
URL = "http://www1.river.go.jp"
DAT_HEAD_ROWS = 9
class _DataPage(object):
def __init__(self):
self._url_base = ""
self._kind = 1
self.begin_date = ""
self.end_date = ""
self.station_id = 0
def _gen_url(self):
self._url = self._url_base + "KIND={}&ID={}".format(self._kind, self.station_id)
if self._kind == 1:
self._url += "&BGNDATE={}&ENDDATE={}".format(self.begin_date, self.end_date)
def _grab_html(self):
response = requests.get(self._url)
if response.status_code == 200:
self._html = response.content
else:
raise ConnectionError
def _gen_soup(self):
self._soup = BeautifulSoup(self._html, "html.parser")
# we use the .dat file that the website generated automatically
def _get_dat_url(self):
self._dat_url = URL + self._soup.find("img", src="/img/download.gif").parent["href"]
def _download_dat(self):
dat = requests.get(self._dat_url)
dat.encoding = "Shift_JIS"
self._dat_text = dat.text
def _dat_2_dataframe(self):
self.df = pd.DataFrame({"datetime": [], "data": []})
for row in self._dat_text.split("\r\n")[DAT_HEAD_ROWS:]:
if row.endswith("#") or row.endswith("$") or row == "":
continue
time, data = _row_text_2_datetime_data(row)
if data is not None and time is not None:
self.df = self.df.append(pd.Series([time, data], index=self.df.columns), ignore_index=True)
def _process(self):
self._gen_url()
self._grab_html()
self._gen_soup()
self._get_dat_url()
self._download_dat()
self._dat_2_dataframe()
class _WaterLevelDataPage(_DataPage):
def __init__(self, station_id):
super().__init__()
self._url_base = "http://www1.river.go.jp/cgi-bin/DspWaterData.exe?"
self.station_id = station_id
class WaterLevelByHourDataPage(_WaterLevelDataPage):
def __init__(self, station_id, begin_date, end_date):
super().__init__(station_id)
self._kind = 1
self.begin_date = begin_date
self.end_date = end_date
self._process()
class WaterLevelRealTimeDataPage(_WaterLevelDataPage):
def __init__(self, station_id):
super().__init__(station_id)
self._kind = 9
self._process()
class _RainDataPage(_DataPage):
def __init__(self, station_id):
super().__init__()
self._url_base = "http://www1.river.go.jp/cgi-bin/DspRainData.exe?"
self.station_id = station_id
class RainByHourDataPage(_RainDataPage):
def __init__(self, station_id, begin_date, end_date):
super().__init__(station_id)
self._kind = 1
self.begin_date = begin_date
self.end_date = end_date
self._process()
class RainRealTimeDataPage(_RainDataPage):
def __init__(self, station_id):
super().__init__(station_id)
self._kind = 9
self._process()
def _row_text_2_datetime_data(row: str):
tmp = row.split(",")
_fix_24_hour(tmp)
try:
time = datetime.datetime.strptime("{} {}".format(tmp[0], tmp[1]), "%Y/%m/%d %H:%M")
except ValueError:
time = None
try:
data = float(tmp[2])
except ValueError:
data = None
return time, data
# datetime library doesn't support time like 24:00 so we have to fix it
def _fix_24_hour(row: list):
if row[1] == "24:00":
row[1] = "00:00"
row[0] = _fix_date(row[0])
def _fix_date(date_str: str):
date = datetime.datetime.strptime(date_str, "%Y/%m/%d") - datetime.timedelta(days=1)
return date.strftime("%Y/%m/%d") | 0.245356 | 0.071819 |
u"""
compute_tide_corrections.py
Written by <NAME> (09/2021)
Calculates tidal elevations for correcting elevation or imagery data
Uses OTIS format tidal solutions provided by Ohio State University and ESR
http://volkov.oce.orst.edu/tides/region.html
https://www.esr.org/research/polar-tide-models/list-of-polar-tide-models/
ftp://ftp.esr.org/pub/datasets/tmd/
Global Tide Model (GOT) solutions provided by Richard Ray at GSFC
or Finite Element Solution (FES) models provided by AVISO
INPUTS:
x: x-coordinates in projection EPSG
y: y-coordinates in projection EPSG
delta_time: seconds since EPOCH or datetime array
OPTIONS:
DIRECTORY: working data directory for tide models
MODEL: Tide model to use in correction
ATLAS_FORMAT: ATLAS tide model format (OTIS, netcdf)
GZIP: Tide model files are gzip compressed
DEFINITION_FILE: Tide model definition file for use as correction
EPOCH: time period for calculating delta times
default: J2000 (seconds since 2000-01-01T00:00:00)
TYPE: input data type
None: determined from input variable dimensions
drift: drift buoys or satellite/airborne altimetry (time per data point)
grid: spatial grids or images (single time for all data points)
TIME: input time standard or input type
GPS: leap seconds needed
TAI: leap seconds needed (TAI = GPS + 19 seconds)
UTC: no leap seconds needed
datetime: numpy datatime array in UTC
EPSG: input coordinate system
default: 3031 Polar Stereographic South, WGS84
METHOD: interpolation method
bilinear: quick bilinear interpolation
spline: scipy bivariate spline interpolation
linear, nearest: scipy regular grid interpolations
EXTRAPOLATE: extrapolate with nearest-neighbors
CUTOFF: Extrapolation cutoff in kilometers
set to np.inf to extrapolate for all points
FILL_VALUE: output invalid value (default NaN)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
model.py: retrieves tide model parameters for named tide models
spatial: utilities for reading, writing and operating on spatial data
utilities.py: download and management utilities for syncing files
calc_astrol_longitudes.py: computes the basic astronomical mean longitudes
calc_delta_time.py: calculates difference between universal and dynamic time
convert_ll_xy.py: convert lat/lon points to and from projected coordinates
infer_minor_corrections.py: return corrections for minor constituents
load_constituent.py: loads parameters for a given tidal constituent
load_nodal_corrections.py: load the nodal corrections for tidal constituents
predict_tide.py: predict tides at single times using harmonic constants
predict_tide_drift.py: predict tidal elevations using harmonic constants
read_tide_model.py: extract tidal harmonic constants from OTIS tide models
read_netcdf_model.py: extract tidal harmonic constants from netcdf models
read_GOT_model.py: extract tidal harmonic constants from GSFC GOT models
read_FES_model.py: extract tidal harmonic constants from FES tide models
bilinear_interp.py: bilinear interpolation of data to coordinates
nearest_extrap.py: nearest-neighbor extrapolation of data to coordinates
UPDATE HISTORY:
Updated 09/2021: refactor to use model class for files and attributes
Updated 07/2021: can use numpy datetime arrays as input time variable
added function for determining the input spatial variable type
added check that tide model directory is accessible
Updated 06/2021: added new Gr1km-v2 1km Greenland model from ESR
add try/except for input projection strings
Updated 05/2021: added option for extrapolation cutoff in kilometers
Updated 03/2021: added TPXO9-atlas-v4 in binary OTIS format
simplified netcdf inputs to be similar to binary OTIS read program
Updated 02/2021: replaced numpy bool to prevent deprecation warning
Updated 12/2020: added valid data extrapolation with nearest_extrap
Updated 11/2020: added model constituents from TPXO9-atlas-v3
Updated 08/2020: using builtin time operations.
calculate difference in leap seconds from start of epoch
using conversion protocols following pyproj-2 updates
Updated 07/2020: added function docstrings, FES2014 and TPXO9-atlas-v2
use merged delta time files combining biannual, monthly and daily files
Updated 03/2020: added TYPE, TIME, FILL_VALUE and METHOD options
Written 03/2020
"""
from __future__ import print_function
import os
import pyproj
import numpy as np
import pyTMD.time
import pyTMD.model
import pyTMD.spatial
import pyTMD.utilities
from pyTMD.calc_delta_time import calc_delta_time
from pyTMD.infer_minor_corrections import infer_minor_corrections
from pyTMD.predict_tide import predict_tide
from pyTMD.predict_tide_drift import predict_tide_drift
from pyTMD.read_tide_model import extract_tidal_constants
from pyTMD.read_netcdf_model import extract_netcdf_constants
from pyTMD.read_GOT_model import extract_GOT_constants
from pyTMD.read_FES_model import extract_FES_constants
#-- PURPOSE: compute tides at points and times using tide model algorithms
def compute_tide_corrections(x, y, delta_time, DIRECTORY=None, MODEL=None,
    ATLAS_FORMAT='netcdf', GZIP=False, DEFINITION_FILE=None, EPSG=3031,
    EPOCH=(2000,1,1,0,0,0), TYPE='drift', TIME='UTC', METHOD='spline',
    EXTRAPOLATE=False, CUTOFF=10.0, FILL_VALUE=np.nan):
    """
    Compute tides at points and times using tidal harmonics
    Arguments
    ---------
    x: x-coordinates in projection EPSG
    y: y-coordinates in projection EPSG
    delta_time: seconds since EPOCH or datetime array
    Keyword arguments
    -----------------
    DIRECTORY: working data directory for tide models
    MODEL: Tide model to use in correction
    ATLAS_FORMAT: ATLAS tide model format (OTIS, netcdf)
    GZIP: Tide model files are gzip compressed
    DEFINITION_FILE: Tide model definition file for use as correction
    EPOCH: time period for calculating delta times
        default: J2000 (seconds since 2000-01-01T00:00:00)
    TYPE: input data type
        None: determined from input variable dimensions
        drift: drift buoys or satellite/airborne altimetry (time per data point)
        grid: spatial grids or images (single time per image)
    TIME: time type if need to compute leap seconds to convert to UTC
        GPS: leap seconds needed
        TAI: leap seconds needed (TAI = GPS + 19 seconds)
        UTC: no leap seconds needed
        datetime: numpy datatime array in UTC
    EPSG: input coordinate system
        default: 3031 Polar Stereographic South, WGS84
    METHOD: interpolation method
        bilinear: quick bilinear interpolation
        spline: scipy bivariate spline interpolation
        linear, nearest: scipy regular grid interpolations
    EXTRAPOLATE: extrapolate with nearest-neighbors
    CUTOFF: Extrapolation cutoff in kilometers
        set to np.inf to extrapolate for all points
    FILL_VALUE: output invalid value (default NaN)
    Returns
    -------
    tide: tidal elevation at coordinates and time in meters
    Raises
    ------
    FileNotFoundError: if the tide model directory is not accessible
    """
    #-- check that tide directory is accessible
    #-- Fix: os.access() reports inaccessibility by returning False -- it
    #-- does not raise -- so the previous try/except never caught a missing
    #-- directory (it only tripped on DIRECTORY=None via TypeError).
    if (DIRECTORY is None) or not os.access(DIRECTORY, os.F_OK):
        raise FileNotFoundError("Invalid tide directory")
    #-- get parameters for tide model
    if DEFINITION_FILE is not None:
        model = pyTMD.model(DIRECTORY).from_file(DEFINITION_FILE)
    else:
        model = pyTMD.model(DIRECTORY, format=ATLAS_FORMAT,
            compressed=GZIP).elevation(MODEL)
    #-- determine input data type based on variable dimensions
    if not TYPE:
        TYPE = pyTMD.spatial.data_type(x, y, delta_time)
    #-- reform coordinate dimensions for input grids
    if (TYPE.lower() == 'grid') and (np.size(x) != np.size(y)):
        x,y = np.meshgrid(np.copy(x),np.copy(y))
    #-- converting x,y from EPSG to latitude/longitude
    try:
        #-- EPSG projection code string or int
        crs1 = pyproj.CRS.from_string("epsg:{0:d}".format(int(EPSG)))
    except (ValueError,pyproj.exceptions.CRSError):
        #-- Projection SRS string
        crs1 = pyproj.CRS.from_string(EPSG)
    crs2 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
    transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
    lon,lat = transformer.transform(x.flatten(), y.flatten())
    #-- assert delta time is an array
    delta_time = np.atleast_1d(delta_time)
    #-- calculate leap seconds if specified
    if (TIME.upper() == 'GPS'):
        GPS_Epoch_Time = pyTMD.time.convert_delta_time(0, epoch1=EPOCH,
            epoch2=(1980,1,6,0,0,0), scale=1.0)
        GPS_Time = pyTMD.time.convert_delta_time(delta_time, epoch1=EPOCH,
            epoch2=(1980,1,6,0,0,0), scale=1.0)
        #-- calculate difference in leap seconds from start of epoch
        leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \
            pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))
    elif (TIME.upper() == 'TAI'):
        #-- TAI time is ahead of GPS time by 19 seconds
        GPS_Epoch_Time = pyTMD.time.convert_delta_time(-19.0, epoch1=EPOCH,
            epoch2=(1980,1,6,0,0,0), scale=1.0)
        GPS_Time = pyTMD.time.convert_delta_time(delta_time-19.0, epoch1=EPOCH,
            epoch2=(1980,1,6,0,0,0), scale=1.0)
        #-- calculate difference in leap seconds from start of epoch
        leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \
            pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))
    else:
        leap_seconds = 0.0
    #-- convert delta times or datetimes objects
    if (TIME.lower() == 'datetime'):
        #-- convert delta time array from datetime object
        #-- to days relative to 1992-01-01T00:00:00
        t = pyTMD.time.convert_datetime(delta_time,
            epoch=(1992,1,1,0,0,0))/86400.0
    else:
        #-- convert time to days relative to Jan 1, 1992 (48622mjd)
        t = pyTMD.time.convert_delta_time(delta_time - leap_seconds,
            epoch1=EPOCH, epoch2=(1992,1,1,0,0,0), scale=(1.0/86400.0))
    #-- delta time (TT - UT1) file
    delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])
    #-- read tidal constants and interpolate to grid points
    if model.format in ('OTIS','ATLAS'):
        amp,ph,D,c = extract_tidal_constants(lon, lat, model.grid_file,
            model.model_file, model.projection, TYPE=model.type,
            METHOD=METHOD, EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF,
            GRID=model.format)
        deltat = np.zeros_like(t)
    elif (model.format == 'netcdf'):
        amp,ph,D,c = extract_netcdf_constants(lon, lat, model.grid_file,
            model.model_file, TYPE=model.type, METHOD=METHOD,
            EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF, SCALE=model.scale,
            GZIP=model.compressed)
        deltat = np.zeros_like(t)
    elif (model.format == 'GOT'):
        amp,ph,c = extract_GOT_constants(lon, lat, model.model_file,
            METHOD=METHOD, EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF,
            SCALE=model.scale, GZIP=model.compressed)
        #-- interpolate delta times from calendar dates to tide time
        deltat = calc_delta_time(delta_file, t)
    elif (model.format == 'FES'):
        amp,ph = extract_FES_constants(lon, lat, model.model_file,
            TYPE=model.type, VERSION=model.version, METHOD=METHOD,
            EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF, SCALE=model.scale,
            GZIP=model.compressed)
        #-- available model constituents
        c = model.constituents
        #-- interpolate delta times from calendar dates to tide time
        deltat = calc_delta_time(delta_file, t)
    #-- calculate complex phase in radians for Euler's
    cph = -1j*ph*np.pi/180.0
    #-- calculate constituent oscillation
    hc = amp*np.exp(cph)
    #-- predict tidal elevations at time and infer minor corrections
    if (TYPE.lower() == 'grid'):
        ny,nx = np.shape(x); nt = len(t)
        tide = np.ma.zeros((ny,nx,nt),fill_value=FILL_VALUE)
        tide.mask = np.zeros((ny,nx,nt),dtype=bool)
        for i in range(nt):
            TIDE = predict_tide(t[i], hc, c,
                DELTAT=deltat[i], CORRECTIONS=model.format)
            MINOR = infer_minor_corrections(t[i], hc, c,
                DELTAT=deltat[i], CORRECTIONS=model.format)
            #-- add major and minor components and reform grid
            tide[:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))
            tide.mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask), (ny,nx))
    elif (TYPE.lower() == 'drift'):
        npts = len(t)
        tide = np.ma.zeros((npts), fill_value=FILL_VALUE)
        tide.mask = np.any(hc.mask,axis=1)
        tide.data[:] = predict_tide_drift(t, hc, c,
            DELTAT=deltat, CORRECTIONS=model.format)
        minor = infer_minor_corrections(t, hc, c,
            DELTAT=deltat, CORRECTIONS=model.format)
        tide.data[:] += minor.data[:]
    #-- replace invalid values with fill value
    tide.data[tide.mask] = tide.fill_value
    #-- return the tide correction
    return tide
compute_tide_corrections.py
Written by <NAME> (09/2021)
Calculates tidal elevations for correcting elevation or imagery data
Uses OTIS format tidal solutions provided by Ohio State University and ESR
http://volkov.oce.orst.edu/tides/region.html
https://www.esr.org/research/polar-tide-models/list-of-polar-tide-models/
ftp://ftp.esr.org/pub/datasets/tmd/
Global Tide Model (GOT) solutions provided by Richard Ray at GSFC
or Finite Element Solution (FES) models provided by AVISO
INPUTS:
x: x-coordinates in projection EPSG
y: y-coordinates in projection EPSG
delta_time: seconds since EPOCH or datetime array
OPTIONS:
DIRECTORY: working data directory for tide models
MODEL: Tide model to use in correction
ATLAS_FORMAT: ATLAS tide model format (OTIS, netcdf)
GZIP: Tide model files are gzip compressed
DEFINITION_FILE: Tide model definition file for use as correction
EPOCH: time period for calculating delta times
default: J2000 (seconds since 2000-01-01T00:00:00)
TYPE: input data type
None: determined from input variable dimensions
drift: drift buoys or satellite/airborne altimetry (time per data point)
grid: spatial grids or images (single time for all data points)
TIME: input time standard or input type
GPS: leap seconds needed
TAI: leap seconds needed (TAI = GPS + 19 seconds)
UTC: no leap seconds needed
datetime: numpy datatime array in UTC
EPSG: input coordinate system
default: 3031 Polar Stereographic South, WGS84
METHOD: interpolation method
bilinear: quick bilinear interpolation
spline: scipy bivariate spline interpolation
linear, nearest: scipy regular grid interpolations
EXTRAPOLATE: extrapolate with nearest-neighbors
CUTOFF: Extrapolation cutoff in kilometers
set to np.inf to extrapolate for all points
FILL_VALUE: output invalid value (default NaN)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
PROGRAM DEPENDENCIES:
time.py: utilities for calculating time operations
model.py: retrieves tide model parameters for named tide models
spatial: utilities for reading, writing and operating on spatial data
utilities.py: download and management utilities for syncing files
calc_astrol_longitudes.py: computes the basic astronomical mean longitudes
calc_delta_time.py: calculates difference between universal and dynamic time
convert_ll_xy.py: convert lat/lon points to and from projected coordinates
infer_minor_corrections.py: return corrections for minor constituents
load_constituent.py: loads parameters for a given tidal constituent
load_nodal_corrections.py: load the nodal corrections for tidal constituents
predict_tide.py: predict tides at single times using harmonic constants
predict_tide_drift.py: predict tidal elevations using harmonic constants
read_tide_model.py: extract tidal harmonic constants from OTIS tide models
read_netcdf_model.py: extract tidal harmonic constants from netcdf models
read_GOT_model.py: extract tidal harmonic constants from GSFC GOT models
read_FES_model.py: extract tidal harmonic constants from FES tide models
bilinear_interp.py: bilinear interpolation of data to coordinates
nearest_extrap.py: nearest-neighbor extrapolation of data to coordinates
UPDATE HISTORY:
Updated 09/2021: refactor to use model class for files and attributes
Updated 07/2021: can use numpy datetime arrays as input time variable
added function for determining the input spatial variable type
added check that tide model directory is accessible
Updated 06/2021: added new Gr1km-v2 1km Greenland model from ESR
add try/except for input projection strings
Updated 05/2021: added option for extrapolation cutoff in kilometers
Updated 03/2021: added TPXO9-atlas-v4 in binary OTIS format
simplified netcdf inputs to be similar to binary OTIS read program
Updated 02/2021: replaced numpy bool to prevent deprecation warning
Updated 12/2020: added valid data extrapolation with nearest_extrap
Updated 11/2020: added model constituents from TPXO9-atlas-v3
Updated 08/2020: using builtin time operations.
calculate difference in leap seconds from start of epoch
using conversion protocols following pyproj-2 updates
Updated 07/2020: added function docstrings, FES2014 and TPXO9-atlas-v2
use merged delta time files combining biannual, monthly and daily files
Updated 03/2020: added TYPE, TIME, FILL_VALUE and METHOD options
Written 03/2020
"""
from __future__ import print_function
import os
import pyproj
import numpy as np
import pyTMD.time
import pyTMD.model
import pyTMD.spatial
import pyTMD.utilities
from pyTMD.calc_delta_time import calc_delta_time
from pyTMD.infer_minor_corrections import infer_minor_corrections
from pyTMD.predict_tide import predict_tide
from pyTMD.predict_tide_drift import predict_tide_drift
from pyTMD.read_tide_model import extract_tidal_constants
from pyTMD.read_netcdf_model import extract_netcdf_constants
from pyTMD.read_GOT_model import extract_GOT_constants
from pyTMD.read_FES_model import extract_FES_constants
#-- PURPOSE: compute tides at points and times using tide model algorithms
def compute_tide_corrections(x, y, delta_time, DIRECTORY=None, MODEL=None,
ATLAS_FORMAT='netcdf', GZIP=False, DEFINITION_FILE=None, EPSG=3031,
EPOCH=(2000,1,1,0,0,0), TYPE='drift', TIME='UTC', METHOD='spline',
EXTRAPOLATE=False, CUTOFF=10.0, FILL_VALUE=np.nan):
"""
Compute tides at points and times using tidal harmonics
Arguments
---------
x: x-coordinates in projection EPSG
y: y-coordinates in projection EPSG
delta_time: seconds since EPOCH or datetime array
Keyword arguments
-----------------
DIRECTORY: working data directory for tide models
MODEL: Tide model to use in correction
ATLAS_FORMAT: ATLAS tide model format (OTIS, netcdf)
GZIP: Tide model files are gzip compressed
DEFINITION_FILE: Tide model definition file for use as correction
EPOCH: time period for calculating delta times
default: J2000 (seconds since 2000-01-01T00:00:00)
TYPE: input data type
None: determined from input variable dimensions
drift: drift buoys or satellite/airborne altimetry (time per data point)
grid: spatial grids or images (single time per image)
TIME: time type if need to compute leap seconds to convert to UTC
GPS: leap seconds needed
TAI: leap seconds needed (TAI = GPS + 19 seconds)
UTC: no leap seconds needed
datetime: numpy datatime array in UTC
EPSG: input coordinate system
default: 3031 Polar Stereographic South, WGS84
METHOD: interpolation method
bilinear: quick bilinear interpolation
spline: scipy bivariate spline interpolation
linear, nearest: scipy regular grid interpolations
EXTRAPOLATE: extrapolate with nearest-neighbors
CUTOFF: Extrapolation cutoff in kilometers
set to np.inf to extrapolate for all points
FILL_VALUE: output invalid value (default NaN)
Returns
-------
tide: tidal elevation at coordinates and time in meters
"""
#-- check that tide directory is accessible
try:
os.access(DIRECTORY, os.F_OK)
except:
raise FileNotFoundError("Invalid tide directory")
#-- get parameters for tide model
if DEFINITION_FILE is not None:
model = pyTMD.model(DIRECTORY).from_file(DEFINITION_FILE)
else:
model = pyTMD.model(DIRECTORY, format=ATLAS_FORMAT,
compressed=GZIP).elevation(MODEL)
#-- determine input data type based on variable dimensions
if not TYPE:
TYPE = pyTMD.spatial.data_type(x, y, delta_time)
#-- reform coordinate dimensions for input grids
if (TYPE.lower() == 'grid') and (np.size(x) != np.size(y)):
x,y = np.meshgrid(np.copy(x),np.copy(y))
#-- converting x,y from EPSG to latitude/longitude
try:
#-- EPSG projection code string or int
crs1 = pyproj.CRS.from_string("epsg:{0:d}".format(int(EPSG)))
except (ValueError,pyproj.exceptions.CRSError):
#-- Projection SRS string
crs1 = pyproj.CRS.from_string(EPSG)
crs2 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
lon,lat = transformer.transform(x.flatten(), y.flatten())
#-- assert delta time is an array
delta_time = np.atleast_1d(delta_time)
#-- calculate leap seconds if specified
if (TIME.upper() == 'GPS'):
GPS_Epoch_Time = pyTMD.time.convert_delta_time(0, epoch1=EPOCH,
epoch2=(1980,1,6,0,0,0), scale=1.0)
GPS_Time = pyTMD.time.convert_delta_time(delta_time, epoch1=EPOCH,
epoch2=(1980,1,6,0,0,0), scale=1.0)
#-- calculate difference in leap seconds from start of epoch
leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \
pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))
elif (TIME.upper() == 'TAI'):
#-- TAI time is ahead of GPS time by 19 seconds
GPS_Epoch_Time = pyTMD.time.convert_delta_time(-19.0, epoch1=EPOCH,
epoch2=(1980,1,6,0,0,0), scale=1.0)
GPS_Time = pyTMD.time.convert_delta_time(delta_time-19.0, epoch1=EPOCH,
epoch2=(1980,1,6,0,0,0), scale=1.0)
#-- calculate difference in leap seconds from start of epoch
leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \
pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))
else:
leap_seconds = 0.0
#-- convert delta times or datetimes objects
if (TIME.lower() == 'datetime'):
#-- convert delta time array from datetime object
#-- to days relative to 1992-01-01T00:00:00
t = pyTMD.time.convert_datetime(delta_time,
epoch=(1992,1,1,0,0,0))/86400.0
else:
#-- convert time to days relative to Jan 1, 1992 (48622mjd)
t = pyTMD.time.convert_delta_time(delta_time - leap_seconds,
epoch1=EPOCH, epoch2=(1992,1,1,0,0,0), scale=(1.0/86400.0))
#-- delta time (TT - UT1) file
delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])
#-- read tidal constants and interpolate to grid points
if model.format in ('OTIS','ATLAS'):
amp,ph,D,c = extract_tidal_constants(lon, lat, model.grid_file,
model.model_file, model.projection, TYPE=model.type,
METHOD=METHOD, EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF,
GRID=model.format)
deltat = np.zeros_like(t)
elif (model.format == 'netcdf'):
amp,ph,D,c = extract_netcdf_constants(lon, lat, model.grid_file,
model.model_file, TYPE=model.type, METHOD=METHOD,
EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF, SCALE=model.scale,
GZIP=model.compressed)
deltat = np.zeros_like(t)
elif (model.format == 'GOT'):
amp,ph,c = extract_GOT_constants(lon, lat, model.model_file,
METHOD=METHOD, EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF,
SCALE=model.scale, GZIP=model.compressed)
#-- interpolate delta times from calendar dates to tide time
deltat = calc_delta_time(delta_file, t)
elif (model.format == 'FES'):
amp,ph = extract_FES_constants(lon, lat, model.model_file,
TYPE=model.type, VERSION=model.version, METHOD=METHOD,
EXTRAPOLATE=EXTRAPOLATE, CUTOFF=CUTOFF, SCALE=model.scale,
GZIP=model.compressed)
#-- available model constituents
c = model.constituents
#-- interpolate delta times from calendar dates to tide time
deltat = calc_delta_time(delta_file, t)
#-- calculate complex phase in radians for Euler's
cph = -1j*ph*np.pi/180.0
#-- calculate constituent oscillation
hc = amp*np.exp(cph)
#-- predict tidal elevations at time and infer minor corrections
if (TYPE.lower() == 'grid'):
ny,nx = np.shape(x); nt = len(t)
tide = np.ma.zeros((ny,nx,nt),fill_value=FILL_VALUE)
tide.mask = np.zeros((ny,nx,nt),dtype=bool)
for i in range(nt):
TIDE = predict_tide(t[i], hc, c,
DELTAT=deltat[i], CORRECTIONS=model.format)
MINOR = infer_minor_corrections(t[i], hc, c,
DELTAT=deltat[i], CORRECTIONS=model.format)
#-- add major and minor components and reform grid
tide[:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))
tide.mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask), (ny,nx))
elif (TYPE.lower() == 'drift'):
npts = len(t)
tide = np.ma.zeros((npts), fill_value=FILL_VALUE)
tide.mask = np.any(hc.mask,axis=1)
tide.data[:] = predict_tide_drift(t, hc, c,
DELTAT=deltat, CORRECTIONS=model.format)
minor = infer_minor_corrections(t, hc, c,
DELTAT=deltat, CORRECTIONS=model.format)
tide.data[:] += minor.data[:]
#-- replace invalid values with fill value
tide.data[tide.mask] = tide.fill_value
#-- return the tide correction
return tide | 0.865764 | 0.748881 |
from django import test
from django.shortcuts import resolve_url
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from django_opt_out.models import OptOut
from django_opt_out.plugins.sparkpost import send_email, signals
from .test_views import CaptureSignal
class SparkPostHookTests(TestCase):
def test_opt_out_created(self):
self.assertEqual(0, OptOut.objects.all().count())
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=list_unsubscribe, content_type="application/json")
opt_out = OptOut.objects.all().first()
self.assertEqual('<EMAIL>', opt_out.email)
self.assertIsNotNone(opt_out.data)
@CaptureSignal(signals.list_unsubscribe)
def test_list_unsubscribe(self, handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=list_unsubscribe, content_type="application/json")
self.assertTrue(handler.called)
args, kwargs = handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@CaptureSignal(signals.link_unsubscribe)
def test_link_unsubscribe(self, handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=link_unsubscribe, content_type="application/json")
self.assertTrue(handler.called)
args, kwargs = handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@CaptureSignal(signals.list_unsubscribe, 'list_handler')
@CaptureSignal(signals.link_unsubscribe, 'link_handler')
def test_multi(self, link_handler, list_handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=unsubscribe_multiple, content_type="application/json")
self.assertTrue(link_handler.called)
args, kwargs = link_handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
self.assertTrue(list_handler.called)
args, kwargs = list_handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@override_settings(SPARKPOST_API_KEY='not-valid')
@patch('django_opt_out.plugins.sparkpost.hooks.client.suppression_list.create')
def test_confirm_creates_suppression(self, create):
url = resolve_url("django_opt_out:OptOutConfirm")
test.Client().post(url, data={'email': '<EMAIL>'})
self.assertTrue(create.called)
# noinspection PyShadowingNames
def test_send_mail_template(mocker, settings):
render_to_string = mocker.patch('django_opt_out.plugins.sparkpost.render_to_string')
render_to_string.return_value = \
"<NAME> uczył dzieci swoje. " \
"Na głowie przy tym stojąc wiele lat. " \
"Rzekł jeden z synów: – Tak bardzo się boję. " \
"O ciebie ojcze, boś już stary dziad."
to = settings.ADMINS[0][1] if settings.ADMINS else '<EMAIL>'
from django_opt_out.utils import get_opt_out_path
ctx = {
'unsubscribe': settings.BASE_URL + get_opt_out_path(to, 'testing')
}
settings.EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# To actually test sending an email, comment out mocking of EmailMultiAlternatives.send
send = mocker.patch('django.core.mail.message.EmailMultiAlternatives.send')
send_email(subject='Alicja w krainie czarów', to=to, template_html='notused.html', ctx=ctx)
assert send.called
def test_plain_email_send():
from django_opt_out.utils import get_opt_out_path
unsubscribe = get_opt_out_path("", 'some', 'tags', 'controlling', 'questionnaire')
# unsubscribe link will not have a domain name and scheme
# you can build prefix from request, but I prefer to set it in settings
from django.conf import settings
unsubscribe = settings.BASE_URL + unsubscribe
body = 'Hello, Regards\n\nUnsubscribe: ' + unsubscribe
from django.core import mail
message = mail.EmailMultiAlternatives(body=body, to=['<EMAIL>'])
message.extra_headers['List-Unsubscribe'] = "<{}>".format(unsubscribe)
message.send()
list_unsubscribe = """[{"msys":{"unsubscribe_event":{"type":"list_unsubscribe","campaign_id":"Example Campaign Name","customer_id":"1","delv_method":"esmtp","event_id":"92356927693813856","friendly_from":"<EMAIL>","ip_address":"127.0.0.1","ip_pool":"Example-Ip-Pool","mailfrom":"<EMAIL>","message_id":"000443ee14578172be22","msg_from":"<EMAIL>","msg_size":"1337","num_retries":"2","queue_time":"12","rcpt_meta":{"customKey":"customValue"},"rcpt_tags":["male","US"],"rcpt_to":"<EMAIL>","raw_rcpt_to":"<EMAIL>","rcpt_type":"cc","routing_domain":"example.com","sending_ip":"127.0.0.1","subaccount_id":"101","subject":"Summer deals are here!","template_id":"templ-1234","template_version":"1","timestamp":"1454442600","transmission_id":"65832150921904138"}}}]""" # noqa E501
link_unsubscribe = """[{"msys":{"unsubscribe_event":{"type":"link_unsubscribe","campaign_id":"Example Campaign Name","customer_id":"1","delv_method":"esmtp","event_id":"92356927693813856","friendly_from":"<EMAIL>","ip_address":"127.0.0.1","ip_pool":"Example-Ip-Pool","mailfrom":"<EMAIL>","message_id":"000443ee14578172be22","msg_from":"<EMAIL>","msg_size":"1337","num_retries":"2","queue_time":"12","rcpt_meta":{"customKey":"customValue"},"rcpt_tags":["male","US"],"rcpt_to":"<EMAIL>","raw_rcpt_to":"<EMAIL>","rcpt_type":"cc","routing_domain":"example.com","sending_ip":"127.0.0.1","subaccount_id":"101","subject":"Summer deals are here!","template_id":"templ-1234","template_version":"1","timestamp":"1454442600","transmission_id":"65832150921904138","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36"}}}]""" # noqa E501
unsubscribe_multiple = """
[
{
"msys": {
"unsubscribe_event": {
"type": "list_unsubscribe",
"campaign_id": "Example Campaign Name",
"customer_id": "1",
"delv_method": "esmtp",
"event_id": "92356927693813856",
"friendly_from": "<EMAIL>",
"ip_address": "127.0.0.1",
"ip_pool": "Example-Ip-Pool",
"mailfrom": "<EMAIL>",
"message_id": "000443ee14578172be22",
"msg_from": "<EMAIL>",
"msg_size": "1337",
"num_retries": "2",
"queue_time": "12",
"rcpt_meta": {
"customKey": "customValue"
},
"rcpt_tags": [
"male",
"US"
],
"rcpt_to": "<EMAIL>",
"raw_rcpt_to": "<EMAIL>",
"rcpt_type": "cc",
"routing_domain": "example.com",
"sending_ip": "127.0.0.1",
"subaccount_id": "101",
"subject": "Summer deals are here!",
"template_id": "templ-1234",
"template_version": "1",
"timestamp": "1454442600",
"transmission_id": "65832150921904138"
}
}
},
{
"msys": {
"unsubscribe_event": {
"type": "link_unsubscribe",
"campaign_id": "Example Campaign Name",
"customer_id": "1",
"delv_method": "esmtp",
"event_id": "92356927693813856",
"friendly_from": "<EMAIL>",
"ip_address": "127.0.0.1",
"ip_pool": "Example-Ip-Pool",
"mailfrom": "<EMAIL>",
"message_id": "000443ee14578172be22",
"msg_from": "<EMAIL>",
"msg_size": "1337",
"num_retries": "2",
"queue_time": "12",
"rcpt_meta": {
"customKey": "customValue"
},
"rcpt_tags": [
"male",
"US"
],
"rcpt_to": "<EMAIL>",
"raw_rcpt_to": "<EMAIL>",
"rcpt_type": "cc",
"routing_domain": "example.com",
"sending_ip": "127.0.0.1",
"subaccount_id": "101",
"subject": "Summer deals are here!",
"template_id": "templ-1234",
"template_version": "1",
"timestamp": "1454442600",
"transmission_id": "65832150921904138",
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36"
}
}
}
]
""" | tests/test_sparkpost.py | from django import test
from django.shortcuts import resolve_url
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from django_opt_out.models import OptOut
from django_opt_out.plugins.sparkpost import send_email, signals
from .test_views import CaptureSignal
class SparkPostHookTests(TestCase):
def test_opt_out_created(self):
self.assertEqual(0, OptOut.objects.all().count())
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=list_unsubscribe, content_type="application/json")
opt_out = OptOut.objects.all().first()
self.assertEqual('<EMAIL>', opt_out.email)
self.assertIsNotNone(opt_out.data)
@CaptureSignal(signals.list_unsubscribe)
def test_list_unsubscribe(self, handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=list_unsubscribe, content_type="application/json")
self.assertTrue(handler.called)
args, kwargs = handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@CaptureSignal(signals.link_unsubscribe)
def test_link_unsubscribe(self, handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=link_unsubscribe, content_type="application/json")
self.assertTrue(handler.called)
args, kwargs = handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@CaptureSignal(signals.list_unsubscribe, 'list_handler')
@CaptureSignal(signals.link_unsubscribe, 'link_handler')
def test_multi(self, link_handler, list_handler):
url = resolve_url('django_opt_out_sparkpost:SparkPostUnsubscribeWebhook')
test.Client().post(url, data=unsubscribe_multiple, content_type="application/json")
self.assertTrue(link_handler.called)
args, kwargs = link_handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
self.assertTrue(list_handler.called)
args, kwargs = list_handler.call_args
self.assertEqual('<EMAIL>', kwargs['email'])
@override_settings(SPARKPOST_API_KEY='not-valid')
@patch('django_opt_out.plugins.sparkpost.hooks.client.suppression_list.create')
def test_confirm_creates_suppression(self, create):
url = resolve_url("django_opt_out:OptOutConfirm")
test.Client().post(url, data={'email': '<EMAIL>'})
self.assertTrue(create.called)
# noinspection PyShadowingNames
def test_send_mail_template(mocker, settings):
render_to_string = mocker.patch('django_opt_out.plugins.sparkpost.render_to_string')
render_to_string.return_value = \
"<NAME> uczył dzieci swoje. " \
"Na głowie przy tym stojąc wiele lat. " \
"Rzekł jeden z synów: – Tak bardzo się boję. " \
"O ciebie ojcze, boś już stary dziad."
to = settings.ADMINS[0][1] if settings.ADMINS else '<EMAIL>'
from django_opt_out.utils import get_opt_out_path
ctx = {
'unsubscribe': settings.BASE_URL + get_opt_out_path(to, 'testing')
}
settings.EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# To actually test sending an email, comment out mocking of EmailMultiAlternatives.send
send = mocker.patch('django.core.mail.message.EmailMultiAlternatives.send')
send_email(subject='Alicja w krainie czarów', to=to, template_html='notused.html', ctx=ctx)
assert send.called
def test_plain_email_send():
from django_opt_out.utils import get_opt_out_path
unsubscribe = get_opt_out_path("", 'some', 'tags', 'controlling', 'questionnaire')
# unsubscribe link will not have a domain name and scheme
# you can build prefix from request, but I prefer to set it in settings
from django.conf import settings
unsubscribe = settings.BASE_URL + unsubscribe
body = 'Hello, Regards\n\nUnsubscribe: ' + unsubscribe
from django.core import mail
message = mail.EmailMultiAlternatives(body=body, to=['<EMAIL>'])
message.extra_headers['List-Unsubscribe'] = "<{}>".format(unsubscribe)
message.send()
list_unsubscribe = """[{"msys":{"unsubscribe_event":{"type":"list_unsubscribe","campaign_id":"Example Campaign Name","customer_id":"1","delv_method":"esmtp","event_id":"92356927693813856","friendly_from":"<EMAIL>","ip_address":"127.0.0.1","ip_pool":"Example-Ip-Pool","mailfrom":"<EMAIL>","message_id":"000443ee14578172be22","msg_from":"<EMAIL>","msg_size":"1337","num_retries":"2","queue_time":"12","rcpt_meta":{"customKey":"customValue"},"rcpt_tags":["male","US"],"rcpt_to":"<EMAIL>","raw_rcpt_to":"<EMAIL>","rcpt_type":"cc","routing_domain":"example.com","sending_ip":"127.0.0.1","subaccount_id":"101","subject":"Summer deals are here!","template_id":"templ-1234","template_version":"1","timestamp":"1454442600","transmission_id":"65832150921904138"}}}]""" # noqa E501
link_unsubscribe = """[{"msys":{"unsubscribe_event":{"type":"link_unsubscribe","campaign_id":"Example Campaign Name","customer_id":"1","delv_method":"esmtp","event_id":"92356927693813856","friendly_from":"<EMAIL>","ip_address":"127.0.0.1","ip_pool":"Example-Ip-Pool","mailfrom":"<EMAIL>","message_id":"000443ee14578172be22","msg_from":"<EMAIL>","msg_size":"1337","num_retries":"2","queue_time":"12","rcpt_meta":{"customKey":"customValue"},"rcpt_tags":["male","US"],"rcpt_to":"<EMAIL>","raw_rcpt_to":"<EMAIL>","rcpt_type":"cc","routing_domain":"example.com","sending_ip":"127.0.0.1","subaccount_id":"101","subject":"Summer deals are here!","template_id":"templ-1234","template_version":"1","timestamp":"1454442600","transmission_id":"65832150921904138","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36"}}}]""" # noqa E501
unsubscribe_multiple = """
[
{
"msys": {
"unsubscribe_event": {
"type": "list_unsubscribe",
"campaign_id": "Example Campaign Name",
"customer_id": "1",
"delv_method": "esmtp",
"event_id": "92356927693813856",
"friendly_from": "<EMAIL>",
"ip_address": "127.0.0.1",
"ip_pool": "Example-Ip-Pool",
"mailfrom": "<EMAIL>",
"message_id": "000443ee14578172be22",
"msg_from": "<EMAIL>",
"msg_size": "1337",
"num_retries": "2",
"queue_time": "12",
"rcpt_meta": {
"customKey": "customValue"
},
"rcpt_tags": [
"male",
"US"
],
"rcpt_to": "<EMAIL>",
"raw_rcpt_to": "<EMAIL>",
"rcpt_type": "cc",
"routing_domain": "example.com",
"sending_ip": "127.0.0.1",
"subaccount_id": "101",
"subject": "Summer deals are here!",
"template_id": "templ-1234",
"template_version": "1",
"timestamp": "1454442600",
"transmission_id": "65832150921904138"
}
}
},
{
"msys": {
"unsubscribe_event": {
"type": "link_unsubscribe",
"campaign_id": "Example Campaign Name",
"customer_id": "1",
"delv_method": "esmtp",
"event_id": "92356927693813856",
"friendly_from": "<EMAIL>",
"ip_address": "127.0.0.1",
"ip_pool": "Example-Ip-Pool",
"mailfrom": "<EMAIL>",
"message_id": "000443ee14578172be22",
"msg_from": "<EMAIL>",
"msg_size": "1337",
"num_retries": "2",
"queue_time": "12",
"rcpt_meta": {
"customKey": "customValue"
},
"rcpt_tags": [
"male",
"US"
],
"rcpt_to": "<EMAIL>",
"raw_rcpt_to": "<EMAIL>",
"rcpt_type": "cc",
"routing_domain": "example.com",
"sending_ip": "127.0.0.1",
"subaccount_id": "101",
"subject": "Summer deals are here!",
"template_id": "templ-1234",
"template_version": "1",
"timestamp": "1454442600",
"transmission_id": "65832150921904138",
"user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36"
}
}
}
]
""" | 0.448909 | 0.161122 |
import design
import debug
from tech import drc, info
from vector import vector
import contact
from ptx import ptx
from globals import OPTS
class single_level_column_mux(design.design):
"""
This module implements the columnmux bitline cell used in the design.
Creates a single columnmux cell.
"""
def __init__(self, tx_size):
name="single_level_column_mux_{}".format(tx_size)
design.design.__init__(self, name)
debug.info(2, "create single column mux cell: {0}".format(name))
c = reload(__import__(OPTS.bitcell))
self.mod_bitcell = getattr(c, OPTS.bitcell)
self.bitcell = self.mod_bitcell()
self.ptx_width = tx_size * drc["minwidth_tx"]
self.add_pin_list(["bl", "br", "bl_out", "br_out", "sel", "gnd"])
self.create_layout()
def create_layout(self):
self.add_ptx()
self.pin_height = 2*self.m2_width
self.width = self.bitcell.width
self.height = self.nmos2.uy() + self.pin_height
self.connect_poly()
self.add_gnd_rail()
self.add_bitline_pins()
self.connect_bitlines()
self.add_wells()
def add_bitline_pins(self):
""" Add the top and bottom pins to this cell """
bl_pos = vector(self.bitcell.get_pin("BL").lx(), 0)
br_pos = vector(self.bitcell.get_pin("BR").lx(), 0)
# bl and br
self.add_layout_pin(text="bl",
layer="metal2",
offset=bl_pos + vector(0,self.height - self.pin_height),
height=self.pin_height)
self.add_layout_pin(text="br",
layer="metal2",
offset=br_pos + vector(0,self.height - self.pin_height),
height=self.pin_height)
# bl_out and br_out
self.add_layout_pin(text="bl_out",
layer="metal2",
offset=bl_pos,
height=self.pin_height)
self.add_layout_pin(text="br_out",
layer="metal2",
offset=br_pos,
height=self.pin_height)
def add_ptx(self):
""" Create the two pass gate NMOS transistors to switch the bitlines"""
# Adds nmos1,nmos2 to the module
self.nmos = ptx(width=self.ptx_width)
self.add_mod(self.nmos)
# Space it in the center
nmos1_position = self.nmos.active_offset.scale(0,1) + vector(0.5*self.bitcell.width-0.5*self.nmos.active_width,0)
self.nmos1=self.add_inst(name="mux_tx1",
mod=self.nmos,
offset=nmos1_position)
self.connect_inst(["bl", "sel", "bl_out", "gnd"])
# This aligns it directly above the other tx with gates abutting
nmos2_position = nmos1_position + vector(0,self.nmos.active_height + self.poly_space)
self.nmos2=self.add_inst(name="mux_tx2",
mod=self.nmos,
offset=nmos2_position)
self.connect_inst(["br", "sel", "br_out", "gnd"])
def connect_poly(self):
""" Connect the poly gate of the two pass transistors """
height=self.nmos2.get_pin("G").uy() - self.nmos1.get_pin("G").by()
self.add_layout_pin(text="sel",
layer="poly",
offset=self.nmos1.get_pin("G").ll(),
height=height)
def connect_bitlines(self):
""" Connect the bitlines to the mux transistors """
# These are on metal2
bl_pin = self.get_pin("bl")
br_pin = self.get_pin("br")
bl_out_pin = self.get_pin("bl_out")
br_out_pin = self.get_pin("br_out")
# These are on metal1
nmos1_s_pin = self.nmos1.get_pin("S")
nmos1_d_pin = self.nmos1.get_pin("D")
nmos2_s_pin = self.nmos2.get_pin("S")
nmos2_d_pin = self.nmos2.get_pin("D")
# Add vias to bl, br_out, nmos2/S, nmos1/D
self.add_via_center(layers=("metal1","via1","metal2"),
offset=bl_pin.bc())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=br_out_pin.uc())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=nmos2_s_pin.center())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=nmos1_d_pin.center())
# bl -> nmos2/D on metal1
# bl_out -> nmos2/S on metal2
self.add_path("metal1",[bl_pin.ll(), vector(nmos2_d_pin.cx(),bl_pin.by()), nmos2_d_pin.center()])
# halfway up, move over
mid1 = bl_out_pin.uc().scale(1,0.5)+nmos2_s_pin.bc().scale(0,0.5)
mid2 = bl_out_pin.uc().scale(0,0.5)+nmos2_s_pin.bc().scale(1,0.5)
self.add_path("metal2",[bl_out_pin.uc(), mid1, mid2, nmos2_s_pin.bc()])
# br -> nmos1/D on metal2
# br_out -> nmos1/S on metal1
self.add_path("metal1",[br_out_pin.uc(), vector(nmos1_s_pin.cx(),br_out_pin.uy()), nmos1_s_pin.center()])
# halfway up, move over
mid1 = br_pin.bc().scale(1,0.5)+nmos1_d_pin.uc().scale(0,0.5)
mid2 = br_pin.bc().scale(0,0.5)+nmos1_d_pin.uc().scale(1,0.5)
self.add_path("metal2",[br_pin.bc(), mid1, mid2, nmos1_d_pin.uc()])
def add_gnd_rail(self):
""" Add the gnd rails through the cell to connect to the bitcell array """
gnd_pins = self.bitcell.get_pins("gnd")
for gnd_pin in gnd_pins:
# only use vertical gnd pins that span the whole cell
if gnd_pin.layer == "metal2" and gnd_pin.height >= self.bitcell.height:
gnd_position = vector(gnd_pin.lx(), 0)
self.add_layout_pin(text="gnd",
layer="metal2",
offset=gnd_position,
height=self.height)
def add_wells(self):
""" Add a well and implant over the whole cell. Also, add the pwell contact (if it exists) """
# find right most gnd rail
gnd_pins = self.bitcell.get_pins("gnd")
right_gnd = None
for gnd_pin in gnd_pins:
if right_gnd == None or gnd_pin.lx()>right_gnd.lx():
right_gnd = gnd_pin
# Add to the right (first) gnd rail
m1m2_offset = right_gnd.bc() + vector(0,0.5*self.nmos.poly_height)
self.add_via_center(layers=("metal1", "via1", "metal2"),
offset=m1m2_offset)
active_offset = right_gnd.bc() + vector(0,0.5*self.nmos.poly_height)
self.add_via_center(layers=("active", "contact", "metal1"),
offset=active_offset,
implant_type="p",
well_type="p") | compiler/modules/single_level_column_mux.py | import design
import debug
from tech import drc, info
from vector import vector
import contact
from ptx import ptx
from globals import OPTS
class single_level_column_mux(design.design):
"""
This module implements the columnmux bitline cell used in the design.
Creates a single columnmux cell.
"""
def __init__(self, tx_size):
name="single_level_column_mux_{}".format(tx_size)
design.design.__init__(self, name)
debug.info(2, "create single column mux cell: {0}".format(name))
c = reload(__import__(OPTS.bitcell))
self.mod_bitcell = getattr(c, OPTS.bitcell)
self.bitcell = self.mod_bitcell()
self.ptx_width = tx_size * drc["minwidth_tx"]
self.add_pin_list(["bl", "br", "bl_out", "br_out", "sel", "gnd"])
self.create_layout()
def create_layout(self):
self.add_ptx()
self.pin_height = 2*self.m2_width
self.width = self.bitcell.width
self.height = self.nmos2.uy() + self.pin_height
self.connect_poly()
self.add_gnd_rail()
self.add_bitline_pins()
self.connect_bitlines()
self.add_wells()
def add_bitline_pins(self):
""" Add the top and bottom pins to this cell """
bl_pos = vector(self.bitcell.get_pin("BL").lx(), 0)
br_pos = vector(self.bitcell.get_pin("BR").lx(), 0)
# bl and br
self.add_layout_pin(text="bl",
layer="metal2",
offset=bl_pos + vector(0,self.height - self.pin_height),
height=self.pin_height)
self.add_layout_pin(text="br",
layer="metal2",
offset=br_pos + vector(0,self.height - self.pin_height),
height=self.pin_height)
# bl_out and br_out
self.add_layout_pin(text="bl_out",
layer="metal2",
offset=bl_pos,
height=self.pin_height)
self.add_layout_pin(text="br_out",
layer="metal2",
offset=br_pos,
height=self.pin_height)
def add_ptx(self):
""" Create the two pass gate NMOS transistors to switch the bitlines"""
# Adds nmos1,nmos2 to the module
self.nmos = ptx(width=self.ptx_width)
self.add_mod(self.nmos)
# Space it in the center
nmos1_position = self.nmos.active_offset.scale(0,1) + vector(0.5*self.bitcell.width-0.5*self.nmos.active_width,0)
self.nmos1=self.add_inst(name="mux_tx1",
mod=self.nmos,
offset=nmos1_position)
self.connect_inst(["bl", "sel", "bl_out", "gnd"])
# This aligns it directly above the other tx with gates abutting
nmos2_position = nmos1_position + vector(0,self.nmos.active_height + self.poly_space)
self.nmos2=self.add_inst(name="mux_tx2",
mod=self.nmos,
offset=nmos2_position)
self.connect_inst(["br", "sel", "br_out", "gnd"])
def connect_poly(self):
""" Connect the poly gate of the two pass transistors """
height=self.nmos2.get_pin("G").uy() - self.nmos1.get_pin("G").by()
self.add_layout_pin(text="sel",
layer="poly",
offset=self.nmos1.get_pin("G").ll(),
height=height)
def connect_bitlines(self):
""" Connect the bitlines to the mux transistors """
# These are on metal2
bl_pin = self.get_pin("bl")
br_pin = self.get_pin("br")
bl_out_pin = self.get_pin("bl_out")
br_out_pin = self.get_pin("br_out")
# These are on metal1
nmos1_s_pin = self.nmos1.get_pin("S")
nmos1_d_pin = self.nmos1.get_pin("D")
nmos2_s_pin = self.nmos2.get_pin("S")
nmos2_d_pin = self.nmos2.get_pin("D")
# Add vias to bl, br_out, nmos2/S, nmos1/D
self.add_via_center(layers=("metal1","via1","metal2"),
offset=bl_pin.bc())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=br_out_pin.uc())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=nmos2_s_pin.center())
self.add_via_center(layers=("metal1","via1","metal2"),
offset=nmos1_d_pin.center())
# bl -> nmos2/D on metal1
# bl_out -> nmos2/S on metal2
self.add_path("metal1",[bl_pin.ll(), vector(nmos2_d_pin.cx(),bl_pin.by()), nmos2_d_pin.center()])
# halfway up, move over
mid1 = bl_out_pin.uc().scale(1,0.5)+nmos2_s_pin.bc().scale(0,0.5)
mid2 = bl_out_pin.uc().scale(0,0.5)+nmos2_s_pin.bc().scale(1,0.5)
self.add_path("metal2",[bl_out_pin.uc(), mid1, mid2, nmos2_s_pin.bc()])
# br -> nmos1/D on metal2
# br_out -> nmos1/S on metal1
self.add_path("metal1",[br_out_pin.uc(), vector(nmos1_s_pin.cx(),br_out_pin.uy()), nmos1_s_pin.center()])
# halfway up, move over
mid1 = br_pin.bc().scale(1,0.5)+nmos1_d_pin.uc().scale(0,0.5)
mid2 = br_pin.bc().scale(0,0.5)+nmos1_d_pin.uc().scale(1,0.5)
self.add_path("metal2",[br_pin.bc(), mid1, mid2, nmos1_d_pin.uc()])
def add_gnd_rail(self):
""" Add the gnd rails through the cell to connect to the bitcell array """
gnd_pins = self.bitcell.get_pins("gnd")
for gnd_pin in gnd_pins:
# only use vertical gnd pins that span the whole cell
if gnd_pin.layer == "metal2" and gnd_pin.height >= self.bitcell.height:
gnd_position = vector(gnd_pin.lx(), 0)
self.add_layout_pin(text="gnd",
layer="metal2",
offset=gnd_position,
height=self.height)
def add_wells(self):
    """
    Add a well and implant over the whole cell. Also, add the pwell
    contact (if it exists).

    Fixes: the original closing line carried fused table-export junk
    ("| 0.489015 | 0.2243 |") which made the file unparseable; it has
    been removed. `== None` replaced with the idiomatic `is None`.
    """
    # find right most gnd rail
    gnd_pins = self.bitcell.get_pins("gnd")
    right_gnd = None
    for gnd_pin in gnd_pins:
        if right_gnd is None or gnd_pin.lx() > right_gnd.lx():
            right_gnd = gnd_pin
    # Add the metal1->metal2 via on the right-most gnd rail
    m1m2_offset = right_gnd.bc() + vector(0, 0.5 * self.nmos.poly_height)
    self.add_via_center(layers=("metal1", "via1", "metal2"),
                        offset=m1m2_offset)
    # Stack the p-type substrate contact at the same location so the
    # active contact sits directly under the m1->m2 via.
    active_offset = right_gnd.bc() + vector(0, 0.5 * self.nmos.poly_height)
    self.add_via_center(layers=("active", "contact", "metal1"),
                        offset=active_offset,
                        implant_type="p",
                        well_type="p")
import os
import tensorflow as tf
from keras import backend as K
from keras import metrics
from keras.callbacks import TensorBoard
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Sequential
from keras.optimizers import Adadelta
from keras.preprocessing.image import ImageDataGenerator
from data import load_noise_data
from exporter import export_model
def build_model():
    """Create and compile the convolutional denoising autoencoder.

    Architecture: two Conv+MaxPool encoder stages (28x28x1 -> 7x7x32),
    two Conv+UpSample decoder stages (7x7x32 -> 28x28x32) and a final
    1-filter sigmoid convolution producing the reconstruction.
    """
    model = Sequential()
    # Encoder: 28x28x1 -> 14x14x32 -> 7x7x32
    model.add(Conv2D(input_shape=[28, 28, 1], filters=32,
                     kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(filters=32, kernel_size=(3, 3),
                     activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    # Decoder: 7x7x32 -> 14x14x32 -> 28x28x32
    model.add(Conv2D(filters=32, kernel_size=(3, 3),
                     activation='relu', padding='same'))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(filters=32, kernel_size=(3, 3),
                     activation='relu', padding='same'))
    model.add(UpSampling2D(size=(2, 2)))
    # Reconstruction head: 28x28x1 with per-pixel sigmoid activation
    model.add(Conv2D(filters=1, kernel_size=(3, 3),
                     activation='sigmoid', padding='same'))
    model.summary()
    model.compile(optimizer=Adadelta(),
                  loss=K.binary_crossentropy,
                  metrics=[metrics.binary_accuracy])
    return model
def train(model, x_train, y_train, x_test, y_test, epochs=50, batch_size=128):
    """Fit *model* on (x_train, y_train), validating on (x_test, y_test).

    Progress (including weight images and histograms every 5 epochs) is
    logged for TensorBoard under /tmp/tensorflow/autoencoder.
    """
    tensorboard = TensorBoard(log_dir="/tmp/tensorflow/autoencoder",
                              write_images=True,
                              histogram_freq=5,
                              batch_size=batch_size)
    model.fit(x=x_train, y=y_train,
              epochs=epochs,
              batch_size=batch_size,
              shuffle=True,
              validation_data=(x_test, y_test),
              callbacks=[tensorboard])
def train_with_augmentation(model, x_train, y_train, x_test, y_test, epochs=50, batch_size=128):
    """Fit *model* with on-the-fly augmentation of the training images.

    Training batches are randomly rotated, shifted, sheared and zoomed;
    validation batches pass through an identity generator untouched.
    """
    augmented = ImageDataGenerator(rotation_range=10,
                                   width_shift_range=0.08,
                                   height_shift_range=0.08,
                                   shear_range=0.3,
                                   zoom_range=0.3)
    plain = ImageDataGenerator()
    augmented.fit(x_train)
    train_flow = augmented.flow(x_train, y_train, batch_size=batch_size)
    test_flow = plain.flow(x_test, y_test, batch_size=batch_size)
    tensorboard = TensorBoard(log_dir="/tmp/tensorflow/autoencoder",
                              write_images=True,
                              histogram_freq=0,
                              batch_size=batch_size)
    model.fit_generator(train_flow,
                        steps_per_epoch=500,
                        epochs=epochs,
                        validation_steps=50,
                        validation_data=test_flow,
                        callbacks=[tensorboard])
def main():
    """Train the denoising autoencoder on noisy MNIST and export it.

    Fixes: os.path.exists + os.mkdir replaced with os.makedirs(exist_ok=True)
    (no check-then-create race); fused table-export junk removed from the
    final line.
    """
    x_train, x_train_noisy, _, x_test, x_test_noisy, _ = load_noise_data()
    model = build_model()
    # Noisy images are the input; the clean images are the target.
    train(model, x_train_noisy, x_train, x_test_noisy, x_test, epochs=50)
    os.makedirs('out', exist_ok=True)
    export_model(tf.train.Saver(), ["conv2d_1_input"], ["conv2d_5/Sigmoid"], "mnist_autoencoder")
    model.save("out/autoencoder.h5")


if __name__ == '__main__':
    main()
import tensorflow as tf
from keras import backend as K
from keras import metrics
from keras.callbacks import TensorBoard
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Sequential
from keras.optimizers import Adadelta
from keras.preprocessing.image import ImageDataGenerator
from data import load_noise_data
from exporter import export_model
def build_model():
model = Sequential([
# 28*28*1
# Encoder
Conv2D(
input_shape=[28, 28, 1],
filters=32,
kernel_size=(3, 3),
activation='relu',
padding='same'),
MaxPooling2D(
pool_size=(2, 2),
padding='same'),
# 14*14*32
Conv2D(
filters=32,
kernel_size=(3, 3),
activation='relu',
padding='same'),
MaxPooling2D(
pool_size=(2, 2),
padding='same'),
# 7*7*32
# Decoder
Conv2D(
filters=32,
kernel_size=(3, 3),
activation='relu',
padding='same'),
UpSampling2D(
size=(2, 2)),
# 8*8*32
Conv2D(
filters=32,
kernel_size=(3, 3),
activation='relu',
padding='same'),
UpSampling2D(
size=(2, 2)),
# 32*32*16
Conv2D(
filters=1,
kernel_size=(3, 3),
activation='sigmoid',
padding='same')
# 32*32*1
])
model.summary()
model.compile(optimizer=Adadelta(),
loss=K.binary_crossentropy,
metrics=[metrics.binary_accuracy])
return model
def train(model, x_train, y_train, x_test, y_test, epochs=50, batch_size=128):
model.fit(x=x_train, y=y_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
validation_data=(x_test, y_test),
callbacks=[TensorBoard(
log_dir="/tmp/tensorflow/autoencoder",
write_images=True,
histogram_freq=5,
batch_size=batch_size
)])
def train_with_augmentation(model, x_train, y_train, x_test, y_test, epochs=50, batch_size=128):
gen = ImageDataGenerator(rotation_range=10, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.3)
test_gen = ImageDataGenerator()
gen.fit(x_train)
train_generator = gen.flow(x_train, y_train, batch_size=batch_size)
test_generator = test_gen.flow(x_test, y_test, batch_size=batch_size)
model.fit_generator(train_generator,
steps_per_epoch=500,
epochs=epochs,
validation_steps=50,
validation_data=test_generator,
callbacks=[TensorBoard(
log_dir="/tmp/tensorflow/autoencoder",
write_images=True,
histogram_freq=0,
batch_size=batch_size
)])
def main():
x_train, x_train_noisy, _, x_test, x_test_noisy, _ = load_noise_data()
model = build_model()
train(model, x_train_noisy, x_train, x_test_noisy, x_test, epochs=50)
if not os.path.exists('out'):
os.mkdir('out')
export_model(tf.train.Saver(), ["conv2d_1_input"], ["conv2d_5/Sigmoid"], "mnist_autoencoder")
model.save("out/autoencoder.h5")
if __name__ == '__main__':
main() | 0.787482 | 0.505981 |
import unittest
from ie.isde import ComplexTypes, ISDEDatasetMetadata, RDFNamespaces
class ISDETools(unittest.TestCase):
    """Tests for ISDEDatasetMetadata ISO parsing and the RDF/type constants.

    NOTE(review): both metadata records are fetched over the network at
    class-definition time, so this suite requires connectivity.

    Fix: assertEqual(a == b, True) replaced by assertEqual(a, b) throughout,
    so a failing test reports both values instead of just "False != True".
    """

    _ie_marine_data__dataset_1000 = r"https://irishspatialdataexchange.blob.core.windows.net/metadata/xml/ie_marine_data__dataset_1000.xml"
    _ie_nbdc_dataset_BioMar = r"http://www.isde.ie/geonetwork/srv/api/records/ie.nbdc.dataset.BioMar/formatters/xml"
    _md = ISDEDatasetMetadata().from_iso(_ie_marine_data__dataset_1000)
    _md2 = ISDEDatasetMetadata().from_iso(_ie_nbdc_dataset_BioMar)

    def test_from_iso_dataset_title(self):
        self.assertEqual(self._md.title, 'CE0613 Site Survey')

    def test_from_iso_dataset_date(self):
        self.assertEqual(self._md.date_issued, '2018-11-29')

    def test_from_iso_dataset_identifier(self):
        self.assertEqual(self._md.identifier, "ie.marine.data:dataset.1000")

    def test_from_iso_bounding_box_north(self):
        self.assertEqual(self._md2.bounding_box['north'], 55.44532946)

    def test_from_iso_bounding_box_south(self):
        self.assertEqual(self._md2.bounding_box['south'], 51.42459778)

    def test_from_iso_bounding_box_west(self):
        self.assertEqual(self._md2.bounding_box['west'], -10.60604422)

    def test_from_iso_bounding_box_east(self):
        self.assertEqual(self._md2.bounding_box['east'], -5.76884641)

    def test_from_iso_bounding_box_to_geojson(self):
        self.assertEqual(self._md2.bounding_box_to_geojson(), '{"type": "Polygon", "coordinates": [[[-10.60604422, 51.42459778], [-10.60604422, 55.44532946], [-5.76884641, 55.44532946], [-5.76884641, 51.42459778], [-10.60604422, 51.42459778]]]}')

    def test_from_iso_bounding_box_to_wkt(self):
        # NOTE(review): the expected WKT ring does not close back on its
        # first point; this pins current library behavior -- verify upstream.
        self.assertEqual(self._md2.bounding_box_to_wkt(), 'POLYGON ((-10.60604422 51.42459778,-10.60604422 55.44532946,-5.76884641 55.44532946,-5.76884641 51.42459778,-5.76884641 55.44532946))')

    def test_from_iso_temporal_extent_end(self):
        self.assertEqual(self._md2.temporal_extent['end'], '1996-12-31T00:00:00')

    def test_from_iso_temporal_extent_start(self):
        self.assertEqual(self._md2.temporal_extent['start'], '1993-01-01T00:00:00')

    def test_rdf_namespaces_dcat_prefix(self):
        self.assertEqual(RDFNamespaces.DCAT['ns'], 'dcat')

    def test_rdf_namespaces_dcat_url(self):
        self.assertEqual(RDFNamespaces.DCAT['url'], 'http://www.w3.org/ns/dcat#')

    def test_complex_types_timeperiod(self):
        self.assertEqual(ComplexTypes.TIMEPERIOD.value, dict(start=None, end=None))
# Fix: fused table-export junk removed from the unittest.main() line.
if __name__ == '__main__':
    unittest.main()
from ie.isde import ComplexTypes, ISDEDatasetMetadata, RDFNamespaces
class ISDETools(unittest.TestCase):
_ie_marine_data__dataset_1000 = r"https://irishspatialdataexchange.blob.core.windows.net/metadata/xml/ie_marine_data__dataset_1000.xml"
_ie_nbdc_dataset_BioMar = r"http://www.isde.ie/geonetwork/srv/api/records/ie.nbdc.dataset.BioMar/formatters/xml"
_md = ISDEDatasetMetadata().from_iso(_ie_marine_data__dataset_1000)
_md2 = ISDEDatasetMetadata().from_iso(_ie_nbdc_dataset_BioMar)
def test_from_iso_dataset_title(self):
self.assertEqual(self._md.title == 'CE0613 Site Survey', True)
def test_from_iso_dataset_date(self):
self.assertEqual(self._md.date_issued == '2018-11-29', True)
def test_from_iso_dataset_identifier(self):
self.assertEqual(self._md.identifier == "ie.marine.data:dataset.1000", True)
def test_from_iso_bounding_box_north(self):
self.assertEqual(self._md2.bounding_box['north'] == 55.44532946, True)
def test_from_iso_bounding_box_south(self):
self.assertEqual(self._md2.bounding_box['south'] == 51.42459778, True)
def test_from_iso_bounding_box_west(self):
self.assertEqual(self._md2.bounding_box['west'] == -10.60604422, True)
def test_from_iso_bounding_box_east(self):
self.assertEqual(self._md2.bounding_box['east'] == -5.76884641, True)
def test_from_iso_bounding_box_to_geojson(self):
self.assertEqual(self._md2.bounding_box_to_geojson() == '{"type": "Polygon", "coordinates": [[[-10.60604422, 51.42459778], [-10.60604422, 55.44532946], [-5.76884641, 55.44532946], [-5.76884641, 51.42459778], [-10.60604422, 51.42459778]]]}', True)
def test_from_iso_bounding_box_to_wkt(self):
self.assertEqual(self._md2.bounding_box_to_wkt() == 'POLYGON ((-10.60604422 51.42459778,-10.60604422 55.44532946,-5.76884641 55.44532946,-5.76884641 51.42459778,-5.76884641 55.44532946))', True)
def test_from_iso_temporal_extent_end(self):
self.assertEqual(self._md2.temporal_extent['end'] == '1996-12-31T00:00:00', True)
def test_from_iso_temporal_extent_start(self):
self.assertEqual(self._md2.temporal_extent['start'] == '1993-01-01T00:00:00', True)
def test_rdf_namespaces_dcat_prefix(self):
self.assertEqual(RDFNamespaces.DCAT['ns'] == 'dcat', True)
def test_rdf_namespaces_dcat_url(self):
self.assertEqual(RDFNamespaces.DCAT['url'] == 'http://www.w3.org/ns/dcat#', True)
def test_complex_types_timeperiod(self):
self.assertEqual(ComplexTypes.TIMEPERIOD.value == dict(start=None, end=None), True)
if __name__ == '__main__':
unittest.main() | 0.645455 | 0.453746 |
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Gender, Location and the
    custom User model (email login, school/avatar foreign keys, role flags).

    Fix: fused table-export junk removed from the closing bracket line;
    field definitions themselves are untouched (auto-generated).
    """

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('birth_date', models.DateField()),
                ('reader_id', models.CharField(max_length=10000, unique=True)),
                ('image_url', models.TextField(blank=True, default='/static/img/default_profile_img.png')),
                ('date_joined', models.DateField(default=django.utils.timezone.now)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_superuser', models.BooleanField(default=False)),
                ('gender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='gender', to='users.gender')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='location', to='users.location')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Gender',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=254, unique=True)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('birth_date', models.DateField()),
('reader_id', models.CharField(max_length=10000, unique=True)),
('image_url', models.TextField(blank=True, default='/static/img/default_profile_img.png')),
('date_joined', models.DateField(default=django.utils.timezone.now)),
('is_staff', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_superuser', models.BooleanField(default=False)),
('gender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='gender', to='users.gender')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='location', to='users.location')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
] | 0.523908 | 0.167559 |
from .response import Response
from ..simplates import Simplate, SimplateDefaults, SimplateException
class Static(object):
    """A static HTTP resource: fixed raw content served with one media type."""

    def __init__(self, website, fspath, raw, media_type):
        self.website = website
        self.raw = raw
        self.media_type = media_type
        if media_type == 'application/json':
            self.media_type = self.website.media_type_json

    def respond(self, context):
        """Fill the response in *context* with this resource's content."""
        response = context.get('response', Response())
        # XXX Perform HTTP caching here.
        assert type(self.raw) is str  # sanity check
        response.body = self.raw
        response.headers['Content-Type'] = self.media_type
        if not self.media_type.startswith('text/'):
            return response
        # Text resources may carry an explicit charset; when the website
        # doesn't configure one, let the browser guess.
        charset = self.website.charset_static
        if charset is not None:
            response.charset = charset
            response.headers['Content-Type'] += '; charset=' + charset
        return response
class Dynamic(Simplate):
    """A dynamic HTTP resource rendered via simplates.

    Most defaults live on the website object, so SimplateDefaults is built
    from it; ``.website`` is kept available as it has been historically.

    Fix: fused table-export junk removed from the final raise line.
    """

    def __init__(self, website, fs, raw, default_media_type):
        self.website = website
        initial_context = {'website': website}
        defaults = SimplateDefaults(website.default_renderers_by_media_type,
                                    website.renderer_factories,
                                    initial_context)
        super(Dynamic, self).__init__(defaults, fs, raw, default_media_type)

    def respond(self, state):
        """Render the simplate for *state* and return the filled response.

        The accept value from the dispatch result (indirect negotiation,
        e.g. an extension in the URL) takes precedence over the request's
        Accept header.

        Raises Response(404) when an indirectly-negotiated variant doesn't
        exist, Response(406) when direct negotiation fails.
        """
        accept = dispatch_accept = state['dispatch_result'].extra.get('accept')
        if accept is None:
            accept = state.get('accept_header')
        try:
            content_type, body = super(Dynamic, self).respond(accept, state)
            response = state['response']
            response.body = body
            if 'Content-Type' not in response.headers:
                # Append a charset to text Content-Types if one is known.
                if content_type.startswith('text/') and response.charset is not None:
                    content_type += '; charset=' + response.charset
                response.headers['Content-Type'] = content_type
            return response
        except SimplateException as e:
            if dispatch_accept is not None:  # indirect negotiation
                raise Response(404)
            else:  # direct negotiation
                msg = "The following media types are available: %s."
                msg %= ', '.join(e.available_types)
                raise Response(406, msg.encode('US-ASCII'))
from ..simplates import Simplate, SimplateDefaults, SimplateException
class Static(object):
"""Model a static HTTP resource.
"""
def __init__(self, website, fspath, raw, media_type):
self.website = website
self.raw = raw
self.media_type = media_type
if media_type == 'application/json':
self.media_type = self.website.media_type_json
def respond(self, context):
response = context.get('response', Response())
# XXX Perform HTTP caching here.
assert type(self.raw) is str # sanity check
response.body = self.raw
response.headers['Content-Type'] = self.media_type
if self.media_type.startswith('text/'):
charset = self.website.charset_static
if charset is None:
pass # Let the browser guess.
else:
response.charset = charset
response.headers['Content-Type'] += '; charset=' + charset
return response
class Dynamic(Simplate):
"""Model a dynamic HTTP resource using simplates.
Most defaults are in website, so make SimplateDefaults from that.
Make .website available as it has been historically.
Figure out which accept header to use.
Append a charset to text Content-Types if one is known.
"""
def __init__(self, website, fs, raw, default_media_type):
self.website = website
initial_context = { 'website': website }
defaults = SimplateDefaults(website.default_renderers_by_media_type,
website.renderer_factories,
initial_context)
super(Dynamic, self).__init__(defaults, fs, raw, default_media_type)
def respond(self, state):
accept = dispatch_accept = state['dispatch_result'].extra.get('accept')
if accept is None:
accept = state.get('accept_header')
try:
content_type, body = super(Dynamic, self).respond(accept, state)
response = state['response']
response.body = body
if 'Content-Type' not in response.headers:
if content_type.startswith('text/') and response.charset is not None:
content_type += '; charset=' + response.charset
response.headers['Content-Type'] = content_type
return response
except SimplateException as e:
# find an Accept header
if dispatch_accept is not None: # indirect negotiation
raise Response(404)
else: # direct negotiation
msg = "The following media types are available: %s."
msg %= ', '.join(e.available_types)
raise Response(406, msg.encode('US-ASCII')) | 0.480235 | 0.113187 |
from easyai.base_name.model_name import ModelName
from easyai.base_name.backbone_name import BackboneName
from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.block_name import LayerType, BlockType
from easyai.base_name.loss_name import LossType
from easyai.loss.utility.cross_entropy2d import CrossEntropy2d
from easyai.model.base_block.utility.upsample_layer import Upsample
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.seg.pspnet_block import PyramidPooling
from easyai.model.base_block.seg.encnet_block import EncNetBlockName
from easyai.model.base_block.seg.encnet_block import JPUBlock
from easyai.model.utility.base_model import *
from easyai.model.backbone.utility.backbone_factory import BackboneFactory
class PSPNetSeg(BaseModel):
    """PSPNet semantic segmentation model.

    ResNet-101 backbone, optionally followed by a JPU (8x output stride),
    then a pyramid pooling module, a 3x3 conv head, dropout, a 1x1
    classifier conv and bilinear upsampling back to input resolution.
    Trained with 2D cross-entropy (ignore label 250).

    Fixes: leftover debug ``print(key, x.shape)`` removed from forward()
    (it ran for every block of every forward pass); fused table-export
    junk removed from the final line.
    """

    def __init__(self, data_channel=3, class_num=2):
        super().__init__()
        self.set_name(ModelName.PSPNetSeg)
        self.data_channel = data_channel
        self.class_number = class_num
        # When True, a JPU block is inserted so only 8x upsampling is needed.
        self.is_jpu = True
        self.bn_name = NormalizationType.BatchNormalize2d
        self.activation_name = ActivationType.ReLU
        self.factory = BackboneFactory()
        self.create_block_list()

    def create_block_list(self):
        """Build the ordered block list that defines the network."""
        self.clear_list()
        backbone = self.factory.get_base_model(BackboneName.ResNet101)
        base_out_channels = backbone.get_outchannel_list()
        self.add_block_list(BlockType.BaseNet, backbone, base_out_channels[-1])
        if self.is_jpu:
            jup = JPUBlock(layers='4,8,31,34', in_planes=(512, 1024, 2048), width=512,
                           bn_name=self.bn_name, activation_name=self.activation_name)
            self.add_block_list(jup.get_name(), jup, 512 + 512 + 512 + 512)
            scale_factor = 8
        else:
            scale_factor = 32
        psp = PyramidPooling(2048, bn_name=self.bn_name,
                             activation_name=self.activation_name)
        self.add_block_list(psp.get_name(), psp, 2048 * 2)
        conv1 = ConvBNActivationBlock(in_channels=2048 * 2,
                                      out_channels=512,
                                      kernel_size=3,
                                      padding=1,
                                      bias=False,
                                      bnName=self.bn_name,
                                      activationName=self.activation_name)
        self.add_block_list(conv1.get_name(), conv1, 512)
        dropout = nn.Dropout(0.1)
        self.add_block_list(LayerType.Dropout, dropout, self.block_out_channels[-1])
        conv2 = nn.Conv2d(512, self.class_number, 1)
        self.add_block_list(LayerType.Convolutional, conv2, self.class_number)
        layer = Upsample(scale_factor=scale_factor, mode='bilinear')
        self.add_block_list(layer.get_name(), layer, self.block_out_channels[-1])
        self.create_loss()

    def create_loss(self, input_dict=None):
        """Attach the segmentation loss (CrossEntropy2d, ignore_index=250)."""
        self.lossList = []
        loss = CrossEntropy2d(ignore_index=250)
        self.add_block_list(LossType.CrossEntropy2d, loss, self.block_out_channels[-1])
        self.lossList.append(loss)

    def forward(self, x):
        """Run the block list, dispatching multi-input blocks by key name."""
        base_outputs = []
        layer_outputs = []
        output = []
        for key, block in self._modules.items():
            if BlockType.BaseNet in key:
                base_outputs = block(x)
                x = base_outputs[-1]
            elif LayerType.RouteLayer in key:
                x = block(layer_outputs, base_outputs)
            elif EncNetBlockName.JPUBlock in key:
                x = block(layer_outputs, base_outputs)
            elif LossType.CrossEntropy2d in key:
                # Loss blocks are applied externally; record the logits.
                output.append(x)
            else:
                x = block(x)
            layer_outputs.append(x)
        return output
from easyai.base_name.backbone_name import BackboneName
from easyai.base_name.block_name import NormalizationType, ActivationType
from easyai.base_name.block_name import LayerType, BlockType
from easyai.base_name.loss_name import LossType
from easyai.loss.utility.cross_entropy2d import CrossEntropy2d
from easyai.model.base_block.utility.upsample_layer import Upsample
from easyai.model.base_block.utility.utility_block import ConvBNActivationBlock
from easyai.model.base_block.seg.pspnet_block import PyramidPooling
from easyai.model.base_block.seg.encnet_block import EncNetBlockName
from easyai.model.base_block.seg.encnet_block import JPUBlock
from easyai.model.utility.base_model import *
from easyai.model.backbone.utility.backbone_factory import BackboneFactory
class PSPNetSeg(BaseModel):
def __init__(self, data_channel=3, class_num=2):
super().__init__()
self.set_name(ModelName.PSPNetSeg)
self.data_channel = data_channel
self.class_number = class_num
self.is_jpu = True
self.bn_name = NormalizationType.BatchNormalize2d
self.activation_name = ActivationType.ReLU
self.factory = BackboneFactory()
self.create_block_list()
def create_block_list(self):
self.clear_list()
backbone = self.factory.get_base_model(BackboneName.ResNet101)
base_out_channels = backbone.get_outchannel_list()
self.add_block_list(BlockType.BaseNet, backbone, base_out_channels[-1])
if self.is_jpu:
jup = JPUBlock(layers='4,8,31,34', in_planes=(512, 1024, 2048), width=512,
bn_name=self.bn_name, activation_name=self.activation_name)
self.add_block_list(jup.get_name(), jup, 512 + 512 + 512 + 512)
scale_factor = 8
else:
scale_factor = 32
psp = PyramidPooling(2048, bn_name=self.bn_name,
activation_name=self.activation_name)
self.add_block_list(psp.get_name(), psp, 2048 * 2)
conv1 = ConvBNActivationBlock(in_channels=2048 * 2,
out_channels=512,
kernel_size=3,
padding=1,
bias=False,
bnName=self.bn_name,
activationName=self.activation_name)
self.add_block_list(conv1.get_name(), conv1, 512)
dropout = nn.Dropout(0.1)
self.add_block_list(LayerType.Dropout, dropout, self.block_out_channels[-1])
conv2 = nn.Conv2d(512, self.class_number, 1)
self.add_block_list(LayerType.Convolutional, conv2, self.class_number)
layer = Upsample(scale_factor=scale_factor, mode='bilinear')
self.add_block_list(layer.get_name(), layer, self.block_out_channels[-1])
self.create_loss()
def create_loss(self, input_dict=None):
self.lossList = []
loss = CrossEntropy2d(ignore_index=250)
self.add_block_list(LossType.CrossEntropy2d, loss, self.block_out_channels[-1])
self.lossList.append(loss)
def forward(self, x):
base_outputs = []
layer_outputs = []
output = []
for key, block in self._modules.items():
if BlockType.BaseNet in key:
base_outputs = block(x)
x = base_outputs[-1]
elif LayerType.RouteLayer in key:
x = block(layer_outputs, base_outputs)
elif EncNetBlockName.JPUBlock in key:
x = block(layer_outputs, base_outputs)
elif LossType.CrossEntropy2d in key:
output.append(x)
else:
x = block(x)
layer_outputs.append(x)
print(key, x.shape)
return output | 0.864425 | 0.191517 |
import datetime
from dateutil.relativedelta import relativedelta
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from MESAeveryday import login_manager, app, bcrypt
from flask_login import UserMixin
from flask import flash
import os
from sqlalchemy import Column, Integer, String, create_engine, ForeignKey, DateTime, Date, or_, and_, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
# db_connection uses mysql+pymysql as otherwise certain libraries that are not supported by python3 will need to be installed
# Check link to it here: https://stackoverflow.com/questions/22252397/importerror-no-module-named-mysqldb
#db_connection uses mysql+pymysql as otherwise certain libraries that are not supported by python3 will need to be installed
#check link to it here: https://stackoverflow.com/questions/22252397/importerror-no-module-named-mysqldb
# db_connection = 'mysql+pymysql://' + os.environ['MESAusername'] + ':' + os.environ['MESApassword'] + '@' + os.environ['MESAhostname'] + ':3306/' + os.environ['MESAusername']
db_connection = 'mysql+pymysql://' + os.environ['MESAusername'] + ':' + os.environ['MESApassword'] + '@' + os.environ['MESAhostname'] + ':3306/' + os.environ['MESAusername']
# Create a session with the database
engine = create_engine(db_connection, pool_recycle=3600)
Base = declarative_base(engine)
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
@login_manager.user_loader
def load_user(user_id):
    """Load a user by primary key for flask-login session management.

    Returns None (treated as "not logged in") when the lookup fails; the
    session is rolled back so it stays usable.

    Fix: bare ``except:`` narrowed to ``except Exception:`` so that
    SystemExit/KeyboardInterrupt are not swallowed.
    """
    try:
        return session.query(User).filter(User.id == user_id).first()
    except Exception:
        session.rollback()
        return None
def close_session():
    # Release the module-level SQLAlchemy session's connection back to the pool.
    session.close()
#All classes here are based on a table in the database. If a change is made to the database, those changes must be reflected here as well
#Class for the "users" table
class User(Base, UserMixin):
    """ORM model for the "users" table; also serves as the flask-login user.

    Fix: the password column name had been mangled to '<PASSWORD>' (redaction
    artifact); restored to 'password' to match the attribute name -- confirm
    against the actual table schema.
    """
    __tablename__ = 'users'

    id = Column('user_id', Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    username = Column(String)
    email = Column(String)
    role = Column(String)
    school_id = Column(Integer, ForeignKey("schools.school_id"))
    avatar_id = Column(Integer, ForeignKey("avatars.id"))
    password = Column('password', String)
    last_login = Column(DateTime)

    school = relationship("School", foreign_keys=[school_id], lazy='subquery')
    avatar = relationship("Avatar", foreign_keys=[avatar_id], lazy='subquery')

    def __init__(self, username, first_name, last_name, email, password, school_id):
        self.username = username
        self.email = email
        self.avatar_id = 1  # new accounts start with the default avatar
        self.password = password
        self.first_name = first_name
        self.last_name = last_name
        self.school_id = school_id
        self.role = 'user'  # non-admin by default
def get_reset_token(self, expires_sec=1800):
    # Issue a signed, time-limited token embedding this user's id, used in
    # password-reset links; expires after expires_sec seconds (default 30 min).
    s = Serializer(app.config['SECRET_KEY'], expires_sec)
    return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
    # Decode a token produced by get_reset_token and return the matching
    # User, or None when the token is invalid/expired or the lookup fails.
    s = Serializer(app.config['SECRET_KEY'])
    try:
        user_id = s.loads(token)['user_id']
        return session.query(User).filter(User.id==user_id).first()
    except:
        session.rollback()
        return None
def get_all_username():
try:
return session.query(User.username)
except:
session.rollback()
return None
def validate_username(username):
    """Return True iff a user already exists with the given username field.

    *username* is a form field; its value is read from ``.data``.
    """
    try:
        match = session.query(User).filter(User.username == username.data).first()
    except:
        session.rollback()
        match = None
    return match is not None
def validate_email(email):
    """Return True iff a user already exists with the given email field.

    *email* is a form field; its value is read from ``.data``.
    """
    try:
        match = session.query(User).filter(User.email == email.data).first()
    except:
        session.rollback()
        match = None
    return match is not None
def add_new_user(new_user):
    # Persist a new User row; on failure the transaction is rolled back
    # and the error is silently dropped (caller gets no signal).
    try:
        session.add(new_user)
        session.commit()
    except:
        session.rollback()
def get_user_by_email(email):
try:
return session.query(User).filter(User.email == email).first()
except:
session.rollback()
return None
def get_user_by_username(username):
try:
return session.query(User).filter(User.username == username).first()
except:
session.rollback()
return None
def delete_user_by_id(id):
try:
session.query(User).filter(User.id == id).delete()
session.commit()
except:
session.rollback()
return None
def reset_pwd(id, hashed_pwd):
    """Set the stored (hashed) password of user *id*.

    Returns True on success, False when the update failed and was rolled
    back.

    Fixes: the assignment target had been mangled to the redaction
    placeholder '<PASSWORD>' (invalid syntax); restored to the hashed_pwd
    argument. Bare ``except:`` narrowed to ``except Exception:``.
    NOTE(review): defined without self/cls like its siblings -- callers
    apparently invoke it on the class; signature left unchanged.
    """
    try:
        row = session.query(User).filter(User.id == id).first()
        row.password = hashed_pwd
        session.commit()
    except Exception:
        session.rollback()
        return False
    return True
def update_last_login(id, new_last_login):
try:
row = session.query(User).filter(User.id == id).first()
row.last_login = new_last_login
session.commit()
except:
session.rollback()
return False
return True
def update_name(id, new_first_name, new_last_name):
try:
row = session.query(User).filter(User.id == id).first()
row.first_name = new_first_name
row.last_name = new_last_name
session.commit()
except:
session.rollback()
return False
return True
def update_email(id, new_email):
try:
row = session.query(User).filter(User.id == id).first()
row.email = new_email
session.commit()
except:
session.rollback()
return False
return True
def update_school(id, new_school_id):
try:
row = session.query(User).filter(User.id == id).first()
row.school_id = new_school_id
session.commit()
except:
session.rollback()
return False
return True
def update_avatar(id, new_avatar_id):
try:
row = session.query(User).filter(User.id == id).first()
row.avatar_id = new_avatar_id
session.commit()
except:
session.rollback()
return False
return True
def get_badge_progress(user_id, badge_id):
try:
return session.execute("SELECT total_points, current_level, to_next_level FROM user_aggregate WHERE user_id = :user_id AND badge_id = :badge_id", {'user_id':user_id, 'badge_id':badge_id}).first()
except:
session.rollback()
return None
def get_record_holders(badge_id, top_score):
try:
return session.execute("SELECT u.first_name, u.last_name, s.school_name, ug.total_points, ug.current_level FROM user_aggregate ug JOIN users u ON ug.user_id = u.user_id JOIN schools s ON u.school_id = s.school_id WHERE ug.badge_id = :badge_id AND ug.total_points = :top_score", {'badge_id':badge_id, 'top_score':top_score})
except:
session.rollback()
return None
def get_users_by_school(school_id):
try:
return session.query(User).filter(User.school_id == school_id)
except:
session.rollback()
return None
# Added by Millen
# Checks if user had an admin role
def verify_role(id):
try:
target = session.query(User).filter(User.id == id).first()
if(target.role == "admin"):
return True
else:
return False
except:
session.rollback()
return False
def delete_innactive_accounts(years_innactive):
try:
results = session.query(User).filter(and_(User.last_login < datetime.datetime.now() - relativedelta(years=years_innactive)), (User.last_login != None)).delete()
session.commit()
return results
except:
session.rollback()
return None
#Class for the "schools" table
class School(Base):
    """Model for the "schools" table plus class-level query helpers."""
    __tablename__ = 'schools'
    school_id = Column(Integer, primary_key=True)
    school_name = Column(String)
    district = Column(String)
    city = Column(String)
    state = Column(String)
    zip_code = Column(String)

    def __init__(self, school_name, district, city, state, zip_code):
        self.school_name = school_name
        self.district = district
        self.city = city
        self.state = state
        self.zip_code = zip_code

    def get_all_schools_names():
        """Return (school_id, school_name) pairs, alphabetical, 'Other' last."""
        try:
            # The union ensures that the "Other" will always be found at the end
            results = session.query(School.school_id, School.school_name).filter(School.school_name != 'Other').order_by(School.school_name.asc())\
                .union(session.query(School.school_id, School.school_name).filter(School.school_name == 'Other'))
            return results
        except Exception:
            session.rollback()
            return None

    def get_school():
        """Return all school names, or None on DB error."""
        try:
            results = session.query(School.school_name).all()
            return results
        except Exception:
            session.rollback()
            return None

    def add_new_school(new_school):
        """Add *new_school* and commit; roll back on failure."""
        try:
            session.add(new_school)
            session.commit()
        except Exception:
            session.rollback()

    def delete_school_by_id(id):
        """Delete school *id*, first moving its users to the 'Other' school."""
        try:
            other_school = School.get_school_by_name('Other')
            users = User.get_users_by_school(id)
            for user in users:
                user.school_id = other_school.school_id
            session.query(School).filter(School.school_id == id).delete()
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_school_by_id(id):
        """Return the school_name row for *id*, or None."""
        try:
            return session.query(School.school_name).filter(School.school_id == id).first()
        except Exception:
            session.rollback()
            return None

    def get_school_by_name(name):
        """Return the School row named *name*, or None."""
        try:
            return session.query(School).filter(School.school_name == name).first()
        except Exception:
            session.rollback()
            return None
#Class for the "badges" table
class Badge(Base):
    """Model for the "badges" table plus class-level query/update helpers."""
    __tablename__ = 'badges'
    badge_id = Column(Integer, primary_key=True)
    badge_name = Column(String)
    color = Column(String)
    icon_id = Column(Integer, ForeignKey("badge_icons.id"))
    level1_points = Column(Integer)
    level2_points = Column(Integer)
    level3_points = Column(Integer)
    level4_points = Column(Integer)
    level5_points = Column(Integer)
    level6_points = Column(Integer)
    level7_points = Column(Integer)
    level8_points = Column(Integer)
    level9_points = Column(Integer)
    level10_points = Column(Integer)
    icon = relationship("Icon", foreign_keys=[icon_id], lazy='subquery')

    def __init__(self, badge_name, color, level1_points, level2_points, level3_points, level4_points,
                 level5_points, level6_points, level7_points, level8_points, level9_points, level10_points):
        self.badge_name = badge_name
        # Bug fix: the color argument was accepted but never stored.
        self.color = color
        self.level1_points = level1_points
        self.level2_points = level2_points
        self.level3_points = level3_points
        self.level4_points = level4_points
        self.level5_points = level5_points
        self.level6_points = level6_points
        self.level7_points = level7_points
        self.level8_points = level8_points
        self.level9_points = level9_points
        self.level10_points = level10_points

    def get_all_badges():
        """Return a query of all badges, or None on DB error."""
        try:
            return session.query(Badge)
        except Exception:
            session.rollback()
            return None

    def get_badge_by_id(badge_id):
        """Return the Badge row with *badge_id*, or None."""
        try:
            return session.query(Badge).filter(Badge.badge_id == badge_id).first()
        except Exception:
            session.rollback()
            return None

    def get_all_badges_names():
        """Return a query of all badge names, or None on DB error."""
        try:
            return session.query(Badge.badge_name)
        except Exception:
            session.rollback()
            return None

    def get_all_badges_id_with_names():
        """Return a query of (badge_id, badge_name) pairs, or None on DB error."""
        try:
            return session.query(Badge.badge_id, Badge.badge_name)
        except Exception:
            session.rollback()
            return None

    def get_badge_name(badge_id):
        """Return a query for the name of badge *badge_id*, or None."""
        try:
            return session.query(Badge.badge_name).filter(Badge.badge_id == badge_id)
        except Exception:
            session.rollback()
            return None

    def get_top_scores(badge_id):
        """Return the three highest distinct non-zero scores for *badge_id*, or None."""
        try:
            return session.execute("SELECT total_points FROM user_aggregate WHERE badge_id = :badge_id AND total_points != 0 GROUP BY total_points ORDER BY total_points DESC LIMIT 3", {'badge_id': badge_id})
        except Exception:
            session.rollback()
            return None

    def update_badge_name(badge_id, new_badge_name):
        """Rename badge *badge_id*; roll back on failure."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == badge_id).first()
            badge.badge_name = new_badge_name
            session.commit()
        except Exception:
            session.rollback()
            return None

    def update_icon(id, new_icon_id):
        """Point badge *id* at icon *new_icon_id*. True on success."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == id).first()
            badge.icon_id = new_icon_id
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False

    def change_points(badge_id, level1_points, level2_points, level3_points, level4_points, level5_points, level6_points, level7_points, level8_points, level9_points, level10_points):
        """Replace all ten level thresholds of badge *badge_id*. True on success."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == badge_id).first()
            badge.level1_points = level1_points
            badge.level2_points = level2_points
            badge.level3_points = level3_points
            badge.level4_points = level4_points
            badge.level5_points = level5_points
            badge.level6_points = level6_points
            badge.level7_points = level7_points
            badge.level8_points = level8_points
            badge.level9_points = level9_points
            badge.level10_points = level10_points
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False
#Class for the "stamps" table
class Stamp(Base, UserMixin):
    """Model for the "stamps" table plus class-level query helpers."""
    __tablename__ = 'stamps'
    stamp_id = Column(Integer, primary_key=True)
    stamp_name = Column(String)
    badge_id = Column(Integer, ForeignKey("badges.badge_id"))
    points = Column(Integer)
    url = Column(String)
    badge = relationship("Badge", foreign_keys=[badge_id], lazy='subquery')

    def __init__(self, stamp_name, badge_id, points, url):
        self.stamp_name = stamp_name
        self.badge_id = badge_id
        self.points = points
        self.url = url

    def get_user_stamps_of_badge(user_id, badge_id):
        """Return (id, name) of stamps of *badge_id* not yet earned by *user_id* since the last reset."""
        try:
            # Compute the most recent annual reset date (stored as month-day).
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            subquery = session.query(UserStamp.stamp_id).filter(and_(UserStamp.user_id == user_id, UserStamp.log_date >= last_reset_date))
            return session.query(Stamp.stamp_id, Stamp.stamp_name).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.notin_(subquery))
        except Exception:
            session.rollback()
            return None

    def get_all_stamps():
        """Return a query of (stamp_id, stamp_name) for every stamp, or None."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name)
        except Exception:
            session.rollback()
            return None

    def get_stamps_of_badge(badge_id):
        """Return (stamp_id, stamp_name) of every stamp under *badge_id*, or None."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name).filter(Stamp.badge_id == badge_id)
        except Exception:
            session.rollback()
            return None

    def get_unearned_stamps_of_badge(user_id, badge_id):
        """Return full Stamp rows of *badge_id* not earned by *user_id* since the last reset."""
        try:
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            subquery = session.query(UserStamp.stamp_id).filter(and_(UserStamp.user_id == user_id, UserStamp.log_date >= last_reset_date))
            return session.query(Stamp).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.notin_(subquery))
        except Exception:
            session.rollback()
            return None

    def get_earned_stamps_of_badge(user_id, badge_id):
        """Return Stamp rows of *badge_id* ever earned by *user_id*, or None."""
        try:
            subquery = session.query(UserStamp.stamp_id).filter(UserStamp.user_id == user_id)
            return session.query(Stamp).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.in_(subquery))
        except Exception:
            session.rollback()
            return None

    def add_stamp(new_stamp):
        """Add *new_stamp* and commit; roll back on failure."""
        try:
            session.add(new_stamp)
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_all_stampid_stampname():
        """Return a list of (stamp_id, stamp_name) tuples, or None on DB error."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name).all()
        except Exception:
            session.rollback()
            return None

    def get_stamp_by_stamp_id(stamp_id):
        """Return the stamp_name row for *stamp_id*, or None."""
        try:
            return session.query(Stamp.stamp_name).filter(Stamp.stamp_id == stamp_id).first()
        except Exception:
            session.rollback()
            return None

    def get_stamp_by_name(name):
        """Return the Stamp row named *name*, or None."""
        try:
            return session.query(Stamp).filter(Stamp.stamp_name == name).first()
        except Exception:
            session.rollback()
            return None

    def delete_stamp_by_id(id):
        """Delete stamp *id*; roll back on failure."""
        try:
            session.query(Stamp).filter(Stamp.stamp_id == id).delete()
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_max_points(badge_id):
        """Return the sum of points of all stamps under *badge_id*, or None."""
        try:
            return session.query(func.sum(Stamp.points).label('max_points')).filter(Stamp.badge_id == badge_id).first()
        except Exception:
            session.rollback()
            return None
#Class for the "user_stamps" table
class UserStamp(Base, UserMixin):
    """Model for the "user_stamps" join table (user earned stamp on a date)."""
    __tablename__ = 'user_stamps'
    user_id = Column(Integer, ForeignKey("users.user_id"), primary_key=True)
    stamp_id = Column(Integer, ForeignKey("stamps.stamp_id"), primary_key=True)
    log_date = Column(DateTime, primary_key=True)
    stamp_date = Column(Date)
    user = relationship("User", foreign_keys=[user_id], lazy='subquery')
    stamp = relationship("Stamp", foreign_keys=[stamp_id], lazy='subquery')

    def __init__(self, user_id, stamp_id, log_date, stamp_date):
        self.user_id = user_id
        self.stamp_id = stamp_id
        self.log_date = log_date
        self.stamp_date = stamp_date

    def earn_stamp(user_id, stamp_id, log_date, stamp_date):
        """Record that *user_id* earned *stamp_id*. True on success."""
        new_UserStamp = UserStamp(user_id, stamp_id, log_date, stamp_date)
        try:
            session.add(new_UserStamp)
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def get_earned_stamps_of_badge(user_id, badge_id):
        """Return (stamp_id, log_date, stamp_date, stamp_name) earned by *user_id* for *badge_id* since the last reset."""
        try:
            # Compute the most recent annual reset date (stored as month-day).
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            return session.query(UserStamp.stamp_id, UserStamp.log_date, UserStamp.stamp_date, Stamp.stamp_name).filter(and_(and_(and_(UserStamp.user_id == user_id, Stamp.stamp_id == UserStamp.stamp_id), UserStamp.log_date >= last_reset_date), Stamp.badge_id == badge_id))
        except Exception:
            session.rollback()
            return None

    def delete_stamp(user_id, stamp_id, stamp_date, log_date):
        """Delete one earned-stamp record matching all four keys. True on success."""
        try:
            Query = session.query(UserStamp).filter(UserStamp.user_id == user_id).filter(UserStamp.stamp_id == stamp_id).filter(UserStamp.stamp_date == stamp_date).filter(UserStamp.log_date == log_date).first()
            if not Query:
                return False
            session.delete(Query)
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True
#Class for the "avatars" table
class Avatar(Base):
    """Model for the "avatars" table (avatar image file names)."""
    __tablename__ = 'avatars'
    id = Column(Integer, primary_key=True)
    file_name = Column(String)

    def __init__(self, file_name):
        self.file_name = file_name

    def get_all_avatars():
        """Return a query of all avatars, or None on DB error."""
        try:
            return session.query(Avatar)
        except Exception:
            session.rollback()
            return None
#Class for the "badge_icons" table
class Icon(Base):
    """Model for the "badge_icons" table (badge icon file names)."""
    __tablename__ = 'badge_icons'
    id = Column(Integer, primary_key=True)
    file_name = Column(String)

    def __init__(self, file_name):
        self.file_name = file_name

    def get_all_icons():
        """Return a query of all icons, or None on DB error."""
        try:
            return session.query(Icon)
        except Exception:
            session.rollback()
            return None
class Reset_Date(Base):
    """Model for the single-row "reset_date" table (annual reset date)."""
    __tablename__ = 'reset_date'
    reset_date = Column(Date, primary_key=True)

    def get_reset_date():
        """Return the stored reset-date row, or None on DB error."""
        try:
            return session.query(Reset_Date).first()
        except Exception:
            session.rollback()
            return None

    def change_date(new_date):
        """Replace the stored reset date with *new_date*. True on success."""
        try:
            date = session.query(Reset_Date).first()
            date.reset_date = new_date
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False
from dateutil.relativedelta import relativedelta
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from MESAeveryday import login_manager, app, bcrypt
from flask_login import UserMixin
from flask import flash
import os
from sqlalchemy import Column, Integer, String, create_engine, ForeignKey, DateTime, Date, or_, and_, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
# db_connection uses mysql+pymysql as otherwise certain libraries that are not supported by python3 will need to be installed
# Check link to it here: https://stackoverflow.com/questions/22252397/importerror-no-module-named-mysqldb
#db_connection uses mysql+pymysql as otherwise certain libraries that are not supported by python3 will need to be installed
#check link to it here: https://stackoverflow.com/questions/22252397/importerror-no-module-named-mysqldb
# db_connection = 'mysql+pymysql://' + os.environ['MESAusername'] + ':' + os.environ['MESApassword'] + '@' + os.environ['MESAhostname'] + ':3306/' + os.environ['MESAusername']
# Connection URL is assembled from environment variables; mysql+pymysql avoids
# the Python-2-only MySQLdb driver. NOTE(review): the database name reuses
# MESAusername — confirm that is intentional.
db_connection = 'mysql+pymysql://' + os.environ['MESAusername'] + ':' + os.environ['MESApassword'] + '@' + os.environ['MESAhostname'] + ':3306/' + os.environ['MESAusername']
# Create a session with the database
# pool_recycle=3600 recycles pooled connections hourly to dodge MySQL's
# server-side idle-connection timeout.
engine = create_engine(db_connection, pool_recycle=3600)
Base = declarative_base(engine)
metadata = Base.metadata
Session = sessionmaker(bind=engine)
# Single module-level session shared by every helper in this file.
session = Session()
@login_manager.user_loader
def load_user(user_id):
    """
    Function used to load a user.
    Used by the login manager to obtain the information of a user who is logged in.
    Returns None if the user cannot be loaded.
    """
    try:
        return session.query(User).filter(User.id == user_id).first()
    except Exception:
        session.rollback()
        return None
def close_session():
    """Close the module-level database session."""
    session.close()
#All classes here are based on a table in the database. If a change is made to the database, those changes must be reflected here as well
#Class for the "users" table
class User(Base, UserMixin):
    """Model for the "users" table plus query/update helpers."""
    __tablename__ = 'users'
    id = Column('user_id', Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    username = Column(String)
    email = Column(String)
    role = Column(String)
    school_id = Column(Integer, ForeignKey("schools.school_id"))
    avatar_id = Column(Integer, ForeignKey("avatars.id"))
    # NOTE(review): the column name had been replaced by an anonymizer
    # placeholder in this copy; 'password' is the obvious original — confirm
    # against the schema.
    password = Column('password', String)
    last_login = Column(DateTime)
    school = relationship("School", foreign_keys=[school_id], lazy='subquery')
    avatar = relationship("Avatar", foreign_keys=[avatar_id], lazy='subquery')

    def __init__(self, username, first_name, last_name, email, password, school_id):
        self.username = username
        self.email = email
        self.avatar_id = 1
        self.password = password
        self.first_name = first_name
        self.last_name = last_name
        self.school_id = school_id
        self.role = 'user'

    def get_reset_token(self, expires_sec=1800):
        """Return a signed, time-limited password-reset token for this user."""
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in *token*, or None if invalid/expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
            return session.query(User).filter(User.id == user_id).first()
        except Exception:
            session.rollback()
            return None

    def get_all_username():
        """Return a query of all usernames, or None on DB error."""
        try:
            return session.query(User.username)
        except Exception:
            session.rollback()
            return None

    def validate_username(username):
        """Return True iff a user with *username*.data already exists."""
        try:
            user = session.query(User).filter(User.username == username.data).first()
        except Exception:
            session.rollback()
            user = None
        return bool(user)

    def validate_email(email):
        """Return True iff a user with *email*.data already exists."""
        try:
            user = session.query(User).filter(User.email == email.data).first()
        except Exception:
            session.rollback()
            user = None
        return bool(user)

    def add_new_user(new_user):
        """Add *new_user* and commit; roll back on failure."""
        try:
            session.add(new_user)
            session.commit()
        except Exception:
            session.rollback()

    def get_user_by_email(email):
        """Return the first User with matching *email*, or None."""
        try:
            return session.query(User).filter(User.email == email).first()
        except Exception:
            session.rollback()
            return None

    def get_user_by_username(username):
        """Return the first User with matching *username*, or None."""
        try:
            return session.query(User).filter(User.username == username).first()
        except Exception:
            session.rollback()
            return None

    def delete_user_by_id(id):
        """Delete the user row with primary key *id*; roll back on failure."""
        try:
            session.query(User).filter(User.id == id).delete()
            session.commit()
        except Exception:
            session.rollback()
            return None

    def reset_pwd(id, hashed_pwd):
        """Store *hashed_pwd* as the password of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            # Fix: assignment value was an anonymizer placeholder; the
            # parameter is the obvious value.
            row.password = hashed_pwd
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def update_last_login(id, new_last_login):
        """Set the last_login timestamp of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            row.last_login = new_last_login
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def update_name(id, new_first_name, new_last_name):
        """Update first and last name of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            row.first_name = new_first_name
            row.last_name = new_last_name
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def update_email(id, new_email):
        """Update the email of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            row.email = new_email
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def update_school(id, new_school_id):
        """Update the school_id of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            row.school_id = new_school_id
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def update_avatar(id, new_avatar_id):
        """Update the avatar_id of user *id*. True on success."""
        try:
            row = session.query(User).filter(User.id == id).first()
            row.avatar_id = new_avatar_id
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def get_badge_progress(user_id, badge_id):
        """Return (total_points, current_level, to_next_level) for the pair, or None."""
        try:
            return session.execute("SELECT total_points, current_level, to_next_level FROM user_aggregate WHERE user_id = :user_id AND badge_id = :badge_id", {'user_id': user_id, 'badge_id': badge_id}).first()
        except Exception:
            session.rollback()
            return None

    def get_record_holders(badge_id, top_score):
        """Return users (with school) holding exactly *top_score* on *badge_id*, or None."""
        try:
            return session.execute("SELECT u.first_name, u.last_name, s.school_name, ug.total_points, ug.current_level FROM user_aggregate ug JOIN users u ON ug.user_id = u.user_id JOIN schools s ON u.school_id = s.school_id WHERE ug.badge_id = :badge_id AND ug.total_points = :top_score", {'badge_id': badge_id, 'top_score': top_score})
        except Exception:
            session.rollback()
            return None

    def get_users_by_school(school_id):
        """Return a query of users belonging to *school_id*, or None."""
        try:
            return session.query(User).filter(User.school_id == school_id)
        except Exception:
            session.rollback()
            return None

    # Added by Millen
    def verify_role(id):
        """Return True iff user *id* exists and has the admin role."""
        try:
            target = session.query(User).filter(User.id == id).first()
            return target.role == "admin"
        except Exception:
            session.rollback()
            return False

    def delete_innactive_accounts(years_innactive):
        """Delete users inactive for more than *years_innactive* years.

        Returns the number of deleted rows, or None on DB error. Users who
        never logged in are kept.
        """
        try:
            cutoff = datetime.datetime.now() - relativedelta(years=years_innactive)
            results = session.query(User).filter(and_(User.last_login < cutoff), (User.last_login != None)).delete()
            session.commit()
            return results
        except Exception:
            session.rollback()
            return None
#Class for the "schools" table
class School(Base):
    """Model for the "schools" table plus class-level query helpers."""
    __tablename__ = 'schools'
    school_id = Column(Integer, primary_key=True)
    school_name = Column(String)
    district = Column(String)
    city = Column(String)
    state = Column(String)
    zip_code = Column(String)

    def __init__(self, school_name, district, city, state, zip_code):
        self.school_name = school_name
        self.district = district
        self.city = city
        self.state = state
        self.zip_code = zip_code

    def get_all_schools_names():
        """Return (school_id, school_name) pairs, alphabetical, 'Other' last."""
        try:
            # The union ensures that the "Other" will always be found at the end
            results = session.query(School.school_id, School.school_name).filter(School.school_name != 'Other').order_by(School.school_name.asc())\
                .union(session.query(School.school_id, School.school_name).filter(School.school_name == 'Other'))
            return results
        except Exception:
            session.rollback()
            return None

    def get_school():
        """Return all school names, or None on DB error."""
        try:
            results = session.query(School.school_name).all()
            return results
        except Exception:
            session.rollback()
            return None

    def add_new_school(new_school):
        """Add *new_school* and commit; roll back on failure."""
        try:
            session.add(new_school)
            session.commit()
        except Exception:
            session.rollback()

    def delete_school_by_id(id):
        """Delete school *id*, first moving its users to the 'Other' school."""
        try:
            other_school = School.get_school_by_name('Other')
            users = User.get_users_by_school(id)
            for user in users:
                user.school_id = other_school.school_id
            session.query(School).filter(School.school_id == id).delete()
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_school_by_id(id):
        """Return the school_name row for *id*, or None."""
        try:
            return session.query(School.school_name).filter(School.school_id == id).first()
        except Exception:
            session.rollback()
            return None

    def get_school_by_name(name):
        """Return the School row named *name*, or None."""
        try:
            return session.query(School).filter(School.school_name == name).first()
        except Exception:
            session.rollback()
            return None
#Class for the "badges" table
class Badge(Base):
    """Model for the "badges" table plus class-level query/update helpers."""
    __tablename__ = 'badges'
    badge_id = Column(Integer, primary_key=True)
    badge_name = Column(String)
    color = Column(String)
    icon_id = Column(Integer, ForeignKey("badge_icons.id"))
    level1_points = Column(Integer)
    level2_points = Column(Integer)
    level3_points = Column(Integer)
    level4_points = Column(Integer)
    level5_points = Column(Integer)
    level6_points = Column(Integer)
    level7_points = Column(Integer)
    level8_points = Column(Integer)
    level9_points = Column(Integer)
    level10_points = Column(Integer)
    icon = relationship("Icon", foreign_keys=[icon_id], lazy='subquery')

    def __init__(self, badge_name, color, level1_points, level2_points, level3_points, level4_points,
                 level5_points, level6_points, level7_points, level8_points, level9_points, level10_points):
        self.badge_name = badge_name
        # Bug fix: the color argument was accepted but never stored.
        self.color = color
        self.level1_points = level1_points
        self.level2_points = level2_points
        self.level3_points = level3_points
        self.level4_points = level4_points
        self.level5_points = level5_points
        self.level6_points = level6_points
        self.level7_points = level7_points
        self.level8_points = level8_points
        self.level9_points = level9_points
        self.level10_points = level10_points

    def get_all_badges():
        """Return a query of all badges, or None on DB error."""
        try:
            return session.query(Badge)
        except Exception:
            session.rollback()
            return None

    def get_badge_by_id(badge_id):
        """Return the Badge row with *badge_id*, or None."""
        try:
            return session.query(Badge).filter(Badge.badge_id == badge_id).first()
        except Exception:
            session.rollback()
            return None

    def get_all_badges_names():
        """Return a query of all badge names, or None on DB error."""
        try:
            return session.query(Badge.badge_name)
        except Exception:
            session.rollback()
            return None

    def get_all_badges_id_with_names():
        """Return a query of (badge_id, badge_name) pairs, or None on DB error."""
        try:
            return session.query(Badge.badge_id, Badge.badge_name)
        except Exception:
            session.rollback()
            return None

    def get_badge_name(badge_id):
        """Return a query for the name of badge *badge_id*, or None."""
        try:
            return session.query(Badge.badge_name).filter(Badge.badge_id == badge_id)
        except Exception:
            session.rollback()
            return None

    def get_top_scores(badge_id):
        """Return the three highest distinct non-zero scores for *badge_id*, or None."""
        try:
            return session.execute("SELECT total_points FROM user_aggregate WHERE badge_id = :badge_id AND total_points != 0 GROUP BY total_points ORDER BY total_points DESC LIMIT 3", {'badge_id': badge_id})
        except Exception:
            session.rollback()
            return None

    def update_badge_name(badge_id, new_badge_name):
        """Rename badge *badge_id*; roll back on failure."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == badge_id).first()
            badge.badge_name = new_badge_name
            session.commit()
        except Exception:
            session.rollback()
            return None

    def update_icon(id, new_icon_id):
        """Point badge *id* at icon *new_icon_id*. True on success."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == id).first()
            badge.icon_id = new_icon_id
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False

    def change_points(badge_id, level1_points, level2_points, level3_points, level4_points, level5_points, level6_points, level7_points, level8_points, level9_points, level10_points):
        """Replace all ten level thresholds of badge *badge_id*. True on success."""
        try:
            badge = session.query(Badge).filter(Badge.badge_id == badge_id).first()
            badge.level1_points = level1_points
            badge.level2_points = level2_points
            badge.level3_points = level3_points
            badge.level4_points = level4_points
            badge.level5_points = level5_points
            badge.level6_points = level6_points
            badge.level7_points = level7_points
            badge.level8_points = level8_points
            badge.level9_points = level9_points
            badge.level10_points = level10_points
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False
#Class for the "stamps" table
class Stamp(Base, UserMixin):
    """Model for the "stamps" table plus class-level query helpers."""
    __tablename__ = 'stamps'
    stamp_id = Column(Integer, primary_key=True)
    stamp_name = Column(String)
    badge_id = Column(Integer, ForeignKey("badges.badge_id"))
    points = Column(Integer)
    url = Column(String)
    badge = relationship("Badge", foreign_keys=[badge_id], lazy='subquery')

    def __init__(self, stamp_name, badge_id, points, url):
        self.stamp_name = stamp_name
        self.badge_id = badge_id
        self.points = points
        self.url = url

    def get_user_stamps_of_badge(user_id, badge_id):
        """Return (id, name) of stamps of *badge_id* not yet earned by *user_id* since the last reset."""
        try:
            # Compute the most recent annual reset date (stored as month-day).
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            subquery = session.query(UserStamp.stamp_id).filter(and_(UserStamp.user_id == user_id, UserStamp.log_date >= last_reset_date))
            return session.query(Stamp.stamp_id, Stamp.stamp_name).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.notin_(subquery))
        except Exception:
            session.rollback()
            return None

    def get_all_stamps():
        """Return a query of (stamp_id, stamp_name) for every stamp, or None."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name)
        except Exception:
            session.rollback()
            return None

    def get_stamps_of_badge(badge_id):
        """Return (stamp_id, stamp_name) of every stamp under *badge_id*, or None."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name).filter(Stamp.badge_id == badge_id)
        except Exception:
            session.rollback()
            return None

    def get_unearned_stamps_of_badge(user_id, badge_id):
        """Return full Stamp rows of *badge_id* not earned by *user_id* since the last reset."""
        try:
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            subquery = session.query(UserStamp.stamp_id).filter(and_(UserStamp.user_id == user_id, UserStamp.log_date >= last_reset_date))
            return session.query(Stamp).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.notin_(subquery))
        except Exception:
            session.rollback()
            return None

    def get_earned_stamps_of_badge(user_id, badge_id):
        """Return Stamp rows of *badge_id* ever earned by *user_id*, or None."""
        try:
            subquery = session.query(UserStamp.stamp_id).filter(UserStamp.user_id == user_id)
            return session.query(Stamp).filter(Stamp.badge_id == badge_id).filter(Stamp.stamp_id.in_(subquery))
        except Exception:
            session.rollback()
            return None

    def add_stamp(new_stamp):
        """Add *new_stamp* and commit; roll back on failure."""
        try:
            session.add(new_stamp)
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_all_stampid_stampname():
        """Return a list of (stamp_id, stamp_name) tuples, or None on DB error."""
        try:
            return session.query(Stamp.stamp_id, Stamp.stamp_name).all()
        except Exception:
            session.rollback()
            return None

    def get_stamp_by_stamp_id(stamp_id):
        """Return the stamp_name row for *stamp_id*, or None."""
        try:
            return session.query(Stamp.stamp_name).filter(Stamp.stamp_id == stamp_id).first()
        except Exception:
            session.rollback()
            return None

    def get_stamp_by_name(name):
        """Return the Stamp row named *name*, or None."""
        try:
            return session.query(Stamp).filter(Stamp.stamp_name == name).first()
        except Exception:
            session.rollback()
            return None

    def delete_stamp_by_id(id):
        """Delete stamp *id*; roll back on failure."""
        try:
            session.query(Stamp).filter(Stamp.stamp_id == id).delete()
            session.commit()
        except Exception:
            session.rollback()
            return None

    def get_max_points(badge_id):
        """Return the sum of points of all stamps under *badge_id*, or None."""
        try:
            return session.query(func.sum(Stamp.points).label('max_points')).filter(Stamp.badge_id == badge_id).first()
        except Exception:
            session.rollback()
            return None
#Class for the "user_stamps" table
class UserStamp(Base, UserMixin):
    """Model for the "user_stamps" join table (user earned stamp on a date)."""
    __tablename__ = 'user_stamps'
    user_id = Column(Integer, ForeignKey("users.user_id"), primary_key=True)
    stamp_id = Column(Integer, ForeignKey("stamps.stamp_id"), primary_key=True)
    log_date = Column(DateTime, primary_key=True)
    stamp_date = Column(Date)
    user = relationship("User", foreign_keys=[user_id], lazy='subquery')
    stamp = relationship("Stamp", foreign_keys=[stamp_id], lazy='subquery')

    def __init__(self, user_id, stamp_id, log_date, stamp_date):
        self.user_id = user_id
        self.stamp_id = stamp_id
        self.log_date = log_date
        self.stamp_date = stamp_date

    def earn_stamp(user_id, stamp_id, log_date, stamp_date):
        """Record that *user_id* earned *stamp_id*. True on success."""
        new_UserStamp = UserStamp(user_id, stamp_id, log_date, stamp_date)
        try:
            session.add(new_UserStamp)
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True

    def get_earned_stamps_of_badge(user_id, badge_id):
        """Return (stamp_id, log_date, stamp_date, stamp_name) earned by *user_id* for *badge_id* since the last reset."""
        try:
            # Compute the most recent annual reset date (stored as month-day).
            reset_date = session.query(Reset_Date.reset_date).first().reset_date.strftime('%m-%d')
            if datetime.datetime.now().strftime('%m-%d') >= reset_date:
                last_reset_date = str(datetime.datetime.now().year) + '-' + str(reset_date)
            else:
                last_reset_date = str(datetime.datetime.now().year - 1) + '-' + str(reset_date)
            return session.query(UserStamp.stamp_id, UserStamp.log_date, UserStamp.stamp_date, Stamp.stamp_name).filter(and_(and_(and_(UserStamp.user_id == user_id, Stamp.stamp_id == UserStamp.stamp_id), UserStamp.log_date >= last_reset_date), Stamp.badge_id == badge_id))
        except Exception:
            session.rollback()
            return None

    def delete_stamp(user_id, stamp_id, stamp_date, log_date):
        """Delete one earned-stamp record matching all four keys. True on success."""
        try:
            Query = session.query(UserStamp).filter(UserStamp.user_id == user_id).filter(UserStamp.stamp_id == stamp_id).filter(UserStamp.stamp_date == stamp_date).filter(UserStamp.log_date == log_date).first()
            if not Query:
                return False
            session.delete(Query)
            session.commit()
        except Exception:
            session.rollback()
            return False
        return True
#Class for the "avatars" table
class Avatar(Base):
    """Model for the "avatars" table (avatar image file names)."""
    __tablename__ = 'avatars'
    id = Column(Integer, primary_key=True)
    file_name = Column(String)

    def __init__(self, file_name):
        self.file_name = file_name

    def get_all_avatars():
        """Return a query of all avatars, or None on DB error."""
        try:
            return session.query(Avatar)
        except Exception:
            session.rollback()
            return None
#Class for the "badge_icons" table
class Icon(Base):
    """Model for the "badge_icons" table (badge icon file names)."""
    __tablename__ = 'badge_icons'
    id = Column(Integer, primary_key=True)
    file_name = Column(String)

    def __init__(self, file_name):
        self.file_name = file_name

    def get_all_icons():
        """Return a query of all icons, or None on DB error."""
        try:
            return session.query(Icon)
        except Exception:
            session.rollback()
            return None
class Reset_Date(Base):
    """Model for the single-row "reset_date" table (annual reset date)."""
    __tablename__ = 'reset_date'
    reset_date = Column(Date, primary_key=True)

    def get_reset_date():
        """Return the stored reset-date row, or None on DB error."""
        try:
            return session.query(Reset_Date).first()
        except Exception:
            session.rollback()
            return None

    def change_date(new_date):
        """Replace the stored reset date with *new_date*. True on success."""
        try:
            date = session.query(Reset_Date).first()
            date.reset_date = new_date
            session.commit()
            return True
        except Exception:
            session.rollback()
            return False
__author__ = 'mnowotka'
#-----------------------------------------------------------------------------------------------------------------------
from chembl_beaker.beaker import app
from bottle import request
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2smiles, _smiles2ctab, _inchi2ctab, _ctab2smarts
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2inchi, _inchi2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _canonicalize_smiles, _ctab2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _smiles2inchi, _smiles2inchiKey
from chembl_beaker.beaker.utils.io import _parseFlag
import base64
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smilesView(data, params):
    """Collect CTAB->SMILES conversion options from *params* and run the conversion."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
        'delimiter': params.get('delimiter', ' '),
        'nameHeader': params.get('nameHeader', 'Name'),
        'includeHeader': _parseFlag(params.get('includeHeader', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
        'kekuleSmiles': _parseFlag(params.get('kekuleSmiles', False)),
    }
    return _ctab2smiles(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smiles")
def ctab2smiles(ctab):
    """
    Converts CTAB to SMILES format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # Decode the URL-safe base64 payload and delegate to the shared view.
    return ctab2smilesView(base64.urlsafe_b64decode(ctab), request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles', method=['OPTIONS', 'POST'], name="ctab2smiles")
def ctab2smiles():
    """
    Converts CTAB to SMILES format. The payload is a single molfile or an SDF file, sent either as a
    multipart 'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smiles
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2smilesView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smartsView(data, params):
    """Resolve CTAB -> SMARTS conversion options from request *params* and run _ctab2smarts."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
    }
    return _ctab2smarts(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smarts")
def ctab2smarts(ctab):
    """
    Converts CTAB to SMARTS format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    NOTE(review): the kekuleSmiles query parameter in the examples below is not read by ctab2smartsView
    (it only parses sanitize, removeHs, strictParsing and isomericSmiles) - confirm the examples.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # The path segment is URL-safe base64; decode it back to raw molfile text before converting.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts', method=['OPTIONS', 'POST'], name="ctab2smarts")
def ctab2smarts():
    """
    Converts CTAB to SMARTS format. CTAB is either single molfile or SDF file.
    cURL examples:
    curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smarts
    """
    # Payload comes either from a multipart 'file' upload or from the raw POST body.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2ctabView(data, params):
    """Resolve SMILES -> CTAB conversion options from request *params* and run _smiles2ctab."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', True)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2ctab(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab/<smiles>', method=['OPTIONS', 'GET'], name="smiles2ctab")
def smiles2ctab(smiles):
    """
    Converts SMILES to CTAB. The <smiles> path segment is a urlsafe_base64 encoded string holding one or
    more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET "${BEAKER_ROOT_URL}smiles2ctab/"$(cat rules.smi | base64 -w 0 | tr "+/" "-_")"?computeCoords=0"
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2ctabView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab', method=['OPTIONS', 'POST'], name="smiles2ctab")
def smiles2ctab():
    """
    Converts SMILES to CTAB. Accepts one or more SMILES (or a *.smi file), sent either as a multipart
    'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@rules.smi" -F "computeCoords=0" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return smiles2ctabView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiView(data, params):
    """Resolve SMILES -> InChi conversion options from request *params* and run _smiles2inchi."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2inchi(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchi")
def smiles2inchi(smiles):
    """
    Converts SMILES to InChi. The <smiles> path segment is a urlsafe_base64 encoded string holding one or
    more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2inchiView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi', method=['OPTIONS', 'POST'], name="smiles2inchi")
def smiles2inchi():
    """
    Converts SMILES to InChi. Accepts one or more SMILES (or a *.smi file), sent either as a multipart
    'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return smiles2inchiView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiKeyView(data, params):
    """Resolve SMILES -> InChi Key conversion options from request *params* and run _smiles2inchiKey."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2inchiKey(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchiKey")
def smiles2inchiKey(smiles):
    """
    Converts SMILES to InChi Key. The <smiles> path segment is a urlsafe_base64 encoded string holding one
    or more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2inchiKeyView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey', method=['OPTIONS', 'POST'], name="smiles2inchiKey")
def smiles2inchiKey():
    """
    Converts SMILES to InChi Key. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
    curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    """
    # Payload comes either from a multipart 'file' upload or from the raw POST body.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return smiles2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def canonicalizeSmilesView(data, params):
    """Resolve SMILES canonicalization options from request *params* and run _canonicalize_smiles."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'in_delimiter': params.get('in_delimiter', ' '),
        'out_delimiter': params.get('out_delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'nameHeader': params.get('nameHeader', 'Name'),
        'includeHeader': _parseFlag(params.get('includeHeader', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
        'kekuleSmiles': _parseFlag(params.get('kekuleSmiles', False)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _canonicalize_smiles(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles/<smiles>', method=['OPTIONS', 'GET'], name="canonicalizeSmiles")
def canonicalizeSmiles(smiles):
    """
    Converts SMILES to canonical SMILES. The <smiles> path segment is a urlsafe_base64 encoded string
    holding one or more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")"?out_delimiter=|&nameHeader=foo"
    curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
    curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat isomeric.smi | base64 -w 0 | tr "+/" "-_")"?isomericSmiles=1"
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return canonicalizeSmilesView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles', method=['OPTIONS', 'POST'], name="canonicalizeSmiles")
def canonicalizeSmiles():
    """
    Converts SMILES to canonical SMILES. Accepts one or more SMILES (or a *.smi file), sent either as a
    multipart 'file' upload or as the raw request body.
    cURL examples:
    curl -X POST --data-binary @aspirin_no_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST --data-binary @aspirin_with_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@aspirin_with_header.smi" -F "out_delimiter=|" -F "nameHeader=foo" ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@isomeric.smi" ${BEAKER_ROOT_URL}canonicalizeSmiles
    curl -X POST -F "file=@isomeric.smi" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return canonicalizeSmilesView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab/<inchi>', method=['OPTIONS', 'GET'], name="inchi2ctab")
def inchi2ctab(inchi):
    """
    Converts InChi to CTAB. This method accepts urlsafe_base64 encoded string containing one or multiple InChis.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}inchi2ctab/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    # The path segment is URL-safe base64; decode it back to raw InChi text before converting.
    inchis = base64.urlsafe_b64decode(inchi)
    return _inchi2ctab(inchis)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab', method=['OPTIONS', 'POST'], name="inchi2ctab")
def inchi2ctab():
    """
    Converts InChi to CTAB. Accepts one or more InChis, sent either as a multipart 'file' upload or as
    the raw request body.
    cURL examples:
    curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2ctab
    curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2ctab
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return _inchi2ctab(payload)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiView(data, params):
    """Resolve CTAB -> InChi conversion options from request *params* and run _ctab2inchi."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
    }
    return _ctab2inchi(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchi")
def ctab2inchi(ctab):
    """
    Converts CTAB to InChis. The <ctab> path segment is a urlsafe_base64 encoded string holding a single
    molfile or a concatenation of multiple molfiles.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2inchi/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    molfile = base64.urlsafe_b64decode(ctab)
    return ctab2inchiView(molfile, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi', method=['OPTIONS', 'POST'], name="ctab2inchi")
def ctab2inchi():
    """
    Converts CTAB to InChis. The payload is a single molfile or an SDF file, sent either as a multipart
    'file' upload or as the raw request body.
    cURL examples:
    curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchi
    curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchi
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2inchiView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiKeyView(data, params):
    """Resolve CTAB -> InChi Key conversion options from request *params* and run _ctab2inchiKey."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
    }
    return _ctab2inchiKey(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchiKey")
def ctab2inchiKey(ctab):
    """
    Converts CTAB to InChi Keys. The <ctab> path segment is a urlsafe_base64 encoded string holding a
    single molfile or a concatenation of multiple molfiles.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2inchiKey/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    molfile = base64.urlsafe_b64decode(ctab)
    return ctab2inchiKeyView(molfile, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey', method=['OPTIONS', 'POST'], name="ctab2inchiKey")
def ctab2inchiKey():
    """
    Converts CTAB to InChi Keys. The payload is a single molfile or an SDF file, sent either as a
    multipart 'file' upload or as the raw request body.
    cURL examples:
    curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchiKey
    curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchiKey
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2inchiKeyView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey/<inchi>', method=['OPTIONS', 'GET'], name="inchi2inchiKey")
def inchi2inchiKey(inchi):
    """
    Converts InChis to InChiKeys. The <inchi> path segment is a urlsafe_base64 encoded string holding one
    or more InChis.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}inchi2inchiKey/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    decoded_inchis = base64.urlsafe_b64decode(inchi)
    return _inchi2inchiKey(decoded_inchis)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey', method=['OPTIONS', 'POST'], name="inchi2inchiKey")
def inchi2inchiKey():
    """
    Converts InChis to InChiKeys. Accepts one or more InChis, sent either as a multipart 'file' upload
    or as the raw request body.
    cURL examples:
    curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2inchiKey
    curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2inchiKey
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return _inchi2inchiKey(payload)
#-----------------------------------------------------------------------------------------------------------------------
# chembl_beaker/beaker/core_apps/conversions/views.py
__author__ = 'mnowotka'
#-----------------------------------------------------------------------------------------------------------------------
from chembl_beaker.beaker import app
from bottle import request
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2smiles, _smiles2ctab, _inchi2ctab, _ctab2smarts
from chembl_beaker.beaker.core_apps.conversions.impl import _ctab2inchi, _inchi2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _canonicalize_smiles, _ctab2inchiKey
from chembl_beaker.beaker.core_apps.conversions.impl import _smiles2inchi, _smiles2inchiKey
from chembl_beaker.beaker.utils.io import _parseFlag
import base64
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smilesView(data, params):
    """Resolve CTAB -> SMILES conversion options from request *params* and run _ctab2smiles."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
        'delimiter': params.get('delimiter', ' '),
        'nameHeader': params.get('nameHeader', 'Name'),
        'includeHeader': _parseFlag(params.get('includeHeader', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
        'kekuleSmiles': _parseFlag(params.get('kekuleSmiles', False)),
    }
    return _ctab2smiles(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smiles")
def ctab2smiles(ctab):
    """
    Converts CTAB to SMILES format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}ctab2smiles/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smiles/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # The path segment is URL-safe base64; decode it back to raw molfile text before converting.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2smilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smiles', method=['OPTIONS', 'POST'], name="ctab2smiles")
def ctab2smiles():
    """
    Converts CTAB to SMILES format. The payload is a single molfile or an SDF file, sent either as a
    multipart 'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smiles
    curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smiles
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return ctab2smilesView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2smartsView(data, params):
    """Resolve CTAB -> SMARTS conversion options from request *params* and run _ctab2smarts."""
    opts = {
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'removeHs': _parseFlag(params.get('removeHs', True)),
        'strictParsing': _parseFlag(params.get('strictParsing', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
    }
    return _ctab2smarts(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts/<ctab>', method=['OPTIONS', 'GET'], name="ctab2smarts")
def ctab2smarts(ctab):
    """
    Converts CTAB to SMARTS format. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    NOTE(review): the kekuleSmiles query parameter in the examples below is not read by ctab2smartsView
    (it only parses sanitize, removeHs, strictParsing and isomericSmiles) - confirm the examples.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}ctab2smarts/$(cat isomeric.mol | base64 -w 0 | tr "+/" "-_")?isomericSmiles=1
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat non_kekule.mol | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
    curl -X GET "${BEAKER_ROOT_URL}ctab2smarts/"$(cat explicitHs.mol | base64 -w 0 | tr "+/" "-_")"?removeHs=0"
    """
    # The path segment is URL-safe base64; decode it back to raw molfile text before converting.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2smarts', method=['OPTIONS', 'POST'], name="ctab2smarts")
def ctab2smarts():
    """
    Converts CTAB to SMARTS format. CTAB is either single molfile or SDF file.
    cURL examples:
    curl -X POST -F "file=@isomeric.mol" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@isomeric.mol" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@non_kekule.mol" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}ctab2smarts
    curl -X POST -F "file=@explicitHs.mol" -F "removeHs=0" ${BEAKER_ROOT_URL}ctab2smarts
    """
    # Payload comes either from a multipart 'file' upload or from the raw POST body.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return ctab2smartsView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2ctabView(data, params):
    """Resolve SMILES -> CTAB conversion options from request *params* and run _smiles2ctab."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', True)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2ctab(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab/<smiles>', method=['OPTIONS', 'GET'], name="smiles2ctab")
def smiles2ctab(smiles):
    """
    Converts SMILES to CTAB. The <smiles> path segment is a urlsafe_base64 encoded string holding one or
    more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET "${BEAKER_ROOT_URL}smiles2ctab/"$(cat rules.smi | base64 -w 0 | tr "+/" "-_")"?computeCoords=0"
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2ctab/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2ctabView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2ctab', method=['OPTIONS', 'POST'], name="smiles2ctab")
def smiles2ctab():
    """
    Converts SMILES to CTAB. Accepts one or more SMILES (or a *.smi file), sent either as a multipart
    'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@rules.smi" -F "computeCoords=0" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2ctab
    curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2ctab
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return smiles2ctabView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiView(data, params):
    """Resolve SMILES -> InChi conversion options from request *params* and run _smiles2inchi."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2inchi(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchi")
def smiles2inchi(smiles):
    """
    Converts SMILES to InChi. The <smiles> path segment is a urlsafe_base64 encoded string holding one or
    more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchi/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2inchiView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchi', method=['OPTIONS', 'POST'], name="smiles2inchi")
def smiles2inchi():
    """
    Converts SMILES to InChi. Accepts one or more SMILES (or a *.smi file), sent either as a multipart
    'file' upload or as the raw request body.
    cURL examples:
    curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchi
    curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchi
    """
    # Prefer an uploaded file part; fall back to the raw POST body.
    if len(request.files):
        payload = request.files.values()[0].file.read()
    else:
        payload = request.body.read()
    return smiles2inchiView(payload, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def smiles2inchiKeyView(data, params):
    """Resolve SMILES -> InChi Key conversion options from request *params* and run _smiles2inchiKey."""
    opts = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'delimiter': params.get('delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
    }
    # No explicit titleLine flag and no 'SMILES Name' header in the payload -> treat input as headerless.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        opts['titleLine'] = False
    else:
        opts['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _smiles2inchiKey(data, **opts)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey/<smiles>', method=['OPTIONS', 'GET'], name="smiles2inchiKey")
def smiles2inchiKey(smiles):
    """
    Converts SMILES to InChi Key. The <smiles> path segment is a urlsafe_base64 encoded string holding one
    or more SMILES, optionally preceded by a header line as in the *.smi format.
    cURL examples:
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs.smi | base64 -w 0 | tr "+/" "-_")
    curl -X GET ${BEAKER_ROOT_URL}smiles2inchiKey/$(cat mcs_no_header.smi | base64 -w 0 | tr "+/" "-_")
    """
    raw_smiles = base64.urlsafe_b64decode(smiles)
    return smiles2inchiKeyView(raw_smiles, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/smiles2inchiKey', method=['OPTIONS', 'POST'], name="smiles2inchiKey")
def smiles2inchiKey():
    """
    Converts SMILES to InChi Key. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST -F "file=@aspirin_with_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@aspirin_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@mcs.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
        curl -X POST -F "file=@mcs_no_header.smi" ${BEAKER_ROOT_URL}smiles2inchiKey
    """
    # Uploaded file part takes precedence over the raw request body.
    data = request.files.values()[0].file.read() if len(request.files) else request.body.read()
    return smiles2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def canonicalizeSmilesView(data, params):
    """Translate request parameters into keyword arguments for _canonicalize_smiles."""
    kwargs = {
        'computeCoords': _parseFlag(params.get('computeCoords', False)),
        'in_delimiter': params.get('in_delimiter', ' '),
        'out_delimiter': params.get('out_delimiter', ' '),
        'smilesColumn': int(params.get('smilesColumn', 0)),
        'nameColumn': int(params.get('nameColumn', 1)),
        'sanitize': _parseFlag(params.get('sanitize', True)),
        'nameHeader': params.get('nameHeader', 'Name'),
        'includeHeader': _parseFlag(params.get('includeHeader', True)),
        'isomericSmiles': _parseFlag(params.get('isomericSmiles', False)),
        'kekuleSmiles': _parseFlag(params.get('kekuleSmiles', False)),
    }
    # Without an explicit titleLine parameter, only input starting with the
    # canonical "SMILES Name" header is treated as having a title line.
    if params.get('titleLine') is None and not data.startswith('SMILES Name'):
        kwargs['titleLine'] = False
    else:
        kwargs['titleLine'] = _parseFlag(params.get('titleLine', True))
    return _canonicalize_smiles(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles/<smiles>', method=['OPTIONS', 'GET'], name="canonicalizeSmiles")
def canonicalizeSmiles(smiles):
    """
    Converts SMILES to canonical SMILES. This method accepts urlsafe_base64 encoded string containing single or multiple
    SMILES optionally containing header line, specific to *.smi format.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_no_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}canonicalizeSmiles/$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat aspirin_with_header.smi | base64 -w 0 | tr "+/" "-_")"?out_delimiter=|&nameHeader=foo"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=0"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=0&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat non_kekule.smi | base64 -w 0 | tr "+/" "-_")"?kekuleSmiles=1&sanitize=1"
        curl -X GET "${BEAKER_ROOT_URL}canonicalizeSmiles/"$(cat isomeric.smi | base64 -w 0 | tr "+/" "-_")"?isomericSmiles=1"
    """
    # URL payload is urlsafe-base64 encoded; decode before delegating to the shared view.
    data = base64.urlsafe_b64decode(smiles)
    return canonicalizeSmilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/canonicalizeSmiles', method=['OPTIONS', 'POST'], name="canonicalizeSmiles")
def canonicalizeSmiles():
    """
    Converts SMILES to canonical SMILES. This method accepts single or multiple SMILES or *.smi file.
    cURL examples:
        curl -X POST --data-binary @aspirin_no_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST --data-binary @aspirin_with_header.smi ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@aspirin_with_header.smi" -F "out_delimiter=|" -F "nameHeader=foo" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=0" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=0" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@non_kekule.smi" -F "kekuleSmiles=1" -F "sanitize=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@isomeric.smi" ${BEAKER_ROOT_URL}canonicalizeSmiles
        curl -X POST -F "file=@isomeric.smi" -F "isomericSmiles=1" ${BEAKER_ROOT_URL}canonicalizeSmiles
    """
    # An uploaded file part takes precedence over the raw request body.
    if len(request.files):
        data = request.files.values()[0].file.read()
    else:
        data = request.body.read()
    return canonicalizeSmilesView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab/<inchi>', method=['OPTIONS', 'GET'], name="inchi2ctab")
def inchi2ctab(inchi):
    """
    Converts InChi to CTAB. This method accepts urlsafe_base64 encoded string containing one or multiple InChis.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}inchi2ctab/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    # URL payload is urlsafe-base64 encoded; decode before delegating to the converter.
    inchis = base64.urlsafe_b64decode(inchi)
    return _inchi2ctab(inchis)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2ctab', method=['OPTIONS', 'POST'], name="inchi2ctab")
def inchi2ctab():
    """
    Converts InChi to CTAB. This method accepts one or multiple InChis.
    cURL examples:
        curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2ctab
        curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2ctab
    """
    # An uploaded file part takes precedence over the raw request body.
    if len(request.files):
        inchis = request.files.values()[0].file.read()
    else:
        inchis = request.body.read()
    return _inchi2ctab(inchis)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiView(data, params):
    """Translate request parameters into keyword arguments for _ctab2inchi."""
    # All three options are boolean flags defaulting to True.
    flag_names = ('sanitize', 'removeHs', 'strictParsing')
    kwargs = {name: _parseFlag(params.get(name, True)) for name in flag_names}
    return _ctab2inchi(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchi")
def ctab2inchi(ctab):
    """
    Converts CTAB to InChis. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}ctab2inchi/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    # URL payload is urlsafe-base64 encoded; decode before delegating to the shared view.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2inchiView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchi', method=['OPTIONS', 'POST'], name="ctab2inchi")
def ctab2inchi():
    """
    Converts CTAB to InChis. CTAB is either single molfile or SDF file.
    cURL examples:
        curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchi
        curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchi
    """
    # An uploaded file part takes precedence over the raw request body.
    if len(request.files):
        data = request.files.values()[0].file.read()
    else:
        data = request.body.read()
    return ctab2inchiView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
def ctab2inchiKeyView(data, params):
    """Translate request parameters into keyword arguments for _ctab2inchiKey."""
    # All three options are boolean flags defaulting to True.
    flag_names = ('sanitize', 'removeHs', 'strictParsing')
    kwargs = {name: _parseFlag(params.get(name, True)) for name in flag_names}
    return _ctab2inchiKey(data, **kwargs)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey/<ctab>', method=['OPTIONS', 'GET'], name="ctab2inchiKey")
def ctab2inchiKey(ctab):
    """
    Converts CTAB to InChi Keys. CTAB is urlsafe_base64 encoded string containing single molfile or concatenation
    of multiple molfiles.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}ctab2inchiKey/$(cat aspirin.mol | base64 -w 0 | tr "+/" "-_")
    """
    # URL payload is urlsafe-base64 encoded; decode before delegating to the shared view.
    data = base64.urlsafe_b64decode(ctab)
    return ctab2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/ctab2inchiKey', method=['OPTIONS', 'POST'], name="ctab2inchiKey")
def ctab2inchiKey():
    """
    Converts CTAB to InChi Keys. CTAB is either single molfile or SDF file.
    cURL examples:
        curl -X POST --data-binary @aspirin.mol ${BEAKER_ROOT_URL}ctab2inchiKey
        curl -X POST -F "file=@aspirin.mol" ${BEAKER_ROOT_URL}ctab2inchiKey
    """
    # An uploaded file part takes precedence over the raw request body.
    if len(request.files):
        data = request.files.values()[0].file.read()
    else:
        data = request.body.read()
    return ctab2inchiKeyView(data, request.params)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey/<inchi>', method=['OPTIONS', 'GET'], name="inchi2inchiKey")
def inchi2inchiKey(inchi):
    """
    Converts InChis to InChiKeys. This method accepts urlsafe_base64 encoded string containing one or multiple InChis.
    cURL examples:
        curl -X GET ${BEAKER_ROOT_URL}inchi2inchiKey/$(cat aspirin.inchi | base64 -w 0 | tr "+/" "-_")
    """
    # URL payload is urlsafe-base64 encoded; decode before delegating to the converter.
    inchis = base64.urlsafe_b64decode(inchi)
    return _inchi2inchiKey(inchis)
#-----------------------------------------------------------------------------------------------------------------------
@app.route('/inchi2inchiKey', method=['OPTIONS', 'POST'], name="inchi2inchiKey")
def inchi2inchiKey():
    """
    Converts InChis to InChiKeys. This method accepts one or multiple InChis.
    cURL examples:
        curl -X POST --data-binary @aspirin.inchi ${BEAKER_ROOT_URL}inchi2inchiKey
        curl -X POST -F "file=@aspirin.inchi" ${BEAKER_ROOT_URL}inchi2inchiKey
    """
    # An uploaded file part takes precedence over the raw request body.
    if len(request.files):
        inchis = request.files.values()[0].file.read()
    else:
        inchis = request.body.read()
    return _inchi2inchiKey(inchis)
#----------------------------------------------------------------------------------------------------------------------- | 0.47098 | 0.104249 |
import os
from unittest import TestCase
import pandas as pd
import plotly.graph_objects as go
from pandas.testing import assert_frame_equal
from moonstone.parsers.metadata import MetadataParser, YAMLBasedMetadataParser
class TestMetadataParser(TestCase):
    """Tests for MetadataParser against small TSV/CSV fixtures in data/metadata/."""
    def setUp(self):
        # Fixture paths are resolved relative to this test module.
        self.metadata_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/metadata.tsv"
        )
        self.metadata_file_no_header = os.path.join(
            os.path.dirname(__file__), "data/metadata/metadata_noheader.tsv"
        )
        self.metadata_file_dirty = os.path.join(
            os.path.dirname(__file__), "data/metadata/dirty_metadata.tsv"
        )
    def test_parse_file(self):
        """Plain TSV with a header parses into a dataframe indexed by col_1."""
        expected_df = pd.DataFrame(
            {"col_2": [13.3, 15.3, 19.1], "col_3": ["M", "F", "M"]}
        )
        expected_df.index = pd.Index(["s1", "s2", "s3"], name="col_1")
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        assert_frame_equal(parser.dataframe, expected_df)
    def test_get_stats_headers(self):
        """get_stats reports per-column type, counts and value repartition."""
        expected_list = [
            {
                "col_name": "col_2",
                "col_type": "float64",
                "python_col_type": "float",
                "n_values": 3,
                "n_uniq_values": 3,
                "mean": 15.9,
                "values_repartition": {13.3: 1, 15.3: 1, 19.1: 1},
            },
            {
                "col_name": "col_3",
                "col_type": "object",
                "python_col_type": "str",
                "n_values": 3,
                "n_uniq_values": 2,
                "values_repartition": {"M": 2, "F": 1},
            },
        ]
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        self.assertListEqual(parser.get_stats(), expected_list)
    def test_get_stats_no_header(self):
        """Without a header line, columns are referred to by integer position."""
        expected_list = [
            {
                "col_name": 1,
                "col_type": "float64",
                "python_col_type": "float",
                "n_values": 3,
                "n_uniq_values": 3,
                "mean": 15.9,
                "values_repartition": {13.3: 1, 15.3: 1, 19.1: 1},
            },
            {
                "col_name": 2,
                "col_type": "object",
                "python_col_type": "str",
                "n_values": 3,
                "n_uniq_values": 2,
                "values_repartition": {"M": 2, "F": 1},
            },
        ]
        parser = MetadataParser(
            self.metadata_file_no_header, no_header=True, index_col=0
        )
        self.assertListEqual(parser.get_stats(), expected_list)
    def test_parse_file_force_dtype(self):
        """parsing_options dtype overrides keep numeric-looking values as strings."""
        expected_df = pd.DataFrame(
            {"col_2": ["13.3", "15.3", "19.1"], "col_3": ["M", "F", "M"]}
        )
        expected_df.index = pd.Index(["s1", "s2", "s3"], name="col_1")
        parsing_options = {"dtype": {"col_2": "object"}}
        parser = MetadataParser(
            self.metadata_file, parsing_options=parsing_options, index_col="col_1"
        )
        assert_frame_equal(parser.dataframe, expected_df)
    def test_parse_dirty_metadata_and_clean(self):
        """Cleaning operations (to_slug + rename) are applied before indexing."""
        expected_df = pd.DataFrame(
            {
                "age": [29, 48, 36, 25],
            }
        )
        expected_df.index = pd.Index(["s1", "s2", "s3", "s4"], name="sample")
        cleaning_operations = {
            "samples": [("to_slug", {}), ("rename", {"new_name": "sample"})]
        }
        parser = MetadataParser(
            self.metadata_file_dirty,
            sep=",",
            cleaning_operations=cleaning_operations,
            index_col="sample",
        )
        assert_frame_equal(parser.dataframe, expected_df)
    def test_get_dimensions(self):
        """_get_dimensions returns one plotly parcats Dimension per category."""
        categories = ["col_3"]
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        output_dimensions = parser._get_dimensions(categories)
        self.assertEqual(len(output_dimensions), 1)
        for dim in output_dimensions:
            self.assertTrue(isinstance(dim, go.parcats.Dimension))
    def test_get_color(self):
        """_get_color maps category values to integer color codes per sample."""
        color_by = "col_3"
        index = pd.Series(("s1", "s2", "s3"), name="col_1")
        expected_series = pd.Series([1, 2, 1], index=index, name=color_by)
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        pd.testing.assert_series_equal(parser._get_color(color_by), expected_series)
class MockedYAMLBasedMetadataParser(YAMLBasedMetadataParser):
    """
    Mocked to skip __init__ (which would parse files) so that the private
    methods of the class can be tested in isolation.
    """
    def __init__(self):
        pass
class TestYAMLBasedMetadataParser(TestCase):
    """Tests for YAMLBasedMetadataParser config extraction and end-to-end parsing."""
    def setUp(self):
        # For unit tests: a minimal in-memory parsing config covering a dtype
        # override and two cleaning operations.
        self.parsing_config = [
            {"col_name": "col_1", "dtype": "object"},
            {"col_name": "col_2", "operations": [{"name": "to_slug"}]},
            {
                "col_name": "col_3",
                "operations": [{"name": "rename", "options": {"new_name": "new"}}],
            },
        ]
    def test_extract_parsing_options(self):
        """dtype entries are collected into a pandas-style parsing options dict."""
        expected_dict = {"dtype": {"col_1": "object"}}
        parser = MockedYAMLBasedMetadataParser()
        self.assertDictEqual(
            parser._extract_parsing_options(self.parsing_config), expected_dict
        )
    def test_extract_cleaning_operations(self):
        """operations entries become (name, options) tuples keyed by column."""
        expected_dict = {
            "col_2": [("to_slug", {})],
            "col_3": [("rename", {"new_name": "new"})],
        }
        parser = MockedYAMLBasedMetadataParser()
        self.assertDictEqual(
            parser._extract_cleaning_operations(self.parsing_config), expected_dict
        )
    def test_parse_yaml_config(self):
        """_parse_yaml_config populates parsing_options and cleaning_operations."""
        config_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/config.yaml"
        )
        expected_parsing_options = {"dtype": {"age": "object"}}
        expected_cleaning_operations = {
            "samples": [("to_slug", {}), ("rename", {"new_name": "sample"})],
        }
        parser = MockedYAMLBasedMetadataParser()
        parser._parse_yaml_config(config_file)
        self.assertDictEqual(parser.parsing_options, expected_parsing_options)
        self.assertDictEqual(parser.cleaning_operations, expected_cleaning_operations)
    def test_parse_end_to_end(self):
        """Full run: YAML config drives cleaning and keeps age as strings (dtype)."""
        metadata_file_dirty = os.path.join(
            os.path.dirname(__file__), "data/metadata/dirty_metadata.tsv"
        )
        config_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/config.yaml"
        )
        parser = YAMLBasedMetadataParser(
            metadata_file_dirty, config_file, sep=",", index_col="sample"
        )
        expected_df = pd.DataFrame(
            {
                "age": ["29", "48", "36", "25"],
            }
        )
        expected_df.index = pd.Index(["s1", "s2", "s3", "s4"], name="sample")
        pd.testing.assert_frame_equal(parser.metadata_parser.dataframe, expected_df)
from unittest import TestCase
import pandas as pd
import plotly.graph_objects as go
from pandas.testing import assert_frame_equal
from moonstone.parsers.metadata import MetadataParser, YAMLBasedMetadataParser
class TestMetadataParser(TestCase):
    """Tests for MetadataParser against small TSV/CSV fixtures in data/metadata/."""
    def setUp(self):
        # Fixture paths are resolved relative to this test module.
        self.metadata_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/metadata.tsv"
        )
        self.metadata_file_no_header = os.path.join(
            os.path.dirname(__file__), "data/metadata/metadata_noheader.tsv"
        )
        self.metadata_file_dirty = os.path.join(
            os.path.dirname(__file__), "data/metadata/dirty_metadata.tsv"
        )
    def test_parse_file(self):
        """Plain TSV with a header parses into a dataframe indexed by col_1."""
        expected_df = pd.DataFrame(
            {"col_2": [13.3, 15.3, 19.1], "col_3": ["M", "F", "M"]}
        )
        expected_df.index = pd.Index(["s1", "s2", "s3"], name="col_1")
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        assert_frame_equal(parser.dataframe, expected_df)
    def test_get_stats_headers(self):
        """get_stats reports per-column type, counts and value repartition."""
        expected_list = [
            {
                "col_name": "col_2",
                "col_type": "float64",
                "python_col_type": "float",
                "n_values": 3,
                "n_uniq_values": 3,
                "mean": 15.9,
                "values_repartition": {13.3: 1, 15.3: 1, 19.1: 1},
            },
            {
                "col_name": "col_3",
                "col_type": "object",
                "python_col_type": "str",
                "n_values": 3,
                "n_uniq_values": 2,
                "values_repartition": {"M": 2, "F": 1},
            },
        ]
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        self.assertListEqual(parser.get_stats(), expected_list)
    def test_get_stats_no_header(self):
        """Without a header line, columns are referred to by integer position."""
        expected_list = [
            {
                "col_name": 1,
                "col_type": "float64",
                "python_col_type": "float",
                "n_values": 3,
                "n_uniq_values": 3,
                "mean": 15.9,
                "values_repartition": {13.3: 1, 15.3: 1, 19.1: 1},
            },
            {
                "col_name": 2,
                "col_type": "object",
                "python_col_type": "str",
                "n_values": 3,
                "n_uniq_values": 2,
                "values_repartition": {"M": 2, "F": 1},
            },
        ]
        parser = MetadataParser(
            self.metadata_file_no_header, no_header=True, index_col=0
        )
        self.assertListEqual(parser.get_stats(), expected_list)
    def test_parse_file_force_dtype(self):
        """parsing_options dtype overrides keep numeric-looking values as strings."""
        expected_df = pd.DataFrame(
            {"col_2": ["13.3", "15.3", "19.1"], "col_3": ["M", "F", "M"]}
        )
        expected_df.index = pd.Index(["s1", "s2", "s3"], name="col_1")
        parsing_options = {"dtype": {"col_2": "object"}}
        parser = MetadataParser(
            self.metadata_file, parsing_options=parsing_options, index_col="col_1"
        )
        assert_frame_equal(parser.dataframe, expected_df)
    def test_parse_dirty_metadata_and_clean(self):
        """Cleaning operations (to_slug + rename) are applied before indexing."""
        expected_df = pd.DataFrame(
            {
                "age": [29, 48, 36, 25],
            }
        )
        expected_df.index = pd.Index(["s1", "s2", "s3", "s4"], name="sample")
        cleaning_operations = {
            "samples": [("to_slug", {}), ("rename", {"new_name": "sample"})]
        }
        parser = MetadataParser(
            self.metadata_file_dirty,
            sep=",",
            cleaning_operations=cleaning_operations,
            index_col="sample",
        )
        assert_frame_equal(parser.dataframe, expected_df)
    def test_get_dimensions(self):
        """_get_dimensions returns one plotly parcats Dimension per category."""
        categories = ["col_3"]
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        output_dimensions = parser._get_dimensions(categories)
        self.assertEqual(len(output_dimensions), 1)
        for dim in output_dimensions:
            self.assertTrue(isinstance(dim, go.parcats.Dimension))
    def test_get_color(self):
        """_get_color maps category values to integer color codes per sample."""
        color_by = "col_3"
        index = pd.Series(("s1", "s2", "s3"), name="col_1")
        expected_series = pd.Series([1, 2, 1], index=index, name=color_by)
        parser = MetadataParser(self.metadata_file, index_col="col_1")
        pd.testing.assert_series_equal(parser._get_color(color_by), expected_series)
class MockedYAMLBasedMetadataParser(YAMLBasedMetadataParser):
    """
    Mocked to skip __init__ (which would parse files) so that the private
    methods of the class can be tested in isolation.
    """
    def __init__(self):
        pass
class TestYAMLBasedMetadataParser(TestCase):
    """Tests for YAMLBasedMetadataParser config extraction and end-to-end parsing."""
    def setUp(self):
        # For unit tests: a minimal in-memory parsing config covering a dtype
        # override and two cleaning operations.
        self.parsing_config = [
            {"col_name": "col_1", "dtype": "object"},
            {"col_name": "col_2", "operations": [{"name": "to_slug"}]},
            {
                "col_name": "col_3",
                "operations": [{"name": "rename", "options": {"new_name": "new"}}],
            },
        ]
    def test_extract_parsing_options(self):
        """dtype entries are collected into a pandas-style parsing options dict."""
        expected_dict = {"dtype": {"col_1": "object"}}
        parser = MockedYAMLBasedMetadataParser()
        self.assertDictEqual(
            parser._extract_parsing_options(self.parsing_config), expected_dict
        )
    def test_extract_cleaning_operations(self):
        """operations entries become (name, options) tuples keyed by column."""
        expected_dict = {
            "col_2": [("to_slug", {})],
            "col_3": [("rename", {"new_name": "new"})],
        }
        parser = MockedYAMLBasedMetadataParser()
        self.assertDictEqual(
            parser._extract_cleaning_operations(self.parsing_config), expected_dict
        )
    def test_parse_yaml_config(self):
        """_parse_yaml_config populates parsing_options and cleaning_operations."""
        config_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/config.yaml"
        )
        expected_parsing_options = {"dtype": {"age": "object"}}
        expected_cleaning_operations = {
            "samples": [("to_slug", {}), ("rename", {"new_name": "sample"})],
        }
        parser = MockedYAMLBasedMetadataParser()
        parser._parse_yaml_config(config_file)
        self.assertDictEqual(parser.parsing_options, expected_parsing_options)
        self.assertDictEqual(parser.cleaning_operations, expected_cleaning_operations)
    def test_parse_end_to_end(self):
        """Full run: YAML config drives cleaning and keeps age as strings (dtype)."""
        metadata_file_dirty = os.path.join(
            os.path.dirname(__file__), "data/metadata/dirty_metadata.tsv"
        )
        config_file = os.path.join(
            os.path.dirname(__file__), "data/metadata/config.yaml"
        )
        parser = YAMLBasedMetadataParser(
            metadata_file_dirty, config_file, sep=",", index_col="sample"
        )
        expected_df = pd.DataFrame(
            {
                "age": ["29", "48", "36", "25"],
            }
        )
        expected_df.index = pd.Index(["s1", "s2", "s3", "s4"], name="sample")
        pd.testing.assert_frame_equal(parser.metadata_parser.dataframe, expected_df)
from urllib import parse
from celery import shared_task, states
from celery.canvas import group
from django.conf import settings
from django.db import transaction
from extras.tasks import CurrentUserTaskMixin
from registry.models import CatalougeService, WebFeatureService, WebMapService
from registry.models.metadata import (DatasetMetadata,
WebFeatureServiceRemoteMetadata,
WebMapServiceRemoteMetadata)
from registry.models.security import (WebFeatureServiceAuthentication,
WebMapServiceAuthentication)
from registry.xmlmapper.ogc.capabilities import CswService as CswXmlMapper
from registry.xmlmapper.ogc.capabilities import Wfs200Service as WfsXmlMapper
from registry.xmlmapper.ogc.capabilities import WmsService as WmsXmlMapper
from registry.xmlmapper.ogc.capabilities import get_parsed_service
from requests import Request, Session
from rest_framework.reverse import reverse
@shared_task(bind=True,
             base=CurrentUserTaskMixin)
def build_ogc_service(self, get_capabilities_url: str, collect_metadata_records: bool, service_auth_pk: int | None, **kwargs):
    """Download, parse and persist an OGC service (WMS/WFS/CSW) from its capabilities URL.

    Progress is published via Celery custom task states with a done/total/phase meta.
    Returns a json:api-like dict describing the created service resource; when
    collect_metadata_records is set, a group of fetch_remote_metadata_xml tasks
    is dispatched and its group id is attached under data.meta.
    """
    self.update_state(state=states.STARTED, meta={
        'done': 0, 'total': 3, 'phase': 'download capabilities document...'})
    auth = None
    if service_auth_pk:
        # The SERVICE query parameter decides which authentication model applies.
        # NOTE(review): assumes an uppercase 'SERVICE' key is present in the URL;
        # a lowercase 'service' parameter would raise KeyError — confirm callers.
        match parse.parse_qs(parse.urlsplit(get_capabilities_url).query)['SERVICE'][0].lower():
            case 'wms':
                auth = WebMapServiceAuthentication.objects.get(
                    id=service_auth_pk)
            case 'wfs':
                auth = WebFeatureServiceAuthentication.objects.get(
                    id=service_auth_pk)
            case _:
                auth = None
    session = Session()
    session.proxies = settings.PROXIES
    request = Request(method="GET",
                      url=get_capabilities_url,
                      auth=auth.get_auth_for_request() if auth else None)
    response = session.send(request.prepare())
    self.update_state(state=states.STARTED, meta={
        'done': 1, 'total': 3, 'phase': 'parse capabilities document...'})
    parsed_service = get_parsed_service(xml=response.content)
    self.update_state(state=states.STARTED, meta={
        'done': 2, 'total': 3, 'phase': 'persisting service...'})
    with transaction.atomic():
        # create all needed database objects and rollback if any error occours to avoid from database inconsistence
        # FIXME: pass the current user
        if isinstance(parsed_service, WmsXmlMapper):
            db_service = WebMapService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "WebMapService"
            self_url = reverse(
                viewname='registry:wms-detail', args=[db_service.pk])
        elif isinstance(parsed_service, WfsXmlMapper):
            db_service = WebFeatureService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "WebFeatureService"
            self_url = reverse(
                viewname='registry:wfs-detail', args=[db_service.pk])
        elif isinstance(parsed_service, CswXmlMapper):
            db_service = CatalougeService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "CatalougeService"
            # FIXME: no csw modelviewset
            self_url = reverse(
                viewname='registry:csw-detail', args=[db_service.pk])
        else:
            raise NotImplementedError(
                "Unknown XML mapper detected. Only WMS, WFS and CSW services are allowed.")
        if auth:
            # Bind the pre-existing credentials to the newly created service.
            auth.service = db_service
            auth.save()
    self.update_state(state=states.SUCCESS, meta={'done': 3, 'total': 3})
    # TODO: use correct Serializer and render the json:api as result
    return_dict = {
        "data": {
            "type": resource_name,
            "id": str(db_service.pk),
            "links": {
                "self": self_url
            }
        }
    }
    if collect_metadata_records:
        remote_metadata_list = None
        if isinstance(db_service, WebMapService):
            remote_metadata_list = WebMapServiceRemoteMetadata.objects.filter(
                service__pk=db_service.pk)
        elif isinstance(db_service, WebFeatureService):
            remote_metadata_list = WebFeatureServiceRemoteMetadata.objects.filter(
                service__pk=db_service.pk)
        if remote_metadata_list:
            # Fan out one download task per remote metadata record and expose
            # the celery group id so clients can poll the overall progress.
            job = group([fetch_remote_metadata_xml.s(remote_metadata.pk, db_service.__class__.__name__, **kwargs)
                         for remote_metadata in remote_metadata_list])
            group_result = job.apply_async()
            group_result.save()
            data = return_dict["data"]
            data.update({
                "meta": {
                    "collect_metadata_records_job_id": str(group_result.id)
                }
            })
    return return_dict
@shared_task(bind=True,
             base=CurrentUserTaskMixin,
             queue="download_iso_metadata")
def fetch_remote_metadata_xml(self, remote_metadata_id, class_name, **kwargs):
    """Fetch one remote ISO metadata document and persist it as a metadata instance.

    Args:
        remote_metadata_id: primary key of the remote metadata record.
        class_name: "WebMapService" or "WebFeatureService"; selects the model.

    Returns:
        A json:api-like dict describing the created record, or None when the
        class name is unknown or fetching/persisting failed.
    """
    # Fix: the task has two steps (fetch + persist), so total is 2 from the
    # start; the previous 'total': 1 contradicted the later progress updates.
    self.update_state(state=states.STARTED, meta={
        'done': 0, 'total': 2, 'phase': 'fetching remote document...'})
    remote_metadata = None
    if class_name == 'WebMapService':
        remote_metadata = WebMapServiceRemoteMetadata.objects.get(
            pk=remote_metadata_id)
    elif class_name == 'WebFeatureService':
        remote_metadata = WebFeatureServiceRemoteMetadata.objects.get(
            pk=remote_metadata_id)
    if not remote_metadata:
        return None
    try:
        remote_metadata.fetch_remote_content()
        self.update_state(state=states.STARTED, meta={'done': 1, 'total': 2})
        metadata_record = remote_metadata.create_metadata_instance()
        self.update_state(state=states.STARTED, meta={'done': 2, 'total': 2})
        return {
            "data": {
                "type": "DatasetMetadata" if isinstance(metadata_record, DatasetMetadata) else "ServiceMetadata",
                "id": f"{metadata_record.pk}",
                "links": {
                    # NOTE(review): always links the datasetmetadata route, even
                    # for ServiceMetadata records — confirm this is intended.
                    "self": f"{reverse(viewname='registry:datasetmetadata-detail', args=[metadata_record.pk])}"
                }
            }
        }
    except Exception as e:
        # Best effort: one failing record must not fail the whole celery group.
        settings.ROOT_LOGGER.exception(e, stack_info=True, exc_info=True)
        return None
from celery import shared_task, states
from celery.canvas import group
from django.conf import settings
from django.db import transaction
from extras.tasks import CurrentUserTaskMixin
from registry.models import CatalougeService, WebFeatureService, WebMapService
from registry.models.metadata import (DatasetMetadata,
WebFeatureServiceRemoteMetadata,
WebMapServiceRemoteMetadata)
from registry.models.security import (WebFeatureServiceAuthentication,
WebMapServiceAuthentication)
from registry.xmlmapper.ogc.capabilities import CswService as CswXmlMapper
from registry.xmlmapper.ogc.capabilities import Wfs200Service as WfsXmlMapper
from registry.xmlmapper.ogc.capabilities import WmsService as WmsXmlMapper
from registry.xmlmapper.ogc.capabilities import get_parsed_service
from requests import Request, Session
from rest_framework.reverse import reverse
@shared_task(bind=True,
             base=CurrentUserTaskMixin)
def build_ogc_service(self, get_capabilities_url: str, collect_metadata_records: bool, service_auth_pk: int | None, **kwargs):
    """Download, parse and persist an OGC service (WMS/WFS/CSW) from its capabilities URL.

    Progress is published via Celery custom task states with a done/total/phase meta.
    Returns a json:api-like dict describing the created service resource; when
    collect_metadata_records is set, a group of fetch_remote_metadata_xml tasks
    is dispatched and its group id is attached under data.meta.
    """
    self.update_state(state=states.STARTED, meta={
        'done': 0, 'total': 3, 'phase': 'download capabilities document...'})
    auth = None
    if service_auth_pk:
        # The SERVICE query parameter decides which authentication model applies.
        # NOTE(review): assumes an uppercase 'SERVICE' key is present in the URL;
        # a lowercase 'service' parameter would raise KeyError — confirm callers.
        match parse.parse_qs(parse.urlsplit(get_capabilities_url).query)['SERVICE'][0].lower():
            case 'wms':
                auth = WebMapServiceAuthentication.objects.get(
                    id=service_auth_pk)
            case 'wfs':
                auth = WebFeatureServiceAuthentication.objects.get(
                    id=service_auth_pk)
            case _:
                auth = None
    session = Session()
    session.proxies = settings.PROXIES
    request = Request(method="GET",
                      url=get_capabilities_url,
                      auth=auth.get_auth_for_request() if auth else None)
    response = session.send(request.prepare())
    self.update_state(state=states.STARTED, meta={
        'done': 1, 'total': 3, 'phase': 'parse capabilities document...'})
    parsed_service = get_parsed_service(xml=response.content)
    self.update_state(state=states.STARTED, meta={
        'done': 2, 'total': 3, 'phase': 'persisting service...'})
    with transaction.atomic():
        # create all needed database objects and rollback if any error occours to avoid from database inconsistence
        # FIXME: pass the current user
        if isinstance(parsed_service, WmsXmlMapper):
            db_service = WebMapService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "WebMapService"
            self_url = reverse(
                viewname='registry:wms-detail', args=[db_service.pk])
        elif isinstance(parsed_service, WfsXmlMapper):
            db_service = WebFeatureService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "WebFeatureService"
            self_url = reverse(
                viewname='registry:wfs-detail', args=[db_service.pk])
        elif isinstance(parsed_service, CswXmlMapper):
            db_service = CatalougeService.capabilities.create_from_parsed_service(
                parsed_service=parsed_service)
            resource_name = "CatalougeService"
            # FIXME: no csw modelviewset
            self_url = reverse(
                viewname='registry:csw-detail', args=[db_service.pk])
        else:
            raise NotImplementedError(
                "Unknown XML mapper detected. Only WMS, WFS and CSW services are allowed.")
        if auth:
            # Bind the pre-existing credentials to the newly created service.
            auth.service = db_service
            auth.save()
    self.update_state(state=states.SUCCESS, meta={'done': 3, 'total': 3})
    # TODO: use correct Serializer and render the json:api as result
    return_dict = {
        "data": {
            "type": resource_name,
            "id": str(db_service.pk),
            "links": {
                "self": self_url
            }
        }
    }
    if collect_metadata_records:
        remote_metadata_list = None
        if isinstance(db_service, WebMapService):
            remote_metadata_list = WebMapServiceRemoteMetadata.objects.filter(
                service__pk=db_service.pk)
        elif isinstance(db_service, WebFeatureService):
            remote_metadata_list = WebFeatureServiceRemoteMetadata.objects.filter(
                service__pk=db_service.pk)
        if remote_metadata_list:
            # Fan out one download task per remote metadata record and expose
            # the celery group id so clients can poll the overall progress.
            job = group([fetch_remote_metadata_xml.s(remote_metadata.pk, db_service.__class__.__name__, **kwargs)
                         for remote_metadata in remote_metadata_list])
            group_result = job.apply_async()
            group_result.save()
            data = return_dict["data"]
            data.update({
                "meta": {
                    "collect_metadata_records_job_id": str(group_result.id)
                }
            })
    return return_dict
@shared_task(bind=True,
             base=CurrentUserTaskMixin,
             queue="download_iso_metadata")
def fetch_remote_metadata_xml(self, remote_metadata_id, class_name, **kwargs):
    """Fetch one remote ISO metadata document and persist it as a metadata instance.

    Args:
        remote_metadata_id: primary key of the remote metadata record.
        class_name: "WebMapService" or "WebFeatureService"; selects the model.

    Returns:
        A json:api-like dict describing the created record, or None when the
        class name is unknown or fetching/persisting failed.
    """
    # Fix: the task has two steps (fetch + persist), so total is 2 from the
    # start; the previous 'total': 1 contradicted the later progress updates.
    self.update_state(state=states.STARTED, meta={
        'done': 0, 'total': 2, 'phase': 'fetching remote document...'})
    remote_metadata = None
    if class_name == 'WebMapService':
        remote_metadata = WebMapServiceRemoteMetadata.objects.get(
            pk=remote_metadata_id)
    elif class_name == 'WebFeatureService':
        remote_metadata = WebFeatureServiceRemoteMetadata.objects.get(
            pk=remote_metadata_id)
    if not remote_metadata:
        return None
    try:
        remote_metadata.fetch_remote_content()
        self.update_state(state=states.STARTED, meta={'done': 1, 'total': 2})
        metadata_record = remote_metadata.create_metadata_instance()
        self.update_state(state=states.STARTED, meta={'done': 2, 'total': 2})
        return {
            "data": {
                "type": "DatasetMetadata" if isinstance(metadata_record, DatasetMetadata) else "ServiceMetadata",
                "id": f"{metadata_record.pk}",
                "links": {
                    # NOTE(review): always links the datasetmetadata route, even
                    # for ServiceMetadata records — confirm this is intended.
                    "self": f"{reverse(viewname='registry:datasetmetadata-detail', args=[metadata_record.pk])}"
                }
            }
        }
    except Exception as e:
        # Best effort: one failing record must not fail the whole celery group.
        settings.ROOT_LOGGER.exception(e, stack_info=True, exc_info=True)
        return None
import os
import numpy as np
import cobra
from enzyme import enzyme
from warnings import filterwarnings
class TestFBAModel:
def setup_class(self):
modelPath='../data/external/yeast_7.6/yeast_7.6.xml'
filterwarnings('ignore', 'charge of s_[0-9][0-9][0-9][0-9] is not a number ()')
filterwarnings('ignore', 'uppercase AND/OR found in rule ')
self.model = cobra.io.read_sbml_model(modelPath)
modelDir = '../models/yeast_7.6'
self.genes = {g.id: enzyme(g.id) for g in self.model.genes}
for g in self.model.genes:
self.genes[g.id].reactionRules = [r.gene_reaction_rule for r in g.reactions]
with open(os.path.join(modelDir, 'gene_loss_costs.tsv'), 'r') as f:
lines = f.readlines()
minimalMedia = [tuple(m.split(' AND ')) for m in lines[0].strip().split('\t')[1:]]
for line in lines[1:]:
self.genes[line.split('\t')[0]].geneLossCosts = np.array([float(i.strip())
for i in line.split('\t')[1:]])
with open(os.path.join(modelDir, 'function_loss_costs.tsv'), 'r') as f:
for line in f.readlines()[1:]:
self.genes[line.split('\t')[0]].functionLossCosts = np.array([float(i.strip())
for i in line.split('\t')[1:]])
def test_gene_to_reaction_rules_sensible(self):
rules = [r.gene_reaction_rule for r in self.model.reactions]
allRules = ''.join(rules)
allRules = allRules.replace(' ', '')
allRules = allRules.replace('and', '')
allRules = allRules.replace('or', '')
allRules = allRules.replace('(', '')
allRules = allRules.replace(')', '')
for geneName in sorted([g.id for g in self.model.genes], key=len, reverse=True):
allRules = allRules.replace(geneName, '')
assert allRules == '', 'gene reaction rules should contain only |gene names|and|or|()|'
def test_each_gene_has_at_least_one_gene_to_reaction_rule(self):
assert all([len(g.reactionRules) > 0 for g in self.genes.values()])
def test_function_loss_equals_gene_loss_in_simple_cases(self):
assert all([g.old_and_new_costs_identical() for g in self.genes.values() if
g.is_simple_single_function()])
def test_isoenzymes_not_simple_single_function(self):
assert len([g for g in self.genes.values() if g.is_isoenzyme()
and g.is_simple_single_function()]) == 0
def test_isoenzyme_pairs_with_only_one_reaction_have_symmetric_costs(self):
def genes_in_rule(rule):
"""Given a reaction rule, return a list of genes.
Args:
rule (str): the reaction rule.
Returns:
list(str): the genes.
"""
genes = set(rule.replace('and', '').replace('or', '').replace('(', '').replace(')', '').split())
if len(genes) == 0:
raise UserWarning('ERROR: no genes found in reaction rule.')
return genes
isozymesInOneReaction = {gene.name: gene for gene in self.genes.values()
if gene.is_isoenzyme() and gene.number_reactions() == 1}
isoSimplePairs = set()
for g in isozymesInOneReaction.values():
genesInRule = genes_in_rule(g.reactionRules[0])
if len(genesInRule) == 2:
if all([i in isozymesInOneReaction for i in genesInRule]):
isoSimplePairs.add(tuple(sorted(genesInRule)))
nIsoSimplePairs = len(isoSimplePairs)
assert nIsoSimplePairs > 10, 'Should be at least a few simple pairs of isoenzymes.'
countOldEqual, countNewEqual, countOldZero = 0, 0, 0
for i, j in isoSimplePairs:
if np.array_equal(self.genes[i].geneLossCosts, self.genes[j].geneLossCosts):
countOldEqual += 1
if np.array_equal(self.genes[i].functionLossCosts, self.genes[j].functionLossCosts):
countNewEqual += 1
if np.array_equal(self.genes[i].geneLossCosts, np.zeros(self.genes[i].geneLossCosts.shape)):
countOldZero += 1
if np.all(np.isclose(self.genes[j].geneLossCosts, np.zeros(self.genes[j].geneLossCosts.shape), atol = 1e-5)):
countOldZero += 1
assert countOldEqual == nIsoSimplePairs
assert countNewEqual == nIsoSimplePairs
assert countOldZero == nIsoSimplePairs | flux_balance_analysis/test_code.py | import os
import numpy as np
import cobra
from enzyme import enzyme
from warnings import filterwarnings
class TestFBAModel:
def setup_class(self):
modelPath='../data/external/yeast_7.6/yeast_7.6.xml'
filterwarnings('ignore', 'charge of s_[0-9][0-9][0-9][0-9] is not a number ()')
filterwarnings('ignore', 'uppercase AND/OR found in rule ')
self.model = cobra.io.read_sbml_model(modelPath)
modelDir = '../models/yeast_7.6'
self.genes = {g.id: enzyme(g.id) for g in self.model.genes}
for g in self.model.genes:
self.genes[g.id].reactionRules = [r.gene_reaction_rule for r in g.reactions]
with open(os.path.join(modelDir, 'gene_loss_costs.tsv'), 'r') as f:
lines = f.readlines()
minimalMedia = [tuple(m.split(' AND ')) for m in lines[0].strip().split('\t')[1:]]
for line in lines[1:]:
self.genes[line.split('\t')[0]].geneLossCosts = np.array([float(i.strip())
for i in line.split('\t')[1:]])
with open(os.path.join(modelDir, 'function_loss_costs.tsv'), 'r') as f:
for line in f.readlines()[1:]:
self.genes[line.split('\t')[0]].functionLossCosts = np.array([float(i.strip())
for i in line.split('\t')[1:]])
def test_gene_to_reaction_rules_sensible(self):
rules = [r.gene_reaction_rule for r in self.model.reactions]
allRules = ''.join(rules)
allRules = allRules.replace(' ', '')
allRules = allRules.replace('and', '')
allRules = allRules.replace('or', '')
allRules = allRules.replace('(', '')
allRules = allRules.replace(')', '')
for geneName in sorted([g.id for g in self.model.genes], key=len, reverse=True):
allRules = allRules.replace(geneName, '')
assert allRules == '', 'gene reaction rules should contain only |gene names|and|or|()|'
def test_each_gene_has_at_least_one_gene_to_reaction_rule(self):
assert all([len(g.reactionRules) > 0 for g in self.genes.values()])
def test_function_loss_equals_gene_loss_in_simple_cases(self):
assert all([g.old_and_new_costs_identical() for g in self.genes.values() if
g.is_simple_single_function()])
def test_isoenzymes_not_simple_single_function(self):
assert len([g for g in self.genes.values() if g.is_isoenzyme()
and g.is_simple_single_function()]) == 0
def test_isoenzyme_pairs_with_only_one_reaction_have_symmetric_costs(self):
def genes_in_rule(rule):
"""Given a reaction rule, return a list of genes.
Args:
rule (str): the reaction rule.
Returns:
list(str): the genes.
"""
genes = set(rule.replace('and', '').replace('or', '').replace('(', '').replace(')', '').split())
if len(genes) == 0:
raise UserWarning('ERROR: no genes found in reaction rule.')
return genes
isozymesInOneReaction = {gene.name: gene for gene in self.genes.values()
if gene.is_isoenzyme() and gene.number_reactions() == 1}
isoSimplePairs = set()
for g in isozymesInOneReaction.values():
genesInRule = genes_in_rule(g.reactionRules[0])
if len(genesInRule) == 2:
if all([i in isozymesInOneReaction for i in genesInRule]):
isoSimplePairs.add(tuple(sorted(genesInRule)))
nIsoSimplePairs = len(isoSimplePairs)
assert nIsoSimplePairs > 10, 'Should be at least a few simple pairs of isoenzymes.'
countOldEqual, countNewEqual, countOldZero = 0, 0, 0
for i, j in isoSimplePairs:
if np.array_equal(self.genes[i].geneLossCosts, self.genes[j].geneLossCosts):
countOldEqual += 1
if np.array_equal(self.genes[i].functionLossCosts, self.genes[j].functionLossCosts):
countNewEqual += 1
if np.array_equal(self.genes[i].geneLossCosts, np.zeros(self.genes[i].geneLossCosts.shape)):
countOldZero += 1
if np.all(np.isclose(self.genes[j].geneLossCosts, np.zeros(self.genes[j].geneLossCosts.shape), atol = 1e-5)):
countOldZero += 1
assert countOldEqual == nIsoSimplePairs
assert countNewEqual == nIsoSimplePairs
assert countOldZero == nIsoSimplePairs | 0.505615 | 0.423518 |
import sys
import regex as re
__author__ = '<NAME>'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
class MultiRegex(object):
simple = False
regexes = ()
def __init__(self):
try:
self._rx = re.compile('|'.join(self.regexes), flags=re.IGNORECASE)
except:
for r in self.regexes:
try:
re.compile(r)
except:
print('Error in regex: {}'.format(str(r)))
def sub(self, s):
if not s or s is None: return ''
return self._rx.sub(self._sub, s)
def _sub(self, mo):
try:
for k, v in mo.groupdict().items():
if v:
if k == 'AllElse':
return ''
if 'UUU' in str(k):
return bytes(str(k).replace('UUU', '\\' + 'u'), 'ascii').decode('unicode-escape')
try:
sub = getattr(self, k)
if callable(sub):
return sub(mo)
else:
return sub
except:
return str(k)
except:
print('\nError MR: {0}\n'.format(str(sys.exc_info())))
class Abbreviations(MultiRegex):
simple = True
regexes = (
r'(?P<January>^jan(uary)?\.*$)',
r'(?P<February>^feb(ruary)?\.*$)',
r'(?P<March>^m(ar|rz)(ch)?\.*$)',
r'(?P<April>^apr(il)?\.*$)',
r'(?P<June>^june?\.*$)',
r'(?P<July>^july?\.*$)',
r'(?P<August>^aug(ust)?\.*$)',
r'(?P<September>^sept?(ember)?\.*$)',
r'(?P<October>^o[ck]t(ober)?\.*$)',
r'(?P<November>^nov(ember)?\.*$)',
r'(?P<December>^de[cz](ember)?\.*$)',
r'(?P<Monday>^mon(day)?s?\.*$)',
r'(?P<Tuesday>^tues?(day)?s?\.*$)',
r'(?P<Wednesday>^wed(ne)?s?(day)?s?\.*$)',
r'(?P<Thursday>^thur?s?(day)?s?\.*$)',
r'(?P<Friday>^fri(day)?s?\.*$)',
r'(?P<Saturday>^sat(urday)?s?\.*$)',
r'(?P<Sunday>^sun(day)?s?\.*$)',
r'(?P<Abbildung>^abb(ildung)?\.*$)', # German, illustration, figure
r'(?P<Abdruck>^abdr(uck)?\.*$)', # German, impression, print, reproduction
r'(?P<Abhandlung>^abh(andlung)?\.*$)', # German, treatise
r'(?P<AbkUUU00FCrzung>^abk(.rzung)?\.*$)', # German, abbreviation
r'(?P<Abschrift>^abschr(ift)?\.*$)', # German, reprint, copy
r'(?P<Abteilung>^abt(eilung)?\.*$)', # German
r'(?P<approximately>^(ca|approx)\.*$)',
r'(?P<Auflage>^aufl(age)?\.*$)', # German, edition
r'(?P<Ausgabe>^ausg(abe)?\.*$)', # German, edition
r'(?P<augmented>^aug(mented)\.*$)',
r'(?P<BUUU00E4ndchen>^b(aen)?dche?n\.*$)', # German
r'(?P<BUUU00E4nde>^b(ae?n)?de\.*$)', # German
r'(?P<Band>^b(an)?d\.*$)', # German, volume
r'(?P<Bearbeitung>^bearb(eitung)?\.*$)', # German, arrangement
r'(?P<Beiheft>^beih(eft)?\.*$)', # German, supplement
r'(?P<Beispiel>^beisp(iel)?\.*$)', # German, example
r'(?P<beziehungsweise>^be?z(iehungs)?w(eise)?\.*$)', # German, respectively; or, or else; more specifically
r'(?P<bibliography>^bibl(iog)?(raphy)?\.*$)',
r'(?P<books>^bo*ks\.*$)',
r'(?P<book>^bo*k\.*$)',
r'(?P<Buchhandler>^buchh(andler)?\.*$)', # German, bookseller
r'(?P<CDs>^cd-?(rom)?s\.*$)',
r'(?P<CD>^cd-?(rom)?\.*$)',
r'(?P<chiefly>^chiefle*y\.*$)',
r'(?P<cm>^cm\.*$)',
r'(?P<coloured>^colo+u?red\.*$)',
r'(?P<colour>^col(o+u?r|eur)?\.*$)',
r'(?P<columns>^col(umn)?s\.*$)',
r'(?P<corrected>^corr(ected)?\.*$)',
r'(?P<cover>^couv(erture)?\.*$)',
r'(?P<deel>^de*l\.*$)', # Dutch
r'(?P<Department>^dept\.*$)',
r'(?P<diagrams>^diagra?m?s*\.*$)',
r'(?P<dopolnennoe>^dop(ol)?(nennoe)?\.*$)', # Russian
r'(?P<DVDs>^dvd-?(rom)?s\.*$)',
r'(?P<DVD>^dvd-?(rom)?\.*$)',
r'(?P<UUU00E9dition>^[\u00e9\u00C9]d(ition)?\.*$)', # édition
r'(?P<edition>^ed(itio)?n?\.*$)',
r'(?P<Einleitung>^einl(eitung)?\.*$)', # German, introduction
r'(?P<ekdosi>^ekd(osi)?\.*$)', # Greek
r'(?P<engraved>^engr(aved)?\.*$)',
r'(?P<enlarged>^enl(arged)?\.*$)',
r'(?P<erweiterte>^erw(eit)?(erte)?\.*$)', # German
r'(?P<fascicule>^fasc(icule)?\.*$)', # French
r'(?P<facsimiles>^fa(cs|sc)(im)?(ile)?s\.*$)',
r'(?P<facsimile>^fa(cs|sc)(im)?(ile)?\.*$)',
r'(?P<feet>^f[e]*t\.*$)',
r'(?P<figures>^fig(ures)?s*\.*$)',
r'(?P<folded>^(ofld|fold(ed)?)\.*$)',
r'(?P<folio>^fol[io.]*\.*$)',
r'(?P<folios>^fol[io.]*s\.*$)',
r'(?P<frames>^fr(ame)?s*\.*$)',
r'(?P<frontispiece>^front(\.|is)(piece)?\.*$)',
r'(?P<gedruckt>^gedr(uckt)?\.*$)', # German, printed
r'(?P<Gegenwart>^gegenw(art)?\.*$)', # German, present time
r'(?P<genealogical>^geneal(ogical)?\.*$)',
r'(?P<geological>^geol(og)?(ical)?\.*$)',
r'(?P<garren>^g(arre)?n\.*$)', # Basque, nth
r'(?P<Handbuch>^h(an)?db(uch)?\.*$)', # German, handbook, manual
r'(?P<hardback>^h(ard)?b(ac)?k\.*$)',
r'(?P<Hefte>^he*fte\.*$)', # German
r'(?P<Heft>^he*ft\.*$)', # German
r'(?P<Herausgeber>^he?r(au)?sg(eber)?\.*$)', # German, editor
r'(?P<illustrations>^a?il+u?s?(tration.*)?s?\.*$)',
r'(?P<impression>^impr(ession)?\.*$)',
r'(?P<including>^incl?(uding)?\.*$)',
r'(?P<introduction>^introd(uction)?\.*$)',
r'(?P<ispravlennoe>^ispr(avl)?(ennoe)?\.*$)', # Russian
r'(?P<izdaniye>^izd(aniye)?\.*$)', # Russian
r'(?P<Jahreszahl>^j(ahres)?z(ah)?l\.*$)', # German, date, year
r'(?P<jaargang>^jaarg(ang)?\.*$)', # Dutch
r'(?P<Jahrgang>^jahrg(ang)?\.*$)', # German
r'(?P<Jahrhundert>^j(ahr)?h(undert)?\.*$)', # German, century
r'(?P<knjiga>^knj(iga)?\.*$)', # Croatian
r'(?P<mahadurah>^mahad(urah)?\.*$)', # Hebrew
r'(?P<manuscript>^m(ss*|anuscripts?)\.*$)',
r'(?P<microfiche>^micr[io]-*fiches*\.*$)',
r'(?P<microfilm>^micr[io]-*film*\.*$)',
r'(?P<minutes>^min(ute)?s\.*$)',
r'(?P<Mitarbeiter>^mitarb(eiter)?\.*$)', # German, collaborator
r'(?P<Mitwirkung>^mitw(irkung)?\.*$)', # German, cooperation
r'(?P<mm>^mm\.*$)',
r'(?P<music>^mus(ic)?\.*$)',
r'(?P<Nachricht>^nachr(icht)?\.*$)', # German, communication, report, notice
r'(?P<Nachwort>^nachw(ort)?\.*$)', # German, concluding remarks, epilogue
r'(?P<nakladateUUU0142stvUUU00ed>^nakl(ad)?(ate)?\.*$)', # Czech, nakladatełství
r'(?P<Neudruck>^neudr(uck)?\.*$)', # German, reprint
r'(?P<nouvelle>^nouv(elle)?\.*$)', # French
r'(?P<numbers>^n-*(o|ro?|um+b?ero?)s*\.*$)',
r'(?P<oblong>^obl(ong)?\.*$)',
r'(?P<Originalausgabe>^Originalausg(abe)?\.*$)', # German
r'(?P<pages>^pp+(age)?s*\.*$)',
r'(?P<paperback>^p(aper)?b(ac)?k\.*$)',
r'(?P<parts>^p(ar)?t\.*$)',
r'(?P<patippu>^pat(ippu)?\.*$)', # Russian
r'(?P<plates>^pl(at)?e?s*\.*$)',
r'(?P<poprawione>^popr(awione)?\.*$)', # Polish, corrected
r'(?P<portraits>^portr?(ait)?s*\.*$)',
r'(?P<reprinted>^re-*pr(int)?(ed)?\.*$)',
r'(?P<revised>^rev(ised)?\.*$)',
r'(?P<Sammelwerk>^s(ammel)?w(er)?k\.*$)', # German, collected works
r'(?P<Sammlung>^samml(ung)?\.*$)', # German, collection, compilation, set
r'(?P<Schriftleiter>^schriftl(eiter)?\.*$)', # German, editor
r'(?P<selfUUU002Dportraits>^self-?portr?(ait)?s*\.*$)',
r'(?P<series>^ser(ies)?\.*$)',
r'(?P<sheet>^sh\.*$)',
r'(?P<stereograph>^stereo-?graph\.*$)',
r'(?P<sound>^s(oun)?d\.*$)',
r'(?P<Stimmbuch>^st(imm)?b(uch)?\.*$)', # German, part book
r'(?P<supplement>^suppl?(ement)?\.*$)',
r'(?P<svazek>^sv(azek)?\.*$)', # Czech
r'(?P<tomes>^tome?s*\.*$)',
r'(?P<undUUU0020soUUU0020weiter>^u(nd)?\s*so?\s*w(eiter)?\.*$)', # German, and so forth, etc.
r'(?P<unnumbered>^un-?numbered\.*$)',
r'(?P<updated>^upd(ated)?\.*$)',
r'(?P<uzupeUUU0142nione>^uzup(elnione)?\.*$)', # Polish, uzupełnione
r'(?P<Verfasser>^verf(asser)?\.*$)', # German, composer, writer
r'(?P<vergleich>^vergl(eich)?\.*$)', # German, compare
r'(?P<Verzeichnis>^verz(eichnis)?\.*$)', # German, catalogue
r'(?P<videodisc>^video-*disc\.*$)',
r'(?P<volumes>^vol?(ume)?s*\.*$)',
r'(?P<Vorwort>^vorw(ort)?\.*$)', # German, foreword
r'(?P<vydUUU00E1nUUU00ED>^vyd(ani)?\.*$)', # Czech, vydání
r'(?P<vypusk>^vyp(usk)?\.*$)', # Russian
r'(?P<wydanie>^wyd(anie)?\.*$)', # Polish
r'(?P<years>^y(ea)?rs\.*$)',
r'(?P<year>^y(ea)?r\.*$)',
r'(?P<Zeitschrift>^z(ei)?tschr(ift)?\.*$)', # German, periodical
r'(?P<Zeitung>^z(ei)?t(un)?g\.*$)', # German, newspaper
r'(?P<zeszyt>^zesz(yt)?\.*$)', # Polish
r'(?P<zvezek>^zv(ezek)?\.*$)', # Slovenian, volumes
) | nielsenTools/multiregex.py | import sys
import regex as re
__author__ = '<NAME>'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
class MultiRegex(object):
simple = False
regexes = ()
def __init__(self):
try:
self._rx = re.compile('|'.join(self.regexes), flags=re.IGNORECASE)
except:
for r in self.regexes:
try:
re.compile(r)
except:
print('Error in regex: {}'.format(str(r)))
def sub(self, s):
if not s or s is None: return ''
return self._rx.sub(self._sub, s)
def _sub(self, mo):
try:
for k, v in mo.groupdict().items():
if v:
if k == 'AllElse':
return ''
if 'UUU' in str(k):
return bytes(str(k).replace('UUU', '\\' + 'u'), 'ascii').decode('unicode-escape')
try:
sub = getattr(self, k)
if callable(sub):
return sub(mo)
else:
return sub
except:
return str(k)
except:
print('\nError MR: {0}\n'.format(str(sys.exc_info())))
class Abbreviations(MultiRegex):
simple = True
regexes = (
r'(?P<January>^jan(uary)?\.*$)',
r'(?P<February>^feb(ruary)?\.*$)',
r'(?P<March>^m(ar|rz)(ch)?\.*$)',
r'(?P<April>^apr(il)?\.*$)',
r'(?P<June>^june?\.*$)',
r'(?P<July>^july?\.*$)',
r'(?P<August>^aug(ust)?\.*$)',
r'(?P<September>^sept?(ember)?\.*$)',
r'(?P<October>^o[ck]t(ober)?\.*$)',
r'(?P<November>^nov(ember)?\.*$)',
r'(?P<December>^de[cz](ember)?\.*$)',
r'(?P<Monday>^mon(day)?s?\.*$)',
r'(?P<Tuesday>^tues?(day)?s?\.*$)',
r'(?P<Wednesday>^wed(ne)?s?(day)?s?\.*$)',
r'(?P<Thursday>^thur?s?(day)?s?\.*$)',
r'(?P<Friday>^fri(day)?s?\.*$)',
r'(?P<Saturday>^sat(urday)?s?\.*$)',
r'(?P<Sunday>^sun(day)?s?\.*$)',
r'(?P<Abbildung>^abb(ildung)?\.*$)', # German, illustration, figure
r'(?P<Abdruck>^abdr(uck)?\.*$)', # German, impression, print, reproduction
r'(?P<Abhandlung>^abh(andlung)?\.*$)', # German, treatise
r'(?P<AbkUUU00FCrzung>^abk(.rzung)?\.*$)', # German, abbreviation
r'(?P<Abschrift>^abschr(ift)?\.*$)', # German, reprint, copy
r'(?P<Abteilung>^abt(eilung)?\.*$)', # German
r'(?P<approximately>^(ca|approx)\.*$)',
r'(?P<Auflage>^aufl(age)?\.*$)', # German, edition
r'(?P<Ausgabe>^ausg(abe)?\.*$)', # German, edition
r'(?P<augmented>^aug(mented)\.*$)',
r'(?P<BUUU00E4ndchen>^b(aen)?dche?n\.*$)', # German
r'(?P<BUUU00E4nde>^b(ae?n)?de\.*$)', # German
r'(?P<Band>^b(an)?d\.*$)', # German, volume
r'(?P<Bearbeitung>^bearb(eitung)?\.*$)', # German, arrangement
r'(?P<Beiheft>^beih(eft)?\.*$)', # German, supplement
r'(?P<Beispiel>^beisp(iel)?\.*$)', # German, example
r'(?P<beziehungsweise>^be?z(iehungs)?w(eise)?\.*$)', # German, respectively; or, or else; more specifically
r'(?P<bibliography>^bibl(iog)?(raphy)?\.*$)',
r'(?P<books>^bo*ks\.*$)',
r'(?P<book>^bo*k\.*$)',
r'(?P<Buchhandler>^buchh(andler)?\.*$)', # German, bookseller
r'(?P<CDs>^cd-?(rom)?s\.*$)',
r'(?P<CD>^cd-?(rom)?\.*$)',
r'(?P<chiefly>^chiefle*y\.*$)',
r'(?P<cm>^cm\.*$)',
r'(?P<coloured>^colo+u?red\.*$)',
r'(?P<colour>^col(o+u?r|eur)?\.*$)',
r'(?P<columns>^col(umn)?s\.*$)',
r'(?P<corrected>^corr(ected)?\.*$)',
r'(?P<cover>^couv(erture)?\.*$)',
r'(?P<deel>^de*l\.*$)', # Dutch
r'(?P<Department>^dept\.*$)',
r'(?P<diagrams>^diagra?m?s*\.*$)',
r'(?P<dopolnennoe>^dop(ol)?(nennoe)?\.*$)', # Russian
r'(?P<DVDs>^dvd-?(rom)?s\.*$)',
r'(?P<DVD>^dvd-?(rom)?\.*$)',
r'(?P<UUU00E9dition>^[\u00e9\u00C9]d(ition)?\.*$)', # édition
r'(?P<edition>^ed(itio)?n?\.*$)',
r'(?P<Einleitung>^einl(eitung)?\.*$)', # German, introduction
r'(?P<ekdosi>^ekd(osi)?\.*$)', # Greek
r'(?P<engraved>^engr(aved)?\.*$)',
r'(?P<enlarged>^enl(arged)?\.*$)',
r'(?P<erweiterte>^erw(eit)?(erte)?\.*$)', # German
r'(?P<fascicule>^fasc(icule)?\.*$)', # French
r'(?P<facsimiles>^fa(cs|sc)(im)?(ile)?s\.*$)',
r'(?P<facsimile>^fa(cs|sc)(im)?(ile)?\.*$)',
r'(?P<feet>^f[e]*t\.*$)',
r'(?P<figures>^fig(ures)?s*\.*$)',
r'(?P<folded>^(ofld|fold(ed)?)\.*$)',
r'(?P<folio>^fol[io.]*\.*$)',
r'(?P<folios>^fol[io.]*s\.*$)',
r'(?P<frames>^fr(ame)?s*\.*$)',
r'(?P<frontispiece>^front(\.|is)(piece)?\.*$)',
r'(?P<gedruckt>^gedr(uckt)?\.*$)', # German, printed
r'(?P<Gegenwart>^gegenw(art)?\.*$)', # German, present time
r'(?P<genealogical>^geneal(ogical)?\.*$)',
r'(?P<geological>^geol(og)?(ical)?\.*$)',
r'(?P<garren>^g(arre)?n\.*$)', # Basque, nth
r'(?P<Handbuch>^h(an)?db(uch)?\.*$)', # German, handbook, manual
r'(?P<hardback>^h(ard)?b(ac)?k\.*$)',
r'(?P<Hefte>^he*fte\.*$)', # German
r'(?P<Heft>^he*ft\.*$)', # German
r'(?P<Herausgeber>^he?r(au)?sg(eber)?\.*$)', # German, editor
r'(?P<illustrations>^a?il+u?s?(tration.*)?s?\.*$)',
r'(?P<impression>^impr(ession)?\.*$)',
r'(?P<including>^incl?(uding)?\.*$)',
r'(?P<introduction>^introd(uction)?\.*$)',
r'(?P<ispravlennoe>^ispr(avl)?(ennoe)?\.*$)', # Russian
r'(?P<izdaniye>^izd(aniye)?\.*$)', # Russian
r'(?P<Jahreszahl>^j(ahres)?z(ah)?l\.*$)', # German, date, year
r'(?P<jaargang>^jaarg(ang)?\.*$)', # Dutch
r'(?P<Jahrgang>^jahrg(ang)?\.*$)', # German
r'(?P<Jahrhundert>^j(ahr)?h(undert)?\.*$)', # German, century
r'(?P<knjiga>^knj(iga)?\.*$)', # Croatian
r'(?P<mahadurah>^mahad(urah)?\.*$)', # Hebrew
r'(?P<manuscript>^m(ss*|anuscripts?)\.*$)',
r'(?P<microfiche>^micr[io]-*fiches*\.*$)',
r'(?P<microfilm>^micr[io]-*film*\.*$)',
r'(?P<minutes>^min(ute)?s\.*$)',
r'(?P<Mitarbeiter>^mitarb(eiter)?\.*$)', # German, collaborator
r'(?P<Mitwirkung>^mitw(irkung)?\.*$)', # German, cooperation
r'(?P<mm>^mm\.*$)',
r'(?P<music>^mus(ic)?\.*$)',
r'(?P<Nachricht>^nachr(icht)?\.*$)', # German, communication, report, notice
r'(?P<Nachwort>^nachw(ort)?\.*$)', # German, concluding remarks, epilogue
r'(?P<nakladateUUU0142stvUUU00ed>^nakl(ad)?(ate)?\.*$)', # Czech, nakladatełství
r'(?P<Neudruck>^neudr(uck)?\.*$)', # German, reprint
r'(?P<nouvelle>^nouv(elle)?\.*$)', # French
r'(?P<numbers>^n-*(o|ro?|um+b?ero?)s*\.*$)',
r'(?P<oblong>^obl(ong)?\.*$)',
r'(?P<Originalausgabe>^Originalausg(abe)?\.*$)', # German
r'(?P<pages>^pp+(age)?s*\.*$)',
r'(?P<paperback>^p(aper)?b(ac)?k\.*$)',
r'(?P<parts>^p(ar)?t\.*$)',
r'(?P<patippu>^pat(ippu)?\.*$)', # Russian
r'(?P<plates>^pl(at)?e?s*\.*$)',
r'(?P<poprawione>^popr(awione)?\.*$)', # Polish, corrected
r'(?P<portraits>^portr?(ait)?s*\.*$)',
r'(?P<reprinted>^re-*pr(int)?(ed)?\.*$)',
r'(?P<revised>^rev(ised)?\.*$)',
r'(?P<Sammelwerk>^s(ammel)?w(er)?k\.*$)', # German, collected works
r'(?P<Sammlung>^samml(ung)?\.*$)', # German, collection, compilation, set
r'(?P<Schriftleiter>^schriftl(eiter)?\.*$)', # German, editor
r'(?P<selfUUU002Dportraits>^self-?portr?(ait)?s*\.*$)',
r'(?P<series>^ser(ies)?\.*$)',
r'(?P<sheet>^sh\.*$)',
r'(?P<stereograph>^stereo-?graph\.*$)',
r'(?P<sound>^s(oun)?d\.*$)',
r'(?P<Stimmbuch>^st(imm)?b(uch)?\.*$)', # German, part book
r'(?P<supplement>^suppl?(ement)?\.*$)',
r'(?P<svazek>^sv(azek)?\.*$)', # Czech
r'(?P<tomes>^tome?s*\.*$)',
r'(?P<undUUU0020soUUU0020weiter>^u(nd)?\s*so?\s*w(eiter)?\.*$)', # German, and so forth, etc.
r'(?P<unnumbered>^un-?numbered\.*$)',
r'(?P<updated>^upd(ated)?\.*$)',
r'(?P<uzupeUUU0142nione>^uzup(elnione)?\.*$)', # Polish, uzupełnione
r'(?P<Verfasser>^verf(asser)?\.*$)', # German, composer, writer
r'(?P<vergleich>^vergl(eich)?\.*$)', # German, compare
r'(?P<Verzeichnis>^verz(eichnis)?\.*$)', # German, catalogue
r'(?P<videodisc>^video-*disc\.*$)',
r'(?P<volumes>^vol?(ume)?s*\.*$)',
r'(?P<Vorwort>^vorw(ort)?\.*$)', # German, foreword
r'(?P<vydUUU00E1nUUU00ED>^vyd(ani)?\.*$)', # Czech, vydání
r'(?P<vypusk>^vyp(usk)?\.*$)', # Russian
r'(?P<wydanie>^wyd(anie)?\.*$)', # Polish
r'(?P<years>^y(ea)?rs\.*$)',
r'(?P<year>^y(ea)?r\.*$)',
r'(?P<Zeitschrift>^z(ei)?tschr(ift)?\.*$)', # German, periodical
r'(?P<Zeitung>^z(ei)?t(un)?g\.*$)', # German, newspaper
r'(?P<zeszyt>^zesz(yt)?\.*$)', # Polish
r'(?P<zvezek>^zv(ezek)?\.*$)', # Slovenian, volumes
) | 0.154058 | 0.405213 |
from cgitb import text
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify, url_for, redirect
import atexit
import os
import json
import xml.etree.ElementTree as ET
tree = ET.parse('catalog.xml')
root = tree.getroot()
app = Flask(__name__, static_url_path='')
db_name = 'mydb'
client = None
db = None
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = <PASSWORD>['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif "CLOUDANT_URL" in os.environ:
client = Cloudant(os.environ['CLOUDANT_USERNAME'], os.environ['CLOUDANT_PASSWORD'], url=os.environ['CLOUDANT_URL'], connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = <PASSWORD>['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
port = int(os.getenv('PORT', 8000))
@app.route('/')
def index():
tree = ET.parse('catalog.xml')
root = tree.getroot()
return render_template('index.html', data=root, len=len(root))
@app.route('/delete/<int:id>')
def delete(id):
root.remove(root[id])
with open('catalog.xml', 'wb') as f:
tree.write(f)
return redirect(url_for('index'))
@app.route('/insert', methods=["POST"])
def insert():
name = request.form.get('name')
cpu = request.form.get('cpu')
ram = request.form.get('ram')
hdd = request.form.get('hdd')
price = request.form.get('price')
server = ET.Element("server")
name_elem = ET.SubElement(server, 'name')
name_elem.text = name
cpu_elem = ET.SubElement(server, 'cpu')
cpu_elem.text = cpu
ram_elem = ET.SubElement(server, "ram")
ram_elem.text = ram
hdd_elem = ET.SubElement(server, "hdd")
hdd_elem.text = hdd
price_elem = ET.SubElement(server, "price")
price_elem.text = price
root.insert(len(root), server)
with open('catalog.xml', 'wb') as f:
tree.write(f)
return redirect(url_for('index'))
@atexit.register
def shutdown():
if client:
client.disconnect()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True) | hello.py | from cgitb import text
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify, url_for, redirect
import atexit
import os
import json
import xml.etree.ElementTree as ET
tree = ET.parse('catalog.xml')
root = tree.getroot()
app = Flask(__name__, static_url_path='')
db_name = 'mydb'
client = None
db = None
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = <PASSWORD>['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif "CLOUDANT_URL" in os.environ:
client = Cloudant(os.environ['CLOUDANT_USERNAME'], os.environ['CLOUDANT_PASSWORD'], url=os.environ['CLOUDANT_URL'], connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = <PASSWORD>['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
port = int(os.getenv('PORT', 8000))
@app.route('/')
def index():
tree = ET.parse('catalog.xml')
root = tree.getroot()
return render_template('index.html', data=root, len=len(root))
@app.route('/delete/<int:id>')
def delete(id):
root.remove(root[id])
with open('catalog.xml', 'wb') as f:
tree.write(f)
return redirect(url_for('index'))
@app.route('/insert', methods=["POST"])
def insert():
name = request.form.get('name')
cpu = request.form.get('cpu')
ram = request.form.get('ram')
hdd = request.form.get('hdd')
price = request.form.get('price')
server = ET.Element("server")
name_elem = ET.SubElement(server, 'name')
name_elem.text = name
cpu_elem = ET.SubElement(server, 'cpu')
cpu_elem.text = cpu
ram_elem = ET.SubElement(server, "ram")
ram_elem.text = ram
hdd_elem = ET.SubElement(server, "hdd")
hdd_elem.text = hdd
price_elem = ET.SubElement(server, "price")
price_elem.text = price
root.insert(len(root), server)
with open('catalog.xml', 'wb') as f:
tree.write(f)
return redirect(url_for('index'))
@atexit.register
def shutdown():
if client:
client.disconnect()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True) | 0.322526 | 0.046486 |
import argparse
import json
import numpy as np
import os
# manually selected list
benchs_list = {
"raw": ["cartpolereduced", "BNNOnProteinStructure", "BNNOnYearPrediction"],
"surro": ["ParamNetReducedAdultOnTimeBenchmark", "ParamNetReducedHiggsOnTimeBenchmark",
"ParamNetReducedLetterOnTimeBenchmark", "ParamNetReducedMnistOnTimeBenchmark",
"ParamNetReducedOptdigitsOnTimeBenchmark", "ParamNetReducedPokerOnTimeBenchmark",
"Cifar10ValidNasBench201Benchmark", "Cifar100NasBench201Benchmark",
"ImageNetNasBench201Benchmark", "NASCifar10ABenchmark", "NASCifar10BBenchmark", "NASCifar10CBenchmark",
"SliceLocalizationBenchmark", "ProteinStructureBenchmark",
"NavalPropulsionBenchmark", "ParkinsonsTelemonitoringBenchmark",
"NASBench1shot1SearchSpace1Benchmark", "NASBench1shot1SearchSpace2Benchmark",
"NASBench1shot1SearchSpace3Benchmark",
]
}
if __name__ == "__main__":
# Script to compute used wallclocktime
parser = argparse.ArgumentParser()
parser.add_argument('--inp', required=True, type=str)
args, unknown = parser.parse_known_args()
time_unit = 60*60
for lsname in benchs_list:
print("*"*80)
print(lsname)
print("*"*80)
res_dc = {}
table_header = []
assert os.path.isdir(args.inp)
for b in benchs_list[lsname]:
inp_path = os.path.join(args.inp, f"{b}/stats2_{b}_all.json")
if not os.path.isfile(inp_path):
print(f"Skipping {b}, {inp_path} does not exist")
continue
table_header.append(r"\multicolumn{2}{1}{%s}" % b)
with open(inp_path) as fh:
data = json.load(fh)
for opt in data:
if opt == "lowest_val": continue
if opt in ("autogluon", "ray_randomsearch"): continue
else:
if opt not in res_dc: res_dc[opt] = 0
res_dc[opt] += np.sum(data[opt]["act_wc_time"])
for opt in res_dc:
print("%s: %d" % (opt, np.rint(res_dc[opt])/time_unit))
print("Total:", np.sum([res_dc[i] for i in res_dc])/time_unit/24/365, "CPU years") | scripts/get_runtime.py | import argparse
import json
import numpy as np
import os
# manually selected list
benchs_list = {
"raw": ["cartpolereduced", "BNNOnProteinStructure", "BNNOnYearPrediction"],
"surro": ["ParamNetReducedAdultOnTimeBenchmark", "ParamNetReducedHiggsOnTimeBenchmark",
"ParamNetReducedLetterOnTimeBenchmark", "ParamNetReducedMnistOnTimeBenchmark",
"ParamNetReducedOptdigitsOnTimeBenchmark", "ParamNetReducedPokerOnTimeBenchmark",
"Cifar10ValidNasBench201Benchmark", "Cifar100NasBench201Benchmark",
"ImageNetNasBench201Benchmark", "NASCifar10ABenchmark", "NASCifar10BBenchmark", "NASCifar10CBenchmark",
"SliceLocalizationBenchmark", "ProteinStructureBenchmark",
"NavalPropulsionBenchmark", "ParkinsonsTelemonitoringBenchmark",
"NASBench1shot1SearchSpace1Benchmark", "NASBench1shot1SearchSpace2Benchmark",
"NASBench1shot1SearchSpace3Benchmark",
]
}
if __name__ == "__main__":
# Script to compute used wallclocktime
parser = argparse.ArgumentParser()
parser.add_argument('--inp', required=True, type=str)
args, unknown = parser.parse_known_args()
time_unit = 60*60
for lsname in benchs_list:
print("*"*80)
print(lsname)
print("*"*80)
res_dc = {}
table_header = []
assert os.path.isdir(args.inp)
for b in benchs_list[lsname]:
inp_path = os.path.join(args.inp, f"{b}/stats2_{b}_all.json")
if not os.path.isfile(inp_path):
print(f"Skipping {b}, {inp_path} does not exist")
continue
table_header.append(r"\multicolumn{2}{1}{%s}" % b)
with open(inp_path) as fh:
data = json.load(fh)
for opt in data:
if opt == "lowest_val": continue
if opt in ("autogluon", "ray_randomsearch"): continue
else:
if opt not in res_dc: res_dc[opt] = 0
res_dc[opt] += np.sum(data[opt]["act_wc_time"])
for opt in res_dc:
print("%s: %d" % (opt, np.rint(res_dc[opt])/time_unit))
print("Total:", np.sum([res_dc[i] for i in res_dc])/time_unit/24/365, "CPU years") | 0.233357 | 0.230205 |
import re
import requests
import logging
from copy import deepcopy
from typing import List, Dict
from logging import Logger
from datetime import timedelta, datetime
import fbchat
from fbchat import Client, User, Message, Mention, ThreadType
class Reporter(Client):
    """fbchat client that caches incoming messages and reposts any that get unsent."""

    debug: bool            # when True, our own outgoing messages are processed too
    logger: Logger         # bot's own logger (separate from the fbchat client's)
    maxage: timedelta      # how long a cached message is kept before it expires
    # NOTE(review): class-level mutable default is shared across instances;
    # __init__ re-assigns a fresh dict per instance, so this effectively only
    # serves as an annotation — confirm before relying on the class attribute.
    messages: Dict[str, Message] = dict()
def __init__(
self,
username: str,
password: str,
cookies: Dict,
maxage=timedelta(hours=1),
debug=False,
) -> None:
level = logging.DEBUG if debug else logging.INFO
client_level = logging.INFO if debug else logging.ERROR
logger = Logger(Reporter.__name__, level)
logger.addHandler(logging.StreamHandler())
logger.info("Authenticating client...")
super(Reporter, self).__init__(
username,
password,
session_cookies=cookies,
logging_level=client_level,
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15",
)
self.setDefaultThread(self.uid, ThreadType.USER)
logger.info("Client authenticated.")
self.debug = debug
self.logger = logger
self.maxage = maxage
self.messages = dict()
def __clean(self) -> bool:
self.logger.info("Cleaning up expired messages...")
now = datetime.now()
discarded = 0
messages: Dict[str, Message] = dict()
for id, message in self.messages.items():
timestamp = datetime.fromtimestamp(message.timestamp / 1000)
timestamp += timedelta(milliseconds=(message.timestamp % 1000))
if now > (timestamp + self.maxage):
discarded += 1
else:
messages[id] = message
if discarded:
self.logger.info(f"Discarded {discarded} messages.")
self.messages = messages
return True
self.logger.info("No expired messages.")
return False
__counter = 0
def onMessage(
self, mid: str, message: str, author_id: str, message_object: Message, **kwargs,
) -> None:
if not self.debug:
if author_id == self.uid:
return
self.logger.debug(f"Received message: {message}")
self.logger.debug(f"Cached messages: {len(self.messages)}")
self.messages[mid] = message_object
self.__counter += 1
if self.__counter >= 60:
self.__counter = 0
self.__clean()
def onMessageUnsent(self, mid: str, author_id: str, **kwargs) -> None:
if not self.debug:
if author_id == self.uid:
return
author: User = self.fetchUserInfo(author_id)[author_id]
name = author.name
self.logger.info(f"Caught unsend by {name}.")
message = Message(
f"{name} unsent a message.",
mentions=[Mention(author_id, length=len(name))],
)
id = self.send(message)
if mid not in self.messages:
return
message: Message = deepcopy(self.messages[mid])
message.reply_to_id = id
files = Reporter.__message_files(message)
if files:
self.sendRemoteFiles(files, message)
else:
self.send(message)
    @staticmethod
    def __message_files(message: Message) -> List[str]:
        """Collect remote URLs for every attachment of *message*."""
        files = list()
        for a in message.attachments:
            if isinstance(a, fbchat.ImageAttachment):
                if a.is_animated:
                    # Animated images expose a dedicated preview URL.
                    files.append(a.animated_preview_url)
                else:
                    # Prefer the largest available still preview.
                    url = a.large_preview_url or a.preview_url or a.thumbnail_url
                    if url:
                        files.append(url)
            elif isinstance(a, fbchat.VideoAttachment):
                files.append(a.preview_url)
            elif isinstance(a, fbchat.FileAttachment):
                # a.url appears to point at a JS redirect page; extract the
                # real target from document.location.replace(...).
                r = requests.get(a.url)
                if r.status_code == 200:
                    url = re.search(
                        r'document\.location\.replace\("(.*)"\);', r.text,
                    ).group(1)
                    # Unescape JS-escaped slashes.
                    url = url.replace(r"\/", "/")
                    files.append(url)
return files | veritaserum/reporter.py | import re
import requests
import logging
from copy import deepcopy
from typing import List, Dict
from logging import Logger
from datetime import timedelta, datetime
import fbchat
from fbchat import Client, User, Message, Mention, ThreadType
class Reporter(Client):
debug: bool
logger: Logger
maxage: timedelta
messages: Dict[str, Message] = dict()
def __init__(
self,
username: str,
password: str,
cookies: Dict,
maxage=timedelta(hours=1),
debug=False,
) -> None:
level = logging.DEBUG if debug else logging.INFO
client_level = logging.INFO if debug else logging.ERROR
logger = Logger(Reporter.__name__, level)
logger.addHandler(logging.StreamHandler())
logger.info("Authenticating client...")
super(Reporter, self).__init__(
username,
password,
session_cookies=cookies,
logging_level=client_level,
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15",
)
self.setDefaultThread(self.uid, ThreadType.USER)
logger.info("Client authenticated.")
self.debug = debug
self.logger = logger
self.maxage = maxage
self.messages = dict()
def __clean(self) -> bool:
self.logger.info("Cleaning up expired messages...")
now = datetime.now()
discarded = 0
messages: Dict[str, Message] = dict()
for id, message in self.messages.items():
timestamp = datetime.fromtimestamp(message.timestamp / 1000)
timestamp += timedelta(milliseconds=(message.timestamp % 1000))
if now > (timestamp + self.maxage):
discarded += 1
else:
messages[id] = message
if discarded:
self.logger.info(f"Discarded {discarded} messages.")
self.messages = messages
return True
self.logger.info("No expired messages.")
return False
__counter = 0
def onMessage(
self, mid: str, message: str, author_id: str, message_object: Message, **kwargs,
) -> None:
if not self.debug:
if author_id == self.uid:
return
self.logger.debug(f"Received message: {message}")
self.logger.debug(f"Cached messages: {len(self.messages)}")
self.messages[mid] = message_object
self.__counter += 1
if self.__counter >= 60:
self.__counter = 0
self.__clean()
def onMessageUnsent(self, mid: str, author_id: str, **kwargs) -> None:
if not self.debug:
if author_id == self.uid:
return
author: User = self.fetchUserInfo(author_id)[author_id]
name = author.name
self.logger.info(f"Caught unsend by {name}.")
message = Message(
f"{name} unsent a message.",
mentions=[Mention(author_id, length=len(name))],
)
id = self.send(message)
if mid not in self.messages:
return
message: Message = deepcopy(self.messages[mid])
message.reply_to_id = id
files = Reporter.__message_files(message)
if files:
self.sendRemoteFiles(files, message)
else:
self.send(message)
@staticmethod
def __message_files(message: Message) -> List[str]:
files = list()
for a in message.attachments:
if isinstance(a, fbchat.ImageAttachment):
if a.is_animated:
files.append(a.animated_preview_url)
else:
url = a.large_preview_url or a.preview_url or a.thumbnail_url
if url:
files.append(url)
elif isinstance(a, fbchat.VideoAttachment):
files.append(a.preview_url)
elif isinstance(a, fbchat.FileAttachment):
r = requests.get(a.url)
if r.status_code == 200:
url = re.search(
r'document\.location\.replace\("(.*)"\);', r.text,
).group(1)
url = url.replace(r"\/", "/")
files.append(url)
return files | 0.610221 | 0.072505 |
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial schema migration for the meetings app.

    NOTE(review): the ForeignKey declarations in this migration carry no
    on_delete argument, so it targets Django < 2.0 — confirm before upgrading.
    """
    initial = True
    # Must run after the custom user model and core's initial migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
operations = [
migrations.CreateModel(
name='AssignedMedicalCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('board_member', models.ForeignKey(related_name='assigned_medical_categories', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('category', models.ForeignKey(to='core.MedicalCategory')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Constraint',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_time', models.TimeField(null=True, blank=True)),
('end_time', models.TimeField(null=True, blank=True)),
('weight', models.FloatField(default=0.5, choices=[(1.0, 'impossible'), (0.5, 'unfavorable')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start', models.DateTimeField()),
('title', models.CharField(max_length=200)),
('optimization_task_id', models.TextField(null=True)),
('started', models.DateTimeField(null=True)),
('ended', models.DateTimeField(null=True)),
('comments', models.TextField(null=True, blank=True)),
('deadline', models.DateTimeField(null=True)),
('deadline_diplomathesis', models.DateTimeField(null=True)),
('agenda_sent_at', models.DateTimeField(null=True)),
('protocol_sent_at', models.DateTimeField(null=True)),
('expedited_reviewer_invitation_sent_for', models.DateTimeField(null=True)),
('expedited_reviewer_invitation_sent_at', models.DateTimeField(null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Participation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ignored_for_optimization', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TimetableEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, blank=True)),
('timetable_index', models.IntegerField(null=True)),
('duration_in_seconds', models.PositiveIntegerField()),
('is_break', models.BooleanField(default=False)),
('optimal_start', models.TimeField(null=True)),
('is_open', models.BooleanField(default=True)),
('meeting', models.ForeignKey(related_name='timetable_entries', to='meetings.Meeting')),
('submission', models.ForeignKey(related_name='timetable_entries', to='core.Submission', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='timetableentry',
unique_together={('meeting', 'timetable_index')},
),
migrations.AddField(
model_name='participation',
name='entry',
field=models.ForeignKey(related_name='participations', to='meetings.TimetableEntry'),
preserve_default=True,
),
migrations.AddField(
model_name='participation',
name='medical_category',
field=models.ForeignKey(related_name='meeting_participations', blank=True, to='core.MedicalCategory', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='participation',
name='user',
field=models.ForeignKey(related_name='meeting_participations', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='meeting',
name='submissions',
field=models.ManyToManyField(related_name='meetings', through='meetings.TimetableEntry', to='core.Submission'),
preserve_default=True,
),
migrations.AddField(
model_name='constraint',
name='meeting',
field=models.ForeignKey(related_name='constraints', to='meetings.Meeting'),
preserve_default=True,
),
migrations.AddField(
model_name='constraint',
name='user',
field=models.ForeignKey(related_name='meeting_constraints', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='assignedmedicalcategory',
name='meeting',
field=models.ForeignKey(related_name='medical_categories', to='meetings.Meeting'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='assignedmedicalcategory',
unique_together={('category', 'meeting')},
),
] | ecs/meetings/migrations/0001_initial.py | from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AssignedMedicalCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('board_member', models.ForeignKey(related_name='assigned_medical_categories', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('category', models.ForeignKey(to='core.MedicalCategory')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Constraint',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_time', models.TimeField(null=True, blank=True)),
('end_time', models.TimeField(null=True, blank=True)),
('weight', models.FloatField(default=0.5, choices=[(1.0, 'impossible'), (0.5, 'unfavorable')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start', models.DateTimeField()),
('title', models.CharField(max_length=200)),
('optimization_task_id', models.TextField(null=True)),
('started', models.DateTimeField(null=True)),
('ended', models.DateTimeField(null=True)),
('comments', models.TextField(null=True, blank=True)),
('deadline', models.DateTimeField(null=True)),
('deadline_diplomathesis', models.DateTimeField(null=True)),
('agenda_sent_at', models.DateTimeField(null=True)),
('protocol_sent_at', models.DateTimeField(null=True)),
('expedited_reviewer_invitation_sent_for', models.DateTimeField(null=True)),
('expedited_reviewer_invitation_sent_at', models.DateTimeField(null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Participation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ignored_for_optimization', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TimetableEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, blank=True)),
('timetable_index', models.IntegerField(null=True)),
('duration_in_seconds', models.PositiveIntegerField()),
('is_break', models.BooleanField(default=False)),
('optimal_start', models.TimeField(null=True)),
('is_open', models.BooleanField(default=True)),
('meeting', models.ForeignKey(related_name='timetable_entries', to='meetings.Meeting')),
('submission', models.ForeignKey(related_name='timetable_entries', to='core.Submission', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='timetableentry',
unique_together={('meeting', 'timetable_index')},
),
migrations.AddField(
model_name='participation',
name='entry',
field=models.ForeignKey(related_name='participations', to='meetings.TimetableEntry'),
preserve_default=True,
),
migrations.AddField(
model_name='participation',
name='medical_category',
field=models.ForeignKey(related_name='meeting_participations', blank=True, to='core.MedicalCategory', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='participation',
name='user',
field=models.ForeignKey(related_name='meeting_participations', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='meeting',
name='submissions',
field=models.ManyToManyField(related_name='meetings', through='meetings.TimetableEntry', to='core.Submission'),
preserve_default=True,
),
migrations.AddField(
model_name='constraint',
name='meeting',
field=models.ForeignKey(related_name='constraints', to='meetings.Meeting'),
preserve_default=True,
),
migrations.AddField(
model_name='constraint',
name='user',
field=models.ForeignKey(related_name='meeting_constraints', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='assignedmedicalcategory',
name='meeting',
field=models.ForeignKey(related_name='medical_categories', to='meetings.Meeting'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='assignedmedicalcategory',
unique_together={('category', 'meeting')},
),
] | 0.560373 | 0.184768 |
from .base import Base
from utilities import authenticate
import requests
import datetime
class reservations(Base):
    """Make 'get reservations' function calls to Teem
    with parameters passed via CLI"""

    # Known room name (lower-case) -> Teem calendar/room id.
    Rooms = {
        'showcase': 130700,
        'pistachio': 218764,
        'almond': 218763,
        '22-91': 219151,
        '22-92': 219152,
        '22-93': 219153,
        'toronto': 135254,
        'test room': 167492,
        'techbar': 177863,
        'tower a lobby': 77522
    }

    # BUGFIX: the methods below were defined without `self` but are called as
    # bound methods (self.get_reservations(...), self.prompt(...), ...), which
    # silently bound the instance to the first real parameter or raised
    # TypeError. All of them now take `self` explicitly.

    def get_reservations(self, access_token, reservation_id=None, parameters=None):
        """
        Returns a dictionary of all reservations, a single reservation or the
        reservations of a single room depending on the input parameters.

        @ Parameter - 'access_token' - Teem access token
        @ Parameter - 'reservation_id' - Id of an individual reservation
        @ Parameter - 'parameters' - dictionary of values to modify results
          of get_reservations api call. Visible in Teem API documentation.

        Raises requests.HTTPError on non-2xx responses.
        """
        # BUGFIX: mutable default argument replaced with None sentinel.
        if parameters is None:
            parameters = {}
        print(parameters)
        endpoint = 'calendars/reservations/'
        base_url = 'https://app.teem.com/api/v4/'
        if reservation_id in ('null', 'None', None):
            url = base_url + endpoint
        else:
            url = base_url + endpoint + str(reservation_id) + '/'
        headers = {'Authorization': 'Bearer ' + access_token}
        r = requests.get(url, params=parameters, headers=headers)
        r.raise_for_status()
        data = r.json()
        response = {}
        try:
            response['reservations'] = data['reservations']
            response['meta'] = data['meta']
        except KeyError:
            # Single-reservation responses carry a 'reservation' key and no
            # 'meta'; wrap it in a list for a uniform return shape.
            print("No Meta")
            response['reservations'] = [data['reservation']]
        return response

    def prompt(self, parameters):
        """Print a human-readable summary of the requested query."""
        print("Received the following options from command line", parameters)
        verb = 'Looping through' if parameters['loop'] else 'Getting'
        room = parameters['room'] if parameters['room'] else 'all rooms'
        if parameters['before']:
            before = f"from {parameters['before']}"
        else:
            before = 'the beginning of time'
        if parameters['after']:
            after = f"until {parameters['after']}"
        else:
            after = 'until the end of time'
        print(f"{verb} reservations for {room} {before} {after}")

    def map_rooms(self, room_name):
        """Translate a room name (case-insensitive) to its Teem room id."""
        return self.Rooms[room_name.lower()]

    def run(self, parameters):
        """Entry point: authenticate against Teem, then fetch and print
        reservations, optionally looping until no results remain."""
        if parameters['verbose']:
            self.prompt(parameters)
        if parameters['room'] is not None:
            # BUGFIX: was self.map_rooms(self, ...), passing `self` twice.
            parameters['room_id'] = self.map_rooms(parameters.pop('room'))
        creds = authenticate.load_credentials()
        tokens = authenticate.obtain_token(creds['teem_access_key_id'],
                                           creds['teem_secret_access_key'],
                                           creds['teem_username'],
                                           creds['teem_password'],
                                           'https://app.teem.com/oauth/token/',
                                           ['users', 'reservations', 'accounts'])
        if parameters['loop']:
            while True:
                response = self.get_reservations(tokens['access_token'],
                                                 parameters['reservation'],
                                                 parameters)
                if response['meta']['filtered_total'] >= 1:
                    self.print_reservations(response['reservations'])
                else:
                    # BUGFIX: this message was unreachable (placed after
                    # `break`); print it before leaving the loop.
                    print("No reservations to show with the current filters")
                    break
        else:
            response = self.get_reservations(tokens['access_token'],
                                             parameters['reservation'],
                                             parameters)
            if response['meta']['filtered_total'] >= 1:
                self.print_reservations(response['reservations'])
            else:
                print("No reservations to show with the current filters")

    def print_reservations(self, reservations, info=None):
        """Print the selected fields of each reservation, then its times.

        @ Parameter - 'reservations' - list of reservation dicts
        @ Parameter - 'info' - optional list of field names to print
        """
        # BUGFIX: mutable default argument replaced with None sentinel.
        if not info:
            info = ['room_id', 'title',
                    'creator', 'id',
                    'participant_ids', 'checked_in']
        for event in reservations:
            for item in info:
                if item == 'creator':
                    try:
                        print(event[item]['first_name'])
                    except TypeError:
                        pass
                elif item == 'checked_in':
                    try:
                        print(item, self.convert_time(event[item]))
                    except TypeError:
                        # checked_in may be None; just print the label.
                        print(item)
                else:
                    print(item, event[item])
            print("Starts: {}, Ends: {}".format(self.convert_time(event['starts_at']),
                                                self.convert_time(event['ends_at'])))

    @staticmethod
    def convert_time(time_stamp):
        """Convert a unix timestamp to 'YYYY-MM-DD HH:MM:SS'."""
        # BUGFIX: was a bare class-level function referenced unqualified from
        # print_reservations (NameError at runtime); now a staticmethod.
        return datetime.datetime.fromtimestamp(int(time_stamp)).strftime('%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
    # NOTE(review): __init__ is inherited from Base — presumably it collects
    # these keyword arguments into a parameters dict; confirm, because run()
    # below is called without its required `parameters` argument and would
    # raise TypeError as written.
    res = reservations(room='Showcase', loop=True, before='10:30')
    res.run()
res.args | teem/commands/reservations.py | from .base import Base
from utilities import authenticate
import requests
import datetime
class reservations(Base):
"""Make 'get reservations' function calls to Teem
with parameters passed via CLI"""
Rooms = {
'showcase': 130700,
'pistachio': 218764,
'almond': 218763,
'22-91': 219151,
'22-92': 219152,
'22-93': 219153,
'toronto': 135254,
'test room': 167492,
'techbar': 177863,
'tower a lobby': 77522
}
def get_reservations(access_token, reservation_id=None, parameters={}):
"""
Returns a dictionary of all reservations, a sigle reservation or the
reservations of a single room depending on the input parameters
@ Parameter - 'access_token' - Teem access token
@ Parameter - 'reseration_id' - Id of an individual reservation
@ Parameter - 'parameters' - dictionary of values to modify results
of get_reservations api call. Visible in Teem API documentation.
"""
print(parameters)
reservations = 'calendars/reservations/'
base_url = 'https://app.teem.com/api/v4/'
nulls = ['null', 'None', None]
if reservation_id in nulls:
url = base_url + reservations
else:
url = base_url + reservations + str(reservation_id) + '/'
headers = {'Authorization': 'Bearer ' + access_token}
try:
r = requests.get(url, params=parameters, headers=headers)
except Exception as e:
raise e
#print(r.status_code)
r.raise_for_status()
data = r.json()
response = {}
try:
response['reservations'] = data['reservations']
response['meta'] = data['meta']
except KeyError as e:
print("No Meta")
try:
response['reservations'] = []
response['reservations'].append(data['reservation'])
except KeyError as e:
raise e
return response
def prompt(parameters):
print("Received the following options from command line", parameters)
if parameters['loop']:
verb = 'Looping through'
else:
verb = 'Getting'
if parameters['room']:
room = parameters['room']
else:
room = 'all rooms'
if parameters['before']:
before = f"from {parameters['before']}"
else:
before = 'the beginning of time'
if parameters['after']:
after = f"until {parameters['after']}"
else:
after = 'until the end of time'
print(f"{verb} reservations for {room} {before} {after}")
def map_rooms(self, room_name):
return self.Rooms[room_name.lower()]
def run(self, parameters):
if parameters['verbose']:
self.prompt(parameters)
if parameters['room'] is not None:
parameters['room_id'] = self.map_rooms(self, parameters.pop('room'))
## if parameters['before'] or parameters['after'] is not None:
try:
creds = authenticate.load_credentials()
tokens = authenticate.obtain_token(creds['teem_access_key_id'],
creds['teem_secret_access_key'],
creds['teem_username'],
creds['teem_password'],
'https://app.teem.com/oauth/token/',
['users', 'reservations', 'accounts'])
except Exception as e:
raise e
if parameters['loop']:
while True:
try:
response = self.get_reservations(tokens['access_token'],
parameters['reservation'],
parameters)
except Exception as e:
raise e
else:
if response['meta']['filtered_total'] >= 1:
self.print_reservations(response['reservations'])
else:
break
print("No reservations to show with the current filters")
else:
try:
response = self.get_reservations(tokens['access_token'],
parameters['reservation'],
parameters)
except Exception as e:
raise e
else:
if response['meta']['filtered_total'] >= 1:
self.print_reservations(response['reservations'])
else:
print("No reservations to show with the current filters")
def print_reservations(reservations, info=[]):
interesting = ['room_id','title',
'creator','id',
'participant_ids','checked_in']
if not info:
info = interesting
for event in reservations:
for item in info:
if item == 'creator':
try:
print(event[item]['first_name'])
except TypeError:
pass
elif item == 'checked_in':
try:
print(item, convert_time(event[item]))
except TypeError:
print(item)
else:
print(item, event[item])
print("Starts: {}, Ends: {}".format(convert_time(event['starts_at']),
convert_time(event['ends_at'])))
def convert_time(time_stamp):
return datetime.datetime.fromtimestamp(int(time_stamp)).strftime('%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
res = reservations(room='Showcase', loop=True, before='10:30')
res.run()
res.args | 0.330471 | 0.112065 |
from django import forms
from django.utils.translation import gettext_lazy as _
from .base import ChangeSettingsForm
class ChangeThreadsSettingsForm(ChangeSettingsForm):
    """Admin form for thread/post limits, pagination, read tracking and
    attachment placeholder images."""
    # Setting names handled by this form; each (except the *_delete toggles
    # below) has a matching form field of the same name.
    settings = [
        "attachment_403_image",
        "attachment_404_image",
        "daily_post_limit",
        "hourly_post_limit",
        "post_attachments_limit",
        "post_length_max",
        "post_length_min",
        "readtracker_cutoff",
        "thread_title_length_max",
        "thread_title_length_min",
        "unused_attachments_lifetime",
        "threads_per_page",
        "posts_per_page",
        "posts_per_page_orphans",
        "events_per_page",
    ]
daily_post_limit = forms.IntegerField(
label=_("Daily post limit per user"),
help_text=_(
"Daily limit of posts that may be posted by single user. "
"Fail-safe for situations when forum is flooded by spam bots. "
"Change to 0 to remove the limit."
),
min_value=0,
)
hourly_post_limit = forms.IntegerField(
label=_("Hourly post limit per user"),
help_text=_(
"Hourly limit of posts that may be posted by single user. "
"Fail-safe for situations when forum is flooded by spam bots. "
"Change to 0 to remove the limit."
),
min_value=0,
)
post_attachments_limit = forms.IntegerField(
label=_("Maximum number of attachments per post"), min_value=1
)
post_length_max = forms.IntegerField(
label=_("Maximum allowed post length"), min_value=0
)
post_length_min = forms.IntegerField(
label=_("Minimum required post length"), min_value=1
)
thread_title_length_max = forms.IntegerField(
label=_("Maximum allowed thread title length"), min_value=2, max_value=255
)
thread_title_length_min = forms.IntegerField(
label=_("Minimum required thread title length"), min_value=2, max_value=255
)
unused_attachments_lifetime = forms.IntegerField(
label=_("Unused attachments lifetime"),
help_text=_(
"Period of time (in hours) after which user-uploaded files that weren't "
"attached to any post are deleted from disk."
),
min_value=1,
)
readtracker_cutoff = forms.IntegerField(
label=_("Read-tracker cutoff"),
help_text=_(
"Controls amount of data used by read-tracking system. All content older "
"than number of days specified in this setting is considered old and read, "
"even if the opposite is true for the user. Active forums can try lowering "
"this value while less active ones may wish to increase it instead. "
),
min_value=1,
)
threads_per_page = forms.IntegerField(
label=_("Number of threads displayed on a single page"), min_value=10
)
posts_per_page = forms.IntegerField(
label=_("Number of posts displayed on a single page"), min_value=5
)
posts_per_page_orphans = forms.IntegerField(
label=_("Maximum orphans"),
help_text=_(
"If number of posts to be displayed on the last page is less or equal to "
"number specified in this setting, those posts will instead be displayed "
"on previous page, reducing the total number of pages in thread."
),
min_value=0,
)
events_per_page = forms.IntegerField(
label=_("Maximum number of events displayed on a single page"), min_value=5
)
attachment_403_image = forms.ImageField(
label=_("Permission denied"),
help_text=_(
"Attachments proxy will display this image in place of default one "
"when user tries to access attachment they have no permission to see."
),
required=False,
)
attachment_403_image_delete = forms.BooleanField(
label=_("Delete custom permission denied image"), required=False
)
attachment_404_image = forms.ImageField(
label=_("Not found"),
help_text=_(
"Attachments proxy will display this image in place of default one "
"when user tries to access attachment that doesn't exist."
),
required=False,
)
attachment_404_image_delete = forms.BooleanField(
label=_("Delete custom not found image"), required=False
)
    def clean(self):
        """Cross-field validation: the orphans threshold must not exceed the
        page size."""
        cleaned_data = super().clean()
        # NOTE(review): .get() returns None when a field failed its own
        # validation, and None > None raises TypeError — confirm both fields
        # are guaranteed present here (they are required IntegerFields).
        if cleaned_data.get("posts_per_page_orphans") > cleaned_data.get(
            "posts_per_page"
        ):
            self.add_error(
                "posts_per_page_orphans",
                _("This value must be lower than number of posts per page."),
            )
return cleaned_data | misago/misago/conf/admin/forms/threads.py | from django import forms
from django.utils.translation import gettext_lazy as _
from .base import ChangeSettingsForm
class ChangeThreadsSettingsForm(ChangeSettingsForm):
settings = [
"attachment_403_image",
"attachment_404_image",
"daily_post_limit",
"hourly_post_limit",
"post_attachments_limit",
"post_length_max",
"post_length_min",
"readtracker_cutoff",
"thread_title_length_max",
"thread_title_length_min",
"unused_attachments_lifetime",
"threads_per_page",
"posts_per_page",
"posts_per_page_orphans",
"events_per_page",
]
daily_post_limit = forms.IntegerField(
label=_("Daily post limit per user"),
help_text=_(
"Daily limit of posts that may be posted by single user. "
"Fail-safe for situations when forum is flooded by spam bots. "
"Change to 0 to remove the limit."
),
min_value=0,
)
hourly_post_limit = forms.IntegerField(
label=_("Hourly post limit per user"),
help_text=_(
"Hourly limit of posts that may be posted by single user. "
"Fail-safe for situations when forum is flooded by spam bots. "
"Change to 0 to remove the limit."
),
min_value=0,
)
post_attachments_limit = forms.IntegerField(
label=_("Maximum number of attachments per post"), min_value=1
)
post_length_max = forms.IntegerField(
label=_("Maximum allowed post length"), min_value=0
)
post_length_min = forms.IntegerField(
label=_("Minimum required post length"), min_value=1
)
thread_title_length_max = forms.IntegerField(
label=_("Maximum allowed thread title length"), min_value=2, max_value=255
)
thread_title_length_min = forms.IntegerField(
label=_("Minimum required thread title length"), min_value=2, max_value=255
)
unused_attachments_lifetime = forms.IntegerField(
label=_("Unused attachments lifetime"),
help_text=_(
"Period of time (in hours) after which user-uploaded files that weren't "
"attached to any post are deleted from disk."
),
min_value=1,
)
readtracker_cutoff = forms.IntegerField(
label=_("Read-tracker cutoff"),
help_text=_(
"Controls amount of data used by read-tracking system. All content older "
"than number of days specified in this setting is considered old and read, "
"even if the opposite is true for the user. Active forums can try lowering "
"this value while less active ones may wish to increase it instead. "
),
min_value=1,
)
threads_per_page = forms.IntegerField(
label=_("Number of threads displayed on a single page"), min_value=10
)
posts_per_page = forms.IntegerField(
label=_("Number of posts displayed on a single page"), min_value=5
)
posts_per_page_orphans = forms.IntegerField(
label=_("Maximum orphans"),
help_text=_(
"If number of posts to be displayed on the last page is less or equal to "
"number specified in this setting, those posts will instead be displayed "
"on previous page, reducing the total number of pages in thread."
),
min_value=0,
)
events_per_page = forms.IntegerField(
label=_("Maximum number of events displayed on a single page"), min_value=5
)
attachment_403_image = forms.ImageField(
label=_("Permission denied"),
help_text=_(
"Attachments proxy will display this image in place of default one "
"when user tries to access attachment they have no permission to see."
),
required=False,
)
attachment_403_image_delete = forms.BooleanField(
label=_("Delete custom permission denied image"), required=False
)
attachment_404_image = forms.ImageField(
label=_("Not found"),
help_text=_(
"Attachments proxy will display this image in place of default one "
"when user tries to access attachment that doesn't exist."
),
required=False,
)
attachment_404_image_delete = forms.BooleanField(
label=_("Delete custom not found image"), required=False
)
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get("posts_per_page_orphans") > cleaned_data.get(
"posts_per_page"
):
self.add_error(
"posts_per_page_orphans",
_("This value must be lower than number of posts per page."),
)
return cleaned_data | 0.602997 | 0.118793 |
import sys
from socket import *
import threading
import time
import datetime as dt
# Command-line arguments: server hostname, server TCP port, local UDP port.
servername = sys.argv[1]
serverPort = sys.argv[2]
udpPort = sys.argv[3]
serverPort = int(serverPort)
# Create the TCP socket used for commands/authentication.
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((servername, serverPort))
# Create the UDP socket used for file transfers.
# BUGFIX: after `from socket import *` the name `socket` is the socket
# class, not the module, so socket.gethostname() raised AttributeError —
# call the star-imported functions directly.
hostname = gethostname()
local_ip = gethostbyname(hostname)
portnum = int(udpPort)
udpsock = socket(AF_INET, SOCK_DGRAM)
# BUGFIX: bind() needs an integer port; udpPort is the raw argv string
# (the converted `portnum` was computed but never used).
udpsock.bind((local_ip, portnum))
# Start a background thread that receives pushed files over UDP.
def udprec():
    """Receive files over UDP, forever.

    Protocol (as implied by the sender — TODO confirm): the first datagram
    of a transfer carries the file name; each following non-empty datagram
    is a chunk of content appended to that file; an empty datagram ends the
    transfer.
    """
    while True:
        # First datagram of a transfer is the file name.
        raw, addr = udpsock.recvfrom(1024)
        filename = raw.decode('utf-8')
        # Subsequent datagrams are content chunks; append them to the file.
        while True:
            chunk, addr = udpsock.recvfrom(1024)
            if not chunk:
                break
            # BUGFIX: original called open(filename, a) with the undefined
            # name `a`, wrote str data to a text handle, and its inner loop
            # could never run (l was reset to '' before `while (l)`).
            with open(filename, 'ab') as f:
                f.write(chunk)

thread = threading.Thread(target=udprec)
thread.start()
# Authentication handshake with the server over the TCP socket.
# Returns True on successful login (after reporting our UDP port to the
# server), False when the account is locked out.
def authenticate():
    while True:
        receivedMessage = clientSocket.recv(2048)
        receivedMessage = receivedMessage.decode('utf-8')
        if receivedMessage == "Username\r\n":
            message = input("Username: ")
            clientSocket.send(message.encode('utf-8'))
        elif receivedMessage == "Password\r\n":
            message = input("Password: ")
            clientSocket.send(message.encode('utf-8'))
        elif receivedMessage == "Invalid Password\r\n":
            # Wrong password but not yet locked: prompt again.
            print("Invalid Password. Please try again\n")
            message = input("Password: ")
            clientSocket.send(message.encode('utf-8'))
        # Returning False means the account is locked.
        elif receivedMessage == "Locked\r\n":
            print("Invalid Password. Your account has been blocked. Please try again later\n")
            return False
        elif receivedMessage == "Still locked\r\n":
            print("Your account is blocked due to multiple login failures. Please try again later\n")
            return False
        elif receivedMessage == "Login Success\r\n":
            # Tell the server which UDP port this client listens on.
            clientSocket.send(udpPort.encode('utf-8'))
            return True
# Handle the server's reply to an MSG (post message) command.
def msg(word):
    """Print the confirmation for a posted message.

    The server replies with "<seq> <timestamp...>". `word` (the message text
    that was sent) is currently unused but kept for interface compatibility.
    """
    confirm = clientSocket.recv(2048).decode('utf-8')
    confirm = confirm.split()
    # BUGFIX(idiom): the local holding the timestamp was named `time`,
    # shadowing the imported `time` module within this function.
    posted_at = ' '.join(confirm[1::])
    message = 'Message ' + '#' + confirm[0] + ' ' + 'posted at ' + posted_at + '.\n'
    print(message)
# Handle the server's reply to a DLT command.  The reply is either an error
# tag ('Seq' / 'User' / 'Timestamp') or 'Delete <timestamp...>' on success.
def dlt(infor):
    parts = infor.split()
    errors = {
        'Seq': 'The sequence number you provided is invalid\n',
        'User': 'You do not have the authority to delete this message\n',
        'Timestamp': 'The timestamp you provided does not match the log. Please check\n',
    }
    tag = parts[0]
    if tag in errors:
        print(errors[tag])
    elif tag == 'Delete':
        stamp = ' '.join(parts[1:])
        print('The deletion at ' + stamp + ' is successful\n')
# Handle the server's reply to an EDT command.  The reply is either an error
# tag ('Seq' / 'User' / 'Timestamp') or 'Edit <timestamp...>' on success.
def edt(infor):
    parts = infor.split()
    tag = parts[0]
    if tag == 'Seq':
        print('The sequence number you provided is invalid\n')
    elif tag == 'User':
        # BUG FIX: the message said "delete" (copy-pasted from dlt()).
        print('You do not have the authority to edit this message\n')
    elif tag == 'Timestamp':
        print('The timestamp you provided does not match the log. Please check\n')
    elif tag == 'Edit':
        # BUG FIX: removed a leftover debug print("enter\n").
        stamp = ' '.join(parts[1:])
        print('The Edit operation at ' + stamp + ' is successful\n')
def upd():
    # Placeholder: the UPD (file transfer) flow is handled inline in the
    # main command loop below; this stub was never implemented.
    pass
# The authenticate function returns True or False.
# If True, enter the interactive command loop.
ifloged = authenticate()
while ifloged:
    print("Welcome to TOOM!")
    allcommand = input("Enter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):")
    command = allcommand[0:3]
    if command == 'MSG':
        # Check the usage of this command
        if allcommand == 'MSG':
            print("Error! Need message after MSG command\n")
        else:
            clientSocket.send(allcommand.encode('utf-8'))
            msg(allcommand[4::])
    elif command == 'DLT':
        # We need to check the usage of DLT
        if allcommand == 'DLT':
            print("Error! Need seq number and timestamp after DLT command\n")
        else:
            clientSocket.send(allcommand.encode('utf-8'))
            lists = allcommand[4::].split()
            if len(lists) <= 2:
                print("Error! Need seq number and timestamp after DLT command\n")
            else:
                recev = clientSocket.recv(2048).decode('utf-8')
                dlt(recev)
    elif command == 'EDT':
        if allcommand == 'EDT':
            print("Error! Need seq number, timestamp, and modified message after EDT command\n")
        else:
            lists = allcommand[4::].split()
            if len(lists) <= 2:
                print("Error! Need seq number, timestamp, and modified message after EDT command\n")
            else:
                clientSocket.send(allcommand.encode('utf-8'))
                recev = clientSocket.recv(2048).decode('utf-8')
                edt(recev)
    elif command == 'RDM':
        if allcommand == 'RDM':
            print("Error! Need timestamp after EDT command\n")
        else:
            clientSocket.send(allcommand.encode('utf-8'))
            recev = clientSocket.recv(2048).decode('utf-8')
            print(recev)
    elif command == 'ATU':
        if allcommand == command:
            clientSocket.send('ATU'.encode('utf-8'))
            print('The active user list returned: \n')
            # BUG FIX: the received list was read but never shown to the user.
            print(clientSocket.recv(2048).decode('utf-8'))
        else:
            print("Error! ATU command does not take any argument.\n")
    elif command == 'UPD':
        if allcommand == 'UPD':
            print("Error! Need filename and username after MSG command\n")
        else:
            info = allcommand[4::].split()
            recevname = info[0]        # audience username
            file = info[-1]            # local file to send
            filename = '_'.join(info)  # name used on the receiving side
            # Ask the server whether the audience is online.
            clientSocket.send(recevname.encode('utf-8'))
            # BUG FIX: this reply used to be stored in a variable named
            # `msg`, clobbering the msg() helper defined above and breaking
            # every later MSG command.
            reply = clientSocket.recv(1024).decode('utf-8')
            if reply == 'Offline':
                print(recevname + ' is offline\n')
            else:
                # BUG FIX: split the "<ip> <port>" reply BEFORE indexing it
                # (the original indexed the raw string, yielding single
                # characters) and convert the port to int for every sendto().
                addr = reply.split()
                target = (addr[0], int(addr[1]))
                # First we send the filename to the audience.
                udpsock.sendto(filename.encode('utf-8'), target)
                with open(file, 'rb') as f:
                    line = f.read(1024)
                    while line:
                        udpsock.sendto(line, target)
                        line = f.read(1024)
                # BUG FIX: do NOT close udpsock here — it is shared with the
                # receiver thread and with any later UPD transfers.
    elif command == 'OUT':
        if allcommand == command:
            clientSocket.send('OUT'.encode('utf-8'))
            info = clientSocket.recv(2048).decode('utf-8')
            print("Thank you for using. You have logged out.\n")
            break
        else:
            print("Error! OUT command does not take any argument.\n")
    else:
        print("This command is invalid. Please try again with either one of MSG, DLT, EDT, RDM, ATU, OUT and UPD\n")
clientSocket.close() | code/testclient.py | import sys
from socket import *
import threading
import time
import datetime as dt
# The argument of client
servername = sys.argv[1]
serverPort = sys.argv[2]
udpPort = sys.argv[3]
serverPort = int(serverPort)
# Create the TCP socket
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((servername, serverPort))
# Create the UDP socket
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
portnum = int(udpPort)
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.bind((local_ip, udpPort))
# Start a thread for UDP transfer
def udprec():
while (True):
# We receive the filename first
l, addr = udpsock.recvfrom(1024)
# Save the filename if file name is not defined
filename = ''
if not filename:
filename = l.decode('utf-8')
l = ''
# Next the
while (l):
f = open(filename, a)
f.write(l)
f.close()
l, addr = udpsock.recvfrom(1024)
thread = threading.Thread(target=udprec)
thread.start()
# This is the authentication function
# It process the reply info comes from the server
def authenticate():
while True:
receivedMessage = clientSocket.recv(2048)
receivedMessage = receivedMessage.decode('utf-8')
if receivedMessage == "Username\r\n":
message = input("Username: ")
clientSocket.send(message.encode('utf-8'))
elif receivedMessage == "Password\r\n":
message = input("Password: ")
clientSocket.send(message.encode('utf-8'))
elif receivedMessage == "Invalid Password\r\n":
print("Invalid Password. Please try again\n")
message = input("Password: ")
clientSocket.send(message.encode('utf-8'))
# If return False, it means you are locked.
elif receivedMessage == "Locked\r\n":
print("Invalid Password. Your account has been blocked. Please try again later\n")
return False
elif receivedMessage == "Still locked\r\n":
print("Your account is blocked due to multiple login failures. Please try again later\n")
return False
elif receivedMessage == "Login Success\r\n":
clientSocket.send(udpPort.encode('utf-8'))
return True
# Respond to message sent by the dlt function in server
def msg(word):
# print(clientSocket)
confirm = clientSocket.recv(2048).decode('utf-8')
confirm = confirm.split()
time = ' '.join(confirm[1::])
message = 'Message ' + '#' + confirm[0] + ' ' + 'posted at ' + time + '.\n'
print(message)
# Respond to message sent by the dlt function in server
def dlt(infor):
infor = infor.split()
info = infor[0]
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Delete':
time = ' '.join(infor[1::])
print('The deletion at ' + time + ' is successful\n')
# Respond to message sent by the dlt function in server
def edt(infor):
infor = infor.split()
info = infor[0]
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Edit':
print("enter\n")
time = ' '.join(infor[1::])
print('The Edit operation at ' + time + ' is successful\n')
def upd():
pass
# The authenticate function will retrun true or false
# If true, the welcome message will print
ifloged = authenticate()
while ifloged:
print("Welcome to TOOM!")
allcommand = input("Enter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):")
command = allcommand[0:3]
if command == 'MSG':
# Check the usage of this command
if allcommand == 'MSG':
print("Error! Need message after MSG command\n")
else:
clientSocket.send(allcommand.encode('utf-8'))
msg(allcommand[4::])
elif command == 'DLT':
# We need to check the usage of DLT
if allcommand == 'DLT':
print("Error! Need seq number and timestamp after DLT command\n")
else:
clientSocket.send(allcommand.encode('utf-8'))
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number and timestamp after DLT command\n")
else:
recev = clientSocket.recv(2048).decode('utf-8')
dlt(recev)
elif command == 'EDT':
if allcommand == 'EDT':
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.send(allcommand.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
edt(recev)
elif command == 'RDM':
if allcommand == 'RDM':
print("Error! Need timestamp after EDT command\n")
else:
info = allcommand[4::]
clientSocket.send(allcommand.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
print(recev)
elif command == 'ATU':
if allcommand == command:
clientSocket.send('ATU'.encode('utf-8'))
print('The active user list returned: \n')
info = clientSocket.recv(2048).decode('utf-8')
else:
print("Error! ATU command does not take any argument.\n")
elif command == 'UPD':
if allcommand == 'UPD':
print("Error! Need filename and username after MSG command\n")
else:
info = allcommand[4::]
info = info.split()
# The username and filename
recevname = info[0]
file = info[-1]
# The new filename
filename = '_'.join(info)
# Need to check if the username if online
clientSocket.send(recevname.encode('utf-8'))
msg = clientSocket.recv(1024).decode('utf-8')
# If offline, then print offline
if msg == 'Offline':
print(recevname +' is offline\n')
else:
# First we send the filename to the audience
udpsock.sendto(filename.encode('utf-8'), (msg[0], int(msg[1])))
msg = msg.split()
f = open(file, 'rb')
line = f.read(1024)
while (line):
udpsock.sendto(line, (msg[0], msg[1]))
line = f.read(1024)
udpsock.close()
elif command == 'OUT':
if allcommand == command:
clientSocket.send('OUT'.encode('utf-8'))
info = clientSocket.recv(2048).decode('utf-8')
print("Thank you for using. You have logged out.\n")
break
else:
print("Error! OUT command does not take any argument.\n")
else:
print("This command is invalid. Please try again with either one of MSG, DLT, EDT, RDM, ATU, OUT and UPD\n")
clientSocket.close() | 0.126515 | 0.050518 |
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Class that holds both the left and right line tracking data
class tracker():
    # Constructor: store the sliding-window centroid-search parameters.
    def __init__(self, Mywindow_width, Mywindow_height, Mymargin,
                 My_ym = 1, My_xm = 1, Mysmooth_factor = 15):
        """Configure the window-based centroid tracker.

        Args:
            Mywindow_width: pixel width of the convolution window.
            Mywindow_height: pixel height of each vertical search layer.
            Mymargin: pixel range searched around the previous centroid.
            My_ym: meters per pixel in the y dimension.
            My_xm: meters per pixel in the x dimension.
            Mysmooth_factor: number of past frames averaged for smoothing.
        """
        # past left right center list
        self.recent_centers = []
        # Pixel width of window
        self.window_width = Mywindow_width
        # Pixel height of window
        self.window_height = Mywindow_height
        # Margin
        self.margin = Mymargin
        # Meters per pixel in y.
        self.ym_per_pix = My_ym
        # Meters per pixel in x (the original comment wrongly said y).
        self.xm_per_pix = My_xm
        # Smooth factor.
        self.smooth_factor = Mysmooth_factor
    # Tracking function
    def find_window_centroids(self, warped):
        """Find the (left, right) window-centroid x-position for each layer.

        Args:
            warped: 2-D image array to search.
                NOTE(review): presumably a top-down ("warped") binary mask —
                confirm with the caller.

        Returns:
            Per-layer (left, right) centroid positions averaged over the
            last `smooth_factor` frames recorded by this tracker.
        """
        window_width = self.window_width
        window_height = self.window_height
        margin = self.margin
        window_centroids = []
        # Box filter used to score candidate centroid columns.
        window = np.ones(window_width)
        # Sum quarter bottom of image to get slice.
        img_hgt = warped.shape[0]
        img_wdt = warped.shape[1]
        # Left: strongest column response in the bottom-left quadrant.
        l_sum = np.sum(warped[int(3*img_hgt/4):, :int(img_wdt/2)], axis=0)
        # np.convolve shifts the response by the kernel width, hence -window_width/2.
        l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
        # Right: same search in the bottom-right quadrant.
        r_sum = np.sum(warped[int(3*img_hgt/4):, int(img_wdt/2):], axis=0)
        r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(img_wdt/2)
        # Add what we find to the first layer
        window_centroids.append((l_center, r_center))
        # Go through each layer looking for max pixel locations.
        for level in range(1, (int)(img_hgt/window_height)):
            # Convolve the rectangle on the layer.
            image_layer = np.sum(warped[int(img_hgt-(level+1)*window_height):int(img_hgt-level*window_height),:], axis=0)
            conv_signal = np.convolve(window, image_layer)
            # Offset to center
            offset = window_width/2
            # Find the left centroid, constrained to +/- margin around the
            # previous left centroid.
            l_min_index = int(max(l_center+offset-margin,0))
            l_max_index = int(min(l_center+offset+margin,img_wdt))
            lmax = np.max(conv_signal[l_min_index:l_max_index])
            # Do not update if the signal is zero
            if(lmax > 0):
                l_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset
            # Find the right centroid, constrained the same way.
            r_min_index = int(max(r_center+offset-margin,0))
            r_max_index = int(min(r_center+offset+margin,img_wdt))
            rmax = np.max(conv_signal[r_min_index:r_max_index])
            # Do not update if the signal is zero
            if(rmax > 0):
                r_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset
            # Add to list.
            window_centroids.append((l_center, r_center))
        # Append this frame's centroids to the running history.
        self.recent_centers.append(window_centroids)
        # Return the average over smooth_factor count of the past centers.
return np.average(self.recent_centers[-self.smooth_factor:], axis=0) | tracker.py | import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Class that holds both the left and right line tracking data
class tracker():
# Constructor?
def __init__(self, Mywindow_width, Mywindow_height, Mymargin,
My_ym = 1, My_xm = 1, Mysmooth_factor = 15):
# past left right center list
self.recent_centers = []
# Pixel width of window
self.window_width = Mywindow_width
# Pixel height of window
self.window_height = Mywindow_height
# Margin
self.margin = Mymargin
# Meters per pixel in y.
self.ym_per_pix = My_ym
# Meters per pixel in y.
self.xm_per_pix = My_xm
# Smooth factor.
self.smooth_factor = Mysmooth_factor
# Tracking function
def find_window_centroids(self, warped):
window_width = self.window_width
window_height = self.window_height
margin = self.margin
window_centroids = []
window = np.ones(window_width)
# Sum quarter bottom of image to get slice.
img_hgt = warped.shape[0]
img_wdt = warped.shape[1]
# Left
l_sum = np.sum(warped[int(3*img_hgt/4):, :int(img_wdt/2)], axis=0)
l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
# Right
r_sum = np.sum(warped[int(3*img_hgt/4):, int(img_wdt/2):], axis=0)
r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(img_wdt/2)
# Add what we find to the first layer
window_centroids.append((l_center, r_center))
# Go through each layer looking for max pixel locations.
for level in range(1, (int)(img_hgt/window_height)):
# Convolve the rectangle on the layer.
image_layer = np.sum(warped[int(img_hgt-(level+1)*window_height):int(img_hgt-level*window_height),:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Offset to center
offset = window_width/2
# Find left centroid of the maximum signal.
l_min_index = int(max(l_center+offset-margin,0))
l_max_index = int(min(l_center+offset+margin,img_wdt))
lmax = np.max(conv_signal[l_min_index:l_max_index])
# Do not update if the signal is zero
if(lmax > 0):
l_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset
# Find right centroid of the maximum signal.
r_min_index = int(max(r_center+offset-margin,0))
r_max_index = int(min(r_center+offset+margin,img_wdt))
rmax = np.max(conv_signal[r_min_index:r_max_index])
# Do not update if the signal is zero
if(rmax > 0):
r_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset
# Add to list.
window_centroids.append((l_center, r_center))
# Append to the list window_centroids.
self.recent_centers.append(window_centroids)
# Return the average over smooth_factor count of the past centers.
return np.average(self.recent_centers[-self.smooth_factor:], axis=0) | 0.678647 | 0.339663 |
import os
import time
import typing
import typer
from hrflow_importer.importer.worker import send_batch_to_hrflow
from hrflow_importer.utils.config.config import config #TODO improve module naming for import
from hrflow import Hrflow
# Destination of importer run logs inside the configured storage directory.
PIPELINES_LOGS_FILE = "{}/importer_logs.txt".format(config.STORAGE_DIRECTORY_PATH)
# Typer application object; CLI commands below register themselves on it.
cli = typer.Typer()
def display_results(results: typing.Counter) -> None:
    """Echo per-status counts together with their share of the total.

    Prints nothing when `results` is empty (total of zero).
    """
    total = sum(results.values())
    if not total:
        return
    typer.echo("\t\t{:<40} {:>5}".format("total", total))
    for status, count in results.items():
        line = "{:<40} {:>5} ({:.0%})".format(status, count, count / total)
        typer.echo("\t\t" + line)
# ---- CLI ----
# The functions defined in this section are wrappers around the main function
# send_batch_to_hrflow allowing them to be called directly from the terminal as a CLI
# executable/script.
@cli.command()
def local(max_workers: int = typer.Option(None)):
    """Parse files in ./app_data/files and import them into a HrFlow source.

    Interactively prompts for worker settings (multiprocessing on/off,
    inter-call sleep) and HrFlow credentials, then sends every file found
    in the configured local storage folder to the given source.
    """
    section_name = "Worker Parameters"
    seperator = "=" * ((100 - len(section_name)) // 2)
    typer.echo(seperator + section_name + seperator)
    multiprocess = int(typer.prompt("Use multiprocessing [1: yes, 0: no]"))
    if multiprocess:
        sleep_period = 6  # default value
        typer.echo("Proceeding with multiprocessing.")
    else:
        typer.echo("Proceeding without multiprocessing. Select sleep time between API calls.")
        sleep_period = int(typer.prompt("Sleep period (in seconds)"))
    section_name = "HrFlow API Config"
    seperator = "=" * ((100 - len(section_name)) // 2)
    typer.echo(seperator + section_name + seperator)
    api_secret = typer.prompt("API Secret Key ")
    team = typer.prompt("Team Name ")
    source_key = typer.prompt("Source Key ")
    api_user = typer.prompt("API User Email ")
    client = Hrflow(api_secret=api_secret, api_user=api_user)
    start = time.time()
    typer.echo("=" * 100)
    typer.echo("[Import Command] Started")
    typer.echo("[Import Command][Stats]")
    filename_list = os.listdir(
        os.path.join(config.STORAGE_DIRECTORY_PATH, config.LOCAL_FILES_FOLDER)
    )  # TODO prompt through cli parameters
    file_reference_list = filename_list  # TODO integrate reference generation in FileHandler internal logic
    typer.echo("\t\t n_files={}".format(len(filename_list)))
    typer.echo("[Import Command][Importing]")
    # BUG FIX: "Imorting" -> "Importing" in the user-facing progress message.
    typer.echo(
        "\t\t Importing files to \n\t\t\tteam={} \n\t\t\tsource={}".format(
            team, source_key
        )
    )
    # TODO refactor this part
    parsing_results = send_batch_to_hrflow(client,
                                           source_key,
                                           filename_list,
                                           file_reference_list,
                                           multiprocess,
                                           sleep_period
                                           )
    typer.echo("[Import Command][Parsing] Results")
    display_results(parsing_results)
    typer.echo("[Import Command] Finished in {:.1f}s".format(time.time() - start))
    typer.echo("=" * 100)
if __name__ == "__main__":
cli() | src/hrflow_importer/import_cli.py | import os
import time
import typing
import typer
from hrflow_importer.importer.worker import send_batch_to_hrflow
from hrflow_importer.utils.config.config import config #TODO improve module naming for import
from hrflow import Hrflow
PIPELINES_LOGS_FILE = "{}/importer_logs.txt".format(config.STORAGE_DIRECTORY_PATH)
cli = typer.Typer()
def display_results(results: typing.Counter) -> None:
total = sum(results.values())
if total > 0:
typer.echo("\t\t{:<40} {:>5}".format("total", total))
for status, count in results.items():
typer.echo(
"\t\t" + "{:<40} {:>5} ({:.0%})".format(status, count, count / total)
)
# ---- CLI ----
# The functions defined in this section are wrappers around the main function
# send_batch_to_hrflow allowing them to be called directly from the terminal as a CLI
# executable/script.
@cli.command()
def local(max_workers: int = typer.Option(None)):
"""Parse files in ./app_data/files."""
section_name = "Worker Parameters"
seperator = "=" * ((100 - len(section_name))//2)
typer.echo(seperator + section_name + seperator)
multiprocess = int(typer.prompt("Use multiprocessing [1: yes, 0: no]"))
if multiprocess:
sleep_period = 6 #default value
typer.echo("Proceeding with multiprocessing.")
else:
typer.echo("Proceeding without multiprocessing. Select sleep time between API calls.")
sleep_period = int(typer.prompt("Sleep period (in seconds)"))
section_name = "HrFlow API Config"
seperator = "=" * ((100 - len(section_name))//2)
typer.echo(seperator + section_name + seperator)
api_secret = typer.prompt("API Secret Key ")
team = typer.prompt("Team Name ")
source_key = typer.prompt("Source Key ")
api_user = typer.prompt("API User Email ")
client = Hrflow(api_secret=api_secret, api_user=api_user)
start = time.time()
typer.echo("=" * 100)
typer.echo("[Import Command] Started")
typer.echo("[Import Command][Stats]")
filename_list = os.listdir(os.path.join(config.STORAGE_DIRECTORY_PATH, config.LOCAL_FILES_FOLDER)) #TODO prompt through cli parameters
file_reference_list = filename_list #TODO integrate reference generation in FileHandler internal logic
n_files = len(filename_list)
typer.echo("\t\t n_files={}".format(n_files))
typer.echo("[Import Command][Importing]")
typer.echo(
"\t\t Imorting files to \n\t\t\tteam={} \n\t\t\tsource={}".format(
team, source_key
)
)
#TODO refactor this part
parsing_results = send_batch_to_hrflow(client,
source_key,
filename_list,
file_reference_list,
multiprocess,
sleep_period
)
#parsing_results = send_batch_to_hrflow(client, source_key, filename_list, file_reference_list, max_workers)
typer.echo("[Import Command][Parsing] Results")
display_results(parsing_results)
typer.echo("[Import Command] Finished in {:.1f}s".format(time.time() - start))
typer.echo("=" * 100)
if __name__ == "__main__":
cli() | 0.215598 | 0.169681 |
import torch
import torch.nn as nn
import torch.nn.functional as F
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over all dims after the first two.

    Flattens `tensor` to (N, C, -1) and reduces the last axis, returning a
    (N, C, 1) tensor.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    # Subtract the per-row maximum before exponentiating for stability.
    peak = flat.max(dim=2, keepdim=True).values
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
    """Return a 3x3 Conv2d layer with the given stride/padding/bias."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=strd,
                     padding=padding,
                     bias=bias)
class ConvBlock(nn.Module):
    """Residual block with three cascaded BN-ReLU-conv3x3 branches.

    The three branch outputs (out_planes/2, out_planes/4, out_planes/4
    channels) are concatenated to out_planes channels and added to the
    (possibly 1x1-projected) input.
    """

    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        half = int(out_planes / 2)
        quarter = int(out_planes / 4)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, half)
        self.bn2 = nn.BatchNorm2d(half)
        self.conv2 = conv3x3(half, quarter)
        self.bn3 = nn.BatchNorm2d(quarter)
        self.conv3 = conv3x3(quarter, quarter)
        # Project the shortcut only when the channel counts differ.
        if in_planes != out_planes:
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        branch1 = self.conv1(F.relu(self.bn1(x), True))
        branch2 = self.conv2(F.relu(self.bn2(branch1), True))
        branch3 = self.conv3(F.relu(self.bn3(branch2), True))
        out = torch.cat((branch1, branch2, branch3), 1)
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return out
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        # len(x) is the size of the first (batch) dimension.
        return x.view(len(x), -1)
class ChannelGate(nn.Module):
def __init__(
self, gate_channels, face_classes, reduction_ratio=16, pool_types=["avg"]
):
""" """
super(ChannelGate, self).__init__()
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, gate_channels // reduction_ratio),
nn.ReLU(),
nn.Linear(gate_channels // reduction_ratio, face_classes),
)
self.pool_types = pool_types
self.face_classes = face_classes
    def forward(self, x):
        # x: (batch, channels, height, width); channels must divide evenly
        # into face_classes groups for the grouped view below.
        b, c, h, w = x.shape
        assert c % self.face_classes == 0
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == "avg":
                # Global average pool to (b, c, 1, 1), then MLP -> (b, face_classes).
                avg_pool = F.avg_pool2d(
                    x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
                )
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == "max":
                # Global max pool variant of the same reduction.
                max_pool = F.max_pool2d(
                    x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
                )
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == "lp":
                # Global L2 (power-2) pool variant.
                lp_pool = F.lp_pool2d(
                    x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
                )
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == "lse":
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)
            # Sum the attention logits across all selected pooling variants.
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw
        # scale: (b, 1, face_classes, 1, 1); broadcasts over the grouped view.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).unsqueeze(1)
        # Regroup channels as (b, c // face_classes, face_classes, h, w) so each
        # face-class group is gated by its own sigmoid weight.
        x = x.view(b, -1, self.face_classes, h, w)
        out = x * scale
return out.view(b, -1, h, w).contiguous() | ibug/age_estimation/module.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=strd, padding=padding, bias=bias
)
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes):
super(ConvBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = conv3x3(in_planes, int(out_planes / 2))
self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4))
self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4))
if in_planes != out_planes:
self.downsample = nn.Sequential(
nn.BatchNorm2d(in_planes),
nn.ReLU(True),
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False),
)
else:
self.downsample = None
def forward(self, x):
residual = x
out1 = self.bn1(x)
out1 = F.relu(out1, True)
out1 = self.conv1(out1)
out2 = self.bn2(out1)
out2 = F.relu(out2, True)
out2 = self.conv2(out2)
out3 = self.bn3(out2)
out3 = F.relu(out3, True)
out3 = self.conv3(out3)
out3 = torch.cat((out1, out2, out3), 1)
if self.downsample is not None:
residual = self.downsample(residual)
out3 += residual
return out3
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(
self, gate_channels, face_classes, reduction_ratio=16, pool_types=["avg"]
):
""" """
super(ChannelGate, self).__init__()
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, gate_channels // reduction_ratio),
nn.ReLU(),
nn.Linear(gate_channels // reduction_ratio, face_classes),
)
self.pool_types = pool_types
self.face_classes = face_classes
def forward(self, x):
b, c, h, w = x.shape
assert c % self.face_classes == 0
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type == "avg":
avg_pool = F.avg_pool2d(
x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
)
channel_att_raw = self.mlp(avg_pool)
elif pool_type == "max":
max_pool = F.max_pool2d(
x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
)
channel_att_raw = self.mlp(max_pool)
elif pool_type == "lp":
lp_pool = F.lp_pool2d(
x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))
)
channel_att_raw = self.mlp(lp_pool)
elif pool_type == "lse":
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp(lse_pool)
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).unsqueeze(1)
x = x.view(b, -1, self.face_classes, h, w)
out = x * scale
return out.view(b, -1, h, w).contiguous() | 0.935553 | 0.696479 |
import unittest
def interleavedp(begins, ends, m=None):
    """Check that begin/end token positions interleave correctly.

    Returns True when begins[i] < ends[i] < begins[i+1] < ends[i+1] for all
    i and, when `m` (optional "else" token positions) is given, the else
    tokens are sorted and each begin/end span contains at most one of them.
    Diagnostic messages are printed for every failure mode.

    NOTE: print is used in single-argument call form so the module stays
    valid under both Python 2 and Python 3.
    """
    if len(begins) != len(ends):
        print('begin-end token number mismatch')
        # Should learn to throw...
        return False
    if m:
        if len(m) > len(begins):
            print('excess else tokens')
            return False
    ok = True
    for i in range(len(begins) - 1):
        ok = ok and begins[i] < ends[i] < begins[i + 1] < ends[i + 1]
    if not ok:
        print('begin-end token order mismatch')
        return False
    if m:
        ok = True
        for i in range(len(m) - 1):
            ok = ok and m[i] < m[i + 1]
        if not ok:
            print('else tokens out of order')
            # BUG FIX: the original fell through here, relying on later
            # checks to (only sometimes) catch the problem; reject directly.
            return False
        i = 0
        j = 0
        notDone = True
        if m[i] < begins[j]:
            print('else token before if token')
            return False
        while j < len(begins) and notDone:
            k = 0
            # Count else tokens that fall before the current end token.
            while i < len(m) and m[i] < ends[j]:
                i = i + 1
                k = k + 1
            if k > 1:
                print('too many else tokens')
                return False
            notDone = i < len(m)
            j = j + 1
        if i < len(m):
            print('else token after endifs token')
            return False
    return True
class TestInterleaved(unittest.TestCase):
    """Exercise interleavedp over ordered, mismatched, and else-token inputs."""

    def test_InOrder(self):
        self.assertTrue(interleavedp([1, 3, 5, 7, 9], [2, 4, 6, 8, 10]))

    def test_NumberMismatch(self):
        self.assertFalse(interleavedp([1, 3, 7, 9], [2, 4, 6, 8, 10]))

    def test_OrderMismatch1(self):
        self.assertFalse(interleavedp([1, 3, 5, 7, 9], [2, 4, 6, 10, 12]))

    def test_OrderMismatch2(self):
        self.assertFalse(interleavedp([1, 3, 5, 7, 9], [4, 6, 10, 12, 14]))

    def test_OrderMismatch3(self):
        self.assertFalse(interleavedp([1, 3, 5, 7, 11], [2, 4, 6, 8, 10]))

    def test_ElseMid1(self):
        self.assertTrue(interleavedp([10], [20], m=[15]))

    def test_ElseMid2(self):
        self.assertFalse(interleavedp([10], [15], m=[15]))

    def test_ElseMid3(self):
        self.assertFalse(interleavedp([10], [15], m=[15]))

    def test_ElseMid4(self):
        self.assertFalse(interleavedp([10], [20], m=[5]))

    def test_ElseMid5(self):
        self.assertTrue(interleavedp([10, 30], [20, 40], m=[15]))

    def test_ElseMid6(self):
        self.assertFalse(interleavedp([10, 30], [20, 40], m=[15, 17]))

    def test_ElseMid7(self):
        self.assertFalse(interleavedp([10, 30], [20, 40], m=[25, 30]))

    def test_ElseMid8(self):
        self.assertFalse(interleavedp([10, 30], [20, 40], m=[25, 31, 35]))

    def test_ElseMid9(self):
        self.assertFalse(interleavedp([10, 30, 50], [20, 40, 60], m=[25, 55, 35]))

    def test_ElseMid10(self):
        self.assertTrue(interleavedp([10, 30, 50], [20, 40, 60], m=[15, 35, 55]))
if __name__ == '__main__' :
unittest.main() | components/elm/src/external_models/sbetr/3rd-party/pfunit/bin/mods/pre/interleavedp.py |
import unittest
def interleavedp(begins,ends,m=None) :
if( len(begins) != len(ends) ) :
print 'begin-end token number mismatch'
# Should learn to throw...
return False
if m :
if len(m) > len(begins) :
print 'excess else tokens'
return False
ok = True
for i in range(len(begins)-1) :
ok = ok and begins[i] < ends[i] < begins[i+1] < ends[i+1]
if not ok :
print 'begin-end token order mismatch'
return False
if m :
ok = True
for i in range(len(m)-1) :
ok = ok and m[i] < m[i+1]
if not ok :
print 'else tokens out of order'
ok = True
i = 0 ; j = 0 ; notDone = True
if m[i] < begins[j] :
print 'else token before if token'
return False
while j < len(begins) and notDone :
k = 0
while i < len(m) and m[i] < ends[j] :
i = i + 1
k = k + 1
if k > 1 :
print 'too many else tokens'
return False
notDone = i < len(m)
j = j + 1
if i < len(m):
print 'else token after endifs token'
return False
return True
class TestInterleaved(unittest.TestCase):
    """Exercise interleavedp() on begin/end (and optional else) token lists."""

    def test_InOrder(self):
        """Strictly alternating begin/end positions are accepted."""
        self.assertEqual(interleavedp([1, 3, 5, 7, 9], [2, 4, 6, 8, 10]), True)

    def test_NumberMismatch(self):
        """Fewer begin tokens than end tokens is rejected."""
        self.assertEqual(interleavedp([1, 3, 7, 9], [2, 4, 6, 8, 10]), False)

    def test_OrderMismatch1(self):
        """Ends drifting past their matching begins is rejected."""
        self.assertEqual(interleavedp([1, 3, 5, 7, 9], [2, 4, 6, 10, 12]), False)

    def test_OrderMismatch2(self):
        """A globally shifted end list is rejected."""
        self.assertEqual(interleavedp([1, 3, 5, 7, 9], [4, 6, 10, 12, 14]), False)

    def test_OrderMismatch3(self):
        """A begin token past its end token is rejected."""
        self.assertEqual(interleavedp([1, 3, 5, 7, 11], [2, 4, 6, 8, 10]), False)

    def test_ElseMid1(self):
        """A single else token strictly between begin and end is accepted."""
        self.assertEqual(interleavedp([10], [20], m=[15]), True)

    def test_ElseMid2(self):
        """An else token colliding with an end position is rejected."""
        self.assertEqual(interleavedp([10], [15], m=[15]), False)

    def test_ElseMid3(self):
        # NOTE(review): same data as test_ElseMid2; kept to preserve the
        # original suite's test count and names.
        self.assertEqual(interleavedp([10], [15], m=[15]), False)

    def test_ElseMid4(self):
        """An else token before its begin token is rejected."""
        self.assertEqual(interleavedp([10], [20], m=[5]), False)

    def test_ElseMid5(self):
        """One else token inside the first of two pairs is accepted."""
        self.assertEqual(interleavedp([10, 30], [20, 40], m=[15]), True)

    def test_ElseMid6(self):
        """Two else tokens inside one begin/end pair are rejected."""
        self.assertEqual(interleavedp([10, 30], [20, 40], m=[15, 17]), False)

    def test_ElseMid7(self):
        """An else token colliding with a begin position is rejected."""
        self.assertEqual(interleavedp([10, 30], [20, 40], m=[25, 30]), False)

    def test_ElseMid8(self):
        """More else tokens than begin/end pairs is rejected."""
        self.assertEqual(interleavedp([10, 30], [20, 40], m=[25, 31, 35]), False)

    def test_ElseMid9(self):
        """Out-of-order else tokens are rejected."""
        self.assertEqual(
            interleavedp([10, 30, 50], [20, 40, 60], m=[25, 55, 35]), False)

    def test_ElseMid10(self):
        """One in-order else token per pair is accepted."""
        self.assertEqual(
            interleavedp([10, 30, 50], [20, 40, 60], m=[15, 35, 55]), True)
if __name__ == '__main__' :
unittest.main() | 0.276105 | 0.524395 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
import gym
from easy_rl.agents import agents
from easy_rl.models import DQNModel
from easy_rl.utils.window_stat import WindowStat
from easy_rl.models import EvolutionStrategy
# ---------------------------------------------------------------------------
# Hyper-parameter configurations for the convergence tests below.  Each
# *_MODEL_CONFIG dict parameterizes one algorithm's network/optimizer and the
# matching *_AGENT_CONFIG dict parameterizes its training loop.  The exact
# key semantics live in easy_rl.models / easy_rl.agents — TODO confirm there.
# ---------------------------------------------------------------------------

# DQN: 3-step double DQN with a distributional head (num_atoms=11), no dueling.
DQN_MODEL_CONFIG = dict(
    # specific
    type="DQN",
    n_step=3,
    dueling=False,
    double_q=True,
    num_atoms=11,  # recommend to set 11 to run distributional dqn
    v_min=0,
    v_max=25,
    # common
    parameter_noise=False,  # set True to use parameter_noise
    gamma=0.95,
    init_lr=1e-3,
    lr_strategy_spec={
        'type': 'exponential_decay',
        'decay_steps': 1000,
        'decay_rate': 0.9
    },
    global_norm_clip=40)

# DQN training loop: uniform replay (prioritized off), epsilon decayed over
# exploration_timesteps steps.
DQN_AGENT_CONFIG = dict(
    type="Agent",
    sample_batch_size=4,
    buffer_size=50000,
    learning_starts=500,
    prioritized_replay=False,
    prioritized_replay_alpha=0.6,
    prioritized_replay_beta=0.4,
    batch_size=256,
    sync_target_frequency=100,
    exploration_timesteps=40000,
    perturbation_frequency=40,  # recommend to set to 50
    noise_kl_episodes=300  # after 300 episodes kl_threshold will decay to 1e-4
)

# DDPG: separate actor/critic learning-rate schedules plus OU action noise.
DDPG_MODEL_CONFIG = dict(
    # specific
    type="DDPG",
    # common
    parameter_noise=False,  # set True to use parameter_noise
    gamma=0.99,
    actor_lr_init=1e-2,
    actor_lr_strategy_spec={
        'type': 'polynomial_decay',
        'decay_steps': 10000,
        'end_learning_rate': 1e-4
    },
    critic_lr_init=1e-2,
    critic_lr_strategy_spec={
        'type': 'polynomial_decay',
        'decay_steps': 13000,
        'end_learning_rate': 1e-3
    },
    global_norm_clip=100,
    ornstein_uhlenbeck_spec={
        "sigma": 0.1,
        "theta": 0.3,
        "noise_scale": 1.0
    },
)

# DDPG training loop: larger batches and a longer warm-up than DQN.
DDPG_AGENT_CONFIG = dict(
    type="Agent",
    sample_batch_size=8,
    buffer_size=50000,
    learning_starts=2000,
    prioritized_replay=False,
    prioritized_replay_alpha=0.6,
    prioritized_replay_beta=0.4,
    batch_size=1024,
    sync_target_frequency=200,
    perturbation_frequency=50,  # recommend to set to 50
    noise_kl_episodes=1000  # 1000 episode kl_threshold will decay to 1e-4
)

# PPO: on-policy, so only the optimizer schedule is configured here.
PPO_MODEL_CONFIG = dict(
    # specific
    type="PPO",
    # common
    init_lr=1e-3,
    lr_strategy_spec={
        'type': 'exponential_decay',
        'decay_steps': 100,
        'decay_rate': 0.9
    },
    global_norm_clip=40)

# PPO training loop: GAE advantages with gamma=0.9, lambda=0.5.
PPO_AGENT_CONFIG = dict(
    type="Agent",
    sample_batch_size=64,
    batch_size=128,
    sub_train_batch=64,
    train_epochs=2,
    # gae
    gamma=0.9,
    lambda_=0.5,
    use_gae=True,
)

# Evolution Strategies: gradient-free, so only the update-step schedule.
ES_MODEL_CONFIG = dict(
    # specific
    type="ES",
    # common
    init_lr=0.01,
    lr_strategy_spec={
        'type': 'exponential_decay',
        'decay_steps': 50,
        'decay_rate': 0.9
    },
    global_norm_clip=40)

# ES training loop: one update per 100 perturbation returns (see doTestES).
ES_AGENT_CONFIG = dict(
    type="Agent",
    sample_batch_size=100,
    batch_size=100,
)
class MyESmodel(EvolutionStrategy):
    """Custom ES policy net: observation -> two 64-unit ReLU layers -> 2 logits."""

    def _encode_obs(self, input_obs, scope="encode_obs"):
        """Build the forward graph mapping observations to action logits."""

        def layer(inputs, units, activation):
            # One dense layer; a fresh N(0, 0.1) seed-0 initializer per layer,
            # exactly as in the original layer-by-layer construction.
            return tf.layers.dense(
                inputs,
                units=units,
                activation=activation,
                kernel_initializer=tf.random_normal_initializer(
                    mean=0.0, stddev=0.1, seed=0))

        with tf.variable_scope(name_or_scope=scope):
            hidden = layer(input_obs, 64, tf.nn.relu)
            hidden = layer(hidden, 64, tf.nn.relu)
            return layer(hidden, 2, None)
class ConvergenceTest(unittest.TestCase):
    """Run commonly used algorithms in single process mode.
    Validate their convergence on classic simulators.

    Each doTest* method trains one algorithm end-to-end and returns the mean
    episode reward over the last 25 episodes (WindowStat); the matching
    test* method asserts that mean clears an algorithm-specific threshold.
    """

    def doTestDQN(self):
        """Train DQN on CartPole-v0 for 600 episodes; return windowed mean reward."""
        env = gym.make("CartPole-v0")
        env.seed(0)
        dqn_g = tf.Graph()
        with dqn_g.as_default():
            tf.set_random_seed(123)
            agent = agents[DQN_AGENT_CONFIG["type"]](
                env.observation_space,
                env.action_space,
                DQN_AGENT_CONFIG,
                DQN_MODEL_CONFIG,
                distributed_spec={})
            reward_window = WindowStat("reward", 25)
            obs, actions, rewards, next_obs, dones = list(), list(), list(), list(), list()
            act_count = 0
            for i in range(600):
                ob = env.reset()
                done = False
                episode_reward = .0
                while not done:
                    action, results = agent.act(
                        [ob], deterministic=False, use_perturbed_action=False)
                    next_ob, reward, done, info = env.step(action[0])
                    act_count += 1
                    obs.append(ob)
                    actions.append(action[0])
                    rewards.append(reward)
                    next_obs.append(next_ob)
                    dones.append(done)
                    # Hand a sample batch to the agent once it asks for one.
                    # NOTE(review): send_experience appears to consume the
                    # lists in place — confirm in easy_rl.agents.
                    if agent.ready_to_send:
                        agent.send_experience(
                            obs=obs,
                            actions=actions,
                            rewards=rewards,
                            next_obs=next_obs,
                            dones=dones)
                    # Train whenever enough replay data has accumulated.
                    if agent.ready_to_receive:
                        batch_data = agent.receive_experience()
                        res = agent.learn(batch_data)
                        if DQN_AGENT_CONFIG.get("prioritized_replay", False):
                            agent.update_priorities(
                                indexes=batch_data["indexes"],
                                td_error=res["td_error"])
                    ob = next_ob
                    episode_reward += reward
                    if act_count % 1024 == 0:
                        print("timestep:", act_count, reward_window)
                agent.add_episode(1)
                reward_window.push(episode_reward)
        return reward_window.stats()["reward_mean"]

    def testDQN(self):
        """CartPole counts as solved around 195; require >= 190 here."""
        mean_episode_reward = self.doTestDQN()
        self.assertTrue(mean_episode_reward >= 190)

    def doTestDDPG(self):
        """Train DDPG on Pendulum-v0 for 200 episodes; return windowed mean reward."""
        np.random.seed(0)
        env = gym.make("Pendulum-v0")
        env.seed(0)
        ddpg_g = tf.Graph()
        with ddpg_g.as_default():
            tf.set_random_seed(123)
            agent = agents[DDPG_AGENT_CONFIG["type"]](
                env.observation_space,
                env.action_space,
                DDPG_AGENT_CONFIG,
                DDPG_MODEL_CONFIG,
                distributed_spec={})
            reward_window = WindowStat("reward", 25)
            obs, actions, rewards, next_obs, dones = list(), list(), list(), list(), list()
            act_count = 0
            for i in range(200):
                ob = env.reset()
                done = False
                episode_reward = .0
                while not done:
                    action, results = agent.act(
                        [ob], False, use_perturbed_action=False)
                    act_count += 1
                    next_ob, reward, done, info = env.step(action[0])
                    obs.append(ob)
                    actions.append(action[0])
                    # Rewards are scaled by 0.1 for training; the unscaled
                    # reward is still used for the reported episode total.
                    rewards.append(0.1 * reward)
                    next_obs.append(next_ob)
                    dones.append(done)
                    if agent.ready_to_send:
                        agent.send_experience(
                            obs=obs,
                            actions=actions,
                            rewards=rewards,
                            dones=dones,
                            next_obs=next_obs)
                    if agent.ready_to_receive:
                        batch_data = agent.receive_experience()
                        res = agent.learn(batch_data)
                        if DDPG_AGENT_CONFIG.get("prioritized_replay", False):
                            agent.update_priorities(
                                indexes=batch_data["indexes"],
                                td_error=res["td_error"])
                    ob = next_ob
                    episode_reward += reward
                    if act_count % 1024 == 0:
                        print("timestep:", act_count, reward_window)
                agent.add_episode(1)
                reward_window.push(episode_reward)
        return reward_window.stats()["reward_mean"]

    def testDDPG(self):
        """Pendulum rewards are negative; -300 mean indicates convergence."""
        mean_episode_reward = self.doTestDDPG()
        self.assertTrue(mean_episode_reward >= -300)

    def doTestPPO(self):
        """Train PPO on CartPole-v0 for 300 episodes; return windowed mean reward."""
        env = gym.make("CartPole-v0")
        env.seed(0)
        ppo_g = tf.Graph()
        with ppo_g.as_default():
            tf.set_random_seed(123)
            agent = agents[PPO_AGENT_CONFIG["type"]](
                env.observation_space,
                env.action_space,
                PPO_AGENT_CONFIG,
                PPO_MODEL_CONFIG,
                distributed_spec={})
            reward_window = WindowStat("reward", 25)
            obs, actions, rewards, next_obs, dones, value_preds, logits = list(
            ), list(), list(), list(), list(), list(), list()
            act_count = 0
            for i in range(300):
                ob = env.reset()
                done = False
                episode_reward = .0
                while not done:
                    action, results = agent.act([ob], False)
                    next_ob, reward, done, info = env.step(action[0])
                    act_count += 1
                    obs.append(ob)
                    actions.append(action[0])
                    rewards.append(0.1 * reward)
                    next_obs.append(next_ob)
                    dones.append(done)
                    # PPO additionally needs the behavior policy's logits and
                    # value estimates to compute ratios / GAE at update time.
                    logits.append(results["logits"][0])
                    value_preds.append(results["value_preds"][0])
                    if agent.ready_to_send:
                        agent.send_experience(
                            obs=obs,
                            actions=actions,
                            rewards=rewards,
                            dones=dones,
                            next_obs=next_obs,
                            value_preds=value_preds,
                            logits=logits)
                    if agent.ready_to_receive:
                        batch_data = agent.receive_experience()
                        res = agent.learn(batch_data)
                    ob = next_ob
                    episode_reward += reward
                    if act_count % 1024 == 0:
                        print("timestep:", act_count, reward_window)
                reward_window.push(episode_reward)
        return reward_window.stats()["reward_mean"]

    def testPPO(self):
        """CartPole counts as solved around 195; require >= 190 here."""
        mean_episode_reward = self.doTestPPO()
        self.assertTrue(mean_episode_reward >= 190)

    def doTestES(self):
        """Train ES on CartPole-v0 with antithetic perturbations.

        Each random seed is evaluated twice (positive then negative
        direction); after 2 * sample_batch_size returns are collected the
        agent takes one update step and is then evaluated for 10 greedy
        episodes.  Returns the mean reward of the latest evaluation.
        """
        np.random.seed(0)
        env = gym.make("CartPole-v0")
        env.seed(0)
        es_g = tf.Graph()
        with es_g.as_default():
            tf.set_random_seed(123)
            agent = agents[ES_AGENT_CONFIG["type"]](
                env.observation_space,
                env.action_space,
                ES_AGENT_CONFIG,
                ES_MODEL_CONFIG,
                distributed_spec={},
                custom_model=MyESmodel)
            reward_window = WindowStat("reward", 25)
            perturbation_scale = 0.1
            seeds, rewards, perturbation_scales = list(), list(), list()
            is_positive_direction = list()
            episode_per_perturbation = 1
            returns = list()
            for i in range(5000):
                ob = env.reset()
                done = False
                episode_reward = .0
                if i % episode_per_perturbation == 0:
                    # perturb parameters every `episode_per_seed` episodes
                    # Direction alternates: positive first, then the mirror
                    # (negative) run of the same seed.
                    is_positive = True if len(
                        is_positive_direction
                    ) == 0 else is_positive_direction[-1] != True
                    # each seed twice
                    seed = np.random.randint(1000000) if is_positive else seeds[-1]
                    # Linearly anneal the noise scale, floored at 0.02.
                    perturbation_scale = max(perturbation_scale * (1 - i / 2000.0),
                                             0.02)
                    feed = agent.model.perturbation_feed
                    fetch = [agent.model.reset_perturbation_op]
                    agent.executor.run(
                        fetches=fetch,
                        feed_dict={
                            feed['perturbation_seeds']: [seed],
                            feed['perturbation_scales']: [perturbation_scale],
                            feed['positive_perturbation']: is_positive
                        })
                    # Record each seed/scale once (on its positive pass).
                    if is_positive:
                        seeds.append(seed)
                        perturbation_scales.append(perturbation_scale)
                    is_positive_direction.append(is_positive)
                while not done:
                    action, result = agent.act(
                        [ob], True, use_perturbed_action=True)
                    next_ob, reward, done, info = env.step(action[0])
                    ob = next_ob
                    episode_reward += reward
                rewards.append(episode_reward)
                reward_window.push(episode_reward)
                if len(rewards) == episode_per_perturbation:
                    returns.append(np.mean(rewards))
                    rewards = []
                # One update per 2 * sample_batch_size returns (pos/neg pairs).
                if len(returns) == 2 * agent.config.get(
                        'sample_batch_size', 100):
                    print(reward_window)
                    assert len(seeds) == (len(returns) / 2)
                    assert len(perturbation_scales) == (len(returns) / 2)
                    agent.learn(
                        batch_data=dict(
                            perturbation_seeds=seeds,
                            perturbation_scales=perturbation_scales,
                            returns=np.reshape(returns, [-1, 2])))
                    seeds = []
                    perturbation_scales = []
                    returns = []
                    is_positive_direction = []
                    # evaluation 20 episodes
                    # NOTE(review): the loop actually runs 10 episodes despite
                    # the "20 episodes" comments/labels.
                    test_rewards = list()
                    for j in range(10):
                        done = False
                        ob = env.reset()
                        episode_reward = 0
                        while not done:
                            action, result = agent.act(
                                [ob], True, use_perturbed_action=False)
                            next_ob, reward, done, info = env.step(action[0])
                            ob = next_ob
                            episode_reward += reward
                        test_rewards.append(episode_reward)
                    print("[evaluation] average reward of 20 episodes:",
                          np.mean(test_rewards))
                    print('train at ', i)
        return np.mean(test_rewards)

    def testES(self):
        """CartPole counts as solved around 195; require >= 190 here."""
        mean_episode_reward = self.doTestES()
        self.assertTrue(mean_episode_reward >= 190)
if __name__ == "__main__":
unittest.main(verbosity=2) | tests/test_convergence.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
import gym
from easy_rl.agents import agents
from easy_rl.models import DQNModel
from easy_rl.utils.window_stat import WindowStat
from easy_rl.models import EvolutionStrategy
DQN_MODEL_CONFIG = dict(
# specific
type="DQN",
n_step=3,
dueling=False,
double_q=True,
num_atoms=11, # recommend to set 11 to run distributional dqn
v_min=0,
v_max=25,
# common
parameter_noise=False, # set True to use parameter_noise
gamma=0.95,
init_lr=1e-3,
lr_strategy_spec={
'type': 'exponential_decay',
'decay_steps': 1000,
'decay_rate': 0.9
},
global_norm_clip=40)
DQN_AGENT_CONFIG = dict(
type="Agent",
sample_batch_size=4,
buffer_size=50000,
learning_starts=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
batch_size=256,
sync_target_frequency=100,
exploration_timesteps=40000,
perturbation_frequency=40, # recommend to set to 50
noise_kl_episodes=300 # after 300 episodes kl_threshold will decay to 1e-4
)
DDPG_MODEL_CONFIG = dict(
# specific
type="DDPG",
# common
parameter_noise=False, # set True to use parameter_noise
gamma=0.99,
actor_lr_init=1e-2,
actor_lr_strategy_spec={
'type': 'polynomial_decay',
'decay_steps': 10000,
'end_learning_rate': 1e-4
},
critic_lr_init=1e-2,
critic_lr_strategy_spec={
'type': 'polynomial_decay',
'decay_steps': 13000,
'end_learning_rate': 1e-3
},
global_norm_clip=100,
ornstein_uhlenbeck_spec={
"sigma": 0.1,
"theta": 0.3,
"noise_scale": 1.0
},
)
DDPG_AGENT_CONFIG = dict(
type="Agent",
sample_batch_size=8,
buffer_size=50000,
learning_starts=2000,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta=0.4,
batch_size=1024,
sync_target_frequency=200,
perturbation_frequency=50, # recommend to set to 50
noise_kl_episodes=1000 # 1000 episode kl_threshold will decay to 1e-4
)
PPO_MODEL_CONFIG = dict(
# specific
type="PPO",
# common
init_lr=1e-3,
lr_strategy_spec={
'type': 'exponential_decay',
'decay_steps': 100,
'decay_rate': 0.9
},
global_norm_clip=40)
PPO_AGENT_CONFIG = dict(
type="Agent",
sample_batch_size=64,
batch_size=128,
sub_train_batch=64,
train_epochs=2,
# gae
gamma=0.9,
lambda_=0.5,
use_gae=True,
)
ES_MODEL_CONFIG = dict(
# specific
type="ES",
# common
init_lr=0.01,
lr_strategy_spec={
'type': 'exponential_decay',
'decay_steps': 50,
'decay_rate': 0.9
},
global_norm_clip=40)
ES_AGENT_CONFIG = dict(
type="Agent",
sample_batch_size=100,
batch_size=100,
)
class MyESmodel(EvolutionStrategy):
def _encode_obs(self, input_obs, scope="encode_obs"):
with tf.variable_scope(name_or_scope=scope):
h1 = tf.layers.dense(
input_obs,
units=64,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(
mean=0.0, stddev=0.1, seed=0))
h2 = tf.layers.dense(
h1,
units=64,
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(
mean=0.0, stddev=0.1, seed=0))
logits = tf.layers.dense(
h2,
units=2,
activation=None,
kernel_initializer=tf.random_normal_initializer(
mean=0.0, stddev=0.1, seed=0))
return logits
class ConvergenceTest(unittest.TestCase):
"""Run commonly used algorithms in single process mode.
Validate their convergence on classic simulators.
"""
def doTestDQN(self):
env = gym.make("CartPole-v0")
env.seed(0)
dqn_g = tf.Graph()
with dqn_g.as_default():
tf.set_random_seed(123)
agent = agents[DQN_AGENT_CONFIG["type"]](
env.observation_space,
env.action_space,
DQN_AGENT_CONFIG,
DQN_MODEL_CONFIG,
distributed_spec={})
reward_window = WindowStat("reward", 25)
obs, actions, rewards, next_obs, dones = list(), list(), list(), list(
), list()
act_count = 0
for i in range(600):
ob = env.reset()
done = False
episode_reward = .0
while not done:
action, results = agent.act(
[ob], deterministic=False, use_perturbed_action=False)
next_ob, reward, done, info = env.step(action[0])
act_count += 1
obs.append(ob)
actions.append(action[0])
rewards.append(reward)
next_obs.append(next_ob)
dones.append(done)
if agent.ready_to_send:
agent.send_experience(
obs=obs,
actions=actions,
rewards=rewards,
next_obs=next_obs,
dones=dones)
if agent.ready_to_receive:
batch_data = agent.receive_experience()
res = agent.learn(batch_data)
if DQN_AGENT_CONFIG.get("prioritized_replay", False):
agent.update_priorities(
indexes=batch_data["indexes"],
td_error=res["td_error"])
ob = next_ob
episode_reward += reward
if act_count % 1024 == 0:
print("timestep:", act_count, reward_window)
agent.add_episode(1)
reward_window.push(episode_reward)
return reward_window.stats()["reward_mean"]
def testDQN(self):
mean_episode_reward = self.doTestDQN()
self.assertTrue(mean_episode_reward >= 190)
def doTestDDPG(self):
np.random.seed(0)
env = gym.make("Pendulum-v0")
env.seed(0)
ddpg_g = tf.Graph()
with ddpg_g.as_default():
tf.set_random_seed(123)
agent = agents[DDPG_AGENT_CONFIG["type"]](
env.observation_space,
env.action_space,
DDPG_AGENT_CONFIG,
DDPG_MODEL_CONFIG,
distributed_spec={})
reward_window = WindowStat("reward", 25)
obs, actions, rewards, next_obs, dones = list(), list(), list(), list(
), list()
act_count = 0
for i in range(200):
ob = env.reset()
done = False
episode_reward = .0
while not done:
action, results = agent.act(
[ob], False, use_perturbed_action=False)
act_count += 1
next_ob, reward, done, info = env.step(action[0])
obs.append(ob)
actions.append(action[0])
rewards.append(0.1 * reward)
next_obs.append(next_ob)
dones.append(done)
if agent.ready_to_send:
agent.send_experience(
obs=obs,
actions=actions,
rewards=rewards,
dones=dones,
next_obs=next_obs)
if agent.ready_to_receive:
batch_data = agent.receive_experience()
res = agent.learn(batch_data)
if DDPG_AGENT_CONFIG.get("prioritized_replay", False):
agent.update_priorities(
indexes=batch_data["indexes"],
td_error=res["td_error"])
ob = next_ob
episode_reward += reward
if act_count % 1024 == 0:
print("timestep:", act_count, reward_window)
agent.add_episode(1)
reward_window.push(episode_reward)
return reward_window.stats()["reward_mean"]
def testDDPG(self):
mean_episode_reward = self.doTestDDPG()
self.assertTrue(mean_episode_reward >= -300)
def doTestPPO(self):
env = gym.make("CartPole-v0")
env.seed(0)
ppo_g = tf.Graph()
with ppo_g.as_default():
tf.set_random_seed(123)
agent = agents[PPO_AGENT_CONFIG["type"]](
env.observation_space,
env.action_space,
PPO_AGENT_CONFIG,
PPO_MODEL_CONFIG,
distributed_spec={})
reward_window = WindowStat("reward", 25)
obs, actions, rewards, next_obs, dones, value_preds, logits = list(
), list(), list(), list(), list(), list(), list()
act_count = 0
for i in range(300):
ob = env.reset()
done = False
episode_reward = .0
while not done:
action, results = agent.act([ob], False)
next_ob, reward, done, info = env.step(action[0])
act_count += 1
obs.append(ob)
actions.append(action[0])
rewards.append(0.1 * reward)
next_obs.append(next_ob)
dones.append(done)
logits.append(results["logits"][0])
value_preds.append(results["value_preds"][0])
if agent.ready_to_send:
agent.send_experience(
obs=obs,
actions=actions,
rewards=rewards,
dones=dones,
next_obs=next_obs,
value_preds=value_preds,
logits=logits)
if agent.ready_to_receive:
batch_data = agent.receive_experience()
res = agent.learn(batch_data)
ob = next_ob
episode_reward += reward
if act_count % 1024 == 0:
print("timestep:", act_count, reward_window)
reward_window.push(episode_reward)
return reward_window.stats()["reward_mean"]
def testPPO(self):
mean_episode_reward = self.doTestPPO()
self.assertTrue(mean_episode_reward >= 190)
def doTestES(self):
np.random.seed(0)
env = gym.make("CartPole-v0")
env.seed(0)
es_g = tf.Graph()
with es_g.as_default():
tf.set_random_seed(123)
agent = agents[ES_AGENT_CONFIG["type"]](
env.observation_space,
env.action_space,
ES_AGENT_CONFIG,
ES_MODEL_CONFIG,
distributed_spec={},
custom_model=MyESmodel)
reward_window = WindowStat("reward", 25)
perturbation_scale = 0.1
seeds, rewards, perturbation_scales = list(), list(), list()
is_positive_direction = list()
episode_per_perturbation = 1
returns = list()
for i in range(5000):
ob = env.reset()
done = False
episode_reward = .0
if i % episode_per_perturbation == 0:
# perturb parameters every `episode_per_seed` episodes
is_positive = True if len(
is_positive_direction
) == 0 else is_positive_direction[-1] != True
# each seed twice
seed = np.random.randint(1000000) if is_positive else seeds[-1]
perturbation_scale = max(perturbation_scale * (1 - i / 2000.0),
0.02)
feed = agent.model.perturbation_feed
fetch = [agent.model.reset_perturbation_op]
agent.executor.run(
fetches=fetch,
feed_dict={
feed['perturbation_seeds']: [seed],
feed['perturbation_scales']: [perturbation_scale],
feed['positive_perturbation']: is_positive
})
if is_positive:
seeds.append(seed)
perturbation_scales.append(perturbation_scale)
is_positive_direction.append(is_positive)
while not done:
action, result = agent.act(
[ob], True, use_perturbed_action=True)
next_ob, reward, done, info = env.step(action[0])
ob = next_ob
episode_reward += reward
rewards.append(episode_reward)
reward_window.push(episode_reward)
if len(rewards) == episode_per_perturbation:
returns.append(np.mean(rewards))
rewards = []
if len(returns) == 2 * agent.config.get(
'sample_batch_size', 100):
print(reward_window)
assert len(seeds) == (len(returns) / 2)
assert len(perturbation_scales) == (len(returns) / 2)
agent.learn(
batch_data=dict(
perturbation_seeds=seeds,
perturbation_scales=perturbation_scales,
returns=np.reshape(returns, [-1, 2])))
seeds = []
perturbation_scales = []
returns = []
is_positive_direction = []
# evaluation 20 episodes
test_rewards = list()
for j in range(10):
done = False
ob = env.reset()
episode_reward = 0
while not done:
action, result = agent.act(
[ob], True, use_perturbed_action=False)
next_ob, reward, done, info = env.step(action[0])
ob = next_ob
episode_reward += reward
test_rewards.append(episode_reward)
print("[evaluation] average reward of 20 episodes:",
np.mean(test_rewards))
print('train at ', i)
return np.mean(test_rewards)
def testES(self):
mean_episode_reward = self.doTestES()
self.assertTrue(mean_episode_reward >= 190)
if __name__ == "__main__":
unittest.main(verbosity=2) | 0.741674 | 0.18462 |
from typing import List, Optional, Union
import pyinflect # noqa: F401
import spacy
from nltk.tokenize.treebank import TreebankWordDetokenizer
from spacy.symbols import AUX, NOUN, PRON, PROPN, VERB, aux, cc, nsubj
from spacy.tokens import Span, Token
from spacy.tokens.doc import Doc
from initialize import spacy_nlp
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
"""
Base Class for implementing the different input transformations a generation should be robust against.
"""
def uncapitalize(string: str):
    """De-capitalize the first character of *string*.

    E.g. 'How is Michael doing?' -> 'how is Michael doing?'
    Returns "" for the empty string.
    """
    if not string:
        return ""
    return string[0].lower() + string[1:]
def front_auxiliary(auxiliary: Token) -> str:
    """Return the capitalized, un-contracted form of an auxiliary token.

    Ambiguous English contractions ("'d", "'s") are disambiguated via the
    morphology stored on the token's head verb.

    E.g.:
    - <Token 'has'> -> 'Has'
    - <Token "'ve"> -> 'Have'
    """
    text = auxiliary.text
    if text == "'d":
        # "'d" + past-participle head => perfect "had"; else conditional "would".
        return "Had" if "Part" in auxiliary.head.morph.get("VerbForm") else "Would"
    if text == "'s":
        # "'s" + past-tense head => perfect "has"; else copular "is".
        return "Has" if "Past" in auxiliary.head.morph.get("Tense") else "Is"
    # Unambiguous contractions map directly; anything else is already a full
    # word and only needs capitalizing.
    unambiguous = {"'ve": "Have", "'ll": "Will"}
    return unambiguous.get(text, text.capitalize())
def front_be_verb(be_verb: Token) -> str:
    """Return the capitalized, un-contracted form of a "be" verb token.

    E.g.:
    - <Token 'is'> -> 'Is'
    - <Token "'re"> -> 'Are'
    """
    # Contractions expand via the table; full words are just capitalized.
    expanded = {"'s": "Is", "'re": "Are", "'m": "Am"}
    text = be_verb.text
    return expanded.get(text, text.capitalize())
class YesNoQuestionPerturbation(SentenceOperation):
    """Turn declarative sentences into rhetorical yes/no questions.

    E.g. "Jenny came home." -> "Did Jenny come home? Yes."
    """

    tasks = [
        TaskType.TEXT_CLASSIFICATION,
        TaskType.TEXT_TO_TEXT_GENERATION,
        TaskType.QUESTION_ANSWERING,
        TaskType.QUESTION_GENERATION,
    ]
    languages = ["en"]

    def __init__(self, seed=0, max_outputs=1):
        super().__init__(seed, max_outputs=max_outputs)
        # Detokenizer reassembles a natural string from reordered tokens.
        self.detokenizer = TreebankWordDetokenizer()
        # Reuse the shared pipeline from initialize() when available.
        self.nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")

    def statement_to_question(self, sentence: Span) -> Union[str, None]:
        """Given a statement (type: spacy Span), convert to corresponding
        yes-or-no question.

        Returns None whenever the parse does not look like a simple
        declarative clause (non-verbal root, coordination, missing or
        non-nominal subject, uninflectable main verb).
        """
        # Look for sentence verb head, starting with first token
        verb_head: Token = sentence[0]
        while verb_head != verb_head.head:
            verb_head = verb_head.head
        # Give up on sentence if POS tag doesn't match dependency tag
        if verb_head.pos not in {AUX, VERB}:
            return None
        # If there's a coordinating conjunction, give up
        for child in verb_head.children:
            if child.dep == cc:
                return None
        # Look for auxiliary verb (the last aux child wins if several)
        auxiliary: Union[Token, str, None] = None
        for child in verb_head.children:
            if child.dep == aux:
                auxiliary = child
        # Give up on sentence if POS tag doesn't match dependency tag
        if auxiliary is not None and auxiliary.pos != AUX:
            return None
        # Look for root token of subject
        for child in verb_head.children:
            if child.dep == nsubj:
                subject_head = child
                break
        # If there's no root subject, just give up
        else:
            return None
        # Give up on sentence if POS tag doesn't match dependency tag
        if subject_head.pos not in {NOUN, PROPN, PRON}:
            return None
        # The subject phrase leaves sentence-initial position, so lower-case
        # everything in it except proper nouns.
        subject_phrase_tokens = [
            t.text_with_ws if t.pos == PROPN else uncapitalize(t.text_with_ws)
            for t in subject_head.subtree
        ]
        subject_phrase = "".join(subject_phrase_tokens).strip()
        # Get pre-verb adverbs, etc. (expand "n't" to "not"):
        all_left_tokens = sentence[: verb_head.i - sentence.start]
        head_left_tokens = [
            token
            for token in all_left_tokens
            if token != subject_head
            and subject_head not in token.ancestors
            and token != auxiliary
            and auxiliary not in token.ancestors
        ]
        head_left = "".join(
            "not "
            if token.text == "n't" and token.head in (verb_head, auxiliary)
            else uncapitalize(token.text_with_ws)
            for token in head_left_tokens
        ).strip()
        # Get object, adverbs, prep. phrases, etc. (expand "n't" to "not"):
        head_index = verb_head.i + 1 - sentence.start
        head_right = "".join(
            "not "
            if token.text == "n't" and token.head in (verb_head, auxiliary)
            else token.text_with_ws
            for token in sentence[head_index:]
        ).strip()
        # Change last token to "?"
        if len(head_right) and head_right[-1] in {".", "!"}:
            head_right = head_right[:-1]
        head_right += "?"
        # Make the question:
        # If there is an auxiliary, make q: [AUX] [SUBJ] [LEFT] [VERB] [RIGHT]
        if auxiliary is not None:
            new_auxiliary = front_auxiliary(auxiliary)
            question = self.detokenizer.detokenize(
                filter(
                    len,
                    [
                        new_auxiliary,
                        subject_phrase,
                        head_left,
                        verb_head.text,
                        head_right,
                    ],
                )
            )
        # If it's a be verb, make q: [BE] [SUBJ] [LEFT] [RIGHT]
        elif verb_head.lemma == self.nlp.vocab.strings["be"]:
            new_be_verb = front_be_verb(verb_head)
            question = self.detokenizer.detokenize(
                filter(
                    len, [new_be_verb, subject_phrase, head_left, head_right]
                )
            )
        # All other verbs, make q: [DO] [SUBJ] [LEFT] [VERB] [RIGHT]
        else:
            morph = verb_head.morph.to_dict()
            tense = morph.get("Tense")
            if tense == "Past":
                auxiliary = "Did"
            elif (
                morph.get("Person") == "Three"
                and morph.get("Number") == "Sing"
            ):
                auxiliary = "Does"
            else:
                auxiliary = "Do"
            # Do-support requires the bare infinitive of the main verb.
            infinitive = verb_head._.inflect("VB")
            if infinitive is None:
                return None
            question = self.detokenizer.detokenize(
                filter(
                    len,
                    [
                        auxiliary,
                        subject_phrase,
                        head_left,
                        infinitive,
                        head_right,
                    ],
                )
            )
        return question

    def rhetoricalize_question(self, sentence: str):
        """Add appropriate "yes" or "no" to question. Remove "not" for "no"
        questions.

        E.g.:
        - "Did Jenny come home?" -> "Did Jenny come home? Yes."
        - "Did Jenny not come home?" -> "Did Jenny come home? No."
        """
        doc: Doc = self.nlp(sentence)
        # Find verb head
        verb_head: Token = doc[0]
        while verb_head != verb_head.head:
            verb_head = verb_head.head
        # Give up on sentence if POS tag doesn't match dependency tag
        if verb_head.pos not in {AUX, VERB}:
            return None
        # Look for negation (the last "not" wins if several)
        not_token: Optional[Token] = None
        for token in doc:
            if token.text == "not":
                not_token = token
        # If there is negation, remove it and append a "no"
        if not_token is not None:
            second_half_index = not_token.i + 1
            positive_question_tokens = list(doc[: not_token.i]) + list(
                doc[second_half_index:]
            )
            return (
                "".join(t.text_with_ws for t in positive_question_tokens)
                + " No."
            )
        # Otherwise, append a "yes"
        else:
            return sentence + " Yes."

    def generate(self, sentence: str) -> List[str]:
        """Split the input into sentences and emit one rhetorical yes/no
        question per convertible statement."""
        doc: Doc = self.nlp(sentence)
        outputs: List[str] = []
        for sentence in doc.sents:
            # TODO: Test if sentence is statement or question
            question = self.statement_to_question(sentence)
            if question is not None:
                rhetorical_question = self.rhetoricalize_question(question)
                outputs.append(rhetorical_question)
        # NOTE(review): the "| ... |" tail below is dataset-row residue
        # (repo path + next row's first line), not part of this function.
        return outputs | transformations/yes_no_question/transformation.py | from typing import List, Optional, Union
import pyinflect # noqa: F401
import spacy
from nltk.tokenize.treebank import TreebankWordDetokenizer
from spacy.symbols import AUX, NOUN, PRON, PROPN, VERB, aux, cc, nsubj
from spacy.tokens import Span, Token
from spacy.tokens.doc import Doc
from initialize import spacy_nlp
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
"""
Base Class for implementing the different input transformations a generation should be robust against.
"""
def uncapitalize(string: str):
"""De-capitalize first character of string
E.g. 'How is Michael doing?' -> 'how is Michael doing?'
"""
if len(string):
return string[0].lower() + string[1:]
return ""
def front_auxiliary(auxiliary: Token) -> str:
"""Take auxiliary (type: spacy Token) and return capitalized, expanded
(i.e. un-contracted) auxiliary (type: str). Differentiates certain English
identical English contractions (e.g. "'d", "'s") using morphology data
stored in auxiliary `Token` object.
E.g.:
- <Token 'has'> -> 'Has'
- <Token "'ve"> -> 'Have'
"""
if auxiliary.text == "'d":
if "Part" in auxiliary.head.morph.get("VerbForm"):
return "Had"
else:
return "Would"
elif auxiliary.text == "'s":
if "Past" in auxiliary.head.morph.get("Tense"):
return "Has"
else:
return "Is"
elif auxiliary.text == "'ve":
return "Have"
elif auxiliary.text == "'ll":
return "Will"
else:
return auxiliary.text.capitalize()
def front_be_verb(be_verb: Token) -> str:
"""Take be verb (type: spacy Token), return capitalized, expanded (i.e.
un-contracted) form.
E.g.:
- <Token 'is'> -> 'Is'
- <Token "'re"> -> 'Are'
"""
if be_verb.text == "'s":
return "Is"
elif be_verb.text == "'re":
return "Are"
elif be_verb.text == "'m":
return "Am"
else:
return be_verb.text.capitalize()
class YesNoQuestionPerturbation(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
TaskType.QUESTION_ANSWERING,
TaskType.QUESTION_GENERATION,
]
languages = ["en"]
def __init__(self, seed=0, max_outputs=1):
super().__init__(seed, max_outputs=max_outputs)
self.detokenizer = TreebankWordDetokenizer()
self.nlp = spacy_nlp if spacy_nlp else spacy.load("en_core_web_sm")
def statement_to_question(self, sentence: Span) -> Union[str, None]:
"""Given a statement (type: spacy Span), convert to corresponding
yes-or-no question.
"""
# Look for sentence verb head, starting with first token
verb_head: Token = sentence[0]
while verb_head != verb_head.head:
verb_head = verb_head.head
# Give up on sentence if POS tag doesn't match dependency tag
if verb_head.pos not in {AUX, VERB}:
return None
# If there's a coordinating conjunction, give up
for child in verb_head.children:
if child.dep == cc:
return None
# Look for auxiliary verb
auxiliary: Union[Token, str, None] = None
for child in verb_head.children:
if child.dep == aux:
auxiliary = child
# Give up on sentence if POS tag doesn't match dependency tag
if auxiliary is not None and auxiliary.pos != AUX:
return None
# Look for root token of subject
for child in verb_head.children:
if child.dep == nsubj:
subject_head = child
break
# If there's no root subject, just give up
else:
return None
# Give up on sentence if POS tag doesn't match dependency tag
if subject_head.pos not in {NOUN, PROPN, PRON}:
return None
subject_phrase_tokens = [
t.text_with_ws if t.pos == PROPN else uncapitalize(t.text_with_ws)
for t in subject_head.subtree
]
subject_phrase = "".join(subject_phrase_tokens).strip()
# Get pre-verb adverbs, etc. (expand "n't" to "not"):
all_left_tokens = sentence[: verb_head.i - sentence.start]
head_left_tokens = [
token
for token in all_left_tokens
if token != subject_head
and subject_head not in token.ancestors
and token != auxiliary
and auxiliary not in token.ancestors
]
head_left = "".join(
"not "
if token.text == "n't" and token.head in (verb_head, auxiliary)
else uncapitalize(token.text_with_ws)
for token in head_left_tokens
).strip()
# Get object, adverbs, prep. phrases, etc. (expand "n't" to "not"):
head_index = verb_head.i + 1 - sentence.start
head_right = "".join(
"not "
if token.text == "n't" and token.head in (verb_head, auxiliary)
else token.text_with_ws
for token in sentence[head_index:]
).strip()
# Change last token to "?"
if len(head_right) and head_right[-1] in {".", "!"}:
head_right = head_right[:-1]
head_right += "?"
# Make the question:
# If there is an auxiliary, make q: [AUX] [SUBJ] [LEFT] [VERB] [RIGHT]
if auxiliary is not None:
new_auxiliary = front_auxiliary(auxiliary)
question = self.detokenizer.detokenize(
filter(
len,
[
new_auxiliary,
subject_phrase,
head_left,
verb_head.text,
head_right,
],
)
)
# If it's a be verb, make q: [BE] [SUBJ] [LEFT] [RIGHT]
elif verb_head.lemma == self.nlp.vocab.strings["be"]:
new_be_verb = front_be_verb(verb_head)
question = self.detokenizer.detokenize(
filter(
len, [new_be_verb, subject_phrase, head_left, head_right]
)
)
# All other verbs, make q: [DO] [SUBJ] [LEFT] [VERB] [RIGHT]
else:
morph = verb_head.morph.to_dict()
tense = morph.get("Tense")
if tense == "Past":
auxiliary = "Did"
elif (
morph.get("Person") == "Three"
and morph.get("Number") == "Sing"
):
auxiliary = "Does"
else:
auxiliary = "Do"
infinitive = verb_head._.inflect("VB")
if infinitive is None:
return None
question = self.detokenizer.detokenize(
filter(
len,
[
auxiliary,
subject_phrase,
head_left,
infinitive,
head_right,
],
)
)
return question
def rhetoricalize_question(self, sentence: str):
"""Add appropriate "yes" or "no" to question. Remove "not" for "no"
questions.
E.g.:
- "Did Jenny come home?" -> "Did Jenny come home? Yes."
- "Did Jenny not come home?" -> "Did Jenny come home? No."
"""
doc: Doc = self.nlp(sentence)
# Find verb head
verb_head: Token = doc[0]
while verb_head != verb_head.head:
verb_head = verb_head.head
# Give up on sentence if POS tag doesn't match dependency tag
if verb_head.pos not in {AUX, VERB}:
return None
# Look for negation
not_token: Optional[Token] = None
for token in doc:
if token.text == "not":
not_token = token
# If there is negation, remove it and append a "no"
if not_token is not None:
second_half_index = not_token.i + 1
positive_question_tokens = list(doc[: not_token.i]) + list(
doc[second_half_index:]
)
return (
"".join(t.text_with_ws for t in positive_question_tokens)
+ " No."
)
# Otherwise, append a "yes"
else:
return sentence + " Yes."
def generate(self, sentence: str) -> List[str]:
doc: Doc = self.nlp(sentence)
outputs: List[str] = []
for sentence in doc.sents:
# TODO: Test if sentence is statement or question
question = self.statement_to_question(sentence)
if question is not None:
rhetorical_question = self.rhetoricalize_question(question)
outputs.append(rhetorical_question)
return outputs | 0.875242 | 0.283949 |
import numpy as np
import matplotlib.pyplot as plt
# numpy handles the array maths, matplotlib the plotting.

# Load the Iris dataset as a 2-D float array (one row per flower).
# Columns: 0 = Sepal Length, 1 = Sepal Width, 2 = Petal Length, 3 = Petal Width.
# NOTE(review): assumes the CSV has no header row -- genfromtxt would turn a
# text header into a row of NaNs; verify against Data/Iris.csv.
data = np.genfromtxt('Data/Iris.csv', delimiter=',')

# Named column views, reused later for the histogram plots.
col1 = data[:, 0]
col2 = data[:, 1]
col3 = data[:, 2]
col4 = data[:, 3]

# Row ranges for each variety (rows 0-49, 50-99, 100-149 inclusive, per the
# footnote at the bottom), written as half-open slices.
# FIX(review): the original sliced e.g. data[0:49, ...], which silently
# dropped the last row of every variety (rows 49, 99 and 149).
SETOSA = slice(0, 50)
VERSICOLOR = slice(50, 100)
VIRGINICA = slice(100, 150)

# --- whole-dataset statistics, one function per statistic --------------------

def colmean(colno):
    """Mean of column *colno* over the whole dataset."""
    return np.mean(data[:, colno])

def colmax(colno):
    """Maximum of column *colno* over the whole dataset."""
    return np.max(data[:, colno])

def colmin(colno):
    """Minimum of column *colno* over the whole dataset."""
    return np.min(data[:, colno])

def colstd(colno):
    """Standard deviation of column *colno* over the whole dataset."""
    return np.std(data[:, colno])

for c in range(4):
    print("Column {} mean is:".format(c + 1), '{:0.3f}'.format(colmean(c)))
for c in range(4):
    print("Column {} max is:".format(c + 1), '{:0.1f}'.format(colmax(c)))
for c in range(4):
    print("Column {} min is:".format(c + 1), '{:0.1f}'.format(colmin(c)))
for c in range(4):
    print("Column {} std dev is:".format(c + 1), '{:0.3f}'.format(colstd(c)))

# Spot-check addressing a single cell; syntax is data[row, column].
print("the value at row 3, column 2 is:", data[2, 1])

# --- per-variety statistics --------------------------------------------------

def colmeanS(colno):
    """Mean of column *colno* for the Setosa rows."""
    return np.mean(data[SETOSA, colno])

def colmaxS(colno):
    """Maximum of column *colno* for the Setosa rows."""
    return np.max(data[SETOSA, colno])

def colminS(colno):
    """Minimum of column *colno* for the Setosa rows."""
    return np.min(data[SETOSA, colno])

def colmeanVr(colno):
    """Mean of column *colno* for the Versicolor rows."""
    return np.mean(data[VERSICOLOR, colno])

def colmaxVr(colno):
    """Maximum of column *colno* for the Versicolor rows."""
    return np.max(data[VERSICOLOR, colno])

def colminVr(colno):
    """Minimum of column *colno* for the Versicolor rows."""
    return np.min(data[VERSICOLOR, colno])

def colmeanVg(colno):
    """Mean of column *colno* for the Virginica rows."""
    return np.mean(data[VIRGINICA, colno])

def colmaxVg(colno):
    """Maximum of column *colno* for the Virginica rows."""
    return np.max(data[VIRGINICA, colno])

def colminVg(colno):
    """Minimum of column *colno* for the Virginica rows."""
    return np.min(data[VIRGINICA, colno])

COLUMN_LABELS = ("S.L.", "S.W.", "P.L.", "P.W.")

def report(variety, stat_name, stat_fn):
    """Print one '(Cn) <variety> <label> <stat> is: x.xxx' line per column."""
    for c, label in enumerate(COLUMN_LABELS):
        print("(C{}) {} {} {} is:".format(c + 1, variety, label, stat_name),
              '{:0.3f}'.format(stat_fn(c)))

report("Setosa", "mean", colmeanS)
report("Setosa", "max", colmaxS)
report("Setosa", "min", colminS)
report("Versicolor", "mean", colmeanVr)
report("Versicolor", "max", colmaxVr)
report("Versicolor", "min", colminVr)
report("Virginica", "mean", colmeanVg)
report("Virginica", "max", colmaxVg)
report("Virginica", "min", colminVg)

# Std Deviations for Petal Length on the 3 varieties (column 3):
col3Setstd = np.std(data[SETOSA, 2])
print("Petal Length Setosa std is:", '{:0.3f}'.format(col3Setstd))
col3Varstd = np.std(data[VERSICOLOR, 2])
print("Petal Length Versicolor std is:", '{:0.3f}'.format(col3Varstd))
col3Virgstd = np.std(data[VIRGINICA, 2])
print("Petal Length Virginica std is:", '{:0.3f}'.format(col3Virgstd))

# --- histograms ---------------------------------------------------------------
# learnt labeling cmds at https://matplotlib.org/gallery/pyplots/pyplot_text.html#sphx-glr-gallery-pyplots-pyplot-text-py
# also useful ref (though code used is R): http://www.lac.inpe.br/~rafael.santos/Docs/R/CAP386/IntroEDA-Iris.html

def histogram(values, title, xlabel):
    """Show one frequency histogram for *values*."""
    plt.hist(values)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.show()

histogram(col2, 'Histogram of Sepal Widths', 'Sepal Width (cm)')
histogram(col3, 'Histogram of Petal Lengths', 'Petal Length (cm)')
histogram(col1, 'Histogram of Sepal Lengths', 'Sepal Length (cm)')
histogram(col4, 'Histogram of Petal Widths', 'Petal Width (cm)')

# now to plot the Petal Lengths by Variety:
col31 = data[SETOSA, 2]
col32 = data[VERSICOLOR, 2]
col33 = data[VIRGINICA, 2]
histogram(col31, 'Petal Lengths of Setosa variety', 'Petal Length (cm)')
histogram(col32, 'Petal Lengths of Versicolor variety', 'Petal Length (cm)')
histogram(col33, 'Petal Lengths of Virginica variety', 'Petal Length (cm)')

# method for Histogram 3-way layout found on Stack overflow:
# https://stackoverflow.com/questions/24319505/how-can-one-display-images-side-by-side-in-a-github-readme-md

# FOOTNOTES
# which data is found where?
#            |  SL  |  SW  |  PL  |  PW  |
# ----------------------------------------
# Setosa     |           0 - 49          |
# Versicolor |          50 - 99          |
# Virginica  |         100 - 149         |
# ----------------------------------------
# Listen. Strange women lying in ponds distributing swords is no basis for a system of government.
import numpy as np
import matplotlib.pyplot as plt
# numpy handles the array maths, matplotlib the plotting.

# Load the Iris dataset as a 2-D float array (one row per flower).
# Columns: 0 = Sepal Length, 1 = Sepal Width, 2 = Petal Length, 3 = Petal Width.
# NOTE(review): assumes the CSV has no header row -- genfromtxt would turn a
# text header into a row of NaNs; verify against Data/Iris.csv.
data = np.genfromtxt('Data/Iris.csv', delimiter=',')

# Named column views, reused later for the histogram plots.
col1 = data[:, 0]
col2 = data[:, 1]
col3 = data[:, 2]
col4 = data[:, 3]

# Row ranges for each variety (rows 0-49, 50-99, 100-149 inclusive, per the
# footnote at the bottom), written as half-open slices.
# FIX(review): the original sliced e.g. data[0:49, ...], which silently
# dropped the last row of every variety (rows 49, 99 and 149).
SETOSA = slice(0, 50)
VERSICOLOR = slice(50, 100)
VIRGINICA = slice(100, 150)

# --- whole-dataset statistics, one function per statistic --------------------

def colmean(colno):
    """Mean of column *colno* over the whole dataset."""
    return np.mean(data[:, colno])

def colmax(colno):
    """Maximum of column *colno* over the whole dataset."""
    return np.max(data[:, colno])

def colmin(colno):
    """Minimum of column *colno* over the whole dataset."""
    return np.min(data[:, colno])

def colstd(colno):
    """Standard deviation of column *colno* over the whole dataset."""
    return np.std(data[:, colno])

for c in range(4):
    print("Column {} mean is:".format(c + 1), '{:0.3f}'.format(colmean(c)))
for c in range(4):
    print("Column {} max is:".format(c + 1), '{:0.1f}'.format(colmax(c)))
for c in range(4):
    print("Column {} min is:".format(c + 1), '{:0.1f}'.format(colmin(c)))
for c in range(4):
    print("Column {} std dev is:".format(c + 1), '{:0.3f}'.format(colstd(c)))

# Spot-check addressing a single cell; syntax is data[row, column].
print("the value at row 3, column 2 is:", data[2, 1])

# --- per-variety statistics --------------------------------------------------

def colmeanS(colno):
    """Mean of column *colno* for the Setosa rows."""
    return np.mean(data[SETOSA, colno])

def colmaxS(colno):
    """Maximum of column *colno* for the Setosa rows."""
    return np.max(data[SETOSA, colno])

def colminS(colno):
    """Minimum of column *colno* for the Setosa rows."""
    return np.min(data[SETOSA, colno])

def colmeanVr(colno):
    """Mean of column *colno* for the Versicolor rows."""
    return np.mean(data[VERSICOLOR, colno])

def colmaxVr(colno):
    """Maximum of column *colno* for the Versicolor rows."""
    return np.max(data[VERSICOLOR, colno])

def colminVr(colno):
    """Minimum of column *colno* for the Versicolor rows."""
    return np.min(data[VERSICOLOR, colno])

def colmeanVg(colno):
    """Mean of column *colno* for the Virginica rows."""
    return np.mean(data[VIRGINICA, colno])

def colmaxVg(colno):
    """Maximum of column *colno* for the Virginica rows."""
    return np.max(data[VIRGINICA, colno])

def colminVg(colno):
    """Minimum of column *colno* for the Virginica rows."""
    return np.min(data[VIRGINICA, colno])

COLUMN_LABELS = ("S.L.", "S.W.", "P.L.", "P.W.")

def report(variety, stat_name, stat_fn):
    """Print one '(Cn) <variety> <label> <stat> is: x.xxx' line per column."""
    for c, label in enumerate(COLUMN_LABELS):
        print("(C{}) {} {} {} is:".format(c + 1, variety, label, stat_name),
              '{:0.3f}'.format(stat_fn(c)))

report("Setosa", "mean", colmeanS)
report("Setosa", "max", colmaxS)
report("Setosa", "min", colminS)
report("Versicolor", "mean", colmeanVr)
report("Versicolor", "max", colmaxVr)
report("Versicolor", "min", colminVr)
report("Virginica", "mean", colmeanVg)
report("Virginica", "max", colmaxVg)
report("Virginica", "min", colminVg)

# Std Deviations for Petal Length on the 3 varieties (column 3):
col3Setstd = np.std(data[SETOSA, 2])
print("Petal Length Setosa std is:", '{:0.3f}'.format(col3Setstd))
col3Varstd = np.std(data[VERSICOLOR, 2])
print("Petal Length Versicolor std is:", '{:0.3f}'.format(col3Varstd))
col3Virgstd = np.std(data[VIRGINICA, 2])
print("Petal Length Virginica std is:", '{:0.3f}'.format(col3Virgstd))

# --- histograms ---------------------------------------------------------------
# learnt labeling cmds at https://matplotlib.org/gallery/pyplots/pyplot_text.html#sphx-glr-gallery-pyplots-pyplot-text-py
# also useful ref (though code used is R): http://www.lac.inpe.br/~rafael.santos/Docs/R/CAP386/IntroEDA-Iris.html

def histogram(values, title, xlabel):
    """Show one frequency histogram for *values*."""
    plt.hist(values)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.show()

histogram(col2, 'Histogram of Sepal Widths', 'Sepal Width (cm)')
histogram(col3, 'Histogram of Petal Lengths', 'Petal Length (cm)')
histogram(col1, 'Histogram of Sepal Lengths', 'Sepal Length (cm)')
histogram(col4, 'Histogram of Petal Widths', 'Petal Width (cm)')

# now to plot the Petal Lengths by Variety:
col31 = data[SETOSA, 2]
col32 = data[VERSICOLOR, 2]
col33 = data[VIRGINICA, 2]
histogram(col31, 'Petal Lengths of Setosa variety', 'Petal Length (cm)')
histogram(col32, 'Petal Lengths of Versicolor variety', 'Petal Length (cm)')
histogram(col33, 'Petal Lengths of Virginica variety', 'Petal Length (cm)')

# method for Histogram 3-way layout found on Stack overflow:
# https://stackoverflow.com/questions/24319505/how-can-one-display-images-side-by-side-in-a-github-readme-md

# FOOTNOTES
# which data is found where?
#            |  SL  |  SW  |  PL  |  PW  |
# ----------------------------------------
# Setosa     |           0 - 49          |
# Versicolor |          50 - 99          |
# Virginica  |         100 - 149         |
# ----------------------------------------
# Listen. Strange women lying in ponds distributing swords is no basis for a system of government.
from pubnub import utils
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.exceptions import PubNubException
from pubnub.models.consumer.message_count import PNMessageCountResult
class MessageCount(Endpoint):
    """Endpoint wrapper for the v3 message-counts REST call, which reports
    how many messages arrived on each channel since the given timetoken(s).

    Builder style: channel() and channel_timetokens() return self.
    """

    MESSAGE_COUNT_PATH = '/v3/history/sub-key/%s/message-counts/%s'

    def __init__(self, pubnub):
        Endpoint.__init__(self, pubnub)
        self._channel = []              # channels to query
        self._channels_timetoken = []   # one timetoken per channel, as str

    def channel(self, channel):
        """Add a channel (or a list of channels) to the request."""
        utils.extend_list(self._channel, channel)
        return self

    def channel_timetokens(self, timetokens):
        """Add per-channel timetokens; values are stored as strings."""
        as_strings = [str(item) for item in timetokens]
        utils.extend_list(self._channels_timetoken, as_strings)
        return self

    def custom_params(self):
        """Several timetokens are joined into 'channelsTimetoken'; a single
        one goes in 'timetoken'; none means no extra params."""
        tokens = self._channels_timetoken
        if not tokens:
            return {}
        if len(tokens) > 1:
            return {'channelsTimetoken': utils.join_items(tokens)}
        return {'timetoken': tokens[0]}

    def build_path(self):
        """Interpolate subscribe key and joined channel list into the path."""
        subscribe_key = self.pubnub.config.subscribe_key
        channels = utils.join_channels(self._channel)
        return MessageCount.MESSAGE_COUNT_PATH % (subscribe_key, channels)

    def http_method(self):
        return HttpMethod.GET

    def is_auth_required(self):
        return True

    def validate_params(self):
        """Require a subscribe key, a channel, and matching counts of
        channels and timetokens."""
        self.validate_subscribe_key()
        self.validate_channel()
        if len(self._channels_timetoken) != len(self._channel):
            raise PubNubException('The number of channels and the number of timetokens do not match.')

    def create_response(self, result):  # pylint: disable=W0221
        return PNMessageCountResult(result)

    def request_timeout(self):
        return self.pubnub.config.non_subscribe_request_timeout

    def connect_timeout(self):
        return self.pubnub.config.connect_timeout

    def operation_type(self):
        return PNOperationType.PNMessageCountOperation

    def name(self):
        return "Message Count"
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.exceptions import PubNubException
from pubnub.models.consumer.message_count import PNMessageCountResult
class MessageCount(Endpoint):
    """v3 message-counts endpoint: counts messages per channel since the
    supplied timetoken(s). Setters are chainable (return self)."""

    MESSAGE_COUNT_PATH = '/v3/history/sub-key/%s/message-counts/%s'

    def __init__(self, pubnub):
        Endpoint.__init__(self, pubnub)
        # Channels and their per-channel timetokens (kept as strings).
        self._channel = []
        self._channels_timetoken = []

    def channel(self, channel):
        """Append one channel or a list of channels."""
        utils.extend_list(self._channel, channel)
        return self

    def channel_timetokens(self, timetokens):
        """Append timetokens, coercing every entry to str first."""
        utils.extend_list(self._channels_timetoken, [str(t) for t in timetokens])
        return self

    def custom_params(self):
        """Pick the query-parameter form based on how many timetokens exist."""
        count = len(self._channels_timetoken)
        if count == 0:
            return {}
        if count == 1:
            return {'timetoken': self._channels_timetoken[0]}
        return {'channelsTimetoken': utils.join_items(self._channels_timetoken)}

    def build_path(self):
        """Fill the path template with subscribe key and joined channels."""
        return MessageCount.MESSAGE_COUNT_PATH % (
            self.pubnub.config.subscribe_key,
            utils.join_channels(self._channel),
        )

    def http_method(self):
        return HttpMethod.GET

    def is_auth_required(self):
        return True

    def validate_params(self):
        """Channels and timetokens must be present and pair up one-to-one."""
        self.validate_subscribe_key()
        self.validate_channel()
        if len(self._channels_timetoken) != len(self._channel):
            raise PubNubException('The number of channels and the number of timetokens do not match.')

    def create_response(self, result):  # pylint: disable=W0221
        return PNMessageCountResult(result)

    def request_timeout(self):
        return self.pubnub.config.non_subscribe_request_timeout

    def connect_timeout(self):
        return self.pubnub.config.connect_timeout

    def operation_type(self):
        return PNOperationType.PNMessageCountOperation

    def name(self):
        return "Message Count"
import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import MessagePassing, global_mean_pool
from torch_geometric.utils import degree, dense_to_sparse
from torch_geometric.nn import ECConv
from torch_scatter import scatter_add
def _make_block_diag(mats, mat_sizes):
block_diag = torch.zeros(sum(mat_sizes), sum(mat_sizes))
for i, (mat, size) in enumerate(zip(mats, mat_sizes)):
cum_size = sum(mat_sizes[:i])
block_diag[cum_size:cum_size+size,cum_size:cum_size+size] = mat
return block_diag
class ECCLayer(nn.Module):
    """One ECC block: three edge-conditioned convolutions, each followed by
    ReLU, batch normalisation and dropout. A small MLP per convolution maps
    the scalar edge attribute to a flattened weight matrix."""

    def __init__(self, dim_input, dim_embedding, dropout=0.):
        super().__init__()

        def filter_net(rows, cols):
            # Edge-attribute -> flattened (rows * cols) weight matrix.
            return nn.Sequential(nn.Linear(1, 16),
                                 nn.ReLU(),
                                 nn.Linear(16, rows * cols))

        self.conv1 = ECConv(dim_input, dim_embedding,
                            nn=filter_net(dim_embedding, dim_input))
        self.conv2 = ECConv(dim_embedding, dim_embedding,
                            nn=filter_net(dim_embedding, dim_embedding))
        self.conv3 = ECConv(dim_embedding, dim_embedding,
                            nn=filter_net(dim_embedding, dim_embedding))
        self.bn1 = nn.BatchNorm1d(dim_embedding)
        self.bn2 = nn.BatchNorm1d(dim_embedding)
        self.bn3 = nn.BatchNorm1d(dim_embedding)
        self.dropout = dropout

    def forward(self, x, edge_index, edge_attr):
        # The filter networks expect a trailing feature dimension.
        if edge_attr.dim() == 1:
            edge_attr = edge_attr.unsqueeze(-1)
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = F.relu(conv(x, edge_index, edge_attr))
            x = F.dropout(bn(x), p=self.dropout, training=self.training)
        return x
class ECC(nn.Module):
    """
    Uses fixed architecture.

    IMPORTANT NOTE: we will consider dataset which do not have edge labels.
    Therefore, we avoid learning the function that associates a weight matrix
    to an edge specific weight.
    """

    def __init__(self, dim_features, dim_target, model_configs, dataset_configs):
        super().__init__()
        self.model_configs = model_configs
        self.dropout = model_configs['dropout']
        self.dropout_final = model_configs['dropout_final']
        self.num_layers = model_configs['num_layers']
        dim_embedding = model_configs['dim_embedding']

        # Stacked ECC layers; only the first consumes the raw node features.
        self.layers = nn.ModuleList([])
        for i in range(self.num_layers):
            dim_input = dim_features if i == 0 else dim_embedding
            layer = ECCLayer(dim_input, dim_embedding, dropout=self.dropout)
            self.layers.append(layer)

        # Final edge-conditioned convolution applied after the last pooling.
        fnet = nn.Sequential(nn.Linear(1, 16),
                             nn.ReLU(),
                             nn.Linear(16, dim_embedding * dim_embedding))
        self.final_conv = ECConv(dim_embedding, dim_embedding, nn=fnet)
        self.final_conv_bn = nn.BatchNorm1d(dim_embedding)

        # Two-layer prediction head.
        self.fc1 = nn.Linear(dim_embedding, dim_embedding)
        self.fc2 = nn.Linear(dim_embedding, dim_target)

        self.task_type = dataset_configs["task_type"]
        self.multiclass_num_classes = dataset_configs["multiclass_num_classes"] if self.task_type == 'Multi-Classification' else None

        self.classification = self.task_type == 'Classification'
        if self.classification:
            self.sigmoid = nn.Sigmoid()
        self.multiclass = self.task_type == 'Multi-Classification'
        if self.multiclass:
            self.multiclass_softmax = nn.Softmax(dim=2)
        self.regression = self.task_type == 'Regression'
        if self.regression:
            self.relu = nn.ReLU()
        # FIX(review): the original `assert not (a and b and c)` only rejected
        # the all-three-True case and was therefore vacuous; require at most
        # one task flag instead (they are mutually exclusive by construction).
        assert [self.classification, self.regression, self.multiclass].count(True) <= 1

    def make_block_diag(self, matrix_list):
        """Stack the square matrices in *matrix_list* block-diagonally."""
        mat_sizes = [m.size(0) for m in matrix_list]
        return _make_block_diag(matrix_list, mat_sizes)

    def get_ecc_conv_parameters(self, data, layer_no):
        """Return (edge_index, edge_weights, keep_mask) for layer *layer_no*.

        The per-graph laplacians for this layer are stacked block-diagonally
        and converted to sparse form; v_plus == 1 marks nodes kept by pooling.
        """
        v_plus_list, laplacians = data.v_plus, data.laplacians
        v_plus_batch = torch.cat([v_plus[layer_no] for v_plus in v_plus_list], dim=0)
        laplacian_layer_list = [laplacians[i][layer_no] for i in range(len(laplacians))]
        laplacian_block_diagonal = self.make_block_diag(laplacian_layer_list)
        lap_edge_idx, lap_edge_weights = dense_to_sparse(laplacian_block_diagonal)
        lap_edge_weights = lap_edge_weights.squeeze(-1)
        # Convert v_plus_batch to boolean keep-mask.
        return lap_edge_idx, lap_edge_weights, (v_plus_batch == 1)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x.requires_grad = True

        # Buffers filled by the backward hooks below (for attribution).
        self.conv_acts = []
        self.conv_grads = []
        self.edge_grads = []

        for i, layer in enumerate(self.layers):
            # TODO should lap_edge_index[0] be equal to edge_idx?
            lap_edge_idx, lap_edge_weights, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=i)
            # First layer runs on the raw graph; deeper layers on the
            # coarsened laplacian graph from the previous pooling.
            edge_index = lap_edge_idx if i != 0 else edge_index
            edge_weight = lap_edge_weights if i != 0 else x.new_ones((edge_index.size(1), ))

            edge_index = edge_index.to(self.model_configs["device"])
            edge_weight = edge_weight.to(self.model_configs["device"])
            edge_weight.requires_grad = True

            # apply convolutional layer
            with torch.enable_grad():
                x = layer(x, edge_index, edge_weight)
            x.register_hook(self.activations_hook)
            self.conv_acts.append(x)
            edge_weight.register_hook(self.edge_attrs_hook)

            # pooling: keep only the nodes selected by v_plus
            x = x[v_plus_batch]
            batch = batch[v_plus_batch]

        # final_convolution
        lap_edge_idx, lap_edge_weight, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=self.num_layers)
        lap_edge_idx = lap_edge_idx.to(self.model_configs["device"])
        lap_edge_weight = lap_edge_weight.to(self.model_configs["device"])
        lap_edge_weight.requires_grad = True
        x = F.relu(self.final_conv(x, lap_edge_idx, lap_edge_weight.unsqueeze(-1)))
        x = F.dropout(self.final_conv_bn(x), p=self.dropout, training=self.training)
        lap_edge_weight.register_hook(self.edge_attrs_hook)
        self.lap_edge_weight = lap_edge_weight

        # TODO: is the following line needed before global pooling?
        # batch = batch[v_plus_batch]
        graph_emb = global_mean_pool(x, batch)

        x = F.relu(self.fc1(graph_emb))
        x = F.dropout(x, p=self.dropout_final, training=self.training)
        # No ReLU specified here todo check with source code (code is not so clear)
        x = self.fc2(x)

        # Don't apply sigmoid during training b/c using BCEWithLogitsLoss
        if self.classification and not self.training:
            x = self.sigmoid(x)
        if self.multiclass:
            x = x.reshape((x.size(0), -1, self.multiclass_num_classes))  # batch size x num targets x num classes per target
            if not self.training:
                x = self.multiclass_softmax(x)  # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss
        return x

    def get_gap_activations(self, data):
        """Forward + backward once; return the activations feeding the global
        pooling (for CAM-style attribution).
        NOTE(review): backward() without arguments needs a scalar output --
        assumes a single-target model here; confirm with callers."""
        output = self.forward(data)
        output.backward()
        return self.conv_acts[-1], None

    def get_prediction_weights(self):
        """Weights of the first output unit of the prediction head."""
        w = self.fc2.weight.t()
        return w[:, 0]

    def get_intermediate_activations_gradients(self, data):
        """Forward + backward once; return per-layer activations and the
        gradients captured by activations_hook."""
        output = self.forward(data)
        output.backward()
        # FIX(review): the hooks already receive gradient *tensors*, so the
        # original's `[g.grad for g in self.conv_grads]` was a dead list of
        # Nones; the captured gradients are returned directly.
        return self.conv_acts, self.conv_grads

    def activations_hook(self, grad):
        """Backward hook: record the gradient w.r.t. a layer's activations."""
        self.conv_grads.append(grad)

    def edge_attrs_hook(self, grad):
        """Backward hook: record the gradient w.r.t. edge weights."""
        self.edge_grads.append(grad)

    def get_gradients(self, data):
        """Forward + backward once; return node features, their gradients,
        the final edge weights and their gradients."""
        data.x.requires_grad_()
        data.x.retain_grad()
        output = self.forward(data)
        output.backward()
        atom_grads = data.x.grad
        # FIX(review): self.edge_grads holds the gradient tensors captured by
        # edge_attrs_hook; taking `.grad` of those (as the original did)
        # always yielded None.
        edge_grads = self.edge_grads[-1]
        return data.x, atom_grads, self.lap_edge_weight, edge_grads
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import MessagePassing, global_mean_pool
from torch_geometric.utils import degree, dense_to_sparse
from torch_geometric.nn import ECConv
from torch_scatter import scatter_add
def _make_block_diag(mats, mat_sizes):
block_diag = torch.zeros(sum(mat_sizes), sum(mat_sizes))
for i, (mat, size) in enumerate(zip(mats, mat_sizes)):
cum_size = sum(mat_sizes[:i])
block_diag[cum_size:cum_size+size,cum_size:cum_size+size] = mat
return block_diag
class ECCLayer(nn.Module):
    """Three stacked edge-conditioned convolutions; each is followed by ReLU,
    batch normalisation and dropout."""

    def __init__(self, dim_input, dim_embedding, dropout=0.):
        super().__init__()
        # Per-convolution MLPs turning the scalar edge attribute into a
        # flattened (in * out) weight matrix.
        shapes = [(dim_input, dim_embedding),
                  (dim_embedding, dim_embedding),
                  (dim_embedding, dim_embedding)]
        nets = [nn.Sequential(nn.Linear(1, 16),
                              nn.ReLU(),
                              nn.Linear(16, d_in * d_out))
                for d_in, d_out in shapes]
        self.conv1 = ECConv(dim_input, dim_embedding, nn=nets[0])
        self.conv2 = ECConv(dim_embedding, dim_embedding, nn=nets[1])
        self.conv3 = ECConv(dim_embedding, dim_embedding, nn=nets[2])
        self.bn1 = nn.BatchNorm1d(dim_embedding)
        self.bn2 = nn.BatchNorm1d(dim_embedding)
        self.bn3 = nn.BatchNorm1d(dim_embedding)
        self.dropout = dropout

    def forward(self, x, edge_index, edge_attr):
        # Ensure the edge attributes carry a trailing feature dimension.
        if edge_attr.dim() == 1:
            edge_attr = edge_attr.unsqueeze(-1)
        h = F.relu(self.conv1(x, edge_index, edge_attr))
        h = F.dropout(self.bn1(h), p=self.dropout, training=self.training)
        h = F.relu(self.conv2(h, edge_index, edge_attr))
        h = F.dropout(self.bn2(h), p=self.dropout, training=self.training)
        h = F.relu(self.conv3(h, edge_index, edge_attr))
        h = F.dropout(self.bn3(h), p=self.dropout, training=self.training)
        return h
class ECC(nn.Module):
    """
    ECC (edge-conditioned convolution) graph network with a fixed architecture.

    IMPORTANT NOTE: we consider datasets which do not have edge labels, so we
    avoid learning the function that associates a weight matrix to an
    edge-specific weight.

    Activations and gradients of the conv layers and edge weights are captured
    via backward hooks for explainability (see ``get_gradients`` and
    ``get_intermediate_activations_gradients``).
    """

    def __init__(self, dim_features, dim_target, model_configs, dataset_configs):
        super().__init__()
        self.model_configs = model_configs
        self.dropout = model_configs['dropout']
        self.dropout_final = model_configs['dropout_final']
        self.num_layers = model_configs['num_layers']
        dim_embedding = model_configs['dim_embedding']

        # Stacked ECC layers: the first maps raw features, the rest are
        # embedding -> embedding.
        self.layers = nn.ModuleList([])
        for i in range(self.num_layers):
            dim_input = dim_features if i == 0 else dim_embedding
            layer = ECCLayer(dim_input, dim_embedding, dropout=self.dropout)
            self.layers.append(layer)

        # Filter network for the final edge-conditioned convolution: maps a
        # scalar edge weight to a dim_embedding x dim_embedding mixing matrix.
        fnet = nn.Sequential(nn.Linear(1, 16),
                             nn.ReLU(),
                             nn.Linear(16, dim_embedding * dim_embedding))
        self.final_conv = ECConv(dim_embedding, dim_embedding, nn=fnet)
        self.final_conv_bn = nn.BatchNorm1d(dim_embedding)

        self.fc1 = nn.Linear(dim_embedding, dim_embedding)
        self.fc2 = nn.Linear(dim_embedding, dim_target)

        # Output-head configuration. task_type is a single string, so at most
        # one of the three flags below can be true.
        self.task_type = dataset_configs["task_type"]
        self.multiclass_num_classes = dataset_configs["multiclass_num_classes"] \
            if self.task_type == 'Multi-Classification' else None
        self.classification = self.task_type == 'Classification'
        if self.classification:
            self.sigmoid = nn.Sigmoid()
        self.multiclass = self.task_type == 'Multi-Classification'
        if self.multiclass:
            self.multiclass_softmax = nn.Softmax(dim=2)
        self.regression = self.task_type == 'Regression'
        if self.regression:
            self.relu = nn.ReLU()
        assert not (self.classification and self.regression and self.multiclass)

    def make_block_diag(self, matrix_list):
        """Stack a list of square matrices into one block-diagonal matrix."""
        mat_sizes = [m.size(0) for m in matrix_list]
        return _make_block_diag(matrix_list, mat_sizes)

    def get_ecc_conv_parameters(self, data, layer_no):
        """Return ``(edge_index, edge_weights, keep_mask)`` for coarsening
        level *layer_no*, built from the precomputed per-graph Laplacians and
        v_plus vertex-selection vectors stored on *data*."""
        v_plus_list, laplacians = data.v_plus, data.laplacians
        v_plus_batch = torch.cat([v_plus[layer_no] for v_plus in v_plus_list], dim=0)
        laplacian_layer_list = [laplacians[i][layer_no] for i in range(len(laplacians))]
        laplacian_block_diagonal = self.make_block_diag(laplacian_layer_list)
        lap_edge_idx, lap_edge_weights = dense_to_sparse(laplacian_block_diagonal)
        lap_edge_weights = lap_edge_weights.squeeze(-1)
        # Convert v_plus_batch to a boolean keep-mask.
        return lap_edge_idx, lap_edge_weights, (v_plus_batch == 1)

    def forward(self, data):
        """Run the ECC stack with pooling between layers; returns the task
        output (logits during training; probabilities at eval for the
        classification heads)."""
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # Track gradients w.r.t. the node inputs for explainability.
        x.requires_grad = True
        self.conv_acts = []
        self.conv_grads = []
        self.edge_grads = []

        for i, layer in enumerate(self.layers):
            # TODO should lap_edge_index[0] be equal to edge_idx?
            lap_edge_idx, lap_edge_weights, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=i)
            # The first layer uses the raw graph with unit edge weights; later
            # layers use the Laplacian of the coarsened graph.
            edge_index = lap_edge_idx if i != 0 else edge_index
            edge_weight = lap_edge_weights if i != 0 else x.new_ones((edge_index.size(1),))

            edge_index = edge_index.to(self.model_configs["device"])
            edge_weight = edge_weight.to(self.model_configs["device"])
            edge_weight.requires_grad = True

            # Apply the convolutional layer, capturing activations/gradients.
            with torch.enable_grad():
                x = layer(x, edge_index, edge_weight)
            x.register_hook(self.activations_hook)
            self.conv_acts.append(x)
            edge_weight.register_hook(self.edge_attrs_hook)

            # Pooling: keep only the vertices selected by v_plus.
            x = x[v_plus_batch]
            batch = batch[v_plus_batch]

        # Final convolution on the coarsest graph.
        lap_edge_idx, lap_edge_weight, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=self.num_layers)
        lap_edge_idx = lap_edge_idx.to(self.model_configs["device"])
        lap_edge_weight = lap_edge_weight.to(self.model_configs["device"])
        lap_edge_weight.requires_grad = True
        x = F.relu(self.final_conv(x, lap_edge_idx, lap_edge_weight.unsqueeze(-1)))
        x = F.dropout(self.final_conv_bn(x), p=self.dropout, training=self.training)
        lap_edge_weight.register_hook(self.edge_attrs_hook)
        self.lap_edge_weight = lap_edge_weight

        # TODO: is the following line needed before global pooling?
        # batch = batch[v_plus_batch]
        graph_emb = global_mean_pool(x, batch)

        x = F.relu(self.fc1(graph_emb))
        x = F.dropout(x, p=self.dropout_final, training=self.training)
        # No ReLU specified here todo check with source code (code is not so clear)
        x = self.fc2(x)

        # Don't apply sigmoid during training b/c using BCEWithLogitsLoss
        if self.classification and not self.training:
            x = self.sigmoid(x)
        if self.multiclass:
            x = x.reshape((x.size(0), -1, self.multiclass_num_classes))  # batch size x num targets x num classes per target
            if not self.training:
                x = self.multiclass_softmax(x)  # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss
        return x

    def get_gap_activations(self, data):
        """Forward + backward pass; returns the last conv layer's activations
        (for CAM-style explanations) and ``None`` as a placeholder."""
        output = self.forward(data)
        output.backward()
        return self.conv_acts[-1], None

    def get_prediction_weights(self):
        """Output-layer weights for the first target column."""
        w = self.fc2.weight.t()
        return w[:, 0]

    def get_intermediate_activations_gradients(self, data):
        """Forward + backward pass; returns per-layer conv activations and the
        gradients captured by ``activations_hook``.

        (The original also built an unused list of ``.grad`` attributes of the
        captured gradient tensors, which are always None on non-leaf tensors;
        that dead code is removed.)
        """
        output = self.forward(data)
        output.backward()
        return self.conv_acts, self.conv_grads

    def activations_hook(self, grad):
        """Backward hook: store the gradient flowing into a conv activation."""
        self.conv_grads.append(grad)

    def edge_attrs_hook(self, grad):
        """Backward hook: store the gradient flowing into an edge-weight tensor."""
        self.edge_grads.append(grad)

    def get_gradients(self, data):
        """Forward + backward pass; returns (node inputs, node gradients, final
        edge weights, final edge-weight gradient)."""
        data.x.requires_grad_()
        data.x.retain_grad()
        output = self.forward(data)
        output.backward()
        atom_grads = data.x.grad
        # BUGFIX: edge_attrs_hook stores the gradient tensors themselves, so
        # the original `[edge_g.grad for edge_g in self.edge_grads][-1]` was
        # always None (`.grad` of a non-leaf tensor). Use the captured tensor.
        edge_grads = self.edge_grads[-1] if self.edge_grads else None
        return data.x, atom_grads, self.lap_edge_weight, edge_grads
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from ibc.core.channel.v1 import (
genesis_pb2 as ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2,
)
from ibc.core.client.v1 import (
genesis_pb2 as ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2,
)
from ibc.core.connection.v1 import (
genesis_pb2 as ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="ibc/core/types/v1/genesis.proto",
package="ibc.core.types.v1",
syntax="proto3",
serialized_options=b"Z+github.com/cosmos/ibc-go/modules/core/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1fibc/core/types/v1/genesis.proto\x12\x11ibc.core.types.v1\x1a\x14gogoproto/gogo.proto\x1a ibc/core/client/v1/genesis.proto\x1a$ibc/core/connection/v1/genesis.proto\x1a!ibc/core/channel/v1/genesis.proto"\xa8\x02\n\x0cGenesisState\x12W\n\x0e\x63lient_genesis\x18\x01 \x01(\x0b\x32 .ibc.core.client.v1.GenesisStateB\x1d\xc8\xde\x1f\x00\xf2\xde\x1f\x15yaml:"client_genesis"\x12\x63\n\x12\x63onnection_genesis\x18\x02 \x01(\x0b\x32$.ibc.core.connection.v1.GenesisStateB!\xc8\xde\x1f\x00\xf2\xde\x1f\x19yaml:"connection_genesis"\x12Z\n\x0f\x63hannel_genesis\x18\x03 \x01(\x0b\x32!.ibc.core.channel.v1.GenesisStateB\x1e\xc8\xde\x1f\x00\xf2\xde\x1f\x16yaml:"channel_genesis"B-Z+github.com/cosmos/ibc-go/modules/core/typesb\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2.DESCRIPTOR,
ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2.DESCRIPTOR,
ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2.DESCRIPTOR,
],
)
_GENESISSTATE = _descriptor.Descriptor(
name="GenesisState",
full_name="ibc.core.types.v1.GenesisState",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="client_genesis",
full_name="ibc.core.types.v1.GenesisState.client_genesis",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\310\336\037\000\362\336\037\025yaml:"client_genesis"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="connection_genesis",
full_name="ibc.core.types.v1.GenesisState.connection_genesis",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\310\336\037\000\362\336\037\031yaml:"connection_genesis"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="channel_genesis",
full_name="ibc.core.types.v1.GenesisState.channel_genesis",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\310\336\037\000\362\336\037\026yaml:"channel_genesis"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=184,
serialized_end=480,
)
_GENESISSTATE.fields_by_name[
"client_genesis"
].message_type = ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2._GENESISSTATE
_GENESISSTATE.fields_by_name[
"connection_genesis"
].message_type = ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2._GENESISSTATE
_GENESISSTATE.fields_by_name[
"channel_genesis"
].message_type = ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2._GENESISSTATE
DESCRIPTOR.message_types_by_name["GenesisState"] = _GENESISSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenesisState = _reflection.GeneratedProtocolMessageType(
"GenesisState",
(_message.Message,),
{
"DESCRIPTOR": _GENESISSTATE,
"__module__": "ibc.core.types.v1.genesis_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.types.v1.GenesisState)
},
)
_sym_db.RegisterMessage(GenesisState)
DESCRIPTOR._options = None
_GENESISSTATE.fields_by_name["client_genesis"]._options = None
_GENESISSTATE.fields_by_name["connection_genesis"]._options = None
_GENESISSTATE.fields_by_name["channel_genesis"]._options = None
# @@protoc_insertion_point(module_scope) | terra_sdk/protobuf/ibc/core/types/v1/genesis_pb2.py | """Generated protocol buffer code."""
"""Generated protocol buffer code."""
# Auto-generated by protoc from ibc/core/types/v1/genesis.proto (duplicate
# copy in this concatenated dump). Do not edit by hand; regenerate instead.
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database

# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from ibc.core.channel.v1 import (
    genesis_pb2 as ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2,
)
from ibc.core.client.v1 import (
    genesis_pb2 as ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2,
)
from ibc.core.connection.v1 import (
    genesis_pb2 as ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2,
)

# File descriptor for ibc/core/types/v1/genesis.proto (serialized form below).
DESCRIPTOR = _descriptor.FileDescriptor(
    name="ibc/core/types/v1/genesis.proto",
    package="ibc.core.types.v1",
    syntax="proto3",
    serialized_options=b"Z+github.com/cosmos/ibc-go/modules/core/types",
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x1fibc/core/types/v1/genesis.proto\x12\x11ibc.core.types.v1\x1a\x14gogoproto/gogo.proto\x1a ibc/core/client/v1/genesis.proto\x1a$ibc/core/connection/v1/genesis.proto\x1a!ibc/core/channel/v1/genesis.proto"\xa8\x02\n\x0cGenesisState\x12W\n\x0e\x63lient_genesis\x18\x01 \x01(\x0b\x32 .ibc.core.client.v1.GenesisStateB\x1d\xc8\xde\x1f\x00\xf2\xde\x1f\x15yaml:"client_genesis"\x12\x63\n\x12\x63onnection_genesis\x18\x02 \x01(\x0b\x32$.ibc.core.connection.v1.GenesisStateB!\xc8\xde\x1f\x00\xf2\xde\x1f\x19yaml:"connection_genesis"\x12Z\n\x0f\x63hannel_genesis\x18\x03 \x01(\x0b\x32!.ibc.core.channel.v1.GenesisStateB\x1e\xc8\xde\x1f\x00\xf2\xde\x1f\x16yaml:"channel_genesis"B-Z+github.com/cosmos/ibc-go/modules/core/typesb\x06proto3',
    dependencies=[
        gogoproto_dot_gogo__pb2.DESCRIPTOR,
        ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2.DESCRIPTOR,
        ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2.DESCRIPTOR,
        ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2.DESCRIPTOR,
    ],
)

# Descriptor for GenesisState: aggregates client/connection/channel genesis.
_GENESISSTATE = _descriptor.Descriptor(
    name="GenesisState",
    full_name="ibc.core.types.v1.GenesisState",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="client_genesis",
            full_name="ibc.core.types.v1.GenesisState.client_genesis",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=b'\310\336\037\000\362\336\037\025yaml:"client_genesis"',
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="connection_genesis",
            full_name="ibc.core.types.v1.GenesisState.connection_genesis",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=b'\310\336\037\000\362\336\037\031yaml:"connection_genesis"',
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="channel_genesis",
            full_name="ibc.core.types.v1.GenesisState.channel_genesis",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=b'\310\336\037\000\362\336\037\026yaml:"channel_genesis"',
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=184,
    serialized_end=480,
)

# Resolve the message types of the three sub-genesis fields.
_GENESISSTATE.fields_by_name[
    "client_genesis"
].message_type = ibc_dot_core_dot_client_dot_v1_dot_genesis__pb2._GENESISSTATE
_GENESISSTATE.fields_by_name[
    "connection_genesis"
].message_type = ibc_dot_core_dot_connection_dot_v1_dot_genesis__pb2._GENESISSTATE
_GENESISSTATE.fields_by_name[
    "channel_genesis"
].message_type = ibc_dot_core_dot_channel_dot_v1_dot_genesis__pb2._GENESISSTATE
DESCRIPTOR.message_types_by_name["GenesisState"] = _GENESISSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class created through the reflection metaclass.
GenesisState = _reflection.GeneratedProtocolMessageType(
    "GenesisState",
    (_message.Message,),
    {
        "DESCRIPTOR": _GENESISSTATE,
        "__module__": "ibc.core.types.v1.genesis_pb2"
        # @@protoc_insertion_point(class_scope:ibc.core.types.v1.GenesisState)
    },
)
_sym_db.RegisterMessage(GenesisState)

# Standard protoc boilerplate: reset cached options on descriptors.
DESCRIPTOR._options = None
_GENESISSTATE.fields_by_name["client_genesis"]._options = None
_GENESISSTATE.fields_by_name["connection_genesis"]._options = None
_GENESISSTATE.fields_by_name["channel_genesis"]._options = None
# @@protoc_insertion_point(module_scope)
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__license__ = 'Apache 2.0'
import wx
import massoc
from massoc.scripts.main import resource_path
from wx.lib.pubsub import pub
from massoc.GUI.intro import IntroPanel
from massoc.GUI.input import InputPanel
from massoc.GUI.process import ProcessPanel
from massoc.GUI.network import NetworkPanel
from massoc.GUI.database import DataPanel
from massoc.GUI.analysis import AnalysisPanel
import multiprocessing
# source: https://stackoverflow.com/questions/4004353/logging-strategy-for-gui-program
general_settings = {"biom_file": None,
"otu_table": None,
"tax_table": None,
"sample_data": None,
"otu_meta": None,
"cluster": None,
"split": None,
"prev": 20,
"fp": None,
"levels": None,
"tools": None,
"spiec": None,
"conet": None,
"conet_bash": None,
"spiec_settings": None,
"spar": None,
"spar_pval": None,
"spar_boot": None,
"nclust": None,
"name": None,
"cores": None,
"rar": None,
"min": None,
"network": None,
"assoc": None,
"agglom": None,
"logic": None,
"agglom_weight": None,
"export": None,
"neo4j": None,
"procbioms": None,
"address": "bolt://localhost:7687",
"username": "neo4j",
"password": "<PASSWORD>",
"variable": None,
"weight": None,
"networks": None,
"output": None,
"add": None}
class BuildFrame(wx.Frame):
    """Main massoc window: a notebook with one tab per pipeline step.

    Tabs publish their settings via pubsub; this frame merges them into
    ``self.settings`` and rebroadcasts the merged dict on 'show_settings'.
    """

    def __init__(self):
        wx.Frame.__init__(self, None, title='massoc', size=(800, 700))
        ico = wx.Icon(resource_path("massoc.png"), wx.BITMAP_TYPE_PNG)
        self.SetIcon(ico)

        p = wx.Panel(self)
        self.nb = wx.Notebook(p)
        self.tab1 = IntroPanel(self.nb)
        self.tab2 = InputPanel(self.nb)
        self.tab3 = ProcessPanel(self.nb)
        self.tab4 = NetworkPanel(self.nb)
        self.tab5 = DataPanel(self.nb)
        self.tab6 = AnalysisPanel(self.nb)
        self.nb.AddPage(self.tab1, "Start")
        self.nb.AddPage(self.tab2, "Input files")
        self.nb.AddPage(self.tab3, "Preprocessing")
        self.nb.AddPage(self.tab4, "Network inference")
        self.nb.AddPage(self.tab5, "Network database")
        self.nb.AddPage(self.tab6, "Network analysis")

        self.settings = general_settings
        sizer = wx.BoxSizer()
        sizer.Add(self.nb, 1, wx.EXPAND)
        p.SetSizer(sizer)

        # listens to help messages from uncoupled tab files
        self.CreateStatusBar()
        pub.subscribe(self.change_statusbar, 'change_statusbar')
        self.Show()

        # Every tab topic funnels into the same settings-merging listener.
        for topic in ('input_settings', 'process_settings', 'network_settings',
                      'data_settings', 'analysis_settings'):
            pub.subscribe(self.format_settings, topic)
        pub.subscribe(self.load_settings, 'load_settings')

    def format_settings(self, msg):
        """Merge a settings dict published by a tab and rebroadcast the result.

        Malformed messages (non-mapping *msg*) are ignored instead of crashing
        the GUI; the current (possibly partially updated) settings are still
        rebroadcast on 'show_settings'.
        """
        try:
            for key in msg:
                self.settings[key] = msg[key]
        except (TypeError, KeyError):
            # Best-effort: a tab sent something that is not a mapping.
            pass
        pub.sendMessage('show_settings', msg=self.settings)

    def load_settings(self, msg):
        """Listener for 'load_settings'; same merge behavior as format_settings."""
        self.format_settings(msg)

    def change_statusbar(self, msg):
        """Show *msg* in the frame's status bar."""
        self.SetStatusText(msg)
if __name__ == "__main__":
multiprocessing.freeze_support()
app = wx.App(False)
frame = BuildFrame()
app.MainLoop() | massocGUI.py | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__license__ = 'Apache 2.0'
import wx
import massoc
from massoc.scripts.main import resource_path
from wx.lib.pubsub import pub
from massoc.GUI.intro import IntroPanel
from massoc.GUI.input import InputPanel
from massoc.GUI.process import ProcessPanel
from massoc.GUI.network import NetworkPanel
from massoc.GUI.database import DataPanel
from massoc.GUI.analysis import AnalysisPanel
import multiprocessing
# source: https://stackoverflow.com/questions/4004353/logging-strategy-for-gui-program
general_settings = {"biom_file": None,
"otu_table": None,
"tax_table": None,
"sample_data": None,
"otu_meta": None,
"cluster": None,
"split": None,
"prev": 20,
"fp": None,
"levels": None,
"tools": None,
"spiec": None,
"conet": None,
"conet_bash": None,
"spiec_settings": None,
"spar": None,
"spar_pval": None,
"spar_boot": None,
"nclust": None,
"name": None,
"cores": None,
"rar": None,
"min": None,
"network": None,
"assoc": None,
"agglom": None,
"logic": None,
"agglom_weight": None,
"export": None,
"neo4j": None,
"procbioms": None,
"address": "bolt://localhost:7687",
"username": "neo4j",
"password": "<PASSWORD>",
"variable": None,
"weight": None,
"networks": None,
"output": None,
"add": None}
class BuildFrame(wx.Frame):
    """Main massoc window: a notebook with one tab per pipeline step.

    Tabs publish their settings via pubsub; this frame merges them into
    ``self.settings`` and rebroadcasts the merged dict on 'show_settings'.
    """

    def __init__(self):
        wx.Frame.__init__(self, None, title='massoc', size=(800, 700))
        ico = wx.Icon(resource_path("massoc.png"), wx.BITMAP_TYPE_PNG)
        self.SetIcon(ico)

        p = wx.Panel(self)
        self.nb = wx.Notebook(p)
        self.tab1 = IntroPanel(self.nb)
        self.tab2 = InputPanel(self.nb)
        self.tab3 = ProcessPanel(self.nb)
        self.tab4 = NetworkPanel(self.nb)
        self.tab5 = DataPanel(self.nb)
        self.tab6 = AnalysisPanel(self.nb)
        self.nb.AddPage(self.tab1, "Start")
        self.nb.AddPage(self.tab2, "Input files")
        self.nb.AddPage(self.tab3, "Preprocessing")
        self.nb.AddPage(self.tab4, "Network inference")
        self.nb.AddPage(self.tab5, "Network database")
        self.nb.AddPage(self.tab6, "Network analysis")

        self.settings = general_settings
        sizer = wx.BoxSizer()
        sizer.Add(self.nb, 1, wx.EXPAND)
        p.SetSizer(sizer)

        # listens to help messages from uncoupled tab files
        self.CreateStatusBar()
        pub.subscribe(self.change_statusbar, 'change_statusbar')
        self.Show()

        # Every tab topic funnels into the same settings-merging listener.
        for topic in ('input_settings', 'process_settings', 'network_settings',
                      'data_settings', 'analysis_settings'):
            pub.subscribe(self.format_settings, topic)
        pub.subscribe(self.load_settings, 'load_settings')

    def format_settings(self, msg):
        """Merge a settings dict published by a tab and rebroadcast the result.

        Malformed messages (non-mapping *msg*) are ignored instead of crashing
        the GUI; the current settings are still rebroadcast on 'show_settings'.
        """
        try:
            for key in msg:
                self.settings[key] = msg[key]
        except (TypeError, KeyError):
            # Best-effort: a tab sent something that is not a mapping.
            pass
        pub.sendMessage('show_settings', msg=self.settings)

    def load_settings(self, msg):
        """Listener for 'load_settings'; same merge behavior as format_settings."""
        self.format_settings(msg)

    def change_statusbar(self, msg):
        """Show *msg* in the frame's status bar."""
        self.SetStatusText(msg)
if __name__ == "__main__":
multiprocessing.freeze_support()
app = wx.App(False)
frame = BuildFrame()
app.MainLoop() | 0.422505 | 0.168309 |
from typing import Dict
from python import DOCUMENT_ID, TOPIC_ID
from python.handwritten_baseline.pipeline.data.base import Dataset, BaselineDataProcessorStage
class DataReducerStage(BaselineDataProcessorStage):
    """Optionally shrinks a dataset to a random subset of topics and/or a
    random subset of documents per topic, then drops all tokens and mention
    frames belonging to removed documents.

    Config keys:
        num_topics: keep only this many randomly chosen topics (optional)
        num_docs_per_topic: keep at most this many documents per topic (optional)
    """

    def __init__(self, pos, config, config_global, logger):
        super(DataReducerStage, self).__init__(pos, config, config_global, logger)
        self._num_topics = config.get("num_topics", None)
        self._num_docs_per_topic = config.get("num_docs_per_topic", None)

    @staticmethod
    def _restrict_to_documents(df, doc_ids):
        """Return the rows of *df* whose DOCUMENT_ID index level is in
        *doc_ids*; passes ``None`` through unchanged."""
        if df is None:
            return None
        return df.loc[df.index.get_level_values(DOCUMENT_ID).isin(doc_ids)]

    def _process_dataset(self,
                         dataset: Dataset,
                         live_objects: Dict) -> Dataset:
        docs = dataset.documents

        # Select subset of topics (seeded sampling for reproducibility).
        if self._num_topics is not None:
            actual_num_topics = len(docs.index.unique(TOPIC_ID))
            if self._num_topics > actual_num_topics:
                raise ValueError(
                    f"This dataset only has {actual_num_topics} topics, but you asked for a subset of {self._num_topics} topics.")
            topics_to_use = docs.index.unique(TOPIC_ID).to_series().sample(self._num_topics, random_state=0).values
            selected_docs = docs.loc[docs.index.get_level_values(TOPIC_ID).isin(topics_to_use)]
        else:
            selected_docs = docs

        # Select subset of documents per topic (again seeded).
        if self._num_docs_per_topic is not None:
            selected_docs = selected_docs.groupby(TOPIC_ID, as_index=False).apply(
                lambda df: df.sample(min(len(df), self._num_docs_per_topic), random_state=0))
            # groupby.apply adds an extra outer index level; remove it again.
            selected_docs.index = selected_docs.index.droplevel(0)
            selected_docs.sort_index(inplace=True)

        self.logger.warning(f"Number of documents limited to {len(selected_docs)}!")
        dataset.documents = selected_docs

        # Drop all dependent data (tokens and mention frames) for documents
        # that were removed above; the repeated filter lives in one helper.
        selected_doc_ids = dataset.documents[DOCUMENT_ID]
        dataset.tokens = self._restrict_to_documents(dataset.tokens, selected_doc_ids)
        dataset.mentions_action = self._restrict_to_documents(dataset.mentions_action, selected_doc_ids)
        dataset.mentions_time = self._restrict_to_documents(dataset.mentions_time, selected_doc_ids)
        dataset.mentions_location = self._restrict_to_documents(dataset.mentions_location, selected_doc_ids)
        dataset.mentions_participants = self._restrict_to_documents(dataset.mentions_participants, selected_doc_ids)
        dataset.mentions_other = self._restrict_to_documents(dataset.mentions_other, selected_doc_ids)
        return dataset
# Pipeline hook: the stage class exported by this module.
component = DataReducerStage

# NOTE(review): a dataset-extraction artifact ("| .../reducer.py |") fused
# onto this line was removed; the import below begins the next concatenated
# copy of this module.
from typing import Dict
from python import DOCUMENT_ID, TOPIC_ID
from python.handwritten_baseline.pipeline.data.base import Dataset, BaselineDataProcessorStage
class DataReducerStage(BaselineDataProcessorStage):
    """Optionally shrinks a dataset to a random subset of topics and/or a
    random subset of documents per topic, then drops all tokens and mention
    frames belonging to removed documents. (Duplicate copy in this dump.)

    Config keys:
        num_topics: keep only this many randomly chosen topics (optional)
        num_docs_per_topic: keep at most this many documents per topic (optional)
    """

    def __init__(self, pos, config, config_global, logger):
        super(DataReducerStage, self).__init__(pos, config, config_global, logger)
        self._num_topics = config.get("num_topics", None)
        self._num_docs_per_topic = config.get("num_docs_per_topic", None)

    @staticmethod
    def _restrict_to_documents(df, doc_ids):
        """Return the rows of *df* whose DOCUMENT_ID index level is in
        *doc_ids*; passes ``None`` through unchanged."""
        if df is None:
            return None
        return df.loc[df.index.get_level_values(DOCUMENT_ID).isin(doc_ids)]

    def _process_dataset(self,
                         dataset: Dataset,
                         live_objects: Dict) -> Dataset:
        docs = dataset.documents

        # Select subset of topics (seeded sampling for reproducibility).
        if self._num_topics is not None:
            actual_num_topics = len(docs.index.unique(TOPIC_ID))
            if self._num_topics > actual_num_topics:
                raise ValueError(
                    f"This dataset only has {actual_num_topics} topics, but you asked for a subset of {self._num_topics} topics.")
            topics_to_use = docs.index.unique(TOPIC_ID).to_series().sample(self._num_topics, random_state=0).values
            selected_docs = docs.loc[docs.index.get_level_values(TOPIC_ID).isin(topics_to_use)]
        else:
            selected_docs = docs

        # Select subset of documents per topic (again seeded).
        if self._num_docs_per_topic is not None:
            selected_docs = selected_docs.groupby(TOPIC_ID, as_index=False).apply(
                lambda df: df.sample(min(len(df), self._num_docs_per_topic), random_state=0))
            # groupby.apply adds an extra outer index level; remove it again.
            selected_docs.index = selected_docs.index.droplevel(0)
            selected_docs.sort_index(inplace=True)

        self.logger.warning(f"Number of documents limited to {len(selected_docs)}!")
        dataset.documents = selected_docs

        # Drop all dependent data (tokens and mention frames) for documents
        # that were removed above; the repeated filter lives in one helper.
        selected_doc_ids = dataset.documents[DOCUMENT_ID]
        dataset.tokens = self._restrict_to_documents(dataset.tokens, selected_doc_ids)
        dataset.mentions_action = self._restrict_to_documents(dataset.mentions_action, selected_doc_ids)
        dataset.mentions_time = self._restrict_to_documents(dataset.mentions_time, selected_doc_ids)
        dataset.mentions_location = self._restrict_to_documents(dataset.mentions_location, selected_doc_ids)
        dataset.mentions_participants = self._restrict_to_documents(dataset.mentions_participants, selected_doc_ids)
        dataset.mentions_other = self._restrict_to_documents(dataset.mentions_other, selected_doc_ids)
        return dataset
# Pipeline hook: the stage class exported by this module.
# NOTE(review): a dataset-extraction artifact ("| 0.661923 | 0.480844 |")
# fused onto this line was removed — it made the line a syntax error.
component = DataReducerStage
from .models import MovieNightEvent, Movie, UserAttendence, LocationPermission
from rest_framework import serializers
from django.utils import timezone
from django.contrib.auth.models import User
from .utils import badgify
import pytz
def strfdelta(tdelta, fmt):
    """Format a timedelta with *fmt*, which may reference {days}, {hours},
    {minutes} and {seconds}. Days are reported as an absolute value; the
    time-of-day components come from the normalized ``seconds`` field."""
    minutes, seconds = divmod(tdelta.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return fmt.format(days=abs(tdelta.days), hours=hours,
                      minutes=minutes, seconds=seconds)
class MovieNightEventSerializer(serializers.ModelSerializer):
    """Read-only representation of a MovieNightEvent for listing views,
    including derived status, vote and registration information."""
    id = serializers.IntegerField(read_only=True)
    date = serializers.DateTimeField(format="%B %d, %Y, %I:%M %p")
    date_delta = serializers.SerializerMethodField()
    movies = serializers.SerializerMethodField()
    vote_enabled = serializers.SerializerMethodField()
    status = serializers.SerializerMethodField()
    reg_users = serializers.SerializerMethodField()
    winning_movie = serializers.SerializerMethodField()
    rawdate = serializers.SerializerMethodField()

    def get_rawdate(self, MovieNight):
        """Unformatted event datetime (for client-side sorting)."""
        return MovieNight.date

    def get_reg_users(self, MovieNight):
        return MovieNight.get_num_registered()

    def get_movies(self, MovieNight):
        """Comma-separated titles of all candidate movies."""
        return ', '.join([str(movie.title) for movie in MovieNight.MovieList.all()])

    def get_status(self, MovieNight):
        return MovieNight.get_status()

    def get_vote_enabled(self, MovieNight):
        return MovieNight.voting_enabled()

    def get_winning_movie(self, MovieNight):
        """Winning movie as 'Title (year)', or '?' if no winner exists yet."""
        try:
            winning_movie, _, _ = MovieNight.get_winning_movie()
            return '{} ({})'.format(winning_movie.title, winning_movie.year)
        except Exception:
            # No clear winner (e.g. no votes yet): show a placeholder instead
            # of breaking the listing. Narrowed from a bare `except:`.
            return "?"

    def get_date_delta(self, MovieNight):
        """Event datetime in Boston time plus a human-readable countdown
        ("In Nd, ...") or elapsed time ("... ago")."""
        date = MovieNight.date
        now = timezone.now()
        timedelta = date - now
        timedelta_secs = int(timedelta.total_seconds())
        # localize to boston TZ
        boston_tz = pytz.timezone("America/New_York")
        fmt = "%B %d, %Y, %I:%M %p %Z%z"
        date_boston_time = date.astimezone(boston_tz)
        if timedelta_secs > 0:
            return date_boston_time.strftime(fmt) + " (" + strfdelta(timedelta, "In {days}d, {hours}hrs, {minutes}min") + ")"
        else:
            timedelta = now - date
            return date_boston_time.strftime(fmt) + " (" + strfdelta(timedelta, "{days}d, {hours}hrs, {minutes}min ago") + ")"

    class Meta:
        model = MovieNightEvent
        # BUGFIX: 'movies' appeared twice in the original fields tuple;
        # deduplicated while preserving the first-occurrence order.
        fields = (
            'id', 'motto', 'date', 'movies', 'isdraft', 'date_delta',
            'vote_enabled', 'status', 'reg_users', 'winning_movie', 'rawdate'
        )
class ProfileSerializer(serializers.ModelSerializer):
    """Serializes a profile row for member-overview tables.

    NOTE(review): despite this serializer's name, ``Meta.model`` is
    ``UserAttendence`` while the method fields read ``Profile.user`` /
    ``Profile.invitation_key`` / ``Profile.is_invitor``. This only works if the
    serialized objects actually expose those attributes — confirm the intended
    model (a ``Profile`` model?) against the views that instantiate this class.
    """
    id = serializers.IntegerField(read_only=True)
    join_date = serializers.SerializerMethodField()
    is_invitor = serializers.SerializerMethodField()
    invitation_key = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()

    def get_firstlastname(self, Profile):
        # Display name: "<first> <last>".
        return Profile.user.first_name + " " + Profile.user.last_name

    def get_invitation_key(self, Profile):
        return Profile.invitation_key

    def get_is_invitor(self, Profile):
        return Profile.is_invitor

    def get_join_date(self, Profile):
        return Profile.user.date_joined

    class Meta:
        model = UserAttendence
        fields = (
            'id', 'firstlastname', 'is_invitor', "invitation_key", 'join_date'
        )
class LocationPermissionSerializer(serializers.ModelSerializer):
    """Full (privileged) serialization of a LocationPermission, including the
    user's invitation key."""
    id = serializers.IntegerField(read_only=True)
    location = serializers.SerializerMethodField()
    username = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()
    role = serializers.SerializerMethodField()
    invitation_key = serializers.SerializerMethodField()
    join_date = serializers.SerializerMethodField()
    user_id = serializers.SerializerMethodField()
    has_access = serializers.SerializerMethodField()

    def get_has_access(self, perm):
        # Access is simply the inverse of the revoked flag.
        return (perm.revoked_access == False)

    def get_location(self, perm):
        return perm.location.name

    def get_username(self, perm):
        return perm.user.username

    def get_firstlastname(self, perm):
        # Display name: "<first> <last>".
        return perm.user.first_name + " " + perm.user.last_name

    def get_role(self, perm):
        # Human-readable label for the role choice field.
        return perm.get_role_display()

    def get_invitation_key(self, perm):
        return perm.get_invite_code()

    def get_join_date(self, perm):
        return perm.user.date_joined

    def get_user_id(self, perm):
        return perm.user.id

    class Meta:
        model = LocationPermission
        fields = (
            'id', 'location', 'username', "firstlastname", 'role', 'invitation_key', 'join_date', 'user_id', 'has_access'
        )
class RestrictedLocationPermissionSerializer(serializers.ModelSerializer):
    # This is called by Ambassadors and does not return invitation keys.
    id = serializers.IntegerField(read_only=True)
    location = serializers.SerializerMethodField()
    username = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()
    role = serializers.SerializerMethodField()
    join_date = serializers.SerializerMethodField()
    user_id = serializers.SerializerMethodField()
    has_access = serializers.SerializerMethodField()
    revoke_access_hash = serializers.SerializerMethodField()

    def get_revoke_access_hash(self, perm):
        """HTML control for toggling a user's access; inviters get an inert
        'N/A' button instead of a toggle link."""
        if perm.can_invite():
            return "<button type='button' class='btn btn-secondary btn-sm' data-toggle='modal' data-target='#no_change_modal'>N/A</button>"
        if perm.revoked_access:
            return "<a class='btn btn-success btn-sm' href='/toggle_access_invite/" + perm.rev_access_hash + "' role='button'>Grant Access</a>"
        return "<a class='btn btn-danger btn-sm' href='/toggle_access_invite/" + perm.rev_access_hash + "' role='button'>Revoke Access</a>"

    def get_has_access(self, perm):
        # Access is simply the inverse of the revoked flag.
        return (perm.revoked_access == False)

    def get_location(self, perm):
        return perm.location.name

    def get_username(self, perm):
        return perm.user.username

    def get_firstlastname(self, perm):
        # Display name: "<first> <last>".
        return perm.user.first_name + " " + perm.user.last_name

    def get_role(self, perm):
        # Human-readable label for the role choice field.
        return perm.get_role_display()

    def get_join_date(self, perm):
        return perm.user.date_joined

    def get_user_id(self, perm):
        return perm.user.id

    class Meta:
        model = LocationPermission
        fields = (
            'revoke_access_hash', 'id', 'location', 'username', "firstlastname", 'role', 'join_date', 'user_id', 'has_access'
        )
class UserAttendenceSerializer(serializers.ModelSerializer):
    """Serializes one user's registration for a movie night, with their
    topping choices rendered as badge HTML."""
    id = serializers.IntegerField(read_only=True)
    user = serializers.SerializerMethodField()
    toppings = serializers.SerializerMethodField()
    reg_date = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()

    def get_firstlastname(self, attendence):
        # Display name: "<first> <last>".
        return attendence.user.first_name + " " + attendence.user.last_name

    def get_reg_date(self, attendence):
        """Registration timestamp localized to Boston time for display."""
        date = attendence.registered_at
        boston_tz = pytz.timezone("America/New_York")
        fmt = "%B %d, %Y, %I:%M %p %Z%z"
        return date.astimezone(boston_tz).strftime(fmt)

    def get_user(self, attendence):
        return attendence.user.username

    def get_toppings(self, attendence):
        # Space-separated HTML badges, one per chosen topping.
        return ' '.join([badgify(o.topping.topping, 'primary') for o in attendence.get_toppings()])

    class Meta:
        model = UserAttendence
        fields = (
            'id', 'user', 'toppings', 'reg_date', "registration_complete", "movienight", 'firstlastname'
        )

# NOTE(review): a dataset-extraction artifact ("| userhandling/serializers.py |")
# fused onto the closing line was removed; the import below begins the next
# concatenated copy of this module.
from .models import MovieNightEvent, Movie, UserAttendence, LocationPermission
from rest_framework import serializers
from django.utils import timezone
from django.contrib.auth.models import User
from .utils import badgify
import pytz
def strfdelta(tdelta, fmt):
    """Format a timedelta using `fmt`, which may reference {days}, {hours}, {minutes}, {seconds}.

    Days are reported as an absolute value; hours/minutes/seconds are derived
    from the timedelta's (always non-negative) `seconds` component.
    """
    values = {"days": abs(tdelta.days)}
    values["hours"], remainder = divmod(tdelta.seconds, 3600)
    values["minutes"], values["seconds"] = divmod(remainder, 60)
    return fmt.format(**values)
class MovieNightEventSerializer(serializers.ModelSerializer):
    """Serialize a MovieNightEvent for list views, with derived status/vote/date fields."""
    id = serializers.IntegerField(read_only=True)
    date = serializers.DateTimeField(format="%B %d, %Y, %I:%M %p")
    date_delta = serializers.SerializerMethodField()
    movies = serializers.SerializerMethodField()
    vote_enabled = serializers.SerializerMethodField()
    status = serializers.SerializerMethodField()
    reg_users = serializers.SerializerMethodField()
    winning_movie = serializers.SerializerMethodField()
    rawdate = serializers.SerializerMethodField()

    def get_rawdate(self, MovieNight):
        # Unformatted datetime, useful for client-side sorting.
        return MovieNight.date

    def get_reg_users(self, MovieNight):
        return MovieNight.get_num_registered()

    def get_movies(self, MovieNight):
        # Comma-separated titles of all candidate movies.
        return ', '.join(str(movie.title) for movie in MovieNight.MovieList.all())

    def get_status(self, MovieNight):
        return MovieNight.get_status()

    def get_vote_enabled(self, MovieNight):
        return MovieNight.voting_enabled()

    def get_winning_movie(self, MovieNight):
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            winning_movie, _, _ = MovieNight.get_winning_movie()
            return '{} ({})'.format(winning_movie.title, winning_movie.year)
        except Exception:
            # No winner determinable yet (e.g. voting not finished).
            return "?"

    def get_date_delta(self, MovieNight):
        """Return the event date localized to Boston plus a human countdown/elapsed suffix."""
        date = MovieNight.date
        now = timezone.now()
        timedelta = date - now
        timedelta_secs = int(timedelta.total_seconds())
        # localize to boston TZ
        boston_tz = pytz.timezone("America/New_York")
        fmt = "%B %d, %Y, %I:%M %p %Z%z"
        date_boston_time = date.astimezone(boston_tz)
        if timedelta_secs > 0:
            return date_boston_time.strftime(fmt) + " (" + strfdelta(timedelta, "In {days}d, {hours}hrs, {minutes}min") + ")"
        else:
            timedelta = now - date
            return date_boston_time.strftime(fmt) + " (" + strfdelta(timedelta, "{days}d, {hours}hrs, {minutes}min ago") + ")"

    class Meta:
        model = MovieNightEvent
        # FIX: "movies" appeared twice in the original fields tuple; the duplicate
        # was redundant (DRF emits each declared field once).
        fields = (
            'id', 'motto', 'date', "movies", "isdraft", "date_delta",
            "vote_enabled", "status", "reg_users", 'winning_movie', "rawdate"
        )
# Serializes a user Profile for admin listings (join date, invitor flag, invite key).
class ProfileSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(read_only=True)
join_date = serializers.SerializerMethodField()
is_invitor = serializers.SerializerMethodField()
invitation_key = serializers.SerializerMethodField()
firstlastname = serializers.SerializerMethodField()
def get_firstlastname(self, Profile):
# "First Last" display string from the related auth user.
return Profile.user.first_name + " " + Profile.user.last_name
def get_invitation_key(self, Profile):
return Profile.invitation_key
def get_is_invitor(self, Profile):
return Profile.is_invitor
def get_join_date(self, Profile):
return Profile.user.date_joined
class Meta:
# NOTE(review): this serializer reads Profile attributes, yet Meta.model is
# UserAttendence -- looks like a copy/paste slip. It only works because every
# listed field is a SerializerMethodField or the shared `id`. Confirm whether
# this should be the Profile model (not imported in this file).
model = UserAttendence
fields = (
'id', 'firstlastname', 'is_invitor', "invitation_key", 'join_date'
)
class LocationPermissionSerializer(serializers.ModelSerializer):
    """Full LocationPermission view including the invitation key (admin-facing)."""
    id = serializers.IntegerField(read_only=True)
    location = serializers.SerializerMethodField()
    username = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()
    role = serializers.SerializerMethodField()
    invitation_key = serializers.SerializerMethodField()
    join_date = serializers.SerializerMethodField()
    user_id = serializers.SerializerMethodField()
    has_access = serializers.SerializerMethodField()

    def get_has_access(self, obj):
        # True while the permission has not been revoked.
        return obj.revoked_access == False  # noqa: E712 -- keeps original semantics

    def get_location(self, obj):
        return obj.location.name

    def get_username(self, obj):
        return obj.user.username

    def get_firstlastname(self, obj):
        # "First Last" display string from the related auth user.
        return obj.user.first_name + " " + obj.user.last_name

    def get_role(self, obj):
        # Human-readable label for the `role` choices field.
        return obj.get_role_display()

    def get_invitation_key(self, obj):
        return obj.get_invite_code()

    def get_join_date(self, obj):
        return obj.user.date_joined

    def get_user_id(self, obj):
        return obj.user.id

    class Meta:
        model = LocationPermission
        fields = (
            'id', 'location', 'username', "firstlastname", 'role',
            'invitation_key', 'join_date', 'user_id', 'has_access'
        )
class RestrictedLocationPermissionSerializer(serializers.ModelSerializer):
    # This is called by Ambassadors and does not return invitation keys
    id = serializers.IntegerField(read_only=True)
    location = serializers.SerializerMethodField()
    username = serializers.SerializerMethodField()
    firstlastname = serializers.SerializerMethodField()
    role = serializers.SerializerMethodField()
    join_date = serializers.SerializerMethodField()
    user_id = serializers.SerializerMethodField()
    has_access = serializers.SerializerMethodField()
    revoke_access_hash = serializers.SerializerMethodField()

    def get_revoke_access_hash(self, obj):
        """HTML snippet for the access-toggle button shown in the admin table."""
        if obj.can_invite():
            # Users who can invite cannot have their access toggled here.
            return "<button type='button' class='btn btn-secondary btn-sm' data-toggle='modal' data-target='#no_change_modal'>N/A</button>"
        if not obj.revoked_access:
            return "<a class='btn btn-danger btn-sm' href='/toggle_access_invite/" + obj.rev_access_hash + "' role='button'>Revoke Access</a>"
        return "<a class='btn btn-success btn-sm' href='/toggle_access_invite/" + obj.rev_access_hash + "' role='button'>Grant Access</a>"

    def get_has_access(self, obj):
        # True while the permission has not been revoked.
        return obj.revoked_access == False  # noqa: E712 -- keeps original semantics

    def get_location(self, obj):
        return obj.location.name

    def get_username(self, obj):
        return obj.user.username

    def get_firstlastname(self, obj):
        return obj.user.first_name + " " + obj.user.last_name

    def get_role(self, obj):
        return obj.get_role_display()

    def get_join_date(self, obj):
        return obj.user.date_joined

    def get_user_id(self, obj):
        return obj.user.id

    class Meta:
        model = LocationPermission
        fields = (
            'revoke_access_hash', 'id', 'location', 'username', "firstlastname",
            'role', 'join_date', 'user_id', 'has_access'
        )
# Serializes one UserAttendence row (a user's registration for a movie night),
# including a Boston-localized registration timestamp and badge-rendered toppings.
class UserAttendenceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(read_only=True)
user = serializers.SerializerMethodField()
toppings = serializers.SerializerMethodField()
reg_date = serializers.SerializerMethodField()
firstlastname = serializers.SerializerMethodField()
def get_firstlastname(self, UserAttendence):
# "First Last" display string from the related auth user.
return UserAttendence.user.first_name + " " + UserAttendence.user.last_name
def get_reg_date(self, UserAttendence):
# Registration time converted to America/New_York and pre-formatted for display.
date = UserAttendence.registered_at
boston_tz = pytz.timezone("America/New_York")
fmt = "%B %d, %Y, %I:%M %p %Z%z"
date_boston_time = date.astimezone(boston_tz).strftime(fmt)
return date_boston_time
def get_user(self, UserAttendence):
return UserAttendence.user.username
def get_toppings(self, UserAttendence):
# Space-separated HTML badges, one per selected topping.
return ' '.join([badgify(o.topping.topping, 'primary') for o in UserAttendence.get_toppings()])
class Meta:
model = UserAttendence
fields = (
'id', 'user', 'toppings', 'reg_date', "registration_complete", "movienight", 'firstlastname'
) | 0.456894 | 0.165088 |
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import numpy as np
def gradient_penalty_loss(averaged_output, x_hat):
    """WGAN-GP gradient penalty: mean squared distance of per-sample grad norms from 1."""
    grads = tf.gradients(averaged_output, x_hat)[0]
    # Sum the squared gradients over every non-batch axis, then take the L2 norm.
    reduce_axes = np.arange(1, len(grads.shape))
    l2_norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=reduce_axes))
    return tf.reduce_mean(tf.square(l2_norm - 1))
def discriminator_loss(real_output, fake_output, averaged_output, interpolated_img, lamb_gp=10):
    """Critic loss for WGAN-GP: -E[D(real)] + E[D(fake)] + lamb_gp * gradient penalty."""
    real_term = -tf.reduce_mean(real_output)
    fake_term = tf.reduce_mean(fake_output)
    penalty = gradient_penalty_loss(averaged_output, interpolated_img)
    return real_term + fake_term + penalty * lamb_gp
def generator_loss(fake_output):
    """Generator wants a high critic score, so minimize its negation."""
    mean_score = tf.reduce_mean(fake_output)
    return -mean_score
def reconstrution_loss(loss_object, real_image, recon_image, lamb_rec=10):
    """Weighted cycle-reconstruction loss.

    (Function name typo kept as-is for caller compatibility.)
    """
    base = loss_object(real_image, recon_image)
    return lamb_rec * base
def domain_classification_loss(loss_object, category, output, lamb_cls=1):
    """Weighted domain-classification loss term (StarGAN)."""
    base = loss_object(category, output)
    return lamb_cls * base
def random_weighted_average(inputs):
    """Random per-sample convex combination of two batches (for WGAN-GP interpolation)."""
    first, second = inputs[0], inputs[1]
    alpha = tf.random.uniform((first.shape[0], 1, 1, 1))
    return alpha * first + (1 - alpha) * second
# Save a side-by-side grid of real images and their translations to images/result_<epoch>.png.
# NOTE(review): the hard-coded [0, 1, 0, 1, 0] is presumably the target attribute vector
# passed to the generator -- confirm against the generator's call signature.
def save_imgs(epoch, generator, real_x):
gene_imgs = generator(real_x, [0, 1, 0, 1, 0], training=False)
# Undo the [-1, 1] normalization back to uint8 pixel values.
gene_imgs = ((gene_imgs.numpy() + 1) * 127.5).astype(np.uint8)
real_x = ((real_x.numpy() + 1) * 127.5).astype(np.uint8)
fig = plt.figure(figsize=(8, 16))
tmp = 0
# Layout: 4x2 grid, real image in the left column, generated in the right.
# NOTE(review): subplot index i + 2 + tmp reaches 2*batch, so this assumes a
# batch size of at most 4 -- confirm with the calling training loop.
for i in range(0, real_x.shape[0]):
plt.subplot(4, 2, i + 1 + tmp)
plt.imshow(real_x[i])
plt.axis('off')
plt.subplot(4, 2, i + 2 + tmp)
plt.imshow(gene_imgs[i])
plt.axis('off')
tmp += 1
fig.savefig("images/result_{}.png".format(str(epoch).zfill(5)))
print('Success saving images')
def preprocess_data(file_path, image_label, target_label):
    """Load an image file and return (normalized 128x128 image, source label, target label).

    The labels pass through unchanged; only the image is decoded, resized and
    mapped to [-1, 1]. (Removed commented-out duplicate of process_path's body.)
    """
    image = process_path(file_path)
    image = resize(image, (128, 128))
    image = normalize(image)
    return image, image_label, target_label
def normalize(image):
    """Map uint8 pixel values in [0, 255] to float32 values in [-1, 1]."""
    as_float = tf.cast(image, dtype=tf.float32)
    return (as_float / 127.5) - 1
def resize(image, size):
    """Nearest-neighbour resize to (height, width) = size."""
    height, width = size
    return tf.image.resize(image, [height, width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Read a JPEG from disk and decode it to an image tensor
# (the decoded tensor is returned on the line following this chunk).
def process_path(file_path):
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img)
return img | stargan/utils.py | import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
import numpy as np
def gradient_penalty_loss(averaged_output, x_hat):
gradients = tf.gradients(averaged_output, x_hat)[0]
gradients_sqr = tf.square(gradients)
gradients_sqr_sum = tf.reduce_sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
gradients_l2_norm = tf.sqrt(gradients_sqr_sum)
gradient_penalty = tf.square(gradients_l2_norm - 1)
return tf.reduce_mean(gradient_penalty)
def discriminator_loss(real_output, fake_output, averaged_output, interpolated_img, lamb_gp=10):
real_loss = -tf.reduce_mean(real_output)
fake_loss = tf.reduce_mean(fake_output)
gp_loss = gradient_penalty_loss(averaged_output, interpolated_img)
total_loss = real_loss + fake_loss + gp_loss* lamb_gp
return total_loss
def generator_loss(fake_output):
return -tf.reduce_mean(fake_output)
def reconstrution_loss(loss_object, real_image, recon_image, lamb_rec=10):
return loss_object(real_image, recon_image) * lamb_rec
def domain_classification_loss(loss_object, category, output, lamb_cls=1):
return loss_object(category, output) * lamb_cls
def random_weighted_average(inputs):
alpha = tf.random.uniform((inputs[0].shape[0], 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
def save_imgs(epoch, generator, real_x):
gene_imgs = generator(real_x, [0, 1, 0, 1, 0], training=False)
gene_imgs = ((gene_imgs.numpy() + 1) * 127.5).astype(np.uint8)
real_x = ((real_x.numpy() + 1) * 127.5).astype(np.uint8)
fig = plt.figure(figsize=(8, 16))
tmp = 0
for i in range(0, real_x.shape[0]):
plt.subplot(4, 2, i + 1 + tmp)
plt.imshow(real_x[i])
plt.axis('off')
plt.subplot(4, 2, i + 2 + tmp)
plt.imshow(gene_imgs[i])
plt.axis('off')
tmp += 1
fig.savefig("images/result_{}.png".format(str(epoch).zfill(5)))
print('Success saving images')
def preprocess_data(file_path, image_label, target_label):
# image = tf.io.read_file(file_path)
# image = tf.image.decode_jpeg(image)
image = process_path(file_path)
image = resize(image, (128, 128))
image = normalize(image)
return image, image_label, target_label
def normalize(image):
image = tf.cast(image, dtype=tf.float32)
image = (image / 127.5) - 1
return image
def resize(image, size):
h, w = size
image = tf.image.resize(image, [h, w], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return image
def process_path(file_path):
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img)
return img | 0.72331 | 0.533641 |
import xmlrpclib
from threading import Thread
from SimpleXMLRPCServer import SimpleXMLRPCServer
config = {}
# Build the CCU URL from config, start a local XML-RPC server for CCU event
# callbacks, and register that server with the CCU under the id "wirehome".
# NOTE(review): `server` is a local -- the thread object is not retained, so the
# server cannot be shut down later (see stop()); confirm this is intended.
def initialize():
global ccu_url, _gateway_is_connected
ccu_ip = config["ccu_ip"]
ccu_port = config["ccu_port"]
ccu_url = "http://{ip}:{port}".format(ip=ccu_ip, port=ccu_port)
_gateway_is_connected = False
# configuration of the xml-rpc server used to receive events from the ccu
rpc_ip = config["rpc_ip"]
rpc_port = int(config["rpc_port"])
server = ServerThread(rpc_ip, rpc_port)
server.start()
# register server with ccu
__get_proxy__().init("http://{ip}:{port}".format(ip=rpc_ip, port=rpc_port), "wirehome")
# Lifecycle hook required by the wirehome adapter contract; nothing to do here.
def start():
pass
# NOTE(review): the XML-RPC server started in initialize() is never stopped or
# deregistered here -- confirm whether a clean shutdown is required.
def stop():
pass
def get_device_values(address):
    """Return the full VALUES paramset of the device at `address` from the CCU."""
    return __get_proxy__().getParamset(address, "VALUES")


def get_device_value(address, name):
    """Return one named value of the device at `address` from the CCU."""
    return __get_proxy__().getValue(address, name)


def __get_proxy__():
    """Create a fresh XML-RPC proxy bound to the configured CCU URL."""
    global ccu_url
    return xmlrpclib.ServerProxy(ccu_url)
class XMLRPCHandler:
    """Callback interface the CCU invokes over XML-RPC.

    Every handler must return a unicode string (empty is fine); otherwise the
    CCU stops communicating (see newDevice below).
    """

    def __init__(self):
        pass

    def event(self, interface_id, address, value_key, value):
        # Forward each device state change onto the wirehome message bus.
        self.__fire_event(address, value_key, value)
        return unicode("")

    def listDevices(self, _):
        # We do not track devices locally, so report none.
        return []

    def newDevices(self, _):
        return unicode("")

    def newDevice(self, _):
        # it is necessary to return a unicode string here. otherwise, the ccu just won't answer anymore
        return unicode("")

    def deleteDevices(self, _):
        # FIX: the original signature was `def deleteDevices(self, ):` -- it accepted
        # no argument, so the CCU's one-argument callback would raise TypeError.
        # Accept and ignore the argument, matching the other handlers.
        return unicode("")

    def __fire_event(self, address, property_name, new_value):
        # Publish the state change as a wirehome message-bus event.
        wirehome.message_bus.publish({
            "type": "homematic.ccu.event.device_state_changed",
            "address": address,
            "property": property_name,
            "new": new_value
        })
# Background thread hosting the XML-RPC server that receives CCU event callbacks.
class ServerThread(Thread):
def __init__(self, ip, port):
Thread.__init__(self)
# Request logging disabled to keep the console quiet under frequent CCU events.
self.server = SimpleXMLRPCServer((ip, port), logRequests=False)
self.server.register_instance(XMLRPCHandler())
self.server.register_introspection_functions()
self.server.register_multicall_functions()
# Serves forever; there is no shutdown path (see stop() above).
def run(self):
self.server.serve_forever() | wirehome.services.homematic.ccu/1.0.0/script.py | import xmlrpclib
from threading import Thread
from SimpleXMLRPCServer import SimpleXMLRPCServer
config = {}
def initialize():
global ccu_url, _gateway_is_connected
ccu_ip = config["ccu_ip"]
ccu_port = config["ccu_port"]
ccu_url = "http://{ip}:{port}".format(ip=ccu_ip, port=ccu_port)
_gateway_is_connected = False
# configuration of the xml-rpc server used to receive events from the ccu
rpc_ip = config["rpc_ip"]
rpc_port = int(config["rpc_port"])
server = ServerThread(rpc_ip, rpc_port)
server.start()
# register server with ccu
__get_proxy__().init("http://{ip}:{port}".format(ip=rpc_ip, port=rpc_port), "wirehome")
def start():
pass
def stop():
pass
def get_device_values(address):
return __get_proxy__().getParamset(address, "VALUES")
def get_device_value(address, name):
return __get_proxy__().getValue(address, name)
def __get_proxy__():
global ccu_url
return xmlrpclib.ServerProxy(ccu_url)
class XMLRPCHandler:
def __init__(self):
pass
def event(self, interface_id, address, value_key, value):
self.__fire_event(address, value_key, value)
return unicode("")
def listDevices(self, _):
return []
def newDevices(self, _):
return unicode("")
def newDevice(self, _):
# it is necessary to return a unicode string here. otherwise, the ccu just won't answer anymore
return unicode("")
def deleteDevices(self, ):
return unicode("")
def __fire_event(self, address, property_name, new_value):
wirehome.message_bus.publish({
"type": "homematic.ccu.event.device_state_changed",
"address": address,
"property": property_name,
"new": new_value
})
class ServerThread(Thread):
def __init__(self, ip, port):
Thread.__init__(self)
self.server = SimpleXMLRPCServer((ip, port), logRequests=False)
self.server.register_instance(XMLRPCHandler())
self.server.register_introspection_functions()
self.server.register_multicall_functions()
def run(self):
self.server.serve_forever() | 0.485844 | 0.095771 |
import torndb
import logging
import json
import environment
# Shared torndb MySQL connection used by all persistence classes in this module.
COMPANY_SERVICE =\
torndb.Connection(
'mysql',
'company_service',
user=environment.get_user(),
# NOTE(review): `<PASSWORD>()` is residue from secret scrubbing -- this line does
# not parse. Restore the original call (presumably environment.get_password()).
password=<PASSWORD>(),
)
# Close the shared database connection.
def release():
COMPANY_SERVICE.close()
class Crawler(object):
    '''
    Persistence helpers for crawler rows (contribute_crawler.crawler).

    All methods are static and go through the shared COMPANY_SERVICE connection.
    Column/identifier names interpolated into SQL must come from trusted code;
    values are always passed as query parameters.
    '''

    @staticmethod
    def select(crawler_id):
        '''Fetch a single crawler row by id.'''
        # FIX: parameterized query -- the original interpolated crawler_id
        # directly into the SQL string (injection-prone).
        sql = (
            'SELECT * '
            'FROM contribute_crawler.crawler '
            'WHERE crawler_id = %s'
        )
        return COMPANY_SERVICE.get(sql, crawler_id)

    @staticmethod
    def _update(index, value_dict, search_column='crawler_id'):
        '''Update the given columns of one crawler row.

        index         -- value matched against search_column
        value_dict    -- mapping of column name -> new value
        search_column -- column used in the WHERE clause (trusted identifier)
        '''
        # FIX 1: assignments are comma-separated now; the original joined them
        # with spaces, producing invalid SQL for multi-column updates.
        # FIX 2: values are bound as parameters instead of hand-quoted strings.
        assignments = []
        params = []
        for key, value in value_dict.iteritems():
            assignments.append('{key} = %s'.format(key=key))
            params.append(value)
        sql = (
            'UPDATE contribute_crawler.crawler '
            'SET {assignments} '
            'WHERE {search_column} = %s'
        ).format(
            assignments=', '.join(assignments),
            search_column=search_column,
        )
        params.append(index)
        COMPANY_SERVICE.execute(sql, *params)

    @staticmethod
    def status(crawler_id, new_status, text=None, search_column='crawler_id'):
        '''Set crawler_status, optionally appending ":text" detail.'''
        # Allowed status values.
        _status = [
            'error',
            'finished',
            'pending',
            'crawling',
        ]
        # Reject unknown states (logged, not raised, to keep callers running).
        if new_status not in _status:
            logging.error('Error: '+new_status+' not defined in Crawler update')
            return
        # Append optional detail text to the status.
        if text is not None:
            new_status = ''.join((new_status, ':', text))
        value_dict = {'crawler_status': new_status}
        Crawler._update(crawler_id, value_dict, search_column=search_column)

    @staticmethod
    def register(crawler_id, container):
        '''Record the docker container id backing this crawler job.'''
        container_id = container['Id']
        value_dict = {'crawler_jobid': container_id}
        Crawler._update(crawler_id, value_dict)
class Model(object):
'''
Persistence object for models.
'''
@staticmethod
def select(model_id):
'''
Load one model row by id.
'''
# NOTE(review): model_id is interpolated straight into the SQL string --
# injection-prone if it can come from untrusted input; prefer a %s parameter.
# (The COMPANY_SERVICE.get(sql) call follows on the next line of the file.)
sql = (
'SELECT * '
'FROM model '
'WHERE model_id = {model_id}'
).format(model_id=model_id)
return COMPANY_SERVICE.get(sql) | database.py | import torndb
import logging
import json
import environment
COMPANY_SERVICE =\
torndb.Connection(
'mysql',
'company_service',
user=environment.get_user(),
password=<PASSWORD>(),
)
def release():
COMPANY_SERVICE.close()
class Crawler(object):
'''
爬虫的持久化对象
'''
@staticmethod
def select(crawler_id):
'''
获取爬虫信息
'''
sql =\
(
'SELECT * '
'FROM contribute_crawler.crawler '
'WHERE crawler_id = {crawler_id}'
).format(crawler_id=crawler_id)
return COMPANY_SERVICE.get(sql)
@staticmethod
def _update(index, value_dict, search_column='crawler_id'):
'''
更新爬虫信息
'''
value_sql = ""
#生成更新语句
for key, value in value_dict.iteritems():
#字符串需要用单引号包围
if isinstance(value, basestring):
value = ''.join(('\'', value, '\''))
value_sql += '{key} = {value} '.format(key=key, value=value)
sql =\
(
'UPDATE contribute_crawler.crawler '
'SET {value_sql} '
'WHERE {search_column} = {index}'
).format(
value_sql=value_sql,
search_column=search_column,
index=index,
)
COMPANY_SERVICE.execute(sql)
@staticmethod
def status(crawler_id, new_status, text=None, search_column='crawler_id'):
'''
更新爬虫状态
'''
#状态列表
_status = [
'error',
'finished',
'pending',
'crawling',
]
#检查新状态
if new_status not in _status:
logging.error('Error: '+new_status+' not defined in Crawler update')
return
#状态附带信息的添加
if text is not None:
new_status = ''.join((new_status, ':', text))
value_dict = {'crawler_status': new_status}
Crawler._update(crawler_id, value_dict, search_column=search_column)
@staticmethod
def register(crawler_id, container):
'''
生成新的爬虫任务
'''
container_id = container['Id']
value_dict = {'crawler_jobid': container_id}
Crawler._update(crawler_id, value_dict)
class Model(object):
'''
模型持久化对象
'''
@staticmethod
def select(model_id):
'''
读取模型
'''
sql = (
'SELECT * '
'FROM model '
'WHERE model_id = {model_id}'
).format(model_id=model_id)
return COMPANY_SERVICE.get(sql) | 0.285671 | 0.074905 |
import textwrap
from exceptions import Error
class TextTable(object):
    """Render rows of values as aligned plain-text columns."""

    def __init__(self, field_names, **kwargs):
        '''
        Arguments:
            field_names  - list or tuple of field names
            vertical_str - vertical separator between columns (keyword option)

        Raises Error for any unsupported keyword option.
        '''
        self._field_names = field_names
        self._rows = []
        # [enabled, sequence column title, start number] -- see set_sequence().
        self._sequence = [False, '', 0]
        self._max_widths = {}
        self._vertical_str = ' '
        self._padding_width = 0
        supported_options = ('vertical_str',)
        for key, value in kwargs.items():
            if key not in supported_options:
                raise Error('unsupported option: ' + key)
            setattr(self, '_' + key, value)

    def set_sequence(self, enable, field_name='Seq', start=1):
        '''
        Set whether each row gets an automatic sequence number column.

        Arguments:
            enable     - whether the sequence column is shown
            field_name - header of the sequence column
            start      - first sequence number
        '''
        self._sequence = [enable, field_name, start]

    def set_max_width(self, field_name, max_width):
        '''
        Cap the width of the specified column. If the cap is shorter than the
        field name itself, the field name's length wins. Cell values longer
        than the cap are wrapped onto multiple lines.
        '''
        self._max_widths[field_name] = max_width

    def _format_rows(self, rows):
        '''Stringify every cell; prepend the sequence number when enabled.'''
        formatted_rows = []
        for offset, row in enumerate(rows):
            formatted = [str(cell) for cell in row]
            if self._sequence[0]:
                formatted.insert(0, str(offset + self._sequence[2]))
            formatted_rows.append(formatted)
        return formatted_rows

    def _calculate_widths(self, field_names, rows):
        '''Column width = max over header and all (possibly multi-line) cells, honoring caps.'''
        widths = [len(name) for name in field_names]
        for row in rows:
            for col, value in enumerate(row):
                longest = max(len(piece) for piece in value.split('\n'))
                cap = self._max_widths.get(field_names[col])
                if cap is not None:
                    widths[col] = max(widths[col], min(longest, cap))
                else:
                    widths[col] = max(widths[col], longest)
        return widths

    def _get_row_string(self, field_names, row, widths):
        '''
        Format one logical row, which may span several physical lines when
        cells wrap. NOTE: an empty cell produces no wrapped lines, so its
        vertical separator is omitted on that row (pre-existing behavior).
        '''
        # FIX: the original reused `index` for both the column loop and the
        # wrapped-line loop (shadowing), and unpacked an unused `field`.
        lines = []
        total_width = 0
        padding = self._padding_width * ' '
        for col_idx, (value, width) in enumerate(zip(row, widths)):
            last_column = (col_idx == len(row) - 1)
            wrapped = []
            for part in value.split('\n'):
                wrapped += textwrap.wrap(part, width)
            for line_idx, line in enumerate(wrapped):
                if len(lines) <= line_idx:
                    # First content on this physical line: pad out to this column.
                    column = total_width * ' ' + line + (width - len(line)) * ' '
                    lines.append(padding + column + padding)
                    if not last_column:
                        lines[line_idx] += self._vertical_str
                else:
                    # Extend an existing physical line up to this column.
                    column = (total_width - len(lines[line_idx])) * ' ' + line + (width - len(line)) * ' '
                    lines[line_idx] += padding + column + padding
                    if not last_column:
                        lines[line_idx] += self._vertical_str
            total_width += width + self._padding_width * 2 + len(self._vertical_str)
        return '\n'.join(lines)

    def to_string(self, ignore_field_names=False):
        '''Return the whole formatted table as one string.'''
        return '\n'.join(self.to_lines(ignore_field_names))

    def to_lines(self, ignore_field_names=False):
        '''Return the formatted table as a list of row strings (header first unless suppressed).'''
        if self._sequence[0]:
            field_names = [self._sequence[1]] + list(self._field_names)
        else:
            field_names = self._field_names
        formatted_rows = self._format_rows(self._rows)
        widths = self._calculate_widths(field_names, formatted_rows)
        lines = []
        if not ignore_field_names:
            lines.append(self._get_row_string(field_names, field_names, widths))
        for row in formatted_rows:
            lines.append(self._get_row_string(field_names, row, widths))
        return lines

    def add_row(self, row):
        '''
        Append one row of cell values; its length must match field_names.
        None cells are stored as empty strings.
        '''
        if len(row) != len(self._field_names):
            raise Error("Row has different number of values with field names, (row) %d!=%d (field)"
                        % (len(row), len(self._field_names)))
        self._rows.append([cell if cell is not None else '' for cell in row])

    def add_rows(self, rows):
        '''Append several rows at once.'''
        for row in rows:
            self.add_row(row)
# Manual smoke test: build a small demo table and print it
# (the print(table.to_string()) call follows on the next line of the file).
if __name__ == "__main__":
table = TextTable(['Name', 'Age', 'Gender', 'Desc', 'Nationality'], vertical_str=' ')
table.add_row(('You', 10, 'male', 'You are a boy', 'China'))
table.add_row(('Me', 100, 'male', 'I am an old man', 'Japan'))
table.add_row(('She', 18, 'female', 'She is a pretty girl', 'America'))
table.add_row(('He', 1, 'male', 'He is a little baby', 'British'))
#table.set_sequence(True)
print(table.to_string()) | src/texttable.py | import textwrap
from exceptions import Error
class TextTable(object):
def __init__(self, field_names, **kwargs):
'''
Arguments:
field_names - list or tuple of field names
vertical_str - vertical separator betwwen each columns
'''
self._field_names = field_names
self._rows = []
self._sequence = [False, '', 0]
self._max_widths = {}
self._vertical_str = ' '
self._padding_width = 0
supported_options = ('vertical_str',)
for key, value in kwargs.items():
if key not in supported_options:
raise Error('unsupported option: ' + key)
setattr(self, '_'+key, value)
def set_sequence(self, enable, field_name='Seq', start=1):
'''
set whether need sequence for each row.
Arguments:
enable - whether need sequence for each row
field_name - the name of sequence field
start - the start number of sequence
'''
self._sequence = [enable, field_name, start]
def set_max_width(self, field_name, max_width):
'''
set max width of sepcified column, if max width is shorter than the length of field name,
the max width will be the length of field name
Arguments:
field_name - specify the field
max_width - max width of the specified field
if the actual value exceed the max width, will be split in multiple lines
'''
self._max_widths[field_name] = max_width
def _format_rows(self, rows):
'''
convert each column to string
'''
formatted_rows = []
for index, row in enumerate(rows):
formatted_row = [str(col) for col in row]
if self._sequence[0]:
formatted_row.insert(0, str(index+self._sequence[2]))
formatted_rows.append(formatted_row)
return formatted_rows
def _calculate_widths(self, field_names, rows):
'''
calculate max width of each column
'''
widths = [len(field) for field in field_names]
for row in rows:
for index, value in enumerate(row):
lines = value.split('\n')
max_len = max([len(line) for line in lines])
field_name = field_names[index]
if field_name in self._max_widths:
widths[index] = max(widths[index], min(max_len, self._max_widths[field_name]))
else:
widths[index] = max(widths[index], max_len)
return widths
def _get_row_string(self, field_names, row, widths):
'''
get formatted row string
'''
lines = []
total_width = 0
padding = self._padding_width * ' '
for index, field, value, width, in zip(range(0, len(row)), field_names, row, widths):
last_column = True if index == len(row) - 1 else False
col_lines = value.split('\n')
final_col_lines = []
for line in col_lines:
final_col_lines += textwrap.wrap(line, width)
for index, line in enumerate(final_col_lines):
if len(lines) <= index:
column = total_width*' ' + line + (width-len(line))*' '
lines.append(padding + column + padding)
if not last_column:
lines[index] += self._vertical_str
else:
column = (total_width-len(lines[index]))*' ' + line + (width-len(line))*' '
lines[index] += padding + column + padding
if not last_column:
lines[index] += self._vertical_str
total_width += width + self._padding_width*2 + len(self._vertical_str)
return '\n'.join(lines)
def to_string(self, ignore_field_names=False):
'''
get formatted result
'''
return '\n'.join(self.to_lines(ignore_field_names))
def to_lines(self, ignore_field_names=False):
'''
get formatted result
'''
field_names = [self._sequence[1]] + list(self._field_names) if self._sequence[0] else self._field_names
formatted_rows = self._format_rows(self._rows)
widths = self._calculate_widths(field_names, formatted_rows)
lines = []
if not ignore_field_names:
lines.append(self._get_row_string(field_names, field_names, widths))
for row in formatted_rows:
lines.append(self._get_row_string(field_names, row, widths))
return lines
def add_row(self, row):
'''
Arguments:
row - list or tuple of field values
'''
if len(row) != len(self._field_names):
raise Error("Row has different number of values with field names, (row) %d!=%d (field)" \
% (len(row), len(self._field_names)))
new_row = [col if col is not None else '' for col in row]
self._rows.append(new_row)
def add_rows(self, rows):
for row in rows:
self.add_row(row)
if __name__ == "__main__":
table = TextTable(['Name', 'Age', 'Gender', 'Desc', 'Nationality'], vertical_str=' ')
table.add_row(('You', 10, 'male', 'You are a boy', 'China'))
table.add_row(('Me', 100, 'male', 'I am an old man', 'Japan'))
table.add_row(('She', 18, 'female', 'She is a pretty girl', 'America'))
table.add_row(('He', 1, 'male', 'He is a little baby', 'British'))
#table.set_sequence(True)
print(table.to_string()) | 0.568655 | 0.23292 |
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import os
import shutil
import sys
import time
class OutputWrangler:
    """
    This is used in place of an output file when forking to trivially
    parallelize computations.

    For example, if you have 100 jobs to do and you want to fork into
    10 processes, and have the output written to output_file.txt, call:

        fork_out = OutputWrangler('output_file.txt', n_forks=10, n_jobs=100)

    (FIX: the original docstring was missing the closing quote on the filename.)

    To then cause a fork, call

        my_fork, my_jobs = fork_out.fork()

    Then process the jobs, writing output with fork_out.write(...).
    The output from each process will be written to a unique file.
    The files will be combined once all forks have completed their jobs.
    """

    def __init__(self, filename=None, n_forks=1, n_jobs=1, force=False):
        """
        filename -- base name of the merged output; None selects dummy mode
                    (per-fork temp files only, no final merged file).
        n_forks  -- desired number of worker processes (capped at n_jobs,
                    forced to 1 in interactive interpreters).
        n_jobs   -- total number of jobs to distribute across forks.
        force    -- remove stale .done files instead of refusing to run.

        Raises RuntimeError when stale .done files exist and cannot be cleared.
        """
        self.dummy = (filename is None)
        if self.dummy:
            filename = 'mobyfork'
        # Forking from an interactive interpreter causes confusing duplicated sessions.
        if n_forks > 1 and sys.flags.interactive:
            print('Interactive mode detected, suppressing fork.')
            n_forks = 1
        if n_jobs < n_forks:
            n_forks = n_jobs
        self.tmp_files = [filename + '_%i.tmp' % i for i in range(n_forks)]
        self.done_files = [filename + '_%i.done' % i for i in range(n_forks)]
        self.filename = filename
        self.children = None
        ok = True
        for f in self.done_files:
            if os.path.exists(f):
                if force:
                    print('Removing %s' % f)
                    try:
                        os.remove(f)
                    except OSError:
                        # FIX: was a bare `except:` (also caught KeyboardInterrupt).
                        print('Could not remove %s' % f)
                        ok = False
                else:
                    print('File %s exists, remove to procede.' % f)
                    ok = False
        if not ok:
            raise RuntimeError
        self.n_forks = n_forks
        self.n_jobs = n_jobs

    def fork(self, fork_index=None):
        """
        Fork into multiple child processes.

        If you do not want to fork, but want to use the object within
        an existing forked process, pass fork_index=my_fork (where
        my_fork is an index from 0 to n_forks-1).
        (FIX: the original docstring wrongly said n_jobs-1.)

        Returns (fork_index, iterable of this fork's job indices).
        """
        if fork_index is None:
            self.children = []
            for i in range(1, self.n_forks):
                pid = os.fork()
                if pid == 0:
                    # Child process: claim slot i and its share of the jobs.
                    print('Spawning child process %i' % i)
                    self._set_index(i)
                    return i, job_indices(self.n_jobs, self.n_forks, i)
                self.children.append(pid)
            fork_index = 0
        self._set_index(fork_index)
        return fork_index, job_indices(self.n_jobs, self.n_forks, fork_index)

    def _set_index(self, index):
        # Remember which fork we are and open our private output file.
        self.index = index
        if len(self.tmp_files) > 0:
            self.fout = open(self.tmp_files[index], 'w')

    def write(self, data):
        return self.fout.write(data)

    def flush(self):
        return self.fout.flush()

    def close(self):
        """Finish this fork's output; fork 0 additionally waits, reaps children and merges."""
        # Close the temporary file explicitly (FIX: was only `del self.fout`,
        # relying on refcounting to flush/close) and rename it to mark completion.
        self.fout.close()
        del self.fout
        os.rename(self.tmp_files[self.index], self.done_files[self.index])
        # If not main process, exit.
        if self.index != 0:
            return
        # Main process waits for all forks to complete.
        # (FIX: removed `first_time = True,` -- an unused variable that was
        # accidentally a 1-tuple because of the trailing comma.)
        not_done = [i for i in self.done_files if not os.path.exists(i)]
        if len(not_done) > 0:
            print('Blocking for all threads to complete...')
        sleepage = 1
        while len(not_done) > 0:
            time.sleep(sleepage)
            sleepage = min(sleepage + 1, 10)
            not_done = [i for i in self.done_files if not os.path.exists(i)]
        # Zombie avoidance protocol
        if self.children is not None:
            for pid in self.children:
                os.waitpid(pid, 0)
        if not self.dummy:
            print('Assembling output to %s' % self.filename)
            # FIX: context managers close the merged file and every part
            # deterministically (the original leaked the read handles).
            with open(self.filename, 'w') as fout:
                for infile in self.done_files:
                    with open(infile) as fin:
                        shutil.copyfileobj(fin, fout)
        for infile in self.done_files:
            os.remove(infile)

    def __del__(self):
        # Safety net: finish up if the caller forgot to close().
        if hasattr(self, 'fout'):
            self.close()

    def cleanup(self):
        """Remove any leftover .tmp/.done files from a crashed or aborted run."""
        for d in self.done_files + self.tmp_files:
            if os.path.exists(d):
                os.remove(d)
# Evenly partition range(n_jobs) across n_forks contiguous slices and return
# the slice belonging to fork `fork_index`
# (the second range bound continues on the line following this chunk).
def job_indices(n_jobs, n_forks, fork_index):
if n_jobs == 0:
return []
return range((fork_index)*n_jobs//n_forks,
(fork_index+1)*n_jobs//n_forks) | python/util/fork.py | from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import os
import shutil
import sys
import time
class OutputWrangler:
    """
    This is used in place of an output file when forking to trivially
    parallelize computations.

    For example, if you have 100 jobs to do and you want to fork into
    10 processes, and have the output written to output_file.txt, call:

        fork_out = OutputWrangler('output_file.txt', n_forks=10, n_jobs=100)

    To then cause a fork, call

        my_fork, my_jobs = fork_out.fork()

    Then process the jobs, writing output with fork_out.write(...).
    The output from each process will be written to a unique file.
    The files will be combined once all forks have completed their
    jobs.
    """
    def __init__(self, filename=None, n_forks=1, n_jobs=1, force=False):
        """Set up per-fork temp/done filenames and validate a run can start.

        filename: final output path; None creates a "dummy" wrangler whose
            parts are written under the name 'mobyfork' but never merged.
        n_forks: number of processes to fork into (clamped to n_jobs, and
            forced to 1 in interactive interpreters).
        n_jobs: total number of jobs to distribute across forks.
        force: remove stale .done files instead of refusing to start.

        Raises RuntimeError if stale .done files exist and cannot be (or
        were not asked to be) removed.
        """
        self.dummy = (filename is None)
        if self.dummy:
            filename = 'mobyfork'
        if n_forks > 1 and sys.flags.interactive:
            print('Interactive mode detected, suppressing fork.')
            n_forks = 1
        if n_jobs < n_forks:
            n_forks = n_jobs
        self.tmp_files = [filename + '_%i.tmp' % i for i in range(n_forks)]
        self.done_files = [filename + '_%i.done' % i for i in range(n_forks)]
        self.filename = filename
        self.children = None
        ok = True
        for f in self.done_files:
            if os.path.exists(f):
                if force:
                    print('Removing %s' % f)
                    try:
                        os.remove(f)
                    except OSError:
                        # Catch only filesystem errors; the original bare
                        # `except:` would also swallow KeyboardInterrupt.
                        print('Could not remove %s' % f)
                        ok = False
                else:
                    print('File %s exists, remove to procede.' % f)
                    ok = False
        if not ok:
            raise RuntimeError(
                'stale .done files block this run; remove them or pass force=True')
        self.n_forks = n_forks
        self.n_jobs = n_jobs

    def fork(self, fork_index=None):
        """
        Fork into multiple child processes.
        If you do not want to fork, but want to use the object within
        an existing forked process, pass fork_index=my_fork (where
        my_fork is an index from 0 to n_jobs-1).

        Returns (fork_index, iterable of job indices for that fork).
        """
        if fork_index is None:
            self.children = []
            for i in range(1, self.n_forks):
                pid = os.fork()
                if pid == 0:
                    # Child process: adopt slot i and return its job share.
                    print('Spawning child process %i' % i)
                    self._set_index(i)
                    return i, job_indices(self.n_jobs, self.n_forks, i)
                self.children.append(pid)
            fork_index = 0
        self._set_index(fork_index)
        return fork_index, job_indices(self.n_jobs, self.n_forks, fork_index)

    def _set_index(self, index):
        """Record this process's fork slot and open its temp output file."""
        self.index = index
        if len(self.tmp_files) > 0:
            self.fout = open(self.tmp_files[index], 'w')

    def write(self, data):
        """Write `data` to this fork's temporary output file."""
        return self.fout.write(data)

    def flush(self):
        """Flush this fork's temporary output file."""
        return self.fout.flush()

    def close(self):
        """Finalize this fork's output and, in the parent, assemble all parts.

        Children (index != 0) just rename their part and return; the parent
        waits for every .done file, reaps children, merges the parts into
        self.filename (unless dummy), and deletes them.
        """
        # Explicitly flush/close before renaming; the original plain
        # `del self.fout` relied on refcount finalization and could lose
        # buffered data on non-CPython runtimes.
        self.fout.close()
        # Drop the attribute so __del__ won't call close() twice.
        del self.fout
        os.rename(self.tmp_files[self.index], self.done_files[self.index])
        # If not main process, exit.
        if self.index != 0:
            return
        # Main process waits for all forks to complete.
        not_done = [f for f in self.done_files if not os.path.exists(f)]
        if len(not_done) > 0:
            print('Blocking for all threads to complete...')
        sleepage = 1
        while len(not_done) > 0:
            time.sleep(sleepage)
            sleepage = min(sleepage + 1, 10)  # back off up to 10s per poll
            not_done = [f for f in self.done_files if not os.path.exists(f)]
        # Zombie avoidance protocol: reap any children we forked.
        if self.children is not None:
            for pid in self.children:
                os.waitpid(pid, 0)
        if not self.dummy:
            print('Assembling output to %s' % self.filename)
            # Context managers close every part file promptly; the original
            # leaked each `open(infile)` handle and never closed fout.
            with open(self.filename, 'w') as fout:
                for infile in self.done_files:
                    with open(infile) as fin:
                        shutil.copyfileobj(fin, fout)
        for infile in self.done_files:
            os.remove(infile)

    def __del__(self):
        # Safety net: if the user never called close(), do it at GC time.
        if hasattr(self, 'fout'):
            self.close()

    def cleanup(self):
        """Delete any leftover .done/.tmp part files from a previous run."""
        for d in self.done_files + self.tmp_files:
            if os.path.exists(d):
                os.remove(d)
def job_indices(n_jobs, n_forks, fork_index):
    """Return the contiguous range of job indices owned by fork `fork_index`.

    Jobs [0, n_jobs) are split as evenly as possible across n_forks.
    """
    if n_jobs == 0:
        return []
    # NOTE(review): the trailing "| ... |" tokens below are table residue
    # from the dataset dump this file was extracted from, not code.
    return range((fork_index)*n_jobs//n_forks,
                 (fork_index+1)*n_jobs//n_forks) | 0.338186 | 0.165357 |
import tensorflow as tf
from MemoryNetwork import MemoryNetwork
import babi_dataset_utils as bb
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
import errno
flags = tf.app.flags
# dataset configs
flags.DEFINE_string("dataset_selector", "babi", "dataset selector: 'babi' or 'penn' [babi]")
flags.DEFINE_string("data_dir", 'datasets/bAbI/tasks_1-20_v1-2/en/', "Data directory [datasets/bAbI/tasks_1-20_v1-2/en/]")
flags.DEFINE_boolean("babi_joint", False, "run jointly on all bAbI tasks, if applicable [False]")
flags.DEFINE_integer("babi_task_id", 1, "bAbI task to train on, if applicable [1]")
flags.DEFINE_float("validation_frac", 0.1, "train-validation split [0.1]")
flags.DEFINE_string("vocab_dir", 'vocab/', "directory to persist vocab-int dictionary [vocab/]")
flags.DEFINE_string("vocab_filename", "", "optional flag to allow us to load a persisted vocab dictionary from a pkl file")
flags.DEFINE_string("max_sentence_len_filename", "", "optional flag to allow us to load a persisted max_sentence_len value from a pkl file")
# checkpoint configs
flags.DEFINE_string("checkpoint_dir", "/Users/lucaslingle/git/memn2n/checkpoints/", "checkpoints path [/Users/lucaslingle/git/memn2n/checkpoints/]")
flags.DEFINE_string("model_name", "MemN2N", "a filename prefix for checkpoints [MemN2N]")
flags.DEFINE_string("mode", 'train', "train or test [train]")
flags.DEFINE_boolean("load", False, "load from latest checkpoint [False]")
flags.DEFINE_integer("save_freq_epochs", 5, "number of epochs between checkpoints [5]")
# training configs
flags.DEFINE_integer("batch_size", 32, "batch size [32]")
flags.DEFINE_integer("epochs", 100, "number of epochs [100]")
flags.DEFINE_float("initial_learning_rate", 0.01, "initial learning rate [0.01]")
flags.DEFINE_float("gradient_clip", 40, "maximum gradient norm [40]")
flags.DEFINE_float("gradient_noise_scale", 0.001, "stddev for adding gaussian noise to gradient [0.001]")
flags.DEFINE_float("anneal_const", 0.5, "annealing constant [0.5]")
flags.DEFINE_integer("anneal_epochs", 25, "number of epochs per annealing [25]")
# model configs
flags.DEFINE_integer("number_of_memories", 50, "memory size [50]")
flags.DEFINE_integer("embedding_dim", 20, "word embedding dimension [20]")
flags.DEFINE_integer("number_of_hops", 3, "number of hops [3]")
flags.DEFINE_boolean("linear_start", False, "start with linear attention (as opposed to softmaxed) [False]")
flags.DEFINE_boolean("position_encoding", True, "position encoding [True]")
flags.DEFINE_string("weight_tying_scheme", 'adj', "weight tying scheme: 'adj' or 'rnnlike' [adj]")
flags.DEFINE_boolean("random_noise", False, "random noise (insert empty memories to regularize temporal embedding) [False]")
flags.DEFINE_string("word_emb_initializer", 'random_normal_initializer', "weight initializer class name for word embedding weights. [random_normal_initializer]")
flags.DEFINE_float("word_emb_init_scale", 0.1, "value for stddev or gain argument of the word_emb_initializer [0.1]")
flags.DEFINE_string("temporal_emb_initializer", 'random_normal_initializer', "weight initializer class name for temporal embedding weights. [random_normal_initializer]")
flags.DEFINE_float("temporal_emb_init_scale", 0.1, "value for stddev or gain argument of the temporal_emb_initializer [0.1]")
FLAGS = flags.FLAGS
def get_vocab_filename_from_settings(FLAGS):
    """Return the vocab pickle filename: the explicit --vocab_filename flag
    if set, otherwise a name derived from dataset / corpus dir / task id so
    that different configurations persist to different files."""
    if len(FLAGS.vocab_filename) > 0:
        candidate_vocab_filename = FLAGS.vocab_filename
        return candidate_vocab_filename
    candidate_vocab_filename = 'vocab_{}_{}_{}.pkl'.format(
        FLAGS.dataset_selector,
        FLAGS.data_dir.strip("/").split("/")[-1],
        'joint' if FLAGS.babi_joint else 'task_{}'.format(FLAGS.babi_task_id)
    )
    return candidate_vocab_filename
def get_max_sentence_len_filename_from_settings(FLAGS):
    """Return the max_sentence_len pickle filename (mirrors the vocab
    filename scheme: explicit flag wins, else derived from settings)."""
    if len(FLAGS.max_sentence_len_filename) > 0:
        candidate_max_sentence_len_filename = FLAGS.max_sentence_len_filename
        return candidate_max_sentence_len_filename
    candidate_max_sentence_len_filename = 'max_sentence_len_{}_{}_{}.pkl'.format(
        FLAGS.dataset_selector,
        FLAGS.data_dir.strip("/").split("/")[-1],
        'joint' if FLAGS.babi_joint else 'task_{}'.format(FLAGS.babi_task_id)
    )
    return candidate_max_sentence_len_filename
def compute_and_save_babi_vocab_meta(FLAGS, vocab_save_fp, max_sentence_len_save_fp):
    """Derive the vocab dict and max_sentence_len from the raw bAbI data
    and persist both as pickle files at the given paths."""
    # compute and save a vocab dictionary as a pickle file
    babi = bb.bAbI()
    # Passing vocab_dict=None / max_sentence_len=None makes prepare_* build
    # them from the data as a side effect; the returned splits are discarded.
    if FLAGS.babi_joint:
        _, _, _ = babi.prepare_data_for_joint_tasks(
            FLAGS.data_dir, FLAGS.validation_frac, vocab_dict=None, max_sentence_len=None)
    else:
        _, _, _ = babi.prepare_data_for_single_task(
            FLAGS.data_dir, FLAGS.babi_task_id, FLAGS.validation_frac, vocab_dict=None, max_sentence_len=None)
    babi.save_vocab_dict_to_file(data=babi.vocab_dict, fp=vocab_save_fp)
    babi.save_max_sentence_len_to_file(data=babi.max_sentence_len, fp=max_sentence_len_save_fp)
    return
def main():
if FLAGS.dataset_selector == 'babi':
babi = bb.bAbI()
learning_rate = FLAGS.initial_learning_rate
candidate_vocab_filename = get_vocab_filename_from_settings(FLAGS)
candidate_max_sentence_len_filename = get_max_sentence_len_filename_from_settings(FLAGS)
candidate_vocab_fp = os.path.join(FLAGS.vocab_dir, candidate_vocab_filename)
vocab_fp_exists = os.path.exists(candidate_vocab_fp)
candidate_max_sentence_len_fp = os.path.join(FLAGS.vocab_dir, candidate_max_sentence_len_filename)
max_sentence_len_fp_exists = os.path.exists(candidate_max_sentence_len_fp)
# must compute and persist vocab metadata if we aren't loading anything
if not FLAGS.load:
compute_and_save_babi_vocab_meta(FLAGS, candidate_vocab_fp, candidate_max_sentence_len_fp)
if FLAGS.load and not vocab_fp_exists:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), candidate_vocab_fp)
if FLAGS.load and not max_sentence_len_fp_exists:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), candidate_max_sentence_len_fp)
with tf.Graph().as_default() as graph:
# load our vocab dictionary
vocab_dict = babi.load_vocab_dict_from_file(candidate_vocab_fp)
max_sentence_len = babi.load_max_sentence_len_from_file(candidate_max_sentence_len_fp)
# prepare the data
if FLAGS.babi_joint:
train, val, test = babi.prepare_data_for_joint_tasks(
FLAGS.data_dir, FLAGS.validation_frac,
vocab_dict=vocab_dict, max_sentence_len=max_sentence_len
)
else:
train, val, test = babi.prepare_data_for_single_task(
FLAGS.data_dir, FLAGS.babi_task_id, FLAGS.validation_frac,
vocab_dict=vocab_dict, max_sentence_len=max_sentence_len
)
print("len(vocab_dict) is {}, and max_sentence_len is {}".format(
len(vocab_dict), max_sentence_len
))
# instantiate the model
model = MemoryNetwork(vocab_size=len(vocab_dict),
embedding_dim=FLAGS.embedding_dim,
number_of_hops=FLAGS.number_of_hops,
batch_size=FLAGS.batch_size,
number_of_memories=FLAGS.number_of_memories,
max_sentence_len=babi.max_sentence_len,
gradient_clip=FLAGS.gradient_clip,
gradient_noise_scale=FLAGS.gradient_noise_scale,
weight_tying_scheme=FLAGS.weight_tying_scheme,
position_encoding=FLAGS.position_encoding,
word_emb_initializer=FLAGS.word_emb_initializer,
word_emb_init_scale=FLAGS.word_emb_init_scale,
temporal_emb_initializer=FLAGS.temporal_emb_initializer,
temporal_emb_init_scale=FLAGS.temporal_emb_init_scale)
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
if FLAGS.load:
print("attempting to restore from {}".format(FLAGS.checkpoint_dir))
model.load(sess, FLAGS.checkpoint_dir)
word_emb_varnames_in_restored_list_order = list(map(lambda v: v.name, model.embedding_matrices['word']))
print(word_emb_varnames_in_restored_list_order)
nr_training_examples = len(train)
nr_validation_examples = len(val)
nr_test_examples = len(test)
if FLAGS.mode == 'train':
print("mode: train")
print("nr_training_examples: {}, nr_validation_examples: {}, nr_test_examples: {}".format(
nr_training_examples, nr_validation_examples, nr_test_examples
))
for epoch in range(1, FLAGS.epochs + 1):
# reshuffle training data before commencing an epoch,
# get new batches each time rather than cycling thru previously seen batches
# (should improve quality of gradient estimates a bit)
np.random.shuffle(train)
for i in range(0, nr_training_examples, FLAGS.batch_size):
if (i + FLAGS.batch_size) > nr_training_examples:
break
start_idx = i
end_idx = i + FLAGS.batch_size
sqa_batch = train[start_idx:end_idx]
sqa_batch_standardized = list(map(
lambda sqa: bb.bAbI.standardize_features(
sqa,
babi.max_sentence_len,
FLAGS.number_of_memories,
babi.vocab_dict[babi.pad_token],
intersperse_empty_memories=FLAGS.random_noise
),
sqa_batch
))
sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
feed_dict = {
model.linear_start_indicator: FLAGS.linear_start,
model.learning_rate: learning_rate,
model.sentences_ints_batch: sentences_ints,
model.sentences_timewords_ints_batch: sentences_timeword_ints,
model.question_ints_batch: question_ints,
model.answer_ints_batch: answer_ints,
}
_, loss, acc = sess.run(
[model.train_op, model.summed_cross_entropy_batch, model.acc_batch],
feed_dict=feed_dict
)
mean_cross_entropy = loss / float(FLAGS.batch_size)
print("epoch {}, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
epoch, i, mean_cross_entropy, acc
))
if epoch > 1 and (epoch % FLAGS.anneal_epochs) == 0:
learning_rate *= FLAGS.anneal_const
if epoch > 1 and (epoch % FLAGS.save_freq_epochs) == 0:
model.save(
session=sess,
checkpoint_dir=FLAGS.checkpoint_dir,
checkpoint_name='{}_epoch{}'.format(FLAGS.model_name, epoch)
)
model.save(
session=sess,
checkpoint_dir=FLAGS.checkpoint_dir,
checkpoint_name='{}_epoch{}'.format(FLAGS.model_name, FLAGS.epochs)
)
print("finished training!")
sum_cross_entropy = 0.0
nr_correct = 0
if nr_validation_examples == 0:
print("no validation examples. exiting now.")
sys.exit(0)
for i in range(0, nr_validation_examples, FLAGS.batch_size):
if (i + FLAGS.batch_size) > nr_validation_examples:
break
start_idx = i
end_idx = i + FLAGS.batch_size
sqa_batch = val[start_idx:end_idx]
sqa_batch_standardized = list(map(
lambda sqa: bb.bAbI.standardize_features(
sqa,
babi.max_sentence_len,
FLAGS.number_of_memories,
babi.vocab_dict[babi.pad_token],
intersperse_empty_memories=FLAGS.random_noise
),
sqa_batch
))
sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
feed_dict = {
model.linear_start_indicator: FLAGS.linear_start,
model.learning_rate: 0.0,
model.sentences_ints_batch: sentences_ints,
model.sentences_timewords_ints_batch: sentences_timeword_ints,
model.question_ints_batch: question_ints,
model.answer_ints_batch: answer_ints,
}
loss, acc = sess.run(
[model.summed_cross_entropy_batch, model.acc_batch],
feed_dict=feed_dict
)
mean_cross_entropy = loss / float(FLAGS.batch_size)
sum_cross_entropy += loss
nr_correct += int(acc * FLAGS.batch_size)
print("validation set, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
i, mean_cross_entropy, acc
))
mean_cross_entropy = sum_cross_entropy / float(nr_validation_examples - (nr_validation_examples % FLAGS.batch_size))
accuracy = nr_correct / float(nr_validation_examples - (nr_validation_examples % FLAGS.batch_size))
error_rate = 1.0 - accuracy
print("mean cross_entropy on validation set: {}, \naccuracy: {}, \nerror_rate: {}".format(
mean_cross_entropy, accuracy, error_rate
))
if FLAGS.mode == 'test':
sum_cross_entropy = 0.0
nr_correct = 0
for epoch in range(0, 1):
for i in range(0, nr_test_examples, FLAGS.batch_size):
if (i + FLAGS.batch_size) > nr_test_examples:
break
start_idx = i
end_idx = i + FLAGS.batch_size
sqa_batch = test[start_idx:end_idx]
sqa_batch_standardized = list(map(
lambda sqa: bb.bAbI.standardize_features(
sqa,
babi.max_sentence_len,
FLAGS.number_of_memories,
babi.vocab_dict[babi.pad_token],
intersperse_empty_memories=FLAGS.random_noise
),
sqa_batch
))
sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
feed_dict = {
model.linear_start_indicator: FLAGS.linear_start,
model.learning_rate: 0.0,
model.sentences_ints_batch: sentences_ints,
model.sentences_timewords_ints_batch: sentences_timeword_ints,
model.question_ints_batch: question_ints,
model.answer_ints_batch: answer_ints,
}
loss, acc = sess.run(
[model.summed_cross_entropy_batch, model.acc_batch],
feed_dict=feed_dict
)
mean_cross_entropy = loss / float(FLAGS.batch_size)
sum_cross_entropy += loss
nr_correct += int(acc * FLAGS.batch_size)
print("test set, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
i, mean_cross_entropy, acc
))
mean_cross_entropy = sum_cross_entropy / float(nr_test_examples - (nr_test_examples % FLAGS.batch_size))
accuracy = nr_correct / float(nr_test_examples - (nr_test_examples % FLAGS.batch_size))
error_rate = 1.0 - accuracy
print("mean cross_entropy on test set: {}, \naccuracy: {}, \nerror_rate: {}".format(
mean_cross_entropy, accuracy, error_rate
))
if FLAGS.mode == 'viz':
for epoch in range(0, 1):
for i in range(0, FLAGS.batch_size, FLAGS.batch_size):
if (i + FLAGS.batch_size) > nr_test_examples:
break
start_idx = i
end_idx = i + FLAGS.batch_size
sqa_batch = test[start_idx:end_idx]
sqa_batch_standardized = list(map(
lambda sqa: bb.bAbI.standardize_features(
sqa,
babi.max_sentence_len,
FLAGS.number_of_memories,
babi.vocab_dict[babi.pad_token],
intersperse_empty_memories=FLAGS.random_noise
),
sqa_batch
))
sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
feed_dict = {
model.linear_start_indicator: FLAGS.linear_start,
model.learning_rate: 0.0,
model.sentences_ints_batch: sentences_ints,
model.sentences_timewords_ints_batch: sentences_timeword_ints,
model.question_ints_batch: question_ints,
model.answer_ints_batch: answer_ints,
}
show_temporal = True
temporal_matrices = sess.run(
[
model.embedding_matrices['temporal'][
model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](0)
],
model.embedding_matrices['temporal'][
model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](1)
],
model.embedding_matrices['temporal'][
model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](2)
],
model.embedding_matrices['temporal'][
model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_C'](2)
]
],
feed_dict=feed_dict
)
for temporal_matrix in temporal_matrices:
column_labels = [str(i) for i in range(model.d)]
row_labels = [str(i) for i in range(model.M)]
fig, ax = plt.subplots()
heatmap = ax.pcolor(temporal_matrix, cmap=plt.cm.get_cmap('Blues'))
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(temporal_matrix.shape[0]) + 0.5, minor=False)
ax.set_yticks(np.arange(temporal_matrix.shape[1]) + 0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.show()
return
main() | main.py | import tensorflow as tf
from MemoryNetwork import MemoryNetwork
import babi_dataset_utils as bb
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
import errno
# Command-line configuration. tf.app.flags registers argparse-style flags at
# import time; values are read lazily from FLAGS when main() executes.
flags = tf.app.flags
# dataset configs
flags.DEFINE_string("dataset_selector", "babi", "dataset selector: 'babi' or 'penn' [babi]")
flags.DEFINE_string("data_dir", 'datasets/bAbI/tasks_1-20_v1-2/en/', "Data directory [datasets/bAbI/tasks_1-20_v1-2/en/]")
flags.DEFINE_boolean("babi_joint", False, "run jointly on all bAbI tasks, if applicable [False]")
flags.DEFINE_integer("babi_task_id", 1, "bAbI task to train on, if applicable [1]")
flags.DEFINE_float("validation_frac", 0.1, "train-validation split [0.1]")
flags.DEFINE_string("vocab_dir", 'vocab/', "directory to persist vocab-int dictionary [vocab/]")
flags.DEFINE_string("vocab_filename", "", "optional flag to allow us to load a persisted vocab dictionary from a pkl file")
flags.DEFINE_string("max_sentence_len_filename", "", "optional flag to allow us to load a persisted max_sentence_len value from a pkl file")
# checkpoint configs
# NOTE(review): checkpoint_dir defaults to an absolute user-specific path —
# override it on any other machine.
flags.DEFINE_string("checkpoint_dir", "/Users/lucaslingle/git/memn2n/checkpoints/", "checkpoints path [/Users/lucaslingle/git/memn2n/checkpoints/]")
flags.DEFINE_string("model_name", "MemN2N", "a filename prefix for checkpoints [MemN2N]")
flags.DEFINE_string("mode", 'train', "train or test [train]")
flags.DEFINE_boolean("load", False, "load from latest checkpoint [False]")
flags.DEFINE_integer("save_freq_epochs", 5, "number of epochs between checkpoints [5]")
# training configs
flags.DEFINE_integer("batch_size", 32, "batch size [32]")
flags.DEFINE_integer("epochs", 100, "number of epochs [100]")
flags.DEFINE_float("initial_learning_rate", 0.01, "initial learning rate [0.01]")
flags.DEFINE_float("gradient_clip", 40, "maximum gradient norm [40]")
flags.DEFINE_float("gradient_noise_scale", 0.001, "stddev for adding gaussian noise to gradient [0.001]")
flags.DEFINE_float("anneal_const", 0.5, "annealing constant [0.5]")
flags.DEFINE_integer("anneal_epochs", 25, "number of epochs per annealing [25]")
# model configs
flags.DEFINE_integer("number_of_memories", 50, "memory size [50]")
flags.DEFINE_integer("embedding_dim", 20, "word embedding dimension [20]")
flags.DEFINE_integer("number_of_hops", 3, "number of hops [3]")
flags.DEFINE_boolean("linear_start", False, "start with linear attention (as opposed to softmaxed) [False]")
flags.DEFINE_boolean("position_encoding", True, "position encoding [True]")
flags.DEFINE_string("weight_tying_scheme", 'adj', "weight tying scheme: 'adj' or 'rnnlike' [adj]")
flags.DEFINE_boolean("random_noise", False, "random noise (insert empty memories to regularize temporal embedding) [False]")
flags.DEFINE_string("word_emb_initializer", 'random_normal_initializer', "weight initializer class name for word embedding weights. [random_normal_initializer]")
flags.DEFINE_float("word_emb_init_scale", 0.1, "value for stddev or gain argument of the word_emb_initializer [0.1]")
flags.DEFINE_string("temporal_emb_initializer", 'random_normal_initializer', "weight initializer class name for temporal embedding weights. [random_normal_initializer]")
flags.DEFINE_float("temporal_emb_init_scale", 0.1, "value for stddev or gain argument of the temporal_emb_initializer [0.1]")
FLAGS = flags.FLAGS
def get_vocab_filename_from_settings(FLAGS):
    """Pick the vocab pickle filename: the explicit --vocab_filename flag
    wins; otherwise derive a name from dataset / corpus dir / task id so
    that different configurations persist to different files."""
    if len(FLAGS.vocab_filename) > 0:
        return FLAGS.vocab_filename
    corpus_tag = FLAGS.data_dir.strip("/").split("/")[-1]
    task_tag = 'joint' if FLAGS.babi_joint else 'task_{}'.format(FLAGS.babi_task_id)
    return 'vocab_{}_{}_{}.pkl'.format(FLAGS.dataset_selector, corpus_tag, task_tag)
def get_max_sentence_len_filename_from_settings(FLAGS):
    """Pick the max_sentence_len pickle filename (mirrors the vocab
    filename scheme: explicit flag wins, else derived from settings)."""
    if len(FLAGS.max_sentence_len_filename) > 0:
        return FLAGS.max_sentence_len_filename
    corpus_tag = FLAGS.data_dir.strip("/").split("/")[-1]
    task_tag = 'joint' if FLAGS.babi_joint else 'task_{}'.format(FLAGS.babi_task_id)
    return 'max_sentence_len_{}_{}_{}.pkl'.format(FLAGS.dataset_selector, corpus_tag, task_tag)
def compute_and_save_babi_vocab_meta(FLAGS, vocab_save_fp, max_sentence_len_save_fp):
    """Scan the raw bAbI data once to build the vocab dict and
    max_sentence_len, then persist both as pickle files."""
    babi = bb.bAbI()
    # Passing vocab_dict=None / max_sentence_len=None makes prepare_* derive
    # them from the data as a side effect; the returned splits are unused.
    if FLAGS.babi_joint:
        babi.prepare_data_for_joint_tasks(
            FLAGS.data_dir, FLAGS.validation_frac, vocab_dict=None, max_sentence_len=None)
    else:
        babi.prepare_data_for_single_task(
            FLAGS.data_dir, FLAGS.babi_task_id, FLAGS.validation_frac, vocab_dict=None, max_sentence_len=None)
    babi.save_vocab_dict_to_file(data=babi.vocab_dict, fp=vocab_save_fp)
    babi.save_max_sentence_len_to_file(data=babi.max_sentence_len, fp=max_sentence_len_save_fp)
def main():
    """Entry point: build/load bAbI vocab metadata, construct the MemN2N
    graph, then run the mode selected by --mode ('train', 'test', 'viz').

    Only dataset_selector == 'babi' is handled; any other value makes this
    function a no-op.
    """
    if FLAGS.dataset_selector == 'babi':
        babi = bb.bAbI()
        learning_rate = FLAGS.initial_learning_rate
        candidate_vocab_filename = get_vocab_filename_from_settings(FLAGS)
        candidate_max_sentence_len_filename = get_max_sentence_len_filename_from_settings(FLAGS)
        candidate_vocab_fp = os.path.join(FLAGS.vocab_dir, candidate_vocab_filename)
        vocab_fp_exists = os.path.exists(candidate_vocab_fp)
        candidate_max_sentence_len_fp = os.path.join(FLAGS.vocab_dir, candidate_max_sentence_len_filename)
        max_sentence_len_fp_exists = os.path.exists(candidate_max_sentence_len_fp)
        # must compute and persist vocab metadata if we aren't loading anything
        if not FLAGS.load:
            compute_and_save_babi_vocab_meta(FLAGS, candidate_vocab_fp, candidate_max_sentence_len_fp)
        # When restoring a checkpoint, the persisted vocab metadata must
        # already exist — the embedding sizes depend on it.
        if FLAGS.load and not vocab_fp_exists:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), candidate_vocab_fp)
        if FLAGS.load and not max_sentence_len_fp_exists:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), candidate_max_sentence_len_fp)
        with tf.Graph().as_default() as graph:
            # load our vocab dictionary
            vocab_dict = babi.load_vocab_dict_from_file(candidate_vocab_fp)
            max_sentence_len = babi.load_max_sentence_len_from_file(candidate_max_sentence_len_fp)
            # prepare the data
            if FLAGS.babi_joint:
                train, val, test = babi.prepare_data_for_joint_tasks(
                    FLAGS.data_dir, FLAGS.validation_frac,
                    vocab_dict=vocab_dict, max_sentence_len=max_sentence_len
                )
            else:
                train, val, test = babi.prepare_data_for_single_task(
                    FLAGS.data_dir, FLAGS.babi_task_id, FLAGS.validation_frac,
                    vocab_dict=vocab_dict, max_sentence_len=max_sentence_len
                )
            print("len(vocab_dict) is {}, and max_sentence_len is {}".format(
                len(vocab_dict), max_sentence_len
            ))
            # instantiate the model
            model = MemoryNetwork(vocab_size=len(vocab_dict),
                                  embedding_dim=FLAGS.embedding_dim,
                                  number_of_hops=FLAGS.number_of_hops,
                                  batch_size=FLAGS.batch_size,
                                  number_of_memories=FLAGS.number_of_memories,
                                  max_sentence_len=babi.max_sentence_len,
                                  gradient_clip=FLAGS.gradient_clip,
                                  gradient_noise_scale=FLAGS.gradient_noise_scale,
                                  weight_tying_scheme=FLAGS.weight_tying_scheme,
                                  position_encoding=FLAGS.position_encoding,
                                  word_emb_initializer=FLAGS.word_emb_initializer,
                                  word_emb_init_scale=FLAGS.word_emb_init_scale,
                                  temporal_emb_initializer=FLAGS.temporal_emb_initializer,
                                  temporal_emb_init_scale=FLAGS.temporal_emb_init_scale)
            with tf.Session(graph=graph) as sess:
                sess.run(tf.global_variables_initializer())
                if FLAGS.load:
                    print("attempting to restore from {}".format(FLAGS.checkpoint_dir))
                    model.load(sess, FLAGS.checkpoint_dir)
                    word_emb_varnames_in_restored_list_order = list(map(lambda v: v.name, model.embedding_matrices['word']))
                    print(word_emb_varnames_in_restored_list_order)
                nr_training_examples = len(train)
                nr_validation_examples = len(val)
                nr_test_examples = len(test)
                if FLAGS.mode == 'train':
                    print("mode: train")
                    print("nr_training_examples: {}, nr_validation_examples: {}, nr_test_examples: {}".format(
                        nr_training_examples, nr_validation_examples, nr_test_examples
                    ))
                    for epoch in range(1, FLAGS.epochs + 1):
                        # reshuffle training data before commencing an epoch,
                        # get new batches each time rather than cycling thru previously seen batches
                        # (should improve quality of gradient estimates a bit)
                        np.random.shuffle(train)
                        for i in range(0, nr_training_examples, FLAGS.batch_size):
                            # drop the trailing partial batch — presumably the
                            # graph is built for exactly batch_size examples;
                            # TODO confirm against MemoryNetwork.
                            if (i + FLAGS.batch_size) > nr_training_examples:
                                break
                            start_idx = i
                            end_idx = i + FLAGS.batch_size
                            sqa_batch = train[start_idx:end_idx]
                            # Pad/encode each (sentences, question, answer)
                            # triple to fixed sizes for the feed_dict.
                            sqa_batch_standardized = list(map(
                                lambda sqa: bb.bAbI.standardize_features(
                                    sqa,
                                    babi.max_sentence_len,
                                    FLAGS.number_of_memories,
                                    babi.vocab_dict[babi.pad_token],
                                    intersperse_empty_memories=FLAGS.random_noise
                                ),
                                sqa_batch
                            ))
                            sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
                            feed_dict = {
                                model.linear_start_indicator: FLAGS.linear_start,
                                model.learning_rate: learning_rate,
                                model.sentences_ints_batch: sentences_ints,
                                model.sentences_timewords_ints_batch: sentences_timeword_ints,
                                model.question_ints_batch: question_ints,
                                model.answer_ints_batch: answer_ints,
                            }
                            _, loss, acc = sess.run(
                                [model.train_op, model.summed_cross_entropy_batch, model.acc_batch],
                                feed_dict=feed_dict
                            )
                            mean_cross_entropy = loss / float(FLAGS.batch_size)
                            print("epoch {}, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
                                epoch, i, mean_cross_entropy, acc
                            ))
                        # Learning-rate annealing and periodic checkpointing.
                        if epoch > 1 and (epoch % FLAGS.anneal_epochs) == 0:
                            learning_rate *= FLAGS.anneal_const
                        if epoch > 1 and (epoch % FLAGS.save_freq_epochs) == 0:
                            model.save(
                                session=sess,
                                checkpoint_dir=FLAGS.checkpoint_dir,
                                checkpoint_name='{}_epoch{}'.format(FLAGS.model_name, epoch)
                            )
                    model.save(
                        session=sess,
                        checkpoint_dir=FLAGS.checkpoint_dir,
                        checkpoint_name='{}_epoch{}'.format(FLAGS.model_name, FLAGS.epochs)
                    )
                    print("finished training!")
                    # Post-training validation pass (no train_op in fetches,
                    # learning_rate fed as 0.0, so no weights change).
                    sum_cross_entropy = 0.0
                    nr_correct = 0
                    if nr_validation_examples == 0:
                        print("no validation examples. exiting now.")
                        sys.exit(0)
                    for i in range(0, nr_validation_examples, FLAGS.batch_size):
                        if (i + FLAGS.batch_size) > nr_validation_examples:
                            break
                        start_idx = i
                        end_idx = i + FLAGS.batch_size
                        sqa_batch = val[start_idx:end_idx]
                        sqa_batch_standardized = list(map(
                            lambda sqa: bb.bAbI.standardize_features(
                                sqa,
                                babi.max_sentence_len,
                                FLAGS.number_of_memories,
                                babi.vocab_dict[babi.pad_token],
                                intersperse_empty_memories=FLAGS.random_noise
                            ),
                            sqa_batch
                        ))
                        sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
                        feed_dict = {
                            model.linear_start_indicator: FLAGS.linear_start,
                            model.learning_rate: 0.0,
                            model.sentences_ints_batch: sentences_ints,
                            model.sentences_timewords_ints_batch: sentences_timeword_ints,
                            model.question_ints_batch: question_ints,
                            model.answer_ints_batch: answer_ints,
                        }
                        loss, acc = sess.run(
                            [model.summed_cross_entropy_batch, model.acc_batch],
                            feed_dict=feed_dict
                        )
                        mean_cross_entropy = loss / float(FLAGS.batch_size)
                        sum_cross_entropy += loss
                        nr_correct += int(acc * FLAGS.batch_size)
                        print("validation set, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
                            i, mean_cross_entropy, acc
                        ))
                    # NOTE(review): if 0 < nr_validation_examples < batch_size,
                    # the denominator below is 0 -> ZeroDivisionError.
                    mean_cross_entropy = sum_cross_entropy / float(nr_validation_examples - (nr_validation_examples % FLAGS.batch_size))
                    accuracy = nr_correct / float(nr_validation_examples - (nr_validation_examples % FLAGS.batch_size))
                    error_rate = 1.0 - accuracy
                    print("mean cross_entropy on validation set: {}, \naccuracy: {}, \nerror_rate: {}".format(
                        mean_cross_entropy, accuracy, error_rate
                    ))
                if FLAGS.mode == 'test':
                    # Single evaluation pass over the test split.
                    sum_cross_entropy = 0.0
                    nr_correct = 0
                    for epoch in range(0, 1):
                        for i in range(0, nr_test_examples, FLAGS.batch_size):
                            if (i + FLAGS.batch_size) > nr_test_examples:
                                break
                            start_idx = i
                            end_idx = i + FLAGS.batch_size
                            sqa_batch = test[start_idx:end_idx]
                            sqa_batch_standardized = list(map(
                                lambda sqa: bb.bAbI.standardize_features(
                                    sqa,
                                    babi.max_sentence_len,
                                    FLAGS.number_of_memories,
                                    babi.vocab_dict[babi.pad_token],
                                    intersperse_empty_memories=FLAGS.random_noise
                                ),
                                sqa_batch
                            ))
                            sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
                            feed_dict = {
                                model.linear_start_indicator: FLAGS.linear_start,
                                model.learning_rate: 0.0,
                                model.sentences_ints_batch: sentences_ints,
                                model.sentences_timewords_ints_batch: sentences_timeword_ints,
                                model.question_ints_batch: question_ints,
                                model.answer_ints_batch: answer_ints,
                            }
                            loss, acc = sess.run(
                                [model.summed_cross_entropy_batch, model.acc_batch],
                                feed_dict=feed_dict
                            )
                            mean_cross_entropy = loss / float(FLAGS.batch_size)
                            sum_cross_entropy += loss
                            nr_correct += int(acc * FLAGS.batch_size)
                            print("test set, iter {}, batch mean_cross_entropy {}, batch accuracy {}".format(
                                i, mean_cross_entropy, acc
                            ))
                    # NOTE(review): same divide-by-zero hazard as validation
                    # when nr_test_examples < batch_size.
                    mean_cross_entropy = sum_cross_entropy / float(nr_test_examples - (nr_test_examples % FLAGS.batch_size))
                    accuracy = nr_correct / float(nr_test_examples - (nr_test_examples % FLAGS.batch_size))
                    error_rate = 1.0 - accuracy
                    print("mean cross_entropy on test set: {}, \naccuracy: {}, \nerror_rate: {}".format(
                        mean_cross_entropy, accuracy, error_rate
                    ))
                if FLAGS.mode == 'viz':
                    # Heatmap visualization of the temporal embedding matrices.
                    # range(0, batch_size, batch_size) yields only i=0, so this
                    # processes exactly one test batch.
                    for epoch in range(0, 1):
                        for i in range(0, FLAGS.batch_size, FLAGS.batch_size):
                            if (i + FLAGS.batch_size) > nr_test_examples:
                                break
                            start_idx = i
                            end_idx = i + FLAGS.batch_size
                            sqa_batch = test[start_idx:end_idx]
                            sqa_batch_standardized = list(map(
                                lambda sqa: bb.bAbI.standardize_features(
                                    sqa,
                                    babi.max_sentence_len,
                                    FLAGS.number_of_memories,
                                    babi.vocab_dict[babi.pad_token],
                                    intersperse_empty_memories=FLAGS.random_noise
                                ),
                                sqa_batch
                            ))
                            sentences_ints, sentences_timeword_ints, question_ints, answer_ints = zip(*sqa_batch_standardized)
                            feed_dict = {
                                model.linear_start_indicator: FLAGS.linear_start,
                                model.learning_rate: 0.0,
                                model.sentences_ints_batch: sentences_ints,
                                model.sentences_timewords_ints_batch: sentences_timeword_ints,
                                model.question_ints_batch: question_ints,
                                model.answer_ints_batch: answer_ints,
                            }
                            show_temporal = True  # NOTE(review): unused
                            # Fetch T_A for hops 0..2 and T_C for the last hop,
                            # resolved through the weight-tying routing table.
                            temporal_matrices = sess.run(
                                [
                                    model.embedding_matrices['temporal'][
                                        model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](0)
                                    ],
                                    model.embedding_matrices['temporal'][
                                        model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](1)
                                    ],
                                    model.embedding_matrices['temporal'][
                                        model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_A'](2)
                                    ],
                                    model.embedding_matrices['temporal'][
                                        model.routing_formulas['temporal'][FLAGS.weight_tying_scheme]['T_C'](2)
                                    ]
                                ],
                                feed_dict=feed_dict
                            )
                            for temporal_matrix in temporal_matrices:
                                column_labels = [str(i) for i in range(model.d)]
                                row_labels = [str(i) for i in range(model.M)]
                                fig, ax = plt.subplots()
                                heatmap = ax.pcolor(temporal_matrix, cmap=plt.cm.get_cmap('Blues'))
                                # put the major ticks at the middle of each cell
                                ax.set_xticks(np.arange(temporal_matrix.shape[0]) + 0.5, minor=False)
                                ax.set_yticks(np.arange(temporal_matrix.shape[1]) + 0.5, minor=False)
                                # want a more natural, table-like display
                                ax.invert_yaxis()
                                ax.xaxis.tick_top()
                                ax.set_xticklabels(row_labels, minor=False)
                                ax.set_yticklabels(column_labels, minor=False)
                                plt.show()
    return
# NOTE(review): runs unconditionally at import time; consider guarding with
# `if __name__ == '__main__':` (or tf.app.run()) so the module is importable.
main()
class StatementsRouter:
route_app_labels = {'auth', 'contenttypes', 'session', 'admin', 'statements'}
def db_for_read(self, model, **hints):
"""
Attempts to read auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'default'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'default'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the auth or contenttypes apps is
involved.
"""
if (
obj1._meta.app_label in self.route_app_labels or
obj2._meta.app_label in self.route_app_labels
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the auth and contenttypes apps only appear in the
'auth_db' database.
"""
if app_label in self.route_app_labels:
return db == 'default'
return None
class OurfishRouter:
route_app_labels = {'ourfish'}
def db_for_read(self, model, **hints):
"""
Attempts to read auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'ourfish'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'ourfish'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the auth or contenttypes apps is
involved.
"""
if (
obj1._meta.app_label in self.route_app_labels or
obj2._meta.app_label in self.route_app_labels
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the auth and contenttypes apps only appear in the
'auth_db' database.
"""
if app_label in self.route_app_labels:
return db == 'ourfish'
return None | mysite/routers/db_routers.py | class StatementsRouter:
route_app_labels = {'auth', 'contenttypes', 'session', 'admin', 'statements'}
def db_for_read(self, model, **hints):
"""
Attempts to read auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'default'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'default'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the auth or contenttypes apps is
involved.
"""
if (
obj1._meta.app_label in self.route_app_labels or
obj2._meta.app_label in self.route_app_labels
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the auth and contenttypes apps only appear in the
'auth_db' database.
"""
if app_label in self.route_app_labels:
return db == 'default'
return None
class OurfishRouter:
route_app_labels = {'ourfish'}
def db_for_read(self, model, **hints):
"""
Attempts to read auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'ourfish'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write auth and contenttypes models go to auth_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'ourfish'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the auth or contenttypes apps is
involved.
"""
if (
obj1._meta.app_label in self.route_app_labels or
obj2._meta.app_label in self.route_app_labels
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the auth and contenttypes apps only appear in the
'auth_db' database.
"""
if app_label in self.route_app_labels:
return db == 'ourfish'
return None | 0.489015 | 0.154983 |
from keystoneauth1 import adapter
import mock
from openstack.tests.unit import base
from otcextensions.sdk import sdk_resource
# Only a basic tests for extended functionality are implemented since
# the _list code is copied from sdk.resource to override headers
# TODO(agoncharov) make sense to implement (copy) existing base_resource
# tests from SDK
PROJECT_ID = '123'
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'id': IDENTIFIER,
'links': '1',
'name': '2',
'ram': 3,
}
class Res(sdk_resource.Resource):
base_path = '/'
allow_list = True
class TestBaseResource(base.TestCase):
def setUp(self):
super(TestBaseResource, self).setUp()
self.sess = mock.Mock(spec=adapter.Adapter)
self.sess.get_project_id = mock.Mock(return_value=PROJECT_ID)
self.sot = Res(**EXAMPLE)
# inject some properties to enable methods
# self.sot.allow_list = True
# self.sot.base_path = '/'
self.base_path = self.sot.base_path
self.headers = {"Content-Type": "application/json"}
def test_basic(self):
sot = sdk_resource.Resource()
self.assertFalse(sot.allow_list)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
def test_list_defaults(self):
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = []
self.sess.get.return_value = mock_response
result = list(self.sot.list(self.sess))
self.sess.get.assert_called_once_with(
self.base_path,
params={},
)
self.assertEqual([], result)
def test_list_override_headers(self):
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = [EXAMPLE]
self.sess.get.return_value = mock_response
result = list(self.sot.list(self.sess, headers={'a': 'b'}))
self.sess.get.assert_called_once_with(
self.base_path,
headers={"a": "b"},
params={},
)
self.assertEqual([sdk_resource.Resource(**EXAMPLE)], result)
def test_list_override_endpoint(self):
# sot = _base.Resource()
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = [EXAMPLE]
self.sess.get.return_value = mock_response
result = list(self.sot.list(
self.sess,
headers={'a': 'b'},
endpoint_override='http:example.com'))
self.sess.get.assert_called_once_with(
self.base_path,
headers={"a": "b"},
endpoint_override='http:example.com',
params={},
)
self.assertEqual([self.sot], result) | otcextensions/tests/unit/sdk/test_sdk_resource.py | from keystoneauth1 import adapter
import mock
from openstack.tests.unit import base
from otcextensions.sdk import sdk_resource
# Only a basic tests for extended functionality are implemented since
# the _list code is copied from sdk.resource to override headers
# TODO(agoncharov) make sense to implement (copy) existing base_resource
# tests from SDK
PROJECT_ID = '123'
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'id': IDENTIFIER,
'links': '1',
'name': '2',
'ram': 3,
}
class Res(sdk_resource.Resource):
base_path = '/'
allow_list = True
class TestBaseResource(base.TestCase):
def setUp(self):
super(TestBaseResource, self).setUp()
self.sess = mock.Mock(spec=adapter.Adapter)
self.sess.get_project_id = mock.Mock(return_value=PROJECT_ID)
self.sot = Res(**EXAMPLE)
# inject some properties to enable methods
# self.sot.allow_list = True
# self.sot.base_path = '/'
self.base_path = self.sot.base_path
self.headers = {"Content-Type": "application/json"}
def test_basic(self):
sot = sdk_resource.Resource()
self.assertFalse(sot.allow_list)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
def test_list_defaults(self):
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = []
self.sess.get.return_value = mock_response
result = list(self.sot.list(self.sess))
self.sess.get.assert_called_once_with(
self.base_path,
params={},
)
self.assertEqual([], result)
def test_list_override_headers(self):
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = [EXAMPLE]
self.sess.get.return_value = mock_response
result = list(self.sot.list(self.sess, headers={'a': 'b'}))
self.sess.get.assert_called_once_with(
self.base_path,
headers={"a": "b"},
params={},
)
self.assertEqual([sdk_resource.Resource(**EXAMPLE)], result)
def test_list_override_endpoint(self):
# sot = _base.Resource()
mock_response = mock.Mock()
mock_response.status_code = 200
mock_response.json.return_value = [EXAMPLE]
self.sess.get.return_value = mock_response
result = list(self.sot.list(
self.sess,
headers={'a': 'b'},
endpoint_override='http:example.com'))
self.sess.get.assert_called_once_with(
self.base_path,
headers={"a": "b"},
endpoint_override='http:example.com',
params={},
)
self.assertEqual([self.sot], result) | 0.35031 | 0.247669 |
class Terminal:
def __init__(self, estimates, regcoeffs):
self._estimates = estimates
self._regcoeffs = regcoeffs
def Process(self, entity):
entity.time_Sysp = entity.allTime
# Entities receiving no treatment OR palliative treatment (for recurrence)
if hasattr(entity, 'endOfLife') == False:
# Entities with recurrence may be palliative or NoTx
if hasattr(entity, 'recurrence') == True:
entity.utility.append(("Incurable disease", self._estimates.Util_Incurable.sample(), entity.allTime))
if entity.tx_recur == 'Palliative':
if hasattr(entity, "palliativeMonth") == False:
entity.palliativeMonth = 1
# Entity experiences spontaneous remission
if entity.palliativeMonth >= 520:
entity.stateNum = 4.8 # Entity is in remission and receives no more care
entity.currentState = "Remission"
entity.cancerDetected == 9 # Cancer is in remission and so no further clinical events are scheduled
entity.time_deadOfDisease = 777777 # Death from disease set to implausibly high value
entity.time_Recurrence = 666666 # Future recurrence set to impossible date
else:
entity.resources.append(("Treatment - Palliative", entity.allTime))
entity.events.append(("Palliative care - month%2.0f"%entity.palliativeMonth, entity.allTime))
entity.palliativeMonth +=1
elif entity.tx_recur == 'Notx':
if hasattr(entity, "notxMonth") == False:
entity.notxMonth = 1
# Entity experiences spontaneous remission
if entity.notxMonth >= 520:
entity.stateNum = 4.8 # Entity is in remission and receives no more care
entity.currentState = "Remission"
entity.cancerDetected == 9 # Cancer is in remission and so no further clinical events are scheduled
entity.time_deadOfDisease = 777777 # Death from disease set to implausibly high value
entity.time_Recurrence = 666666 # Future recurrence set to impossible date
else:
entity.resources.append(("Treatment - Recurrence - No Treatment", entity.allTime))
entity.events.append(("Best supportive care - month%2.0f"%entity.notxMonth, entity.allTime))
entity.notxMonth += 1
entity.time_Sysp += 30 # Advance clock one month
else:
entity.stateNum = 99
entity.currentState = "ERROR - Terminal Disease - entity is in the Terminal disease state, but has not recurred or been assigned an end of life flag. Check 'SysP_RecurTx' or 'Glb_Checktime'"
print("Entity was not assigned an 'endOfLife' or 'recurrence' flag. Check 'SysP_RecurTx' or 'Glb_Checktime'")
# END IF
# Entity is in last three months of life
elif hasattr(entity, 'endOfLife') == True:
#Terminal disease - end-of-life care
entity.resources.append(("Treatment - End of Life", entity.allTime))
entity.events.append(("End-of-life care", entity.allTime))
entity.utility.append(("End of life", self._estimates.Util_EOL.sample(), entity.allTime))
entity.allTime = entity.time_DeadofDisease # Advance clock to death
else: # Error
entity.stateNum = 99
entity.currentState = "ERROR - Advanced Disease"
print("Entity was not assigned an 'endOfLife' value. Check 'SysP_RecurTx' or 'Glb_Checktime'")
####################################################
# VARIABLES CREATED IN THIS STEP:
#
# adv_hadSalvage - flag indicating that the entity has received salvage surgery
# adv_reirrad - flag indicating that the entity has received a second round of RT
# adv_chemoCount - a counter for the number of cycles of advanced chemotherapy received
# chemoLimit - the maximum number of cycles of chemo an entity can receive
# EoLMonth - a counter to denote the number of months into the terminal phase an entity has come | Code/SysP_Terminal.py | class Terminal:
def __init__(self, estimates, regcoeffs):
self._estimates = estimates
self._regcoeffs = regcoeffs
def Process(self, entity):
entity.time_Sysp = entity.allTime
# Entities receiving no treatment OR palliative treatment (for recurrence)
if hasattr(entity, 'endOfLife') == False:
# Entities with recurrence may be palliative or NoTx
if hasattr(entity, 'recurrence') == True:
entity.utility.append(("Incurable disease", self._estimates.Util_Incurable.sample(), entity.allTime))
if entity.tx_recur == 'Palliative':
if hasattr(entity, "palliativeMonth") == False:
entity.palliativeMonth = 1
# Entity experiences spontaneous remission
if entity.palliativeMonth >= 520:
entity.stateNum = 4.8 # Entity is in remission and receives no more care
entity.currentState = "Remission"
entity.cancerDetected == 9 # Cancer is in remission and so no further clinical events are scheduled
entity.time_deadOfDisease = 777777 # Death from disease set to implausibly high value
entity.time_Recurrence = 666666 # Future recurrence set to impossible date
else:
entity.resources.append(("Treatment - Palliative", entity.allTime))
entity.events.append(("Palliative care - month%2.0f"%entity.palliativeMonth, entity.allTime))
entity.palliativeMonth +=1
elif entity.tx_recur == 'Notx':
if hasattr(entity, "notxMonth") == False:
entity.notxMonth = 1
# Entity experiences spontaneous remission
if entity.notxMonth >= 520:
entity.stateNum = 4.8 # Entity is in remission and receives no more care
entity.currentState = "Remission"
entity.cancerDetected == 9 # Cancer is in remission and so no further clinical events are scheduled
entity.time_deadOfDisease = 777777 # Death from disease set to implausibly high value
entity.time_Recurrence = 666666 # Future recurrence set to impossible date
else:
entity.resources.append(("Treatment - Recurrence - No Treatment", entity.allTime))
entity.events.append(("Best supportive care - month%2.0f"%entity.notxMonth, entity.allTime))
entity.notxMonth += 1
entity.time_Sysp += 30 # Advance clock one month
else:
entity.stateNum = 99
entity.currentState = "ERROR - Terminal Disease - entity is in the Terminal disease state, but has not recurred or been assigned an end of life flag. Check 'SysP_RecurTx' or 'Glb_Checktime'"
print("Entity was not assigned an 'endOfLife' or 'recurrence' flag. Check 'SysP_RecurTx' or 'Glb_Checktime'")
# END IF
# Entity is in last three months of life
elif hasattr(entity, 'endOfLife') == True:
#Terminal disease - end-of-life care
entity.resources.append(("Treatment - End of Life", entity.allTime))
entity.events.append(("End-of-life care", entity.allTime))
entity.utility.append(("End of life", self._estimates.Util_EOL.sample(), entity.allTime))
entity.allTime = entity.time_DeadofDisease # Advance clock to death
else: # Error
entity.stateNum = 99
entity.currentState = "ERROR - Advanced Disease"
print("Entity was not assigned an 'endOfLife' value. Check 'SysP_RecurTx' or 'Glb_Checktime'")
####################################################
# VARIABLES CREATED IN THIS STEP:
#
# adv_hadSalvage - flag indicating that the entity has received salvage surgery
# adv_reirrad - flag indicating that the entity has received a second round of RT
# adv_chemoCount - a counter for the number of cycles of advanced chemotherapy received
# chemoLimit - the maximum number of cycles of chemo an entity can receive
# EoLMonth - a counter to denote the number of months into the terminal phase an entity has come | 0.541409 | 0.294114 |
import time
import rospy
import rospkg
import os
import sys
import numpy as np
import tensorflow as tf
from styx_msgs.msg import TrafficLight
from io import StringIO
MINIMUM_CONFIDENCE = 0.4
class TLClassifier(object):
def __init__(self, simulator):
# current_path = os.path.dirname(os.path.realpath(__file__))
self.simulator_used = simulator
# We support two different frozen graphes which are trained with
# real car camera data and with data from the simulator. Depending
# where the application is executed (car or simulator) different
# models are loaded.
if (self.simulator_used == 1):
model_path = 'light_classification/classifiers/inference_graph_sim.pb'
else:
model_path = 'light_classification/classifiers/inference_graph_real.pb'
rospy.logwarn('model path {0}'.format(model_path))
detection_graph = self.load_graph(model_path)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
self.sess = tf.Session(graph=detection_graph)
def load_graph(self, graph_file):
# Loads a frozen TF inference graph
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Load a sample image
image_expanded = np.expand_dims(image, axis=0)
result = TrafficLight.UNKNOWN
# Perform detection
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores,
self.detection_classes],
feed_dict={self.image_tensor: image_expanded})
# Remove unnecessary dimensions
scores = np.squeeze(scores)
classes = np.squeeze(classes)
# Debug classifications
# rospy.logwarn('TF classes {0} and scores {1}'.format(classes, scores))
# Find traffic light with highest confidence level
conv_level = MINIMUM_CONFIDENCE
score = 0
for i in range(boxes.shape[0]):
if scores[i] > conv_level:
conv_level = scores[i]
if classes[i] == 2: #'Green':
result = TrafficLight.GREEN
elif classes[i] == 4: #'Red':
result = TrafficLight.RED
elif classes[i] == 3: #'Yellow':
result = TrafficLight.YELLOW
score = scores[i]
# Debug traffic light output - Red: 0, 1: Yellow, 2: Green, 4: Unknown
# rospy.logwarn('Traffic light {0} ({1})'.format(result, score))
return result | ros/src/tl_detector/light_classification/tl_classifier.py | import time
import rospy
import rospkg
import os
import sys
import numpy as np
import tensorflow as tf
from styx_msgs.msg import TrafficLight
from io import StringIO
MINIMUM_CONFIDENCE = 0.4
class TLClassifier(object):
def __init__(self, simulator):
# current_path = os.path.dirname(os.path.realpath(__file__))
self.simulator_used = simulator
# We support two different frozen graphes which are trained with
# real car camera data and with data from the simulator. Depending
# where the application is executed (car or simulator) different
# models are loaded.
if (self.simulator_used == 1):
model_path = 'light_classification/classifiers/inference_graph_sim.pb'
else:
model_path = 'light_classification/classifiers/inference_graph_real.pb'
rospy.logwarn('model path {0}'.format(model_path))
detection_graph = self.load_graph(model_path)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
self.sess = tf.Session(graph=detection_graph)
def load_graph(self, graph_file):
# Loads a frozen TF inference graph
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Load a sample image
image_expanded = np.expand_dims(image, axis=0)
result = TrafficLight.UNKNOWN
# Perform detection
(boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores,
self.detection_classes],
feed_dict={self.image_tensor: image_expanded})
# Remove unnecessary dimensions
scores = np.squeeze(scores)
classes = np.squeeze(classes)
# Debug classifications
# rospy.logwarn('TF classes {0} and scores {1}'.format(classes, scores))
# Find traffic light with highest confidence level
conv_level = MINIMUM_CONFIDENCE
score = 0
for i in range(boxes.shape[0]):
if scores[i] > conv_level:
conv_level = scores[i]
if classes[i] == 2: #'Green':
result = TrafficLight.GREEN
elif classes[i] == 4: #'Red':
result = TrafficLight.RED
elif classes[i] == 3: #'Yellow':
result = TrafficLight.YELLOW
score = scores[i]
# Debug traffic light output - Red: 0, 1: Yellow, 2: Green, 4: Unknown
# rospy.logwarn('Traffic light {0} ({1})'.format(result, score))
return result | 0.704668 | 0.357147 |
import functools
from typing import Optional
from absl import logging
from growneuron.imagenet import data_util
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
def build_input_fn(
builder,
global_batch_size,
topology,
is_training,
image_size = 224):
"""Build input function.
Args:
builder: TFDS builder for specified dataset.
global_batch_size: Global batch size.
topology: An instance of `tf.tpu.experimental.Topology` or None.
is_training: Whether to build in training mode.
image_size: Size of the output images.
Returns:
A function that accepts a dict of params and returns a tuple of images and
features, to be used as the input_fn in TPUEstimator.
"""
def _input_fn(input_context):
"""Inner input function."""
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
logging.info('Global batch size: %d', global_batch_size)
logging.info('Per-replica batch size: %d', batch_size)
preprocess_fn = get_preprocess_fn(is_training, image_size)
def map_fn(image, label):
"""Produces multiple transformations of the same batch."""
image = preprocess_fn(image)
return image, label
dataset = builder.as_dataset(
split='train' if is_training else 'validation',
shuffle_files=is_training,
as_supervised=True)
logging.info('num_input_pipelines: %d', input_context.num_input_pipelines)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_context.num_input_pipelines > 1:
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
if is_training:
buffer_multiplier = 50 if image_size <= 32 else 10
dataset = dataset.shuffle(batch_size * buffer_multiplier)
dataset = dataset.repeat(-1)
dataset = dataset.map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
prefetch_buffer_size = 2 * topology.num_tpus_per_task if topology else 2
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _input_fn
def get_preprocess_fn(is_training, image_size=224):
"""Get function that accepts an image and returns a preprocessed image."""
# Disable test cropping for small images (e.g. CIFAR)
if image_size <= 32:
test_crop = False
else:
test_crop = True
return functools.partial(
data_util.preprocess_image,
image_size=image_size,
is_training=is_training,
test_crop=test_crop) | growneuron/imagenet/data.py | import functools
from typing import Optional
from absl import logging
from growneuron.imagenet import data_util
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
def build_input_fn(
builder,
global_batch_size,
topology,
is_training,
image_size = 224):
"""Build input function.
Args:
builder: TFDS builder for specified dataset.
global_batch_size: Global batch size.
topology: An instance of `tf.tpu.experimental.Topology` or None.
is_training: Whether to build in training mode.
image_size: Size of the output images.
Returns:
A function that accepts a dict of params and returns a tuple of images and
features, to be used as the input_fn in TPUEstimator.
"""
def _input_fn(input_context):
"""Inner input function."""
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
logging.info('Global batch size: %d', global_batch_size)
logging.info('Per-replica batch size: %d', batch_size)
preprocess_fn = get_preprocess_fn(is_training, image_size)
def map_fn(image, label):
"""Produces multiple transformations of the same batch."""
image = preprocess_fn(image)
return image, label
dataset = builder.as_dataset(
split='train' if is_training else 'validation',
shuffle_files=is_training,
as_supervised=True)
logging.info('num_input_pipelines: %d', input_context.num_input_pipelines)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_context.num_input_pipelines > 1:
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
if is_training:
buffer_multiplier = 50 if image_size <= 32 else 10
dataset = dataset.shuffle(batch_size * buffer_multiplier)
dataset = dataset.repeat(-1)
dataset = dataset.map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
prefetch_buffer_size = 2 * topology.num_tpus_per_task if topology else 2
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _input_fn
def get_preprocess_fn(is_training, image_size=224):
"""Get function that accepts an image and returns a preprocessed image."""
# Disable test cropping for small images (e.g. CIFAR)
if image_size <= 32:
test_crop = False
else:
test_crop = True
return functools.partial(
data_util.preprocess_image,
image_size=image_size,
is_training=is_training,
test_crop=test_crop) | 0.938513 | 0.454714 |
import pandas as pd
pd.options.mode.chained_assignment = None
from datetime import datetime
#Carga de datos
Regions = ["WYJ", "YVR"]
WeatherData = {}
HouseFeatures = pd.DataFrame()
HouseData = pd.DataFrame()
#Cargar Caracacteristicas de casa
with open(r"Data/HouseHold/Features.csv") as file:
HouseFeatures = pd.read_csv(file, sep = ",")
#Codificacion Aislante casa
for facing in HouseFeatures["Facing"].unique():
if (facing == "East") or (facing == "West"):
HouseFeatures.loc[HouseFeatures["Facing"] == facing, "Isolation"] = 0
else:
HouseFeatures.loc[HouseFeatures["Facing"] == facing, "Isolation"] = 1
ElectricAC = ["HP", "FPE", "FAC", "PAC", "BHE", "IFRHE"]
#Codificacion AC
for HVAC in HouseFeatures["HVAC"].values:
if (type(HVAC) == str):
result = 0
for AC in ElectricAC:
result = HVAC.find(AC)
if (result != -1):
break
if (result != -1):
HouseFeatures.loc[HouseFeatures["HVAC"] == HVAC, "HVAC"] = 1
else:
HouseFeatures.loc[HouseFeatures["HVAC"] == HVAC, "HVAC"] = 0
print("--------------------------- House Features ---------------------------")
print(HouseFeatures)
print("----------------------------------------------------------------------")
#Cargar Datos de energia por casa
for House in range(1, 29):
with open(r"Data/HouseHold/Residential_" + str(House) + ".csv") as file:
buffer = pd.read_csv(file, sep = ",")
buffer['ID'] = House
HouseData = HouseData.append(buffer, ignore_index = True, verify_integrity= True, sort = True)
print("--------------------------- House Energy ---------------------------")
print(HouseData.head())
print("--------------------------------------------------------------------")
#Cargar dias festivos
with open(r"Data/HouseHold/holidays.csv") as file:
Holidays = pd.read_csv(file, sep = ",")
print("--------------------------- HoliDays ------------------------------")
print(Holidays.head())
print("-------------------------------------------------------------------")
#Agrupar consumo por dia
Output = pd.DataFrame()
for House in range(1,29):
print(House)
BufferHouse = HouseData.loc[HouseData["ID"] == House]
for Date in BufferHouse["date"].unique():
BufferDate = BufferHouse.loc[BufferHouse["date"] == Date]
Output = Output.append({"ID":House, "date": Date, "Energy_KWH":BufferDate["energy_kWh"].sum()}, ignore_index = True)
#Enlazar consumo de energia con caracteristicas de la casa
for House in range(1,29):
Features = pd.DataFrame(HouseFeatures.loc[HouseFeatures["House"] == House])
if (not Features.empty):
FeaturesIndex = Features.index
for column in Features.columns:
Output.loc[Output["ID"] == House, column] = Features[column][FeaturesIndex[0]]
#Codificacion Tipo de casa
HouseType = pd.DataFrame()
for tipe in HouseFeatures["HouseType"].unique():
buffer = Output.loc[(Output["HouseType"] == tipe)&(Output["RUs"] == 0)]
mean = buffer["Energy_KWH"].mean()
HouseType = HouseType.append({"Type":tipe, "mean":mean}, ignore_index=True)
print(HouseType)
print(HouseType.sort_values("mean")["Type"].unique())
code = 0
for tipe in HouseType.sort_values("mean")["Type"].unique():
Output.loc[Output["HouseType"] == tipe, "HouseType"] = code
code += 1
#Agregar dias festivos y fines de semana
for Date in Output["date"].unique():
YMD = Date.split("-")
Holiday = 0
if (not Holidays.loc[Holidays["date"] == Date].empty):
Holiday = 1
if (datetime(int(YMD[0]),int(YMD[1]),int(YMD[2])).weekday() > 4):
Holiday = 1
Output.loc[Output["date"] == Date, "Holiday"] = Holiday
print("--------------------------- Energy by Day ------------------------------")
print(Output.head())
print("------------------------------------------------------------------------")
#Exportar datos
Output.to_csv(r"Data/HouseHold/HouseData.csv", sep = ",", index = False) | FormatDataConsumption.py | import pandas as pd
"""Aggregate per-household energy readings into a daily ML-ready dataset.

Reads house features, 28 per-residence consumption files and a holiday list
from Data/HouseHold/, encodes the categorical features numerically, sums
energy per house per day, flags holidays and weekends, and writes the
result to Data/HouseHold/HouseData.csv.
"""
import pandas as pd
pd.options.mode.chained_assignment = None
from datetime import datetime

# Load data
Regions = ["WYJ", "YVR"]
WeatherData = {}
HouseFeatures = pd.DataFrame()
HouseData = pd.DataFrame()

# Load house characteristics
with open(r"Data/HouseHold/Features.csv") as file:
    HouseFeatures = pd.read_csv(file, sep = ",")

# Encode house isolation: east/west facing -> 0, anything else -> 1
for facing in HouseFeatures["Facing"].unique():
    if facing in ("East", "West"):
        HouseFeatures.loc[HouseFeatures["Facing"] == facing, "Isolation"] = 0
    else:
        HouseFeatures.loc[HouseFeatures["Facing"] == facing, "Isolation"] = 1

# Encode HVAC: 1 if the description mentions any electric AC system, else 0.
ElectricAC = ["HP", "FPE", "FAC", "PAC", "BHE", "IFRHE"]
for HVAC in HouseFeatures["HVAC"].values:
    if isinstance(HVAC, str):
        is_electric = any(AC in HVAC for AC in ElectricAC)
        HouseFeatures.loc[HouseFeatures["HVAC"] == HVAC, "HVAC"] = 1 if is_electric else 0
print("--------------------------- House Features ---------------------------")
print(HouseFeatures)
print("----------------------------------------------------------------------")

# Load per-house energy data.
# pd.concat replaces the per-file DataFrame.append (removed in pandas 2.0).
frames = []
for House in range(1, 29):
    with open(r"Data/HouseHold/Residential_" + str(House) + ".csv") as file:
        buffer = pd.read_csv(file, sep = ",")
    buffer['ID'] = House
    frames.append(buffer)
HouseData = pd.concat(frames, ignore_index = True, sort = True)
print("--------------------------- House Energy ---------------------------")
print(HouseData.head())
print("--------------------------------------------------------------------")

# Load holidays
with open(r"Data/HouseHold/holidays.csv") as file:
    Holidays = pd.read_csv(file, sep = ",")
print("--------------------------- HoliDays ------------------------------")
print(Holidays.head())
print("-------------------------------------------------------------------")

# Sum consumption per house per day.
# groupby replaces the original O(days * rows) re-filtering per date;
# sort=False keeps dates in first-appearance order, matching unique().
records = []
for House in range(1, 29):
    print(House)
    BufferHouse = HouseData.loc[HouseData["ID"] == House]
    daily = BufferHouse.groupby("date", sort=False)["energy_kWh"].sum()
    for Date, energy in daily.items():
        records.append({"ID": House, "date": Date, "Energy_KWH": energy})
Output = pd.DataFrame(records, columns=["ID", "date", "Energy_KWH"])

# Attach each house's static features to its daily energy rows.
for House in range(1, 29):
    Features = pd.DataFrame(HouseFeatures.loc[HouseFeatures["House"] == House])
    if not Features.empty:
        FeaturesIndex = Features.index
        for column in Features.columns:
            Output.loc[Output["ID"] == House, column] = Features[column][FeaturesIndex[0]]

# Encode house type ordinally by mean daily energy of houses without
# rental units (RUs == 0): lowest-consuming type gets code 0.
type_means = []
for tipe in HouseFeatures["HouseType"].unique():
    buffer = Output.loc[(Output["HouseType"] == tipe) & (Output["RUs"] == 0)]
    type_means.append({"Type": tipe, "mean": buffer["Energy_KWH"].mean()})
HouseType = pd.DataFrame(type_means, columns=["Type", "mean"])
print(HouseType)
print(HouseType.sort_values("mean")["Type"].unique())
for code, tipe in enumerate(HouseType.sort_values("mean")["Type"].unique()):
    Output.loc[Output["HouseType"] == tipe, "HouseType"] = code

# Flag holidays and weekends (weekday() > 4 means Saturday/Sunday).
# Dates are expected in ISO 'YYYY-MM-DD' form.
for Date in Output["date"].unique():
    year, month, day = (int(part) for part in Date.split("-"))
    Holiday = 0
    if not Holidays.loc[Holidays["date"] == Date].empty:
        Holiday = 1
    if datetime(year, month, day).weekday() > 4:
        Holiday = 1
    Output.loc[Output["date"] == Date, "Holiday"] = Holiday
print("--------------------------- Energy by Day ------------------------------")
print(Output.head())
print("------------------------------------------------------------------------")

# Export dataset
Output.to_csv(r"Data/HouseHold/HouseData.csv", sep = ",", index = False)
# Built-ins
import os
import warnings
import datetime
import threading
# Package
import __init__
from elf import utils
from elf.webio import get_soup
from elf.webio import download_page
from elf.parsing import parsetable
warnings.warn('EDGAR search-by-text can only search 3 years back. Use alternative downloader if data older than 3 years is needed.')
SEARCH_MAIN = 'https://searchwww.sec.gov/EDGARFSClient/jsp/EDGAR_MainAccess.jsp?'
def __dateargs__(startMonth=None, startDay=None, startYear = None,
endMonth = None, endDay = None, endYear = None):
now = datetime.datetime.today().date()
if(startMonth or startDay or startYear): assert startMonth and startDay and startYear, 'Must define starting month, day, and year'
if(endMonth or endDay or endYear ): assert endMonth and endDay and endYear, 'Must define ending month, day, and year'
if(startMonth and not endMonth): endMonth = now.month
if(startDay and not endDay ): endDay = now.day
if(startYear and not endYear ): endYear = now.year
if(endMonth and not startMonth): startMonth = endMonth
if(endDay and not startDay ): startDay = endDay
if(endYear and not startYear ): startYear = endYear - 4
if(endYear and startYear):
assert endYear - startYear <= 4, 'Cannot search more than 3 years back'
return startMonth, startDay, startYear, endMonth, endDay, endYear
def __formatlink__(text, form_type = None, stemming = True,
                   comp=None, cik=None, sic=None,
                   startMonth = None, startDay = None, startYear = None,
                   endMonth = None, endDay = None, endYear = None,
                   page = 0):
    '''Build the EDGAR full-text search URL for one page of results.

    text         -- phrase to search for
    form_type    -- SEC form type (spaces stripped, upper-cased); None searches all
    stemming     -- whether EDGAR should stem the search terms
    comp/cik/sic -- optional company-name / CIK / SIC filter; first one set wins
    start*/end*  -- date-range bounds, normalized via __dateargs__
    page         -- zero-based result page (100 results per page)
    '''
    startMonth, startDay, startYear, endMonth, endDay, endYear = __dateargs__(
            startMonth, startDay, startYear,
            endMonth, endDay, endYear)
    # Only build and emit the date filter when the caller supplied any
    # date component (the original formatted None components unconditionally).
    if startMonth or startDay or startYear or endMonth or endDay or endYear:
        from_date = 'fromDate={m}/{d}/{y}'.format(m=str(startMonth).zfill(2), d=str(startDay).zfill(2), y=startYear)
        to_date = 'toDate={m}/{d}/{y}'.format(m=str(endMonth).zfill(2), d=str(endDay).zfill(2), y=endYear)
        date_arg = '&{from_date}&{to_date}&'.format(from_date=from_date, to_date=to_date)
    else:
        date_arg = ''
    # At most one entity filter is applied, in company > CIK > SIC priority.
    filter_query = '&queryCo={}'.format(comp) if comp else \
                   '&queryCik={}'.format(cik) if cik else \
                   '&querySic={}'.format(sic) if sic else ''
    text_opt = 'search_text={}'.format(text)
    sort_opt = 'sort=Date'
    form_opt = 'formType=Form{}'.format(form_type.replace(' ','').upper()) if form_type else 'formType=1'
    adv = 'isAdv=true'
    stem = 'stemming=true' if stemming else 'stemming=false'
    res = 'numResults=100'
    # startDoc is 1-based; page 0 omits the offset parameter entirely.
    offset = 'startDoc={}&'.format(1 + (100*page)) if page > 0 else ''
    # Fix: the original computed sort_opt/adv/res and then ignored them,
    # hardcoding their values in the format string. Wire them in (the
    # resulting URL is byte-identical).
    return '{link}{text}&{sort}&{form}&{adv}&{date}{stem}&{offset}{res}{fquery}'.format(
            link=SEARCH_MAIN, text=text_opt, sort=sort_opt, form=form_opt, adv=adv,
            date=date_arg, stem=stem, offset=offset, res=res, fquery=filter_query)
def __searchresults__(soup):
    '''Yield [link, company, date, cik, sic] for each hit on a result page.

    EDGAR renders each result as a group of <tr> rows introduced by a
    separator row carrying the 'infoBorder' CSS class: the row after the
    separator holds the filing date and the document anchor, and the row
    after that the CIK/SIC identifiers. Missing identifiers yield None.
    '''
    x = soup.find_all('tr')
    for i, xx in enumerate(x):
        if('class' in xx.attrs and 'infoBorder' in xx.attrs['class']):
            try:
                # Filing date with '/' stripped so it is filename-safe.
                date = x[i+1].find_all('td')[0].text.replace('/','')
                # The anchor's href embeds the document URL and company
                # name as single-quoted segments (presumably a javascript
                # call) -- indices 1 and 3 after splitting on the quote.
                # NOTE(review): confirm against a live results page.
                _ = x[i+1].find_all('td')[1].find('a').attrs['href'].split("'")
                link = _[1]
                comp = _[3]
                edgar_ids = x[i+2].find_all('td')[1].find_all('a')
                try: cik = edgar_ids[0].text
                except IndexError: cik = None
                try: sic = edgar_ids[1].text
                except IndexError: sic = None
                yield [link, comp, date, cik, sic]
            except AttributeError:
                # Expected rows are absent -- e.g. the trailing
                # 'infoBorder' separator with no result after it.
                break
def __getlinks__(text, form_type = None, stemming = True,
                 comp=None, cik=None, sic=None,
                 startMonth = None, startDay = None, startYear = None,
                 endMonth = None, endDay = None, endYear = None):
    '''generator for each link and document info for each search result

    Pages through the EDGAR search results (100 per page via
    __formatlink__) and yields each parsed result until the last page.
    '''
    # i: zero-based page index; count: results yielded so far overall.
    i, count = 0, 0
    while(True):
        qlink = __formatlink__(text, form_type = form_type, stemming = stemming,
                               comp=comp, cik=cik, sic=sic,
                               startMonth = startMonth, startDay = startDay, startYear = startYear,
                               endMonth = endMonth, endDay = endDay, endYear = endYear,
                               page = i)
        s = get_soup(qlink)
        #Check if final page
        # The header cell's first token is taken as the 1-based index of
        # the page's first result (header presumably reads 'N to M of T'
        # -- verify against a live page). Missing header => no results.
        try: pmin = int(s.find(id='header').find('td').text.split()[0])
        except AttributeError as e: return None
        # Stop once the page starts before what was already yielded,
        # i.e. EDGAR has begun repeating results past the last page.
        if(pmin < count): return None
        for link in __searchresults__(s):
            yield link
            count+=1
        i+=1
def download_by_text(text, outputpath, form_type = None, stemming = True,
                     comp=None, cik=None, sic=None,
                     startMonth = None, startDay = None, startYear = None,
                     endMonth = None, endDay = None, endYear = None,
                     downloadpoolsize = 100):
    '''Download every document matching a full-text EDGAR search.

    Each hit is fetched on its own thread, with at most `downloadpoolsize`
    downloads running concurrently, and saved under `outputpath` as
    '<form>_<company>_<date>.htm'. A tab-separated log of all hits is
    written to '<outputpath>/log.txt'. Returns the number of documents
    downloaded.
    '''
    os.makedirs(outputpath, exist_ok=True)
    link_iter = __getlinks__(text, form_type = form_type, stemming = stemming,
                             comp=comp, cik=cik, sic=sic,
                             startMonth = startMonth, startDay = startDay, startYear = startYear,
                             endMonth = endMonth, endDay = endDay, endYear = endYear)
    # Semaphore caps how many downloads run at once.
    download_lock = threading.Semaphore(downloadpoolsize)
    threads = []

    def _download(link, filename):
        # Receives link/filename as arguments: the original closed over
        # the loop variables, which the loop rebinds while threads run,
        # so a thread could download the wrong or a duplicate document.
        with download_lock:
            download_page(link, filename)

    logfile = os.path.join(outputpath, 'log.txt')
    with open(logfile, mode='w', encoding='UTF-8', errors='ignore') as w:
        for link_data in link_iter:
            link, comp, date, cik, sic = link_data
            filename = os.path.join(outputpath, '{form_type}{company}_{date}.htm'.format(
                form_type=form_type+'_' if form_type else '', company=comp, date=date))
            w.write('{}\n'.format('\t'.join(map(lambda x: '' if not x else x, link_data))))
            t = threading.Thread(target=_download, args=(link, filename))
            threads.append(t)
            t.start()
    for thread in threads: thread.join()
    return len(threads)
# Built-ins
import os
import warnings
import datetime
import threading
# Package
import __init__
from elf import utils
from elf.webio import get_soup
from elf.webio import download_page
from elf.parsing import parsetable
warnings.warn('EDGAR search-by-text can only search 3 years back. Use alternative downloader if data older than 3 years is needed.')
SEARCH_MAIN = 'https://searchwww.sec.gov/EDGARFSClient/jsp/EDGAR_MainAccess.jsp?'
def __dateargs__(startMonth=None, startDay=None, startYear = None,
endMonth = None, endDay = None, endYear = None):
now = datetime.datetime.today().date()
if(startMonth or startDay or startYear): assert startMonth and startDay and startYear, 'Must define starting month, day, and year'
if(endMonth or endDay or endYear ): assert endMonth and endDay and endYear, 'Must define ending month, day, and year'
if(startMonth and not endMonth): endMonth = now.month
if(startDay and not endDay ): endDay = now.day
if(startYear and not endYear ): endYear = now.year
if(endMonth and not startMonth): startMonth = endMonth
if(endDay and not startDay ): startDay = endDay
if(endYear and not startYear ): startYear = endYear - 4
if(endYear and startYear):
assert endYear - startYear <= 4, 'Cannot search more than 3 years back'
return startMonth, startDay, startYear, endMonth, endDay, endYear
def __formatlink__(text, form_type = None, stemming = True,
                   comp=None, cik=None, sic=None,
                   startMonth = None, startDay = None, startYear = None,
                   endMonth = None, endDay = None, endYear = None,
                   page = 0):
    '''Build the EDGAR full-text search URL for one page of results.

    text         -- phrase to search for
    form_type    -- SEC form type (spaces stripped, upper-cased); None searches all
    stemming     -- whether EDGAR should stem the search terms
    comp/cik/sic -- optional company-name / CIK / SIC filter; first one set wins
    start*/end*  -- date-range bounds, normalized via __dateargs__
    page         -- zero-based result page (100 results per page)
    '''
    startMonth, startDay, startYear, endMonth, endDay, endYear = __dateargs__(
            startMonth, startDay, startYear,
            endMonth, endDay, endYear)
    # Only build and emit the date filter when the caller supplied any
    # date component (the original formatted None components unconditionally).
    if startMonth or startDay or startYear or endMonth or endDay or endYear:
        from_date = 'fromDate={m}/{d}/{y}'.format(m=str(startMonth).zfill(2), d=str(startDay).zfill(2), y=startYear)
        to_date = 'toDate={m}/{d}/{y}'.format(m=str(endMonth).zfill(2), d=str(endDay).zfill(2), y=endYear)
        date_arg = '&{from_date}&{to_date}&'.format(from_date=from_date, to_date=to_date)
    else:
        date_arg = ''
    # At most one entity filter is applied, in company > CIK > SIC priority.
    filter_query = '&queryCo={}'.format(comp) if comp else \
                   '&queryCik={}'.format(cik) if cik else \
                   '&querySic={}'.format(sic) if sic else ''
    text_opt = 'search_text={}'.format(text)
    sort_opt = 'sort=Date'
    form_opt = 'formType=Form{}'.format(form_type.replace(' ','').upper()) if form_type else 'formType=1'
    adv = 'isAdv=true'
    stem = 'stemming=true' if stemming else 'stemming=false'
    res = 'numResults=100'
    # startDoc is 1-based; page 0 omits the offset parameter entirely.
    offset = 'startDoc={}&'.format(1 + (100*page)) if page > 0 else ''
    # Fix: the original computed sort_opt/adv/res and then ignored them,
    # hardcoding their values in the format string. Wire them in (the
    # resulting URL is byte-identical).
    return '{link}{text}&{sort}&{form}&{adv}&{date}{stem}&{offset}{res}{fquery}'.format(
            link=SEARCH_MAIN, text=text_opt, sort=sort_opt, form=form_opt, adv=adv,
            date=date_arg, stem=stem, offset=offset, res=res, fquery=filter_query)
def __searchresults__(soup):
    '''Yield [link, company, date, cik, sic] for each hit on a result page.

    EDGAR renders each result as a group of <tr> rows introduced by a
    separator row carrying the 'infoBorder' CSS class: the row after the
    separator holds the filing date and the document anchor, and the row
    after that the CIK/SIC identifiers. Missing identifiers yield None.
    '''
    x = soup.find_all('tr')
    for i, xx in enumerate(x):
        if('class' in xx.attrs and 'infoBorder' in xx.attrs['class']):
            try:
                # Filing date with '/' stripped so it is filename-safe.
                date = x[i+1].find_all('td')[0].text.replace('/','')
                # The anchor's href embeds the document URL and company
                # name as single-quoted segments (presumably a javascript
                # call) -- indices 1 and 3 after splitting on the quote.
                # NOTE(review): confirm against a live results page.
                _ = x[i+1].find_all('td')[1].find('a').attrs['href'].split("'")
                link = _[1]
                comp = _[3]
                edgar_ids = x[i+2].find_all('td')[1].find_all('a')
                try: cik = edgar_ids[0].text
                except IndexError: cik = None
                try: sic = edgar_ids[1].text
                except IndexError: sic = None
                yield [link, comp, date, cik, sic]
            except AttributeError:
                # Expected rows are absent -- e.g. the trailing
                # 'infoBorder' separator with no result after it.
                break
def __getlinks__(text, form_type = None, stemming = True,
                 comp=None, cik=None, sic=None,
                 startMonth = None, startDay = None, startYear = None,
                 endMonth = None, endDay = None, endYear = None):
    '''generator for each link and document info for each search result

    Pages through the EDGAR search results (100 per page via
    __formatlink__) and yields each parsed result until the last page.
    '''
    # i: zero-based page index; count: results yielded so far overall.
    i, count = 0, 0
    while(True):
        qlink = __formatlink__(text, form_type = form_type, stemming = stemming,
                               comp=comp, cik=cik, sic=sic,
                               startMonth = startMonth, startDay = startDay, startYear = startYear,
                               endMonth = endMonth, endDay = endDay, endYear = endYear,
                               page = i)
        s = get_soup(qlink)
        #Check if final page
        # The header cell's first token is taken as the 1-based index of
        # the page's first result (header presumably reads 'N to M of T'
        # -- verify against a live page). Missing header => no results.
        try: pmin = int(s.find(id='header').find('td').text.split()[0])
        except AttributeError as e: return None
        # Stop once the page starts before what was already yielded,
        # i.e. EDGAR has begun repeating results past the last page.
        if(pmin < count): return None
        for link in __searchresults__(s):
            yield link
            count+=1
        i+=1
def download_by_text(text, outputpath, form_type = None, stemming = True,
                     comp=None, cik=None, sic=None,
                     startMonth = None, startDay = None, startYear = None,
                     endMonth = None, endDay = None, endYear = None,
                     downloadpoolsize = 100):
    '''Download every document matching a full-text EDGAR search.

    Each hit is fetched on its own thread, with at most `downloadpoolsize`
    downloads running concurrently, and saved under `outputpath` as
    '<form>_<company>_<date>.htm'. A tab-separated log of all hits is
    written to '<outputpath>/log.txt'. Returns the number of documents
    downloaded.
    '''
    os.makedirs(outputpath, exist_ok=True)
    link_iter = __getlinks__(text, form_type = form_type, stemming = stemming,
                             comp=comp, cik=cik, sic=sic,
                             startMonth = startMonth, startDay = startDay, startYear = startYear,
                             endMonth = endMonth, endDay = endDay, endYear = endYear)
    # Semaphore caps how many downloads run at once.
    download_lock = threading.Semaphore(downloadpoolsize)
    threads = []

    def _download(link, filename):
        # Receives link/filename as arguments: the original closed over
        # the loop variables, which the loop rebinds while threads run,
        # so a thread could download the wrong or a duplicate document.
        with download_lock:
            download_page(link, filename)

    logfile = os.path.join(outputpath, 'log.txt')
    with open(logfile, mode='w', encoding='UTF-8', errors='ignore') as w:
        for link_data in link_iter:
            link, comp, date, cik, sic = link_data
            filename = os.path.join(outputpath, '{form_type}{company}_{date}.htm'.format(
                form_type=form_type+'_' if form_type else '', company=comp, date=date))
            w.write('{}\n'.format('\t'.join(map(lambda x: '' if not x else x, link_data))))
            t = threading.Thread(target=_download, args=(link, filename))
            threads.append(t)
            t.start()
    for thread in threads: thread.join()
    return len(threads)