input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Let viewers pay currency to boost currency payouts
for everyone in chat for x seconds"""
import json
import os, os.path
import operator
import time
import codecs
#---------------------------------------
# [Required] Script information
#---------------------------------------
ScriptName = "Festival"
Website = "https://www.twitch.tv/newtc"
Creator = "Newt"
Version = "1.0.0.0"
Description = "Allows users to select stories for me to tell"
#---------------------------------------
# Versions
#---------------------------------------
"""
1.0.0.0 - Initial release
"""
#---------------------------------------
# Variables
#---------------------------------------
settingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
story_file = os.path.join(os.path.dirname(__file__), "stories.json")
pending_file = os.path.join(os.path.dirname(__file__), "pending.json")
#---------------------------------------
# Classes
#---------------------------------------
class Settings:
    """Script settings container.

    Loads saved settings from *settingsFile* when it exists; otherwise
    falls back to the defaults below. Attribute names must match the
    fields declared in UI_Config.
    """
    def __init__(self, settingsFile=None):
        if settingsFile is None or not os.path.isfile(settingsFile):
            # No saved settings on disk -- use the defaults.
            self.Enabled = True
            self.OnlyLive = False
            self.Command = "!stories"
            self.SubmissionRewards = "100"
            return
        with codecs.open(settingsFile, encoding='utf-8-sig', mode='r') as settings_fh:
            self.__dict__ = json.load(settings_fh, encoding='utf-8-sig')

    def ReloadSettings(self, data):
        """Replace all settings with the JSON payload sent by the UI on save."""
        self.__dict__ = json.loads(data, encoding='utf-8-sig')
        return

    def SaveSettings(self, settingsFile):
        """Persist settings to the .json file plus a mirror .js file."""
        with codecs.open(settingsFile, encoding='utf-8-sig', mode='w+') as json_fh:
            json.dump(self.__dict__, json_fh, encoding='utf-8-sig')
        mirror_path = settingsFile.replace("json", "js")
        with codecs.open(mirror_path, encoding='utf-8-sig', mode='w+') as js_fh:
            js_fh.write("var settings = {0};".format(
                json.dumps(self.__dict__, encoding='utf-8-sig')))
        return
def ReloadSettings(jsonData):
    """[Required] Entry point the bot invokes when settings are saved in the UI."""
    global MySet
    # Delegate to the Settings instance created in Init().
    MySet.ReloadSettings(jsonData)
    return
class Story:
    """A single story suggestion: its info/link, payout value, and submitter."""
    # Class-level defaults; instances overwrite StoryInfo/StoryContributor
    # in __init__ and StoryValue via set_value().
    StoryInfo = ""
    StoryValue = 0
    StoryContributor = ""

    def __init__(self, info, contributor):
        self.StoryInfo = info
        self.StoryContributor = contributor

    def get_story_info(self):
        """Return the story's link/description text."""
        return self.StoryInfo

    def set_story_info(self, info):
        """Replace the story's link/description text."""
        self.StoryInfo = info

    def get_value(self):
        """Return the current payout value."""
        return self.StoryValue

    def set_value(self, value):
        """Set the payout value."""
        self.StoryValue = value

    def get_contributor(self):
        """Return the submitting user's name."""
        return self.StoryContributor

    def set_contributor(self, contributor):
        """Set the submitting user's name."""
        self.StoryContributor = contributor
class NewtClass:
    # Constants holder for the broadcaster's custom permission tier.
    # NOTE(review): neither attribute is referenced anywhere in this file --
    # presumably read by other scripts or the bot's permission system;
    # confirm before removing.
    NewtClass = True
    NewtStreamerLevel = 5
#---------------------------------------
# [Required] functions
#---------------------------------------
def Init():
    """[Required] Called once when the bot loads the script.

    Initializes module globals, loads saved settings, and makes sure both
    JSON data files (stories and pending submissions) exist on disk.
    """
    global MySet
    global m_Active
    global selected_stories
    global story_timer
    global last_removed_story
    m_Active = False
    selected_stories = []
    # First automatic roll happens 90 minutes (5400s) after load.
    story_timer = time.time() + 5400
    last_removed_story = {}
    # story_timer += time.time() + 600
    MySet = Settings(settingsFile)
    # Create empty data files when missing, announcing each one in chat.
    required_files = (
        (story_file, "No story file found. Creating a new one."),
        (pending_file, "No pending file found. Creating a new one."),
    )
    for path, missing_message in required_files:
        if not os.path.exists(path):
            Parent.SendStreamMessage(missing_message)
            with codecs.open(path, encoding='utf-8-sig', mode='w+') as fh:
                json.dump({}, fh, encoding='utf-8-sig', indent=2)
    # convert_to_new_format()
    return
def Execute(data):
    """[Required] Process every incoming chat/Discord message.

    Dispatches the "!stories"/"!story" sub-commands (display, selected,
    roll, pending, links, info, select, add, remove/subtract, restore,
    approve) and answers bare "!<story_name>" lookups with that story's
    stored info.

    Args:
        data: the bot's message object (GetParam/GetParamCount/User/...).
    """
    global selected_stories
    global story_timer
    if data.IsChatMessage():
        if data.GetParam(0).lower() == MySet.Command.lower() or data.GetParam(0).lower() == "!story":
            # Words after "<command> <subcommand>" form the story title:
            # "My Story" for display, "my_story" as the storage key.
            data_input = data.Message.split()[2:]
            title = ' '.join(data_input)
            data_input = '_'.join(data_input).lower()
            # Two-word commands.
            if data.GetParamCount() == 2:
                if data.GetParam(1).lower() == "display":
                    respond(data, display_story_list())
                if data.GetParam(1).lower() == "selected":
                    respond(data, parse_selected_stories())
                if data.GetParam(1).lower() == "roll":
                    # Only the broadcaster may force a roll.
                    if Parent.HasPermission(data.User, "user_specific", "newtc"):
                        if len(selected_stories) > 0:
                            roll_story()
                            story_timer = time.time() + 3600
                        else:
                            roll_unselected_story()
                if data.GetParam(1).lower() == "pending":
                    respond(data, display_pending_list())
                if data.GetParam(1).lower() == "links":
                    respond(data, display_pending_links())
            # Bare command: just show the list.
            if data.GetParamCount() == 1:
                respond(data, display_story_list())
            # Variable-length commands.
            if data.GetParamCount() > 1:
                if data.GetParam(1).lower() == "info":
                    respond(data, "Info for " + title + ": " + story_info(data_input))
                if data.GetParam(1).lower() == "select":
                    story_added = select_story(data_input, selected_stories, data.UserName)
                    # select_story returns True (added), False (duplicate),
                    # or None (unknown story -> stay silent).
                    if story_added is True:
                        respond(data, "Added " + title + " to the next story spin.")
                    elif story_added is False:
                        respond(data, "That story is already in the next story spin.")
                if data.GetParam(1).lower() == "add":
                    # Last parameter is the link; the words in between
                    # form the story name.
                    length = data.GetParamCount()
                    info = data.GetParam(length - 1)
                    name = []
                    for param in range(2, length - 1):
                        name.append(data.GetParam(param))
                    data_input = '_'.join(name)
                    contributor = data.UserName.lower()
                    if data_input:
                        add_story(data_input, info, contributor)
                # BUG FIX: the original `== ("remove" or "subtract")`
                # evaluated to `== "remove"`, so "subtract" never matched.
                if data.GetParam(1).lower() in ("remove", "subtract"):
                    remove_story(data_input)
                if data.GetParam(1).lower() == "restore":
                    re_add(data_input)
                if data.GetParam(1).lower() == "approve":
                    approve_story(data_input)
        # Any other "!word" message: answer with the story's info if known.
        if data.GetParam(0).lower()[0] == '!':
            if data.GetParam(0).lower()[1:] in load_story_list():
                respond(data, load_story_list()[data.GetParam(0).lower()[1:]])
    return
def Tick():
    """[Required] Periodic callback; rolls a story once the timer elapses."""
    global story_timer
    if time.time() <= story_timer:
        return
    # Timer expired: spin only when viewers have queued stories.
    if len(selected_stories) > 0:
        roll_story()
    # else:
    #     roll_unselected_story()
    # Next automatic roll in one hour.
    story_timer = time.time() + 3600
    return
def respond(data, output):
    """Send *output* back over whichever channel *data* arrived on."""
    if data.IsFromDiscord():
        # Discord: DM for whispers, channel message otherwise.
        if data.IsWhisper():
            Parent.SendDiscordDM(data.User, output)
        else:
            Parent.SendDiscordMessage(output)
    elif data.IsWhisper():
        # Stream whisper goes back as a whisper.
        Parent.SendStreamWhisper(data.UserName, output)
    else:
        Parent.SendStreamMessage(str(output))
def load_story_list():
    """Read the stories JSON file and return it as a dict."""
    with codecs.open(story_file, encoding='utf-8-sig', mode='r') as story_fh:
        return json.load(story_fh, encoding='utf-8-sig')
def load_pending_list():
    """Read the pending-submissions JSON file and return it as a dict."""
    with codecs.open(pending_file, encoding='utf-8-sig', mode='r') as pending_fh:
        return json.load(pending_fh, encoding='utf-8-sig')
# display all available stories
def display_story_list():
    """Return all approved stories as a comma-separated, title-cased string.

    Storage keys look like "my_story"; the output renders them as
    "My Story, Other Story". Returns '' when there are no stories.
    """
    data = load_story_list()
    pretty_names = []
    for key in data.keys():
        # Uppercase the first letter of each underscore-separated word.
        # BUG FIX: guard empty segments (e.g. from "a__b"), which crashed
        # the original word[0] indexing.
        words = [word[0].upper() + word[1:] if word else '' for word in key.split("_")]
        pretty_names.append(' '.join(words))
    return ', '.join(pretty_names)
# display all pending stories
def display_pending_list():
    """Return all pending stories as a comma-separated, title-cased string.

    Mirrors display_story_list() but reads the pending file. Returns ''
    when there is nothing pending.
    """
    data = load_pending_list()
    pretty_names = []
    for key in data.keys():
        # BUG FIX: guard empty segments (e.g. from "a__b"), which crashed
        # the original word[0] indexing.
        words = [word[0].upper() + word[1:] if word else '' for word in key.split("_")]
        pretty_names.append(' '.join(words))
    return ', '.join(pretty_names)
def display_pending_links():
    """Return every pending story as "key: link", joined by ' , '.

    NOTE(review): unlike the other display helpers, the trailing separator
    is never trimmed here; kept as-is to preserve chat output.
    """
    data = load_pending_list()
    return ''.join(key + ": " + data[key]["info"] + ' , ' for key in data.keys())
def parse_selected_stories():
    """Return a readable listing of the currently selected stories.

    Falls back to a prompt message when nothing has been selected yet.
    """
    global selected_stories
    if not selected_stories:
        return "There are no stories selected! Please select one."
    listing = ''
    for story_key in selected_stories:
        pretty = ''
        # Uppercase the first letter of each underscore-separated word.
        for word in story_key.split("_"):
            pretty += word.replace(word[0], word[0].upper(), 1) + " "
        listing += pretty[:-1] + ', '
    # Keys were already split on '_', so this is normally a no-op.
    listing = listing.replace('_', ' ')
    return listing[:-2]
# returns the story info
def story_info(story):
    """Return the stored info/link for *story*, or a not-found message."""
    data = load_story_list()
    key = story.lower()
    if key not in data:
        return "The story " + story + " is not in the story selection yet. Send me a link and I can add it."
    return data[key]["info"]
# parses the story's name into an easily readable string
def story_name(story):
    """Convert a storage key like "my_story" into "My Story".

    Returns '' when the story is not in the story file.
    """
    data = load_story_list()
    if story.lower() not in data.keys():
        return ""
    pretty = ''
    for word in story.split("_"):
        pretty += word.replace(word[0], word[0].upper(), 1) + " "
    return pretty[:-1]
# select a story
def select_story(story, selected_stories, user):
    """Queue *story* for the next spin.

    Returns True when newly added, False when it was already queued, and
    None (implicit) when the story key does not exist at all.
    Side effects: moves the next roll up to 30 minutes away, and when
    someone other than the contributor selects it, adds 50 points to the
    story's payout and rewrites the story file.
    """
    global story_timer
    data = load_story_list()
    if story.lower() in data.keys():
        if story.lower() not in selected_stories:
            selected_stories.append(story.lower())
            # A fresh selection shortens the roll timer to 30 minutes.
            story_timer = time.time() + 1800
            if data[story.lower()]["contributor"] != user.lower():
                # add more points each time anyone other than the user selects it
                data[story.lower()]["value"] += 50
                with codecs.open(story_file, encoding='utf-8-sig', mode='w+') as f:
                    json.dump(data, f, encoding='utf-8-sig', indent=2)
            return True
        else:
            return False
# select a story from chosen stories
def roll_story():
    """Pick a random selected story, announce it, pay the contributor if
    they are in chat, and remove the story from the story file.

    Returns the chosen story key.
    """
    global selected_stories
    # NOTE(review): assumes Parent.GetRandom's upper bound is exclusive --
    # confirm against the bot API, otherwise this can index past the end.
    choice = selected_stories[Parent.GetRandom(0, len(selected_stories))]
    retval = "The story that was selected was: " + story_name(choice) + ". You can follow along at " + story_info(choice)
    Parent.SendStreamMessage(retval)
    # reset selected stories for the next round
    selected_stories = []
    # payout only when the contributor is currently in chat
    data = load_story_list()
    if (data[choice.lower()]["contributor"] in Parent.GetViewerList()) and (data[choice]["value"] > 0):
        user = data[choice.lower()]["contributor"]
        value = data[choice.lower()]["value"]
        Parent.AddPoints(user.lower(), user.lower(), value)
    # remove the story we rolled from the list
    remove_story(choice.lower())
    return choice
def roll_unselected_story():
    """Pick a random story from the full story list (used when nothing was
    selected), announce it, pay the contributor if in chat, and remove it.

    Returns the chosen story key.
    """
    data = load_story_list()
    # BUG FIX: wrap in list() so indexing works on Python 3 dict views as
    # well as Python 2 key lists.
    stories = list(data.keys())
    choice = stories[Parent.GetRandom(0, len(stories))]
    retval = ("Rolling from the main story list. The story that was selected was: "
              + story_name(choice) + ". You can follow along at " + story_info(choice))
    Parent.SendStreamMessage(retval)
    # Pay out only when the contributor is in chat and a bounty exists.
    # NOTE(review): mixed data[choice.lower()] / data[choice] lookups kept
    # from the original -- keys appear to be lowercased on select but not
    # on add; confirm before unifying.
    if (data[choice.lower()]["contributor"] in Parent.GetViewerList()) and (data[choice]["value"] > 0):
        user = data[choice.lower()]["contributor"]
        value = data[choice.lower()]["value"]
        Parent.AddPoints(user.lower(), user.lower(), value)
    remove_story(choice.lower())
    return choice
# add a story
def add_story(story, info, contributor):
retval = False
# if the counter already exists
if story in load_pending_list() or load_story_list():
Parent.SendStreamMessage("That story already exists.")
# else | |
<gh_stars>0
import datetime
import json
from django.core.exceptions import ObjectDoesNotExist
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User, Group
from account.models import Account
from core.models import BudgetModel, RigType, UseType, IncomeType, VehicleMake, VehicleModel, VehicleType, \
VehiclePurchaseType, VehicleStatus, Satisfaction, BudgetGroup, BudgetCategory
from compare.models import MyBudgetGroup, MyBudgetCategory
from .models import RVHousehold, Member, HouseholdMembers, HouseholdInvite, Vehicle
from .forms import MyInfoForm, HouseholdProfileForm, InviteMemberForm, VehicleForm
# For now, all test cases for household are defined in one class
class HouseholdTest(TestCase):
    @classmethod
    def setUpClass(cls):
        """
        Set up shared fixture data for all test methods in this class:
        core reference models, a forum auth group, template budget
        objects, and a cast of users ('alex' .. 'annie') in various
        onboarding states, plus two household invites.
        :return:
        """
        """
        Create core model objects needed for household objects
        """
        # Reuse the budget model if a previous run left one behind.
        try:
            budget_model = BudgetModel.objects.get(budget_model='RVHousehold')
        except ObjectDoesNotExist:
            budget_model = BudgetModel()
            budget_model.budget_model = 'RVHousehold'
            budget_model.budget_model_description = 'Budget model for RV community'
            budget_model.save()
        # Reference/lookup rows used by the household and vehicle fixtures.
        rig = RigType()
        rig.rig_type = 'Motorhome'
        rig.rig_type_description = 'Motorhome'
        rig.save()
        use = UseType()
        use.use_type = 'Full-time'
        use.use_type_description = 'Full-time'
        use.save()
        income = IncomeType()
        income.income_type = 'Self-employed'
        income.income_type_description = 'Self-employed'
        income.save()
        make = VehicleMake()
        make.filter = 'rv'
        make.make = 'Tiffin'
        make.save()
        model = VehicleModel()
        model.make = make
        model.model_name = 'Allegro Bus'
        model.save()
        v_type = VehicleType()
        v_type.filter = 'rv'
        v_type.type = 'Motorhome'
        v_type.type_description = 'Your RV is a motorhome.'
        v_type.save()
        purchase = VehiclePurchaseType()
        purchase.purchase_type = 'Used-Private'
        purchase.purchase_description = 'Purchased directly from an individual.'
        purchase.save()
        status = VehicleStatus()
        status.vehicle_status = 'Owner'
        status.vehicle_status_description = 'Still owned by me.'
        status.save()
        satisfaction = Satisfaction()
        satisfaction.satisfaction_index = 5
        satisfaction.satisfaction_description = 'Love it!'
        satisfaction.satisfaction_definition = 'Would definitely purchase again.'
        satisfaction.save()
        """
        Create auth group for forum permissions
        """
        group = Group()
        group.name = 'forum_customers'
        group.save()
        """
        Create template budget objects
        """
        budget_group = BudgetGroup()
        budget_group.budget_model = budget_model
        budget_group.group_name = 'Health Care'
        budget_group.group_description = 'Expenses associated with staying well'
        budget_group.group_list_order = 1
        budget_group.group_perma_key = 'perma-key-value'
        budget_group.save()
        category = BudgetCategory()
        category.budget_group = budget_group
        category.category = 'Insurance'
        category.category_perma_key = 'perma-key-value'
        category.save()
        """
        Create users and associated objects for dashboard test cases.
        """
        # 1. Just created account, has not setup household or provided personal details
        User.objects.create_user('alex', email='<EMAIL>', password='password')
        # 2. New account, only has personal details
        user = User.objects.create_user('barney', email='<EMAIL>', password='password')
        # Account is created for the user elsewhere (presumably by a
        # post-save signal in the account app) -- fetch, don't create.
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-4393'
        member.owner = True
        member.newsletter = True
        member.save()
        user.first_name = 'Barney'
        user.last_name = 'Balderdash'
        user.save()
        # 3. New account, has provided personal details and household information, but no vehicles
        user = User.objects.create_user('chuck', email='<EMAIL>', password='password')
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-4401'
        member.owner = True
        member.newsletter = True
        member.save()
        user.first_name = 'Charles'
        user.last_name = 'Carter'
        user.save()
        household = RVHousehold()
        household.members_in_household = 2
        household.oldest_birthyear = 1950
        household.budget_model = budget_model
        household.opt_in_contribute = True
        # Paid far into the future so this account reads as active.
        household.paid_through = datetime.datetime.now().date() + datetime.timedelta(days=1000)
        household.subscription_status = 'Beta'
        household.start_year = 2000
        household.rig_type = rig
        household.use_type = use
        household.income_type = income
        household.pets_dog = 1
        household.save()
        household_member = HouseholdMembers()
        household_member.member_account = account
        household_member.household_membership = household
        household_member.save()
        # 4. Expired account
        user = User.objects.create_user('dave', email='<EMAIL>', password='password')
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-4402'
        member.owner = True
        member.newsletter = True
        member.save()
        user.first_name = 'David'
        user.last_name = 'Davis'
        user.save()
        household = RVHousehold()
        household.members_in_household = 1
        household.oldest_birthyear = 1951
        household.budget_model = budget_model
        household.opt_in_contribute = True
        # Paid through yesterday -> expired subscription.
        household.paid_through = datetime.datetime.now().date() - datetime.timedelta(days=1)
        household.subscription_status = 'Beta'
        household.start_year = 1985
        household.rig_type = rig
        household.use_type = use
        household.income_type = income
        household.pets_dog = 0
        household.save()
        household_member = HouseholdMembers()
        household_member.member_account = account
        household_member.household_membership = household
        household_member.save()
        """
        Create users and associated objects for my info test cases.
        """
        User.objects.create_user('eric', email='<EMAIL>', password='password')
        """
        Create users and associated objects for household test cases.
        """
        User.objects.create_user('fred', email='<EMAIL>', password='password')
        """
        Create users and associated objects for household member test cases.
        """
        # Expired membership
        user = User.objects.create_user('greg', email='<EMAIL>', password='password')
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-4410'
        member.owner = True
        member.newsletter = True
        member.save()
        user.first_name = 'Greg'
        user.last_name = 'Gardiner'
        user.save()
        household = RVHousehold()
        household.members_in_household = 2
        household.oldest_birthyear = 1954
        household.budget_model = budget_model
        household.opt_in_contribute = True
        household.paid_through = datetime.datetime.now().date() - datetime.timedelta(days=1)
        household.subscription_status = 'Beta'
        household.start_year = 1982
        household.rig_type = rig
        household.use_type = use
        household.income_type = income
        household.pets_dog = 1
        household.save()
        household_member = HouseholdMembers()
        household_member.member_account = account
        household_member.household_membership = household
        household_member.save()
        # Current membership
        user = User.objects.create_user('harry', email='<EMAIL>', password='password')
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-4411'
        member.owner = True
        member.newsletter = True
        member.save()
        user.first_name = 'Harry'
        user.last_name = 'Hughes'
        user.save()
        household = RVHousehold()
        household.members_in_household = 2
        household.oldest_birthyear = 1951
        household.budget_model = budget_model
        household.opt_in_contribute = True
        household.paid_through = datetime.datetime.now().date() + datetime.timedelta(days=1000)
        household.subscription_status = 'Beta'
        household.start_year = 1980
        household.rig_type = rig
        household.use_type = use
        household.income_type = income
        household.pets_dog = 2
        household.save()
        household_member = HouseholdMembers()
        household_member.member_account = account
        household_member.household_membership = household
        household_member.save()
        # Member of harry's household (non-owner; reuses `household` above)
        user = User.objects.create_user('annie', email='<EMAIL>', password='password')
        account = Account.objects.get(user=user)
        member = Member()
        member.account = account
        member.phone_number = '415-413-5511'
        member.owner = False
        member.newsletter = True
        member.save()
        user.first_name = 'Annie'
        user.last_name = 'Arneau-Hughes'
        user.save()
        household_member = HouseholdMembers()
        household_member.member_account = account
        household_member.household_membership = household
        household_member.save()
        # Random invites for tests
        invite = HouseholdInvite()
        invite.invite_household = household
        invite.email = '<EMAIL>'
        invite.security_code = '1234567'
        invite.invite_date = datetime.datetime.now().date()
        invite.save()
        invite = HouseholdInvite()
        invite.invite_household = household
        invite.email = '<EMAIL>'
        invite.security_code = '1234567'
        invite.invite_date = datetime.datetime.now().date()
        invite.save()
    @classmethod
    def tearDownClass(cls):
        # No explicit cleanup: Django's test runner discards test-database
        # state between test classes.
        pass
"""
Test the models
"""
def test_models(self):
budget_model = BudgetModel.objects.get(budget_model='RVHousehold')
self.assertEquals(str(budget_model), 'RVHousehold')
rig = RigType.objects.get(rig_type='Motorhome')
self.assertEquals(str(rig), 'Motorhome')
use = UseType.objects.get(use_type='Full-time')
self.assertEquals(str(use), 'Full-time')
income = IncomeType.objects.get(income_type='Self-employed')
self.assertEquals(str(income), 'Self-employed')
make = VehicleMake.objects.get(make='Tiffin')
self.assertEquals(str(make), 'Tiffin')
v_model = VehicleModel.objects.get(model_name='Allegro Bus')
self.assertEquals(str(v_model), 'Allegro Bus')
v_type = VehicleType.objects.get(type='Motorhome')
self.assertEquals(str(v_type), 'Motorhome')
purchase = VehiclePurchaseType.objects.get(purchase_type='Used-Private')
self.assertEquals(str(purchase), 'Used-Private')
status = VehicleStatus.objects.get(vehicle_status='Owner')
self.assertEquals(str(status), 'Owner')
satisfaction = Satisfaction.objects.get(satisfaction_index=5)
self.assertEquals(str(satisfaction), 'Love it!')
member = Member.objects.get(phone_number='415-413-4393')
self.assertEquals(str(member), '415-413-4393')
household = RVHousehold.objects.get(pk=1)
self.assertEquals(str(household), '1')
household_member = HouseholdMembers.objects.all()[0]
self.assertEquals(str(household_member), 'Member key: {} Household key: {}'.format(
household_member.member_account,
household_member.household_membership))
budget_group = BudgetGroup.objects.get(group_name='Health Care')
self.assertEquals(str(budget_group), 'Health Care')
category = BudgetCategory.objects.get(category='Insurance')
self.assertEquals(str(category), 'Insurance')
invite = HouseholdInvite.objects.get(email='<EMAIL>')
self.assertEquals(str(invite), '<EMAIL>')
"""
Test various states and conditions for the dashboard view
"""
def test_dashboard_new_user(self):
"""
New user, hasn't setup personal information or household yet
:return:
"""
self.client = Client()
logged_in = self.client.login(username='alex', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:household_dashboard'), secure=True)
summary = response.context['summary']
# Tags indicating need for personal info and household information should exist
self.assertEquals(summary['need_myinfo'], 'Please take a moment to provide your name and a phone number.')
self.assertEquals(summary['need_household'],
'To activate your free trial, please setup your household information.')
# And other tags should not exist until after those things are provided
with self.assertRaises(KeyError):
test = summary['need_vehicles']
test = summary['free_trial']
def test_dashboard_new_user_personal_info_provided(self):
"""
New user, has setup personal information but not household yet
:return:
"""
self.client = Client()
logged_in = self.client.login(username='barney', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:household_dashboard'), secure=True)
summary = response.context['summary']
# Personal info for Barney should exist
self.assertEquals(summary['first_name'], 'Barney')
self.assertEquals(summary['last_name'], 'Balderdash')
self.assertEquals(summary['phone_number'], '415-413-4393')
# Household info is still missing
self.assertEquals(summary['need_household'],
'To activate your free trial, please setup your household information.')
# And other tags should not exist until after those things are provided
with self.assertRaises(KeyError):
test = summary['need_vehicles']
test = summary['free_trial']
def test_dashboard_new_user_with_household_setup(self):
"""
New user, has provided personal info and household
:return:
"""
self.client = Client()
logged_in = self.client.login(username='chuck', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:household_dashboard'), secure=True)
summary = response.context['summary']
# Personal info for Charles should exist
self.assertEquals(summary['first_name'], 'Charles')
self.assertEquals(summary['last_name'], 'Carter')
self.assertEquals(summary['phone_number'], '415-413-4401')
# Household info should exist
self.assertEquals(summary['start_year'], 2000)
self.assertEquals(summary['pets'], 1)
# And now info is displayed about subscription and vehicles
self.assertEquals(summary['need_vehicles'][0:14], 'Please specify')
self.assertEquals(summary['free_trial'][0:6], 'Thanks')
def test_dashboard_expired_subscription(self):
"""
Expired subscription
:return:
"""
self.client = Client()
logged_in = self.client.login(username='dave', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:household_dashboard'), secure=True)
summary = response.context['summary']
# Personal info for David should exist
self.assertEquals(summary['first_name'], 'David')
self.assertEquals(summary['last_name'], 'Davis')
self.assertEquals(summary['phone_number'], '415-413-4402')
# Household info should exist
self.assertEquals(summary['start_year'], 1985)
self.assertEquals(summary['pets'], 0)
# And now info is displayed about subscription and vehicles
self.assertEquals(summary['need_vehicles'][0:14], 'Please specify')
self.assertEquals(summary['expired'][0:30], 'Your subscription has expired.')
"""
Test my_info view and form
"""
def test_my_info_view(self):
self.client = Client()
logged_in = self.client.login(username='eric', password='password')
self.assertEquals(logged_in, True)
data = {
'first_name': 'Eric',
'last_name': 'Emmerson',
'phone_number': '415-413-4403',
'newsletter': True,
'owner': True
}
response = self.client.post(reverse('household:my_info'), data=data, secure=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username='eric')
account = Account.objects.get(user=user)
member = Member.objects.get(account=account)
self.assertEquals(user.last_name, 'Emmerson')
self.assertEquals(member.phone_number, '415-413-4403')
def test_my_info_form_empty(self):
data = | |
0-based.
use_short_seq_opt: A bool, whether using short sequence optimization.
Returns:
encoded: [B, 1, D].
updated_key_vec: [T, B, N, H].
updated_value_vec: [T, B, N, H].
Raises:
ValueError: If value projection is disabled.
"""
p = self.params
if not p.enable_value_proj:
raise ValueError('Value projection must be enabled for Transformer '
'machine translation.')
new_key_vec = query_vec
new_value_vec = query_vec
t, b, n, h = py_utils.GetShape(cached_key_vec, 4)
# Project inputs to key, value and query. Each has shape [B, 1, N, H].
new_key_proj = self.key.FProp(theta.key, new_key_vec)
new_value_proj = self.value.FProp(theta.value, new_value_vec)
query_proj = self.query.FProp(theta.query, query_vec)
# The cached_key and cached_value have shape [T, B, N, H].
indices = tf.reshape(
tf.one_hot(time_step, t, dtype=cached_key_vec.dtype), [t, 1, 1, 1])
cached_key_vec += tf.reshape(new_key_proj, [1, b, n, h]) * indices
cached_value_vec += tf.reshape(new_value_proj, [1, b, n, h]) * indices
if paddings is None:
paddings = tf.zeros([b, t], dtype=new_key_vec.dtype)
encoded = self._DotAttenOneStep(
theta,
query_proj,
cached_key_vec,
cached_value_vec,
paddings,
segment_mask,
per_step_padding,
time_step=time_step,
use_short_seq_opt=use_short_seq_opt)
# Post projection.
encoded = self.post.FProp(theta.post, encoded)
return encoded, cached_key_vec, cached_value_vec
  @classmethod
  def FPropMeta(cls, p, *args):
    """Returns cost metadata (flops estimate and output shapes) for FProp.

    args[0]: query [b, t, d]; args[1]: key [b, s, d]; args[2]: value
    [b, s, d]; args[3]: paddings [b, s]; args[4]: per-step padding
    [b, t, s] if not None.
    """
    args = tuple(py_utils.Flatten(args))
    py_utils.CheckShapes(args)
    b, t, d = args[0]
    s = args[3][1]
    n = p.num_heads
    # O(b * t * s * d) computation for self-attention and there are four
    # projection layers, two of which has O(b * t * d^2), the other two has
    # O(b * s * d^2). Each multiple-sum took 2 flops. Approximately
    # self_attention took 15 flops per element since softmax is expensive.
    flops = 15 * b * t * s * d + 2 * 2 * (b * t * d * d + b * s * d * d)
    # Outputs: encoded context (same shape as query) and the [b, n, t, s]
    # attention probabilities.
    return py_utils.NestedMap(flops=flops, out_shapes=(args[0], (b, n, t, s)))
class MultiHeadedAttentionXL(MultiHeadedAttention):
  """Transformer-XL multiheaded attention with relative positional embedding.

  https://arxiv.org/pdf/1901.02860.pdf section 3.3.

  Notice this is only intended for self attention.
  """

  @classmethod
  def Params(cls):
    p = super(MultiHeadedAttentionXL, cls).Params()
    p.Define('rel_pos_emb_dim', None,
             'Dimension of relative positional embedding.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    """Constructs a MultiHeadedAttentionXL object."""
    super(MultiHeadedAttentionXL, self).__init__(params)
    params = self.params
    assert not params.packed_input, 'Packed input not implemented yet.'
    if params.rel_pos_emb_dim is None or params.rel_pos_emb_dim <= 0:
      # BUG FIX: corrected typo in the error message ('Invalide').
      raise ValueError('Invalid rel_pos_emb_dim: %s' % params.rel_pos_emb_dim)
    with tf.variable_scope(params.name):
      emb_params = layers.PositionalEmbeddingLayer.Params().Set(
          embedding_dim=params.rel_pos_emb_dim)
      self.CreateChild('pos_emb', emb_params)
      # Projection layer for relative position encoding
      dim_per_head = params.hidden_dim // params.num_heads
      pos_proj_tpl = params.proj_tpl.Copy().Set(
          input_dim=params.rel_pos_emb_dim,
          num_heads=params.num_heads,
          dim_per_head=dim_per_head,
          use_bias=False)
      self.CreateChild('pos_proj', pos_proj_tpl)
      # Learned per-head bias vectors u (content) and v (position) from the
      # Transformer-XL paper, zero-initialized.
      u_pc = py_utils.WeightParams(
          shape=[params.num_heads, dim_per_head],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=params.dtype,
          collections=[self.__class__.__name__ + '_vars'])
      v_pc = py_utils.WeightParams(
          shape=[params.num_heads, dim_per_head],
          init=py_utils.WeightInit.Constant(0.0),
          dtype=params.dtype,
          collections=[self.__class__.__name__ + '_vars'])
      self.CreateVariable('u', u_pc)
      self.CreateVariable('v', v_pc)

  def _AttenLogits(self, theta, query, key, per_step_padding):
    """Computes relative-position attention logits for self attention.

    Args:
      theta: layer weights.
      query: [B, T, N, H].
      key: [B, T, N, H] (self attention, so S == T).
      per_step_padding: optional [B, T, T] padding; used only to detect
        whether it is exactly a causal mask.

    Returns:
      Attention logits from relative_atten_util.AttenLogitsTransformerXL.
    """
    b, _, n, h = py_utils.GetShape(key, 4)
    t = py_utils.GetShape(query)[1]
    # This layer only supports self attention.
    key = py_utils.HasShape(key, [b, t, n, h])
    if per_step_padding is None:
      is_causal_padding = False
    else:
      # Check whether per_step_padding is exactly the causal mask, which
      # lets the fused logits computation take the causal fast path.
      causal_padding = tf.tile(
          tf.reshape(CausalPadding(t), [1, t, t]), [b, 1, 1])
      is_causal_padding = tf.reduce_all(
          tf.equal(
              tf.cast(per_step_padding, dtype=tf.int32),
              tf.cast(causal_padding, dtype=tf.int32)))
    # [1, 2T - 1]
    pos = tf.expand_dims(tf.range(-(t - 1), t, name='relative_pos'), 0)
    sin_emb = self.pos_emb.FPropWithPosition(theta.pos_emb, pos)
    # [1, 2T - 1, N, H]
    sin_emb = self.pos_proj.FProp(theta.pos_proj, sin_emb)
    # [2T - 1, N, H]
    sin_emb = tf.squeeze(sin_emb, 0)
    logits = relative_atten_util.AttenLogitsTransformerXL(
        query, key, sin_emb, theta.u, theta.v, is_causal_padding)
    return logits

  def _AttenLogitsOneStep(self, theta, query, key, time_step):
    """Attention logits for one single target (query) step.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      query: [B, N, H].
      key: [S, B, N, H] or [S, B, N*H/128, 128].
      time_step: Current time step.

    Returns:
      A Tensor of shape [S, B, N]
    """
    p = self.params
    s, b, _, _ = py_utils.GetShape(key, 4)
    n = p.num_heads
    h = p.hidden_dim // n
    # Transformer_XL relative attention.
    if time_step is None:
      raise ValueError('`time_step` can not be None when using relative '
                       'position encoding in attention.')
    # Terms (a) and (c) of the Transformer-XL decomposition: content-based
    # scores with the learned content bias u.
    logits = tf.einsum('BNH,SBNH->SBN', query + theta.u,
                       tf.reshape(key, [s, b, n, h]))
    position = tf.expand_dims(time_step - tf.range(s), 0)
    # [1, s, emb_dim]
    sin_emb = self.pos_emb.FPropWithPosition(theta.pos_emb, position)
    sin_emb = self.pos_proj.FProp(theta.pos_proj, sin_emb)
    # [s, n, h]
    sin_emb = tf.squeeze(sin_emb, 0)
    # Terms (b) and (d): position-based scores with the learned position
    # bias v. (Fixed comment typo: 'term b an d'.)
    logits += tf.einsum('BNH,SNH->SBN', query + theta.v, sin_emb)
    return logits

  def ExtendStep(self,
                 theta,
                 query_vec,
                 cached_key_vec,
                 cached_value_vec,
                 paddings,
                 segment_mask,
                 per_step_padding,
                 time_step,
                 use_short_seq_opt=False):
    # TODO(jamesqin): support use_short_seq_opt for TransformerXL attention.
    assert not use_short_seq_opt
    return super(MultiHeadedAttentionXL,
                 self).ExtendStep(theta, query_vec, cached_key_vec,
                                  cached_value_vec, paddings, segment_mask,
                                  per_step_padding, time_step,
                                  use_short_seq_opt)
class MultiHeadedAttentionRPE(MultiHeadedAttention):
"""Multiheaded attention with relative positional embedding ...
See https://arxiv.org/pdf/1803.02155.pdf.
Notice this is only intended for self attention.
"""
@classmethod
def Params(cls):
  """Returns the layer params, extended with RPE-specific options."""
  p = super(MultiHeadedAttentionRPE, cls).Params()
  p.Define('rel_pos_emb_dim', None,
           'Dimension of relative positional embedding.')
  p.Define('rel_pos_radius', None,
           'Relative distance is clipped to [-radius, radius].')
  p.Define('skip_value_emb', False, 'If skipping value positional embedding.')
  p.Define(
      'use_global_emb', True,
      'If using global relative positional embedding. Only effective if '
      '`rel_pos_emb_tpl` is not None.')
  return p
@base_layer.initializer
def __init__(self, params):
  """Constructs a MultiHeadedAttentionRPE object."""
  super(MultiHeadedAttentionRPE, self).__init__(params)
  params = self.params
  assert not params.packed_input, 'Packed input not implemented yet.'

  if not params.rel_pos_radius:
    raise ValueError('Invalid rel_pos_radius: %s' % params.rel_pos_radius)

  # The embedding dim defaults to the attention hidden dim.
  if params.rel_pos_emb_dim is None:
    rel_pos_emb_dim = params.hidden_dim
  else:
    rel_pos_emb_dim = params.rel_pos_emb_dim
  rel_pos_emb_tpl = layers.RelativePositionalEmbeddingLayer.Params().Set(
      radius=params.rel_pos_radius, dim=rel_pos_emb_dim)
  if rel_pos_emb_dim != params.hidden_dim:
    # Projection layer for relative position encoding
    dim_per_head = params.hidden_dim // params.num_heads
    pos_proj_tpl = params.proj_tpl.Copy().Set(
        input_dim=rel_pos_emb_dim,
        num_heads=params.num_heads,
        dim_per_head=dim_per_head,
        use_bias=False)
  else:
    pos_proj_tpl = None

  # With use_global_emb, AUTO_REUSE shares a single embedding table across
  # all layers created under the same variable scope name.
  with tf.variable_scope(
      params.name, reuse=tf.AUTO_REUSE if params.use_global_emb else False):
    self.CreateChild('key_emb', rel_pos_emb_tpl)
    # Add projection layer if rel_pos_emb_dim is different from hidden_dim.
    if pos_proj_tpl is not None:
      self.CreateChild('key_pos_proj', pos_proj_tpl)
    if not params.skip_value_emb:
      self.CreateChild('value_emb', rel_pos_emb_tpl)
      if pos_proj_tpl is not None:
        self.CreateChild('value_pos_proj', pos_proj_tpl)
def _RelativePositionValueEmb(self, theta, key):
  """Gets relative positional value embedding.

  Args:
    theta: A `.NestedMap` object containing weights' values of this layer and
      its children layers.
    key: The attention key, a tensor of shape [batch, seqlen, dim]

  Returns:
    Relative positional embedding, a Tensor of shape
    [tgt_time=seqlen, src_time=seqlen, num_heads, attention_dim]
  """
  emb_layer = self.value_emb
  emb_theta = theta.value_emb

  seqlen = py_utils.GetShape(key)[1]
  # [tgt_time, src_time] index grids used to form the signed relative
  # distance (src - tgt) for every (tgt, src) pair.
  src_time_indices = tf.tile(tf.expand_dims(tf.range(seqlen), 0), [seqlen, 1])
  tgt_time_indices = tf.tile(
      tf.expand_dims(tf.range(seqlen), -1), [1, seqlen])

  # [tgt_time=T, src_time=T, num_heads x hidden_dim]
  pos_emb = emb_layer.FProp(emb_theta, src_time_indices - tgt_time_indices)

  params = self.params
  num_heads = self.params.num_heads
  tgt_time, src_time, _ = py_utils.GetShape(pos_emb)

  # Project to per-head form when a projection child exists (emb dim !=
  # hidden dim); otherwise a plain reshape suffices.
  pos_proj_layer = 'value_pos_proj'
  if hasattr(self, pos_proj_layer):
    return getattr(self, pos_proj_layer).FProp(
        getattr(theta, pos_proj_layer), pos_emb)
  else:
    return tf.reshape(
        pos_emb,
        [tgt_time, src_time, num_heads, params.hidden_dim // num_heads])
def _AttenLogits(self, theta, query, key, per_step_padding):
  """Computes relative-position (Shaw et al.) attention logits.

  Self-attention only: query and key must share the same time dimension.
  """
  # TODO(jamesqin): optimize it.
  b, _, n, h = py_utils.GetShape(key, 4)
  t = py_utils.GetShape(query)[1]

  # This layer only supports self attention.
  key = py_utils.HasShape(key, [b, t, n, h])

  # Detect whether per_step_padding is exactly the causal mask; the fused
  # relative-attention helper has a specialized path for that case.
  if per_step_padding is None:
    is_causal_padding = False
  else:
    causal_padding = tf.tile(
        tf.reshape(CausalPadding(t), [1, t, t]), [b, 1, 1])
    is_causal_padding = tf.reduce_all(
        tf.equal(
            tf.cast(per_step_padding, dtype=tf.int32),
            tf.cast(causal_padding, dtype=tf.int32)))

  # All possible relative distances for a length-t sequence: [1, 2T - 1]
  pos = tf.expand_dims(tf.range(-(t - 1), t), 0)
  # [1, 2T - 1, rel_pos_emb_dim]
  abs_emb = self.key_emb.FProp(theta.key_emb, pos)
  if hasattr(self, 'key_pos_proj'):
    # [1, 2T - 1, N, H]
    abs_emb = self.key_pos_proj.FProp(theta.key_pos_proj, abs_emb)
    # [2T - 1, N, H]
    abs_emb = tf.squeeze(abs_emb, 0)
  else:
    abs_emb = tf.reshape(abs_emb, [2 * t - 1, n, h])

  return relative_atten_util.AttenLogitsRPE(query, key, abs_emb,
                                            is_causal_padding)
def _AttenLogitsOneStep(self, theta, query, key, time_step):
"""Attention logits for one single target (query) step.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
query: [B, N, H].
key: [S, B, N, H] or [S, B, N*H/128, 128].
time_step: Current time step.
Returns:
A Tensor of shape [S, B, N]
"""
p = self.params
s, b, _, _ = py_utils.GetShape(key, 4)
n = p.num_heads
h = p.hidden_dim // n
# Transformer_XL relative attention.
if time_step is None:
raise ValueError('`time_step` can not be None when using relative '
'position encoding in attention.')
# Gets positional embedding.
# [1, S]
rel_dists = tf.expand_dims(time_step - tf.range(s), 0)
# [1, S, rel_pos_emb_dim]
pos_emb = self.key_emb.FPropDefaultTheta(rel_dists)
if hasattr(self, 'key_pos_proj'):
# [1, S, N, H]
pos_emb = self.key_pos_proj.FProp(theta.key_pos_proj, pos_emb)
# [S, 1, N, H]
pos_emb = tf.transpose(pos_emb, [1, 0, 2, 3])
else:
pos_emb = tf.reshape(pos_emb, [s, 1, n, h])
return tf.einsum('BNH,SBNH->SBN', query,
tf.reshape(key, [s, b, | |
recv recv info')
reply_info = frecv()
dprint("send status = ",reply_info )
if ( reply_info == "recv ok" ): return "OK"
return "NOT OK"
def recv_file(self,filepath,old_file_md5,fsend,frecv):
    """Receive one file over the fsend/frecv channel and write it to filepath.

    Protocol: the peer first sends the file md5; if it equals old_file_md5
    the transfer is skipped.  Otherwise the peer sends a block count
    followed by that many raw blocks.

    Args:
        filepath: destination path for the received content.
        old_file_md5: md5 of the local copy, or 'NO_OLD_FILE'.
        fsend, frecv: channel send/receive callables.

    Returns:
        Tuple ('same file' | 'recv ok' | 'recv fail', md5_string).
    """
    dprint( 'step into recv_file' )
    file_md5 = frecv()
    if file_md5 == old_file_md5: #same content
        fsend('no need to send')
        dprint("same file, no need to recv")
        return ('same file',file_md5)
    fsend('need to send')
    dprint('try recv size')
    size = frecv()
    dprint("size = ",size)
    # `size` counts blocks, not bytes; psize is the progress-print stride.
    size = int( size )
    psize = int( size / COL_SIZE )
    #print(size, psize, COL_SIZE, CONSTANTS.RUNNING_SIDE)
    if psize == 0 : psize = 1
    full_size = size
    tt = 0
    # Retry the whole transfer up to 10 times on any receive error.
    while True:
        try:
            t0 = time.time()
            size = full_size
            real_md5 = hashlib.md5()
            filecontent = []
            while size > 0 :
                #print('recv blk ',size,' ',end=' ' )
                blk = frecv(is_bytes=True)
                real_md5.update( blk )
                filecontent.append( blk );
                size -= 1
                # Progress output only on the interactive ('user') side.
                if CONSTANTS.RUNNING_SIDE =='user' and size % psize == 0:
                    print_progress( 1.0-1.0*size/full_size ,full_size,psize/(time.time() - t0 ) )
                    t0 = time.time()
                pass
            pass
            break
        except Exception as e:
            tt+=1
            if tt == 10:
                print('failed abort')
                break
            print('send failed, try again:',e)
            pass
        pass
    # Clear the progress line.
    sys.stdout.write("\r"+" "*(terminal_size().col)+"\r")
    real_md5 = real_md5.hexdigest()
    dprint( 'recv md5 ', real_md5," , ", file_md5 )
    # NOTE(review): the md5 verification below is disabled by the
    # `False and` guard, so corrupted transfers are currently accepted.
    if False and file_md5 != real_md5:
        dprint('md5 inconsistent')
        fsend('md5 inconsistent')
        return ('recv fail',file_md5)
    #recved and content is right, write to filepath
    dprint('send recv ok')
    fsend('recv ok')
    dprint("try put content to >>>", filepath )
    with open(filepath,'wb')as fp:
        for blk in filecontent:
            fp.write(blk)
        pass
    pass
    dprint('new file write done')
    return ('recv ok',file_md5)
def send_module(self,full_parent='/',file_tree=None,renametype='eva2normal',fsend=None,frecv=None,fsend_eof=None):
    """Send a directory tree (os.walk-style `file_tree`) to the peer.

    Args:
        full_parent: local root that the relative paths in file_tree hang off.
        file_tree: list of (parent, dirnames, filenames) tuples; None means
            an empty tree.  (Was a mutable default `[]`, shared across calls.)
        renametype: 'eva2normal' renames via self.rto_eva_file; anything
            else sends names unchanged.
        fsend, frecv: channel send/receive callables.
        fsend_eof: optional callable used to signal abort before raising.

    Returns:
        True if the peer confirmed 'module_send finished', else False.

    Raises:
        Exception: if the peer rejects a dir/file announcement or a file
            transfer fails.
    """
    # Fix for the shared-mutable-default pitfall; None means "no files".
    if file_tree is None:
        file_tree = []
    #send files
    #send md5 let server check whether need to transfer
    for parent,dirnames,filenames in file_tree:
        dprint(parent,dirnames,filenames)
        # Announce every directory first; the peer must ack each with 'ok'.
        for dirname in dirnames:
            dprint('send dir $%s %s'%(parent,dirname) )
            fsend('dir $%s %s'%(parent,dirname) )
            if frecv()!= 'ok':
                if fsend_eof: fsend_eof()
                raise Exception('error while sending files to server,fail to send dir')
        #send info (type,parent,name,md5)
        for filename in filenames:
            if renametype == 'eva2normal': send_filename = self.rto_eva_file(filename)
            elif renametype == 'unchange': send_filename = filename
            else : send_filename = filename
            # Empty rename result means "skip this file".
            if ( send_filename == "" ): continue;
            if parent:
                print(" sending %s \t\tin %s"%(send_filename,parent));
            else:
                print(" sending %s"%send_filename);
            dprint('send file $%s %s'%(parent,send_filename) )
            fsend('file $%s %s'%(parent,send_filename) )
            reply_info = frecv()
            if reply_info != 'ready to recv file':
                print('can not send files when send module')
                if fsend_eof: fsend_eof()
                raise Exception('can not send files when send module, fail to send file')
            filefullpath = os.path.join(full_parent,parent,filename)
            dprint('file path = ',filefullpath)
            recvstat = self.send_file( filefullpath ,fsend, frecv )
            dprint('recvstat = ',recvstat)
            if recvstat != 'OK':
                print('send files failed when send module')
                if fsend_eof: fsend_eof()
                raise Exception('send files failed when send module')
    #end for
    #recv final info
    fsend('module_send_over')
    reply_info = frecv()
    if reply_info == 'module_send finished':
        return True
    return False
def recv_module(self,full_parent='/',renametype='unchange',suffix='',old_suffix='',fsend=None,frecv=None):
    """Receive a directory tree sent by send_module and materialize it.

    Loops reading 'dir $parent name' / 'file $parent name' announcements
    until 'module_send_over', creating directories and receiving files via
    self.recv_file.  With renametype='normal2eva' files are versioned with
    `suffix` and unchanged content is symlinked to the `old_suffix` copy.

    Returns:
        True when the final acknowledgement has been sent.

    Raises:
        Exception: on malformed announcements or failed file transfers.
    """
    #normal files and directory
    while True:
        info = frecv()
        info = info.split()
        dprint('\ninfo =>>>%s<<<'%(info,))
        if len(info) == 1: break
        if len(info) != 3: raise Exception('file description invalid.' )
        # parent arrives prefixed with '$'; strip it.
        (filetype,parent,filename) = (info[0],info[1][1:],info[2] )
        if filetype == 'module_send_over': break
        os.chdir( os.path.join(full_parent,parent) )
        # dir
        if filetype == 'dir':
            #new dir name
            # NOTE(review): new_file is only assigned for 'normal2eva'; if
            # `suffix` is non-empty under another renametype, the `ln -fs`
            # calls below would hit an unbound new_file (NameError).
            if renametype == 'normal2eva': new_file = filename + suffix
            if os.path.exists(filename):
                # A non-directory in the way is preserved as a .bak copy.
                if not os.path.isdir(filename): os.rename(filename,'%s.bak'%filename)
                shell_status_output('mkdir %s'%filename)
                if suffix: shell_status_output('ln -fs %s %s'%(filename,new_file))
            else:
                shell_status_output('mkdir %s'%filename)
                if suffix: shell_status_output('ln -fs %s %s'%(filename,new_file))
            fsend('ok')
            continue
        #file
        if filetype == 'file':
            ###new_file_name
            if renametype == 'unchange': #directly overwrite if exist
                new_file = filename
                if parent:
                    print(" recving %s \t\tin %s"%(new_file,parent));
                else:
                    print(" recving %s"%new_file);
                fsend('ready to recv file')
                (recv_result,new_md5) = self.recv_file(new_file,'NO_OLD_FILE',fsend,frecv)
                continue
            if renametype == 'normal2eva': #keep
                new_file = self.to_eva_file(filename) + suffix
                if parent:
                    print(" recving %s \t\tin %s"%(new_file,parent));
                else:
                    print(" recving %s"%new_file);
                # Compare against the previous version's stored md5 so
                # unchanged files are skipped by the peer.
                old_file = self.to_eva_file( filename ) + old_suffix
                if os.path.exists( old_file ):
                    old_md5 = file_content_get(old_file+'.md5')
                else:
                    old_md5 = 'NO_OLD_FILE'
                fsend('ready to recv file')
                (recv_result,new_md5) = self.recv_file(new_file,old_md5,fsend,frecv)
                if recv_result == 'same file':
                    # Reuse the old copy via symlinks instead of rewriting.
                    if os.path.islink(old_file):
                        shell_status_output('ln -fs %s %s'%(os.readlink(old_file),new_file))
                        shell_status_output('ln -fs %s %s'%(os.readlink(old_file+'.md5'),new_file+'.md5'))
                    else:
                        shell_status_output('ln -fs %s %s'%(old_file,new_file))
                        shell_status_output('ln -fs %s %s'%(old_file+'.md5',new_file+'.md5'))
                elif recv_result == 'recv ok':
                    file_content_set(new_file+'.md5',new_md5)
                else:
                    raise Exception('file recv error '+recv_result)
                continue
        #otherwize
        raise Exception('file description invalid, may from attackers')
    fsend('module_send finished')
    return True
##########################################
#uncoding and encoding class
from binascii import b2a_hex, a2b_hex
import base64
def to_bytes(s):
    """Return *s* as bytes, encoding str values with the default UTF-8 codec."""
    return s.encode() if isinstance(s, str) else s
def to_str(s):
    """Return *s* as str, decoding bytes values with the default UTF-8 codec."""
    return s if isinstance(s, str) else s.decode()
def encoding(s): #encode a string
    """Base64-encode *s* (str or bytes) and return the encoded bytes.

    Output matches the legacy ``base64.encodestring`` format (newline
    inserted every 76 characters, trailing newline).
    """
    if isinstance(s,str): s = s.encode()
    # base64.encodestring was removed in Python 3.9; encodebytes is the
    # drop-in replacement with identical output.
    return base64.encodebytes(s) #creq1
def decoding(s): #decode a encoded string
    """Decode a base64 str/bytes value, tolerating missing '=' padding."""
    if isinstance(s,str): s = s.encode()
    # Pad to a multiple of 4.  The original computed `4 - len(s) % 4`, which
    # appended 4 bogus '=' when the input was already aligned and only
    # worked because the underlying a2b_base64 is lenient.
    s += b'=' * (-len(s) % 4)
    # base64.decodestring was removed in Python 3.9; decodebytes is the
    # drop-in replacement.
    return base64.decodebytes(s) #creq1
def random_key(bytelen = 32,base= None):
    """Return a random key of `bytelen` characters drawn from `base`.

    Keys produced here are used as crypto secrets (AESEncryption below
    seeds its key from this function), so characters are drawn from the OS
    CSPRNG via `random.SystemRandom` rather than the deterministic,
    seedable Mersenne-Twister module functions.

    Args:
        bytelen: number of characters in the generated key.
        base: optional iterable of candidate characters; defaults to
            letters, digits and a fixed punctuation set.

    Returns:
        A str of exactly `bytelen` characters.
    """
    if not base: base = list(string.ascii_letters)+list(string.digits)+list('!@#$%^&*()<>,./?')
    else: base = list(base)
    rng = random.SystemRandom()
    return ''.join(rng.choice(base) for _ in range(bytelen))
def random_simple_key(bytelen = 32,base= None):
    """Return a random alphanumeric key of `bytelen` characters.

    `base` may override the candidate character pool.
    """
    pool = list(base) if base else list(string.ascii_letters) + list(string.digits)
    return ''.join(random.choice(pool) for _ in range(bytelen))
def passcrypt(passwd):
    """Return the hex SHA-1 digest of *passwd* (str or bytes).

    NOTE(review): unsalted SHA-1 is weak for password storage; kept
    byte-compatible because stored hashes depend on this exact scheme.
    """
    data = passwd.encode() if isinstance(passwd, str) else passwd
    return hashlib.sha1(data).hexdigest()
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
from Crypto.Util import randpool
import pickle
class RSAEncryption:
    """1024-bit RSA key wrapper used for the initial key exchange.

    NOTE(review): several security concerns, kept as-is to preserve the
    wire protocol:
      * the public key travels as a pickle — `pickle.loads` on data
        received from a peer allows arbitrary code execution;
      * `encrypt(text, 32)` is textbook RSA without OAEP padding;
      * 1024-bit keys and `Crypto.Util.randpool` are both deprecated.
    """
    blah = None           # RandomPool entropy source (host side only)
    RSAKey = None         # private key object (host side only)
    RSAPubKey = None      # public key object
    RSAPubKeyStr = None   # pickled public key bytes
    Type = None           # 'host' (holds private key) or 'client'
    def __init__(self,Type='host',pubkeystr='',privkeystr=''):
        #position = host || client
        # privkeystr is accepted but currently unused.
        self.Type = Type
        if (Type == 'host' or pubkeystr == ''):
            self.blah = randpool.RandomPool()
            self.RSAKey = RSA.generate(1024,self.blah.get_bytes)
            self.RSAPubKey = self.RSAKey.publickey()
            self.RSAPubKeyStr = pickle.dumps(self.RSAPubKey)
        elif (Type== 'client'):
            # NOTE(review): unpickling a peer-supplied key string is unsafe.
            self.RSAPubKeyStr = decoding(pubkeystr)
            self.RSAPubKey = pickle.loads(self.RSAPubKeyStr)
        else:
            raise Exception('RSAEncryption Type',Type,'is not allowed!')
    def get_pub_key_str(self):
        # Base64 of the pickled public key, safe to transmit as text.
        return encoding(self.RSAPubKeyStr)
    def encrypt(self,text):
        # Encrypt with the public key; returns base64-encoded bytes.
        if isinstance(text,str): text = text.encode()
        return encoding( self.RSAPubKey.encrypt(text,32)[0] )
    def decrypt(self,text):
        # Decrypt with the private key (host side only); returns raw bytes.
        if isinstance(text,str): text = text.encode()
        return self.RSAKey.decrypt( decoding(text) )
    pass
from Crypto.Cipher import AES
class AESEncryption:
    """AES-CBC wrapper with hand-rolled 'X...X' + NUL padding and hex output.

    NOTE(review): CBC with a constant all-zero IV means identical
    plaintexts produce identical ciphertexts, and the NUL-strip padding
    corrupts plaintexts that themselves end in NUL bytes.  Kept as-is
    because the peer implements the identical scheme.
    """
    key = None      # AES key string (16/24/32 chars)
    mode = None     # AES.MODE_CBC
    cryptor = None  # unused; a fresh cipher is created per operation
    name = None     # unused
    def __init__(self,key=''):
        # An empty key requests a fresh random 32-char key.
        if key == '' : key = self.generate_random_key(32)
        self.key = key
        self.mode = AES.MODE_CBC
        #self.cryptor = AES.new(self.key,self.mode,b'0000000000000000')
    def generate_random_key(self,bytelen):
        return random_key(bytelen)
    def get_key(self):
        return self.key
    def encrypt(self,text=''):
        """Encrypt str/bytes; returns hex-encoded ciphertext bytes."""
        if not isinstance(text,str): text = text.decode()
        # A fresh cipher per call: CBC cipher objects are stateful.
        # NOTE(review): PyCrypto on Python 3 expects bytes; passing str
        # here relies on the installed Crypto build accepting it — verify.
        cryptor = AES.new(self.key,self.mode,b'0000000000000000')
        unitlen = 16
        # Wrap the text in 'X'...'X' sentinels, then NUL-pad to a multiple
        # of the 16-byte block size.
        count = len(text)+2
        if count < unitlen:
            text = 'X'+text +'X'+ ('\0'*(unitlen - count) )
        elif count > unitlen:
            text = 'X'+text +'X'+ ('\0'*(unitlen - (count%unitlen)))
        else:
            text = 'X'+text +'X'
        ciphertext = cryptor.encrypt(text)
        return b2a_hex(ciphertext)
    def decrypt(self,text=''):
        """Inverse of encrypt: hex ciphertext in, plaintext bytes out."""
        if isinstance(text,str): text = text.encode()
        cryptor = AES.new(self.key,self.mode,b'0000000000000000')
        if len(text)%2 != 0: raise Exception('Text Decryption Error.')
        # Strip the NUL padding, then the leading/trailing 'X' sentinels.
        plaintext = cryptor.decrypt(a2b_hex(text)).rstrip(b'\0')[1:-1]
        return plaintext
## python email interface
import smtplib
from email.mime.text import MIMEText
mailto_list=["<EMAIL>"]
def send_mail(mail_name,mail_host,mail_user,mail_pass,mail_postfix,to_list,subject,content):
    """Send an HTML email via SMTP.

    Args:
        mail_name: display name for the From header.
        mail_host: SMTP server host.
        mail_user, mail_pass: login credentials.
        mail_postfix: sender domain (user@postfix).
        to_list: list of recipient addresses.
        subject, content: message subject and HTML body.

    Returns:
        True on success, False on any failure (error is printed).
    """
    me = mail_name+"<"+mail_user+"@"+mail_postfix+">"
    msg = MIMEText(content,_subtype='html',_charset='utf-8')
    msg['Subject'] = subject
    msg['From'] = me
    msg['To'] = ";".join(to_list)
    try:
        s = smtplib.SMTP()
        s.connect(mail_host)
        try:
            s.login(mail_user,mail_pass)
            s.sendmail(me, to_list, msg.as_string())
        finally:
            # quit() sends SMTP QUIT before closing; the original close()
            # just dropped the socket and leaked the connection whenever
            # login/sendmail raised.
            s.quit()
        return True
    except Exception as e:
        print(str(e) )
        return False
#linux daemon program enwrap interface
import atexit
from signal import SIGTERM
class ServerDaemon:
#pid_dir = os.getenv("EVAWIZ_ROOT")+"/pids"
pid_dir = evawiz_root+"/pids"
def __init__(self,run_func,pidfile='evawiz.pid',name='evawiz',stdin='/dev/null',stdout='/dev/null',stderr='/dev/null'):
    """Wrap `run_func` as a UNIX daemon.

    Args:
        run_func: callable executed in the daemonized process.
        pidfile: pidfile name, created under the class-level pid_dir.
        name: human-readable daemon name used in status messages.
        stdin, stdout, stderr: paths the std streams are redirected to.
    """
    self.stdin = stdin
    self.stdout = stdout
    self.stderr = stderr
    self.run_func = run_func
    # Ensure the shared pid directory exists before composing the path.
    if not os.path.exists(self.pid_dir): os.makedirs(self.pid_dir)
    self.pidfile = os.path.join(self.pid_dir,pidfile)
    self.name = name
def daemonize(self):
    """
    do the UNIX double-fork magic

    Detaches from the controlling terminal, redirects the standard
    streams to the configured files and writes the child's pid to the
    pidfile (removed again via the atexit hook).
    """
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    # redirect standard file descriptors.  The original used the Python 2
    # `file()` builtin, which no longer exists; open() is the replacement.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(self.stdin, 'r')
    so = open(self.stdout, 'a+')
    # Unbuffered streams (buffering=0) must be binary mode on Python 3.
    se = open(self.stderr, 'ab+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # write pidfile
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as f:
        f.write("%s\n" % pid)
def delpid(self):
    """Remove the daemon's pidfile (registered with atexit by daemonize)."""
    os.unlink(self.pidfile)
def run(self):
    """Execute the daemon payload: invoke the wrapped `run_func`.

    Called by `start()` after `daemonize()`, i.e. inside the detached
    child process.
    """
    self.run_func()
#------------------------------
def start(self):
    """
    Start the daemon

    Refuses to start when a pidfile already exists (daemon presumed
    running); otherwise daemonizes the process and calls run().
    """
    # Check for a pidfile to see if the daemon already runs.  The original
    # used the Python 2 `file()` builtin, removed in Python 3.
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except (IOError, ValueError):
        # Missing pidfile, or an unreadable/corrupt one (previously a
        # corrupt pidfile crashed with an uncaught ValueError).
        pid = None
    if pid:
        sys.stderr.write("pidfile %s already exist. %s already running?\n" %(self.pidfile,self.name) )
        sys.exit(1)
    #start the daemon
    sys.stdout.write("Starting %s ...\n"%(self.name))
    sys.stdout.flush()
    self.daemonize()
    self.run()
def | |
<filename>swagger_client/models/project.py
# coding: utf-8
"""
Harbor API
These APIs provide services for manipulating Harbor project.
OpenAPI spec version: 0.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Project(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each model attribute to its Python type; consumed by the generated
# (de)serialization helpers.
swagger_types = {
    'project_id': 'int',
    'owner_id': 'int',
    'name': 'str',
    'creation_time': 'str',
    'update_time': 'str',
    'deleted': 'int',
    'owner_name': 'str',
    'public': 'int',
    'togglable': 'bool',
    'current_user_role_id': 'int',
    'repo_count': 'int',
    'enable_content_trust': 'bool',
    'prevent_vulnerable_images_from_running': 'bool',
    'prevent_vulnerable_images_from_running_severity': 'str',
    'automatically_scan_images_on_push': 'bool'
}

# Maps each attribute to its JSON key in the API payload.  Note that
# 'togglable' is capitalized as 'Togglable' on the wire, per the spec.
attribute_map = {
    'project_id': 'project_id',
    'owner_id': 'owner_id',
    'name': 'name',
    'creation_time': 'creation_time',
    'update_time': 'update_time',
    'deleted': 'deleted',
    'owner_name': 'owner_name',
    'public': 'public',
    'togglable': 'Togglable',
    'current_user_role_id': 'current_user_role_id',
    'repo_count': 'repo_count',
    'enable_content_trust': 'enable_content_trust',
    'prevent_vulnerable_images_from_running': 'prevent_vulnerable_images_from_running',
    'prevent_vulnerable_images_from_running_severity': 'prevent_vulnerable_images_from_running_severity',
    'automatically_scan_images_on_push': 'automatically_scan_images_on_push'
}
def __init__(self, project_id=None, owner_id=None, name=None, creation_time=None, update_time=None, deleted=None, owner_name=None, public=None, togglable=None, current_user_role_id=None, repo_count=None, enable_content_trust=None, prevent_vulnerable_images_from_running=None, prevent_vulnerable_images_from_running_severity=None, automatically_scan_images_on_push=None):
    """
    Project - a model defined in Swagger
    """
    # Initialize every backing field to None, then route each supplied
    # (non-None) constructor argument through its property setter, in the
    # declaration order of the arguments.
    supplied = {
        'project_id': project_id,
        'owner_id': owner_id,
        'name': name,
        'creation_time': creation_time,
        'update_time': update_time,
        'deleted': deleted,
        'owner_name': owner_name,
        'public': public,
        'togglable': togglable,
        'current_user_role_id': current_user_role_id,
        'repo_count': repo_count,
        'enable_content_trust': enable_content_trust,
        'prevent_vulnerable_images_from_running': prevent_vulnerable_images_from_running,
        'prevent_vulnerable_images_from_running_severity': prevent_vulnerable_images_from_running_severity,
        'automatically_scan_images_on_push': automatically_scan_images_on_push
    }
    for attr in supplied:
        setattr(self, '_' + attr, None)
    for attr, value in supplied.items():
        if value is not None:
            setattr(self, attr, value)
@property
def project_id(self):
    """Gets the project_id of this Project (Project ID).

    :rtype: int
    """
    return self._project_id

@project_id.setter
def project_id(self, project_id):
    """Sets the project_id of this Project (Project ID).

    :type: int
    """
    self._project_id = project_id
@property
def owner_id(self):
    """Gets the owner_id of this Project; the owner is the project creator.

    :rtype: int
    """
    return self._owner_id

@owner_id.setter
def owner_id(self, owner_id):
    """Sets the owner_id of this Project; the owner is the project creator.

    :type: int
    """
    self._owner_id = owner_id
@property
def name(self):
    """Gets the name of this Project.

    :rtype: str
    """
    return self._name

@name.setter
def name(self, name):
    """Sets the name of this Project.

    :type: str
    """
    self._name = name
@property
def creation_time(self):
    """Gets the creation_time of this Project.

    :rtype: str
    """
    return self._creation_time

@creation_time.setter
def creation_time(self, creation_time):
    """Sets the creation_time of this Project.

    :type: str
    """
    self._creation_time = creation_time
@property
def update_time(self):
    """Gets the update_time of this Project.

    :rtype: str
    """
    return self._update_time

@update_time.setter
def update_time(self, update_time):
    """Sets the update_time of this Project.

    :type: str
    """
    self._update_time = update_time
@property
def deleted(self):
    """Gets the deleted mark of this Project (1 = deleted, 0 = not).

    :rtype: int
    """
    return self._deleted

@deleted.setter
def deleted(self, deleted):
    """Sets the deleted mark of this Project (1 = deleted, 0 = not).

    :type: int
    """
    self._deleted = deleted
@property
def owner_name(self):
    """Gets the owner_name of this Project.

    :rtype: str
    """
    return self._owner_name

@owner_name.setter
def owner_name(self, owner_name):
    """Sets the owner_name of this Project.

    :type: str
    """
    self._owner_name = owner_name
@property
def public(self):
    """Gets the public status of this Project.

    :rtype: int
    """
    return self._public

@public.setter
def public(self, public):
    """Sets the public status of this Project.

    :type: int
    """
    self._public = public
@property
def togglable(self):
    """Gets whether this Project's publicity is updatable (for UI).

    :rtype: bool
    """
    return self._togglable

@togglable.setter
def togglable(self, togglable):
    """Sets whether this Project's publicity is updatable (for UI).

    :type: bool
    """
    self._togglable = togglable
@property
def current_user_role_id(self):
    """Gets the role ID of the current user who triggered the API (for UI).

    :rtype: int
    """
    return self._current_user_role_id

@current_user_role_id.setter
def current_user_role_id(self, current_user_role_id):
    """Sets the role ID of the current user who triggered the API (for UI).

    :type: int
    """
    self._current_user_role_id = current_user_role_id
@property
def repo_count(self):
    """Gets the number of repositories under this Project.

    :rtype: int
    """
    return self._repo_count

@repo_count.setter
def repo_count(self, repo_count):
    """Sets the number of repositories under this Project.

    :type: int
    """
    self._repo_count = repo_count
@property
def enable_content_trust(self):
    """Gets whether content trust is enabled; if so, unsigned images
    cannot be pulled from this Project.

    :rtype: bool
    """
    return self._enable_content_trust

@enable_content_trust.setter
def enable_content_trust(self, enable_content_trust):
    """Sets whether content trust is enabled for this Project.

    :type: bool
    """
    self._enable_content_trust = enable_content_trust
@property
def prevent_vulnerable_images_from_running(self):
    """Gets whether vulnerable images are prevented from running.

    :rtype: bool
    """
    return self._prevent_vulnerable_images_from_running

@prevent_vulnerable_images_from_running.setter
def prevent_vulnerable_images_from_running(self, prevent_vulnerable_images_from_running):
    """Sets whether vulnerable images are prevented from running.

    :type: bool
    """
    self._prevent_vulnerable_images_from_running = prevent_vulnerable_images_from_running
@property
def prevent_vulnerable_images_from_running_severity(self):
    """Gets the severity threshold above which images cannot be pulled.

    :rtype: str
    """
    return self._prevent_vulnerable_images_from_running_severity

@prevent_vulnerable_images_from_running_severity.setter
def prevent_vulnerable_images_from_running_severity(self, prevent_vulnerable_images_from_running_severity):
    """Sets the severity threshold above which images cannot be pulled.

    :type: str
    """
    self._prevent_vulnerable_images_from_running_severity = prevent_vulnerable_images_from_running_severity
@property
def automatically_scan_images_on_push(self):
    """Gets whether images are scanned automatically when pushed.

    :rtype: bool
    """
    return self._automatically_scan_images_on_push
@automatically_scan_images_on_push.setter
def automatically_scan_images_on_push(self, automatically_scan_images_on_push):
"""
Sets the automatically_scan_images_on_push of this Project.
Whether scan images automatically when pushing.
:param automatically_scan_images_on_push: The automatically_scan_images_on_push of | |
we do split on an iterator which has stages attached at it(by compute_at), the inner
most iterator of split results will become the new attach point.
"""
self.state_object, res = _ffi_api.StateSplit(
self.state_object, self._resolve_stage_id(stage), iterator, lengths, inner_to_outer
)
return res
def follow_split(self, stage, iterator, src_step_id, n_split):
    """Schedule primitive like split, but reusing factors from an earlier
    SplitStep.

    Useful when two stages (e.g. Dense computed at ReLU) must share the
    same tiling structure of their common outer loops.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be split (integer index, Operation, or output tensor).
    iterator : Iterator
        The iterator to split.
    src_step_id : int
        Index of the split step to follow in the history.
    n_split : int
        The number of split level.

    Returns
    -------
    res_its : List[Iterator]
        The new Iterators produced by the split.
    """
    stage_id = self._resolve_stage_id(stage)
    self.state_object, res = _ffi_api.StateFollowSplit(
        self.state_object, stage_id, iterator, src_step_id, n_split
    )
    return res
def follow_fused_split(self, stage, iterator, src_step_ids, level, factor_or_nparts):
    """Schedule primitive that splits an iterator by the same factors as a
    given list of SplitSteps and FuseSteps.

    Typically used for GPU cooperative fetching, where threadIdx.x is
    bound to an iterator produced by split & fuse steps and the input
    stage must be split with a matching extent.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be split (integer index, Operation, or output tensor).
    iterator : Iterator
        The iterator to split.
    src_step_ids : List[int]
        Indices of the split steps to follow in the history.
    level : int
        Use the length in this split level.
    factor_or_nparts : bool
        True to use `factor` for split from inner to outer,
        False to use `nparts` for split from outer to inner.

    Returns
    -------
    res_its : List[Iterator]
        The new Iterators produced by the split.
    """
    stage_id = self._resolve_stage_id(stage)
    self.state_object, res = _ffi_api.StateFollowFusedSplit(
        self.state_object, stage_id, iterator, src_step_ids, level, factor_or_nparts
    )
    return res
def storage_align(self, stage, iterator, factor, offset):
    """Schedule primitive corresponding to `te.Stage.storage_align`.
    See also the `te.Stage` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be storage aligned (integer index, Operation, or
        output tensor).
    iterator : Iterator
        The iterator to be aligned.
    factor : int
        The factor in the alignment specification.
    offset : int
        The offset in the alignment specification.
    """
    stage_id = self._resolve_stage_id(stage)
    self.state_object = _ffi_api.StateStorageAlign(
        self.state_object, stage_id, iterator, factor, offset
    )
def compute_at(self, stage, target_stage, target_iter):
    """Schedule primitive corresponding to `te.Stage.compute_at`.

    See also the `te.Stage` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The source Stage of computed at, which can be specified by the integer index,
        Operation, or output tensor of the stage.
    target_stage : Union[int, Operation, Tensor]
        The target stage of compute_at, which can be specified by the integer index, Operation,
        or output tensor of the stage.
    target_iter : Iterator
        The target Iterator of compute_at.

    Notes
    -----
    After compute_at, we need careful dependency analysis to compute the accurate bound
    information. However, it is relatively expensive and complicated, so we just fill "None"
    as bound for the newly created iterators.
    Call ComputeDAG::InferBound on the returned state to get the complete bound information.
    """
    # Resolve both stage handles up front, then delegate to the FFI call.
    source_id = self._resolve_stage_id(stage)
    target_id = self._resolve_stage_id(target_stage)
    self.state_object = _ffi_api.StateComputeAt(
        self.state_object, source_id, target_id, target_iter
    )
def compute_inline(self, stage):
    """Schedule primitive corresponding to `te.Stage.compute_inline`.

    See also the `te.Stage` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be marked compute inlined, which can be specified by the integer index,
        Operation, or output tensor of the stage.
    """
    stage_id = self._resolve_stage_id(stage)
    self.state_object = _ffi_api.StateComputeInline(self.state_object, stage_id)
def compute_root(self, stage):
    """Schedule primitive corresponding to `te.Stage.compute_root`.

    See also the `te.Stage` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be marked compute at root, which can be specified by the integer index,
        Operation, or output tensor of the stage.

    Notes
    -----
    After compute_root, we need careful dependency analysis to compute the accurate bound
    information. However, it is relatively expensive and complicated, so we just fill "None"
    as bound for the newly created iterators.
    Call ComputeDAG::InferBound on the returned state to get the complete bound information.
    """
    # Docstring fix: the original summary line read "Ssee also"; corrected to
    # match the wording used by the sibling schedule primitives.
    self.state_object = _ffi_api.StateComputeRoot(
        self.state_object, self._resolve_stage_id(stage)
    )
def cache_read(self, stage, scope_name, reader_stages):
    """Schedule primitive corresponding to `te.Schedule.cache_read`.

    See also the `te.Schedule` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be cache_read, which can be specified by the integer index, Operation,
        or output tensor of the stage.
    scope_name : str
        The scope name of the newly added read stage.
    reader_stages : List[Union[int, Operation, Tensor]]
        The reader stages. Each of the list can be specified by the integer index, Operation,
        or output tensor of the stage.

    Returns
    -------
    new_stage_op : Operator
        The Operator of the new added stage.

    Notes
    -----
    Cache read step will insert an extra stage to the original ComputeDAG (at the back of the
    target stage).
    """
    reader_ids = [self._resolve_stage_id(reader) for reader in reader_stages]
    self.state_object, added_stage_id = _ffi_api.StateCacheRead(
        self.state_object,
        self._resolve_stage_id(stage),
        scope_name,
        reader_ids,
        self.compute_dag,
    )
    added_stage_id = int(added_stage_id)
    # Inserting a stage shifts the ids of every op behind it. Offset the
    # stage_id_map so the original op handles keep working.
    self._apply_stage_id_offset(added_stage_id)
    self._update_stage_id_map()
    return self.stages[added_stage_id].op
def cache_write(self, stage, scope_name):
    """Schedule primitive corresponding to `te.Schedule.cache_write`.

    See also the `te.Schedule` for more details.

    Parameters
    ----------
    stage : Union[int, Operation, Tensor]
        The Stage to be cache_write, which can be specified by the integer index, Operation,
        or output tensor of the stage.
    scope_name : str
        The scope name of the newly added compute stage.

    Returns
    -------
    new_stage_op : Operator
        The Operator of the new added stage.

    Notes
    -----
    Cache write step will insert an extra stage to the original ComputeDAG (in the front of
    the target stage). This step will cache write all output tensors of the target stage.
    """
    stage_id = self._resolve_stage_id(stage)
    self.state_object, added_stage_id = _ffi_api.StateCacheWrite(
        self.state_object, stage_id, scope_name, self.compute_dag
    )
    added_stage_id = int(added_stage_id)
    # Inserting a stage shifts the ids of every op behind it. Offset the
    # stage_id_map so the original op handles keep working.
    self._apply_stage_id_offset(added_stage_id)
    self._update_stage_id_map()
    return self.stages[added_stage_id].op
def rfactor(self, stage, iterator, factor_iter_id):
"""Schedule primitive corresponding to `te.Schedule.rfactor`.
See also the `te.Schedule` for more details.
Parameters
----------
stage : Union[int, Operation, Tensor]
The Stage to be factored, which can be specified by the integer index, Operation,
or output tensor of the stage.
iterator : Iterator
The reduction iterator to be factored.
factor_iter_id : int
The position where the new iterator is placed.
Returns
-------
new_stage_op : Operator
The Operator of the new added stage.
Notes
-----
Rfactor step will insert an extra stage to the original ComputeDAG (in the front of the
target stage).
"""
self.state_object, new_stage_id = _ffi_api.StateRfactor(
self.state_object,
self._resolve_stage_id(stage),
iterator,
factor_iter_id,
self.compute_dag,
)
# Add a new stage will change all ops behind the added stage. But we still want to keep the
# original ops map, apply stage id offset to stage_id_map to make | |
<filename>ppq/parser/matex_exporter.py
from ast import Continue
from typing import Dict, List, Optional, Tuple
import onnx
import torch
from onnx import helper
from ppq.core import (COMPELING_OP_TYPES, EXPORT_DEVICE_SWITCHER, PPQ_NAME,
ChannelwiseTensorQuantizationConfig, DataType,
OperationMeta, QuantizationProperty, QuantizationStates,
TensorMeta, TensorQuantizationConfig,
convert_any_to_torch_tensor)
from ppq.IR import (BaseGraph, Operation, QuantableOperation,
QuantableVariable, Variable)
from ppq.IR.base.command import GraphCommand, GraphCommandType
from ppq.IR.morph import GraphDeviceSwitcher, GraphFormatter
from ppq.utils.round import ppq_tensor_round
from .onnx_exporter import OnnxExporter
# legacy exporter since ppq 0.6.4
# use onnxruntime exporter instead.
class MetaxExporter(OnnxExporter):
"""ONNXRUNTIME int8 QDQ format exporter, no further actions should be applied to the graph because we will modify the graph
in-place, and the modified graph can't be executed. We remove Clip and Relu ops(fuse into computing op) here when asym quantization
for activation is applied, and following the official implementation, when an variable has multiple outputs, we assume the same
quantization scales and offset. For parameters, we pre-quantize the value and only insert DequantizeLinear op, both per-layer/per-channel
and asym/sym quantizations are supported for export, the exported onnx model is tested to align with PPQ monitor when CUDAExecutionProvider
is applied in onnxruntime-gpu >= 1.8.1, i.e., to run the model correctly if you have gpu and onnxruntime-gpu version installed
X W b X quant(W) quant(b)
\ | / \ | /
\ | / quant dequant dequant
Conv -> \ | /
| dequant | /
| \ | /
Conv
|
quant
|
dequant
|
```
import onnxruntime as ort
sess_options = ort.SessionOptions()
sess = ort.InferenceSession(file_path, sess_options, providers=['CUDAExecutionProvider'])
res = sess.run(None, {sess.get_inputs()[0].name : dummy_input.cpu().numpy()})
```
"""
def __init__(self, removed_activation_types: Optional[List[str]] = None) -> None:
    """Create a MetaxExporter.

    Args:
        removed_activation_types (Optional[List[str]]): activation op types that may be
            removed (fused into the preceding computing op) during export.
            Defaults to ['Relu', 'Clip'].
    """
    super().__init__()
    # BUG FIX: the original used a mutable list literal as the default value.
    # Python evaluates defaults once, so that single list was shared by every
    # instance; use a None sentinel and build a fresh copy per instance.
    if removed_activation_types is None:
        removed_activation_types = ['Relu', 'Clip']
    self.removed_activation_types = list(removed_activation_types)
def inplace_quantization(self, var: QuantableVariable, is_bias: bool) -> Tuple[torch.Tensor, torch.Tensor, int]:
    """Quantize a parameter variable in place and return its dequant metadata.

    The variable's float value is replaced by its quantized integer value
    (int32 for bias, uint8 for asymmetric, int8 otherwise), and the
    (scale, offset, axis) triple needed for a following DequantizeLinear op
    is returned.

    NOTE(review): only the first destination config is consulted — assumes all
    dest ops of a parameter share one quantization config; verify against callers.
    """
    config = var.dest_op_configs[0]
    assert isinstance(config, TensorQuantizationConfig)
    tensor = var.value
    scale, offset = config.scale, config.offset
    # Default axis for the per-tensor case; overwritten below for per-channel.
    axis = 1
    if config.policy.has_property(QuantizationProperty.PER_TENSOR):
        tensor = ppq_tensor_round((tensor / scale), config.rounding) + offset
        tensor = torch.clamp(tensor, config.quant_min, config.quant_max)
    else:
        assert isinstance(config, ChannelwiseTensorQuantizationConfig)
        # Reshape scale/offset to broadcast along the channel axis only.
        # (The comprehension's `axis` is scoped to the comprehension and does
        # not clobber the outer `axis` variable.)
        shape = [1 if axis != config.channel_axis else -1 for axis in range(tensor.ndim)]
        scale, offset = scale.view(shape), offset.view(shape)
        tensor = ppq_tensor_round((tensor / scale), config.rounding) + offset
        tensor = torch.clamp(tensor, config.quant_min, config.quant_max)
        axis = config.channel_axis
    if is_bias:
        # Bias is stored as int32 per the ONNX QDQ convention.
        var.value = tensor.type(torch.int32)
    elif config.policy.has_property(QuantizationProperty.ASYMMETRICAL):
        var.value = tensor.type(torch.uint8)
    else:
        var.value = tensor.type(torch.int8)
    # Return the unreshaped config scale/offset; offset dtype follows the
    # quantized value dtype chosen above.
    return (convert_any_to_torch_tensor(config.scale, dtype=torch.float32),
            convert_any_to_torch_tensor(config.offset, dtype=var.value.dtype), axis)
def insert_quant_dequant_on_var(
    self, graph: BaseGraph, var: QuantableVariable,
    config: TensorQuantizationConfig=None, single_branch: bool=False,
    dest_op: Operation=None) -> None:
    """Insert a QuantizeLinear/DequantizeLinear pair on a quantable variable.

    By default a pair of quant and dequant ops is inserted on `var`, i.e. all
    destinations of the original var are replaced by the output of the dequant
    op. You can instead insert on a single var--dest_op branch by setting
    single_branch=True, in which case `dest_op` must be the desired destination
    op of the dequant op.

    Args:
        graph (BaseGraph): PPQ IR graph.
        var (Variable): quantable variable, parameters assumed.
        config (TensorQuantizationConfig, optional): quantization config. Defaults to None,
            in which case the first available config attached to the var is used.
        single_branch (bool, optional): whether to insert on var (replace all destinations)
            or insert on just a single branch. Defaults to False.
        dest_op (Operation, optional): shouldn't be None when single_branch is True.
            Defaults to None.
    """
    if config is None:
        # Pick any attached config: configs of one variable are assumed equal.
        configs = [cfg for cfg in [var.source_op_config] + var.dest_op_configs if cfg is not None]
        config = configs[0]
    # uint8 zero-point for asymmetric quantization, int8 otherwise.
    offset_dtype = torch.int8
    if config.policy.has_property(QuantizationProperty.ASYMMETRICAL): offset_dtype = torch.uint8
    scale = convert_any_to_torch_tensor(config.scale, dtype=torch.float32)
    offset = convert_any_to_torch_tensor(config.offset, dtype=offset_dtype)
    # Each op gets its own parameter variables (cloned), never shared tensors.
    qt_svar = graph.create_variable(name=None, value=scale.clone(), is_parameter=True)
    qt_zvar = graph.create_variable(name=None, value=offset.clone(), is_parameter=True)
    dq_svar = graph.create_variable(name=None, value=scale.clone(), is_parameter=True)
    dq_zvar = graph.create_variable(name=None, value=offset.clone(), is_parameter=True)
    qt_op = graph.create_operation(op_type='QuantizeLinear', attributes={})
    dq_op = graph.create_operation(op_type='DequantizeLinear', attributes={})
    if single_branch:
        # Insert quant then dequant between the var's producer and dest_op only.
        upstream_op, downstream_op = var.source_op, dest_op
        graph.insert_op_between_ops(qt_op, up_op=upstream_op, down_op=downstream_op)
        graph.insert_op_between_ops(dq_op, up_op=qt_op, down_op=downstream_op)
    if not single_branch:
        # Insertion order matters: dequant first, then quant, so the final
        # topology on the var is quant -> dequant.
        graph.insert_op_on_var(dq_op, var=var.name)
        graph.insert_op_on_var(qt_op, var=var.name)
    # Wire scale/zero-point parameters as inputs of the new ops.
    graph.create_link_with_op(variable=qt_svar, upstream_op=None, downstream_op=qt_op)
    graph.create_link_with_op(variable=qt_zvar, upstream_op=None, downstream_op=qt_op)
    graph.create_link_with_op(variable=dq_svar, upstream_op=None, downstream_op=dq_op)
    graph.create_link_with_op(variable=dq_zvar, upstream_op=None, downstream_op=dq_op)
def insert_dequant_param(self, graph: BaseGraph, var: Variable, is_bias: bool) -> None:
    """Pre-quantize a parameter in place, then attach a single DequantizeLinear op.

    Parameters are stored already quantized, so only the dequant side of the
    QDQ pair is inserted on the variable.
    """
    scale, offset, axis = self.inplace_quantization(var, is_bias)
    dq_op = graph.create_operation(op_type='DequantizeLinear', attributes={'axis': axis})
    graph.insert_op_on_var(dq_op, var.name)
    scale_var = graph.create_variable(name=None, value=scale.clone(), is_parameter=True)
    zp_var = graph.create_variable(name=None, value=offset.clone(), is_parameter=True)
    graph.create_link_with_op(scale_var, upstream_op=None, downstream_op=dq_op)
    graph.create_link_with_op(zp_var, upstream_op=None, downstream_op=dq_op)
def correct_param_meta(self, graph: BaseGraph) -> None:
    """Rebuild and propagate TensorMeta information after QDQ ops were inserted.

    First fills in metas for parameter inputs, then walks the graph in
    topological order to propagate shapes/dtypes through the newly created
    QuantizeLinear/DequantizeLinear ops.
    """
    # correct parameter meta data
    for var in graph.variables.values():
        if var.is_parameter:
            for op in var.dest_ops:
                if op.meta_data is None:
                    # Build placeholder FP32 metas for every input/output so
                    # indexed assignment below can't fail on a missing slot.
                    op.meta_data = OperationMeta([TensorMeta(DataType.FP32, None, v.name) for v in
                        op.inputs], [TensorMeta(DataType.FP32, None, v.name) for v in
                        op.outputs], op.name, op.type, -1)
                if torch.is_tensor(var.value):
                    op.meta_data.input_metas[op.inputs.index(var)] = (
                        TensorMeta.parsing_from_torch_tensor(var.value, var.name))
                else:
                    op.meta_data.input_metas[op.inputs.index(var)] = (
                        TensorMeta.parsing_from_numpy_ndarray(var.value, var.name))
    # add variable meta info in topo order
    for op in graph.topological_sort():
        if op.type == 'QuantizeLinear' and op.inputs[0].source_op is not None:
            # Quant op fed by another op: copy meta from its input var, and the
            # output dtype follows the zero-point input (index 2).
            input_var = op.inputs[0]
            op.meta_data.input_metas[0] = input_var.meta
            op.meta_data.output_metas[0].shape = input_var.meta.shape
            op.meta_data.output_metas[0].dtype = op.meta_data.input_metas[2].dtype
            # Propagate through the paired dequant op; its output dtype follows
            # its scale input (index 1), i.e. back to float.
            dequant_op = op.outputs[0].dest_ops[0]
            dequant_op.meta_data.input_metas[0] = op.meta_data.output_metas[0]
            dequant_op.meta_data.output_metas[0].shape = input_var.meta.shape
            dequant_op.meta_data.output_metas[0].dtype = dequant_op.meta_data.input_metas[1].dtype
        # must be input
        elif op.type == 'QuantizeLinear' and op.inputs[0].value is None:
            # Quant op attached to a graph input: search downstream until a
            # destination with a fully known meta is found, then copy it back.
            var = op.outputs[0]
            dest_op = var.dest_ops[0]
            dest_idx = var.dest_idx[0]
            meta = dest_op.meta_data.input_metas[dest_idx]
            # meta can't be None itself because we have built TensorMeta
            # for every input when we correct param meta
            while meta.shape is None or meta.dtype is None:
                assert isinstance(dest_op, Operation)
                var = dest_op.outputs[0]
                dest_op = var.dest_ops[0]
                dest_idx = var.dest_idx[0]
                meta = dest_op.meta_data.input_metas[dest_idx]
            dequant_op = op.outputs[0].dest_ops[0]
            dequant_op.meta_data.output_metas[0] = meta
            dequant_op.meta_data.input_metas[0].shape = meta.shape
            dequant_op.meta_data.input_metas[0].dtype = dequant_op.meta_data.input_metas[2].dtype
            op.meta_data.input_metas[0] = meta
            op.meta_data.output_metas[0].shape = meta.shape
            op.meta_data.output_metas[0].dtype = op.meta_data.input_metas[2].dtype
        elif op.type == 'DequantizeLinear' and op.inputs[0].source_op is None:
            # Dequant op on a pre-quantized parameter: shape from the value,
            # dtype from the scale input (index 1).
            op.meta_data.output_metas[0].shape = op.meta_data.input_metas[0].shape
            op.meta_data.output_metas[0].dtype = op.meta_data.input_metas[1].dtype
def remove_activation(self, graph: BaseGraph, activation_ops: List[Operation]) -> None:
    """Remove asymmetric-quantized activation ops (fused into the upstream op).

    Activation op can only be Relu or Clip, so it is safe to access
    op.inputs[0] / op.outputs[0] as their single input and output.

    Args:
        graph (BaseGraph): PPQ IR graph.
        activation_ops (List[Operation]): activation ops that are candidates for removal.
    """
    for op in activation_ops:
        if not isinstance(op, QuantableOperation):
            continue
        # BUG FIX: the original wrote `Continue` (the `ast.Continue` class,
        # imported at the top of the file), which evaluates as a no-op
        # expression instead of skipping the iteration — ops without any
        # upstream operation then crashed on the [0] index below.
        upstream_ops = graph.get_upstream_operations(op)
        if len(upstream_ops) == 0:
            continue
        upstream_op = upstream_ops[0]
        if not isinstance(upstream_op, QuantableOperation):
            continue
        input_var, input_cfg = op.inputs[0], op.config.input_quantization_config[0]
        # Only asymmetric quantization allows folding the activation clamp
        # into the quantization range.
        if not input_cfg.policy.has_property(QuantizationProperty.ASYMMETRICAL):
            continue
        # PATCH 20220304 Removing graph output op might cause error.
        if op.outputs[0].name in graph.outputs:
            graph.outputs.pop(op.outputs[0].name)
            graph.outputs[input_var.name] = input_var
        input_var, output_var = op.inputs[0], op.outputs[0]
        graph.remove_operation(op)
        graph.create_link_with_var(input_var, output_var)
    # Drop any variables/ops left dangling by the removals above.
    formater = GraphFormatter(graph)
    formater(GraphCommand(GraphCommandType.DELETE_ISOLATED))
def required_opsets(self) -> Dict[str, int]:
    """Return the opset domain -> version mapping required by this exporter."""
    # QDQ export relies on opset 13 semantics (axes/split as inputs).
    return {"ai.onnx": 13}
def transform_op(self, graph: BaseGraph) -> None:
    """Rewrite certain ops from opset 11 form to opset 13 form.

    In opset 13, ReduceSum/Squeeze/Unsqueeze take `axes` and Split takes
    `split` as an input tensor instead of an attribute; move the attribute
    value into a new parameter variable wired as an extra input.
    """
    for op in graph.operations.values():
        if op.type in ('ReduceSum', 'Squeeze', 'Unsqueeze'):
            attr_name = 'axes'
        elif op.type == 'Split':
            attr_name = 'split'
        else:
            continue
        value = convert_any_to_torch_tensor(op.attributes.pop(attr_name), dtype=torch.int64)
        var = graph.create_variable(name=None, value=value, is_parameter=True)
        graph.create_link_with_op(variable=var, upstream_op=None, downstream_op=op)
        # Keep meta data in sync with the newly appended input.
        op.meta_data.input_metas.append(TensorMeta.parsing_from_torch_tensor(var.value, var.name))
def collect_compel_pair(self, graph: BaseGraph) -> List[Tuple[Variable, Operation, TensorQuantizationConfig]]:
    """Collect (var, op, config) pairs whose quantization settings must be unified.

    Makes sure settings of outputs of Add, Concat, Sub ops are applied to their
    inputs as well; this is a supplemental method for coherent quantization
    settings of those special ops.

    Returns:
        List[Tuple[Variable, Operation, TensorQuantizationConfig]]: pairs of a
        quantable input variable, its compelling op, and the op's first input
        quantization config.
    """
    # BUG FIX: the return annotation previously claimed `-> None` although the
    # function returns the collected list (and callers use that return value).
    compel_ops, compel_pairs = [], []
    for op in graph.operations.values():
        if op.type in COMPELING_OP_TYPES and isinstance(op, QuantableOperation):
            compel_ops.append(op)
    for op in compel_ops:
        assert isinstance(op, QuantableOperation)
        for var in op.inputs:
            assert isinstance(var, QuantableVariable)
            # Only record inputs whose producer's config disagrees with the
            # compelling op's own input config.
            if var.source_op_config is not None and \
                var.source_op_config.dominated_by != op.config.input_quantization_config[0].dominated_by:
                compel_pairs.append((var, op, op.config.input_quantization_config[0]))
    return compel_pairs
def export(self, file_path: str, graph: BaseGraph, config_path: str = None) -> None:
# remove switchers.
if not EXPORT_DEVICE_SWITCHER:
processer = GraphDeviceSwitcher(graph)
processer.remove_switcher()
# if a valid config path is given, export quantization config to there.
if config_path is not None:
super().export_quantization_config(config_path, graph)
# collect compel var-op pair in advance to avoid graph change influence
compel_pairs = self.collect_compel_pair(graph)
# collect quantable vars, where we need to insert quant and dequant op
# note that we assume all quantization configs of the same variable maintained
# by different ops are actually the same
quantable_vars,removed_activations = [], []
for var in graph.variables.values():
if isinstance(var, QuantableVariable):
configs = [var.source_op_config] + var.dest_op_configs
for cfg in configs:
if cfg is not None and not QuantizationStates.can_export(cfg.state):
raise AttributeError(f"quantization state of variable {var.name} is unexpected, \
please check if you have finished the whole quantization process")
elif cfg is not None and cfg.state not in {QuantizationStates.FP32, QuantizationStates.SOI}:
quantable_vars.append((cfg, var))
break
for cfg, var in quantable_vars:
assert isinstance(var, QuantableVariable)
assert isinstance(cfg, TensorQuantizationConfig)
# assume parameter var is used by only one op
if | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['GlobalHttpEventCollectorArgs', 'GlobalHttpEventCollector']
@pulumi.input_type
class GlobalHttpEventCollectorArgs:
    # NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    # the repetitive property/setter pattern below is the generator's standard
    # shape for a Pulumi input type and should not be hand-restructured.
    def __init__(__self__, *,
                 dedicated_io_threads: Optional[pulumi.Input[int]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 enable_ssl: Optional[pulumi.Input[bool]] = None,
                 max_sockets: Optional[pulumi.Input[int]] = None,
                 max_threads: Optional[pulumi.Input[int]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 use_deployment_server: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a GlobalHttpEventCollector resource.
        :param pulumi.Input[int] dedicated_io_threads: Number of threads used by HTTP Input server.
        :param pulumi.Input[bool] disabled: Input disabled indicator.
        :param pulumi.Input[bool] enable_ssl: Enable SSL protocol for HTTP data input. `true` = SSL enabled, `false` = SSL disabled.
        :param pulumi.Input[int] max_sockets: Maximum number of simultaneous HTTP connections accepted. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
        :param pulumi.Input[int] max_threads: Maximum number of threads that can be used by active HTTP transactions. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
        :param pulumi.Input[int] port: HTTP data input IP port.
        :param pulumi.Input[int] use_deployment_server: Indicates whether the event collector input writes its configuration to a deployment server repository. When this setting is set to 1 (enabled), the input writes its configuration to the directory specified as repositoryLocation in serverclass.conf.
               Copy the full contents of the splunk_httpinput app directory to this directory for the configuration to work. When enabled, only the tokens defined in the splunk_httpinput app in this repository are viewable and editable on the API and the Data Inputs page in Splunk Web. When disabled, the input writes its configuration to $SPLUNK_HOME/etc/apps by default. Defaults to 0 (disabled).
        """
        # Only explicitly supplied arguments are stored; unset (None) values are
        # omitted so the provider applies its own defaults.
        if dedicated_io_threads is not None:
            pulumi.set(__self__, "dedicated_io_threads", dedicated_io_threads)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if enable_ssl is not None:
            pulumi.set(__self__, "enable_ssl", enable_ssl)
        if max_sockets is not None:
            pulumi.set(__self__, "max_sockets", max_sockets)
        if max_threads is not None:
            pulumi.set(__self__, "max_threads", max_threads)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if use_deployment_server is not None:
            pulumi.set(__self__, "use_deployment_server", use_deployment_server)

    @property
    @pulumi.getter(name="dedicatedIoThreads")
    def dedicated_io_threads(self) -> Optional[pulumi.Input[int]]:
        """
        Number of threads used by HTTP Input server.
        """
        return pulumi.get(self, "dedicated_io_threads")

    @dedicated_io_threads.setter
    def dedicated_io_threads(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dedicated_io_threads", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Input disabled indicator.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="enableSsl")
    def enable_ssl(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable SSL protocol for HTTP data input. `true` = SSL enabled, `false` = SSL disabled.
        """
        return pulumi.get(self, "enable_ssl")

    @enable_ssl.setter
    def enable_ssl(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_ssl", value)

    @property
    @pulumi.getter(name="maxSockets")
    def max_sockets(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of simultaneous HTTP connections accepted. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
        """
        return pulumi.get(self, "max_sockets")

    @max_sockets.setter
    def max_sockets(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_sockets", value)

    @property
    @pulumi.getter(name="maxThreads")
    def max_threads(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of threads that can be used by active HTTP transactions. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
        """
        return pulumi.get(self, "max_threads")

    @max_threads.setter
    def max_threads(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_threads", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        HTTP data input IP port.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="useDeploymentServer")
    def use_deployment_server(self) -> Optional[pulumi.Input[int]]:
        """
        Indicates whether the event collector input writes its configuration to a deployment server repository. When this setting is set to 1 (enabled), the input writes its configuration to the directory specified as repositoryLocation in serverclass.conf.
        Copy the full contents of the splunk_httpinput app directory to this directory for the configuration to work. When enabled, only the tokens defined in the splunk_httpinput app in this repository are viewable and editable on the API and the Data Inputs page in Splunk Web. When disabled, the input writes its configuration to $SPLUNK_HOME/etc/apps by default. Defaults to 0 (disabled).
        """
        return pulumi.get(self, "use_deployment_server")

    @use_deployment_server.setter
    def use_deployment_server(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "use_deployment_server", value)
@pulumi.input_type
class _GlobalHttpEventCollectorState:
def __init__(__self__, *,
dedicated_io_threads: Optional[pulumi.Input[int]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
enable_ssl: Optional[pulumi.Input[bool]] = None,
max_sockets: Optional[pulumi.Input[int]] = None,
max_threads: Optional[pulumi.Input[int]] = None,
port: Optional[pulumi.Input[int]] = None,
use_deployment_server: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering GlobalHttpEventCollector resources.
:param pulumi.Input[int] dedicated_io_threads: Number of threads used by HTTP Input server.
:param pulumi.Input[bool] disabled: Input disabled indicator.
:param pulumi.Input[bool] enable_ssl: Enable SSL protocol for HTTP data input. `true` = SSL enabled, `false` = SSL disabled.
:param pulumi.Input[int] max_sockets: Maximum number of simultaneous HTTP connections accepted. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
:param pulumi.Input[int] max_threads: Maximum number of threads that can be used by active HTTP transactions. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
:param pulumi.Input[int] port: HTTP data input IP port.
:param pulumi.Input[int] use_deployment_server: Indicates whether the event collector input writes its configuration to a deployment server repository. When this setting is set to 1 (enabled), the input writes its configuration to the directory specified as repositoryLocation in serverclass.conf.
Copy the full contents of the splunk_httpinput app directory to this directory for the configuration to work. When enabled, only the tokens defined in the splunk_httpinput app in this repository are viewable and editable on the API and the Data Inputs page in Splunk Web. When disabled, the input writes its configuration to $SPLUNK_HOME/etc/apps by default. Defaults to 0 (disabled).
"""
if dedicated_io_threads is not None:
pulumi.set(__self__, "dedicated_io_threads", dedicated_io_threads)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if enable_ssl is not None:
pulumi.set(__self__, "enable_ssl", enable_ssl)
if max_sockets is not None:
pulumi.set(__self__, "max_sockets", max_sockets)
if max_threads is not None:
pulumi.set(__self__, "max_threads", max_threads)
if port is not None:
pulumi.set(__self__, "port", port)
if use_deployment_server is not None:
pulumi.set(__self__, "use_deployment_server", use_deployment_server)
@property
@pulumi.getter(name="dedicatedIoThreads")
def dedicated_io_threads(self) -> Optional[pulumi.Input[int]]:
"""
Number of threads used by HTTP Input server.
"""
return pulumi.get(self, "dedicated_io_threads")
@dedicated_io_threads.setter
def dedicated_io_threads(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "dedicated_io_threads", value)
@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
"""
Input disabled indicator.
"""
return pulumi.get(self, "disabled")
@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disabled", value)
@property
@pulumi.getter(name="enableSsl")
def enable_ssl(self) -> Optional[pulumi.Input[bool]]:
"""
Enable SSL protocol for HTTP data input. `true` = SSL enabled, `false` = SSL disabled.
"""
return pulumi.get(self, "enable_ssl")
@enable_ssl.setter
def enable_ssl(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ssl", value)
@property
@pulumi.getter(name="maxSockets")
def max_sockets(self) -> Optional[pulumi.Input[int]]:
"""
Maximum number of simultaneous HTTP connections accepted. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
"""
return pulumi.get(self, "max_sockets")
@max_sockets.setter
def max_sockets(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_sockets", value)
@property
@pulumi.getter(name="maxThreads")
def max_threads(self) -> Optional[pulumi.Input[int]]:
"""
Maximum number of threads that can be used by active HTTP transactions. Adjusting this value may cause server performance issues and is not generally recommended. Possible values for this setting vary by OS.
"""
return pulumi.get(self, "max_threads")
@max_threads.setter
def max_threads(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_threads", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
HTTP data input IP port.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="useDeploymentServer")
def use_deployment_server(self) -> Optional[pulumi.Input[int]]:
"""
Indicates whether the event collector input writes its configuration to a deployment server repository. When this setting is set to 1 (enabled), the input writes its configuration to the directory specified as repositoryLocation in serverclass.conf.
Copy the full contents of the splunk_httpinput app directory to this directory for the configuration to work. When enabled, only the tokens defined in the splunk_httpinput app in this repository are viewable and editable on the API and the Data Inputs page in Splunk Web. When disabled, the | |
"""Holonomic Functions and Differential Operators"""
from __future__ import print_function, division
from sympy import symbols, Symbol, diff, S, Dummy, Order, rf, meijerint
from sympy.printing import sstr
from .linearsolver import NewMatrix
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from sympy.core.compatibility import range
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.core.sympify import sympify
from sympy.polys.domains import QQ, ZZ
from sympy.polys.domains.pythonrational import PythonRational
from sympy.simplify.hyperexpand import hyperexpand
from sympy.functions.special.hyper import hyper, meijerg
from sympy.core.numbers import NaN, Infinity, NegativeInfinity
from sympy.matrices import Matrix
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
from sympy.functions.elementary.exponential import exp_polar, exp
def DifferentialOperators(base, generator):
    """
    Returns an Algebra of Differential Operators and the operator for
    differentiation i.e. the `Dx` operator.

    The first argument needs to be the base polynomial ring for the algebra
    and the second argument must be a generator which can be either a
    noncommutative Symbol or a string.

    Examples
    =======

    >>> from sympy.polys.domains import ZZ
    >>> from sympy import symbols
    >>> from sympy.holonomic.holonomic import DifferentialOperators
    >>> x = symbols('x')
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    """
    algebra = DifferentialOperatorAlgebra(base, generator)
    return algebra, algebra.derivative_operator
class DifferentialOperatorAlgebra(object):
    """
    An Ore Algebra is a set of noncommutative polynomials in the
    intermediate `Dx` and coefficients in a base ring A. It follows the
    commutation rule:

        Dx * a = sigma(a) * Dx + delta(a)

    Where sigma: A --> A is an endomorphism and delta: A --> A is a
    skew-derivation i.e. delta(ab) = delta(a) * b + sigma(a) * delta(b)

    If one takes the sigma as identity map and delta as the standard derivation
    then it becomes the algebra of Differential Operators also called
    a Weyl Algebra i.e. an algebra whose elements are Differential Operators.

    This class represents a Weyl Algebra and serves as the parent ring for
    Differential Operators.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy import symbols
    >>> from sympy.holonomic.holonomic import DifferentialOperators
    >>> x = symbols('x')
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    >>> R
    Univariate Differential Operator Algebra in intermediate Dx over the base ring
    ZZ[x]

    See Also
    ========

    DifferentialOperator
    """

    def __init__(self, base, generator):
        # the base polynomial ring for the algebra
        self.base = base
        # the operator representing differentiation i.e. `Dx`
        self.derivative_operator = DifferentialOperator(
            [base.zero, base.one], self)

        if generator is None:
            self.gen_symbol = symbols('Dx', commutative=False)
        elif isinstance(generator, str):
            self.gen_symbol = symbols(generator, commutative=False)
        elif isinstance(generator, Symbol):
            self.gen_symbol = generator
        else:
            # BUG FIX: previously an unsupported generator type silently left
            # `gen_symbol` unset, causing a confusing AttributeError later.
            raise TypeError(
                "generator must be a string, a noncommutative Symbol, or None")

    def __str__(self):
        """Readable description of the algebra, including the base ring."""
        string = 'Univariate Differential Operator Algebra in intermediate '\
            + sstr(self.gen_symbol) + ' over the base ring ' + \
            (self.base).__str__()
        return string

    __repr__ = __str__

    def __eq__(self, other):
        # BUG FIX: guard against comparison with arbitrary objects, which
        # previously raised AttributeError instead of failing the comparison.
        if not isinstance(other, DifferentialOperatorAlgebra):
            return NotImplemented
        return self.base == other.base and self.gen_symbol == other.gen_symbol
def _add_lists(list1, list2):
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
class DifferentialOperator(object):
    """
    Differential Operators are elements of Weyl Algebra. The Operators
    are defined by a list of polynomials in the base ring and the
    parent ring of the Operator.

    Takes a list of polynomials for each power of Dx and the
    parent ring which must be an instance of DifferentialOperatorAlgebra.

    A Differential Operator can be created easily using
    the operator `Dx`. See examples below.

    Examples
    ========

    >>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
    >>> from sympy.polys.domains import ZZ, QQ
    >>> from sympy import symbols
    >>> x = symbols('x')
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')

    >>> DifferentialOperator([0, 1, x**2], R)
    (1)Dx + (x**2)Dx**2

    >>> (x*Dx*x + 1 - Dx**2)**2
    (2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)Dx + (x**4 - 6*x - 2)Dx**2 + (-2*x**2)Dx**3 + (1)Dx**4

    See Also
    ========

    DifferentialOperatorAlgebra
    """

    _op_priority = 20

    def __init__(self, list_of_poly, parent):
        """
        :param list_of_poly: coefficient for each power of ``Dx``, lowest
            power first; the list should not have trailing zeroes.  Entries
            that are not already base-ring elements are converted with
            ``from_sympy``.  NOTE: the conversion happens in place, so the
            caller's list may be mutated.
        :param parent: the ``DifferentialOperatorAlgebra`` the operator
            belongs to.
        """
        # the parent ring for this operator
        # must be an DifferentialOperatorAlgebra object
        self.parent = parent
        # convert the expressions into ring elements using from_sympy
        if isinstance(list_of_poly, list):
            for i, j in enumerate(list_of_poly):
                if not isinstance(j, self.parent.base.dtype):
                    list_of_poly[i] = self.parent.base.from_sympy(sympify(j))

        # sequence of polynomials in x for each power of Dx
        self.listofpoly = list_of_poly
        # highest power of `Dx`
        self.order = len(self.listofpoly) - 1

    def __mul__(self, other):
        """
        Multiplies two DifferentialOperator and returns another
        DifferentialOperator instance using the commutation rule
        Dx*a = a*Dx + a'
        """
        listofself = self.listofpoly

        if not isinstance(other, DifferentialOperator):
            if not isinstance(other, self.parent.base.dtype):
                listofother = [self.parent.base.from_sympy(sympify(other))]
            else:
                listofother = [other]
        else:
            listofother = other.listofpoly

        # multiplies a polynomial `b` with a list of polynomials
        def _mul_dmp_diffop(b, listofother):
            if isinstance(listofother, list):
                sol = []
                for i in listofother:
                    sol.append(i * b)
                return sol
            else:
                return [b * listofother]

        sol = _mul_dmp_diffop(listofself[0], listofother)

        # compute Dx^i * b: one application of Dx to the coefficient list,
        # i.e. shift the list (the a*Dx part) and add the derivative (a').
        def _mul_Dxi_b(b):
            sol1 = [self.parent.base.zero]
            sol2 = []

            if isinstance(b, list):
                for i in b:
                    sol1.append(i)
                    sol2.append(i.diff())
            else:
                sol1.append(self.parent.base.from_sympy(b))
                sol2.append(self.parent.base.from_sympy(b).diff())

            return _add_lists(sol1, sol2)

        for i in range(1, len(listofself)):
            # find Dx^i * b in ith iteration
            listofother = _mul_Dxi_b(listofother)
            # solution = solution + listofself[i] * (Dx^i * b)
            sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))

        return DifferentialOperator(sol, self.parent)

    def __rmul__(self, other):
        """Left multiplication by a scalar/base-ring element (commutes,
        since `other` contains no `Dx`)."""
        if not isinstance(other, DifferentialOperator):
            if not isinstance(other, self.parent.base.dtype):
                other = (self.parent.base).from_sympy(sympify(other))

            sol = []
            for j in self.listofpoly:
                sol.append(other * j)

            return DifferentialOperator(sol, self.parent)

    def __add__(self, other):
        """Coefficient-wise addition; scalars are added to the constant
        (Dx**0) coefficient."""
        if isinstance(other, DifferentialOperator):
            sol = _add_lists(self.listofpoly, other.listofpoly)
            return DifferentialOperator(sol, self.parent)

        else:
            list_self = self.listofpoly
            if not isinstance(other, self.parent.base.dtype):
                list_other = [((self.parent).base).from_sympy(sympify(other))]
            else:
                list_other = [other]
            sol = []
            sol.append(list_self[0] + list_other[0])
            sol += list_self[1:]

            return DifferentialOperator(sol, self.parent)

    __radd__ = __add__

    def __sub__(self, other):
        return self + (-1) * other

    def __rsub__(self, other):
        return (-1) * self + other

    def __neg__(self):
        return -1 * self

    def __div__(self, other):
        return self * (S.One / other)

    def __truediv__(self, other):
        return self.__div__(other)

    def __pow__(self, n):
        """Raise the operator to a non-negative integer power `n` using
        repeated squaring."""
        if n == 1:
            return self
        if n == 0:
            return DifferentialOperator([self.parent.base.one], self.parent)

        # if self is `Dx`, the result is simply Dx**n
        if self.listofpoly == self.parent.derivative_operator.listofpoly:
            sol = []
            for i in range(0, n):
                sol.append(self.parent.base.zero)
            sol.append(self.parent.base.one)
            return DifferentialOperator(sol, self.parent)

        # the general case: square-and-multiply
        if n % 2 == 1:
            powreduce = self**(n - 1)
            return powreduce * self
        # BUG FIX: use floor division so the recursive exponent stays an
        # int (`n / 2` is true division here because of the module-level
        # `from __future__ import division`, producing float exponents).
        powreduce = self**(n // 2)
        return powreduce * powreduce

    def __str__(self):
        """Human-readable form, e.g. ``(1)Dx + (x**2)Dx**2``; zero
        coefficients are skipped."""
        listofpoly = self.listofpoly
        print_str = ''

        for i, j in enumerate(listofpoly):
            if j == self.parent.base.zero:
                continue

            if i == 0:
                print_str += '(' + sstr(j) + ')'
                continue

            if print_str:
                print_str += ' + '

            if i == 1:
                print_str += '(' + sstr(j) + ')Dx'
                continue

            print_str += '(' + sstr(j) + ')' + 'Dx**' + sstr(i)

        return print_str

    __repr__ = __str__

    def __eq__(self, other):
        if isinstance(other, DifferentialOperator):
            if self.listofpoly == other.listofpoly and \
                    self.parent == other.parent:
                return True
            else:
                return False
        else:
            # comparison with a scalar: equal iff the constant coefficient
            # matches and all higher coefficients are zero
            if self.listofpoly[0] == other:
                # BUG FIX: previously read the undefined name `listofpoly`
                # (NameError); it must inspect self.listofpoly.
                for i in self.listofpoly[1:]:
                    # NOTE(review): identity check against the ring's zero
                    # singleton, kept from the original; an equality test
                    # may be intended here -- confirm against base.dtype.
                    if i is not self.parent.base.zero:
                        return False
                return True
            else:
                return False
def _normalize(list_of, parent, negative=True):
    """
    Normalize a given annihilator: clear all denominators from the
    coefficient list `list_of`, cancel the common numerator gcd, and
    return the result as a DifferentialOperator in `parent`.

    With ``negative=True`` the lcm of denominators is negated, flipping
    the overall sign of the resulting operator.
    """
    num = []
    denom = []
    base = parent.base
    K = base.get_field()
    # integer polynomial ring over the same generator, used to hold and
    # combine the denominators
    R = ZZ.old_poly_ring(base.gens[0])
    lcm_denom = R.from_sympy(S(1))
    list_of_coeff = []

    # convert polynomials to the elements of associated
    # fraction field
    for i, j in enumerate(list_of):
        if isinstance(j, base.dtype):
            list_of_coeff.append(K.new(j.rep))
        elif not isinstance(j, K.dtype):
            list_of_coeff.append(K.from_sympy(sympify(j)))
        else:
            list_of_coeff.append(j)

        # corresponding numerators of the sequence of polynomials
        num.append(base(list_of_coeff[i].num))

        # corresponding denominators
        den = list_of_coeff[i].den
        if isinstance(den[0], PythonRational):
            # unwrap PythonRational coefficients to plain ints (`.p`) so
            # the denominator can be rebuilt as a ZZ polynomial below
            # NOTE(review): this inner loop shadows the outer index `i`;
            # harmless here because `i` is not used again before rebinding.
            for i, j in enumerate(den):
                den[i] = j.p
        denom.append(R(den))

    # lcm of denominators in the coefficients
    for i in denom:
        lcm_denom = i.lcm(lcm_denom)

    if negative is True:
        lcm_denom = -lcm_denom

    lcm_denom = K.new(lcm_denom.rep)

    # multiply the coefficients with lcm
    for i, j in enumerate(list_of_coeff):
        list_of_coeff[i] = j * lcm_denom

    gcd_numer = base.from_FractionField(list_of_coeff[-1], K)

    # gcd of numerators in the coefficients
    for i in num:
        gcd_numer = i.gcd(gcd_numer)

    gcd_numer = K.new(gcd_numer.rep)

    # divide all the coefficients by the gcd
    for i, j in enumerate(list_of_coeff):
        list_of_coeff[i] = base.from_FractionField(j / gcd_numer, K)

    return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly):
"""
Let a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0
where a0, a1,... are polynomials or rational functions. The function
returns b0, b1, | |
<filename>kopf/reactor/indexing.py
import collections.abc
import logging
from typing import Any, Dict, Generic, Iterable, Iterator, \
Mapping, Optional, Set, Tuple, TypeVar, Union
from kopf.reactor import causation, handling, lifecycles, registries
from kopf.storage import states
from kopf.structs import bodies, configuration, containers, ephemera, handlers, patches, references
Key = Tuple[references.Namespace, Optional[str], Optional[str]]
_K = TypeVar('_K')
_V = TypeVar('_V')
class Store(ephemera.Store[_V], Generic[_V]):
    """
    A concrete `.ephemera.Store` implementation used by the indexers.

    The association of resources to values is internal and never exposed
    to handlers or operators. Currently it is a dictionary keyed by
    ``(namespace, name, uid)`` tuples of type `Key`, but this layout may
    change without notice.

    Thanks to the underlying ``dict``, updates and deletions are O(1).
    """
    __items: Dict[Key, _V]

    def __init__(self) -> None:
        super().__init__()
        self.__items = {}

    def __repr__(self) -> str:
        return repr(list(self.__items.values()))

    def __bool__(self) -> bool:
        return bool(self.__items)

    def __len__(self) -> int:
        return len(self.__items)

    def __iter__(self) -> Iterator[_V]:
        return iter(self.__items.values())

    def __contains__(self, obj: object) -> bool:
        for value in self.__items.values():
            if value == obj:
                return True
        return False

    # Indexers' internal protocol. Must not be used by handlers & operators.
    def _discard(self, acckey: Key) -> None:
        self.__items.pop(acckey, None)

    # Indexers' internal protocol. Must not be used by handlers & operators.
    def _replace(self, acckey: Key, obj: _V) -> None:
        # Skip the write when the stored value is already equal -- this
        # avoids needless dict updates and rehashes.
        if acckey in self.__items and self.__items[acckey] == obj:
            return
        self.__items[acckey] = obj
class Index(ephemera.Index[_K, _V], Generic[_K, _V]):
    """
    A specific implementation of `.ephemera.Index` usable by indexers.

    The indexers and all writing interfaces for indices are not exposed
    to handlers or operators or developers, they remain strictly internal.
    Only the read-only indices and stores are exposed.

    The forward index points to the indexed values of one or more objects.
    The lookups are O(1), as Python's dict description promises.

    The reverse index points to the main index's keys where a specific object
    is stored, thus reducing the updates/deletions from O(K) to O(k), where
    "K" is the number of all keys, "k" is the number of keys per object.
    Assuming the amount of keys per object is usually fixed, it is O(1).
    """
    # forward index: lookup key -> store of values of all matching objects
    __items: Dict[_K, Store[_V]]
    # reverse index: object key -> set of forward-index keys holding it
    __reverse: Dict[Key, Set[_K]]

    def __init__(self) -> None:
        super().__init__()
        self.__items = {}
        self.__reverse = {}

    def __repr__(self) -> str:
        return repr(self.__items)

    def __bool__(self) -> bool:
        return bool(self.__items)

    def __len__(self) -> int:
        return len(self.__items)

    def __iter__(self) -> Iterator[_K]:
        return iter(self.__items)

    def __getitem__(self, item: _K) -> Store[_V]:
        return self.__items[item]

    def __contains__(self, item: object) -> bool:  # for performant lookups!
        return item in self.__items

    # Indexers' internal protocol. Must not be used by handlers & operators.
    def _discard(self, acckey: Key, obj_keys: Optional[Iterable[_K]] = None) -> None:
        # We know all the keys where that object is indexed, so we delete only from there.
        # Assume that the reverse/forward indices are consistent. If not, fix it, not "fall back".
        if acckey in self.__reverse:
            # With obj_keys=None (full purge), iterate over a copy of the
            # reverse set, since it is mutated in the loop below.
            obj_keys = obj_keys if obj_keys is not None else self.__reverse[acckey].copy()
            for obj_key in obj_keys:
                # Discard from that store and remove all freshly emptied stores.
                store = self.__items[obj_key]
                store._discard(acckey)
                if not store:
                    del self.__items[obj_key]
                # One by one -- so that the reverse index is consistent even in case of errors.
                self.__reverse[acckey].discard(obj_key)
                if not self.__reverse[acckey]:
                    del self.__reverse[acckey]

    # Indexers' internal protocol. Must not be used by handlers & operators.
    def _replace(self, acckey: Key, obj: Mapping[_K, _V]) -> None:
        # Remember where the object is stored, so that the updates/deletions are O(1) later.
        try:
            reverse = self.__reverse[acckey]
        except KeyError:
            reverse = self.__reverse[acckey] = set()

        # Update (append or replace) all stores that are still related to `obj`.
        for obj_key, obj_val in obj.items():
            try:
                store = self.__items[obj_key]
            except KeyError:
                store = self.__items[obj_key] = Store()
            store._replace(acckey, obj_val)
            reverse.add(obj_key)

        # Discard from all stores that surely do not contain `obj` anymore.
        self._discard(acckey, reverse - set(obj.keys()))
class OperatorIndexer:
    """
    Indexers are read-write managers of read-only and minimalistic indices.

    .. note::
        Indexers are internal to the framework, they are not exposed
        to the operator developers (except for embedded operators).
    """
    index: Index[Any, Any]

    def __init__(self) -> None:
        super().__init__()
        self.index = Index()

    def __repr__(self) -> str:
        return repr(self.index)

    def discard(self, key: Key) -> None:
        """ Remove all values of the object, and keep ready for re-indexing. """
        self.index._discard(key)

    def replace(self, key: Key, obj: object) -> None:
        """ Store/merge the object's indexing results. """
        # Non-mapping results are wrapped into a single-entry mapping.
        if not isinstance(obj, collections.abc.Mapping):
            obj = {None: obj}
        self.index._replace(key, obj)
class OperatorIndexers(Dict[handlers.HandlerId, OperatorIndexer]):
    """A mapping of handler ids to their indexers, plus a read-only view."""

    def __init__(self) -> None:
        super().__init__()
        self.indices = OperatorIndices(self)

    def ensure(self, __handlers: Iterable[handlers.ResourceIndexingHandler]) -> None:
        """
        Pre-create indices/indexers to match the existing handlers.

        Any other indices will cause a KeyError at runtime.
        This is done to control the consistency of in-memory structures.
        """
        for handler in __handlers:
            self[handler.id] = OperatorIndexer()

    def discard(
            self,
            body: bodies.Body,
    ) -> None:
        """ Remove all values of this object from all indexers. Forget it! """
        key = self.make_key(body)
        for indexer in self.values():
            indexer.discard(key)

    def replace(
            self,
            body: bodies.Body,
            outcomes: Mapping[handlers.HandlerId, states.HandlerOutcome],
    ) -> None:
        """ Interpret the indexing results and apply them to the indices. """
        key = self.make_key(body)

        # Store the values: either for new objects or those re-matching the filters.
        for id, outcome in outcomes.items():
            if outcome.exception is not None:
                self[id].discard(key)
            elif outcome.result is not None:
                self[id].replace(key, outcome.result)

        # Purge the values: for those stopped matching the filters.
        for id in self:
            if id not in outcomes:
                self[id].discard(key)

    def make_key(self, body: bodies.Body) -> Key:
        """
        Make a key to address an object in internal containers.

        The key is not exposed to the users via indices,
        so its structure and type can be safely changed any time.
        However, the key must be as lightweight as possible:
        no dataclasses or namedtuples, only builtins.

        The name and namespace do not add value on top of the uid's uniqueness.
        They are here for debugging and for those rare objects
        that have no uid but are still exposed via the K8s API
        (highly unlikely to be indexed though).
        """
        meta = body.get('metadata', {})
        return meta.get('namespace'), meta.get('name'), meta.get('uid')
class OperatorIndices(ephemera.Indices):
    """
    A read-only view over the operator's indices.

    This view travels the whole call stack of the operator inside a cause
    object and is later unfolded into the kwargs of the handlers.

    Why a separate view? First, carrying the indexers directly would create
    a circular import chain:

    * "causation" requires "OperatorIndexers" from "indexing".
    * "indexing" requires "ResourceIndexingCause" from "causation".

    The chain is broken by depending only on the `~ephemera.Indices`
    interface, while the implementation stays here.

    Second, exposing read-write indexers would tempt other modules and
    components into modifying them. Only "indexing" (this module) is
    supposed to modify the indices, and only via the indexers.
    """

    def __init__(self, indexers: "OperatorIndexers") -> None:
        super().__init__()
        self.__indexers = indexers

    def __len__(self) -> int:
        return len(self.__indexers)

    def __iter__(self) -> Iterator[str]:
        return iter(self.__indexers)

    def __getitem__(self, id: str) -> Index[Any, Any]:
        return self.__indexers[handlers.HandlerId(id)].index

    def __contains__(self, id: object) -> bool:
        return id in self.__indexers
async def index_resource(
*,
indexers: OperatorIndexers,
registry: registries.OperatorRegistry,
settings: configuration.OperatorSettings,
resource: references.Resource,
raw_event: bodies.RawEvent,
memory: containers.ResourceMemory,
logger: Union[logging.Logger, logging.LoggerAdapter],
body: bodies.Body,
) -> None:
"""
Populate the indices from the received event. Log but ignore all errors.
This is a lightweight and standalone process, which is executed before
any real handlers are invoked. Multi-step calls are also not supported.
If the handler fails, it fails and is never retried.
Note: K8s-event posting is skipped for `kopf.on.event` handlers,
as they should be silent. Still, the messages are logged normally.
"""
if not registry._resource_indexing.has_handlers(resource=resource):
pass
elif raw_event['type'] == 'DELETED':
# Do not index it if it is deleted. Just discard quickly (ASAP!).
indexers.discard(body=body)
else:
# Otherwise, go for full indexing with handlers invocation with all kwargs.
cause = causation.ResourceIndexingCause(
resource=resource,
indices=indexers.indices,
logger=logger,
patch=patches.Patch(), # NB: not applied. TODO: get rid of it!
body=body,
memo=memory.memo,
)
# Note: the indexing state contains only failures & retries. Successes will be re-executed.
indexing_handlers = registry._resource_indexing.get_handlers(cause=cause)
state = memory.indexing_state
state = state if state is not | |
y**2 + z**2) - 15)/(x**2 + y**2 + z**2)**(7/2)
# NOTE(review): the functions below appear to be components of a rank-5
# Cartesian derivative tensor of 1/r with r = sqrt(x**2 + y**2 + z**2)
# (each result scales as r**-6, consistent with a fifth derivative), with
# the function-name suffix giving the differentiation indices.  Many bodies
# repeat because the tensor is symmetric under index permutation.
# TODO: confirm against the generator of this table.
@jit(nopython=True, cache=True)
def Txxxxy(x,y,z):
    return 45*y*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxxxz(x,y,z):
    return 45*z*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxxyx(x,y,z):
    return 45*y*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxxyy(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxxyz(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxxzx(x,y,z):
    return 45*z*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxxzy(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxxzz(x,y,z):
    return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyxx(x,y,z):
    return 45*y*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyxy(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyxz(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxyyx(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyyy(x,y,z):
    return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyyz(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyzx(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxyzy(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxyzz(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
# Continuation of the fifth-derivative tensor table (see note above the
# Txx* group): Txxz** and Txyx** components.
@jit(nopython=True, cache=True)
def Txxzxx(x,y,z):
    return 45*z*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzxy(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxzxz(x,y,z):
    return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzyx(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txxzyy(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzyz(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzzx(x,y,z):
    return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzzy(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txxzzz(x,y,z):
    return 15*z*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxxx(x,y,z):
    return 45*y*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxxy(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxxz(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyxyx(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxyy(x,y,z):
    return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxyz(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxzx(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyxzy(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyxzz(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
# Continuation of the fifth-derivative tensor table (see note above the
# Txx* group): Txyy** and Txyz** components.
@jit(nopython=True, cache=True)
def Txyyxx(x,y,z):
    return 15*x*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyxy(x,y,z):
    return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyxz(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyyx(x,y,z):
    return 15*y*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 21*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyyy(x,y,z):
    return 45*x*(-21*y**4/(x**2 + y**2 + z**2)**2 + 14*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyyz(x,y,z):
    return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyyzx(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyyzy(x,y,z):
    return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyyzz(x,y,z):
    return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzxx(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyzxy(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzxz(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzyx(x,y,z):
    return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzyy(x,y,z):
    return 315*x*y*z*(-3*y**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txyzyz(x,y,z):
    return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzzx(x,y,z):
    return 15*y*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzzy(x,y,z):
    return 15*x*(-63*y**2*z**2/(x**2 + y**2 + z**2)**2 + 7*y**2/(x**2 + y**2 + z**2) + 7*z**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txyzzz(x,y,z):
    return 315*x*y*z*(-3*z**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
# Continuation of the fifth-derivative tensor table (see note above the
# Txx* group): start of the Txz** components.
@jit(nopython=True, cache=True)
def Txzxxx(x,y,z):
    return 45*z*(-21*x**4/(x**2 + y**2 + z**2)**2 + 14*x**2/(x**2 + y**2 + z**2) - 1)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxxy(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzxxz(x,y,z):
    return 15*x*(-63*x**2*z**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 21*z**2/(x**2 + y**2 + z**2) - 3)/(x**2 + y**2 + z**2)**(7/2)
@jit(nopython=True, cache=True)
def Txzxyx(x,y,z):
    return 315*x*y*z*(-3*x**2/(x**2 + y**2 + z**2) + 1)/(x**2 + y**2 + z**2)**(9/2)
@jit(nopython=True, cache=True)
def Txzxyy(x,y,z):
return 15*z*(-63*x**2*y**2/(x**2 + y**2 + z**2)**2 + 7*x**2/(x**2 + y**2 + z**2) + 7*y**2/(x**2 + y**2 + z**2) - 1)/(x**2 + | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Minimal streaming codec for a Monero binary serialization.
Used for a binary serialization in blockchain and for hash computation for signatures.
Equivalent of BEGIN_SERIALIZE_OBJECT(), /src/serialization/serialization.h
- The wire binary format does not use tags. Structure has to be read from the binary stream
with the scheme specified in order to parse the structure.
- Heavily uses variable integer serialization - similar to the UTF8 or LZ4 number encoding.
- Supports: blob, string, integer types - variable or fixed size, containers of elements,
variant types, messages of elements
For de-serializing (loading) types, object with `AsyncReader`
interface is required:
>>> class AsyncReader:
>>> async def areadinto(self, buffer):
>>> """
>>> Reads `len(buffer)` bytes into `buffer`, or raises `EOFError`.
>>> """
For serializing (dumping) types, object with `AsyncWriter` interface is
required:
>>> class AsyncWriter:
>>> async def awrite(self, buffer):
>>> """
>>> Writes all bytes from `buffer`, or raises `EOFError`.
>>> """
'''
import logging
import sys
from . import helpers
from .protobuf import const, load_uvarint, dump_uvarint
from .core.readwriter import MemoryReaderWriter
from .core.base_types import *
from .core.erefs import has_elem, set_elem, get_elem, ElemRefArr, ElemRefObj, eref, is_elem_ref
from .core.int_serialize import *
from .core.message_types import *
from .core.obj_helper import *
from .core.versioning import TypeWrapper, VersionDatabase, VersionSetting
logger = logging.getLogger(__name__)
def import_def(module, name):
    """
    Import `name` from `module` dynamically.

    Modules that are not loaded yet are imported only when they belong to
    the ``monero_serialize`` package; anything else raises ValueError.
    Already-loaded modules are looked up directly in ``sys.modules``.
    """
    if module not in sys.modules:
        if not module.startswith("monero_serialize"):
            raise ValueError("Module not allowed: %s" % module)
        logger.debug("Importing: from %s import %s", module, name)
        __import__(module, None, None, (name,), 0)
    return getattr(sys.modules[module], name)
class Archive(object):
"""
Archive object for object binary serialization / deserialization.
Resembles Archive API from the Monero codebase or Boost serialization archive.
The design goal is to provide uniform API both for serialization and deserialization
so the code is not duplicated for serialization and deserialization but the same
for both ways in order to minimize potential bugs in the code.
In order to use the archive for both ways we have to use so-called field references
as we cannot directly modify given element as a parameter (value-passing) as its performed
in C++ code. see: eref(), get_elem(), set_elem()
"""
def __init__(self, iobj, writing=True, versions=None, **kwargs):
    """
    :param iobj: reader/writer object the archive operates on
    :param writing: True for dumping (serialization), False for loading
    :param versions: optional per-type version overrides
    """
    self.iobj = iobj
    self.writing = writing
    self.tracker = helpers.Tracker()
    # Using boost versioning also for BC format.
    self.version_settings = versions  # type: VersionSetting
def _cur_version(self, tw, elem=None):
has_version = False
if elem:
relem = get_elem(elem)
if relem and hasattr(relem, 'BOOST_VERSION_CUR'):
version = getattr(relem, 'BOOST_VERSION_CUR')
has_version = True
if not has_version and self.version_settings and tw in self.version_settings:
version = self.version_settings[tw]
else:
version = tw.get_current_version('bc')
return version
async def version(self, tp, params, version=None, elem=None):
    """
    Resolve the serialization version for type `tp` with `params`.

    NOTE(review): the `version` argument is accepted but not used here;
    it is kept for interface compatibility.
    """
    return self._cur_version(TypeWrapper(tp, params), elem)
async def tag(self, tag):
"""
:param tag:
:return:
"""
async def begin_array(self):
"""
Mark start of the array. Used for JSON serialization.
:return:
"""
async def end_array(self):
"""
Mark end of the array. Used for JSON serialization.
:return:
"""
async def begin_object(self):
"""
Mark start of the object. Used for JSON serialization.
:return:
"""
async def end_object(self):
    """
    Mark end of the object. Used for JSON serialization; no-op for
    this binary archive.
    :return: None
    """
async def prepare_container(self, size, container, elem_type=None):
    """Make sure `container` can hold `size` deserialized elements.

    No-op when writing. When reading: returns a fresh element array if
    `container` is None, otherwise pads the existing value up to `size`
    with default-constructed elements and stores it back.

    :param size: required number of elements
    :param container: target container or field reference (may be None)
    :param elem_type: element type used for default construction
    :return: the prepared list, or None when writing
    """
    if self.writing:
        return
    if container is None:
        return gen_elem_array(size, elem_type)
    fvalue = get_elem(container)
    if fvalue is None:
        fvalue = []
    missing = max(0, size - len(fvalue))
    fvalue += gen_elem_array(missing, elem_type)
    set_elem(container, fvalue)
    return fvalue
async def prepare_message(self, msg, msg_type):
    """Instantiate `msg_type` into `msg` ahead of deserialization.

    No-op when writing.

    :param msg: message field reference
    :param msg_type: message class to instantiate
    :return: the stored instance, or None when writing
    """
    if not self.writing:
        return set_elem(msg, msg_type())
async def uvarint(self, elem):
    """Serialize/deserialize a variable-length unsigned integer.

    :param elem: value to dump (ignored when reading)
    :return: dump result when writing, loaded value when reading
    """
    if not self.writing:
        return await load_uvarint(self.iobj)
    return await dump_uvarint(self.iobj, elem)
async def uint(self, elem, elem_type, params=None, width=None):
    """Serialize/deserialize a fixed-width unsigned integer.

    :param elem: value to dump (ignored when reading)
    :param elem_type: type carrying the default WIDTH
    :param params: unused; kept for interface compatibility
    :param width: explicit byte width, overrides elem_type.WIDTH
    :return: dump result when writing, loaded value when reading
    """
    nbytes = width if width else elem_type.WIDTH
    if self.writing:
        return await dump_uint(self.iobj, elem, nbytes)
    return await load_uint(self.iobj, nbytes)
async def unicode_type(self, elem):
    """Serialize/deserialize a unicode string.

    :param elem: string to dump (ignored when reading)
    :return: dump result when writing, loaded string when reading
    """
    if not self.writing:
        return await load_unicode(self.iobj)
    return await dump_unicode(self.iobj, elem)
async def blob(self, elem=None, elem_type=None, params=None):
    """Serialize/deserialize a binary blob.

    Types providing `serialize_archive` handle themselves; otherwise
    the raw dump/load helpers are used.

    :param elem: blob value or field reference
    :param elem_type: blob type (defaults to elem's class)
    :param params: extra type parameters
    :return: serialization result / loaded blob
    """
    elem_type = elem_type or elem.__class__
    if hasattr(elem_type, "serialize_archive"):
        if elem is None:
            elem = elem_type()
        return await elem.serialize_archive(
            self, elem=elem, elem_type=elem_type, params=params
        )
    if not self.writing:
        return await load_blob(
            self.iobj, elem_type=elem_type, params=params, elem=elem
        )
    return await dump_blob(
        self.iobj, elem=elem, elem_type=elem_type, params=params
    )
async def container(self, container=None, container_type=None, params=None):
    """Serialize/deserialize a container.

    Types providing `serialize_archive` handle themselves; otherwise
    the archive's private container dump/load helpers are used.

    :param container: container value or field reference
    :param container_type: container type
    :param params: extra type parameters
    :return: serialization result / loaded container
    """
    if hasattr(container_type, "serialize_archive"):
        if container is None:
            container = container_type()
        return await container.serialize_archive(
            self, elem=container, elem_type=container_type, params=params
        )
    if not self.writing:
        return await self._load_container(
            self.iobj, container_type, params=params, container=container
        )
    return await self._dump_container(
        self.iobj, container, container_type, params
    )
async def container_size(
    self, container_len=None, container_type=None, params=None
):
    """Write a container size prefix; only supported when serializing.

    :param container_len: number of elements to announce
    :param container_type: container type
    :param params: extra type parameters
    :raises ValueError: for archive-serialized types, or when reading
    :return: dump result
    """
    if hasattr(container_type, "serialize_archive"):
        raise ValueError("not supported")
    if not self.writing:
        raise ValueError("Not supported")
    return await self._dump_container_size(
        self.iobj, container_len, container_type, params
    )
async def container_val(self, elem, container_type, params=None):
    """Write a single container element; only supported when serializing.

    :param elem: element value
    :param container_type: container type
    :param params: extra type parameters
    :raises ValueError: for archive-serialized types, or when reading
    :return: dump result
    """
    if hasattr(container_type, "serialize_archive"):
        raise ValueError("not supported")
    if not self.writing:
        raise ValueError("Not supported")
    return await self._dump_container_val(
        self.iobj, elem, container_type, params
    )
async def tuple(self, elem=None, elem_type=None, params=None):
    """Serialize/deserialize a fixed tuple of typed elements.

    Types providing `serialize_archive` handle themselves; otherwise
    the archive's private tuple dump/load helpers are used.

    :param elem: tuple value or field reference
    :param elem_type: tuple type
    :param params: extra type parameters
    :return: serialization result / loaded tuple
    """
    if hasattr(elem_type, "serialize_archive"):
        target = elem_type() if elem is None else elem
        return await target.serialize_archive(
            self, elem=elem, elem_type=elem_type, params=params
        )
    if not self.writing:
        return await self._load_tuple(
            self.iobj, elem_type, params=params, elem=elem
        )
    return await self._dump_tuple(self.iobj, elem, elem_type, params)
async def variant(self, elem=None, elem_type=None, params=None, wrapped=None):
    """Serialize/deserialize a variant (tagged-union) type.

    :param elem: variant value or field reference
    :param elem_type: variant type (defaults to elem's class)
    :param params: extra type parameters
    :param wrapped: passed to the variant loader
    :return: serialization result / loaded variant
    """
    elem_type = elem_type if elem_type else elem.__class__
    if hasattr(elem_type, "serialize_archive"):
        elem = elem_type() if elem is None else elem
        return await elem.serialize_archive(
            self, elem=elem, elem_type=elem_type, params=params
        )
    # elem_type was already normalized above; the original re-derived
    # `elem_type if elem_type else elem.__class__` in both branches
    # below, which was dead code.
    if self.writing:
        return await self._dump_variant(
            self.iobj,
            elem=elem,
            elem_type=elem_type,
            params=params,
        )
    else:
        return await self._load_variant(
            self.iobj,
            elem_type=elem_type,
            params=params,
            elem=elem,
            wrapped=wrapped,
        )
async def message(self, msg, msg_type=None, use_version=None):
    """Serialize/deserialize a message.

    Format: *fields

    :param msg: message instance (created from the type when None)
    :param msg_type: message type (defaults to msg's class)
    :param use_version: explicit version, otherwise resolved via version()
    :return: the (de)serialized message
    """
    elem_type = msg_type if msg_type is not None else msg.__class__
    msg = elem_type() if msg is None else msg
    if hasattr(elem_type, "serialize_archive"):
        version = await self.version(elem_type, None, elem=msg) if use_version is None else use_version
        return await msg.serialize_archive(self, version=version)
    # The original recomputed `mtype = msg.__class__ if msg_type is None
    # else msg_type` (identical to elem_type) and re-checked
    # serialize_archive on it — unreachable after the return above.
    fields = elem_type.f_specs()
    await self.message_fields(msg, fields)
    return msg
async def message_field(self, msg, field, fvalue=None):
    """Dump/load a single message field.

    :param msg: message instance
    :param field: field spec tuple (name, type, *params)
    :param fvalue: explicit value for dump
    :raises helpers.ArchiveException: wrapping any failure, with the
        tracker path for diagnostics
    """
    # Only the field name is needed here; the type/params components of
    # the spec are consumed by the private dump/load helpers (the
    # original unpacked ftype/params and never used them).
    fname = field[0]
    try:
        self.tracker.push_field(fname)
        if self.writing:
            await self._dump_message_field(self.iobj, msg, field, fvalue=fvalue)
        else:
            await self._load_message_field(self.iobj, msg, field)
        self.tracker.pop()
    except Exception as e:
        raise helpers.ArchiveException(e, tracker=self.tracker) from e
async def message_fields(self, msg, fields):
    """Dump/load every field spec of `fields` into/from `msg`, in order.

    :param msg: message instance
    :param fields: iterable of field specs
    :return: msg
    """
    for fspec in fields:
        await self.message_field(msg, fspec)
    return msg
def _get_type(self, elem_type):
    """Map `elem_type` onto the local XmrType hierarchy.

    Handles types coming from a foreign copy of the serialization
    module (same class name, different module object) by name-matching
    against the basic types and, failing that, re-importing the
    definition so issubclass checks against the local hierarchy work.

    :raises ValueError: when the type cannot be translated
    """
    # Already part of our hierarchy — nothing to do.
    if issubclass(elem_type, XmrType):
        return elem_type
    # Name-match against the basic decision types.
    basic_types = (
        UVarintType,
        IntType,
        BlobType,
        UnicodeType,
        VariantType,
        ContainerType,
        TupleType,
        MessageType,
    )
    cname = elem_type.__name__
    for candidate in basic_types:
        if candidate.__name__ == cname:
            return candidate
    # Inferred type: translate it to the current module's definition.
    try:
        mod_name = elem_type.__module__
        resolved = import_def(mod_name, cname)
        is_local = issubclass(resolved, XmrType)
        logger.debug(
            "resolved %s, sub: %s, id_e: %s, id_mod: %s",
            resolved,
            is_local,
            id(resolved),
            id(sys.modules[mod_name]),
        )
        if not is_local:
            logger.warning("resolution hierarchy broken")
        return resolved
    except Exception as e:
        raise ValueError(
            "Could not translate elem type: %s %s, exc: %s %s"
            % (type(elem_type), elem_type, type(e), e)
        )
def _is_type(self, elem_type, test_type):
    """Return True when `elem_type` is `test_type` or a subclass of it."""
    return issubclass(elem_type, test_type)
async def field(self, elem=None, elem_type=None, params=None):
"""
Archive field
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_type = elem_type if elem_type else elem.__class__
fvalue = None
etype = self._get_type(elem_type)
if self._is_type(etype, UVarintType):
fvalue = await self.uvarint(get_elem(elem))
elif self._is_type(etype, IntType):
fvalue = await self.uint(
elem=get_elem(elem), elem_type=elem_type, params=params
)
elif self._is_type(etype, BlobType):
fvalue = await self.blob(
elem=get_elem(elem), elem_type=elem_type, params=params
)
elif self._is_type(etype, UnicodeType):
fvalue = await self.unicode_type(get_elem(elem))
elif self._is_type(etype, VariantType):
fvalue = await self.variant(
elem=get_elem(elem), | |
y0[i-1, j[1:]].value = value(y0[i, j[1:]])
for u in self.u:
umhe = getattr(self.lsmhe, u)
umhe[i-1] = value(umhe[i])
self.adjust_nu0_mhe()
def patch_input_mhe(self, src_kind, **kwargs):
    """Load current input values into the mhe model.

    Args:
        src_kind (str): "mod" to copy from a source model's inputs,
            "dict" to copy from self.curr_u.
    Keyword Args:
        src: source model (default self.PlantSample); used for "mod".
        fe: target finite element (default: the last one).
    Raises:
        ValueError: on an unrecognized src_kind.
    """
    source = kwargs.pop("src", self.PlantSample)
    fe = kwargs.pop("fe", self.nfe_tmhe - 1)
    if src_kind not in ("mod", "dict"):
        raise ValueError("Either use mod or dict %s" % src_kind)
    for u in self.u:
        target_var = getattr(self.lsmhe, u)
        if src_kind == "mod":
            target_var[fe].value = value(getattr(source, u)[0])  #: This has to work
        else:
            target_var[fe].value = self.curr_u[u]
def init_step_mhe(self, patch_pred_y=False, **kwargs):
    """Takes the last state-estimate from the mhe to perform an open-loop simulation that initializes
    the last slice of the mhe horizon. By default the last finite element will be taken as reference.
    Operations:
    # Load initial guess to ref
    # Set values for inputs
    # Set values for initial states
    # Solve reference
    # Load back to lsmhe
    Args:
        patch_pred_y (bool): if True, also patch the predicted measurements into the mhe.
        **kwargs: fe (int) — source finite element, default: last one.
    """
    tgt = self.dum_mhe
    src = self.lsmhe
    fe_src = kwargs.pop("fe", self.nfe_tmhe - 1)
    #: Load initial guess to tgt
    load_iguess(src, tgt, fe_src, 0)
    #: Set values for inputs
    for u in self.u:  #: This should update the inputs
        usrc = getattr(src, u)
        utgt = getattr(tgt, u)
        utgt[0].value = (value(usrc[fe_src]))
    #: Set values for initial states (taken at the last collocation point of fe_src)
    t_ncp = t_ij(self.lsmhe.t, fe_src, self.ncp_tmhe)
    for x in self.states:
        pn = x + "_ic"
        p = getattr(tgt, pn)
        vs = getattr(self.lsmhe, x)
        for ks in p.keys():
            p[ks].value = value(vs[(t_ncp,) + (ks,)])
    # NOTE(review): the two display() calls below dump full model listings to
    # files 'lsmhe'/'dum' on every call — looks like leftover debugging output.
    self.lsmhe.display(filename='lsmhe')
    self.dum_mhe.display(filename='dum')
    #: Solve
    test = self.solve_dyn(tgt, o_tee=True, stop_if_nopt=False, max_cpu_time=300,
                          jacobian_regularization_value=1e-04,
                          jacobian_regularization_exponent=2.,
                          halt_on_ampl_error=False,
                          output_file="init_mhe.txt")
    #: Load solution as a guess to lsmhe (done even when the solve failed)
    if test != 0:
        self.journalist("I", self._iteration_count, "init_step_mhe", "Failed prediction for next step")
    load_iguess(tgt, src, 0, fe_src)
    if patch_pred_y:  #: patch the measurement associated with the solution of the dummy model to the mhe
        self.journalist("I", self._iteration_count, "init_step_mhe", "Prediction for advanced-step.. Ready")
        self.patch_meas_mhe(tgt, noisy=True)
    self.adjust_nu0_mhe()
    # self.adjust_w_mhe()
def create_rh_sfx(self, set_suffix=True):
    """Create the suffixes k_aug needs for the reduced Hessian
    (prior at fe=1, cp=0).

    Args:
        set_suffix (bool): when True, tag the noisy states at the
            prior time point with dof_v = 1.
    Returns:
        None
    """
    # (Re)create the export/import suffixes, clearing stale entries
    # from a previous run.
    if hasattr(self.lsmhe, "dof_v"):
        self.lsmhe.dof_v.clear()
    else:
        self.lsmhe.dof_v = Suffix(direction=Suffix.EXPORT)  #: dof_v
    if hasattr(self.lsmhe, "rh_name"):
        self.lsmhe.rh_name.clear()
    else:
        self.lsmhe.rh_name = Suffix(direction=Suffix.IMPORT)  #: Red_hess_name
    if hasattr(self.lsmhe, "f_timestamp"):
        self.lsmhe.f_timestamp.clear()
    else:
        self.lsmhe.f_timestamp = Suffix(direction=Suffix.EXPORT,
                                        datatype=Suffix.INT)
    t_prior = t_ij(self.lsmhe.t, 1, 0)
    if not set_suffix:
        return
    for key in self.x_noisy:
        var = getattr(self.lsmhe, key)
        for j in self.x_vars[key]:
            var[(t_prior,) + j].set_suffix_value(self.lsmhe.dof_v, 1)
def create_sens_suffix_mhe(self, set_suffix=True):
    """Creates relevant suffixes for k_aug (Sensitivity)
    Args:
        set_suffix (bool): True if update must be done
    Returns:
        None"""
    # (Re)create dof_v / rh_name, clearing stale values from a prior run.
    if hasattr(self.lsmhe, "dof_v"):
        self.lsmhe.dof_v.clear()
    else:
        self.lsmhe.dof_v = Suffix(direction=Suffix.EXPORT)  #: dof_v
    if hasattr(self.lsmhe, "rh_name"):
        self.lsmhe.rh_name.clear()
    else:
        self.lsmhe.rh_name = Suffix(direction=Suffix.IMPORT)  #: Red_hess_name
    # DeltaP / dcdp are only created once and are NOT cleared between calls.
    if not hasattr(self.lsmhe, "DeltaP"):
        self.lsmhe.DeltaP = Suffix(direction=Suffix.EXPORT)
    if not hasattr(self.lsmhe, "dcdp"):
        self.lsmhe.dcdp = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT)
    # Number the parametric constraints (1-based) for k_aug's dcdp suffix.
    i = 1
    print(self.y)  # NOTE(review): debugging prints — consider logging instead
    print(self.y_vars)
    print(self.yk_key)
    # Measurement constraints at the last finite element.
    for y in self.y:
        for j in self.y_vars[y]:
            k = self.yk_key[(y,) + j]
            self.lsmhe.hyk_c_mhe[self.nfe_tmhe-1, k].set_suffix_value(self.lsmhe.dcdp, i)
            i += 1
    #self.lsmhe.hyk_c_mhe.pprint()
    print(i, "measurements")
    # Input dummy constraints at every collocation point of the last element.
    for j in range(0, self.ncp_tmhe + 1):
        t_mhe = t_ij(self.lsmhe.t, self.nfe_tmhe - 1, j)
        for u in self.u:
            con_w = getattr(self.lsmhe, u + "_cdummy_mhe")
            con_w[t_mhe].set_suffix_value(self.lsmhe.dcdp, i)
            i += 1
    print(i, "inputs")
    #con_w.pprint()
    if set_suffix:
        # Tag the noisy states at the end of the horizon as degrees of freedom.
        t_ = t_ij(self.lsmhe.t, self.nfe_tmhe - 1, self.ncp_tmhe)
        for key in self.x_noisy:
            var = getattr(self.lsmhe, key)
            for j in self.x_vars[key]:
                var[(t_,) + j].set_suffix_value(self.lsmhe.dof_v, 1)
def check_active_bound_noisy(self):
    """Checks if the dof_(super-basic) have active bounds, if so, add them to the exclusion list.

    Builds self.xkN_nexcl with one flag per noisy-state entry
    (1 = keep, 0 = exclude) and sets dof_v accordingly: a state whose
    prior value is within 1e-08 of its lower or upper bound is excluded.
    """
    if hasattr(self.lsmhe, "dof_v"):
        self.lsmhe.dof_v.clear()
    else:
        self.lsmhe.dof_v = Suffix(direction=Suffix.EXPORT)  #: dof_v
    if hasattr(self.lsmhe, "rh_name"):
        self.lsmhe.rh_name.clear()
    else:
        self.lsmhe.rh_name = Suffix(direction=Suffix.IMPORT)  #: Red_hess_name
    t_prior = t_ij(self.lsmhe.t, 1, 0)
    self.xkN_nexcl = []
    k = 0  # count of active bounds found
    for x in self.x_noisy:
        v = getattr(self.lsmhe, x)
        for j in self.x_vars[x]:
            active_bound = False
            # NOTE(review): the truthiness test skips a bound equal to 0
            # as well as None — confirm that is intended.
            if v[(t_prior,) + j].lb:
                if v[(t_prior,) + j].value - v[(t_prior,) + j].lb < 1e-08:
                    active_bound = True
            if v[(t_prior,) + j].ub:
                if v[(t_prior,) + j].ub - v[(t_prior,) + j].value < 1e-08:
                    active_bound = True
            if active_bound:
                print("Active bound {:s}, {:d}, value {:f}".format(x, j[0], v[(t_prior,) + j].value), file=sys.stderr)
                v[(t_prior,) + j].set_suffix_value(self.lsmhe.dof_v, 0)
                self.xkN_nexcl.append(0)
                k += 1
            else:
                v[(t_prior,) + j].set_suffix_value(self.lsmhe.dof_v, 1)
                self.xkN_nexcl.append(1)  #: Not active, add it to the non-exclusion list.
    if k > 0:
        print("I[[check_active_bound_noisy]] {:d} Active bounds.".format(k))
def deact_icc_mhe(self):
    """Deactivate the initial-condition constraints (icc) of the mhe.

    When self.deact_ics is set, states without an icc constraint are
    silently skipped; otherwise every noisy state must have one.
    """
    if self.deact_ics:
        for state in self.x_noisy:
            try:
                con = getattr(self.lsmhe, state + "_icc")
                for idx in self.x_vars[state]:
                    con[idx].deactivate()
            except AttributeError:
                # No icc constraint for this state — skip it.
                continue
        #: Maybe only for a subset of the states
    else:
        for state in self.x_noisy:
            # if state in self.x_noisy:
            con = getattr(self.lsmhe, state + "_icc")
            for idx in self.x_vars[state]:
                con[idx].deactivate()
def regen_objective_fun(self):
    """Given the exclusion list, regenerate the expression for the arrival cost.

    Rebuilds Arrival_e_mhe as 0.5 * (x - x0)' PikN (x - x0), summing only
    over entries whose xkN_nexcl flag is truthy, then reassembles the
    total objective and (re)activates the full objective/constraints.
    """
    self.lsmhe.Arrival_e_mhe.set_value(0.5 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) *
                                                 sum(self.lsmhe.PikN_mhe[j, k] *
                                                     (self.xkN_l[k] - self.lsmhe.x_0_mhe[k]) for k in
                                                     self.lsmhe.xkNk_mhe if self.xkN_nexcl[k])
                                                 for j in self.lsmhe.xkNk_mhe if self.xkN_nexcl[j]))
    # Total objective = arrival cost + R/Q/U terms (presumably measurement,
    # model-noise and input penalties — confirm against model definition).
    self.lsmhe.obfun_mhe.set_value(self.lsmhe.Arrival_e_mhe +
                                   self.lsmhe.R_e_mhe +
                                   self.lsmhe.Q_e_mhe +
                                   self.lsmhe.U_e_mhe)
    # Swap from the dummy objective to the full one; enable measurement constraints.
    if self.lsmhe.obfun_dum_mhe.active:
        self.lsmhe.obfun_dum_mhe.deactivate()
    if not self.lsmhe.obfun_mhe.active:
        self.lsmhe.obfun_mhe.activate()
    if not self.lsmhe.hyk_c_mhe.active:
        self.lsmhe.hyk_c_mhe.activate()
def load_covariance_prior(self):
    """Computes the reduced-hessian (inverse of the prior-covariance).

    Runs k_aug with reduced-Hessian suffixes on the mhe problem, then
    parses the dense matrix dumped to "inv_.in" into self._PI and
    appends k_aug timing info to "timings_mhe_kaug_cov.txt".

    Returns:
        1 when k_aug fails (no covariance loaded), None otherwise.
    """
    self.journalist("I", self._iteration_count, "load_covariance_prior", "K_AUG w red_hess")
    self.k_aug.options["compute_inv"] = ""
    if hasattr(self.lsmhe, "f_timestamp"):
        self.lsmhe.f_timestamp.clear()
    else:
        self.lsmhe.f_timestamp = Suffix(direction=Suffix.EXPORT,
                                        datatype=Suffix.INT)
    self.create_rh_sfx()
    try:
        self.k_aug.solve(self.lsmhe, tee=True)
    except ApplicationError:
        self.journalist("E", self._iteration_count, "load_covariance_prior", "K_AUG failed; no covariance info was loaded")
        # self.lsmhe.write_nl(name="failed_covariance.nl")
        return 1
    self.lsmhe.f_timestamp.display(ostream=sys.stderr)
    self._PI.clear()
    # Parse the dense inverse-covariance matrix written by k_aug.
    # (Fixed: the original also called rh.close() inside the with-block.)
    with open("inv_.in", "r") as rh:
        matrix_lines = rh.readlines()
    tokens = []
    for row, line in enumerate(matrix_lines):
        tokens = line.split()
        for col, tok in enumerate(tokens):
            self._PI[row, col] = float(tok)
    print("-" * 120)
    print("I[[load covariance]] e-states nrows {:d} ncols {:d}".format(len(matrix_lines), len(tokens)))
    print("-" * 120)
    # Append timing info; context managers guarantee the handles close
    # even on write errors (the original used bare open/close pairs).
    with open("timings_k_aug.txt", "r") as ftimings:
        s = ftimings.readline()
    with open("timings_mhe_kaug_cov.txt", "a") as f:
        f.write(str(s) + '\n')
def set_state_covariance(self):
    """Sets covariance(inverse) for the prior_state.

    Copies the reduced-Hessian entries from self._PI (rows/cols given by
    each variable's rh_name suffix) into the PikN_mhe component, skipping
    any state excluded via dof_v == 0.
    Args:
        None
    Return:
        None
    """
    t_prior = t_ij(self.lsmhe.t, 1, 0)
    pikn = getattr(self.lsmhe, "PikN_mhe")
    for key_j in self.x_noisy:
        for key_k in self.x_noisy:
            vj = getattr(self.lsmhe, key_j)
            vk = getattr(self.lsmhe, key_k)
            for j in self.x_vars[key_j]:
                if vj[(t_prior,) + j].get_suffix_value(self.lsmhe.dof_v) == 0:
                    #: This state is at its bound, skip
                    continue
                for k in self.x_vars[key_k]:
                    if vk[(t_prior,) + k].get_suffix_value(self.lsmhe.dof_v) == 0:
                        #: This state is at its bound, skip
                        print("vj {:s} {:d} .sfx={:d}, vk {:s} {:d}.sfx={:d}"
                              .format(key_j, j[0], vj[(t_prior,) + j].get_suffix_value(self.lsmhe.dof_v),
                                      key_k, k[0], vk[(t_prior,) + k].get_suffix_value(self.lsmhe.dof_v),))
                        continue
                    row = vj[(t_prior,) + j].get_suffix_value(self.lsmhe.rh_name)
                    col = vk[(t_prior,) + k].get_suffix_value(self.lsmhe.rh_name)
                    #: Ampl does not give you back 0's
                    if not row:
                        row = 0
                    if not col:
                        col = 0
                    # print((row, col), (key_j, j), (key_k, k))
                    q0j = self.xkN_key[(key_j,) + j]
                    q0k = self.xkN_key[(key_k,) + k]
                    pi = self._PI[row, col]
                    try:
                        pikn[q0j, q0k] = pi
                    except KeyError:
                        # NOTE(review): on KeyError the same key is re-assigned
                        # with 0.0 — if the key itself was invalid this raises
                        # again; confirm the error can only come from the value.
                        errk = key_j + "_" + str(j) + ", " + key_k + "_" + str(k)
                        print("Kerror, var {:}".format(errk))
                        pikn[q0j, q0k] = 0.0
def set_prior_state_from_prior_mhe(self):
    """Mechanism to assign a value to x0 (prior-state) from the previous mhe.

    Copies the estimate at the prior time point (fe=1, cp=0) into the
    x_0_mhe component for every noisy state.
    Args:
        None
    Returns:
        None
    """
    t_prior = t_ij(self.lsmhe.t, 1, 0)
    # Hoisted out of the loops: the original looked x_0_mhe up once per
    # inner iteration even though it is loop-invariant.
    z0dest = getattr(self.lsmhe, "x_0_mhe")
    for x in self.x_noisy:
        var = getattr(self.lsmhe, x)
        for j in self.x_vars[x]:
            z0 = self.xkN_key[(x,) + j]
            z0dest[z0] = value(var[(t_prior,) + j])
def prior_phase(self):
    """Encapsulates all the prior-state related issues, like collection, covariance computation and update.

    Sequence (order matters):
      1. exclude states at active bounds,
      2. run k_aug and read the inverse covariance,
      3. push it into PikN_mhe,
      4. rebuild the arrival-cost objective,
      5. update the prior state x_0_mhe.
    """
    # Prior-Covariance stuff
    self.check_active_bound_noisy()
    # NOTE(review): load_covariance_prior() returns 1 on k_aug failure;
    # the return value is ignored here, so the stale covariance is reused.
    self.load_covariance_prior()
    self.set_state_covariance()
    self.regen_objective_fun()
    # Update prior-state
    self.set_prior_state_from_prior_mhe()
def update_noise_meas(self, cov_dict):
    """Draw fresh gaussian noise for every measured variable.

    Args:
        cov_dict: lookup keyed by ((y, j), (y, j), 1) whose values are
            the standard deviations passed to np.random.normal.
    """
    self.journalist("I", self._iteration_count, "introduce_noise_meas", "Noise introduction")
    for meas in self.y:
        for idx in self.y_vars[meas]:
            stdev = cov_dict[(meas, idx), (meas, idx), 1]
            self.curr_m_noise[(meas, idx)] = np.random.normal(0, stdev)
def print_r_mhe(self):
self.journalist("I", self._iteration_count, "print_r_mhe", "Results at" + os.getcwd())
self.journalist("I", self._iteration_count, "print_r_mhe", "Results suffix " + self.res_file_suf)
for x in self.x_noisy:
elist = []
rlist = []
t_Nmhe = t_ij(self.lsmhe.t, self.nfe_tmhe - 1, self.ncp_tmhe)
t_sim = t_ij(self.PlantSample.t, 0, self.ncp_t)
xe = getattr(self.lsmhe, x)
xr = getattr(self.PlantSample, x)
for j in self.x_vars[x]:
elist.append(value(xe[(t_Nmhe,) + j]))
rlist.append(value(xr[(t_sim,) + j]))
self.s_estimate[x].append(elist)
self.s_real[x].append(rlist)
# with open("res_mhe_ee.txt", "w") as f:
# for x | |
from tkinter import *
import numpy as np
import random
import time
import math
#pylint: skip-file
"""Information:
Board is 9x3 (much wider)
19 width, 7 height
"""
class Maze():
    """Among-Us-style maze mini-game on a Tk canvas.

    A random maze is generated with a depth-first search on a 50px grid
    and the player drags the mouse from the top-left opening to the
    bottom-right opening. Board dimensions must be odd integers.
    """
    def __init__(self):
        # Root window, shared background color, and a debug flag that
        # enables verbose cursor-tracking prints in draw_line().
        self.tkroot = Tk()
        self.color = '#a6a48f'
        self.debug = False
    def input(self):
        """Build and show the home screen: size entries plus a Begin button."""
        """Initialize canvas"""
        self.canvas = Canvas(self.tkroot, width=950, height=350)
        self.canvas.pack(expand=True, fill=BOTH)
        self.canvas.configure(bg=self.color)
        """Entries:
        Default is 19x7 board
        """
        self.eheight = Entry(self.tkroot, font='Times 16')
        self.ewidth = Entry(self.tkroot, font='Times 16')
        self.eheight.insert(END, 7)
        self.ewidth.insert(END, 19)
        self.canvas.create_window(450, 150, anchor=CENTER, window=self.eheight)
        self.canvas.create_window(450, 200, anchor=CENTER, window=self.ewidth)
        """Home screen text"""
        self.canvas.create_text(450, 50, font='Times 30 bold', text="Among Us Maze Task")
        self.canvas.create_text(300, 150, font='Times 20', text="Height: ")
        self.canvas.create_text(300, 200, font='Times 20', text="Width: ")
        """Input button"""
        self.begin_button = Button(self.canvas, font='Times 20', text="Begin", command=lambda:self.begin())
        self.canvas.create_window(450, 300, anchor=CENTER, window=self.begin_button)
        self.tkroot.mainloop()
    """Errors:
    Inputs must be integer and odd
    """
    def error_screen(self):
        """Show an error screen when the height/width inputs are invalid."""
        self.canvas.pack_forget() #Temporarily hide the home screen
        """Create new temporary canvas"""
        self.errorc = Canvas(self.tkroot, width=950, height=350)
        self.errorc.pack(expand=True, fill=BOTH)
        self.errorc.configure(bg=self.color)
        self.errorc.create_text(450, 100, font='Times 20', text="Make sure the height and width you put are odd integers!")
        """Button for returning to home screen"""
        self.go_back = Button(self.errorc, text="Return to main page",
                              command=lambda:[self.errorc.destroy(), self.canvas.pack()])
        self.errorc.create_window(450, 250, anchor=CENTER, window=self.go_back)
    def begin(self):
        """Validate the entered size and switch to the maze canvas."""
        try:
            self.height = int(self.eheight.get())
            self.width = int(self.ewidth.get())
            """inputs must be odd"""
            if self.height%2 == 0 or self.width%2 == 0:
                print("Not odd")
                raise Exception("Error")
            """Destroy old canvas and reinitalize new one of appropriate size"""
            self.canvas.destroy()
            self.canvas = Canvas(self.tkroot, width=50*self.width, height=50*self.height)
            self.canvas.pack(expand=True, fill=BOTH)
            self.canvas.configure(bg='#a6a48f')
        # NOTE(review): bare except hides unrelated errors; also note that
        # setup() below runs even after error_screen() was shown, i.e. on
        # invalid input too — confirm this is intended.
        except:
            self.error_screen() #If not successful, go to error screen
        self.setup() #Execute setup command if everything else is successful
    def setup(self):
        """Draw pegs, generate the maze via DFS, place barriers, bind events."""
        """Initialize square pegs:
        I did this section first, which is why the code is quite naive;
        More space efficient in the future could be using the final_grid created to insert the square pegs
        """
        self.square_img = PhotoImage(file="resized_square.png") #Square peg thing image for maze task
        for i in range(math.floor(self.width/2)+1):
            for j in range(math.floor(self.height/2)+1):
                self.canvas.create_image(100*i, 100*j, anchor=NW, image=self.square_img)
        """Call DFS algorithm:
        Keep trying until algorithm finds a maze that works
        """
        self.retries = 0
        while True:
            try:
                self.grid = np.zeros([self.height, self.width], dtype=np.int8)
                self.vis = np.zeros([self.height, self.width])
                self.final_grid, self.peg, self.barrier, self.no_barrier, self.count = self.dfs(self.grid,
                                                                                               self.vis, 1, 1, 0)
                self.final_grid[0][1] = self.no_barrier #Start point cannot have barrier
                self.final_grid[self.height-1][self.width-2] = self.no_barrier #End point cannot have barrier
                print("Number of retries: %d \n Length of Path: %d" % (self.retries, self.count))
                break
            # The DFS raises (e.g. empty choice) when it dead-ends; retry.
            except:
                self.retries += 1
                pass
        """Add random barriers"""
        self.total_spots = math.floor(self.width/2)*(math.floor(self.height/2)+1) + math.floor(self.height/2)*(math.floor(
            self.width/2)+1) #Total spots given configuration
        self.num_barriers = random.randint(int(self.total_spots*0.45), int(self.total_spots*0.55)) #45% to 55% of the board is barrier
        self.final_grid = self.add_barriers(self.final_grid, self.num_barriers)
        print("Number of barriers: %d" % self.num_barriers)
        """Initialize barriers"""
        self.topdown = PhotoImage(file="topdown_resize.png")
        self.leftright = PhotoImage(file="leftright_resize.png")
        for i in range(self.height):
            for j in range(self.width):
                if self.final_grid[i][j] == 8:
                    # Odd rows hold horizontal barrier slots, even rows vertical.
                    if i%2 == 1:
                        self.canvas.create_image(12.5+50*j, 50*i+0.4, anchor=NW, image=self.topdown) #create_image=(width, height)
                    else:
                        self.canvas.create_image(50*j+2, 12.5+50*i, anchor=NW, image=self.leftright)
        """Initialize variables"""
        self.tkroot.setvar(name="xvar", value=50)
        self.tkroot.setvar(name="yvar", value=0)
        self.tkroot.setvar(name="xvis", value=1)
        self.tkroot.setvar(name="yvis", value=0)
        self.tkroot.setvar(name="done", value=0)
        self.visited = []
        """Bind buttons"""
        self.canvas.bind('<B1-Motion>', self.draw_line)
        self.canvas.bind('<ButtonRelease-1>', self.reset)
        self.start = time.perf_counter() #Start of timer
        self.local_start = time.perf_counter()
    """Depth first search algorithm:
    Finds a possible maze;
    Legends are defined by numbers and found in the comments below;
    Semi-naive, could use self.peg/self.barrier/self.no_barrier initializations
    """
    def dfs(self, grid, vis, i, j, count):
        """Recursively carve a path from (i, j); returns the grid, the
        legend values (peg/barrier/no_barrier) and the path length."""
        grid[i][j] = 1
        """
        North - 0
        East - 1
        South - 2
        West - 3
        """
        """Legend
        7 - Peg
        8 - Barrier
        9 - None
        """
        peg = 7
        barrier = 8
        no_barrier = 9
        """Stop at end"""
        if i == self.height-2 and j == self.width-2:
            return grid, peg, barrier, no_barrier, count
        possible_dir = [] #Array of possible directions
        """North"""
        if i != 1:
            if vis[i-2][j] == 0:
                possible_dir.append(0)
                vis[i-2][j] = 1
        """South"""
        if i != self.height-2:
            if vis[i+2][j] == 0:
                possible_dir.append(2)
                vis[i+2][j] = 1
        """West"""
        if j != 1:
            if vis[i][j-2] == 0:
                possible_dir.append(3)
                vis[i][j-2] = 1
        """East"""
        if j != self.width-2:
            if vis[i][j+2] == 0:
                possible_dir.append(1)
                vis[i][j+2] = 1
        # Raises ValueError on a dead end (empty possible_dir) — caught
        # by the retry loop in setup().
        direction = np.random.choice(possible_dir, 1)
        if direction == 0:
            grid[i-1][j] = no_barrier
            return self.dfs(grid, vis, i-2, j, count+1)
        if direction == 1:
            grid[i][j+1] = no_barrier
            return self.dfs(grid, vis, i, j+2, count+1)
        if direction == 2:
            grid[i+1][j] = no_barrier
            return self.dfs(grid, vis, i+2, j, count+1)
        if direction == 3:
            grid[i][j-1] = no_barrier
            return self.dfs(grid, vis, i, j-2, count+1)
    """Driver function for adding barriers, used above"""
    def add_barriers(self, grid, num):
        """Place `num` random barriers on free slots and mark all pegs."""
        """Add pegs into the grid (so those grid spots are ignored when tracking motion)"""
        for i in range(0, self.height, 2):
            for j in range(0, self.width, 2):
                grid[i][j] = self.peg
        while num:
            direction = np.random.choice(['LR', 'UD'], 1)
            if direction == 'LR':
                i = np.random.choice(np.arange(0, self.height, 2))
                j = np.random.choice(np.arange(1, self.width, 2))
            else:
                i = np.random.choice(np.arange(1, self.height, 2))
                j = np.random.choice(np.arange(0, self.width, 2))
            if grid[i][j] == 0:
                grid[i][j] = self.barrier
                num -= 1
        return grid
    """Draw Line Function:
    Tracks cursor motion and leaves a trail;
    There is an issue of skipping corners;
    Sensitivity could be improved/optimized;
    Math is very dependent on 50 pixel grid squares;
    Real Among Us is not just 50 pixel grid squares, therefore this function is not that flexible;
    Code specific -
    Keep track of visited array so you cannot visit the same square (error: sometimes the grid square isn't drawn in but
    the code assumes you are there so you cannot visit the square you were at previously)
    """
    def draw_line(self, event):
        """Handle <B1-Motion>: paint the path square-by-square and detect
        completion at the bottom-right exit."""
        y = int(event.y/50) #y pos of cursor
        x = int(event.x/50) #x pos of cursor
        if y >= self.height or x >= self.width:
            return #Don't go off screen
        """Keep going only if the cell is not a peg, not a barrier, and not visited"""
        val = self.final_grid[y][x]
        if val != 7 and val != 8 and [x, y] not in self.visited:
            xc = self.tkroot.getvar(name="xvar")
            yc = self.tkroot.getvar(name="yvar")
            # Special-case the start cell (top opening at x=50, y=0).
            if yc == 0 and xc == 50:
                if yc+75 > event.y > yc+50 and xc < event.x < xc+50:
                    self.tkroot.setvar(name="yvar", value=yc+50)
                    self.canvas.create_rectangle(xc, yc, xc+50, yc+50, fill='white', width=0, tag="line")
            else:
                """y-axis then x-axis motion"""
                if xc < event.x < xc+50:
                    if event.y > yc+25:
                        self.canvas.create_rectangle(xc, yc, xc+50, yc+50, fill='white', width=0, tag="line")
                        self.canvas.create_rectangle(xc, yc, xc+50, yc+50, fill='yellow', width=0, tag="tmp")
                        self.done = self.tkroot.getvar(name="done")
                        # Reached the exit cell → finish (only once per run).
                        if x == self.width-2 and y == self.height-1 and not self.done:
                            self.tkroot.setvar(name="done", value=1)
                            self.congrats()
                    elif yc-49 < event.y < yc-25:
                        self.canvas.create_rectangle(xc, yc-50, xc+50, yc, fill='white', width=0, tag="line")
                        self.canvas.create_rectangle(xc, yc-50, xc+50, yc, fill='yellow', width=0, tag="tmp")
                elif yc < event.y < yc+50:
                    if event.x > xc+25:
                        self.canvas.create_rectangle(xc, yc, xc+50, yc+50, fill='white', width=0, tag="line")
                        self.canvas.create_rectangle(xc, yc, xc+50, yc+50, fill='yellow', width=0, tag="tmp")
                    elif xc-49 < event.x < xc-25:
                        self.canvas.create_rectangle(xc-50, yc, xc, yc+50, fill='white', width=0, tag="line")
                        self.canvas.create_rectangle(xc-50, yc, xc, yc+50, fill='yellow', width=0, tag="tmp")
            # Commit the move: advance the tracked cell and mark the old
            # one visited once the cursor is clearly inside the next cell.
            xvis = self.tkroot.getvar(name="xvis")
            yvis = self.tkroot.getvar(name="yvis")
            if xc < event.x < xc+50:
                if yc+99 > event.y > yc+50:
                    self.tkroot.setvar(name="yvar", value=yc+50)
                    self.visited.append([x, y-1])
                    self.tkroot.setvar(name="yvis", value=yvis+1)
                elif yc-49 < event.y < yc-25:
                    self.tkroot.setvar(name="yvar", value=yc-50)
                    self.visited.append([x, y+1])
                    self.tkroot.setvar(name="yvis", value=yvis-1)
                if yc+49 <= event.y <= yc+60 or yc+1 >= event.y >= yc-10:
                    self.canvas.itemconfig("tmp", fill="white") #Remove trail
            elif yc < event.y < yc+50:
                if xc+99 > event.x > xc+50:
                    self.tkroot.setvar(name="xvar", value=xc+50)
                    self.visited.append([x-1, y])
                    self.tkroot.setvar(name="xvis", value=xvis+1)
                elif xc-49 < event.x < xc-25:
                    self.tkroot.setvar(name="xvar", value=xc-50)
                    self.visited.append([x+1, y])
                    self.tkroot.setvar(name="xvis", value=xvis-1)
                if xc+49 <= event.x <= xc+60 or xc+1 >= event.x >= xc-10:
                    self.canvas.itemconfig("tmp", fill="white") #Remove trail
        if self.debug:
            print(int(event.x/50), int(event.y/50), xc, yc)
        if self.debug and [x, y] in self.visited:
            #print(self.visited)
            print('Value: %d' % val)
    """Reset if mouse is released"""
    def reset(self, event):
        """Handle <ButtonRelease-1>: erase the trail and restart tracking."""
        self.canvas.delete("line")
        self.canvas.delete("tmp")
        self.tkroot.setvar(name="xvar", value=50)
        self.tkroot.setvar(name="yvar", value=0)
        self.tkroot.setvar(name="xvis", value=1)
        self.tkroot.setvar(name="yvis", value=0)
        self.tkroot.setvar(name="done", value=0)
        self.visited.clear()
        self.local_start = time.perf_counter()
        print('Reset')
    """Congratulations screen"""
    def congrats(self):
        """Show the completion screen with attempt and total times."""
        self.end = time.perf_counter() #End timer
        print('Congratulations on finishing the maze!')
        """Initialize new canvas"""
        self.congratsc = Canvas(self.tkroot, width=950, height=350) #Each space is 50px
        self.congratsc.pack(expand=True, fill=BOTH)
        self.congratsc.configure(bg='#a6a48f')
        self.canvas.pack_forget()
        self.congratsc.create_text(450, 75, font="Times 16 bold", text="Congratulations! You completed the maze in %.3f seconds for a total of %.3f seconds."
                                   % (float(self.end)-float(self.local_start), float(self.end)-float(self.start)))
        """Recover old maze"""
        self.recover = Button(self.congratsc, text="Try the maze again", command=lambda:[self.congratsc.pack_forget(), self.canvas.pack(), self.reinit_timer()])
        self.congratsc.create_window(400, 200, anchor=CENTER, window=self.recover)
        """Create new maze"""
        self.new_maze = Button(self.congratsc, text="New maze!", command=lambda:[self.congratsc.pack_forget(), self.canvas.pack(), self.setup()])
        self.congratsc.create_window(500, 200, anchor=CENTER, window=self.new_maze)
| |
import traceback
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import Http404
from django_bulk_update.helper import bulk_update
from django_filters.constants import EMPTY_VALUES
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import FilterSet
from rest_framework.filters import OrderingFilter
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.serializers import ListSerializer
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import raise_errors_on_nested_writes
from rest_framework.serializers import Serializer
from rest_framework.serializers import ValidationError
from rest_framework.settings import api_settings
from rest_framework.status import HTTP_201_CREATED
from rest_framework.status import HTTP_204_NO_CONTENT
from rest_framework.utils import html
from rest_framework.utils import model_meta
from rest_framework.viewsets import ReadOnlyModelViewSet
from contentcuration.viewsets.common import MissingRequiredParamsException
from contentcuration.viewsets.sync.utils import log_sync_exception
class SimpleReprMixin(object):
    """Mixin supplying a minimal, queryset-safe __repr__.

    DRF's default __repr__ prints out all fields, which can evaluate
    querysets before filters are applied and trigger full table scans;
    this keeps the representation to the class name only.
    """

    def __repr__(self):
        return "{} object".format(type(self).__name__)
# Add mixin first to make sure __repr__ for mixin is first in MRO
class BulkModelSerializer(SimpleReprMixin, ModelSerializer):
def __init__(self, *args, **kwargs):
    """Initialize the serializer and attach an empty change log."""
    super(BulkModelSerializer, self).__init__(*args, **kwargs)
    # Changes accumulated here get propagated back to the frontend.
    self.changes = []
@classmethod
def id_attr(cls):
    """Return the lookup field name(s) identifying instances.

    Uses Meta.update_lookup_field when declared; otherwise falls back
    to the model's primary-key name.
    """
    model = cls.Meta.model
    pk_name = model_meta.get_field_info(model).pk.name
    return getattr(cls.Meta, "update_lookup_field", pk_name)
def get_value(self, data, attr):
"""
Method to get a value based on the attribute name
accepts data which can be either a dict or a Django Model
Uses the underlying DRF Field methods for the field
to return the value.
"""
id_field = self.fields[attr]
if isinstance(data, dict):
return id_field.get_value(data)
# Otherwise should be a model instance
return id_field.get_attribute(data)
def id_value_lookup(self, data):
"""
Method to get the value for an id to use in lookup dicts
In the case of a simple id, this is just the str of the value
In the case of a combined index, we make a tuple of the values.
"""
id_attr = self.id_attr()
if isinstance(id_attr, str):
return str(self.get_value(data, id_attr))
# Could alternatively have coerced the list of values to a string
# but this seemed more explicit in terms of the intended format.
id_values = (self.get_value(data, attr) for attr in id_attr)
# For the combined index, use any related objects' primary key
combined_index = (idx.pk if hasattr(idx, 'pk') else idx for idx in id_values)
return tuple(combined_index)
def set_id_values(self, data, obj):
"""
Method to set all ids values on a dict (obj)
from either a dict or a model (data)
"""
obj.update(self.get_id_values(data))
return obj
def get_id_values(self, data):
"""
Return a dict of the id value(s) from data
which can be either a dict or a model
"""
id_attr = self.id_attr()
obj = {}
if isinstance(id_attr, str):
obj[id_attr] = self.get_value(data, id_attr)
else:
for attr in id_attr:
obj[attr] = self.get_value(data, attr)
return obj
def remove_id_values(self, obj):
"""
Return a copy of obj with its id value(s) removed.
"""
obj = obj.copy()
id_attr = self.id_attr()
if isinstance(id_attr, str):
del obj[id_attr]
else:
for attr in id_attr:
del obj[attr]
return obj
def to_internal_value(self, data):
ret = super(BulkModelSerializer, self).to_internal_value(data)
# add update_lookup_field field back to validated data
# since super by default strips out read-only fields
# hence id will no longer be present in validated_data
if isinstance(self.parent, BulkListSerializer):
self.set_id_values(data, ret)
return ret
def update(self, instance, validated_data):
# To ensure caution, require nested_writes to be explicitly allowed
if not (hasattr(self.Meta, "nested_writes") and self.Meta.nested_writes):
raise_errors_on_nested_writes("update", self, validated_data)
info = model_meta.get_field_info(instance)
# Simply set each attribute on the instance, and then save it.
# Note that unlike `.create()` we don't need to treat many-to-many
# relationships as being a special case. During updates we already
# have an instance pk for the relationships to be associated with.
for attr, value in validated_data.items():
if attr in info.relations and info.relations[attr].to_many:
raise ValueError("Many to many fields must be explicitly handled", attr)
setattr(instance, attr, value)
if hasattr(instance, "on_update") and callable(instance.on_update):
instance.on_update()
if not getattr(self, "parent"):
instance.save()
return instance
def create(self, validated_data):
# To ensure caution, require nested_writes to be explicitly allowed
if not (hasattr(self.Meta, "nested_writes") and self.Meta.nested_writes):
raise_errors_on_nested_writes("create", self, validated_data)
ModelClass = self.Meta.model
# Remove many-to-many relationships from validated_data.
# They are not valid arguments to the default `.create()` method,
# as they require that the instance has already been saved.
info = model_meta.get_field_info(ModelClass)
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
raise ValueError(
"Many to many fields must be explicitly handled", field_name
)
if not relation_info.reverse and (field_name in validated_data):
if not isinstance(
validated_data[field_name], relation_info.related_model
):
# Trying to set a foreign key but do not have the object, only the key
validated_data[
relation_info.model_field.attname
] = validated_data.pop(field_name)
instance = ModelClass(**validated_data)
if hasattr(instance, "on_create") and callable(instance.on_create):
instance.on_create()
if not getattr(self, "parent", False):
instance.save()
return instance
# Add mixin first to make sure __repr__ for mixin is first in MRO
class BulkListSerializer(SimpleReprMixin, ListSerializer):
    """
    ListSerializer performing bulk create/update of its child serializer's
    model. Collects frontend-bound change events in ``self.changes`` and
    records ids that matched no instance in ``self.missing_keys``.
    """
    def __init__(self, *args, **kwargs):
        super(BulkListSerializer, self).__init__(*args, **kwargs)
        # Track any changes that should be propagated back to the frontend
        self.changes = []
        # Track any objects that weren't found
        self.missing_keys = set()
    def _data_lookup_dict(self):
        """
        Return a data lookup dict keyed by the id attribute
        based off the Django in bulk method
        """
        if self.instance:
            return {self.child.id_value_lookup(obj): obj for obj in self.instance}
        return {}
    def to_internal_value(self, data):
        """
        List of dicts of native values <- List of dicts of primitive datatypes.
        Modified from https://github.com/encode/django-rest-framework/blob/master/rest_framework/serializers.py
        based on suggestions from https://github.com/miki725/django-rest-framework-bulk/issues/68
        This is to prevent an error whereby the DRF Unique validator fails when the instance on the child
        serializer is a queryset and not an object.
        """
        if html.is_html_input(data):
            data = html.parse_html_list(data, default=[])
        if not isinstance(data, list):
            message = self.error_messages["not_a_list"].format(
                input_type=type(data).__name__
            )
            raise ValidationError(
                {api_settings.NON_FIELD_ERRORS_KEY: [message]}, code="not_a_list"
            )
        if not self.allow_empty and len(data) == 0:
            message = self.error_messages["empty"]
            raise ValidationError(
                {api_settings.NON_FIELD_ERRORS_KEY: [message]}, code="empty"
            )
        ret = []
        errors = []
        data_lookup = self._data_lookup_dict()
        for item in data:
            try:
                # prepare child serializer to only handle one instance,
                # so the Unique validator sees an object (or None), never a queryset
                self.child.instance = data_lookup.get(self.child.id_value_lookup(item))
                self.child.initial_data = item
                validated = self.child.run_validation(item)
            except ValidationError as exc:
                errors.append(exc.detail)
            else:
                ret.append(validated)
                # keep errors index-aligned with the input list
                errors.append({})
        if any(errors):
            raise ValidationError(errors)
        return ret
    def update(self, queryset, all_validated_data):
        """
        Apply validated per-object updates via the child serializer, then
        persist all changed instances in a single bulk_update call.
        Only concrete model fields are written; untouched ids are recorded
        in ``self.missing_keys``.
        """
        concrete_fields = {f.name for f in self.child.Meta.model._meta.concrete_fields}
        all_validated_data_by_id = {}
        properties_to_update = set()
        for obj in all_validated_data:
            obj_id = self.child.id_value_lookup(obj)
            obj = self.child.remove_id_values(obj)
            if obj.keys():
                all_validated_data_by_id[obj_id] = obj
                properties_to_update.update(obj.keys())
        # restrict to real database columns; non-concrete attributes are
        # handled by the child serializer's update(), not bulk_update
        properties_to_update = properties_to_update.intersection(concrete_fields)
        # this method is handed a queryset that has been pre-filtered
        # to the specific instance ids in question, by `create_from_updates` on the bulk update mixin
        objects_to_update = queryset.only(*properties_to_update)
        updated_objects = []
        updated_keys = set()
        for obj in objects_to_update:
            # id_value_lookup coerces to string, as some ids are of the UUID class
            obj_id = self.child.id_value_lookup(obj)
            obj_validated_data = all_validated_data_by_id.get(obj_id)
            # If no valid data was passed back then this will be None
            if obj_validated_data is not None:
                # Reset the child serializer changes attribute
                self.child.changes = []
                # use model serializer to actually update the model
                # in case that method is overwritten
                instance = self.child.update(obj, obj_validated_data)
                # If the update method does not return an instance for some reason
                # do not try to run further updates on the model, as there is no
                # object to update.
                if instance:
                    updated_objects.append(instance)
                updated_keys.add(obj_id)
                # Collect any registered changes from this run of the loop
                self.changes.extend(self.child.changes)
        if len(all_validated_data_by_id) != len(updated_keys):
            self.missing_keys = set(all_validated_data_by_id.keys())\
                .difference(updated_keys)
        bulk_update(updated_objects, update_fields=properties_to_update)
        return updated_objects
    def create(self, validated_data):
        """
        Build unsaved instances via the child serializer, then persist them
        with a single bulk_create; wraps bulk_create TypeErrors with a hint
        about writable fields that are not model constructor arguments.
        """
        ModelClass = self.child.Meta.model
        objects_to_create = []
        for model_data in validated_data:
            # Reset the child serializer changes attribute
            self.child.changes = []
            object_to_create = self.child.create(model_data)
            objects_to_create.append(object_to_create)
            # Collect any registered changes from this run of the loop
            self.changes.extend(self.child.changes)
        try:
            created_objects = ModelClass._default_manager.bulk_create(objects_to_create)
        except TypeError:
            tb = traceback.format_exc()
            msg = (
                "Got a `TypeError` when calling `%s.%s.create()`. "
                "This may be because you have a writable field on the "
                "serializer class that is not a valid argument to "
                "`%s.%s.create()`. You may need to make the field "
                "read-only, or override the %s.create() method to handle "
                "this correctly.\nOriginal exception was:\n %s"
                % (
                    ModelClass.__name__,
                    ModelClass._default_manager.name,
                    ModelClass.__name__,
                    ModelClass._default_manager.name,
                    self.__class__.__name__,
                    tb,
                )
            )
            raise TypeError(msg)
        return created_objects
class ValuesViewsetOrderingFilter(OrderingFilter):
def get_default_valid_fields(self, queryset, view, context=None):
"""
The original implementation of this makes the assumption that the | |
param
tip = 'Select the Z-score critical value. Options are 1.65, 1.96, 2.58. Equal to pvalue of 0.1, 0.05, 0.01, respectively. '
tip += 'Recommended 1.96 (p-value 0.05). A Z-score of 1.96 may consider too data to be outliers, 2.58 too few.'
w_zscore_value = build_dropdown(desc='Z-Score Value: ', opts=[('1.65', 1.65), ('1.96', 1.96), ('2.58', 2.58)],
val=1.96, handler=zscore_value_handler, params=params, tip=tip)
# drop down for handling of missing data param
tip = 'Set how to handle missing data. Either drop it (whole year will be lost), or interpolate it (linear). '
tip += 'Interpolation is recommended. Dropping data can result in significant data loss.'
w_miss_data_handle = build_dropdown(desc='Handle Missing Data: ', opts=[('Interpolate', 'interpolate'), ('Drop', 'drop')],
val='interpolate', handler=miss_data_handle_handler, params=params, tip=tip)
# radiobutton for show figure values param
tip = 'Use interpolation to fill in missing pixels if present.'
w_fill_pixels = build_radio_button(desc='Fill empty pixels:', opts=[('Yes', True), ('No', False)], val=False,
handler=fill_pixels_handler, params=params, tip=tip)
# radiobutton for show figure values param
tip = 'Show a panel of figure of all images once data computed computed.'
w_plot_interp_idx = build_radio_button(desc='Show Figures:', opts=[('Yes', True), ('No', False)], val=False,
handler=plot_interp_idx_handler, params=params, tip=tip)
# stylise dashboard
control_layout = widgets.Layout(margin='10px 15px 10px 0')
empty_cell = build_empty_cell(desc=' ', val=' ')
# build dashboard
w_dash_r0 = widgets.HBox([w_miss_data_handle, w_fill_pixels, empty_cell], layout=control_layout)
w_dash_r1 = widgets.HBox([w_zscore, w_zscore_value, empty_cell], layout=control_layout)
w_dash_r2 = widgets.HBox([w_plot_interp_idx, empty_cell, empty_cell], layout=control_layout)
# create dash as accordion
w_dash = widgets.Accordion(children=[w_dash_r0, w_dash_r1, w_dash_r2], selected_index=0)
w_dash.set_title(0, 'Interpolation parameters')
w_dash.set_title(1, 'Z-score analysis parameters')
w_dash.set_title(2, 'Show figure parameters')
# return dashboard
return w_dash
# generate standardisation preparation dashboard
def build_prepare_standardisation_dashboard(params):
    """
    Build an ipywidgets Accordion dashboard for GDV standardisation settings.

    Each control registers a change handler that writes its value straight
    onto ``params`` (green_moist_percentile, slope_steady_percentile,
    max_num_sites, plot_standard_idx), so callers read results from
    ``params`` after user interaction.

    Args:
        params: mutable settings object updated in place by the handlers.

    Returns:
        widgets.Accordion with two sections: invariant-site parameters and
        figure display parameters.
    """
    # event handler for standardisation greennest/moistest percentile float
    def green_moist_percentile_handler(event, params):
        if event['type'] == 'change':
            params.green_moist_percentile = event['new']
    # event handler for steadiest-slope percentile int
    def slope_steady_percentile_handler(event, params):
        if event['type'] == 'change':
            params.slope_steady_percentile = event['new']
    # event handler for maximum number of invariant sites int
    def max_num_sites_handler(event, params):
        if event['type'] == 'change':
            params.max_num_sites = event['new']
    # event handler for show standardisation index figure boolean
    def plot_standard_idx_handler(event, params):
        if event['type'] == 'change':
            params.plot_standard_idx = event['new']
    # float slider for greenest/moistest percentile value param
    tip = 'Select the percentile for obtaining the greennest/moistest pixels. We recommend using the default value (99.5%). '
    tip += 'However, if some features in your study area drive up the vegetation/moisture values, reduce this value 1 or 2%.'
    w_green_moist_percentile = build_float_slider(desc='Greennest/Moistest Percentile:', min_val=80.0, max_val=99.99, step_val=0.1, d_width='175px',
                                                  val=99.5, form='.1f', handler=green_moist_percentile_handler, params=params, tip=tip)
    # int slider for steadiest slope value param
    tip = 'Select the percentile for obtaining the steadiest pixels. We recommend using the default value (5%). This will obtain areas of the slope '
    tip += 'between with 5% either side of slope = 0 (i.e. no changing pixels). If not enough pixels are returned below, try increasing by 1-2% and re-run.'
    w_slope_steady_percentile = build_int_slider(desc='Steadiest Slope Percentile:', min_val=1, max_val=30, step_val=1, d_width='175px',
                                                 val=5, form='d', handler=slope_steady_percentile_handler, params=params, tip=tip)
    # int slider for maximum number of invariant sites param
    tip = 'Set the maximum number of invariant green/moist sites to be used in standardisation. Default is 50. Fewer sites can provide slightly more accurate '
    tip += 'standardisation, but may suffer if pixels fluctuate across years. Opposite is true for more sites. We recommend 50.'
    w_max_num_sites = build_int_slider(desc='Max Number Sites:', min_val=25, max_val=100, step_val=5, val=50, form='d', d_width='175px',
                                       handler=max_num_sites_handler, params=params, tip=tip)
    # radiobutton for show figure values param
    tip = 'Show a panel of figure of all images once data computed computed.'
    w_plot_standard_idx = build_radio_button(desc='Show Figures:', opts=[('Yes', True), ('No', False)], val=False, d_width='175px',
                                             handler=plot_standard_idx_handler, params=params, tip=tip)
    # stylise dashboard
    control_layout = widgets.Layout(margin='10px 15px 10px 0')
    empty_cell = build_empty_cell(desc=' ', val=' ')
    # build dashboard rows (empty cells pad rows so columns line up)
    w_dash_r0 = widgets.HBox([w_green_moist_percentile, w_slope_steady_percentile, w_max_num_sites], layout=control_layout)
    w_dash_r1 = widgets.HBox([w_plot_standard_idx, empty_cell, empty_cell], layout=control_layout)
    # create dash as accordion
    w_dash = widgets.Accordion(children=[w_dash_r0, w_dash_r1], selected_index=0)
    w_dash.set_title(0, 'Standardisation invariant site parameters')
    w_dash.set_title(1, 'Show figure parameters')
    # return dashboard
    return w_dash
# generate likelihood threshold dashboard
def build_likelihood_threshold_dashboard(params, years_list):
    """
    Build an ipywidgets Accordion dashboard for GDV likelihood thresholding
    and data export options.

    Controls write their values onto ``params`` (like_thresh_type,
    like_thresh_date, like_thresh_stdv, map_likelihood, ground_sites_path,
    write_data_path) via registered handlers.

    Args:
        params: mutable settings object updated in place by the handlers.
        years_list: options for the threshold-date dropdown; must include
            the 'all' value used as the default.

    Returns:
        widgets.Accordion with three sections: shapefile import, likelihood
        modelling, and export parameters (modelling section opened).
    """
    # event handler for likelihood threshold type radio
    def like_thresh_type_handler(event, params):
        if event['type'] == 'change':
            params.like_thresh_type = event['new']
    # event handler for likelihood threshold date drop down
    def like_thresh_date_handler(event, params):
        if event['type'] == 'change':
            params.like_thresh_date = event['new']
    # event handler for likelihood threshold standard deviation float
    def like_thresh_stdv_handler(event, params):
        if event['type'] == 'change':
            params.like_thresh_stdv = event['new']
    # event handler for mapping likelihood boolean
    def map_likelihood_handler(event, params):
        if event['type'] == 'change':
            params.map_likelihood = event['new']
    # event handler for ground truth file dialog
    # (reads w_ground_sites_path defined below; handler only fires after build)
    def ground_sites_path_handler(params):
        params.ground_sites_path = w_ground_sites_path.selected
    # event handler for write data dialog
    def write_data_path_handler(params):
        params.write_data_path = w_write_data_path.selected
    # drop down for likelihood threshold type param
    tip = 'Select thresholding type. If groundtruth data for a particular year is available, select Groundtruth Shapefile. If not, select Standard Deviation.'
    w_like_thresh_type = build_dropdown(desc='Threshold Type:', opts=[('Standard Deviation', 'stdv'), ('Groundtruth Shapefile', 'shape')],
                                        val='stdv', handler=like_thresh_type_handler, params=params, tip=tip)
    # filedialog to select shapefiles
    w_ground_sites_path = build_file_dialog(directory=os.getcwd(), filename='', title='Select Groundtruth Points (.shp):',
                                            handler=ground_sites_path_handler, params=params)
    # drop down for likelihood threshold date
    tip = 'Select date to apply threshold. If groundtruth data is provided, it is best to match the year the data was collected. Ultimately, we recommend using the all option. '
    tip += 'This will find the median of all likelihood maps and threshold this via standard deviation. This will reduce seasonal variation.'
    w_like_thresh_date = build_dropdown(desc='Threshold Date:', opts=years_list, val='all',
                                        handler=like_thresh_date_handler, params=params, tip=tip)
    # float slider for standard deviation value param
    tip = 'Select the standard deviation value for thresholding. We recommend 1.5 to 2. Lower values will return more GDV likelihood pixels. A higher value returns the converse.'
    w_like_thresh_stdv = build_float_slider(desc='Standard Deviation:', min_val=0.5, max_val=3.5, step_val=0.5,
                                            val=1.5, form='.1f', handler=like_thresh_stdv_handler, params=params, tip=tip)
    # radiobutton for show figure values param
    tip = 'Show an interactive map of likelihood (thresholded and non-thresholded) of selected date once data computed.'
    w_map_likelihood = build_radio_button(desc='Show Map:', opts=[('Yes', True), ('No', False)], val=True,
                                          handler=map_likelihood_handler, params=params, tip=tip)
    # write data path
    w_write_data_path = build_file_dialog(directory=os.getcwd(), filename='', title='Export Folder:',
                                          handler=write_data_path_handler, params=params)
    # create throw-away html title for controls
    # NOTE(review): `lbl` is never added to any container below — dead widget? confirm
    lbl = widgets.HTML(value='<h4>Set groundwater dependent vegetation likelihood thresholding parameters and data export options: </h4>')
    empty_cell = build_empty_cell(desc=' ', val=' ')
    # stylise dashboard
    control_layout = widgets.Layout(margin='10px 15px 10px 0px')
    # build dashboard
    w_dash_r0 = widgets.HBox([w_ground_sites_path], layout=control_layout)
    w_dash_r1 = widgets.HBox([w_like_thresh_type, w_like_thresh_date, w_like_thresh_stdv, w_map_likelihood], layout=control_layout)
    w_dash_r2 = widgets.HBox([w_write_data_path], layout=control_layout)
    # create dash as accordion
    #w_dash = widgets.Accordion(children=[w_dash_r5, w_dash_r0, w_dash_r1, w_dash_r2, w_dash_r3, w_dash_r4], selected_index=1)
    w_dash = widgets.Accordion(children=[w_dash_r0, w_dash_r1, w_dash_r2], selected_index=1)
    w_dash.set_title(0, 'Groundtruthed shapefile import parameters')
    w_dash.set_title(1, 'Likelihood modelling parameters')
    w_dash.set_title(2, 'Export likelihood data parameters')
    # return dashboard
    return w_dash
# generate trend dashboard
def build_trend_dashboard(params):
# event handler for trend platform (ls or s2)
def trend_platform_handler(event, params):
if event['type'] == 'change':
params.platform = event['new']
# event handler for trend product (nbar or nbart)
def trend_product_handler(event, params):
if event['type'] == 'change':
params.product = event['new']
# event handler for trend query date range (converts year ints to YYYY-MM-DD strings)
def trend_query_date_handler(event, params):
if event['type'] == 'change':
params.query_dates = helpers.prepare_dates(date_tuple=event['new'])
# event handler for trend minimum cloud cover percentage
def trend_cloud_cover_handler(event, params):
if event['type'] == 'change':
params.cloud_cover = helpers.prepare_cloud_percentage(event['new'])
# event handler for trend cloud masking boolean
def trend_cloud_mask_handler(event, params):
if event['type'] == 'change':
params.cloud_mask = event['new']
# event handler for mankendall type handler
def trend_mk_type_handler(event, params):
if event['type'] == 'change':
params.mk_type = event['new']
w_time_slice.index = None
w_time_slice.options = change_time_slices(params.mk_type)
# event handler for trend time slice handler
def trend_time_slice_handler(event, params):
if event['type'] == 'change':
params.time_slice = event['new']
# event handler for mankendall significant only handler
def trend_sig_only_handler(event, params):
if event['type'] == 'change':
params.mk_sig_only = event['new']
# event handler for mannkendall trend type handler
def trend_trend_type_handler(event, params):
if event['type'] == 'change':
params.trend_type = event['new']
# event handler for trend vegetation index (ndvi, savi, etc.)
def trend_veg_idx_handler(event, params):
if event['type'] == 'change':
params.veg_idx = | |
# Source: laurennk/QCElemental — qcelemental/physical_constants/context.py
"""
Contains relevant physical constants
"""
import collections
from decimal import Decimal
from typing import Union
from functools import lru_cache
from ..datum import Datum, print_variables
from .ureg import build_units_registry
class PhysicalConstantsContext:
"""CODATA 2014 physical constants set from NIST.
Parameters
----------
context : {'CODATA2014'}
Origin of loaded data.
Attributes
----------
name : str
The name of the context ('CODATA2014')
pc : dict of Datum
Each physical constant is an entry in `pc`, where key is the
lowercased string of the NIST name (or any alias) and the
value is a Datum object with `lbl` the exact NIST name string,
`units`, `data` value as Decimal object, and any uncertainty
in the `comment` field.
year : int
The year the context was created.
"""
_transtable = str.maketrans(' -/{', '__p_', '.,()}')
def __init__(self, context="CODATA2014"):
self.pc = collections.OrderedDict()
from ..data import nist_2014_codata
if context == "CODATA2014":
self.doi = nist_2014_codata["doi"]
self.raw_codata = nist_2014_codata['constants']
# physical constant loop
for k, v in self.raw_codata.items():
self.pc[k] = Datum(
v["quantity"],
v["unit"],
Decimal(v["value"]),
comment='uncertainty={}'.format(v["uncertainty"]),
doi=self.doi)
else:
raise KeyError("Context set as '{}', only contexts {'CODATA2014', } are currently supported")
self.name = context
self.year = int(context.replace("CODATA", ""))
self._ureg = None
# Extra relationships
self.pc['calorie-joule relationship'] = Datum(
'calorie-joule relationship', 'J', Decimal('4.184'), comment='uncertainty=(exact)')
aliases = [
('h', 'J', self.pc['hertz-joule relationship'].data, 'The Planck constant (Js)'),
('c', 'Hz', self.pc['inverse meter-hertz relationship'].data, 'Speed of light (ms$^{-1}$)'),
('kb', 'J', self.pc['kelvin-joule relationship'].data, 'The Boltzmann constant (JK$^{-1}$)'),
('R', 'J mol^-1 K^-1', self.pc['molar gas constant'].data, 'Universal gas constant (JK$^{-1}$mol$^{-1}$)'),
('bohr2angstroms', 'AA', self.pc['bohr radius'].data * Decimal('1.E10'), 'Bohr to Angstroms conversion factor'),
('bohr2m', 'm', self.pc['bohr radius'].data, 'Bohr to meters conversion factor'),
('bohr2cm', 'cm', self.pc['bohr radius'].data * Decimal('100'), 'Bohr to centimeters conversion factor'),
('amu2g', 'g', self.pc['atomic mass constant'].data * Decimal('1000'), 'Atomic mass units to grams conversion factor'),
('amu2kg', 'kg', self.pc['atomic mass constant'].data, 'Atomic mass units to kg conversion factor' ),
('au2amu', 'u', self.pc['electron mass in u'].data, 'Atomic units (m$@@e$) to atomic mass units conversion factor'),
('hartree2J', 'J', self.pc['hartree energy'].data, 'Hartree to joule conversion factor'),
('hartree2aJ', 'aJ', self.pc['hartree energy'].data * Decimal('1.E18'), 'Hartree to attojoule (10$^{-18}$J) conversion factor'),
('cal2J', 'J', self.pc['calorie-joule relationship'].data, 'Calorie to joule conversion factor'),
('dipmom_au2si', 'C m', self.pc['atomic unit of electric dipole mom.'].data, 'Atomic units to SI units (Cm) conversion factor for dipoles'),
('dipmom_au2debye', '???', self.pc['atomic unit of electric dipole mom.'].data * Decimal('1.E21') / self.pc['hertz-inverse meter relationship'].data,
'Atomic units to Debye conversion factor for dipoles'),
('dipmom_debye2si', '???', self.pc['hertz-inverse meter relationship'].data * Decimal('1.E-21'), 'Debye to SI units (Cm) conversion factor for dipoles'),
('c_au', '', self.pc['inverse fine-structure constant'].data, 'Speed of light in atomic units'),
('hartree2ev', 'eV', self.pc['hartree energy in ev'].data, 'Hartree to eV conversion factor'),
('hartree2wavenumbers', 'cm^-1', self.pc['hartree-inverse meter relationship'].data * Decimal('0.01'), 'Hartree to cm$^{-1}$ conversion factor'),
('hartree2kcalmol', 'kcal mol^-1', self.pc['hartree energy'].data * self.pc['avogadro constant'].data * Decimal('0.001') / self.pc['calorie-joule relationship'].data,
'Hartree to kcal mol$^{-1}$ conversion factor'),
('hartree2kJmol', 'kJ mol^-1', self.pc['hartree energy'].data * self.pc['avogadro constant'].data * Decimal('0.001'), 'Hartree to kilojoule mol$^{-1}$ conversion factor'),
('hartree2MHz', 'MHz', self.pc['hartree-hertz relationship'].data * Decimal('1.E-6'), 'Hartree to MHz conversion factor'),
('kcalmol2wavenumbers', 'kcal cm mol^-1', Decimal('10') * self.pc['calorie-joule relationship'].data / self.pc['molar planck constant times c'].data,
'kcal mol$^{-1}$ to cm$^{-1}$ conversion factor'),
('e0', 'F m^-1', self.pc['electric constant'].data, 'Vacuum permittivity (Fm$^{-1}$)'),
('na', 'mol^-1', self.pc['avogadro constant'].data, "Avogadro's number"),
('me', 'kg', self.pc['electron mass'].data, 'Electron rest mass (in kg)'),
] # yapf: disable
# add alternate names for constants or derived values to help QC programs
for alias in aliases:
ident, units, value, comment = alias
self.pc[ident.lower()] = Datum(ident, units, value, comment=comment)
# add constants as directly callable member data
for qca in self.pc.values():
callname = qca.label.translate(self._transtable)
setattr(self, callname, float(qca.data))
def __str__(self):
return "PhysicalConstantsContext(context='{}')".format(self.name)
@property
def ureg(self) -> 'UnitRegistry':
"""Returns the internal Pint units registry.
Returns
-------
UnitRegistry
The pint context
"""
if self._ureg is None:
self._ureg = build_units_registry(self)
return self._ureg
def get(self, physical_constant: str, return_tuple: bool=False) -> Union[float, 'Datum']:
"""Access a physical constant, `physical_constant`.
Parameters
----------
physical_constant : str
Case-insensitive string of physical constant with NIST name.
return_tuple : bool, optional
See below.
Returns
-------
Union[float, 'Datum']
When ``return_tuple=False``, value of physical constant.
When ``return_tuple=True``, Datum with units, description, uncertainty, and value of physical constant as Decimal.
"""
qca = self.pc[physical_constant.lower()]
if return_tuple:
return qca
else:
return float(qca.data)
# h 'hertz-joule relationship' = 6.62606896E-34 # The Planck constant (Js)
# c 'inverse meter-hertz relationship' = 2.99792458E8 # Speed of light (ms$^{-1}$)
# kb 'kelvin-joule relationship' = 1.3806504E-23 # The Boltzmann constant (JK$^{-1}$)
# R 'molar gas constant' = 8.314472 # Universal gas constant (JK$^{-1}$mol$^{-1}$)
# bohr2angstroms 'Bohr radius' * 1.E10 = 0.52917720859 # Bohr to Angstroms conversion factor
# bohr2m 'Bohr radius' = 0.52917720859E-10 # Bohr to meters conversion factor
# bohr2cm 'Bohr radius' * 100 = 0.52917720859E-8 # Bohr to centimeters conversion factor
# amu2kg 'atomic mass constant' = 1.660538782E-27 # Atomic mass units to kg conversion factor
# au2amu 'electron mass in u' = 5.485799097E-4 # Atomic units (m$@@e$) to atomic mass units conversion factor
# hartree2J 'Hartree energy' = 4.359744E-18 # Hartree to joule conversion factor
# hartree2aJ 'Hartree energy' * 1.E18 = 4.359744 # Hartree to attojoule (10$^{-18}$J) conversion factor
# cal2J = 4.184 # Calorie to joule conversion factor
# dipmom_au2si 'atomic unit of electric dipole mom.' = 8.47835281E-30 # Atomic units to SI units (Cm) conversion factor for dipoles
# dipmom_au2debye 'atomic unit of electric dipole mom.' / ('hertz-inverse meter relationship' * 1.E-21) = 2.54174623 # Atomic units to Debye conversion factor for dipoles
# dipmom_debye2si 'hertz-inverse meter relationship' * 1.E-21 = 3.335640952E-30 # Debye to SI units (Cm) conversion factor for dipoles
# c_au 'inverse fine-structure constant' = 137.035999679 # Speed of light in atomic units
# hartree2ev 'Hartree energy in eV' = 27.21138 # Hartree to eV conversion factor
# hartree2wavenumbers 'hartree-inverse meter relationship' * 0.01 = 219474.6 # Hartree to cm$^{-1}$ conversion factor
# hartree2kcalmol hartree2kJmol / cal2J = 627.5095 # Hartree to kcal mol$^{-1}$ conversion factor
# hartree2kJmol 'Hartree energy'*'Avogadro constant'*0.001 = 2625.500 # Hartree to kilojoule mol$^{-1}$ conversion factor
# hartree2MHz 'hartree-hertz relationship' = 6.579684E9 # Hartree to MHz conversion factor
# kcalmol2wavenumbers 10. / 'molar Planck constant times c'*4.184 = 349.7551 # kcal mol$^{-1}$ to cm$^{-1}$ conversion factor
# e0 'electric constant' = 8.854187817E-12 # Vacuum permittivity (Fm$^{-1}$)
# na 'Avogadro constant' = 6.02214179E23 # Avogadro's number
# me 'electron mass' = 9.10938215E-31 # Electron rest mass (in kg)
def Quantity(self, data: str) -> 'Quantity':
"""Returns a Pint Quantity.
"""
return self.ureg.Quantity(data)
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every PhysicalConstantsContext alive for the cache's lifetime (flake8
    # B019); acceptable if contexts are module-level singletons — confirm.
    @lru_cache()
    def conversion_factor(self, base_unit: Union[str, 'Quantity'], conv_unit: Union[str, 'Quantity']) -> float:
        """Provides the conversion factor from one unit to another.
        The conversion factor is based on the current contexts CODATA.
        Parameters
        ----------
        base_unit : Union[str, 'Quantity']
            The original units
        conv_unit : Union[str, 'Quantity']
            The units to convert to
        Examples
        --------
        >>> conversion_factor("meter", "picometer")
        1e-12
        >>> conversion_factor("feet", "meter")
        0.30479999999999996
        >>> conversion_factor(10 * ureg.feet, "meter")
        3.0479999999999996
        Returns
        -------
        float
            The requested conversion factor
        """
        # Add a little magic incase the incoming values have scalars
        import pint
        factor = 1.0
        # First make sure we either have Units or Quantities
        if isinstance(base_unit, str):
            base_unit = self.ureg.parse_expression(base_unit)
        if isinstance(conv_unit, str):
            conv_unit = self.ureg.parse_expression(conv_unit)
        # Need to play with prevector if we have Quantities
        # (fold any scalar magnitudes into `factor`, then convert bare units)
        if isinstance(base_unit, pint.quantity._Quantity):
            factor *= base_unit.magnitude
            base_unit = base_unit.units
        if isinstance(conv_unit, pint.quantity._Quantity):
            factor /= conv_unit.magnitude
            conv_unit = conv_unit.units
        return self.ureg.convert(factor, base_unit, conv_unit)
def string_representation(self) -> str:
"""Print name, value, and units of all physical constants."""
return print_variables(self.pc)
def run_comparison(self):
"""Compare the existing physical constant information for Psi4 (in checkup_data folder) to `self`. Specialized use."""
try:
from .. import checkup_data
except ImportError: # pragma: no cover
print('Info for comparison (directory checkup_data) not installed. Run from source.')
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
tol = 1.e-8
print(bcolors.OKBLUE + '\nChecking ({}) physconst vs. Psi4 ...'.format(tol) + bcolors.ENDC)
for pc in dir(checkup_data.physconst):
if not pc.startswith('__'):
ref = self.get(pc)
val = getattr(checkup_data.physconst, pc)
rat = abs(1.0 - float(ref) / val)
if rat > 1.e-4:
print(bcolors.FAIL + 'Physical Constant {} ratio differs by {:12.8f}: {} (this) vs {} (psi)'.
format(pc, rat, ref, val) | |
"""
Reliable and extremely fast kernel density estimator for one and two-dimensional
samples.
The kernel density estimations here are kept as simple and as separated from the rest
of the code as possible. They do nothing but kernel density estimation. The
motivation for their partial reimplementation is that the existing kernel density
estimators are:
* suboptimal (like scipy where no kernel bandwidth optimization is done), or
* come with a gorilla holding a banana and the entire jungle although only the
banana is needed.
Do one thing and do it well.
Botev's Matlab codes are the starting point of this implementation as those mostly
follow the above principle.
TODO:
- [low] add cdf estimate as in ``kde_1d.m``.
- [high] more thorough input check, mostly shape and type.
- [high] check the details of ``histc`` in Matlab and ``np.histogram``; make sure
  that appending a zero to ``sample_hist`` is always valid.
"""
import copy
import logging
from typing import Iterable, Tuple, Union
import numpy as np
from scipy import fft, optimize
from scipy.stats import gaussian_kde
# Default number of grid points for the 1D estimator (must be a power of two
# because the estimator works in the DCT domain).
N_X_VEC = int(2**14)
# Default number of grid rows/columns per dimension for the 2D estimator.
N_ROW_MX = int(2**8)
# ======================================================================================
# 1D
# ======================================================================================
def kde_1d(
    sample_vec: Union[np.ndarray, list],
    n_x_vec: int = N_X_VEC,
    x_min: Union[int, float, None] = None,
    x_max: Union[int, float, None] = None,
    weight_vec: Union[np.ndarray, list, None] = None,
    return_bandwidth: bool = False,
) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, float]]:
    """
    Reliable and extremely fast kernel density estimator for one-dimensional sample.
    Gaussian kernel is assumed and the bandwidth is chosen automatically.
    Unlike many other implementations, this one is immune to problems caused by
    multimodal densities with widely separated modes. The estimation does not
    deteriorate for multimodal densities, because we never assume a parametric model
    for the sample.
    .. note::
        * The elements of ``sample_vec`` that fall between ``x_min`` and ``x_max`` will
          be treated as the full sample, i.e. the kernel density over ``[x_min, x_max]``
          will integrate to one.
        * If the search for finding the optimal bandwidth fails the functions falls
          back to ``scipy.stats.gaussian_kde``.
    Args:
        sample_vec:
            A vector of sample points from which the density estimate is constructed.
        n_x_vec:
            The number of ``x_vec`` points used in the uniform discretization of
            the interval ``[x_min, x_max]``. ``n_x_vec`` has to be a power of two. If
            ``n_x_vec`` is not a power of two, then ``n_x_vec`` is rounded up to the
            next power of two, i.e., ``n_x_vec`` is set to
            ``n_x_vec=2**ceil(log2(n_x_vec))``; the default value of ``n_x_vec`` is
            ``n_x_vec=2**14``.
        x_min:
            The lower boundary of the interval over which the density estimate is
            constructed.
        x_max:
            The upper boundary of the interval over which the density estimate is
            constructed.
        weight_vec:
            Weights of sample points. This must have the same shape as ``sample_vec``.
            If ``None`` (default), the samples are assumed to be equally weighted.
            Only the values of elements relative to each other matter,
            i.e. multiplying ``weight_vec`` by a non-negative scalar does not change
            the results.
        return_bandwidth:
            Should the used bandwidth be returned?
    Raises:
        ValueError: If ``weight_vec`` has at least one negative value.
    Warns:
        Root finding failed (Brent's method): Optimal bandwidth finding failed,
        falling back to the rule-of-thumb bandwidth of ``scipy.stats.gaussian_kde``.
    Returns:
        Kernel densities, a vector of length ``n_x_vec`` with the values of
        the density estimate at the grid points (``x_vec``).
        Kernel density grid (``x_vec``), a vector of grid points over which
        the kernel density estimate is computed.
        Optimal bandwidth (Gaussian kernel assumed), returned only if
        ``return_bandwidth`` is ``True``.
    Examples:
        .. code-block:: python
            import numpy as np
            import matplotlib.pyplot as plt
            from lightkde import kde_1d
        .. code-block:: python
            sample_vec = [
                -1.3145, -0.5197, 0.9326, 3.2358, 0.3814,
                -0.3226, 2.1121, 1.1357, 0.4376, -0.0332
            ]
            density_vec, x_vec = kde_1d(sample_vec)
        .. code-block:: python
            sample_vec = np.hstack((np.random.normal(loc=-8, size=100),
                                    np.random.normal(loc=-3, size=100),
                                    np.random.normal(loc=7, size=100)))
            density_vec, x_vec = kde_1d(sample_vec)
            plt.subplots()
            plt.plot(x_vec, density_vec)
            plt.show()
    The kde bandwidth selection method is outlined in [1]. This implementation is
    based on the implementation of <NAME> [2] who based his
    implementation on the Matlab implementation by <NAME> [3].
    References:
        [1] <NAME>, <NAME>, and <NAME> (2010) Annals of
            Statistics, Volume 38, Number 5, pages 2916-2957.
        [2] https://github.com/Daniel-B-Smith/KDE-for-SciPy/blob/a9982909bbb92a7e243e5fc9a74f957d883f1c5d/kde.py # noqa: E501
            Updated on: 6 Feb 2013.
        [3] https://nl.mathworks.com/matlabcentral/fileexchange/14034-kernel-density-estimator # noqa: E501
            Updated on: 30 Dec 2015.
    """
    sample_vec = np.array(sample_vec).ravel()
    # n_sample counts *unique* points, following Botev's reference implementation.
    n_sample = len(np.unique(sample_vec))
    # Parameters to set up the x_vec on which to calculate
    n_x_vec = int(2 ** np.ceil(np.log2(n_x_vec)))
    if x_min is None or x_max is None:
        sample_min = np.min(sample_vec)
        sample_max = np.max(sample_vec)
        sample_range = sample_max - sample_min
        # Pad the data range by 10% on each side that was not given explicitly.
        x_min = sample_min - sample_range / 10 if x_min is None else x_min
        x_max = sample_max + sample_range / 10 if x_max is None else x_max
    # watch out, scaling of weight_vec
    if weight_vec is not None:
        weight_vec = np.atleast_1d(weight_vec).squeeze()
        if np.any(weight_vec < 0):
            raise ValueError("Argument: weight_vec cannot have negative elements!")
        # Normalize the weights so they sum to n_sample (scale invariance).
        weight_vec = weight_vec / np.sum(weight_vec) * n_sample
    # Range of x_vec
    x_range = x_max - x_min
    # Histogram the sample_vec to get a crude first approximation of the density
    step = x_range / (n_x_vec - 1)
    # The 0.1*step padding ensures x_max itself ends up included in x_vec.
    x_vec = np.arange(start=x_min, stop=x_max + 0.1 * step, step=step)
    sample_hist, bin_edges = np.histogram(sample_vec, bins=x_vec, weights=weight_vec)
    # for easier comparison with Matlab, the count for [x_vec[-1], +Inf [ is also
    # added, i.e. 0
    sample_hist = np.append(sample_hist, 0)
    sample_hist = sample_hist / n_sample
    # discrete cosine transform of initial sample_vec
    dct_sample = fft.dct(sample_hist, norm=None)
    ic = np.arange(1, n_x_vec, dtype=float) ** 2
    sq_dct_sample = (dct_sample[1:] / 2) ** 2.0
    # The fixed point calculation finds the bandwidth = t_star
    guess = 0.1
    try:
        t_star = optimize.brentq(
            f=fixed_point, a=0, b=guess, args=(n_sample, ic, sq_dct_sample)
        )
    except (ValueError, RuntimeError) as e:
        logging.warning(
            "Failed to find the optimal bandwidth.\n\t"
            f"Root finding (Brent's method) failed with error: {e}.\n\t"
            "We fall back to use ``scipy.stats.gaussian_kde``).\n\t"
            "Please carefully check the results!"
        )
        # t_star = 0.28 * n_x_vec ** (-2 / 5)
        gkde = gaussian_kde(sample_vec, weights=weight_vec)
        density_vec = gkde.evaluate(x_vec)
        if return_bandwidth:
            # The fallback bandwidth is not comparable to t_star; report NaN.
            return density_vec, x_vec, np.nan
        else:
            return density_vec, x_vec
    # Smooth the DCTransformed sample_vec using t_star
    sm_dct_sample = dct_sample * np.exp(
        -np.arange(n_x_vec) ** 2 * np.pi**2 * t_star / 2
    )
    # Inverse DCT to get density
    density_vec = fft.idct(sm_dct_sample, norm=None) / x_range
    # t_star is the optimal squared bandwidth on the unit interval; rescale
    # back to the data range.
    bandwidth = np.sqrt(t_star) * x_range
    # Renormalize so the estimate integrates to exactly one over x_vec.
    density_vec = density_vec / np.trapz(density_vec, x_vec)
    if return_bandwidth:
        return density_vec, x_vec, bandwidth
    else:
        return density_vec, x_vec
def fixed_point(t, n_sample, ic, sq_dct_sample):
    """Evaluate the fixed-point function ``t - zeta * gamma**[l](t)``.

    The root of this function in ``t`` is the optimal (squared, unit-interval)
    bandwidth used by ``kde_1d``; see Botev et al. (2010).

    Args:
        t: Candidate squared bandwidth.
        n_sample: Number of (unique) sample points.
        ic: Squared integer frequencies, ``(1..n-1)**2``.
        sq_dct_sample: Squared, halved DCT coefficients of the sample histogram.
    """
    ell = 7  # highest derivative order used in the downward recursion

    # Work in extended precision: the sums involve large powers of pi.
    ic = np.longdouble(ic)
    n_sample = np.longdouble(n_sample)
    sq_dct_sample = np.longdouble(sq_dct_sample)

    def norm_deriv(order, t_val):
        # Functional ||f^(order)||^2 evaluated at "time" t_val.
        return (
            2
            * np.pi ** (2 * order)
            * np.sum(ic**order * sq_dct_sample * np.exp(-ic * np.pi**2 * t_val))
        )

    f = norm_deriv(ell, t)
    for s in range(ell, 1, -1):
        k0 = np.prod(range(1, 2 * s, 2)) / np.sqrt(2 * np.pi)
        const = (1 + (1 / 2) ** (s + 1 / 2)) / 3
        t_s = (2 * const * k0 / n_sample / f) ** (2 / (3 + 2 * s))
        f = norm_deriv(s, t_s)
    return t - (2 * n_sample * np.sqrt(np.pi) * f) ** (-2 / 5)
# ======================================================================================
# 2D
# ======================================================================================
def kde_2d(
sample_mx: Union[np.ndarray, list],
n_row_mx: int = N_ROW_MX,
xy_min: Union[np.ndarray, Iterable] = None,
xy_max: Union[np.ndarray, Iterable] = None,
weight_vec: Union[np.ndarray, list] = None,
return_bandwidth: bool = False,
) -> Union[
Tuple[np.ndarray, np.ndarray, np.ndarray],
Tuple[np.ndarray, np.ndarray, np.ndarray, float],
]:
"""
Fast and accurate state-of-the-art bivariate kernel density estimator with
diagonal bandwidth matrix.
The kernel is assumed to be Gaussian. The two bandwidth parameters are chosen
optimally without ever using/assuming a parametric model for the sample_vec or
any "rules of thumb". Unlike many other procedures, this one is immune to
accuracy failures in the estimation of multimodal densities with widely separated
modes.
Args:
sample_mx:
A 2D matrix of sample_vec from which the density estimate is
constructed, the matrix must have two columns that represent the two
coordinates (x,y) of the 2D sample_vec.
n_row_mx:
Number of points along each dimension (same for columns) where the
estimate of the density will be returned, i.e. total number of points is
``n_row_x_mx**2``.
xy_min:
The lower x and y boundaries of the interval over which the | |
# -*- coding: utf-8 -*-
"""Product views."""
from collections import defaultdict
from flask import Blueprint, render_template, request, jsonify, make_response, redirect
from flask_login import login_required, current_user
from pid.common.models import Project, HardwareType, Company, Disposition, Approver
from pid.database import get_record_by_id_and_class
from pid.vendorpart.models import VendorPart
from pid.mail import send_email
from pid.part.models import Part
from pid.user.models import User
from .forms import CreateBuildForm, AddExtraProductComponentForm
from .models import Build, Product, ProductComponent, Discrepancy, ExtraProductComponent
from pid.vendorproduct.models import VendorProduct, VendorBuild
blueprint = Blueprint('product', __name__, url_prefix='/product', static_folder='../static')
# ======= BUILD VIEWS ======= #
def _create_product_components(product, part):
    """Create one ProductComponent per unit of quantity for every component
    of *part*, preserving the part's ordering."""
    for part_component in part.components:
        for _ in range(part_component.quantity):
            if part_component.part:
                ProductComponent.create(parent=product, part=part_component.part, ordering=part_component.ordering)
            elif part_component.vendor_part:
                ProductComponent.create(parent=product, vendor_part=part_component.vendor_part, ordering=part_component.ordering)


@blueprint.route('/build/create', methods=['GET', 'POST'])
@login_required
def create_build():
    """Create new Build. Also creates one or more Products.

    GET renders the creation modal pre-populated from the part's design.
    POST creates the Build (or reuses an existing one), then one Product per
    serial number, plus optional LOT and STOCK products. Returns JSON on
    success and re-renders the modal with a 500 status on validation failure.
    """
    form = CreateBuildForm(request.form)
    if request.method == 'GET':
        part = Part.get_by_id(request.args.get('part_id'))
        existing_build_id = request.args.get('existing_build_id')
        if existing_build_id:
            existing_build = Build.get_by_id(existing_build_id)
            build_identifier = existing_build.build_identifier
        else:
            build_identifier = Build.get_next_build_identifier_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier)
        lot_identifier = Product.get_next_lot_number_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier)
        existing_serial_numbers = ','.join(Product.get_serial_numbers_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier))
        # Pre-populate with values from design
        form.project.data = part.design.project
        variables = {
            'form': form,
            'part': part,
            'build_identifier': build_identifier,
            'lot_identifier': lot_identifier,
            'existing_serial_numbers': existing_serial_numbers,
            'existing_build_id': existing_build_id
        }
        return render_template('product/create_build_modal.html', **variables)
    validated, data = form.validate_on_submit()
    if validated:
        part = Part.get_by_id(form.part_id.data)
        vendor = form.vendor.data
        owner = form.owner.data
        build_identifier = form.build_identifier.data
        # Reuse the existing build if one was selected, otherwise create one
        if form.existing_build_id.data:
            build = Build.get_by_id(form.existing_build_id.data)
        else:
            build = Build.create(part=part, vendor=vendor, owner=owner, build_identifier=build_identifier)
        revision = part.design.revision
        summary = part.design.summary
        hardware_type = form.hardware_type.data
        project = form.project.data
        product = None  # last product created; its number is echoed back to the client
        # One serialized product per requested serial number
        for sn in data.get('serial_numbers', []):
            product = Product.create(serial_number=sn, part=part, build=build, revision=revision, summary=summary,
                                     hardware_type=hardware_type, project=project, owner=owner)
            _create_product_components(product, part)
        # Optional LOT product
        lot_record = data.get('lot_record', None)
        if lot_record:
            product = Product.create(serial_number=lot_record, part=part, build=build, revision=revision,
                                     summary=summary, hardware_type=hardware_type, product_type='LOT',
                                     project=project, owner=owner)
            _create_product_components(product, part)
        # Optional STOCK product (created without components)
        if data.get('is_stock', False):
            product = Product.create(serial_number='STCK', part=part, build=build, revision=revision,
                                     summary=summary, hardware_type=hardware_type, product_type='STOCK',
                                     project=project, owner=owner)
        if product is None:
            # Previously this path crashed with a NameError; report it explicitly.
            return jsonify({'success': False, 'error': 'No serial numbers, lot, or stock requested.'}), 400, {'ContentType': 'application/json'}
        jsonData = {
            'success': True,
            'product_number': product.product_number.replace(' ', '-')  # Slugify product_number
        }
        return jsonify(jsonData), 200, {'ContentType': 'application/json'}
    else:
        part = Part.get_by_id(request.form['part_id'])
        build_identifier = Build.get_next_build_identifier_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier)
        lot_identifier = Product.get_next_lot_number_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier)
        existing_serial_numbers = ','.join(Product.get_serial_numbers_for_design_number_and_part_identifier(part.design.design_number, part.part_identifier))
        variables = {
            'form': form,
            'part': part,
            'build_identifier': build_identifier,
            'lot_identifier': lot_identifier,
            'existing_serial_numbers': existing_serial_numbers
        }
        return make_response(render_template('product/create_build_modal.html', **variables), 500)
@blueprint.route('/build/update', methods=['POST'])
@login_required
def update_build():
    """Apply a single inline (x-editable) field edit to a Build and log it."""
    build_id = request.form['pk']
    # UID format: [fieldname]-[classname]-[id]-editable; the field name is the
    # first token.
    field = request.form['name'].split('-')[0]
    field_value = request.form['value']
    # TODO: Check if build exists
    build = Build.get_by_id(build_id)
    original_value = None
    if field == 'owner':
        original_value = build.owner.get_name() if build.owner else None
        owner = User.get_by_id(field_value)
        build.update(owner=owner)
        field_value = owner.get_name() if owner else None
    elif field == 'vendor':
        original_value = build.vendor.name if build.vendor else None
        vendor = Company.get_by_id(field_value)
        build.update(vendor=vendor)
        field_value = vendor.name if vendor else None
    elif field == 'notes':
        original_value = build.notes
        build.update(notes=field_value)
    elif field == 'purchase_order':
        original_value = build.purchase_order
        build.update(purchase_order=field_value)
    build.add_change_log_entry(action='Edit', field=field.title().replace('_', ' '),
                               original_value=original_value, new_value=field_value)
    return jsonify({'success': True}), 200, {'ContentType': 'application/json'}
# ======= PRODUCT VIEWS ======= #
@blueprint.route('/add_extra_product_component/', methods=['POST'])
@login_required
def add_extra_product_component():
    """Attach one or more extra (off-BOM) components to a product.

    On success re-renders the component list; on validation failure re-renders
    the modal with a 500 status so the client keeps it open.
    """
    form = AddExtraProductComponentForm(request.form)
    validated = form.validate_on_submit()
    if validated:
        quantity = form.quantity.data
        part_group = form.part_group.data
        part_id = form.part_id.data
        product_id = form.product_id.data
        product = Product.get_by_id(product_id)
        # All newly added units share the next ordering slot.
        ordering = len(product.extra_components) + 1
        epc = None  # last created component; guards the change-log entry below
        if part_group == 'Part':  # part_group from typeahead group, not class name
            part = Part.get_by_id(part_id)
            for _ in range(quantity):
                epc = ExtraProductComponent.create(parent=product, part=part, ordering=ordering)
        elif part_group == 'Vendor Part':  # part_group from typeahead group, not class name
            vendor_part = VendorPart.get_by_id(part_id)
            for _ in range(quantity):
                epc = ExtraProductComponent.create(parent=product, vendor_part=vendor_part, ordering=ordering)
        components_array = arrange_product_components(product)
        extra_components_array = arrange_extra_product_components(product)
        variables = {
            'success': True,
            'product': product,
            'components_array': components_array,
            'extra_components_array': extra_components_array
        }
        # Guard: previously a zero quantity or an unknown part_group raised a
        # NameError here because ``epc`` was never assigned.
        if epc is not None:
            new_value = '{0} - Quantity: {1}'.format(epc.get_part().get_descriptive_url(), quantity)
            product.add_change_log_entry(action='Add', field='Extra Component', new_value=new_value)
        return render_template('product/as-built/component_list.html', **variables)
    else:
        product_id = form.product_id.data
        product = Product.get_by_id(product_id)
        part = None
        if form.part_id.data:
            part_group = form.part_group.data
            part_id = form.part_id.data
            if part_group == 'Part':  # part_group from typeahead, not class name
                part = Part.get_by_id(part_id)
            elif part_group == 'Vendor Part':  # part_group from typeahead, not class name
                part = VendorPart.get_by_id(part_id)
        variables = {
            'form': form,
            'product': product,
            'part': part
        }
        return make_response(render_template('product/add_product_component_modal.html', **variables), 500)
@blueprint.route('/delete_product_component', methods=['POST'])
@login_required
def delete_product_component():
    """Delete a single extra component, or every matching (un)assigned one."""
    # Only ExtraProductComponents can be deleted
    component = ExtraProductComponent.get_by_id(request.form['pk'])
    amount = request.form['amount']
    old_value = '{0}'.format(component.get_part().get_descriptive_url())
    parent = component.parent
    if amount == 'single':
        targets = [component]
    elif amount == 'all_unassigned':
        targets = component.get_all_unassigned_extra_product_components_like_this()
    elif amount == 'all_assigned':
        targets = component.get_all_assigned_extra_product_components_like_this()
    else:
        targets = []
    # TODO: find a way to delete these all in one call
    for target in targets:
        target.delete()
    parent.add_change_log_entry(action='Remove', field='Extra Component', original_value=old_value)
    return jsonify({'success': True}), 200, {'ContentType': 'application/json'}
@blueprint.route('/get_add_product_component_modal', methods=['POST'])
@login_required
def get_add_product_component_modal():
    """Render the 'add extra component' modal for the requested product."""
    product = Product.get_by_id(request.form.get('product_id'))
    form = AddExtraProductComponentForm(request.form)
    form.product_id.data = product.id
    return render_template(
        'product/add_product_component_modal.html',
        product=product,
        form=form,
    )
@blueprint.route('/get_update_product_revision_modal', methods=['POST'])
@login_required
def get_update_product_revision_modal():
    """Render the revision-change modal listing every revision but the current one."""
    product = Product.get_by_id(request.form.get('product_id'))
    revisions = [d.revision for d in product.part.design.find_all_revisions()]
    revisions.remove(product.revision)
    return render_template(
        'product/update_product_revision_modal.html',
        product=product,
        revisions=revisions,
    )
def _sync_components_for_revision(product, old_components, old_tracker, new_components, attr):
    """Reconcile one component group of *product* against the new revision.

    *attr* is either ``'part'`` or ``'vendor_part'``; *old_components* and
    *new_components* map part numbers to lists of components; *old_tracker*
    is the set of old part numbers, consumed as they are matched.
    """
    for pn, ncs in new_components.items():
        target = {attr: getattr(ncs[0], attr)}
        ocs = old_components.get(pn, None)
        if ocs:
            # Component existed in the old revision as well
            if len(ncs) > len(ocs):
                # More components in new rev than in old rev, make new ones
                for _ in range(len(ncs) - len(ocs)):
                    ProductComponent.create(parent=product, ordering=ncs[0].ordering, **target)
                # Then update the old ones to the new part
                # NOTE(review): when the counts are equal the old components are
                # left pointing at the old revision's part (original behavior,
                # preserved here) - confirm this is intended.
                for c in ocs:
                    c.update(**target)
            if len(ncs) < len(ocs):
                # Fewer components in new rev: delete all old ones, recreate
                for c in ocs:
                    c.delete()
                for _ in range(len(ncs)):
                    ProductComponent.create(parent=product, ordering=ncs[0].ordering, **target)
        else:
            # Component did not exist in the old revision
            for _ in range(len(ncs)):
                ProductComponent.create(parent=product, ordering=ncs[0].ordering, **target)
        old_tracker.discard(pn)
    # Whatever is left in the tracker was removed in the new revision
    for pn in old_tracker:
        for c in old_components.get(pn, []):
            c.delete()


@blueprint.route('/revise', methods=['POST'])
@login_required
def revise_product():
    """Move a product to another design revision, syncing its components."""
    product_id = request.form['product_id']
    new_revision = request.form['revision']
    product = Product.get_by_id(product_id)
    # Keep only parts (same identifier) belonging to the new revision.
    # The previous code removed items from the list *while iterating it*,
    # which can skip candidates; filter with a comprehension instead.
    parts = [p for p in product.part.get_parts_for_design_revisions()
             if p != product.part and p.design.revision == new_revision]
    # Should be left with only one part ideally
    new_part = parts[0]
    # Index the old components by part number for easy diffing
    old_part_components = defaultdict(list)
    old_vendor_components = defaultdict(list)
    old_part_tracker = set()
    old_vendor_tracker = set()
    for component in product.components:
        if component.part:
            old_part_components[component.part.part_number].append(component)
            old_part_tracker.add(component.part.part_number)
        else:
            old_vendor_components[component.vendor_part.part_number].append(component)
            old_vendor_tracker.add(component.vendor_part.part_number)
    # Expand the new revision's components by quantity
    new_part_components = defaultdict(list)
    new_vendor_components = defaultdict(list)
    for component in new_part.components:
        for _ in range(component.quantity):
            if component.part:
                new_part_components[component.part.part_number].append(component)
            else:
                new_vendor_components[component.vendor_part.part_number].append(component)
    # Same reconciliation for project parts and vendor parts
    _sync_components_for_revision(product, old_part_components, old_part_tracker, new_part_components, 'part')
    _sync_components_for_revision(product, old_vendor_components, old_vendor_tracker, new_vendor_components, 'vendor_part')
    product.add_change_log_entry(action='Revise', field='Revision',
                                 original_value=product.revision, new_value=new_revision)
    product.update(revision=new_revision, part=new_part, state=product.workflow.initial_state)
    return jsonify({'success': True, 'url': product.get_url()}), 200, {'ContentType': 'application/json'}
@blueprint.route('/typeahead_search', methods=['GET'])
@login_required
def typeahead_search():
    """Return product matches for the typeahead widget as JSON."""
    query = request.args.get('query')
    results = [
        {
            'class': product.get_class_name(),
            'icon': '<i class="pri-typeahead-icon pri-icons-record-product" aria-hidden="true"></i>',
            'id': product.id,
            'name': product.get_name(),
            'number': product.get_unique_identifier(),
            'object_type': 'Product',
            'state': product.state,
            'thumb_url': product.get_thumbnail_url(),
            'url': product.get_url(),
        }
        for product in Product.typeahead_search(query)
    ]
    return jsonify({'success': True, 'data': results}), 200, {'ContentType': 'application/json'}
@blueprint.route('/update', methods=['POST'])
@login_required
def update_product():
id = request.form['pk']
# UID for field will be ala [fieldname]-[classname]-[id]-editable, field name will be first section always
field = request.form['name'].split('-')[0]
| |
recursionFnc_(typepathString, i)
return dic
# **************************************************************************************************************** #
    @classmethod
    def _grh_getNodePortdata_(cls, nodepathString, portkeyString, asString):
        """Read a single port's value from a Maya node.

        Message ports return the connected source node path ('' when
        unconnected); enum ports honor *asString*; other ports return the
        plain ``getAttr`` value. Returns None for missing ports and for the
        excluded data types below.
        """
        # Data types intentionally not fetched via getAttr.
        exclude_datatype_list = [
            'mesh',
            'attributeAlias',
        ]
        typepathString = cls.MOD_maya_cmds.nodeType(nodepathString)
        if cls._dcc_getNodPortIsAppExist(nodepathString, portkeyString) is True:
            if cls._dcc_getNodPortIsMessage(typepathString, portkeyString):
                # Message port: report the source node path instead of a value.
                if cls._dcc_getNodPortSourceExist(nodepathString, portkeyString):
                    return bscMethods.MaAttrpath.nodepathString(
                        cls._dcc_getNodPortSourceStr(nodepathString, portkeyString)
                    )
                return ''
            elif cls._dcc_getNodPortIsEnumerate(typepathString, portkeyString):
                # Enum port: asString decides between label and index.
                return cls.MOD_maya_cmds.getAttr(
                    cls._dcc_toAttributeString(nodepathString, portkeyString),
                    asString=asString,
                    silent=1
                )
            else:
                datatype = cls._dcc_getNodPorttype(typepathString, portkeyString)
                if not datatype in exclude_datatype_list:
                    return cls.MOD_maya_cmds.getAttr(
                        cls._dcc_toAttributeString(nodepathString, portkeyString), silent=1
                    )
    @classmethod
    def _grh_getNodPortraw(cls, nodepathString, portkeyString, asString=True):
        """Read the raw value(s) of a node port.

        Scalar ports return a single value; array ports and compound
        ("multi branch") ports return a flat list built by expanding the
        port's index format(s).
        """
        def getArrayPortdataFnc_(nodepathString_, portStrings_):
            # Fetch each expanded port path individually (never as enum label).
            return [
                cls._grh_getNodePortdata_(
                    nodepathString_,
                    _l,
                    asString=False
                )
                for _l in portStrings_
            ]
        def getArrayFnc_(nodepathString_, formatStrings_):
            # Expand every (format-string, index-array) pair into concrete
            # port paths ...
            _lis = []
            for _format in formatStrings_:
                _formatString, _indexArray = _format
                _portStringList = []
                for _index in _indexArray:
                    s = _formatString.format(*_index)
                    _portStringList.append(s)
                _lis.append(_portStringList)
            _lis_ = []
            # ... then interleave them so values of one index stay together.
            for _j in zip(*_lis):
                for _k in _j:
                    _lis_.append(
                        _k
                    )
            return getArrayPortdataFnc_(nodepathString_, _lis_)
        def getMultiBranchFnc_(nodepathString_, portkeyString_):
            # Compound port: resolve each child port key; switch to array
            # expansion as soon as any child reports an index format.
            # NOTE(review): if only SOME children have an index format, _lis
            # mixes (format, indexes) tuples with plain strings and
            # getArrayFnc_ will fail unpacking - confirm whether that mixed
            # case can occur in practice.
            _lis = []
            _isArrayEnable = False
            _portkeyStringList = cls._grh_getNodePortSearchPortkeyStrings(nodepathString_, portkeyString_)
            for _i in _portkeyStringList:
                _format = cls._grh_getNodePortFormat(nodepathString_, _i)
                if _format is not None:
                    _isArrayEnable = True
                    _lis.append(_format)
                else:
                    _lis.append(_i)
            if _isArrayEnable is True:
                return getArrayFnc_(nodepathString_, _lis)
            else:
                return getArrayPortdataFnc_(nodepathString_, _lis)
        typepathString = cls.MOD_maya_cmds.nodeType(nodepathString)
        if cls._dcc_getNodPortHasChildren(typepathString, portkeyString) is True:
            return getMultiBranchFnc_(nodepathString, portkeyString)
        else:
            format_ = cls._grh_getNodePortFormat(nodepathString, portkeyString)
            if format_ is not None:
                # Array port: expand indices and fetch each element.
                formatString, indexArray = format_
                return getArrayFnc_(nodepathString, [(formatString, indexArray)])
            else:
                return cls._grh_getNodePortdata_(
                    nodepathString,
                    portkeyString,
                    asString=asString
                )
    @classmethod
    def _grh_getNodePortIndexArray(cls, nodepathString, portkeyString):
        """Collect, per port key, the occupied multi-indices of the port and of
        all of its recursively expanded children.

        Returns a dict mapping port keys to lists of indices.
        """
        def recursionFnc_(categoryString_, nodepathString_, portString_):
            if cls._dcc_getNodPortIsAppExist(nodepathString_, portString_):
                key = cls._grh_getNodePortkey_(portString_)
                _indexes = cls._dcc_getNodPortIndexes(nodepathString_, portString_)
                _children = cls._dcc_getNodPortChildren(categoryString_, portString_)
                _hasIndex = _indexes != []
                _hasChild = _children != []
                _check = (_hasIndex, _hasChild)
                if _indexes:
                    # Merge with indices already gathered for the same key,
                    # preserving first-seen order and skipping duplicates.
                    if key in dic:
                        __indexes = dic[key]
                        if __indexes != _indexes:
                            for _i in _indexes:
                                if not _i in __indexes:
                                    __indexes.append(_i)
                            dic[key] = __indexes
                    else:
                        dic[key] = _indexes
                # Recurse into every index/child combination that exists.
                if _check == (True, True):
                    for index_ in _indexes:
                        for childPortname in _children:
                            _fullpathPortnameString = u'{}[{}].{}'.format(portString_, index_, childPortname)
                            recursionFnc_(categoryString_, nodepathString_, _fullpathPortnameString)
                elif _check == (True, False):
                    for index_ in _indexes:
                        _fullpathPortnameString = u'{}[{}]'.format(portString_, index_)
                        recursionFnc_(categoryString_, nodepathString_, _fullpathPortnameString)
                elif _check == (False, True):
                    for childPortname in _children:
                        _fullpathPortnameString = u'{}.{}'.format(portString_, childPortname)
                        recursionFnc_(categoryString_, nodepathString_, _fullpathPortnameString)
        dic = {}
        typepathString = cls.MOD_maya_cmds.nodeType(nodepathString)
        portPathdata = cls._grh_getNodePortPathdata_(portkeyString)
        for i in portPathdata:
            recursionFnc_(typepathString, nodepathString, i)
        return dic
@classmethod
def _grh_getNodePortFormat(cls, nodepathString, portkeyString):
indexArrayDict = cls._grh_getNodePortIndexArray(nodepathString, portkeyString)
portPathdata = cls._grh_getNodePortPathdata_(portkeyString)
_portString = ''
_lis = []
for seq, i in enumerate(portPathdata):
_portname = i.split(cls.DEF_mya_node_port_pathsep)[-1]
if i in indexArrayDict:
indexes = indexArrayDict[i]
_indexString = u'[{{{}}}]'.format(len(_lis))
if seq > 0:
_portString += (cls.DEF_mya_node_port_pathsep + _portname + _indexString)
else:
_portString += (_portname + _indexString)
_lis.append(indexes)
else:
if seq > 0:
_portString += (cls.DEF_mya_node_port_pathsep + _portname)
else:
_portString += _portname
if _lis:
return _portString, bscMethods.NestedArray.mapTo(_lis)
class Mtd_MaUtility(Mtd_MaBasic):
    @classmethod
    def _dcc_getNodFullpathNodepathStr(cls, nodepathString):
        """Return the long (full-path) form of a node path."""
        if nodepathString.startswith(cls.DEF_mya_node_pathsep):
            return nodepathString
        return cls.MOD_maya_cmds.ls(nodepathString, long=1)[0]
    @classmethod
    def _toAppExistStringList(cls, nodepathString, fullPath=True):
        """Normalize a node path (or a sequence of them) into a list of paths
        that exist in the scene, as full paths or namespaced node names."""
        def _convert(pathStr_):
            if fullPath is True:
                return cls._dcc_getNodFullpathNodepathStr(pathStr_)
            return bscMethods.MaNodeString.nodenameWithNamespace(pathStr_)
        if isinstance(nodepathString, (str, unicode)):
            candidates = [nodepathString]
        elif isinstance(nodepathString, (tuple, list)):
            candidates = list(nodepathString)
        else:
            candidates = []
        return [_convert(i) for i in candidates if cls._isAppExist(i)]
    @staticmethod
    def _setListCleanup(lis):
        """Remove duplicates while keeping the first-seen order."""
        deduped = list(set(lis))
        deduped.sort(key=lis.index)
        return deduped
    @classmethod
    def _isAppExist(cls, nodepathString):
        """True when the given path refers to an existing scene object."""
        if nodepathString is None:
            return False
        return cls.MOD_maya_cmds.objExists(nodepathString)
class Mtd_MaAttribute(Mtd_MaBasic):
    # Mapping from Maya attribute type names to the generic port-type
    # vocabulary used by this module; types not listed here pass through
    # unchanged (see _getAttributePorttype).
    DEF_mya_porttype_dict = {
        'bool': 'boolean',
        'long': 'integer',
        'short': 'integer',
        'byte': 'integer',
        'float': 'float',
        'double': 'float',
        'char': 'string',
    }
@classmethod
def _getAttributeQueryNameString(cls, attrpathString):
_ = attrpathString.split(cls.DEF_mya_node_port_pathsep)[-1]
if _.endswith(u']'):
return _.split(u'[')[0]
return _
@classmethod
def _toAttributePortsepSplit(cls, attrpathString):
_ = attrpathString.split(cls.DEF_mya_node_port_pathsep)
return _[0], cls.DEF_mya_node_port_pathsep.join(_[1:])
@classmethod
def _getAttributeType(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
attributeType=1
)
@classmethod
def _getAttributeData(cls, attrpathString):
if cls._getAttributeIsAppExist(attrpathString) is True:
if cls._getAttributeIsMessage(attrpathString):
if cls._getAttributeHasSource(attrpathString):
return bscMethods.MaAttrpath.nodepathString(cls._getAttributeSource(attrpathString))
else:
return cls.MOD_maya_cmds.getAttr(attrpathString, silent=1)
@classmethod
def _getAttributePorttype(cls, attrpathString):
if cls._getAttributeIsEnum(attrpathString):
return 'string'
elif cls._getAttributeIsColor(attrpathString):
return 'color'
elif cls._getAttributeIsFilename(attrpathString):
return 'filename'
_ = cls._getAttributeType(attrpathString)
if _ in cls.DEF_mya_porttype_dict:
return cls.DEF_mya_porttype_dict[_]
return _
    @classmethod
    def _getAttributePortdata(cls, attrpathString, asString):
        """Read an attribute's value for port export.

        Message attributes return the source node path ('' when unconnected);
        enum attributes honor *asString*; compound-channel data types are
        unpacked to a plain list. Excluded heavyweight/unsupported types
        return None.
        """
        # Data types intentionally not fetched via getAttr.
        exclude_datatype_list = [
            'mesh',
            'attributeAlias',
            'TdataCompound',
        ]
        # Port types skipped entirely.
        exclude_porttype_list = [
            'polyFaces'
        ]
        if cls._getAttributeIsAppExist(attrpathString) is True:
            if cls._getAttributeIsMessage(attrpathString):
                if cls._getAttributeHasSource(attrpathString):
                    return bscMethods.MaAttrpath.nodepathString(cls._getAttributeSource(attrpathString))
                return ''
            else:
                porttype = cls._getAttributePorttype(attrpathString)
                if porttype not in exclude_porttype_list:
                    datatype = cls._getAttributeDatatype(attrpathString)
                    attrpathString = cls._getAttributeString_(attrpathString)
                    if datatype == 'enum':
                        return cls.MOD_maya_cmds.getAttr(attrpathString, asString=asString, silent=1)
                    elif datatype not in exclude_datatype_list:
                        _ = cls.MOD_maya_cmds.getAttr(attrpathString, silent=1)
                        # Maya returns compound channels as [(x, y, z)];
                        # flatten to a plain list.
                        if datatype in cls.DEF_mya_datatype_compchannel_list:
                            return list(_[0])
                        return _
@classmethod
def _getAttributeIsAppExist(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.objExists(attrpathString)
@classmethod
def _getAttributeIsNodeExist(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
exists=1
)
@classmethod
def _getAttributeIsCompound(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
usesMultiBuilder=1
)
@classmethod
def _getAttributeIsMultichannel(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
multi=1
)
@classmethod
def _getAttributeIndexes(cls, attrpathString):
"""
:param attrpathString: etc. aiRampFloat1.ramp
:return:
"""
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.getAttr(attrpathString, multiIndices=1, silent=1) or []
@classmethod
def _getAttributeIsMessage(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
message=1
)
@classmethod
def _getAttributeIsColor(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
usedAsColor=1
)
@classmethod
def _getAttributeIsFilename(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
usedAsFilename=1
)
@classmethod
def _getAttributeIsEnum(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
enum=1
)
@classmethod
def _getAttributeNicename(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
niceName=1
)
@classmethod
def _getAttributeHasParent(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
listParent=1
) is not None
@classmethod
def _getAttributeParentPortname(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
_ = cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
listParent=1
)
if _:
return _[0]
@classmethod
def _getAttributeHasChild(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
numberOfChildren=1
) > 0
@classmethod
def _getAttributeChildPortnameList(cls, attrpathString):
nodepathString, portpathString = cls._getAttributeQueryString_(attrpathString)
return cls.MOD_maya_cmds.attributeQuery(
cls._getAttributeQueryNameString(portpathString),
node=nodepathString,
listChildren=1
) or []
@classmethod
def _getAttributeQueryString_(cls, attrpathString):
if isinstance(attrpathString, (tuple, list)):
nodepathString, portpathString = attrpathString
else:
nodepathString, portpathString = cls._toAttributePortsepSplit(attrpathString)
return nodepathString, portpathString
@classmethod
def _getAttributeString_(cls, attrpathString):
if isinstance(attrpathString, (tuple, list)):
attrpathString = cls.DEF_mya_node_port_pathsep.join(list(attrpathString))
return attrpathString
@classmethod
def _getAttributeHasChannels(cls, attrpathString):
return cls._getAttributePorttype(attrpathString) in cls.DEF_mya_datatype_compchannel_list
@classmethod
def _getAttributeChannelnameList(cls, attrpathString):
if cls._getAttributeHasChannels(attrpathString):
return cls._getAttributeChildPortnameList(attrpathString)
return []
    @classmethod
    def _getAttributeDefaultData(cls, attrpathString):
        # TODO: not implemented yet -- always returns None.
        pass
@classmethod
def _getAttributeDatatype(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.getAttr(attrpathString, type=1, silent=1)
@classmethod
def _getAttributeHasSource(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.connectionInfo(attrpathString, isExactDestination=1)
@classmethod
def _getAttributeIsSource(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.connectionInfo(attrpathString, isExactSource=1)
@classmethod
def _getAttributeSource(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.connectionInfo(attrpathString, sourceFromDestination=1)
@classmethod
def _getAttributeHasTargets(cls, attrpathString):
return cls.MOD_maya_cmds.connectionInfo(attrpathString, isExactSource=1)
@classmethod
def _getAttributeIsTarget(cls, attrpathString):
return cls.MOD_maya_cmds.connectionInfo(attrpathString, isExactDestination=1)
@classmethod
def _getAttributeTargetList(cls, attrpathString):
attrpathString = cls._getAttributeString_(attrpathString)
return cls.MOD_maya_cmds.connectionInfo(attrpathString, destinationFromSource=1) or []
@classmethod
def _getAttributeNodeString(cls, attrpathString):
return bscMethods.MaAttrpath.nodepathString(attrpathString)
@classmethod
def _getAttributeFullpathPortname(cls, attrpathString):
return bscMethods.MaAttrpath.portpathString(attrpathString)
@classmethod
def _getAttributePortname(cls, attrpathString):
return bscMethods.MaAttrpath.name(attrpathString)
class Mtd_MaFile(Mtd_MaBasic):
    """File I/O helpers built on cmds.file (Maya ascii/binary and Alembic)."""
    # File-type names as understood by cmds.file(type=...).
    DEF_mya_type_maya_ascii = 'mayaAscii'
    DEF_mya_type_maya_binary = 'mayaBinary'
    AlembicType = 'Alembic'
    # Extension -> Maya file-type lookup.
    FileTypeDic = {
        '.ma': DEF_mya_type_maya_ascii,
        '.mb': DEF_mya_type_maya_binary,
        '.abc': AlembicType
    }
    # cmds.file export-mode flag names.
    MaFileExportAllOption = 'exportAll'
    MaFileExportSelectedOption = 'exportSelected'
    # Optional flags for exporting a selection.
    MaFileConstructionHistoryOption = 'constructionHistory'
    MaFileShaderOption = 'shader'
    #
    MaFileExportSelectedOptions = [
        MaFileConstructionHistoryOption,
        MaFileShaderOption
    ]
    # Default keyword arguments for cmds.file export;
    # 'type' is overridden per-call from the target file's extension.
    VAR_file_export_kwarg_dic = dict(
        type='mayaAscii',
        options='v=0',
        force=True,
        defaultExtensions=True,
        exportAll=True,
        preserveReferences=False,
    )
    # Default keyword arguments for cmds.file import.
    VAR_file_import_kwarg_dic = dict(
        options='v=0;',
        type='mayaAscii',
        i=True,
        renameAll=True,
        mergeNamespacesOnClash=True,
        namespace=':',
        preserveReferences=True
    )
@classmethod
def _getMaFileType(cls, fileString):
ext = bscMethods.OsFile.ext(fileString)
return cls.FileTypeDic.get(ext, cls.DEF_mya_type_maya_ascii)
@classmethod
def _maFileExportCommand(cls, fileString, optionKwargs=None):
if optionKwargs is None:
optionKwargs = cls.VAR_file_export_kwarg_dic.copy()
#
optionKwargs['type'] = cls._getMaFileType(fileString)
#
cls.MOD_maya_cmds.file(fileString, **optionKwargs)
@classmethod
def _maFileImportCommand(cls, fileString, optionKwargs=None):
if optionKwargs is None:
optionKwargs = cls.VAR_file_import_kwarg_dic.copy()
#
optionKwargs['type'] = cls._getMaFileType(fileString)
#
cls.MOD_maya_cmds.file(
fileString,
**optionKwargs
)
@classmethod
def _setMaFileImport(cls, fileString, namespace=':'):
optionKwargs = cls.VAR_file_import_kwarg_dic.copy()
#
optionKwargs['type'] = cls._getMaFileType(fileString)
optionKwargs['namespace'] = namespace
#
cls.MOD_maya_cmds.file(
fileString,
**optionKwargs
)
@classmethod
def _setMaFileImportWithGroup(cls, fileString, groupString, namespace=':'):
cls.MOD_maya_cmds.file(
fileString,
i=1,
options='v=0;',
type=cls._getMaFileType(fileString),
ra=1,
mergeNamespacesOnClash=1,
namespace=namespace,
preserveReferences=1,
groupReference=True,
groupString=groupString
)
    @classmethod
    def _setMaAlembicImport(cls, fileString, namespace=':'):
        """Import an Alembic cache and ensure a matching Alembic node exists.

        :param fileString: path of the .abc file
        :param namespace: namespace the imported nodes are placed under
        """
        # The AbcImport plugin provides the 'Alembic' file translator used below.
        cls.MOD_maya_cmds.loadPlugin('AbcImport', quiet=1)
        cls.MOD_maya_cmds.file(
            fileString,
            i=1,
            options='v=0;',
            type='Alembic',
            ra=1,
            mergeNamespacesOnClash=1,
            namespace=namespace,
            preserveReferences=1
        )
        #
        # Derive the expected Alembic node name from the file's base name.
        if namespace:
            alembicNodeString = namespace + ':' + bscMethods.OsFile.name(fileString) + '_AlembicNode'
        else:
            alembicNodeString = bscMethods.OsFile.name(fileString) + '_AlembicNode'
        if cls.MOD_maya_cmds.objExists(alembicNodeString):
            pass
        else:
            cls.MOD_maya_cmds.createNode(cls.DEF_mya_type_alembic, name=alembicNodeString)
        # Point the node at the cache file (re-pointed even when it pre-existed).
        cls.MOD_maya_cmds.setAttr(alembicNodeString + '.abc_File', fileString, type='string')
    @classmethod
    def _setMaMaterialExport(cls, fileString, shadingEngines, aiAovs):
        """Export the given shading engines (and Arnold AOVs) to ``fileString``.

        Selects the nodes (noExpand keeps set membership unexpanded), renames
        the scene to the target path, then performs an exportSelected.
        """
        cls.MOD_maya_cmds.select(clear=1)
        if shadingEngines:
            cls.MOD_maya_cmds.select(shadingEngines, noExpand=1)
        if aiAovs:
            cls.MOD_maya_cmds.select(aiAovs, add=1)
        # exportSelected writes to the current scene name, so rename first
        # to direct the export at fileString.
        cls.MOD_maya_cmds.file(rename=fileString)
        cls.MOD_maya_cmds.file(
            force=1,
            options='v=0',
            type=cls._getMaFileType(fileString),
            preserveReferences=0,
            exportSelected=1
        )
        cls.MOD_maya_cmds.select(clear=1)
    @classmethod
    def _setMaFileExportSelected(cls, fileString, nodepathString, withHistory=False):
        """Export the given node(s) to ``fileString`` via a temporary file.

        The export is written to a temporary path and copied over afterwards,
        so a partially-written file never sits at the final destination.

        :param withHistory: include construction history in the export
        """
        temporaryFile = bscMethods.OsFile.temporaryName(fileString)
        cls.MOD_maya_cmds.select(nodepathString)
        cls.MOD_maya_cmds.file(
            temporaryFile,
            force=1,
            options='v=0',
            type=cls._getMaFileType(fileString),
            preserveReferences=0,
            exportSelected=1,
            constructionHistory=withHistory
        )
        cls.MOD_maya_cmds.select(clear=1)
        bscMethods.OsFile.copyTo(temporaryFile, fileString)
@classmethod
def _setMaFileExportSelectedWithSet(cls, fileString, nodepathString, setString, withHistory=False):
cls.MOD_maya_cmds.select(clear=1)
cls.MOD_maya_cmds.select(nodepathString)
if isinstance(setString, str):
if | |
else self.token()
)
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = map(int, self.handle_refs())
self.assertNextToken(DrtTokens.COMMA)
drs1 = self.process_next_expression(None)
self.assertNextToken(DrtTokens.COMMA)
drs2 = self.process_next_expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2)
elif tok == 'eq':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (
self.discourse_id if self.discourse_id is not None else self.token()
)
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
var1 = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
var2 = int(self.token())
self.assertNextToken(DrtTokens.CLOSE)
return BoxerEq(disc_id, sent_id, word_ids, var1, var2)
elif tok == 'card':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (
self.discourse_id if self.discourse_id is not None else self.token()
)
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = map(int, self.handle_refs())
self.assertNextToken(DrtTokens.COMMA)
var = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
value = self.token()
self.assertNextToken(DrtTokens.COMMA)
type = self.token()
self.assertNextToken(DrtTokens.CLOSE)
return BoxerCard(disc_id, sent_id, word_ids, var, value, type)
elif tok == 'whq':
self.assertNextToken(DrtTokens.OPEN)
disc_id = (
self.discourse_id if self.discourse_id is not None else self.token()
)
self.assertNextToken(DrtTokens.COMMA)
sent_id = self.nullableIntToken()
self.assertNextToken(DrtTokens.COMMA)
word_ids = list(map(int, self.handle_refs()))
self.assertNextToken(DrtTokens.COMMA)
ans_types = self.handle_refs()
self.assertNextToken(DrtTokens.COMMA)
drs1 = self.process_next_expression(None)
self.assertNextToken(DrtTokens.COMMA)
var = int(self.token())
self.assertNextToken(DrtTokens.COMMA)
drs2 = self.process_next_expression(None)
self.assertNextToken(DrtTokens.CLOSE)
return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2)
except Exception as e:
raise LogicalExpressionException(self._currentIndex, str(e))
assert False, repr(tok)
def nullableIntToken(self):
t = self.token()
return int(t) if t != 'None' else None
def get_next_token_variable(self, description):
try:
return self.token()
except ExpectedMoreTokensException as e:
raise ExpectedMoreTokensException(e.index, 'Variable expected.')
class AbstractBoxerDrs(object):
    """Base class for all Boxer DRS expressions."""

    def variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        variables, events, propositions = self._variables()
        # A name appearing as an event (or proposition) is removed from the
        # plain-variable set; a proposition that is also an event counts as
        # an event.
        return (variables - (events | propositions), events, propositions - events)

    def variable_types(self):
        """Map each variable to one of the type letters 'z', 'e', 'p'."""
        return {
            name: letter
            for letter, names in zip(('z', 'e', 'p'), self.variables())
            for name in names
        }

    def _variables(self):
        """
        :return: (set<variables>, set<events>, set<propositions>)
        """
        return (set(), set(), set())

    def atoms(self):
        return set()

    def clean(self):
        return self

    def _clean_name(self, name):
        # Replace characters that are unsafe in logic predicate names.
        return name.replace('-', '_').replace("'", "_")

    def renumber_sentences(self, f):
        return self

    def __hash__(self):
        return hash("{0}".format(self))
@python_2_unicode_compatible
class BoxerDrs(AbstractBoxerDrs):
    """A basic DRS: discourse referents plus conditions, optionally with an
    implication consequent."""

    def __init__(self, refs, conds, consequent=None):
        AbstractBoxerDrs.__init__(self)
        self.refs = refs
        self.conds = conds
        self.consequent = consequent

    def _variables(self):
        variables = (set(), set(), set())
        for cond in self.conds:
            for s, v in zip(variables, cond._variables()):
                s.update(v)
        if self.consequent is not None:
            for s, v in zip(variables, self.consequent._variables()):
                s.update(v)
        return variables

    def atoms(self):
        atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set())
        if self.consequent is not None:
            atoms.update(self.consequent.atoms())
        return atoms

    def clean(self):
        consequent = self.consequent.clean() if self.consequent else None
        return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent)

    def renumber_sentences(self, f):
        consequent = self.consequent.renumber_sentences(f) if self.consequent else None
        return BoxerDrs(
            self.refs, [c.renumber_sentences(f) for c in self.conds], consequent
        )

    def __repr__(self):
        s = 'drs([%s], [%s])' % (
            ', '.join("%s" % r for r in self.refs),
            ', '.join("%s" % c for c in self.conds),
        )
        if self.consequent is not None:
            s = 'imp(%s, %s)' % (s, self.consequent)
        return s

    def __eq__(self, other):
        # Bug fix: the original used reduce(operator.and_, ...) over the
        # pairwise condition comparisons, which raises TypeError when both
        # DRSs have empty `conds` (reduce of an empty sequence without an
        # initializer). all() handles the empty case and short-circuits.
        return (
            self.__class__ == other.__class__
            and self.refs == other.refs
            and len(self.conds) == len(other.conds)
            and all(c1 == c2 for c1, c2 in zip(self.conds, other.conds))
            and self.consequent == other.consequent
        )

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__
@python_2_unicode_compatible
class BoxerNot(AbstractBoxerDrs):
    """Negation of a DRS."""

    def __init__(self, drs):
        AbstractBoxerDrs.__init__(self)
        self.drs = drs

    def _variables(self):
        # Negation introduces no variables of its own.
        return self.drs._variables()

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        return BoxerNot(self.drs.clean())

    def renumber_sentences(self, f):
        return BoxerNot(self.drs.renumber_sentences(f))

    def __repr__(self):
        return 'not(%s)' % (self.drs)

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.drs == other.drs

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__
@python_2_unicode_compatible
class BoxerIndexed(AbstractBoxerDrs):
    """A Boxer atom carrying its provenance: discourse id, sentence index and
    word indices. Subclasses define __iter__ over their payload fields and
    _pred() for the functor name used in repr."""

    def __init__(self, discourse_id, sent_index, word_indices):
        AbstractBoxerDrs.__init__(self)
        self.discourse_id = discourse_id
        self.sent_index = sent_index
        self.word_indices = word_indices

    def atoms(self):
        return set([self])

    def __eq__(self, other):
        # Guard clauses mirror the original short-circuiting `and` chain.
        if self.__class__ != other.__class__:
            return False
        if self.discourse_id != other.discourse_id:
            return False
        if self.sent_index != other.sent_index:
            return False
        if self.word_indices != other.word_indices:
            return False
        return reduce(operator.and_, (s == o for s, o in zip(self, other)))

    def __ne__(self, other):
        return not self == other

    __hash__ = AbstractBoxerDrs.__hash__

    def __repr__(self):
        header = '%s(%s, %s, [%s]' % (
            self._pred(),
            self.discourse_id,
            self.sent_index,
            ', '.join("%s" % wi for wi in self.word_indices),
        )
        return header + ''.join(', %s' % v for v in self) + ')'
class BoxerPred(BoxerIndexed):
    """An indexed unary predication: `name` (with POS and sense) on a variable."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.pos = pos
        self.sense = sense

    def _variables(self):
        return (set([self.var]), set(), set())

    def change_var(self, var):
        # Copy with a different variable.
        return BoxerPred(
            self.discourse_id, self.sent_index, self.word_indices,
            var, self.name, self.pos, self.sense,
        )

    def clean(self):
        # Copy with a normalized predicate name.
        return BoxerPred(
            self.discourse_id, self.sent_index, self.word_indices,
            self.var, self._clean_name(self.name), self.pos, self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with a remapped sentence index.
        return BoxerPred(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var, self.name, self.pos, self.sense,
        )

    def __iter__(self):
        return iter((self.var, self.name, self.pos, self.sense))

    def _pred(self):
        return 'pred'
class BoxerNamed(BoxerIndexed):
    """An indexed named-entity condition on a variable."""

    def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.name = name
        self.type = type
        self.sense = sense

    def _variables(self):
        return (set([self.var]), set(), set())

    def change_var(self, var):
        # Copy with a different variable.
        return BoxerNamed(
            self.discourse_id, self.sent_index, self.word_indices,
            var, self.name, self.type, self.sense,
        )

    def clean(self):
        # Copy with a normalized entity name.
        return BoxerNamed(
            self.discourse_id, self.sent_index, self.word_indices,
            self.var, self._clean_name(self.name), self.type, self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with a remapped sentence index.
        return BoxerNamed(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var, self.name, self.type, self.sense,
        )

    def __iter__(self):
        return iter((self.var, self.name, self.type, self.sense))

    def _pred(self):
        return 'named'
class BoxerRel(BoxerIndexed):
    """An indexed binary relation between two variables."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2
        self.rel = rel
        self.sense = sense

    def _variables(self):
        return (set([self.var1, self.var2]), set(), set())

    def clean(self):
        # Copy with a normalized relation name.
        return BoxerRel(
            self.discourse_id, self.sent_index, self.word_indices,
            self.var1, self.var2, self._clean_name(self.rel), self.sense,
        )

    def renumber_sentences(self, f):
        # Copy with a remapped sentence index.
        return BoxerRel(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var1, self.var2, self.rel, self.sense,
        )

    def __iter__(self):
        return iter((self.var1, self.var2, self.rel, self.sense))

    def _pred(self):
        return 'rel'
class BoxerProp(BoxerIndexed):
    """An indexed proposition: a variable labelling an embedded DRS."""

    def __init__(self, discourse_id, sent_index, word_indices, var, drs):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.drs = drs

    def _variables(self):
        # The labelling variable is a proposition; merge in the inner DRS's sets.
        own = (set(), set(), set([self.var]))
        return tuple(map(operator.or_, own, self.drs._variables()))

    def referenced_labels(self):
        return set([self.drs])

    def atoms(self):
        return self.drs.atoms()

    def clean(self):
        return BoxerProp(
            self.discourse_id, self.sent_index, self.word_indices,
            self.var, self.drs.clean(),
        )

    def renumber_sentences(self, f):
        # Recurse into the embedded DRS as well.
        return BoxerProp(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var, self.drs.renumber_sentences(f),
        )

    def __iter__(self):
        return iter((self.var, self.drs))

    def _pred(self):
        return 'prop'
class BoxerEq(BoxerIndexed):
    """An indexed equality between two variables."""

    def __init__(self, discourse_id, sent_index, word_indices, var1, var2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var1 = var1
        self.var2 = var2

    def _variables(self):
        return (set([self.var1, self.var2]), set(), set())

    def atoms(self):
        # Equalities contribute no atoms.
        return set()

    def renumber_sentences(self, f):
        return BoxerEq(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var1, self.var2,
        )

    def __iter__(self):
        return iter((self.var1, self.var2))

    def _pred(self):
        return 'eq'
class BoxerCard(BoxerIndexed):
    """An indexed cardinality constraint on a variable."""

    def __init__(self, discourse_id, sent_index, word_indices, var, value, type):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.var = var
        self.value = value
        self.type = type

    def _variables(self):
        return (set([self.var]), set(), set())

    def renumber_sentences(self, f):
        return BoxerCard(
            self.discourse_id, f(self.sent_index), self.word_indices,
            self.var, self.value, self.type,
        )

    def __iter__(self):
        return iter((self.var, self.value, self.type))

    def _pred(self):
        return 'card'
class BoxerOr(BoxerIndexed):
    """An indexed disjunction of two DRSs."""

    def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.drs1 = drs1
        self.drs2 = drs2

    def _variables(self):
        return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables()))

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        return BoxerOr(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.drs1.clean(),
            self.drs2.clean(),
        )

    def renumber_sentences(self, f):
        # Bug fix: also renumber the nested disjunct DRSs. The original left
        # them untouched, unlike clean() here and renumber_sentences() on
        # BoxerNot/BoxerProp/BoxerDrs, so nested sentence indices went stale.
        return BoxerOr(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.drs1.renumber_sentences(f),
            self.drs2.renumber_sentences(f),
        )

    def __iter__(self):
        return iter((self.drs1, self.drs2))

    def _pred(self):
        return 'or'
class BoxerWhq(BoxerIndexed):
    """An indexed wh-question: answer types, restrictor DRS, question
    variable, and body DRS."""

    def __init__(
        self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2
    ):
        BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
        self.ans_types = ans_types
        self.drs1 = drs1
        self.variable = variable
        self.drs2 = drs2

    def _variables(self):
        # Bug fix: operator.or_ is binary, but the original passed THREE
        # iterables to map(), so each call received three arguments and
        # raised TypeError. Union the two DRSs' variable sets first, then
        # fold in the question variable.
        combined = tuple(
            map(operator.or_, self.drs1._variables(), self.drs2._variables())
        )
        return tuple(
            map(operator.or_, (set([self.variable]), set(), set()), combined)
        )

    def atoms(self):
        return self.drs1.atoms() | self.drs2.atoms()

    def clean(self):
        return BoxerWhq(
            self.discourse_id,
            self.sent_index,
            self.word_indices,
            self.ans_types,
            self.drs1.clean(),
            self.variable,
            self.drs2.clean(),
        )

    def renumber_sentences(self, f):
        # Bug fix: renumber the nested DRSs too, matching clean() and the
        # other recursive containers (BoxerNot, BoxerProp, BoxerDrs).
        return BoxerWhq(
            self.discourse_id,
            f(self.sent_index),
            self.word_indices,
            self.ans_types,
            self.drs1.renumber_sentences(f),
            self.variable,
            self.drs2.renumber_sentences(f),
        )

    def __iter__(self):
        return iter(
            ('[' + ','.join(self.ans_types) + ']', self.drs1, self.variable, self.drs2)
        )

    def _pred(self):
        return 'whq'
class PassthroughBoxerDrsInterpreter(object):
    """Interpreter that returns Boxer expressions unchanged."""

    def interpret(self, ex):
        # Identity transform: no conversion is applied.
        return ex
class NltkDrtBoxerDrsInterpreter(object):
    def __init__(self, occur_index=False):
        # When True, _add_occur_indexing presumably augments predicate names
        # with occurrence (sentence/word index) information -- confirm against
        # _add_occur_indexing's implementation.
        self._occur_index = occur_index
def interpret(self, ex):
"""
:param ex: ``AbstractBoxerDrs``
:return: ``DrtExpression``
"""
if isinstance(ex, BoxerDrs):
drs = DRS(
[Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds))
)
if ex.consequent is not None:
drs.consequent = self.interpret(ex.consequent)
return drs
elif isinstance(ex, BoxerNot):
return DrtNegatedExpression(self.interpret(ex.drs))
elif isinstance(ex, BoxerPred):
pred = self._add_occur_indexing('%s_%s' % (ex.pos, ex.name), ex)
return self._make_atom(pred, ex.var)
elif isinstance(ex, BoxerNamed):
pred = self._add_occur_indexing('ne_%s_%s' % (ex.type, ex.name), ex)
return self._make_atom(pred, ex.var)
elif isinstance(ex, BoxerRel):
pred = self._add_occur_indexing('%s' % (ex.rel), ex)
return self._make_atom(pred, ex.var1, ex.var2)
elif isinstance(ex, BoxerProp):
return DrtProposition(Variable(ex.var), self.interpret(ex.drs))
elif isinstance(ex, BoxerEq):
return DrtEqualityExpression(
DrtVariableExpression(Variable(ex.var1)),
DrtVariableExpression(Variable(ex.var2)),
)
elif isinstance(ex, BoxerCard):
pred | |
# Source repository: osoco/better-ways-of-thinking-about-software
"""
This module contains celery task functions for handling the management of subtasks.
"""
import json
import logging
from contextlib import contextmanager
from datetime import datetime
from time import time
from uuid import uuid4
import psutil
from celery.states import READY_STATES, RETRY, SUCCESS
from django.core.cache import cache
from django.db import DatabaseError, transaction
from common.djangoapps.util.db import outer_atomic
from .exceptions import DuplicateTaskException
from .models import PROGRESS, QUEUING, InstructorTask
TASK_LOG = logging.getLogger('edx.celery.task')
# Lock expiration should be long enough to allow a subtask to complete.
SUBTASK_LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes
# Number of times to retry if a subtask update encounters a lock on the InstructorTask.
# (These are recursive retries, so don't make this number too large.)
MAX_DATABASE_LOCK_RETRIES = 5
def _get_number_of_subtasks(total_num_items, items_per_task):
"""
Determines number of subtasks that would be generated by _generate_items_for_subtask.
This needs to be calculated before the query is executed so that the list of all subtasks can be
stored in the InstructorTask before any subtasks are started.
The number of subtask_id values returned by this should match the number of chunks returned
by the generate_items_for_subtask generator.
"""
num_subtasks, remainder = divmod(total_num_items, items_per_task)
if remainder:
num_subtasks += 1
return num_subtasks
@contextmanager
def track_memory_usage(metric, course_id):  # lint-amnesty, pylint: disable=unused-argument
    """
    Context manager to track how much memory (in bytes) a given process uses.

    Metrics will look like: 'course_email.subtask_generation.memory.rss'
    or 'course_email.subtask_generation.memory.vms'.
    """
    memory_types = ['rss', 'vms']
    process = psutil.Process()
    baseline_memory_info = process.memory_info()
    baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
    yield
    # Fix: take a single memory snapshot after the wrapped block runs, instead
    # of re-querying process.memory_info() inside the loop -- re-querying gave
    # the rss and vms readings inconsistent (different-time) baselines.
    total_memory_info = process.memory_info()
    for memory_type, baseline_usage in zip(memory_types, baseline_usages):
        total_usage = getattr(total_memory_info, memory_type)
        memory_used = total_usage - baseline_usage  # lint-amnesty, pylint: disable=unused-variable
def _generate_items_for_subtask(
    item_querysets,  # lint-amnesty, # pylint: disable=bad-option-value
    item_fields,
    total_num_items,
    items_per_task,
    total_num_subtasks,
    course_id,
):
    """
    Generates a chunk of "items" that should be passed into a subtask.

    Arguments:
        `item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks.
        `item_fields` : the fields that should be included in the dict that is returned.
            These are in addition to the 'pk' field.
        `total_num_items` : the result of summing the count of each queryset in `item_querysets`.
        `items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
        `total_num_subtasks` : number of subtasks the items will be spread across, as computed by
            _get_number_of_subtasks.
        `course_id` : course_id of the course. Only needed for the track_memory_usage context manager.

    Returns: yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field.

    Warning: if the algorithm here changes, the _get_number_of_subtasks() method should similarly be changed.
    """
    num_items_queued = 0
    all_item_fields = list(item_fields)
    all_item_fields.append('pk')
    num_subtasks = 0
    items_for_task = []
    with track_memory_usage('course_email.subtask_generation.memory', course_id):
        for queryset in item_querysets:
            for item in queryset.values(*all_item_fields).iterator():
                # Emit a full chunk -- except never emit the final chunk early:
                # the last subtask absorbs any overflow (see note below).
                if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1:
                    yield items_for_task
                    num_items_queued += items_per_task
                    items_for_task = []
                    num_subtasks += 1
                items_for_task.append(item)
        # yield remainder items for task, if any
        if items_for_task:
            yield items_for_task
            num_items_queued += len(items_for_task)
    # Note, depending on what kind of DB is used, it's possible for the queryset
    # we iterate over to change in the course of the query. Therefore it's
    # possible that there are more (or fewer) items queued than were initially
    # calculated. It also means it's possible that the last task contains
    # more items than items_per_task allows. We expect this to be a small enough
    # number as to be negligible.
    if num_items_queued != total_num_items:
        TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items)  # lint-amnesty, pylint: disable=line-too-long
class SubtaskStatus:
    """
    Create and return a dict for tracking the status of a subtask.

    SubtaskStatus values are:

      'task_id' : id of subtask.  This is used to pass task information across retries.
      'attempted' : number of attempts -- should equal succeeded plus failed
      'succeeded' : number that succeeded in processing
      'skipped' : number that were not processed.
      'failed' : number that failed during processing
      'retried_nomax' : number of times the subtask has been retried for conditions that
          should not have a maximum count applied
      'retried_withmax' : number of times the subtask has been retried for conditions that
          should have a maximum count applied
      'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

    Object is not JSON-serializable, so to_dict and from_dict methods are provided so that
    it can be passed as a serializable argument to tasks (and be reconstituted within such tasks).

    In future, we may want to include specific error information
    indicating the reason for failure.
    Also, we should count up "not attempted" separately from attempted/failed.
    """

    def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):  # lint-amnesty, pylint: disable=line-too-long
        """Construct a SubtaskStatus object."""
        self.task_id = task_id
        # Unless told otherwise, everything attempted either succeeded or failed.
        self.attempted = (succeeded + failed) if attempted is None else attempted
        self.succeeded = succeeded
        self.failed = failed
        self.skipped = skipped
        self.retried_nomax = retried_nomax
        self.retried_withmax = retried_withmax
        if state is None:
            self.state = QUEUING
        else:
            self.state = state

    @classmethod
    def from_dict(cls, d):
        """Construct a SubtaskStatus object from a dict representation."""
        kwargs = dict(d)
        task_id = kwargs.pop('task_id')
        return SubtaskStatus.create(task_id, **kwargs)

    @classmethod
    def create(cls, task_id, **options):
        """Construct a SubtaskStatus object."""
        return cls(task_id, **options)

    def to_dict(self):
        """
        Output a dict representation of a SubtaskStatus object.

        Use for creating a JSON-serializable representation for use by tasks.
        """
        return self.__dict__

    def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
        """
        Update the result of a subtask with additional results.

        Kwarg arguments are incremented to the existing values.
        The exception is for `state`, which if specified is used to override the existing value.
        """
        for attr, delta in (
            ('attempted', succeeded + failed),
            ('succeeded', succeeded),
            ('failed', failed),
            ('skipped', skipped),
            ('retried_nomax', retried_nomax),
            ('retried_withmax', retried_withmax),
        ):
            setattr(self, attr, getattr(self, attr) + delta)
        if state is not None:
            self.state = state

    def get_retry_count(self):
        """Returns the number of retries of any kind."""
        return self.retried_nomax + self.retried_withmax

    def __repr__(self):
        """Return print representation of a SubtaskStatus object."""
        return f'SubtaskStatus<{self.to_dict()!r}>'

    def __str__(self):
        """Return unicode version of a SubtaskStatus object representation."""
        return str(repr(self))
def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
"""
Store initial subtask information to InstructorTask object.
The InstructorTask's "task_output" field is initialized. This is a JSON-serialized dict.
Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero,
as is the 'duration_ms' value. A 'start_time' is stored for later duration calculations,
and the total number of "things to do" is set, so the user can be told how much needs to be
done overall. The `action_name` is also stored, to help with constructing more readable
task_progress messages.
The InstructorTask's "subtasks" field is also initialized. This is also a JSON-serialized dict.
Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
subtasks. 'Total' is set here to the total number, while the other three are initialized to zero.
Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and
the InstructorTask's "status" will be changed to SUCCESS.
The "subtasks" field also contains a 'status' key, that contains a dict that stores status
information for each subtask. The value for each subtask (keyed by its task_id)
is its subtask status, as defined by SubtaskStatus.to_dict().
This information needs to be set up in the InstructorTask before any of the subtasks start
running. If not, there is a chance that the subtasks could complete before the parent task
is done creating subtasks. Doing so also simplifies the save() here, as it avoids the need
for locking.
Monitoring code should assume that if an InstructorTask has subtask information, that it should
rely on the status stored in the InstructorTask object, rather than status stored in the
corresponding AsyncResult.
"""
task_progress = {
'action_name': action_name,
'attempted': 0,
'failed': 0,
'skipped': 0,
'succeeded': 0,
'total': total_num,
'duration_ms': int(0),
'start_time': time()
}
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = PROGRESS
# Write out the subtasks information.
num_subtasks = len(subtask_id_list)
# Note that may not be necessary to store initial value with all those zeroes!
# Write out as a dict, so it will go more smoothly | |
<reponame>sjforeman/cora<filename>cora/scripts/makesky.py
"""Command line script for making sky maps.
"""
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
import os
import click
import numpy as np
class ListOfType(click.ParamType):
    """Click option type that accepts a list of objects of a given type.
    Must be a type accepted by a Python literal eval.
    Parameters
    ----------
    name : str
        Name of click type.
    type_ : type object
        Type to accept.
    """

    def __init__(self, name, type_):
        self.name = name
        self.type = type_

    def convert(self, value, param, ctx):
        import ast

        # Parse the raw option string as a Python literal; anything that
        # does not literal-eval cleanly is rejected.
        try:
            parsed = ast.literal_eval(value)
        except (SyntaxError, ValueError):
            self.fail('Could not parse "%s" into list.' % value)

        if not isinstance(parsed, list):
            self.fail('Could not parse "%s" into list.' % value)

        # Every element must be of the configured type.
        if any(not isinstance(item, self.type) for item in parsed):
            self.fail('Not all values were of type "%s"' % repr(self.type))

        return parsed
class FreqState(object):
    """Process and store the frequency spec from the command line.

    Holds the raw channelisation parameters set via the click options and
    lazily derives the channel centre frequencies and channel width.
    """

    def __init__(self):
        # Set the CHIME band as the internal default
        self.freq = (800.0, 400.0, 1025)
        self.channel_range = None
        self.channel_list = None
        self.channel_bin = 1
        self.freq_mode = "centre"

    @property
    def frequencies(self):
        """The frequency centres in MHz."""
        return self._calculate()[0]

    @property
    def freq_width(self):
        """The frequency width in MHz."""
        return self._calculate()[1]

    def _calculate(self):
        """Calculate the frequencies from the parameters.

        Returns
        -------
        frequencies : np.ndarray
            Channel centre frequencies in MHz, after binning and selection.
        df : float
            Channel width in MHz (signed in "edge" mode, matching the
            original behaviour).
        """
        # Generate the set of frequency channels given the parameters
        sf, ef, nf = self.freq
        if self.freq_mode == "centre":
            # sf/ef are the first/last channel centres; Nyquist channel skipped.
            df = abs(ef - sf) / nf
            frequencies = np.linspace(sf, ef, nf, endpoint=False)
        elif self.freq_mode == "centre_nyquist":
            # As above, but the final (Nyquist) channel is included.
            df = abs((ef - sf) / (nf - 1))
            frequencies = np.linspace(sf, ef, nf, endpoint=True)
        else:
            # "edge": sf/ef are the band edges; centres sit mid-channel.
            df = (ef - sf) / nf
            frequencies = sf + df * (np.arange(nf) + 0.5)

        # Rebin frequencies if needed (binning happens before selection)
        if self.channel_bin > 1:
            frequencies = frequencies.reshape(-1, self.channel_bin).mean(axis=1)
            df = df * self.channel_bin

        # Select a subset of channels if required; an explicit channel list
        # takes priority over a channel range.
        if self.channel_list is not None:
            frequencies = frequencies[self.channel_list]
        elif self.channel_range is not None:
            frequencies = frequencies[self.channel_range[0] : self.channel_range[1]]

        return frequencies, df

    @classmethod
    def _set_attr(cls, ctx, param, value):
        # click callback: stash each option value on the shared context object.
        state = ctx.ensure_object(cls)
        setattr(state, param.name, value)
        return value

    @classmethod
    def options(cls, f):
        """Attach the frequency-spec options to a click command."""
        FREQ = ListOfType("frequency list", int)

        options = [
            click.option(
                "--freq",
                help=(
                    "Define the frequency channelisation, give the start and stop "
                    "frequencies (in MHz) and the effective number of channels. "
                    "Default is for CHIME: FSTART=800.0, FSTOP=400.0, FNUM=1025"
                ),
                metavar="FSTART FSTOP FNUM",
                type=(float, float, int),
                # BUGFIX: was (800.0, 400.0, 1024), contradicting both the help
                # text above and the __init__ default of 1025 CHIME channels.
                default=(800.0, 400.0, 1025),
                expose_value=False,
                callback=cls._set_attr,
            ),
            click.option(
                "--channel-range",
                # BUGFIX: help previously said "Overriden by channel range",
                # but it is the channel *list* that takes priority (see
                # _calculate), and "Overriden" was a typo.
                help="Select a range of frequency channels. Overridden by channel list.",
                type=(int, int),
                metavar="CSTART CSTOP",
                default=(None, None),
                expose_value=False,
                callback=cls._set_attr,
            ),
            click.option(
                "--channel-list",
                help="Select a list of frequency channels. Takes priority over channel range.",
                type=FREQ,
                metavar="CHANNEL LIST",
                default=None,
                expose_value=False,
                callback=cls._set_attr,
            ),
            click.option(
                "--channel-bin",
                help="If set, average over BIN channels. The binning is done before channel selection.",
                metavar="BIN",
                type=int,
                default=1,
                expose_value=False,
                callback=cls._set_attr,
            ),
            click.option(
                "--freq-mode",
                type=click.Choice(["centre", "centre_nyquist", "edge"]),
                default="centre",
                help=(
                    'Choose if FSTART and FSTOP are the edges of the band ("edge"), '
                    "or whether they are the central frequencies of the first and "
                    "last channel, in this case the last (nyquist) frequency can "
                    'either be skipped ("centre", default) or included '
                    '("centre_nyquist"). The behaviour of the "centre" mode '
                    "matches the output of the CASPER PFB-FIR block."
                ),
                expose_value=False,
                callback=cls._set_attr,
            ),
        ]

        handle = click.make_pass_decorator(cls, ensure=True)(f)
        for option in options:
            handle = option(handle)
        return handle
def map_options(f):
    """The set of options for generating a map.

    Layers the common output options (resolution, polarisation, filename)
    on top of the frequency-spec options from FreqState.
    """
    decorated = FreqState.options(f)
    decorated = click.option(
        "--nside",
        help="Set the map resolution (default: 256)",
        metavar="NSIDE",
        default=256,
    )(decorated)
    decorated = click.option(
        "--pol",
        type=click.Choice(["full", "zero", "none"]),
        default="full",
        help="Pick polarisation mode. Full output, zero polarisation, or only return Stokes I (default: full).",
    )(decorated)
    decorated = click.option(
        "--filename",
        help="Output file [default=map.h5]",
        metavar="FILENAME",
        default="map.h5",
    )(decorated)
    return decorated
@click.group()
def cli():
    """Generate a map of the low frequency radio sky.
    To produce frequency bins compatible with the output of a CASPER based
    channelisation, for example in CHIME, set --freq-mode=centre and set the
    --freq parameter to the start and (missing) end frequencies, and then add
    one to the number of channels (for the missing Nyquist frequency).
    """
    # No body needed: subcommands register themselves via @cli.command().
@cli.command()
@map_options
@click.option(
    "--maxflux",
    default=1e6,
    type=float,
    help="Maximum flux of point included point source (in Jy). Default is 1 MJy.",
)
def foreground(fstate, nside, pol, filename, maxflux):
    """Generate a full foreground sky map.
    The requested map must have more than two frequencies for this type.
    """
    # NOTE(review): this guard rejects only < 2 frequencies, while the
    # docstring and message say "more than two" -- confirm the intended
    # minimum before changing either.
    if fstate.frequencies.shape[0] < 2:
        print("Number of frequencies must be more than two.")
        return
    # Deferred import: the cora models are only loaded when this subcommand runs.
    from cora.foreground import galaxy, pointsource
    # Read in arguments.
    gal = galaxy.ConstrainedGalaxy()
    gal.nside = nside
    gal.frequencies = fstate.frequencies
    # Fetch galactic sky
    cs = gal.getpolsky() if pol == "full" else gal.getsky()
    # Fetch point source maps, capped at the requested maximum flux
    ps = pointsource.CombinedPointSources.like_map(gal)
    ps.flux_max = maxflux
    cs = cs + (ps.getpolsky() if pol == "full" else ps.getsky())
    # Save map
    write_map(filename, cs, gal.frequencies, fstate.freq_width, pol != "none")
@cli.command()
@map_options
@click.option("--spectral-index", default="md", type=click.Choice(["md", "gsm", "gd"]))
def galaxy(fstate, nside, pol, filename, spectral_index):
    """Generate a Milky way only foreground map.
    Use Haslam (extrapolated with a spatially varying spectral index) as a base,
    and then generate random spatial, spectral and polarisation fluctuations for
    unconstrained modes.
    The requested map must have more than two frequencies for this type.
    """
    # NOTE(review): guard admits exactly 2 frequencies despite the "more than
    # two" wording -- confirm the intended minimum.
    if fstate.frequencies.shape[0] < 2:
        print("Number of frequencies must be more than two.")
        return
    # Deferred import; inside this body the module name `galaxy` shadows the
    # command function itself, which is harmless here.
    from cora.foreground import galaxy
    # Read in arguments.
    gal = galaxy.ConstrainedGalaxy()
    gal.nside = nside
    gal.frequencies = fstate.frequencies
    gal.spectral_map = spectral_index
    # Fetch galactic sky
    cs = gal.getpolsky() if pol == "full" else gal.getsky()
    # Save map
    write_map(filename, cs, gal.frequencies, fstate.freq_width, pol != "none")
@cli.command()
@map_options
@click.option(
    "--maxflux",
    default=1e6,
    type=float,
    help="Maximum flux of point included point source (in Jy). Default is 1 MJy.",
)
def pointsource(fstate, nside, pol, filename, maxflux):
    """Generate a point source only foreground map.
    For S > 4 Jy (at 600 MHz) use real point sources, for dimmer sources (but S >
    0.1 Jy at 151 MHz) generate a synthetic population, for even dimmer sources
    use a Gaussian realisation of unresolved sources.
    """
    from cora.foreground import pointsource

    # Configure the combined point-source model from the CLI parameters.
    ps = pointsource.CombinedPointSources()
    ps.nside = nside
    ps.frequencies = fstate.frequencies
    ps.flux_max = maxflux

    # Realise the sky, polarised or not, then write it out.
    if pol == "full":
        sky = ps.getpolsky()
    else:
        sky = ps.getsky()
    write_map(filename, sky, ps.frequencies, fstate.freq_width, pol != "none")
@cli.command("21cm")
@map_options
@click.option(
    "--eor",
    is_flag=True,
    help="Use parameters more suitable for reionisation epoch (rather than intensity mapping).",
)
@click.option(
    "--oversample",
    type=int,
    help="Oversample in redshift by 2**oversample_z + 1 to approximate finite width bins.",
)
def _21cm(fstate, nside, pol, filename, eor, oversample):
    """Generate a Gaussian simulation of the unresolved 21cm background.
    """
    from cora.signal import corr21cm

    # Pick the signal model: EoR-tuned parameters, or the intensity-mapping default.
    cr = corr21cm.EoR21cm() if eor else corr21cm.Corr21cm()
    cr.nside = nside
    cr.frequencies = fstate.frequencies
    # Oversampling parameter defaults to 3 when the option is omitted.
    cr.oversample = 3 if oversample is None else oversample

    # Generate signal realisation and save.
    if pol == "full":
        sg_map = cr.getpolsky()
    else:
        sg_map = cr.getsky()
    write_map(filename, sg_map, cr.frequencies, fstate.freq_width, pol != "none")
@cli.command()
@map_options
def gaussianfg(fstate, nside, pol, filename):
    """Generate a full-sky Gaussian random field for synchrotron emission.
    """
    # BUGFIX: the signature previously also took `eor, oversample`, which no
    # decorator here supplies, so click invocation raised a TypeError.
    import numpy as np
    from cora.core import skysim
    from cora.util import hputil
    from cora.foreground import galaxy

    fsyn = galaxy.FullSkySynchrotron()
    fpol = galaxy.FullSkyPolarisedSynchrotron()

    # Set frequency parameters
    fsyn.frequencies = fstate.frequencies
    nfreq = len(fsyn.frequencies)
    lmax = 3 * nside
    npol = 4 if pol == "full" else 1

    # Angular power spectra: Stokes-I synchrotron always; polarised terms
    # only when a full-pol map was requested.
    cv_fg = np.zeros((lmax + 1, npol, nfreq, npol, nfreq))
    cv_fg[:, 0, :, 0, :] = skysim.clarray(
        fsyn.angular_powerspectrum, lmax, fsyn.nu_pixels
    )
    # BUGFIX: previously tested `ctx.obj.full_pol`, but no `ctx` exists in
    # this scope (NameError); gate on the --pol option like the other commands.
    if pol == "full":
        cv_fg[:, 1, :, 1, :] = skysim.clarray(
            fpol.angular_powerspectrum, lmax, fsyn.nu_pixels
        )
        cv_fg[:, 2, :, 2, :] = skysim.clarray(
            fpol.angular_powerspectrum, lmax, fsyn.nu_pixels
        )
    cv_fg = cv_fg.reshape(lmax + 1, npol * nfreq, npol * nfreq)

    # Draw a Gaussian realisation in harmonic space and transform to maps.
    alms = skysim.mkfullsky(cv_fg, nside, alms=True).reshape(
        npol, nfreq, lmax + 1, lmax + 1
    )
    alms = alms.transpose((1, 0, 2, 3))
    maps = hputil.sphtrans_inv_sky(alms, nside)
    write_map(filename, maps, fsyn.frequencies, fstate.freq_width, pol != "none")
@cli.command()
@map_options
@click.option("--ra", type=float, help="RA (in degrees) for source to add.", default=0)
@click.option("--dec", type=float, help="DEC (in degrees) of source to add.", default=0)
def singlesource(fstate, nside, pol, filename, ra, dec):
"""Generate a test map with a single | |
<filename>IfxAlchemy/base.py
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM & OpenInformix |
# | |
# | (C) Copyright IBM Corporation 2008, 2016. |
# +--------------------------------------------------------------------------+
# | This module complies with SQLAlchemy 0.8 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | |
# | Authors: <NAME>, <NAME> |
# | |
# +--------------------------------------------------------------------------+
# ///////////////////////////////////////////////////////////////////////////
# +--------------------------------------------------------------------------+
# | Authors: <NAME>, <NAME>, <NAME> |
# | Contributors: <NAME>, <NAME> |
# +--------------------------------------------------------------------------+
"""Support for Informix database
"""
import datetime, re
from sqlalchemy import types as sa_types
from sqlalchemy import schema as sa_schema
from sqlalchemy import util
from sqlalchemy.sql import compiler
from sqlalchemy.sql import operators
from sqlalchemy.engine import default
from sqlalchemy import __version__ as SA_Version
from . import reflection as ifx_reflection
from sqlalchemy.types import BLOB, CHAR, CLOB, DATE, DATETIME, INTEGER,\
SMALLINT, BIGINT, DECIMAL, NUMERIC, REAL, TIME, TIMESTAMP,\
VARCHAR, FLOAT
SA_Version = [int(ver_token) for ver_token in SA_Version.split('.')[0:2]]
# as documented from:
RESERVED_WORDS = set(
['activate', 'disallow', 'locale', 'result', 'add', 'disconnect', 'localtime',
'result_set_locator', 'after', 'distinct', 'localtimestamp', 'return', 'alias',
'do', 'locator', 'returns', 'all', 'double', 'locators', 'revoke', 'allocate', 'drop',
'lock', 'right', 'allow', 'dssize', 'lockmax', 'rollback', 'alter', 'dynamic',
'locksize', 'routine', 'and', 'each', 'long', 'row', 'any', 'editproc', 'loop',
'row_number', 'as', 'else', 'maintained', 'rownumber', 'asensitive', 'elseif',
'materialized', 'rows', 'associate', 'enable', 'maxvalue', 'rowset', 'asutime',
'encoding', 'microsecond', 'rrn', 'at', 'encryption', 'microseconds', 'run',
'attributes', 'end', 'minute', 'savepoint', 'audit', 'end-exec', 'minutes', 'schema',
'authorization', 'ending', 'minvalue', 'scratchpad', 'aux', 'erase', 'mode', 'scroll',
'auxiliary', 'escape', 'modifies', 'search', 'before', 'every', 'month', 'second',
'begin', 'except', 'months', 'seconds', 'between', 'exception', 'new', 'secqty',
'binary', 'excluding', 'new_table', 'security', 'bufferpool', 'exclusive',
'nextval', 'select', 'by', 'execute', 'no', 'sensitive', 'cache', 'exists', 'nocache',
'sequence', 'call', 'exit', 'nocycle', 'session', 'called', 'explain', 'nodename',
'session_user', 'capture', 'external', 'nodenumber', 'set', 'cardinality',
'extract', 'nomaxvalue', 'signal', 'cascaded', 'fenced', 'nominvalue', 'simple',
'case', 'fetch', 'none', 'some', 'cast', 'fieldproc', 'noorder', 'source', 'ccsid',
'file', 'normalized', 'specific', 'char', 'final', 'not', 'sql', 'character', 'for',
'null', 'sqlid', 'check', 'foreign', 'nulls', 'stacked', 'close', 'free', 'numparts',
'standard', 'cluster', 'from', 'obid', 'start', 'collection', 'full', 'of', 'starting',
'collid', 'function', 'old', 'statement', 'column', 'general', 'old_table', 'static',
'comment', 'generated', 'on', 'stay', 'commit', 'get', 'open', 'stogroup', 'concat',
'global', 'optimization', 'stores', 'condition', 'go', 'optimize', 'style', 'connect',
'goto', 'option', 'substring', 'connection', 'grant', 'or', 'summary', 'constraint',
'graphic', 'order', 'synonym', 'contains', 'group', 'out', 'sysfun', 'continue',
'handler', 'outer', 'sysibm', 'count', 'hash', 'over', 'sysproc', 'count_big',
'hashed_value', 'overriding', 'system', 'create', 'having', 'package',
'system_user', 'cross', 'hint', 'padded', 'table', 'current', 'hold', 'pagesize',
'tablespace', 'current_date', 'hour', 'parameter', 'then', 'current_lc_ctype',
'hours', 'part', 'time', 'current_path', 'identity', 'partition', 'timestamp',
'current_schema', 'if', 'partitioned', 'to', 'current_server', 'immediate',
'partitioning', 'transaction', 'current_time', 'in', 'partitions', 'trigger',
'current_timestamp', 'including', 'password', 'trim', 'current_timezone',
'inclusive', 'path', 'type', 'current_user', 'increment', 'piecesize', 'undo',
'cursor', 'index', 'plan', 'union', 'cycle', 'indicator', 'position', 'unique', 'data',
'inherit', 'precision', 'until', 'database', 'inner', 'prepare', 'update',
'datapartitionname', 'inout', 'prevval', 'usage', 'datapartitionnum',
'insensitive', 'primary', 'user', 'date', 'insert', 'priqty', 'using', 'day',
'integrity', 'privileges', 'validproc', 'days', 'intersect', 'procedure', 'value',
'into', 'program', 'values', 'is', 'psid', 'variable',
'isobid', 'query', 'variant', 'dbinfo', 'isolation', 'queryno', 'vcat',
'dbpartitionname', 'iterate', 'range', 'version', 'dbpartitionnum', 'jar', 'rank',
'view', 'deallocate', 'java', 'read', 'volatile', 'declare', 'join', 'reads', 'volumes',
'default', 'key', 'recovery', 'when', 'defaults', 'label', 'references', 'whenever',
'definition', 'language', 'referencing', 'where', 'delete', 'lateral', 'refresh',
'while', 'dense_rank', 'lc_ctype', 'release', 'with', 'denserank', 'leave', 'rename',
'without', 'describe', 'left', 'repeat', 'wlm', 'descriptor', 'like', 'reset', 'write',
'deterministic', 'linktype', 'resignal', 'xmlelement', 'diagnostics', 'local',
'restart', 'year', 'disable', 'localdate', 'restrict', 'years', '', 'abs', 'grouping',
'regr_intercept', 'are', 'int', 'regr_r2', 'array', 'integer', 'regr_slope',
'asymmetric', 'intersection', 'regr_sxx', 'atomic', 'interval', 'regr_sxy', 'avg',
'large', 'regr_syy', 'bigint', 'leading', 'rollup', 'blob', 'ln', 'scope', 'boolean',
'lower', 'similar', 'both', 'match', 'smallint', 'ceil', 'max', 'specifictype',
'ceiling', 'member', 'sqlexception', 'char_length', 'merge', 'sqlstate',
'character_length', 'method', 'sqlwarning', 'clob', 'min', 'sqrt', 'coalesce', 'mod',
'stddev_pop', 'collate', 'module', 'stddev_samp', 'collect', 'multiset',
'submultiset', 'convert', 'national', 'sum', 'corr', 'natural', 'symmetric',
'corresponding', 'nchar', 'tablesample', 'covar_pop', 'nclob', 'timezone_hour',
'covar_samp', 'normalize', 'timezone_minute', 'cube', 'nullif', 'trailing',
'cume_dist', 'numeric', 'translate', 'current_default_transform_group',
'octet_length', 'translation', 'current_role', 'only', 'treat',
'current_transform_group_for_type', 'overlaps', 'true', 'dec', 'overlay',
'uescape', 'decimal', 'percent_rank', 'unknown', 'deref', 'percentile_cont',
'unnest', 'element', 'percentile_disc', 'upper', 'exec', 'power', 'var_pop', 'exp',
'real', 'var_samp', 'false', 'recursive', 'varchar', 'filter', 'ref', 'varying',
'float', 'regr_avgx', 'width_bucket', 'floor', 'regr_avgy', 'window', 'fusion',
'regr_count', 'within', 'asc'])
class _IFX_Boolean(sa_types.Boolean):
    """Boolean for Informix: bound as '0'/'1' strings, read back as bool."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # Preserve NULLs; coerce anything else to a Python bool.
            return None if value is None else bool(value)
        return process

    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            return '1' if value else '0'
        return process
class _IFX_Date(sa_types.Date):
    """Date for Informix: truncates datetimes to dates in both directions."""

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is None:
                return None
            # The driver may hand back a full datetime; keep only the date part.
            if isinstance(value, datetime.datetime):
                return datetime.date(value.year, value.month, value.day)
            return value
        return process

    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime.datetime):
                value = datetime.date(value.year, value.month, value.day)
            # Bind as the ISO string form of the date.
            return str(value)
        return process
class DOUBLE(sa_types.Numeric):
    __visit_name__ = 'DOUBLE'
class LONGVARCHAR(sa_types.VARCHAR):
    # BUGFIX: was `__visit_name_` (single trailing underscore), so the type
    # compiler never dispatched to visit_LONGVARCHAR and fell back to the
    # inherited VARCHAR visit name.
    __visit_name__ = 'LONGVARCHAR'
class DBCLOB(sa_types.CLOB):
    __visit_name__ = "DBCLOB"
class GRAPHIC(sa_types.CHAR):
    __visit_name__ = "GRAPHIC"
class VARGRAPHIC(sa_types.Unicode):
    __visit_name__ = "VARGRAPHIC"
class LONGVARGRAPHIC(sa_types.UnicodeText):
    __visit_name__ = "LONGVARGRAPHIC"
class XML(sa_types.Text):
    __visit_name__ = "XML"
# Map generic SQLAlchemy types to their Informix-specific implementations.
colspecs = {
    sa_types.Boolean: _IFX_Boolean,
    sa_types.Date: _IFX_Date
}
# Reflection lookup: Informix type name -> SQLAlchemy type class.
ischema_names = {
    'BLOB': BLOB,
    'CHAR': CHAR,
    'CHARACTER': CHAR,
    'CLOB': CLOB,
    'DATE': DATE,
    'DATETIME': DATETIME,
    'INTEGER': INTEGER,
    'SMALLINT': SMALLINT,
    'BIGINT': BIGINT,
    'DECIMAL': DECIMAL,
    'NUMERIC': NUMERIC,
    'REAL': REAL,
    'DOUBLE': DOUBLE,
    'FLOAT': FLOAT,
    'TIME': TIME,
    'TIMESTAMP': TIMESTAMP,
    'VARCHAR': VARCHAR,
    'LONGVARCHAR': LONGVARCHAR,
    'XML': XML,
    'GRAPHIC': GRAPHIC,
    'VARGRAPHIC': VARGRAPHIC,
    'LONGVARGRAPHIC': LONGVARGRAPHIC,
    'DBCLOB': DBCLOB
}
class IfxTypeCompiler(compiler.GenericTypeCompiler):
    """Render Informix DDL type names for SQLAlchemy type objects."""

    # --- Temporal types -------------------------------------------------
    def visit_TIMESTAMP(self, type_):
        return "TIMESTAMP"

    def visit_DATE(self, type_):
        return "DATE"

    def visit_TIME(self, type_):
        return "TIME"

    def visit_DATETIME(self, type_):
        # DATETIME renders identically to TIMESTAMP.
        return self.visit_TIMESTAMP(type_)

    # --- Numeric types --------------------------------------------------
    def visit_SMALLINT(self, type_):
        return "SMALLINT"

    def visit_INT(self, type_):
        return "INT"

    def visit_BIGINT(self, type_):
        return "BIGINT"

    def visit_FLOAT(self, type_):
        if type_.precision is None:
            return "FLOAT"
        return "FLOAT(%s)" % (type_.precision,)

    def visit_DOUBLE(self, type_):
        return "DOUBLE"

    def visit_DECIMAL(self, type_):
        # Informix DECIMAL defaults: precision 31, scale 0.
        if not type_.precision:
            return "DECIMAL(31, 0)"
        if not type_.scale:
            return "DECIMAL(%s, 0)" % (type_.precision,)
        return "DECIMAL(%s, %s)" % (type_.precision, type_.scale)

    # --- Large objects / markup -----------------------------------------
    def visit_XML(self, type_):
        return "XML"

    def visit_CLOB(self, type_):
        return "CLOB"

    def visit_BLOB(self, type_):
        if type_.length in (None, 0):
            return "BLOB(1M)"
        return "BLOB(%s)" % (type_.length,)

    def visit_DBCLOB(self, type_):
        if type_.length in (None, 0):
            return "DBCLOB(1M)"
        return "DBCLOB(%s)" % (type_.length,)

    # --- Character types -------------------------------------------------
    def visit_VARCHAR(self, type_):
        return "VARCHAR(%s)" % (type_.length,)

    def visit_LONGVARCHAR(self, type_):
        return "LONG VARCHAR"

    def visit_VARGRAPHIC(self, type_):
        return "VARGRAPHIC(%s)" % (type_.length,)

    def visit_LONGVARGRAPHIC(self, type_):
        return "LONG VARGRAPHIC"

    def visit_CHAR(self, type_):
        if type_.length in (None, 0):
            return "CHAR"
        return "CHAR(%s)" % (type_.length,)

    def visit_GRAPHIC(self, type_):
        if type_.length in (None, 0):
            return "GRAPHIC"
        return "GRAPHIC(%s)" % (type_.length,)

    # --- Generic-type dispatch -------------------------------------------
    def visit_numeric(self, type_):
        return self.visit_DECIMAL(type_)

    def visit_datetime(self, type_):
        return self.visit_TIMESTAMP(type_)

    def visit_date(self, type_):
        return self.visit_DATE(type_)

    def visit_time(self, type_):
        return self.visit_TIME(type_)

    def visit_integer(self, type_):
        return self.visit_INT(type_)

    def visit_boolean(self, type_):
        # Booleans are stored in a SMALLINT column (see _IFX_Boolean).
        return self.visit_SMALLINT(type_)

    def visit_float(self, type_):
        return self.visit_FLOAT(type_)

    def visit_unicode(self, type_):
        return self.visit_VARGRAPHIC(type_)

    def visit_unicode_text(self, type_):
        return self.visit_LONGVARGRAPHIC(type_)

    def visit_string(self, type_):
        return self.visit_VARCHAR(type_)

    def visit_TEXT(self, type_):
        return self.visit_CLOB(type_)

    def visit_large_binary(self, type_):
        return self.visit_BLOB(type_)
class IfxCompiler(compiler.SQLCompiler):
if SA_Version < [0, 9]:
def visit_false(self, expr, **kw):
return '0'
def visit_true(self, expr, **kw):
return '1'
def get_cte_preamble(self, recursive):
return "WITH"
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def for_update_clause(self, select):
if select.for_update == True:
return ' WITH RS USE AND KEEP UPDATE LOCKS'
elif select.for_update == 'read':
return ' WITH RS USE AND KEEP SHARE LOCKS'
else:
return ''
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (self.process(binary.left),
self.process(binary.right))
def limit_clause(self, select,**kwargs):
if (select._limit is not None) and (select._offset is None):
return " FETCH FIRST %s ROWS ONLY" % select._limit
else:
return ""
def visit_select(self, select, **kwargs):
limit, offset = select._limit, select._offset
sql_ori = compiler.SQLCompiler.visit_select(self, select, **kwargs)
if offset is not None:
__rownum = 'Z.__ROWNUM'
sql_split = re.split("[\s+]FROM ", sql_ori, 1)
sql_sec = ""
sql_sec = " \nFROM %s " % ( sql_split[1] )
dummyVal = "Z.__Ifx_"
sql_pri = ""
sql_sel = "SELECT "
if select._distinct:
sql_sel = "SELECT DISTINCT "
sql_select_token = sql_split[0].split( | |
dirs_and_neighbors(self):
'''Return a list of (dir, pos) pairs for each neighboring position within the grid.'''
return [(d, p) for d, p in ((_d, self.neighbor(_d)) for _d in DIRECTIONS) if p is not None]
class Atom:
    '''Represent an Atom, including its element, grid position, and attached bonds.
    '''

    def __init__(self, element, pos):
        # Bond counts per direction: up, right, down, left.
        self.bonds = [0, 0, 0, 0]
        self.set_element(element)
        self.set_pos(pos)

    def __str__(self):
        # Pad element symbol to two chars
        return self.symbol.rjust(2)

    def __repr__(self):
        return f'Atom({self.symbol}, {self.pos}, {self.bonds})'

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return (self.element == other.element
                and self.pos == other.pos
                and self.bonds == other.bonds)

    def get_json_str(self):
        '''Return a string representing this atom in the level json's format.'''
        return f'{self.col}{self.row}{self.atomic_num}{self.bonds[RIGHT]}{self.bonds[DOWN]}'

    def remaining_bonds(self):
        '''Return the # of remaining bonds this atom is allowed.'''
        return self.max_bonds - sum(self.bonds)

    def set_pos(self, pos):
        '''Change this atom's position in the grid.'''
        self.pos = pos
        # Mirror row/col as plain attributes for convenient access.
        self.row = pos.row
        self.col = pos.col

    def set_element(self, element):
        '''Change this atom's element; refuse if existing bonds exceed the new max.'''
        if sum(self.bonds) > element.max_bonds:
            raise ValueError(f"Too many bonds to change atom {self} to element {element}")
        self.element = element
        # Exposing some sub-attributes for convenience
        self.atomic_num = element.atomic_num
        self.symbol = element.symbol
        self.max_bonds = element.max_bonds
class Molecule:
'''Represents an input/output zone and the molecule constructed therein.
'''
def __init__(self, large_output=False):
    self.name = 'Randite'
    self.large_output = large_output
    # Large output zones are twice as tall: 8 rows instead of 4
    # (bool arithmetic: True adds 4).
    self.num_rows = 4 + 4*large_output
    self.num_cols = 4
    self.formula = Formula()
    # TODO: Could potentially merge grid and used_posns into a single GridPos:Atom dict.
    # The main convenience of the grid at this point is that it'll automatically yell at us
    # if we start asking for atoms from a GridPos that's out-of-bounds.
    self.grid = [[None, None, None, None] for _ in range(self.num_rows)]
    self.used_posns = set() # Tracked so that we can easily iterate over the atoms in the molecule
    # To optimize the performance of available_positions(), we'll roughly track the # of open
    # bonds available on this molecule.
    # An atom with no open adjacencies in the grid contributes 0 to this count.
    self.open_bonds = 0
def __getitem__(self, pos):
    '''Return the atom at the specified grid position or None.'''
    return self.grid[pos.row][pos.col]
def __setitem__(self, pos, item):
    '''Set the specified grid position (item should be None or an Atom).

    Keeps used_posns in sync: clearing a cell un-registers its position,
    storing an atom registers it.
    '''
    self.grid[pos.row][pos.col] = item
    if item is None:
        self.used_posns.remove(pos)
    else:
        self.used_posns.add(pos)
def __iter__(self):
    '''Iterate over each atom in this molecule. Order of iteration is not defined.'''
    return (self[p] for p in self.used_posns)
def __len__(self):
    '''Return the # of atoms in this molecule.'''
    return len(self.used_posns)
def __str__(self):
    '''Pretty-print this molecule.

    Renders the zone as ASCII art: each atom as a 2-char symbol, bond
    counts drawn between neighbors, and '?'/'??' where two adjacent atoms
    disagree about the bond connecting them.
    '''
    result = ' _________________ \n' # Border of the input/output zone
    for r in range(self.num_rows):
        result += '|'
        for c in range(self.num_cols):
            atom = self.grid[r][c]
            # Represent any atoms here
            if atom is None:
                result += 2*' '
            else:
                result += str(atom).rjust(2)
            # Represent any bonds to the right of the atom
            left_atom = atom
            right_atom = self.grid[r][c + 1] if c + 1 < self.num_cols else None
            bond_str = ' '
            # '?' flags a mismatch between the two atoms' bond records.
            if left_atom is not None and right_atom is not None \
               and left_atom.bonds[RIGHT] != right_atom.bonds[LEFT]:
                bond_str = '?'
            elif left_atom is not None and left_atom.bonds[RIGHT] != 0:
                bond_str = str(left_atom.bonds[RIGHT])
            elif right_atom is not None and right_atom.bonds[LEFT] != 0:
                bond_str = str(right_atom.bonds[LEFT])
            if c < self.num_cols - 1:
                result += ' ' + bond_str + ' '
        result += '|\n'
        # Add a row of vertical bonds
        if r < self.num_rows - 1:
            result += '|'
            for c in range(self.num_cols):
                top_atom = self.grid[r][c]
                if r + 1 < self.num_rows:
                    bottom_atom = self.grid[r + 1][c]
                else:
                    bottom_atom = None
                bond_str = ' '
                # '??' flags a vertical bond mismatch between the two atoms.
                if top_atom is not None and bottom_atom is not None \
                   and top_atom.bonds[DOWN] != bottom_atom.bonds[UP]:
                    bond_str = '??'
                elif top_atom is not None and top_atom.bonds[DOWN] != 0:
                    bond_str = ' ' + str(top_atom.bonds[DOWN])
                elif bottom_atom is not None and bottom_atom.bonds[UP] != 0:
                    bond_str = ' ' + str(bottom_atom.bonds[UP])
                result += bond_str
                if c < self.num_cols - 1:
                    result += 3*' '
            result += '|\n'
    result += '|_________________|\n'
    return result
__repr__ = __str__
def get_json_str(self):
'''Return a string representing this molecule in the level json's format.'''
result = f'{self.name};{self.formula.get_json_str()}'
for atom in self:
result += ';' + atom.get_json_str()
return result
def update_formula(self):
    '''To be called after mutating any atom elements. Update the formula of this molecule.'''
    # Rebuild the element tally from scratch, then swap it in.
    fresh = Formula()
    for atom in self:
        fresh[atom.element] += 1
    self.formula = fresh
def update_open_bonds(self):
'''Update the count of open bonds. Since we only care about updating it well
enough to know when it's 0, we'll ignore the triple bond limit, and count any open side of
an atom as adding the remainder of its max bond count to the open bonds.
'''
self.open_bonds = 0
for atom in self:
if any(self[pos] is None for pos in atom.pos.neighbors()):
self.open_bonds += atom.remaining_bonds() # Not exact but we don't need it to be
def open_positions(self):
'''Return a list of valid grid positions where an atom could be added to this molecule.'''
# For an empty molecule, all positions are open
if len(self) == 0:
return [GridPos(r, c, large_output=self.large_output)
for r in range(self.num_rows) for c in range(self.num_cols)]
# If there are no remaining bonds, we can skip the overhead of walking through the atoms
elif self.open_bonds == 0:
return []
checked_posns = set() # For O(1) checks on whether a position has already been added
for atom in self:
if atom.remaining_bonds() > 0:
for pos in atom.pos.neighbors():
if self[pos] is None and pos not in checked_posns:
checked_posns.add(pos)
return list(checked_posns)
def add_atom(self, new_atom):
    '''Adds the given Atom to this molecule. The Atom's position must be open in this molecule.
    Also adds any bonds specified by the incoming atom to its neighboring atoms.
    For convenience of more complex operations, it is allowable to add an atom with unfulfilled
    bonds or which is not connected to the rest of the molecule.

    Raises an Exception if the target grid cell is already occupied.
    '''
    if self[new_atom.pos] is not None:
        raise Exception(f"Conflict with existing atom; cannot add {repr(new_atom)} to \n{self}")
    # Add the atom into our grid / formula. Then add its bonds while re-calculating self.open_bonds
    self[new_atom.pos] = new_atom
    # NOTE(review): __setitem__ above already registers the position, so this
    # add() is redundant (harmless on a set) -- confirm before removing.
    self.used_posns.add(new_atom.pos)
    self.formula[new_atom.element] += 1
    # Quick helper to check if an atom within this molecule's grid has at least 1 open side
    def has_open_side(atom):
        return any(self[pos] is None for pos in atom.pos.neighbors())
    # Partial update of the number of open bonds this molecule has
    if has_open_side(new_atom):
        self.open_bonds += new_atom.remaining_bonds()
    # Add bonds to neighbours matching the bonds indicated on this atom
    for dir, pos in new_atom.pos.dirs_and_neighbors():
        adj_atom = self[pos]
        if adj_atom is not None:
            # Mirror the bond count onto the neighbor's facing side.
            adj_atom.bonds[opposite_dir(dir)] = new_atom.bonds[dir]
            # Subtract the bond we just added from the molecule's 'open bonds'
            self.open_bonds -= new_atom.bonds[dir]
            # If we closed off the neighbor's last open face, we've additionally removed
            # however many bonds it now has left from the molecule's 'open' bonds
            if not has_open_side(adj_atom):
                self.open_bonds -= adj_atom.remaining_bonds()
def remove_atom(self, atom):
'''Remove the specified atom from this molecule. Must exactly match.'''
if self[atom.pos] != atom:
raise ValueError(f"Specified atom {repr(new_atom)} does not match an atom in:\n{self}"
+ "\nCannot be removed.")
self[atom.pos] = None
self.formula[atom.element] -= 1
# Remove any now-trailing bonds on neighbors
for dir, pos in atom.pos.dirs_and_neighbors():
adj_atom = self[pos]
if adj_atom is not None:
adj_atom.bonds[opposite_dir(dir)] = 0
self.update_open_bonds()
def is_connected(self):
'''For the purposes of more advanced construction algorithms we allow adding atoms in
unconnected cells. This checks if the molecule is currently 'connected' and thus valid.
We'll count empty molecules as unconnected.
'''
if len(self) == 0:
return False
# Do a DFS starting from one atom and following the bonds of the molecule. If we don't
# find every atom, it's not connected
sample_pos = next(iter(self.used_posns))
stack = [self[sample_pos]]
# We don't have to actually 'visit' every atom, seeing them as neighbors is sufficient
seen = {sample_pos} # Track the grid positions of seen connected atoms
while stack:
if len(seen) == len(self):
return True
atom = stack.pop()
# Check for connected neighbors. When we | |
['FINISH']}}, # ['FINISH','SJRAND','SJRAND_P']
{'event' : {'$ne':'ORDER_API'}},
{'lock' : 0}
]},
{'event':1, 'status':1, 'b_time':1, 'e_time':1, 'lock':1,
'man':1, 'user_12306':1, 'comment':1, 'pay_limit_time':1,
'payStatus':1, 'orderNo':1, 'trainStartTime':1, 'orderType':1, 'return':1, 'ticketPay':1}
) .sort([('b_time',1)]) # 先下单的先打码
if db_todo.count()>0:
for todo in db_todo:
if todo['event'] == 'ORDER_UI': # 人工处理界面,不处理手工下单的订单
continue
start_tick = int(time.mktime(time.strptime(todo['trainStartTime'],"%Y-%m-%d %H:%M")))
result['data'].append({
'id' : str(todo['_id']),
'event' : todo['event'],
'status' : todo['status'],
'elapse' : int(time.time())-todo['e_time'], #todo['e_time']-todo['b_time'],
'lock' : todo['lock'],
'man' : todo['man'],
'user' : todo['user_12306'] if todo.has_key('user_12306') else '',
'comment' : todo['comment'],
'limit' : todo['pay_limit_time'] if todo.has_key('pay_limit_time') else '',
'payStatus' : todo['payStatus'] if todo.has_key('payStatus') else '',
'orderNo' : todo['orderNo'],
'urgent' : 1 if (start_tick-int(time.time()))/3600<24 else 0,
'orderType' : todo['orderType'],
'return' : todo['return'],
'ticketPay' : todo['ticketPay'],
})
result['num']=len(result['data'])
#print result
web.header("Content-Type", "application/json")
return json.dumps(result)
class CheckoutSjrand2: # after a captcha answer is submitted, hand back a new captcha task
    '''Captcha (rand-code) workflow endpoint for customer-service agents.

    GET does two things in one round-trip:
      1. If `todo` is supplied, records the submitted captcha answer and advances
         that task's state machine (SJRAND -> LOGIN, SJRAND_P -> BOOK).
      2. Atomically claims one new pending captcha task of the requested kind
         (`p=1` selects SJRAND_P) and returns it as JSON.
    '''
    def GET(self):
        import json
        web.header("Content-Type", "application/json")
        result={'data':[]}
        if logged(PRIV_USER|PRIV_KAM):
            user_data=web.input(todo='', rand_code='99,87', p='0')
            if user_data.todo!='': # a captcha answer was submitted
                # Atomically take the lock so only one agent's answer is applied
                db_todo=web_db.todo.find_and_modify(
                    query= {'$and' : [
                                { '_id': ObjectId(user_data.todo) },
                                { 'lock' : 0}
                            ]},
                    update= {'$set': {'lock':1}},
                    fields= {'status':1}
                )
                if db_todo is None:
                    return json.dumps({'ret':-1})
                now_tick = int(time.time())
                if db_todo['status']=='SJRAND':
                    # Login captcha answered -> resume the LOGIN step with the answer
                    todo_update={'status' : 'LOGIN',
                                 'comment' : '',
                                 'man' : 0,
                                 'lock' : 0,
                                 'rand_code' : user_data.rand_code,
                                 'cs_rand' : session.uname,
                                 'cs_time' : now_tick
                                }
                elif db_todo['status']=='SJRAND_P':
                    # Order-submission captcha answered -> proceed to BOOK
                    todo_update={'status' : 'BOOK',
                                 'comment' : '',
                                 'man' : 0,
                                 'lock' : 0,
                                 'rand_code_p' : user_data.rand_code,
                                 'cs_rand_p' : session.uname,
                                 'cs_time' : now_tick
                                }
                else:
                    # Unexpected state: just release the lock
                    todo_update={
                        'lock' : 0,
                        'cs_time' : now_tick
                    }
                r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
            # Claim one new captcha task; only a single task is returned ---------
            if user_data.p=='1':
                code_type = 'SJRAND_P'
            else:
                code_type = 'SJRAND'
            db_todo=web_db.todo.find_and_modify(
                query = {'$and': [
                        {'status' : code_type},
                        {'event' : {'$nin': ['ORDER_API', 'ORDER_UI']}},
                        {'lock' : 0},
                        {'cs_time': {'$lt': int(time.time())} }
                    ]
                },
                sort = [('b_time',1)], # earliest-booked orders get their captcha solved first
                update = {'$set': {'cs_time':int(time.time())+10}}, # push back in the queue so two agents don't fetch the same task
                fields = {'event':1, 'status':1, 'b_time':1, 'e_time':1, 'lock':1, 'man':1}
            )
            if db_todo is not None:
                result['data'].append({
                    'id' : str(db_todo['_id']),
                    'event' : db_todo['event'],
                    'status' : db_todo['status'],
                    'lock' : db_todo['lock'],
                    'man' : db_todo['man'],
                })
            result['num']=len(result['data'])
        web.header("Content-Type", "application/json")
        return json.dumps(result)
class CheckoutSjrand3:
    '''Lists unlocked payment-related tasks (PAY / SCAN / SCAN2) as JSON for agents.'''
    def GET(self):
        import json
        result={'data':[]}
        if logged(PRIV_USER):
            db_todo=web_db.todo.find(
                {'$and': [
                    {'status' : {'$in': ['PAY', 'SCAN', 'SCAN2']}}, # payment-related states only
                    {'event' : {'$ne':'ORDER_API'}},
                    {'lock' : 0}
                ]},
                {'event':1, 'status':1, 'b_time':1, 'e_time':1, 'lock':1,
                 'man':1, 'user_12306':1, 'comment':1, 'pay_limit_time':1,
                 'payStatus':1, 'orderNo':1, 'trainStartTime':1, 'orderType':1, 'return':1, 'ticketPay':1}
            ).sort([('b_time',1)]) # earliest-booked orders first
            if db_todo.count()>0:
                for todo in db_todo:
                    if todo['event'] == 'ORDER_UI': # manually-entered orders are handled by a human; skip
                        continue
                    start_tick = int(time.mktime(time.strptime(todo['trainStartTime'],"%Y-%m-%d %H:%M")))
                    result['data'].append({
                        'id' : str(todo['_id']),
                        'event' : todo['event'],
                        'status' : todo['status'],
                        'elapse' : int(time.time())-todo['e_time'],
                        'lock' : todo['lock'],
                        'man' : todo['man'],
                        # dict.get replaces the Python-2-only has_key() ternaries
                        'user' : todo.get('user_12306', ''),
                        'limit' : todo.get('pay_limit_time', ''),
                        'payStatus' : todo.get('payStatus', ''),
                        'orderNo' : todo['orderNo'],
                        # urgent when the train departs within 24h (integer division intended)
                        'urgent' : 1 if (start_tick-int(time.time()))/3600<24 else 0,
                        'orderType' : todo['orderType'],
                        'return' : todo['return'],
                        'ticketPay' : todo['ticketPay'],
                    })
            result['num']=len(result['data'])
        web.header("Content-Type", "application/json")
        return json.dumps(result)
class VerifySjrand:
    '''Accepts a captcha answer from an agent and advances the task's state machine.

    SJRAND (login captcha)   -> LOGIN with rand_code attached
    SJRAND_P (order captcha) -> BOOK with rand_code_p attached
    Any other state only releases the lock.
    '''
    def GET(self):
        import json
        web.header("Content-Type", "application/json")
        if logged(PRIV_USER):
            user_data=web.input(todo='', rand_code='99,87')
            if user_data.todo == '':
                return json.dumps({'ret':-1})
            # Atomically take the lock so only one agent's answer is applied
            db_todo=web_db.todo.find_and_modify(
                query= {'$and' : [
                            { '_id': ObjectId(user_data.todo) },
                            { 'lock' : 0}
                        ]},
                update= {'$set': {'lock':1}},
                fields= {'status':1}
            )
            if db_todo is None:
                return json.dumps({'ret':-1})
            now_tick = int(time.time())
            if db_todo['status']=='SJRAND':
                todo_update={'status' : 'LOGIN',
                             'comment' : '',
                             'man' : 0,
                             'lock' : 0,
                             'rand_code' : user_data.rand_code,
                             'cs_rand' : session.uname,
                             'cs_time' : now_tick
                            }
            elif db_todo['status']=='SJRAND_P':
                todo_update={'status' : 'BOOK',
                             'comment' : '',
                             'man' : 0,
                             'lock' : 0,
                             'rand_code_p' : user_data.rand_code,
                             'cs_rand_p' : session.uname,
                             'cs_time' : now_tick
                            }
            else:
                # Unexpected state: just release the lock
                todo_update={
                    'lock' : 0,
                    'cs_time' : now_tick
                }
            r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
            return json.dumps({'ret':0})
        else:
            return json.dumps({'ret':-1})
class Pay2:
    '''Moves a task from PAY to PAY2 and opens the payment router page.

    Refuses to proceed if an Alipay transaction was already generated once
    (to avoid a double payment) or if the task is not in the PAY state.
    '''
    def GET(self):
        if logged(PRIV_USER):
            render = create_render(session.privilege)
            user_data=web.input(todo='')
            if user_data.todo=='':
                return render.info('参数错误!')
            db_status = web_db.todo.find_one({'_id':ObjectId(user_data.todo)},{'status':1, 'gateway_result':1})
            if db_status is None:
                return render.info('参数错误!')
            # An Alipay transaction was already created once: never auto-create a
            # second one; flag the task for manual inspection instead.
            if 'gateway_result' in db_status:
                todo_update={'status' : 'FAIL',
                             'comment' : '0|PAY|已生成一次支付宝交易,需手工检查.',
                             'man' : 0,
                             'lock' : 0,
                             'e_time' : int(time.time())
                            }
                r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
                return render.info('已生成一次支付宝交易,需手工检查.')
            if db_status['status']=='PAY':
                todo_update={'status' : 'PAY2',
                             'comment' : '',
                             'man' : 0,
                             'lock' : 0,
                             'e_time' : int(time.time())
                            }
                r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
                return render.router(session.uname, user_level[session.privilege], user_data.todo)
            else:
                # Wrong state: someone may already be paying for this order
                return render.info('状态不对,可能有人正在付款!')
        else:
            raise web.seeother('/')
class AliForm:
    '''Renders the stored Alipay payment form for a task and marks it SCAN2
    (awaiting confirmation of the payment result).'''
    def GET(self):
        if logged(PRIV_USER):
            render = create_render(session.privilege)
            user_data=web.input(todo='')
            if user_data.todo=='':
                return render.info('参数错误!')
            db_todo=web_db.todo.find_one({'_id': ObjectId(user_data.todo)},{'alipay_form':1})
            if db_todo is not None:
                # Transition to SCAN2 and wait for the payment result to be confirmed
                todo_update={'status' : 'SCAN2',
                             'comment' : '',
                             'man' : 1,
                             'lock' : 0,
                             'e_time' : int(time.time())
                            }
                web_db.todo.update({'_id': ObjectId(user_data.todo)}, {'$set': todo_update})
                # Render with the train/ticket info embedded in the stored form
                return render.ali_form(session.uname, user_level[session.privilege],
                        user_data.todo, db_todo['alipay_form'])
            else:
                return render.info('出错,请重新提交。')
        else:
            raise web.seeother('/')
class PayResult:
    '''Records the agent-reported outcome of a payment attempt.

    success=1 -> SCAN3 (go verify the payment result)
    otherwise -> back to PAY for a retry (man=1 keeps it in the manual queue).
    Returns {'ret': 0} on success, {'ret': -1} on bad input or missing login.
    '''
    def GET(self):
        import json
        web.header("Content-Type", "application/json")
        if logged(PRIV_USER):
            user_data=web.input(todo='', success='')
            if '' in (user_data.success, user_data.todo):
                return json.dumps({'ret':-1})
            if user_data.success=='1':
                todo_update={'status' : 'SCAN3', # verify the payment result
                             'comment' : '',
                             'man' : 0,
                             'lock' : 0,
                             'e_time' : int(time.time())
                            }
            else:
                todo_update={'status' : 'PAY', # pay again
                             'comment' : '',
                             'man' : 1,
                             'lock' : 0,
                             'e_time' : int(time.time())
                            }
            r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
            #print r
            return json.dumps({'ret':0})
        else:
            return json.dumps({'ret':-1})
class ViewEvent:
    '''View (GET) and modify (POST) a single task/order; some transitions are
    restricted to authorized customer-service staff.'''
    def GET(self):
        if logged(PRIV_USER):
            render = create_render(session.privilege)
            user_data=web.input(todo='')
            if user_data.todo=='':
                return render.info('参数错误!')
            # Authorization tiers: full admins (999) > CS admins (1) > others (-1)
            auth_level = -1
            if session.uname in setting.auth_user:
                auth_level = 999
            elif session.uname in setting.cs_admin:
                auth_level = 1
            db_todo=web_db.todo.find_one({'_id': ObjectId(user_data.todo)})
            if db_todo is not None:
                # Bug fix: this assignment was corrupted to an invalid identifier
                # ("pass_1<PASSWORD>6"); the variable is read below as pass_12306.
                pass_12306 = ''
                if auth_level>0 and db_todo['event'] in ('ORDER_SINGLE','ORDER_JOINT') and 'user_12306' in db_todo:
                    db_u=web_db.user_12306.find_one({'uname': db_todo['user_12306']})
                    if db_u is not None:
                        pass_12306 = db_u['passwd']
                return render.view_event(session.uname, user_level[session.privilege],
                        user_data.todo, db_todo, int(time.time()-db_todo['e_time']),
                        auth_level,pass_12306) # only authorized staff may modify
            else:
                return render.info('出错,请重新提交。')
        else:
            raise web.seeother('/')
    def POST(self):
        if logged(PRIV_USER):
            render = create_render(session.privilege)
            user_data=web.input(todo='', status='', crmtext0='', crmtext='')
            if '' in (user_data.status, user_data.todo):
                return render.info('错误的参数!')
            # Save a CRM note (prepended timestamp + agent name)
            if user_data.status=='__CRM__':
                if user_data.crmtext0[0:3]=='n/a':
                    crmt = u'%s %s\r\n%s' % (time_str(), session.uname, user_data.crmtext)
                else:
                    crmt = u'%s%s %s\r\n%s' % (user_data.crmtext0, time_str(), session.uname, user_data.crmtext)
                web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set' : {'crm_text' : crmt}})
                return render.info('保存完成', goto="/view_event?todo=%s" % user_data.todo)
            # Only authorized staff may change the status
            auth = False
            if user_data.status in ['RETURN_OK', 'FREE_USER', 'SCAN3', 'NO_TICKET', 'QUERY', 'FINISH'] and session.uname in setting.cs_admin:
                auth = True
            elif session.uname in setting.auth_user:
                auth = True
            if not auth:
                return render.info('无操作权限!')
            todo_update={
                'lock' : 0,
                'e_time' : int(time.time())
            }
            if user_data.status != '__NOP__':
                todo_update['status']=user_data.status
                if user_data.status=='PAY':
                    todo_update['man']=1
                else:
                    todo_update['man']=0
                # NOTE(review): __CANCEL_RETURN__ paired with the RETURN check here;
                # pairing it with the outer __NOP__ test would make it dead code.
                if user_data.status=='RETURN':
                    todo_update['return']=1
                elif user_data.status == '__CANCEL_RETURN__': # manually refuse the refund
                    todo_update['status']='REPORT'
                    todo_update['return']=-1
                    todo_update['comment']='1|__CANCEL_RETURN__|已取纸质车票或临近开车.'
            r = web_db.todo.update({'_id':ObjectId(user_data.todo)}, {'$set': todo_update})
            return render.info('提交完成',goto="javascript:window.opener=null;window.close();",text2='关闭窗口')
        else:
            raise web.seeother('/')
class Crm:
    '''CRM order lookup: GET renders the search page, POST searches the todo
    collection by one field (treating "_id" specially) and lists matches.'''
    def GET(self):
        if not logged(PRIV_USER):
            raise web.seeother('/')
        render = create_render(session.privilege)
        return render.crm(session.uname, user_level[session.privilege])
    def POST(self):
        if not logged(PRIV_USER):
            raise web.seeother('/')
        render = create_render(session.privilege)
        form = web.input(cat='', content='')
        if form.cat=='' or form.content=='':
            return render.info('错误的参数!')
        # "_id" searches need the raw string converted to an ObjectId
        value = form.content.strip()
        if form.cat=='_id':
            value = ObjectId(value)
        cursor = web_db.todo.find({form.cat: value}, {'orderNo':1,'tripNum':1,'seq':1}).sort([('_id',1)])
        if cursor.count()>0:
            return render.report_order(session.uname, user_level[session.privilege], cursor)
        return render.info('未查到订单信息。')
class Report:
    '''Daily settlement report over FINISHed tasks for a given date.

    cat=1: successfully paid, cat=2: successfully refunded,
    cat=3: not yet paid (including failures); otherwise all FINISHed tasks.
    Renders totals (count and amount) for payments and refunds.
    '''
    def GET(self):
        if logged(PRIV_USER):
            render = create_render(session.privilege)
            user_data=web.input(start_date='', cat='')
            if user_data['start_date']=='':
                return render.report(session.uname, user_level[session.privilege])
            import stations  # NOTE(review): not referenced below — kept for possible side effects; confirm before removing
            start_tick = time.mktime(time.strptime('%s 00:00:00' % user_data['start_date'],'%Y-%m-%d %H:%M:%S'))
            stop_tick = time.mktime(time.strptime('%s 23:59:59' % user_data['start_date'],'%Y-%m-%d %H:%M:%S'))
            and_request = [
                {'event' : {'$ne' : 'ORDER_UI'}},
                {'status': 'FINISH'},
                {'e_time': {'$gte' : start_tick}},
                {'e_time': {'$lte' : stop_tick}},
            ]
            if user_data.cat=='1': # successfully paid
                and_request.append({'pay_off':1})
                and_request.append({'return':0})
            elif user_data.cat=='2': # successfully refunded
                and_request.append({'pay_off':1})
                and_request.append({'return':2})
            elif user_data.cat=='3': # not yet paid (including errors)
                and_request.append({'pay_off': {'$ne':1}})
            db_todo=web_db.todo.find({'$and': and_request}, {
                'alipay_form':1, 'ticket_no':1, 'passengers':1, 'return':1, 'comment':1,
                'dptStation':1, 'arrStation':1, 'trainNo':1, 'start_date':1, 'pay_off':1, 'orderNo':1
                })
            report=[]
            total_pay = 0
            total_return = 0
            amt_pay = 0.0
            amt_return = 0.0
            if db_todo.count()>0:
                for item in db_todo:
                    # dict.get replaces the Python-2-only has_key() checks throughout
                    if item.get('pay_off')==1:
                        report.append((
                            item['_id'],
                            item['ticket_no'],
                            item['dptStation'],
                            item['arrStation'],
                            item['start_date'],
                            item['trainNo'],
                            item['alipay_form']['ord_amt'],
                            '已退票' if item['return']==2 else '',
                            item.get('orderNo', 'n/a')
                        ))
                        if item['return']==2:
                            amt_return += float(item['alipay_form']['ord_amt'])
                            total_return += 1
                        else:
                            amt_pay += float(item['alipay_form']['ord_amt'])
                            total_pay += 1
                    else:
                        # Unpaid/failed rows carry the failure reason in 'comment' ("code|stage|message")
                        msg=item['comment'].split('|')
                        report.append((
                            item['_id'],
                            item.get('ticket_no', 'n/a'),
                            item.get('dptStation', 'n/a'),
                            item.get('arrStation', 'n/a'),
                            item.get('start_date', 'n/a'),
                            item.get('trainNo', 'n/a'),
                            'n/a',
                            msg[2] if len(msg)==3 else item['comment'],
                            item.get('orderNo', 'n/a')
                        ))
            return render.report_result(session.uname, user_level[session.privilege], report,
                    (total_pay, amt_pay, total_return, amt_return), user_data.start_date)
        else:
            raise web.seeother('/')
########## API 功能 ####################################################
class APIInfo:
    '''Shows the logged-in API user's API_ID / API_KEY credentials.'''
    def GET(self):
        if logged(PRIV_API):
            render = create_render(session.privilege)
            db_user=web_db.user.find_one({'_id':session.uid},{'API_ID':1, 'API_KEY':1})
            # `in` replaces the Python-2-only has_key(); behavior is identical
            if db_user is not None and 'API_ID' in db_user and 'API_KEY' in db_user:
                return render.api_info(session.uname, user_level[session.privilege], db_user['API_ID'], db_user['API_KEY'])
            else:
                return render.info('未找到用户信息。')
        else:
            raise web.seeother('/')
#
# API 建立新的task,参数:
# {
# 'api_key' : 'XXXX',
# 'login_info' : { 'user':'jack_139', 'passwd':'????' } --- base64
# 'secret' : hashlib.md5('%s%s%s' % (API_KEY, api_id, date)).hexdigest().upper()
# 'device_id' : ''
# }
#
# 返回:
# {
# 'ret' : 0, # 0 正常,<0 出错
# 'task' : '' # todo_id
# }
#
# 出错代码:
# -1 系统不受理
# -2 json格式错误
# -3 secret不匹配
# -4 上次请求未结束
#
class APITask:
def POST(self):
import json, hashlib, base64
while 1:
try:
data=json.loads(web.data())
print data
except ValueError:
# json格式有问题
print web.data()
ret = { 'ret': -2}
break
try:
db_user=web_db.user.find_one({'API_ID':data['api_key']},{'API_KEY':1})
if db_user==None:
# 未找到uid
ret = { 'ret': -1}
break
HMAC = hashlib.md5(
'%s%s%s' % (db_user['API_KEY'],
data['api_key'],
data['login_info'])
).hexdigest().upper()
except KeyError:
# 参数有问题
ret = { 'ret': -2}
break
if data['secret']!=HMAC:
# 加密串不对
ret = { 'ret': -3}
break
# 准备参数
login_info = json.loads(base64.b64decode(data['login_info']))
tick=int(time.time())
todo_id=web_db.todo.insert({
'uid' : data['device_id'] if data.has_key('device_id') else 'api', # 用于标识不用的手机个体
'event' : 'ORDER_API',
'status' : 'FINISH',
'user_12306' : login_info['user'],
'pass_12306' : login_info['passwd'],
'lock' : 0,
'man' : 0,
'next_status' : '',
'comment' : '',
'history' : [],
'b_time' : tick,
'e_time' : tick
})
# 记录 device_id
if data.has_key('device_id'):
web_db.device.update({'device_id':data['device_id']}, {'$set':{'time':tick}}, upsert=True)
if todo_id!=None:
ret = { 'ret': 0, 'task' : str(todo_id)}
else:
# '查询失败,请稍后重试。'
ret = { 'ret': -1}
break
web.header("Content-Type", "application/json")
return json.dumps(ret)
#
# API 查询车次,参数:
# {
# 'api_key' : 'XXXX',
# 'task' : '',
# 'departure' : 'VBB',
# 'arrival' : 'HBB'
# 'date' : '2015-01-09',
# 'secret' : hashlib.md5('%s%s%s%s%s%s' % (API_KEY, api_id, task, departure, arrival, date)).hexdigest().upper()
# }
#
# 返回:
# {
# 'ret' : 0, # 0 正常,<0 出错
# }
#
# 出错代码:
# -1 系统不受理
# -2 json格式错误
# -3 secret不匹配
# -4 上次请求未结束
#
class APIQuery:
def POST(self):
import json, hashlib
while 1:
try:
data=json.loads(web.data())
print data
except ValueError:
# json格式有问题
print web.data()
ret = { 'ret': -2}
break
try:
db_user=web_db.user.find_one({'API_ID':data['api_key']},{'API_KEY':1})
if db_user==None:
# 未找到uid
ret = { 'ret': -1}
break
HMAC = hashlib.md5(
'%s%s%s%s%s%s' % (db_user['API_KEY'],
data['api_key'],
data['task'],
data['departure'],
data['arrival'],
data['date'])
).hexdigest().upper()
except KeyError:
# 参数有问题
ret = { 'ret': -2}
break
if data['secret']!=HMAC:
# 加密串不对
ret = { 'ret': -3}
break
db_todo=web_db.todo.find_one({'_id':ObjectId(data['task'])},{'event':1, 'status':1, 'result':1})
if db_todo==None:
# 未找到task
ret = { 'ret': -5}
break
if db_todo['status']!='FINISH':
# 未结束
print db_todo['status']
ret = { 'ret': -4}
break
tick=int(time.time())
web_db.todo.update({'_id':ObjectId(data['task'])},
{'$set':{
'status' : 'QUERY',
'start_station' : data['departure'],
'stop_station' : data['arrival'],
'start_date' : data['date'],
'lock' : 0,
'man' : 0,
'retry' : 0,
'next_status' : '',
'comment' : '',
'e_time' : tick
}
})
ret = { 'ret': 0 }
break
web.header("Content-Type", "application/json")
return json.dumps(ret)
#
# API 下单,参数:
# {
# 'api_key' : 'XXXX',
# 'task' : '<KEY>',
# 'passengers' : [
# {
# 'name' : '关涛',
# 'certType' : '1',
# 'certNo' : '12010419760404761X',
# 'ticketType' : '1',
# }
# ] --- base64
# 'seat_type' : '1',
# 's' : 查询信息的火车票串
# 'secret' : hashlib.md5('%s%s%s%s%s' % (API_KEY, api_id, task, s, passengers)).hexdigest().upper()
# }
#
# 返回:
# {
# 'ret' : 0, # 0 正常,<0 出错
# }
#
# 出错代码:
# -1 系统不受理
# -2 json格式错误
# -3 secret不匹配
# -4 请求未结束
# -5 未找到task
#
class APIOrder:
def POST(self):
import json, hashlib, base64, urllib
while 1:
try:
data=json.loads(web.data())
#print data
except ValueError:
# json格式有问题
print web.data()
ret = { 'ret': -2}
break
try:
db_user=web_db.user.find_one({'API_ID':data['api_key']},{'API_KEY':1})
if db_user==None:
# 未找到uid
ret = { 'ret': -1}
break
HMAC = hashlib.md5(
'%s%s%s%s%s' % (db_user['API_KEY'],
data['api_key'],
data['task'],
data['s'],
data['passengers'])
).hexdigest().upper()
except KeyError:
# 参数有问题
print data
ret = { 'ret': -2}
break
if data['secret']!=HMAC:
# 加密串不对
ret = { 'ret': -3}
break
db_todo=web_db.todo.find_one({'_id':ObjectId(data['task'])},{'event':1, 'status':1, 'result':1})
if db_todo==None:
# 未找到task
ret = { 'ret': -5}
break
if db_todo['status']!='FINISH':
# 未结束
print db_todo['status']
ret = { 'ret': -4}
break
# 准备参数
passengers = | |
# <reponame>Opportunitylivetv/fbthrift
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
# Explicit import: `sys` is used below (builtin_module_names, version_info) and
# should not rely on leaking through `from thrift.Thrift import *`.
import sys
import six
from thrift.util.Recursive import fix_spec
from thrift.Thrift import *
from thrift.protocol.TProtocol import TProtocolException
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
# Optional C-accelerated (de)serializer; fall back to pure Python when unavailable
# (e.g. on PyPy, where the extension is skipped entirely).
fastproto = None
if '__pypy__' not in sys.builtin_module_names:
  try:
    from thrift.protocol import fastproto
  except Exception:  # best-effort: any import failure means the pure-Python path
    pass
all_structs = []
# Generated structs decode strings as unicode on Python 3
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'OldStructure', 'NewStructure', 'NewStructure2', 'NewStructureNested', 'NewStructureNestedField']
class OldStructure:
  """
  Attributes:
   - features
  """
  # NOTE(review): thrift_spec/__init__ are placeholders; the generated module
  # tail (all_structs / fix_spec) is expected to populate them — confirm.
  thrift_spec = None
  thrift_field_annotations = None
  thrift_struct_annotations = None
  __init__ = None
  @staticmethod
  def isUnion():
    return False
  def read(self, iprot):
    # Fast path: C-accelerated decode for (header-)binary protocol
    if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
      self.checkRequired()
      return
    # Fast path: C-accelerated decode for (header-)compact protocol
    if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
      self.checkRequired()
      return
    # Slow path: generic protocol-driven decode, skipping unknown fields
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          # features: map<i16, double>
          self.features = {}
          (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
          if _size0 >= 0:
            for _i4 in six.moves.range(_size0):
              _key5 = iprot.readI16()
              _val6 = iprot.readDouble()
              self.features[_key5] = _val6
          else:
            # Negative size means the size is unknown; read until the map ends
            while iprot.peekMap():
              _key7 = iprot.readI16()
              _val8 = iprot.readDouble()
              self.features[_key7] = _val8
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
    self.checkRequired()
  def checkRequired(self):
    # No required fields in this struct
    return
  def write(self, oprot):
    # Fast paths mirror read(): C-accelerated encode when available
    if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
      return
    if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
      return
    oprot.writeStructBegin('OldStructure')
    if self.features != None:
      oprot.writeFieldBegin('features', TType.MAP, 1)
      oprot.writeMapBegin(TType.I16, TType.DOUBLE, len(self.features))
      for kiter9,viter10 in self.features.items():
        oprot.writeI16(kiter9)
        oprot.writeDouble(viter10)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def __repr__(self):
    L = []
    padding = ' ' * 4
    value = pprint.pformat(self.features, indent=0)
    value = padding.join(value.splitlines(True))
    L.append(' features=%s' % (value))
    return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))
  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    return self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
  # Override the __hash__ function for Python3 - t10434117
  if not six.PY2:
    __hash__ = object.__hash__
class NewStructure:
  """
  Attributes:
   - features
  """
  # NOTE(review): thrift_spec/__init__ are placeholders; the generated module
  # tail (all_structs / fix_spec) is expected to populate them — confirm.
  thrift_spec = None
  thrift_field_annotations = None
  thrift_struct_annotations = None
  __init__ = None
  @staticmethod
  def isUnion():
    return False
  def read(self, iprot):
    # Fast path: C-accelerated decode (binary), with forward compatibility so
    # maps written with different key/value wire types can still be read
    if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0, forward_compatibility=True)
      self.checkRequired()
      return
    # Fast path: C-accelerated decode (compact), same forward compatibility
    if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2, forward_compatibility=True)
      self.checkRequired()
      return
    # Slow path: generic decode; unknown fields are skipped
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.features = {}
          (_ktype12, _vtype13, _size11 ) = iprot.readMapBegin()
          # Fall back to the declared types (i16 -> double) when the wire
          # doesn't carry key/value type info
          _ktype12 = _ktype12 if _ktype12 != TType.STOP else TType.I16
          _vtype13 = _vtype13 if _vtype13 != TType.STOP else TType.DOUBLE
          if _size11 >= 0:
            for _i15 in six.moves.range(_size11):
              _key16 = iprot.readIntegral(_ktype12)
              _val17 = iprot.readFloatingPoint(_vtype13)
              self.features[_key16] = _val17
          else:
            # Negative size means the size is unknown; read until the map ends
            while iprot.peekMap():
              _key18 = iprot.readIntegral(_ktype12)
              _val19 = iprot.readFloatingPoint(_vtype13)
              self.features[_key18] = _val19
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
    self.checkRequired()
  def checkRequired(self):
    # No required fields in this struct
    return
  def write(self, oprot):
    # Fast paths mirror read(): C-accelerated encode when available
    if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
      return
    if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
      return
    oprot.writeStructBegin('NewStructure')
    if self.features != None:
      oprot.writeFieldBegin('features', TType.MAP, 1)
      oprot.writeMapBegin(TType.I16, TType.DOUBLE, len(self.features))
      for kiter20,viter21 in self.features.items():
        oprot.writeI16(kiter20)
        oprot.writeDouble(viter21)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def __repr__(self):
    L = []
    padding = ' ' * 4
    value = pprint.pformat(self.features, indent=0)
    value = padding.join(value.splitlines(True))
    L.append(' features=%s' % (value))
    return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))
  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    return self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
  # Override the __hash__ function for Python3 - t10434117
  if not six.PY2:
    __hash__ = object.__hash__
class NewStructure2:
  """
  Attributes:
   - features
  """
  # NOTE(review): thrift_spec/__init__ are placeholders; the generated module
  # tail (all_structs / fix_spec) is expected to populate them — confirm.
  thrift_spec = None
  thrift_field_annotations = None
  thrift_struct_annotations = None
  __init__ = None
  @staticmethod
  def isUnion():
    return False
  def read(self, iprot):
    # Fast path: C-accelerated decode (binary) with forward compatibility
    if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0, forward_compatibility=True)
      self.checkRequired()
      return
    # Fast path: C-accelerated decode (compact) with forward compatibility
    if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2, forward_compatibility=True)
      self.checkRequired()
      return
    # Slow path: generic decode; unknown fields are skipped
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.features = {}
          (_ktype23, _vtype24, _size22 ) = iprot.readMapBegin()
          # Fall back to the declared types (i16 -> float) when the wire
          # doesn't carry key/value type info
          _ktype23 = _ktype23 if _ktype23 != TType.STOP else TType.I16
          _vtype24 = _vtype24 if _vtype24 != TType.STOP else TType.FLOAT
          if _size22 >= 0:
            for _i26 in six.moves.range(_size22):
              _key27 = iprot.readIntegral(_ktype23)
              _val28 = iprot.readFloatingPoint(_vtype24)
              self.features[_key27] = _val28
          else:
            # Negative size means the size is unknown; read until the map ends
            while iprot.peekMap():
              _key29 = iprot.readIntegral(_ktype23)
              _val30 = iprot.readFloatingPoint(_vtype24)
              self.features[_key29] = _val30
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
    self.checkRequired()
  def checkRequired(self):
    # No required fields in this struct
    return
  def write(self, oprot):
    # Fast paths mirror read(): C-accelerated encode when available
    if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
      return
    if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
      oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
      return
    oprot.writeStructBegin('NewStructure2')
    if self.features != None:
      oprot.writeFieldBegin('features', TType.MAP, 1)
      # Values are single-precision here (FLOAT), unlike NewStructure's DOUBLE
      oprot.writeMapBegin(TType.I16, TType.FLOAT, len(self.features))
      for kiter31,viter32 in self.features.items():
        oprot.writeI16(kiter31)
        oprot.writeFloat(viter32)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def __repr__(self):
    L = []
    padding = ' ' * 4
    value = pprint.pformat(self.features, indent=0)
    value = padding.join(value.splitlines(True))
    L.append(' features=%s' % (value))
    return "%s(\n%s)" % (self.__class__.__name__, ",\n".join(L))
  def __eq__(self, other):
    if not isinstance(other, self.__class__):
      return False
    return self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
  # Override the __hash__ function for Python3 - t10434117
  if not six.PY2:
    __hash__ = object.__hash__
class NewStructureNested:
"""
Attributes:
- lst
- mp
- s
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0, forward_compatibility=True)
self.checkRequired()
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2, forward_compatibility=True)
self.checkRequired()
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.lst = []
(_etype36, _size33) = iprot.readListBegin()
if _size33 >= 0:
for _i37 in six.moves.range(_size33):
_elem38 = {}
(_ktype40, _vtype41, _size39 ) = iprot.readMapBegin()
_ktype40 = _ktype40 if _ktype40 != TType.STOP else TType.I16
_vtype41 = _vtype41 if _vtype41 != TType.STOP else TType.FLOAT
if _size39 >= 0:
for _i43 in six.moves.range(_size39):
_key44 = iprot.readIntegral(_ktype40)
_val45 = iprot.readFloatingPoint(_vtype41)
_elem38[_key44] = _val45
else:
while iprot.peekMap():
_key46 = iprot.readIntegral(_ktype40)
_val47 = iprot.readFloatingPoint(_vtype41)
_elem38[_key46] = _val47
iprot.readMapEnd()
self.lst.append(_elem38)
else:
while iprot.peekList():
_elem48 = {}
(_ktype50, _vtype51, _size49 ) = iprot.readMapBegin()
_ktype50 = _ktype50 if _ktype50 != TType.STOP else TType.I16
_vtype51 = _vtype51 if _vtype51 != TType.STOP else TType.FLOAT
if _size49 >= 0:
for _i53 in six.moves.range(_size49):
_key54 = iprot.readIntegral(_ktype50)
_val55 = iprot.readFloatingPoint(_vtype51)
_elem48[_key54] = _val55
else:
while iprot.peekMap():
_key56 = iprot.readIntegral(_ktype50)
_val57 = iprot.readFloatingPoint(_vtype51)
| |
<gh_stars>1-10
# PopulationSim
# See full license in LICENSE.txt.
import logging
import os
import numpy as np
STATUS_OPTIMAL = 'OPTIMAL'
STATUS_FEASIBLE = 'FEASIBLE'
STATUS_SUCCESS = [STATUS_OPTIMAL, STATUS_FEASIBLE]
def np_integerizer_ortools(
        incidence,
        resid_weights,
        log_resid_weights,
        control_importance_weights,
        total_hh_control_index,
        lp_right_hand_side,
        relax_ge_upper_bound,
        hh_constraint_ge_bound):
    """
    ortools single-integerizer function taking numpy data types and conforming to a
    standard function signature that allows it to be swapped interchangeably with alternate
    LP implementations.

    Parameters
    ----------
    incidence : numpy.ndarray(control_count, sample_count) float
    resid_weights : numpy.ndarray(sample_count,) float
    log_resid_weights : numpy.ndarray(sample_count,) float
    control_importance_weights : numpy.ndarray(control_count,) float
    total_hh_control_index : int
    lp_right_hand_side : numpy.ndarray(control_count,) float
    relax_ge_upper_bound : numpy.ndarray(control_count,) float
    hh_constraint_ge_bound : numpy.ndarray(control_count,) float

    Returns
    -------
    resid_weights_out : numpy.ndarray(sample_count,)
        solved residual weights, or resid_weights unchanged on solver failure
    status_text : str
    """
    from ortools.linear_solver import pywraplp

    STATUS_TEXT = {
        pywraplp.Solver.OPTIMAL: 'OPTIMAL',
        pywraplp.Solver.FEASIBLE: 'FEASIBLE',
        pywraplp.Solver.INFEASIBLE: 'INFEASIBLE',
        pywraplp.Solver.UNBOUNDED: 'UNBOUNDED',
        pywraplp.Solver.ABNORMAL: 'ABNORMAL',
        pywraplp.Solver.NOT_SOLVED: 'NOT_SOLVED',
    }
    CBC_TIMEOUT_IN_SECONDS = 60

    control_count, sample_count = incidence.shape

    # - Instantiate a mixed-integer solver
    solver = pywraplp.Solver('IntegerizeCbc', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)

    # - Create per-household decision variables in [0, max_x]
    x = [[]] * sample_count
    for hh in range(0, sample_count):
        # max_x == 0.0 if float_weights is an int, otherwise 1.0
        max_x = 1.0 - (resid_weights[hh] == 0.0)
        x[hh] = solver.NumVar(0.0, max_x, 'x_' + str(hh))

    # - Create positive continuous constraint relaxation variables
    relax_le = [[]] * control_count
    relax_ge = [[]] * control_count
    for c in range(0, control_count):
        # no relaxation for total households control
        if c != total_hh_control_index:
            relax_le[c] = solver.NumVar(0.0, lp_right_hand_side[c], 'relax_le_' + str(c))
            relax_ge[c] = solver.NumVar(0.0, relax_ge_upper_bound[c], 'relax_ge_' + str(c))

    # - Objective: reward picking households with high residual weights,
    #   penalize relaxing controls. Built as one Sum expression because
    #   per-coefficient Objective.SetCoefficient calls are markedly slower.
    z = solver.Sum(x[hh] * log_resid_weights[hh]
                   for hh in range(sample_count)) - \
        solver.Sum(relax_le[c] * control_importance_weights[c]
                   for c in range(control_count) if c != total_hh_control_index) - \
        solver.Sum(relax_ge[c] * control_importance_weights[c]
                   for c in range(control_count) if c != total_hh_control_index)
    # Maximize() registers the objective on the solver; it returns nothing,
    # so (unlike the original) we do not capture its result.
    solver.Maximize(z)

    # - inequality constraints (total-households control is handled by the
    #   equality constraint below, so it is skipped here)
    # NOTE: building these with solver.Add/solver.Sum reads better but is a lot slower.
    hh_constraint_ge = [[]] * control_count
    hh_constraint_le = [[]] * control_count
    for c in range(0, control_count):
        if c == total_hh_control_index:
            continue
        # add the lower bound relaxation inequality constraint
        hh_constraint_le[c] = solver.Constraint(0, lp_right_hand_side[c])
        for hh in range(0, sample_count):
            hh_constraint_le[c].SetCoefficient(x[hh], incidence[c, hh])
        hh_constraint_le[c].SetCoefficient(relax_le[c], -1.0)
        # add the upper bound relaxation inequality constraint
        hh_constraint_ge[c] = solver.Constraint(lp_right_hand_side[c], hh_constraint_ge_bound[c])
        for hh in range(0, sample_count):
            hh_constraint_ge[c].SetCoefficient(x[hh], incidence[c, hh])
        hh_constraint_ge[c].SetCoefficient(relax_ge[c], 1.0)

    # - equality constraint for the total households control
    total_hh_constraint = lp_right_hand_side[total_hh_control_index]
    constraint_eq = solver.Constraint(total_hh_constraint, total_hh_constraint)
    for hh in range(0, sample_count):
        constraint_eq.SetCoefficient(x[hh], 1.0)

    solver.set_time_limit(CBC_TIMEOUT_IN_SECONDS * 1000)
    solver.EnableOutput()

    result_status = solver.Solve()
    status_text = STATUS_TEXT[result_status]

    if status_text in STATUS_SUCCESS:
        # FIX: under Python 3, np.asanyarray(map(...)) produced a 0-d object
        # array wrapping the map iterator instead of the solution values; a
        # list comprehension materializes a proper 1-D float array (and avoids
        # shadowing the variable list `x` inside a lambda).
        resid_weights_out = np.asanyarray(
            [var.solution_value() for var in x]).astype(np.float64)
    else:
        resid_weights_out = resid_weights
    return resid_weights_out, status_text
def np_simul_integerizer_ortools(
sub_int_weights,
parent_countrol_importance,
parent_relax_ge_upper_bound,
sub_countrol_importance,
sub_float_weights,
sub_resid_weights,
lp_right_hand_side,
parent_hh_constraint_ge_bound,
sub_incidence,
parent_incidence,
total_hh_right_hand_side,
relax_ge_upper_bound,
parent_lp_right_hand_side,
hh_constraint_ge_bound,
parent_resid_weights,
total_hh_sub_control_index,
total_hh_parent_control_index):
"""
ortools-based siuml-integerizer function taking numpy data types and conforming to a
standard function signature that allows it to be swapped interchangeably with alternate
LP implementations.
Parameters
----------
sub_int_weights : numpy.ndarray(sub_zone_count, sample_count) int
parent_countrol_importance : numpy.ndarray(parent_control_count,) float
parent_relax_ge_upper_bound : numpy.ndarray(parent_control_count,) float
sub_countrol_importance : numpy.ndarray(sub_control_count,) float
sub_float_weights : numpy.ndarray(sub_zone_count, sample_count) float
sub_resid_weights : numpy.ndarray(sub_zone_count, sample_count) float
lp_right_hand_side : numpy.ndarray(sub_zone_count, sub_control_count) float
parent_hh_constraint_ge_bound : numpy.ndarray(parent_control_count,) float
sub_incidence : numpy.ndarray(sample_count, sub_control_count) float
parent_incidence : numpy.ndarray(sample_count, parent_control_count) float
total_hh_right_hand_side : numpy.ndarray(sub_zone_count,) float
relax_ge_upper_bound : numpy.ndarray(sub_zone_count, sub_control_count) float
parent_lp_right_hand_side : numpy.ndarray(parent_control_count,) float
hh_constraint_ge_bound : numpy.ndarray(sub_zone_count, sub_control_count) float
parent_resid_weights : numpy.ndarray(sample_count,) float
total_hh_sub_control_index : int
total_hh_parent_control_index : int
Returns
-------
resid_weights_out : numpy.ndarray of float
residual weights in range [0..1] as solved,
or, in case of failure, sub_resid_weights unchanged
status_text : string
STATUS_OPTIMAL, STATUS_FEASIBLE in case of success, or a solver-specific failure status
"""
from ortools.linear_solver import pywraplp
STATUS_TEXT = {
pywraplp.Solver.OPTIMAL: STATUS_OPTIMAL,
pywraplp.Solver.FEASIBLE: STATUS_FEASIBLE,
pywraplp.Solver.INFEASIBLE: 'INFEASIBLE',
pywraplp.Solver.UNBOUNDED: 'UNBOUNDED',
pywraplp.Solver.ABNORMAL: 'ABNORMAL',
pywraplp.Solver.NOT_SOLVED: 'NOT_SOLVED',
}
CBC_TIMEOUT_IN_SECONDS = 60
sample_count, sub_control_count = sub_incidence.shape
_, parent_control_count = parent_incidence.shape
sub_zone_count, _ = sub_float_weights.shape
# setting indexes to -1 prevents creation of hh_controls relaxation variables
# setting hh_control importance to zero eliminates them from the objective function
# the latter approach is used by the cvx version
# total_hh_sub_control_index = -1
sub_countrol_importance[total_hh_sub_control_index] = 0
# FIXME total_hh_parent_control_index should not exist???
if total_hh_parent_control_index > 0:
parent_countrol_importance[total_hh_parent_control_index] = 0
# - Instantiate a mixed-integer solver
solver = pywraplp.Solver('SimulIntegerizeCbc', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
solver.EnableOutput()
solver.set_time_limit(CBC_TIMEOUT_IN_SECONDS * 1000)
# constraints = [
# x >= 0.0,
# x <= x_max,
#
# relax_le >= 0.0,
# relax_le <= lp_right_hand_side,
# relax_ge >= 0.0,
# relax_ge <= relax_ge_upper_bound,
#
# parent_relax_le >= 0.0,
# parent_relax_le <= parent_lp_right_hand_side,
# parent_relax_ge >= 0.0,
# parent_relax_ge <= parent_relax_ge_upper_bound,
# ]
# x_max is 1.0 unless resid_weights is zero, in which case constrain x to 0.0
x_max = (~(sub_float_weights == sub_int_weights)).astype(float)
# - Create resid weight variables
x = {}
for z in range(sub_zone_count):
for hh in range(sample_count):
x[z, hh] = solver.NumVar(0.0, x_max[z, hh], 'x[%s,%s]' % (z, hh))
# - Create positive continuous constraint relaxation variables
relax_le = {}
relax_ge = {}
for z in range(sub_zone_count):
for c in range(sub_control_count):
# no relaxation for total households control
if c == total_hh_sub_control_index:
continue
relax_le[z, c] = \
solver.NumVar(0.0, lp_right_hand_side[z, c], 'relax_le[%s,%s]' % (z, c))
relax_ge[z, c] = \
solver.NumVar(0.0, relax_ge_upper_bound[z, c], 'relax_ge[%s,%s]' % (z, c))
parent_relax_le = {}
parent_relax_ge = {}
for c in range(parent_control_count):
parent_relax_le[c] = \
solver.NumVar(0.0, parent_lp_right_hand_side[c], 'parent_relax_le[%s]' % c)
parent_relax_ge[c] = \
solver.NumVar(0.0, parent_relax_ge_upper_bound[c], 'parent_relax_ge[%s]' % c)
LOG_OVERFLOW = -725
log_resid_weights = np.log(np.maximum(sub_resid_weights, np.exp(LOG_OVERFLOW)))
assert not np.isnan(log_resid_weights).any()
log_parent_resid_weights = \
np.log(np.maximum(parent_resid_weights, np.exp(LOG_OVERFLOW)))
assert not np.isnan(log_parent_resid_weights).any()
# objective = cvx.Maximize(
# cvx.sum_entries(cvx.mul_elemwise(log_resid_weights, cvx.vec(x))) +
# cvx.sum_entries(cvx.mul_elemwise(log_parent_resid_weights, cvx.vec(cvx.sum_entries(x, axis=0)))) - # nopep8
# cvx.sum_entries(relax_le * sub_countrol_importance) -
# cvx.sum_entries(relax_ge * sub_countrol_importance) -
# cvx.sum_entries(cvx.mul_elemwise(parent_countrol_importance, parent_relax_le)) -
# cvx.sum_entries(cvx.mul_elemwise(parent_countrol_importance, parent_relax_ge))
# )
z = solver.Sum(x[z, hh] * log_resid_weights[z, hh]
for z in range(sub_zone_count)
for hh in range(sample_count)) + \
solver.Sum(x[z, hh] * log_parent_resid_weights[hh]
for hh in range(sample_count)
for z in range(sub_zone_count)) - \
solver.Sum(relax_le[z, c] * sub_countrol_importance[c]
for z in range(sub_zone_count)
for c in range(sub_control_count) if c != total_hh_sub_control_index) - \
solver.Sum(relax_ge[z, c] * sub_countrol_importance[c]
for z in range(sub_zone_count)
for c in range(sub_control_count) if c != total_hh_sub_control_index) - \
solver.Sum(parent_relax_le[c] * parent_countrol_importance[c]
for c in range(parent_control_count)) - \
solver.Sum(parent_relax_ge[c] * parent_countrol_importance[c]
for c in range(parent_control_count))
objective = solver.Maximize(z)
# constraints = [
# # - sub inequality constraints
# (x * sub_incidence) - relax_le >= 0,
# (x * sub_incidence) - relax_le <= lp_right_hand_side,
# (x * sub_incidence) + relax_ge >= lp_right_hand_side,
# (x * sub_incidence) + relax_ge <= hh_constraint_ge_bound,
# ]
# - sub inequality constraints
sub_constraint_ge = {}
sub_constraint_le = {}
for z in range(sub_zone_count):
for c in range(sub_control_count):
# don't add inequality constraints for total households control
if c == total_hh_sub_control_index:
continue
sub_constraint_le[z, c] = \
solver.Constraint(0, lp_right_hand_side[z, c])
for hh in range(sample_count):
sub_constraint_le[z, c].SetCoefficient(x[z, hh], sub_incidence[hh, c])
sub_constraint_le[z, c].SetCoefficient(relax_le[z, c], -1.0)
sub_constraint_ge[z, c] = \
solver.Constraint(lp_right_hand_side[z, c], hh_constraint_ge_bound[z, c])
for hh in range(sample_count):
sub_constraint_ge[z, c].SetCoefficient(x[z, hh], sub_incidence[hh, c])
sub_constraint_ge[z, c].SetCoefficient(relax_ge[z, c], 1.0)
# constraints = [
# # - equality constraint for the total households control
# cvx.sum_entries(x, axis=1) == total_hh_right_hand_side,
# ]
# - equality constraint for the total households control
constraint_eq = {}
for z in range(sub_zone_count):
total_hh_constraint = total_hh_right_hand_side[z]
constraint_eq[z] = solver.Constraint(total_hh_constraint, total_hh_constraint)
for hh in range(sample_count):
constraint_eq[z].SetCoefficient(x[z, hh], 1.0)
# constraints = [
# cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le >= 0, # nopep8
# cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le <= parent_lp_right_hand_side, # nopep8
# | |
G, L):
for i in range(len(L) - 1):
area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
return w
def func_63949d009f634cc9b27a04766b475d10(U, G, L):
    """Trapezoid-rule area between lower chain L and upper chain U, plus
    cut-scan state initialization; returns the scan position x (0).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return x
def func_411a896678d040c593e4cf9c842c7c0f(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the (still empty) cuts list.

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return cuts
def func_029d683300554fbdb863aaf436793867(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the upper-chain index iU (0).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return iU
def func_b551156296d348b9942db45d25761505(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns ``part``, the target area per piece (area / G).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return part
def func_09e8864adb564413ae955a5233a1fee6(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the accumulated-area counter a (0).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return a
def func_d12690792af543dfba2c871eae37b9b5(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the lower-chain index iL (0).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return iL
def func_5e063018afcd4caea44b8d4f77954d69(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the last loop index i (from the U loop).

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation. Note ``i`` is still unbound when
    both chains have fewer than two points -- preserved as found.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return i
def func_3daf6663cafb45f5b51ab05cd40eab19(U, G, L):
    """Trapezoid-rule area between chains L and U plus cut-scan state
    initialization; returns the computed area.

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0 before accumulation.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    a = 0
    x = 0
    return area
def func_4857f1834f8643a18feb2b3d2c3adfd4(U, G, L):
    """Scan between lower chain L and upper chain U, placing cuts so each
    slice holds an equal share of the (doubled) area; returns the final
    upper-chain segment index iU.

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0. NOTE(review): unlike sibling variants, only U's
    contribution is accumulated here (no L loop) -- preserved as found.
    """
    import math  # math.sqrt used in the cut-position quadratic below

    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]  # vertical gap between the chains at the left edge
    a = 0
    x = 0
    while True:
        # slopes of the current lower/upper segments
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        # twice the area gained by advancing from x to nx
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            # next cut falls inside this span: solve the quadratic for dx
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
            if nx == nxL:
                iL += 1
            if nx == nxU:
                iU += 1
            if iL >= len(L) - 1:
                break
            if iU >= len(U) - 1:
                break
    return iU
def func_9fc3005a9af049b28a9d7f07ea241878(U, G, L):
    """Scan between lower chain L and upper chain U, placing equal-area cuts;
    returns nxU, the x-coordinate of the last upper-chain segment end reached.

    FIX: ``area`` was read before assignment (UnboundLocalError); it is now
    initialized to 0. NOTE(review): only U's contribution is accumulated
    (no L loop), matching the sibling variant -- preserved as found.
    """
    import math  # math.sqrt used in the cut-position quadratic below

    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]  # vertical gap between the chains at the left edge
    a = 0
    x = 0
    while True:
        # slopes of the current lower/upper segments
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)
        # twice the area gained by advancing from x to nx
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            # next cut falls inside this span: solve the quadratic for dx
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
            if nx == nxL:
                iL += 1
            if nx == nxU:
                iU += 1
            if iL >= len(L) - 1:
                break
            if iU >= len(U) - 1:
                break
    return nxU
def func_13071893c570466a99ccb020b9adf6c9(U, G, L):
for i in range(len(U) - 1):
area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
w = U[0][1] - L[0][1]
a = 0
x = 0
while True:
sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
s = sU - sL
nxL = L[iL + 1][0]
nxU = U[iU + 1][0]
nx = min(nxL, nxU)
na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
if a + na >= part:
dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
x += dx
a += 2 * w * dx + s * dx * dx
cuts.append(x)
w += s * dx
a = 0
else:
dx = nx - x
a += 2 * w * dx + s * dx * dx
x = nx
w += s * dx
if nx == | |
of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
        """
        # Literal env-var value stored under the "value" output key; per
        # value_from's docstring the two are mutually exclusive.
        return pulumi.get(self, "value")
    @property
    @pulumi.getter(name="valueFrom")
    def value_from(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFrom']:
        """
        Source for the environment variable's value. Cannot be used if value is not empty.
        """
        # Indirect env-var source (ConfigMap/field/resource/secret reference),
        # exposed under the camelCase wire name "valueFrom".
        return pulumi.get(self, "value_from")
    def _translate_property(self, prop):
        # Translate a property name through the camelCase->snake_case table,
        # falling back to the name unchanged when no (truthy) mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type; @pulumi.output_type
# introspects the class structure, so only comments are added here.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFrom(dict):
    """
    Source for the environment variable's value. Cannot be used if value is not empty.
    """
    def __init__(__self__, *,
                 config_map_key_ref: Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromConfigMapKeyRef'] = None,
                 field_ref: Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromFieldRef'] = None,
                 resource_field_ref: Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRef'] = None,
                 secret_key_ref: Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromSecretKeyRef'] = None):
        """
        Source for the environment variable's value. Cannot be used if value is not empty.
        :param 'DatadogAgentSpecAgentProcessEnvValueFromConfigMapKeyRefArgs' config_map_key_ref: Selects a key of a ConfigMap.
        :param 'DatadogAgentSpecAgentProcessEnvValueFromFieldRefArgs' field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param 'DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRefArgs' resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        :param 'DatadogAgentSpecAgentProcessEnvValueFromSecretKeyRefArgs' secret_key_ref: Selects a key of a secret in the pod's namespace
        """
        # Only store arguments that were provided, so absent fields stay
        # missing from the underlying dict rather than becoming None.
        if config_map_key_ref is not None:
            pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
        if field_ref is not None:
            pulumi.set(__self__, "field_ref", field_ref)
        if resource_field_ref is not None:
            pulumi.set(__self__, "resource_field_ref", resource_field_ref)
        if secret_key_ref is not None:
            pulumi.set(__self__, "secret_key_ref", secret_key_ref)
    @property
    @pulumi.getter(name="configMapKeyRef")
    def config_map_key_ref(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromConfigMapKeyRef']:
        """
        Selects a key of a ConfigMap.
        """
        return pulumi.get(self, "config_map_key_ref")
    @property
    @pulumi.getter(name="fieldRef")
    def field_ref(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromFieldRef']:
        """
        Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        """
        return pulumi.get(self, "field_ref")
    @property
    @pulumi.getter(name="resourceFieldRef")
    def resource_field_ref(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRef']:
        """
        Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        """
        return pulumi.get(self, "resource_field_ref")
    @property
    @pulumi.getter(name="secretKeyRef")
    def secret_key_ref(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromSecretKeyRef']:
        """
        Selects a key of a secret in the pod's namespace
        """
        return pulumi.get(self, "secret_key_ref")
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type; comments only.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFromConfigMapKeyRef(dict):
    """
    Selects a key of a ConfigMap.
    """
    def __init__(__self__, *,
                 key: str,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        """
        Selects a key of a ConfigMap.
        :param str key: The key to select.
        :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        :param bool optional: Specify whether the ConfigMap or its key must be defined
        """
        # "key" is required; the optional arguments are stored only when given.
        pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key to select.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        """
        Specify whether the ConfigMap or its key must be defined
        """
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type; comments only.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFromFieldRef(dict):
    """
    Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
    """
    def __init__(__self__, *,
                 field_path: str,
                 api_version: Optional[str] = None):
        """
        Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param str field_path: Path of the field to select in the specified API version.
        :param str api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        # "field_path" is required; "api_version" is stored only when given.
        pulumi.set(__self__, "field_path", field_path)
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> str:
        """
        Path of the field to select in the specified API version.
        """
        return pulumi.get(self, "field_path")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        return pulumi.get(self, "api_version")
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type; comments only.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRef(dict):
    """
    Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
    """
    def __init__(__self__, *,
                 resource: str,
                 container_name: Optional[str] = None,
                 divisor: Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRefDivisor'] = None):
        """
        Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        :param str resource: Required: resource to select
        :param str container_name: Container name: required for volumes, optional for env vars
        :param 'DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRefDivisorArgs' divisor: Specifies the output format of the exposed resources, defaults to "1"
        """
        # "resource" is required; the optional arguments are stored only when given.
        pulumi.set(__self__, "resource", resource)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if divisor is not None:
            pulumi.set(__self__, "divisor", divisor)
    @property
    @pulumi.getter
    def resource(self) -> str:
        """
        Required: resource to select
        """
        return pulumi.get(self, "resource")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container name: required for volumes, optional for env vars
        """
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def divisor(self) -> Optional['outputs.DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRefDivisor']:
        """
        Specifies the output format of the exposed resources, defaults to "1"
        """
        return pulumi.get(self, "divisor")
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type with no schema'd fields of
# its own (the divisor quantity is carried by the underlying dict); comments only.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFromResourceFieldRefDivisor(dict):
    def __init__(__self__):
        # No declared fields; nothing to store.
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type; comments only.
@pulumi.output_type
class DatadogAgentSpecAgentProcessEnvValueFromSecretKeyRef(dict):
    """
    Selects a key of a secret in the pod's namespace
    """
    def __init__(__self__, *,
                 key: str,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        """
        Selects a key of a secret in the pod's namespace
        :param str key: The key of the secret to select from. Must be a valid secret key.
        :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        :param bool optional: Specify whether the Secret or its key must be defined
        """
        # "key" is required; the optional arguments are stored only when given.
        pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key of the secret to select from. Must be a valid secret key.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        """
        Specify whether the Secret or its key must be defined
        """
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # camelCase -> snake_case attribute translation for dict-style access.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentProcessResources(dict):
    """
    Datadog Process Agent resource requests and limits Make sure to keep requests and limits equal to keep the pods in the Guaranteed QoS class Ref: http://kubernetes.io/docs/user-guide/compute-resources/
    """
    def __init__(__self__, *,
                 limits: Optional[Mapping[str, 'outputs.DatadogAgentSpecAgentProcessResourcesLimits']] = None,
                 requests: Optional[Mapping[str, 'outputs.DatadogAgentSpecAgentProcessResourcesRequests']] = None):
        """
        Datadog Process Agent resource requests and limits Make sure to keep requests and limits equal to keep the pods in the Guaranteed QoS class Ref: http://kubernetes.io/docs/user-guide/compute-resources/
        :param Mapping[str, 'DatadogAgentSpecAgentProcessResourcesLimitsArgs'] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param Mapping[str, 'DatadogAgentSpecAgentProcessResourcesRequestsArgs'] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        # Only record fields the caller actually supplied.
        for field_name, field_value in (("limits", limits), ("requests", requests)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def limits(self) -> Optional[Mapping[str, 'outputs.DatadogAgentSpecAgentProcessResourcesLimits']]:
        """
        Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "limits")

    @property
    @pulumi.getter
    def requests(self) -> Optional[Mapping[str, 'outputs.DatadogAgentSpecAgentProcessResourcesRequests']]:
        """
        Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "requests")

    def _translate_property(self, prop):
        # camelCase -> snake_case; keep the untranslated name otherwise.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class DatadogAgentSpecAgentProcessResourcesLimits(dict):
    """Auto-generated output type; the schema declares no properties for this object."""
    def __init__(__self__):
        # No schema fields to record.
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case, falling back to the untranslated name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentProcessResourcesRequests(dict):
    """Auto-generated output type; the schema declares no properties for this object."""
    def __init__(__self__):
        # No schema fields to record.
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case, falling back to the untranslated name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecAgentRbac(dict):
"""
RBAC | |
chunks if multiple elements of the same type are in one page."""
assert isinstance(js, str), ('Added Javascript should be of type str "%s"' % js)
if name is None or not name in self._jsNames:
self._jsOut.append(js)
if name is not None:
self._jsNames.add(name)
def hasJs(self):
    """Answer the number of collected JS chunks (truthy when any JS was added)."""
    return len(self._jsOut)
def importJs(self, path):
    """Import a chunk of UTF-8 JS code from *path* and append it to the
    collected JS output; emit an HTML comment if the file is missing.
    (Docstring fixed: it previously said "CSS" for this JS importer.)"""
    if os.path.exists(path):
        # Context manager guarantees the handle is closed, also on read errors.
        with codecs.open(path, 'r', 'utf-8') as f:
            self.addJs(f.read())
    else:
        self.comment('Cannot find JS file "%s"' % path)
def copyPath(self, path):
    """Collect the path of a file to copy to the output website."""
    self._copyPaths.append(path)
def getJs(self):
    """Answer the collected JS chunks joined into a single flat string."""
    return ''.join(self._jsOut)
def writeJs(self, path):
    """Write the collected JS chunks to *path* as UTF-8.
    IOErrors are reported on stdout instead of raised, matching the
    builder's best-effort export behavior."""
    try:
        # `with` closes the handle even if the write itself fails.
        with codecs.open(path, 'w', 'utf-8') as f:
            f.write(self.getJs())
    except IOError:
        print('[%s.writeJs] Cannot write JS file "%s"' % (self.__class__.__name__, path))
# C S S
def addCss(self, css):
    """Append a CSS chunk to the ordered CSS output list.
    Empty or None chunks are silently ignored."""
    if not css:
        return
    assert isinstance(css, str), ('Added CSS should be of type str "%s"' % css)
    self._cssOut.append(css)
def containsCss(self, css):
    """Answer whether *css* already occurs in the accumulated CSS output."""
    # `in` already yields a bool; the former bool(...) wrapper was redundant.
    return css in self.getCss()
def getCss(self):
    """Answer the joined content of self._cssOut as a single string."""
    return ''.join(self._cssOut)
def hasCss(self):
    """Answer the number of accumulated CSS chunks (truthy when any exist)."""
    return len(self._cssOut)
def importCss(self, path):
    """Import a chunk of UTF-8 CSS code from *path* and append it to the
    collected CSS output; emit an HTML comment if the file is missing."""
    if os.path.exists(path):
        # Context manager guarantees the handle is closed, also on read errors.
        with codecs.open(path, 'r', 'utf-8') as f:
            self.addCss(f.read())
    else:
        self.comment('Cannot find CSS file "%s"' % path)
def writeCss(self, path, css=None):
    """Write CSS to *path* as UTF-8.
    :param path: destination file path
    :param css: optional explicit CSS string; defaults to the collected chunks
    IOErrors are reported on stdout instead of raised."""
    try:
        if css is None:
            css = self.getCss()
        # `with` closes the handle even if the write itself fails.
        with codecs.open(path, 'w', 'utf-8') as f:
            f.write(css)
    except IOError:
        print('[HtmlBuilder.writeCss] Cannot write CSS file "%s"' % path)
def headerCss(self, name):
    """Add the CSS code to the header of the output page.
    >>> b = HtmlBuilder()
    >>> b.headerCss('NameOfCss')
    >>> 'Generated by PageBot' in ''.join(b._cssOut)
    True
    """
    # SECTION_CSS is a class-level template; the inner %-format builds the
    # banner text ("CSS of <name>" plus a creation stamp from now()).
    self.addCss(self.SECTION_CSS % ('CSS of "%s"\n\n\tGenerated by PageBot\n\tCreated %s' % (name, now())))
def resetCss(self):
    """Export the CSS to reset specific default behavior of browsers."""
    # RESET_CSS is a class-level constant holding the reset rules.
    self.addCss(self.RESET_CSS)
def sectionCss(self, title):
    """Add a named section marker in the CSS output.
    TODO: Make optional if compact CSS is needed."""
    self.addCss(self.SECTION_CSS % title)
# S A S S
def writeScss(self, path):
    """Write the collected SCSS variables to *path*, one `$name: value;`
    per line, sorted by variable name for stable output.
    IOErrors are reported on stdout instead of raised."""
    try:
        # `with` closes the handle even if a write fails mid-loop.
        with codecs.open(path, 'w', 'utf-8') as f:
            for scssId, value in sorted(self._scssVariables.items()):
                f.write('$%s: %s;\n' % (scssId, value))
    except IOError:
        # Error label fixed: this method is writeScss (message said "writeSass").
        print('[HtmlBuilder.writeScss] Cannot write SCSS file "%s"' % path)
def compileScss(self, scssPath, cssPath=None, compressed=True):
    """Compile *scssPath* with libsass and write the resulting CSS to
    *cssPath* (defaults to the same path with a .css suffix).
    NOTE(review): *compressed* is currently unused — the output-style flag
    is commented out below; confirm before relying on it."""
    if cssPath is None:
        cssPath = scssPath.replace('.scss', '.css')
    css = sass.compile(filename=scssPath)#, output_style={True:sass.SASS_STYLE_COMPRESSED}.get(compressed))
    # `with` closes the handle even if the write itself fails.
    with codecs.open(cssPath, 'w', 'utf-8') as f:
        f.write(css)
def build_scss(self, e, view):
    """Collect SCSS variables (margins, paddings, font and colors) for
    element *e* into self._scssVariables, keyed by a per-element prefix.
    :param e: element exposing cssId/cssClass, m*/p* spacing and css() lookups
    :param view: unused here; kept for signature compatibility with callers
    """
    scss = self._scssVariables
    if e.cssId: # If the #id is defined, then use that as CSS reference.
        scssId = e.cssId
    elif e.cssClass: # Otherwise for now, we only can generate CSS if the element has a class name defined.
        scssId = e.__class__.__name__ + '-' + e.cssClass.replace(' ', '_')
    else:
        scssId = e.__class__.__name__
    if upt(e.ml):
        scss[scssId+'-margin-left'] = e.ml
    if upt(e.mt):
        scss[scssId+'-margin-top'] = e.mt
    if upt(e.mb):
        scss[scssId+'-margin-bottom'] = e.mb
    if upt(e.mr):
        scss[scssId+'-margin-right'] = e.mr
    if upt(e.pl):
        scss[scssId+'-padding-left'] = e.pl  # Fixed: previously assigned e.ml (copy-paste bug).
    if upt(e.pt):
        scss[scssId+'-padding-top'] = e.pt
    if upt(e.pb):
        scss[scssId+'-padding-bottom'] = e.pb
    if upt(e.pr):
        scss[scssId+'-padding-right'] = e.pr
    if e.css('font') is not None:
        font = e.css('font')
        if not hasattr(font, 'path'): # In case it is not a PageBot Font instance.
            font = findFont(font, default=DEFAULT_FONT)
        assert font is not None
        scss[scssId+'-font-family'] = '"%s"' % font.info.fullName
    if e.css('fontSize') is not None:
        scss[scssId+'-font-size'] = e.css('fontSize')
    if e.css('fontStyle') is not None:
        scss[scssId+'-font-style'] = e.css('fontStyle')
    if e.css('fontWeight') is not None:
        scss[scssId+'-font-weight'] = e.css('fontWeight')
    if e.css('tracking') is not None:
        scss[scssId+'-letter-spacing'] = e.css('tracking')
    if e.css('fill') not in (noColor, None): # Must be Color instance
        scss[scssId+'-background-color'] = e.css('fill').css
    if e.css('textFill') not in (noColor, None): # Must be Color instance
        scss[scssId+'-color'] = e.css('textFill').css
def build_css(self, scss, selector=None, e=None, message=None):
    """Build the CSS output for the defined selector and style.
    :param scss: kept for signature compatibility (not read in this body)
    :param selector: CSS selector the rule is emitted for
    :param e: element whose spacing/style are serialised to CSS attributes
    :param message: optional trailing CSS comment
    """
    css = ''
    attributes = []
    if e:
        style = e.style
        if upt(e.ml):
            attributes.append('margin-left: %s;' % e.ml)
        if upt(e.mt):
            attributes.append('margin-top: %s;' % e.mt)
        if upt(e.mb):
            attributes.append('margin-bottom: %s;' % e.mb)
        if upt(e.mr):
            attributes.append('margin-right: %s;' % e.mr)
        if upt(e.pl):
            attributes.append('padding-left: %s;' % e.pl)
        if upt(e.pt):
            attributes.append('padding-top: %s;' % e.pt)
        if upt(e.pb):
            attributes.append('padding-bottom: %s;' % e.pb)
        if upt(e.pr):
            attributes.append('padding-right: %s;' % e.pr)
        if style.get('font') is not None:
            attributes.append('font-family: "%s";' % style['font'])
        if style.get('fontSize') is not None:
            attributes.append('font-size: %s;' % style['fontSize'])
        if style.get('fontStyle') is not None:
            attributes.append('font-style: %s;' % style['fontStyle'])
        if style.get('fontWeight') is not None:
            attributes.append('font-weight: %s;' % style['fontWeight'])
        if style.get('tracking') is not None:
            attributes.append('letter-spacing: %s;' % style['tracking'])
        if style.get('fill') not in (noColor, None): # Must be Color instance
            attributes.append('background-color: %s;' % style['fill'].css)
        if style.get('textFill') not in (noColor, None): # Must be Color instance
            attributes.append('color: %s;' % style['textFill'].css)
        value = style.get('transition')
        if value is not None:
            # Fixed: these were emitted as CSS-invalid "transition=...;".
            # CSS declarations use "name: value;".
            attributes.append('transition: %s;' % value)
            attributes.append('-webkit-transition: %s;' % value)
            attributes.append('-moz-transition: %s;' % value)
            attributes.append('-o-transition: %s;' % value)
        if selector is not None and attributes:
            css += '%s {\n\t%s} ' % (selector, '\n\t'.join(attributes))
        if message is not None:
            css += '/* %s */' % message
        css += '\n'
        self.addCss(css)
        b = HtmlBuilder()
        # Write all collected CSS variables into one file.
        # NOTE(review): this writes the *fresh* builder's _scssVariables — if
        # that attribute is per-instance this truncates DEFAULT_CSS_PATH to
        # nothing; `self.writeScss(self.DEFAULT_CSS_PATH)` looks intended.
        b.writeScss(self.DEFAULT_CSS_PATH)
# H T M L
def addHtml(self, html):
    """Add the HTML chunk to self.html, the ordered list of HTML for
    output. Test if the html is a plain string or of type
    HtmlString(BabelString). Otherwise raise an error, because we don't
    want to support BabelString conversion. They should have been created
    of the right type in the context from the start."""
    # EAFP: BabelString-like objects carry their accumulated markup on `.s`;
    # anything without that attribute is coerced with str().
    # NOTE(review): if `html.s` exists but is not a str, the assert escapes
    # this try/except — confirm that hard failure is intended.
    try:
        assert isinstance(html.s, str)
        html = html.s # Get the collected html from the BabelString.
    except AttributeError:
        html = str(html) # Make sure it is a string
    self._htmlOut.append(html)

write = addHtml  # Alias: lets the builder be used as a stream-like writer.
def importHtml(self, path):
    """Import a chunk of UTF-8 HTML code from *path* and append it to the
    collected HTML output; emit an HTML comment if the file is missing."""
    if os.path.exists(path):
        # Context manager guarantees the handle is closed, also on read errors.
        with codecs.open(path, 'r', 'utf-8') as f:
            self.addHtml(f.read())
    else:
        self.comment('Cannot find HTML file "%s"' % path)
def writeHtml(self, path):
    """Write the collected HTML chunks to *path* as UTF-8.
    IOErrors are reported on stdout instead of raised."""
    try:
        # `with` closes the handle even if the write itself fails.
        with codecs.open(path, 'w', 'utf-8') as f:
            f.write(self.getHtml())
    except IOError:
        print('### Cannot write HTML file "%s"' % path)
def getHtml(self):
    """Answer the accumulated HTML as a single string."""
    return ''.join(self._htmlOut)
def clearHtml(self):
    """Clear the output stream, as should be done after each page export.
    This is likely to be done by Page elements, starting to render a new page.
    The content of previous pages then should be cleared."""
    # Rebinds (rather than mutates) so any external references keep the old list.
    self._htmlOut = []
def clearCss(self):
    """We can safely clear the CSS, because the local CSS is not intended
    to collect all for the entire site. This is just for local additions in
    the page. This is likely to be done by Page elements, starting to
    render a new page. The content of previous pages then should be
    cleared."""
    self._cssOut = []
def docType(self, s=None):
    """Write the document type declaration; defaults to '<!DOCTYPE html>'."""
    doctype = s or 'html'
    self.write('<!DOCTYPE %s>\n' % doctype)
def html(self, xmlns=None, **args):
    """
    <www href="http://www.w3schools.com/tags/tag_html.asp" target="external"/>
    self.html(xmlns="http://www.w3.org/1999/xhtml", dir=ltr, lang=no, xmllang=no)
    ...
    self._html()
    Default value for xmlns is "http://www.w3.org/1999/xhtml".
    >>> b = HtmlBuilder()
    >>> b.compact = True
    >>> b.html()
    >>> b._html()
    >>> b.getHtml()
    '<html xmlns="http://www.w3.org/1999/xhtml"></html>'
    """
    self.write('<html xmlns="%s"' % (xmlns or 'http://www.w3.org/1999/xhtml'))
    # Remaining keyword arguments are written out as tag attributes.
    self.getandwrite_attributes('html', args)
    self.write('>')
    self.tabIn()
    # Push as last, so we can see the current tag on the stack
    self._pushTag('html')
def _html(self):
    """Close the <html> tag opened by self.html()."""
    self._closeTag('html')
def head(self, **args):
    """The head element can contain information about the document. The
    browser does not display the "head information" to the user. The
    following tags can be in the head section: base, link, meta, script,
    style and title.
    <www href="http://www.w3schools.com/tags/tag_head.asp" target="external"/>
    self.head()
    ...
    self._head()
    """
    self.tabs()
    self.write('<head')
    # Keyword arguments are written out as attributes of the <head> tag.
    self.getandwrite_attributes('head', args)
    self.write('>')
    # Push as last, so we can see the current tag on the stack
    self._pushTag('head')
def _head(self):
    """Close the <head> tag opened by self.head()."""
    self._closeTag('head')
def title(self):
"""This tag defines the title of the document.
<www href="http://www.w3schools.com/tags/tag_title.asp" target="external"/>
self.title()
...
self._title()
>>> b = | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import chart_studio.plotly as py
import plotly.graph_objs as go
import requests
import pandas as pd
import json
import datetime as dt
import flask
import os
import pymongo
from sshtunnel import SSHTunnelForwarder
from plotly.subplots import make_subplots
from newsapi import NewsApiClient
from app.currentsentiment import get_score
from data.prediction_UI import get_prediction
from textwrap import dedent as d
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from plotly import tools
from datetime import datetime, timedelta
__author__ = "<NAME>"
__status__ = "Production"
# User-facing "about" text rendered on the dashboard.
aboutus = """With Sentrade you can explore the correlation between financial data and sentiment analysis.
Once a ticker is selected, you will see its recent financial data as well as a sentiment analysis score based on the latest news.
We use this sentiment score to predict the stock movement for the current day. To do this, we trained a machine learning model on historical Tweets. You can explore our historical data by clicking on the graph. You will be able to see the financial data, sentiment score and relevant tweets from the selected day.
This is a temporary website built by 6 MSc students from Imperial College London and it will be removed in June 2020.
SenTrade makes no express or implied warranty that the website will be updated timely: please do not use it for trading purposes. SenTrade will not be liable for any damages or losses caused by the use of information provided.
If you are interested in the raw sentiment data to do your own analysis, call REST API via
http://api-sentrade.doc.ic.ac.uk/data/<company_name>
where company name can be chosen from: apple, amazon, facebook, google, microsoft, netflix, tesla, uber.
"""
# Hidden easter-egg text (kept verbatim; it is displayed to users as-is).
easter_egg_message = """
‘三’百年前,将军前去战场,战事胶着,将军心急如焚,提脚
‘踹’向地面,忽见地面裂开一条细缝,金光闪烁,掘地一尺有余
‘得’宝剑一柄,后将军奋勇杀敌,所向披靡,战事得胜,将军获封为封疆大吏
后世人们为了纪念这个故事,将此事编为歌谣,传颂至今。歌名唤作‘三踹得’
"""
# Maps a ticker symbol to its MongoDB collection/database name.
company_db_name = {
    "AMZN" : "amazon",
    "AAPL" : "apple",
    "FB" : "facebook",
    "GOOG" : "google",
    "MSFT" : "microsoft",
    "NFLX" : "netflix",
    "TSLA" : "tesla",
    "UBER" : "uber"
}
def Topbar(ticker):
    """
    Returns the topbar.
    :param ticker: string of the ticker
    :return topbar: html.Div (None when no ticker is selected)
    """
    if not ticker:
        return
    # Display names for the supported tickers.
    companies = {
        "AMZN" : "Amazon.com Inc.",
        "AAPL" : "Apple Inc.",
        "FB" : "Facebook Inc.",
        "GOOG" : "Alphabet Inc.",
        "MSFT" : "Microsoft Corporation",
        "NFLX" : "Netflix Inc.",
        "TSLA" : "Tesla Inc.",
        "UBER" : "Uber Technologies Inc."
    }
    db_client = pymongo.MongoClient(os.environ["CLIENT_ADDR"])
    stock_price_db = db_client.stock_data
    # Sorting on $natural descending with limit(1) yields the newest record.
    records = stock_price_db[company_db_name[ticker]].find().sort([("$natural", -1)]).limit(1)
    for record in records:
        price = record['close']
        gain = price - record['open']
    # NOTE(review): `price`/`gain` are unbound if the collection is empty —
    # confirm the collections are always populated.
    gain_style = {
        'margin-left':'7px',
        'margin-top':'5px',
        'margin-right':'14px',
        'width':'auto'}
    # Colour the day's gain: red for losses/flat, green (with "+") for gains.
    if gain <= 0:
        gain = "{:.2f}".format(gain)
        gain_style['color'] = 'red'
    else:
        gain = "{:.2f}".format(gain)
        gain = "+" + gain
        gain_style['color'] = 'green'
    prediction_string, prediction_colour = Prediction(ticker)
    topbar = html.Div([
        html.H3(ticker,
            style={
                'font-family':'sans-serif',
                'font-weight':'700',
                'font-size':'2.3rem',
                'margin-top':'10px',
                'margin-left': '24.5px',
                'letter-spacing':'0.5px',
                'width':'auto'
            }),
        html.H6(companies[ticker],
            style={
                'margin-top':'33px',
                'margin-left':'10px',
                'color':'grey',
                'font-weight': '600',
                'width':'auto'
            }),
        # Closing price plus the day's gain, pinned to the right.
        html.Div([
            html.Div([
                html.P(price,
                    style={
                        'font-family':'sans-serif',
                        'margin-top':'2px',
                        'font-size':'1.2em',
                        'font-weight': '900',
                        'width':'auto',
                        'color':'black',
                        'margin-bottom':'0'
                    }),
                html.P("At Close",
                    style={
                        'font-family':'sans-serif',
                        'color':'#737373',
                        'font-size':'8pt',
                        'font-weight':'500',
                        'margin-top':'0',
                    })
            ]),
            html.Div([
                html.P(gain,
                    style=gain_style),
            ])],
            style={
                'display':'flex',
                'margin-top':'6px',
                'right':'460px',
                'position':'absolute',
                'border-right':'1px solid #e5e5e5',
                'height':'45px'
            }),
        # Prediction badge, coloured by the model's outcome.
        html.Div([
            html.P(prediction_string,
                style={
                    'font-family':'sans-serif',
                    'font-weight':'500',
                    'color':'#4F594F'
                }
            )],
            style={
                'height':'40px',
                'line-height':'40px',
                'margin-top':'9px',
                'right':'115px',
                'position':'absolute',
                'width':'200px',
                'text-align':'center',
                'border-radius':'5px',
                'background' : prediction_colour
            })
        ],
        style={
            'height': '60px',
            'border-bottom':'1px solid #e5e5e5',
            'display':'flex'
        })
    return topbar
# Placeholder heading shown before any ticker has been selected.
instruction = html.H6(
    "Select a ticker",
    id= 'instruction'
)
def collect_stock_data(db, company, close, date):
    """
    Populate the output lists in-place with the company's stock records,
    ordered by date.
    :param db: the database
    :param company: the company collection name
    :param close: output list receiving closing prices
    :param date: output list receiving the matching dates
    """
    cursor = db[company].find().sort("date")
    for entry in cursor:
        close.append(entry["close"])
        date.append(entry["date"])
def collect_sentiment_data(db, company, bert, blob, date):
    """
    Populate the output lists in-place with the company's sentiment records,
    ordered by date.
    :param db: the database
    :param company: the company collection name
    :param bert: output list receiving BERT sentiment scores
    :param blob: output list receiving TextBlob/current sentiment scores
    :param date: output list receiving the matching dates
    """
    collection = db[company]
    # Pass 1: weekly TextBlob scores (zero scores are skipped).
    for entry in collection.find({"7_day_sentiment_score": {"$exists": True}}).sort("date"):
        if entry["7_day_sentiment_score"] != 0:
            blob.append(entry["7_day_sentiment_score"])
            date.append(entry["date"])
    # Pass 2: weekly BERT scores (zero scores are skipped).
    for entry in collection.find({"7_day_bert_sentiment_score": {"$exists": True}}).sort("date"):
        if entry["7_day_bert_sentiment_score"] != 0:
            bert.append(entry["7_day_bert_sentiment_score"])
    # Pass 3: current-day scores are appended to *blob* and *date* as well.
    # NOTE(review): this can make blob/date longer than bert — confirm the
    # consumer (Graph) tolerates the length mismatch.
    for entry in collection.find({"sentiment_current": {"$exists": True}}).sort("date"):
        if entry["sentiment_current"]:
            blob.append(entry["sentiment_current"])
            date.append(entry["date"])
def Graph(ticker):
    """
    Returns the graph figure with closing price and sentiment overlaid.
    :param ticker: string of the ticker (defaults to "AAPL" when falsy)
    :return fig: plotly.graph_object figure
    """
    db_client = pymongo.MongoClient(os.environ["CLIENT_ADDR"])
    if not ticker:
        ticker = "AAPL"
    stock_price_db = db_client.stock_data
    sentiment_db = db_client.sentiment_data
    close = []
    stock_date = []
    collect_stock_data(stock_price_db,company_db_name[ticker],close,stock_date)
    bert_polarity = []
    blob_polarity = []
    sent_date = []
    collect_sentiment_data(sentiment_db,company_db_name[ticker],bert_polarity,blob_polarity,sent_date)
    # Rescale both score series to 0..100 and average them per day:
    # bert is multiplied by 100; blob is mapped from [-1, 1] via (x+1)/2*100.
    # NOTE(review): collect_sentiment_data can leave blob_polarity longer than
    # bert_polarity; the loop below only covers len(bert_polarity) — confirm.
    sentiment = []
    for i in range(len(bert_polarity)):
        bert = bert_polarity[i]
        bert *= 100
        bert_polarity[i] = bert
        blob = blob_polarity[i] + 1
        blob /= 2
        blob *= 100
        blob_polarity[i] = blob
        score = bert + blob
        score /= 2
        sentiment.append(score)
    # Latest record decides the line colour: green for a gain, red otherwise.
    records = stock_price_db[company_db_name[ticker]].find().sort([("$natural", -1)]).limit(1)
    for record in records:
        price = record['close']
        gain = price - record['open']
    stock_color = 'rgb(57,126,46)'
    if gain <= 0:
        stock_color = 'rgb(204,36,34)'
    eth_close = go.Scatter(
        y = close,
        x = stock_date,
        name= "Close",
        mode = "lines",
        line=dict(color=stock_color)
    )
    eth_polarity = go.Scatter(
        y = sentiment,
        x = sent_date,
        name = "Sentiment",
        mode = "lines",
        line=dict(color="rgba(111,192,245,0.8)")
    )
    # Price on the primary y-axis, sentiment on the secondary one.
    fig = make_subplots(specs=[[{"secondary_y":True}]])
    fig.add_trace(eth_close,secondary_y=False)
    fig.add_trace(eth_polarity,secondary_y=True)
    fig.update_layout(
        margin= {'b': 0, 'r': 10, 'l': 60, 't': 0},
        legend= {'x': 0.35,'y':-0.1},
        xaxis=go.layout.XAxis(
            rangeslider=dict(
                visible=False
            ),
            range= ["2018-11-01","2019-09-30"],
            # Quick-range buttons (1 day .. all data).
            rangeselector=dict(
                buttons=list([
                    dict(count=1,
                         label=" 1D ",
                         step="day",
                         stepmode="backward"),
                    dict(count=7,
                         label=" 1W ",
                         step="day",
                         stepmode="backward"),
                    dict(count=1,
                         label=" 1M ",
                         step="month",
                         stepmode="backward"),
                    dict(count=3,
                         label=" 3M ",
                         step="month",
                         stepmode="backward"),
                    dict(count=6,
                         label=" 6M ",
                         step="month",
                         stepmode="backward"
                         ),
                    dict(count=1,
                         label=" 1Y ",
                         step="year",
                         stepmode="backward"),
                    dict(label=' ALL ',
                         step="all")
                ]),
                x=0.05,
                y=1.01,
                font=dict(
                    family="sans-serif",
                    size=15,
                    color="#828282"),
                bgcolor='#f5f5f5',
                activecolor='#dbdbdb'
            ),
            type="date"
        ),
        legend_orientation="h"
    )
    return fig
def Prediction(ticker):
    """
    Returns the prediction of the stock movement.
    :param ticker: string of the ticker
    :return string: human-readable prediction label
    :return colour: CSS colour the prediction should be displayed in
    """
    db_client = pymongo.MongoClient(os.environ["CLIENT_ADDR"])
    stock_price_db = db_client.stock_data
    # Sorting on $natural descending with limit(1) yields the newest record.
    records = stock_price_db[company_db_name[ticker]].find().sort([("$natural", -1)]).limit(1)
    date = None
    for record in records:
        date = record["date"]
    prediction = get_prediction(company_db_name[ticker], date)
    # Map the model's class codes to display label/colour pairs. Unknown
    # codes fall back to "not available" — the previous if-chain left
    # `string`/`colour` unbound and raised UnboundLocalError for them.
    outcomes = {
        -5: ("Prediction not available", "#f2f2f2"),
        0: ("Stable", "#f2f2f2"),
        1: ("Rise up to 5%", "rgba(3, 164, 3, 0.5)"),
        2: ("Rise over 5%", "rgba(3, 164, 3, 0.5)"),
        -1: ("Fall up to 5%", "rgba(164, 19, 3,0.5)"),
        -2: ("Fall over 5%", "rgba(164, 19, 3,0.5)"),
    }
    string, colour = outcomes.get(prediction, outcomes[-5])
    return string, colour
def Tweets(ticker, graphDate,default=False):
"""
Returns the tweets section.
:param ticker: string of the ticker
:param graphDate: the date of the tweets
:param default: default flag, True if section is in default
:return news: html.Div
"""
db_client = pymongo.MongoClient(os.environ["CLIENT_ADDR"])
if not ticker:
news = html.H3(
"",
style={
'margin-top':'0px',
'textAlign':'center',
'color':'#9C9C9C'
}
)
else:
if default:
api = NewsApiClient(api_key='954c05db19404ee99531875f66d9d138')
three_days_ago = datetime.strptime(graphDate,'%Y-%m-%d') - timedelta(days=3)
all_articles = api.get_everything(q=company_db_name[ticker],
sources='bloomberg,business-insider,financial-post,fortune,recode,reuters,techcrunch,techradar,the-verge',
from_param=three_days_ago,
to=graphDate,
language='en',
sort_by='relevancy',
page=2)
articles = []
links = []
for article in all_articles["articles"]:
articles.append(article["title"])
links.append(article["url"])
scores = []
for title in articles:
scores.append(get_score(title))
news = html.Div(
children = [
html.Div(
html.Img(src='assets/news-logo.png',
style={'height':'50px',
'margin-left':'30%'})),
html.Div(
className = "table-news",
children = [
html.Div(
children = [
html.Div(
children = [
html.A(
className = "td-link",
children = articles[i],
href = links[i],
target = "_blank",
)
],
style={
'height':'auto',
'width':'auto',
'font-size' : '0.8rem',
'font-family' : 'sans-serif',
'margin-left':'10px',
'margin-right':'10px',
'line-height':'20px'
}
)
],
style=tweetstyle(scores,i,True)
)
for i in range(len(articles))
],
style={
'margin-left' :'3%',
'margin-right': '3%',
'margin-bottom': '3%'
}
)
],
style={
'background-color':'#f2f2f2',
'border-radius':'5px',
'margin-right' : '5.5%',
'overflow':'scroll',
'display':'block',
'margin-top' : '2%',
'height':'570px'
}
)
return news
db = db_client.twitter_data[company_db_name[ticker]]
dates = db.distinct("date")
if graphDate in dates:
tweets = []
tweets_polarity = []
for record in db.aggregate([
{ "$match" : {"date" : graphDate}},
{ "$sample" : {"size" : 100 } }
]):
if record["original_text"] not in tweets:
tweets.append(record["original_text"])
tweets_polarity.append(record["polarity"])
else:
tweets = ["No Tweets."]
tweets_polarity = ["None"]
news = html.Div(
children = [
html.Div(
html.Img(src='./assets/Twitter_Logo_Blue.png',
style={'height':'50px',
'margin-left':'43%'})),
html.Div(
className = "table-news",
children = [
html.Div(
children = [
html.Div(
children = [
html.A(
className = | |
# <gh_stars>0  (stray scraper artifact; kept as a comment so the module parses)
"""Data extraction from raw FPGA output
Complete FPGA data extraction depends on Bpod extraction
"""
from collections import OrderedDict
import logging
from pathlib import Path, PureWindowsPath
import uuid
import matplotlib.pyplot as plt
import numpy as np
from pkg_resources import parse_version
import alf.io
from brainbox.core import Bunch
import ibllib.dsp as dsp
import ibllib.exceptions as err
from ibllib.io import raw_data_loaders, spikeglx
from ibllib.io.extractors import biased_trials, training_trials
import ibllib.io.extractors.base as extractors_base
from ibllib.io.extractors.training_wheel import extract_wheel_moves
import ibllib.plots as plots
_logger = logging.getLogger('ibllib')

SYNC_BATCH_SIZE_SECS = 100  # number of samples to read at once in bin file for sync
WHEEL_RADIUS_CM = 1  # stay in radians
WHEEL_TICKS = 1024  # encoder ticks per full revolution
BPOD_FPGA_DRIFT_THRESHOLD_PPM = 150  # throws an error if bpod to fpga clock drift is higher
F2TTL_THRESH = 0.01  # consecutive pulses with less than this threshold ignored
# Default sync-channel maps per probe generation: 3A carries sync channels on
# the 'ap' binary; 3B carries them on the 'nidq' binary (plus an imec sync
# line on 'ap').
CHMAPS = {'3A':
          {'ap':
           {'left_camera': 2,
            'right_camera': 3,
            'body_camera': 4,
            'bpod': 7,
            'frame2ttl': 12,
            'rotary_encoder_0': 13,
            'rotary_encoder_1': 14,
            'audio': 15
            }
           },
          '3B':
          {'nidq':
           {'left_camera': 0,
            'right_camera': 1,
            'body_camera': 2,
            'imec_sync': 3,
            'frame2ttl': 4,
            'rotary_encoder_0': 5,
            'rotary_encoder_1': 6,
            'audio': 7,
            'bpod': 16,
            'laser': 17,
            'laser_ttl': 18},
           'ap':
           {'imec_sync': 6}
           },
          }
def get_ibl_sync_map(ef, version):
    """
    Gets default channel map for the version/binary file type combination
    :param ef: ibllib.io.spikeglx.glob_ephys_file dictionary with field 'ap' or 'nidq'
    :param version: probe version string, '3A' or '3B'
    :return: channel map dictionary
    :raises ValueError: when no stored sync map exists and the version/file
        combination has no default (previously an UnboundLocalError)
    """
    default_chmap = None
    if version == '3A':
        default_chmap = CHMAPS['3A']['ap']
    elif version == '3B':
        # 3B sync lives on the nidq binary when present, else on the ap binary.
        if ef.get('nidq', None):
            default_chmap = CHMAPS['3B']['nidq']
        elif ef.get('ap', None):
            default_chmap = CHMAPS['3B']['ap']
    # A sync map stored next to the binary always takes precedence.
    sync_map = spikeglx.get_sync_map(ef['path'])
    if sync_map:
        return sync_map
    if default_chmap is None:
        raise ValueError(f"No default sync channel map for version '{version}' and file {ef}")
    return default_chmap
def _sync_to_alf(raw_ephys_apfile, output_path=None, save=False, parts=''):
    """
    Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset
    :param raw_ephys_apfile: bin file containing ephys data or spike
    :param output_path: output directory
    :param save: bool write to disk only if True
    :param parts: string or list of strings that will be appended to the filename before extension
    :return: Bunch with times/channels/polarities (and output files when save=True)
    """
    # handles input argument: support ibllib.io.spikeglx.Reader, str and pathlib.Path
    if isinstance(raw_ephys_apfile, spikeglx.Reader):
        sr = raw_ephys_apfile
    else:
        raw_ephys_apfile = Path(raw_ephys_apfile)
        sr = spikeglx.Reader(raw_ephys_apfile)
    # if no output, need a temp folder to swap for big files
    if not output_path:
        output_path = raw_ephys_apfile.parent
    # uuid in the name avoids collisions between concurrent extractions
    file_ftcp = Path(output_path).joinpath(f'fronts_times_channel_polarity{str(uuid.uuid4())}.bin')
    # loop over chunks of the raw ephys file
    wg = dsp.WindowGenerator(sr.ns, int(SYNC_BATCH_SIZE_SECS * sr.fs), overlap=1)
    fid_ftcp = open(file_ftcp, 'wb')
    for sl in wg.slice:
        ss = sr.read_sync(sl)
        ind, fronts = dsp.fronts(ss, axis=0)
        # columns: time (s, offset by the slice start), channel, polarity
        sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)]
        sav.tofile(fid_ftcp)
        # print progress
        wg.print_progress()
    # close temp file, read from it and delete
    fid_ftcp.close()
    tim_chan_pol = np.fromfile(str(file_ftcp))
    tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3))
    file_ftcp.unlink()
    sync = {'times': tim_chan_pol[:, 0],
            'channels': tim_chan_pol[:, 1],
            'polarities': tim_chan_pol[:, 2]}
    if save:
        out_files = alf.io.save_object_npy(output_path, sync, '_spikeglx_sync', parts=parts)
        return Bunch(sync), out_files
    else:
        return Bunch(sync)
def _assign_events_bpod(bpod_t, bpod_polarities, ignore_first_valve=True):
"""
From detected fronts on the bpod sync traces, outputs the synchronisation events
related to trial start and valve opening
:param bpod_t: numpy vector containing times of fronts
:param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:param ignore_first_valve (True): removes detected valve events at indices le 2
:return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in
"""
TRIAL_START_TTL_LEN = 2.33e-4
ITI_TTL_LEN = 0.4
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(bpod_polarities)) == 2))
if bpod_polarities[0] == -1:
bpod_t = np.delete(bpod_t, 0)
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(bpod_t)[::2]
# detect start trials event assuming length is 0.23 ms except the first trial
i_trial_start = np.r_[0, np.where(dt <= TRIAL_START_TTL_LEN)[0] * 2]
t_trial_start = bpod_t[i_trial_start]
# the last trial is a dud and should be removed
t_trial_start = t_trial_start[:-1]
# valve open events are between 50ms to 300 ms
i_valve_open = np.where(np.logical_and(dt > TRIAL_START_TTL_LEN,
dt < ITI_TTL_LEN))[0] * 2
if ignore_first_valve:
i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))
t_valve_open = bpod_t[i_valve_open]
# ITI events are above 400 ms
i_iti_in = np.where(dt > ITI_TTL_LEN)[0] * 2
i_iti_in = np.delete(i_iti_in, np.where(i_valve_open < 2))
t_iti_in = bpod_t[i_iti_in]
## some debug plots when needed
# import matplotlib.pyplot as plt
# import ibllib.plots as plots
# events = {'id': np.zeros(bpod_t.shape), 't': bpod_t, 'p': bpod_polarities}
# events['id'][i_trial_start] = 1
# events['id'][i_valve_open] = 2
# events['id'][i_iti_in] = 3
# i_abnormal = np.where(np.diff(events['id'][bpod_polarities != -1]) == 0)
# t_abnormal = events['t'][bpod_polarities != -1][i_abnormal]
# assert(np.all(events != 0))
# plt.figure()
# plots.squares(bpod_t, bpod_polarities)
# plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.1, linewidth=0.5)
# plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.1, linewidth=0.5)
# plots.vertical_lines(t_iti_in, ymin=-0.2, ymax=1.1, linewidth=0.5)
# plt.plot(t_abnormal, t_abnormal * 0 + .5, 'k*')
# plt.legend(['raw fronts', 'trial start', 'valve open', 'iti_in'])
return t_trial_start, t_valve_open, t_iti_in
def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=1,
                                          coding='x4'):
    """
    Extracts the rotary encoder absolute position as function of time from fronts detected
    on the 2 channels. Outputs in units of radius parameters, by default radians
    Coding options detailed here: http://www.ni.com/tutorial/7109/pt/
    Here output is clockwise from subject perspective
    :param ta: time of fronts on channel A
    :param pa: polarity of fronts on channel A
    :param tb: time of fronts on channel B
    :param pb: polarity of fronts on channel B
    :param ticks: number of ticks corresponding to a full revolution (1024 for IBL rotary encoder)
    :param radius: radius of the wheel. Defaults to 1 for an output in radians
    :param coding: x1, x2 or x4 coding (IBL default is x4)
    :return: indices vector (ta) and position vector
    """
    if coding == 'x1':
        # x1: one count per quadrature cycle; direction taken from which
        # channel's rising front precedes the other's.
        ia = np.searchsorted(tb, ta[pa == 1])
        # NOTE(review): this bound compares against ta.size although `ia` was
        # computed by searching tb — confirm tb.size is not intended here.
        ia = ia[ia < ta.size]
        ia = ia[pa[ia] == 1]
        ib = np.searchsorted(ta, tb[pb == 1])
        ib = ib[ib < tb.size]
        ib = ib[pb[ib] == 1]
        t = np.r_[ta[ia], tb[ib]]
        p = np.r_[ia * 0 + 1, ib * 0 - 1]
        # re-interleave the two channels in time order before integrating
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        p = np.cumsum(p) / ticks * np.pi * 2 * radius
        return t, p
    elif coding == 'x2':
        # x2: count every front on A; direction from the preceding B polarity.
        p = pb[np.searchsorted(tb, ta) - 1] * pa
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 2
        return ta, p
    elif coding == 'x4':
        # x4: count every front on both channels, each signed by the other
        # channel's last polarity; four counts per quadrature cycle.
        p = np.r_[pb[np.searchsorted(tb, ta) - 1] * pa, -pa[np.searchsorted(ta, tb) - 1] * pb]
        t = np.r_[ta, tb]
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 4
        return t, p
def _assign_events_audio(audio_t, audio_polarities, return_indices=False):
"""
From detected fronts on the audio sync traces, outputs the synchronisation events
related to tone in
:param audio_t: numpy vector containing times of fronts
:param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:param return_indices (False): returns indices of tones
:return: numpy arrays t_ready_tone_in, t_error_tone_in
:return: numpy arrays ind_ready_tone_in, ind_error_tone_in if return_indices=True
"""
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(audio_polarities)) == 2))
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(audio_t)[::2]
# detect ready tone by length below 110 ms
i_ready_tone_in = np.r_[np.where(dt <= 0.11)[0] * 2]
t_ready_tone_in = audio_t[i_ready_tone_in]
# error tones are events lasting from 400ms to 600ms
i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 1.2))[0] * 2
t_error_tone_in = audio_t[i_error_tone_in]
if return_indices:
return t_ready_tone_in, t_error_tone_in, i_ready_tone_in, i_error_tone_in
else:
return t_ready_tone_in, t_error_tone_in
def _assign_events_to_trial(t_trial_start, t_event, take='last'):
"""
Assign events to a trial given trial start times and event times.
Trials without an event
result in nan value in output time vector.
The output has a consistent size with t_trial_start and ready to output to alf.
:param t_trial_start: numpy vector of trial start times
:param t_event: numpy vector of event times to assign to trials
:param take: 'last' or 'first' (optional, default 'last'): index to take in case of duplicates
:return: numpy array of event times with the same shape of trial start.
"""
# make sure the events are sorted
try:
assert(np.all(np.diff(t_trial_start) >= 0))
except AssertionError:
raise ValueError('Trial starts vector not sorted')
try:
assert(np.all(np.diff(t_event) >= 0))
except AssertionError:
raise ValueError('Events vector is not sorted')
# remove events that happened before the first trial start
t_event = t_event[t_event >= t_trial_start[0]]
ind = np.searchsorted(t_trial_start, t_event) - 1
t_event_nans = np.zeros_like(t_trial_start) * np.nan
# select first or last | |
[deg C] (HH2015 used 1.5 degC +/- 1 degC)
frontalablation_k = 2  # frontal ablation rate [yr-1]
af = 0.7  # Bulk flow parameter for frontal ablation (m^-0.5)
# Calving width dictionary to override RGI elevation bins, which can be highly inaccurate at the calving front
width_calving_dict_fullfn = main_directory + '/../Calving_data/calvingfront_widths.csv'
width_calving_df = pd.read_csv(width_calving_dict_fullfn)
# map RGIId -> calving front width (column 'front_width_m')
width_calving_dict = dict(zip(width_calving_df.RGIId, width_calving_df.front_width_m))
# Calving option (1=values from HH2015, 2=calibrate glaciers independently and use transfer fxns for others)
option_frontalablation_k = 1
# Calving parameter dictionary (according to Supplementary Table 3 in HH2015)
frontalablation_k0dict_fullfn = main_directory + '/../Calving_data/frontalablation_k0_dict.csv'
frontalablation_k0dict_df = pd.read_csv(frontalablation_k0dict_fullfn)
# map RGI order-1 region -> calving parameter k0
frontalablation_k0dict = dict(zip(frontalablation_k0dict_df.O1Region, frontalablation_k0dict_df.k0))
# Model parameter column names and filepaths
modelparams_colnames = ['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']
# Model parameter filepath (depends on option_calibration defined earlier in this file)
modelparams_fp = output_filepath + 'cal_opt' + str(option_calibration) + '/'
#modelparams_fp = output_filepath + 'cal_opt2_spc_20190806/'
#%% CLIMATE DATA
# ERA-INTERIM (Reference data)
# Variable names
era_varnames = ['temperature', 'precipitation', 'geopotential', 'temperature_pressurelevels']
# Note: do not change variable names as these are set to run with the download_erainterim_data.py script.
# If option 2 is being used to calculate the lapse rates, then the pressure level data is unnecessary.
# Dates (format: YYYYMMDD)
eraint_start_date = '19790101'
eraint_end_date = '20180501'
# Resolution
grid_res = '0.5/0.5'
# Bounding box (N/W/S/E)
#bounding_box = '90/0/-90/360'
bounding_box = '50/70/25/105'
# Lapse rate option
# option 0 - lapse rates are constant defined by input
# option 1 (default) - lapse rates derived from gcm pressure level temperature data (varies spatially and temporally)
# option 2 - lapse rates derived from surrounding pixels (varies spatially and temporally)
#   Note: Be careful with option 2 as the ocean vs land/glacier temperatures can cause unrealistic inversions
#         This is the option used by Marzeion et al. (2012)
option_lr_method = 1
# ERA5 filepath and filenames
era5_fp = '/Volumes/PyGEM_data/ERA5/'
era5_temp_fn = 'ERA5_temp_monthly.nc'
era5_tempstd_fn = 'ERA5_tempstd_monthly.nc'
era5_prec_fn = 'ERA5_totalprecip_monthly.nc'
era5_elev_fn = 'ERA5_geopotential_monthly.nc'
era5_pressureleveltemp_fn = 'ERA5_pressureleveltemp_monthly.nc'
era5_lr_fn = 'ERA5_lapserates_monthly.nc'
# ERA-Interim filepath and filenames
eraint_fp = main_directory + '/../Climate_data/ERA_Interim/download/'
eraint_temp_fn = 'ERAInterim_Temp2m_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_prec_fn = 'ERAInterim_TotalPrec_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_elev_fn = 'ERAInterim_geopotential.nc'
eraint_pressureleveltemp_fn = 'ERAInterim_pressureleveltemp_' + eraint_start_date + '_' + eraint_end_date + '.nc'
eraint_lr_fn = ('ERAInterim_lapserates_' + eraint_start_date + '_' + eraint_end_date + '_opt' + str(option_lr_method) +
                '_world.nc')
# CMIP5 (GCM data)
cmip5_fp_var_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_var_ending = '_r1i1p1_monNG/'
cmip5_fp_fx_prefix = main_directory + '/../Climate_data/cmip5/'
cmip5_fp_fx_ending = '_r0i0p0_fx/'
cmip5_fp_lr = main_directory + '/../Climate_data/cmip5/bias_adjusted_1995_2100/2018_0524/'
cmip5_lr_fn = 'biasadj_mon_lravg_1995_2015_R15.csv'
# COAWST (High-resolution climate data over HMA)
coawst_fp_unmerged = main_directory + '/../Climate_data/coawst/Monthly/'
coawst_fp = main_directory + '/../Climate_data/coawst/'
coawst_fn_prefix_d02 = 'wrfout_d02_Monthly_'
coawst_fn_prefix_d01 = 'wrfout_d01_Monthly_'
coawst_temp_fn_d02 = 'wrfout_d02_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d02 = 'wrfout_d02_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d02 = 'wrfout_d02_Monthly_HGHT.nc'
coawst_temp_fn_d01 = 'wrfout_d01_Monthly_T2_1999100100-2006123123.nc'
coawst_prec_fn_d01 = 'wrfout_d01_Monthly_TOTPRECIP_1999100100-2006123123.nc'
coawst_elev_fn_d01 = 'wrfout_d01_Monthly_HGHT.nc'
# COAWST variable names (temperature, total precipitation, height)
coawst_vns = ['T2', 'TOTPRECIP', 'HGHT']
# extent of the d02 (inner) domain [degrees]
coawst_d02_lon_min = 65
coawst_d02_lon_max = 99
coawst_d02_lat_min = 20
coawst_d02_lat_max = 38
#%% GLACIER DATA (RGI, ICE THICKNESS, ETC.)
# ===== RGI DATA =====
# Filepath for RGI files
rgi_fp = main_directory + '/../RGI/rgi60/00_rgi60_attribs/'
# Column names
rgi_lat_colname = 'CenLat'
rgi_lon_colname = 'CenLon'
elev_colname = 'elev'
indexname = 'GlacNo'
rgi_O1Id_colname = 'glacno'
rgi_glacno_float_colname = 'RGIId_float'
# Column names from table to drop
rgi_cols_drop = ['GLIMSId','BgnDate','EndDate','Status','Connect','Linkages','Name']
# ===== ADDITIONAL DATA (hypsometry, ice thickness, width) =====
# Filepath for the hypsometry files
binsize = 10  # Elevation bin height [m]
hyps_data = 'Farinotti'  # Hypsometry dataset (options: 'Huss' from GlacierMIP or 'Farinotti' from Farinotti etal 2019)
if hyps_data == 'Farinotti':
    option_shift_elevbins_20m = 0  # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)
    # Dictionary of hypsometry filenames, keyed by RGI order-1 region number
    hyps_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    hyps_filedict = {1: 'area_km2_01_Farinotti2019_10m.csv',}
    hyps_colsdrop = ['RGIId']
    # Thickness data
    thickness_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    thickness_filedict = {1: 'thickness_m_01_Farinotti2019_10m.csv'}
    thickness_colsdrop = ['RGIId']
    # Width data
    width_filepath = main_directory + '/../IceThickness_Farinotti/output/'
    width_filedict = {1: 'width_km_01_Farinotti2019_10m.csv'}
    width_colsdrop = ['RGIId']
elif hyps_data == 'Huss':
    option_shift_elevbins_20m = 1  # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)
    # Dictionary of hypsometry filenames, keyed by RGI order-1 region number
    # (Files from <NAME> should be manually pre-processed to be 'RGI-ID', 'Cont_range', and bins starting at 5)
    hyps_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    hyps_filedict = {
        1: 'area_01_Huss_Alaska_10m.csv',
        3: 'area_RGI03_10.csv',
        4: 'area_RGI04_10.csv',
        6: 'area_RGI06_10.csv',
        7: 'area_RGI07_10.csv',
        8: 'area_RGI08_10.csv',
        9: 'area_RGI09_10.csv',
        13: 'area_13_Huss_CentralAsia_10m.csv',
        14: 'area_14_Huss_SouthAsiaWest_10m.csv',
        15: 'area_15_Huss_SouthAsiaEast_10m.csv',
        16: 'area_16_Huss_LowLatitudes_10m.csv',
        17: 'area_17_Huss_SouthernAndes_10m.csv'}
    hyps_colsdrop = ['RGI-ID','Cont_range']
    # Thickness data
    thickness_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    thickness_filedict = {
        1: 'thickness_01_Huss_Alaska_10m.csv',
        3: 'thickness_RGI03_10.csv',
        4: 'thickness_RGI04_10.csv',
        6: 'thickness_RGI06_10.csv',
        7: 'thickness_RGI07_10.csv',
        8: 'thickness_RGI08_10.csv',
        9: 'thickness_RGI09_10.csv',
        13: 'thickness_13_Huss_CentralAsia_10m.csv',
        14: 'thickness_14_Huss_SouthAsiaWest_10m.csv',
        15: 'thickness_15_Huss_SouthAsiaEast_10m.csv',
        16: 'thickness_16_Huss_LowLatitudes_10m.csv',
        17: 'thickness_17_Huss_SouthernAndes_10m.csv'}
    thickness_colsdrop = ['RGI-ID','Cont_range']
    # Width data
    width_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'
    width_filedict = {
        1: 'width_01_Huss_Alaska_10m.csv',
        3: 'width_RGI03_10.csv',
        4: 'width_RGI04_10.csv',
        6: 'width_RGI06_10.csv',
        7: 'width_RGI07_10.csv',
        8: 'width_RGI08_10.csv',
        9: 'width_RGI09_10.csv',
        13: 'width_13_Huss_CentralAsia_10m.csv',
        14: 'width_14_Huss_SouthAsiaWest_10m.csv',
        15: 'width_15_Huss_SouthAsiaEast_10m.csv',
        16: 'width_16_Huss_LowLatitudes_10m.csv',
        17: 'width_17_Huss_SouthernAndes_10m.csv'}
    width_colsdrop = ['RGI-ID','Cont_range']
#%% MODEL TIME FRAME DATA
# Models require complete data for each year such that refreezing, scaling, etc. can be calculated
# Leap year option
option_leapyear = 1  # 1: include leap year days, 0: exclude leap years so February always has 28 days
# User specified start/end dates (format: 'MM-DD')
#  note: start and end dates must refer to whole years
startmonthday = '06-01'
endmonthday = '05-31'
wateryear_month_start = 10  # water year starting month
winter_month_start = 10  # first month of winter (for HMA winter is October 1 - April 30)
summer_month_start = 5  # first month of summer (for HMA summer is May 1 - Sept 30)
option_dates = 1  # 1: use dates from date table (first of each month), 2: dates from climate data
timestep = 'monthly'  # time step ('monthly' only option at present)
# Seasonal dictionaries for WGMS data that is not provided
lat_threshold = 75
# Winter (start/end) and Summer (start/end) months per latitude band
monthdict = {'northernmost': [9, 5, 6, 8],
             'north': [10, 4, 5, 9],
             'south': [4, 9, 10, 3],
             'southernmost': [3, 10, 11, 2]}
# Latitude threshold notes per RGI order-1 region:
# 01 - Alaska - < 75
# 02 - W Can - < 75
# 03 - N Can - > 74
# 04 - S Can - < 74
# 05 - Greenland - 60 - 80
# 06 - Iceland - < 75
# 07 - Svalbard - 70 - 80
# 08 - Scandinavia - < 70
# 09 - Russia - 72 - 82
# 10 - N Asia - 46 - 77
#%% CALIBRATION DATASETS
# Each dataset block declares its filepath/filename and the CSV column names used to read it.
# ===== SHEAN GEODETIC =====
#shean_fp = main_directory + '/../DEMs/Shean_2019_0213/'
#shean_fn = 'hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'
#shean_rgi_glacno_cn = 'RGIId'
#shean_mb_cn = 'mb_mwea'
#shean_mb_err_cn = 'mb_mwea_sigma'
#shean_time1_cn = 't1'
#shean_time2_cn = 't2'
#shean_area_cn = 'area_m2'
# ===== BERTHIER GEODETIC =====
berthier_fp = main_directory + '/../DEMs/Berthier/output/'
#berthier_fn = 'AK_all_20190913_wextrapolations_1980cheat.csv'
berthier_fn = 'AK_all_20190913.csv'
berthier_rgi_glacno_cn = 'RGIId'
berthier_mb_cn = 'mb_mwea'
berthier_mb_err_cn = 'mb_mwea_sigma'
berthier_time1_cn = 't1'
berthier_time2_cn = 't2'
berthier_area_cn = 'area_km2'
# ===== BRAUN GEODETIC =====
braun_fp = main_directory + '/../DEMs/Braun/output/'
braun_fn = 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv'
#braun_fn = 'braun_AK_all_20190924_wextrapolations.csv'
#braun_fn = 'braun_AK_all_20190924.csv'
braun_rgi_glacno_cn = 'RGIId'
braun_mb_cn = 'mb_mwea'
braun_mb_err_cn = 'mb_mwea_sigma'
braun_time1_cn = 't1'
braun_time2_cn = 't2'
braun_area_cn = 'area_km2'
# ===== BRUN GEODETIC =====
brun_fp = main_directory + '/../DEMs/'
brun_fn = 'Brun_Nature2017_MB_glacier-wide.csv'
brun_rgi_glacno_cn = 'GLA_ID'
brun_mb_cn = 'MB [m w.a a-1]'
brun_mb_err_cn = 'err. on MB [m w.e a-1]'
# NEED TO FINISH SETTING UP BRUN WITH CLASS_MBDATA
# ===== MAUER GEODETIC =====
mauer_fp = main_directory + '/../DEMs/'
mauer_fn = 'Mauer_geoMB_HMA_1970s_2000_min80pctCov.csv'
mauer_rgi_glacno_cn = 'RGIId'
mauer_mb_cn = 'geoMassBal'
mauer_mb_err_cn = 'geoMassBalSig'
mauer_time1_cn = 't1'
mauer_time2_cn = 't2'
# ===== MCNABB GEODETIC =====
mcnabb_fp = main_directory + '/../DEMs/McNabb_data/wgms_dv/'
mcnabb_fn = 'McNabb_data_all_preprocessed.csv'
mcnabb_rgiid_cn = 'RGIId'
mcnabb_mb_cn = 'mb_mwea'
mcnabb_mb_err_cn = 'mb_mwea_sigma'
mcnabb_time1_cn = 'date0'
mcnabb_time2_cn = 'date1'
mcnabb_area_cn = 'area'
# ===== LARSEN GEODETIC =====
larsen_fp = main_directory + '/../DEMs/larsen/'
larsen_fn = 'larsen2015_supplementdata_wRGIIds_v3.csv'
larsen_rgiid_cn = 'RGIId'
larsen_mb_cn = 'mb_mwea'
larsen_mb_err_cn = 'mb_mwea_sigma'
larsen_time1_cn = 'date0'
larsen_time2_cn = 'date1'
larsen_area_cn = 'area'
# ===== WGMS =====
wgms_datasets = ['wgms_d', 'wgms_ee']
#wgms_datasets = ['wgms_d']
wgms_fp = main_directory + '/../WGMS/DOI-WGMS-FoG-2018-06/'
wgms_rgi_glacno_cn = 'glacno'
wgms_obs_type_cn = 'obs_type'
# WGMS lookup tables information
wgms_lookup_fn = 'WGMS-FoG-2018-06-AA-GLACIER-ID-LUT.csv'
rgilookup_fullfn = main_directory + '/../RGI/rgi60/00_rgi60_links/00_rgi60_links.csv'
rgiv6_fn_prefix = main_directory + '/../RGI/rgi60/00_rgi60_attribs/' + '*'
rgiv5_fn_prefix = main_directory + '/../RGI/00_rgi50_attribs/' + '*'
# WGMS (d) geodetic mass balance information
wgms_d_fn = 'WGMS-FoG-2018-06-D-CHANGE.csv'
wgms_d_fn_preprocessed = 'wgms_d_rgiv6_preprocessed.csv'
wgms_d_thickness_chg_cn = 'THICKNESS_CHG'
wgms_d_thickness_chg_err_cn = 'THICKNESS_CHG_UNC'
wgms_d_volume_chg_cn = 'VOLUME_CHANGE'
wgms_d_volume_chg_err_cn = 'VOLUME_CHANGE_UNC'
wgms_d_z1_cn = 'LOWER_BOUND'
wgms_d_z2_cn = 'UPPER_BOUND'
# WGMS (e/ee) glaciological mass balance information
wgms_e_fn = 'WGMS-FoG-2018-06-E-MASS-BALANCE-OVERVIEW.csv'
wgms_ee_fn = 'WGMS-FoG-2018-06-EE-MASS-BALANCE.csv'
wgms_ee_fn_preprocessed = 'wgms_ee_rgiv6_preprocessed.csv'
wgms_ee_mb_cn = 'BALANCE'
wgms_ee_mb_err_cn = 'BALANCE_UNC'
wgms_ee_t1_cn = 'YEAR'
wgms_ee_z1_cn = 'LOWER_BOUND'
wgms_ee_z2_cn = 'UPPER_BOUND'
wgms_ee_period_cn = 'period'
# ===== COGLEY DATA =====
cogley_fp = main_directory + '/../Calibration_datasets/'
cogley_fn_preprocessed = 'Cogley_Arctic_processed_wInfo.csv'
cogley_rgi_glacno_cn = 'glacno'
cogley_mass_chg_cn = 'geo_mass_kgm2a'
cogley_mass_chg_err_cn = 'geo_mass_unc'
cogley_z1_cn = 'Zmin'
cogley_z2_cn = 'Zmax'
cogley_obs_type_cn = 'obs_type'
# ===== REGIONAL DATA =====
# Regional data refers to all measurements that have lumped multiple glaciers together
#  - a dictionary linking the regions to RGIIds is required
mb_group_fp = main_directory + '/../Calibration_datasets/'
mb_group_dict_fn = 'mb_group_dict.csv'
mb_group_data_fn = 'mb_group_data.csv'
mb_group_t1_cn = 'begin_period'
mb_group_t2_cn = 'end_period'
#%% REGIONS
grouping = ''
if grouping == 'watershed':
reg_vn = 'watershed'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_watershed.csv'
reg_csv = pd.read_csv(reg_dict_fn)
reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'kaab':
reg_vn = 'kaab_name'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_kaab.csv'
reg_csv = pd.read_csv(reg_dict_fn)
reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))
elif grouping == 'himap':
reg_vn = 'bolch_name'
reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_bolch.csv'
reg_csv = pd.read_csv(reg_dict_fn)
| |
1000.0
# Convert each yearly ERA field to an annual total:
# value * seconds-per-year (365 or 366 days x 86400 s) * 1000 / rho.
# NOTE(review): units of the input fields and the meaning of rho are not visible
# here — presumably a flux divided by water density; confirm against the loader above.
i = 0
for y in ERA_time:
    if calendar.isleap(y):
        ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*366.*86400./rho
        ERA[i,:,:] = 1000.*ERA[i,:,:]*366.*86400./rho
    else:
        ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*365.*86400./rho
        ERA[i,:,:] = 1000.*ERA[i,:,:]*365.*86400./rho
    i = i + 1
# Plots of precipitation climatologies ---
# Climatology (annual accumulation): time-mean over all years of each dataset
GPCP_climo = np.nanmean(GPCP, axis=0)
CMAP_climo = np.nanmean(CMAP, axis=0)
TCR_climo = np.nanmean(TCRfull, axis=0)
ERA_climo = np.nanmean(ERAfull, axis=0)
# 2x2 panel figure: one map per verification dataset, same color scale (0-4000)
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(GPCP_climo,lat_GPCP,lon_GPCP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,2)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(CMAP_climo,lat_CMAP,lon_CMAP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'CMAP '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,3)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(TCR_climo,lat2_TCR,lon2_TCR,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(2,2,4)
fmin = 0; fmax = 4000; nflevs=41
LMR_plotter(ERA_climo,lat2_ERA,lon2_ERA,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max')
plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold')
plt.clim(fmin,fmax)
fig.tight_layout()
plt.savefig('GPCP_CMAP_20CR_ERA_climo.png')
plt.close()
###############################################################
# END: load verification data                                 #
###############################################################

# ----------------------------------------------------------
# Adjust so that all anomaly data pertain to the mean over a
# common user-defined reference period (e.g. 20th century)
# ----------------------------------------------------------
print('Re-center on %s-%s period' % (str(ref_period[0]), str(ref_period[1])))
stime = ref_period[0]
etime = ref_period[1]
# LMR: subtract the reference-period time mean at every grid point
LMR = xam
smatch, ematch = find_date_indices(LMR_time,stime,etime)
LMR = LMR - np.mean(LMR[smatch:ematch,:,:],axis=0)
# verification datasets: same re-centering, each on its own time axis
smatch, ematch = find_date_indices(GPCP_time,stime,etime)
GPCP = GPCP - np.mean(GPCP[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(CMAP_time,stime,etime)
CMAP = CMAP - np.mean(CMAP[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(TCR_time,stime,etime)
TCR = TCR - np.mean(TCR[smatch:ematch,:,:],axis=0)
smatch, ematch = find_date_indices(ERA_time,stime,etime)
ERA = ERA - np.mean(ERA[smatch:ematch,:,:],axis=0)
# diagnostic printout of global statistics after re-centering
print('GPCP : Global: mean=', np.nanmean(GPCP), ' , std-dev=', np.nanstd(GPCP))
print('CMAP : Global: mean=', np.nanmean(CMAP), ' , std-dev=', np.nanstd(CMAP))
print('TCR : Global: mean=', np.nanmean(TCR), ' , std-dev=', np.nanstd(TCR))
print('ERA : Global: mean=', np.nanmean(ERA), ' , std-dev=', np.nanstd(ERA))
print('LMR : Global: mean=', np.nanmean(LMR), ' , std-dev=', np.nanstd(LMR))
# -----------------------------------
# Regridding the data for comparisons
# -----------------------------------
print('\n regridding data to a common grid...\n')
iplot_loc= False
#iplot_loc= True
# create instance of the spherical harmonics object for each grid
specob_lmr = Spharmt(nlon,nlat,gridtype='regular',legfunc='computed')
specob_gpcp = Spharmt(nlon_GPCP,nlat_GPCP,gridtype='regular',legfunc='computed')
specob_cmap = Spharmt(nlon_CMAP,nlat_CMAP,gridtype='regular',legfunc='computed')
specob_tcr = Spharmt(nlon_TCR,nlat_TCR,gridtype='regular',legfunc='computed')
specob_era = Spharmt(nlon_ERA,nlat_ERA,gridtype='regular',legfunc='computed')
# truncate to a lower resolution grid (common:21, 42, 62, 63, 85, 106, 255, 382, 799)
ntrunc_new = 42  # T42
ifix = np.remainder(ntrunc_new,2.0).astype(int)  # 1 when the truncation is odd, else 0
nlat_new = ntrunc_new + ifix
nlon_new = int(nlat_new*1.5)
# lat, lon grid in the truncated space
dlat = 90./((nlat_new-1)/2.)
dlon = 360./nlon_new
veclat = np.arange(-90.,90.+dlat,dlat)
veclon = np.arange(0.,360.,dlon)
blank = np.zeros([nlat_new,nlon_new])
# broadcast the 1-D lat/lon vectors into full 2-D coordinate arrays
lat2_new = (veclat + blank.T).T
lon2_new = (veclon + blank)
# create instance of the spherical harmonics object for the new grid
specob_new = Spharmt(nlon_new,nlat_new,gridtype='regular',legfunc='computed')
lmr_trunc = np.zeros([nyrs,nlat_new,nlon_new])
print('lmr_trunc shape: ' + str(np.shape(lmr_trunc)))
# loop over years of interest and transform...specify trange at top of file
# half-width (in years) of the centered averaging window
iw = 0
if nya > 0:
    # NOTE(review): true division — iw becomes a float when nya is even; if an integer
    # index offset is intended (py2 heritage), this should probably be (nya-1)//2
    iw = (nya-1)/2
cyears = list(range(trange[0],trange[1]))
# per-year scalar diagnostics between dataset pairs
# (presumably l=LMR, g=GPCP, c=CMAP, t=20CR, e=ERA — verify against the loop below)
lg_csave = np.zeros([len(cyears)])
lc_csave = np.zeros([len(cyears)])
lt_csave = np.zeros([len(cyears)])
le_csave = np.zeros([len(cyears)])
gc_csave = np.zeros([len(cyears)])
gt_csave = np.zeros([len(cyears)])
ge_csave = np.zeros([len(cyears)])
te_csave = np.zeros([len(cyears)])
# regridded full fields, one slice per analysis year
lmr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
gpcp_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
cmap_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
tcr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
era_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
# zonal means, one row per analysis year
lmr_zm = np.zeros([len(cyears),nlat_new])
gpcp_zm = np.zeros([len(cyears),nlat_new])
cmap_zm = np.zeros([len(cyears),nlat_new])
tcr_zm = np.zeros([len(cyears),nlat_new])
era_zm = np.zeros([len(cyears),nlat_new])
# year counter for the loop below (incremented before first use)
k = -1
for yr in cyears:
k = k + 1
LMR_smatch, LMR_ematch = find_date_indices(LMR_time,yr-iw,yr+iw+1)
GPCP_smatch, GPCP_ematch = find_date_indices(GPCP_time,yr-iw,yr+iw+1)
CMAP_smatch, CMAP_ematch = find_date_indices(CMAP_time,yr-iw,yr+iw+1)
TCR_smatch, TCR_ematch = find_date_indices(TCR_time,yr-iw,yr+iw+1)
ERA_smatch, ERA_ematch = find_date_indices(ERA_time,yr-iw,yr+iw+1)
print('------------------------------------------------------------------------')
print('working on year... %5s' %(str(yr)))
print(' %5s LMR index = %5s : LMR year = %5s' %(str(yr), str(LMR_smatch), str(LMR_time[LMR_smatch])))
if GPCP_smatch:
print(' %5s GPCP index = %5s : GPCP year = %5s' %(str(yr), str(GPCP_smatch), str(GPCP_time[GPCP_smatch])))
if CMAP_smatch:
print(' %5s CMAP index = %5s : CMAP year = %5s' %(str(yr), str(CMAP_smatch), str(CMAP_time[CMAP_smatch])))
if TCR_smatch:
print(' %5s TCP index = %5s : TCR year = %5s' %(str(yr), str(TCR_smatch), str(TCR_time[TCR_smatch])))
if ERA_smatch:
print(' %5s ERA index = %5s : ERA year = %5s' %(str(yr), str(ERA_smatch), str(ERA_time[ERA_smatch])))
# LMR
pdata_lmr = np.mean(LMR[LMR_smatch:LMR_ematch,:,:],0)
lmr_trunc = regrid(specob_lmr, specob_new, pdata_lmr, ntrunc=nlat_new-1, smooth=None)
# GPCP
if GPCP_smatch and GPCP_ematch:
pdata_gpcp = np.mean(GPCP[GPCP_smatch:GPCP_ematch,:,:],0)
else:
pdata_gpcp = np.zeros(shape=[nlat_GPCP,nlon_GPCP])
pdata_gpcp.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_gpcp).all():
gpcp_trunc = np.zeros(shape=[nlat_new,nlon_new])
gpcp_trunc.fill(np.nan)
else:
gpcp_trunc = regrid(specob_gpcp, specob_new, pdata_gpcp, ntrunc=nlat_new-1, smooth=None)
# CMAP
if CMAP_smatch and CMAP_ematch:
pdata_cmap = np.mean(CMAP[CMAP_smatch:CMAP_ematch,:,:],0)
else:
pdata_cmap = np.zeros(shape=[nlat_CMAP,nlon_CMAP])
pdata_cmap.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_cmap).all():
cmap_trunc = np.zeros(shape=[nlat_new,nlon_new])
cmap_trunc.fill(np.nan)
else:
cmap_trunc = regrid(specob_cmap, specob_new, pdata_cmap, ntrunc=nlat_new-1, smooth=None)
# TCR
if TCR_smatch and TCR_ematch:
pdata_tcr = np.mean(TCR[TCR_smatch:TCR_ematch,:,:],0)
else:
pdata_tcr = np.zeros(shape=[nlat_TCR,nlon_TCR])
pdata_tcr.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_tcr).all():
tcr_trunc = np.zeros(shape=[nlat_new,nlon_new])
tcr_trunc.fill(np.nan)
else:
tcr_trunc = regrid(specob_tcr, specob_new, pdata_tcr, ntrunc=nlat_new-1, smooth=None)
# ERA
if ERA_smatch and ERA_ematch:
pdata_era = np.mean(ERA[ERA_smatch:ERA_ematch,:,:],0)
else:
pdata_era = np.zeros(shape=[nlat_ERA,nlon_ERA])
pdata_era.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_era).all():
era_trunc = np.zeros(shape=[nlat_new,nlon_new])
era_trunc.fill(np.nan)
else:
era_trunc = regrid(specob_era, specob_new, pdata_era, ntrunc=nlat_new-1, smooth=None)
if iplot_individual_years:
# Precipitation products comparison figures (annually-averaged anomaly fields)
fmin = verif_dict[var][3]; fmax = verif_dict[var][4]; nflevs=41
fig = plt.figure()
ax = fig.add_subplot(5,1,1)
LMR_plotter(lmr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('LMR '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,2)
LMR_plotter(gpcp_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('GPCP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_gpcp*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,3)
LMR_plotter(cmap_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('CMAP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_cmap*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'CMAP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,4)
LMR_plotter(tcr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('20CR-V2 '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_tcr*verif_dict[var][6],lat_TCR,lon_TCR,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(5,1,5)
LMR_plotter(era_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('ERA20C '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_era*verif_dict[var][6],lat_ERA,lon_ERA,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
fig.tight_layout()
plt.savefig(nexp+'_LMR_GPCP_CMAP_TCR_ERA_'+verif_dict[var][1]+'anom_'+str(yr)+'.png')
plt.close()
# save the full grids
lmr_allyears[k,:,:] = lmr_trunc
gpcp_allyears[k,:,:] = gpcp_trunc
cmap_allyears[k,:,:] = cmap_trunc
tcr_allyears[k,:,:] = tcr_trunc
era_allyears[k,:,:] = era_trunc
# -----------------------
# zonal-mean verification
# -----------------------
# LMR
lmr_zm[k,:] = np.mean(lmr_trunc,1)
# GPCP
fracok = np.sum(np.isfinite(gpcp_trunc),axis=1,dtype=np.float16)/float(nlon_GPCP)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
gpcp_zm[k,i] = np.nanmean(gpcp_trunc[i,:],axis=1)
gpcp_zm[k,boolnotok] = np.NAN
# CMAP
fracok = np.sum(np.isfinite(cmap_trunc),axis=1,dtype=np.float16)/float(nlon_CMAP)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
cmap_zm[k,i] = np.nanmean(cmap_trunc[i,:],axis=1)
cmap_zm[k,boolnotok] = np.NAN
# TCR
tcr_zm[k,:] = np.mean(tcr_trunc,1)
# ERA
era_zm[k,:] = np.mean(era_trunc,1)
if iplot_loc:
ncints = 30
cmap = 'bwr'
nticks = 6 # number of ticks on the colorbar
# set contours based on GPCP
maxabs = np.nanmax(np.abs(gpcp_trunc))
# round the contour interval, and then set limits to fit
dc = np.round(maxabs*2/ncints,2)
cl = dc*ncints/2.
cints = np.linspace(-cl,cl,ncints,endpoint=True)
# compare LMR with GPCP, CMAP, TCR and ERA
fig = plt.figure()
ax = fig.add_subplot(3,2,1)
m1 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(lmr_trunc))
cs = m1.contourf(lon2_new,lat2_new,lmr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m1.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('LMR '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,3)
m2 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(gpcp_trunc))
cs = m2.contourf(lon2_new,lat2_new,gpcp_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m2.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('GPCP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,4)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(cmap_trunc))
cs = m3.contourf(lon2_new,lat2_new,cmap_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('CMAP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,5)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(tcr_trunc))
cs = m3.contourf(lon2_new,lat2_new,tcr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('20CR-V2 '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(3,2,6)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(era_trunc))
cs = m3.contourf(lon2_new,lat2_new,era_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('ERA20C '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
plt.clim(-maxabs,maxabs)
# get these numbers by adjusting the figure interactively!!!
plt.subplots_adjust(left=0.05, bottom=0.45, right=0.95, top=0.95, wspace=0.1, hspace=0.0)
# plt.tight_layout(pad=0.3)
fig.suptitle(verif_dict[var][1] + ' for ' +str(nya) +' year centered average')
# anomaly correlation
lmrvec = np.reshape(lmr_trunc,(1,nlat_new*nlon_new))
gpcpvec = np.reshape(gpcp_trunc,(1,nlat_new*nlon_new))
cmapvec = np.reshape(cmap_trunc,(1,nlat_new*nlon_new))
tcrvec = np.reshape(tcr_trunc,(1,nlat_new*nlon_new))
eravec = np.reshape(era_trunc,(1,nlat_new*nlon_new))
# | |
xref_table = list()
position = f.write("%PDF-1.4\n")
for index, obj in enumerate(objects, 1):
xref_table.append(position)
position += write_object(index, obj, objects, f)
xref_pos = position
f.write("xref_table\n0 {}\n".format(len(xref_table) + 1))
f.write("{:010} {:05} f\n".format(0, 65536))
for position in xref_table:
f.write("{:010} {:05} n\n".format(position, 0))
f.write("trailer\n")
f.write(format_dict({"Size": len(xref_table), "Root": catalog}, objects))
f.write("\nstartxref\n{}\n%%EOF\n".format(xref_pos))
# PDF content-stream command templates, filled in via str.format.
# Draw a text label at page position (x, y).
command_label = "/Gtext gs BT {x:.6f} {y:.6f} Td ({label}) Tj ET"
# Draw XObject {1} scaled to (0.x, 0.y) and flipped vertically (the "1 0 0 -1 0 1 cm").
command_image = "q {0.x:.6f} 0 0 {0.y:.6f} 0 0 cm 1 0 0 -1 0 1 cm /{1} Do Q"
# Draw sticker text under 2x2 transform `mat` translated to `pos`, with alignment offset.
command_sticker = "q {mat[0][0]:.6f} {mat[1][0]:.6f} {mat[0][1]:.6f} {mat[1][1]:.6f} {pos.x:.6f} {pos.y:.6f} cm BT {align:.6f} 0 Td /F1 {size:.6f} Tf ({label}) Tj ET Q"
# Draw an edge index and a small filled arrow-head path placed at `arrow_pos` under `mat`.
command_arrow = "q BT {pos.x:.6f} {pos.y:.6f} Td /F1 {size:.6f} Tf ({index}) Tj ET {mat[0][0]:.6f} {mat[1][0]:.6f} {mat[0][1]:.6f} {mat[1][1]:.6f} {arrow_pos.x:.6f} {arrow_pos.y:.6f} cm 0 0 m 1 -1 l 0 -0.25 l -1 -1 l f Q"
# Draw a number label under 2x2 transform `mat` translated to `pos`.
command_number = "q {mat[0][0]:.6f} {mat[1][0]:.6f} {mat[0][1]:.6f} {mat[1][1]:.6f} {pos.x:.6f} {pos.y:.6f} cm BT /F1 {size:.6f} Tf ({label}) Tj ET Q"
class Unfold(bpy.types.Operator):
    """Blender Operator: unfold the selected object.

    Computes the paper-model unfolding of the active mesh, marks the resulting
    cut edges as seams, and (re)builds the per-mesh island list shown in the UI.
    """
    bl_idname = "mesh.unfold"
    bl_label = "Unfold"
    bl_description = "Mark seams so that the mesh can be exported as a paper model"
    bl_options = {'REGISTER', 'UNDO'}
    # Hidden flag, settable by callers; not exposed in the redo panel.
    edit: bpy.props.BoolProperty(default=False, options={'HIDDEN'})
    priority_effect_convex: bpy.props.FloatProperty(
        name="Priority Convex", description="Priority effect for edges in convex angles",
        default=default_priority_effect['CONVEX'], soft_min=-1, soft_max=10, subtype='FACTOR')
    priority_effect_concave: bpy.props.FloatProperty(
        name="Priority Concave", description="Priority effect for edges in concave angles",
        default=default_priority_effect['CONCAVE'], soft_min=-1, soft_max=10, subtype='FACTOR')
    priority_effect_length: bpy.props.FloatProperty(
        name="Priority Length", description="Priority effect of edge length",
        default=default_priority_effect['LENGTH'], soft_min=-10, soft_max=1, subtype='FACTOR')
    do_create_uvmap: bpy.props.BoolProperty(
        name="Create UVMap", description="Create a new UV Map showing the islands and page layout", default=False)
    # Set by execute(); draw() uses it to decide whether another UV layer can be added.
    object = None

    @classmethod
    def poll(cls, context):
        # Only available for mesh objects.
        return context.active_object and context.active_object.type == "MESH"

    def draw(self, context):
        """Draw the operator's redo panel."""
        layout = self.layout
        col = layout.column()
        # Blender limits a mesh to 8 UV layers; grey out the option when full.
        col.active = not self.object or len(self.object.data.uv_layers) < 8
        col.prop(self.properties, "do_create_uvmap")
        layout.label(text="Edge Cutting Factors:")
        col = layout.column(align=True)
        col.label(text="Face Angle:")
        col.prop(self.properties, "priority_effect_convex", text="Convex")
        col.prop(self.properties, "priority_effect_concave", text="Concave")
        layout.prop(self.properties, "priority_effect_length", text="Edge Length")

    def execute(self, context):
        """Unfold the active mesh and rebuild its island list.

        Returns {'FINISHED'} on success, {'CANCELLED'} if unfolding fails.
        """
        sce = bpy.context.scene
        settings = sce.paper_model
        # Remember the current mode so it can be restored on both exit paths.
        recall_mode = context.object.mode
        bpy.ops.object.mode_set(mode='EDIT')
        self.object = context.object
        cage_size = M.Vector((settings.output_size_x, settings.output_size_y))
        priority_effect = {
            'CONVEX': self.priority_effect_convex,
            'CONCAVE': self.priority_effect_concave,
            'LENGTH': self.priority_effect_length}
        try:
            unfolder = Unfolder(self.object)
            unfolder.do_create_uvmap = self.do_create_uvmap
            scale = sce.unit_settings.scale_length / settings.scale
            unfolder.prepare(cage_size, priority_effect, scale, settings.limit_by_page)
            unfolder.mesh.mark_cuts()
        except UnfoldError as error:
            # Surface the failure in the UI and highlight the offending geometry.
            self.report(type={'ERROR_INVALID_INPUT'}, message=error.args[0])
            error.mesh_select()
            bpy.ops.object.mode_set(mode=recall_mode)
            return {'CANCELLED'}
        mesh = self.object.data
        mesh.update()
        if mesh.paper_island_list:
            unfolder.copy_island_names(mesh.paper_island_list)
        island_list = mesh.paper_island_list
        # Preserve user-edited labels/abbreviations across re-unfolds.
        attributes = {item.label: (item.abbreviation, item.auto_label, item.auto_abbrev) for item in island_list}
        island_list.clear()  # remove previously defined islands
        for island in unfolder.mesh.islands:
            # add islands to UI list and set default descriptions
            list_item = island_list.add()
            # add faces' IDs to the island
            for face in island.faces:
                lface = list_item.faces.add()
                lface.id = face.index
            # Item-style assignment bypasses property update callbacks; restore
            # the saved attributes when the label existed before.
            list_item["label"] = island.label
            list_item["abbreviation"], list_item["auto_label"], list_item["auto_abbrev"] = attributes.get(
                island.label,
                (island.abbreviation, True, True))
            island_item_changed(list_item, context)
        # Deselect any island in the UI list.
        mesh.paper_island_index = -1
        del unfolder
        bpy.ops.object.mode_set(mode=recall_mode)
        return {'FINISHED'}
class ClearAllSeams(bpy.types.Operator):
    """Blender Operator: remove every seam and all unfold data of the active mesh."""
    bl_idname = "mesh.clear_all_seams"
    bl_label = "Clear All Seams"
    bl_description = "Clear all the seams and unfolded islands of the active object"

    @classmethod
    def poll(cls, context):
        # Operator applies to mesh objects only.
        active = context.active_object
        return active and active.type == 'MESH'

    def execute(self, context):
        """Unset the seam flag on every edge and drop the stored island list."""
        mesh = context.active_object.data
        for edge in mesh.edges:
            edge.use_seam = False
        mesh.paper_island_list.clear()
        return {'FINISHED'}
def page_size_preset_changed(self, context):
    """Update the actual document size to correct values"""
    # When the settings object can limit pages and that limit is off,
    # the page size is irrelevant — leave it untouched.
    if hasattr(self, "limit_by_page") and not self.limit_by_page:
        return
    # Page dimensions in meters, (width, height), for each known preset.
    preset_sizes = {
        'A4': (0.210, 0.297),
        'A3': (0.297, 0.420),
        'US_LETTER': (0.216, 0.279),
        'US_LEGAL': (0.216, 0.356),
    }
    size = preset_sizes.get(self.page_size_preset)
    if size is not None:
        self.output_size_x, self.output_size_y = size
class PaperModelStyle(bpy.types.PropertyGroup):
    """Visual style settings for the exported net: colors, line styles and widths."""
    # Shared (identifier, UI name, description) items for all EnumProperties below.
    # Fixed: the DASH/LONGDASH/DASHDOT descriptions were all copy-pasted as "Solid line".
    line_styles = [
        ('SOLID', "Solid (----)", "Solid line"),
        ('DOT', "Dots (. . .)", "Dotted line"),
        ('DASH', "Short Dashes (- - -)", "Dashed line"),
        ('LONGDASH', "Long Dashes (-- --)", "Long dashed line"),
        ('DASHDOT', "Dash-dotted (-- .)", "Dash-dotted line")
    ]
    outer_color: bpy.props.FloatVectorProperty(
        name="Outer Lines", description="Color of net outline",
        default=(0.0, 0.0, 0.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    outer_style: bpy.props.EnumProperty(
        name="Outer Lines Drawing Style", description="Drawing style of net outline",
        default='SOLID', items=line_styles)
    # All *_width properties below are relative multiples of this base thickness.
    line_width: bpy.props.FloatProperty(
        name="Base Lines Thickness", description="Base thickness of net lines, each actual value is a multiple of this length",
        default=1e-4, min=0, soft_max=5e-3, precision=5, step=1e-2, subtype="UNSIGNED", unit="LENGTH")
    outer_width: bpy.props.FloatProperty(
        name="Outer Lines Thickness", description="Relative thickness of net outline",
        default=3, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    use_outbg: bpy.props.BoolProperty(
        name="Highlight Outer Lines", description="Add another line below every line to improve contrast",
        default=True)
    outbg_color: bpy.props.FloatVectorProperty(
        name="Outer Highlight", description="Color of the highlight for outer lines",
        default=(1.0, 1.0, 1.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    outbg_width: bpy.props.FloatProperty(
        name="Outer Highlight Thickness", description="Relative thickness of the highlighting lines",
        default=5, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    convex_color: bpy.props.FloatVectorProperty(
        name="Inner Convex Lines", description="Color of lines to be folded to a convex angle",
        default=(0.0, 0.0, 0.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    convex_style: bpy.props.EnumProperty(
        name="Convex Lines Drawing Style", description="Drawing style of lines to be folded to a convex angle",
        default='DASH', items=line_styles)
    # Fixed: description previously said "concave lines" (copy-paste from convex/concave pair).
    convex_width: bpy.props.FloatProperty(
        name="Convex Lines Thickness", description="Relative thickness of convex lines",
        default=2, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    concave_color: bpy.props.FloatVectorProperty(
        name="Inner Concave Lines", description="Color of lines to be folded to a concave angle",
        default=(0.0, 0.0, 0.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    concave_style: bpy.props.EnumProperty(
        name="Concave Lines Drawing Style", description="Drawing style of lines to be folded to a concave angle",
        default='DASHDOT', items=line_styles)
    concave_width: bpy.props.FloatProperty(
        name="Concave Lines Thickness", description="Relative thickness of concave lines",
        default=2, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    freestyle_color: bpy.props.FloatVectorProperty(
        name="Freestyle Edges", description="Color of lines marked as Freestyle Edge",
        default=(0.0, 0.0, 0.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    freestyle_style: bpy.props.EnumProperty(
        name="Freestyle Edges Drawing Style", description="Drawing style of Freestyle Edges",
        default='SOLID', items=line_styles)
    freestyle_width: bpy.props.FloatProperty(
        name="Freestyle Edges Thickness", description="Relative thickness of Freestyle edges",
        default=2, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    use_inbg: bpy.props.BoolProperty(
        name="Highlight Inner Lines", description="Add another line below every line to improve contrast",
        default=True)
    inbg_color: bpy.props.FloatVectorProperty(
        name="Inner Highlight", description="Color of the highlight for inner lines",
        default=(1.0, 1.0, 1.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
    inbg_width: bpy.props.FloatProperty(
        name="Inner Highlight Thickness", description="Relative thickness of the highlighting lines",
        default=2, min=0, soft_max=10, precision=1, step=10, subtype='FACTOR')
    sticker_fill: bpy.props.FloatVectorProperty(
        name="Tabs Fill", description="Fill color of sticking tabs",
        default=(0.9, 0.9, 0.9, 1.0), min=0, max=1, subtype='COLOR', size=4)
    text_color: bpy.props.FloatVectorProperty(
        name="Text Color", description="Color of all text used in the document",
        default=(0.0, 0.0, 0.0, 1.0), min=0, max=1, subtype='COLOR', size=4)
# Register the style PropertyGroup with Blender immediately after its definition.
bpy.utils.register_class(PaperModelStyle)
class ExportPaperModel(bpy.types.Operator):
"""Blender Operator: save the selected object's net and optionally bake its texture"""
bl_idname = "export_mesh.paper_model"
bl_label = "Export Paper Model"
bl_description = "Export the selected object's net and optionally bake its texture"
filepath: bpy.props.StringProperty(
name="File Path", description="Target file to save the SVG", options={'SKIP_SAVE'})
filename: bpy.props.StringProperty(
name="File Name", description="Name of the file", options={'SKIP_SAVE'})
directory: bpy.props.StringProperty(
name="Directory", description="Directory of the file", options={'SKIP_SAVE'})
page_size_preset: bpy.props.EnumProperty(
name="Page Size", description="Size of the exported document",
default='A4', update=page_size_preset_changed, items=global_paper_sizes)
output_size_x: bpy.props.FloatProperty(
name="Page Width", description="Width of the exported document",
default=0.210, soft_min=0.105, soft_max=0.841, subtype="UNSIGNED", unit="LENGTH")
output_size_y: bpy.props.FloatProperty(
name="Page Height", description="Height of the exported document",
default=0.297, soft_min=0.148, soft_max=1.189, subtype="UNSIGNED", unit="LENGTH")
output_margin: bpy.props.FloatProperty(
name="Page Margin", description="Distance from page borders to the printable area",
default=0.005, min=0, soft_max=0.1, step=0.1, subtype="UNSIGNED", unit="LENGTH")
output_type: bpy.props.EnumProperty(
name="Textures", description="Source of a texture for the model",
default='NONE', items=[
('NONE', "No Texture", "Export the net only"),
('TEXTURE', "From Materials", "Render the diffuse color and all painted textures"),
('AMBIENT_OCCLUSION', "Ambient Occlusion", "Render the Ambient Occlusion pass"),
('RENDER', "Full Render", "Render the material in actual scene illumination"),
('SELECTED_TO_ACTIVE', "Selected to Active", "Render all selected surrounding objects as a texture")
])
do_create_stickers: bpy.props.BoolProperty(
name="Create Tabs", description="Create gluing tabs around the net (useful for paper)",
default=True)
do_create_numbers: bpy.props.BoolProperty(
name="Create Numbers", description="Enumerate edges to make it clear which edges should be sticked together",
default=True)
sticker_width: bpy.props.FloatProperty(
name="Tabs and Text Size", description="Width of gluing tabs and their numbers",
default=0.005, soft_min=0, soft_max=0.05, step=0.1, subtype="UNSIGNED", unit="LENGTH")
angle_epsilon: bpy.props.FloatProperty(
name="Hidden Edge Angle", description="Folds with angle below this limit will not be drawn",
default=pi/360, min=0, soft_max=pi/4, step=0.01, subtype="ANGLE", unit="ROTATION")
output_dpi: bpy.props.FloatProperty(
name="Resolution (DPI)", description="Resolution of images in pixels per inch",
default=90, min=1, soft_min=30, soft_max=600, subtype="UNSIGNED")
bake_samples: bpy.props.IntProperty(
name="Samples", description="Number of samples to render for each pixel",
default=64, min=1, subtype="UNSIGNED")
file_format: bpy.props.EnumProperty(
name="Document Format", description="File format of the exported net",
default='PDF', items=[
('PDF', "PDF", "Adobe Portable Document Format 1.4"),
('SVG', "SVG", "W3C Scalable Vector Graphics"),
])
image_packing: bpy.props.EnumProperty(
name="Image Packing Method", description="Method of attaching | |
+ \
BaseModel._format_legend_line(r'$F^{YY}_S$' + ": Maximum costs of copying that ensure that the incumbent copies the entrant if the entrant is guaranteed to invest in a perfect substitute.", width=width) + "\n" + \
BaseModel._format_legend_line(r'$F^{YN}_S$' + ": Maximum costs of copying that ensure that the incumbent copies the entrant if the copying prevents the entrant from developing a perfect substitute.", width=width) + "\n" + \
BaseModel._format_legend_line(r'$F^{YY}_C$' + ": Maximum costs of copying that ensure that the incumbent copies the entrant if the entrant is guaranteed to invest in another complement.", width=width) + "\n" + \
BaseModel._format_legend_line(r'$F^{YN}_C$' + ": Maximum costs of copying that ensure that the incumbent copies the entrant if the copying prevents the entrant from developing another complement.", width=width)
@staticmethod
def _format_legend_line(line: str, width: int = 60, latex: bool = True) -> str:
space: str = "$\quad$" if latex else " " * 4
return textwrap.fill(line, width=width, initial_indent='', subsequent_indent=space * 3)
@staticmethod
def _get_color(i: int) -> str:
"""
Returns a string corresponding to a matplotlib - color for a given index.
The index helps to get different colors for different items, when iterating over list/dict/etc..
Parameters
----------
i: int
Index of the color.
Returns
-------
str
A string corresponding to a matplotlib - color for a given index.
"""
return ['salmon', 'khaki', 'limegreen', 'turquoise', 'powderblue', 'thistle', 'pink'][i]
    @staticmethod
    def _set_axis(axis: matplotlib.axes.Axes) -> None:
        """
        Adjusts the axis to the given viewport.

        Parameters
        ----------
        axis: matplotlib.axes.Axes
            To adjust to the given viewport.
        """
        # Recompute the data limits from the current artists, then let the
        # figure tighten its layout around the updated axes.
        axis.autoscale_view()
        axis.figure.tight_layout()
    @staticmethod
    def _set_axis_labels(axis: matplotlib.axes.Axes, title: str = "", x_label: str = "", y_label: str = "") -> None:
        """
        Sets all the labels for a plot, containing the title, x - label and y - label.

        Parameters
        ----------
        axis
            Axis to set the labels for.
        title
            Title of the axis.
        x_label
            Label of the x - axis.
        y_label
            Label of the y - axis.
        """
        # Title is left-aligned and raised slightly (y=1.1) above the axes.
        axis.set_title(title, loc='left', y=1.1)
        axis.set_xlabel(x_label)
        axis.set_ylabel(y_label)
def __str__(self) -> str:
str_representation = self._create_asset_str()
str_representation += "\n" + self._create_copying_costs_str()
str_representation += "\n" + self._create_payoff_str()
return str_representation
    def _create_payoff_str(self):
        """
        Creates a string representation for the payoffs of different stakeholder for different market configurations.

        See Shelegia_Motta_2021.IModel.get_payoffs for the formulas of the payoffs.

        Returns
        -------
        str
            String representation for the payoffs of different stakeholder for different market configurations
        """
        market_configurations: List[str] = list(self._payoffs.keys())
        # Header row: one left-padded 14-character column per market configuration.
        str_representation = 'Payoffs for different Market Configurations:\n\t' + ''.join(
            ['{0: <14}'.format(item) for item in market_configurations])
        # One row per utility type; assumes every configuration shares the same
        # utility keys as the first one.
        for utility_type in self._payoffs[market_configurations[0]].keys():
            str_representation += '\n\t'
            for market_configuration in market_configurations:
                # 'pi' is displayed as the Greek letter π; values padded to 5 chars.
                str_representation += '-' + '{0: <4}'.format(utility_type).replace('pi', 'π') + ': ' + '{0: <5}'.format(
                    str(self._payoffs[market_configuration][utility_type])) + '| '
        return str_representation
def _create_copying_costs_str(self):
"""
Creates a string representation for the fixed costs of copying for the incumbent.
See Shelegia_Motta_2021.IModel.get_copying_fixed_costs_values for the formulas of the fixed costs of copying.
Returns
-------
str
String representation for the fixed costs of copying for the incumbent.
"""
str_representation = 'Costs for copying:'
for key in self._copying_fixed_costs.keys():
str_representation += '\n\t- ' + key + ':\t' + str(self._copying_fixed_costs[key])
return str_representation
def _create_asset_str(self):
"""
Creates a string representation for the assets of the entrant.
See Shelegia_Motta_2021.IModel.get_asset_values for the formulas of the assets of the entrant.
Returns
-------
str
String representation for the assets of the entrant.
"""
str_representation: str = 'Assets:'
for key in self._assets:
str_representation += '\n\t- ' + key + ':\t' + str(self._assets[key])
return str_representation
    def __call__(self, A: float, F: float) -> Dict[str, str]:
        """
        Makes the object callable and will return the equilibrium for a given pair of copying fixed costs of the incumbent
        and assets of the entrant.

        See Shelegia_Motta_2021.IModel.get_optimal_choice for further documentation.
        """
        # Pure delegation: model(A, F) is shorthand for model.get_optimal_choice(A=A, F=F).
        return self.get_optimal_choice(A=A, F=F)
class BargainingPowerModel(BaseModel):
"""
Besides the parameters used in the paper (and in the BaseModel), this class will introduce the parameter $\beta$ in the models, called
the bargaining power of the incumbent. $\beta$ describes how much of the profits from the complementary product of the entrant will go to the incumbent
In the paper the default value $\beta=0.5$ is used to derive the results, which indicate an equal share of the profits.
"""
    def __init__(self, u: float = 1, B: float = 0.5, small_delta: float = 0.5, delta: float = 0.51,
                 K: float = 0.2, beta: float = 0.5):
        """
        Besides $\\beta$ the parameters in this model do not change compared to Shelegia_Motta_2021.Models.BaseModel.

        Parameters
        ----------
        beta: float
            Bargaining power of the incumbent relative to the entrant ($0 < \\beta < 1$).

        Raises
        ------
        AssertionError
            If beta is outside the open interval (0, 1).
        """
        # NOTE(review): assert is stripped under `python -O`; kept because callers
        # may already rely on AssertionError being raised here.
        assert 0 < beta < 1, 'Invalid bargaining power beta (has to be between 0 and 1).'
        # beta must be set before the base constructor runs, since the overridden
        # _calculate_* methods (presumably invoked by BaseModel.__init__) read self._beta.
        self._beta: float = beta
        super(BargainingPowerModel, self).__init__(u=u, B=B, small_delta=small_delta, delta=delta, K=K)
    def _calculate_payoffs(self) -> Dict[str, Dict[str, float]]:
        """
        Calculates the payoffs for different market configurations with the formulas given in the paper.

        The formulas are tabulated in BargainingPowerModel.get_payoffs, which are different to the BaseModel.

        Returns
        -------
        Dict[str, Dict[str, float]]
            Contains the mentioned payoffs for different market configurations.
        """
        # Start from the BaseModel payoffs and overwrite only the entries that
        # depend on the bargaining power beta: each complement generates
        # small_delta, split beta/(1-beta) between incumbent and entrant.
        payoffs: Dict[str, Dict[str, float]] = super()._calculate_payoffs()
        # basic market.
        payoffs['basic']['pi(I)'] = self._u + self._small_delta * self._beta
        payoffs['basic']['pi(E)'] = self._small_delta * (1 - self._beta)
        # additional complement of the entrant
        payoffs['E(C)']['pi(I)'] = self._u + 2 * self._small_delta * self._beta
        payoffs['E(C)']['pi(E)'] = 2 * self._small_delta * (1 - self._beta)
        # additional complement of the incumbent and the entrant
        payoffs['I(C)E(C)']['pi(I)'] = self._u + self._small_delta * (1 + self._beta)
        payoffs['I(C)E(C)']['pi(E)'] = self._small_delta * (1 - self._beta)
        return payoffs
    def _calculate_copying_fixed_costs_values(self) -> Dict[str, float]:
        """
        Calculates the thresholds for the fixed costs of copying for the incumbent.

        The formulas are tabulated in BargainingPowerModel.get_copying_fixed_costs_values, which are different to the BaseModel.

        Returns
        -------
        Dict[str, float]
            Includes the thresholds for the fixed costs for copying of the incumbent.
        """
        # NOTE(review): 'F(YN)c' is computed as small_delta * (2 - 3*beta), while the
        # table in get_copying_fixed_costs_values documents $\delta(2 - \beta)$ —
        # one of the two looks inconsistent; verify against the paper.
        return {'F(YY)s': self._small_delta * (1 - self._beta),
                'F(YN)s': self._u + self._small_delta * (2 - self._beta),
                'F(YY)c': 2 * self._small_delta * (1 - self._beta),
                'F(YN)c': self._small_delta * (2 - 3 * self._beta)}
    def _calculate_asset_values(self) -> Dict[str, float]:
        """
        Calculates the thresholds for the assets of the entrant.

        The formulas are tabulated in BargainingPowerModel.get_asset_values, which are different to the BaseModel.

        Returns
        -------
        Dict[str, float]
            Includes the thresholds for the assets of the entrant.
        """
        # Formulas match the table in get_asset_values:
        # A_s / A_c are the lower thresholds, A-s / A-c the upper ones.
        return {'A_s': self._K + self._B - self._delta - self._small_delta * (2 - self._beta),
                'A_c': self._K + self._B - 3 * self._small_delta * (1 - self._beta),
                'A-s': self._K + self._B - self._delta,
                'A-c': self._K + self._B - self._small_delta * (1 - self._beta)}
    def get_asset_values(self) -> Dict[str, float]:
        """
        Returns the asset thresholds of the entrant.

        | Threshold $\:\:\:\:\:$ | Name $\:\:\:\:\:$ | Formula $\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:$ |
        |----------------|:----------|:-----------|
        | $\\underline{A}_S$ | A_s | $B + K - \Delta - \delta(2 - \\beta)$ |
        | $\\underline{A}_C$ | A_c | $B + K - 3\delta(1 - \\beta)$ |
        | $\overline{A}_S$ | A-s | $B + K - \Delta$ |
        | $\overline{A}_C$ | A-c | $B + K - \delta(1 - \\beta)$ |
        <br>
        Returns
        -------
        Dict[str, float]
            Includes the thresholds for the assets of the entrant.
        """
        # Values are precomputed in _calculate_asset_values and cached on the instance.
        return self._assets
    def get_copying_fixed_costs_values(self) -> Dict[str, float]:
        """
        Returns the fixed costs for copying thresholds of the incumbent.

        | Threshold $\:\:\:\:\:$ | Name $\:\:\:\:\:$ | Formula $\:\:\:\:\:\:\:\:\:\:\:\:\:\:\:$ |
        |----------|:-------|:--------|
        | $F^{YY}_S$ | F(YY)s | $\delta(1 - \\beta)$ |
        | $F^{YN}_S$ | F(YN)s | $u + \delta(2 - \\beta)$ |
        | $F^{YY}_C$ | F(YY)c | $2\delta(1 - \\beta)$ |
        | $F^{YN}_C$ | F(YN)c | $\delta(2 - \\beta)$ |
        <br>
        Returns
        -------
        Dict[str, float]
            Includes the thresholds for the fixed costs for copying of the incumbent.
        """
        # NOTE(review): the table's F(YN)c formula differs from the implementation in
        # _calculate_copying_fixed_costs_values, which uses delta*(2 - 3*beta);
        # verify which one matches the paper.
        return self._copying_fixed_costs
def get_payoffs(self) -> Dict[str, Dict[str, float]]:
"""
Returns the payoffs for different market configurations.
A market configuration can include:
- $I_P$ : Primary product sold by the incumbent.
- $I_C$ : Complementary product to $I_P$ potentially sold by the incumbent, which is copied from $E_C$.
- $E_P$ : Perfect substitute to $I_P$ potentially sold by the entrant.
- $E_C$ : Complementary product to $I_P$ currently sold by the entrant
- $\\tilde{E}_C$ : Complementary product to $I_P$ potentially | |
from __future__ import annotations
from typing import Any
from hydrogen.elements import Element, T_element
class a(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("a", *children, **attributes)
class abbr(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("abbr", *children, **attributes)
class address(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("address", *children, **attributes)
class area(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("area", *children, **attributes)
class article(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("article", *children, **attributes)
class aside(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("aside", *children, **attributes)
class audio(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("audio", *children, **attributes)
class b(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("b", *children, **attributes)
class base(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("base", *children, **attributes)
class bdi(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("bdi", *children, **attributes)
class bdo(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("bdo", *children, **attributes)
class blockquote(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("blockquote", *children, **attributes)
class body(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("body", *children, **attributes)
class br(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("br", *children, **attributes)
class button(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("button", *children, **attributes)
class canvas(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("canvas", *children, **attributes)
class caption(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("caption", *children, **attributes)
class cite(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("cite", *children, **attributes)
class code(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("code", *children, **attributes)
class col(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("col", *children, **attributes)
class colgroup(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("colgroup", *children, **attributes)
class data(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("data", *children, **attributes)
class datalist(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("datalist", *children, **attributes)
class dd(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("dd", *children, **attributes)
class deleted(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("del", *children, **attributes)
class details(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("details", *children, **attributes)
class dfn(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("dfn", *children, **attributes)
class dialog(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("dialog", *children, **attributes)
class div(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("div", *children, **attributes)
class dl(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("dl", *children, **attributes)
class dt(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("dt", *children, **attributes)
class em(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("em", *children, **attributes)
class embed(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("embed", *children, **attributes)
class fieldset(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("fieldset", *children, **attributes)
class figcaption(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("figcaption", *children, **attributes)
class figure(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("figure", *children, **attributes)
class footer(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("footer", *children, **attributes)
class form(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("form", *children, **attributes)
class h1(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h1", *children, **attributes)
class h2(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h2", *children, **attributes)
class h3(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h3", *children, **attributes)
class h4(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h4", *children, **attributes)
class h5(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h5", *children, **attributes)
class h6(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("h6", *children, **attributes)
class head(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("head", *children, **attributes)
class header(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("header", *children, **attributes)
class hr(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("hr", *children, **attributes)
class html(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("html", *children, **attributes)
class i(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("i", *children, **attributes)
class iframe(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("iframe", *children, **attributes)
class img(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("img", *children, **attributes)
class input(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("input", *children, **attributes)
class ins(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("ins", *children, **attributes)
class kbd(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("kbd", *children, **attributes)
class label(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("label", *children, **attributes)
class legend(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("legend", *children, **attributes)
class li(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("li", *children, **attributes)
class link(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("link", *children, **attributes)
class main(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("main", *children, **attributes)
class map(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("map", *children, **attributes)
class mark(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("mark", *children, **attributes)
class meta(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("meta", *children, **attributes)
class meter(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("meter", *children, **attributes)
class nav(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("nav", *children, **attributes)
class noscript(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("noscript", *children, **attributes)
class object(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("object", *children, **attributes)
class ol(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("ol", *children, **attributes)
class optgroup(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("optgroup", *children, **attributes)
class option(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("option", *children, **attributes)
class p(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("p", *children, **attributes)
class param(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("param", *children, **attributes)
class picture(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("picture", *children, **attributes)
class pre(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
super().__init__("pre", *children, **attributes)
class progress(Element):
    """Build an HTML ``<progress>`` (progress indicator) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("progress", *children, **attributes)
class q(Element):
    """Build an HTML ``<q>`` (inline quotation) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("q", *children, **attributes)
class rp(Element):
    """Build an HTML ``<rp>`` (ruby fallback parenthesis) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("rp", *children, **attributes)
class rt(Element):
    """Build an HTML ``<rt>`` (ruby text) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("rt", *children, **attributes)
class ruby(Element):
    """Build an HTML ``<ruby>`` (ruby annotation) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("ruby", *children, **attributes)
class s(Element):
    """Build an HTML ``<s>`` (strikethrough) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("s", *children, **attributes)
class samp(Element):
    """Build an HTML ``<samp>`` (sample program output) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("samp", *children, **attributes)
class script(Element):
    """Build an HTML ``<script>`` element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("script", *children, **attributes)
class section(Element):
    """Build an HTML ``<section>`` element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("section", *children, **attributes)
class select(Element):
    """Build an HTML ``<select>`` (drop-down control) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("select", *children, **attributes)
class small(Element):
    """Build an HTML ``<small>`` (side-comment text) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("small", *children, **attributes)
class source(Element):
    """Build an HTML ``<source>`` (media source) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("source", *children, **attributes)
class span(Element):
    """Build an HTML ``<span>`` (inline container) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("span", *children, **attributes)
class strong(Element):
    """Build an HTML ``<strong>`` (strong importance) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("strong", *children, **attributes)
class style(Element):
    """Build an HTML ``<style>`` (embedded CSS) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("style", *children, **attributes)
class sub(Element):
    """Build an HTML ``<sub>`` (subscript) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("sub", *children, **attributes)
class summary(Element):
    """Build an HTML ``<summary>`` (details disclosure caption) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("summary", *children, **attributes)
class sup(Element):
    """Build an HTML ``<sup>`` (superscript) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("sup", *children, **attributes)
class svg(Element):
    """Build an ``<svg>`` (scalable vector graphics root) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("svg", *children, **attributes)
class table(Element):
    """Build an HTML ``<table>`` element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("table", *children, **attributes)
class title(Element):
    """Build an HTML ``<title>`` (document title) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("title", *children, **attributes)
class tbody(Element):
    """Build an HTML ``<tbody>`` (table body) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("tbody", *children, **attributes)
class td(Element):
    """Build an HTML ``<td>`` (table data cell) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("td", *children, **attributes)
class template(Element):
    """Build an HTML ``<template>`` (inert content template) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("template", *children, **attributes)
class textarea(Element):
    """Build an HTML ``<textarea>`` (multi-line text input) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("textarea", *children, **attributes)
class tfoot(Element):
    """Build an HTML ``<tfoot>`` (table footer) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("tfoot", *children, **attributes)
class th(Element):
    """Build an HTML ``<th>`` (table header cell) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("th", *children, **attributes)
class thead(Element):
    """Build an HTML ``<thead>`` (table header) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("thead", *children, **attributes)
class time(Element):
    """Build an HTML ``<time>`` (date/time) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("time", *children, **attributes)
class tile(Element):
    """Build a ``<tile>`` element.

    NOTE(review): ``tile`` is not a standard HTML tag — possibly a typo for
    ``title`` (which already exists above). Kept as-is to preserve the public
    API; confirm intent before removing.
    """
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("tile", *children, **attributes)
class tr(Element):
    """Build an HTML ``<tr>`` (table row) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("tr", *children, **attributes)
class track(Element):
    """Build an HTML ``<track>`` (media text track) element."""
    def __init__(self, *children: T_element, **attributes: Any) -> None:
        super().__init__("track", *children, **attributes)
class u(Element):
def __init__(self, *children: T_element, **attributes: Any) -> None:
| |
init
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.rel_embeddings,std=0.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=.02)
if weight_init.startswith('jax'):
# leave cls token as zeros to match jax impl
for n, m in self.named_modules():
_init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
else:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def _init_weights(self, m):
    """Per-module init hook; delegates to ``_init_vit_weights``."""
    # this fn left here for compat with downstream users
    _init_vit_weights(m)
@torch.jit.ignore
def no_weight_decay(self):
    """Parameter names to exclude from weight decay (timm optimizer convention)."""
    return {'rel_embeddings','pos_embed', 'cls_token', 'dist_token'}
def get_classifier(self):
    """Return the classification head, or ``(head, head_dist)`` when distilled."""
    if self.dist_token is None:
        return self.head
    return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=''):
    """Replace the classification head(s) for a new class count.

    ``global_pool`` is accepted for timm API compatibility and ignored here.
    """
    self.num_classes = num_classes
    self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
    # num_tokens == 2 means a distillation token exists, so rebuild its head too.
    if self.num_tokens == 2:
        self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
    """Patch-embed ``x``, prepend class (and distillation) tokens, run the
    transformer blocks and return the token features.

    Returns the pre-logits CLS feature, or ``(cls, dist)`` when distilled.
    """
    x = self.patch_embed(x)
    cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from <NAME>, thanks
    if self.dist_token is None:
        x = torch.cat((cls_token, x), dim=1)
    else:
        x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
    if self.disentangled:
        # Relative position ids/embeddings are shared by every block.
        relative_pos=self.get_rel_pos(x)
        rel_embeddings=self.get_rel_embedding()
    if self.disentangled: #disentangled
        x=self.pos_drop(x+self.pos_embed)
        # nn.Sequential cannot route multiple inputs, so iterate the blocks manually.
        for module in self.blocks._modules.values():
            x,_,_ = module(x,relative_pos,rel_embeddings)
        #Sequential for multiple inputs
    else:
        x = self.pos_drop(x + self.pos_embed)
        x = self.blocks(x)
    x = self.norm(x)
    if self.dist_token is None:
        return self.pre_logits(x[:, 0])
    else:
        return x[:, 0], x[:, 1]
def forward(self, x):
    """Classify ``x``; during distilled training returns both heads' logits."""
    x = self.forward_features(x)
    if self.head_dist is not None:
        x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
        if self.training and not torch.jit.is_scripting():
            # during training, return both predictions for the distillation loss
            return x, x_dist
        else:
            # during inference, return the average of both classifier predictions
            return (x + x_dist) / 2
    else:
        x = self.head(x)
    return x
def get_rel_embedding(self):
    """Return layer-normalized relative position embeddings, or None when
    relative attention is disabled."""
    rel_embeddings = self.rel_embeddings if self.relative_attention else None
    if rel_embeddings is not None:
        rel_embeddings = self.LayerNorm(rel_embeddings)
    return rel_embeddings
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
    """Build (or pass through) relative position ids for disentangled attention.

    May return None when relative attention is disabled.
    """
    if self.relative_attention and relative_pos is None:
        q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
        relative_pos = build_relative_position(q, hidden_states.size(-2), bucket_size = self.position_buckets, max_position=self.max_relative_positions)
    return relative_pos
class VisionTransformerWOPE(nn.Module):
    """Vision Transformer WithOut absolute Position Embedding.

    Variant of the disentangled ViT above: no absolute ``pos_embed`` parameter
    is created; when ``disentangled`` is True the blocks consume DeBERTa-style
    relative position ids and embeddings instead.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
                 act_layer=None, weight_init='',
                 LAI=False,disentangled=True,share_att_key=False):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            distilled (bool): model includes a distillation token and head as in DeiT models
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            embed_layer (nn.Module): patch embedding layer
            norm_layer: (nn.Module): normalization layer
            weight_init: (str): weight init scheme
            LAI (bool): accepted for API compatibility; unused in this class
            disentangled (bool): use DeBERTa-style disentangled relative attention
            share_att_key (bool): forwarded to each Block
        """
        super().__init__()
        # Disentangled attention implies relative attention.
        self.disentangled= self.relative_attention=disentangled
        self.position_buckets=-1
        # NOTE(review): max relative positions are tied to embed_dim here —
        # verify this matches what the Block implementation expects.
        self.max_relative_positions=embed_dim
        self.pos_ebd_size=self.max_relative_positions*2
        #self.rel_embeddings = nn.Embedding(self.pos_ebd_size, embed_dim)
        self.rel_embeddings=nn.Parameter(torch.zeros(self.pos_ebd_size,embed_dim))
        print(f'Disentangled attention: {disentangled}')
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.LayerNorm= nn.LayerNorm(embed_dim)
        self.num_tokens = 2 if distilled else 1
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU
        self.patch_embed = embed_layer(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
        # Deliberately no absolute position embedding in this "WOPE" variant:
        #self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,
                disentangled=self.disentangled,share_att_key=share_att_key)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Representation layer
        if representation_size and not distilled:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()
        # Classifier head(s)
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        # Weight init
        assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
        trunc_normal_(self.rel_embeddings, std=.02)
        if self.dist_token is not None:
            trunc_normal_(self.dist_token, std=.02)
        if weight_init.startswith('jax'):
            # leave cls token as zeros to match jax impl
            for n, m in self.named_modules():
                _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True)
        else:
            trunc_normal_(self.cls_token, std=.02)
            self.apply(_init_vit_weights)

    def _init_weights(self, m):
        """Per-module init hook; delegates to ``_init_vit_weights``."""
        # this fn left here for compat with downstream users
        _init_vit_weights(m)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names to exclude from weight decay.

        NOTE(review): 'pos_embed' is listed but never created in this class;
        the extra entry is harmless to timm's optimizer filtering.
        """
        return {'pos_embed', 'cls_token', 'dist_token'}

    def get_classifier(self):
        """Return the classification head, or (head, head_dist) when distilled."""
        if self.dist_token is None:
            return self.head
        else:
            return self.head, self.head_dist

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head(s); ``global_pool`` is ignored."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.num_tokens == 2:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        """Patch-embed, prepend tokens, run the blocks, return token features."""
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from <NAME>, thanks
        if self.dist_token is None:
            x = torch.cat((cls_token, x), dim=1)
        else:
            x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
        if self.disentangled:
            relative_pos=self.get_rel_pos(x)
            rel_embeddings=self.get_rel_embedding()
        if self.disentangled: #disentangled
            # Unlike the class above, no absolute position embedding is added here.
            x=self.pos_drop(x)
            # nn.Sequential cannot route multiple inputs, so iterate the blocks manually.
            for module in self.blocks._modules.values():
                x,_,_ = module(x,relative_pos,rel_embeddings)
            #Sequential for multiple inputs
        else:
            # NOTE(review): self.pos_embed is never defined in this class (its
            # creation is commented out in __init__), so this branch raises
            # AttributeError when disentangled=False — confirm intended usage.
            x = self.pos_drop(x + self.pos_embed)
            x = self.blocks(x)
        x = self.norm(x)
        if self.dist_token is None:
            return self.pre_logits(x[:, 0])
        else:
            return x[:, 0], x[:, 1]

    def forward(self, x):
        """Classify ``x``; during distilled training returns both heads' logits."""
        x = self.forward_features(x)
        if self.head_dist is not None:
            x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
            if self.training and not torch.jit.is_scripting():
                # during training, return both predictions for the distillation loss
                return x, x_dist
            else:
                # during inference, return the average of both classifier predictions
                return (x + x_dist) / 2
        else:
            x = self.head(x)
        return x

    def get_rel_embedding(self):
        """Layer-normalized relative position embeddings, or None."""
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        if rel_embeddings is not None:
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        """Build (or pass through) relative position ids; may return None."""
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(q, hidden_states.size(-2), bucket_size = self.position_buckets, max_position=self.max_relative_positions)
        return relative_pos
class Mlp(nn.Module):
    """MLP block as used in Vision Transformer, MLP-Mixer and related networks.

    Two linear layers with an activation in between; the same dropout module
    is applied after each linear layer.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden/output widths default to the input width when not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop, exactly as in the reference impl.
        for stage in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = stage(x)
        return x
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize ``tensor`` in place with variance scaling (JAX-style).

    Args:
        tensor: tensor to initialize in place.
        scale: multiplicative factor for the variance.
        mode: 'fan_in', 'fan_out' or 'fan_avg' — which fan count normalizes the variance.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if ``mode`` or ``distribution`` is not one of the values above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Previously an unknown mode crashed later with NameError (denom unbound);
        # fail with a clear message instead, mirroring the distribution check below.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
    """Build a ``[1, query_size, key_size]`` long tensor of relative positions.

    Entry ``[0, i, j]`` equals ``i - j``; when both ``bucket_size`` and
    ``max_position`` are positive, offsets are mapped to log buckets.
    """
    q_ids = np.arange(0, query_size)
    k_ids = np.arange(0, key_size)
    # Broadcasting replaces the original np.tile; the result is identical.
    rel_pos_ids = q_ids[:, None] - k_ids[None, :]
    if bucket_size > 0 and max_position > 0:
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)
    return rel_pos_ids[:query_size, :].unsqueeze(0)
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map relative positions to log-spaced buckets (DeBERTa-style).

    Offsets within ``(-bucket_size//2, bucket_size//2)`` keep their exact value;
    larger magnitudes are compressed logarithmically up to ``max_position``.
    Returns an integer array with the same shape as ``relative_pos``.
    """
    sign = np.sign(relative_pos)
    mid = bucket_size//2
    # In-range offsets get a placeholder magnitude (mid-1) so the log below is finite.
    abs_pos = np.where((relative_pos<mid) & (relative_pos > -mid), mid-1, np.abs(relative_pos))
    log_pos = np.ceil(np.log(abs_pos/mid)/np.log((max_position-1)/mid) * (mid-1)) + mid
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; builtin int
    # maps to the default platform integer, matching the old behavior.
    bucket_pos = np.where(abs_pos<=mid, relative_pos, log_pos*sign).astype(int)
    return bucket_pos
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
def lecun_normal_(tensor):
    """LeCun-normal init: fan-in scaled variance, truncated-normal samples."""
    variance_scaling_(tensor, distribution='truncated_normal', mode='fan_in')
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(m, nn.Linear):
if n.startswith('head'):
nn.init.zeros_(m.weight)
nn.init.constant_(m.bias, head_bias)
elif n.startswith('pre_logits'):
lecun_normal_(m.weight)
nn.init.zeros_(m.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
if 'mlp' in n:
nn.init.normal_(m.bias, std=1e-6)
else:
nn.init.zeros_(m.bias)
else:
trunc_normal_(m.weight, std=.02)
if m.bias is | |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""Converts a GGR-style raw data to IBEIS database."""
from __future__ import absolute_import, division, print_function
from detecttools.directory import Directory
from os.path import join, exists
import utool as ut
import ibeis
(print, rrr, profile) = ut.inject2(__name__)
def _fix_ggr2018_directory_structure(ggr_path):
    """Apply one-off, in-place fixes to the raw GGR 2018 directory layout.

    Renames/merges misnamed camera folders under ``ggr_path`` into the expected
    ``<number>/<number><letter>/`` structure using rsync, then deletes the
    originals. Destructive and not idempotent: run once on a pristine copy.
    The escaped backslashes in folder names are consumed by rsync; the
    ``.replace('\\', '')`` calls recover the literal path for deletion.
    """
    # Manual fixes for bad directories
    src_uri = join(ggr_path, 'Clarine\ Plane\ Kurungu/')
    dst_uri = join(ggr_path, '231/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '231B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, 'Alex\ Peltier\ -\ Plane\ -\ Ngurnit/giraffe\ grevy\ count\ feb\ 18/')
    dst_uri = join(ggr_path, '232/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '232B/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    # Strip two trailing components to delete the top-level source folder.
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, 'Mint\ Media\ Footage', 'Mpala\ day\ 1\ spark', 'PANORAMA/')
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, 'Mint\ Media\ Footage', 'Mpala\ day\ 1/')
    dst_uri = join(ggr_path, '233/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '233B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, 'Mint\ Media\ Footage', 'Mpala\ day\ 1\ spark/')
    dst_uri = join(ggr_path, '233', '233B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, 'Mint\ Media\ Footage', 'Mpala\ day2\ /')
    dst_uri = join(ggr_path, '233', '233B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, 'Mint\ Media\ Footage', 'Mpala\ day\ 2\ spark/')
    dst_uri = join(ggr_path, '233', '233B/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '103\ \(1\)/')
    dst_uri = join(ggr_path, '103/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '103\ \(ccef473b\)/')
    dst_uri = join(ggr_path, '103/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '108\ \(1\)/')
    dst_uri = join(ggr_path, '108/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '226A\ \(Shaba\ Funan\ Camp\)/')
    dst_uri = join(ggr_path, '226/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '226A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    # Loose files at the camera level get pushed down into an A sub-folder.
    src_uri = join(ggr_path, '121/*.*')
    dst_uri = join(ggr_path, '121', '121A/')
    ut.rsync(src_uri, dst_uri)
    for src_filepath in ut.glob(src_uri.replace('\\', '')):
        ut.delete(src_filepath)
    src_uri = join(ggr_path, '54', '54A\(16\)/')
    dst_uri = join(ggr_path, '54', '54A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '54', '54B\(16\)/')
    dst_uri = join(ggr_path, '54', '54B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '87', '87/')
    dst_uri = join(ggr_path, '87', '87A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '223', 'A/')
    dst_uri = join(ggr_path, '223', '223A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '223', 'B/')
    dst_uri = join(ggr_path, '223', '223B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '14', '15A/')
    dst_uri = join(ggr_path, '14', '14A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '73/')
    dst_uri = join(ggr_path, '85/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '117', '115A/')
    dst_uri = join(ggr_path, '117', '117A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '200\ A/')
    dst_uri = join(ggr_path, '200', '200A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '200\ B/')
    dst_uri = join(ggr_path, '200', '200B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '200\ F/')
    dst_uri = join(ggr_path, '200', '200F/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    # 200A is copied (not moved) into 201A and 202A below, hence the
    # commented-out deletes.
    src_uri = join(ggr_path, '200', '200A/')
    dst_uri = join(ggr_path, '201/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '201A/')
    ut.rsync(src_uri, dst_uri)
    # ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '201\ E/')
    dst_uri = join(ggr_path, '201', '201E/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '201\ F/')
    dst_uri = join(ggr_path, '201', '201F/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '200A/')
    dst_uri = join(ggr_path, '202/')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '202A/')
    ut.rsync(src_uri, dst_uri)
    # ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '202\ B/')
    dst_uri = join(ggr_path, '202', '202B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '200', '202\ F/')
    dst_uri = join(ggr_path, '202', '202F/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '230', '230A', 'El\ Karama/*.*')
    dst_uri = join(ggr_path, '230', '230A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-1])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '136', '136B', '136B\ Grevys\ Rally/*.*')
    dst_uri = join(ggr_path, '136', '136B/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-1])
    ut.delete(src_uri)
    # Flatten DCIM-style sub-folders; '_' suffix avoids filename collisions.
    src_uri = join(ggr_path, '160', '160E', '104DUSIT')
    if exists(src_uri):
        direct = Directory(src_uri, recursive=False)
        filename_list = direct.files()
        for filename in sorted(filename_list):
            dst_uri = filename.replace('104DUSIT/', '').replace('.JPG', '_.JPG')
            assert not exists(dst_uri)
            ut.rsync(filename, dst_uri)
        ut.delete(src_uri)
    src_uri = join(ggr_path, '222', '222B', '102DUSIT')
    if exists(src_uri):
        direct = Directory(src_uri, recursive=False)
        filename_list = direct.files()
        for filename in sorted(filename_list):
            dst_uri = filename.replace('102DUSIT/', '').replace('.JPG', '_.JPG')
            assert not exists(dst_uri)
            ut.rsync(filename, dst_uri)
        ut.delete(src_uri)
    # Errors found by QR codes
    # No conflicts
    src_uri = join(ggr_path, '5', '5A/')
    dst_uri = join(ggr_path, '5', '5B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '14', '14A/')
    dst_uri = join(ggr_path, '14', '14B/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '118', '118A/')
    dst_uri = join(ggr_path, '192')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '192A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '119', '119A/')
    dst_uri = join(ggr_path, '189')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '189A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '120', '120A/')
    dst_uri = join(ggr_path, '190')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '190A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '138', '138C/')
    dst_uri = join(ggr_path, '169', '169C/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri)
    # Conflicts - Move first
    src_uri = join(ggr_path, '115', '115A/')
    dst_uri = join(ggr_path, '191')
    ut.ensuredir(dst_uri)
    dst_uri = join(dst_uri, '191A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    src_uri = join(ggr_path, '148', '148A/')
    dst_uri = join(ggr_path, '149', '149A-temp/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    # Conflicts - Move second
    src_uri = join(ggr_path, '117', '117A/')
    dst_uri = join(ggr_path, '115', '115A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
    src_uri = join(ggr_path, '149', '149A/')
    dst_uri = join(ggr_path, '148', '148A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    # Conflicts - Move third
    src_uri = join(ggr_path, '149', '149A-temp/')
    dst_uri = join(ggr_path, '149', '149A/')
    ut.rsync(src_uri, dst_uri)
    ut.delete(src_uri.replace('\\', ''))
    # Conflicts - Merge third
    src_uri = join(ggr_path, '57', '57A/')
    dst_uri = join(ggr_path, '25', '25A/')
    ut.rsync(src_uri, dst_uri)
    src_uri = src_uri.replace('\\', '')
    src_uri = '/'.join(src_uri.split('/')[:-2])
    ut.delete(src_uri)
def convert_ggr2018_to_ibeis(ggr_path, dbdir=None, purge=True, dry_run=False,
apply_updates=True, **kwargs):
r"""Convert the raw GGR2 (2018) data to an ibeis database.
Args
ggr_path (str): Directory to folder *containing* raw GGR 2018 data
dbdir (str): Output directory
CommandLine:
python -m ibeis convert_ggr2018_to_ibeis
Example:
>>> # SCRIPT
>>> from ibeis.dbio.ingest_ggr import * # NOQA
>>> default_ggr_path = join('/', 'data', 'ibeis', 'GGR2', 'GGR2018data')
>>> default_dbdir = join('/', 'data', 'ibeis', 'GGR2-IBEIS')
>>> dbdir = ut.get_argval('--dbdir', type_=str, default=default_dbdir)
>>> ggr_path = ut.get_argval('--ggr', type_=str, default=default_ggr_path)
>>> result = convert_ggr2018_to_ibeis(ggr_path, dbdir=dbdir, purge=False, dry_run=True, apply_updates=False)
>>> print(result)
"""
ALLOWED_NUMBERS = list(range(1, 250))
ALLOWED_LETTERS = ['A', 'B', 'C', 'D', 'E', 'F']
################################################################################
if apply_updates:
_fix_ggr2018_directory_structure(ggr_path)
################################################################################
blacklist_filepath_set = set([
join(ggr_path, 'Cameras info.numbers'),
join(ggr_path, 'Cameras info.xlsx'),
join(ggr_path, 'GGR_photos_MRC_29.1.18.ods'),
join(ggr_path, 'Cameras info-2.numbers'),
])
# Check root files
direct = Directory(ggr_path)
for filepath in direct.files(recursive=False):
try:
assert filepath in blacklist_filepath_set
ut.delete(filepath)
except AssertionError:
print('Unresolved root file found in %r' % (filepath, ))
continue
################################################################################
if purge:
ut.delete(dbdir)
ibs = ibeis.opendb(dbdir=dbdir)
################################################################################
# Check folder structure
assert exists(ggr_path)
direct = Directory(ggr_path, recursive=0)
direct1_list = direct.directories()
direct1_list.sort(key=lambda x: int(x.base()), reverse=False)
for direct1 in direct1_list:
if not dry_run:
print('Processing directory: %r' % (direct1, ))
base1 = direct1.base()
try:
int(base1)
except ValueError:
print('Error found in %r' % (direct1, ))
continue
try:
assert len(direct1.files(recursive=False)) == 0
except AssertionError:
print('Files found in %r' % (direct1, ))
continue
seen_letter_list = []
direct1_ = Directory(direct1.absolute_directory_path, recursive=0)
direct2_list = direct1_.directories()
direct2_list.sort(key=lambda x: x.base(), reverse=False)
for direct2 in direct2_list:
base2 = direct2.base()
try:
assert base2.startswith(base1)
except AssertionError:
print('Folder name heredity conflict %r with %r' % (direct2, direct1, ))
continue
try:
assert len(base2) >= 2
assert ' ' not in base2
number = base2[:-1]
letter = base2[-1]
number = int(number)
letter = letter.upper()
assert number in ALLOWED_NUMBERS
assert letter in ALLOWED_LETTERS
seen_letter_list.append(letter)
except ValueError:
print('Error found in %r' % (direct2, ))
continue
except AssertionError:
print('Folder name format error found in %r' % (direct2, ))
continue
direct2_ = Directory(direct2.absolute_directory_path, recursive=True, images=True)
try:
assert len(direct2_.directories()) == 0
except AssertionError:
print('Folders exist in file only level %r' % (direct2, ))
continue
filepath_list = sorted(direct2_.files())
if not dry_run:
try:
gid_list = ibs.add_images(filepath_list)
gid_list = ut.filter_Nones(gid_list)
gid_list = sorted(list(set(gid_list)))
imageset_text = 'GGR2,%d,%s' % (number, letter, )
note_list = [
'%s,%05d' % (imageset_text, index + 1)
for index, gid in enumerate(gid_list)
]
ibs.set_image_notes(gid_list, note_list)
ibs.set_image_imagesettext(gid_list, [imageset_text] * len(gid_list))
except Exception as ex: # NOQA
ut.embed()
seen_letter_set = set(seen_letter_list)
try:
assert len(seen_letter_set) == len(seen_letter_list)
except AssertionError:
print('Duplicate letters in %r with letters %r' % (direct1, seen_letter_list, ))
continue
try:
assert 'A' in seen_letter_set
except AssertionError:
print('WARNING: A camera not found in | |
# -*- coding: utf-8 -*-
#
# django-codenerix
#
# Codenerix GNU
#
# Project URL : http://www.codenerix.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from codenerix.djng.angular_model import NgModelFormMixin
from codenerix.djng import NgFormValidationMixin, NgForm, NgModelForm
from django.utils.translation import gettext as _
from django.forms.widgets import Select, CheckboxInput
from django.forms import NullBooleanField
from codenerix.helpers import model_inspect
from codenerix.widgets import (
StaticSelect,
DynamicSelect,
DynamicInput,
MultiStaticSelect,
MultiDynamicSelect,
)
class BaseForm(object):
def __init__(self, *args, **kwargs):
    """Initialize mixin state, then defer to the cooperating base class."""
    # Private language code for this form; set via set_language().
    self.__language = None
    # Arbitrary key/value attributes attached via set_attribute().
    self.attributes = {}
    # Returning __init__'s result is harmless (it is always None).
    return super(BaseForm, self).__init__(*args, **kwargs)
def set_language(self, language):
    """Record the language code used when rendering this form."""
    self.__language = language
def set_attribute(self, key, value):
    """Attach an arbitrary key/value attribute to this form."""
    self.attributes[key] = value
def get_name(self):
    """Return the human-readable form name.

    Prefers an explicit ``Meta.name``; otherwise inspects the model and falls
    back from its verbose name to its model name.
    """
    if "name" in self.Meta.__dict__:
        return self.Meta.name
    info = model_inspect(self.Meta.model())
    return info["verbose_name"] if info["verbose_name"] else info["modelname"]
def __errors__(self):
    """Hook for subclasses to report extra errors; none by default."""
    return []
def clean_color(self):
    """Validate the ``color`` field as a CSS hex color.

    Accepts the empty string, or '#' followed by exactly 3 or 6 hex digits.
    On failure registers an "Invalid color" form error; always returns the
    submitted value (matching the original contract).
    """
    import re  # local import keeps the method self-contained

    color = self.cleaned_data["color"]
    if len(color) != 0:
        # The original character-set check included '#' among the valid
        # characters, so malformed values such as '#1#2' or '#12345#' were
        # accepted. A full-match regex enforces one leading '#' plus exactly
        # 3 or 6 hex digits.
        if not re.fullmatch(r"#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})", color):
            self._errors["color"] = [_("Invalid color")]
    return color
def get_errors(self):
    """Collect non-field error payloads for rendering.

    Subclasses may short-circuit by defining ``list_errors``; otherwise the
    method digs into Django's non_field_errors() internal structure.
    """
    # Where to look for fields
    if "list_errors" in dir(self):
        list_errors = self.list_errors
    else:
        # NOTE(review): indexing element[5] relies on the internal shape of
        # Django's ErrorList entries and skips the last entry ([:-1]) — this
        # looks fragile across Django versions; confirm before upgrading.
        # r = self.non_field_errors()
        # list_errors = [element[5] for element in list(self.non_field_errors())[:-1]]
        list_errors = []
        for element in list(self.non_field_errors())[:-1]:
            if len(element) >= 5:
                list_errors.append(element[5])
    return list_errors
def __groups__(self):
    """Hook for subclasses: field-group layout definition (none by default)."""
    return []
def get_groups(self, gs=None, processed=[], initial=True):
"""
<--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow brackground in centered title, 2 boxes, 6 columns for each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exists as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
It it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extras functions
['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS
['age',6, {'color': 'red'} Input 'age' will be shown with red title
"""
# Check if language is set
if not self.__language:
raise IOError("ERROR: No language suplied!")
# Initialize the list
if initial:
processed = []
# Where to look for fields
if "list_fields" in dir(self):
list_fields = self.list_fields
check_system = "html_name"
else:
list_fields = self
check_system = "name"
# Default attributes for fields
attributes = [
("columns", 6),
("color", None),
("bgcolor", None),
("textalign", None),
("inline", False), # input in line with label
("label", True),
("extra", None),
("extra_div", None),
("foreign_info", {}),
]
labels = [x[0] for x in attributes]
# Get groups if none was given
if gs is None:
gs = self.__groups__()
# Prepare the answer
groups = []
# Prepare focus control
focus_first = None
focus_must = None
# html helper for groups and fields
html_helper = self.html_helper()
# Start processing
for g in gs:
token = {}
token["name"] = g[0]
if token["name"] in html_helper:
if "pre" in html_helper[token["name"]]:
token["html_helper_pre"] = html_helper[token["name"]]["pre"]
if "post" in html_helper[token["name"]]:
token["html_helper_post"] = html_helper[token["name"]]["post"]
styles = g[1]
if type(styles) is tuple:
if len(styles) >= 1:
token["columns"] = g[1][0]
if len(styles) >= 2:
token["color"] = g[1][1]
if len(styles) >= 3:
token["bgcolor"] = g[1][2]
if len(styles) >= 4:
token["textalign"] = g[1][3]
if len(styles) >= 5:
token["inline"] = g[1][4]
if len(styles) >= 7:
token["extra"] = g[1][5]
if len(styles) >= 8:
token["extra_div"] = g[1][6]
else:
token["columns"] = g[1]
fs = g[2:]
fields = []
for f in fs:
# Field
atr = {}
# Decide whether this is a Group or not
if type(f) == tuple:
# Recursive
fields += self.get_groups([list(f)], processed, False)
else:
try:
list_type = [
str,
unicode,
]
except NameError:
list_type = [
str,
]
# Check if it is a list
if type(f) == list:
# This is a field with attributes, get the name
field = f[0]
if (
html_helper
and token["name"] in html_helper
and "items" in html_helper[token["name"]]
and field in html_helper[token["name"]]["items"]
):
if "pre" in html_helper[token["name"]]["items"][field]:
atr["html_helper_pre"] = html_helper[token["name"]][
"items"
][field]["pre"]
if "post" in html_helper[token["name"]]["items"][field]:
atr["html_helper_post"] = html_helper[token["name"]][
"items"
][field]["post"]
# Process each attribute (if any)
dictionary = False
for idx, element in enumerate(f[1:]):
if type(element) == dict:
dictionary = True
for key in element.keys():
if key in labels:
atr[key] = element[key]
else:
raise IOError(
"Unknown attribute '{0}' as field '{1}' in list of fields".format(
key, field
)
)
else:
if not dictionary:
if element is not None:
atr[attributes[idx][0]] = element
else:
| |
attempt_to_recreate_single_trigger, Queuing trigger to be removed: " + str(e) + ", trigger_info: " + str(trigger_info))
triggers_to_inform_and_remove.append((trigger_info, "Unable to recreate trigger"))
else:
# no active triggers frontend, add to the pending set again
print("[recreate_pending_triggers] No active frontend, Queuing up to be recreated, trigger_id: " + trigger_info["trigger_id"] + ", trigger_info: " + str(trigger_info))
add_to_global_pending_trigger_set(context, trigger_info["trigger_id"])
if len(triggers_to_inform_and_remove) > 0:
inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context)
def attempt_to_recreate_single_trigger(trigger_info, tf_ip_port, context):
    """Try to recreate one trigger on the given triggers-frontend.

    Persists the trigger's global metadata with status "starting", POSTs
    the stored frontend command to ``http://<tf_ip_port>/create_trigger``
    and, on a "success" response, marks the trigger "ready" and records it
    in both the frontend's trigger map and the owning user's trigger list.

    Returns a ``(success, global_trigger_info)`` tuple.
    """
    print("[attempt_to_recreate_single_trigger] selected frontend: " + tf_ip_port + ", trigger_info: " + str(trigger_info))
    status_msg = ""
    # create the global trigger info all the information, and status set of starting, and not workflow associated
    trigger_id = trigger_info["trigger_id"]
    email = trigger_info["email"]
    trigger_name = trigger_info["trigger_name"]
    global_trigger_info = trigger_info.copy()
    global_trigger_info["status"] = "starting"
    global_trigger_info["frontend_ip_port"] = tf_ip_port
    # add the global_trigger_info to global map
    add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
    url = "http://" + tf_ip_port + "/create_trigger"
    # send the request and wait for response
    print("[attempt_to_recreate_single_trigger] Contacting: " + url + ", with data: " + str(global_trigger_info["frontend_command_info"]))
    res_obj = {}
    try:
        res = requests.post(url, json=global_trigger_info["frontend_command_info"])
        if res.status_code != 200:
            raise Exception("status code: " + str(res.status_code) + " returned")
        res_obj = res.json()
    except Exception as e:
        # Any transport/HTTP failure falls through to the error branch below
        # (res_obj stays empty, so the "success" check fails).
        status_msg = "POST Error: trigger_id: " + trigger_id + "," + str(e)
        #print("[AddTrigger] " + status_msg)
    if "status" in res_obj and res_obj["status"].lower() == "success":
        # add the trigger_id to frontend map
        print("[attempt_to_recreate_single_trigger] Success response from frontend")
        frontend_info = get_frontend_info(context, tf_ip_port)
        #print("get_frontend_info: " + str(frontend_info))
        # NOTE(review): assert is stripped under -O; consider raising instead.
        assert(frontend_info is not None)
        frontend_info[trigger_id] = ''
        add_frontend_info(context, tf_ip_port, json.dumps(frontend_info))
        global_trigger_info["status"] = "ready"
        add_trigger_info(context, trigger_id, json.dumps(global_trigger_info))
        # add the trigger_name to user's list of triggers
        user_triggers_list = get_user_trigger_list(context, email)
        user_triggers_list[trigger_name] = ''
        update_user_trigger_list(context, email, json.dumps(user_triggers_list))
        # write the user's list
        status_msg = "Trigger created successfully. Message: " + res_obj["message"] + ", details: " + str(global_trigger_info)
        print("[attempt_to_recreate_single_trigger] " + status_msg)
        return True, global_trigger_info
    else:
        if "message" in res_obj:
            status_msg = status_msg + ", message: " + res_obj["message"]
        status_msg = "Error: " + status_msg + ", response: " + str(res_obj)
        print("[attempt_to_recreate_single_trigger] " + status_msg)
        return False, global_trigger_info
def attempt_to_associate_trigger_with_workflows(trigger_id, workflow_name, context):
    """(Re-)associate a stored trigger with one of its workflows.

    Looks up the trigger's metadata, verifies that the workflow still
    exists for the owning user and, if so, restores the bidirectional
    trigger<->workflow metadata links; when the workflow is also deployed,
    the workflow is re-added to the live trigger.

    Returns True when the workflow was present, False otherwise.
    """
    print("[attempt_to_associate_trigger_with_workflows] called with: trigger_id: " + str(trigger_id) + ", workflow_name: " + str(workflow_name))
    trigger_info = get_trigger_info(context, trigger_id)
    trigger_id = trigger_info["trigger_id"]
    email = trigger_info["email"]
    trigger_name = trigger_info["trigger_name"]
    # NOTE(review): tf_ip_port is unused in this function — confirm before removing.
    tf_ip_port = trigger_info["frontend_ip_port"]
    workflow_info = trigger_info["associated_workflows"][workflow_name]
    workflow_state = workflow_info["workflow_state"]
    isWorkflowPresent, isWorkflowDeployed, workflow_details = isWorkflowPresentAndDeployed(email, workflow_name, context)
    if isWorkflowPresent == False:
        # NOTE(review): these log strings lack a space before "Workflow:" —
        # runtime strings left untouched here.
        print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " not found.")
        return False
    if isWorkflowPresent == True:
        # add the trigger name in workflow's metadata
        print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is present.")
        addTriggerToWorkflowMetadata(email, trigger_name, workflow_name, workflow_state, workflow_details["id"], context)
        addWorkflowToTriggerMetadata(workflow_name, workflow_state, trigger_id, context)
        if isWorkflowDeployed == True:
            # add the workflow to the trigger
            print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is deployed.")
            addWorkflowToTrigger(email, workflow_name, workflow_state, workflow_details, trigger_id, trigger_name, context)
        else:
            print("[attempt_to_associate_trigger_with_workflows] User: " + email + "Workflow: " + workflow_name + " is not deployed. Keeping workflow to trigger association intact.")
    return True
    # TODO: write updated trigger info
def health_check_registered_frontends(context):
    """Probe every registered triggers-frontend and clean up the dead ones.

    For each registered frontend that fails its liveness probe:
      * its registration is removed,
      * its triggers in "ready" state are queued for recreation and
        returned to the caller,
      * its triggers in any other state are reported to their associated
        workflows and removed entirely.

    Returns a list of ``(trigger_info, reason)`` tuples to recreate.
    """
    print("[health_check_registered_frontends] called")
    triggers_to_recreate = []
    tf_hosts = get_available_frontends(context)
    if len(tf_hosts) == 0:
        print("[health_check_registered_frontends] No available TriggersFrontend found")
        return triggers_to_recreate
    tf_hosts = list(tf_hosts)
    for tf_ip_port in tf_hosts:
        if not is_frontend_active(tf_ip_port):
            # frontend is not active but is still registered with management
            print("[health_check_registered_frontends] Removing inactive frontend: " + tf_ip_port)
            triggers_to_inform_and_remove = []
            frontend_info = get_frontend_info(context, tf_ip_port)
            if frontend_info is None:
                continue
            remove_frontend_info(context, tf_ip_port)
            print("[health_check_registered_frontends] Removing inactive frontend: frontend_info = " + str(frontend_info))
            for trigger_id in frontend_info:
                trigger_info = get_trigger_info(context, trigger_id)
                # Only act on triggers that still point at this frontend.
                if trigger_info is not None and trigger_info["frontend_ip_port"] == tf_ip_port:
                    if trigger_info["status"] == "ready":
                        # this ready trigger is still associated with an inactive frontend
                        print("[health_check_registered_frontends] Queuing up to be recreated, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
                        triggers_to_recreate.append((trigger_info, "READY trigger frontend not active"))
                    else:
                        print("[health_check_registered_frontends] Queuing up to be removed, since status is not ready, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
                        triggers_to_inform_and_remove.append((trigger_info, "Triggers frontend not active"))
                else:
                    print("[health_check_registered_frontends] Ignoring trigger, since it belongs to a different frontend or does not exist, trigger_id: " + str(trigger_id) + ", trigger_info: " + str(trigger_info))
                    # this trigger is now associated with a different frontend, simply remove frontend information
                    pass
            if len(triggers_to_inform_and_remove) > 0:
                inform_workflows_for_triggers(triggers_to_inform_and_remove, context)
                removeTriggerAndWorkflowAssociations(triggers_to_inform_and_remove, context, update_frontend_info=False)
    return triggers_to_recreate
def is_frontend_active(tf_ip_port):
    """Return True when the triggers-frontend at ``tf_ip_port`` answers its liveness probe.

    Sends a GET to ``http://<tf_ip_port>/`` and expects HTTP 200 with the
    exact body ``'ok'``.  A missing/empty address, a non-200 status, any
    other body, or a network failure all yield False.
    """
    # The original used ``tf_ip_port is ""`` — an identity comparison with a
    # str literal, which is unreliable and a SyntaxWarning on modern CPython.
    if not tf_ip_port:
        return False
    url = "http://" + tf_ip_port + "/"
    print("[is_frontend_active] Contacting: " + url + ", to check if it is alive")
    try:
        res = requests.get(url)
        if res.status_code != 200:
            raise Exception("[is_frontend_active] status code: " + str(res.status_code) + " returned")
        # A None body also fails this comparison, so no separate None check is needed.
        if res.text != 'ok':
            raise Exception("[is_frontend_active] response body: " + str(res.text) + " returned")
        print("[is_frontend_active] " + url + " is alive")
        return True
    except Exception as e:
        print("[is_frontend_active] Error: " + str(e))
    return False
def inform_workflows_for_triggers(pending_triggers, context):
    """Notify every workflow associated with each failed trigger.

    For each ``(trigger_info, error_msg)`` pair, an error payload describing
    the trigger is sent to every associated workflow's URL.
    """
    for trigger_info, error_msg in pending_triggers:
        print("[inform_workflows_for_triggers] for trigger: " + str(trigger_info))
        command_info = trigger_info["frontend_command_info"]
        for workflow_name, workflow_info in trigger_info["associated_workflows"].items():
            payload = {
                "trigger_status": "error",
                "trigger_type": command_info["trigger_type"],
                "trigger_name": command_info["trigger_name"],
                "workflow_name": workflow_name,
                "source": "",
                "data": error_msg,
            }
            execute_workflow(workflow_info["workflow_url"], payload, workflow_info["workflow_state"])
def removeTriggerAndWorkflowAssociations(pending_triggers, context, update_frontend_info=True):
    """Tear down trigger state for each pending ``(trigger_info, error_msg)`` pair.

    Optionally removes the trigger from its frontend's bookkeeping, detaches
    it from its workflows (best effort), and finally deletes its global info.
    """
    for trigger_info, _error_msg in pending_triggers:
        if update_frontend_info:
            removeTriggerFromFrontend(trigger_info, context)
        try:
            removeTriggerFromWorkflow(trigger_info, context)
        except Exception as e:
            # Best effort: keep cleaning up even if workflow detachment fails.
            print("Exception in removeTriggerFromWorkflow: " + str(e))
        remove_trigger_info(context, trigger_info["trigger_id"])
def removeTriggerFromFrontend(trigger_info, context):
    """Drop a trigger's id from its frontend's bookkeeping map, if present."""
    print("[removeTriggerFromFrontend] for trigger: " + str(trigger_info))
    frontend_ip_port = trigger_info["frontend_ip_port"]
    # remove the trigger_id from frontend map
    frontend_info = get_frontend_info(context, frontend_ip_port)
    if frontend_info is None:
        return
    trigger_id = trigger_info["trigger_id"]
    if trigger_id in frontend_info:
        del frontend_info[trigger_id]
        add_frontend_info(context, frontend_ip_port, json.dumps(frontend_info))
def removeTriggerFromWorkflow(trigger_info,context):
    """Detach a trigger from all of its associated workflows and user metadata.

    Each association is removed from the trigger's stored info (persisted
    after every removal), the trigger name is deleted from the workflow's
    metadata when the workflow still exists, and finally the trigger name
    is dropped from the user's trigger list.

    Returns the last error message encountered, or "" on full success.
    """
    print("[removeTriggerFromWorkflow] for trigger: " + str(trigger_info))
    # Iterate over a *copy* so entries can be deleted from the original dict.
    associated_workflows = trigger_info["associated_workflows"].copy()
    email = trigger_info["email"]
    trigger_name = trigger_info["trigger_name"]
    # NOTE(review): storage_userid appears unused here — confirm before removing.
    storage_userid = trigger_info["storage_userid"]
    trigger_id = trigger_info["trigger_id"]
    status_msg = ""
    # do the delete trigger processing
    for associated_workflow_name in associated_workflows:
        del trigger_info["associated_workflows"][associated_workflow_name]
        add_trigger_info(context, trigger_id, json.dumps(trigger_info))
        isWorkflowPresent, isWorkflowDeployed, workflow_details = isWorkflowPresentAndDeployed(email, associated_workflow_name, context)
        print("associated_workflow_name: " + associated_workflow_name + ", isWorkflowPresent: " + str(isWorkflowPresent) + ", details: " + str(workflow_details))
        try:
            if isWorkflowPresent == True:
                # add the trigger name in workflow's metadata
                deleteTriggerFromWorkflowMetadata(email, trigger_name, associated_workflow_name, workflow_details["id"], context)
        except Exception as e:
            status_msg = str(e)
            print("[removeTriggerFromWorkflow] exeception: " + status_msg)
    # check the user's storage area for the trigger name
    user_triggers_list = get_user_trigger_list(context, email)
    print("user_triggers_list = " + str(user_triggers_list))
    if trigger_name in user_triggers_list:
        del user_triggers_list[trigger_name]
        update_user_trigger_list(context, email, json.dumps(user_triggers_list))
    return status_msg
def select_random_active_frontend(tf_hosts):
    """Return a randomly chosen live frontend from ``tf_hosts``, or "" if none responds.

    NOTE: inactive hosts are removed from the caller's list as a side
    effect, matching the original behaviour.
    """
    random.seed(time.time())
    while tf_hosts:
        candidate = tf_hosts[random.randint(0, len(tf_hosts) - 1)]
        if is_frontend_active(candidate):
            return candidate
        tf_hosts.remove(candidate)
    return ""
def isWorkflowPresentAndDeployed(email, workflowname, sapi):
    """Check whether a user's workflow exists and whether it is deployed.

    Returns ``(isWorkflowPresent, isWorkflowDeployed, details)``; ``details``
    carries id, status, endpoints and selected metadata fields when the
    workflow is found, otherwise it is empty.
    """
    workflows = sapi.get(email + "_list_workflows", True)
    if workflows is not None and workflows != "":
        workflows = json.loads(workflows)
    else:
        workflows = {}
    isWorkflowPresent = False
    isWorkflowDeployed = False
    details = {}
    if workflowname in workflows:
        wf_id = workflows[workflowname]
        wf = sapi.get(email + "_workflow_" + wf_id, True)
        if wf is not None and wf != "":
            isWorkflowPresent = True
            wf = json.loads(wf)
            details["email"] = email
            details["name"] = workflowname
            details["id"] = wf_id
            # NOTE(review): details["status"] is read from the separate
            # "workflow_status_<id>" key, while the deployed check below uses
            # wf["status"] — confirm the two are kept in sync.
            wf_status = sapi.get("workflow_status_" + wf_id, True)
            details["status"] = wf_status
            details["endpoints"] = list(sapi.retrieveSet(wf_id + "_workflow_endpoints", is_private=True))
            if "modified" in wf:
                details["modified"] = wf["modified"]
            if "associatedTriggerableTables" in wf:
                details["associatedTriggerableTables"] = wf["associatedTriggerableTables"]
            if "associatedTriggers" in wf:
                details["associatedTriggers"] = wf["associatedTriggers"]
            if wf["status"] == "deployed" or wf["status"] == "deploying":
                isWorkflowDeployed = True
    return isWorkflowPresent, isWorkflowDeployed, details
def deleteTriggerFromWorkflowMetadata(email, trigger_name, workflow_name, workflow_id, context):
wf = context.get(email + "_workflow_" + workflow_id, True)
if wf is None or wf == "":
print("[deleteTriggerFromWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": couldn't retrieve workflow metadata.")
raise Exception("[deleteTriggerFromWorkflowMetadata] User: " + email +
", Workflow: " + workflow_name + ": couldn't retrieve workflow metadata.")
wf = json.loads(wf)
print("[deleteTriggerFromWorkflowMetadata] User: " + email + ", Workflow: " +
workflow_name + ": Current workflow metadata: " + str(wf))
if 'associatedTriggers' not in wf:
wf['associatedTriggers'] = {}
associatedTriggers = wf['associatedTriggers']
if trigger_name in associatedTriggers:
del associatedTriggers[trigger_name]
wf['associatedTriggers'] = associatedTriggers
wf = context.put(email + "_workflow_" + workflow_id, | |
Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 97,
"name": "TODO - 97",
"status": "To Do",
"created_at": "05:30 AM, 02 Jan, 2022",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 98,
"name": "TODO - 98",
"status": "To Do",
"created_at": "05:30 AM, 27 Dec, 2021",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 99,
"name": "TODO - 99",
"status": "To Do",
"created_at": "05:30 AM, 28 Dec, 2021",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 100,
"name": "TODO - 100",
"status": "To Do",
"created_at": "05:30 AM, 27 Dec, 2021",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 101,
"name": "TODO - 101",
"status": "To Do",
"created_at": "05:30 AM, 02 Jan, 2022",
"creator": {
"first_name": "Sunny",
"last_name": "Singhal",
"email": "<EMAIL>"
}
}, {
"id": 102,
"name": "TODO - 102",
"status": "Done",
"created_at": "05:30 AM, 01 Jan, 2022",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 103,
"name": "TODO - 103",
"status": "To Do",
"created_at": "05:30 AM, 02 Jan, 2022",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 104,
"name": "TODO - 104",
"status": "To Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 105,
"name": "TODO - 105",
"status": "To Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 106,
"name": "TODO - 106",
"status": "To Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 107,
"name": "TODO - 107",
"status": "Done",
"created_at": "05:30 AM, 29 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 108,
"name": "TODO - 108",
"status": "Done",
"created_at": "05:30 AM, 29 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 109,
"name": "TODO - 109",
"status": "Done",
"created_at": "05:30 AM, 29 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 110,
"name": "TODO - 110",
"status": "To Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 111,
"name": "TODO - 111",
"status": "Done",
"created_at": "05:30 AM, 29 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 112,
"name": "TODO - 112",
"status": "To Do",
"created_at": "05:30 AM, 30 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 113,
"name": "TODO - 113",
"status": "To Do",
"created_at": "05:30 AM, 27 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 114,
"name": "TODO - 114",
"status": "Done",
"created_at": "05:30 AM, 01 Jan, 2022",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 115,
"name": "TODO - 115",
"status": "To Do",
"created_at": "05:30 AM, 27 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 116,
"name": "TODO - 116",
"status": "Done",
"created_at": "05:30 AM, 29 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 117,
"name": "TODO - 117",
"status": "To Do",
"created_at": "05:30 AM, 31 Dec, 2021",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 118,
"name": "TODO - 118",
"status": "Done",
"created_at": "05:30 AM, 01 Jan, 2022",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 119,
"name": "TODO - 119",
"status": "To Do",
"created_at": "05:30 AM, 02 Jan, 2022",
"creator": {
"first_name": "Chirag",
"last_name": "Gupta",
"email": "<EMAIL>"
}
}, {
"id": 1,
"name": "TODO - 1",
"status": "To Do",
"created_at": "05:30 AM, 25 Dec, 2021",
"creator": {
"first_name": "Amal",
"last_name": "Raj",
"email": "<EMAIL>"
}
}
]
db_hit_count = len(connection.queries)
data = todos_utils.fetch_all_todo_list_with_user_details()
new_db_hit_count = len(connection.queries)
actual_hit_count = new_db_hit_count - db_hit_count
self.assertEqual(
actual_hit_count,
1,
msg='Expected only 1 db hit got {}'.format(new_db_hit_count - db_hit_count)
)
self.assertCountEqual(
data,
expected_data
)
def test_fetch_projects_details(self):
    """fetch_projects_details returns member stats for every project in a single query."""
    expected_data = [
        {'id': 5, 'name': 'Project E', 'status': 'In progress', 'existing_member_count': 1, 'max_members': 1},
        {'id': 4, 'name': 'Project D', 'status': 'In progress', 'existing_member_count': 1, 'max_members': 4},
        {'id': 10, 'name': 'Project J', 'status': 'Completed', 'existing_member_count': 3, 'max_members': 3},
        {'id': 6, 'name': 'Project F', 'status': 'To be started', 'existing_member_count': 4, 'max_members': 5},
        {'id': 2, 'name': 'Project B', 'status': 'Completed', 'existing_member_count': 2, 'max_members': 2},
        {'id': 7, 'name': 'Project G', 'status': 'In progress', 'existing_member_count': 2, 'max_members': 2},
        {'id': 1, 'name': 'Project A', 'status': 'To be started', 'existing_member_count': 2, 'max_members': 3},
        {'id': 8, 'name': 'Project H', 'status': 'To be started', 'existing_member_count': 1, 'max_members': 1},
        {'id': 11, 'name': 'Project K', 'status': 'To be started', 'existing_member_count': 4, 'max_members': 4},
        {'id': 9, 'name': 'Project I', 'status': 'Completed', 'existing_member_count': 2, 'max_members': 2},
        {'id': 3, 'name': 'Project C', 'status': 'In progress', 'existing_member_count': 3, 'max_members': 3}
    ]
    db_hit_count = len(connection.queries)
    data = todos_utils.fetch_projects_details()
    new_db_hit_count = len(connection.queries)
    actual_hit_count = new_db_hit_count - db_hit_count
    self.assertEqual(
        actual_hit_count,
        1,
        # reuse the computed delta instead of recomputing it in the message
        msg='Expected only 1 db hit got {}'.format(actual_hit_count)
    )
    self.assertCountEqual(
        data,
        expected_data
    )
def test_fetch_users_todo_stats(self):
    """fetch_users_todo_stats returns completed/pending todo counts per user in one query."""
    expected_data = [
        {'id': 4, 'first_name': 'Nikhil', 'last_name': 'Khurana', 'email': '<EMAIL>',
         'completed_count': 3, 'pending_count': 7},
        {'id': 6, 'first_name': 'Sunny', 'last_name': 'Singhal', 'email': '<EMAIL>',
         'completed_count': 8, 'pending_count': 22},
        {'id': 2, 'first_name': 'Gurpreet', 'last_name': 'Singh', 'email': '<EMAIL>',
         'completed_count': 9, 'pending_count': 15},
        {'id': 7, 'first_name': 'Chirag', 'last_name': 'Gupta', 'email': '<EMAIL>',
         'completed_count': 8, 'pending_count': 10},
        {'id': 3, 'first_name': 'Naveen', 'last_name': 'Kumar', 'email': '<EMAIL>',
         'completed_count': 5, 'pending_count': 13},
        {'id': 1, 'first_name': 'Amal', 'last_name': 'Raj', 'email': '<EMAIL>',
         'completed_count': 3, 'pending_count': 16}
    ]
    db_hit_count = len(connection.queries)
    data = todos_utils.fetch_users_todo_stats()
    new_db_hit_count = len(connection.queries)
    actual_hit_count = new_db_hit_count - db_hit_count
    self.assertEqual(
        actual_hit_count,
        1,
        # reuse the computed delta instead of recomputing it in the message
        msg='Expected only 1 db hit got {}'.format(actual_hit_count)
    )
    self.assertCountEqual(
        data,
        expected_data
    )
def test_fetch_five_users_with_max_pending_todos(self):
    """The five users with the most pending todos are returned in order, in one query."""
    expected_data = [
        {'id': 6, 'first_name': 'Sunny', 'last_name': 'Singhal', 'email': '<EMAIL>',
         'pending_count': 22},
        {'id': 1, 'first_name': 'Amal', 'last_name': 'Raj', 'email': '<EMAIL>',
         'pending_count': 16},
        {'id': 2, 'first_name': 'Gurpreet', 'last_name': 'Singh', 'email': '<EMAIL>',
         'pending_count': 15},
        {'id': 3, 'first_name': 'Naveen', 'last_name': 'Kumar', 'email': '<EMAIL>',
         'pending_count': 13},
        {'id': 7, 'first_name': 'Chirag', 'last_name': 'Gupta', 'email': '<EMAIL>',
         'pending_count': 10}
    ]
    db_hit_count = len(connection.queries)
    data = todos_utils.fetch_five_users_with_max_pending_todos()
    new_db_hit_count = len(connection.queries)
    actual_hit_count = new_db_hit_count - db_hit_count
    self.assertEqual(
        actual_hit_count,
        1,
        # reuse the computed delta instead of recomputing it in the message
        msg='Expected only 1 db hit got {}'.format(actual_hit_count)
    )
    self.assertListEqual(
        data,
        expected_data
    )
def test_fetch_users_with_n_pending_todos(self):
    """Only users with exactly n pending todos are returned, in one query."""
    expected_data = [
        {'id': 7, 'first_name': 'Chirag', 'last_name': 'Gupta', 'email': '<EMAIL>',
         'pending_count': 10}
    ]
    db_hit_count = len(connection.queries)
    data = todos_utils.fetch_users_with_n_pending_todos(n=10)
    new_db_hit_count = len(connection.queries)
    actual_hit_count = new_db_hit_count - db_hit_count
    self.assertEqual(
        actual_hit_count,
        1,
        # reuse the computed delta instead of recomputing it in the message
        msg='Expected only 1 db hit got {}'.format(actual_hit_count)
    )
    self.assertListEqual(
        data,
        expected_data
    )
def test_fetch_completed_todos_with_in_date_range(self):
    """Completed todos within the given dd-mm-yyyy range are returned, in one query."""
    expected_data = [
        {
            'id': 18, 'name': 'TODO - 18', 'creator': '<NAME>', 'email': '<EMAIL>',
            'created_at': '05:30 AM, 26 Dec, 2021', 'status': 'Done'
        },
        {
            'id': 25, 'name': 'TODO - 25', 'creator': '<NAME>',
            'email': '<EMAIL>', 'created_at': '05:30 AM, 26 Dec, 2021',
            'status': 'Done'
        },
        {
            'id': 35, 'name': 'TODO - 35', 'creator': 'G<NAME>',
            'email': '<EMAIL>',
            'created_at': '05:30 AM, 26 Dec, 2021', 'status': 'Done'
        },
        {
            'id': 78, 'name': 'TODO - 78', 'creator': '<NAME>',
            'email': '<EMAIL>', 'created_at': '05:30 AM, 26 Dec, 2021',
            'status': 'Done'
        },
        {
            'id': 87, 'name': 'TODO - 87', 'creator': '<NAME>',
            'email': '<EMAIL>',
            'created_at': '05:30 AM, 26 Dec, 2021', 'status': 'Done'
        },
        {
            'id': 92, 'name': 'TODO - 92', 'creator': '<NAME>',
            'email': '<EMAIL>', 'created_at': '05:30 AM, 26 Dec, 2021',
            'status': 'Done'
        }
    ]
    db_hit_count = len(connection.queries)
    data = todos_utils.fetch_completed_todos_with_in_date_range('21-12-2021', '29-12-2021')
    new_db_hit_count = len(connection.queries)
    actual_hit_count = new_db_hit_count - db_hit_count
    self.assertEqual(
        actual_hit_count,
        1,
        # reuse the computed delta instead of recomputing it in the message
        msg='Expected only 1 db hit got {}'.format(actual_hit_count)
    )
    self.assertListEqual(
        data,
        expected_data
    )
def test_fetch_project_with_member_name_start_or_end_with_a(self):
expected_data = [
{'project_name': 'Project G', 'done': False, 'max_members': 2},
{'project_name': 'Project J', 'done': True, 'max_members': 3},
{'project_name': 'Project K', 'done': False, 'max_members': 4},
{'project_name': 'Project B', 'done': True, 'max_members': 2},
{'project_name': 'Project I', 'done': True, 'max_members': 2},
{'project_name': 'Project E', 'done': | |
<reponame>lbanner/osf.io
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from invoke.exceptions import Failure
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
    """Return the directory that contains the running Python interpreter."""
    interpreter = sys.executable
    return os.path.dirname(interpreter)
def bin_prefix(cmd):
    """Prefix command with current binary path.

    Joins ``cmd`` onto the directory of the running interpreter, so
    executables from the active environment are used.
    """
    return os.path.join(get_bin_path(), cmd)
# Use rednose (coloured nosetests output) when it is installed; fall back to
# plain nosetests otherwise.  The probe shells out via invoke's ``run`` at
# import time and raises Failure when grep finds no match.
try:
    run('pip freeze | grep rednose', hide='both')
    TEST_CMD = 'nosetests --rednose'
except Failure:
    TEST_CMD = 'nosetests'
@task
def server(host=None, port=5000, debug=True):
    """Run the app server.

    host/port/debug are passed straight to ``app.run`` (presumably Flask —
    confirm); the asset hash file is watched via ``extra_files`` so the dev
    server reloads when assets are rebuilt.
    """
    from website.app import init_app
    app = init_app(set_backends=True, routes=True, mfr=True)
    app.run(host=host, port=port, debug=debug, extra_files=[settings.ASSET_HASH_PATH])
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
    """Build the variable namespace exposed in the interactive OSF shell."""
    # Heavy imports live inside the function so plain `invoke --list` stays fast.
    from modularodm import Q
    from framework.auth import User, Auth
    from framework.mongo import database
    from website.app import init_app
    from website.project.model import Node
    from website import models  # all models
    from website import settings
    import requests
    app = init_app()
    context = {
        'app': app,
        'db': database,
        'User': User,
        'Auth': Auth,
        'Node': Node,
        'Q': Q,
        'models': models,
        'run_tests': test,
        'rget': requests.get,
        'rpost': requests.post,
        'rdelete': requests.delete,
        'rput': requests.put,
        'settings': settings,
    }
    try:  # Add a fake factory for generating fake names, emails, etc.
        from faker import Factory
        fake = Factory.create()
        context['fake'] = fake
    except ImportError:
        # `faker` is an optional dev dependency; skip it when not installed.
        pass
    return context
def format_context(context):
    """Render a shell-context dict as newline-separated "name: repr" lines.

    :param context: mapping of variable name -> object.
    :return: one line per entry, in the mapping's iteration order.
    """
    # Pass the values explicitly instead of the fragile `.format(**locals())`
    # hack, which leaked every local name into the format call.
    return '\n'.join(
        '{0}: {1!r}'.format(name, obj) for name, obj in context.items()
    )
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
    """Open an interactive Python shell preloaded with OSF models/helpers."""
    context = make_shell_context()
    banner = SHELL_BANNER.format(version=sys.version,
                                 context=format_context(context)
                                 )
    # Prefer IPython when available, handling both old and new embed APIs.
    try:
        try:
            # 0.10.x
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed(banner=banner)
            ipshell(global_ns={}, local_ns=context)
        except ImportError:
            # 0.12+
            from IPython import embed
            embed(banner1=banner, user_ns=context)
        return
    except ImportError:
        # IPython is not installed at all.
        pass
    # fallback to basic python shell
    code.interact(banner, local=context)
    return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
    """Run the mongod process.

    :param daemon: fork mongod into the background (`--fork`).
    :param config: path to a mongod config file; when omitted, the platform's
        default tokumx config is used if one is known.
    """
    if not config:
        platform_configs = {
            'darwin': '/usr/local/etc/tokumx.conf',  # default for homebrew install
            'linux': '/etc/tokumx.conf',
        }
        # Renamed from `platform`, which shadowed the module imported at the
        # top of this file.
        platform_name = str(sys.platform).lower()
        config = platform_configs.get(platform_name)
    port = settings.DB_PORT
    cmd = 'mongod --port {0}'.format(port)
    if config:
        cmd += ' --config {0}'.format(config)
    if daemon:
        cmd += " --fork"
    run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
    """Run the mongo shell for the OSF database."""
    command = "mongo {db} --port {port}".format(
        db=settings.DB_NAME,
        port=settings.DB_PORT,
    )
    run(command, pty=True)
@task
def mongodump(path):
    """Back up the contents of the running OSF database.

    :param path: directory to write the dump into (mongodump --out).
    """
    db = settings.DB_NAME
    port = settings.DB_PORT
    # BUG FIX: `pty=True` was previously passed to str.format(), where extra
    # keyword arguments are silently ignored; it belongs on run() below.
    cmd = "mongodump --db {db} --port {port} --out {path}".format(
        db=db,
        port=port,
        path=path)
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    run(cmd, echo=True, pty=True)

    print()
    print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
        os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
    """Restores the running OSF database with the contents of the database at
    the location given its argument.

    By default, the contents of the specified database are added to
    the existing database. The `--drop` option will cause the existing database
    to be dropped.

    A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
    database dump will be stored.
    """
    db = settings.DB_NAME
    port = settings.DB_PORT
    # BUG FIX: `pty=True` was previously passed to str.format(), where extra
    # keyword arguments are silently ignored; it belongs on run() below.
    cmd = "mongorestore --db {db} --port {port}".format(
        db=db,
        port=port)
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    if drop:
        cmd += " --drop"
    cmd += " " + path
    run(cmd, echo=True, pty=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
    """Start a local ShareJS server.

    Each provided option is exported to the environment for shareServer.js.
    """
    # os.environ values must be strings; cast so numeric ports passed
    # programmatically don't raise TypeError (CLI values are already str,
    # so this is a no-op for invoke command-line use).
    if host:
        os.environ['SHAREJS_SERVER_HOST'] = str(host)
    if port:
        os.environ['SHAREJS_SERVER_PORT'] = str(port)
    if db_host:
        os.environ['SHAREJS_DB_HOST'] = str(db_host)
    if db_port:
        os.environ['SHAREJS_DB_PORT'] = str(db_port)
    if db_name:
        os.environ['SHAREJS_DB_NAME'] = str(db_name)
    if cors_allow_origin:
        os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = str(cors_allow_origin)
    if settings.SENTRY_DSN:
        os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
    share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
    run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug"):
    """Run the Celery process."""
    command = 'celery worker -A framework.tasks -l {0}'.format(level)
    run(bin_prefix(command))
@task
def rabbitmq():
    """Start a local rabbitmq server.

    NOTE: this is for development only. The production environment should start
    the server as a daemon.
    """
    # Runs in the foreground of this terminal (pty=True).
    run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
    """Start a local elasticsearch server

    NOTE: Requires that elasticsearch is installed. See README for instructions
    """
    # The local `import platform` was redundant: the module is already
    # imported at the top of this file.
    # NOTE: platform.linux_distribution() is deprecated (removed in py3.8);
    # fine on the runtime this file targets.
    if platform.linux_distribution()[0] == 'Ubuntu':
        run("sudo service elasticsearch start")
    elif platform.system() == 'Darwin':  # Mac OSX
        run('elasticsearch')
    else:
        print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(python='python'):
    '''Migrate the search-enabled models.'''
    command = '{0} -m website.search_migration.migrate'.format(python)
    run(bin_prefix(command))
@task
def mailserver(port=1025):
    """Run a SMTP test server."""
    command = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
    run(bin_prefix(command), pty=True)
@task(aliases=['flake8'])
def flake():
    """Lint the whole repository with flake8."""
    run('flake8 .', echo=True)
@task
def requirements(all=False, download_cache=None):
    """Install dependencies.

    :param all: also install every addon's requirements.
    :param download_cache: forwarded to `pip --download-cache`.
    """
    # NOTE: `all` shadows the builtin, but the name is part of the CLI
    # interface (`invoke requirements --all`), so it must stay.
    cmd = "pip install --upgrade -r dev-requirements.txt"
    if WHEELHOUSE_PATH:
        cmd += ' --use-wheel --find-links {}'.format(WHEELHOUSE_PATH)
    if download_cache:
        cmd += ' --download-cache {0}'.format(download_cache)
    run(bin_prefix(cmd), echo=True)
    if all:
        addon_requirements(download_cache=download_cache)
@task
def test_module(module=None, verbosity=2):
    """Helper for running tests.

    :param module: a test path, or a list of test paths, passed to
        `nosetests -s`. When omitted, the default suite is run (previously
        a missing module produced the nonsense command `nosetests -s None`).
    :param verbosity: nose verbosity level.
    """
    args = " --verbosity={0}".format(verbosity)
    if module is not None:
        # Allow selecting specific submodule(s)
        module_fmt = ' '.join(module) if isinstance(module, list) else module
        args += " -s {0}".format(module_fmt)
    # Use pty so the process buffers "correctly"
    run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
    """Run the OSF test suite."""
    test_module(module="tests/")
@task
def test_addons():
    """Run all the tests in the addons directory.
    """
    modules = [
        os.path.join(settings.BASE_PATH, 'addons', addon)
        for addon in settings.ADDONS_REQUESTED
    ]
    test_module(module=modules)
@task
def test(all=False):
    """Alias of `invoke test_osf`.

    With --all, runs the full suite via `test_all` instead.
    """
    suite = test_all if all else test_osf
    suite()
@task
def test_all(flake=False):
    """Run the OSF and addon test suites, optionally linting first.

    :param flake: when true, run flake8 before the tests.
    """
    if flake:
        # BUG FIX: the boolean parameter `flake` shadows the flake() task, so
        # the original `flake()` call raised TypeError ("'bool' object is not
        # callable"). Run the same lint command directly instead; the
        # parameter name is kept because it is the CLI flag (`--flake`).
        run('flake8 .', echo=True)
    test_osf()
    test_addons()
@task
def wheelhouse(repo, path):
    """Fetch and unpack a prebuilt wheelhouse archive for this Python version."""
    version = '{0}.{1}'.format(*sys.version_info[:2])
    run('pip install wheel --upgrade', pty=False)
    archive = 'wheelhouse-{}.tar.gz'.format(version)
    url = '{}/archive/{}.tar.gz'.format(repo, version)
    # download and extract the wheelhouse github repository archive
    run('mkdir {}'.format(path), pty=False)
    run('curl -o {} -L {}'.format(archive, url), pty=False)
    run('tar -xvf {} --strip 1 -C {}'.format(archive, path), pty=False)
    run('rm -f {}'.format(archive), pty=False)
@task
def addon_requirements(download_cache=None):
    """Install all addon requirements.

    :param download_cache: forwarded to `pip --download-cache`.
    """
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory)
        if not os.path.isdir(path):
            continue
        requirements_file = os.path.join(path, 'requirements.txt')
        # BUG FIX: the existence probe was a bare open() whose file handle was
        # never closed; use os.path.isfile instead.
        if not os.path.isfile(requirements_file):
            continue
        print('Installing requirements for {0}'.format(directory))
        cmd = 'pip install --upgrade -r {0}'.format(requirements_file)
        if WHEELHOUSE_PATH:
            cmd += ' --use-wheel --find-links {}'.format(WHEELHOUSE_PATH)
        if download_cache:
            cmd += ' --download-cache {0}'.format(download_cache)
        run(bin_prefix(cmd))
    print('Finished')
@task
def encryption(owner=None):
    """Generate GnuPG key.

    For local development:
    > invoke encryption
    On Linode:
    > sudo env/bin/invoke encryption --owner www-data
    """
    if not settings.USE_GNUPG:
        print('GnuPG is not enabled. No GnuPG key will be generated.')
        return
    import gnupg
    gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
    # Idempotent: never overwrite an existing key.
    keys = gpg.list_keys()
    if keys:
        print('Existing GnuPG key found')
        return
    print('Generating GnuPG key')
    input_data = gpg.gen_key_input(name_real='OSF Generated Key')
    gpg.gen_key(input_data)
    if owner:
        # Hand ownership of the keyring to the given user (e.g. www-data).
        run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
    """Copy each addon's local-travis.py settings into place (for Travis CI)."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path):
            # BUG FIX: the existence probe was a bare open() whose file handle
            # was never closed; use os.path.isfile instead.
            if os.path.isfile(os.path.join(path, 'local-travis.py')):
                run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
@task
def copy_addon_settings():
    """Create each addon's local.py from local-dist.py when local.py is absent."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            # BUG FIX: the existence probe was a bare open() whose file handle
            # was never closed; use os.path.isfile instead.
            if os.path.isfile(os.path.join(path, 'local-dist.py')):
                run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
@task
def copy_settings(addons=False):
    """Create website/settings/local.py from the dist template if missing."""
    # Website settings
    local_settings = 'website/settings/local.py'
    if not os.path.isfile(local_settings):
        print('Creating local.py file')
        run('cp website/settings/local-dist.py website/settings/local.py')
    # Addon settings
    if addons:
        copy_addon_settings()
@task
def packages():
    """Install system-level packages (homebrew on OSX; Linux is a TODO)."""
    brew_commands = (
        'update',
        'upgrade',
        'install libxml2',
        'install libxslt',
        'install elasticsearch',
        'install gpg',
        'install node',
        'tap tokutek/tokumx',
        'install tokumx-bin',
    )
    system = platform.system()
    if system == 'Darwin':
        print('Running brew commands')
        for item in brew_commands:
            run('brew {cmd}'.format(cmd=item))
    elif system == 'Linux':
        # TODO: Write a script similar to brew bundle for Ubuntu
        # e.g., run('sudo apt-get install [list of packages]')
        pass
@task
def npm_bower():
    """Install the bower CLI globally through npm."""
    print('Installing bower')
    run('npm install -g bower', echo=True)
@task
def bower_install():
    """Run `bower install` for the front-end packages."""
    print('Installing bower-managed packages')
    run('bower install', echo=True)
@task
def setup():
    """Creates local settings, installs requirements, and generates encryption key"""
    # NOTE(review): the order appears intentional (settings before
    # requirements/encryption) — confirm before reordering.
    copy_settings(addons=True)
    packages()
    requirements(all=True)
    encryption()
    npm_bower()
    bower_install()
@task
def analytics():
    """Run every analytics/metrics script, rendering plots headlessly."""
    from website.app import init_app
    import matplotlib
    # Select the non-interactive Agg backend before any pyplot use.
    matplotlib.use('Agg')
    init_app()
    from scripts import metrics
    from scripts.analytics import (
        logs, addons, comments, links, watch, email_invites,
        permissions, profile, benchmarks
    )
    modules = (
        metrics, logs, addons, comments, links, watch, email_invites,
        permissions, profile, benchmarks
    )
    for module in modules:
        module.main()
@task
def clear_sessions(months=1, dry_run=False):
    """Clear old sessions via scripts.clear_sessions (cutoff given in months)."""
    from website.app import init_app
    init_app(routes=False, set_backends=True)
    from scripts import clear_sessions
    clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
@task
def clear_mfr_cache():
    """Delete all cached MFR temp and render files."""
    for cache_dir in (settings.MFR_TEMP_PATH, settings.MFR_CACHE_PATH):
        run('rm -rf {0}/*'.format(cache_dir), echo=True)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
    """Rename hotfix branch to hotfix/<next-patch-version> and optionally
    finish hotfix.
    """
    # NOTE(review): `push` is unused in this excerpt — the task likely
    # continues with a push step beyond this span; confirm.
    # NOTE(review): `latest_tag_info` is not defined or imported in this
    # excerpt; confirm where it comes from.
    print('Checking out master to calculate curent version')
    run('git checkout master')
    latest_version = latest_tag_info()['current_version']
    print('Current version is: {}'.format(latest_version))
    # Bump the patch component: X.Y.Z -> X.Y.(Z+1).
    major, minor, patch = latest_version.split('.')
    next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
    print('Bumping to next patch version: {}'.format(next_patch_version))
    print('Renaming branch...')
    new_branch_name = 'hotfix/{}'.format(next_patch_version)
    run('git checkout {}'.format(name), echo=True)
    run('git branch -m {}'.format(new_branch_name), echo=True)
    if finish:
        run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
| |
from discord.ext import commands
from utils.extras import send_webhook
from utils.db import get_config_value
from utils.embeds import log_embed_danger, log_embed_warn, log_embed_info
import textwrap
import ago
import re
# message logs, member logs, server logs, welcome logs, invite logs
class Logging(commands.Cog):
"""Everything related to logging."""
def __init__(self, bot):
self.bot = bot
self.config_collection = bot.db.config
self.invites_collection = bot.db.invites
self.invite_regex = 'https:\/\/discord.gg\/[a-zA-Z0-9]+'
async def _get_log_channel(self, guild, log_type):
is_logging_enabled = await get_config_value(
self.config_collection,
guild.id,
'is_logging_enabled'
)
if not is_logging_enabled:
return None
# try to get channel by type
logging_channel_by_type_id = await get_config_value(
self.config_collection,
guild.id,
log_type + '_channel'
)
# else fallback to default log_channel
if not logging_channel_by_type_id:
logging_channel_default_id = await get_config_value(
self.config_collection,
guild.id,
'log_channel'
)
return guild.get_channel(logging_channel_default_id)
return guild.get_channel(logging_channel_by_type_id)
async def _get_webhook(self, guild, log_type):
logging_channel_obj = await self._get_log_channel(guild, log_type)
if not logging_channel_obj:
return
all_webhooks = await logging_channel_obj.webhooks()
if not all_webhooks:
avatar = await self.bot.user.avatar_url.read()
webhook = await logging_channel_obj.create_webhook(
name='Jeju Logging',
avatar=avatar,
reason='Jeju bot logging'
)
return webhook
for webhook in all_webhooks:
if webhook.name == 'Jeju Logging':
return webhook
avatar = await self.bot.user.avatar_url.read()
webhook = await logging_channel_obj.create_webhook(
name='<NAME>',
avatar=avatar,
reason='Jeju bot logging'
)
return webhook
    async def _get_invite(self, guild):
        """Sync stored invite use-counts with the guild's live invites.

        Deletes records for invites that no longer exist, updates stored use
        counts, and returns an invite object (see NOTE below), or None when no
        invites are tracked for this guild.
        """
        old_invites_docs = await self.invites_collection.find({'guild_id': guild.id}).to_list(None)
        if not old_invites_docs:
            return
        current_invites = await guild.invites()
        for old_invite_doc in old_invites_docs:
            current_invite = [
                invite for invite in current_invites if invite.id == old_invite_doc['_id']]
            if not current_invite:  # invite got deleted
                await self.invites_collection.delete_one({'_id': old_invite_doc['_id']})
                continue
            if old_invite_doc['uses'] < current_invite[0].uses:
                await self.invites_collection.update_one(
                    {'_id': current_invite[0].id},
                    {'$set': {'uses': current_invite[0].uses}}
                )
            # NOTE(review): this return sits outside the uses-increased check,
            # so the first still-existing invite is returned even when its use
            # count did not change — confirm whether it should be inside the
            # `if` above (i.e. only return the invite that was just used).
            return current_invite[0]
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
invite_matches = re.findall(self.invite_regex, message.content)
if invite_matches:
invite_matches = list(dict.fromkeys(
invite_matches)) # to remove duplicates
for invite_match in invite_matches:
try:
invite = await self.bot.fetch_invite(invite_match)
except:
return
webhook = await self._get_webhook(message.guild, 'message_log')
if not webhook:
return
embed = log_embed_warn(
'Invite link detected',
self.bot
)
embed.add_field(name='ID:', value=invite.id, inline=True)
embed.add_field(name='Guild:',
value=invite.guild.name, inline=True)
embed.add_field(name='Inviter:',
value=invite.inviter, inline=True)
embed.add_field(name='Members:',
value=invite.approximate_member_count)
embed.add_field(name='URL', value=invite.url)
embed.set_footer(
text=f'Channel: {message.channel.name}', icon_url=message.author.avatar_url)
embed.set_thumbnail(url=invite.guild.icon_url)
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
@commands.Cog.listener()
async def on_message_delete(self, message):
if not message:
return
if not message.content:
return
if message.author.bot:
return
webhook = await self._get_webhook(message.guild, 'message_log')
if not webhook:
return
embed = log_embed_danger(
'Message Deleted',
self.bot
)
embed.add_field(name='Author:', value=message.author.name, inline=True)
embed.add_field(name='Channel:',
value=message.channel.name, inline=True)
embed.add_field(name='Content:', value=message.content, inline=False)
embed.set_footer(
text=f'Message ID: {message.id}', icon_url=message.author.avatar_url)
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
@commands.Cog.listener()
async def on_bulk_message_delete(self, messages):
webhook = await self._get_webhook(messages[0].guild, 'message_log')
if not webhook:
return
if messages[0].author.bot:
return
available_space_per_message = 2048 / len(messages)
description = ''
for message in messages:
to_be_added = f'**By {message.author}**: {message.content}'
shortened_message = textwrap.shorten(
to_be_added, width=available_space_per_message)
description += f'{shortened_message}\n'
embed = log_embed_danger(
'Bulk Messages Deleted(Purged)',
self.bot,
description=description
)
embed.set_footer(text=f'{len(messages)} messages deleted',
icon_url=messages[0].author.avatar_url)
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
@commands.Cog.listener()
async def on_message_edit(self, message_before, message_after):
if not (message_before.content == message_after.content):
webhook = await self._get_webhook(message_after.guild, 'message_log')
if not webhook:
return
if message_after.author.bot:
return
embed = log_embed_warn(
'Message Edited',
self.bot
)
embed.add_field(
name='Before:', value=message_before.content, inline=False)
embed.add_field(
name='After:', value=message_after.content, inline=False)
embed.set_footer(
text=f'Message ID: {message_after.id}', icon_url=message_after.author.avatar_url)
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
webhook = await self._get_webhook(channel.guild, 'server_log')
if not webhook:
return
embed = log_embed_danger(
'Channel deleted',
self.bot
)
embed.add_field(name='Name:', value=channel.name, inline=True)
embed.add_field(name='Type:', value=channel.type, inline=True)
embed.add_field(name='Category:', value=channel.category, inline=True)
embed.set_footer(text=f'Channel ID: {channel.id}')
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
@commands.Cog.listener()
async def on_guild_channel_create(self, channel):
webhook = await self._get_webhook(channel.guild, 'server_log')
if not webhook:
return
embed = log_embed_info(
'Channel created',
self.bot
)
embed.add_field(name='Name:', value=channel.name, inline=True)
embed.add_field(name='Type:', value=channel.type, inline=True)
embed.add_field(name='Category:', value=channel.category, inline=True)
embed.set_footer(text=f'Channel ID: {channel.id}')
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
    @commands.Cog.listener()
    async def on_guild_channel_update(self, channel_before, channel_after):
        """Log channel renames, topic changes and permission-overwrite edits.

        Handles one kind of change per event: rename, then (text channels
        only) topic change, then permission overwrites.
        """
        webhook = await self._get_webhook(channel_after.guild, 'server_log')
        if not webhook:
            return
        embed = log_embed_warn(
            'Channel updated',
            self.bot
        )
        # Case 1: the channel was renamed.
        if not (channel_before.name == channel_after.name):
            embed.add_field(
                name='Name:', value=channel_before.name, inline=True)
            embed.add_field(
                name='Name Changed:', value=f'**Before**: {channel_before.name}\n**After**: {channel_after.name}', inline=False)
            embed.set_footer(text=f'Channel ID: {channel_after.id}')
            await send_webhook(
                webhook.url,
                self.bot.aio_session,
                embed=embed
            )
            return
        # Case 2: text-channel topic changed.
        if str(channel_after.type) == 'text':
            if not (channel_before.topic == channel_after.topic):
                embed.add_field(
                    name='Name:', value=channel_before.name, inline=True)
                embed.add_field(
                    name='Topic Changed:', value=f'**Before**: {channel_before.topic}\n**After**: {channel_after.topic}', inline=False)
                embed.set_footer(text=f'Channel ID: {channel_after.id}')
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
                return
        # Case 3: permission overwrites changed — determine whether a target
        # was added, removed, or an existing overwrite was modified.
        if not (channel_before.overwrites == channel_after.overwrites):
            embed.add_field(
                name='Name:', value=channel_before.name, inline=True)
            overwrites_before = channel_before.overwrites
            overwrites_after = channel_after.overwrites
            ov_before_list_objs = list(overwrites_before.values())
            new_ov = [
                ov for ov in overwrites_after if ov not in overwrites_before]
            removed_ov = [
                ov for ov in overwrites_before if ov not in overwrites_after]
            if new_ov:
                embed.add_field(
                    name='Overwrites created', value=f'Permission overwrites created for `{new_ov[0]}`', inline=False)
            elif removed_ov:
                embed.add_field(
                    name='Overwrites removed', value=f'Permission overwrites removed for `{removed_ov[0]}`', inline=False)
            else:
                # Same targets before and after: locate the modified overwrite
                # by position, then diff its individual permission entries.
                role_or_member = None
                ov_modified_before = None
                ov_modified_after = None
                for i, ov in enumerate(overwrites_after.values()):
                    if ov != ov_before_list_objs[i]:
                        ov_modified_before = ov_before_list_objs[i]
                        ov_modified_after = ov
                        role_or_member = list(overwrites_after)[i]
                old_perms = list(iter(ov_modified_before))
                new_perms = list(iter(ov_modified_after))
                changed_perms = []
                for i, perm in enumerate(new_perms):
                    if perm != old_perms[i]:
                        changed_perms.append(perm)
                changed_perms_string = ''
                for perm in changed_perms:
                    # perm is a (name, value) pair; None is reported as a
                    # removed overwrite, True/False as Allowed/Denied.
                    to_be_added = f'`{perm[0]}`: '
                    if perm[1] is None:
                        to_be_added += 'Overwrites removed'
                    else:
                        to_be_added += 'Allowed' if perm[1] else 'Denied'
                    to_be_added += '\n'
                    changed_perms_string += to_be_added
                embed.add_field(name='Permission overwrites updated:',
                                value=f'Permission overwrites updated for `{role_or_member.name}`', inline=False)
                embed.add_field(name='Changes:',
                                value=changed_perms_string, inline=False)
            embed.set_footer(text=f'Channel ID: {channel_after.id}')
            await send_webhook(
                webhook.url,
                self.bot.aio_session,
                embed=embed
            )
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Log member joins and, separately, the invite they appear to have used."""
        webhook_join = await self._get_webhook(member.guild, 'join_leave_log')
        if not webhook_join:
            return
        join_embed = log_embed_info(
            'Member joined',
            self.bot
        )
        join_embed.add_field(name='Name:', value=member.name, inline=True)
        join_embed.add_field(
            name='Mention:', value=member.mention, inline=True)
        join_embed.add_field(name='Joined Discord:', value=ago.human(
            member.created_at), inline=True)
        join_embed.set_footer(
            text=f'Member ID: {member.id}', icon_url=member.avatar_url)
        join_embed.set_thumbnail(url=member.avatar_url)
        await send_webhook(
            webhook_join.url,
            self.bot.aio_session,
            embed=join_embed
        )
        # Invite attribution is a second, optional log.
        webhook_invites = await self._get_webhook(member.guild, 'invite_log')
        if not webhook_invites:
            return
        invite = await self._get_invite(member.guild)
        invite_embed = log_embed_info(
            'Member joined using invite link',
            self.bot,
        )
        if not invite:
            invite_embed.description = 'Sorry, I couldn\'t figure out how this person joined.'
        else:
            invite_embed.add_field(
                name='Name:', value=member.name, inline=True)
            invite_embed.add_field(name='ID:', value=invite.id, inline=True)
            invite_embed.add_field(
                name='Guild:', value=invite.guild.name, inline=True)
            invite_embed.add_field(
                name='Inviter:', value=invite.inviter.name, inline=True)
            invite_embed.add_field(name='URL', value=invite.url, inline=True)
            invite_embed.set_thumbnail(url=member.avatar_url)
            invite_embed.set_footer(
                text=f'User ID: {member.id}', icon_url=member.avatar_url)
        await send_webhook(
            webhook_invites.url,
            self.bot.aio_session,
            embed=invite_embed
        )
@commands.Cog.listener()
async def on_member_remove(self, member):
webhook = await self._get_webhook(member.guild, 'join_leave_log')
if not webhook:
return
roles = ', '.join([role.mention for role in member.roles][1:])
embed = log_embed_danger(
'Member left',
self.bot
)
embed.add_field(name='Name:', value=member.name, inline=True)
embed.add_field(name='Mention:', value=member.mention, inline=True)
embed.add_field(name='Joined Discord:', value=ago.human(
member.created_at), inline=True)
embed.add_field(name='Roles:', value=roles or 'No roles', inline=False)
embed.set_footer(
text=f'Member ID: {member.id}', icon_url=member.avatar_url)
embed.set_thumbnail(url=member.avatar_url)
await send_webhook(
webhook.url,
self.bot.aio_session,
embed=embed
)
    @commands.Cog.listener()
    async def on_member_update(self, member_before, member_after):
        """Log nickname changes and role additions/removals to the people log."""
        webhook = await self._get_webhook(member_after.guild, 'people_log')
        if not webhook:
            return
        embed = log_embed_warn(
            'Member update',
            self.bot
        )
        if not (member_before.nick == member_after.nick):
            # nickname change
            embed.add_field(name='Name:', value=member_after.name, inline=True)
            embed.add_field(name='Nickname change:',
                            value=f'**Before**: {member_before.nick}\n**After**: {member_after.nick}', inline=False)
            embed.set_footer(
                text=f'Member ID: {member_after.id}', icon_url=member_after.avatar_url)
            embed.set_thumbnail(url=member_after.avatar_url)
            await send_webhook(
                webhook.url,
                self.bot.aio_session,
                embed=embed
            )
            return
        if not (member_before.roles == member_after.roles):
            new_roles = [
                role for role in member_after.roles if role not in member_before.roles]
            removed_roles = [
                role for role in member_before.roles if role not in member_after.roles]
            # A longer role list after the event means roles were granted.
            if len(member_before.roles) < len(member_after.roles):
                # added roles
                embed.add_field(
                    name='Name:', value=member_after.name, inline=True)
                embed.add_field(name='Roles added:', value=', '.join(
                    [role.mention for role in new_roles]), inline=False)
                embed.set_footer(
                    text=f'Member ID: {member_after.id}', icon_url=member_after.avatar_url)
                embed.set_thumbnail(url=member_after.avatar_url)
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
            else:
                # removed roles
                embed.add_field(
                    name='Name:', value=member_after.name, inline=True)
                embed.add_field(name='Roles removed:', value=', '.join(
                    [role.mention for role in removed_roles]), inline=False)
                embed.set_footer(
                    text=f'Member ID: {member_after.id}', icon_url=member_after.avatar_url)
                embed.set_thumbnail(url=member_after.avatar_url)
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
    @commands.Cog.listener()
    async def on_user_update(self, user_before, user_after):
        """Log username/discriminator/avatar changes to every shared guild.

        This user-level event carries no guild, so the update is fanned out to
        the people log of each guild the bot shares with the user.
        """
        all_guilds = self.bot.guilds
        guilds_which_user_shares = [
            guild for guild in all_guilds if user_after in guild.members]
        webhooks = []
        for guild in guilds_which_user_shares:
            webhook = await self._get_webhook(guild, 'people_log')
            if not webhook:
                continue
            webhooks.append(webhook)
        if not webhooks:
            return
        embed = log_embed_warn(
            'User updated',
            self.bot
        )
        if not (user_before.name == user_after.name):
            # username update
            embed.add_field(name='Name:', value=user_after.name, inline=True)
            embed.add_field(name='Username update:',
                            value=f'**Before**: {user_before.name}\n**After**: {user_after.name}', inline=False)
            embed.set_footer(
                text=f'User ID: {user_after.id}', icon_url=user_after.avatar_url)
            embed.set_thumbnail(url=user_after.avatar_url)
            for webhook in webhooks:
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
            return
        if not (user_before.discriminator == user_after.discriminator):
            # discriminator update
            embed.add_field(name='Name:', value=user_after.name, inline=True)
            embed.add_field(name='Discriminator update:',
                            value=f'**Before**: {user_before.discriminator}\n**After**: {user_after.discriminator}', inline=False)
            embed.set_footer(
                text=f'User ID: {user_after.id}', icon_url=user_after.avatar_url)
            embed.set_thumbnail(url=user_after.avatar_url)
            for webhook in webhooks:
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
            return
        if not (user_before.avatar == user_after.avatar):
            # avatar update
            embed.add_field(name='Name:', value=user_after.name, inline=True)
            embed.add_field(
                name='Avatar update:', value=f'**Before**: {user_before.avatar_url}\n**After**: {user_after.avatar_url}', inline=False)
            embed.set_footer(
                text=f'User ID: {user_after.id}', icon_url=user_after.avatar_url)
            embed.set_thumbnail(url=user_after.avatar_url)
            for webhook in webhooks:
                await send_webhook(
                    webhook.url,
                    self.bot.aio_session,
                    embed=embed
                )
            return
@commands.Cog.listener()
async def on_guild_update(self, guild_before, guild_after):
webhook = await self._get_webhook(guild_after, 'server_log')
if not webhook:
return
embed = log_embed_warn(
'Guild update',
self.bot
)
if | |
["TRANS", "PTRANS", "NMTRANS"]):
sites_cart = None
if (m_j is None):
m_1,m_2,m_3,m_4 = m_i,m_i,m_i,m_i
elif (m_i.i_conformer != 0 and m_j.i_conformer != 0):
assert m_i.i_conformer == m_j.i_conformer
for tor in tor_list:
if (m_j is not None):
m_1,m_2,m_3,m_4 = [(m_i, m_j)[comp_id-1] for comp_id in (
tor.atom_1_comp_id,
tor.atom_2_comp_id,
tor.atom_3_comp_id,
tor.atom_4_comp_id)]
if ( tor.atom_id_1 not in m_1.monomer_atom_dict
or tor.atom_id_2 not in m_2.monomer_atom_dict
or tor.atom_id_3 not in m_3.monomer_atom_dict
or tor.atom_id_4 not in m_4.monomer_atom_dict):
if ( tor.atom_id_1.replace("'", "*") in m_1.monomer_atom_dict
and tor.atom_id_2.replace("'", "*") in m_2.monomer_atom_dict
and tor.atom_id_3.replace("'", "*") in m_3.monomer_atom_dict
and tor.atom_id_4.replace("'", "*") in m_4.monomer_atom_dict):
tor.atom_id_1 = tor.atom_id_1.replace("'", "*")
tor.atom_id_2 = tor.atom_id_2.replace("'", "*")
tor.atom_id_3 = tor.atom_id_3.replace("'", "*")
tor.atom_id_4 = tor.atom_id_4.replace("'", "*")
else:
counters.corrupt_monomer_library_definitions += 1
continue
atoms = (m_1.expected_atoms.get(tor.atom_id_1, None),
m_2.expected_atoms.get(tor.atom_id_2, None),
m_3.expected_atoms.get(tor.atom_id_3, None),
m_4.expected_atoms.get(tor.atom_id_4, None))
if (None in atoms):
if ( m_1.monomer_atom_dict[tor.atom_id_1].type_symbol == "H"
or m_2.monomer_atom_dict[tor.atom_id_2].type_symbol == "H"
or m_3.monomer_atom_dict[tor.atom_id_3].type_symbol == "H"
or m_4.monomer_atom_dict[tor.atom_id_4].type_symbol == "H"):
counters.unresolved_hydrogen += 1
else:
counters.unresolved_non_hydrogen += 1
elif ( tor.value_angle is None
or tor.value_angle_esd in [None, 0]):
counters.undefined += 1
else:
counters.resolved += 1
i_seqs = [atom.i_seq for atom in atoms]
trans_cis_ids = [
"TRANS", "PTRANS", "NMTRANS",
"CIS", "PCIS", "NMCIS"]
if (special_position_dict.involves_special_positions(i_seqs)):
counters.discarded_because_of_special_positions += 1
elif (involves_broken_bonds(broken_bond_i_seq_pairs, i_seqs)):
pass
elif ( tor.id in ["psi", "phi"]
and self.chem_link_id in trans_cis_ids
and peptide_link_params.discard_psi_phi):
pass
elif ( tor.id == "omega"
and self.chem_link_id in trans_cis_ids
and peptide_link_params.discard_omega):
pass
else:
if (dihedral_function_type == "determined_by_sign_of_periodicity"):
periodicity = tor.period
elif (dihedral_function_type == "all_sinusoidal"):
periodicity = max(1, tor.period)
elif (dihedral_function_type == "all_harmonic"):
periodicity = -abs(tor.period)
else:
raise RuntimeError(
"Unknown dihedral_function_type: %s"
% str(dihedral_function_type))
try:
if len(tor.alt_value_angle) == 0:
alt_value_angle = None
else:
alt_value_angle = [float(t) for t in tor.alt_value_angle.split(",")]
except Exception:
alt_value_angle = None
proxy = geometry_restraints.dihedral_proxy(
i_seqs=i_seqs,
angle_ideal=tor.value_angle,
weight=1/tor.value_angle_esd**2,
periodicity=periodicity,
origin_id=origin_id,
alt_angle_ideals=alt_value_angle)
if (sites_cart is not None and tor.id == "omega"):
assert abs(tor.value_angle - 180) < 1.e-6
if (peptide_link_params.omega_esd_override_value is not None):
assert peptide_link_params.omega_esd_override_value > 0
proxy.weight = 1/peptide_link_params.omega_esd_override_value**2
r = geometry_restraints.dihedral(
sites_cart=sites_cart,
proxy=proxy)
if ( not peptide_link_params.apply_all_trans and
abs(r.delta) > 180-peptide_link_params.cis_threshold):
self.chem_link_id = self.chem_link_id.replace("TRANS", "CIS")
proxy.angle_ideal = 0
if cis_trans_specifications:
for ca_i_seq in cis_trans_specifications:
if ca_i_seq[0] == i_seqs[3]: # specify the trailing CA
if self.chem_link_id in ["PTRANS", "PCIS"]:
cis_trans_specifications[ca_i_seq] = "P%s" % (
cis_trans_specifications[ca_i_seq],
)
elif self.chem_link_id in ["NMTRANS", "NMCIS"]:
cis_trans_specifications[ca_i_seq] = "NM%s" % (
cis_trans_specifications[ca_i_seq],
)
if self.chem_link_id!=cis_trans_specifications[ca_i_seq].upper():
if self.chem_link_id.find("TRANS")>-1:
self.chem_link_id=self.chem_link_id.replace('TRANS', 'CIS')
proxy.angle_ideal=0
else:
self.chem_link_id=self.chem_link_id.replace('CIS','TRANS')
proxy.angle_ideal=180
registry_process_result = dihedral_proxy_registry.process(
source_info=source_info_server(m_i=m_i, m_j=m_j),
proxy=proxy)
evaluate_registry_process_result(
proxy_label="dihedral", m_i=m_i, m_j=m_j, i_seqs=i_seqs,
registry_process_result=registry_process_result,
lines=["tor id: " + str(tor.id)])
class add_chirality_proxies(object):
  """Generates chirality restraint proxies for one monomer (m_j is None)
  or for a link between two monomers (m_i, m_j), registering each proxy
  with chirality_proxy_registry and tallying every outcome in counters.

  Notes (from the visible code):
    - chir.volume_sign is compared via its first four characters,
      lower-cased; only "posi", "nega" and "both" are supported.
    - Atom ids with primes are retried with "'" mapped to "*" before the
      definition is counted as corrupt.
    - Proxy weight is 1/chir_volume_esd**2; no proxy is created when the
      esd is None or 0, when atoms are missing, when special positions
      are involved, or when the restraint spans a broken bond.
  """

  def __init__(self,
        counters,
        m_i,
        m_j,
        chir_list,
        chirality_proxy_registry,
        special_position_dict,
        chir_volume_esd,
        lib_link=None,
        origin_id=0,
        broken_bond_i_seq_pairs=None):
    self.counters = counters
    self.counters.unsupported_volume_sign = dicts.with_default_value(0)
    if (m_j is None):
      # Single-monomer case: all four chirality atoms come from m_i.
      m_c,m_1,m_2,m_3 = m_i,m_i,m_i,m_i
    elif (m_i.i_conformer != 0 and m_j.i_conformer != 0):
      assert m_i.i_conformer == m_j.i_conformer
    for chir in chir_list:
      if (m_j is not None):
        # Link case: pick the source monomer for each atom via its
        # 1-based component id.
        m_c,m_1,m_2,m_3 = [(m_i, m_j)[comp_id-1] for comp_id in (
          chir.atom_centre_comp_id,
          chir.atom_1_comp_id,
          chir.atom_2_comp_id,
          chir.atom_3_comp_id)]
      volume_sign = chir.volume_sign
      if (volume_sign is not None):
        # Compare only the first four characters, case-insensitively.
        volume_sign = volume_sign[:4].lower()
      if (volume_sign not in ["posi", "nega", "both"]):
        counters.unsupported_volume_sign[volume_sign] += 1
        continue
      if (   chir.atom_id_centre not in m_c.monomer_atom_dict
          or chir.atom_id_1 not in m_1.monomer_atom_dict
          or chir.atom_id_2 not in m_2.monomer_atom_dict
          or chir.atom_id_3 not in m_3.monomer_atom_dict):
        # Retry with primed atom names mapped to the "*" convention
        # before declaring the definition corrupt.
        if (    chir.atom_id_1.replace("'", "*") in m_1.monomer_atom_dict
            and chir.atom_id_2.replace("'", "*") in m_2.monomer_atom_dict
            and chir.atom_id_3.replace("'", "*") in m_3.monomer_atom_dict
            and chir.atom_id_centre.replace("'", "*") in m_c.monomer_atom_dict):
          chir.atom_id_1 = chir.atom_id_1.replace("'", "*")
          chir.atom_id_2 = chir.atom_id_2.replace("'", "*")
          chir.atom_id_3 = chir.atom_id_3.replace("'", "*")
          chir.atom_id_centre = chir.atom_id_centre.replace("'", "*")
        else:
          counters.corrupt_monomer_library_definitions += 1
          continue
      atoms = (m_c.expected_atoms.get(chir.atom_id_centre, None),
               m_1.expected_atoms.get(chir.atom_id_1, None),
               m_2.expected_atoms.get(chir.atom_id_2, None),
               m_3.expected_atoms.get(chir.atom_id_3, None))
      if (None in atoms):
        # Count missing hydrogens separately from missing heavier atoms.
        if (   m_c.monomer_atom_dict[chir.atom_id_centre].type_symbol == "H"
            or m_1.monomer_atom_dict[chir.atom_id_1].type_symbol == "H"
            or m_2.monomer_atom_dict[chir.atom_id_2].type_symbol == "H"
            or m_3.monomer_atom_dict[chir.atom_id_3].type_symbol == "H"):
          counters.unresolved_hydrogen += 1
        else:
          counters.unresolved_non_hydrogen += 1
      elif (   volume_sign is None
            or chir_volume_esd in [None, 0]):
        counters.undefined += 1
      else:
        if (m_j is None):
          volume_ideal = m_i.monomer.get_chir_volume_ideal(chir)
        else:
          volume_ideal = lib_link.get_chir_volume_ideal(
            m_i.monomer, m_j.monomer, chir)
        if (volume_ideal is None):
          counters.undefined += 1
        else:
          counters.resolved += 1
          i_seqs = [atom.i_seq for atom in atoms]
          if (special_position_dict.involves_special_positions(i_seqs)):
            counters.discarded_because_of_special_positions += 1
          elif (involves_broken_bonds(broken_bond_i_seq_pairs, i_seqs)):
            # Restraint would span a removed bond; drop it silently.
            pass
          else:
            registry_process_result = chirality_proxy_registry.process(
              source_info=source_info_server(m_i=m_i, m_j=m_j),
              proxy=geometry_restraints.chirality_proxy(
                i_seqs=i_seqs,
                volume_ideal=volume_ideal,
                both_signs=(volume_sign == "both"),
                origin_id=origin_id,
                weight=1/chir_volume_esd**2))
            evaluate_registry_process_result(
              proxy_label="chirality", m_i=m_i, m_j=m_j, i_seqs=i_seqs,
              registry_process_result=registry_process_result)
class add_planarity_proxies(object):
  """Generates planarity restraint proxies for one monomer (m_j is None)
  or for a link between two monomers, registering each proxy with
  planarity_proxy_registry and tallying outcomes in counters.

  Notes (from the visible code):
    - Per-atom weight is 1/plane_atom.dist_esd**2; atoms with esd None
      or 0 are counted as undefined and excluded.
    - Planes that resolve fewer than four sites are skipped; they are
      tallied in counters.less_than_four_sites only when a non-hydrogen
      atom was among the unresolved ones.
  """

  def __init__(self,
        counters,
        m_i,
        m_j,
        plane_list,
        planarity_proxy_registry,
        special_position_dict,
        peptide_link_params=None,
        origin_id=0,
        broken_bond_i_seq_pairs=None):
    self.counters = counters
    self.counters.less_than_four_sites = dicts.with_default_value(0)
    if (    m_j is not None
        and m_i.i_conformer != 0 and m_j.i_conformer != 0):
      assert m_i.i_conformer == m_j.i_conformer
    for plane in plane_list:
      this_plane_has_unresolved_non_hydrogen = False
      i_seqs = []
      weights = []
      for plane_atom in plane.plane_atoms:
        if (m_j is None):
          m_x = m_i
        else:
          # Link case: 1-based component id selects m_i or m_j.
          assert plane_atom.atom_comp_id in (1,2)
          m_x = (m_i, m_j)[plane_atom.atom_comp_id-1]
        if (plane_atom.atom_id not in m_x.monomer_atom_dict):
          # Retry with primed atom names mapped to the "*" convention.
          if (plane_atom.atom_id.replace("'", "*") in m_x.monomer_atom_dict):
            plane_atom.atom_id = plane_atom.atom_id.replace("'", "*")
          else:
            counters.corrupt_monomer_library_definitions += 1
            continue
        atom = m_x.expected_atoms.get(plane_atom.atom_id, None)
        if (atom is None):
          # Count missing hydrogens separately from heavier atoms; only
          # missing non-hydrogens flag the plane for the
          # less_than_four_sites tally below.
          if (m_x.monomer_atom_dict[plane_atom.atom_id].type_symbol == "H"):
            counters.unresolved_hydrogen += 1
          else:
            counters.unresolved_non_hydrogen += 1
            this_plane_has_unresolved_non_hydrogen = True
        elif (plane_atom.dist_esd in [None, 0]):
          counters.undefined += 1
        else:
          counters.resolved += 1
          i_seq = atom.i_seq
          if (special_position_dict.involves_special_positions([i_seq])):
            counters.discarded_because_of_special_positions += 1
          else:
            i_seqs.append(i_seq)
            weights.append(1/plane_atom.dist_esd**2)
      if (len(i_seqs) < 4):
        if (this_plane_has_unresolved_non_hydrogen):
          counters.less_than_four_sites[plane.plane_id] += 1
      elif (involves_broken_bonds(broken_bond_i_seq_pairs, i_seqs)):
        # Restraint would span a removed bond; drop it silently.
        pass
      else:
        registry_process_result = planarity_proxy_registry.process(
          source_info=source_info_server(m_i=m_i, m_j=m_j),
          proxy=geometry_restraints.planarity_proxy(
            i_seqs=flex.size_t(i_seqs),
            origin_id=origin_id,
            weights=flex.double(weights)))
        evaluate_registry_process_result(
          proxy_label="planarity", m_i=m_i, m_j=m_j, i_seqs=i_seqs,
          registry_process_result=registry_process_result,
          lines=["plane id: " + str(plane.plane_id)])
class add_parallelity_proxies(object):
  """Generates a parallelity restraint proxy between the plane atoms of
  two linked monomers (m_i, m_j), registering it with
  parallelity_proxy_registry.

  The i_seqs/j_seqs are the unique atom i_seqs collected from all plane
  definitions of each monomer; no proxy is created when either side has
  fewer than three atoms or the restraint spans a broken bond.
  """

  def __init__(self,
        counters,
        m_i,
        m_j,
        parallelity_proxy_registry,
        special_position_dict,
        broken_bond_i_seq_pairs=None,
        weight=0.05):
    # probably is not used anymore
    if weight <=0:
      raise Sorry("Weight for parallelity restraint should be > 0.")
    self.counters = counters
    if (    m_j is not None
        and m_i.i_conformer != 0 and m_j.i_conformer != 0):
      assert m_i.i_conformer == m_j.i_conformer
    counters.resolved += 1
    # making i_seqs, j_seqs,weight
    i_seqs = []
    j_seqs = []
    for seqs, m in [(i_seqs, m_i), (j_seqs, m_j)]:
      # Collect the unique atom i_seqs of every plane atom of the monomer.
      for p in m.monomer.get_planes():
        for plane_atom in p.plane_atoms:
          atom = m.expected_atoms.get(plane_atom.atom_id, None)
          if atom is not None and atom.i_seq not in seqs:
            seqs.append(atom.i_seq)
    if (involves_broken_bonds(broken_bond_i_seq_pairs, i_seqs+j_seqs)):
      # Restraint would span a removed bond; drop it silently.
      pass
    elif len(i_seqs) < 3 or len(j_seqs) < 3:
      # Fewer than three atoms cannot define a plane orientation.
      pass
    else:
      registry_process_result = parallelity_proxy_registry.process(
        source_info=source_info_server(m_i=m_i, m_j=m_j),
        proxy=geometry_restraints.parallelity_proxy(
          i_seqs=flex.size_t(i_seqs),
          j_seqs=flex.size_t(j_seqs),
          weight=weight))
      evaluate_registry_process_result(
        proxy_label="parallelity", m_i=m_i, m_j=m_j, i_seqs=i_seqs,
        registry_process_result=registry_process_result,
        lines=["plane id: " + "???"])
# XXX TODO synonymes
def ener_lib_as_nonbonded_params(
      ener_lib,
      assume_hydrogens_all_missing,
      factor_1_4_interactions,
      default_distance,
      minimum_distance,
      const_shrink_donor_acceptor,
      use_lib_vdw=False):
  """Builds geometry_restraints.nonbonded_params from an energy library.

  Per atom type, a radius is taken from the column selected by
  assume_hydrogens_all_missing (vdwh_radius when True, vdw_radius when
  False), falling back to the other column when the preferred value is
  None.  Ionic radii and donor/acceptor codes are copied when present.
  When use_lib_vdw is True, pairwise minimum distances from
  ener_lib.lib_vdw are loaded into params.distance_table first, with the
  preferred table ("h" or plain) applied last so it overwrites the other.

  :param ener_lib: monomer library energy library providing lib_vdw and
    lib_atom.
  :param assume_hydrogens_all_missing: prefer hydrogen-folded-in radii
    (vdwh_radius / "h" vdw table) when True.
  :param factor_1_4_interactions: passed through to nonbonded_params.
  :param default_distance: passed through to nonbonded_params.
  :param minimum_distance: passed through to nonbonded_params.
  :param const_shrink_donor_acceptor: passed through to nonbonded_params.
  :param use_lib_vdw: also load pairwise lib_vdw entries (default False).
  :return: populated geometry_restraints.nonbonded_params instance.
  """
  # hb_type single-letter codes to numeric donor/acceptor classes:
  # N = 0, D = 1, A = 2, B = 3, H = 4
  hb_type_codes = {"N": 0, "D": 1, "A": 2, "B": 3, "H": 4}
  params = geometry_restraints.nonbonded_params(
    factor_1_4_interactions=factor_1_4_interactions,
    const_shrink_1_4_interactions=0,
    default_distance=default_distance,
    minimum_distance=minimum_distance,
    const_shrink_donor_acceptor=const_shrink_donor_acceptor)
  if (use_lib_vdw):
    # Split pairwise vdw entries by H_flag ("" = plain, "h" = hydrogens
    # folded in); later assignments overwrite earlier ones, so the
    # preferred table comes last in reverse_prefs.
    tables = {"": [], "h": []}
    for vdw in ener_lib.lib_vdw:
      assert vdw.H_flag in ["", "h"]
      tables[vdw.H_flag].append(vdw)
    if (assume_hydrogens_all_missing):
      reverse_prefs = ["", "h"]
    else:
      reverse_prefs = ["h", ""]
    for code in reverse_prefs:
      for vdw in tables[code]:
        # Store under the lexicographically sorted type pair.
        atom_types = sorted([vdw.atom_type_1, vdw.atom_type_2])
        params.distance_table.setdefault(
          atom_types[0])[atom_types[1]] = vdw.radius_min
  if (assume_hydrogens_all_missing):
    pref1, pref2 = "vdwh_radius", "vdw_radius"
  else:
    pref1, pref2 = "vdw_radius", "vdwh_radius"
  for atom_type,energy_lib_atom in ener_lib.lib_atom.items():
    if (len(atom_type) == 0): continue
    # Preferred radius column first; fall back to the other one.
    r = getattr(energy_lib_atom, pref1)
    if (r is None):
      r = getattr(energy_lib_atom, pref2)
    if (r is not None):
      params.radius_table[atom_type] = r
    r_ionic = energy_lib_atom.ion_radius
    if (r_ionic is not None):
      params.ionic_radius_table[atom_type] = r_ionic
    # One table lookup replaces the previous five-way if/elif chain that
    # re-read hb_type via getattr on every comparison.
    hb_code = hb_type_codes.get(energy_lib_atom.hb_type)
    if (hb_code is not None):
      params.donor_acceptor_table[atom_type] = hb_code
  return params
def is_same_model_as_before(model_type_indices, i_model, models):
  """Checks whether models[i_model] duplicates an earlier model.

  Compares against earlier "representative" models only (entries whose
  model_type_indices value equals their own index).  On a match, records
  the representative's index in model_type_indices[i_model] and returns
  True; otherwise marks i_model as its own representative and returns
  False.
  """
  current = models[i_model]
  representatives = (
    j for j in range(i_model) if model_type_indices[j] == j)
  for j_model in representatives:
    if current.is_identical_hierarchy(other=models[j_model]):
      model_type_indices[i_model] = j_model
      return True
  model_type_indices[i_model] = i_model
  return False
class conformer_i_seq(dict):
  """Mapping of i_seq -> item that accumulates via ``+=`` and converts
  to a list ordered by i_seq."""

  def __iadd__(self, other):
    """Merges the entries of ``other`` into self; returns self."""
    self.update(other)
    return self

  def convert(self):
    """Returns the stored items as a list sorted by their i_seq keys."""
    return [self[i_seq] for i_seq in sorted(self)]
class build_chain_proxies(object):
def __init__(self,
mon_lib_srv,
ener_lib,
translate_cns_dna_rna_residue_names,
rna_sugar_pucker_analysis_params,
apply_cif_modifications,
apply_cif_links_mm_pdbres_dict,
link_distance_cutoff,
not_linked_show_max,
dihedral_function_type,
chir_volume_esd,
peptide_link_params,
pdb_hierarchy,
pdb_atoms,
sites_cart,
special_position_dict,
keep_monomer_mappings,
all_monomer_mappings,
scattering_type_registry,
nonbonded_energy_type_registry,
geometry_proxy_registries,
cystein_sulphur_i_seqs,
cystein_monomer_mappings,
is_unique_model,
i_model,
i_conformer,
is_first_conformer_in_chain,
conformer,
conformation_dependent_restraints_list,
cis_trans_specifications,
apply_restraints_specifications,
log,
restraints_loading_flags=None,
fatal_problem_max_lines=10,
):
if restraints_loading_flags is None: restraints_loading_flags={}
self._cif = cif_output_holder()
self.pdb_link_records = {}
#
self.type_energies = conformer_i_seq()
self.type_h_bonds = conformer_i_seq()
#
self.conformation_dependent_restraints_list = \
conformation_dependent_restraints_list
unknown_residues = dicts.with_default_value(0)
ad_hoc_single_atom_residues = dicts.with_default_value(0)
unusual_residues = dicts.with_default_value(0)
inner_chain_residues_flagged_as_termini = []
n_expected_atoms = 0
unexpected_atoms = dicts.with_default_value(0)
ignored_atoms = dicts.with_default_value(0)
duplicate_atoms = dicts.with_default_value(0)
classifications = dicts.with_default_value(0)
modifications_used = dicts.with_default_value(0)
incomplete_infos = dicts.with_default_value(0)
missing_h_bond_type = dicts.with_default_value(0)
link_ids = dicts.with_default_value(0)
mm_pairs_not_linked = []
n_unresolved_chain_links = 0
n_chain_breaks | |
# VMware vCloud Director Python SDK
# Copyright (c) 2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import E_VMEXT
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import QueryResultFormat
from pyvcloud.vcd.client import RelationType
from pyvcloud.vcd.client import ResourceType
from pyvcloud.vcd.exceptions import MissingRecordException
from pyvcloud.vcd.exceptions import MultipleRecordsException
from pyvcloud.vcd.exceptions import OperationNotSupportedException
from pyvcloud.vcd.utils import to_dict
class APIExtension(object):
ATTRIBUTES = [
'name', 'namespace', 'enabled', 'exchange', 'routingKey', 'priority',
'isAuthorizationEnabled', 'href', 'id'
]
    def __init__(self, client):
        """Constructor for APIExtension object.

        :param pyvcloud.vcd.client.Client client: the client that will be used
            to make REST calls to vCD.
        """
        # Low-level REST client; every operation of this class delegates
        # to it.
        self.client = client
def list_extensions(self):
"""Fetch the API extension services defined in the system.
:return: all the registered API extension services in the system.
:rtype: list
"""
try:
records = self.client.get_typed_query(
ResourceType.ADMIN_SERVICE.value,
query_result_format=QueryResultFormat.ID_RECORDS).execute()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to view extensions.'
raise OperationNotSupportedException(msg)
return [to_dict(r, self.ATTRIBUTES) for r in records]
def _get_extension_record(self,
name,
namespace=None,
format=QueryResultFormat.ID_RECORDS):
"""Fetch info about a particular API extension service as a record.
:param str name: the name of the extension service whose info we want
to retrieve.
:param str namespace: the namespace of the extension service. If
omitted, all extension services matching the given name will be
retrieved and that would lead to a MultipleRecordsException.
:param format QueryResultFormat: dictates whether id or href should be
part of the returned record. By default id is returned.
:return: the extension service record.
:rtype: lxml.objectify.ObjectifiedElement object containing
AdminServiceRecord XML data representing the service.
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
qfilter = 'name==%s' % urllib.parse.quote(name)
if namespace is not None:
qfilter += ';namespace==%s' % urllib.parse.quote(namespace)
try:
ext = self.client.get_typed_query(
ResourceType.ADMIN_SERVICE.value,
qfilter=qfilter,
query_result_format=format).find_unique()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to interact with extensions.'
raise OperationNotSupportedException(msg)
except MissingRecordException:
msg = 'API Extension service (name:' + name
if namespace is not None:
msg += ', namespace:' + namespace
msg += ') not found.'
raise MissingRecordException(msg)
except MultipleRecordsException:
msg = 'Found multiple API Extension service with (name:' + name
if namespace is not None:
msg += ', namespace:' + namespace + ').'
else:
msg += '). Consider providing value for the namespace.'
raise MultipleRecordsException(msg)
return ext
def get_extension(self, name, namespace=None):
"""Fetch info about a particular API extension service.
:param str name: the name of the extension service whose info we want
to retrieve.
:param str namespace: the namespace of the extension service.
:return: information about the extension service.
:rtype: dict
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
ext_record = self._get_extension_record(name, namespace)
return to_dict(ext_record, self.ATTRIBUTES)
def get_extension_xml(self, extension_id):
uri = f"{self.client.get_api_uri()}/admin/extension/service/{extension_id}" # noqa: E501
try:
response_xml = self.client.get_resource(uri)
return response_xml
except Exception as err:
raise Exception(f"Failed to get extension XML with error: {err}")
def get_extension_info(self, name, namespace=None):
"""Return info about an API extension, including filters.
:param str name: the name of the extension service whose info we want
to retrieve.
:param str namespace: the namespace of the extension service. If not
specified (i.e. = None), we will use the value passed in the
`name` parameter.
:return: information about the extension.
:rtype: dict
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
ext = self.get_extension(name, namespace)
filters = self.get_api_filters(ext['id'])
n = 1
for f in filters:
ext['filter_%s' % n] = f.get('urlPattern')
n += 1
return ext
def update_extension(self, name, namespace=None, routing_key=None,
exchange=None, description=None):
"""Update properties for an existing API extension.
:param str name: name of the API extension.
:param str namespace: namespace of the API extension.
:param str routing_key: AMQP routing key to use for the extension.
:param str exchange: AMQP exchange to use for the extension.
:return: href of the API extension.
:rtype: str
:raises MissingRecordException: if an extension with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
record = self._get_extension_record(name=name,
namespace=namespace,
format=QueryResultFormat.RECORDS)
params = E_VMEXT.Service({'name': name})
description = description or record.get('description')
if description is not None:
params.append(E.Description(description))
params.append(E_VMEXT.Namespace(record.get('namespace')))
params.append(E_VMEXT.Enabled(record.get('enabled')))
params.append(E_VMEXT.RoutingKey(
routing_key if routing_key else record.get('routingKey')))
params.append(E_VMEXT.Exchange(
exchange if exchange else record.get('exchange')))
self.client.put_resource(record.get('href'), params, None)
return record.get('href')
def add_extension(self, name, namespace, routing_key, exchange, patterns,
description=None):
"""Add an API extension service.
:param str name: name of the new API extension service.
:param str namespace: namespace of the new API extension service.
:param str routing_key: AMQP routing key to use with the extension.
:param str exchange: AMQP exchange to use with the extension.
:param list patterns: list of url API filters to register with the
extension.
:return: object containing EntityType.ADMIN_SERVICE XML data i.e. the
sparse representation of the API extension.
:rtype: lxml.objectify.ObjectifiedElement
"""
params = E_VMEXT.Service({'name': name})
if description is not None:
params.append(E.Description(description))
params.append(E_VMEXT.Namespace(namespace))
params.append(E_VMEXT.Enabled('true'))
params.append(E_VMEXT.RoutingKey(routing_key))
params.append(E_VMEXT.Exchange(exchange))
filters = E_VMEXT.ApiFilters()
for pattern in patterns:
filters.append(
E_VMEXT.ApiFilter(E_VMEXT.UrlPattern(pattern.strip())))
params.append(filters)
ext = self.client.get_extension()
ext_services = self.client.get_linked_resource(
ext, RelationType.DOWN, EntityType.EXTENSION_SERVICES.value)
return self.client.post_linked_resource(ext_services, RelationType.ADD,
EntityType.ADMIN_SERVICE.value,
params)
def enable_extension(self, name, enabled=True, namespace=None):
"""Enable or disable an API extension service.
:param str name: the name of the extension service whose we want to
enable/disable.
:param str namespace: the namespace of the extension service. If not
specified (i.e. = None), we will use the value passed in the
`name` parameter.
:param bool enabled: flag to enable or disable the extension.
:return: href of the service being enabled/disabled.
:rtype: str
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
record = self._get_extension_record(name=name,
namespace=namespace,
format=QueryResultFormat.RECORDS)
params = E_VMEXT.Service({'name': name})
params.append(E_VMEXT.Namespace(record.get('namespace')))
params.append(E_VMEXT.Enabled('true' if enabled else 'false'))
params.append(E_VMEXT.RoutingKey(record.get('routingKey')))
params.append(E_VMEXT.Exchange(record.get('exchange')))
self.client.put_resource(record.get('href'), params, None)
return record.get('href')
def delete_extension(self, name, namespace):
"""Delete an API extension service.
:param str name: the name of the extension service whose we want to
delete.
:param str namespace: the namespace of the extension service. If not
specified (i.e. = None), we will use the value passed in the
`name` parameter.
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
href = self.enable_extension(name, enabled=False, namespace=namespace)
return self.client.delete_resource(href)
def get_api_filters(self, service_id, format=QueryResultFormat.ID_RECORDS):
"""Fetch the API filters defined for the service.
:param str service_id: the id of the extension service.
:param format QueryResultFormat: dictates whether id or href should be
part of the returned record. By default id is returned.
:return: API filters registered for the API extension.
:rtype: generator object
"""
try:
records = self.client.get_typed_query(
ResourceType.API_FILTER.value,
equality_filter=('service', service_id),
query_result_format=format).execute()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to view api filters.'
raise OperationNotSupportedException(msg)
return records
def remove_all_api_filters_from_service(self, name, namespace=None):
"""."""
ext_record = self._get_extension_record(name=name, namespace=namespace)
api_filter_records = self.get_api_filters(
service_id=ext_record.get('id'),
format=QueryResultFormat.REFERENCES)
for record in api_filter_records:
api_filter = self.client.get_resource(uri=record.get('href'))
self.client.delete_linked_resource(
resource=api_filter, rel=RelationType.REMOVE, media_type=None)
def add_api_filters_to_service(self, name, patterns, namespace=None):
"""."""
ext_record = self._get_extension_record(
name=name, namespace=namespace,
format=QueryResultFormat.REFERENCES)
ext = self.client.get_resource(uri=ext_record.get('href'))
for pattern in patterns:
api_filter = E_VMEXT.ApiFilter(E_VMEXT.UrlPattern(pattern.strip()))
self.client.post_linked_resource(
resource=ext, rel=RelationType.ADD,
media_type=EntityType.API_FILTER.value, contents=api_filter)
def add_service_right(self, right_name, service_name, namespace,
description, category, bundle_key):
"""Add a new right using API extension service.
:param str right_name: the name of the new right to be registered.
:param str service_name: | |
"2",
"sent": "3",
"id": "w45",
"length": "9",
"offset": "231",
},
{
"text": "on",
"page": "1",
"para": "2",
"sent": "3",
"id": "w46",
"length": "2",
"offset": "241",
},
{
"text": "your",
"page": "1",
"para": "2",
"sent": "3",
"id": "w47",
"length": "4",
"offset": "244",
},
{
"text": "NLP",
"page": "1",
"para": "2",
"sent": "3",
"id": "w48",
"length": "3",
"offset": "249",
},
{
"text": "processor",
"page": "1",
"para": "2",
"sent": "3",
"id": "w49",
"length": "9",
"offset": "254",
},
{
"text": ")",
"page": "1",
"para": "2",
"sent": "3",
"id": "w50",
"length": "1",
"offset": "263",
},
{
"text": ".",
"page": "1",
"para": "2",
"sent": "3",
"id": "w51",
"length": "1",
"offset": "264",
},
]
diff = DeepDiff(actual, expected)
assert diff == dict(), diff
    def test_8_pdf_terms(self):
        """Terms layer of the example NAF document.

        Each term has an id, lemma, pos, type and the span of word ids it
        covers.  "morphofeat" appears only where the parser emitted
        morphological features; "component_of" marks the terms that
        belong to multiword mw1 (t37 and t39).
        """
        naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
        actual = naf.terms
        # Expected terms in document order, t1..t51.
        expected = [
            {
                "id": "t1",
                "lemma": "the",
                "pos": "DET",
                "type": "open",
                "morphofeat": "Definite=Def|PronType=Art",
                "span": [{"id": "w1"}],
            },
            {
                "id": "t2",
                "lemma": "Nafigator",
                "pos": "PROPN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w2"}],
            },
            {
                "id": "t3",
                "lemma": "package",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w3"}],
            },
            {
                "id": "t4",
                "lemma": "allow",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin",
                "span": [{"id": "w4"}],
            },
            {
                "id": "t5",
                "lemma": "you",
                "pos": "PRON",
                "type": "open",
                "morphofeat": "Case=Acc|Person=2|PronType=Prs",
                "span": [{"id": "w5"}],
            },
            {
                "id": "t6",
                "lemma": "to",
                "pos": "PART",
                "type": "open",
                "span": [{"id": "w6"}],
            },
            {
                "id": "t7",
                "lemma": "store",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "VerbForm=Inf",
                "span": [{"id": "w7"}],
            },
            {
                "id": "t8",
                "lemma": "nlp",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w8"}],
            },
            {
                "id": "t9",
                "lemma": "output",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w9"}],
            },
            {
                "id": "t10",
                "lemma": "from",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w10"}],
            },
            {
                "id": "t11",
                "lemma": "custom",
                "pos": "ADJ",
                "type": "open",
                "morphofeat": "Degree=Pos",
                "span": [{"id": "w11"}],
            },
            {
                "id": "t12",
                "lemma": "make",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "Tense=Past|VerbForm=Part",
                "span": [{"id": "w12"}],
            },
            {
                "id": "t13",
                "lemma": "spa",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w13"}],
            },
            {
                "id": "t14",
                "lemma": "cy",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w14"}],
            },
            {
                "id": "t15",
                "lemma": "and",
                "pos": "CCONJ",
                "type": "open",
                "span": [{"id": "w15"}],
            },
            {
                "id": "t16",
                "lemma": "stanza",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w16"}],
            },
            {
                "id": "t17",
                "lemma": "pipeline",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Plur",
                "span": [{"id": "w17"}],
            },
            {
                "id": "t18",
                "lemma": "with",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w18"}],
            },
            {
                "id": "t19",
                "lemma": "(",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w19"}],
            },
            {
                "id": "t20",
                "lemma": "intermediate",
                "pos": "ADJ",
                "type": "open",
                "morphofeat": "Degree=Pos",
                "span": [{"id": "w20"}],
            },
            {
                "id": "t21",
                "lemma": ")",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w21"}],
            },
            {
                "id": "t22",
                "lemma": "result",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Plur",
                "span": [{"id": "w22"}],
            },
            {
                "id": "t23",
                "lemma": "and",
                "pos": "CCONJ",
                "type": "open",
                "span": [{"id": "w23"}],
            },
            {
                "id": "t24",
                "lemma": "all",
                "pos": "DET",
                "type": "open",
                "span": [{"id": "w24"}],
            },
            {
                "id": "t25",
                "lemma": "processing",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w25"}],
            },
            {
                "id": "t26",
                "lemma": "step",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Plur",
                "span": [{"id": "w26"}],
            },
            {
                "id": "t27",
                "lemma": "in",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w27"}],
            },
            {
                "id": "t28",
                "lemma": "one",
                "pos": "NUM",
                "type": "open",
                "morphofeat": "NumType=Card",
                "span": [{"id": "w28"}],
            },
            {
                "id": "t29",
                "lemma": "format",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w29"}],
            },
            {
                "id": "t30",
                "lemma": ".",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w30"}],
            },
            {
                "id": "t31",
                "lemma": "multiword",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Plur",
                "span": [{"id": "w31"}],
            },
            {
                "id": "t32",
                "lemma": "like",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w32"}],
            },
            {
                "id": "t33",
                "lemma": "in",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w33"}],
            },
            {
                "id": "t34",
                "lemma": '"',
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w34"}],
            },
            {
                "id": "t35",
                "lemma": "we",
                "pos": "PRON",
                "type": "open",
                "morphofeat": "Case=Nom|Number=Plur|Person=1|PronType=Prs",
                "span": [{"id": "w35"}],
            },
            {
                "id": "t36",
                "lemma": "have",
                "pos": "AUX",
                "type": "open",
                "morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
                "span": [{"id": "w36"}],
            },
            {
                "id": "t37",
                "lemma": "set",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "Tense=Past|VerbForm=Part",
                "component_of": "mw1",
                "span": [{"id": "w37"}],
            },
            {
                "id": "t38",
                "lemma": "that",
                "pos": "SCONJ",
                "type": "open",
                "span": [{"id": "w38"}],
            },
            {
                "id": "t39",
                "lemma": "out",
                "pos": "ADP",
                "type": "open",
                "component_of": "mw1",
                "span": [{"id": "w39"}],
            },
            {
                "id": "t40",
                "lemma": "below",
                "pos": "ADV",
                "type": "open",
                "span": [{"id": "w40"}],
            },
            {
                "id": "t41",
                "lemma": '"',
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w41"}],
            },
            {
                "id": "t42",
                "lemma": "be",
                "pos": "AUX",
                "type": "open",
                "morphofeat": "Mood=Ind|Tense=Pres|VerbForm=Fin",
                "span": [{"id": "w42"}],
            },
            {
                "id": "t43",
                "lemma": "recognize",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "Tense=Past|VerbForm=Part|Voice=Pass",
                "span": [{"id": "w43"}],
            },
            {
                "id": "t44",
                "lemma": "(",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w44"}],
            },
            {
                "id": "t45",
                "lemma": "depend",
                "pos": "VERB",
                "type": "open",
                "morphofeat": "VerbForm=Ger",
                "span": [{"id": "w45"}],
            },
            {
                "id": "t46",
                "lemma": "on",
                "pos": "ADP",
                "type": "open",
                "span": [{"id": "w46"}],
            },
            {
                "id": "t47",
                "lemma": "you",
                "pos": "PRON",
                "type": "open",
                "morphofeat": "Person=2|Poss=Yes|PronType=Prs",
                "span": [{"id": "w47"}],
            },
            {
                "id": "t48",
                "lemma": "nlp",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w48"}],
            },
            {
                "id": "t49",
                "lemma": "processor",
                "pos": "NOUN",
                "type": "open",
                "morphofeat": "Number=Sing",
                "span": [{"id": "w49"}],
            },
            {
                "id": "t50",
                "lemma": ")",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w50"}],
            },
            {
                "id": "t51",
                "lemma": ".",
                "pos": "PUNCT",
                "type": "open",
                "span": [{"id": "w51"}],
            },
        ]
        assert actual == expected, (
            "expected: " + str(expected) + ", actual: " + str(actual)
        )
    def test_9_pdf_dependencies(self):
        """Dependency layer of the example NAF document.

        Each entry links a head term (from_term) to a dependent term
        (to_term) under a dependency relation name (rfunc).
        """
        naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
        actual = naf.deps
        # Expected dependency triples in the order the document stores them.
        expected = [
            {"from_term": "t3", "to_term": "t1", "rfunc": "det"},
            {"from_term": "t4", "to_term": "t3", "rfunc": "nsubj"},
            {"from_term": "t3", "to_term": "t2", "rfunc": "compound"},
            {"from_term": "t4", "to_term": "t5", "rfunc": "obj"},
            {"from_term": "t7", "to_term": "t6", "rfunc": "mark"},
            {"from_term": "t4", "to_term": "t7", "rfunc": "xcomp"},
            {"from_term": "t9", "to_term": "t8", "rfunc": "compound"},
            {"from_term": "t7", "to_term": "t9", "rfunc": "obj"},
            {"from_term": "t13", "to_term": "t10", "rfunc": "case"},
            {"from_term": "t7", "to_term": "t13", "rfunc": "obl"},
            {"from_term": "t12", "to_term": "t11", "rfunc": "compound"},
            {"from_term": "t13", "to_term": "t12", "rfunc": "amod"},
            {"from_term": "t17", "to_term": "t14", "rfunc": "compound"},
            {"from_term": "t16", "to_term": "t15", "rfunc": "cc"},
            {"from_term": "t14", "to_term": "t16", "rfunc": "conj"},
            {"from_term": "t22", "to_term": "t18", "rfunc": "case"},
            {"from_term": "t17", "to_term": "t22", "rfunc": "nmod"},
            {"from_term": "t22", "to_term": "t19", "rfunc": "punct"},
            {"from_term": "t22", "to_term": "t20", "rfunc": "amod"},
            {"from_term": "t22", "to_term": "t21", "rfunc": "punct"},
            {"from_term": "t26", "to_term": "t23", "rfunc": "cc"},
            {"from_term": "t22", "to_term": "t26", "rfunc": "conj"},
            {"from_term": "t26", "to_term": "t24", "rfunc": "det"},
            {"from_term": "t26", "to_term": "t25", "rfunc": "compound"},
            {"from_term": "t29", "to_term": "t27", "rfunc": "case"},
            {"from_term": "t26", "to_term": "t29", "rfunc": "nmod"},
            {"from_term": "t29", "to_term": "t28", "rfunc": "nummod"},
            {"from_term": "t17", "to_term": "t30", "rfunc": "punct"},
            {"from_term": "t37", "to_term": "t32", "rfunc": "mark"},
            {"from_term": "t31", "to_term": "t37", "rfunc": "acl"},
            {"from_term": "t37", "to_term": "t33", "rfunc": "mark"},
            {"from_term": "t37", "to_term": "t34", "rfunc": "punct"},
            {"from_term": "t37", "to_term": "t35", "rfunc": "nsubj"},
            {"from_term": "t37", "to_term": "t36", "rfunc": "aux"},
            {"from_term": "t43", "to_term": "t38", "rfunc": "mark"},
            {"from_term": "t37", "to_term": "t43", "rfunc": "ccomp"},
            {"from_term": "t37", "to_term": "t39", "rfunc": "compound:prt"},
            {"from_term": "t37", "to_term": "t40", "rfunc": "advmod"},
            {"from_term": "t37", "to_term": "t41", "rfunc": "punct"},
            {"from_term": "t43", "to_term": "t42", "rfunc": "aux:pass"},
            {"from_term": "t49", "to_term": "t44", "rfunc": "punct"},
            {"from_term": "t43", "to_term": "t49", "rfunc": "obl"},
            {"from_term": "t49", "to_term": "t45", "rfunc": "case"},
            {"from_term": "t49", "to_term": "t46", "rfunc": "case"},
            {"from_term": "t49", "to_term": "t47", "rfunc": "nmod:poss"},
            {"from_term": "t49", "to_term": "t48", "rfunc": "compound"},
            {"from_term": "t49", "to_term": "t50", "rfunc": "punct"},
            {"from_term": "t43", "to_term": "t51", "rfunc": "punct"},
        ]
        assert actual == expected, (
            "expected: " + str(expected) + ", actual: " + str(actual)
        )
def test_10_pdf_multiwords(self):
    """Check that the multiwords of the example NAF document match the expected structure."""
    doc = NafDocument().open(join("tests", "tests", "example.naf.xml"))
    expected = [
        {
            "id": "mw1",
            "lemma": "set_out",
            "pos": "VERB",
            "type": "phrasal",
            "components": [
                {"id": "mw1.c1", "span": [{"id": "t37"}]},
                {"id": "mw1.c2", "span": [{"id": "t39"}]},
            ],
        }
    ]
    actual = doc.multiwords
    assert actual == expected, f"expected: {expected}, actual: {actual}"
def test_11_raw(self):
naf = NafDocument().open(join("tests", "tests", "example.naf.xml"))
actual = naf.raw
expected = "The Nafigator package allows you to store NLP output from custom made spaCy and stanza pipelines with (intermediate) results and all processing steps in | |
Xcode 12.0 or newer is required.')
return False
pSettings.mMake = checkMake(downloadDir)
if len(pSettings.mMake) == 0:
print('Error: \'Make\' not found.')
return False
pSettings.mCMake = checkCMake(downloadDir)
if len(pSettings.mCMake) == 0:
print('Error: \'CMake\' not found.')
return False
pSettings.mNinja = checkNinja(downloadDir)
if len(pSettings.mNinja) == 0:
print('Error: \'Ninja\' not found.')
return False
if hostDetected:
iOScommonFlags = ' -D__IPHONE_OS_VERSION_MIN_REQUIRED=120400 -gdwarf-2 -fPIC -fno-strict-aliasing -fstack-protector -fvisibility=hidden'
iOSdeviceFlags = ' -miphoneos-version-min=12.4'
iOSsimulatorFlags = ' -mios-simulator-version-min=12.4'
macOScommonFlags = ' -gdwarf-2 -fPIC -fno-strict-aliasing -fstack-protector -fvisibility=hidden'
pSettings.mArch = ['arm64', 'arm64', 'x86_64', 'arm64', 'x86_64']
pSettings.mArchFlagASM = ['-arch arm64' + iOSdeviceFlags, '-arch arm64' + iOSsimulatorFlags, '-arch x86_64' + iOSsimulatorFlags, '-arch arm64 -mmacosx-version-min=11.0', '-arch x86_64 -mmacosx-version-min=10.13']
pSettings.mArchFlagC = ['-arch arm64 -ObjC' + iOScommonFlags + iOSdeviceFlags, '-arch arm64 -ObjC' + iOScommonFlags + iOSsimulatorFlags, '-arch x86_64 -ObjC' + iOScommonFlags + iOSsimulatorFlags, '-arch arm64 -ObjC -mmacosx-version-min=11.0' + macOScommonFlags, '-arch x86_64 -ObjC -mmacosx-version-min=10.13' + macOScommonFlags]
pSettings.mArchFlagCXX = ['-arch arm64 -ObjC++ -stdlib=libc++ -fvisibility-inlines-hidden' + iOScommonFlags + iOSdeviceFlags, '-arch arm64 -ObjC++ -stdlib=libc++ -fvisibility-inlines-hidden' + iOScommonFlags + iOSsimulatorFlags, '-arch x86_64 -ObjC++ -stdlib=libc++ -fvisibility-inlines-hidden' + iOScommonFlags + iOSsimulatorFlags, '-arch arm64 -ObjC++ -stdlib=libc++ -fvisibility-inlines-hidden -mmacosx-version-min=11.0' + macOScommonFlags, '-arch x86_64 -ObjC++ -stdlib=libc++ -fvisibility-inlines-hidden -mmacosx-version-min=10.13' + macOScommonFlags]
pSettings.mArchName = pSettings.mArch
pSettings.mPlatformName = ['ios', 'ios-simulator', 'ios-simulator', 'macos', 'macos']
pSettings.mMakeFlag = ['ARCH64=1 DSYM=1', 'ARCH64=1 DSYM=1', 'ARCH64=1 DSYM=1', 'ARCH64=1 DSYM=1', 'ARCH64=1 DSYM=1']
pSettings.mTargetSdk = ['iPhoneOS', 'iPhoneSimulator', 'iPhoneSimulator', '', '']
print('Toolchain path: "' + pSettings.mAppleSdkDir + '"')
else:
print('Error: Not supported host platform: ' + platformName + ' arm64/x86-64' if platform.machine().endswith('64') else ' x86')
return False
else:
return False
pSettings.mCoreCount = str(multiprocessing.cpu_count())
print('Available CPU cores: ' + pSettings.mCoreCount)
return True
def executeShellCommand(pCommandLine, pShowOutput = True):
    """Run a command line through the system shell and return its exit code.

    pCommandLine -- command string passed to the shell (shell=True).
    pShowOutput  -- when True, echo the command's stdout and, on failure,
                    its stderr to our stdout.  When False the command runs
                    silently (output is still captured, just not printed).
    """
    process = subprocess.Popen(pCommandLine, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)
    # communicate() reads both pipes to EOF and waits for the process to
    # terminate, so a separate wait() call is redundant; the exit code is
    # available on returncode afterwards.
    output, error = process.communicate()
    returnCode = process.returncode
    if pShowOutput:
        returnText = output.decode()
        if len(returnText) > 0:
            print(returnText)
        if returnCode != 0:
            print('Error message:\n' + error.decode("utf-8"))
    return returnCode
def receiveShellOutput(pCommandLine):
    """Run a command line through the system shell and return its decoded stdout.

    stderr is captured but discarded; the exit code is ignored, so callers
    must treat an empty string as a possible failure.
    """
    process = subprocess.Popen(pCommandLine, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True)
    # communicate() already waits for process termination; no extra wait() needed.
    output, _ = process.communicate()
    return output.decode()
def executeCmdCommand(pCommandLine, pWorkingDir, pShowOutput = True):
    """Run a command line inside a Windows ``cmd`` session and return its exit code.

    Windows-only counterpart of executeShellCommand.

    pCommandLine -- command text fed to the interactive cmd session's stdin.
    pWorkingDir  -- working directory for the cmd process.
    pShowOutput  -- when True, echo captured stdout and, on failure, stderr.
    """
    commandLine = pCommandLine.encode()
    # Append an "exit" line so the /k (keep-open) cmd session terminates after
    # running the command; otherwise communicate() would block forever.
    commandLine += b"""
exit
"""
    # /q disables echo of the commands, /k runs "echo off" and keeps the
    # session open for the piped-in command line.
    process = subprocess.Popen(["cmd", "/q", "/k", "echo off"], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = pWorkingDir, shell = True)
    process.stdin.write(commandLine)
    output, error = process.communicate()
    # NOTE(review): the exit code observed here is that of the cmd session
    # ended by "exit" -- confirm it propagates the command's ERRORLEVEL.
    returnCode = process.wait()
    if pShowOutput:
        returnText = output.decode()
        if len(returnText) > 0:
            print(returnText)
        if returnCode != 0:
            print('Error message:\n' + error.decode("utf-8"))
    return returnCode
def remove(pPath):
    """Delete pPath, whether it is a directory tree or a single file.

    A value of None, or a path that is neither an existing file nor an
    existing directory, is silently ignored.
    """
    if pPath is None:
        return
    if os.path.isdir(pPath):
        shutil.rmtree(pPath)
    elif os.path.isfile(pPath):
        os.remove(pPath)
def buildCMake(pLibraryName, pSettings, pCMakeFlag, pDSYM, pOutputDir, pOutputLibraryName):
    """Configure and build the given libraries with CMake for every architecture
    in pSettings.mArchName, copying the resulting static libraries into the
    per-target/per-platform/per-arch library directory.

    pLibraryName       -- list of library base names (output file names).
    pCMakeFlag         -- extra CMake command-line flags.
    pDSYM              -- when True, release builds use RelWithDebInfo instead of MinSizeRel.
    pOutputDir         -- build subdirectory the artifacts land in ('' = per-library dir).
    pOutputLibraryName -- names the build system gives the artifacts (copied/renamed
                          to pLibraryName); must have the same length as pLibraryName.

    Returns True if the build succeeded for at least one architecture
    (status is OR-ed across the loop), False otherwise.
    """
    print('Building...')
    status = False
    workingDir = os.getcwd()
    platformName = platform.system().lower()
    buildDir = os.path.join(workingDir, 'build_cmake')
    remove(buildDir)  # start from a clean out-of-source build directory
    # A '-debug' anywhere on the script command line selects a Debug build.
    releaseBuild = True
    for i in range(1, len(sys.argv)):
        if sys.argv[i] == '-debug':
            releaseBuild = False
            break
    if (len(pCMakeFlag) > 0):
        pCMakeFlag += ' '
    configType = ''
    if releaseBuild:
        if pDSYM:
            configType = 'RelWithDebInfo'
        else:
            configType = 'MinSizeRel'
    else:
        configType = 'Debug'
    pCMakeFlag += '-DCMAKE_BUILD_TYPE=' + configType
    for i in range(0, len(pSettings.mArchName)):
        # Destination: <lib>/lib/<target>/<platform>/<arch>; create it, or clear
        # any previously built copies of the requested libraries.
        libraryDir = os.path.join(pSettings.mLibDir, 'lib', pSettings.mBuildTarget, pSettings.mPlatformName[i], pSettings.mArchName[i])
        if not os.path.isdir(libraryDir):
            os.makedirs(libraryDir)
        else:
            for j in range(0, len(pLibraryName)):
                libraryFilepath = os.path.join(libraryDir, 'lib' + pLibraryName[j] + '.a')
                if os.path.isfile(libraryFilepath):
                    os.remove(libraryFilepath)
        os.makedirs(buildDir)
        os.chdir(buildDir)
        # Per-arch compiler/assembler flags are appended to a copy so the base
        # pCMakeFlag stays unchanged for the next iteration.
        cmakeFlag = pCMakeFlag
        if len(pSettings.mArchFlagASM[i]) > 0:
            cmakeFlag += ' \"-DCMAKE_ASM_FLAGS=' + pSettings.mArchFlagASM[i] + '\"'
        if len(pSettings.mArchFlagC[i]) > 0:
            cmakeFlag += ' \"-DCMAKE_C_FLAGS=' + pSettings.mArchFlagC[i] + '\"'
        if len(pSettings.mArchFlagCXX[i]) > 0:
            cmakeFlag += ' \"-DCMAKE_CXX_FLAGS=' + pSettings.mArchFlagCXX[i] + '\"'
        buildSuccess = False
        # Target-specific CMake configure step (generates the Ninja build files).
        if pSettings.mBuildTarget == 'android':
            buildSuccess = buildCMakeAndroid(i, pSettings, cmakeFlag)
        elif pSettings.mBuildTarget == 'linux':
            buildSuccess = buildCMakeLinux(i, pSettings, cmakeFlag)
        elif pSettings.mBuildTarget == 'apple':
            buildSuccess = buildCMakeApple(i, pSettings, cmakeFlag)
        if buildSuccess:
            buildCommand = pSettings.mCMake + ' --build . --config ' + configType
            if platformName == 'linux' or platformName == 'darwin':
                buildSuccess = executeShellCommand(buildCommand) == 0
            elif platformName == 'windows':
                buildSuccess = executeCmdCommand(buildCommand, buildDir) == 0
            # Copy each built artifact into the destination directory, renaming
            # from the build-system output name to the requested library name.
            if (len(pLibraryName) == len(pOutputLibraryName)):
                for j in range(0, len(pLibraryName)):
                    try:
                        shutil.copy2(os.path.join(buildDir, pLibraryName[j] if len(pOutputDir) == 0 else pOutputDir, 'lib' + pOutputLibraryName[j] + '.a'), os.path.join(libraryDir, 'lib' + pLibraryName[j] + '.a'))
                    except FileNotFoundError:
                        print('Error: system couldn\'t copy library')
                        pass
            else:
                print('Error: system couldn\'t copy library')
        os.chdir('..')
        remove(buildDir)
        print('Build status for ' + pSettings.mArchName[i] + (' (' + pSettings.mPlatformName[i] + ')' if len(pSettings.mPlatformName[i]) > 0 else '') + ': ' + ('Succeeded' if buildSuccess else 'Failed') + '\n')
        # OR across architectures: one successful arch makes the overall
        # status True even if others failed.
        status |= buildSuccess
    # For Apple targets this bundles the per-arch outputs into an XCFramework;
    # behavior for other targets is defined by createXCFramework itself.
    createXCFramework(pLibraryName, pSettings)
    return status
def buildCMakeAndroid(pIndex, pSettings, pCMakeFlag):
    """Run the CMake configure step for one Android architecture.

    pIndex     -- index into pSettings.mArchName selecting the ABI.
    pCMakeFlag -- flags already assembled by buildCMake (build type, compiler flags).

    Returns True when the configure command exited with status 0.
    """
    status = False
    platformName = platform.system().lower()
    toolchainPath = os.path.join(pSettings.mAndroidNdkDir, 'build', 'cmake', 'android.toolchain.cmake')
    if os.path.isfile(toolchainPath):
        androidApi = pSettings.mAndroidApi
        # 64-bit ABIs only exist from API level 21 on, so clamp lower requests.
        if (int(androidApi) < 21 and (pSettings.mArchName[pIndex] == 'arm64-v8a' or pSettings.mArchName[pIndex] == 'x86_64')):
            androidApi = '21'
            print('Force Android API: \"21\" for architecture \"' + pSettings.mArchName[pIndex] + '\".')
        # Configure in the current directory against the parent source dir ('..')
        # using the NDK toolchain file and the Ninja generator.
        cmakeCommand = pSettings.mCMake + ' ' + pCMakeFlag + ' -DANDROID_ABI=' + pSettings.mArchName[pIndex] + ' -DANDROID_NATIVE_API_LEVEL=' + androidApi + ' -DCMAKE_TOOLCHAIN_FILE=' + toolchainPath + ' -GNinja -DCMAKE_MAKE_PROGRAM=' + pSettings.mNinja + ' ..'
        if platformName == 'linux' or platformName == 'darwin':
            status = executeShellCommand(cmakeCommand) == 0
        elif platformName == 'windows':
            status = executeCmdCommand(cmakeCommand, os.getcwd()) == 0
    return status
def buildCMakeLinux(pIndex, pSettings, pCMakeFlag):
    """Run the CMake configure step for one Linux architecture.

    For 32-bit x86 a dedicated toolchain file is injected; otherwise the host
    toolchain is used.  Returns True when the configure command exited with
    status 0; always False on non-Linux hosts.
    """
    toolchainArg = ''
    if pSettings.mBuildTarget == 'linux' and pSettings.mArchName[pIndex] == 'x86':
        toolchainArg = ' -DCMAKE_TOOLCHAIN_FILE=' + os.path.join(pSettings.mRootDir, 'script', 'linux.toolchain.cmake')
    command = '%s %s%s -GNinja -DCMAKE_MAKE_PROGRAM=%s ..' % (pSettings.mCMake, pCMakeFlag, toolchainArg, pSettings.mNinja)
    if platform.system().lower() != 'linux':
        return False
    return executeShellCommand(command) == 0
def buildCMakeApple(pIndex, pSettings, pCMakeFlag):
    """Run the CMake configure step for one Apple (iOS / iOS simulator / macOS) slice.

    Builds the appropriate toolchain-file command line depending on
    pSettings.mPlatformName[pIndex], then executes it (macOS hosts only).
    Returns True when the configure command exited with status 0.
    """
    status = False
    platformName = platform.system().lower()
    cmakeCommand = ''
    # NOTE(review): both branches require mBuildVariant != 'macos'; for the
    # 'macos' platform branch that condition looks suspicious -- confirm the
    # intended meaning of mBuildVariant.
    if (pSettings.mPlatformName[pIndex] == 'ios' or pSettings.mPlatformName[pIndex] == 'ios-simulator') and pSettings.mBuildVariant != 'macos':
        toolchainPath = os.path.join(pSettings.mRootDir, 'script', 'ios.toolchain.cmake')
        executableDir = os.path.join(pSettings.mAppleSdkDir, 'Toolchains', 'XcodeDefault.xctoolchain', 'usr', 'bin')
        sysrootDir = os.path.join(pSettings.mAppleSdkDir, 'Platforms', pSettings.mTargetSdk[pIndex] + '.platform', 'Developer', 'SDKs', pSettings.mTargetSdk[pIndex] + '.sdk')
        # Only configure when the toolchain file, toolchain binaries and SDK
        # sysroot all exist; otherwise cmakeCommand stays empty and we fail.
        if os.path.isfile(toolchainPath) and os.path.isdir(executableDir) and os.path.isdir(sysrootDir):
            cmakeCommand = pSettings.mCMake + ' ' + pCMakeFlag + ' -DCMAKE_TOOLCHAIN_FILE=' + toolchainPath + ' -DHMS_XCODE_PATH=' + pSettings.mAppleSdkDir + ' -DHMS_TARGET=' + pSettings.mTargetSdk[pIndex] + ' -GNinja -DCMAKE_MAKE_PROGRAM=' + pSettings.mNinja + ' ..'
    elif pSettings.mPlatformName[pIndex] == 'macos' and pSettings.mBuildVariant != 'macos':
        toolchainPath = os.path.join(pSettings.mRootDir, 'script', 'macos.toolchain.cmake')
        if os.path.isfile(toolchainPath):
            cmakeCommand = pSettings.mCMake + ' ' + pCMakeFlag + ' -DCMAKE_TOOLCHAIN_FILE=' + toolchainPath + ' -DHMS_ARCH=' + pSettings.mArch[pIndex] + ' -GNinja -DCMAKE_MAKE_PROGRAM=' + pSettings.mNinja + ' ..'
    # Apple builds can only be configured on a macOS host.
    if platformName == 'darwin' and len(cmakeCommand) > 0:
        status = executeShellCommand(cmakeCommand) == 0
    return status
def buildMake(pLibraryName, pSettings, pMakeFlag):
    """Build the given libraries with Make for every architecture in
    pSettings.mArchName and copy the resulting static libraries into the
    per-target/per-platform/per-arch library directory.

    Per-arch compiler flags are passed to Make through the CFLAGS/CXXFLAGS
    environment variables, which are set before and deleted after each build.

    Returns True if the build succeeded for at least one architecture
    (status is OR-ed across the loop), False otherwise.
    """
    status = False
    workingDir = os.getcwd()
    platformName = platform.system().lower()
    # A '-debug' anywhere on the script command line selects a debug build;
    # otherwise NDEBUG=1 is appended to the Make flags.
    releaseBuild = True
    for i in range(1, len(sys.argv)):
        if sys.argv[i] == '-debug':
            releaseBuild = False
            break
    if releaseBuild:
        if (len(pMakeFlag) > 0):
            pMakeFlag += ' '
        pMakeFlag += 'NDEBUG=1'
    # Clean once before the first architecture builds.
    if platformName == 'linux' or platformName == 'darwin':
        executeShellCommand(pSettings.mMake + ' clean')
    elif platformName == 'windows':
        executeCmdCommand(pSettings.mMake + ' clean', workingDir)
    for i in range(0, len(pSettings.mArchName)):
        # Destination: <lib>/lib/<target>/<platform>/<arch>; create it, or clear
        # any previously built copies of the requested libraries.
        libraryDir = os.path.join(pSettings.mLibDir, 'lib', pSettings.mBuildTarget, pSettings.mPlatformName[i], pSettings.mArchName[i])
        if not os.path.isdir(libraryDir):
            os.makedirs(libraryDir)
        else:
            for j in range(0, len(pLibraryName)):
                libraryFilepath = os.path.join(libraryDir, 'lib' + pLibraryName[j] + '.a')
                if os.path.isfile(libraryFilepath):
                    os.remove(libraryFilepath)
        # NOTE(review): this overwrites and then unconditionally deletes any
        # pre-existing CFLAGS/CXXFLAGS from the caller's environment.
        os.environ['CFLAGS'] = pSettings.mArchFlagC[i]
        os.environ['CXXFLAGS'] = pSettings.mArchFlagCXX[i]
        buildSuccess = False
        if pSettings.mBuildTarget == 'android':
            buildSuccess = buildMakeAndroid(i, pLibraryName, pSettings, pMakeFlag)
        elif pSettings.mBuildTarget == 'linux':
            buildSuccess = buildMakeLinux(i, pLibraryName, pSettings, pMakeFlag)
        elif pSettings.mBuildTarget == 'apple':
            buildSuccess = buildMakeApple(i, pLibraryName, pSettings, pMakeFlag)
        del os.environ['CFLAGS']
        del os.environ['CXXFLAGS']
        if buildSuccess:
            # Make leaves the artifacts in the working directory; copy them
            # into the destination library directory.
            for j in range(0, len(pLibraryName)):
                try:
                    shutil.copy2(os.path.join(workingDir, 'lib' + pLibraryName[j] + '.a'), os.path.join(libraryDir, 'lib' + pLibraryName[j] + '.a'))
                except FileNotFoundError:
                    print('Error: system couldn\'t copy library')
                    pass
        # Clean between architectures so object files are rebuilt with the
        # next arch's flags.
        if platformName == 'linux' or platformName == 'darwin':
            executeShellCommand(pSettings.mMake + ' clean')
        elif platformName == 'windows':
            executeCmdCommand(pSettings.mMake + ' clean', workingDir)
        print('Build status for ' + pSettings.mArchName[i] + (' (' + pSettings.mPlatformName[i] + ')' if len(pSettings.mPlatformName[i]) > 0 else '') + ': ' + ('Succeeded' if buildSuccess else 'Failed') + '\n')
        # OR across architectures: one successful arch makes the overall
        # status True even if others failed.
        status |= buildSuccess
    createXCFramework(pLibraryName, pSettings)
    return status
def buildMakeAndroid(pIndex, pLibraryName, pSettings, pMakeFlag):
print('Building...')
status = False
platformName = platform.system().lower()
toolchainDir = os.path.join(pSettings.mAndroidNdkDir, 'toolchains', 'llvm', 'prebuilt')
if platformName == 'linux':
toolchainDir = os.path.join(toolchainDir, 'linux-x86_64', 'bin')
elif platformName == 'darwin':
toolchainDir = os.path.join(toolchainDir, 'darwin-x86_64', 'bin')
elif platformName == 'windows':
toolchainDir = os.path.join(toolchainDir, 'windows-x86_64', 'bin')
if os.path.isdir(toolchainDir):
llvmPrefix | |
need rules.Today's not a good day to find out why I have so many.": "Someone...",
"Never run when you’re scared.": "Someone...",
"What’s the point in having a heart if you can’t be a bit forgiving every now and then?": "Someone...",
"There’s no point in being grown up if you can’t be childish sometimes.": "<NAME>",
"When you’re a kid, they tell you it’s all… Grow up, get a job, get married, get a house, have a kid, and that’s it. But the truth is, the world is so much stranger than that. It’s so much darker. And so much madder. And so much better.": "Someone...",
"You want weapons? We’re in a library! Books! The best weapons in the world!Arm yourself...!": "Someone...",
"Letting it get to you. You know what that’s called? Being alive. Best thing there is. Being alive right now is all that counts.": "Someone...",
"Superior intelligence and senseless cruelty just do not go together.": "Someone...",
"A straight line may be the shortest distance between two points, but it is by no means the most interesting.": "Someone...",
"Come on, man! It isn’t rocket science, it’s just quantum physics!": "Someone...",
"If you step on a butterfly, you change the future of the human race.": "Someone...",
"The universe has to move forward. Pain and loss, they define us as much as happiness or love. Whether it’s a world, or a relationship… Everything has its time. And everything ends.": "Someone...",
"Big flashy things have my name written all over them. Well… not yet, give me time and a crayon.": "Someone...",
"Never ignore coincidence. Unless, of course, you’re busy. In which case, always ignore coincidence.": "Someone...",
"Almost every species in the universe has an irrational fear of the dark. But they’re wrong. ‘Cause it’s not irrational. *I'm really scared of the dark": "Someone...",
"Biting’s excellent. It’s like kissing – only there is a winner.": "Someone...",
"Everything’s got to end sometime. Otherwise nothing would ever get started.": "Someone...",
"There’s always something to look at if you open your eyes!": "Someone...",
"Courage isn’t just a matter of not being frightened, you know. It’s being afraid and doing what you have to do anyway.": "Someone...",
"There's a horror movie called Alien?That's really offensive, no wonder everyone keeps invading you...": "Someone...",
"Sometimes the only choices you have are bad ones but you still have to choose.": "Someone...",
"**Never** trust a **hug**.It's just a way to hide your face...": "Someone...",
"This is one corner… of one country, in one continent, on one planet that’s a corner of a galaxy that’s a corner of a universe that is forever growing and shrinking and creating and destroying and never remaining the same for a single millisecond. And there is so much, so much to see.": "Someone...",
"Do you think that we care for you so little that betraying us would make a difference?": "Someone...",
"*A BEDROOM?*,What... You've got a whole room *for not being awake in?* But what's the point? You're just missing the room!": "Someone...",
"I've got a horrible feeling that I might have to kill you,I thought you might appreciate a drink first.": "Someone...",
"Do you know what the big problemis in telling Fantasy and Reality apart? They're both ridiculous....": "Someone...",
"Without the capacity for pain, we can't feel the hurt that we inflict...": "Someone...",
"I want you to know that someone cares. Someone, not me": "Someone...",
"The question isn’t who is going to let me; it’s who is going to stop me.": "<NAME>",
"Life is like a box of chocolates. You never know what you’re going to get.": "<NAME>",
"The three great essentials to achieve anything worthwhile are, first, hard work; second, stick-to-itiveness; third, common sense.": "Someone...",
"The successful warrior is the average man, with laser-like focus.": "<NAME>",
"Success? I don’t know what that word means. I’m happy. But success, that goes back to what in somebody’s eyes success means. For me, success is inner peace. That’s a good day for me.": "<NAME>",
"A kiss is a lovely trick designed by nature to stop speech when words become superfluous.": "<NAME>",
"Sooner or later, we all go through a crucible. Most believe there are two types of people who go into a crucible: the ones who grow stronger from the experience and survive it, and the ones who die. But there's a third type: the ones who learn to love the fire. They chose to stay in their crucible because it's easier to embrace the pain when it's all you know anymore......": ""}
nerd = [
"Star Trek II: The Wrath of Kahn featured the first fully computer-created sequence in movie history. The effects were created by a small, subsidiary company of Lucasfilm, which would rename itself to Pixar Animation Studios just a few years later.",
"In 1982, the movie Tron was passed over for a nomination for special effects at the Oscars because the effects were created by a computer. According to the movie’s director, the Academy thought that using computers was cheating.",
"Any mega fan of the Star Wars franchise will know the word “wizard” is not just a term for a guy with robes, a long beard, and magical powers. It’s also an adjective that means “awesome,” and was used by young Anakin Skywalker in The Phantom Menace.",
"When Star Trek premiered in 1966, the final line of the introductory text of each episode was “to boldly go where no man has gone before.” This led to a debate amongst grammar nerds over whether or not the phrase should have been “to go boldly where no man has gone before” instead.",
"Pokémon are fictional creatures which can be captured by humans known as “trainers” for battle or for sport. The question of which pokémon came first is a popular discussion subject among fans. Bulbasaur is the first pokémon in the Pokédex, but that doesn’t make him the first ever. Arceus is the equivalent of God in the Pokémon world and is believed to have created the universe, making it a solid contender for the first pokémon, but on the other hand, Mew contains the genetic code of all pokémon in its DNA, and so it is believed to have come first.",
"The word floccinaucinihilipilification means “the act of viewing something as being useless, without value, or unimportant.” It is the longest non-technical term in the English language and is one letter longer than antidisestablishmentarianism.",
"Believe it or not, there are at least two complete novels that do not use the letter “E.” The first is a 50,000-word novel called Gadsby written in 1939 by <NAME>. To avoid using the letter “E,” Wright allegedly pinned down the letter on his typewriter. The second is a novel written in French by <NAME>. Published in 1969, the French title was La Disparition, and it was eventually translated into English under the name A Void. A literal translation of the book’s title would have been The Disappearance, but that would have used “E” three times, so it had to be changed.",
"There is one common domestic animal that was not mentioned in the bible: Cats do not appear anywhere in the text, and according to some historians, there may be a good reason for this. Cats were revered and even worshiped by the Egyptian people, but in the eyes of Christians, Egyptians were Pagan, and early writers of the bible would have removed any Pagan references. Another theory suggests that cats are not mentioned because they were not considered to be domestic animals. Even house cats are still partially wild, and without human interaction can become completely feral and still survive. I’m watching you, Mittens…",
"The term “robot” was coined in 1920 by the playwright <NAME> in his play Rossum’s Universal | |
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'HealthCheckAlarmIdentifier',
'HealthCheckConfigProperties',
'HealthCheckTag',
'HostedZoneConfig',
'HostedZoneQueryLoggingConfig',
'HostedZoneTag',
'HostedZoneVPC',
'RecordSetAliasTarget',
'RecordSetGeoLocation',
'RecordSetGroupAliasTarget',
'RecordSetGroupGeoLocation',
'RecordSetGroupRecordSet',
]
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HealthCheckAlarmIdentifier(dict):
    """
    A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether the specified health check is healthy.
    """
    def __init__(__self__, *,
                 name: str,
                 region: str):
        """
        A complex type that identifies the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether the specified health check is healthy.
        :param str name: The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.
        :param str region: For the CloudWatch alarm that you want Route 53 health checkers to use to determine whether this health check is healthy, the region that the alarm was created in.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "region", region)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the CloudWatch alarm that you want Amazon Route 53 health checkers to use to determine whether this health check is healthy.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def region(self) -> str:
        """
        For the CloudWatch alarm that you want Route 53 health checkers to use to determine whether this health check is healthy, the region that the alarm was created in.
        """
        return pulumi.get(self, "region")
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HealthCheckConfigProperties(dict):
    """
    A complex type that contains information about the health check.
    """
    @staticmethod
    def __key_warning(key: str):
        # Maps the wire-format camelCase key to the snake_case property name
        # and warns the caller to use the property getter instead of dict access.
        suggest = None
        if key == "alarmIdentifier":
            suggest = "alarm_identifier"
        elif key == "childHealthChecks":
            suggest = "child_health_checks"
        elif key == "enableSNI":
            suggest = "enable_sni"
        elif key == "failureThreshold":
            suggest = "failure_threshold"
        elif key == "fullyQualifiedDomainName":
            suggest = "fully_qualified_domain_name"
        elif key == "healthThreshold":
            suggest = "health_threshold"
        elif key == "iPAddress":
            suggest = "i_p_address"
        elif key == "insufficientDataHealthStatus":
            suggest = "insufficient_data_health_status"
        elif key == "measureLatency":
            suggest = "measure_latency"
        elif key == "requestInterval":
            suggest = "request_interval"
        elif key == "resourcePath":
            suggest = "resource_path"
        elif key == "routingControlArn":
            suggest = "routing_control_arn"
        elif key == "searchString":
            suggest = "search_string"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in HealthCheckConfigProperties. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        HealthCheckConfigProperties.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        HealthCheckConfigProperties.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 type: 'HealthCheckConfigPropertiesType',
                 alarm_identifier: Optional['outputs.HealthCheckAlarmIdentifier'] = None,
                 child_health_checks: Optional[Sequence[str]] = None,
                 enable_sni: Optional[bool] = None,
                 failure_threshold: Optional[int] = None,
                 fully_qualified_domain_name: Optional[str] = None,
                 health_threshold: Optional[int] = None,
                 i_p_address: Optional[str] = None,
                 insufficient_data_health_status: Optional['HealthCheckConfigPropertiesInsufficientDataHealthStatus'] = None,
                 inverted: Optional[bool] = None,
                 measure_latency: Optional[bool] = None,
                 port: Optional[int] = None,
                 regions: Optional[Sequence[str]] = None,
                 request_interval: Optional[int] = None,
                 resource_path: Optional[str] = None,
                 routing_control_arn: Optional[str] = None,
                 search_string: Optional[str] = None):
        """
        A complex type that contains information about the health check.
        """
        # Only 'type' is required; every optional field is stored only when
        # explicitly provided, so the underlying dict omits absent keys.
        pulumi.set(__self__, "type", type)
        if alarm_identifier is not None:
            pulumi.set(__self__, "alarm_identifier", alarm_identifier)
        if child_health_checks is not None:
            pulumi.set(__self__, "child_health_checks", child_health_checks)
        if enable_sni is not None:
            pulumi.set(__self__, "enable_sni", enable_sni)
        if failure_threshold is not None:
            pulumi.set(__self__, "failure_threshold", failure_threshold)
        if fully_qualified_domain_name is not None:
            pulumi.set(__self__, "fully_qualified_domain_name", fully_qualified_domain_name)
        if health_threshold is not None:
            pulumi.set(__self__, "health_threshold", health_threshold)
        if i_p_address is not None:
            pulumi.set(__self__, "i_p_address", i_p_address)
        if insufficient_data_health_status is not None:
            pulumi.set(__self__, "insufficient_data_health_status", insufficient_data_health_status)
        if inverted is not None:
            pulumi.set(__self__, "inverted", inverted)
        if measure_latency is not None:
            pulumi.set(__self__, "measure_latency", measure_latency)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if regions is not None:
            pulumi.set(__self__, "regions", regions)
        if request_interval is not None:
            pulumi.set(__self__, "request_interval", request_interval)
        if resource_path is not None:
            pulumi.set(__self__, "resource_path", resource_path)
        if routing_control_arn is not None:
            pulumi.set(__self__, "routing_control_arn", routing_control_arn)
        if search_string is not None:
            pulumi.set(__self__, "search_string", search_string)

    @property
    @pulumi.getter
    def type(self) -> 'HealthCheckConfigPropertiesType':
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="alarmIdentifier")
    def alarm_identifier(self) -> Optional['outputs.HealthCheckAlarmIdentifier']:
        return pulumi.get(self, "alarm_identifier")

    @property
    @pulumi.getter(name="childHealthChecks")
    def child_health_checks(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "child_health_checks")

    @property
    @pulumi.getter(name="enableSNI")
    def enable_sni(self) -> Optional[bool]:
        return pulumi.get(self, "enable_sni")

    @property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[int]:
        return pulumi.get(self, "failure_threshold")

    @property
    @pulumi.getter(name="fullyQualifiedDomainName")
    def fully_qualified_domain_name(self) -> Optional[str]:
        return pulumi.get(self, "fully_qualified_domain_name")

    @property
    @pulumi.getter(name="healthThreshold")
    def health_threshold(self) -> Optional[int]:
        return pulumi.get(self, "health_threshold")

    @property
    @pulumi.getter(name="iPAddress")
    def i_p_address(self) -> Optional[str]:
        return pulumi.get(self, "i_p_address")

    @property
    @pulumi.getter(name="insufficientDataHealthStatus")
    def insufficient_data_health_status(self) -> Optional['HealthCheckConfigPropertiesInsufficientDataHealthStatus']:
        return pulumi.get(self, "insufficient_data_health_status")

    @property
    @pulumi.getter
    def inverted(self) -> Optional[bool]:
        return pulumi.get(self, "inverted")

    @property
    @pulumi.getter(name="measureLatency")
    def measure_latency(self) -> Optional[bool]:
        return pulumi.get(self, "measure_latency")

    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def regions(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "regions")

    @property
    @pulumi.getter(name="requestInterval")
    def request_interval(self) -> Optional[int]:
        return pulumi.get(self, "request_interval")

    @property
    @pulumi.getter(name="resourcePath")
    def resource_path(self) -> Optional[str]:
        return pulumi.get(self, "resource_path")

    @property
    @pulumi.getter(name="routingControlArn")
    def routing_control_arn(self) -> Optional[str]:
        return pulumi.get(self, "routing_control_arn")

    @property
    @pulumi.getter(name="searchString")
    def search_string(self) -> Optional[str]:
        return pulumi.get(self, "search_string")
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HealthCheckTag(dict):
    """
    A key-value pair to associate with a resource.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        A key-value pair to associate with a resource.
        :param str key: The key name of the tag.
        :param str value: The value for the tag.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag.
        """
        return pulumi.get(self, "value")
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HostedZoneConfig(dict):
    """
    A complex type that contains an optional comment.
    If you don't want to specify a comment, omit the HostedZoneConfig and Comment elements.
    """
    def __init__(__self__, *,
                 comment: Optional[str] = None):
        """
        A complex type that contains an optional comment.
        If you don't want to specify a comment, omit the HostedZoneConfig and Comment elements.
        :param str comment: Any comments that you want to include about the hosted zone.
        """
        # The key is stored only when a comment was provided, so the
        # underlying dict omits the absent field.
        if comment is not None:
            pulumi.set(__self__, "comment", comment)

    @property
    @pulumi.getter
    def comment(self) -> Optional[str]:
        """
        Any comments that you want to include about the hosted zone.
        """
        return pulumi.get(self, "comment")
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HostedZoneQueryLoggingConfig(dict):
    """
    A complex type that contains information about a configuration for DNS query logging.
    """
    @staticmethod
    def __key_warning(key: str):
        # Maps the wire-format camelCase key to the snake_case property name
        # and warns the caller to use the property getter instead of dict access.
        suggest = None
        if key == "cloudWatchLogsLogGroupArn":
            suggest = "cloud_watch_logs_log_group_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in HostedZoneQueryLoggingConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        HostedZoneQueryLoggingConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        HostedZoneQueryLoggingConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cloud_watch_logs_log_group_arn: str):
        """
        A complex type that contains information about a configuration for DNS query logging.
        :param str cloud_watch_logs_log_group_arn: The Amazon Resource Name (ARN) of the CloudWatch Logs log group that Amazon Route 53 is publishing logs to.
        """
        pulumi.set(__self__, "cloud_watch_logs_log_group_arn", cloud_watch_logs_log_group_arn)

    @property
    @pulumi.getter(name="cloudWatchLogsLogGroupArn")
    def cloud_watch_logs_log_group_arn(self) -> str:
        """
        The Amazon Resource Name (ARN) of the CloudWatch Logs log group that Amazon Route 53 is publishing logs to.
        """
        return pulumi.get(self, "cloud_watch_logs_log_group_arn")
# Auto-generated by the Pulumi SDK Generator (see file header); edits here
# will be lost on regeneration.
@pulumi.output_type
class HostedZoneTag(dict):
    """
    A complex type that contains information about a tag that you want to add or edit for the specified health check or hosted zone.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        A complex type that contains information about a tag that you want to add or edit for the specified health check or hosted zone.
        :param str key: The key name of the tag.
        :param str value: The value for the tag.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class HostedZoneVPC(dict):
"""
A complex type that contains information about an Amazon VPC. Route 53 Resolver uses the records in the private hosted zone to route traffic in that VPC.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "vPCId":
suggest = "v_pc_id"
elif key == "vPCRegion":
suggest = "v_pc_region"
| |
('mapping_pg_sub_part_id', 'int', 'true', '-1'),
('tablespace_id', 'int', 'false', '-1')
],
migrate_data_before_2200 = True,
columns_with_tenant_id = ['table_id', 'tablespace_id'],
)
# Register the default sub-partition template table and its schema-history
# companion (history table id 153 is generated from the same definition).
def_table_schema(**all_def_sub_part_def)
def_table_schema(**gen_history_table_def(153, all_def_sub_part_def))

# __all_server_event_history (id 154): per-server event log keyed by event
# time plus the reporting server's address; partitioned by server address.
def_table_schema(
  table_name = '__all_server_event_history',
  table_id = '154',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('gmt_create', 'timestamp:6', 'false'),
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH'),
    ('svr_port', 'int'),
  ],
  partition_expr = ['key_v2', 'svr_ip, svr_port', '16'],
  partition_columns = ['svr_ip', 'svr_port'],
  normal_columns = [
    # Up to six loosely-typed name/value attribute pairs per event;
    # value2 is longtext so it can carry a large payload.
    ('module', 'varchar:MAX_ROOTSERVICE_EVENT_DESC_LENGTH', 'false'),
    ('event', 'varchar:MAX_ROOTSERVICE_EVENT_DESC_LENGTH', 'false'),
    ('name1', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value1', 'varchar:MAX_ROOTSERVICE_EVENT_VALUE_LENGTH', 'true', ''),
    ('name2', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value2', 'longtext', 'true'),
    ('name3', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value3', 'varchar:MAX_ROOTSERVICE_EVENT_VALUE_LENGTH', 'true', ''),
    ('name4', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value4', 'varchar:MAX_ROOTSERVICE_EVENT_VALUE_LENGTH', 'true', ''),
    ('name5', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value5', 'varchar:MAX_ROOTSERVICE_EVENT_VALUE_LENGTH', 'true', ''),
    ('name6', 'varchar:MAX_ROOTSERVICE_EVENT_NAME_LENGTH', 'true', ''),
    ('value6', 'varchar:MAX_ROOTSERVICE_EVENT_VALUE_LENGTH', 'true', ''),
    ('extra_info', 'varchar:MAX_ROOTSERVICE_EVENT_EXTRA_INFO_LENGTH', 'true', ''),
  ],
)
# __all_rootservice_job (id 155): bookkeeping for long-running RootService
# jobs; most descriptive columns are nullable because they depend on job type.
def_table_schema(
  table_name = '__all_rootservice_job',
  table_id = '155',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('job_id', 'int')
  ],
  normal_columns = [
    ('job_type', 'varchar:128', 'false'),
    ('job_status', 'varchar:128', 'false'),
    ('return_code', 'int', 'true'),
    ('progress', 'int', 'false', '0'),
    ('tenant_id', 'int', 'true'),
    ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH_STORE', 'true'),
    ('database_id', 'int', 'true'),
    ('database_name', 'varchar:OB_MAX_DATABASE_NAME_LENGTH', 'true'),
    ('table_id', 'int', 'true'),
    # NOTE(review): 'TALBE' matches the (misspelled) C++ constant name;
    # do not "fix" the spelling here or schema generation will break.
    ('table_name', 'varchar:OB_MAX_CORE_TALBE_NAME_LENGTH', 'true'),
    ('partition_id', 'int', 'true'),
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'),
    ('svr_port', 'int', 'true'),
    ('unit_id', 'int', 'true'),
    ('rs_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'false'),
    ('rs_svr_port', 'int', 'false'),
    ('sql_text', 'longtext', 'true'),
    ('extra_info', 'varchar:MAX_ROOTSERVICE_EVENT_EXTRA_INFO_LENGTH', 'true'),
    ('resource_pool_id', 'int', 'true'),
    ('tablegroup_id', 'int', 'true'),
    ('tablegroup_name', 'varchar:OB_MAX_TABLEGROUP_NAME_LENGTH', 'true'),
  ],
)

# __all_unit_load_history (id 156): time series of per-unit load metrics
# (usage rates and balancing weights), keyed only by sample time.
def_table_schema(
  table_name = '__all_unit_load_history',
  table_id = '156',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('gmt_create', 'timestamp:6', 'false'),
  ],
  normal_columns = [
    ('tenant_id', 'int'),
    ('zone', 'varchar:MAX_ZONE_LENGTH'),
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'),
    ('svr_port', 'int', 'true'),
    ('unit_id', 'int'),
    ('load', 'double'),
    ('disk_usage_rate', 'double'),
    ('memory_usage_rate', 'double'),
    ('cpu_usage_rate', 'double'),
    ('iops_usage_rate', 'double'),
    ('disk_weight', 'double'),
    ('memory_weight', 'double'),
    ('cpu_weight', 'double'),
    ('iops_weight', 'double'),
    ('rs_svr_ip', 'varchar:MAX_IP_ADDR_LENGTH', 'true'),
    ('rs_svr_port', 'int', 'true'),
  ],
)
# __all_sys_variable_history (id 157): versioned history of tenant system
# variables, keyed by schema_version; lives in tenant space and is needed
# again after an RS restart.
all_sys_variable_history_def= dict(
  table_name = '__all_sys_variable_history',
  table_id = '157',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('zone', 'varchar:MAX_ZONE_LENGTH'),
    ('name', 'varchar:OB_MAX_CONFIG_NAME_LEN', 'false', ''),
    ('schema_version', 'int')
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  rs_restart_related = True,
  in_tenant_space = True,
  normal_columns = [
    ('is_deleted', 'int', 'false'),
    ('data_type', 'int'),
    ('value', 'varchar:OB_MAX_CONFIG_VALUE_LEN', 'true'),
    ('info', 'varchar:OB_MAX_CONFIG_INFO_LEN'),
    ('flags', 'int'),
    ('min_val', 'varchar:OB_MAX_CONFIG_VALUE_LEN', 'false', ''),
    ('max_val', 'varchar:OB_MAX_CONFIG_VALUE_LEN', 'false', ''),
  ],
  migrate_data_before_2200 = True,
  columns_with_tenant_id = [],
)
def_table_schema(**all_sys_variable_history_def)

# __all_restore_job (id 158): active physical-restore jobs.
def_table_schema(
  table_name = '__all_restore_job',
  table_id = '158',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('job_id', 'int')
  ],
  normal_columns = [
    ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH'),
    ('start_time', 'int'),
    ('backup_uri', 'varchar:2048'),
    ('backup_end_time', 'int'),
    ('recycle_end_time', 'int'),
    ('level', 'int'),
    ('status', 'int')
  ],
)

# __all_restore_task (id 159): per-partition restore progress for a job.
def_table_schema(
  table_name = '__all_restore_task',
  table_id = '159',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('table_id', 'int'),
    ('partition_id', 'int')
  ],
  normal_columns = [
    ('backup_table_id', 'int'),
    ('index_map', 'varchar:OB_OLD_MAX_VARCHAR_LENGTH', 'true'),
    ('start_time', 'int'),
    ('status', 'int'),
    ('job_id', 'int')
  ],
)

# __all_restore_job_history (id 160): finished restore jobs; same layout
# as __all_restore_job so rows can be moved over verbatim.
def_table_schema(
  table_name = '__all_restore_job_history',
  table_id = '160',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('job_id', 'int')
  ],
  normal_columns = [
    ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH'),
    ('start_time', 'int'),
    ('backup_uri', 'varchar:2048'),
    ('backup_end_time', 'int'),
    ('recycle_end_time', 'int'),
    ('level', 'int'),
    ('status', 'int')
  ],
)
# __all_time_zone* (ids 161-164): MySQL-compatible time zone tables; the
# capitalized column names mirror MySQL's mysql.time_zone* schema.
def_table_schema(
  table_name = '__all_time_zone',
  table_id = '161',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('Time_zone_id', 'int','false', 'NULL', 'true'),
  ],
  in_tenant_space = False,
  normal_columns = [
    ('Use_leap_seconds', 'varchar:8','false', 'N'),
    ('Version', 'int', 'true'),
  ],
)

# Maps symbolic zone names (e.g. 'Asia/Shanghai') to time zone ids.
def_table_schema(
  table_name = '__all_time_zone_name',
  table_id = '162',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('Name', 'varchar:64', 'false')
  ],
  in_tenant_space = False,
  normal_columns = [
    ('Time_zone_id', 'int', 'false'),
    ('Version', 'int', 'true'),
  ],
)

# DST transition instants for each zone.
def_table_schema(
  table_name = '__all_time_zone_transition',
  table_id = '163',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('Time_zone_id', 'int', 'false'),
    ('Transition_time', 'int', 'false'),
  ],
  in_tenant_space = False,
  normal_columns = [
    ('Transition_type_id', 'int', 'false'),
    ('Version', 'int', 'true'),
  ],
)

# Offset/DST/abbreviation details for each transition type.
def_table_schema(
  table_name = '__all_time_zone_transition_type',
  table_id = '164',
  table_type = 'SYSTEM_TABLE',
  gm_columns = [],
  rowkey_columns = [
    ('Time_zone_id', 'int', 'false'),
    ('Transition_type_id', 'int', 'false'),
  ],
  in_tenant_space = False,
  normal_columns = [
    ('Offset', 'int', 'false', '0'),
    ('Is_DST', 'int', 'false', '0'),
    ('Abbreviation', 'varchar:8', 'false', ''),
    ('Version', 'int', 'true'),
  ],
)

# __all_ddl_id (id 165): records executed DDL ids per tenant (idempotency
# bookkeeping); private to the cluster and excluded from backup.
def_table_schema(
  table_name = '__all_ddl_id',
  table_id = '165',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int', 'false'),
    ('ddl_id_str', 'varchar:OB_MAX_DDL_ID_STR_LENGTH', 'false'),
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  in_tenant_space = True,
  is_cluster_private = True,
  is_backup_private = True,
  normal_columns = [
    ('ddl_stmt_str', 'longtext'),
  ],
  migrate_data_before_2200 = True,
)
# __all_foreign_key (id 166) + history (id 167): foreign key constraints;
# the flag columns default to the standard ENABLE VALIDATE NORELY state.
all_foreign_key_def = dict(
  table_name = '__all_foreign_key',
  table_id = '166',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('foreign_key_id', 'int'),
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  rs_restart_related = True,
  in_tenant_space = True,
  normal_columns = [
    ('foreign_key_name', 'varchar:OB_MAX_CONSTRAINT_NAME_LENGTH', 'false', ''),
    ('child_table_id', 'int'),
    ('parent_table_id', 'int'),
    ('update_action', 'int'),
    ('delete_action', 'int'),
    ('enable_flag', 'bool', 'false', 'true'),
    ('ref_cst_type', 'int', 'false', '0'),
    ('ref_cst_id', 'int', 'false', '-1'),
    ('validate_flag', 'bool', 'false', 'true'),
    ('rely_flag', 'bool', 'false', 'false'),
  ],
  migrate_data_before_2200 = True,
  columns_with_tenant_id = ['foreign_key_id', 'child_table_id', 'parent_table_id'],
)
def_table_schema(**all_foreign_key_def)
def_table_schema(**gen_history_table_def(167, all_foreign_key_def))

# __all_foreign_key_column (id 168) + history (id 169): the child/parent
# column pairs of each foreign key, ordered by 'position'.
all_foreign_key_column_def = dict(
  table_name = '__all_foreign_key_column',
  table_id = '168',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('foreign_key_id', 'int'),
    ('child_column_id', 'int'),
    ('parent_column_id', 'int'),
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  rs_restart_related = True,
  in_tenant_space = True,
  normal_columns = [
    ('position', 'int', 'false', '0'),
  ],
  migrate_data_before_2200 = True,
  columns_with_tenant_id = ['foreign_key_id'],
)
def_table_schema(**all_foreign_key_column_def)
def_table_schema(**gen_history_table_def(169, all_foreign_key_column_def))

# __all_synonym (id 180) + history (id 181): schema objects aliasing another
# object, possibly in a different database.
all_synonym_def = dict(
  table_name = '__all_synonym',
  table_id = '180',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('synonym_id', 'int'),
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  rs_restart_related = True,
  in_tenant_space = True,
  normal_columns = [
    ('database_id', 'int'),
    ('schema_version', 'int'),
    ('synonym_name', 'varchar:OB_MAX_SYNONYM_NAME_LENGTH', 'false', ''),
    ('object_name', 'varchar:OB_MAX_SYNONYM_NAME_LENGTH', 'false', ''),
    ('object_database_id', 'int'),
  ],
  migrate_data_before_2200 = True,
  columns_with_tenant_id = ['synonym_id', 'database_id', 'object_database_id'],
)
def_table_schema(**all_synonym_def)
def_table_schema(**gen_history_table_def(181, all_synonym_def))

# __all_sequence_v2 (id 182): auto-increment sequence values per column.
def_table_schema(
  table_name = '__all_sequence_v2',
  table_id = '182',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('sequence_key', 'int'),
    ('column_id', 'int'),
  ],
  in_tenant_space = True,
  normal_columns = [
    ('tenant_id', 'int'),
    ('sequence_name', 'varchar:OB_MAX_SEQUENCE_NAME_LENGTH', 'true'),
    ('sequence_value', 'uint', 'true'),
    ('sync_value', 'uint'),
  ],
  columns_with_tenant_id = ['sequence_key'],
)
# __all_tenant_meta_table (id 183): per-tenant replica metadata (location,
# role, checksums, restore/rebuild state); cluster-private, not backed up.
def_table_schema(
  table_name = '__all_tenant_meta_table',
  table_id = '183',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('table_id', 'int'),
    ('partition_id', 'int'),
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH'),
    ('svr_port', 'int'),
  ],
  rs_restart_related = True,
  in_tenant_space = True,
  is_cluster_private = True,
  is_backup_private = True,
  normal_columns = [
    ('sql_port', 'int'),
    ('unit_id', 'int'),
    ('partition_cnt', 'int'),
    ('zone', 'varchar:MAX_ZONE_LENGTH'),
    ('role', 'int'),
    ('member_list', 'varchar:MAX_MEMBER_LIST_LENGTH'),
    ('row_count', 'int'),
    ('data_size', 'int'),
    ('data_version', 'int'),
    ('data_checksum', 'int'),
    ('row_checksum', 'int'),
    ('column_checksum', 'varchar:COLUMN_CHECKSUM_LENGTH'),
    ('is_original_leader', 'int', 'false', '0'),
    ('is_previous_leader', 'int', 'false', '0'),
    ('create_time', 'int'),
    ('rebuild', 'int', 'false', '0'),
    ('replica_type', 'int', 'false', '0'),
    ('required_size', 'int', 'false', '0'),
    ('status', 'varchar:MAX_REPLICA_STATUS_LENGTH', 'false', 'REPLICA_STATUS_NORMAL'),
    ('is_restore', 'int', 'false', '0'),
    ('partition_checksum', 'int', 'false', '0'),
    ('quorum', 'int', 'false', '-1'),
    ('fail_list', 'varchar:OB_MAX_FAILLIST_LENGTH', 'false', ''),
    ('recovery_timestamp', 'int', 'false', '0'),
    ('memstore_percent', 'int', 'false', '100'),
    ('data_file_id', 'int', 'false', '0')
  ],
  columns_with_tenant_id = [],
)

# __all_index_wait_transaction_status (id 186): tracks transaction-end
# waiting state of servers during index build.
def_table_schema(
  table_name = '__all_index_wait_transaction_status',
  table_id = '186',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('index_table_id', 'int'),
    ('svr_type', 'int'),
    ('partition_id', 'int'),
  ],
  normal_columns = [
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH'),
    ('svr_port', 'int'),
    ('trans_status', 'int'),
    ('snapshot_version', 'int'),
    ('frozen_version', 'int'),
    ('schema_version', 'int')
  ],
  partition_expr = ['key_v2','tenant_id', '16' ],
  partition_columns = ['tenant_id'],
)

# __all_index_schedule_task (id 187): per-partition index build scheduling.
def_table_schema(
  table_name = '__all_index_schedule_task',
  table_id = '187',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('index_table_id', 'int'),
    ('partition_id', 'int'),
  ],
  normal_columns = [
    ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH'),
    ('svr_port', 'int'),
    ('frozen_version', 'int'),
    ('snapshot_version', 'int'),
  ],
  partition_expr = ['key_v2','tenant_id', '16' ],
  partition_columns = ['tenant_id'],
)

# __all_index_checksum (id 188): column checksums for index build
# verification, keyed down to the individual build task.
def_table_schema(
  table_name = '__all_index_checksum',
  table_id = '188',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('execution_id', 'int'),
    ('tenant_id', 'int'),
    ('table_id', 'int'),
    ('partition_id', 'int'),
    ('column_id', 'int'),
    ('task_id', 'int'),
  ],
  normal_columns = [
    ('checksum', 'int'),
    ('checksum_method', 'int', 'false', 0)
  ],
  partition_expr = ['key_v2','tenant_id', '16' ],
  partition_columns = ['tenant_id'],
)

# __all_routine (id 189) + history (id 190): stored procedures/functions
# (PL routines) with their source body and execution environment.
all_routine_def = dict(
  table_name = '__all_routine',
  table_id = '189',
  table_type = 'SYSTEM_TABLE',
  gm_columns = ['gmt_create', 'gmt_modified'],
  rowkey_columns = [
    ('tenant_id', 'int'),
    ('routine_id', 'int'),
  ],
  partition_expr = ['key_v2', 'tenant_id', 16 ],
  partition_columns = ['tenant_id'],
  rs_restart_related = True,
  in_tenant_space = True,
  normal_columns = [
    ('database_id', 'int', 'false'),
    ('package_id', 'int', 'false'),
    ('routine_name', 'varchar:OB_MAX_ROUTINE_NAME_LENGTH'),
    ('overload', 'int'),
    ('subprogram_id', 'int', 'false'),
    ('schema_version', 'int'),
    ('routine_type', 'int', 'false'),
    ('flag', 'int', 'false'),
    ('owner_id', 'int', 'false'),
    ('priv_user', 'varchar:OB_MAX_USER_NAME_LENGTH_STORE', 'true'),
    ('comp_flag', 'int', 'true'),
    ('exec_env', 'varchar:OB_MAX_PROC_ENV_LENGTH', 'true'),
    ('routine_body', 'longtext', 'true'),
    ('comment', 'varchar:MAX_TENANT_COMMENT_LENGTH', 'true'),
    ('route_sql', 'longtext', 'true')
  ],
  migrate_data_before_2200 = True,
  columns_with_tenant_id = ['routine_id', 'package_id', 'database_id', 'owner_id'],
)
def_table_schema(**all_routine_def)
def_table_schema(**gen_history_table_def(190, all_routine_def))
all_routine_param_def = dict(
table_name = '__all_routine_param',
table_id = '191',
table_type = 'SYSTEM_TABLE',
gm_columns = ['gmt_create', 'gmt_modified'],
rowkey_columns = [
('tenant_id', 'int'),
('routine_id', 'int'),
('sequence', 'int'),
],
partition_expr = ['key_v2', 'tenant_id', 16 ],
partition_columns = ['tenant_id'],
rs_restart_related = True,
in_tenant_space = True,
normal_columns | |
import pygame as pg
import pygame.draw as pgd
import math as m
from random import randint
from colors import *
from menu import *
from textures import *
from game_objects import *
# Initialize the pygame font subsystem once and create the shared HUD font.
# (The original source repeated these two lines verbatim; the duplicate
# init/Font calls were redundant and have been removed.)
pg.font.init()
font = pg.font.Font(None, 25)
class Tank:
'''TANK
Description: object Tank is the main object in the
game. We can absolutely control it: shooting, lea-
ving mines, aiming and moving.
'''
def __init__(self, Rect, color):
self.spawned = False
# Coordinates and useful variables
self.Rect = Rect
self.x = Rect[0][0]
self.y = Rect[0][1]
self.center = [self.x + self.Rect[1][0] // 2,
self.y + self.Rect[1][0] // 2]
# Cannon's parameters (l-lenght, w-width, r-radius)
self.params = [int(self.Rect[1][0] * 1.2 / 2),
int(self.Rect[1][0] * 0.2 / 2),
int(self.Rect[1][0] * 0.3 / 2),
int(self.Rect[1][0] * 0.4 / 2)]
self.cannon_l = self.params[0]
self.cannon_r = self.params[1]
self.cannon_w = self.params[2]
self.tower_r = self.params[3]
# Tank's game start parameters
self.speed = 3
self.hp = 20
self.ammo = 350
self.traps = 10
self.boost_time = 0
self.color = color
# HUD info
self.HUD_pos = (self.x + int(1.2 * self.Rect[1][0]),
self.y)
self.HUD_size = (self.Rect[1][0] * 3,
self.Rect[1][0] * 2.5)
def show_HUD(self, screen):
'''
This function shows us current conditions
of the tank if show_HUD bool is true
Parameters
----------
screen : Surface
Returns
-------
None.
'''
self.HUD_pos = (self.x + int(1.2*self.Rect[1][0]),
self.y)
current_HP = font.render(str(self.hp)+'/20', True, RED)
current_ammo = font.render('Ammo: '+str(self.ammo), True, RED)
current_traps = font.render('Traps: '+str(self.traps), True, RED)
current_boost = font.render('Boost: +', True, RED)
pgd.rect(screen, YELLOW, (self.HUD_pos, self.HUD_size))
screen.blit(current_HP, (self.HUD_pos[0] + 2, self.HUD_pos[1] + 2))
screen.blit(current_ammo, (self.HUD_pos[0] + 2, self.HUD_pos[1] + 20))
screen.blit(current_traps, (self.HUD_pos[0] + 2, self.HUD_pos[1] + 40))
if self.boost_time > 0:
screen.blit(current_boost, (self.HUD_pos[0] + 2, self.HUD_pos[1] + 60))
    def app(self, screen, mouse_pos, fullscreen):
        '''
        That's the main function in this class. It checks
        current tank's conditions and changes variables,
        depending on these conditions.

        Parameters
        ----------
        screen : Surface
            This parameter is used to show the object
            on screen in main module.
        mouse_pos : tuple
            This parameter is used to orient the can-
            non depending on mouse position.
        fullscreen : bool
            This parameter is used to change tank's
            parameters while the game is running.

        Returns
        -------
        None.
        '''
        # Checking if tank has a boost; k is the speed multiplier and the
        # boost timer ticks down once per frame while active.
        if self.boost_time > 0:
            self.boost_time -= 1
            k = 1.5
        else:
            k = 1
        # Checking if there is a fullscreen mode or not
        # (fullscreen doubles the base movement speed).
        if fullscreen:
            self.speed = int(6 * k)
        else:
            self.speed = int(3 * k)
        # Redefining parameters (position may have changed since last frame).
        self.center = [self.x + self.Rect[1][0] // 2,
                       self.y + self.Rect[1][0] // 2]
        self.cannon_l = self.params[0]
        self.cannon_r = self.params[1]
        self.cannon_w = self.params[2]
        self.tower_r = self.params[3]
        # Body is drawn inset by 10% of the Rect on each side.
        tank_pos = (self.x + int(self.Rect[1][0] * 0.1),
                    self.y + int(self.Rect[1][1] * 0.1))
        size = (int(self.Rect[1][0] * 0.8),
                int(self.Rect[1][1] * 0.8))
        # Drawing tank's body
        pgd.rect(screen, self.color, (tank_pos, size))
        pgd.circle(screen, RED, self.center, self.tower_r)
        # This big part of code draws the cannon, depending on mouse_pos.
        # atan(dx / dy) gives the barrel angle measured from the vertical;
        # the three branches avoid division by zero when the mouse is
        # exactly level with the tank centre.
        if (self.center[1] - mouse_pos[1]) > 0:
            # Mouse above the centre.
            arctg = m.atan((mouse_pos[0] - self.center[0]) / (self.center[1] - mouse_pos[1]))
            new_cannon_x = int(self.center[0] + self.cannon_l * m.sin(arctg))
            new_cannon_y = int(self.center[1] - self.cannon_l * m.cos(arctg))
        elif (self.center[1] - mouse_pos[1]) < 0:
            # Mouse below the centre (signs flip so the barrel still tracks it).
            arctg = m.atan((mouse_pos[0] - self.center[0]) / (self.center[1] - mouse_pos[1]))
            new_cannon_x = int(self.center[0] - self.cannon_l * m.sin(arctg))
            new_cannon_y = int(self.center[1] + self.cannon_l * m.cos(arctg))
        else:
            # Mouse exactly level with the centre: point straight left or right.
            if (self.center[0] - mouse_pos[0]) > 0:
                new_cannon_x = self.center[0] - self.cannon_l
                new_cannon_y = self.center[1]
            else:
                new_cannon_x = self.center[0] + self.cannon_l
                new_cannon_y = self.center[1]
        # Drawing the cannon after all calculations
        pgd.line(screen, RED, self.center, (new_cannon_x, new_cannon_y), self.cannon_w)
        pgd.circle(screen, RED, (new_cannon_x, new_cannon_y), self.cannon_r)
def move(self, event, walls, walls_hp, moving):
'''
This function moves the tank, depending on our
pushes on the keyboard.
Parameters
----------
event : Eventlist
This is parameter is used to check, what
key is pressed.
walls : list
This parameter is used to check if tank
collides into a wall.
walls_hp : list
If wall is destroyed, its HP is zero. So
if HP is zero, the tank can move through
it.
moving : list
A list of four bools; each of them can
tell us, if the tank is moving in definite
direction.
Returns
-------
moving : list
Returns the list of four bools after some
calculations.
'''
up = moving[0]
down = moving[1]
right = moving[3]
left = moving[2]
if event.key == pg.K_RIGHT:
right = True
self.x += self.speed
if self.in_wall(walls, walls_hp):
self.x -= self.speed
if event.key == pg.K_LEFT:
left = True
self.x -= self.speed
if self.in_wall(walls, walls_hp):
self.x += self.speed
if event.key == pg.K_UP:
up = True
self.y -= self.speed
if self.in_wall(walls, walls_hp):
self.y += self.speed
if event.key == pg.K_DOWN:
down = True
self.y += self.speed
if self.in_wall(walls, walls_hp):
self.y -= self.speed
self.Rect[0] = [self.x, self.y]
moving = [up, down, left, right]
return moving
def continue_move(self, walls, walls_hp, moving):
'''
This function doesn't return anything, because
this one only checks if definite button is pu-
shed. If it is, the function continues to move
the tank.
Parameters
----------
event : Eventlist
This is parameter is used to check, what
key is pressed.
walls : list
This parameter is used to check if tank
collides into a wall.
walls_hp : list
If wall is destroyed, its HP is zero. So
if HP is zero, the tank can move through
it.
moving : list
A list of four bools; each of them can
tell us, if the tank is moving in definite
direction.
Returns
-------
None.
'''
right = moving[3]
left = moving[2]
down = moving[1]
up = moving[0]
if right:
self.x += self.speed
if self.in_wall(walls, walls_hp):
self.x -= self.speed
if left:
self.x -= self.speed
if self.in_wall(walls, walls_hp):
self.x += self.speed
if down:
self.y += self.speed
if self.in_wall(walls, walls_hp):
self.y -= self.speed
if up:
self.y -= self.speed
if self.in_wall(walls, walls_hp):
self.y += self.speed
self.Rect[0] = [self.x, self.y]
def stop(self, event, moving):
'''
This function checks if a button is
pushed up or not. And if it is, it will
stop the tank's movement.
Parameters
----------
event : list
This parameter is used to check, what
key was pushed up.
moving : list
This parameter is used to get current
moving conditons.
Returns
-------
moving : list
If definite button is pushed up and
others are not, only pushed up one
will be changed to a False statement
and tank's moves in this direction
will stop.
'''
up = moving[0]
down = moving[1]
left = moving[2]
right = moving[3]
if event.key == pg.K_UP:
up = False
if event.key == pg.K_DOWN:
down = False
if event.key == pg.K_LEFT:
left = False
if event.key == pg.K_RIGHT:
right = False
moving = [up, down, left, right]
return moving
def close_walls(self, walls, walls_hp):
'''
This function is very useful for opti-
misation. It defines the closest walls
in relation to the tank
Parameters
----------
walls : list
walls_hp : list
If a wall's HP is zero, the tank
can move through it.
Returns
-------
close_walls : list
'''
block_size = int(self.Rect[1][0] * 0.8)
x, y = self.x, self.y
close_walls = []
for i in range(len(walls)):
# dx and dy are distances in each axis
dx = abs(x - walls[i][0][0][0])
dy = abs(y - walls[i][0][0][1])
wall_hp = walls_hp[i][0]
if dx < 2 * block_size and dy < 2 * block_size and wall_hp != 0:
# If a wall is close to the tank
close_walls.append(walls[i])
return close_walls
def in_wall(self, walls, walls_hp):
'''
This function checks if the tank is in a wall.
Parameters
----------
walls : list
walls_hp : list
These two parameters are so useful
to check collision conditions easily.
Returns
-------
inwall : bool
'''
# Defining the closest walls for optimisation
close_walls = self.close_walls(walls, walls_hp)
def check_in_wall(pos):
'''
This function is created to be the code
more readable. It is unnecessary, but very
convenient to use
Parameters
----------
pos : tuple
The current position.
Returns
-------
check : bool
If current position is in wall, return True.
In other conditions - False.
'''
x = | |
# Repository: rpitonak/BioPAL
# SPDX-FileCopyrightText: BioPAL <<EMAIL>>
# SPDX-License-Identifier: MIT
import os
import logging
import pyproj
import shutil
import numpy as np
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
from scipy.interpolate import interp1d
from skimage.filters.rank import majority as majority_filter
from equi7grid.equi7grid import Equi7Grid
from equi7grid.image2equi7grid import image2equi7grid
from biopal.io.data_io import (
read_data,
read_ecef_grid,
read_auxiliary_multi_channels,
read_auxiliary_single_channel,
tandemx_fnf_read,
tiff_formatter,
readBiomassHeader,
)
from biopal.io.xml_io import raster_info
from biopal.utility.constants import OVERSAMPLING_FACTOR
from biopal.utility.utility_functions import get_equi7_fnf_tiff_names
from arepytools.io.productfolder import ProductFolder
def data_oversample(data, oversampling_factor, raster_info_obj):
    """Oversample SAR data in slant range and/or azimuth when undersampled.

    An axis is oversampled only when its resolution / pixel-spacing ratio is
    below 2 (fewer than two pixels per resolution cell).

    ``data`` may be:
    - a plain array (slope / reference-heights case),
    - a dict of arrays (KZ, off-nadir, ECEF-grid case), or
    - a dict of dicts of arrays (beta0: per acquisition, per polarization);
    the same structure is returned, updated in place for the dict cases.

    Returns a tuple ``(data, num_samples_out, pixel_spacing_slant_rg_out,
    num_lines_out, pixel_spacing_az_out)``.
    """
    # Pixels per resolution cell on each axis (floored).
    rg_ratio = np.floor(raster_info_obj.resolution_m_slant_rg / raster_info_obj.pixel_spacing_slant_rg)
    az_ratio = np.floor(raster_info_obj.resolution_m_az / raster_info_obj.pixel_spacing_az)
    rg_oversampling_flag = False
    az_oversampling_flag = False
    if rg_ratio < 2:
        rg_oversampling_flag = True
        num_samples_out = raster_info_obj.num_samples * oversampling_factor
    else:
        num_samples_out = raster_info_obj.num_samples
        pixel_spacing_slant_rg_out = raster_info_obj.pixel_spacing_slant_rg
    if az_ratio < 2:
        az_oversampling_flag = True
        num_lines_out = raster_info_obj.num_lines * oversampling_factor
    else:
        num_lines_out = raster_info_obj.num_lines
        pixel_spacing_az_out = raster_info_obj.pixel_spacing_az
    if rg_oversampling_flag or az_oversampling_flag:
        logging.info(" Oversampling needed:")
        if not type(data) is dict:
            # slope and reference_heights cases (input data is value matrix)
            if rg_oversampling_flag:
                logging.info(" range oversampling of auxiliary data...")
                data, pixel_spacing_slant_rg_out = data_oversample_core(
                    data, 0, raster_info_obj.pixel_spacing_slant_rg, oversampling_factor
                )
            if az_oversampling_flag:
                logging.info(" azimuth oversampling of auxiliary data...")
                data, pixel_spacing_az_out = data_oversample_core(
                    data, 1, raster_info_obj.pixel_spacing_az, oversampling_factor
                )
        else:
            for data_key, data_extracted in data.items():
                if not type(data_extracted) is dict:
                    # KZ, off_nadir and ECEFGRID case (input data is a dict of values)
                    if rg_oversampling_flag:
                        logging.info(" range oversampling of " + data_key + "...")
                        data[data_key], pixel_spacing_slant_rg_out = data_oversample_core(
                            data_extracted, 0, raster_info_obj.pixel_spacing_slant_rg, oversampling_factor,
                        )
                    if az_oversampling_flag:
                        logging.info(" azimuth oversampling of " + data_key + "...")
                        # Re-read the entry: the range pass above may have
                        # replaced it with the range-oversampled array.
                        data_extracted = data[data_key]
                        data[data_key], pixel_spacing_az_out = data_oversample_core(
                            data_extracted, 1, raster_info_obj.pixel_spacing_az, oversampling_factor
                        )
                else:
                    # Beta0 case (input data is a dict of dict with values)
                    for pol_key, data_pol in data_extracted.items():
                        if rg_oversampling_flag:
                            logging.info(
                                " range oversampling of " + data_key + " , polarization " + pol_key + "..."
                            )
                            (data[data_key][pol_key], pixel_spacing_slant_rg_out,) = data_oversample_core(
                                data_pol, 0, raster_info_obj.pixel_spacing_slant_rg, oversampling_factor,
                            )
                        if az_oversampling_flag:
                            logging.info(
                                " azimuth oversampling of " + data_key + " , polarization " + pol_key + "..."
                            )
                            # Same re-read as above, after the range pass.
                            data_pol = data[data_key][pol_key]
                            data[data_key][pol_key], pixel_spacing_az_out = data_oversample_core(
                                data_pol, 1, raster_info_obj.pixel_spacing_az, oversampling_factor
                            )
        logging.info(" oversampling done.\n")
    return data, num_samples_out, pixel_spacing_slant_rg_out, num_lines_out, pixel_spacing_az_out
def data_oversample_core(data, axis_index, pixel_spacing_in, oversampling_factor):
    """Interpolate ``data`` along one axis, shrinking its pixel spacing.

    Parameters
    ----------
    data : np.ndarray
        Array to oversample; interpolated along ``axis_index``.
    axis_index : int
        Axis along which to interpolate (0 = range, 1 = azimuth here).
    pixel_spacing_in : float
        Input pixel spacing along that axis.
    oversampling_factor : int
        Factor by which the number of samples along the axis grows.

    Returns
    -------
    tuple (np.ndarray, float)
        The oversampled data and the new (smaller) pixel spacing. Output
        positions beyond the last input sample have no support and are
        filled with NaN (``interp1d`` with bounds checking disabled).
    """
    # original size of the axis to be interpolated
    axis_len_in = data.shape[axis_index]
    # pixel spacing after the interpolation
    pixel_spacing_out = pixel_spacing_in / oversampling_factor
    # Build both axes from integer indices: np.arange with a float step and
    # a computed stop (as previously written) can yield one sample more or
    # fewer due to floating-point rounding, which would break the agreement
    # between the interpolation axis and the data length.
    ax_in = np.arange(axis_len_in) * pixel_spacing_in
    ax_out = np.arange(axis_len_in * oversampling_factor) * pixel_spacing_out
    # interpolation of data along axis_index
    interp_fun = interp1d(ax_in, data, axis=axis_index, bounds_error=False)
    data = interp_fun(ax_out)
    return data, pixel_spacing_out
def read_and_oversample_data(L1c_repository, acquisitions_pf_names, enable_resampling):
    """Read every acquisition of a stack, then oversample if needed/enabled.

    Returns (beta0_calibrated, master_id, raster_info_os, raster_info_orig)
    where raster_info_orig describes the geometry before oversampling and
    raster_info_os the geometry actually delivered.
    """
    beta0_calibrated = {}
    for acq_name in acquisitions_pf_names:
        logging.info(" loading " + acq_name + "...")
        # All acquisitions of one stack share spacing, resolution and grid
        # size, so these scalars can simply be overwritten each iteration.
        (
            beta0_calibrated[acq_name],
            num_samples,
            num_lines,
            pixel_spacing_slant_rg,
            pixel_spacing_az,
            carrier_frequency_hz,
            range_bandwidth_hz,
            master_id,
            lines_start_utc,
        ) = read_data(L1c_repository, acq_name)

    ### ALL chains: oversampling data:
    # needed whenever computing covariance or detected data (as notch for AGB)
    # Resolution and sensor metadata come from the first product's header.
    (_, _, _, _, _, _, resolution_m_slant_rg, resolution_m_az, sensor_velocity,) = readBiomassHeader(
        ProductFolder(os.path.join(L1c_repository, acquisitions_pf_names[0]), "r"), 0
    )

    # Snapshot of the pre-oversampling geometry, needed later to resample
    # the auxiliary products consistently.
    raster_info_orig = raster_info(
        num_samples, num_lines,
        pixel_spacing_slant_rg, pixel_spacing_az,
        resolution_m_slant_rg, resolution_m_az,
        carrier_frequency_hz, range_bandwidth_hz,
        lines_start_utc,
    )

    if enable_resampling:
        (beta0_calibrated, num_samples, pixel_spacing_slant_rg, num_lines, pixel_spacing_az,) = data_oversample(
            beta0_calibrated, OVERSAMPLING_FACTOR, raster_info_orig,
        )
    logging.info("all data loaded.\n")

    # filling output structure with the (possibly updated) grid parameters:
    raster_info_os = raster_info(
        num_samples, num_lines,
        pixel_spacing_slant_rg, pixel_spacing_az,
        resolution_m_slant_rg, resolution_m_az,
        carrier_frequency_hz, range_bandwidth_hz,
        lines_start_utc,
    )

    return beta0_calibrated, master_id, raster_info_os, raster_info_orig
def read_and_oversample_aux_data(
    file_names,
    stack_id,
    acquisitions_pf_names,
    enable_resampling,
    raster_info,
    read_ecef=True,
    read_off_nadir=True,
    read_slope=True,
    read_kz=True,
    read_ref_h=True,
    read_dist=True,
    read_average_cov=False,
    read_cal_screens=False,
    read_FH=False,
    read_reference_agb=False,
    read_sys_dec_fun=False,
):
    """Read (and optionally oversample) the auxiliary products of one stack.

    Calls the auxiliary data readers followed by data_oversample (which
    oversamples only when needed and if enabled); only the auxiliaries of
    the current stack_id are read. For TOMO another auxiliary is needed:
    phases to be extracted.

    Arguments:
        file_names: object exposing one path attribute per auxiliary product.
        stack_id: key selecting the current stack in the *_file_names dicts.
        acquisitions_pf_names: product-folder names of the stack acquisitions.
        enable_resampling: when True, each product read is oversampled by
            OVERSAMPLING_FACTOR on the grid described by raster_info.
        raster_info: raster description passed to data_oversample.
        read_*: per-product toggles; products not requested stay None.

    Returns:
        Tuple (ecef_grid, off_nadir_angle_rad, slope, kz, reference_height,
        R, average_covariance, calibration_screens, cal_screens_raster_info,
        forest_height, reference_agb, system_decorr_fun); every entry that
        was not requested, or could not be read, is None.
    """

    def _split_path(aux_path):
        # Auxiliary product paths split into (folder, product-folder name).
        return os.path.dirname(aux_path), os.path.basename(aux_path)

    def _oversample(data):
        # Oversample only data that was actually read, and only if enabled.
        if data is not None and enable_resampling:
            data = data_oversample(data, OVERSAMPLING_FACTOR, raster_info)[0]
        return data

    def _warn_missing(subfolder):
        # Uniform warning for products the geometric library can regenerate.
        logging.warning(
            'Since "'
            + subfolder
            + "/"
            + stack_id
            + '" folder is missing into AuxiliaryProductsFolder, geometric library needs to be called.\n'
        )

    ecef_grid = None
    kz = None
    off_nadir_angle_rad = None
    reference_height = None
    R = None
    slope = None
    average_covariance = None
    calibration_screens = None
    cal_screens_raster_info = None
    forest_height = None
    reference_agb = None
    system_decorr_fun = None

    if read_ecef:
        logging.info("Loading auxiliary data: ECEFGRID...")
        folder_name, pf_name = _split_path(file_names.ECEF_grid_file_names[stack_id])
        ecef_grid = _oversample(read_ecef_grid(folder_name, pf_name))
        if ecef_grid is not None:
            logging.info("...ECEFGRID read.\n")
        else:
            _warn_missing("ECEFGRID")

    if read_kz:
        logging.info("Loading auxiliary data: KZ...")
        folder_name, pf_name = _split_path(file_names.kz_file_names[stack_id])
        kz = _oversample(
            read_auxiliary_multi_channels(folder_name, pf_name, acquisitions_pf_names)
        )
        if kz is not None:
            logging.info("...KZ read.\n")
        else:
            _warn_missing("KZ")

    if read_off_nadir:
        logging.info("Loading auxiliary data: off nadir angles...")
        folder_name, pf_name = _split_path(file_names.off_nadir_angle_file_names[stack_id])
        off_nadir_angle_rad = _oversample(
            read_auxiliary_multi_channels(folder_name, pf_name)
        )
        if off_nadir_angle_rad is not None:
            logging.info("...off nadir angles read.\n")
        else:
            _warn_missing("OffNadirAngles")

    if read_ref_h:
        # not needed, use just for comparison with the estimation
        logging.info("Loading auxiliary data: reference height...")
        folder_name, pf_name = _split_path(file_names.reference_height_file_names[stack_id])
        # it is the dtm in slant_range-azimuth reference
        reference_height = _oversample(
            read_auxiliary_single_channel(folder_name, pf_name)
        )
        if reference_height is not None:
            logging.info("...reference height read.\n")
        else:
            _warn_missing("ReferenceHeight")

    if read_dist:
        logging.info("Loading auxiliary data: slant range distances...")
        folder_name, pf_name = _split_path(file_names.slant_range_distances_file_names[stack_id])
        R = _oversample(read_auxiliary_multi_channels(folder_name, pf_name))
        if R is not None:
            logging.info("...slant range distances read.\n")
        else:
            _warn_missing("SlantRangeDistances")

    if read_slope:
        logging.info("Loading auxiliary data: slope...")
        folder_name, pf_name = _split_path(file_names.slope_file_names[stack_id])
        # Bug fix: the original guard was "not slope.any() is None", which
        # raised AttributeError whenever the slope could not be read
        # (slope is None); the intended check is "slope is not None".
        slope = _oversample(read_auxiliary_single_channel(folder_name, pf_name))
        if slope is not None:
            logging.info("...slope read.\n")
        else:
            _warn_missing("Slopes")

    if read_average_cov:
        logging.info("Loading auxiliary data: Average covariance matrix...")
        folder, pf_name = _split_path(file_names.average_covariance_folder)
        average_covariance = _oversample(
            read_auxiliary_multi_channels(folder, pf_name, acquisitions_pf_names)
        )
        if average_covariance is not None:
            logging.info("... Average covariance matrix read.\n")

    if read_cal_screens:
        logging.info("Loading auxiliary data: Calibration Screens...")
        folder, pf_name = _split_path(file_names.calibration_screens_file_names[stack_id])
        # The screens come with their own raster description.
        calibration_screens, cal_screens_raster_info = read_auxiliary_multi_channels(
            folder, pf_name, acquisitions_pf_names, read_raster_info=True
        )
        calibration_screens = _oversample(calibration_screens)
        if calibration_screens is not None:
            logging.info("... Calibration Screens read.\n")

    if read_FH:
        logging.info("Loading auxiliary data: forest estimated height..")
        folder, pf_name = _split_path(file_names.forest_height_folder)
        forest_height = read_auxiliary_single_channel(folder, pf_name, acquisitions_pf_names)
        if forest_height is not None and enable_resampling:
            forest_height = data_oversample(forest_height, OVERSAMPLING_FACTOR, raster_info)[0]
            logging.info("...done.\n")
        if forest_height is not None:
            logging.info("...forest estimated height read.\n")

    if read_reference_agb:
        logging.info("Loading auxiliary data: reference agb..")
        folder, pf_name = _split_path(file_names.reference_agb_folder)
        reference_agb = _oversample(
            read_auxiliary_single_channel(folder, pf_name, acquisitions_pf_names)
        )
        if reference_agb is not None:
            logging.info("...reference agb read.\n")

    if read_sys_dec_fun:
        logging.info("Loading auxiliary data: system decorrelation function...")
        folder, pf_name = _split_path(file_names.system_decorrelation_fun_folder)
        system_decorr_fun = _oversample(
            read_auxiliary_single_channel(folder, pf_name, acquisitions_pf_names)
        )
        if system_decorr_fun is not None:
            logging.info("...system decorrelation function read.\n")

    return (
        ecef_grid,
        off_nadir_angle_rad,
        slope,
        kz,
        reference_height,
        R,
        average_covariance,
        calibration_screens,
        cal_screens_raster_info,
        forest_height,
        reference_agb,
        system_decorr_fun,
    )
def get_equi7_tiff_names(directory):
    """Collect the full paths of every file in an EQUI7 directory tree.

    The expected layout is ``directory/tile/subtile/file.tif``; all files
    found at the third level are returned.
    """
    tiff_paths = []
    for tile in os.listdir(directory):
        tile_dir = os.path.join(directory, tile)
        for subtile in os.listdir(tile_dir):
            subtile_dir = os.path.join(tile_dir, subtile)
            for fname in os.listdir(subtile_dir):
                tiff_paths.append(os.path.join(subtile_dir, fname))
    return tiff_paths
def fnf_tandemx_load_filter_equi7format(
forest_mask_catalogue_folder,
e7g,
product_resolution,
output_folder,
gdal_path,
geographic_boundaries,
time_tag_mjd_initial,
| |
""" Creates training, validation, and test data. """
import math
from os import path
import numpy as np
from numpy.lib import recfunctions
import torch
import defaults
import features
import models
import utils
def get_dataloaders(args, net):
    """
    Builds training, validation, and test sets, which are returned as
    dataloaders.
    """
    out_dir = args["out_dir"]
    data_suffix = utils.args_to_str(
        args, order=sorted(defaults.DEFAULTS.keys()), which="data")
    dat_flp = path.join(out_dir, defaults.DATA_PREFIX + data_suffix + ".npz")
    scl_prms_flp = path.join(out_dir, "scale_params.json")
    # Reuse cached data only when both the data and the scaling parameters
    # exist, because the resulting model is useless without the proper
    # scaling parameters.
    have_cache = (
        not args["regen_data"]
        and path.exists(dat_flp)
        and path.exists(scl_prms_flp))
    if have_cache:
        print("Found existing data!")
        trn, val, tst = utils.load_parsed_data(dat_flp)
    else:
        print("Regenerating data...")
        trn, val, tst, scl_prms = get_bulk_data(args, net)
        # Persist the processed data so it need not be processed again, and
        # always persist the scaling parameters alongside it.
        utils.save_parsed_data(dat_flp, trn, val, tst)
        utils.save_scl_prms(out_dir, scl_prms)
    return create_dataloaders(args, trn, val, tst)
def get_bulk_data(args, net):
    """
    Loads bulk training, validation, and test data splits from disk.

    Returns a tuple of the form:
        (training split, validation split, test split, scaling parameters)
    where each split is a tuple (dat_in, dat_out, dat_extra).
    """
    data_dir = args["data_dir"]
    sample_frac = args["sample_percent"] / 100
    # Bug fix: get_split() returns a tuple, but trn[0] is overwritten with
    # the scaled features below, which requires a mutable sequence. Convert
    # each split to a list.
    trn, val, tst = [
        list(get_split(data_dir, name, sample_frac, net))
        for name in ["train", "val", "test"]]
    # Validate scaling groups (index 3 of each split).
    assert trn[3] == val[3] == tst[3], "Scaling groups do not agree."
    if isinstance(net, models.HistGbdtSklearnWrapper):
        # The HistGbdtSklearn model does not require feature scaling because it
        # is a decision tree.
        scl_prms = np.zeros((0,))
    else:
        # Scale input features. Do this here instead of in process_exp() because
        # all of the features must be scaled using the same parameters. trn[0]
        # is the training input data. trn[3] is the scaling groups.
        trn[0], scl_prms = scale_fets(trn[0], trn[3], args["standardize"])
    # Return tuples (without the scaling groups) to match the original
    # interface.
    return tuple(trn[:3]), tuple(val[:3]), tuple(tst[:3]), scl_prms
def get_split(data_dir, name, sample_frac, net):
    """Construct the named split by merging its on-disk subsplits."""
    parts = utils.load_subsplits(data_dir, name)
    # Optionally keep only a fraction of each subsplit; the test split is
    # always used in full.
    if name in {"train", "val"} and sample_frac < 1:
        parts = [
            part[:math.ceil(part.shape[0] * sample_frac)]
            for part in parts]
    merged = np.concatenate(parts)
    # Shuffle the training split when it was assembled from several pieces.
    if name == "train" and len(parts) > 1:
        np.random.default_rng().shuffle(merged)
    return extract_fets(merged, name, net)
def extract_fets(dat, split_name, net):
    """
    Extracts net's input and output features from dat. Returns a tuple of
    the form:
        (dat_in, dat_out, dat_extra, scaling groups).
    """
    # Split each data matrix into two separate matrices: one with the input
    # features only and one with the output features only. The names of the
    # columns correspond to the feature names in in_spc and out_spc.
    assert net.in_spc, f"{net.name}: Empty in spec."
    num_out_fets = len(net.out_spc)
    # This is not a strict requirement from a modeling point of view,
    # but is assumed to make data processing easier.
    assert num_out_fets == 1, \
        (f"{net.name}: Out spec must contain a single feature, but actually "
         f"contains: {net.out_spc}")
    # Remove samples where the ground truth output is unknown.
    len_before = dat.shape[0]
    # NOTE(review): the trailing "[0]" on this filtering expression looks
    # suspicious (it indexes the filtered result) — confirm against the
    # structured-array semantics intended here. Left unchanged.
    dat = dat[dat[list(net.out_spc)] != -1][0]
    # Bug fix: the original computed dat.shape[0] - len_before, which is
    # always <= 0 after filtering, so the removal message could never print.
    removed = len_before - dat.shape[0]
    if removed > 0:
        print(
            f"Removed {removed} rows with unknown out_spc from split "
            f"\"{split_name}\".")
    dat_in = recfunctions.repack_fields(dat[list(net.in_spc)])
    dat_out = recfunctions.repack_fields(dat[list(net.out_spc)])
    # Create a structured array to hold extra data that will not be used as
    # features but may be needed by the training/testing process.
    dtype_extra = (
        # The "raw" entry is the unconverted out_spc.
        [("raw",
          [typ for typ in dat.dtype.descr if typ[0] in net.out_spc][0][1])] +
        [typ for typ in dat.dtype.descr if typ[0] in features.EXTRA_FETS])
    dat_extra = np.empty(shape=dat.shape, dtype=dtype_extra)
    dat_extra["raw"] = dat_out
    for typ in features.EXTRA_FETS:
        dat_extra[typ] = dat[typ]
    dat_extra = recfunctions.repack_fields(dat_extra)
    is_dt = isinstance(net, models.HistGbdtSklearnWrapper)
    if not is_dt:
        # Verify that there are no NaNs or Infs in the data.
        for fet in dat_in.dtype.names:
            assert (not (
                np.isnan(dat_in[fet]).any() or
                np.isinf(dat_in[fet]).any())), \
                ("Warning: NaNs or Infs in input feature for split "
                 f"\"{split_name}\": {fet}")
        assert (not (
            np.isnan(dat_out[features.LABEL_FET]).any() or
            np.isinf(dat_out[features.LABEL_FET]).any())), \
            f"Warning: NaNs or Infs in ground truth for split \"{split_name}\"."
    if dat_in.shape[0] > 0:
        # Convert all instances of -1 (feature value unknown) to either the
        # mean for that feature (regular models) or NaN (decision trees,
        # which handle missing values natively).
        bad_fets = []
        for fet in dat_in.dtype.names:
            invalid = dat_in[fet] == -1
            if invalid.all():
                bad_fets.append(fet)
                continue
            dat_in[fet][invalid] = (
                float("NaN") if is_dt
                else np.mean(dat_in[fet][np.logical_not(invalid)]))
            assert (dat_in[fet] != -1).all(), \
                f"Found \"-1\" in split \"{split_name}\" feature: {fet}"
        assert not bad_fets, \
            (f"Features in split \"{split_name}\" contain only \"-1\" "
             f"({len(bad_fets)}): {bad_fets}")
    # Convert output features to class labels.
    dat_out = net.convert_to_class(dat_out)
    # Verify data.
    assert dat_in.shape[0] == dat_out.shape[0], \
        "Input and output should have the same number of rows."
    # Find the unique classes in the output features and make sure that they
    # are properly formed. Assumes that dat_out is a structured numpy array
    # containing a single column specified by features.LABEL_FET.
    for cls in np.unique(dat_out[features.LABEL_FET]).tolist():
        assert 0 <= cls < net.num_clss, f"Invalid class: {cls}"
    # Transform the data as required by this specific model.
    # TODO: Refactor this to be compatible with bulk data splits.
    # dat_in, dat_out, dat_extra, scl_grps = net.modify_data(
    #     exp, dat_in, dat_out, dat_extra, sequential=sequential)
    scl_grps = list(range(len(dat_in.dtype.names)))
    return dat_in, dat_out, dat_extra, scl_grps
def scale_fets(dat, scl_grps, standardize=False):
"""
Returns a copy of dat with the columns normalized. If standardize
is True, then the scaling groups are normalized to a mean of 0 and
a variance of 1. If standardize is False, then the scaling groups
are normalized to the range [0, 1]. Also returns an array of shape
(number of unique scaling groups, 2) where row i contains the scaling
parameters of column i in dat. If standardize is True, then the
scaling parameters are the mean and standard deviation of that
column's scaling group. If standardize is False, then the scaling
parameters are the min and max of that column's scaling group.
"""
fets = dat.dtype.names
assert fets is not None, \
f"The provided array is not structured. dtype: {dat.dtype.descr}"
assert len(scl_grps) == len(fets), \
f"Invalid scaling groups ({scl_grps}) for dtype ({dat.dtype.descr})!"
# Determine the unique scaling groups.
scl_grps_unique = set(scl_grps)
# Create an empty array to hold the min and max values (i.e.,
# scaling parameters) for each scaling group.
scl_grps_prms = np.empty((len(scl_grps_unique), 2), dtype="float64")
# Function to reduce a structured array.
rdc = (lambda fnc, arr:
fnc(np.array(
[fnc(arr[fet]) for fet in arr.dtype.names if fet != ""])))
# Determine the min and the max of each scaling group.
for scl_grp in scl_grps_unique:
# Determine the features in this scaling group.
scl_grp_fets = [fet for fet_idx, fet in enumerate(fets)
if scl_grps[fet_idx] == scl_grp]
# Extract the columns corresponding to this scaling group.
fet_values = dat[scl_grp_fets]
# Record the min and max of these columns.
scl_grps_prms[scl_grp] = [
np.mean(utils.clean(fet_values))
if standardize else rdc(np.min, fet_values),
np.std(utils.clean(fet_values))
if standardize else rdc(np.max, fet_values)
]
# Create an empty array to hold the min and max values (i.e.,
# scaling parameters) for each column (i.e., feature).
scl_prms = np.empty((len(fets), 2), dtype="float64")
# Create an empty array to hold the rescaled features.
new = np.empty(dat.shape, dtype=dat.dtype)
# Rescale each feature based on its scaling group's min and max.
for fet_idx, fet in enumerate(fets):
# Look up the parameters for this feature's scaling group.
prm_1, prm_2 = scl_grps_prms[scl_grps[fet_idx]]
# Store this min and max in the list of per-column scaling parameters.
scl_prms[fet_idx] = np.array([prm_1, prm_2])
fet_values = dat[fet]
if standardize:
# prm_1 is the mean and prm_2 is the standard deviation.
scaled = (
# Handle the rare case where the standard deviation is
# 0 (meaning that all of the feature values are the
# same), in which case return an array of zeros.
np.zeros(
fet_values.shape, dtype=fet_values.dtype) if prm_2 == 0
else (fet_values - | |
'''
Module : Main
Description : The main entry point for the program.
Copyright : (c) <NAME>, 22 Jul 2020
License : BSD
Maintainer : <NAME>
Portability : POSIX
The program reads one or more input FASTA files and converts them to emoji.
'''
from argparse import ArgumentParser
from math import floor
import sys
import logging
import pkg_resources
from Bio import SeqIO
from fastqe import fastqe_map as emaps
from pyemojify import emojify
from Bio.SeqIO import QualityIO
import binascii
import gzip
from . import biomojify_map
import ast
import vcf
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
# Process exit codes: distinguish I/O failures, bad command lines and
# malformed FASTA input for callers inspecting the exit status.
EXIT_FILE_IO_ERROR = 1
EXIT_COMMAND_LINE_ERROR = 2
EXIT_FASTA_FILE_ERROR = 3
# By default no minimum-length filtering is applied to sequences.
DEFAULT_MIN_LEN = 0
DEFAULT_VERBOSE = False
PROGRAM_NAME = "biomojify"
# #PyCharm testing command line processing
# sys.argv = [
#     __file__,
#     # '--bin',
#     # '--long','3000',
#     # # '--output', 'testouput.txt',
#     'fastq',
#     '../functional_tests/test_data/test.fastq',
#     # 'test/test.fastq',
#     # 'test/test_wiki.fq',
#     ]
# Default nucleotide -> emoji mapping used when no --custom dictionary is
# supplied. Values are pyemojify aliases, except 'A' which is a literal
# emoji character.
local_seq_emoji_map = {
    'A': '🥑', # avocado not in pyemojify, trying a failthrough which works for the noemoji mode
    'C': ':corn:',
    'T': ':tomato:',
    'G': ':grapes:',
    'N': ':question:'
}
# Resolve the installed package version; fall back to a placeholder when
# the package metadata is unavailable (e.g. running from a source tree).
try:
    PROGRAM_VERSION = pkg_resources.require(PROGRAM_NAME)[0].version
except pkg_resources.DistributionNotFound:
    PROGRAM_VERSION = "undefined_version"
def exit_with_error(message, exit_status):
    '''Log an error message, echo it to stderr prefixed by the program name
    and 'ERROR', then terminate the program with the supplied exit status.

    Arguments:
        message: an error message as a string.
        exit_status: a positive integer representing the exit status of the
            program.
    '''
    logging.error(message)
    print(f"{PROGRAM_NAME} ERROR: {message}, exiting", file=sys.stderr)
    sys.exit(exit_status)
def parse_args(error=False):
    '''Parse command line arguments.

    Returns Options object with command line argument values as attributes.
    Will exit the program on a command line error.

    Arguments:
        error: when True, only print the help text and return None instead
            of parsing (used to report an invalid command line).
    '''
    description = 'Read one or more FASTA or FASTQ files, and convert them to emoji.😀'
    parser = ArgumentParser(description=description)
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + PROGRAM_VERSION)
    parser.add_argument('--log',
                        metavar='LOG_FILE',
                        type=str,
                        help='record program progress in LOG_FILE')
    # One sub-command per supported file type; each sub-parser stores its
    # handler function in the "func" default.
    subparsers = parser.add_subparsers(help='sub-command help')
    # FASTA processing
    parser_fasta = subparsers.add_parser('fasta', help='fasta --help')
    parser_fasta.add_argument(
        '--minlen',
        metavar='N',
        type=int,
        default=DEFAULT_MIN_LEN,
        help='Minimum length sequence to include in stats (default {})'.format(
            DEFAULT_MIN_LEN))
    parser_fasta.add_argument('--custom',
                              metavar='CUSTOM_DICT',
                              type=str,
                              help='use a mapping of custom emoji to nucleotides in CUSTOM_DICT (' + emojify(":yellow_heart:") + emojify(
                                  ":blue_heart:") + ')')
    parser_fasta.add_argument('fasta_files',
                              nargs='*',
                              metavar='FASTA_FILE',
                              type=str,
                              help='Input FASTA files')
    parser_fasta.set_defaults(func=convert_fasta)
    # FASTA protein processing
    parser_fasta_protein = subparsers.add_parser('fasta_protein', help='fasta_protein --help')
    parser_fasta_protein.add_argument(
        '--minlen',
        metavar='N',
        type=int,
        default=DEFAULT_MIN_LEN,
        help='Minimum length sequence to include in stats (default {})'.format(
            DEFAULT_MIN_LEN))
    parser_fasta_protein.add_argument('--custom',
                                      metavar='CUSTOM_DICT',
                                      type=str,
                                      help='use a mapping of custom emoji to proteins in CUSTOM_DICT (' + emojify(":yellow_heart:") + emojify(
                                          ":blue_heart:") + ')')
    parser_fasta_protein.add_argument('fasta_files',
                                      nargs='*',
                                      metavar='FASTA_FILE',
                                      type=str,
                                      help='Input FASTA files')
    parser_fasta_protein.set_defaults(func=convert_fasta_protein)
    #TODO add FASTQ parser and convert both sequence and quality
    # FASTQ processing
    parser_fastq = subparsers.add_parser('fastq', help='fastq --help')
    parser_fastq.add_argument(
        '--minlen',
        metavar='N',
        type=int,
        default=DEFAULT_MIN_LEN,
        help='Minimum length sequence to convert (default {})'.format(
            DEFAULT_MIN_LEN))
    parser_fastq.add_argument('--bin',
                              action='store_true',
                              help='use binned scores (' + emojify(":no_entry_sign:") + emojify(":skull:")
                                   + emojify(":poop:") + emojify(":warning:") + " " + emojify(":smile:") + emojify(
                                  ":laughing:") + emojify(":sunglasses:") + emojify(":heart_eyes:") + ")")
    parser_fastq.add_argument('--custom',
                              metavar='CUSTOM_DICT',
                              type=str,
                              help='use a mapping of custom emoji to nucleotides in CUSTOM_DICT (' + emojify(":yellow_heart:") + emojify(
                                  ":blue_heart:") + ')')
    parser_fastq.add_argument('--custom_qual',
                              metavar='CUSTOM_DICT',
                              type=str,
                              help='use a mapping of custom emoji to quality scores in CUSTOM_DICT (' + emojify(":moneybag:") + emojify(
                                  ":snake:") + ')')
    parser_fastq.add_argument('fastq_files',
                              nargs='*',
                              metavar='FASTQ_FILE',
                              type=str,
                              help='Input FASTQ files')
    parser_fastq.set_defaults(func=convert_fastq)
    # file processing template
    parser_vcf = subparsers.add_parser('vcf', help='vcf --help')
    parser_vcf.add_argument('vcf_files',
                            nargs='*',
                            metavar='VCF_FILE',
                            type=str,
                            help='(experimental) Input VCF files')
    parser_vcf.set_defaults(func=convert_vcf)
    #
    # # file processing template
    # parser_filetype = subparsers.add_parser('filetype', help='filetype help')
    # parser_filetype.add_argument(
    #     '--minlen',
    #     metavar='N',
    #     type=int,
    #     default=DEFAULT_MIN_LEN,
    #     help='Minimum length sequence to include in stats (default {})'.format(
    #         DEFAULT_MIN_LEN))
    # parser_filetype.add_argument('--custom',
    #     metavar='CUSTOM_DICT',
    #     type=str,
    #     help='use a mapping of custom emoji to proteins in CUSTOM_DICT (' + emojify(":yellow_heart:") + emojify(
    #         ":blue_heart:") + ')')
    # parser_filetype.add_argument('fasta_files',
    #     nargs='*',
    #     metavar='FASTA_FILE',
    #     type=str,
    #     help='Input FASTA files')
    # parser_filetype.set_defaults(func=convert_filetype)
    if(error):
        # Caller detected a bad command line: show help, return None.
        parser.print_help()
        return
    else:
        return parser.parse_args()
class FastaStats(object):
    '''Compute various statistics for a FASTA file:

    num_seqs: the number of sequences in the file satisfying the minimum
        length requirement (minlen_threshold).
    num_bases: the total length of all the counted sequences.
    min_len: the minimum length of the counted sequences.
    max_len: the maximum length of the counted sequences.
    average: the average length of the counted sequences rounded down
        to an integer.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 num_seqs=None,
                 num_bases=None,
                 min_len=None,
                 max_len=None,
                 average=None):
        "Build an empty FastaStats object"
        self.num_seqs = num_seqs
        self.num_bases = num_bases
        self.min_len = min_len
        self.max_len = max_len
        self.average = average

    def __eq__(self, other):
        "Two FastaStats objects are equal iff their attributes are equal"
        return type(self) is type(other) and self.__dict__ == other.__dict__

    def __repr__(self):
        "Generate a printable representation of a FastaStats object"
        return (f"FastaStats(num_seqs={self.num_seqs}, "
                f"num_bases={self.num_bases}, min_len={self.min_len}, "
                f"max_len={self.max_len}, average={self.average})")

    def from_file(self, fasta_file, minlen_threshold=DEFAULT_MIN_LEN):
        '''Compute a FastaStats object from an input FASTA file.

        Arguments:
            fasta_file: an open file object for the FASTA file
            minlen_threshold: the minimum length sequence to consider in
                computing the statistics. Shorter sequences are ignored.
        Result:
            A FastaStats object (self, updated in place)
        '''
        num_seqs = 0
        num_bases = 0
        min_len = None
        max_len = None
        for record in SeqIO.parse(fasta_file, "fasta"):
            length = len(record)
            if length < minlen_threshold:
                continue
            if num_seqs == 0:
                # First counted sequence initialises both extremes.
                min_len = max_len = length
            else:
                min_len = min(min_len, length)
                max_len = max(max_len, length)
            num_seqs += 1
            num_bases += length
        self.average = (int(floor(float(num_bases) / num_seqs))
                        if num_seqs > 0 else None)
        self.num_seqs = num_seqs
        self.num_bases = num_bases
        self.min_len = min_len
        self.max_len = max_len
        return self

    def pretty(self, filename):
        '''Generate a pretty printable representation of a FastaStats object
        suitable for output of the program. The output is a tab-delimited
        string containing the filename of the input FASTA file followed by
        the attributes of the object. If 0 sequences were read then num_seqs
        and num_bases are output as 0, and min_len, average and max_len are
        output as a dash "-".

        Arguments:
            filename: the name of the input FASTA file
        Result:
            A string suitable for pretty printed output
        '''
        if self.num_seqs > 0:
            fields = [str(value) for value in (
                self.num_seqs, self.num_bases, self.min_len,
                self.average, self.max_len)]
        else:
            fields = ["0", "0", "-", "-", "-"]
        return "\t".join([filename] + fields)
def convert_vcf(options):
    '''Convert VCF file(s) to emoji and print one tab-delimited row per
    record. Reads from the files in options.vcf_files, or from stdin
    (optionally gzip-compressed) when none are given.
    '''
    def _print_records(vcf_file):
        # Shared per-record formatting — previously duplicated verbatim for
        # the file and stdin code paths.
        for record in vcf.Reader(vcf_file):
            print("\t".join([str(a) for a in [
                record.CHROM,
                record.POS,
                record.ID,
                "".join([a for a in map(get_vcf_emoji, record.REF)]),
                ",".join([get_vcf_emoji(str(rec)) for rec in record.ALT]),
                get_vcf_qual(record.QUAL),
                get_vcf_filter(record.FILTER),
                # record.INFO,
                # record.FORMAT,
                # record.samples
            ]]))

    print("\t".join(["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER"]))
    if options.vcf_files:
        for vcf_filename in options.vcf_files:
            logging.info("Processing VCF file from %s", vcf_filename)
            try:
                if vcf_filename.endswith(".gz"):
                    vcf_file = gzip.open(vcf_filename, 'rt')
                else:
                    vcf_file = open(vcf_filename)
            except IOError as exception:
                exit_with_error(str(exception), EXIT_FILE_IO_ERROR)
            else:
                with vcf_file:
                    _print_records(vcf_file)
    else:
        logging.info("Processing vcf file from stdin")
        # Sniff the gzip magic number (1f 8b) without consuming stdin.
        if (binascii.hexlify(sys.stdin.buffer.peek(1)[:2]) == b'1f8b'):
            stdin_file = gzip.open(sys.stdin.buffer, 'rt')
        else:
            stdin_file = sys.stdin
        with stdin_file as vcf_file:
            _print_records(vcf_file)
def get_vcf_emoji(orig_c, map_dict=local_seq_emoji_map, default=":heart_eyes:"):
    '''Map each character of a VCF allele string to an emoji.

    Arguments:
        orig_c: allele text; the literal string "None" (PyVCF's rendering
            of a missing allele) maps to a cross mark.
        map_dict: character -> emoji-alias mapping.
        default: emoji alias used for characters absent from map_dict.
    '''
    if (orig_c == "None"):
        return(emojify((":x:")))
    # Bug fix: the *default* parameter was accepted but ignored (the alias
    # ":heart_eyes:" was hard-coded in the lookup). Honour it instead.
    return "".join([emojify(map_dict.get(e, default)) for e in orig_c])
def get_vcf_qual(quality):
    '''Map a VCF QUAL value to an emoji string.

    A missing QUAL (None) maps to a question mark. Otherwise the value is
    round-tripped through a one-base SeqRecord so Biopython renders it as a
    Sanger/PHRED character, which is then looked up in the binned FASTQE
    quality map.

    NOTE(review): relies on the private Biopython helper
    QualityIO._get_sanger_quality_str — confirm it still exists before
    upgrading Biopython.
    '''
    # Hack to do this quickly - use same trick as FASTQE and convert from value to a PHRED encoding then map
    #TODO make this better
    #
    if quality == None:
        bioemojify_qual = emojify(":question:")
    else:
        # Single dummy base carrying the quality value to be encoded.
        fake_seq = 'N'
        record_qual = SeqRecord(Seq(fake_seq), id="test", name="lookup",
                                description="example",
                                letter_annotations={'phred_quality': [int(quality)]})
        mapping_dict_qual_use = emaps.fastq_emoji_map_binned
        original_qual = QualityIO._get_sanger_quality_str(record_qual)
        #print(original_qual)
        # Unmapped quality characters fall back to ":heart_eyes:".
        bioemojify_qual = "".join([emojify(mapping_dict_qual_use.get(s, ":heart_eyes:")) for s in original_qual])
    return(bioemojify_qual)
def get_vcf_filter(filter_val):
    '''Map a VCF FILTER value to an emoji string: a question mark when the
    field is missing (None), a thumbs-up when the record passed all filters
    (empty list), otherwise a thumbs-down followed by the failed filter
    names.
    '''
    if filter_val == None:
        return emojify(":question:")
    if filter_val == []:
        return emojify(":thumbsup:")
    return emojify(":thumbsdown:") + ":" + ",".join(filter_val)
#
# def convert_filetype(options):
# return
def convert_fasta_protein(options):
    '''Convert protein FASTA input to emoji by delegating to convert_fasta()
    with the protein residue mapping instead of the nucleotide default.'''
    convert_fasta(options, mapping_dict=biomojify_map.prot_seq_emoji_map)
def convert_fasta(options, mapping_dict=local_seq_emoji_map):
'''Convert FASTA file to emoji. If no FASTA files are specified on the command line then
read from the standard input (stdin).
Arguments:
options: the command line options of the program
Result:
None
'''
if options.custom:
with open(options.custom) as f:
mapping_dict_use =ast.literal_eval(f.read())
else:
mapping_dict_use=mapping_dict
if options.fasta_files:
for fasta_filename in options.fasta_files:
logging.info("Processing FASTA file from %s", fasta_filename)
try:
if fasta_filename.endswith(".gz"):
fasta_file = gzip.open(fasta_filename, 'rt')
else:
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
import os
import re
import dask.array as da
import numpy as np
import bokeh.palettes as pl
import colorcet as cc
import xarray as xr
import daskms as xm
from time import time
logger = logging.getLogger(__name__)
#######################################################################
####################### Computation Functions ##########################
def calc_amplitude(ydata):
    """Convert complex data to amplitude (absolute value)

    Parameters
    ----------
    ydata : :obj:`xarray.DataArray`
        y axis data to be processed

    Returns
    -------
    :obj:`xarray.DataArray`
        :attr:`ydata` converted to an amplitude
    """
    logger.debug("Calculating amplitude data")
    return da.absolute(ydata)
def calc_imaginary(ydata):
    """Extract imaginary part from complex data

    Parameters
    ----------
    ydata : :obj:`xarray.DataArray`
        y-axis data to be processed

    Returns
    -------
    :obj:`xarray.DataArray`
        Imaginary part of :attr:`ydata`
    """
    logger.debug("Setting up imaginary data")
    return ydata.imag
def calc_real(ydata):
    """Extract real part from complex data

    Parameters
    ----------
    ydata : :obj:`xarray.DataArray`
        y-axis data to be processed

    Returns
    -------
    :obj:`xarray.DataArray`
        Real part of :attr:`ydata`
    """
    logger.debug("Setting up real data")
    return ydata.real
def calc_phase(ydata, unwrap=False):
    """Convert complex data to its phase angle in degrees

    Parameters
    ----------
    ydata : :obj:`xarray.DataArray`
        y-axis data to be processed
    unwrap : :obj:`bool`
        whether to unwrap the resulting angles

    Returns
    -------
    :obj:`xarray.DataArray`
        :attr:`ydata` data converted to degrees
    """
    logger.debug("Calculating wrapped phase")
    # np.angle (applied lazily via dask) already returns a phase wrapped
    # between (-pi and pi]
    # https://numpy.org/doc/1.18/reference/generated/numpy.angle.html
    phase = xr.apply_ufunc(
        da.angle, ydata, dask="allowed", kwargs={"deg": True})
    if not unwrap:
        return phase
    # using an alternative method (reduce + np.unwrap) to avoid warnings
    logger.debug("Unwrapping enabled. Unwrapping angles")
    return phase.reduce(np.unwrap)
def calc_uvdist(uvw):
    """Calculate uv distance in metres

    Parameters
    ----------
    uvw : :obj:`xarray.DataArray`
        UVW column from measurement set

    Returns
    -------
    :obj:`xarray.DataArray`
        uv distance in meters
    """
    logger.debug("Setting up UV Distance (metres)")
    u_comp = uvw.isel({'uvw': 0})
    v_comp = uvw.isel({'uvw': 1})
    return da.sqrt(da.square(u_comp) + da.square(v_comp))
def calc_uvwave(uvw, freq):
    """Calculate uv distance in wavelengths for the availed frequency.

    This function also calculates the corresponding wavelength. Uses
    output from :func:`ragavi.vis_utils.calc_uvdist`.

    Parameters
    ----------
    uvw : :obj:`xarray.DataArray`
        UVW column from the MS dataset
    freq : :obj:`xarray.DataArray` or :obj:`float`
        Frequency(ies) from which corresponding wavelength will be obtained.

    Returns
    -------
    uvwave : :obj:`xarray.DataArray`
        uv distance in wavelengths for the specific frequency(ies)
    """
    logger.debug("Setting up UV Wavelengths (lambdas)")
    # speed of light
    # NOTE(review): 3e8 is an approximation (c = 299792458 m/s). Fine for
    # plotting scales, but confirm if exact wavelengths are ever required.
    C = 3e8
    # wavelength = velocity / frequency
    wavelength = (C / freq)
    # add extra dimension so uvdist broadcasts against per-channel wavelengths
    uvdist = calc_uvdist(uvw)
    uvdist = uvdist.expand_dims({"chan": 1}, axis=1)
    uvwave = (uvdist / wavelength)
    # make the uv distance in kilo lambda
    # Removed this, causing problems when limits are selected
    # uvwave = uvwave / 1e3
    return uvwave
def calc_unique_bls(n_ants=None):
    """Calculate the number of unique baselines formed by n antennas.

    Parameters
    ----------
    n_ants : :obj:`int`
        Number of available antennas.
        NOTE(review): the default of None makes the arithmetic below raise
        a confusing TypeError; callers are expected to always pass a value.

    Returns
    -------
    pq : :obj:`int`
        Number of unique baselines, i.e. n * (n - 1) / 2
    """
    pq = int(0.5 * n_ants * (n_ants - 1))
    logger.debug(f"Number of unique baselines: {str(pq)}.")
    return pq
########################################################################
####################### Get subtables ##################################
def get_antennas(ms_name):
    """Read antenna names from the ANTENNA subtable.

    Parameters
    ----------
    ms_name : :obj:`str`
        Name of MS or table

    Returns
    -------
    ant_names : :obj:`xarray.DataArray`
        Names for all the antennas available.
    """
    logger.debug("Getting antenna names")
    subname = "::".join((ms_name, "ANTENNA"))
    ant_subtab = list(xm.xds_from_table(subname))[0]
    ant_names = ant_subtab.NAME
    logger.debug(f"Antennas found: {', '.join(ant_names.values)}")
    return ant_names
def get_fields(ms_name):
    """Get field names from the FIELD subtable.

    Parameters
    ----------
    ms_name : :obj:`str`
        Name of MS or table

    Returns
    -------
    field_names : :obj:`xarray.DataArray`
        String names for the available fields
    """
    # Bug fix: this message previously read "Getting antenna names",
    # copy-pasted from get_antennas.
    logger.debug("Getting field names")
    subname = "::".join((ms_name, "FIELD"))
    field_subtab = list(xm.xds_from_table(subname))
    field_subtab = field_subtab[0]
    field_names = field_subtab.NAME
    logger.debug(f"Fields found: {', '.join(field_names.values)}")
    return field_names
def get_frequencies(ms_name, spwid=None, chan=None, cbin=None):
    """Get channel frequencies from the SPECTRAL_WINDOW subtable.

    Parameters
    ----------
    ms_name : :obj:`str`
        Name of MS or table
    spwid : :obj:`int` or :obj:`slice`
        Spectral window id number. Defaults to 0. If a slicer is specified,
        frequencies from a range of spectral windows will be returned.
    chan : :obj:`slice` or :obj:`numpy.ndarray`
        A slice object or numpy array to select some or all of the channels.
        Default is all the channels.
    cbin : :obj:`int`
        Number of channels to be binned together. If a value is provided,
        averaging is assumed to be turned on.

    Returns
    -------
    frequencies : :obj:`xarray.DataArray`
        Channel centre frequencies for the specified spectral window(s),
        or for all spectral windows if one is not specified.
    """
    logger.debug("Gettting Frequencies for selected SPWS and channels")
    subname = "::".join((ms_name, "SPECTRAL_WINDOW"))
    # if averaging is true, it shall be done before selection
    if cbin is None:
        spw_subtab = xm.xds_from_table(
            subname, group_cols="__row__",
            columns=["CHAN_FREQ", "CHAN_WIDTH", "EFFECTIVE_BW",
                     "REF_FREQUENCY", "RESOLUTION"])
        if chan is not None:
            # channel selection applied per spectral-window dataset
            spw_subtab = [_.sel(chan=chan) for _ in spw_subtab]
    else:
        # deferred import: averaging machinery only needed on this path
        from ragavi.averaging import get_averaged_spws
        # averages done per spectral window, scan and field
        logger.info("Channel averaging active")
        spw_subtab = get_averaged_spws(subname, cbin, chan_select=chan)
    # concat all spws into a single data array; each subtable is closed
    # as soon as its CHAN_FREQ has been collected
    frequencies = []
    for s in spw_subtab:
        frequencies.append(s.CHAN_FREQ)
        s.close()
    frequencies = xr.concat(frequencies, dim="row")
    if spwid is not None:
        # if multiple SPWs due to slicer, select the desired one(S)
        frequencies = frequencies.sel(row=spwid)
    logger.debug(
        f"Frequency table shape (spws, chans): {str(frequencies.shape)}")
    return frequencies
def get_polarizations(ms_name):
    """Get the type of correlations/polarizations in the measurement set.

    Parameters
    ----------
    ms_name : :obj:`str`
        Name of MS / table

    Returns
    -------
    cor2stokes : :obj:`list`
        The names of the correlation types found (e.g. ["XX", "XY", ...])
    """
    logger.debug("Getting Stokes' types")
    # Stokes types in this case are 1 based and NOT 0 based.
    stokes_types = ["I", "Q", "U", "V", "RR", "RL", "LR", "LL", "XX", "XY",
                    "YX", "YY", "RX", "RY", "LX", "LY", "XR", "XL", "YR",
                    "YL", "PP", "PQ", "QP", "QQ", "RCircular", "LCircular",
                    "Linear", "Ptotal", "Plinear", "PFtotal", "PFlinear",
                    "Pangle"]
    subname = "::".join((ms_name, "POLARIZATION"))
    pol_subtable = list(xm.xds_from_table(subname))[0]
    # offset the acquired corr type by 1 to match correctly the stokes type
    corr_types = pol_subtable.CORR_TYPE.sel(row=0).data.compute() - 1
    # Select corr_type name from the stokes types.
    # (Removed a dead `cor2stokes = []` assignment that was immediately
    # overwritten by this comprehension.)
    cor2stokes = [stokes_types[typ] for typ in corr_types]
    logger.debug(f"Found stokes: {str(cor2stokes)}")
    return cor2stokes
def get_flags(xds_table_obj, corr=None, chan=slice(0, None)):
    """Get values from the FLAG column, with optional selections along
    the channel and correlation dimensions.

    Parameters
    ----------
    xds_table_obj : :obj:`xarray.Dataset`
        MS as an xarray dataset from xarrayms
    corr : :obj:`int`
        Correlation number to select; all correlations are kept when None.
    chan : :obj:`slice`
        Channel selection; defaults to all channels.

    Returns
    -------
    flags : :obj:`xarray.DataArray`
        FLAG column values, selected by correlation if an index was given.
    """
    logger.debug("Getting flags")
    selection = {"chan": chan}
    if corr is not None:
        selection["corr"] = corr
    flags = xds_table_obj.FLAG.sel(selection)
    logger.debug("Flags ready")
    return flags
########################################################################
####################### Some utils for use #############################
def name_2id(tab_name, field_name):
    """Translate a field name to its numeric FIELD_ID.

    Parameters
    ----------
    tab_name : :obj:`str`
        MS or Table name
    field_name : :obj:`str`
        Field name to convert to a field ID

    Returns
    -------
    field_id : :obj:`int`
        Integer field id, or -1 when the name is not found.
    """
    logger.debug("Converting field names to FIELD_IDs")
    field_names = get_fields(tab_name).data.compute()
    # comparison is done against upper-cased names
    field_name = field_name.upper()
    if field_name not in field_names:
        logger.debug(f"FIELD_ID of {field_name} not found")
        return -1
    field_id = np.where(field_names == field_name)[0][0]
    logger.debug(f"Field name {field_name} --> found in ID {field_id}")
    return int(field_id)
def resolve_ranges(inp):
    """Create a TAQL string that can be parsed given a range of values.

    Parameters
    ----------
    inp : :obj:`str`
        A range of values to be constructed. Can be in the form of: "5",
        "5,6,7", "5~7" (inclusive range), "5:8" (exclusive range),
        "5:" (from 5 to last)

    Returns
    -------
    res : :obj:`str`
        Interval string conforming to TAQL sets and intervals as shown in
        `Casa TAQL Notes <https://casa.nrao.edu/aips2_docs/notes/199/node5.html#TAQL:EXPRESSIONS>`_
    """
    if '~' in inp:
        # create expression for an inclusive range, e.g. "5~7" -> "[{5,7}]"
        # to form a curly bracket string we need {{}} (escaped braces)
        res = "[{{{}}}]".format(inp.replace('~', ','))
    else:
        # takes care of 5:8 or 5,6,7 or 5:  -> wrapped as "[...]"
        res = "[{}]".format(inp)
    logger.debug(f"Resolved range {inp} --> {res} for TAQL selection")
    return res
def slice_data(inp):
"""Creates a slicer for an array. To be used to get a data subset such as
correlation or channel subsets.
Parameters
----------
inp : :obj:`str`
This can be of the form "5", "10~20" (10 to 20 inclusive), "10:21"
(same),
"10:" (from 10 to end), ":10:2" (0 to 9 inclusive, stepped by 2),
"~9:2" (same)
Returns
-------
sl : :obj:`slice`
slicer for an iterable object
"""
if inp is None:
sl = slice(0, None)
elif inp.isdigit():
sl = int(inp)
elif | |
"""Contains AMPAL objects representing pseudo atoms."""
from collections import OrderedDict
from ampal.base_ampal import Atom, Monomer, Polymer, write_pdb
from tools.geometry import distance, radius_of_circumcircle
class PseudoGroup(Polymer):
    """Container for `PseudoMonomer` objects; inherits from `Polymer`.

    Parameters
    ----------
    monomers : PseudoMonomer or [PseudoMonomer], optional
        `PseudoMonomer` or list of `PseudoMonomer` objects used to
        populate the `PseudoGroup`.
    polymer_id : str, optional
        User-facing identifier for the `PseudoGroup`; used when
        generating a pdb file via `PseudoGroup().pdb`.
    ampal_parent : ampal.Assembly, optional
        Reference to the `Assembly` containing this `PseudoGroup`.
    sl : int, optional
        Default smoothing level used when calculating the backbone
        primitive.

    Attributes
    ----------
    id : str
        `PseudoGroup` ID.
    ampal_parent : ampal.Assembly or None
        Reference to the `Assembly` containing this `PseudoGroup`.
    molecule_type : str
        Description of the type of `Polymer`, i.e. Protein, DNA etc.
    ligands : ampal.LigandGroup
        `LigandGroup` containing all `Ligands` associated with this
        `PseudoGroup` chain.
    tags : dict
        Information cache for this AMPAL object; also free for users
        to store any relevant data.
    sl : int
        Default smoothing level used when calculating the backbone
        primitive.

    Raises
    ------
    TypeError
        `Polymer` type objects can only be initialised empty or using
        a `Monomer`.
    """

    def __init__(self, monomers=None, polymer_id=' ', ampal_parent=None, sl=2):
        # molecule_type is fixed for every PseudoGroup
        super().__init__(
            monomers=monomers, polymer_id=polymer_id,
            molecule_type='pseudo_group', ampal_parent=ampal_parent, sl=sl)

    def __repr__(self):
        count = len(self._monomers)
        noun = 'PseudoMonomer' if count == 1 else 'PseudoMonomers'
        return '<PseudoGroup chain containing {} {}>'.format(count, noun)
class PseudoMonomer(Monomer):
    """Represents a collection of `PseudoAtoms`.

    Parameters
    ----------
    pseudo_atoms : OrderedDict, optional
        OrderedDict containing Atoms for the `PseudoMonomer`. OrderedDict
        is used to maintain the order items were added to the
        dictionary.
    mol_code : str, optional
        One or three letter code that represents the `PseudoMonomer`.
    monomer_id : str, optional
        String used to identify the `PseudoMonomer`.
    insertion_code : str, optional
        Insertion code of `PseudoMonomer`, used if reading from pdb.
    ampal_parent : ampal.PseudoGroup, optional
        Reference to `PseudoGroup` containing the `PseudoMonomer`.

    Attributes
    ----------
    mol_code : str
        PDB molecule code that represents the `PseudoMonomer`.
        (The original docstring said `Nucleotide` here and below --
        copy-paste from another class.)
    insertion_code : str
        Insertion code of the `PseudoMonomer`, used if reading from pdb.
    is_hetero : bool
        Always True for a `PseudoMonomer` (set in `__init__`); helps
        with PDB formatting.
    states : dict
        Contains an `OrderedDict` of atom information for each state
        available for the `PseudoMonomer`.
    id : str
        String used to identify the `PseudoMonomer`.
    ampal_parent : PseudoGroup or None
        A reference to the `PseudoGroup` containing this `PseudoMonomer`.
    tags : dict
        Information cache for this AMPAL object; also free for users
        to store any relevant data.

    Raises
    ------
    ValueError
        Documented upstream as raised when `mol_code` is not length 1
        or 3. NOTE(review): no such validation is visible in this
        class -- presumably done elsewhere; confirm.
    """

    def __init__(self, pseudo_atoms=None, mol_code='UNK',
                 monomer_id=' ', insertion_code=' ', ampal_parent=None):
        super(PseudoMonomer, self).__init__(
            atoms=pseudo_atoms, monomer_id=monomer_id,
            ampal_parent=ampal_parent)
        self.mol_code = mol_code
        self.insertion_code = insertion_code
        # pseudo atoms are always treated as hetero atoms in PDB output
        self.is_hetero = True

    def __repr__(self):
        return '<PseudoMonomer containing {} {}. PseudoMonomer code: {}>'.format(
            len(self.atoms), 'PseudoAtom' if len(self.atoms) == 1 else 'PseudoAtoms', self.mol_code)

    @property
    def pdb(self):
        """Generates a PDB string for the `PseudoMonomer`.

        NOTE(review): assumes ``self.tags['chain_id']`` exists; a
        `PseudoMonomer` without that tag would raise KeyError -- confirm
        callers always set it.
        """
        pdb_str = write_pdb(
            [self], ' ' if not self.tags['chain_id'] else self.tags['chain_id'])
        return pdb_str
class PseudoAtom(Atom):
    """Object containing 3D coordinates and a name.

    Notes
    -----
    Used to represent pseudo atoms (e.g. centre_of_mass) in ISAMBARD.
    (The original docstring documented `element`, `atom_id` and
    `res_label` parameters that do not exist on this class; the actual
    signature is below.)

    Parameters
    ----------
    coordinates : 3D Vector (tuple, list, numpy.array)
        Position of `PseudoAtom` in 3D space.
    name : str, optional
        Label used to refer to the `PseudoAtom`.
    occupancy : float, optional
        The occupancy of the `PseudoAtom`.
    bfactor : float, optional
        The bfactor of the `PseudoAtom`.
    charge : str, optional
        The point charge of the `PseudoAtom`.
    ampal_parent : ampal.Monomer, optional
        A reference to the `Monomer` containing this `PseudoAtom`.

    Attributes
    ----------
    name : str
        Label used to refer to the `PseudoAtom`.
    element : str
        Always 'C' for a `PseudoAtom` (fixed in `__init__`).
    tags : dict
        A dictionary containing information about this AMPAL object.
        The tags dictionary is used by AMPAL to cache information
        about this object, but is also intended to be used by users
        to store any relevant information they have.
    """

    def __init__(self, coordinates, name='', occupancy=1.0, bfactor=1.0,
                 charge=' ', ampal_parent=None):
        # element, atom_id and state are fixed: pseudo atoms carry no
        # real atomic identity.
        super().__init__(coordinates, element='C', atom_id=' ',
                         occupancy=occupancy, bfactor=bfactor,
                         charge=charge, state='A', ampal_parent=ampal_parent)
        self.name = name

    def __repr__(self):
        # self.x/y/z -- presumably coordinate accessors provided by the
        # Atom base class (not visible here).
        return ("<PseudoAtom. Name: {}. Coordinates: "
                "({:.3f}, {:.3f}, {:.3f})>".format(
                    self.name, self.x, self.y, self.z))
class Primitive(PseudoGroup):
"""A backbone path composed of `PseudoAtoms`.
Parameters
----------
pseudo_atoms : OrderedDict, optional
OrderedDict containing Atoms for the `PsuedoMonomer`. OrderedDict
is used to maintain the order items were added to the
dictionary.
mol_code : str, optional
One or three letter code that represents the `PsuedoMonomer`.
monomer_id : str, optional
String used to identify the `PsuedoMonomer`.
insertion_code : str, optional
Insertion code of `PsuedoMonomer`, used if reading from pdb.
is_hetero : bool, optional
True if is a hetero atom in pdb. Helps with PDB formatting.
ampal_parent : ampal.PseudoGroup, optional
Reference to `PseudoGroup` containing the `PsuedoMonomer`.
Attributes
----------
mol_code : str
PDB molecule code that represents the `Nucleotide`.
insertion_code : str
Insertion code of `Nucleotide`, used if reading from pdb.
is_hetero : bool
True if is a hetero atom in pdb. Helps with PDB formatting.
states : dict
Contains an `OrderedDicts` containing atom information for each
state available for the `Nucleotide`.
id : str
String used to identify the `Nucleotide`.
reference_atom : str
The key that corresponds to the reference `Atom`. This is used
by various functions, for example backbone primitives are
calculated using the `Atom` defined using this key.
ampal_parent : Polynucleotide or None
A reference to the `Polynucleotide` containing this `Nucleotide`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
Raises
------
ValueError
Raised if `mol_code` is not length 1 or 3.
"""
def __init__(self, monomers=None, polymer_id=' ', ampal_parent=None, sl=2):
    """Create a `Primitive`; the molecule_type is fixed by the
    `PseudoGroup` base class initialiser."""
    super().__init__(
        monomers=monomers, polymer_id=polymer_id,
        ampal_parent=ampal_parent, sl=sl)
def __repr__(self):
    """Summarise the chain by its number of pseudo-monomers."""
    count = len(self._monomers)
    noun = 'PseudoMonomer' if count == 1 else 'PseudoMonomers'
    return '<Primitive chain containing {} {}>'.format(count, noun)
@classmethod
def from_coordinates(cls, coordinates):
    """Creates a `Primitive` with one 'CA' `PseudoAtom` per coordinate."""
    primitive = cls()
    for point in coordinates:
        monomer = PseudoMonomer(ampal_parent=primitive)
        atom = PseudoAtom(point, ampal_parent=monomer)
        monomer.atoms = OrderedDict([('CA', atom)])
        primitive.append(monomer)
    primitive.relabel_all()
    return primitive
@property
def coordinates(self):
    """Returns the backbone coordinates for the `Primitive`.

    Each element is the raw ``_vector`` of a constituent atom --
    presumably a numpy coordinate array defined on `Atom`; confirm.
    """
    return [x._vector for x in self.get_atoms()]
def rise_per_residue(self):
    """The rise per residue at each point on the Primitive.

    Notes
    -----
    Element i of the returned list is the distance between
    primitive[i] and primitive[i + 1]. The final value is None,
    since the last residue has no successor.
    """
    rises = []
    for idx in range(len(self) - 1):
        rises.append(distance(self[idx]['CA'], self[idx + 1]['CA']))
    rises.append(None)
    return rises
def radii_of_curvature(self):
"""The radius of curvature at each point on the Polymer primitive.
Notes
-----
Each element of the returned list is the radius of curvature,
at a point on the Polymer primitive. Element i is the radius
of the circumcircle formed from indices [i-1, i, i+1] of the
primitve. The first and final values are None.
"""
rocs = []
for i in range(len(self)):
if 0 < i < len(self) - 1:
rocs.append(radius_of_circumcircle(
self[i - 1]['CA'], self[i]['CA'], self[i + 1]['CA']))
else:
| |
plugin_info_filename):
"""
DEPRECATED(>1.9): please use a specific plugin
locator if you need such information.
Gather the core information (name, and module to be loaded)
about a plugin described by it's info file (found at
'directory/filename').
Return an instance of ``PluginInfo`` and the
config_parser used to gather the core data *in a tuple*, if the
required info could be localised, else return ``(None,None)``.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
return self.getPluginLocator().gatherCorePluginInfo(directory, plugin_info_filename)
def _getPluginNameAndModuleFromStream(self, infoFileObject, candidate_infofile="<buffered info>"):
"""
DEPRECATED(>1.9): please use a specific plugin
locator if you need such information.
Extract the name and module of a plugin from the
content of the info file that describes it and which
is stored in infoFileObject.
.. note::
Prefer using ``_gatherCorePluginInfo``
instead, whenever possible...
.. warning::
``infoFileObject`` must be a file-like
object: either an opened file for instance or a string
buffer wrapped in a StringIO instance as another
example.
.. note::
``candidate_infofile`` must be provided
whenever possible to get better error messages.
Return a 3-uple with the name of the plugin, its
module and the config_parser used to gather the core
data *in a tuple*, if the required info could be
localised, else return ``(None,None,None)``.
.. note::
This is supposed to be used internally by subclasses
and decorators.
"""
return self.getPluginLocator().getPluginNameAndModuleFromStream(infoFileObject, candidate_infofile)
def getCategories(self):
    """
    Return the list of all category names.
    """
    # iterating a dict yields its keys, in insertion order
    return list(self.category_mapping)
def removePluginFromCategory(self, plugin, category_name):
    """
    Remove a plugin from the category where it's assumed to belong.
    Raises KeyError for an unknown category and ValueError when the
    plugin is not registered there (standard list.remove behaviour).
    """
    category_plugins = self.category_mapping[category_name]
    category_plugins.remove(plugin)
def appendPluginToCategory(self, plugin, category_name):
    """
    Append a new plugin to the given (existing) category.
    """
    category_plugins = self.category_mapping[category_name]
    category_plugins.append(plugin)
def getPluginsOfCategory(self, category_name):
    """
    Return a (shallow) copy of the list of all plugins belonging to
    a category.
    """
    return list(self.category_mapping[category_name])
def getAllPlugins(self):
    """
    Return the list of all plugins across every category, without
    duplicates (a plugin registered in several categories appears once).
    """
    unique_plugins = {plugin
                      for plugins in self.category_mapping.values()
                      for plugin in plugins}
    return list(unique_plugins)
def getPluginsOf(self, **kwargs):
"""
Returns a set of plugins whose properties match the named arguments provided here along with their correspoding values.
"""
selectedPLugins = set()
for plugin in self.getAllPlugins():
for attrName in kwargs:
if not hasattr(plugin, attrName):
break
attrValue = kwargs[attrName]
pluginValue = getattr(plugin, attrName)
if pluginValue == attrValue:
continue
if type(pluginValue) == type(attrValue):
break
try:
if attrValue in pluginValue:
continue
except:
break
else:
selectedPLugins.add(plugin)
return selectedPLugins
def getPluginCandidates(self):
    """
    Return a copy of the list of possible plugins.

    Each possible plugin (ie a candidate) is described by a 3-uple:
    (info file path, python file path, plugin info instance)

    .. warning: ``locatePlugins`` must be called before !
       (Raises RuntimeError otherwise -- note the sibling candidate
       mutators raise ValueError for the same condition.)
    """
    try:
        return list(self._candidates)
    except AttributeError:
        raise RuntimeError("locatePlugins must be called before getPluginCandidates")
def removePluginCandidate(self, candidateTuple):
    """
    Remove a given candidate from the list of plugins that should be
    loaded.

    The candidate must be represented by the same tuple described
    in ``getPluginCandidates``.

    .. warning: ``locatePlugins`` must be called before !
    """
    try:
        candidates = self._candidates
    except AttributeError:
        raise ValueError("locatePlugins must be called before removePluginCandidate")
    candidates.remove(candidateTuple)
def appendPluginCandidate(self, candidateTuple):
    """
    Append a new candidate to the list of plugins that should be loaded.

    The candidate must be represented by the same tuple described
    in ``getPluginCandidates``.

    .. warning: ``locatePlugins`` must be called before !
    """
    if not hasattr(self, '_candidates'):
        # Bug fix: the error message used to name removePluginCandidate
        # (copy-paste from the sibling method).
        raise ValueError("locatePlugins must be called before appendPluginCandidate")
    self._candidates.append(candidateTuple)
def locatePlugins(self):
    """
    Convenience method (actually call the IPluginLocator method).

    Stores the located candidate tuples on ``self._candidates``; the
    locator's second return value (its candidate count) is discarded.
    """
    # previously bound to an unused local `npc`
    self._candidates, _ = self.getPluginLocator().locatePlugins()
def loadPlugins(self, callback=None, callback_after=None):
    """
    Load the candidate plugins that have been identified through a
    previous call to locatePlugins. For each plugin candidate
    look for its category, load it and store it in the appropriate
    slot of the ``category_mapping``.

    You can specify 2 callbacks: ``callback`` and ``callback_after``.
    If given, ``callback`` is called before each plugin load attempt
    and ``callback_after`` after each attempt; the ``plugin_info``
    instance is passed as an argument to each. This is meant to
    facilitate code that needs to run for each plugin, such as adding
    the directory it resides in to sys.path (so imports of other files
    in the plugin's directory work correctly). You can use
    ``callback_after`` to remove anything you added to the path.

    Returns the list of processed ``plugin_info`` objects; candidates
    that failed to import are included with ``plugin_info.error`` set.
    """
    # print "%s.loadPlugins" % self.__class__
    if not hasattr(self, '_candidates'):
        raise ValueError("locatePlugins must be called before loadPlugins")
    processed_plugins = []
    for candidate_infofile, candidate_filepath, plugin_info in self._candidates:
        # make sure to attribute a unique module name to the one
        # that is about to be loaded
        plugin_module_name_template = NormalizePluginNameForModuleName(
            "yapsy_loaded_plugin_" + plugin_info.name) + "_%d"
        for plugin_name_suffix in range(len(sys.modules)):
            plugin_module_name = plugin_module_name_template % plugin_name_suffix
            if plugin_module_name not in sys.modules:
                break
        # tolerance on the presence (or not) of the py extensions
        if candidate_filepath.endswith(".py"):
            candidate_filepath = candidate_filepath[:-3]
        # if a callback exists, call it before attempting to load
        # the plugin so that a message can be displayed to the
        # user
        if callback is not None:
            callback(plugin_info)
        # cover the case when the __init__ of a package has been
        # explicitely indicated
        if "__init__" in os.path.basename(candidate_filepath):
            candidate_filepath = os.path.dirname(candidate_filepath)
        try:
            candidate_module = PluginManager._importModule(plugin_module_name, candidate_filepath)
        except Exception:
            # a failed import is recorded on the plugin_info, and the
            # candidate is still reported as "processed"
            exc_info = sys.exc_info()
            log.error("Unable to import plugin: %s" % candidate_filepath, exc_info=exc_info)
            plugin_info.error = exc_info
            processed_plugins.append(plugin_info)
            continue
        processed_plugins.append(plugin_info)
        if "__init__" in os.path.basename(candidate_filepath):
            sys.path.remove(plugin_info.path)
        # now try to find and initialise the first subclass of the correct plugin interface
        last_failed_attempt_message = None
        for element, element_name in ((getattr(candidate_module, name), name) for name in dir(candidate_module)):
            plugin_info_reference = None
            for category_name in self.categories_interfaces:
                try:
                    # issubclass raises TypeError for non-class elements
                    is_correct_subclass = issubclass(element, self.categories_interfaces[category_name])
                except Exception:
                    exc_info = sys.exc_info()
                    log.debug("correct subclass tests failed for: %s in %s" % (element_name, candidate_filepath),
                              exc_info=exc_info)
                    continue
                if is_correct_subclass and element is not self.categories_interfaces[category_name]:
                    current_category = category_name
                    if candidate_infofile not in self._category_file_mapping[current_category]:
                        # we found a new plugin: initialise it and search for the next one
                        if not plugin_info_reference:
                            try:
                                plugin_info.plugin_object = self.instanciateElementWithImportInfo(element,
                                                                                                  element_name,
                                                                                                  plugin_module_name,
                                                                                                  candidate_filepath)
                                plugin_info_reference = plugin_info
                            except Exception:
                                exc_info = sys.exc_info()
                                last_failed_attempt_message = "Unable to create plugin object: %s" % candidate_filepath
                                log.debug(last_failed_attempt_message, exc_info=exc_info)
                                plugin_info.error = exc_info
                                break  # If it didn't work once it wont again
                            else:
                                last_failed_attempt_message = None
                        plugin_info.categories.append(current_category)
                        self.category_mapping[current_category].append(plugin_info_reference)
                        self._category_file_mapping[current_category].append(candidate_infofile)
        # Everything is loaded and instantiated for this plugin now
        if callback_after is not None:
            callback_after(plugin_info)
        else:
            # NOTE(review): instantiation failures are only logged here,
            # i.e. when no callback_after was supplied -- confirm this
            # asymmetry is intentional.
            if last_failed_attempt_message:
                log.error(last_failed_attempt_message, exc_info=plugin_info.error)
    # Remove candidates list since we don't need them any more and
    # don't need to take up the space
    delattr(self, '_candidates')
    return processed_plugins
@staticmethod
def _importModule(plugin_module_name, candidate_filepath):
    """
    Import a module, trying either to find it as a single file or as a directory.

    .. note:: Isolated and provided to be reused, but not to be reimplemented !
    """
    # use imp to correctly load the plugin as a module
    # NOTE(review): the `imp` module is deprecated (removed in
    # Python 3.12); a migration to importlib would be needed to run on
    # modern interpreters -- confirm target Python versions.
    if os.path.isdir(candidate_filepath):
        # a package: load its directory as PKG_DIRECTORY
        candidate_module = imp.load_module(plugin_module_name, None, candidate_filepath,
                                           ("py", "r", imp.PKG_DIRECTORY))
    else:
        # a single .py source file; the handle is only needed while
        # load_module runs
        with open(candidate_filepath + ".py", "r") as plugin_file:
            candidate_module = imp.load_module(plugin_module_name, plugin_file, candidate_filepath + ".py",
                                               ("py", "r", imp.PY_SOURCE))
    return candidate_module
def instanciateElementWithImportInfo(self, element, element_name,
                                     plugin_module_name, candidate_filepath):
    """Override this method to customize how plugins are instanciated.

    .. note::
        This method receives the 'element' that is a candidate as the
        plugin's main class, together with enough information (its
        name, module name and file path) to reload its containing
        module and this element.
        The default implementation simply delegates to
        ``instanciateElement``.
    """
    return self.instanciateElement(element)
def instanciateElement(self, element):
    """
    DEPRECATED(>1.11): reimplement instead ``instanciateElementWithImportInfo`` !

    Override this method to customize how plugins are instanciated.

    .. warning::
        This method is called only if
        ``instanciateElementWithImportInfo`` has not been reimplemented !
    """
    # default: call the plugin class with no arguments
    plugin_object = element()
    return plugin_object
def collectPlugins(self):
    """
    Walk through the plugins' places and look for plugins. Then
    for each plugin candidate look for its category, load it and
    store it in the appropriate slot of the category_mapping.

    Equivalent to calling ``locatePlugins`` followed by ``loadPlugins``.
    """
    self.locatePlugins()
    self.loadPlugins()
def getPluginByName(self, name, category="Default"):
    """
    Get the plugin corresponding to a given category and name.
    Returns None when either the category or the name is unknown.
    """
    candidates = self.category_mapping.get(category, [])
    return next((item for item in candidates if item.name == name), None)
def activatePluginByName(self, name, category="Default"):
"""
Activate a plugin corresponding to a given category + name.
"""
pta_item | |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = '<NAME> (<EMAIL>)'
import cgi
import logging
import urllib
from common import utils as common_utils
from common import crypto
from common import schema_fields
from controllers import sites
from controllers import utils
from models import courses
from models import resources_display
from models import custom_units
from models import jobs
from models import permissions
from models import roles
from models import services
from models import transforms
from modules.courses import constants
from modules.courses import messages
from modules.assessments import assessments
from modules.dashboard import dashboard
from modules.oeditor import oeditor
from tools import verify
from google.appengine.ext import db
custom_module = None # reference to modules.courses.courses.custom_module
class UnitLessonEditor(object):
    """Namespace for functions handling action callbacks from Dashboard."""

    # Schema annotations that hide the legacy activity-related fields in
    # the editor UI.
    HIDE_ACTIVITY_ANNOTATIONS = [
        (['properties', 'activity_title', '_inputex'], {'_type': 'hidden'}),
        (['properties', 'activity_listed', '_inputex'], {'_type': 'hidden'}),
        (['properties', 'activity', '_inputex'], {'_type': 'hidden'}),
    ]

    # Dashboard action identifiers. GET actions render editor pages and
    # POST actions mutate the course; they are registered with the
    # dashboard in on_module_enabled() based on the callback name prefix.
    ACTION_GET_IMPORT_COURSE = 'import_course'
    ACTION_POST_CANCEL_IMPORT = 'cancel_import'
    ACTION_POST_ADD_UNIT = 'add_unit'
    ACTION_GET_EDIT_UNIT = 'edit_unit'
    ACTION_POST_ADD_LESSON = 'add_lesson'
    ACTION_GET_EDIT_LESSON = 'edit_lesson'
    ACTION_GET_IN_PLACE_LESSON_EDITOR = 'in_place_lesson_editor'
    ACTION_POST_ADD_LINK = 'add_link'
    ACTION_GET_EDIT_LINK = 'edit_link'
    ACTION_POST_ADD_ASSESSMENT = 'add_assessment'
    ACTION_GET_EDIT_ASSESSMENT = 'edit_assessment'
    ACTION_POST_ADD_CUSTOM_UNIT = 'add_custom_unit'
    ACTION_GET_EDIT_CUSTOM_UNIT = 'edit_custom_unit'
@classmethod
def on_module_enabled(cls):
    """Register this class's action callbacks with the dashboard."""
    # Each action is routed to the GET or POST registration API based
    # on the callback's name prefix.
    for action, callback in (
            (cls.ACTION_GET_IMPORT_COURSE, cls.get_import_course),
            (cls.ACTION_POST_CANCEL_IMPORT, cls.post_cancel_import),
            (cls.ACTION_POST_ADD_UNIT, cls.post_add_unit),
            (cls.ACTION_GET_EDIT_UNIT, cls.get_edit_unit),
            (cls.ACTION_POST_ADD_LESSON, cls.post_add_lesson),
            (cls.ACTION_GET_EDIT_LESSON, cls.get_edit_lesson),
            (cls.ACTION_GET_IN_PLACE_LESSON_EDITOR,
             cls.get_in_place_lesson_editor),
            (cls.ACTION_POST_ADD_LINK, cls.post_add_link),
            (cls.ACTION_GET_EDIT_LINK, cls.get_edit_link),
            (cls.ACTION_POST_ADD_ASSESSMENT, cls.post_add_assessment),
            (cls.ACTION_GET_EDIT_ASSESSMENT, cls.get_edit_assessment),
            (cls.ACTION_POST_ADD_CUSTOM_UNIT, cls.post_add_custom_unit),
            (cls.ACTION_GET_EDIT_CUSTOM_UNIT, cls.get_edit_custom_unit),
            ):
        if callback.__name__.startswith('get_'):
            dashboard.DashboardHandler.add_custom_get_action(
                action, callback, in_action='outline')
        elif callback.__name__.startswith('post_'):
            dashboard.DashboardHandler.add_custom_post_action(
                action, callback)
        else:
            raise ValueError('Callback names must start with get_ or post_')
    # Tell dashboard we want to handle authorization of viewing of
    # unit/assessment/link editors ourselves, rather than using a single
    # permission name. (This uses detailed schema permissions authority
    # checks instead.)
    dashboard.DashboardHandler.map_get_action_to_permission_checker(
        cls.ACTION_GET_EDIT_UNIT, UnitRESTHandler.can_view)
    dashboard.DashboardHandler.map_get_action_to_permission_checker(
        cls.ACTION_GET_EDIT_LINK, LinkRESTHandler.can_view)
    dashboard.DashboardHandler.map_get_action_to_permission_checker(
        cls.ACTION_GET_EDIT_ASSESSMENT, AssessmentRESTHandler.can_view)
@classmethod
def get_import_course(cls, handler):
    """Shows setup form for course import.

    If there is nothing to import from, renders an informational page
    directly (returns None); otherwise returns template values containing
    the import form for the dashboard to render.
    """
    # Build the template dict once; the original rebuilt an identical dict
    # with the same page_title after constructing the form.
    template_values = {
        'page_title': handler.format_title('Import Course'),
    }
    annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
    if not annotations:
        template_values['main_content'] = 'No courses to import from.'
        handler.render_page(template_values)
        return

    exit_url = handler.canonicalize_url('/dashboard')
    rest_url = handler.canonicalize_url(ImportCourseRESTHandler.URI)
    form_html = oeditor.ObjectEditor.get_html_for(
        handler,
        ImportCourseRESTHandler.SCHEMA_JSON,
        annotations,
        None, rest_url, exit_url,
        auto_return=True,
        save_button_caption='Import',
        required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
    template_values['main_content'] = form_html
    return template_values
@classmethod
def post_cancel_import(cls, handler):
    """Cancels a running background course-import job and returns to outline."""
    # Dashboard dispatch will have checked XSRF and admin privileges.
    ImportCourseBackgroundJob(handler.app_context, None).cancel()
    handler.redirect('/dashboard?action=outline')
@classmethod
def post_add_lesson(cls, handler):
    """Adds a new lesson to the requested unit, or to the first unit.

    If no unit can be found, bounces the user back to the dashboard.
    """
    course = courses.Course(handler)
    unit_id = handler.request.get('unit_id')
    if unit_id:
        target_unit = course.find_unit_by_id(unit_id)
    else:
        # No unit specified: fall back to the first plain unit, if any.
        target_unit = next(
            (candidate for candidate in course.get_units()
             if candidate.type == verify.UNIT_TYPE_UNIT),
            None)
    if not target_unit:
        handler.redirect('/dashboard')
        return
    lesson = course.add_lesson(target_unit)
    course.save()
    # TODO(psimakov): complete 'edit_lesson' view
    handler.redirect(handler.get_action_url(
        'edit_lesson', key=lesson.lesson_id,
        extra_args={'is_newly_created': 1}))
@classmethod
def post_add_unit(cls, handler):
    """Adds new unit to a course, then redirects to the unit editor."""
    course = courses.Course(handler)
    unit = course.add_unit()
    course.save()
    handler.redirect(handler.get_action_url(
        'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))
@classmethod
def post_add_link(cls, handler):
    """Adds new link to a course, then redirects to the link editor."""
    course = courses.Course(handler)
    link = course.add_link()
    # Start with an empty target URL; the user fills it in via the editor.
    link.href = ''
    course.save()
    handler.redirect(handler.get_action_url(
        'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))
@classmethod
def post_add_assessment(cls, handler):
    """Adds new assessment to a course, then redirects to its editor."""
    course = courses.Course(handler)
    assessment = course.add_assessment()
    course.save()
    handler.redirect(handler.get_action_url(
        'edit_assessment', key=assessment.unit_id,
        extra_args={'is_newly_created': 1}))
@classmethod
def post_add_custom_unit(cls, handler):
    """Adds a custom unit of the requested type, then opens its editor."""
    course = courses.Course(handler)
    # The unit type name selects which registered custom unit to create.
    custom_unit_type = handler.request.get('unit_type')
    custom_unit = course.add_custom_unit(custom_unit_type)
    course.save()
    handler.redirect(handler.get_action_url(
        'edit_custom_unit', key=custom_unit.unit_id,
        extra_args={'is_newly_created': 1,
                    'unit_type': custom_unit_type}))
@classmethod
def _render_edit_form_for(
    cls, handler, rest_handler_cls, title, schema, additional_dirs=None,
    annotations_dict=None, delete_xsrf_token='delete-unit',
    delete_message=None, extra_js_files=None, extra_css_files=None):
    """Renders an editor form for a given REST handler class.

    Args:
        handler: the dashboard request handler rendering the page.
        rest_handler_cls: REST handler class; provides URI and optional
            ADDITIONAL_DIRS / EXTRA_CSS_FILES / EXTRA_JS_FILES seams.
        title: human-readable entity name used in the page title.
        schema: schema object describing the edited entity.
        additional_dirs: optional extra template directories.
        annotations_dict: optional extra schema annotations (may be None).
        delete_xsrf_token: XSRF token *name* for the delete action. It must
            match the token name the REST handler's delete() verb asserts;
            CommonUnitRESTHandler.delete checks 'delete-unit', hence the
            default. (The source had the sanitizer placeholder '<PASSWORD>'
            here, which would make every delete fail XSRF validation.)
        delete_message: optional deletion confirmation message.
        extra_js_files: optional extra JavaScript asset names.
        extra_css_files: optional extra CSS asset names.

    Returns:
        dict of template values with the rendered form as 'main_content'.
    """
    schema_json = schema.get_json_schema()
    annotations_dict = schema.get_schema_dict() + (annotations_dict or [])
    key = handler.request.get('key')

    extra_args = {}
    if handler.request.get('is_newly_created'):
        extra_args['is_newly_created'] = 1

    exit_url = handler.canonicalize_url('/dashboard')
    rest_url = handler.canonicalize_url(rest_handler_cls.URI)

    # Only course admins are offered a delete button.
    delete_method = None
    delete_url = None
    if roles.Roles.is_course_admin(handler.app_context):
        delete_method = 'delete'
        delete_url = '%s?%s' % (
            handler.canonicalize_url(rest_handler_cls.URI),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    handler.create_xsrf_token(delete_xsrf_token))
            }))

    def extend_list(target_list, ext_name):
        # Extend the optional arg lists such as extra_js_files by an
        # optional list field on the REST handler class. Used to provide
        # seams for modules to add js files, etc. See LessonRESTHandler.
        if hasattr(rest_handler_cls, ext_name):
            return (target_list or []) + getattr(rest_handler_cls, ext_name)
        return target_list

    form_html = oeditor.ObjectEditor.get_html_for(
        handler,
        schema_json,
        annotations_dict,
        key, rest_url, exit_url,
        additional_dirs=extend_list(additional_dirs, 'ADDITIONAL_DIRS'),
        delete_url=delete_url,
        delete_method=delete_method,
        delete_message=delete_message,
        display_types=schema.get_display_types(),
        extra_args=extra_args,
        extra_css_files=extend_list(extra_css_files, 'EXTRA_CSS_FILES'),
        extra_js_files=extend_list(extra_js_files, 'EXTRA_JS_FILES'),
        read_only=not handler.app_context.is_editable_fs())

    template_values = {}
    template_values['page_title'] = handler.format_title('Edit %s' % title)
    template_values['main_content'] = form_html
    return template_values
@classmethod
def get_edit_unit(cls, handler):
    """Shows unit editor for the unit identified by the 'key' parameter."""
    return cls._render_edit_form_for(
        handler, UnitRESTHandler, 'Unit', UnitRESTHandler.get_schema(
            courses.Course(handler), int(handler.request.get('key'))),
        delete_message='Are you sure you want to delete this unit? '
        'Deleting the unit will also delete any lessons it contains.')
@classmethod
def get_edit_custom_unit(cls, handler):
    """Shows the editor for a custom unit type.

    The 'unit_type' request parameter selects the registered custom unit
    whose own REST handler supplies the schema and endpoint.
    """
    custom_unit_type = handler.request.get('unit_type')
    custom_unit = custom_units.UnitTypeRegistry.get(custom_unit_type)
    rest_handler = custom_unit.rest_handler
    return cls._render_edit_form_for(
        handler, rest_handler, custom_unit.name,
        rest_handler.get_schema(courses.Course(handler)))
@classmethod
def get_edit_link(cls, handler):
    """Shows link editor for the link identified by the 'key' parameter."""
    return cls._render_edit_form_for(
        handler, LinkRESTHandler, 'Link', LinkRESTHandler.get_schema(
            courses.Course(handler), int(handler.request.get('key'))))
@classmethod
def get_edit_assessment(cls, handler):
    """Shows assessment editor for the assessment given by 'key'."""
    return cls._render_edit_form_for(
        handler, AssessmentRESTHandler, 'Assessment',
        AssessmentRESTHandler.get_schema(
            courses.Course(handler), int(handler.request.get('key'))),
        extra_js_files=['assessment_editor_lib.js', 'assessment_editor.js'])
@classmethod
def get_edit_lesson(cls, handler):
    """Shows the lesson/activity editor for the lesson given by 'key'."""
    key = handler.request.get('key')
    course = courses.Course(handler)
    lesson = course.find_lesson_by_id(None, key)
    # Lessons with an old-style activity keep the activity fields visible;
    # otherwise those fields are hidden via annotations.
    annotations_dict = (
        None if lesson.has_activity else cls.HIDE_ACTIVITY_ANNOTATIONS)
    schema = LessonRESTHandler.get_schema(course, key)
    if courses.has_only_new_style_activities(course):
        schema.get_property('objectives').extra_schema_dict_values[
            'excludedCustomTags'] = set(['gcb-activity'])
    return cls._render_edit_form_for(
        handler,
        LessonRESTHandler, 'Lessons and Activities', schema,
        annotations_dict=annotations_dict,
        # The source held the sanitizer placeholder '<PASSWORD>' here; the
        # token name must match what LessonRESTHandler.delete asserts —
        # 'delete-lesson' per the handler's naming convention. TODO(review):
        # confirm against LessonRESTHandler.delete.
        delete_xsrf_token='delete-lesson',
        extra_js_files=['lesson_editor.js'])
@classmethod
def get_in_place_lesson_editor(cls, handler):
    """Shows the lesson editor iframed inside a lesson page.

    Unlike the other editors this writes the rendered page directly to the
    response instead of returning template values, because it is served
    inside an iframe rather than the dashboard chrome.
    """
    if not handler.app_context.is_editable_fs():
        return

    key = handler.request.get('key')
    course = courses.Course(handler)
    lesson = course.find_lesson_by_id(None, key)
    annotations_dict = (
        None if lesson.has_activity else cls.HIDE_ACTIVITY_ANNOTATIONS)
    schema = LessonRESTHandler.get_schema(course, key)
    # Bug fix: annotations_dict is None for lessons that have an activity;
    # the original concatenated list + None, raising TypeError. Guard with
    # an empty list, matching _render_edit_form_for's handling.
    annotations_dict = schema.get_schema_dict() + (annotations_dict or [])
    if courses.has_only_new_style_activities(course):
        schema.get_property('objectives').extra_schema_dict_values[
            'excludedCustomTags'] = set(['gcb-activity'])

    extra_js_files = [
        'lesson_editor.js', 'in_place_lesson_editor_iframe.js'
    ] + LessonRESTHandler.EXTRA_JS_FILES
    form_html = oeditor.ObjectEditor.get_html_for(
        handler,
        schema.get_json_schema(),
        annotations_dict,
        key, handler.canonicalize_url(LessonRESTHandler.URI), None,
        additional_dirs=LessonRESTHandler.ADDITIONAL_DIRS,
        display_types=schema.get_display_types(),
        extra_css_files=LessonRESTHandler.EXTRA_CSS_FILES,
        extra_js_files=extra_js_files)
    template = handler.get_template('in_place_lesson_editor.html', [])
    template_values = {
        'form_html': form_html,
        'extra_css_href_list': handler.EXTRA_CSS_HREF_LIST,
        'extra_js_href_list': handler.EXTRA_JS_HREF_LIST
    }
    handler.response.write(template.render(template_values))
class CommonUnitRESTHandler(utils.BaseRESTHandler):
    """A common super class for all unit REST handlers.

    Subclasses provide can_view/can_edit (as classmethods taking the
    app_context) and get_schema; this base class implements the shared
    GET/PUT/DELETE verbs.
    """

    # These functions are called with an updated unit object whenever a
    # change is saved.
    POST_SAVE_HOOKS = []

    def unit_to_dict(self, unit):
        """Converts a unit to a dictionary representation."""
        return resources_display.UnitTools(self.get_course()).unit_to_dict(unit)

    def apply_updates(self, unit, updated_unit_dict, errors):
        """Applies changes to a unit; modifies unit input argument."""
        resources_display.UnitTools(courses.Course(self)).apply_updates(
            unit, updated_unit_dict, errors)

    # Bug fix: the abstract stubs below previously took no app_context
    # argument although get()/put() invoke them as
    # self.can_view(self.app_context) — a subclass failing to override them
    # would have died with TypeError instead of NotImplementedError.
    def can_view(self, app_context):
        raise NotImplementedError()

    def can_edit(self, app_context):
        raise NotImplementedError()

    @classmethod
    def get_schema(cls, course, key):
        raise NotImplementedError()

    def get(self):
        """A GET REST method shared by all unit types."""
        key = self.request.get('key')

        if not self.can_view(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        message = ['Success.']
        if self.request.get('is_newly_created'):
            unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
            message.append(
                'New %s has been created and saved.' % unit_type)

        entity = self.unit_to_dict(unit)
        schema = self.get_schema(course, key)
        # only_writable=False: GET returns read-only fields as well.
        schema.redact_entity_to_schema(entity, only_writable=False)

        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=transforms.dict_to_json(entity, recurse=True),
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token('put-unit'))

    def put(self):
        """A PUT REST method shared by all unit types."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'put-unit', {'key': key}):
            return

        if not self.can_edit(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        errors = []
        course = courses.Course(self)
        try:
            schema = self.get_schema(course, key)
            updated_unit_dict = transforms.json_to_dict(
                transforms.loads(payload), schema.get_json_schema_dict())
            # Drop fields the schema does not allow writing before applying.
            schema.redact_entity_to_schema(updated_unit_dict)
            self.apply_updates(unit, updated_unit_dict, errors)
        # Py3-compatible form; the source used py2-only 'except ..., ex'.
        except (TypeError, ValueError) as ex:
            errors.append(str(ex))
        if not errors:
            assert course.update_unit(unit)
            course.save()
            common_utils.run_hooks(self.POST_SAVE_HOOKS, unit)
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-unit', {'key': key}):
            return

        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        course.delete_unit(unit)
        course.save()
        transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
"""Provides REST API to unit."""
URI = '/rest/course/unit'
@classmethod
def can_view(cls, app_context):
return permissions.can_view(app_context, constants.SCOPE_UNIT)
@classmethod
def can_edit(cls, app_context):
return permissions.can_edit(app_context, constants.SCOPE_UNIT)
@classmethod
def get_schema(cls, course, this_unit_id):
# The set of | |
<gh_stars>0
#!/usr/bin/env python
#encoding: utf-8
'''aselite is a striped down single file version of ase that retains the
following features: atom and atoms objects, some of ase.io and some of
ase.constraints.'''
from __future__ import print_function
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
from math import cos, sin, sqrt
import warnings
import numpy as np
np.seterr(all='raise')
import os
import copy
import sys
import time
from os.path import isfile
import collections
def read_any(filename):
    """Read a structure file, trying each supported format in turn.

    Attempts VASP (POSCAR/CONTCAR), then XYZ, then CON format.

    Raises:
        IOError: if no reader can parse the file.
    """
    # 'except Exception' instead of a bare 'except:' so that SystemExit and
    # KeyboardInterrupt are not swallowed while probing formats.
    try:
        return read_vasp(filename)
    except Exception:
        pass
    try:
        return read_xyz(filename)
    except Exception:
        pass
    try:
        return read_con(filename)
    except Exception:
        pass
    raise IOError("Could not read file %s." % filename)
def write_jmol(filename, atoms, eigenvalues, eigenvectors):
    """Write vibrational modes as a multi-frame XYZ file readable by Jmol.

    Each frame holds one mode: the atom count, the eigenvalue as the
    comment line, then per-atom position plus displacement vector.

    Args:
        filename: output path.
        atoms: sequence of atom-like objects with .symbol and .position.
        eigenvalues: per-mode eigenvalues; indexed alongside eigenvector
            columns, so it must cover len(eigenvectors) modes.
        eigenvectors: (3*natoms, nmodes) array; column i is mode i.
            NOTE(review): the loop bound len(eigenvectors) is the row
            count, so this assumes a square (3N x 3N) eigenvector matrix.
    """
    # Fixes vs. original: 'with' guarantees the file is closed on error;
    # np.reshape copies as needed, whereas assigning to .shape raised
    # AttributeError on the non-contiguous column views produced by
    # eigenvectors[:, i]; '//'-style reshape avoids the py3 float-division
    # breakage of len(mode)/3.
    with open(filename, 'w') as f_xyz:
        for i in range(len(eigenvectors)):
            mode = np.reshape(eigenvectors[:, i], (-1, 3))
            f_xyz.write("%i\n" % len(atoms))
            f_xyz.write("%f\n" % eigenvalues[i])
            for j, atom in enumerate(atoms):
                f_xyz.write("%s %f %f %f %f %f %f\n" % (
                    atom.symbol,
                    atom.position[0], atom.position[1], atom.position[2],
                    mode[j, 0], mode[j, 1], mode[j, 2]))
def get_atomtypes(fname):
    """Given a file name, get the atomic symbols.

    The function can get this information from OUTCAR and POTCAR
    format files.  The files can also be compressed with gzip or
    bzip2.

    Returns:
        list of symbols, one per 'TITEL' line found in the file.
    """
    atomtypes = []
    if fname.find('.gz') != -1:
        import gzip
        f = gzip.open(fname)
    elif fname.find('.bz2') != -1:
        import bz2
        f = bz2.BZ2File(fname)
    else:
        f = open(fname)
    # try/finally fixes the original's handle leak when parsing raises.
    try:
        for line in f:
            if line.find('TITEL') != -1:
                # e.g. ' TITEL = PAW_PBE Fe_pv 06Sep2000' -> 'Fe'
                atomtypes.append(line.split()[3].split('_')[0].split('.')[0])
    finally:
        f.close()
    return atomtypes
def atomtypes_outpot(posfname, numsyms):
    """Try to retreive chemical symbols from OUTCAR or POTCAR

    If getting atomtypes from the first line in POSCAR/CONTCAR fails, it might
    be possible to find the data in OUTCAR or POTCAR, if these files exist.

    posfname -- The filename of the POSCAR/CONTCAR file we're trying to read
    numsyms -- The number of symbols we must find

    Raises:
        IOError: if no candidate file yields exactly numsyms symbols.
    """
    import os.path as op
    import glob

    # First check files with exactly same name except POTCAR/OUTCAR instead
    # of POSCAR/CONTCAR.
    fnames = [posfname.replace('POSCAR', 'POTCAR').replace('CONTCAR',
                                                           'POTCAR')]
    fnames.append(posfname.replace('POSCAR', 'OUTCAR').replace('CONTCAR',
                                                               'OUTCAR'))
    # Try the same but with compressed files
    fsc = []
    for fn in fnames:
        fsc.append(fn + '.gz')
        fsc.append(fn + '.bz2')
    for f in fsc:
        fnames.append(f)
    # Finally try anything with POTCAR or OUTCAR in the name
    vaspdir = op.dirname(posfname)
    fs = glob.glob(vaspdir + '*POTCAR*')
    for f in fs:
        fnames.append(f)
    fs = glob.glob(vaspdir + '*OUTCAR*')
    for f in fs:
        fnames.append(f)

    tried = []
    for fn in fnames:
        # Bug fix: the original tested 'fn in os.listdir('.')', which never
        # matches when posfname (and thus fn) carries a directory component.
        if op.isfile(fn):
            tried.append(fn)
            at = get_atomtypes(fn)
            if len(at) == numsyms:
                return at

    raise IOError('Could not determine chemical symbols. Tried files '
                  + str(tried))
def get_atomtypes_from_formula(formula):
    """Return atom types from chemical formula (optionally prepended
    with and underscore).

    Consecutive duplicate symbols are collapsed into one entry.
    """
    # Anything after an underscore (e.g. '_In-3.pos' suffixes) is ignored.
    bare_formula = formula.split('_')[0]
    symbols = string2symbols(bare_formula)
    atomtypes = [symbols[0]]
    for symbol in symbols[1:]:
        if atomtypes[-1] != symbol:
            atomtypes.append(symbol)
    return atomtypes
def read_vasp(filename='CONTCAR'):
    """Import POSCAR/CONTCAR type file.

    Reads unitcell, atom positions and constraints from the POSCAR/CONTCAR
    file and tries to read atom types from POSCAR/CONTCAR header, if this fails
    the atom types are read from OUTCAR or POTCAR file.

    Args:
        filename: path to the file, or an open file-like object.

    Returns:
        an Atoms object with cell, positions and (optionally) constraints.
    """
    if isinstance(filename, str):
        f = open(filename)
    else:  # Assume it's a file-like object
        f = filename

    # First line should contain the atom symbols , eg. "Ag Ge" in
    # the same order
    # as later in the file (and POTCAR for the full vasp run)
    atomtypes = f.readline().split()

    # Sometimes the first line in POSCAR/CONTCAR is of the form
    # "CoP3_In-3.pos". Check for this case and extract atom types
    if len(atomtypes) == 1 and '_' in atomtypes[0]:
        atomtypes = get_atomtypes_from_formula(atomtypes[0])

    # Second line: global scaling factor applied to the lattice vectors.
    lattice_constant = float(f.readline().split()[0])

    # Now the lattice vectors
    a = []
    for ii in range(3):
        s = f.readline().split()
        floatvect = float(s[0]), float(s[1]), float(s[2])
        a.append(floatvect)
    basis_vectors = np.array(a) * lattice_constant

    # Number of atoms. Again this must be in the same order as
    # in the first line
    # or in the POTCAR or OUTCAR file
    atom_symbols = []
    numofatoms = f.readline().split()
    # vasp5.1 has an additional line which gives the atom types
    # the following try statement skips this line
    try:
        int(numofatoms[0])
    except ValueError:
        numofatoms = f.readline().split()

    # check for comments in numofatoms line and get rid of them if necessary
    commentcheck = np.array(['!' in s for s in numofatoms])
    if commentcheck.any():
        # only keep the elements up to the first including a '!':
        numofatoms = numofatoms[:np.arange(len(numofatoms))[commentcheck][0]]

    numsyms = len(numofatoms)
    if len(atomtypes) < numsyms:
        # First line in POSCAR/CONTCAR didn't contain enough symbols.
        atomtypes = atomtypes_outpot(f.name, numsyms)
    else:
        # Validate the declared symbols; fall back to OUTCAR/POTCAR if any
        # token is not a known chemical symbol.
        try:
            for atype in atomtypes[:numsyms]:
                if not atype in chemical_symbols:
                    raise KeyError
        except KeyError:
            atomtypes = atomtypes_outpot(f.name, numsyms)

    # Expand per-species counts into one symbol per atom.
    for i, num in enumerate(numofatoms):
        numofatoms[i] = int(num)
        [atom_symbols.append(atomtypes[i]) for na in range(numofatoms[i])]

    # Check if Selective dynamics is switched on
    sdyn = f.readline()
    selective_dynamics = sdyn[0].lower() == "s"

    # Check if atom coordinates are cartesian or direct
    if selective_dynamics:
        ac_type = f.readline()
    else:
        ac_type = sdyn
    # 'c'/'k' (Cartesian/Kartesisch) mark cartesian coordinates.
    cartesian = ac_type[0].lower() == "c" or ac_type[0].lower() == "k"
    tot_natoms = sum(numofatoms)
    atoms_pos = np.empty((tot_natoms, 3))
    if selective_dynamics:
        selective_flags = np.empty((tot_natoms, 3), dtype=bool)
    for atom in range(tot_natoms):
        ac = f.readline().split()
        atoms_pos[atom] = (float(ac[0]), float(ac[1]), float(ac[2]))
        if selective_dynamics:
            curflag = []
            # 'F' flags a frozen coordinate (VASP selective-dynamics
            # convention) — assumption based on downstream Fix* usage.
            for flag in ac[3:6]:
                curflag.append(flag == 'F')
            selective_flags[atom] = curflag
    # Done with all reading
    if type(filename) == str:
        f.close()
    if cartesian:
        # Cartesian positions are still subject to the global scale factor.
        atoms_pos *= lattice_constant
    atoms = Atoms(symbols=atom_symbols, cell=basis_vectors, pbc=True)
    if cartesian:
        atoms.set_positions(atoms_pos)
    else:
        atoms.set_scaled_positions(atoms_pos)
    if selective_dynamics:
        constraints = []
        indices = []
        for ind, sflags in enumerate(selective_flags):
            if sflags.any() and not sflags.all():
                # Partially frozen atom: fix individual scaled coordinates.
                constraints.append(FixScaled(atoms.get_cell(), ind, sflags))
            elif sflags.all():
                # Fully frozen atoms are collected into one FixAtoms.
                indices.append(ind)
        if indices:
            constraints.append(FixAtoms(indices))
        if constraints:
            atoms.set_constraint(constraints)
    atoms.format = 'vasp'
    return atoms
def read_vasp_out(filename='OUTCAR', index='all'):
    """Import OUTCAR type file.

    Reads unitcell, atom positions, energies, and forces from the OUTCAR file
    and attempts to read constraints (if any) from CONTCAR/POSCAR, if present.

    Args:
        filename: path or file-like object for the OUTCAR.
        index: 'all' for every ionic step, an int for one image, or a
            slice object selecting a range of images.
    """
    try:  # try to read constraints, first from CONTCAR, then from POSCAR
        constr = read_vasp('CONTCAR').constraints
    except:
        try:
            constr = read_vasp('POSCAR').constraints
        except:
            constr = None

    if isinstance(filename, str):
        f = open(filename)
    else:  # Assume it's a file-like object
        f = filename
    data = f.readlines()
    natoms = 0
    images = []
    atoms = Atoms(pbc=True, constraint=constr)
    energy = 0
    species = []
    species_num = []
    symbols = []
    # ecount/poscount track how many energies vs. position blocks have been
    # seen, to assign late-printed energies to the right image (see below).
    ecount = 0
    poscount = 0
    for n, line in enumerate(data):
        if 'POTCAR:' in line:
            # e.g. 'POTCAR: PAW_PBE Fe_pv 06Sep2000' -> 'Fe'
            temp = line.split()[2]
            for c in ['.', '_', '1']:
                if c in temp:
                    temp = temp[0:temp.find(c)]
            species += [temp]
        if 'ions per type' in line:
            # POTCAR lines appear twice in OUTCAR; keep only the first half.
            species = species[:int(len(species) / 2)]
            temp = line.split()
            for ispecies in range(len(species)):
                species_num += [int(temp[ispecies + 4])]
                natoms += species_num[-1]
                for iatom in range(species_num[-1]):
                    symbols += [species[ispecies]]
        if 'direct lattice vectors' in line:
            cell = []
            for i in range(3):
                temp = data[n + 1 + i].split()
                cell += [[float(temp[0]), float(temp[1]), float(temp[2])]]
        if 'energy without entropy' in line:
            energy = float(data[n].split()[6])
            #energy = float(data[n+2].split()[4])
            if ecount < poscount:
                # reset energy for LAST set of atoms, not current one -
                # VASP 5.11? and up print the energy after the positions.
                images[-1].calc.energy = energy
            ecount += 1
        if 'POSITION ' in line:
            # A position/force block: build one image from the next
            # natoms lines.
            forces = []
            atoms_symbols = []
            atoms_positions = []
            positions = []
            for iatom in range(natoms):
                temp = data[n + 2 + iatom].split()
                atoms_symbols.append(symbols[iatom])
                atoms_positions.append(
                    [float(temp[0]), float(temp[1]), float(temp[2])])
                forces += [[float(temp[3]), float(temp[4]), float(temp[5])]]
            atoms = Atoms('H' * natoms, pbc=True, constraint=constr)
            atoms.set_cell(cell)
            atoms.set_chemical_symbols(atoms_symbols)
            atoms.set_positions(atoms_positions)
            atoms.set_calculator(
                SinglePointCalculator(energy, forces, None, None, atoms))
            images += [atoms]
            poscount += 1
        if 'HIPREC TOTAL-FORCE' in line:
            # High-precision force block; overrides the forces parsed from
            # the POSITION block for the latest image.
            forces = []
            for line in data[n + 2:n + 2 + natoms]:
                fields = line.split()
                force = []
                for i in range(3):
                    force.append(float(fields[i]))
                forces.append(force)
            images[-1].calc.forces = np.array(forces)

    # return requested images, code borrowed from ase/io/trajectory.py
    if isinstance(index, int):
        return images[index]
    elif index == 'all':
        return images
    else:
        # index is a slice; normalize negative/missing bounds manually.
        step = index.step or 1
        if step > 0:
            start = index.start or 0
            if start < 0:
                start += len(images)
            stop = index.stop or len(images)
            if stop < 0:
                stop += len(images)
        else:
            if index.start is None:
                start = len(images) - 1
            else:
                start = index.start
                if start < 0:
                    start += len(images)
            if index.stop is None:
                stop = -1
            else:
                stop = index.stop
                if stop < 0:
                    stop += len(images)
        return [images[i] for i in range(start, stop, step)]
def write_vasp(filename, atoms, label='', direct=False, sort=None, symbol_count = None, long_format=True):
"""Method to write VASP position (POSCAR/CONTCAR) files.
Writes label, scalefactor, unitcell, # of various kinds of atoms,
positions in cartesian or scaled coordinates (Direct), and constraints
to | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
import os
import yaml
import numpy as np
from astropy.io import fits
from astropy.time import Time
from astropy.table import Table, QTable
from astropy import units as apu
from astropy.constants import c as light_speed
from .math_functions import rms
from .aperture import e_rs, phase
__all__ = [
'extract_data_pyoof', 'extract_data_effelsberg', 'str2LaTeX',
'store_data_csv', 'uv_ratio', 'store_data_ascii', 'table_pyoof_out'
]
def extract_data_pyoof(pathfits):
    """
    Extracts data from the `~pyoof` default FITS file OOF holography
    observations, ready to use for the least squares minimization (see
    `~pyoof.fit_zpoly`). The FITS file has to have the following keys on its
    PrimaryHDU header: ``'FREQ'``, ``'WAVEL'``, ``'MEANEL'``, ``'OBJECT'`` and
    ``'DATE_OBS'``. Besides this three BinTableHDU are required for the data
    itself; ``MINUS OOF``, ``ZERO OOF`` and ``PLUS OOF``. The BinTableHDU
    header has to have the ``'DZ'`` key which includes the radial offset,
    :math:`d_z`. Finally the BinTableHDU has the data files ``'U'``, ``'V'``
    and ``'BEAM'``, which is the :math:`x`- and :math:`y`-axis position in
    radians and the ``'BEAM'`` in a flat array, in mJy.

    Parameters
    ----------
    pathfits : `str`
        Path to the FITS file that contains the three beam maps pre-calibrated,
        using the correct PrimaryHDU and the three BinTableHDU (``MINUS OOF``,
        ``ZERO OOF`` and ``PLUS OOF``).

    Returns
    -------
    data_info : `list`
        ``[name, pthto, obs_object, obs_date, freq, wavel, d_z, meanel]``:
        name of the FITS file, path of the FITS file, observed object,
        observation date, frequency, wavelength, radial offset and mean
        elevation, respectively.
    data_obs : `list`
        ``[beam_data, u_data, v_data]``: the three beam observations
        (minus, zero and plus out-of-focus) and the beam axes, flat arrays.

    Raises
    ------
    `ValueError`
        If the input file ``pathfits`` is not a FITS file, or if one or more
        of the header keys are missing in the FITS file:
        ``['FREQ', 'WAVEL', 'MEANEL', 'OBJECT', 'DATE_OBS']``.
    """
    if os.path.splitext(pathfits)[1] != '.fits':
        raise ValueError('File must be a FITS file.')

    # path or directory where the FITS file is located
    pthto = os.path.split(pathfits)[0]
    # name of the fit file to fit (without the .fits extension)
    name = os.path.split(pathfits)[1][:-5]

    # 'with' fixes the original's leak: the HDU list was never closed.
    with fits.open(pathfits) as hdulist:
        if not all(
                k in hdulist[0].header
                for k in ['FREQ', 'WAVEL', 'MEANEL', 'OBJECT', 'DATE_OBS']
                ):
            raise ValueError('Not all needed keys found in FITS header.')

        freq = hdulist[0].header['FREQ'] * apu.Hz
        wavel = hdulist[0].header['WAVEL'] * apu.m
        meanel = hdulist[0].header['MEANEL'] * apu.deg
        obs_object = hdulist[0].header['OBJECT']
        obs_date = hdulist[0].header['DATE_OBS']

        # HDUs 1..3 are the minus, zero and plus out-of-focus beam maps.
        beam_data = np.array([hdulist[i].data['BEAM'] for i in range(1, 4)])
        u_data = np.array(
            [hdulist[i].data['U'] for i in range(1, 4)]) * apu.rad
        v_data = np.array(
            [hdulist[i].data['V'] for i in range(1, 4)]) * apu.rad
        d_z = np.array(
            [hdulist[i].header['DZ'] for i in range(1, 4)]) * apu.m

    data_file = [name, pthto]
    data_info = data_file + [obs_object, obs_date, freq, wavel, d_z, meanel]
    data_obs = [beam_data, u_data, v_data]

    return data_info, data_obs
def extract_data_effelsberg(pathfits):
    """
    Extracts data from the Effelsberg OOF holography observations, ready to
    use for the least squares minimization. This function will only work for
    the Effelsberg telescope beam maps.

    Parameters
    ----------
    pathfits : `str`
        Path to the FITS file that contains the three beam maps pre-calibrated,
        from the Effelsberg telescope.

    Returns
    -------
    data_info : `list`
        ``[name, pthto, obs_object, obs_date, freq, wavel, d_z, meanel]``:
        name of the FITS file, path of the FITS file, observed object,
        observation date, frequency, wavelength, radial offset and mean
        elevation, respectively.
    data_obs : `list`
        ``[beam_data, u_data, v_data]``: the three beam observations
        (minus, zero and plus out-of-focus) and the beam axes, flat arrays.

    Raises
    ------
    `ValueError`
        If the input file ``pathfits`` is not a FITS file.
    """
    if os.path.splitext(pathfits)[1] != '.fits':
        raise ValueError('File must be a FITS file.')

    # HDU order for OOF holography observations at Effelsberg:
    # minus, zero, plus out-of-focus.
    pos = [3, 1, 2]

    # 'with' fixes the original's leak: the HDU list was never closed.
    with fits.open(pathfits) as hdulist:
        # Observation frequency
        freq = hdulist[0].header['FREQ'] * apu.Hz
        wavel = light_speed / freq

        # Mean elevation
        meanel = hdulist[0].header['MEANEL'] * apu.deg
        obs_object = hdulist[0].header['OBJECT']    # observed object
        obs_date = hdulist[0].header['DATE_OBS']    # observation date
        d_z = np.array([hdulist[i].header['DZ'] for i in pos]) * apu.m

        beam_data = np.array([hdulist[i].data['fnu'] for i in pos])
        u_data = np.array([hdulist[i].data['DX'] for i in pos]) * apu.rad
        v_data = np.array([hdulist[i].data['DY'] for i in pos]) * apu.rad

    # path or directory where the FITS file is located
    pthto = os.path.split(pathfits)[0]
    # name of the fit file to fit (without the .fits extension)
    name = os.path.split(pathfits)[1][:-5]

    data_info = [name, pthto, obs_object, obs_date, freq, wavel, d_z, meanel]
    data_obs = [beam_data, u_data, v_data]

    return data_info, data_obs
def str2LaTeX(python_string):
    """
    Function that solves the underscore problem in a python string to
    :math:`\\LaTeX` string.

    Parameters
    ----------
    python_string : `str`
        String that needs to be changed.

    Returns
    -------
    LaTeX_string : `str`
        String with every ``_`` escaped as ``\\_``.
    """
    # str.replace does in one call what the original did with a manual
    # character-list loop.
    return python_string.replace('_', '\\_')
def store_data_csv(name, name_dir, order, save_to_csv):
    """
    Stores all important information in a CSV file after the least squares
    minimization has finished, `~pyoof.fit_zpoly`. All data will be stored in
    the ``pyoof_out/name-number`` directory, with ``name`` the name of the
    FITS file, and ``number`` the `~pyoof.fit_zpoly` number of code execution,
    i.e. the output data is never overwritten.

    Parameters
    ----------
    name : `str`
        File name of the FITS file to be optimized.
    name_dir : `str`
        Path to store all the CSV files. The files will depend on the order of
        the Zernike circle polynomial.
    order : `int`
        Order used for the Zernike circle polynomial, :math:`n`.
    save_to_csv : `list`
        It contains all data that will be stored. The list must have the
        following order, ``[beam_data, u_data, v_data, res_optim, jac_optim,
        grad_optim, phase, cov_ptrue, corr_ptrue]``.
    """
    headers = [
        'Normalized beam', 'u vector radians', 'v vector radians', 'Residual',
        'Jacobian', 'Gradient', 'Phase-error radians',
        'Variance-Covariance matrix (first row fitted parameters idx)',
        'Correlation matrix (first row fitted parameters idx)'
        ]
    # The first three names are order-independent; no need for f-strings
    # there (the original used placeholder-less f-strings).
    fnames = [
        'beam_data.csv', 'u_data.csv', 'v_data.csv',
        f'res_n{order}.csv', f'jac_n{order}.csv',
        f'grad_n{order}.csv', f'phase_n{order}.csv',
        f'cov_n{order}.csv', f'corr_n{order}.csv'
        ]

    # beam/u/v do not depend on the polynomial order, so they are written
    # only on the first run (order == 1).
    if order != 1:
        headers = headers[3:]
        fnames = fnames[3:]
        save_to_csv = save_to_csv[3:]

    for fname, header, data in zip(fnames, headers, save_to_csv):
        np.savetxt(
            fname=os.path.join(name_dir, fname),
            X=data,
            header=' '.join((header, name))
            )
def store_data_ascii(name, name_dir, order, params_solution, params_init):
    """
    Stores in an ``~astropy.table.table.Table`` format the parameters found by
    the least squares minimization (see `~pyoof.fit_zpoly`).

    Parameters
    ----------
    name : `str`
        File name of the FITS file to be optimized.
    name_dir : `str`
        Path to store all the csv files. The files will depend on the order of
        the Zernike circle polynomial (radial) order.
    order : `int`
        Order used for the Zernike circle polynomial, :math:`n`.
    params_solution : `~numpy.ndarray`
        Contains the best fitted parameters, the illumination function
        coefficients, ``I_coeff`` and the Zernike circle polynomial
        coefficients, ``K_coeff`` in one array.
    params_init : `~numpt.ndarray`
        Contains the initial parameters used in the least squares minimization
        to start finding the best fitted combination of them.
    """
    n = order
    # Number of Zernike coefficients up to radial order n.
    N_K_coeff = (n + 1) * (n + 2) // 2

    # (angular, radial) index pairs, ordered by radial degree.
    ln = [(j, i) for i in range(0, n + 1) for j in range(-i, i + 1, 2)]

    # Illumination-function parameters first, then one K(n, l) per pair.
    params_names = ['i_amp', 'c_dB', 'q', 'x_0', 'y_0']
    for idx in range(N_K_coeff):
        angular, radial = ln[idx]
        params_names.append(f'K({radial}, {angular})')

    # To store fit information and found parameters in ascii file
    tab = Table(
        data=[params_names, params_solution, params_init],
        names=['parname', 'parfit', 'parinit'],
        )
    tab.write(
        os.path.join(name_dir, f'fitpar_n{n}.csv'),
        overwrite=True
        )
def uv_ratio(u, v):
"""
Calculates the aspect ratio for the 3 power pattern plots, plus some
corrections for the text on it. Used in the `function` | |
<gh_stars>1-10
import tensorflow as tf
import numpy as np
import scipy.io.wavfile
import scipy.io
import scoping
BATCH_AXIS = 0 # B
LENGTH_AXIS = 1 # L
DEPTH_AXIS = 2 # D
CHANNELS_AXIS = -1 # C
def sliding_window(inp, frame_length, frame_shift, max_number_frames=None, padding="VALID", name=None):
    '''
    Runs a sliding window across a signal (of audio data, for example).
    Params:
    - inp: A signal tensor in BLC format where L is the number of SAMPLES
    - frame_length: The length of each frame in number of samples (a python integer)
    - frame_shift: How many samples to shift the window (a python integer)
    - max_number_frames: if given, keep only the first that many frames
    - padding: How to pad the ends, can be "SAME" or "VALID"
    Returns:
    - A BLDC signal tensor where L is the number of FRAMES and D is frame_length.
    '''
    assert len(inp.shape) == 3  # BLC
    name = scoping.adapt_name(name, "sliding_window")
    with tf.name_scope(name):
        # extract_image_patches needs 4-D input, so add a dummy trailing axis
        expanded = tf.expand_dims(inp, 3)
        lengths = [1, 1, 1, 1]
        shifts = [1, 1, 1, 1]
        lengths[LENGTH_AXIS] = frame_length
        shifts[LENGTH_AXIS] = frame_shift
        # Window the signal
        frames = tf.extract_image_patches(expanded, lengths, shifts, [1, 1, 1, 1], padding)
        # fixed: identity test (`is not None`) instead of `!= None`, which
        # would invoke __ne__ and can misbehave for tensor-like arguments
        if max_number_frames is not None:
            # Clip the signal to only be the first max_number_frames frames
            slice_lengths = [-1 if i != LENGTH_AXIS else tf.cast(max_number_frames, tf.int32) for i in range(4)]
            frames = tf.slice(frames, [0, 0, 0, 0], tf.stack(slice_lengths))
        frames = tf.transpose(frames, [0, 1, 3, 2])  # BLCD --> BLDC
        frames = tf.identity(frames, name=name)
    return frames
class UnsupportedWindowTypeException(Exception):
    """Raised by get_window() when an unrecognized window_type is requested."""
    pass
def magnitude(complex_spec, name=None):
    '''
    Get the raw magnitude spectrogram for a complex spectrogram
    (element-wise complex modulus via tf.abs).
    '''
    name = scoping.adapt_name(name, "magnitude")
    with tf.name_scope(name):
        return tf.abs(complex_spec, name=name)
def energy(complex_spec, name=None):
    '''
    Get the raw energy spectrogram for a complex spectrogram.
    Computed as x * conj(x) (= |x|^2), which is real-valued by
    construction; the cast to float64 drops the zero imaginary part.
    '''
    name = scoping.adapt_name(name, "energy")
    with tf.name_scope(name):
        return tf.cast(complex_spec * tf.conj(complex_spec), tf.float64, name=name)
def decibels(signal, name=None):
    '''
    Get the number of decibels (10 * log10(signal)) for a tensor of raw magnitudes.
    The log10 value is floored at -50 so zero-valued bins do not produce -inf.
    '''
    name = scoping.adapt_name(name, "decibels")
    with tf.name_scope(name):
        # NOTE(review): `name` is attached to the tf.maximum op, not to the
        # final scaled tensor that is returned — confirm this is intended.
        return 10 * tf.maximum(tf.log(signal) / np.log(10), -50, name=name)
def get_Nfft(frame_length):
    """Return the FFT size for a frame: the smallest power of 2 >= frame_length.

    Uses exact integer bit arithmetic instead of the previous
    np.ceil(log(n)/log(2)) computation, whose floating-point division can
    mis-round for exact powers of two (e.g. promote 512 to 1024 on some
    platforms).

    Params:
    - frame_length: positive python integer frame length
    Returns:
    - the next power of 2 >= frame_length, as a python int
    """
    n = int(frame_length)
    return 1 << max(n - 1, 0).bit_length()
def timeseries_to_spec(frames, frame_length, window_type='hamming', N_fft=None, zero_pad=True, name=None):
    '''
    Converts a timeseries to a spectrogram (preprocessing by removing the DC offset, zero padding,
    and applying a window function)
    Params:
    - frames: A BLDC tensor where L is the number of frames and D is the frame length
    - frame_length: python integer frame_length
    - window_type: the type of window (the same types supported as get_window)
    - N_fft: the number of FFT points to use (defaults to the next higher order of 2 after or equal to frame_length)
    - zero_pad: whether to zero_pad the frames to the next highest order of 2 for more efficient FFT
    Returns:
    N_fft, magnitude spec, energy spec, log magnitude spec (decibels), log energy spec (decibels)
    The spec is a BLDC tensor where L is the frame_length and D is the FFT bin count.
    FFT bin count is (N_fft or the next highest
    order of 2 above the frame_length) // 2 + 1 (to get the Nyquist frequency)
    '''
    name = scoping.adapt_name(name, "spec")
    with tf.name_scope(name):
        # The window to convolve the sample with
        window = tf.constant(get_window(window_type, frame_length).astype(np.float64), name="window")
        # Broadcast the 1-D window across the batch/length/channel axes
        frames = tf.multiply(frames, tf.reshape(window, [1 if i != DEPTH_AXIS else -1 for i in range(4)]), name="windowing")
        # Padding/clipping to N_fft
        if zero_pad:
            if N_fft is None:
                N_fft = get_Nfft(frame_length)
            if N_fft > frame_length:
                # Pad the frames to N_fft
                padding = [[0, 0] if i != DEPTH_AXIS else [0, N_fft - frame_length] for i in range(4)]
                frames = tf.pad(frames, padding, "CONSTANT")
            elif N_fft < frame_length:
                # Downsample the frames to N_fft
                assert(DEPTH_AXIS == 2)
                frames = tf.image.resize_images(frames, [tf.shape(frames)[1], N_fft])
        ## New FFT
        #frames = tf.cast(tf.transpose(frames, [0, 1, 3, 2]), tf.float32) # BLDC -> BLCD
        #mag_spec = tf.spectral.rfft(frames, fft_length=[N_fft] if N_fft is not None else None)
        #mag_spec = tf.cast(tf.transpose(mag_spec, [0, 1, 3, 2]), tf.float64) # BLCD -> BLDC
        # FFT
        # NOTE(review): if zero_pad is False and N_fft is None, the slice
        # below would receive N_fft=None — confirm callers always supply one.
        complex_frames = tf.complex(tf.cast(frames, tf.float32), tf.zeros(tf.shape(frames)))
        complex_frames = tf.transpose(complex_frames, [0, 1, 3, 2])  # BLDC -> BLCD
        spec = tf.fft(complex_frames)
        # Clip second half of spec: for real input the upper bins are
        # redundant conjugates; keep N_fft // 2 + 1 bins up to Nyquist
        complex_spec = tf.slice(spec, tf.stack([0, 0, 0, 0]), tf.stack([-1, -1, -1, N_fft // 2 + 1]), name=name)
        complex_spec = tf.transpose(complex_spec, [0, 1, 3, 2])  # BLCD -> BLDC
        complex_spec = tf.cast(complex_spec, tf.complex128)
        mag_spec = magnitude(complex_spec, name="magnitude_spec")
        energy_spec = tf.square(mag_spec, name="energy_spec")
        log_mag_spec = decibels(mag_spec, name="log_magnetude_spec")
        # log power (dB) = 2 * log magnitude (dB)
        log_energy_spec = 2 * log_mag_spec
        return N_fft, mag_spec, energy_spec, log_mag_spec, log_energy_spec
def apply_filterbank(spec, filter_bank, name=None):
    '''
    Params:
    - spec: BLDC where D is half the number of FFT bins (the lower half of the spec), the magnitude spectrum
    - filter_bank: [Half FFT bins, number filters] tensor, the filter bank
    Returns:
    - BLDC tensor where D is the number of filters, the filter bank features
    '''
    name = scoping.adapt_name(name, "apply_filterbank")
    with tf.name_scope(name):
        shape = tf.shape(spec)
        # the reshapes below hard-code the depth axis position
        assert(DEPTH_AXIS == 2)
        spec = tf.transpose(spec, [0, 1, 3, 2])  # BLCD
        # Collapse batch/length/channel into one axis so the whole bank can
        # be applied with a single matmul
        two_d = tf.reshape(spec, [-1, shape[DEPTH_AXIS]])  # *D
        feats = tf.matmul(two_d, filter_bank)  # *D'
        feats = tf.reshape(feats, [shape[0], shape[1], shape[3], -1])  # BLCD'
        feats = tf.transpose(feats, [0, 1, 3, 2])  # BLD'C
        return tf.identity(feats, name)
## UTILITY:
def get_window(window_type, N):
    '''
    Gets a window function to be applied to a timeseries via component-wise multiplication.
    Returns a numpy array which can be saved in the graph as a tf.constant.
    Params:
    - window_type: the type of window ('none'/None, 'hamming', and 'hanning' supported)
    - N: the length of the window in samples
    Returns:
    - The window function of size N as a 1D np array
    Raises:
    - UnsupportedWindowTypeException: for any other window_type
    Derived from a script by <NAME>
    '''
    # fixed: the 'none' window returns immediately (the original also
    # allocated a dead np.zeros result that every branch overwrote)
    if window_type == 'none' or window_type is None:
        return np.ones((N,))
    # omega spans one full period so the window tapers at both ends
    omega = np.linspace(0.0, 2.0 * np.pi, N)
    if window_type == 'hamming':
        return 0.54 - 0.46 * np.cos(omega)
    if window_type == 'hanning':
        return 0.5 - 0.5 * np.cos(omega)
    raise UnsupportedWindowTypeException()
def freq_to_mel(freq):
    '''
    Convert a frequency in Hz to a mel index
    (mel = 1125 * ln(1 + f / 700)).
    '''
    ratio = freq / 700.0
    return np.log(ratio + 1.0) * 1125.0
def mel_to_freq(mel):
    '''
    Convert the mel index to a frequency in Hz
    (the inverse of freq_to_mel: f = 700 * (exp(mel / 1125) - 1)).
    '''
    return (np.exp(mel / 1125.0) - 1.0) * 700.0
def mel_filterbank(N_fft, sample_rate, num_bands=23, low_freq=54, high_freq=None):
    '''
    Get the mel filterbank with the given dimensions as a numpy array.
    This can then be wrapped as a tf constant for use in tensorflow.
    The result has shape [N_fft // 2 + 1, num_bands]: one row per FFT bin,
    one triangular filter per column.
    '''
    if high_freq is None:
        high_freq = sample_rate / 2 - 1
    # Get the high frequency/low frequency for the bank
    # (clamped to [0, Nyquist])
    high_freq = float(min(high_freq, sample_rate / 2))
    low_freq = float(max(0, low_freq))
    # Convert the extremes to mel
    high_mel = freq_to_mel(high_freq)
    low_mel = freq_to_mel(low_freq)
    # Get the bins for the filterbank by linearly spacing in mel space
    # (num_bands + 2 edges: each band spans three consecutive edges)
    mels = np.linspace(low_mel, high_mel, num_bands + 2)
    # Convert the bins back to frequencies
    freqs = mel_to_freq(mels)
    # Convert the frequencies to FFT bins
    bins = np.ceil((freqs / float(sample_rate / 2) * (N_fft // 2))).astype(np.int32)
    # Make the triangular filters: a rising edge from bins[band] to
    # bins[band+1], then a falling edge to bins[band+2]
    fbank = np.zeros((N_fft // 2 + 1, num_bands))
    for band in range(num_bands):
        fbank[bins[band]:bins[band+1]+1,band] = np.linspace(0.0, 1.0, bins[band + 1] - bins[band] + 2)[1:]
        fbank[bins[band+1]:bins[band+2]+1,band] = np.linspace(1.0, 0.0, bins[band + 2] - bins[band + 1] + 2)[:-1]
    return fbank.astype(np.float64)
def constantify(x, name="constant"):
    '''
    Create a tf constant that is initialized with x and the given name,
    returns x and the new constant as a (x, constant) tuple
    '''
    return x, tf.constant(x, name=name)
def variableify(x, name="variable"):
    '''
    Create a tf variable that is initialized with x and the given name,
    returns x and the new variable as a (x, variable) tuple.
    The variable is non-trainable (excluded from optimizer updates).
    '''
    return x, tf.Variable(x, name=name, trainable=False)
def dct_matrix(filterbank_size=23, mfcc_size=13):
    '''
    Get a DCT matrix for converting MFSCs -> MFCCs.
    Entry [j, i] of the returned [filterbank_size, mfcc_size] array is
    cos(pi * i * (j + 0.5) / filterbank_size).
    '''
    # Build the [mfcc_size, filterbank_size] matrix in one vectorized
    # broadcast instead of a double python loop, then transpose.
    rows = np.arange(mfcc_size, dtype=np.float64).reshape(-1, 1)
    cols = np.arange(filterbank_size, dtype=np.float64) + 0.5
    return np.cos(np.pi * rows * cols / np.float64(filterbank_size)).T
def moving_average(a, n=3):
    """Simple moving average of width ``n`` over ``a`` (valid region only:
    len(a) - n + 1 output points)."""
    totals = np.cumsum(a, dtype=float)
    # difference of cumulative sums gives each window's total
    totals[n:] = totals[n:] - totals[:-n]
    return totals[n - 1:] / n
def remove_dc(signal, signal_lengths, signal_mask, last_sof=None, last_sin=None, online=True, name=None):
name = scoping.adapt_name(name, "remove_dc")
with tf.variable_scope(name):
if online:
batch_size = tf.shape(signal)[0]
length = tf.shape(signal)[1]
channels | |
import datetime
import enum
import pickle
import sys
from decimal import Decimal
from typing import Optional
python_ver_atleast_than_37 = sys.version_info[0:2] > (3, 6)
if python_ver_atleast_than_37:
from dataclasses import dataclass
import pytest
from pytest import raises
from typedpy import (
ImmutableStructure,
NoneField,
SerializableField,
Structure,
Array,
Number,
String,
Integer,
StructureReference,
AllOf,
deserialize_structure,
Enum,
Float,
mappers,
serialize,
Set,
AnyOf,
DateField,
Anything,
Map,
Function,
PositiveInt,
DecimalNumber,
)
from typedpy.extfields import DateTime
from typedpy import serialize_field
from typedpy.serialization import FunctionCall, HasTypes
from typedpy.serialization_wrappers import Serializer, Deserializer
class SimpleStruct(Structure):
    # name must be alphabetic only and at most 8 characters long
    name = String(pattern="[A-Za-z]+$", maxLength=8)
class Point:
    """Plain (non-typedpy) 2-D point, used to exercise serialization of
    arbitrary user classes."""

    def __init__(self, x, y):
        self._y = y
        self._x = x
class Example(Structure):
    # Exercises a wide spread of typedpy field kinds in one structure.
    i = Integer(maximum=10)
    s = String(maxLength=5)
    array = Array[Integer(multiplesOf=5), Number]
    embedded = StructureReference(a1=Integer(), a2=Float())
    simple_struct = SimpleStruct
    all = AllOf[Number, Integer]
    enum = Enum(values=[1, 2, 3])
    # Point is a plain class, not a typedpy Structure
    points = Array[Point]
    _optional = ["points"]
@pytest.fixture()
def serialized_source():
    """Canonical serialized (plain-dict) form of an Example instance."""
    return {
        "i": 5,
        "s": "test",
        "array": [10, 7],
        "embedded": {"a1": 8, "a2": 0.5},
        "simple_struct": {"name": "danny"},
        "all": 5,
        "enum": 3,
    }
@pytest.fixture()
def example(serialized_source):
    """An Example structure deserialized from serialized_source."""
    return deserialize_structure(Example, serialized_source)
def test_successful_deserialization_with_many_types(serialized_source, example):
example = deserialize_structure(Example, serialized_source)
result = serialize(example)
assert result == serialized_source
def test_deserialization_with_non_typedpy_wrapper_can_be_inconsistent(
serialized_source, example
):
serialized_source["points"] = [{"x": 1, "y": 2}]
example = deserialize_structure(Example, serialized_source)
result = serialize(example)
assert result["points"][0] != serialized_source["points"][0]
def test_some_empty_fields():
    """Fields that were never set are omitted from the serialized output."""
    class Foo(Structure):
        a = Integer
        b = String
        _required = []

    foo = Foo(a=5)
    assert serialize(foo) == {"a": 5}
def test_null_fields():
    """Attributes explicitly set to None are omitted, like unset fields."""
    class Foo(Structure):
        a = Integer
        b = String
        _required = []

    foo = Foo(a=5, c=None)
    assert serialize(foo) == {"a": 5}
def test_serialize_set():
class Foo(Structure):
a = Set()
foo = Foo(a={1, 2, 3})
assert serialize(foo) == {"a": [1, 2, 3]}
def test_string_field_wrapper_compact():
class Foo(Structure):
st = String
_additionalProperties = False
foo = Foo(st="abcde")
assert serialize(foo, compact=True) == "abcde"
def test_string_field_wrapper_not_compact():
class Foo(Structure):
st = String
_additionalProperties = False
foo = Foo(st="abcde")
assert serialize(foo, compact=False) == {"st": "abcde"}
def test_set_field_wrapper_compact():
class Foo(Structure):
s = Array[AnyOf[String, Number]]
_additionalProperties = False
foo = Foo(s=["abcde", 234])
assert serialize(foo, compact=True) == ["abcde", 234]
def test_serializable_serialize_and_deserialize():
from datetime import date
class Foo(Structure):
d = Array[DateField(date_format="%y%m%d")]
i = Integer
foo = Foo(d=[date(2019, 12, 4), "191205"], i=3)
serialized = serialize(foo)
assert serialized == {"d": ["191204", "191205"], "i": 3}
deserialized = deserialize_structure(Foo, serialized)
assert deserialized == Foo(i=3, d=[date(2019, 12, 4), date(2019, 12, 5)])
def test_serialize_map_without_any_type_definition():
class Bar(Structure):
m = Map()
a = Integer
original = Bar(a=3, m={"abc": Bar(a=2, m={"x": "xx"}), "bcd": 2})
serialized = serialize(original)
pickled = pickle.dumps(serialized)
assert type(serialized["m"]) == dict
assert type(serialized["m"]["abc"]) == dict
assert type(serialized["m"]["abc"]["m"]) == dict
def test_pickle_with_map_without_any_type_definition():
class Bar(Structure):
m = Map()
a = Integer
original = Bar(a=3, m={"abc": Bar(a=2, m={"x": "xx"}), "bcd": 2})
serialized = serialize(original)
unpickeled = pickle.loads(pickle.dumps(serialized))
deserialized = Deserializer(target_class=Bar).deserialize(unpickeled)
# there is no info on the fact that deserialized.m['abc'] should be converted to a Bar instance, so
# we convert it to a simple dict, to make it straight forward to compare
original.m["abc"] = Serializer(original.m["abc"]).serialize()
assert deserialized == original
def test_serializable_serialize_and_deserialize2():
from datetime import datetime
class Foo(Structure):
d = Array[DateTime]
i = Integer
atime = datetime(2020, 1, 30, 5, 35, 35)
atime_as_string = atime.strftime("%m/%d/%y %H:%M:%S")
foo = Foo(d=[atime, "01/30/20 05:35:35"], i=3)
serialized = serialize(foo)
assert serialized == {"d": [atime_as_string, "01/30/20 05:35:35"], "i": 3}
deserialized = deserialize_structure(Foo, serialized)
assert str(deserialized) == str(
Foo(i=3, d=[atime, datetime(2020, 1, 30, 5, 35, 35)])
)
def test_serializable_serialize_and_deserialize_of_a_non_serializable_value():
from datetime import datetime
class Foo(Structure):
d = DateTime
i = Integer
atime = datetime(2020, 1, 30, 5, 35, 35)
foo = Foo(d=atime, i=3, x=atime)
with raises(ValueError) as excinfo:
serialize(foo)
# this is to cater to Python 3.6
assert "x: cannot serialize value" in str(excinfo.value)
assert "not JSON serializable" in str(excinfo.value)
def test_serialize_map():
class Foo(Structure):
m1 = Map[String, Anything]
m2 = Map
i = Integer
foo = Foo(m1={"a": [1, 2, 3], "b": 1}, m2={1: 2, "x": "b"}, i=5)
serialized = serialize(foo)
assert serialized["m1"] == {"a": [1, 2, 3], "b": 1}
def test_serialize_field_basic_field(serialized_source, example):
assert serialize_field(Example.array, example.array) == serialized_source["array"]
def test_serialize_wrong_value():
with raises(TypeError) as excinfo:
serialize({"abc": 123})
assert (
"serialize: Not a Structure or Field that with an obvious serialization."
" Got: {'abc': 123}. Maybe try serialize_field() instead?" in str(excinfo.value)
)
def test_serialize_with_structured_reference(example, serialized_source):
assert serialize(example.embedded) == serialized_source["embedded"]
def test_serialize_with_array(example, serialized_source):
assert serialize(example.array) == serialized_source["array"]
def test_serialize_with_class_reference(example, serialized_source):
assert serialize(example.simple_struct) == serialized_source["simple_struct"]
def test_serialize_with_map():
class Foo(Structure):
m = Map[String, Anything]
original = {"a": [1, 2, 3], "b": 1}
foo = Foo(m=original)
assert serialize(foo.m) == original
def test_serialize_with_anything_field():
class Foo(Structure):
m = Map[String, Anything]
original = {"a": [1, 2, 3], "b": 1}
foo = Foo(m=original)
assert serialize(foo.m) == original
def test_serialize_with_number(example, serialized_source):
assert serialize(example.i) == serialized_source["i"]
def test_serialize_field_complex_field():
class Foo(Structure):
a = String
i = Integer
class Bar(Structure):
x = Float
foos = Array[Foo]
bar = Bar(x=0.5, foos=[Foo(a="a", i=5), Foo(a="b", i=1)])
assert serialize_field(Bar.foos, bar.foos)[0]["a"] == "a"
assert serialize_field(Array[Foo], bar.foos)[0]["a"] == "a"
assert serialize(bar.foos)[0]["a"] == "a"
def test_serialize_non_typedpy_attribute():
class Foo(Structure):
a = String
i = Integer
foo = Foo(a="a", i=1)
foo.x = {"x": 1, "s": "abc"}
assert serialize(foo)["x"] == {"x": 1, "s": "abc"}
def test_serialize_with_mapper_to_different_keys():
class Foo(Structure):
a = String
i = Integer
foo = Foo(a="string", i=1)
mapper = {"a": "aaa", "i": "iii"}
assert serialize(foo, mapper=mapper) == {"aaa": "string", "iii": 1}
def test_serialize_with_mapper_to_different_keys_in_array():
class Foo(Structure):
a = String
i = Integer
class Bar(Structure):
wrapped = Array[Foo]
bar = Bar(wrapped=[Foo(a="string1", i=1), Foo(a="string2", i=2)])
mapper = {"wrapped._mapper": {"a": "aaa", "i": "iii"}, "wrapped": "other"}
serialized = serialize(bar, mapper=mapper)
assert serialized == {
"other": [{"aaa": "string1", "iii": 1}, {"aaa": "string2", "iii": 2}]
}
def test_serialize_with_deep_mapper():
class Foo(Structure):
a = String
i = Integer
class Bar(Structure):
foo = Foo
array = Array
class Example(Structure):
bar = Bar
number = Integer
example = Example(number=1, bar=Bar(foo=Foo(a="string", i=5), array=[1, 2]))
mapper = {"bar._mapper": {"foo._mapper": {"i": FunctionCall(func=lambda x: x * 2)}}}
serialized = serialize(example, mapper=mapper)
assert serialized == {
"number": 1,
"bar": {"foo": {"a": "string", "i": 10}, "array": [1, 2]},
}
def test_serialize_with_deep_mapper_camel_case():
class Foo(Structure):
a = String
i_num = Integer
c_d = Integer
class Bar(Structure):
foo_bar = Foo
array_one = Array
class Example(Structure):
bar = Bar
number = Integer
example = Example(
number=1, bar=Bar(foo_bar=Foo(a="string", i_num=5, c_d=2), array_one=[1, 2])
)
mapper = {
"bar._mapper": {
"foo_bar._mapper": {
"c_d": "cccc",
"i_num": FunctionCall(func=lambda x: x * 2),
}
}
}
serialized = serialize(example, mapper=mapper, camel_case_convert=True)
assert serialized == {
"number": 1,
"bar": {"fooBar": {"a": "string", "iNum": 10, "cccc": 2}, "arrayOne": [1, 2]},
}
def test_serialize_with_camel_case_setting():
class Bar(Structure):
bar_bar = String
_serialization_mapper = mappers.TO_LOWERCASE
class Foo(Structure):
a = String
i_num = Integer
cba_def_xyz = Integer
bar = Bar
_serialization_mapper = mappers.TO_CAMELCASE
foo = Foo(i_num=5, a="xyz", cba_def_xyz=4, bar=Bar(bar_bar="abc"))
assert Serializer(foo).serialize() == {
"a": "xyz",
"iNum": 5,
"cbaDefXyz": 4,
"bar": {"BARBar": "abc"},
}
def test_serialize_with_deep_mapper_camel_case_setting():
class Foo(Structure):
a = String
i_num = Integer
c_d = Integer
class Bar(Structure):
foo_bar = Foo
array_one = Array
class Example(Structure):
bar = Bar
number = Integer
_serialization_mapper = mappers.TO_CAMELCASE
example = Example(
number=1, bar=Bar(foo_bar=Foo(a="string", i_num=5, c_d=2), array_one=[1, 2])
)
mapper = {
"bar._mapper": {
"foo_bar._mapper": {
"c_d": "cccc",
"i_num": FunctionCall(func=lambda x: x * 2),
}
}
}
serialized = serialize(example, mapper=mapper)
assert serialized == {
"number": 1,
"bar": {
"array_one": [1, 2],
"foo_bar": {"a": "string", "cccc": 2, "i_num": 10},
},
}
serialized = serialize(example, mapper=mapper, camel_case_convert=True)
assert serialized == {
"bar": {"arrayOne": [1, 2], "fooBar": {"a": "string", "cccc": 2, "iNum": 10}},
"number": 1,
}
def test_serialize_with_mapper_with_functions():
def my_func():
pass
class Foo(Structure):
function = Function
i = Integer
foo = Foo(function=my_func, i=1)
mapper = {
"function": FunctionCall(func=lambda f: f.__name__),
"i": FunctionCall(func=lambda x: x + 5),
}
assert serialize(foo, mapper=mapper) == {"function": "my_func", "i": 6}
def test_serialize_with_mapper_with_functions_null():
class Foo(Structure):
function: str
i: int
_serialization_mapper = {
"function": FunctionCall(func=lambda f: f"--{f}--" if f else "unknown"),
"i": FunctionCall(
func=lambda x: x + 5 if x is not None else 999, args=["i"]
),
}
foo = Deserializer(Foo).deserialize({"i": None, "function": None})
assert foo == Foo(function="unknown", i=999)
def test_serialize_with_mapper_with_function_converting_types():
class Foo(Structure):
num = Float
i = Integer
foo = Foo(num=5.5, i=999)
mapper = {
"num": FunctionCall(func=lambda f: [int(f)]),
"i": FunctionCall(func=lambda x: str(x)),
}
assert serialize(foo, mapper=mapper) == {"num": [5], "i": "999"}
def test_serialize_with_mapper_with_function_with_args():
class Foo(Structure):
f = Float
i = Integer
foo = Foo(f=5.5, i=999)
mapper = {
"f": FunctionCall(func=lambda f: [int(f)], args=["i"]),
"i": FunctionCall(func=lambda x: str(x), args=["f"]),
}
assert serialize(foo, mapper=mapper) == {"f": [999], "i": "5.5"}
def test_serialize_invalid_mapper_type():
class Foo(Structure):
i = | |
__getitem__(self, key):
try:
t = self.types[key]
except(KeyError) as err:
raise TypeNotFoundError(key)
return t
class Token:
    """A leaf of a derivation tree: one surface token together with its
    lexical entry, chart vertices (start/end), character offsets
    (from_char/to_char) and an optional cached derivation-string span."""

    def __init__(self, string, lex_entry, start, end, from_char, to_char, span=None):
        self.span = span
        self.string = string
        self.lex_entry = lex_entry
        self.start = start
        self.end = end
        self.from_char = from_char
        self.to_char = to_char
class Tree:
    """A node of a DELPH-IN derivation tree.

    Children are either Tree nodes or (at the leaves) Token instances.
    start/end are chart vertices; span optionally caches the slice of the
    derivation string this node was parsed from.
    """

    def __init__(self, label, start, end, span=None):
        self.label = label
        self.start = start
        self.end = end
        self.span = span
        self.children = []

    def process(self, lex_lookup=None):
        """
        Initial processing of tree, extracting tokens and all nodes,
        adjusting the label of penultimate nodes to be lextypes if a
        lex_lookup function is specified.
        Returns (tokens, nodes) and annotates every node and token with
        'depth' and 'parent' attributes.
        """
        nodes = []
        tokens = []
        stack = [self]
        self.depth = 0
        self.parent = None
        while len(stack) > 0:
            node = stack.pop()
            # NOTE(review): assumes every Tree node has at least one child
            child1 = node.children[0]
            nodes.append(node)
            child_depth = node.depth + 1
            if type(child1) is Token:
                # preterminal node: its single child is the surface token
                child1.depth = child_depth
                child1.parent = node
                tokens.append(child1)
                if lex_lookup is not None:
                    node.label = lex_lookup(child1.lex_entry)
            else:
                for n in node.children:
                    n.parent = node
                    n.depth = child_depth
                stack.extend(node.children)
        # the stack-based traversal collects tokens right-to-left
        tokens.reverse()
        return tokens, nodes

    def ptb(self):
        """Returns a pseudo Penn Treebank style tree of the derivation.
        'Pseudo' because currently the only PTB normalization done is
        for round parentheses."""
        return self._ptb(self)

    def _ptb(self, subtree):
        # Recursive worker for ptb()
        if type(subtree) is Token:
            val = subtree.string.replace('(', '-LRB-')
            val = val.replace(')', '-RRB-')
            return '{} '.format(val)
        else:
            children = ('{}'.format(self._ptb(x)) for x in subtree.children)
            return '({} {})'.format(subtree.label.upper(), ' '.join(children))

    def tokens(self):
        """Get the tokens of this tree."""
        tokens = []
        stack = [self]
        while len(stack) > 0:
            node = stack.pop()
            child1 = node.children[0]
            if type(child1) is Token:
                tokens.append(child1)
            else:
                stack.extend(node.children)
        tokens.reverse()
        return tokens

    def pprint(self, **kwargs):
        """Returns a pretty-printed representation of the tree.
        Requires the nltk module. See
        http://www.nltk.org/_modules/nltk/tree.html."""
        from nltk import Tree as NLTKTree
        tree = NLTKTree.fromstring(self.ptb())
        return tree.pprint(**kwargs)

    def latex(self):
        """Returns a representation of the tree compatible with the
        LaTeX qtree package. Requires the nltk module. See
        http://www.nltk.org/_modules/nltk/tree.html."""
        from nltk import Tree as NLTKTree
        # fixed: raw strings so '\[' is not an invalid escape sequence
        # (same runtime value, silences the DeprecationWarning)
        string = self.ptb().replace('[', r'\[').replace(']', r'\]')
        tree = NLTKTree.fromstring(string)
        latex = tree.pformat_latex_qtree()
        return latex.replace('-LRB-', '(').replace('-RRB-', ')')

    def draw(self):
        """Open the tree in nltk's graphical tree viewer."""
        from nltk import Tree as NLTKTree
        NLTKTree.fromstring(self.ptb()).draw()

    @property
    def input(self):
        """The surface string: the tree's tokens joined with spaces."""
        return ' '.join(t.string for t in self.tokens())

    @property
    def derivation(self):
        """The derivation rendered from the cached node spans."""
        return self._derivation(self)

    def _derivation(self, subtree):
        # Recursive worker for the derivation property
        if type(subtree) is Token:
            return '({})'.format(subtree.span)
        else:
            children = ('{}'.format(self._derivation(x)) for x in subtree.children)
            return '({} {})'.format(subtree.span, ' '.join(children))
def parse_derivation(derivation, cache=False):
    """Parse a DELPH-IN derivation string, returning a Tree object.
    If cache is true, the Tree instances will each store the
    relevant span of the derivation string in the attribute 'span'."""
    # Stash escaped quotes first so they cannot confuse the node regex
    escape_str = '__ESC__'
    der_string = derivation.replace('\\"', escape_str)
    node_re = r'("[^"]+"|[^()"]+)+'
    span_re = re.compile('\({}|\)'.format(node_re))
    # Walk through each span, updating a stack of trees.
    # Where a span is either lparen + node or rparen.
    # The stack is a list of Trees and Tokens
    # (stack[0] is a sentinel that must end up with exactly one child)
    stack = [Tree(None, None, None)]
    for match in span_re.finditer(der_string):
        span = match.group()
        # Leaf node
        if span[:2] == '("':
            if len(stack) == 1:
                parse_error(der_string, match, '(')
            # the quoted surface string of the token
            chars = span.split('"', 2)[1]
            chunks = span.split()
            try:
                from_char = int(chunks[chunks.index('+FROM')+1].replace(escape_str, ''))
            except ValueError:
                # re-entrency thing found in ERG 1214 gold trees
                from_char = int(chunks[chunks.index('+FROM')+1].replace(escape_str, '').split('=')[-1])
            # for multi word tokens, we need to get the *last* +TO value
            list_rindex = lambda x: len(chunks) - chunks[-1::-1].index(x) - 1
            to_char = int(chunks[list_rindex('+TO')+1].replace(escape_str, ''))
            # the lexical node currently on top of the stack is the parent
            lex = stack[-1]
            if cache:
                cachespan = span[1:].strip().replace(escape_str, '\\"')
            else:
                cachespan = None
            token = Token(chars, lex.label, lex.start, lex.end, from_char,
                          to_char, span=cachespan)
            stack.append(token)
        # Beginning of a tree/subtree
        elif span[0] == '(':
            if len(stack) == 1 and len(stack[0].children) > 0:
                parse_error(der_string, match, 'end-of-string')
            atts = span[1:].strip().split()
            if len(atts) > 1:
                # full node: label at atts[1], chart vertices at atts[3:5]
                # NOTE(review): atts[0] and atts[2] are skipped — presumably
                # the edge id and score; confirm against the format.
                label = atts[1]
                start = int(atts[3])
                end = int(atts[4])
                if cache:
                    cachespan = span[1:].strip().replace(escape_str, '\\"')
                else:
                    cachespan = None
                node = Tree(label, start, end, span=cachespan)
            elif len(atts) == 1:
                # initial root condition node, not found in ACE
                # derivations set start and end to -1 to make this
                # detectable.
                label = atts[0]
                node = Tree(label, -1, -1, span=label)
            else:
                parse_error(der_string, match, 'empty-node')
            stack.append(node)
        # End of a subtree
        elif span == ')':
            if len(stack) == 1:
                if len(stack[0].children) == 0:
                    parse_error(der_string, match, '(')
                else:
                    parse_error(der_string, match, 'end-of-string')
            node = stack.pop()
            stack[-1].children.append(node)
    # check that we got exactly one complete tree.
    if len(stack) > 1:
        parse_error(der_string, 'end-of-string', ')')
    elif len(stack[0].children) == 0:
        parse_error(der_string, 'end-of-string', '(')
    else:
        assert stack[0].label is None
        assert len(stack[0].children) == 1
        return stack[0].children[0]
def parse_error(string, match, expecting):
    """Construct a basic error message and raise DerivationError.
    'match' is either a regex match object at the failure point or the
    literal string 'end-of-string'."""
    if match == 'end-of-string':
        pos, span = len(string), 'end-of-string'
    else:
        pos, span = match.start(), match.group()
    msg = 'Parsing error: expected %r but got %r\n%sat index %d.' % (
        expecting, span, ' '*12, pos)
    # Add a display showing the error span itself:
    s = string.replace('\n', ' ').replace('\t', ' ')
    offset = pos
    if len(s) > pos+10:
        s = s[:pos+10]+'...'
    if pos > 10:
        # window clipped on the left, so the caret sits at a fixed offset
        s = '...'+s[pos-10:]
        offset = 13
    msg += '\n%s"%s"\n%s^' % (' '*16, s, ' '*(17+offset))
    raise DerivationError(msg)
def ace_parse(input_str, ace_path, grammar, count, yy_input=False,
              fragments=False, tnt=False, short_labels=False):
    """Run the ACE parser over input_str and return its decoded
    (stdout, stderr).
    Params:
    - input_str: the text (or YY token lattice if yy_input) to parse
    - ace_path: path to the ACE binary
    - grammar: object providing dat_path and alias attributes
    - count: maximum number of analyses to request (None = ACE default)
    - yy_input: pass -y so ACE expects YY input
    - fragments: if False, restrict ERG-style grammars to non-fragment roots
    - tnt: enable TnT POS tagging (a LOGON install is preferred if present)
    - short_labels: pass --report-labels to ACE
    Raises:
    - AceError: when ACE exits non-zero or skips the input
    """
    env = dict(os.environ)
    #env['LC_ALL'] = 'en_US.UTF-8'
    #env['LANG'] = 'en_US.UTF-8'
    args = [ace_path, '-g', grammar.dat_path]
    alias = grammar.alias
    if short_labels:
        args.append('--report-labels')
    if tnt:
        # If we have a logon installation, set PATH and model path
        # to use this. Otherwise use the tnt tagger packaged with grammalytics
        if os.path.exists(LOGONBIN):
            env['PATH'] = "{}:{}".format(os.environ['PATH'], LOGONBIN)
            model_path = os.path.join(LOGONROOT, 'coli', 'tnt', 'models', 'wsj.tnt')
        else:
            thisdir = os.path.dirname(os.path.realpath(__file__))
            taggerpath = os.path.join(thisdir, '..', 'tagger')
            taggerbin = os.path.join(taggerpath, 'bin')
            env['PATH'] = "{}:{}".format(os.environ['PATH'], taggerbin)
            model_path = os.path.join(taggerpath, 'coli', 'tnt', 'models', 'wsj.tnt')
        args.append('--tnt-model')
        args.append(model_path)
    if count is not None:
        args.append('-n')
        args.append(str(count))
    if yy_input:
        args.append('-y')
    if not fragments:
        # restrict root conditions so fragment analyses are excluded
        if alias in ('erg1212', 'erg1212-speech'):
            args.append('-r')
            args.append('root_strict root_informal')
        elif alias.startswith('erg') or alias.startswith('terg'):
            args.append('-r')
            args.append('root_strict root_informal root_bridge')
    process = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
    out, err = process.communicate(input=input_str.encode('utf8'))
    out = out.decode('utf8')
    err = err.decode('utf8')
    if process.returncode != 0 or out.startswith('SKIP'):
        ace_error_str = ''.join([out, err])
        raise AceError('ACE', ace_error_str, input=input_str)
    return out, err
def load_hierarchy(xmlfile_path, save_pickle=False):
    """Load the pickled version of the hierarchy. If there is none,
    load the hierarchy and also save a pickle of it if save_pickle is
    True.

    Params:
    - xmlfile_path: path to the hierarchy XML file; the pickle lives next
      to it with a '.pickle' extension
    - save_pickle: write the freshly built hierarchy back as a pickle
    Returns:
    - a TypeHierarchy instance
    """
    root = os.path.splitext(xmlfile_path)[0]
    pickle_path = root + '.pickle'
    try:
        with open(pickle_path, 'rb') as f:
            hierarchy = pickle.load(f)
    except(IOError) as e:
        hierarchy = TypeHierarchy(xmlfile_path)
        if save_pickle:
            # deep hierarchies can exceed the default limit while pickling
            sys.setrecursionlimit(10000)
            # fixed: the dump file handle was previously opened inline and
            # never closed; use a context manager so it is flushed and closed
            with open(pickle_path, 'wb') as f:
                pickle.dump(hierarchy, f)
    return hierarchy
def lookup_hierarchy(arg):
    """CLI helper: load a type hierarchy and print the supers or children
    of every type listed (one per line) in the file arg.types.
    'arg' is an argparse-style namespace with 'hierarchy', 'types' and
    'query' ('supers' or 'children') attributes."""
    cwd = os.getcwd()
    hierarchy = TypeHierarchy(os.path.join(cwd, arg.hierarchy))
    with open(os.path.join(cwd, arg.types)) as f:
        candidates = [l.strip() for l in f.read().splitlines()]
    # drop blank lines
    candidates = [l for l in candidates if l != '']
    # NOTE(review): any query value other than these two leaves 'found'
    # unbound and raises NameError below — confirm the CLI restricts it.
    if arg.query == "supers":
        found = hierarchy.get_supers(candidates)
    elif arg.query == "children":
        found = hierarchy.get_children(candidates)
    print("\n".join(t.name for t in found))
def get_supers(types, hierarchy):
    """Given a list of types, return a set containing every ancestor of
    all the input types. GLBs are resolved using the function
    resolve_glbs.
    String-valued names (starting with '"') are skipped; names missing
    from the hierarchy are reported on stderr instead of raising."""
    supers = []
    for t in types:
        if t.startswith('"'):
            # don't bother looking up strings
            continue
        try:
            # a leading '^' is stripped before lookup
            t = hierarchy[t.lstrip('^')]
        except(TypeNotFoundError) as e:
            msg = "Did not find '{}' in the type hierarchy'\n".format(t)
            sys.stderr.write(msg)
        else:
            # this needs to be outside of the try so errors
            # thrown by ancestors are not caught
            for s in t.ancestors():
                supers.append(s.name)
    return set(resolve_glbs(supers, hierarchy))
def resolve_glbs(types, hierarchy):
    """
    Return ``types`` as a set with every GLB type (name starting with
    'glb') removed and replaced by its nearest non-GLB ancestors, found
    by walking parent links transitively through ``hierarchy``.
    """
    resolved = [t for t in types if not t.startswith('glb')]
    pending = {t for t in types if t.startswith('glb')}
    while pending:
        glb = pending.pop()
        for parent in hierarchy[glb].parents:
            if parent.name.startswith('glb'):
                pending.add(parent.name)
            else:
                resolved.append(parent.name)
    return set(resolved)
def tsdb_query(query, profile):
"""Perform a query using the tsdb commandline program. The query
| |
+ self.fbe_size) > self._buffer.size:
return 0
fbe_struct_offset = self.read_uint32(self.fbe_offset)
if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4) > self._buffer.size):
return 0
self._buffer.shift(fbe_struct_offset)
fbe_result = self.fbe_body \
+ self.id.fbe_extra \
+ self.name.fbe_extra \
+ self.state.fbe_extra \
+ self.wallet.fbe_extra \
+ self.asset.fbe_extra \
+ self.orders.fbe_extra \
self._buffer.unshift(fbe_struct_offset)
return fbe_result
# Get the field type
@property
def fbe_type(self):
    return self.TYPE

# Struct type identifier written into / checked against the serialized
# header (see verify()); baked in by the FBE code generator
TYPE = 3
# Check if the struct value is valid
def verify(self, fbe_verify_type=True):
    """Validate the struct this field points to without deserializing it.
    Returns True when the field area is absent/out of range or the value
    is valid; False for a broken offset, undersized header, or (when
    fbe_verify_type) a mismatched struct type id."""
    if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
        return True
    fbe_struct_offset = self.read_uint32(self.fbe_offset)
    if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4 + 4) > self._buffer.size):
        return False
    fbe_struct_size = self.read_uint32(fbe_struct_offset)
    if fbe_struct_size < (4 + 4):
        # must at least hold its own 4-byte size and 4-byte type header
        return False
    fbe_struct_type = self.read_uint32(fbe_struct_offset + 4)
    if fbe_verify_type and (fbe_struct_type != self.fbe_type):
        return False
    # temporarily rebase the buffer onto the struct body for field checks
    self._buffer.shift(fbe_struct_offset)
    fbe_result = self.verify_fields(fbe_struct_size)
    self._buffer.unshift(fbe_struct_offset)
    return fbe_result
# Check if the struct fields are valid
def verify_fields(self, fbe_struct_size):
    """Verify each field of the struct in serialization order."""
    fbe_current_size = 4 + 4
    # Fields are laid out sequentially. Stop (and accept) as soon as the
    # struct is too small to contain the next field: a shorter struct
    # written by an older schema is still considered valid.
    for field in (self.id, self.name, self.state,
                  self.wallet, self.asset, self.orders):
        if (fbe_current_size + field.fbe_size) > fbe_struct_size:
            return True
        if not field.verify():
            return False
        fbe_current_size += field.fbe_size
    return True
# Get the struct value (begin phase)
def get_begin(self):
    """Shift the buffer to the struct body and return its offset (0 = absent/broken)."""
    # Field slot outside the buffer: nothing to read.
    if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
        return 0
    fbe_struct_offset = self.read_uint32(self.fbe_offset)
    assert (fbe_struct_offset > 0) and ((self._buffer.offset + fbe_struct_offset + 4 + 4) <= self._buffer.size), "Model is broken!"
    if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4 + 4) > self._buffer.size):
        return 0
    fbe_struct_size = self.read_uint32(fbe_struct_offset)
    assert (fbe_struct_size >= (4 + 4)), "Model is broken!"
    if fbe_struct_size < (4 + 4):
        return 0
    # Make subsequent reads relative to the struct body; undone by get_end().
    self._buffer.shift(fbe_struct_offset)
    return fbe_struct_offset
# Get the struct value (end phase)
def get_end(self, fbe_begin):
    """Undo the buffer shift performed by get_begin()."""
    self._buffer.unshift(fbe_begin)
# Get the struct value
def get(self, fbe_value=None):
    """Deserialize and return an Account (a fresh one when fbe_value is None)."""
    if fbe_value is None:
        fbe_value = Account()
    fbe_begin = self.get_begin()
    if fbe_begin == 0:
        # Struct not present: return the value unchanged.
        return fbe_value
    # After get_begin() the buffer is shifted, so the size header is at 0.
    fbe_struct_size = self.read_uint32(0)
    self.get_fields(fbe_value, fbe_struct_size)
    self.get_end(fbe_begin)
    return fbe_value
# Get the struct fields values
def get_fields(self, fbe_value, fbe_struct_size):
    """Read each field in declaration order; fields that do not fit into
    fbe_struct_size get their default values instead."""
    # Skip the 4-byte size + 4-byte type header.
    fbe_current_size = 4 + 4
    if (fbe_current_size + self.id.fbe_size) <= fbe_struct_size:
        fbe_value.id = self.id.get()
    else:
        fbe_value.id = 0
    fbe_current_size += self.id.fbe_size
    if (fbe_current_size + self.name.fbe_size) <= fbe_struct_size:
        fbe_value.name = self.name.get()
    else:
        fbe_value.name = ""
    fbe_current_size += self.name.fbe_size
    if (fbe_current_size + self.state.fbe_size) <= fbe_struct_size:
        # Default flags combination doubles as the fallback value below.
        fbe_value.state = self.state.get(StateEx.initialized | StateEx.bad | StateEx.sad)
    else:
        fbe_value.state = StateEx.initialized | StateEx.bad | StateEx.sad
    fbe_current_size += self.state.fbe_size
    if (fbe_current_size + self.wallet.fbe_size) <= fbe_struct_size:
        fbe_value.wallet = self.wallet.get()
    else:
        fbe_value.wallet = Balance()
    fbe_current_size += self.wallet.fbe_size
    if (fbe_current_size + self.asset.fbe_size) <= fbe_struct_size:
        fbe_value.asset = self.asset.get()
    else:
        fbe_value.asset = None
    fbe_current_size += self.asset.fbe_size
    if (fbe_current_size + self.orders.fbe_size) <= fbe_struct_size:
        # Vector field fills the caller's list in place.
        self.orders.get(fbe_value.orders)
    else:
        fbe_value.orders.clear()
    fbe_current_size += self.orders.fbe_size
# Set the struct value (begin phase)
def set_begin(self):
    """Allocate the struct body, write its header, and shift the buffer to it.

    Returns the struct body offset, or 0 when the field slot or the
    allocation is out of range.
    """
    assert (self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size, "Model is broken!"
    if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
        return 0
    fbe_struct_size = self.fbe_body
    fbe_struct_offset = self._buffer.allocate(fbe_struct_size) - self._buffer.offset
    assert (fbe_struct_offset > 0) and ((self._buffer.offset + fbe_struct_offset + fbe_struct_size) <= self._buffer.size), "Model is broken!"
    if (fbe_struct_offset <= 0) or ((self._buffer.offset + fbe_struct_offset + fbe_struct_size) > self._buffer.size):
        return 0
    # Link the field slot to the new body, then write its size + type header.
    self.write_uint32(self.fbe_offset, fbe_struct_offset)
    self.write_uint32(fbe_struct_offset, fbe_struct_size)
    self.write_uint32(fbe_struct_offset + 4, self.fbe_type)
    # Field writes become relative to the struct body; undone by set_end().
    self._buffer.shift(fbe_struct_offset)
    return fbe_struct_offset
# Set the struct value (end phase)
def set_end(self, fbe_begin):
    """Undo the buffer shift performed by set_begin()."""
    self._buffer.unshift(fbe_begin)
# Set the struct value
def set(self, fbe_value):
    """Serialize fbe_value into the buffer (no-op when allocation fails)."""
    fbe_begin = self.set_begin()
    if fbe_begin == 0:
        return
    self.set_fields(fbe_value)
    self.set_end(fbe_begin)
# Set the struct fields values
def set_fields(self, fbe_value):
    """Write every field of fbe_value via its child field model."""
    self.id.set(fbe_value.id)
    self.name.set(fbe_value.name)
    self.state.set(fbe_value.state)
    self.wallet.set(fbe_value.wallet)
    self.asset.set(fbe_value.asset)
    self.orders.set(fbe_value.orders)
# Fast Binary Encoding Account model
class AccountModel(fbe.Model):
    """Buffer-backed serialization model for the Account struct.

    Wraps a FieldModelAccount rooted after a 4-byte full-size prefix and
    exposes serialize/deserialize over the underlying FBE buffer.
    """
    __slots__ = "_model",

    def __init__(self, buffer=None):
        super().__init__(buffer)
        # Root field model starts right after the 4-byte full-size prefix.
        self._model = FieldModelAccount(self.buffer, 4)

    @property
    def model(self):
        """Root FieldModelAccount bound to this buffer."""
        return self._model

    # Get the model size
    def fbe_size(self):
        return self._model.fbe_size + self._model.fbe_extra

    # Get the model type
    def fbe_type(self):
        return self.TYPE

    # Same unique type id as the root field model.
    TYPE = FieldModelAccount.TYPE

    # Check if the struct value is valid
    def verify(self):
        if (self.buffer.offset + self._model.fbe_offset - 4) > self.buffer.size:
            return False
        # The full-size prefix must cover at least the root field model.
        fbe_full_size = self.read_uint32(self._model.fbe_offset - 4)
        if fbe_full_size < self._model.fbe_size:
            return False
        return self._model.verify()

    # Create a new model (begin phase)
    def create_begin(self):
        fbe_begin = self.buffer.allocate(4 + self._model.fbe_size)
        return fbe_begin

    # Create a new model (end phase)
    def create_end(self, fbe_begin):
        fbe_end = self.buffer.size
        fbe_full_size = fbe_end - fbe_begin
        # Back-fill the 4-byte full-size prefix now that the size is known.
        self.write_uint32(self._model.fbe_offset - 4, fbe_full_size)
        return fbe_full_size

    # Serialize the struct value
    def serialize(self, value):
        """Serialize value into the buffer; returns the full serialized size."""
        fbe_begin = self.create_begin()
        self._model.set(value)
        fbe_full_size = self.create_end(fbe_begin)
        return fbe_full_size

    # Deserialize the struct value
    def deserialize(self, value=None):
        """Return (value, consumed_size); a fresh default Account and 0 on failure."""
        if value is None:
            value = Account()
        if (self.buffer.offset + self._model.fbe_offset - 4) > self.buffer.size:
            value = Account()
            return value, 0
        fbe_full_size = self.read_uint32(self._model.fbe_offset - 4)
        assert (fbe_full_size >= self._model.fbe_size), "Model is broken!"
        if fbe_full_size < self._model.fbe_size:
            value = Account()
            return value, 0
        self._model.get(value)
        return value, fbe_full_size

    # Move to the next struct value
    def next(self, prev):
        self._model.fbe_shift(prev)
class FinalModelAccount(fbe.FinalModel):
    """Final (sequential) serialization model for the Account struct.

    Unlike FieldModelAccount there is no offset table: fields are packed
    back to back and each child model's fbe_offset is assigned on the fly
    while walking the record.  ``sys.maxsize`` is the "broken" sentinel
    used by verification (relies on the module-level ``import sys``).

    Fix: the original generated ``fbe_allocation_size`` carried a stray
    trailing line-continuation backslash on its last addend, which glued
    ``return fbe_result`` onto the expression — a SyntaxError.
    """
    __slots__ = "_id", "_name", "_state", "_wallet", "_asset", "_orders",

    def __init__(self, buffer, offset):
        super().__init__(buffer, offset)
        # Child final models; offsets are recomputed per record, so start at 0.
        self._id = fbe.FinalModelInt32(buffer, 0)
        self._name = fbe.FinalModelString(buffer, 0)
        self._state = FinalModelStateEx(buffer, 0)
        self._wallet = FinalModelBalance(buffer, 0)
        self._asset = fbe.FinalModelOptional(FinalModelBalance(buffer, 0), buffer, 0)
        self._orders = fbe.FinalModelVector(FinalModelOrder(buffer, 0), buffer, 0)

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        return self._state

    @property
    def wallet(self):
        return self._wallet

    @property
    def asset(self):
        return self._asset

    @property
    def orders(self):
        return self._orders

    # Get the allocation size
    def fbe_allocation_size(self, fbe_value):
        """Total bytes needed to serialize fbe_value in final format."""
        fbe_result = 0 \
            + self.id.fbe_allocation_size(fbe_value.id) \
            + self.name.fbe_allocation_size(fbe_value.name) \
            + self.state.fbe_allocation_size(fbe_value.state) \
            + self.wallet.fbe_allocation_size(fbe_value.wallet) \
            + self.asset.fbe_allocation_size(fbe_value.asset) \
            + self.orders.fbe_allocation_size(fbe_value.orders)
        return fbe_result

    # Get the final type
    @property
    def fbe_type(self):
        return self.TYPE

    # Unique FBE type id for the Account struct.
    TYPE = 3

    # Check if the struct value is valid
    def verify(self):
        self._buffer.shift(self.fbe_offset)
        fbe_result = self.verify_fields()
        self._buffer.unshift(self.fbe_offset)
        return fbe_result

    # Check if the struct fields are valid
    def verify_fields(self):
        """Walk the fields sequentially; sys.maxsize signals a broken record."""
        fbe_current_offset = 0
        self.id.fbe_offset = fbe_current_offset
        fbe_field_size = self.id.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        self.name.fbe_offset = fbe_current_offset
        fbe_field_size = self.name.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        self.state.fbe_offset = fbe_current_offset
        fbe_field_size = self.state.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        self.wallet.fbe_offset = fbe_current_offset
        fbe_field_size = self.wallet.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        self.asset.fbe_offset = fbe_current_offset
        fbe_field_size = self.asset.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        self.orders.fbe_offset = fbe_current_offset
        fbe_field_size = self.orders.verify()
        if fbe_field_size == sys.maxsize:
            return sys.maxsize
        fbe_current_offset += fbe_field_size
        return fbe_current_offset

    # Get the struct value
    def get(self, fbe_value=None):
        """Deserialize into fbe_value (or a fresh Account); returns (value, size)."""
        if fbe_value is None:
            fbe_value = Account()
        self._buffer.shift(self.fbe_offset)
        fbe_size = self.get_fields(fbe_value)
        self._buffer.unshift(self.fbe_offset)
        return fbe_value, fbe_size

    # Get the struct fields values
    def get_fields(self, fbe_value):
        """Read the fields sequentially; returns the total bytes consumed."""
        fbe_current_offset = 0
        fbe_current_size = 0
        self.id.fbe_offset = fbe_current_offset
        fbe_result = self.id.get()
        fbe_value.id = fbe_result[0]
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        self.name.fbe_offset = fbe_current_offset
        fbe_result = self.name.get()
        fbe_value.name = fbe_result[0]
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        self.state.fbe_offset = fbe_current_offset
        fbe_result = self.state.get()
        fbe_value.state = fbe_result[0]
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        self.wallet.fbe_offset = fbe_current_offset
        fbe_result = self.wallet.get()
        fbe_value.wallet = fbe_result[0]
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        self.asset.fbe_offset = fbe_current_offset
        fbe_result = self.asset.get()
        fbe_value.asset = fbe_result[0]
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        self.orders.fbe_offset = fbe_current_offset
        # Vector field fills the caller's list in place.
        fbe_result = self.orders.get(fbe_value.orders)
        fbe_current_offset += fbe_result[1]
        fbe_current_size += fbe_result[1]
        return fbe_current_size

    # Set the struct value
    def set(self, fbe_value):
        """Serialize fbe_value; returns the total bytes written."""
        self._buffer.shift(self.fbe_offset)
        fbe_size = self.set_fields(fbe_value)
        self._buffer.unshift(self.fbe_offset)
        return fbe_size

    # Set the struct fields values
    def set_fields(self, fbe_value):
        """Write the fields sequentially; returns the total bytes written."""
        fbe_current_offset = 0
        fbe_current_size = 0
        self.id.fbe_offset = fbe_current_offset
        fbe_field_size = self.id.set(fbe_value.id)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        self.name.fbe_offset = fbe_current_offset
        fbe_field_size = self.name.set(fbe_value.name)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        self.state.fbe_offset = fbe_current_offset
        fbe_field_size = self.state.set(fbe_value.state)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        self.wallet.fbe_offset = fbe_current_offset
        fbe_field_size = self.wallet.set(fbe_value.wallet)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        self.asset.fbe_offset = fbe_current_offset
        fbe_field_size = self.asset.set(fbe_value.asset)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        self.orders.fbe_offset = fbe_current_offset
        fbe_field_size = self.orders.set(fbe_value.orders)
        fbe_current_offset += fbe_field_size
        fbe_current_size += fbe_field_size
        return fbe_current_size
# Fast Binary | |
# Repository: JStuckner/DnD-GUI-2
#!/usr/bin/python
import tkinter as tk
from tab.tabula import rollTable, getTableNames, getTableGroups
from util import dice, randomMap, encounter
import random
from PIL import Image, ImageTk
from random import randint
import numpy as np
import matplotlib.pyplot as plt
from util.text import string_to_array
from scipy import ndimage, misc
from Characters.Character import getPCs
from util.getJson import getMonsters, getSpells
from util.getModifier import getMod
import re
import imageio
from skimage.transform import resize
def pretty(d, indent=0):
    """Print a nested dict as an indented tree, one key/value per line."""
    prefix = '\t' * indent
    for key, val in d.items():
        print(prefix + str(key))
        if isinstance(val, dict):
            # Recurse one level deeper for nested dicts.
            pretty(val, indent + 1)
        else:
            # Leaf values are printed one tab deeper than their key.
            print(prefix + '\t' + str(val))
class App(tk.Frame):
def __init__(self, master=None):
    """Build the DM dashboard.

    Loads PCs/monsters/spells, then lays out the roll-table controls,
    stat editor, map tools, action buttons, and output widgets.
    """
    # Difficulty modifiers (used when scaling encounters).
    self.Diff = 2
    self.DiffThreshCR = 15
    # Character dictionaries.
    self.PCs = getPCs()
    for key in self.PCs:
        self.PCs[key]["hp"] = self.PCs[key]["maxHP"]  # start every PC at full health
    # Monster dictionary
    self.monsters = getMonsters()
    self.characterList = []
    for key in self.PCs:
        self.characterList.append(self.PCs[key]["name"])
    for monster in self.monsters:
        self.characterList.append(monster["name"])
    # Spells
    self.spells = getSpells()
    self.spellList = []
    for key in self.spells:
        #print(key)
        self.spellList.append(key["name"])
    tk.Frame.__init__(self, master)
    self.pack()
    # Background map image (Windows-style relative path — assumes cwd is repo root).
    self.background = imageio.imread(r'maps\Elin.jpg')
    # Frame
    self.frame = tk.Frame(self)
    self.frame.grid(row=0, column=0)
    # Roll Table: group menu, table menu, count entry, roll button.
    groups = getTableGroups()
    tables = getTableNames(groups[0])
    self.groupvar = tk.StringVar(self)
    self.groupvar.set(groups[0])
    self.tablevar = tk.StringVar(self)
    self.tablevar.set(tables[0])
    self.txtTablevar = tk.StringVar(self)
    # Repopulate the table menu whenever the group changes.
    self.groupvar.trace('w', self.update_options)
    self.group = tk.OptionMenu(self.frame, self.groupvar, *groups)
    self.table = tk.OptionMenu(self.frame, self.tablevar, *tables)
    self.txtTable = tk.Entry(self.frame, width=6)
    self.butTable = tk.Button(self.frame,
                              text="Roll on table",
                              command=lambda:self.roll((self.groupvar.get(),self.tablevar.get(),self.txtTable.get())))
    self.group.grid(row=0, column=0)
    self.table.grid(row=0, column=1)
    self.txtTable.grid(row=0, column=2)
    self.butTable.grid(row=0, column=3)
    ## #Roll Initiative
    ## self.txtInit = tk.Entry(self.frame,width=25)
    ## self.butInit = tk.Button(self.frame,
    ##                          text="Roll for initiative",
    ##                          command=lambda:self.rollInit())
    ##
    ## self.txtInit.grid(row=2, column=0,columnspan=3, sticky='w')
    ## self.butInit.grid(row=2, column=3)
    # Change Stat: PC dropdown, stat dropdown, value entry, apply button.
    self.listPCVar = tk.StringVar(self)
    self.listPCVar.set(list(self.PCs.keys())[0])
    self.listStatVar = tk.StringVar(self)
    self.listStatVar.set(list(self.PCs[self.listPCVar.get()].keys())[0])
    self.listPCs = tk.OptionMenu(self.frame, self.listPCVar, *list(self.PCs.keys()), command = lambda x:self.updateStats(PClist=True))
    self.listStats = tk.OptionMenu(self.frame, self.listStatVar, *list(self.PCs[self.listPCVar.get()].keys()), command = lambda x:self.updateStats(PClist=False))
    self.lblStatVar = tk.StringVar(self)
    self.txtStat = tk.Entry(self.frame, textvariable=self.lblStatVar, width=10)
    self.butChangeStat = tk.Button(self.frame, text="Change Stat", command=lambda:self.changeStat())
    self.listPCs.grid(row=2,column=0)
    self.listStats.grid(row=2,column=1)
    self.txtStat.grid(row=2,column=2)
    self.butChangeStat.grid(row=2, column=3)
    self.updateStats(False)
    # Create Map: size/location entries plus roll button.
    self.mapframe=tk.Frame(self.frame)
    self.mapframe.grid(row=1, column=0, columnspan=3)
    self.txtXSize = tk.Entry(self.mapframe,width=4)
    self.txtYSize = tk.Entry(self.mapframe,width=4)
    self.txtXLoc = tk.Entry(self.mapframe,width=4)
    self.txtYLoc = tk.Entry(self.mapframe,width=4)
    self.butMap = tk.Button(self.frame,
                            text="Create map",
                            command=lambda:self.createMap())
    self.txtXSize.grid(row=1, column=0)
    self.txtYSize.grid(row=1, column=1)
    self.txtXLoc.grid(row=1, column=2)
    self.txtYLoc.grid(row=1, column=3)
    self.butMap.grid(row=1, column=3)
    ## #Random Zombie
    ## self.butZombie = tk.Button(self,
    ##                            text="Spawn Zombies",
    ##                            command=lambda:self.spawnZombie())
    ## self.butZombie.grid(row=4, column=0)
    # Active Perception
    self.butPerceive = tk.Button(self.frame,
                                 text="Perception",
                                 command=lambda:self.perceive())
    self.butPerceive.grid(row=4, column=3)
    # Check carry weight
    self.butWeight = tk.Button(self.frame,
                               text="Check weight",
                               command=lambda:self.checkWeight())
    self.butWeight.grid(row=4, column=1)
    # Fight
    self.butFight = tk.Button(self.frame,
                              text="Fight",
                              command=lambda:self.setupFight())
    self.butFight.grid(row=4, column=0)
    # Rest
    self.butRest = tk.Button(self.frame,
                             text="Rest",
                             command=lambda:self.rest())
    self.butRest.grid(row=4, column=2)
    # Map picture
    self.mapIm = [] # stores the PhotoImage so it is not garbage-collected
    self.canvasMap = tk.Canvas(self, height=800, width=800)
    self.canvasMap.grid(row=0, column=4, columnspan=10, rowspan=30)
    # Map List
    self.txtMap = tk.Text(self, height=40, width=30)
    self.txtMap.grid(row=0, column=14, rowspan=30, columnspan=3)
    # Map arrows: pan (arrows) and zoom (+/-) redraw the map shifted.
    self.aFrame = tk.Frame(self)
    self.aFrame.grid(row=3,column=0)
    self.upButton = tk.Button(self.aFrame,
                              text='^',
                              command=lambda:self.createMap('up'))
    self.downButton = tk.Button(self.aFrame,
                                text='v',
                                command=lambda:self.createMap('down'))
    self.leftButton = tk.Button(self.aFrame,
                                text='<',
                                command=lambda:self.createMap('left'))
    self.rightButton = tk.Button(self.aFrame,
                                 text='>',
                                 command=lambda:self.createMap('right'))
    self.inButton = tk.Button(self.aFrame,
                              text='+',
                              command=lambda:self.createMap('in'))
    self.outButton = tk.Button(self.aFrame,
                               text='-',
                               command=lambda:self.createMap('out'))
    self.upButton.grid(row=0, column = 1)
    self.downButton.grid(row=2, column = 1)
    self.leftButton.grid(row=1, column = 0)
    self.rightButton.grid(row=1, column = 2)
    self.inButton.grid(row=0, column = 4)
    self.outButton.grid(row=2, column = 4)
    # Print text box
    self.txtPrint = tk.Text(self,height=35, width=50)
    self.txtPrint.grid(row=4, column=0, rowspan=20, columnspan=4)
def updateStats(self, PClist):
    """Sync the stat dropdown and entry box with the current PC selection.

    PClist is True when the PC dropdown changed (stat menu must be rebuilt
    for the new PC), False when only the stat choice changed.
    """
    ## if PClist:
    ##     self.listStats['menu'].delete(0, tk.END)
    ##     for i, stat in enumerate(list(self.PCs[self.listPCVar.get()].keys())):
    ##         self.listStats['menu'].add_command(label=stat, command=tk._setit(self.listStatVar, stat))
    ##     #self.listStatVar.set(list(self.PCs[self.listPCVar.get()].keys())[0])
    if PClist:
        # NOTE(review): creates a brand-new OptionMenu on every PC change
        # instead of updating the existing menu (the commented-out variant
        # above); the old widget is gridded over, not destroyed.
        self.listStats = tk.OptionMenu(self.frame, self.listStatVar, *list(self.PCs[self.listPCVar.get()].keys()), command = lambda x:self.updateStats(PClist=False))
        self.listStatVar.set(list(self.PCs[self.listPCVar.get()].keys())[0])
        self.listStats.grid(row=2,column=1)
    # Mirror the selected stat's value into the entry box.
    self.lblStatVar.set(self.PCs[self.listPCVar.get()][self.listStatVar.get()])
    tempInsert = str(self.lblStatVar.get())
    self.txtStat.delete(0,100)
    self.txtStat.insert(0, tempInsert)
def changeStat(self):
    """Write the value from the stat entry box back into the selected PC's stats."""
    self.PCs[self.listPCVar.get()][self.listStatVar.get()] = self.lblStatVar.get()
def rest(self):
    """Long rest: restore every PC to full hit points."""
    for stats in self.PCs.values():
        stats['hp'] = stats['maxHP']
def roll(self, argv):
    """Roll on a random table and print the result to the output box.

    argv is a (group, table, count/dice) tuple forwarded to rollTable.
    For the 'treasure' group the coin amounts in the output are also
    totalled into gold pieces and appended to the printout.

    Fixes: raw strings for the regex (the originals were invalid escape
    sequences); the bare ``except`` narrowed to ``ValueError``.
    """
    output = rollTable(argv, retString=True)
    # Find the total gold value of the treasure.
    if argv[0] == 'treasure':
        total = 0
        flagMult = 1
        # Strip commas and parentheses so coin amounts tokenize cleanly.
        interp = re.sub(r"[,()]", '', output)
        interp = interp.split()
        for i, word in enumerate(interp):
            # Flag '2x' or 'ix' to multiply the next coin value.
            if word[-1] == 'x':
                try:
                    flagMult = int(word[:-1])
                except ValueError:
                    pass  # not a multiplier, just a word ending in 'x'
            # Denomination words follow their amounts; convert to gold pieces.
            if word == 'cp':
                total += flagMult*int(interp[i-1])/100
                flagMult = 1
            elif word == 'sp':
                total += flagMult*int(interp[i-1])/10
                flagMult = 1
            elif word == 'gp':
                total += flagMult*int(interp[i-1])
                flagMult = 1
            elif word == 'pp':
                total += flagMult*int(interp[i-1])*10
                flagMult = 1
        output += ' TOTAL GOLD VALUE: ' + str(total) + '(' + str(total/4) + ').\n'
    self.write(output)
def write(self, *strings, sep=' ', end='\n'):
    """print()-style output into the txtPrint box, scrolled to the bottom.

    Note: sep is appended after every value, including the last one
    (matching the long-standing output format).
    """
    text = ''.join(str(item) + sep for item in strings) + end
    self.txtPrint.insert(tk.END, text)
    self.txtPrint.see(tk.END)
def checkWeight(self):
    """Report each carrier's load against D&D 5e carry-weight thresholds.

    Per carrier: light = 5*STR, heavy = 10*STR, max = 15*STR, each plus a
    flat bonus (containers get bonus capacity only).

    Fixes: corrected the 'Strength' typo in the heavily-encumbered
    message; replaced the parallel index lists with a single zip loop.
    """
    from stuckpy.DnD.inventory import checkWeight
    self.write('Encumberance Check')
    names = ('Anastasia', 'Cameron', 'Travis', 'Seth', 'Keith',
             'Bag of Holding', 'Tensors Floating Disk', 'Keith Donkey')
    strength = (self.PCs['Geralda']["str"],
                self.PCs['Zana']["str"],
                self.PCs['Traubon']["str"],
                self.PCs['Saleek']["str"],
                self.PCs['Varis']["str"],
                0, 0, 10)
    bonus = (self.PCs['Geralda']["bonuscarry"],
             self.PCs['Zana']["bonuscarry"],
             self.PCs['Traubon']["bonuscarry"],
             self.PCs['Saleek']["bonuscarry"],
             self.PCs['Varis']["bonuscarry"],
             500, 500, 0)
    for name, strg, extra in zip(names, strength, bonus):
        light = strg * 5 + extra
        heavy = strg * 10 + extra
        maximum = strg * 15 + extra
        carry = checkWeight(name)
        self.write('{0} is carrying {1}/{2} lbs.'.format(name, carry, maximum))
        if carry > maximum:
            self.write('{0} is overencumbered and must immediately carry less'.format(name))
        elif carry > heavy:
            self.write('{0} is heavily encumbered. Minus 20 movespeed and disadvantage on all ability checks, attack rolls, and saving throws that use Strength, Dexterity, or Constitution'.format(name))
        elif carry > light:
            self.write('{0} is lightly encumbered. Minus 10 movespeed.'.format(name))
        else:
            self.write('{0} is not encumbered.'.format(name))
    self.write('')
def perceive(self):
    """Roll a perception check for each party member and print the results.

    Travis rolls with advantage (best of two d20s); the flat numbers are
    each character's perception modifier.
    """
    # Roll order matters for reproducibility with a seeded RNG:
    # Seth, Anastasia, Cameron, then Travis (two dice).
    seth_roll = random.randint(1, 20) + 6
    ana_roll = random.randint(1, 20) + 1
    cam_roll = random.randint(1, 20) + 4
    trav_roll = max(random.randint(1, 20), random.randint(1, 20)) + 6
    self.write('Rolling for perception:')
    self.write('Seth =', seth_roll)
    self.write('Anastasia =', ana_roll)
    self.write('Travis =', trav_roll)
    self.write('Cameron =', cam_roll)
    self.write('')
def update_options(self, *args):
    """Trace callback: refill the table dropdown for the newly chosen group."""
    tables = getTableNames(self.groupvar.get())
    menu = self.table['menu']
    # Rebuild the menu entries from scratch for the new group.
    menu.delete(0, 'end')
    for entry in tables:
        # Default-arg binding freezes the current entry for the callback.
        menu.add_command(label=entry, command=lambda choice=entry: self.tablevar.set(choice))
    # Select the first table of the new group.
    self.tablevar.set(tables[0])
def rollInit(self, Return=False):
    """Roll initiative order from the whitespace-separated modifiers in txtInit.

    When Return is True the order is returned; otherwise it is printed.

    NOTE(review): self.txtInit is only created in a commented-out block of
    __init__, so calling this currently raises AttributeError — confirm
    before re-enabling the initiative button.
    """
    params = self.txtInit.get()
    params = params.split()
    intparams = []
    for i in params:
        try:
            intparams.append(int(i))
        except:
            pass  # silently drop non-numeric tokens
    if Return:
        return(encounter.order(intparams))
    else:
        self.write('Rolling for initiative...')
        self.write(encounter.order(intparams))
        self.write('')
def createMap(self, direction='none'):
    """Roll random map items and redraw the map image on the canvas.

    direction: 'up'/'down'/'left'/'right' pans one step, 'in'/'out' zooms
    one cell, 'none' just redraws with the current entry-box values.
    """
    # Kill 0s: backfill empty entry boxes with defaults before panning/zooming.
    if direction != 'none':
        if self.txtXLoc.get() == '':
            self.txtXLoc.insert(0,'20')
        if self.txtYLoc.get() == '':
            self.txtYLoc.insert(0,'20')
        if self.txtXSize.get() == '':
            self.txtXSize.insert(0,'15')
        if self.txtYSize.get() == '':
            self.txtYSize.insert(0,'15')
    # Pan/zoom by rewriting the entry boxes in place.
    # NOTE(review): 'up'/'down' adjust the X location and 'left'/'right'
    # the Y location — confirm the axes are intentionally swapped.
    if direction == 'up':
        new = int(self.txtXLoc.get())-1
        self.txtXLoc.delete(0,10)
        self.txtXLoc.insert(0,str(new))
    if direction == 'down':
        new = int(self.txtXLoc.get())+1
        self.txtXLoc.delete(0,10)
        self.txtXLoc.insert(0,str(new))
    if direction == 'left':
        new = int(self.txtYLoc.get())-1
        self.txtYLoc.delete(0,10)
        self.txtYLoc.insert(0,str(new))
    if direction == 'right':
        new = int(self.txtYLoc.get())+1
        self.txtYLoc.delete(0,10)
        self.txtYLoc.insert(0,str(new))
    if direction == 'in':
        new = int(self.txtXSize.get())-1
        self.txtXSize.delete(0,10)
        self.txtXSize.insert(0,str(new))
        new = int(self.txtYSize.get())-1
        self.txtYSize.delete(0,10)
        self.txtYSize.insert(0,str(new))
    if direction == 'out':
        new = int(self.txtXSize.get())+1
        self.txtXSize.delete(0,10)
        self.txtXSize.insert(0,str(new))
        new = int(self.txtYSize.get())+1
        self.txtYSize.delete(0,10)
        self.txtYSize.insert(0,str(new))
    # Parse the entry boxes, falling back to defaults on bad input.
    try:
        x = int(self.txtXSize.get())
    except ValueError:
        x = 15
    try:
        y = int(self.txtYSize.get())
    except ValueError:
        y = 15
    try:
        xloc = int(self.txtXLoc.get())
    except ValueError:
        xloc = 20
    try:
        yloc = int(self.txtYLoc.get())
    except ValueError:
        yloc = 20
    # Item count: plain integer, otherwise a dice expression (e.g. '2d6').
    try:
        items = int(self.txtTable.get())
    except ValueError:
        items = dice.parse(self.txtTable.get())
    rolls = rollTable(('map',
                       'Map',
                       self.txtTable.get()),
                      retString=True,
                      retList=True)
    self.txtMap.delete(1.0, tk.END) # clear box
    self.txtMap.insert(tk.END,"Map Items:")
    for i in range(len(rolls)):
        if i < 9:
            # Leading space keeps single-digit indices aligned with 10+.
            self.txtMap.insert(tk.END,'\n ' + str(i+1) + ' = ' + str(rolls[i]))
        else:
            self.txtMap.insert(tk.END,'\n'+ str(i+1)+' = '+str(rolls[i]))
    #print('')
    # NOTE(review): self.create is defined outside this chunk — presumably
    # builds the map as a float array in [0, 1]; confirm its contract.
    Map = self.create(x,y,items,xloc,yloc, get=True)
    #Map = resize(Map,(int(750/Map.shape[0]), int(750/Map.shape[1])))
    self.mapIm = ImageTk.PhotoImage(Image.fromarray(np.uint8(Map*255)))
    #mapx, mapy = self.mapIm.shape
    self.canvasMap.config(width=self.mapIm.width(), height=self.mapIm.height())
    self.canvasMap.create_image(0,0,image=self.mapIm, anchor='nw')
    self.update_idletasks()
def setupFight(self, newFight=True):
    """Open the fight-setup dialog for picking combatants.

    newFight: True starts a fresh fight (pre-seeds the roster); False
    reopens the picker mid-fight.

    NOTE(review): updateList/addList/removeList/startToList/goFight and
    self.Ftop are defined outside this chunk; in particular self.Ftop is
    destroyed here but only self.top is assigned — confirm Ftop is set by
    the fight window code.
    """
    self.top = tk.Toplevel(self)
    if not newFight:
        self.Ftop.destroy()
    # Create Widgets
    checkVar = tk.IntVar()
    self.search_var = tk.StringVar()
    self.listnumber_var = tk.StringVar()
    # Re-filter the candidate list as the user types.
    self.search_var.trace("w", lambda name, index, mode: self.updateList())
    self.entry = tk.Entry(self.top, textvariable=self.search_var, width=13)
    self.listFrom = tk.Listbox(self.top, width=45, height=15)
    self.scrollbar = tk.Scrollbar(self.top, orient=tk.VERTICAL)
    self.listTo = tk.Listbox(self.top, width=45, height=15, selectmode=tk.EXTENDED, yscrollcommand=self.scrollbar.set)
    self.scrollbar.config(command=self.listTo.yview)
    self.butSetupIn = tk.Button(self.top, text="Add ->", command=lambda:self.addList())
    self.butSetupOut = tk.Button(self.top, text="<- Remove", command=lambda:self.removeList())
    self.butSetupFight = tk.Button(self.top, text="Fight!", command=lambda:self.goFight(newFight, checkVar.get()))
    self.txtSetupNum = tk.Entry(self.top, width=3, textvariable=self.listnumber_var)
    self.checkAdjDiff = tk.Checkbutton(self.top,
                                       text ="Adjust Difficulty?",
                                       variable = checkVar,
                                       onvalue=1,
                                       offvalue=0,
                                       height=5, width=20)
    # Pack Widgets
    self.entry.grid(row=0, column=0, padx=10, pady=3)
    self.listFrom.grid(row=1, column=0, padx=10, pady=3, rowspan=10)
    self.listTo.grid(row=1, column=2, padx=0,pady=3, rowspan=10)
    self.scrollbar.grid(row=1, column=3, rowspan=10, sticky=tk.N+tk.S+tk.W)
    self.txtSetupNum.grid(row=3, column=1, padx=5, pady=2)
    self.butSetupIn.grid(row=4, column=1, padx=5, pady=2)
    self.butSetupOut.grid(row=5, column=1, padx=5, pady=2)
    self.butSetupFight.grid(row=7,column=1, padx=5, pady=20)
    self.checkAdjDiff.grid(row=8, column=1)
    # Call to populate initial lists
    self.updateList()
    if newFight:
        self.startToList()
def goFight(self, newFight=True, adjDiff=True):
if newFight:
self.fighters = []
self.order = []
for item in self.listTo.get(0,tk.END):
for PC in self.PCs:
if item.lower() == self.PCs[PC]["name"].lower():
self.fighters.append(self.PCs[PC])
for monster in self.monsters:
if item.lower() == monster["name"].lower():
#print(type(monster))
self.fighters.append(monster.copy())
# Apply difficulty modifiers
cr = self.fighters[-1]["cr"]
try:
cr = int(cr)
except ValueError:
cr = int(1) #actually equals less than 1 but this is temporary
if | |
class TargetPoolsService(base_api.BaseApiService):
"""Service class for the targetPools resource."""
_NAME = u'targetPools'
def __init__(self, client):
    """Register the ApiMethodInfo for every targetPools REST method.

    Generated configuration table; keys match the Python wrapper method
    names on this service class.
    """
    super(ComputeAlpha.TargetPoolsService, self).__init__(client)
    self._method_configs = {
        'AddHealthCheck': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.addHealthCheck',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck',
            request_field=u'targetPoolsAddHealthCheckRequest',
            request_type_name=u'ComputeTargetPoolsAddHealthCheckRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'AddInstance': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.addInstance',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/addInstance',
            request_field=u'targetPoolsAddInstanceRequest',
            request_type_name=u'ComputeTargetPoolsAddInstanceRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'AggregatedList': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.targetPools.aggregatedList',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/aggregated/targetPools',
            request_field='',
            request_type_name=u'ComputeTargetPoolsAggregatedListRequest',
            response_type_name=u'TargetPoolAggregatedList',
            supports_download=False,
        ),
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'compute.targetPools.delete',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}',
            request_field='',
            request_type_name=u'ComputeTargetPoolsDeleteRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.targetPools.get',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}',
            request_field='',
            request_type_name=u'ComputeTargetPoolsGetRequest',
            response_type_name=u'TargetPool',
            supports_download=False,
        ),
        'GetHealth': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.getHealth',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/getHealth',
            request_field=u'instanceReference',
            request_type_name=u'ComputeTargetPoolsGetHealthRequest',
            response_type_name=u'TargetPoolInstanceHealth',
            supports_download=False,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.insert',
            ordered_params=[u'project', u'region'],
            path_params=[u'project', u'region'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools',
            request_field=u'targetPool',
            request_type_name=u'ComputeTargetPoolsInsertRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.targetPools.list',
            ordered_params=[u'project', u'region'],
            path_params=[u'project', u'region'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/regions/{region}/targetPools',
            request_field='',
            request_type_name=u'ComputeTargetPoolsListRequest',
            response_type_name=u'TargetPoolList',
            supports_download=False,
        ),
        'RemoveHealthCheck': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.removeHealthCheck',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck',
            request_field=u'targetPoolsRemoveHealthCheckRequest',
            request_type_name=u'ComputeTargetPoolsRemoveHealthCheckRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'RemoveInstance': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.removeInstance',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance',
            request_field=u'targetPoolsRemoveInstanceRequest',
            request_type_name=u'ComputeTargetPoolsRemoveInstanceRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'SetBackup': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.targetPools.setBackup',
            ordered_params=[u'project', u'region', u'targetPool'],
            path_params=[u'project', u'region', u'targetPool'],
            query_params=[u'failoverRatio'],
            relative_path=u'projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup',
            request_field=u'targetReference',
            request_type_name=u'ComputeTargetPoolsSetBackupRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
    }
    # No resumable-upload methods on this resource.
    self._upload_configs = {
    }
def AddHealthCheck(self, request, global_params=None):
    """Adds a health check URL to the target pool.

    Args:
      request: (ComputeTargetPoolsAddHealthCheckRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('AddHealthCheck'), request,
        global_params=global_params)
def AddInstance(self, request, global_params=None):
    """Add an instance URL to the target pool.

    Args:
      request: (ComputeTargetPoolsAddInstanceRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('AddInstance'), request,
        global_params=global_params)
def AggregatedList(self, request, global_params=None):
    """List target pools across all scopes, grouped by scope.

    Args:
      request: (ComputeTargetPoolsAggregatedListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TargetPoolAggregatedList) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('AggregatedList'), request,
        global_params=global_params)
def Delete(self, request, global_params=None):
    """Delete the given TargetPool resource.

    Args:
      request: (ComputeTargetPoolsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Delete'), request,
        global_params=global_params)
def Get(self, request, global_params=None):
    """Fetch the given TargetPool resource.

    Args:
      request: (ComputeTargetPoolsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TargetPool) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
def GetHealth(self, request, global_params=None):
    """Query the most recent health-check results, per IP, for an instance referenced by the target pool.

    Args:
      request: (ComputeTargetPoolsGetHealthRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TargetPoolInstanceHealth) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('GetHealth'), request,
        global_params=global_params)
def Insert(self, request, global_params=None):
    """Create a TargetPool in the given project and region from the request body.

    Args:
      request: (ComputeTargetPoolsInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
def List(self, request, global_params=None):
    """List the TargetPool resources visible in the given project and region.

    Args:
      request: (ComputeTargetPoolsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (TargetPoolList) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
def RemoveHealthCheck(self, request, global_params=None):
    """Remove a health check URL from the target pool.

    Args:
      request: (ComputeTargetPoolsRemoveHealthCheckRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('RemoveHealthCheck'), request,
        global_params=global_params)
def RemoveInstance(self, request, global_params=None):
    """Remove an instance URL from the target pool.

    Args:
      request: (ComputeTargetPoolsRemoveInstanceRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('RemoveInstance'), request,
        global_params=global_params)
def SetBackup(self, request, global_params=None):
    """Change the backup pool configuration.

    Args:
      request: (ComputeTargetPoolsSetBackupRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('SetBackup'), request,
        global_params=global_params)
class TargetVpnGatewaysService(base_api.BaseApiService):
    """Service class for the targetVpnGateways resource."""

    # Resource name the client uses to register this service.
    _NAME = u'targetVpnGateways'

    def __init__(self, client):
        """Build the per-method wire configuration for this service.

        Args:
          client: the generated ComputeAlpha API client owning this service.
        """
        super(ComputeAlpha.TargetVpnGatewaysService, self).__init__(client)
        # Generated method table: HTTP verb, wire method id, URL template,
        # parameter placement, and request/response message type names.
        self._method_configs = {
            'AggregatedList': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.targetVpnGateways.aggregatedList',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/aggregated/targetVpnGateways',
                request_field='',
                request_type_name=u'ComputeTargetVpnGatewaysAggregatedListRequest',
                response_type_name=u'TargetVpnGatewayAggregatedList',
                supports_download=False,
            ),
            'Delete': base_api.ApiMethodInfo(
                http_method=u'DELETE',
                method_id=u'compute.targetVpnGateways.delete',
                ordered_params=[u'project', u'region', u'targetVpnGateway'],
                path_params=[u'project', u'region', u'targetVpnGateway'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}',
                request_field='',
                request_type_name=u'ComputeTargetVpnGatewaysDeleteRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.targetVpnGateways.get',
                ordered_params=[u'project', u'region', u'targetVpnGateway'],
                path_params=[u'project', u'region', u'targetVpnGateway'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}',
                request_field='',
                request_type_name=u'ComputeTargetVpnGatewaysGetRequest',
                response_type_name=u'TargetVpnGateway',
                supports_download=False,
            ),
            'Insert': base_api.ApiMethodInfo(
                http_method=u'POST',
                method_id=u'compute.targetVpnGateways.insert',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/targetVpnGateways',
                request_field=u'targetVpnGateway',
                request_type_name=u'ComputeTargetVpnGatewaysInsertRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'List': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.targetVpnGateways.list',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/regions/{region}/targetVpnGateways',
                request_field='',
                request_type_name=u'ComputeTargetVpnGatewaysListRequest',
                response_type_name=u'TargetVpnGatewayList',
                supports_download=False,
            ),
        }
        # No media-upload methods exist on this service.
        self._upload_configs = {
        }

    def AggregatedList(self, request, global_params=None):
        """Retrieves the list of target VPN gateways grouped by scope.

        Args:
          request: (ComputeTargetVpnGatewaysAggregatedListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (TargetVpnGatewayAggregatedList) The response message.
        """
        config = self.GetMethodConfig('AggregatedList')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Delete(self, request, global_params=None):
        """Deletes the specified TargetVpnGateway resource.

        Args:
          request: (ComputeTargetVpnGatewaysDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Get(self, request, global_params=None):
        """Returns the specified TargetVpnGateway resource.

        Args:
          request: (ComputeTargetVpnGatewaysGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (TargetVpnGateway) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
        """Creates a TargetVpnGateway resource in the specified project and region using the data included in the request.

        Args:
          request: (ComputeTargetVpnGatewaysInsertRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Insert')
        return self._RunMethod(
            config, request, global_params=global_params)

    def List(self, request, global_params=None):
        """Retrieves the list of TargetVpnGateway resources available to the specified project and region.

        Args:
          request: (ComputeTargetVpnGatewaysListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (TargetVpnGatewayList) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
class UrlMapsService(base_api.BaseApiService):
"""Service class for the urlMaps resource."""
_NAME = u'urlMaps'
def __init__(self, client):
    """Build the per-method wire configuration for the urlMaps service.

    Args:
      client: the generated ComputeAlpha API client owning this service.
    """
    super(ComputeAlpha.UrlMapsService, self).__init__(client)
    # Generated method table: HTTP verb, wire method id, URL template,
    # parameter placement, and request/response message type names.
    self._method_configs = {
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'compute.urlMaps.delete',
            ordered_params=[u'project', u'urlMap'],
            path_params=[u'project', u'urlMap'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps/{urlMap}',
            request_field='',
            request_type_name=u'ComputeUrlMapsDeleteRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.urlMaps.get',
            ordered_params=[u'project', u'urlMap'],
            path_params=[u'project', u'urlMap'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps/{urlMap}',
            request_field='',
            request_type_name=u'ComputeUrlMapsGetRequest',
            response_type_name=u'UrlMap',
            supports_download=False,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.urlMaps.insert',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps',
            request_field=u'urlMap',
            request_type_name=u'ComputeUrlMapsInsertRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.urlMaps.list',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/global/urlMaps',
            request_field='',
            request_type_name=u'ComputeUrlMapsListRequest',
            response_type_name=u'UrlMapList',
            supports_download=False,
        ),
        'Patch': base_api.ApiMethodInfo(
            http_method=u'PATCH',
            method_id=u'compute.urlMaps.patch',
            ordered_params=[u'project', u'urlMap'],
            path_params=[u'project', u'urlMap'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps/{urlMap}',
            request_field=u'urlMapResource',
            request_type_name=u'ComputeUrlMapsPatchRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'Update': base_api.ApiMethodInfo(
            http_method=u'PUT',
            method_id=u'compute.urlMaps.update',
            ordered_params=[u'project', u'urlMap'],
            path_params=[u'project', u'urlMap'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps/{urlMap}',
            request_field=u'urlMapResource',
            request_type_name=u'ComputeUrlMapsUpdateRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'Validate': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.urlMaps.validate',
            ordered_params=[u'project', u'urlMap'],
            path_params=[u'project', u'urlMap'],
            query_params=[],
            relative_path=u'projects/{project}/global/urlMaps/{urlMap}/validate',
            request_field=u'urlMapsValidateRequest',
            request_type_name=u'ComputeUrlMapsValidateRequest',
            response_type_name=u'UrlMapsValidateResponse',
            supports_download=False,
        ),
    }
    # No media-upload methods exist on this service.
    self._upload_configs = {
    }
def Delete(self, request, global_params=None):
    """Delete the given UrlMap resource.

    Args:
      request: (ComputeUrlMapsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Delete'), request,
        global_params=global_params)
def Get(self, request, global_params=None):
    """Fetch the given UrlMap resource.

    Args:
      request: (ComputeUrlMapsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (UrlMap) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Get'), request,
        global_params=global_params)
def Insert(self, request, global_params=None):
    """Create a UrlMap in the given project from the request body.

    Args:
      request: (ComputeUrlMapsInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request,
        global_params=global_params)
def List(self, request, global_params=None):
    """List the UrlMap resources visible in the given project.

    Args:
      request: (ComputeUrlMapsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (UrlMapList) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('List'), request,
        global_params=global_params)
def Patch(self, request, global_params=None):
    """Update the UrlMap resource using patch semantics.

    Args:
      request: (ComputeUrlMapsPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Patch'), request,
        global_params=global_params)
def Update(self, request, global_params=None):
    """Replace the entire content of the UrlMap resource.

    Args:
      request: (ComputeUrlMapsUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Look up the static method configuration and issue the call in one step.
    return self._RunMethod(
        self.GetMethodConfig('Update'), request,
        global_params=global_params)
def | |
pagure.lib.query.get_authorized_project(self.session, "test")
request = pagure.lib.query.search_pull_requests(
self.session, project_id=1, requestid=1
)
self.assertEqual(
pagure.lib.query.get_watch_list(self.session, request),
set(["pingou", "foo"]),
)
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_invalid_project(self):
    """A PR opened against a project that does not exist yields 404/ENOPROJECT."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    payload = {
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "test",
    }
    # "foobar" is not a project known to the instance.
    response = self.app.post(
        "/api/0/foobar/pull-request/new",
        headers={"Authorization": "token aaabbbcccddd"},
        data=payload,
    )

    self.assertEqual(response.status_code, 404)
    body = json.loads(response.get_data(as_text=True))
    self.assertDictEqual(
        body, {"error": "Project not found", "error_code": "ENOPROJECT"}
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_missing_title(self):
    """Omitting the title field yields a 400 with a field-required error."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # No "title" key in the submitted form data.
    payload = {
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or incomplete input submitted",
            "error_code": "EINVALIDREQ",
            "errors": {"title": ["This field is required."]},
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_missing_branch_to(self):
    """Omitting branch_to yields a 400 with a field-required error."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # No "branch_to" key in the submitted form data.
    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or incomplete input submitted",
            "error_code": "EINVALIDREQ",
            "errors": {"branch_to": ["This field is required."]},
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_missing_branch_from(self):
    """Omitting branch_from yields a 400 with a field-required error."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # No "branch_from" key in the submitted form data.
    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or incomplete input submitted",
            "error_code": "EINVALIDREQ",
            "errors": {"branch_from": ["This field is required."]},
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_pr_disabled(self):
    """Opening a PR is refused with 404 when the project disabled pull requests."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # Turn off the pull-request tracker on the target project.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    cfg = repo.settings
    cfg["pull_requests"] = False
    repo.settings = cfg
    self.session.add(repo)
    self.session.commit()

    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 404)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Pull-Request have been deactivated for this project",
            "error_code": "EPULLREQUESTSDISABLED",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_signed_pr(self):
    """PR creation fails with 400 when sign-off is enforced and commits lack it."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # Enforce signed-off commits on the target project.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    cfg = repo.settings
    cfg["Enforce_signed-off_commits_in_pull-request"] = True
    repo.settings = cfg
    self.session.add(repo)
    self.session.commit()

    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "This repo enforces that all commits are signed "
            "off by their author.",
            "error_code": "ENOSIGNEDOFF",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_invalid_branch_from(self):
    """A non-existent source branch yields a 400 with an explanatory error."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # Enable sign-off enforcement as in the sibling tests; the branch error
    # below is still what the API reports.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    cfg = repo.settings
    cfg["Enforce_signed-off_commits_in_pull-request"] = True
    repo.settings = cfg
    self.session.add(repo)
    self.session.commit()

    # "foobarbaz" was never created as a branch.
    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "foobarbaz",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token aaabbbcccddd"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or incomplete input submitted",
            "error_code": "EINVALIDREQ",
            "errors": "Branch foobarbaz does not exist",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_invalid_token(self):
    """An API token not valid for the target project yields a 401 EINVALIDTOK."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "foobarbaz",
    }
    # The token is associated with project "test", not "test2".
    response = self.app.post(
        "/api/0/test2/pull-request/new",
        headers={"Authorization": "token aaabbbcccddd"},
        data=payload,
    )

    self.assertEqual(response.status_code, 401)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or expired token. Please visit "
            "http://localhost.localdomain/settings#nav-api-tab to get or "
            "renew your API token.",
            "error_code": "EINVALIDTOK",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_invalid_access(self):
    """A token for a user without commit access yields 401 ENOTHIGHENOUGH."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    # Token belongs to user 2, who has no commit access on "test".
    tests.create_tokens(self.session, user_id=2)
    tests.create_tokens_acl(self.session)

    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "master",
        "branch_from": "foobarbaz",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token aa<PASSWORD>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 401)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "You do not have sufficient permissions to "
            "perform this action",
            "error_code": "ENOTHIGHENOUGH",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_invalid_branch_to(self):
    """A non-existent target branch yields a 400 with an explanatory error."""
    # Standard fixture setup: projects, bare git repos, and API tokens.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")
    tests.create_tokens(self.session)
    tests.create_tokens_acl(self.session)

    # Enable sign-off enforcement as in the sibling tests; the branch error
    # below is still what the API reports.
    repo = pagure.lib.query.get_authorized_project(self.session, "test")
    cfg = repo.settings
    cfg["Enforce_signed-off_commits_in_pull-request"] = True
    repo.settings = cfg
    self.session.add(repo)
    self.session.commit()

    # "foobarbaz" was never created as a branch in the target repo.
    payload = {
        "title": "Test PR",
        "initial_comment": "Nothing much, the changes speak for themselves",
        "branch_to": "foobarbaz",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token <KEY>"},
        data=payload,
    )

    self.assertEqual(response.status_code, 400)
    self.assertDictEqual(
        json.loads(response.get_data(as_text=True)),
        {
            "error": "Invalid or incomplete input submitted",
            "error_code": "EINVALIDREQ",
            "errors": "Branch foobarbaz could not be found in the "
            "target repo",
        },
    )
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_project_token_different_project(self):
    """Test the api_pull_request_create method with the project token
    of a different project - fails with 401."""
    # Standard fixture setup: projects, bare git repos.
    tests.create_projects(self.session)
    tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
    tests.create_projects_git(
        os.path.join(self.path, "requests"), bare=True
    )
    tests.add_readme_git_repo(os.path.join(self.path, "repos", "test.git"))
    tests.add_commit_git_repo(
        os.path.join(self.path, "repos", "test.git"), branch="test"
    )
    # Token is bound to project 2, while the request targets project "test".
    tests.create_tokens(self.session, project_id=2)
    tests.create_tokens_acl(self.session)
    headers = {"Authorization": "token foo_token"}
    data = {
        "title": "Test of PR",
        # Fixed field name: the API expects "initial_comment", the previous
        # "inicial comment" key was a typo. (The request is rejected with
        # 401 before form validation either way.)
        "initial_comment": "Some readme adjustment",
        "branch_to": "master",
        "branch_from": "test",
    }
    output = self.app.post(
        "/api/0/test/pull-request/new", headers=headers, data=data
    )
    # The project-scoped token does not match the target project.
    self.assertEqual(output.status_code, 401)
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_user_token_invalid_acls(self):
    """A user token lacking the pull-request ACL cannot open a PR (401)."""
    # Standard fixture setup: projects and bare git repos.
    repos = os.path.join(self.path, "repos")
    tests.create_projects(self.session)
    tests.create_projects_git(repos, bare=True)
    tests.create_projects_git(os.path.join(self.path, "requests"), bare=True)
    test_git = os.path.join(repos, "test.git")
    tests.add_readme_git_repo(test_git)
    tests.add_commit_git_repo(test_git, branch="test")

    # User-scoped token carrying only ACLs unrelated to opening PRs.
    tests.create_tokens(self.session, project_id=None)
    unrelated_acls = (
        "create_project",
        "fork_project",
        "modify_project",
        "update_watch_status",
    )
    for acl_name in unrelated_acls:
        tests.create_tokens_acl(self.session, acl_name=acl_name)

    payload = {
        "title": "Test of PR",
        "initial_comment": "Some readme adjustment",
        "branch_to": "master",
        "branch_from": "test",
    }
    response = self.app.post(
        "/api/0/test/pull-request/new",
        headers={"Authorization": "token aa<PASSWORD>cccddd"},
        data=payload,
    )

    self.assertEqual(response.status_code, 401)
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_api_pull_request_open_from_branch_to_origin(self):
"""Test the api_pull_request_create method from a fork to a master,
with project token of a origin with all the acls"""
tests.create_projects(self.session)
tests.create_projects(
self.session, is_fork=True, hook_token_suffix="foo"
)
project_query = self.session.query(pagure.lib.model.Project)
for project in project_query.filter_by(name="test").all():
if project.parent_id == None:
parent = project
else:
child = project
tests.create_projects_git(os.path.join(self.path, "repos"), bare=True)
tests.create_projects_git(
os.path.join(self.path, "requests"), bare=True
)
tests.add_readme_git_repo(
os.path.join(self.path, "repos", "forks", "pingou", "test.git"),
branch="branch",
)
tests.add_commit_git_repo(
os.path.join(self.path, "repos", "forks", "pingou", "test.git"),
branch="branch",
)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
import config
import ipaddress
import mle
import thread_cert
# --- Node identifiers for the test topology (see the diagram below) ---
BBR_1 = 1  # Collapsed with Leader Role
ROUTER_1_1 = 2
ROUTER_1_2 = 3
FED_1_2_1 = 4
MED_1_2_1 = 5
SED_1_2_1 = 6
FED_1_2_2 = 7
MED_1_2_2 = 8
SED_1_2_2 = 9
# --- Timing parameters, in seconds unless noted otherwise ---
WAIT_ATTACH = 5
WAIT_REDUNDANCE = 3
ROUTER_SELECTION_JITTER = 1
BBR_REGISTRATION_JITTER = 5
SED_POLL_PERIOD = 2000  # 2s (value is in milliseconds)
MED_TIMEOUT = 20  # 20s
PARENT_AGGREGATE_DELAY = 5  # 5s
# Manually assigned DUA interface identifier used in step 2a of the test.
DUA_IID_MANUAL1 = '4444333322221111'
# --- DUA.rsp status codes exercised by this test ---
ST_DUA_SUCCESS = 0
ST_DUA_REREGISTER = 1
ST_DUA_INVALID = 2
ST_DUA_DUPLICATE = 3
ST_DUA_NO_RESOURCES = 4
ST_DUA_BBR_NOT_PRIMARY = 5
ST_DUA_GENERAL_FAILURE = 6
# Re-registration delay (seconds) advertised by BBR_1.
BBR_REREGISTRATION_DELAY = 10
"""
Topology
MED_1_2_1 SED_1_2_1
\ |
\ |
FED_1_2_1 --- ROUTER_1_1 FED_1_2_2 MED_1_2_2
| | /
| | /
BBR_1 (LEADER) --- ROUTER_1_2 --- SED_1_2_2
1) Bring up BBR_1, BBR_1 becomes Leader and Primary Backbone Router, with Domain
Prefix without `P_slaac`.
2) Test behaviors of ROUTER_1_2 under various response status:
a) Bring up ROUTER_1_2 with DUA_IID_MANUAL1, one DUA.req should happen to register DUA.
b) Remove DUA_IID_MANUAL1, one DUA.req should happen for the new generated DUA via SLAAC.
c) Configure BBR_1 to respond with the fatal error ST_DUA_INVALID, update BBR_1 with
BBR_REREGISTRATION_DELAY, ROUTER_1_2 should re-register its DUA within BBR_REREGISTRATION_DELAY.
- ROUTER_1_2 should remove its dua
- update network data, ROUTER_1_2 would regenerate and register the same dua
d) Configure BBR_1 to respond with the fatal error ST_DUA_DUPLICATE, update seqno to trigger reregistration.
After received DUA.rsp with ST_DUA_DUPLICATE, ROUTER_1_2 should
- increase dad counter
- regenerate a new DUA
- send DUA.req
e) (repeated) Configure BBR_1 to respond with per remaining error status:
- increase BBR seqno to trigger reregistration
- ROUTER_1_2 should re-register within BBR_REREGISTRATION_DELAY. For the not fatal errors, ROUTER_1_2
should re-register within another BBR_REREGISTRATION_DELAY (with least delay if ST_DUA_REREGISTER)
3) Bring up FED_1_2_1, MED_1_2_1, SED_1_2_1, they should send DUA.req by themselves as the parent
is of Thread 1.1 version.
4) Bring up FED_1_2_2, it sends DUA.req itself as it is an FTD.
5) MED_1_2_2, SED_1_2_2, MTDs should register their DUA to their parent
by Child Update Request, and the parent would send DUA.req for them on behalf.
6) Increase seqno on BBR_1, within BBR_REREGISTRATION_DELAY, there should be one DUA.req from
per [FED_1_2_1, MED_1_2_1, SED_1_2_1, FED_1_2_2], and 3 DUA.req from ROUTER_1_2 among which
2 DUA.req are for its MTD children.
"""
class TestDomainUnicastAddressRegistration(thread_cert.TestCase):
# Per-node configuration consumed by thread_cert.TestCase:
# 'version' selects the Thread stack version, 'whitelist' the permitted
# radio neighbours; 'mode' is 'rsn' for the MED_* nodes and 'sn' for the
# SED_* nodes here, and 'is_bbr' marks the Backbone Router candidate.
TOPOLOGY = {
    BBR_1: {
        'version': '1.2',
        'whitelist': [ROUTER_1_1, ROUTER_1_2],
        'is_bbr': True
    },
    ROUTER_1_1: {
        'version': '1.1',
        'whitelist': [BBR_1, FED_1_2_1, MED_1_2_1, SED_1_2_1]
    },
    ROUTER_1_2: {
        'version': '1.2',
        'whitelist': [BBR_1, FED_1_2_2, MED_1_2_2, SED_1_2_2]
    },
    FED_1_2_1: {
        'version': '1.2',
        'whitelist': [ROUTER_1_1],
    },
    MED_1_2_1: {
        'mode': 'rsn',
        'version': '1.2',
        'whitelist': [ROUTER_1_1],
    },
    SED_1_2_1: {
        'mode': 'sn',
        'version': '1.2',
        'whitelist': [ROUTER_1_1],
    },
    FED_1_2_2: {
        'version': '1.2',
        'whitelist': [ROUTER_1_2],
    },
    MED_1_2_2: {
        'mode': 'rsn',
        'version': '1.2',
        'whitelist': [ROUTER_1_2],
    },
    SED_1_2_2: {
        'mode': 'sn',
        'version': '1.2',
        'whitelist': [ROUTER_1_2],
    },
}
"""All nodes are created with default configurations"""
def __get_iid(self, address):
    """Return the interface identifier of an IPv6 address.

    The IID is the last 64 bits, i.e. the last four 16-bit groups of the
    exploded address, concatenated without separators.

    Args:
        address (string): An IPv6 address;
    """
    groups = ipaddress.ip_address(address).exploded.split(':')
    return ''.join(groups[4:])
def __check_dua_registration_tmf(self, node, occurences=1):
    """Assert that `node` sent at least `occurences` DUA.req messages
    (CoAP 0.02 POSTs to /n/dr) since its message queue was last flushed."""
    sent = self.simulator.get_messages_sent_by(node)
    for idx in range(1, occurences + 1):
        found = sent.next_coap_message('0.02', '/n/dr', False)
        assert found, 'Expected {}, but {}th not found\n node: {}(extaddr: {})'.format(
            occurences, idx, node, self.nodes[node].get_addr64())
def test(self):
# starting context id
context_id = 1
seq_num = 1
# 1) Bring up BBR_1, BBR_1 becomes Leader and Primary Backbone Router, with Domain
# Prefix without `P_slaac`.
self.nodes[BBR_1].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
self.nodes[BBR_1].set_bbr_registration_jitter(BBR_REGISTRATION_JITTER)
self.nodes[BBR_1].set_backbone_router(seqno=seq_num, reg_delay=BBR_REREGISTRATION_DELAY)
self.nodes[BBR_1].start()
WAIT_TIME = WAIT_ATTACH + ROUTER_SELECTION_JITTER
self.simulator.go(WAIT_TIME)
self.assertEqual(self.nodes[BBR_1].get_state(), 'leader')
self.nodes[BBR_1].enable_backbone_router()
WAIT_TIME = BBR_REGISTRATION_JITTER + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.assertEqual(self.nodes[BBR_1].get_backbone_router_state(), 'Primary')
assert self.nodes[BBR_1].has_ipmaddr(config.ALL_NETWORK_BBRS_ADDRESS)
assert not self.nodes[BBR_1].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)
self.nodes[BBR_1].set_domain_prefix(config.DOMAIN_PREFIX, 'prosD')
WAIT_TIME = WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
assert self.nodes[BBR_1].has_ipmaddr(config.ALL_DOMAIN_BBRS_ADDRESS)
self.simulator.set_lowpan_context(context_id, config.DOMAIN_PREFIX)
domain_prefix_cid = context_id
# 2) Test behaviors of ROUTER_1_2 under various response status:
# a) Bring up ROUTER_1_2 with DUA_IID_MANUAL1, one DUA.req should happen to register DUA.
# Flush relative message queues.
self.flush_nodes([ROUTER_1_2])
self.nodes[ROUTER_1_2].set_dua_iid(DUA_IID_MANUAL1)
self.nodes[ROUTER_1_2].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
self.nodes[ROUTER_1_2].start()
WAIT_TIME = WAIT_ATTACH
self.simulator.go(WAIT_TIME)
self.assertEqual(self.nodes[ROUTER_1_2].get_state(), 'router')
mliid = self.__get_iid(self.nodes[ROUTER_1_2].get_mleid())
WAIT_TIME = WAIT_ATTACH + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2)
# b) Remove DUA_IID_MANUAL1, one DUA.req should happen for the new generated DUA via SLAAC.
# Flush relative message queues.
self.flush_nodes([ROUTER_1_2])
self.nodes[ROUTER_1_2].clear_dua_iid()
WAIT_TIME = WAIT_ATTACH + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2)
#c) Configure BBR_1 to respond with the fatal error ST_DUA_INVALID, update BBR_1 with
# BBR_REREGISTRATION_DELAY, ROUTER_1_2 should re-register its DUA within BBR_REREGISTRATION_DELAY.
# - ROUTER_1_2 should remove its dua
# - update network data, ROUTER_1_2 would regenerate and register the same dua
# Flush relative message queues.
self.flush_nodes([ROUTER_1_2])
seq_num = seq_num + 1
self.nodes[BBR_1].set_next_dua_response(ST_DUA_INVALID, mliid)
self.nodes[BBR_1].set_backbone_router(seqno=seq_num)
WAIT_TIME = BBR_REREGISTRATION_DELAY + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2, 1)
dua = self.nodes[ROUTER_1_2].get_addr(config.DOMAIN_PREFIX)
assert not dua, 'Error: Unexpected DUA ({}) found'.format(dua)
# Retry after new network data is available
seq_num = seq_num + 1
dua = self.nodes[ROUTER_1_2].get_addr(config.DOMAIN_PREFIX)
self.nodes[BBR_1].set_backbone_router(seqno=seq_num)
WAIT_TIME = BBR_REREGISTRATION_DELAY + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2, 1)
dua = self.nodes[ROUTER_1_2].get_addr(config.DOMAIN_PREFIX)
assert dua, 'Error: Expected DUA ({}) not found'.format(dua)
#d) Configure BBR_1 to respond with the fatal error ST_DUA_DUPLICATE, update seqno to trigger reregistration.
# After received DUA.rsp with ST_DUA_DUPLICATE, ROUTER_1_2 should
# - increase dad counter
# - regenerate a new DUA
# - send DUA.req
# Flush relative message queues.
self.flush_nodes([ROUTER_1_2])
seq_num = seq_num + 1
self.nodes[BBR_1].set_next_dua_response(ST_DUA_DUPLICATE, mliid)
self.nodes[BBR_1].set_backbone_router(seqno=seq_num)
WAIT_TIME = BBR_REREGISTRATION_DELAY * 2 + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2, 2)
dua2 = self.nodes[ROUTER_1_2].get_addr(config.DOMAIN_PREFIX)
assert dua2, 'Error: Expected DUA ({}) not found'.format(dua2)
assert dua2 != dua, 'Error: Expected Different DUA not found, same DUA {}'.format(dua2)
# e) (repeated) Configure BBR_1 to respond with per remaining error status:
# - increase BBR seqno to trigger reregistration
# - ROUTER_1_2 should re-register within BBR_REREGISTRATION_DELAY. For the not fatal errors, ROUTER_1_2
# should re-register within another BBR_REREGISTRATION_DELAY (with least delay if ST_DUA_REREGISTER)
for status in [ST_DUA_REREGISTER, ST_DUA_NO_RESOURCES, ST_DUA_BBR_NOT_PRIMARY, ST_DUA_GENERAL_FAILURE]:
print('Testing Status %d...'.format(status))
# Flush relative message queues.
self.flush_nodes([ROUTER_1_2])
seq_num = seq_num + 1
self.nodes[BBR_1].set_next_dua_response(status, mliid)
self.nodes[BBR_1].set_backbone_router(seqno=seq_num)
WAIT_TIME = BBR_REREGISTRATION_DELAY + WAIT_REDUNDANCE
if status != ST_DUA_REREGISTER:
WAIT_TIME += BBR_REREGISTRATION_DELAY
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(ROUTER_1_2, 2)
# Bring up Router_1_1
self.nodes[ROUTER_1_1].set_router_selection_jitter(ROUTER_SELECTION_JITTER)
self.nodes[ROUTER_1_1].start()
WAIT_TIME = WAIT_ATTACH
self.simulator.go(WAIT_TIME)
self.assertEqual(self.nodes[ROUTER_1_1].get_state(), 'router')
dua = self.nodes[ROUTER_1_1].get_addr(config.DOMAIN_PREFIX)
assert not dua, 'Error: Unexpected DUA ({}) found'.format(dua)
# Configure children
for node in [FED_1_2_1, FED_1_2_2]:
self.nodes[node].set_routereligible(False)
for node in [SED_1_2_1, SED_1_2_2]:
self.nodes[node].set_pollperiod(SED_POLL_PERIOD)
for node in [MED_1_2_1, MED_1_2_2]:
self.nodes[node].set_timeout(MED_TIMEOUT)
# 3) Bring up FED_1_2_1, MED_1_2_1, SED_1_2_1, they should send DUA.req by themselves as the parent
# is of Thread 1.1 version.
# 4) Bring up FED_1_2_2, it sends DUA.req itself as it it FTD.
for node in [FED_1_2_1, MED_1_2_1, SED_1_2_1, FED_1_2_2]:
print("Starting child {} (extaddr: {})...".format(node, self.nodes[node].get_addr64()))
# Flush all message queues.
self.flush_all()
self.nodes[node].start()
WAIT_TIME = WAIT_ATTACH
self.simulator.go(WAIT_TIME)
self.assertEqual(self.nodes[node].get_state(), 'child')
WAIT_TIME = PARENT_AGGREGATE_DELAY + WAIT_REDUNDANCE
self.simulator.go(WAIT_TIME)
self.__check_dua_registration_tmf(node)
# 5) MED_1_2_2, SED_1_2_2, MTDs should should register their DUA to their parent
# by Child Update Request, and the parent would | |
===
:params:
sym_plane ("yz"): list of "xy", "yz", "zx"
NCHW bool : point dimension
xyz : (B,3,N) or (B,N,3)
"""
def __init__(self, sym_plane=("yz",), NCHW=True):
    """Build one mirror operation per requested symmetry plane.

    :params:
        sym_plane: tuple/list drawn from "xy", "yz", "zx"
        NCHW: True if point clouds are (B,3,N), False for (B,N,3)
    """
    super().__init__()
    self.sym_plane = sym_plane
    assert isinstance(self.sym_plane, (tuple, list)), "sym_plane must be a list or tuple"
    self.metric = LabeledChamferDistance(beta=0.0, gamma=1.0, delta=0)
    self.register_buffer("base_ones", torch.ones((3,), dtype=torch.float))
    self.NCHW = NCHW
    self.mirror_ops = []
    for plane in self.sym_plane:
        # The coordinate missing from the plane name is the one to negate.
        if 'x' not in plane:
            axis = 0
        elif 'y' not in plane:
            axis = 1
        elif 'z' not in plane:
            axis = 2
        else:
            raise ValueError
        # Bind `axis` as a default so each lambda keeps its own value.
        self.mirror_ops.append(
            lambda xyz, _axis=axis: xyz * self.get_mirror_multiplier(_axis))
def get_mirror_multiplier(self, dim_id):
    """Return a +/-1 multiplier tensor that negates coordinate `dim_id`.

    Shaped (1,3,1) when self.NCHW else (1,1,3), so it broadcasts over the
    point dimension of the input cloud.
    """
    flip = self.base_ones.clone()
    flip[dim_id] = -1
    target_shape = (1, 3, 1) if self.NCHW else (1, 1, 3)
    return flip.view(target_shape)
def forward(self, xyz):
    """Sum the metric between `xyz` and each of its mirrored copies.

    The mirrored copy is detached, so gradients flow only through `xyz`.
    """
    total = 0
    for mirror in self.mirror_ops:
        mirrored = mirror(xyz).detach()
        total = total + self.metric(mirrored, xyz)[0]
    return total
class ConditionNumberLoss(torch.nn.Module):
    """
    Compare a bounded "condition number" of local neighborhoods between two
    corresponding point clouds: per point, S_max / (S_min + S_max) of the
    singular values of the centered k-NN neighborhood.
    ===
    params:
        ref_points: (B,N,dim)
        points: (B,N,dim)
    """
    def __init__(self, ball_size, metric, reduction="mean"):
        super().__init__()
        self.reduction = reduction
        # NOTE(review): the name suggests a squared radius, but this computes
        # ball_size * 2, not ball_size ** 2 — confirm which is intended.
        self.ball_size2 = ball_size * 2
        self.metric = metric
        # neighborhood size for the local SVD fit
        self.nn_size = 16

    def forward(self, ref_points, points, *args, **kwargs):
        B, N, C = ref_points.shape
        # TODO replace with ball query
        # (B,P,K,3), (B,P,K), (B,P,K)
        ref_grouped_points, ref_group_idx, ref_group_dist = faiss_knn(self.nn_size, ref_points, ref_points, NCHW=False)
        mask = (ref_group_dist < self.ball_size2)
        # zero out neighbors outside the ball so they don't bias the center
        ref_grouped_points.masked_fill_(~mask.unsqueeze(-1), 0.0)
        # number of points inside the ball (B,P,1); presumably >= 1 since each
        # point should be its own nearest neighbor — TODO confirm for faiss_knn
        nball = torch.sum(mask.to(torch.float), dim=-1, keepdim=True)
        ref_group_center = torch.sum(ref_grouped_points, dim=2, keepdim=True) / nball.unsqueeze(-1)
        # B,P,K,3 centered neighborhoods
        ref_points = ref_grouped_points - ref_group_center
        ref_allpoints = ref_points.view(-1, self.nn_size, C).contiguous()
        U_ref, S_ref, V_ref = batch_svd(ref_allpoints)
        # bounded ratio in (0, 1]: S_max / (S_min + S_max)
        ref_cond = S_ref[:, 0] / (S_ref[:, -1] + S_ref[:, 0])
        ref_cond = ref_cond.view(B, N).contiguous()
        # reuse the reference neighborhoods (by index) for the second cloud
        grouped_points = torch.gather(points.unsqueeze(1).expand(-1, N, -1, -1), 2, ref_group_idx.unsqueeze(-1).expand(-1, -1, -1, C))
        # BUGFIX: masked_fill (out-of-place) discarded its result, so the
        # second cloud was never masked; use the in-place variant.
        grouped_points.masked_fill_(~mask.unsqueeze(-1), 0.0)
        group_center = torch.sum(grouped_points, dim=2, keepdim=True) / nball.unsqueeze(-1)
        points = grouped_points - group_center
        allpoints = points.view(-1, self.nn_size, C).contiguous()
        # S (BN, k)
        U, S, V = batch_svd(allpoints)
        cond = S[:, 0] / (S[:, -1] + S[:, 0])
        cond = cond.view(B, N).contiguous()
        return self.metric(cond, ref_cond)
class InsideLoss2D(torch.nn.Module):
    """Penalize polygon-cage sample points that lie inside the given shape."""
    def __init__(self, reduction="mean"):
        super().__init__()
        # one of "mean", "max", "sum", "none"
        self.reduction = reduction

    def forward(self, cage, shape, shape_normals, epsilon=0.01, interpolate=True):
        """Penalize polygon cage that is inside the given shape
        Args:
            cage: (B,M,3)
            shape: (B,N,3)
            shape_normals: (B,N,3)
        Returns:
            scalar loss, or the per-point loss tensor when reduction == "none"
        Raises:
            NotImplementedError: for an unknown reduction mode
        """
        B, M, D = cage.shape
        interpolate_n = 10
        # successor of every cage vertex, wrapping around: (B,M,D)
        cage_p = torch.roll(cage, -1, dims=1)
        t = torch.linspace(0, 1, interpolate_n).to(device=cage_p.device)
        # sample interpolate_n points along every cage edge: (B,M,K,D)
        cage_itp = t.reshape([1, 1, interpolate_n, 1]) * cage_p.unsqueeze(2).expand(-1, -1, interpolate_n, -1) + \
            (1 - t.reshape([1, 1, interpolate_n, 1])) * cage.unsqueeze(2).expand(-1, -1, interpolate_n, -1)
        cage_itp = cage_itp.reshape(B, -1, D)
        # closest shape point (and its normal) for every sampled cage point
        nn_point, nn_index, _ = faiss_knn(1, cage_itp, shape, NCHW=False)
        nn_point = nn_point.squeeze(2)
        nn_normal = torch.gather(
            shape_normals.unsqueeze(1).expand(-1, nn_index.shape[1], -1, -1), 2,
            nn_index.unsqueeze(-1).expand(-1, -1, -1, shape_normals.shape[-1]))
        nn_normal = nn_normal.squeeze(2)
        # if <(q-p), n> is negative, then this point is inside the shape, gradient is along the normal direction
        dot = dot_product(cage_itp - nn_point - epsilon * nn_normal, nn_normal, dim=-1)
        loss = torch.where(dot < 0, -dot, torch.zeros_like(dot))
        if self.reduction == "mean":
            return loss.mean()
        elif self.reduction == "max":
            return torch.mean(torch.max(loss, dim=-1)[0])
        elif self.reduction == "sum":
            # BUGFIX: was `loss.mean(torch.sum(loss, dim=-1))`, which passes a
            # tensor as the `dim` argument; mean over per-sample sums instead.
            return torch.mean(torch.sum(loss, dim=-1))
        elif self.reduction == "none":
            return loss
        else:
            raise NotImplementedError
class InterpolatedCDTriMesh(torch.nn.Module):
    """
    Reconstruction loss between a triangle-mesh cage and a shape.

    Barycentric sample points are drawn on every cage face; the loss combines
    (see forward):
        (gamma + delta*|CAGE|) * mean(shape2cage) + mean(cage2shape)
        + beta * max(cage2shape)
    Distances below 0.05 are zeroed by a hard-shrink threshold.
    """
    def __init__(self, interpolate_n=4, beta=1.0, gamma=1, delta=0):
        super().__init__()
        self.beta = beta
        self.gamma = gamma
        self.delta = delta
        # BUGFIX: was hard-coded to 4, ignoring the constructor argument.
        self.interpolate_n = interpolate_n
        t = torch.linspace(0, 1, interpolate_n)
        # [(T,T),(T,T)] grid over the first two barycentric coordinates
        sample_weights = torch.meshgrid(t, t)
        # (T*T,3): append the third barycentric coordinate 1-u-v
        sample_weights = torch.stack(sample_weights + ((1 - sample_weights[0] - sample_weights[1]),), dim=-1).view(-1, 3)
        # keep only valid barycentric triples (all coordinates >= 0)
        mask = (sample_weights[:, 2] >= 0).unsqueeze(-1).expand_as(sample_weights)
        # (S,3)
        self.sample_weights = torch.masked_select(sample_weights, mask).view(-1, 3)
        self.threshold = torch.nn.Hardshrink(0.05)

    def forward(self, cage_v, cage_f, shape, interpolate=True):
        """
        Args:
            cage_v: (B,M,3) cage vertices
            cage_f: (B,F,3) cage triangle vertex indices
            shape: (B,N,3) target point cloud
        """
        B, M, D = cage_v.shape
        B, F, _ = cage_f.shape
        B, N, _ = shape.shape
        self.sample_weights = self.sample_weights.to(device=shape.device)
        # sample points using interpolated barycentric weights on cage triangles (B,F,1,3,3)
        cage_face_vertices = torch.gather(cage_v, 1, cage_f.reshape(B, F * 3, 1).expand(-1, -1, cage_v.shape[-1])).reshape(B, F, 1, 3, 3)
        sample_weights = self.sample_weights.unsqueeze(0).unsqueeze(0).unsqueeze(-1).to(device=cage_v.device)  # (1,1,S,3,1)
        # (B,F,S,3) -> (B,F*S,3)
        cage_sampled_points = torch.sum(sample_weights * cage_face_vertices, dim=-2).reshape(B, -1, 3)
        cage2shape, shape2cage, _, _ = nndistance(cage_sampled_points, shape)
        # ignore distances below the hard-shrink threshold
        shape2cage = self.threshold(shape2cage)
        cage2shape = self.threshold(cage2shape)
        loss = torch.mean(shape2cage, dim=1) * (self.gamma + self.delta * M) + torch.mean(cage2shape, dim=1) + self.beta * torch.max(cage2shape, dim=1)[0]
        return torch.mean(loss)
class InsideLoss3DTriMesh(torch.nn.Module):
    """Penalize cage sample points that lie inside a triangle mesh.

    Args (of forward):
        cage_v: (B,M,3)
        cage_f: (B,F,3)
        shape: (B,N,3)
        shape_vn: (B,N,3) per-point shape normals
    """
    def __init__(self, reduction="mean", interpolate_n=4):
        super().__init__()
        # one of "mean", "max", "sum", "none"
        self.reduction = reduction
        t = torch.linspace(0, 1, interpolate_n)
        # [(T,T),(T,T)] grid over the first two barycentric coordinates
        sample_weights = torch.meshgrid(t, t)
        # (T*T,3): append the third barycentric coordinate 1-u-v
        sample_weights = torch.stack(sample_weights + ((1 - sample_weights[0] - sample_weights[1]),), dim=-1).view(-1, 3)
        # keep only valid barycentric triples (all coordinates >= 0)
        mask = (sample_weights[:, 2] >= 0).unsqueeze(-1).expand_as(sample_weights)
        # (S,3)
        self.sample_weights = torch.masked_select(sample_weights, mask).view(-1, 3)

    def forward(self, cage_v, cage_f, shape, shape_vn, epsilon=0.01, interpolate=True):
        B, M, D = cage_v.shape
        B, F, _ = cage_f.shape
        B, N, _ = shape.shape
        self.sample_weights = self.sample_weights.to(device=shape.device)
        # sample points using interpolated barycentric weights on cage triangles (B,F,1,3,3)
        cage_face_vertices = torch.gather(cage_v, 1, cage_f.reshape(B, F * 3, 1).expand(-1, -1, cage_v.shape[-1])).reshape(B, F, 1, 3, 3)
        sample_weights = self.sample_weights.unsqueeze(0).unsqueeze(0).unsqueeze(-1).to(device=cage_v.device)  # (1,1,S,3,1)
        # (B,F,S,3) -> (B,F*S,3)
        cage_sampled_points = torch.sum(sample_weights * cage_face_vertices, dim=-2).reshape(B, -1, 3)
        # find the closest shape point (and its normal) for each sampled point
        nn_point, nn_index, _ = faiss_knn(1, cage_sampled_points, shape, NCHW=False)
        nn_point = nn_point.squeeze(2)
        nn_normal = torch.gather(
            shape_vn.unsqueeze(1).expand(-1, nn_index.shape[1], -1, -1), 2,
            nn_index.unsqueeze(-1).expand(-1, -1, -1, shape_vn.shape[-1]))
        nn_normal = nn_normal.squeeze(2)
        # if <(q-p), n> is negative, then this point is inside the shape, gradient is along the normal direction
        dot = dot_product(cage_sampled_points - nn_point - epsilon * nn_normal, nn_normal, dim=-1)
        loss = torch.where(dot < 0, -dot, torch.zeros_like(dot))
        if self.reduction == "mean":
            return loss.mean()
        elif self.reduction == "max":
            return torch.mean(torch.max(loss, dim=-1)[0])
        elif self.reduction == "sum":
            # BUGFIX: was `loss.mean(torch.sum(loss, dim=-1))`, which passes a
            # tensor as the `dim` argument; mean over per-sample sums instead.
            return torch.mean(torch.sum(loss, dim=-1))
        elif self.reduction == "none":
            return loss
        else:
            raise NotImplementedError
class MeshDihedralAngleLoss(torch.nn.Module):
    """
    If both vert1 and vert2 are given, penalize the difference between the
    dihedral angles of vert1 and vert2; otherwise penalize dihedral angles
    sharper than pi/4.

    vert1 (B,N,3)
    vert2 (B,N,3)
    edge_points List(torch.Tensor(E, 4))
    """
    def __init__(self, threshold=np.pi/6, edge_points=None, reduction="mean", metric=None):
        super().__init__()
        self.edge_points = edge_points
        self.reduction = reduction
        # NOTE(review): `threshold` is stored but forward() hard-codes pi/4
        # for the single-mesh case — confirm which value is intended.
        self.threshold = threshold
        # BUGFIX: self.metric was never assigned, so the two-mesh branch of
        # forward() raised AttributeError; default to an MSE comparison while
        # letting callers inject their own metric (backward-compatible).
        self.metric = torch.nn.MSELoss() if metric is None else metric

    def forward(self, vert1, vert2=None, edge_points=None):
        if edge_points is None:
            edge_points = self.edge_points
        assert (edge_points is not None)
        B = vert1.shape[0]
        loss = []
        for b in range(B):
            angles1 = dihedral_angle(vert1[b], edge_points)
            if vert2 is not None:
                # penalize deviation from the reference mesh's angles
                angles2 = dihedral_angle(vert2[b], edge_points)
                tmp = self.metric(angles1, angles2)
            else:
                # penalize angles sharper than pi/4 (squared hinge)
                tmp = torch.nn.functional.relu(np.pi / 4 - angles1)
                tmp = tmp * tmp
                tmp = torch.mean(tmp)
            loss.append(tmp)
        loss = torch.stack(loss, dim=0)
        if self.reduction != "none":
            loss = loss.mean()
        return loss
class GTNormalLoss(torch.nn.Module):
    """
    Compare PCA-estimated normals of a predicted point cloud against given
    ground-truth normals via mean (1 - cosine similarity).
    ===
    params:
        nn_size: neighborhood size for the PCA normal estimation
        NCHW: order of dimensions, default True
        pred: (B,3,N) if NCHW, (B,N,3) otherwise
    """
    def __init__(self, nn_size=10, NCHW=True):
        super().__init__()
        self.nn_size = nn_size
        self.NCHW = NCHW
        self.cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)

    def forward(self, pred, gt_normals):
        # BUGFIX: nn_size was hard-coded to 10, ignoring the constructor value.
        pred_normals = batch_normals(pred, nn_size=self.nn_size, NCHW=self.NCHW)
        cos = self.cos(pred_normals, gt_normals)
        return torch.mean(1 - cos)
class MeshSmoothLoss(torch.nn.Module):
    """
    compare laplacian of two meshes with the same connectivity assuming known correspondence
    metric: an instance of a module e.g. L1Loss
    use_cot: cot laplacian is used instead of uniformlaplacian
    consistent_topology: assume face matrix is the same during the entire use
    precompute_L: assume vert1 is always the same
    """
    def __init__(self, metric, use_cot=False, use_norm=False):
        super().__init__()
        # NOTE(review): `use_norm` is accepted but never used, and `metric` is
        # stored but forward() ignores it — confirm the intended behavior.
        if use_cot:
            self.laplacian = CotLaplacian()
        else:
            self.laplacian = UniformLaplacian()
        self.metric = metric
    def forward(self, vert1, face=None):
        # Mean L2 norm of the per-vertex laplacian of vert1 — i.e. a smoothness
        # penalty on a single mesh; despite the class docstring, no second mesh
        # is compared here.
        lap1 = self.laplacian(vert1, face)
        lap1 = torch.norm(lap1, dim=-1, p=2)
        return lap1.mean()
class LocalFeatureLoss(torch.nn.Module):
"""
penalize point to surface loss
Given points (B,N,3)
1. find KNN and the center
2. fit PCA, get normal
3. project p-center to normal
"""
def __init__(self, nn_size=10, metric=torch.nn.MSELoss("mean"), **kwargs):
super().__init__()
self.nn_size = nn_size
self.metric = metric
def forward(self, xyz1, xyz2, **kwargs):
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
B,N,C = xyz1.shape
grouped_points, idx, _ = group_knn(self.nn_size, xyz1, xyz1, unique=True, NCHW=False)
group_center = torch.mean(grouped_points, dim=2, keepdim=True)
grouped_points = grouped_points - group_center
# fit pca
allpoints = grouped_points.view(-1, self.nn_size, C).contiguous()
# BN,C,k
U, S, V = batch_svd(allpoints)
# V is BNxCxC, last_u BNxC
normals = V[:, :, -1].view(B, N, C).detach()
# FIXME what about the sign of normal
ptof1 = dot_product((xyz1 - group_center.squeeze(2)), normals, dim=-1)
# for xyz2 use the same neighborhood
grouped_points = torch.gather(xyz2.unsqueeze(1).expand(-1,N,-1,-1), 2, idx.unsqueeze(-1).expand(-1,-1,-1,C))
group_center = torch.mean(grouped_points, dim=2, keepdim=True)
grouped_points = grouped_points - group_center
allpoints = grouped_points.view(-1, self.nn_size, C).contiguous()
# MB,C,k
U, S, V = batch_svd(allpoints)
# V is MBxCxC, last_u MBxC
normals = V[:, :, -1].view(B, N, C).detach()
ptof2 = dot_product((xyz2 - group_center.squeeze(2)), normals, dim=-1)
# compare ptof1 and ptof2 absolute value (absolute value can only determine bent, not direction of bent)
loss = self.metric(ptof1.abs(), ptof2.abs())
# # penalize flat->curve
bent = ptof2-ptof1
bent.masked_fill_(bent<0, 0.0)
bent = self.metric(bent, torch.zeros_like(bent))
# bent.masked_fill_(bent<=1.0, 0.0)
loss += 5*bent
| |
value_
self.Id_nsprefix_ = child_.prefix
elif nodeName_ == 'SequenceNumber' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'SequenceNumber')
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'SequenceNumber')
self.SequenceNumber = ival_
self.SequenceNumber_nsprefix_ = child_.prefix
elif nodeName_ == 'PackingGroup':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PackingGroup')
value_ = self.gds_validate_string(value_, node, 'PackingGroup')
self.PackingGroup = value_
self.PackingGroup_nsprefix_ = child_.prefix
# validate type DangerousGoodsPackingGroupType
self.validate_DangerousGoodsPackingGroupType(self.PackingGroup)
elif nodeName_ == 'PackingInstructions':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PackingInstructions')
value_ = self.gds_validate_string(value_, node, 'PackingInstructions')
self.PackingInstructions = value_
self.PackingInstructions_nsprefix_ = child_.prefix
elif nodeName_ == 'AircraftCategoryType':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'AircraftCategoryType')
value_ = self.gds_validate_string(value_, node, 'AircraftCategoryType')
self.AircraftCategoryType = value_
self.AircraftCategoryType_nsprefix_ = child_.prefix
# validate type DangerousGoodsAircraftCategoryType
self.validate_DangerousGoodsAircraftCategoryType(self.AircraftCategoryType)
elif nodeName_ == 'ProperShippingName':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ProperShippingName')
value_ = self.gds_validate_string(value_, node, 'ProperShippingName')
self.ProperShippingName = value_
self.ProperShippingName_nsprefix_ = child_.prefix
elif nodeName_ == 'TechnicalName':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'TechnicalName')
value_ = self.gds_validate_string(value_, node, 'TechnicalName')
self.TechnicalName = value_
self.TechnicalName_nsprefix_ = child_.prefix
elif nodeName_ == 'PrimaryClass':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PrimaryClass')
value_ = self.gds_validate_string(value_, node, 'PrimaryClass')
self.PrimaryClass = value_
self.PrimaryClass_nsprefix_ = child_.prefix
elif nodeName_ == 'SubsidiaryClasses':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'SubsidiaryClasses')
value_ = self.gds_validate_string(value_, node, 'SubsidiaryClasses')
self.SubsidiaryClasses.append(value_)
self.SubsidiaryClasses_nsprefix_ = child_.prefix
elif nodeName_ == 'ReportableQuantity':
sval_ = child_.text
ival_ = self.gds_parse_boolean(sval_, node, 'ReportableQuantity')
ival_ = self.gds_validate_boolean(ival_, node, 'ReportableQuantity')
self.ReportableQuantity = ival_
self.ReportableQuantity_nsprefix_ = child_.prefix
elif nodeName_ == 'Percentage' and child_.text:
sval_ = child_.text
fval_ = self.gds_parse_decimal(sval_, node, 'Percentage')
fval_ = self.gds_validate_decimal(fval_, node, 'Percentage')
self.Percentage = fval_
self.Percentage_nsprefix_ = child_.prefix
elif nodeName_ == 'AuthorizationInformation':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'AuthorizationInformation')
value_ = self.gds_validate_string(value_, node, 'AuthorizationInformation')
self.AuthorizationInformation = value_
self.AuthorizationInformation_nsprefix_ = child_.prefix
# end class UploadedDangerousGoodsCommodityDescription
class UploadedDangerousGoodsContainer(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Attributes=None, ContainerType=None, QValue=None, GrossWeight=None, Commodities=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if Attributes is None:
self.Attributes = []
else:
self.Attributes = Attributes
self.Attributes_nsprefix_ = None
self.ContainerType = ContainerType
self.ContainerType_nsprefix_ = None
self.QValue = QValue
self.QValue_nsprefix_ = None
self.GrossWeight = GrossWeight
self.GrossWeight_nsprefix_ = None
if Commodities is None:
self.Commodities = []
else:
self.Commodities = Commodities
self.Commodities_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, UploadedDangerousGoodsContainer)
if subclass is not None:
return subclass(*args_, **kwargs_)
if UploadedDangerousGoodsContainer.subclass:
return UploadedDangerousGoodsContainer.subclass(*args_, **kwargs_)
else:
return UploadedDangerousGoodsContainer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Attributes(self):
return self.Attributes
def set_Attributes(self, Attributes):
self.Attributes = Attributes
def add_Attributes(self, value):
self.Attributes.append(value)
def insert_Attributes_at(self, index, value):
self.Attributes.insert(index, value)
def replace_Attributes_at(self, index, value):
self.Attributes[index] = value
def get_ContainerType(self):
return self.ContainerType
def set_ContainerType(self, ContainerType):
self.ContainerType = ContainerType
def get_QValue(self):
return self.QValue
def set_QValue(self, QValue):
self.QValue = QValue
def get_GrossWeight(self):
return self.GrossWeight
def set_GrossWeight(self, GrossWeight):
self.GrossWeight = GrossWeight
def get_Commodities(self):
return self.Commodities
def set_Commodities(self, Commodities):
self.Commodities = Commodities
def add_Commodities(self, value):
self.Commodities.append(value)
def insert_Commodities_at(self, index, value):
self.Commodities.insert(index, value)
def replace_Commodities_at(self, index, value):
self.Commodities[index] = value
def validate_DangerousGoodsContainerAttributeType(self, value):
result = True
# Validate type DangerousGoodsContainerAttributeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['ALL_PACKED_IN_ONE']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DangerousGoodsContainerAttributeType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.Attributes or
self.ContainerType is not None or
self.QValue is not None or
self.GrossWeight is not None or
self.Commodities
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsContainer', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('UploadedDangerousGoodsContainer')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'UploadedDangerousGoodsContainer':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UploadedDangerousGoodsContainer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UploadedDangerousGoodsContainer', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UploadedDangerousGoodsContainer'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsContainer', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Attributes_ in self.Attributes:
namespaceprefix_ = self.Attributes_nsprefix_ + ':' if (UseCapturedNS_ and self.Attributes_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sAttributes>%s</%sAttributes>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(Attributes_), input_name='Attributes')), namespaceprefix_ , eol_))
if self.ContainerType is not None:
namespaceprefix_ = self.ContainerType_nsprefix_ + ':' if (UseCapturedNS_ and self.ContainerType_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sContainerType>%s</%sContainerType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ContainerType), input_name='ContainerType')), namespaceprefix_ , eol_))
if self.QValue is not None:
namespaceprefix_ = self.QValue_nsprefix_ + ':' if (UseCapturedNS_ and self.QValue_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sQValue>%s</%sQValue>%s' % (namespaceprefix_ , self.gds_format_decimal(self.QValue, input_name='QValue'), namespaceprefix_ , eol_))
if self.GrossWeight is not None:
namespaceprefix_ = self.GrossWeight_nsprefix_ + ':' if (UseCapturedNS_ and self.GrossWeight_nsprefix_) else ''
self.GrossWeight.export(outfile, level, namespaceprefix_, namespacedef_='', name_='GrossWeight', pretty_print=pretty_print)
for Commodities_ in self.Commodities:
namespaceprefix_ = self.Commodities_nsprefix_ + ':' if (UseCapturedNS_ and self.Commodities_nsprefix_) else ''
Commodities_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Commodities', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element type declares no XML attributes; hook retained for
        # subclass overrides (generateDS build protocol).
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child element *child_* (tag *nodeName_*) into this instance."""
        if nodeName_ == 'Attributes':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Attributes')
            value_ = self.gds_validate_string(value_, node, 'Attributes')
            self.Attributes.append(value_)
            self.Attributes_nsprefix_ = child_.prefix
            # validate type DangerousGoodsContainerAttributeType
            self.validate_DangerousGoodsContainerAttributeType(self.Attributes[-1])
        elif nodeName_ == 'ContainerType':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'ContainerType')
            value_ = self.gds_validate_string(value_, node, 'ContainerType')
            self.ContainerType = value_
            self.ContainerType_nsprefix_ = child_.prefix
        elif nodeName_ == 'QValue' and child_.text:
            # Decimal-valued child; only parsed when non-empty text is present.
            sval_ = child_.text
            fval_ = self.gds_parse_decimal(sval_, node, 'QValue')
            fval_ = self.gds_validate_decimal(fval_, node, 'QValue')
            self.QValue = fval_
            self.QValue_nsprefix_ = child_.prefix
        elif nodeName_ == 'GrossWeight':
            # Complex child: build a Weight instance from the subtree.
            obj_ = Weight.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.GrossWeight = obj_
            obj_.original_tagname_ = 'GrossWeight'
        elif nodeName_ == 'Commodities':
            # Repeated complex child: append each parsed commodity entry.
            obj_ = UploadedDangerousGoodsCommodityContent.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Commodities.append(obj_)
            obj_.original_tagname_ = 'Commodities'
# end class UploadedDangerousGoodsContainer
class UploadedDangerousGoodsContainerGroup(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, NumberOfIdenticalContainers=None, Container=None, gds_collector_=None, **kwargs_):
        """Initialize an UploadedDangerousGoodsContainerGroup element."""
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Count of identical containers represented by this group.
        self.NumberOfIdenticalContainers = NumberOfIdenticalContainers
        self.NumberOfIdenticalContainers_nsprefix_ = None
        # The container description shared by every member of the group.
        self.Container = Container
        self.Container_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate this class or a registered subclass override.

        A subclass found via CurrentSubclassModule_ or set on the `subclass`
        class attribute takes precedence over the generated class itself.
        """
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, UploadedDangerousGoodsContainerGroup)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if UploadedDangerousGoodsContainerGroup.subclass:
            return UploadedDangerousGoodsContainerGroup.subclass(*args_, **kwargs_)
        else:
            return UploadedDangerousGoodsContainerGroup(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        """Return the captured XML namespace prefix for this element."""
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        """Set the XML namespace prefix used when exporting this element."""
        self.ns_prefix_ = ns_prefix
    def get_NumberOfIdenticalContainers(self):
        """Return the NumberOfIdenticalContainers child value."""
        return self.NumberOfIdenticalContainers
    def set_NumberOfIdenticalContainers(self, NumberOfIdenticalContainers):
        """Set the NumberOfIdenticalContainers child value."""
        self.NumberOfIdenticalContainers = NumberOfIdenticalContainers
    def get_Container(self):
        """Return the Container child element."""
        return self.Container
    def set_Container(self, Container):
        """Set the Container child element."""
        self.Container = Container
def hasContent_(self):
if (
self.NumberOfIdenticalContainers is not None or
self.Container is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsContainerGroup', pretty_print=True):
        """Write this element (open tag, attributes, children, close tag) as XML."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('UploadedDangerousGoodsContainerGroup')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name captured while parsing, if any.
        if self.original_tagname_ is not None and name_ == 'UploadedDangerousGoodsContainerGroup':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UploadedDangerousGoodsContainerGroup')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UploadedDangerousGoodsContainerGroup', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # Self-closing tag when there are no populated children.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UploadedDangerousGoodsContainerGroup'):
        # No XML attributes are defined for this element type; hook kept for
        # subclass overrides (generateDS export protocol).
        pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsContainerGroup', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.NumberOfIdenticalContainers is not None:
namespaceprefix_ = self.NumberOfIdenticalContainers_nsprefix_ + ':' if (UseCapturedNS_ and self.NumberOfIdenticalContainers_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sNumberOfIdenticalContainers>%s</%sNumberOfIdenticalContainers>%s' % (namespaceprefix_ , | |
# Repository: stewnorriss/letsencrypt
"""Tests for letsencrypt.plugins.standalone.authenticator."""
import os
import pkg_resources
import psutil
import signal
import socket
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import mock
import OpenSSL
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt.tests import acme_util
# Path to and contents of the 512-bit RSA test key shipped with the test suite.
KEY_PATH = pkg_resources.resource_filename(
    "letsencrypt.tests", os.path.join("testdata", "rsa512_key.pem"))
KEY_DATA = pkg_resources.resource_string(
    "letsencrypt.tests", os.path.join("testdata", "rsa512_key.pem"))
# The test key is an unencrypted PEM file, so no decryption password is
# required (password=None). This replaces the "<PASSWORD>" placeholder left
# behind by secret redaction, which was a syntax error.
KEY = jose.JWKRSA(key=jose.ComparableRSAKey(serialization.load_pem_private_key(
    KEY_DATA, password=None, backend=default_backend())))
PRIVATE_KEY = OpenSSL.crypto.load_privatekey(
    OpenSSL.crypto.FILETYPE_PEM, KEY_DATA)
# Minimal mock configuration shared by all test cases below.
CONFIG = mock.Mock(dvsni_port=5001)
# Helper classes that allow interrupting an otherwise-infinite loop under
# test after a fixed number of iterations, based on:
# http://igorsobreira.com/2013/03/17/testing-infinite-loops.html
class _SocketAcceptOnlyNTimes(object):
    # pylint: disable=too-few-public-methods
    """Callable standing in for ``socket.accept`` that succeeds a fixed
    number of times.

    Each call returns a ``(connection, address)``-style tuple; once the
    call count exceeds ``limit``, ``CallableExhausted`` is raised so the
    loop under test terminates.
    """
    def __init__(self, limit):
        self.limit = limit
        self.calls = 0
    def __call__(self):
        self.calls = self.calls + 1
        if self.calls <= self.limit:
            # Mimic socket.accept(): (new connection, peer address).
            return (mock.MagicMock(), "ignored")
        raise CallableExhausted
class CallableExhausted(Exception):
    # pylint: disable=too-few-public-methods
    """Exception raised when a method is called more than the
    specified number of times (see _SocketAcceptOnlyNTimes)."""
class ChallPrefTest(unittest.TestCase):
    """Tests for chall_pref() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
    def test_chall_pref(self):
        # The standalone plugin supports exactly one challenge type: DVSNI.
        expected = [challenges.DVSNI]
        self.assertEqual(
            self.authenticator.get_chall_pref("example.com"), expected)
class SNICallbackTest(unittest.TestCase):
    """Tests for sni_callback() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.cert = achallenges.DVSNI(
            challb=acme_util.DVSNI_P,
            domain="example.com", key=KEY).gen_cert_and_response()[0]
        self.authenticator.private_key = PRIVATE_KEY
        self.authenticator.tasks = {"abcdef.acme.invalid": self.cert}
        self.authenticator.child_pid = 12345
    def _assert_context_installed(self, servername):
        """Run the SNI callback for *servername* and verify that exactly one
        OpenSSL context was installed on the connection."""
        conn = mock.MagicMock()
        conn.get_servername.return_value = servername
        self.authenticator.sni_callback(conn)
        self.assertEqual(conn.set_context.call_count, 1)
        installed_ctx = conn.set_context.call_args[0][0]
        self.assertTrue(isinstance(installed_ctx, OpenSSL.SSL.Context))
    def test_real_servername(self):
        # A name that matches a pending task gets its dedicated context.
        self._assert_context_installed("abcdef.acme.invalid")
    def test_fake_servername(self):
        """Test behavior of SNI callback when an unexpected name is received.
        (Currently the expected behavior in this case is to return the
        "first" certificate with which the listener was configured,
        although they are stored in an unordered data structure so
        this might not be the one that was first in the challenge list
        passed to the perform method. In the future, this might result
        in dropping the connection instead.)"""
        self._assert_context_installed("example.com")
class ClientSignalHandlerTest(unittest.TestCase):
    """Tests for client_signal_handler() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.authenticator.tasks = {"foononce.acme.invalid": "stuff"}
        self.authenticator.child_pid = 12345
    def test_client_signal_handler(self):
        self.assertTrue(self.authenticator.subproc_state is None)
        # Each recognized signal moves the authenticator into its state.
        for signum, expected_state in ((signal.SIGIO, "ready"),
                                       (signal.SIGUSR1, "inuse"),
                                       (signal.SIGUSR2, "cantbind")):
            self.authenticator.client_signal_handler(signum, None)
            self.assertEqual(self.authenticator.subproc_state, expected_state)
        # Any other signal is rejected. (Normally unreachable because the
        # handler is only registered for the three signals above.)
        self.assertRaises(
            ValueError, self.authenticator.client_signal_handler,
            signal.SIGPIPE, None)
class SubprocSignalHandlerTest(unittest.TestCase):
    """Tests for subproc_signal_handler() method.

    Uses assertEqual throughout: the assertEquals alias is deprecated and
    removed in Python 3.12.
    """
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.authenticator.tasks = {"foononce.acme.invalid": "stuff"}
        self.authenticator.child_pid = 12345
        self.authenticator.parent_pid = 23456
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler(self, mock_exit, mock_kill):
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        # Every open resource must be shut down exactly once.
        self.assertEqual(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEqual(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEqual(self.authenticator.connection.close.call_count, 1)
        self.assertEqual(self.authenticator.sock.close.call_count, 1)
        # The subprocess notifies the parent that cleanup finished...
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        # ...and then exits successfully.
        mock_exit.assert_called_once_with(0)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
    def test_subproc_signal_handler_trouble(self, mock_exit, mock_kill):
        """Test attempting to shut down a non-existent connection.
        (This could occur because none was established or active at the
        time the signal handler tried to perform the cleanup)."""
        self.authenticator.ssl_conn = mock.MagicMock()
        self.authenticator.connection = mock.MagicMock()
        self.authenticator.sock = mock.MagicMock()
        # AttributeError simulates the case where one of these properties
        # is None because no connection exists. We raise it for
        # ssl_conn.close() instead of ssl_conn.shutdown() for better code
        # coverage.
        self.authenticator.ssl_conn.close.side_effect = AttributeError("!")
        self.authenticator.connection.close.side_effect = AttributeError("!")
        self.authenticator.sock.close.side_effect = AttributeError("!")
        self.authenticator.subproc_signal_handler(signal.SIGINT, None)
        self.assertEqual(self.authenticator.ssl_conn.shutdown.call_count, 1)
        self.assertEqual(self.authenticator.ssl_conn.close.call_count, 1)
        self.assertEqual(self.authenticator.connection.close.call_count, 1)
        self.assertEqual(self.authenticator.sock.close.call_count, 1)
        # Cleanup errors must not prevent the parent notification or exit.
        mock_kill.assert_called_once_with(
            self.authenticator.parent_pid, signal.SIGUSR1)
        mock_exit.assert_called_once_with(0)
class AlreadyListeningTest(unittest.TestCase):
    """Tests for already_listening() method.

    The connection fixtures previously duplicated across all four tests are
    factored into _base_conns() / _listener() helpers.
    """
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
    @staticmethod
    def _listener(family, laddr, pid):
        """Build an sconn fixture in LISTEN state on *laddr* owned by *pid*."""
        from psutil._common import sconn
        return sconn(fd=3, family=family, type=1, laddr=laddr,
                     raddr=(), status="LISTEN", pid=pid)
    @staticmethod
    def _base_conns():
        """Connection fixtures shared by every test below; none of them
        listens on a probed port, so already_listening() must ignore all."""
        from psutil._common import sconn
        return [
            sconn(fd=-1, family=2, type=1, laddr=("0.0.0.0", 30),
                  raddr=(), status="LISTEN", pid=None),
            sconn(fd=3, family=2, type=1, laddr=("192.168.5.10", 32783),
                  raddr=("192.168.3.11", 22), status="ESTABLISHED", pid=1234),
            sconn(fd=-1, family=10, type=1, laddr=("::1", 54321),
                  raddr=("::1", 111), status="CLOSE_WAIT", pid=None)]
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_race_condition(self, mock_get_utility, mock_process, mock_net):
        # This tests a race condition, or permission problem, or OS
        # incompatibility in which, for some reason, no process name can be
        # found to match the identified listening PID.
        mock_net.return_value = self._base_conns() + [
            self._listener(2, ("0.0.0.0", 17), 4416)]
        mock_process.side_effect = psutil.NoSuchProcess("No such PID")
        # We simulate being unable to find the process name of PID 4416,
        # which results in returning False.
        self.assertFalse(self.authenticator.already_listening(17))
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        mock_process.assert_called_once_with(4416)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_not_listening(self, mock_get_utility, mock_process, mock_net):
        mock_net.return_value = self._base_conns()
        mock_process.name.return_value = "inetd"
        # Nothing listens on port 17, so no notification and no PID lookup.
        self.assertFalse(self.authenticator.already_listening(17))
        self.assertEqual(mock_get_utility.generic_notification.call_count, 0)
        self.assertEqual(mock_process.call_count, 0)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv4(self, mock_get_utility, mock_process, mock_net):
        mock_net.return_value = self._base_conns() + [
            self._listener(2, ("0.0.0.0", 17), 4416)]
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(17)
        self.assertTrue(result)
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4416)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil."
                "net_connections")
    @mock.patch("letsencrypt.plugins.standalone.authenticator.psutil.Process")
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_listening_ipv6(self, mock_get_utility, mock_process, mock_net):
        mock_net.return_value = self._base_conns() + [
            self._listener(10, ("::", 12345), 4420),
            self._listener(2, ("0.0.0.0", 17), 4416)]
        mock_process.name.return_value = "inetd"
        result = self.authenticator.already_listening(12345)
        self.assertTrue(result)
        self.assertEqual(mock_get_utility.call_count, 1)
        mock_process.assert_called_once_with(4420)
class PerformTest(unittest.TestCase):
    """Tests for perform() method.

    Uses assertIn instead of dict.has_key(), which exists only on Python 2
    and was removed in Python 3.
    """
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
        self.achall1 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="foo"), "pending"),
            domain="foo.example.com", key=KEY)
        self.achall2 = achallenges.DVSNI(
            challb=acme_util.chall_to_challb(
                challenges.DVSNI(r="whee", nonce="bar"), "pending"),
            domain="bar.example.com", key=KEY)
        # Deliberately not a DVSNI challenge; perform() must reject it.
        bad_achall = ("This", "Represents", "A Non-DVSNI", "Challenge")
        self.achalls = [self.achall1, self.achall2, bad_achall]
    def test_perform_when_already_listening(self):
        self.authenticator.already_listening = mock.Mock()
        self.authenticator.already_listening.return_value = True
        result = self.authenticator.perform([self.achall1])
        self.assertEqual(result, [None])
    def test_can_perform(self):
        """What happens if start_listener() returns True."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = True
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        self.assertIn(self.achall1.nonce_domain, self.authenticator.tasks)
        self.assertIn(self.achall2.nonce_domain, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertTrue(isinstance(result[0], challenges.ChallengeResponse))
        self.assertTrue(isinstance(result[1], challenges.ChallengeResponse))
        # The non-DVSNI challenge yields a falsy placeholder.
        self.assertFalse(result[2])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port, KEY)
    def test_cannot_perform(self):
        """What happens if start_listener() returns False."""
        self.authenticator.start_listener = mock.Mock()
        self.authenticator.start_listener.return_value = False
        self.authenticator.already_listening = mock.Mock(return_value=False)
        result = self.authenticator.perform(self.achalls)
        self.assertEqual(len(self.authenticator.tasks), 2)
        self.assertIn(self.achall1.nonce_domain, self.authenticator.tasks)
        self.assertIn(self.achall2.nonce_domain, self.authenticator.tasks)
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 3)
        self.assertEqual(result, [None, None, False])
        self.authenticator.start_listener.assert_called_once_with(
            CONFIG.dvsni_port, KEY)
    def test_perform_with_pending_tasks(self):
        # perform() refuses to run while earlier tasks are outstanding.
        self.authenticator.tasks = {"foononce.acme.invalid": "cert_data"}
        extra_achall = acme_util.DVSNI_P
        self.assertRaises(
            ValueError, self.authenticator.perform, [extra_achall])
    def test_perform_without_challenge_list(self):
        extra_achall = acme_util.DVSNI_P
        # This is wrong because a challenge must be specified.
        self.assertRaises(ValueError, self.authenticator.perform, [])
        # This is wrong because it must be a list, not a bare challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, extra_achall)
        # This is wrong because the list must contain at least one challenge.
        self.assertRaises(
            ValueError, self.authenticator.perform, range(20))
class StartListenerTest(unittest.TestCase):
    """Tests for start_listener() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_parent(self, mock_fork):
        # In the parent process, os.fork() returns the child's PID.
        self.authenticator.do_parent_process = mock.Mock(return_value=True)
        mock_fork.return_value = 22222
        # start_listener() propagates do_parent_process()'s boolean result.
        self.assertTrue(self.authenticator.start_listener(1717, "key"))
        self.assertEqual(self.authenticator.child_pid, 22222)
        self.authenticator.do_parent_process.assert_called_once_with(1717)
    @mock.patch("letsencrypt.plugins.standalone.authenticator.os.fork")
    def test_start_listener_fork_child(self, mock_fork):
        # In the child process, os.fork() returns 0.
        self.authenticator.do_parent_process = mock.Mock()
        self.authenticator.do_child_process = mock.Mock()
        mock_fork.return_value = 0
        self.authenticator.start_listener(1717, "key")
        self.assertEqual(self.authenticator.child_pid, os.getpid())
        self.authenticator.do_child_process.assert_called_once_with(
            1717, "key")
class DoParentProcessTest(unittest.TestCase):
    """Tests for do_parent_process() method."""
    def setUp(self):
        from letsencrypt.plugins.standalone.authenticator import \
            StandaloneAuthenticator
        self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_ok(self, mock_get_utility):
        # "ready": the child reported success, so the parent returns True.
        self.authenticator.subproc_state = "ready"
        self.assertTrue(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_inuse(self, mock_get_utility):
        # "inuse": the port is taken, so the parent returns False.
        self.authenticator.subproc_state = "inuse"
        self.assertFalse(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_cantbind(self, mock_get_utility):
        # "cantbind": the child could not bind the port, parent returns False.
        self.authenticator.subproc_state = "cantbind"
        self.assertFalse(self.authenticator.do_parent_process(1717))
        self.assertEqual(mock_get_utility.call_count, 1)
    @mock.patch("letsencrypt.plugins.standalone.authenticator."
                "zope.component.getUtility")
    def test_do_parent_process_timeout(self, mock_get_utility):
        # Normally times out in 5 seconds and returns False. We can
        # now set delay_amount to a lower value so that it times out
        # faster than it would under normal use.
        self.assertFalse(
            self.authenticator.do_parent_process(1717, delay_amount=1))
        self.assertEqual(mock_get_utility.call_count, 1)
class DoChildProcessTest(unittest.TestCase):
"""Tests for do_child_process() method."""
def setUp(self):
from letsencrypt.plugins.standalone.authenticator import \
StandaloneAuthenticator
self.authenticator = StandaloneAuthenticator(config=CONFIG, name=None)
self.cert = achallenges.DVSNI(
challb=acme_util.chall_to_challb(
challenges.DVSNI(r=("x" * 32), nonce="abcdef"), "pending"),
domain="example.com", key=KEY).gen_cert_and_response()[0]
self.authenticator.private_key = PRIVATE_KEY
self.authenticator.tasks = {"abcdef.acme.invalid": self.cert}
self.authenticator.parent_pid = 12345
@mock.patch("letsencrypt.plugins.standalone.authenticator.socket.socket")
@mock.patch("letsencrypt.plugins.standalone.authenticator.os.kill")
@mock.patch("letsencrypt.plugins.standalone.authenticator.sys.exit")
def test_do_child_process_cantbind1(
self, mock_exit, mock_kill, mock_socket):
mock_exit.side_effect = IndentationError("subprocess would exit here")
eaccess = socket.error(socket.errno.EACCES, | |
a close relative of getLinkedEntitiesByTemplateType. It is used for finding
associated (linked) entities and their meme types. Like getLinkedEntitiesByTemplateType, it parses the
link path and follows each step of the path in turn by a recursive call.
Where it differs is that it is not searching for a specific meme type, but instead double wildcards itself all
the way through the assembly network; returning everything that self is linked to, no matter how remotely
linkTypes - the entity link type
crossSingletons - if this is false, the recursion will stop at any singletons
The method id not intended to be called directly, but is instead wrapped by helper functions (getClusterMembers and
getCluster) that refine the results.
"""
#method = moduleName + '.' + self.className + '.getLinkedEntitiesByTemplateType'
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
returnMembers = []
try:
members = linkRepository.getCounterparts(self.uuid, linkDirectionTypes.BIDIRECTIONAL, [], [], linkType, excludeLinks)
newExcludeLinks = self.getLinkIDs()
excludeLinks.extend(newExcludeLinks)
for memberEntityID in members:
member = entityRepository.getEntity(memberEntityID)
isSingleton = member.getIsSingleton()
if isSingleton == True:
position = 2 #Singleton
else:
position = 1 #Not the origin entity and not a singleton
returnMembers.append([self.uuid, member.uuid, member.memePath.fullTemplatePath, member.metaMeme, position])
if (isSingleton == False) or (crossSingletons == True):
partialRet = member.getEntityCluster(linkType, crossSingletons, excludeLinks)
returnMembers.extend(partialRet)
except Exception as e:
unusedDummy = e #dummy variable declaration to prevent false alarm pydev warnings when debug statement is commented out
#logQ.put( [logType , logLevel.DEBUG , method , "Failure getting overview. Traceback = %s" %e])
pass
#logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return returnMembers
def getTraverseFilters(self, filterStatement, isNode = True):
#Find the paremeters
linkParams = []
nodeParams = []
#Peel off the parameter filters from filterStatement
#reInnerParentheses = re.compile("\('([^']+)', '([^']+)'\)")
#reOuterParentheses = re.compile("\((.+)\)")
#reInnerBrackets = re.compile("\[([^]]*)\]")
#reOPMatches = reOuterParentheses.search(filterStatement)
#reIBMatches = reInnerBrackets.search(filterStatement)
reParenthesis = re.compile(r"\(.+?\)")
reBrackets = re.compile(r"\[.+?\]")
allParenthesis = reParenthesis.findall(filterStatement)
allBrackets = reBrackets.findall(filterStatement)
for reP in allParenthesis:
reStripParentheses = re.compile(r"\((.+)\)")
reOPMatches = reStripParentheses.search(reP)
reOPMatch = reOPMatches.groups(1)
innerLinkParams, innerNodeParams = self.getTraverseFilters(reOPMatch[0])
linkParams.extend(innerLinkParams)
nodeParams.extend(innerNodeParams)
for reB in allBrackets:
reStripBrackets = re.compile(r"\[([^]]*)\]")
reIBMatches = reStripBrackets.search(reB)
reIBMatch = reIBMatches.groups(1)
innerLinkParams, innerNodeParams = self.getTraverseFilters(reIBMatch[0], False)
linkParams.extend(innerLinkParams)
nodeParams.extend(innerNodeParams)
#I we have no brackets or parenthesis, then we must be in the 'inner sanctum of the 'filter statement
if (len(allParenthesis) < 1) and (len(allBrackets) < 1):
tp = TraverseParameter(filterStatement.strip())
if tp.operator is not None:
if isNode == True:
nodeParams.append(tp)
else:
linkParams.append(tp)
return linkParams, nodeParams
def getTraverseReport(self, splitPath, isMeme, linkType = 0, excludeLinks = [], returnUniqueValuesOnly = True, excludeCluster = []):
"""
This method is an aid for designers troubleshooting traverse paths, or anyone simply asking 'what lies
along the path'. It is very similar to getLinkedEntitiesByTemplateType, but works in some subtle and
substantially different ways. Instead of delivering the uuid of an entity at the end effector of the
traverse path, it delivers a report of what is along that path and what the nearest neighbors are of
every hop.
The format is very similar to the results of getClusterJSON, so that it can readily be drawn using charting tools
Returns a python dict corresponding to the following JSON example
{
"nodes": [
{"id": "Myriel", "group": 1},
{"id": "Napoleon", "group": 1}
],
"links": [
{"source": "Napoleon", "target": "Myriel", "value": 1},
{"source": "Mlle.Baptistine", "target": "Myriel", "value": 8}
]
}
"""
method = moduleName + '.' + self.className + '.getTraverseReport'
timestamp = time.time()
#logQ.put( [logType , logLevel.DEBUG , method , "entering"])
#no traverse reports for traverse pathgs with wildcards
if "*" in splitPath:
ex = "Traverse path %s contains a wildcard (* or **). It is not possible to create a traverse report for wildcard paths" %splitPath
raise Exceptions.TemplatePathError(ex)
selfUUIDAsStr = str(self.uuid)
if excludeCluster is not None:
excludeCluster.append(selfUUIDAsStr)
excludeLinks.append(selfUUIDAsStr)
try:
traverseOrder = {}
traverseNeighbors = {}
traverseLinks = []
runningPath = ''
#start by building the root node portion (index = "0") of the report
rootMeme = self.memePath.fullTemplatePath
rootMetaMeme = self.metaMeme
rootMemberList = linkRepository.getCounterparts(self.uuid, linkDirectionTypes.BIDIRECTIONAL, [], [], linkType, excludeLinks)
memberListInbound = linkRepository.getCounterparts(self.uuid, linkDirectionTypes.INBOUND, [], [], linkType, excludeLinks)
for memID in rootMemberList:
sMemID = str(memID)
if memID in memberListInbound:
traverseLinks.append({"source": sMemID, "target": selfUUIDAsStr, "value": 1})
else:
traverseLinks.append({"source": selfUUIDAsStr, "target": sMemID, "value": 1})
rootMember = entityRepository.getEntity(memID)
memMeme = rootMember.memePath.fullTemplatePath
memMetaMeme = rootMember.metaMeme
traverseNeighbors[sMemID] = {"id" : sMemID, "meme" : memMeme, "metaMeme" : memMetaMeme, "position" : "-1"}
traverseOrder[selfUUIDAsStr] = {"id" : selfUUIDAsStr, "meme" : rootMeme, "metaMeme" : rootMetaMeme, "position" : timestamp}
#Build up the list of traverse paths
forwardTraverseJoin = '>>'
backwardTraverseJoin = '<<'
polydirectionalTraverseJoin = '::'
if len(splitPath) > 0:
#pathTraversed == True
#while (pathTraversed == False):
#Start by determining whether ot not the we have a leading direction indicator.
#If so, then set the direction to search for currPath and then remove the leading linkdir
soughtPathDirection = linkDirectionTypes.BIDIRECTIONAL #by default
if splitPath.startswith(forwardTraverseJoin) == True:
soughtPathDirection = linkDirectionTypes.OUTBOUND
splitPath = splitPath[2:]
elif splitPath.startswith(backwardTraverseJoin) == True:
soughtPathDirection = linkDirectionTypes.INBOUND
splitPath = splitPath[2:]
elif splitPath.startswith(polydirectionalTraverseJoin) == True:
splitPath = splitPath[2:]
#determine which traverse direction we have in splitPath
partitionSequence = polydirectionalTraverseJoin
lowestIndex = -1
forwardIndex = -1
reverseIndex = -1
polydirectionalIndex = -1
try:
forwardIndex = splitPath.index('>>')
except: pass
try:
reverseIndex = splitPath.index('<<')
except: pass
try:
polydirectionalIndex = splitPath.index('::')
lowestIndex = polydirectionalIndex
except: pass
if (forwardIndex > -1):
if (forwardIndex < lowestIndex) or\
((forwardIndex > lowestIndex) and (lowestIndex < 0)):
lowestIndex = forwardIndex
partitionSequence = forwardTraverseJoin
if ((reverseIndex > -1) or (reverseIndex == 0)):
if (reverseIndex < lowestIndex) or\
((reverseIndex > lowestIndex) and (lowestIndex < 0)):
lowestIndex = reverseIndex
partitionSequence = backwardTraverseJoin
#If forcedContinue is true, we don't bother splitting the path as there was a double wildcard in the recursion history
# somewhere. We'll just accept splitPath as it is.
repartitionedSplitPath = splitPath.partition(partitionSequence)
runningPath = "%s%s%s" %(runningPath, partitionSequence, repartitionedSplitPath[0])
if ((len(repartitionedSplitPath[2]) > 0) and (len(repartitionedSplitPath[1]) > 0)):
splitPath = "%s%s" %(repartitionedSplitPath[1], repartitionedSplitPath[2])
else:
splitPath = repartitionedSplitPath[2]
currentPathFragment = repartitionedSplitPath[0]
#Peel off the parameter filters from currentPathFragment
linkParams, nodeParams = self.getTraverseFilters(currentPathFragment)
reOuterParentheses = re.compile(r"\((.+)\)")
reInnerBrackets = re.compile(r"\[([^]]*)\]")
#strip of the bits inside parenthesis and brackets
currentPathFragment = re.sub(reOuterParentheses, '', currentPathFragment)
currentPathFragment = re.sub(reInnerBrackets, '', currentPathFragment)
try:
if (currentPathFragment is not None) and (len(currentPathFragment) > 0):
if isMeme == True:
try:
soughtPath = templateRepository.resolveTemplate(self.memePath, currentPathFragment, True)
except Exceptions.TemplatePathError as e:
errorMsg = "Failed to resolve path relative to %s. Nested Traceback = %s" %(self.memePath, e)
logQ.put( [logType , logLevel.WARNING , method , errorMsg])
raise e
else:
                        #We only need the fullTemplatePath attribute of the entity, not the actual path pointer
metaMeme = templateRepository.resolveTemplateAbsolutely(self.metaMeme)
soughtPath = templateRepository.resolveTemplate(metaMeme.path, currentPathFragment, True)
except Exception as e:
errorMsg = "Failed to resolve path relative to %s. Nested Traceback = %s" %(self.memePath, e)
logQ.put( [logType , logLevel.WARNING , method , errorMsg])
raise e
try:
#linkDirectionTypes.BIDIRECTIONAL, '', None, linkAttributeOperatorTypes.EQUAL
members = linkRepository.getCounterparts(self.uuid, soughtPathDirection, linkParams, nodeParams, linkType, excludeLinks)
if excludeCluster is not None:
#we need to make sure that we don't backtrack, so filter the exclude list
memberSet = set(members)
excludeSet = set(excludeCluster)
memberSet.difference_update(excludeSet)
members = list(memberSet)
for memberEntityID in members:
member = entityRepository.getEntity(memberEntityID)
if ((isMeme == True) and\
(str(memberEntityID) not in excludeLinks) and\
(member.memePath.fullTemplatePath == soughtPath.path.fullTemplatePath)) or (member.metaMeme == soughtPath.path.fullTemplatePath):
if len(splitPath) > 0:
partialLinks, partialNodes, partialTraverseOrder = member.getTraverseReport(splitPath, isMeme, linkType, excludeLinks, returnUniqueValuesOnly, excludeCluster)
traverseLinks.extend(partialLinks)
traverseOrder.update(partialTraverseOrder)
traverseNeighbors.update(partialNodes)
else:
partialLinks, partialNodes, partialTraverseOrder = member.getTraverseReport("", isMeme, linkType, excludeLinks, returnUniqueValuesOnly, excludeCluster)
traverseLinks.extend(partialLinks)
traverseOrder.update(partialTraverseOrder)
traverseNeighbors.update(partialNodes)
except KeyError as e:
#self.getLinkedEntitiesByTemplateType(oldSplitPath, isMeme, linkType, forcedContinue, excludeLinks, returnUniqueValuesOnly, excludeCluster)
pass
except Exception as e:
#logQ.put( [logType , logLevel.DEBUG , method , "Failure getting linked entities. Traceback = %s" %e])
pass
except Exception as e:
ex = "Function getHasLinkedEntityByMemeType failed. Traceback = %s" %e
#raise Exceptions.ScriptError(ex)
return traverseLinks, traverseNeighbors, traverseOrder
#Todo - update the method | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['PoolArgs', 'Pool']
@pulumi.input_type
class PoolArgs:
    """Arguments accepted when constructing a NetApp ``Pool`` resource.

    NOTE(review): this module is generated by the Pulumi Terraform Bridge
    (tfgen); prefer regenerating over hand-editing.
    """
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 service_level: pulumi.Input[str],
                 size_in_tb: pulumi.Input[int],
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Pool resource.
        :param pulumi.Input[str] account_name: The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group where the NetApp Pool should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_level: The service level of the file system. Valid values include `Premium`, `Standard`, or `Ultra`.
        :param pulumi.Input[int] size_in_tb: Provisioned size of the pool in TB. Value must be between `4` and `500`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the NetApp Pool. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "service_level", service_level)
        pulumi.set(__self__, "size_in_tb", size_in_tb)
        # Optional arguments are recorded only when explicitly provided.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group where the NetApp Pool should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="serviceLevel")
    def service_level(self) -> pulumi.Input[str]:
        """
        The service level of the file system. Valid values include `Premium`, `Standard`, or `Ultra`.
        """
        return pulumi.get(self, "service_level")
    @service_level.setter
    def service_level(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_level", value)
    @property
    @pulumi.getter(name="sizeInTb")
    def size_in_tb(self) -> pulumi.Input[int]:
        """
        Provisioned size of the pool in TB. Value must be between `4` and `500`.
        """
        return pulumi.get(self, "size_in_tb")
    @size_in_tb.setter
    def size_in_tb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_in_tb", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the NetApp Pool. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _PoolState:
    """State properties for looking up / filtering existing ``Pool``
    resources; every field is optional.

    NOTE(review): this module is generated by the Pulumi Terraform Bridge
    (tfgen); prefer regenerating over hand-editing.
    """
    def __init__(__self__, *,
                 account_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_level: Optional[pulumi.Input[str]] = None,
                 size_in_tb: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Pool resources.
        :param pulumi.Input[str] account_name: The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the NetApp Pool. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group where the NetApp Pool should be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_level: The service level of the file system. Valid values include `Premium`, `Standard`, or `Ultra`.
        :param pulumi.Input[int] size_in_tb: Provisioned size of the pool in TB. Value must be between `4` and `500`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Only explicitly supplied values are recorded on the state object.
        if account_name is not None:
            pulumi.set(__self__, "account_name", account_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if service_level is not None:
            pulumi.set(__self__, "service_level", service_level)
        if size_in_tb is not None:
            pulumi.set(__self__, "size_in_tb", size_in_tb)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the NetApp Pool. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group where the NetApp Pool should be created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="serviceLevel")
    def service_level(self) -> Optional[pulumi.Input[str]]:
        """
        The service level of the file system. Valid values include `Premium`, `Standard`, or `Ultra`.
        """
        return pulumi.get(self, "service_level")
    @service_level.setter
    def service_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_level", value)
    @property
    @pulumi.getter(name="sizeInTb")
    def size_in_tb(self) -> Optional[pulumi.Input[int]]:
        """
        Provisioned size of the pool in TB. Value must be between `4` and `500`.
        """
        return pulumi.get(self, "size_in_tb")
    @size_in_tb.setter
    def size_in_tb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size_in_tb", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Pool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_level: Optional[pulumi.Input[str]] = None,
size_in_tb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Pool within a NetApp Account.
## NetApp Pool Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.netapp.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_pool = azure.netapp.Pool("examplePool",
account_name=example_account.name,
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
service_level="Premium",
size_in_tb=4)
```
## Import
NetApp Pool can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:netapp/pool:Pool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.NetApp/netAppAccounts/account1/capacityPools/pool1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account in which the NetApp Pool should be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the NetApp Pool. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the NetApp Pool should be created. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_level: The service level of the file system. Valid values include `Premium`, `Standard`, or `Ultra`.
:param pulumi.Input[int] size_in_tb: Provisioned size of the pool in TB. Value must be between `4` and `500`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Pool within a NetApp Account.
## NetApp Pool Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = | |
"""
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
# Source rectangles (x, y, w, h in pixels) of the three parallax background
# layers inside the background sprite-sheet image.
IMG_POS_BACKGROUND = {
    'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
    'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
    'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
# Source rectangles (x, y, w, h in pixels) of each roadside object, vehicle
# and player pose inside the sprite-sheet image.
IMG_POS_SPRITES = {
    'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
    'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
    'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
    'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
    'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
    'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
    'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
    'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
    'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
    'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
    'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
    'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
    'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
    'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
    'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
    'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
    'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
    'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
    'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
    'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
    'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
    'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
    'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
    'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
    'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
    'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
    'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
    'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
    'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
    'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
    'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
    'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
    'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
    'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
# Basic colours as '#RRGGBB' hex strings.
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
# Palettes per road-segment kind: alternating LIGHT/DARK stripes, plus the
# special START/FINISH line colours.
FP_COLORS = {
    'SKY': '#72D7EE',
    'TREE': '#005108',
    'FOG': '#005108',
    'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
    'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
    'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
    'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
    'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
# Road-shape presets used by the segment generators.
FP_ROAD = {
    'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
    'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
    'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
# Collectible/obstacle sprites placed along the road; 'score' is the amount
# added to (positive) or subtracted from (negative) the player's score.
FP_ROAD_SPRTS = {
    'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
    'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
    'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
    'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
    'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
    'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
    'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
    'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
    'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
    'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
    'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
    #'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
    #'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
    #'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
    'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
    'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
    'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
class SptTmpx(sptdraw.SptDrawBase):
    """Placeholder sprite: a white disc centred on a green square."""
    def __init__(self, size, *args, **kwargs):
        super(SptTmpx, self).__init__(size)
        self.draw_on()
    def draw_on(self, *args, **kwargs):
        self.fill(consts.GREEN)
        # Disc spans the full width of the sprite (py2 int division).
        centre = (self.size[0] / 2, self.size[1] / 2)
        radius = self.size[0] / 2
        self.pygm.draw.circle(self.surf, consts.WHITE, centre, radius, 0)
class SptTmpi(pygm.SptImg):
    """Pass-through subclass of pygm.SptImg; adds no behaviour of its own."""
    def __init__(self, img_file, *args, **kwargs):
        super(SptTmpi, self).__init__(img_file)
class FPSptBg(pygm.SptImgOne):
    """Background-layer sprite; forwards image path and position to
    pygm.SptImgOne unchanged."""
    def __init__(self, img_file, pos, *args, **kwargs):
        super(FPSptBg, self).__init__(img_file, pos)
class FPSptSprts(pygm.SptImgOne):
    """Positioned sprite; forwards image path and position to
    pygm.SptImgOne unchanged."""
    def __init__(self, img_file, pos, *args, **kwargs):
        super(FPSptSprts, self).__init__(img_file, pos)
class FPSptFog(sptdraw.SptDrawBase):
    """Fog overlay drawn as stacked horizontal bands whose alpha fades
    from opaque at the top to transparent at the bottom.

    :param size: surface size passed through to SptDrawBase.
    :param c: RGBA fog colour; the alpha component is rewritten per band.
    :param h: total height in pixels of the fog gradient.
    """
    # Class-level constant replaces the old mutable default argument.
    DEFAULT_COLOR = (0, 81, 8, 0)
    def __init__(self, size, c=None, h=30, *args, **kwargs):
        super(FPSptFog, self).__init__(size)
        # BUG FIX: the original signature used a mutable default
        # (c=[0, 81, 8, 0]); draw_on() mutates c[3], so the shared default
        # list -- or a caller-supplied list -- was mutated in place.
        # Copy defensively instead.
        self.c = list(self.DEFAULT_COLOR) if c is None else list(c)
        self.h = h
        self.draw_on()
    def draw_on(self, *args, **kwargs):
        d = 2  # height of one band in pixels
        # '//' is identical to '/' for ints on py2 and stays an int on py3.
        n = self.h // d  # number of bands
        for i in range(n):
            rct = [0, i * d, self.size[0], d]
            # Alpha ramps down from ~200 (top band) to 0 (bottom band).
            ca = 200 // n * (n - i)
            self.c[3] = ca
            self.pygm.draw.rect(self.surf, self.c, rct)
class FPSptRdSprts(pygm.SptImg):
    """Sprite for an object placed along the road, loaded from an image file."""
    def __init__(self, img_file, *args, **kwargs):
        super(FPSptRdSprts, self).__init__(img_file)
    @classmethod
    def create_by_img(cls, img):
        # Factory hook: kept trivial so the commented-out test stub below
        # can be swapped in without touching callers.
        return cls(img)
        # for test
        #o = SptTmpx((40, 40))
        #return o
class FPSptRoadB(sptdraw.SptDrawBase):
def __init__(self, size, cfg, *args, **kwargs):
super(FPSptRoadB, self).__init__(size)
self.cfg = cfg
self.car = kwargs.get('car')
self.bg_sky = kwargs.get('bg_sky')
self.bg_hills = kwargs.get('bg_hills')
self.bg_trees = kwargs.get('bg_trees')
self.clr_dark_road = utils.clr_from_str(FP_COLORS['DARK']['road'])
self.clr_dark_grass = utils.clr_from_str(FP_COLORS['DARK']['grass'])
self.rd_reset(init=True)
self.add_fog()
    def prms_reset(self, keep_segs=False):
        """Reset all runtime parameters (camera, projection, speed, score,
        timers) to their starting values.

        :param keep_segs: when True, the generated road segments and their
            sprite objects are preserved so the same road can be replayed.
        """
        # queued keyboard events, consumed elsewhere in the update loop
        self.e_keys_up = []
        self.e_keys_dn = []
        # camera position in world coordinates
        self.camera_x = 0.0
        self.camera_y = 0.0
        self.camera_z = 500.0#1000.0#0.0 == self.camera_h
        # scratch coordinates of the point currently being projected:
        # (xw,yw,zw) world -> (xc,yc,zc) camera -> (xp,yp) projection
        # plane -> (xs,ys) screen (see the geometry helpers below)
        self.xw = 0.0
        self.yw = 0.0
        self.zw = 0.0
        self.xc = 0.0
        self.yc = 0.0
        self.zc = 0.0 ##
        self.xp = 0.0
        self.yp = 0.0
        self.xs = 0.0
        self.ys = 0.0
        # distance from the camera to the projection plane
        self.d = 200.0#100.0#10.0#30.0#1.0
        self.w = self.size[0]
        self.h = self.size[1]
        if not keep_segs:
            self.segments = []
            self.rd_sprt_objs = {}
            self.rd_sprt_cache = [] # for sprites render order
            self.track_len = 0.0
        # road geometry tuning (trailing '#' values are earlier experiments)
        self.seg_len = 200.0#100.0#20.0#60.0#200.0#
        self.road_w = 2400#2000#600.0#200.0#1000.0#200#
        self.camera_h = 500.0#1000.0#
        self.speed_max = 300.0#180.0#200.0#100.0
        self.lane_w = 60
        self.seg_n = 300#200
        #self.seg_draw_n = 200#150
        self.seg_draw_n = 70#100#200#150
        # player state
        self.speed = 0.0
        self.position = 0.0
        self.player_x = 0.0#100.0#1000.0#
        self.centrifugal = 0.1#0.06#0.08#0.01#0.3
        self.player_seg = None
        self.base_seg = None # the segment just under the car
        self.player_di = 0 # 0:^ 1:> 2:v 3:<
        self.player_go = 0 # 0:- 1:^ 2:v
        # speed change per tick: accelerating / braking / coasting
        self.speed_dt_up = 1.0#2.0#3.0
        self.speed_dt_dn = 2.0#4.0#6.0
        self.speed_dt_na = 1.0#3.0
        self.player_x_dt = 60.0#30.0#20.0
        self.last_seg_i = 0
        # scoring / game lifecycle
        self.score = 0
        self.game_over = False
        self.game_score = 0.0
        self.tm_start = 0.0
        self.tm_end = 0.0
        self.tm_last_once = 0.0
        # scroll factors for the background layers (presumably parallax --
        # confirm against the render code)
        self.sky_speed = 0.1#0.05#
        self.hill_speed = 0.2#0.1#
        self.tree_speed = 0.3#0.15#
    def rd_reset(self, init=False, keep_segs=False, segs_file=None):
        """(Re)build the road: reset parameters, obtain segments, redraw.

        :param init: True only for the very first call (from __init__),
            when there are no sprite objects to tear down yet.
        :param keep_segs: replay the existing road instead of generating
            a new one.
        :param segs_file: optional JSON file to load the segment list from;
            falls back to random generation if loading fails.
        """
        #if not init and not keep_segs:
        if not init:
            self.rd_sprts_del_all_objs()
        self.prms_reset(keep_segs=keep_segs)
        if segs_file is not None:
            try:
                segs = self.rd_seg_json_load(segs_file)
                self.segments = segs
                self.track_len = len(self.segments) * self.seg_len
            except Exception as e:
                # best-effort: report and fall back to a random road
                print e
                self.init_rd_segs_rand_1()
        else:
            if not keep_segs:
                self.init_rd_segs_rand_1()
        self.draw_on()
        self.rd_seg_render()
    def init_rd_segs_rand_1(self):
        """Generate a random road: a short straight lead-in, random curves,
        random segments, a tail for draw look-ahead, then the start line and
        road sprites. Sizes are deliberately small "for a3c train" runs.
        """
        #self.rd_seg_init(self.seg_n)
        #self.rd_seg_init(self.seg_draw_n)
        #self.rd_seg_init(100)#20#500#2#10#4#1#100#200
        #self.rd_seg_init(random.randint(30, 100))
        self.rd_seg_init(random.randint(1, 10)) # for a3c train
        self.rd_seg_init_rand_curve()
        #self.add_curves()
        #self.add_low_rolling_hills(20, 2.0)
        ##self.add_low_rolling_hills(30, 4.0)
        #self.rd_seg_init_rand(10)#50#10#3#1
        #segnrand = random.randint(3, 30)
        segnrand = random.randint(2, 6) # for a3c train
        self.rd_seg_init_rand(segnrand)
        # for segment draw
        #self.rd_seg_init(self.seg_draw_n)
        #self.rd_seg_init(100)#20#500#2#10#4#1#100#200
        self.rd_seg_init(10) # for a3c train
        self.rd_start_seg_init()
        self.rd_sprts_init_rand()
    def draw_on(self, *args, **kwargs):
        """Repaint the whole surface with the flat dark-grass colour."""
        self.fill(self.clr_dark_grass)
def add_fog(self):
self.fog = FPSptFog(self.size)
self.fog.rect.top = 240
self.fog.rect.left = 0
self.disp_add(self.fog)
def get_seg_base_i(self, pos=None):
if pos is None:
pos = self.position
i = int(pos / self.seg_len)
#x#i = int(utils.math_round(pos / self.seg_len))
#i = int(math.floor(pos / self.seg_len))
#i = int(math.ceil(pos / self.seg_len))
seg_n = len(self.segments)
i = (i + seg_n) % seg_n
return i
def rd_get_segs(self, whole=False):
if whole:
segs = self.segments
else:
segs = self.segments[:-self.seg_draw_n]
return segs
# #### geometry #### #
def geo_prjc_scale(self, d, zc):
if zc == 0.0:
return 1.0
else:
return d / zc
def xc_to_xp(self, xc, d, zc):
if zc == 0.0:
#xp = float('inf')
#xp = 2 ** 64
xp = xc
else:
xp = xc * (d / zc)
return xp
def yc_to_yp(self, yc, d, zc):
if zc == 0.0:
#yp = float('inf')
#yp = 2 ** 64
yp = yc
else:
yp = yc * (d / zc)
return yp
def xp_to_xs(self, xp, w):
#xs = w / 2.0 + w / 2.0 * xp
xs = w / 2.0 + xp
return xs
def yp_to_ys(self, yp, h):
#ys = h / 2.0 - h / | |
the submitted value
self.submit_form_step()
self.findBy('xpath', '//img[@alt="Value 16_2"]')
self.findByNot('xpath', '//img[@alt="Value 15_1"]')
# She goes back to the form
WebDriverWait(self.browser, 10).until(
EC.visibility_of_element_located((By.ID, "cat_4"))
)
self.click_edit_section('cat_4')
cb_1_1 = self.findBy('xpath', '//label[@for="id_qg_12-0-key_16_1"]')
cb_1_2 = self.findBy('xpath', '//label[@for="id_qg_12-0-key_16_2"]')
cb_2_1 = self.findBy('xpath', '//label[@for="id_qg_12-0-key_15_1"]')
cb_2_1_cb = self.findBy('id', 'id_qg_12-0-key_15_1')
# She sees the conditional question is visible
self.assertTrue(cb_2_1.is_displayed())
# She selects a checkbox of the conditional question
cb_2_1.click()
        # She deselects the checkbox that triggers the condition, then
        # reselects it. She sees the conditional checkbox is not selected anymore
self.assertEqual(cb_2_1_cb.get_attribute('checked'), 'true')
cb_1_2.click()
self.assertFalse(cb_2_1.is_displayed())
cb_1_2.click()
self.assertTrue(cb_2_1.is_displayed())
self.assertIsNone(cb_2_1_cb.get_attribute('checked'))
# She selects something of the conditional question
cb_2_1.click()
# She submits the step
self.submit_form_step()
self.findBy('xpath', '//img[@alt="Value 16_2"]')
self.findBy('xpath', '//img[@alt="Value 15_1"]')
# She submits the entire form
self.review_action('submit')
self.findBy('xpath', '//img[@alt="Value 16_2"]')
self.findBy('xpath', '//img[@alt="Value 15_1"]')
# def test_image_checkbox_subcategory(self):
# # Alice logs in
# self.doLogin()
# cat_4_position = get_position_of_category('cat_4')
# # She goes to a step of the questionnaire
# self.browser.get(self.live_server_url + reverse(
# route_questionnaire_new_step,
# kwargs={'identifier': 'new', 'step': 'cat_4'}))
# # She sees the checkbox images of Key 15 which are not the same
# # as for Key 14.
# img_1_key_14 = self.findBy('xpath', '//img[@alt="Value 14_1"]')
# img_1_key_15 = self.findBy('xpath', '//img[@alt="Value 15_1"]')
# self.assertNotEqual(
# img_1_key_14.get_attribute('src'),
# img_1_key_15.get_attribute('src'))
# # She sees that no Checkbox of Key 15 is selected by default
# self.findByNot(
# 'xpath', '//input[@name="qg_12-0-key_15" and @checked="checked"]')
# # She also sees that Key 16 is not visible
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# self.assertIn('display: none;', subcat_val_1.get_attribute('style'))
# # She sees that the form progress is at 0
# self.findBy('xpath', '//span[@class="meter" and @style="width:0%"]')
# # She submits the form empty and sees that no value was submitted,
# # progress of Category 4 is still 0
# self.findBy('id', 'button-submit').click()
# self.findByNot('xpath', '//*[text()[contains(.,"Key 15")]]')
# progress_indicator = self.findBy(
# 'xpath', '(//a[contains(@href, "edit/new/cat")])[{}]'.format(
# cat_4_position))
# self.assertIn('0/', progress_indicator.text)
# # She goes back to the questionnaire step and sees that form
# # progress is still at 0 and no checkbox is selected
# self.browser.get(self.live_server_url + reverse(
# route_questionnaire_new_step,
# kwargs={'identifier': 'new', 'step': 'cat_4'}))
# self.findBy('xpath', '//span[@class="meter" and @style="width:0%"]')
# self.findByNot(
# 'xpath', '//input[@name="qg_12-0-key_15" and @checked="checked"]')
# # She also sees that Key 16 is not visible
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# self.assertIn('display: none;', subcat_val_1.get_attribute('style'))
# # She selects the first checkbox and sees that the form progress
# # was updated
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_15"])[1]').click()
# self.findBy(
# 'xpath', '//span[@class="meter" and @style="width: 33.3333%;"]')
# # She also sees that Key 16 is now visible but no value is selected
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# self.browser.implicitly_wait(5)
# self.assertNotIn(
# 'display: none;', subcat_val_1.get_attribute('style')
# )
# self.findByNot(
# 'xpath', '//input[@name="qg_12-0-key_16" and @checked="checked"]')
# # She submits the step and sees that Key 15 was submitted and
# # the form progress on the overview page is updated
# self.findBy('id', 'button-submit').click()
# self.checkOnPage('Key 15')
# self.findBy('xpath', '//img[@alt="Value 15_1"]')
# progress_indicator = self.findBy(
# 'xpath', '(//a[contains(@href, "edit/new/cat")])[{}]'.format(
# cat_4_position))
# self.assertIn('1/', progress_indicator.text)
# # She goes back to the step and sees that the value of Key 15 is
# # selected, form progress is at 1
# self.browser.get(self.live_server_url + reverse(
# route_questionnaire_new_step,
# kwargs={'identifier': 'new', 'step': 'cat_4'}))
# self.findBy(
# 'xpath', '//span[@class="meter" and @style="width: 33.3333%;"]')
# # Key 16 is visible but no value selected
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# self.assertNotIn(
# 'display: none;', subcat_val_1.get_attribute('style')
# )
# self.findByNot(
# 'xpath', '//input[@name="qg_12-0-key_16" and @checked="checked"]')
# # She selects a value of Key 16
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_16"])[1]').click()
# # She submits the step and sees that both values are submitted
# self.findBy('id', 'button-submit').click()
# self.checkOnPage('Key 15')
# self.findBy('xpath', '//img[@alt="Value 15_1"]')
# self.checkOnPage('Key 16')
# self.findBy('xpath', '//img[@alt="Value 16_1"]')
# progress_indicator = self.findBy(
# 'xpath', '(//a[contains(@href, "edit/new/cat")])[{}]'.format(
# cat_4_position))
# self.assertIn('1/', progress_indicator.text)
# # She goes back to the step and sees that the value of Key 15 is
# # selected, form progress is at 1
# self.browser.get(self.live_server_url + reverse(
# route_questionnaire_new_step,
# kwargs={'identifier': 'new', 'step': 'cat_4'}))
# self.findBy(
# 'xpath', '//span[@class="meter" and @style="width: 33.3333%;"]')
# # She sees that the value of Key 15 is selected. Key 16 is
# # visible and the first value is selected.
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# self.assertNotIn(
# 'display: none;', subcat_val_1.get_attribute('style'))
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16" and @checked="checked"]')
# # She deselects the value of Key 15 and sees that Key 16 is not
# # visible anymore
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_15"])[1]').click()
# subcat_val_1 = self.findBy('id', 'id_qg_12-0-key_15_1_sub')
# self.findBy(
# 'xpath', '//input[@name="qg_12-0-key_16"]', base=subcat_val_1)
# time.sleep(1)
# self.assertIn('display: none;', subcat_val_1.get_attribute('style'))
# # She reselects the value of Key 15 and sees that the previously
# # selected value of Key 16 is not selected anymore.
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_15"])[1]').click()
# self.findByNot(
# 'xpath', '//input[@name="qg_12-0-key_16" and @checked="checked"]')
# # She selects two values of Key 16 again and submits the form
# self.browser.implicitly_wait(5)
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_16"])[1]').click()
# self.findBy(
# 'xpath', '(//input[@name="qg_12-0-key_16"])[2]').click()
# self.findBy('id', 'button-submit').click()
# self.checkOnPage('Key 15')
# self.findBy('xpath', '//img[@alt="Value 15_1"]')
# self.checkOnPage('Key 16')
# self.findBy('xpath', '//img[@alt="Value 16_1"]')
# self.findBy('xpath', '//img[@alt="Value 16_2"]')
# # She submits the form and sees that the values were stored
# # correctly
# self.findBy('id', 'button-submit').click()
# self.checkOnPage('Key 15')
# self.findBy('xpath', '//img[@alt="Value 15_1"]')
# self.checkOnPage('Key 16')
# self.findBy('xpath', '//img[@alt="Value 16_1"]')
# self.findBy('xpath', '//img[@alt="Value 16_2"]')
    def test_measure_selects(self):
        """Exercise a measure-select (button-bar) question end to end.

        Covers: empty submit leaves progress at 0, values keep their
        configured (non-alphabetical) order, selecting/deselecting a value
        toggles the row highlight and the progress meter, and the chosen
        value survives both the step submit and the final submission.
        """
        # Alice logs in
        self.doLogin()
        # She goes to a step of the questionnaire
        self.browser.get(self.live_server_url + reverse(
            route_questionnaire_new_step,
            kwargs={'identifier': 'new', 'step': 'cat_2'}))
        # She sees Key 12 in a row which is not selected
        self.findByNot(
            'xpath', '//div[@class="row list-item is-selected"]/div/label['
            'contains(text(), "Key 12")]')
        # She sees that the form progress is at 0
        # NOTE(review): meter style here is "width:0%" but later assertions
        # use "width: 25%;" (with space/semicolon) -- presumably this matches
        # how the frontend writes the attribute in each case; confirm before
        # normalizing.
        self.findBy('xpath', '//span[@class="meter" and @style="width:0%"]')
        # She submits the form empty and sees that no value was submitted,
        # progress of Category 2 is still 0
        self.submit_form_step()
        self.findByNot('xpath', '//*[text()[contains(.,"Key 12")]]')
        progress_indicator = self.findBy(
            'xpath',
            '(//div[@class="tech-section-progress"])[3]/span[@class="steps"]')
        self.assertIn('0/', progress_indicator.text)
        # She goes back to the questionnaire step and sees that form
        # progress is still at 0 and the row is unselected
        self.click_edit_section('cat_2')
        self.findBy('xpath', '//span[@class="meter" and @style="width:0%"]')
        self.findByNot(
            'xpath', '//div[contains(@class, "is-selected")]//label/span['
            'text()="low"]')
        # She sees that the values are not ordered alphabetically
        measures = ["low", "medium", "high"]
        for i, m in enumerate(measures):
            self.findBy(
                'xpath', '(//div[@class="button-bar"]/ul/li/label/span)[{}]['
                'contains(text(), "{}")]'.format(i + 1, m))
        # She selects the first value and sees that the row is now
        # selected and the form progress was updated
        self.findBy(
            'xpath', '//label/span[contains(text(), "low")]').click()
        self.findBy(
            'xpath', '//div[contains(@class, "is-selected")]//label/span['
            'text()="low"]')
        self.findBy(
            'xpath', '//span[@class="meter" and @style="width: 25%;"]')
        # She submits the step and sees that the value was submitted and
        # the form progress on the overview page is updated
        self.submit_form_step()
        self.findBy('xpath', '//*[text()[contains(.,"Key 12")]]')
        self.findBy('xpath', '//*[text()[contains(.,"low")]]')
        progress_indicator = self.findBy(
            'xpath',
            '(//div[@class="tech-section-progress"])[3]/span[@class="steps"]')
        self.assertIn('1/', progress_indicator.text)
        # She goes back to the step and sees the row is highlighted and
        # low selected, form progress is at 1
        self.click_edit_section('cat_2')
        self.findBy(
            'xpath', '//div[contains(@class, "is-selected")]//label/span['
            'text()="low"]')
        self.findBy(
            'xpath', '//span[@class="meter" and @style="width: 25%;"]')
        # She selects None and sees that the row is not highlighted
        # anymore and the progress was updated
        # (clicking the selected value again deselects it)
        self.findBy(
            'xpath', '//label/span[contains(text(), "low")]').click()
        self.findByNot(
            'xpath', '//div[contains(@class, "is-selected")]//label/span['
            'text()="low"]')
        self.findBy(
            'xpath', '//span[@class="meter" and @style="width: 0%;"]')
        # She then selects medium and submits the form
        self.findBy(
            'xpath', '//label/span[contains(text(), "medium")]').click()
        self.submit_form_step()
        # The overview now shows medium and she submits the form
        self.findBy('xpath', '//*[text()[contains(.,"Key 12")]]')
        self.findBy('xpath', '//*[text()[contains(.,"medium")]]')
        # She submits the form and sees that the radio value is stored
        # correctly
        self.review_action('submit')
        self.findBy('xpath', '//*[text()[contains(.,"Key 12")]]')
        self.findBy('xpath', '//*[text()[contains(.,"medium")]]')
def test_measure_selects_repeating(self):
# Alice logs in
self.doLogin()
# She goes to a step of the questionnaire
self.browser.get(self.live_server_url + reverse(
route_questionnaire_new_step,
kwargs={'identifier': 'new', 'step': 'cat_2'}))
self.rearrangeFormHeader()
# She sees Key 12 and selects a measure (low)
self.findBy(
'xpath', '//div[@data-questiongroup-keyword="qg_9"][1]//label/'
'span[contains(text(), "low")]').click()
# She adds another questiongroup
self.findBy(
'xpath', '//fieldset[@class="row"][2]//a[@data-add-item]').click()
# She selects | |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
from oslo_utils import reflection
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
# Use monotonic time in stopwatches if we can get at it...
#
# PEP @ https://www.python.org/dev/peps/pep-0418/
try:
now = time.monotonic
except AttributeError:
try:
# Try to use the pypi module if it's available (optionally...)
from monotonic import monotonic as now
except (AttributeError, ImportError):
# Ok fallback to the non-monotonic one...
now = time.time
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format.

    :param at: datetime to render; any falsy value means "use utcnow()".
    :param subsecond: when True, include microseconds in the output.
    :returns: ISO 8601 string; 'Z' suffix for UTC, tz name otherwise.
    """
    if not at:
        at = utcnow()
    if subsecond:
        fmt = _ISO8601_TIME_FORMAT_SUBSECOND
    else:
        fmt = _ISO8601_TIME_FORMAT
    st = at.strftime(fmt)
    if at.tzinfo:
        tz = at.tzinfo.tzname(None)
    else:
        tz = 'UTC'
    if tz == 'UTC':
        st += 'Z'
    else:
        st += tz
    return st
def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :raises ValueError: if *timestr* is malformed or not a string,
        so callers need not know about the iso8601 library's errors.
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow.

    :param at: datetime to format; any falsy value means "use utcnow()".
    :param fmt: strftime format, defaulting to the subsecond ISO form.
    """
    # utcnow() is only evaluated when no explicit time was supplied.
    target = at or utcnow()
    return target.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime (inverse of strtime)."""
    parsed = datetime.datetime.strptime(timestr, fmt)
    return parsed
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    delta = timestamp.utcoffset()
    if delta is None:
        # Already naive -- return the object unchanged.
        return timestamp
    naive = timestamp.replace(tzinfo=None)
    return naive - delta
def is_older_than(before, seconds):
    """Return True if before is older than seconds.

    :param before: naive datetime or a strtime-formatted string.
    """
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    # Compare as naive UTC regardless of how the value arrived.
    before = before.replace(tzinfo=None)
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds.

    :param after: naive datetime or a strtime-formatted string.
    """
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    # Compare as naive UTC regardless of how the value arrived.
    after = after.replace(tzinfo=None)
    lead = after - utcnow()
    return lead > datetime.timedelta(seconds=seconds)
def utcnow_ts(microsecond=False):
    """Timestamp version of our utcnow function.

    See :py:class:`oslo_utils.fixture.TimeFixture`.
    """
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        ts = time.time()
        return ts if microsecond else int(ts)
    # Time is overridden: derive the timestamp from the fake clock.
    current = utcnow()
    ts = calendar.timegm(current.timetuple())
    if microsecond:
        ts += current.microsecond / float(1000000)
    return ts
def utcnow():
    """Overridable version of utils.utcnow.

    ``utcnow.override_time`` may hold a single datetime (returned every
    call) or a list of datetimes (consumed one per call).
    See :py:class:`oslo_utils.fixture.TimeFixture`.
    """
    override = utcnow.override_time
    if override:
        try:
            # List of canned times: consume from the front.
            return override.pop(0)
        except AttributeError:
            # A single datetime: return it unchanged.
            return override
    return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp, microsecond=False):
    """Returns an iso8601 formatted date from timestamp."""
    dt = datetime.datetime.utcfromtimestamp(timestamp)
    return isotime(dt, microsecond)
# Mutable override hook used by set_time_override()/clear_time_override();
# None means "not overridden", so utcnow() returns the real current time.
utcnow.override_time = None
def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.
    See :py:class:`oslo_utils.fixture.TimeFixture`.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    # Any falsy value (None, empty list) falls back to "now".
    if not override_time:
        override_time = datetime.datetime.utcnow()
    utcnow.override_time = override_time
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works whether ``utcnow.override_time`` is a single datetime or a
    list of datetimes. See :py:class:`oslo_utils.fixture.TimeFixture`.

    :param timedelta: datetime.timedelta to advance the override by.
    :raises AssertionError: if no time override is currently set.
    """
    assert utcnow.override_time is not None
    try:
        # BUG FIX: datetime objects are immutable, so the previous
        # ``for dt in utcnow.override_time: dt += timedelta`` only rebound
        # a local name and left a list override completely unchanged.
        # Rebuild the list instead so every queued time actually advances.
        # NOTE: this replaces the list object; callers must re-read
        # utcnow.override_time rather than hold a reference to the old list.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Not iterable: the override is a single datetime.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by seconds.

    See :py:class:`oslo_utils.fixture.TimeFixture`.
    """
    advance_time_delta(datetime.timedelta(seconds=seconds))
def clear_time_override():
    """Remove the overridden time.
    See :py:class:`oslo_utils.fixture.TimeFixture`.
    """
    # Restore real time: utcnow() falls back to datetime.datetime.utcnow().
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.

    :param now: datetime to marshall; any falsy value means "use utcnow()".
    :returns: dict of the datetime's calendar/clock fields.
    """
    if not now:
        now = utcnow()
    return {'day': now.day,
            'month': now.month,
            'year': now.year,
            'hour': now.hour,
            'minute': now.minute,
            'second': now.second,
            'microsecond': now.microsecond}
def unmarshall_time(tyme):
    """Unmarshall a datetime dict (inverse of marshall_now)."""
    return datetime.datetime(year=tyme['year'],
                             month=tyme['month'],
                             day=tyme['day'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])
def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    return total_seconds(after - before)
def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.

    datetime.timedelta lacks total_seconds() on Python 2.6, so fall
    back to computing it by hand when the method is absent.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        micros = delta.microseconds / float(10 ** 6)
        return delta.days * 86400 + delta.seconds + micros
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
class Split(object):
    """A *immutable* stopwatch split.

    See: http://en.wikipedia.org/wiki/Stopwatch for what this is/represents.
    """

    # No per-instance __dict__: splits are tiny and created frequently.
    __slots__ = ['_elapsed', '_length']

    def __init__(self, elapsed, length):
        self._elapsed = elapsed
        self._length = length

    @property
    def elapsed(self):
        """Duration from stopwatch start."""
        return self._elapsed

    @property
    def length(self):
        """Seconds from last split (or the elapsed time if no prior split)."""
        return self._length

    def __repr__(self):
        cls_name = reflection.get_class_name(self, fully_qualified=False)
        return "%s(elapsed=%s, length=%s)" % (cls_name,
                                              self._elapsed,
                                              self._length)
class StopWatch(object):
    """A simple timer/stopwatch helper class.
    Inspired by: apache-commons-lang java stopwatch.
    Not thread-safe (when a single watch is mutated by multiple threads at
    the same time). Thread-safe when used by a single thread (not shared) or
    when operations are performed in a thread-safe manner on these objects by
    wrapping those operations with locks.
    """
    # State-machine labels; self._state stays None until first start().
    _STARTED = 'STARTED'
    _STOPPED = 'STOPPED'
    def __init__(self, duration=None):
        # duration: optional expiry window in seconds used by leftover()
        # and expired(); None means the watch never expires.
        if duration is not None and duration < 0:
            raise ValueError("Duration must be greater or equal to"
                             " zero and not %s" % duration)
        self._duration = duration
        self._started_at = None
        self._stopped_at = None
        self._state = None
        self._splits = []
    def start(self):
        """Starts the watch (if not already started).
        NOTE(harlowja): resets any splits previously captured (if any).
        """
        if self._state == self._STARTED:
            # Already running: no-op (splits are NOT reset in this case).
            return self
        self._started_at = now()
        self._stopped_at = None
        self._state = self._STARTED
        self._splits = []
        return self
    @property
    def splits(self):
        """Accessor to all/any splits that have been captured."""
        return tuple(self._splits)
    def split(self):
        """Captures a split/elapsed since start time (and doesn't stop)."""
        if self._state == self._STARTED:
            elapsed = self.elapsed()
            if self._splits:
                # Length is measured from the previous split's elapsed mark.
                length = self._delta_seconds(self._splits[-1].elapsed, elapsed)
            else:
                length = elapsed
            self._splits.append(Split(elapsed, length))
            return self._splits[-1]
        else:
            raise RuntimeError("Can not create a split time of a stopwatch"
                               " if it has not been started or if it has been"
                               " stopped")
    def restart(self):
        """Restarts the watch from a started/stopped state."""
        if self._state == self._STARTED:
            self.stop()
        self.start()
        return self
    @staticmethod
    def _delta_seconds(earlier, later):
        # Uses max to avoid the delta/time going backwards (and thus negative).
        return max(0.0, later - earlier)
    def elapsed(self, maximum=None):
        """Returns how many seconds have elapsed.

        :param maximum: optional ceiling to clamp the reported value to.
        :raises RuntimeError: if the watch was never started.
        """
        if self._state not in (self._STARTED, self._STOPPED):
            raise RuntimeError("Can not get the elapsed time of a stopwatch"
                               " if it has not been started/stopped")
        if self._state == self._STOPPED:
            elapsed = self._delta_seconds(self._started_at, self._stopped_at)
        else:
            elapsed = self._delta_seconds(self._started_at, now())
        if maximum is not None and elapsed > maximum:
            elapsed = max(0.0, maximum)
        return elapsed
    def __enter__(self):
        """Starts the watch."""
        self.start()
        return self
    def __exit__(self, type, value, traceback):
        """Stops the watch (ignoring errors if stop fails)."""
        try:
            self.stop()
        except RuntimeError:
            pass
    def leftover(self, return_none=False):
        """Returns how many seconds are left until the watch expires.
        :param return_none: when ``True`` instead of raising a ``RuntimeError``
                            when no duration has been set this call will
                            return ``None`` instead.
        :type return_none: boolean
        """
        if self._state != self._STARTED:
            raise RuntimeError("Can not get the leftover time of a stopwatch"
                               " that has not been started")
        if self._duration is None:
            if not return_none:
                raise RuntimeError("Can not get the leftover time of a watch"
                                   " that has no duration")
            return None
        return max(0.0, self._duration - self.elapsed())
    def expired(self):
        """Returns if the watch has expired (ie, duration provided elapsed)."""
        if self._state not in (self._STARTED, self._STOPPED):
            raise RuntimeError("Can not check if a stopwatch has expired"
                               " if it has not been started/stopped")
        if self._duration is None:
            # Duration-less watches never expire.
            return False
        return self.elapsed() > self._duration
    def has_started(self):
        # True while the watch is running.
        return self._state == self._STARTED
    def has_stopped(self):
        # True once stop() has been called (and before any restart/resume).
        return self._state == self._STOPPED
    def resume(self):
        """Resumes the watch from a stopped state."""
        if self._state == self._STOPPED:
            # NOTE(review): _started_at is not adjusted here, so elapsed()
            # after a resume also counts the time spent stopped -- presumably
            # intentional; confirm before relying on pause/resume semantics.
            self._state = self._STARTED
            return self
        else:
            raise RuntimeError("Can not resume a stopwatch that has not been"
                               " stopped")
    def stop(self):
        """Stops the watch."""
        if self._state == self._STOPPED:
            # Idempotent: stopping twice returns self without error.
            return self
        if self._state != self._STARTED:
            raise RuntimeError("Can not stop a stopwatch that has not been"
                               " started")
        self._stopped_at = now()
        self._state = self._STOPPED
| |
"block_error" not in data["blocks"][0]
assert not data["blocks"][1]["confirmed"]
assert data["blocks"][1]["destination"] == DESTINATION_B
assert data["blocks"][1]["amount"] == "-2000"
assert "block_error" not in data["blocks"][1]
assert wallet.accounts[0].balance == 7000
    def test_send_many_description(
            self, wallet_mock_node, stdio, wallet_path, wallet_factory,
            wallet_loader):
        """send-many applies --description to every created send block."""
        wallet = wallet_factory(
            balance=10000, confirmed=True)
        wallet.save(wallet_path)
        account_id = wallet.accounts[0].account_id
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        destination_b = \
            "xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
        wallet_mock_node.start()
        stdio([
            "--wallet", str(wallet_path), "send-many",
            account_id,
            "{},1000".format(destination_a),
            "{},2000".format(destination_b),
            "--description", "Test description"
        ])
        # Reload from disk: the two newest blocks carry the description.
        wallet = wallet_loader(wallet_path)
        block_a = wallet.accounts[0].blocks[-2]
        block_b = wallet.accounts[0].blocks[-1]
        assert block_a.description == "Test description"
        assert block_b.description == "Test description"
    def test_send_many_with_denominations(
            self, wallet_mock_node, stdio, wallet_path, wallet_factory,
            wallet_loader):
        """Amounts given in nano/Mnano units are converted to raw amounts."""
        wallet = wallet_factory(
            balance=10000, confirmed=True)
        wallet.save(wallet_path)
        account_id = wallet.accounts[0].account_id
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        destination_b = \
            "xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
        wallet_mock_node.start()
        # 0.000000000000000000001 nano == 1000 raw,
        # 0.000000000000000000000000002 Mnano == 2000 raw.
        result = stdio([
            "--wallet", str(wallet_path), "send-many",
            account_id,
            "{},0.000000000000000000001 nano".format(destination_a),
            "{},0.000000000000000000000000002 Mnano".format(destination_b)
        ])
        wallet = wallet_loader(wallet_path)
        block_a = wallet.accounts[0].blocks[-2]
        block_b = wallet.accounts[0].blocks[-1]
        assert block_a.link_as_account == destination_a
        assert block_a.confirmed
        assert block_a.amount == -1000
        assert block_b.link_as_account == destination_b
        assert block_b.confirmed
        assert block_b.amount == -2000
        data = result["data"]
        assert data["blocks"][0]["destination"] == destination_a
        assert data["blocks"][0]["amount"] == "-1000"
        assert data["blocks"][1]["destination"] == destination_b
        assert data["blocks"][1]["amount"] == "-2000"
        assert wallet.accounts[0].balance == 7000
    def test_send_many_spendable_account_required(
            self, stdio, wallet_mock_node, wallet_path, wallet_factory,
            watching_account_factory):
        """Sending from a watch-only account fails with a clear error."""
        wallet = wallet_factory(balance=0)
        # Replace all accounts with a single watch-only (no key) account.
        wallet.accounts = []
        wallet.add_account(watching_account_factory())
        account_id = wallet.accounts[0].account_id
        wallet.save(wallet_path)
        wallet_mock_node.start()
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        result = stdio([
            "--wallet", wallet_path, "send-many", account_id,
            "{},1000".format(destination_a)
        ], success=False)
        assert result["data"]["error"] == "spendable_account_required"
    def test_send_many_insufficient_balance(
            self, wallet_mock_node, stdio, wallet_path, wallet_factory):
        """The combined amount (5000+5001 > 10000) is rejected up front."""
        wallet = wallet_factory(
            balance=10000, confirmed=True)
        wallet.save(wallet_path)
        account_id = wallet.accounts[0].account_id
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        destination_b = \
            "xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
        wallet_mock_node.start()
        result = stdio([
            "--wallet", str(wallet_path), "send-many",
            account_id,
            "{},5000".format(destination_a),
            "{},5001".format(destination_b)
        ], success=False)
        assert result["data"]["error"] == "insufficient_balance"
def test_send_many_nonexistent_source(
self, wallet_mock_node, stdio, wallet_path, wallet_factory):
wallet = wallet_factory(
balance=10000, confirmed=True)
wallet.save(wallet_path)
account_id = \
"<KEY>"
destination_a = \
"xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
destination_b = \
"xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
wallet_mock_node.start()
result = stdio([
"--wallet", str(wallet_path), "send-many",
account_id,
"{},5000".format(destination_a),
"{},5001".format(destination_b)
], success=False)
assert result["data"]["error"] == "account_not_found"
def test_send_many_invalid_destination(
self, wallet_mock_node, stdio, wallet_path, wallet_factory):
wallet = wallet_factory(balance=10000, confirmed=True)
wallet.save(wallet_path)
account_id = \
"<KEY>"
result = stdio([
"--wallet", str(wallet_path), "send-many",
account_id, "wrong,1000"
], raw=True)
assert "invalid 'destination': is not a valid account ID" in result
    def test_send_many_broadcast_failure(
            self, wallet_mock_node, stdio, wallet_path, wallet_factory,
            wallet_loader):
        """A mid-batch broadcast failure rejects all subsequent blocks."""
        # Send three transactions and cause the second one to fail,
        # which also causes the third one to be rejected
        wallet = wallet_factory(balance=10000, confirmed=True)
        wallet.save(wallet_path)
        account_id = wallet.accounts[0].account_id
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        destination_b = \
            "xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
        destination_c = \
            "xrb_33dd14f3jygie9mkq5s76oo9p1zf8137f1gt65mbsptnktijqb4uzs7hgdkm"
        # Mock node accepts 1 block, then fails every later broadcast.
        wallet_mock_node.fail_broadcast_after(block_count=1)
        wallet_mock_node.start()
        result = stdio([
            "--wallet", str(wallet_path), "send-many",
            account_id,
            "{},1000".format(destination_a),
            "{},2000".format(destination_b),
            "{},3000".format(destination_c)
        ], success=False)
        data = result["data"]
        assert data["error"] == "block_rejected"
        assert data["blocks"][0]["confirmed"]
        assert data["blocks"][1]["block_error"] == "source_block_missing"
        assert data["blocks"][2]["block_error"] == "previous_block_rejected"
        # Rejected blocks (2nd and 3rd) won't be saved
        wallet = wallet_loader(wallet_path)
        assert wallet.accounts[0].balance == 9000
        assert wallet.accounts[0].blocks[-1].amount == -1000
    def test_send_many_broadcast_timeout(
            self, mock_node, stdio, wallet_path, wallet_factory,
            wallet_loader):
        """A network timeout reports an error but still saves the blocks."""
        wallet = wallet_factory(balance=10000, confirmed=True)
        wallet.save(wallet_path)
        # Node only replays metadata datasets, so broadcasts never confirm
        # and the 1-second --timeout below is hit.
        mock_node.add_replay_datasets(["active_difficulty", "version"]).start()
        account_id = wallet.accounts[0].account_id
        destination_a = \
            "xrb_36t3jt9g5r33i817oimdpre1fyofoha5j4k4rq6ofokrokigf3q6xfhe6r6m"
        destination_b = \
            "xrb_1q7weoyzfw9z4836o11yfjfnqmf953yq8o4hcyp1thdak6xymqup4xczs5n8"
        result = stdio([
            "--wallet", wallet_path, "send-many", account_id,
            "{},1000".format(destination_a), "{},2000".format(destination_b),
            "--timeout", "1"
        ], success=False)
        assert result["data"]["error"] == "network_timeout"
        # The blocks are saved despite the timeout
        wallet = wallet_loader(wallet_path)
        assert wallet.accounts[0].blocks[-1].link_as_account == \
            destination_b
        assert wallet.accounts[0].blocks[-2].link_as_account == \
            destination_a
@pytest.mark.add_encrypted_test
class TestListAccounts:
    """Tests for the 'list-accounts' CLI command."""

    def test_list_accounts(self, stdio, wallet_path, wallet_loader):
        """All 20 generated accounts are listed with their custom names."""
        stdio(["create-wallet", str(wallet_path)])
        saved = wallet_loader(wallet_path)
        saved.accounts[4].name = "Fifth account"
        saved.save(wallet_path)
        result = stdio(["--wallet", str(wallet_path), "list-accounts"])
        accounts = result["data"]["accounts"]
        assert len(accounts) == 20
        assert accounts[4]["name"] == "Fifth account"
@pytest.mark.add_encrypted_test
class TestGetAccountPrivateKey:
    """Tests for the 'get-account-private-key' CLI command."""
    def test_get_account_private_key(
            self, stdio, wallet_path, wallet_factory, wallet_loader,
            is_encrypted_test):
        """The command returns the account's private key verbatim."""
        wallet = wallet_factory()
        account_id = wallet.accounts[0].account_id
        if is_encrypted_test:
            # Encrypted wallets keep secrets sealed; unlock to read the
            # expected value, then re-lock before saving.
            wallet.unlock("password")
            private_key = wallet.accounts[0].get_secret(
                "private_key", secret_key=wallet.secret_key
            )
            wallet.lock()
        else:
            private_key = wallet.accounts[0].private_key
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "get-account-private-key",
            account_id
        ])
        assert result["data"]["account_id"] == account_id
        assert result["data"]["private_key"] == private_key
    def test_get_account_private_key_account_not_found(
            self, stdio, wallet_path, wallet_factory):
        """Asking for an account the wallet doesn't hold fails cleanly."""
        wallet = wallet_factory()
        wallet.save(wallet_path)
        account_id = \
            "xrb_1111111111111111111111111111111111111111111111111111hifc8npp"
        result = stdio([
            "--wallet", wallet_path, "get-account-private-key",
            account_id
        ], success=False)
        assert result["data"]["error"] == "account_not_found"
    def test_get_account_private_key_watching_account(
            self, stdio, wallet_path, wallet_factory):
        """Watch-only accounts have no private key to return."""
        wallet = wallet_factory()
        account_id = wallet.accounts[0].account_id
        wallet.accounts[0].private_key = None
        wallet.accounts[0].source = AccountSource.WATCHING
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "get-account-private-key",
            account_id
        ], success=False)
        assert result["data"]["error"] == "spendable_account_required"
@pytest.mark.add_encrypted_test
class TestListBlocks:
    """Tests for the 'list-blocks' CLI command (ordering, paging)."""
    @pytest.fixture(scope="function")
    def active_wallet(self, wallet_factory, account_factory):
        # Wallet with exactly one account holding 50 confirmed blocks,
        # each moving 10 raw, for deterministic paging assertions.
        wallet = wallet_factory(balance=0)
        wallet.accounts = []
        wallet.add_account(
            account_factory(
                balance=500, block_count=50, complete=True, confirm=True
            )
        )
        return wallet
    def test_list_blocks(
            self, stdio, wallet_path, active_wallet):
        """Default listing is newest-first and honors --limit."""
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--limit", "20"
        ])
        data = result["data"]
        assert data["count"] == 50
        assert len(data["blocks"]) == 20
        # By default, the blocks are returned in descending order
        # eg. starting from newest to oldest blocks
        assert data["blocks"][0]["hash"] == account.blocks[49].block_hash
        assert data["blocks"][1]["hash"] == account.blocks[48].block_hash
        assert data["blocks"][0]["balance"] == "500"
        assert data["blocks"][0]["amount"] == "10"
    def test_list_blocks_ascending(
            self, stdio, wallet_path, active_wallet):
        """--no-descending lists oldest-first."""
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--limit", "20", "--no-descending"
        ])
        data = result["data"]
        assert data["count"] == 50
        assert len(data["blocks"]) == 20
        assert data["blocks"][0]["hash"] == account.blocks[0].block_hash
        assert data["blocks"][1]["hash"] == account.blocks[1].block_hash
        assert data["blocks"][0]["balance"] == "10"
        assert data["blocks"][0]["amount"] == "10"
    def test_list_blocks_empty(
            self, stdio, zero_balance_wallet, wallet_path):
        """An account with no blocks yields an empty listing."""
        # Empty accounts return an empty list
        wallet = zero_balance_wallet
        wallet.save(wallet_path)
        account_id = wallet.accounts[0].account_id
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account_id
        ])
        assert result["data"]["count"] == 0
        assert len(result["data"]["blocks"]) == 0
    def test_list_blocks_offset(self, stdio, wallet_path, active_wallet):
        """--offset skips from the newest end in descending order."""
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--offset", "5", "--limit", "20"
        ])
        data = result["data"]
        assert len(data["blocks"]) == 20
        assert data["blocks"][0]["hash"] == account.blocks[44].block_hash
        assert data["blocks"][1]["hash"] == account.blocks[43].block_hash
    def test_list_blocks_ascending_offset(
            self, stdio, wallet_path, active_wallet):
        """--offset skips from the oldest end with --no-descending."""
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--offset", "5", "--limit", "20", "--no-descending"
        ])
        data = result["data"]
        assert len(data["blocks"]) == 20
        assert data["blocks"][0]["hash"] == account.blocks[5].block_hash
        assert data["blocks"][1]["hash"] == account.blocks[6].block_hash
    def test_list_blocks_offset_empty(
            self, stdio, wallet_path, active_wallet):
        """An offset past the last block yields an empty page."""
        # If an offset higher than the amount of blocks is given,
        # return an empty list
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--offset", "50"
        ])
        assert len(result["data"]["blocks"]) == 0
    def test_list_blocks_offset_ascending_empty(
            self, stdio, wallet_path, active_wallet):
        """Same out-of-range offset behavior in ascending order."""
        wallet = active_wallet
        account = wallet.accounts[0]
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "list-blocks", account.account_id,
            "--offset", "50", "--no-descending"
        ])
        assert len(result["data"]["blocks"]) == 0
@pytest.mark.add_encrypted_test
class TestGetBlock:
    """Tests for the 'get-block' CLI command."""
    def test_get_block(self, stdio, wallet_factory, wallet_path):
        """A block's metadata and description are returned by hash."""
        wallet = wallet_factory(balance=10000, confirmed=True)
        block = wallet.accounts[0].blocks[0]
        block.description = "This is a test block"
        block_hash = block.block_hash
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "get-block", block_hash
        ])
        data = result["data"]
        assert data["hash"] == block_hash
        assert data["confirmed"]
        assert not data["is_link_block"]
        assert data["description"] == "This is a test block"
        assert data["amount"] == "10000"
        assert data["balance"] == "10000"
        assert data["timestamp"]["date"].isdigit()
        assert data["block_data"]["account"] == block.account
    def test_get_block_block_not_found(
            self, stdio, wallet_factory, wallet_path):
        """An unknown (but well-formed) hash fails with block_not_found."""
        wallet = wallet_factory(balance=10000, confirmed=True)
        wallet.save(wallet_path)
        result = stdio([
            "--wallet", wallet_path, "get-block", "A"*64
        ], success=False)
        assert result["data"]["error"] == "block_not_found"
    def test_get_block_link_block(self, stdio, wallet_factory, wallet_path):
        """Link (counterpart) blocks are retrievable and flagged as such."""
        wallet = wallet_factory(balance=10000, confirmed=True)
        block = wallet.accounts[0].blocks[0]
        block_hash = block.link_block.block_hash
        wallet.save(wallet_path)
        result = stdio(["--wallet", wallet_path, "get-block", block_hash])
        data = result["data"]
        assert data["hash"] == block_hash
        assert data["confirmed"]
        assert data["is_link_block"]
        assert data["amount"] == "-10000"
        assert data["timestamp"]["date"].isdigit()
        assert data["block_data"]["account"] == block.link_block.account
@pytest.mark.add_encrypted_test
class TestSetAccountName:
    """Tests for the 'set-account-name' CLI command."""

    def test_set_account_name(
            self, stdio, wallet_factory, wallet_path, wallet_loader):
        """The new name is echoed back and persisted to the wallet file."""
        wallet = wallet_factory()
        account_id = wallet.accounts[3].account_id
        wallet.save(str(wallet_path))
        result = stdio([
            "--wallet", str(wallet_path), "set-account-name",
            account_id, "Account number four"
        ])
        data = result["data"]
        assert data["account_id"] == account_id
        assert data["name"] == "Account number four"
        reloaded = wallet_loader(str(wallet_path))
        assert reloaded.accounts[3].name == "Account number four"

    def test_set_account_name_account_not_found(
            self, stdio, wallet_factory, wallet_path):
        """Renaming an account the wallet doesn't hold fails cleanly."""
        wallet = wallet_factory()
        wallet.save(str(wallet_path))
        result = stdio([
            "--wallet", str(wallet_path), "set-account-name",
            "xrb_1111111111111111111111111111111111111111111111111111hifc8npp",
            "Nonexistent account"
        ], success=False)
        assert result["data"]["error"] == "account_not_found"
@pytest.mark.add_encrypted_test
class TestClearAccountName:
    """Tests for the 'clear-account-name' CLI command.

    FIX: the name argument had been left as the redaction placeholder
    "<NAME>"; any concrete string works here, since the test only checks
    that the name is cleared afterwards.
    """
    def test_clear_account_name(
            self, stdio, zero_balance_wallet, wallet_path, wallet_loader):
        """clear-account-name removes a previously set name."""
        wallet = zero_balance_wallet
        account_id = wallet.accounts[0].account_id
        wallet.save(wallet_path)
        stdio([
            "--wallet", str(wallet_path), "set-account-name",
            account_id, "Temporary name"
        ])
        # If 'name' is not provided, the name is removed
        result = stdio([
            "--wallet", str(wallet_path), "clear-account-name",
            account_id
        ])
        assert result["data"]["account_id"] == account_id
        wallet = wallet_loader(wallet_path)
        assert not wallet.accounts[0].name
    def test_clear_account_name_account_not_found(
            self, stdio, wallet_factory, wallet_path):
        """Clearing the name of an unknown account fails cleanly."""
        wallet = wallet_factory()
        wallet.save(str(wallet_path))
        result = stdio([
            "--wallet", str(wallet_path), "clear-account-name",
            "xrb_1111111111111111111111111111111111111111111111111111hifc8npp",
        ], success=False)
        assert result["data"]["error"] == "account_not_found"
@pytest.mark.add_encrypted_test
class TestSetBlockDescription:
def test_set_block_description(
self, stdio, wallet_factory, wallet_loader, wallet_path):
wallet = wallet_factory(balance=1000)
account_id = wallet.accounts[0].account_id
block_hash = wallet.accounts[0].blocks[0].block_hash
wallet.save(wallet_path)
result = stdio([
"--wallet", wallet_path, "set-block-description",
block_hash, "Test description"
])
data = result["data"]
assert data["account_id"] == account_id
assert data["hash"] == block_hash
assert data["description"] == "Test description"
wallet = wallet_loader(wallet_path)
assert wallet.accounts[0].blocks[0].description == | |
<reponame>siddhu95/mcclanahoochie
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Geometry import *
from Materials import *
from ConstructiveGeometry import CSGadd, CSGint, CSGsub
import warnings
class Register(object):
    """
    Records photon positions and wavelengths for a device.

    Device objects are subclasses of Register. The `store` dict maps a
    surface identifier (or one of the special keys 'loss' and
    'volume_source') to a list of log entries of the form:
        (position, wavelength, bound, absorption_counter)
    where `bound` is "inbound"/"outbound" for surface hits and None for
    volume events.
    """

    def __init__(self):
        super(Register, self).__init__()
        # Keys are surface identifiers; values are lists of log-entry
        # tuples as described in log().
        self.store = dict()

    def log(self, photon):
        """Record `photon` in the store.

        Photons that are not on the device surface are logged under 'loss'
        (non-radiative loss, photon.active False) or 'volume_source' (an
        active photon inside the volume, which usually means a light source
        was placed inside an object).
        """
        # Need to check that the photon is on the surface.
        if not self.shape.on_surface(photon.position):
            log_entry = (list(photon.position), float(photon.wavelength),
                         None, photon.absorption_counter)
            if not photon.active:
                # The photon has been non-radiatively lost inside a material.
                if 'loss' not in self.store:
                    self.store['loss'] = []
                self.store['loss'].append(log_entry)
                if photon.show_log: print(' Logged as lost photon...')
                return
            else:
                # An active photon logged in the interior of a material:
                # most likely an internal (volume) light source.
                warnings.warn("It is likely that a light source has been placed inside an object. Normally the light sources should be external. Now attempting to log the ray and continue.")
                if 'volume_source' not in self.store:
                    self.store['volume_source'] = []
                self.store['volume_source'].append(log_entry)
                if photon.show_log: print('Logged as photon from a volume source...')
                return

        # surface_normal with acute=False always returns outward-facing
        # normals, so the angle against the ray direction gives the
        # crossing direction.
        normal = photon.exit_device.shape.surface_normal(photon.ray, acute=False)
        rads = angle(normal, photon.ray.direction)
        if rads < np.pi / 2:
            # Ray facing outwards.
            bound = "outbound"
        else:
            # Ray facing inwards.
            bound = "inbound"

        if photon.show_log: print(' Logged as  ' + bound + ' ...')
        key = photon.exit_device.shape.surface_identifier(photon.position)
        if key not in self.store:
            # Add an item for this key.
            self.store[key] = []
        # Entry layout:
        # [0] --> position
        # [1] --> wavelength
        # [2] --> boundedness (inbound or outbound)
        # [3] --> re-absorptions
        log_entry = (list(photon.position), float(photon.wavelength),
                     bound, photon.absorption_counter)
        self.store[key].append(log_entry)

    def count(self, shape, surface_point, bound):
        """
        Returns the number of photon counts with boundedness `bound` that
        are on the same surface as surface_point for the given shape.

        Returns float 0.0 when the surface has no entries at all (quirk
        preserved from the original implementation), an int otherwise.
        """
        key = shape.surface_identifier(surface_point)
        if key not in self.store:
            return 0.0
        # Bug fix: removed the dead `counts = None` / `counts == None`
        # branch -- counts is always an int by this point.
        counts = 0
        for entry in self.store[key]:
            if entry[2] == bound:
                counts += 1
        return counts

    def loss(self):
        """
        Returns the number of photons that have been non-radiatively lost
        in the volume of the shape.
        """
        if 'loss' not in self.store:
            return 0
        return len(self.store['loss'])

    def spectrum(self, shape, surface_point, bound):
        """Returns the counts histogram as Spectrum(bins, counts), or None
        when no matching photons were logged on this surface."""
        key = shape.surface_identifier(surface_point)
        if key not in self.store:
            return None
        entries = self.store[key]
        if len(entries) == 0:
            return None
        # Bug fix: integer comparisons now use == instead of `is`
        # (identity), and the two histogram branches are merged -- the
        # range expression below also covers the single-wavelength case.
        # Also stopped shadowing the builtins min/max with unused locals.
        wavelengths = [float(entry[1]) for entry in entries if entry[2] == bound]
        if len(wavelengths) == 0:
            return None
        wavelengths = np.array(wavelengths)
        bins = np.arange(np.floor(wavelengths.min() - 1),
                         np.ceil(wavelengths.max() + 2))
        freq, bins = np.histogram(wavelengths, bins=bins)
        return Spectrum(bins[0:-1], freq)

    def reabs(self, shape, surface_point, bound):
        """
        Returns a list where list[i] contains the number of surface photons
        that experienced i re-absorptions. Length is ten by default
        (photons with up to 9 re-absorptions recorded) and the list is
        extended when necessary.
        """
        reabs_list = [0] * 10
        key = shape.surface_identifier(surface_point)
        if key not in self.store:
            return reabs_list
        for entry in self.store[key]:
            if entry[2] == bound:
                number_reabs = entry[3]
                # Extend in case reabs_list is not sufficiently long.
                while len(reabs_list) < number_reabs + 1:
                    reabs_list.append(0)
                reabs_list[number_reabs] += 1
        return reabs_list

    def loss_reabs(self):
        """
        Returns a list where list[i] contains the number of LOST photons
        that experienced i re-absorptions. Length is ten by default and
        the list is extended when necessary.
        """
        reabs_list = [0] * 10
        if 'loss' not in self.store:
            return reabs_list
        for entry in self.store['loss']:
            number_reabs = entry[3]
            while len(reabs_list) < number_reabs + 1:
                reabs_list.append(0)
            reabs_list[number_reabs] += 1
        return reabs_list
class Detector(Register):
    """Abstract base for solar-cell-like devices.

    Behaves exactly like a Register; subclasses are the devices that are
    expected to deactivate photons on impact.
    """

    def __init__(self):
        super(Detector, self).__init__()
class SimpleCell(Detector):
    """A SimpleCell is a solar cell with a perfect anti-reflective coating.

    Args:
      finiteplane: the plane geometry that forms the active surface.
    """

    def __init__(self, finiteplane):
        # Bug fix: the original called super(Detector, self).__init__(),
        # which skips Detector in the MRO; super(SimpleCell, ...) is the
        # correct cooperative call.
        super(SimpleCell, self).__init__()
        self.shape = finiteplane
        self.name = "cell"
        # Perfect AR coating: no material model attached.
        self.material = None
class Coating(Register):
    """
    Overview:
    A coating device is a shape that contains a reflective material which
    may have a spectral and angular dependent reflectivity.

    Details:
    When a ray hits an object, the Fresnel equations are normally used to
    determine if the ray continues on its path or is reflected. Coatings
    are special objects that supply their own reflectivity instead.
    """

    def __init__(self, reflectivity=None, shape=None, refractive_index=1.5):
        super(Coating, self).__init__()
        self.reflectivity = reflectivity
        self.refractive_index = refractive_index
        self.shape = shape
        self.name = "COATING"
        self.material = ReflectiveMaterial(reflectivity, refractive_index=refractive_index)
        # Bug fix: guard against shape=None (the default) -- the original
        # reached shape.origin and raised AttributeError when no shape was
        # supplied. Polygons carry no origin/extent, so they are skipped.
        if self.shape is not None and not isinstance(self.shape, Polygon):
            self.origin = self.shape.origin
            self.size = np.abs(self.shape.extent - self.shape.origin)
class Bounds(Register):
    """The outermost simulation volume: a large box of plain air with
    refractive index 1."""

    def __init__(self):
        super(Bounds, self).__init__()
        self.name = "BOUNDS"
        self.material = Material()
        self.shape = Box(origin=(-5, -5, -5), extent=(5, 5, 5))
class Rod(Register):
    """A cylindrical rod of simple luminescent material."""

    def __init__(self, bandgap=555, radius=1, length=1):
        super(Rod, self).__init__()
        self.material = SimpleMaterial(bandgap)
        self.shape = Cylinder(radius, length)
class Prism(Register):
    """A triangular prism built by CSG-subtracting two tilted cutting
    boxes from a bounding box.

    Args:
      bandgap: bandgap wavelength passed to SimpleMaterial.
      base: length of the triangle base along x.
      alpha, beta: base angles of the triangle (radians).
      length: extrusion length along z.
    """

    def __init__(self, bandgap=555, base=1, alpha=np.pi/3, beta=np.pi/3, length=1):
        super(Prism, self).__init__()
        # Apex height of a triangle with base `base` and base angles
        # alpha/beta: h = base / (cot(alpha) + cot(beta)).
        # NOTE(review): the original computed base*(cot(alpha)+cot(alpha)),
        # repeating alpha and multiplying instead of dividing -- confirm
        # the intended geometry.
        h = base / (1 / np.tan(alpha) + 1 / np.tan(beta))
        box0 = Box(origin=(0, 0, 0), extent=(base, h, length))
        # Cutting box for the left slope.
        box1 = Box(origin=(0, 0, 0), extent=(h / np.sin(alpha), h, length))
        # Bug fix: the transformations module is imported as `tf`; the
        # original referenced an undefined name `trans` (NameError).
        box1.append_transform(tf.rotation_matrix(alpha, (0, 0, 1)))
        # Cutting box for the right slope.
        # Bug fix: the original passed a 4-tuple extent; a Box extent is a
        # 3-vector -- TODO confirm the intended x extent.
        box2 = Box(origin=(base, 0, 0), extent=(base + h / np.sin(beta), h, length))
        box2.append_transform(tf.rotation_matrix(np.pi / 2 - beta, (0, 0, 1)))
        step1 = CSGsub(box0, box1)
        step2 = CSGsub(step1, box2)
        self.shape = step2
        self.material = SimpleMaterial(bandgap)
class LSC(Register):
    """A luminescent solar concentrator: an axis-aligned box filled with
    SimpleMaterial."""

    def __init__(self, bandgap=555, origin=(0, 0, 0), size=(1, 1, 1)):
        super(LSC, self).__init__()
        lower = np.array(origin)
        edges = np.array(size)
        self.origin = lower
        self.size = edges
        self.shape = Box(origin=origin, extent=lower + edges)
        self.material = SimpleMaterial(bandgap)
        self.name = "LSC"
        # 16/03/10: surfaces with a solar cell attached are assumed index
        # matched so that every ray hitting a collection edge is counted,
        # e.g. index_matched_surfaces = ['top', 'bottom'].
        self.index_matched_surfaces = []
class Collector(Register):
    """Collector implementation: an axis-aligned box of SimpleMaterial."""

    def __init__(self, bandgap=555, origin=(0, 0, 0), size=(1, 1, 1)):
        super(Collector, self).__init__()
        self.origin = np.array(origin)   # lower corner of the box
        self.size = np.array(size)       # edge lengths along x, y, z
        self.shape = Box(origin=origin, extent=np.array(origin) + np.array(size))
        self.material = SimpleMaterial(bandgap)
        # NOTE(review): name is "LSC", apparently copy-pasted from the LSC
        # class above -- confirm whether callers key off this value before
        # renaming it to "Collector".
        self.name = "LSC"
class RayBin(Collector):
    """A device that erases any ray hitting it (e.g. a solar cell)."""

    def __init__(self, bandgap=555, origin=(0, 0, 0), size=(1, 1, 1)):
        # Parent initialiser runs with its defaults; every attribute is
        # then overwritten below.
        super(RayBin, self).__init__()
        self.name = "RayBin"
        self.origin = np.array(origin)
        self.size = np.array(size)
        self.material = SimpleMaterial(bandgap)
        self.shape = Box(origin=origin, extent=np.array(origin) + np.array(size))
class PlanarMirror(Register):
    """A planar mirror whose reflectivity may be constant or wavelength
    dependent, but has no angular dependence."""

    def __init__(self, reflectivity=1.0, origin=(0, 0, 0), size=(1, 1, 0.001)):
        super(PlanarMirror, self).__init__()
        corner = np.array(origin)
        self.reflectivity = reflectivity
        self.shape = Box(origin=corner, extent=corner + np.array(size))
        self.material = ReflectiveMaterial(reflectivity)
class Face(Register):
"""General 2D object for ray tracing surfaces."""
def __init__(self, reflectivity=1.0, transmittance=-1, shape=Polygon([(0,0,0), (1,0,0), (1,1,0), (0,1,0)])):
super(Face, self).__init__()
assert reflectivity + transmittance < 1, "reflectivity + transmittance of Face device must be smaller than 1.0"
self.reflectivity = reflectivity
#if reflectivity -> ray reflected, if transmittance | |
documentation below.
:param int priority: Priority used when determining the order of rule execution. Lower values are executed first. If not provided list order will be used.
:param bool terminates: Terminates indicates that if this rule is true no further rules should be executed. Note: setting a fixed_response forces this field to true.
"""
pulumi.set(__self__, "name", name)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if fixed_response is not None:
pulumi.set(__self__, "fixed_response", fixed_response)
if overrides is not None:
pulumi.set(__self__, "overrides", overrides)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if terminates is not None:
pulumi.set(__self__, "terminates", terminates)
@property
@pulumi.getter
def name(self) -> str:
    """Human readable name for this rule."""
    return pulumi.get(self, "name")
@property
@pulumi.getter
def condition(self) -> Optional[str]:
    """
    The statement to evaluate to determine if this rule's effects should be
    applied. An empty condition is always true. See
    [load balancing rules](https://developers.cloudflare.com/load-balancing/understand-basics/load-balancing-rules).
    """
    return pulumi.get(self, "condition")
@property
@pulumi.getter
def disabled(self) -> Optional[bool]:
    """Whether this rule is disabled; a disabled rule will not be executed."""
    return pulumi.get(self, "disabled")
@property
@pulumi.getter(name="fixedResponse")
def fixed_response(self) -> Optional['outputs.LoadBalancerRuleFixedResponse']:
    """
    Settings for a HTTP response to return directly to the eyeball if the
    condition is true. Note: overrides or fixed_response must be set. See
    the field documentation below.
    """
    return pulumi.get(self, "fixed_response")
@property
@pulumi.getter
def overrides(self) -> Optional[Sequence['outputs.LoadBalancerRuleOverride']]:
    """
    The Load Balancer settings to alter if this rule's condition is true.
    Note: overrides or fixed_response must be set. See the field
    documentation below.
    """
    return pulumi.get(self, "overrides")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
    """
    Priority used when determining the order of rule execution. Lower
    values are executed first. If not provided, list order will be used.
    """
    return pulumi.get(self, "priority")
@property
@pulumi.getter
def terminates(self) -> Optional[bool]:
    """
    Terminates indicates that if this rule is true no further rules should
    be executed. Note: setting a fixed_response forces this field to true.
    """
    return pulumi.get(self, "terminates")
@pulumi.output_type
class LoadBalancerRuleFixedResponse(dict):
    # Auto-generated Pulumi output type. Instances are dict-backed;
    # snake_case property getters are the supported access path, and the
    # legacy camelCase dict keys trigger a deprecation warning.

    @staticmethod
    def __key_warning(key: str):
        # Map a legacy camelCase key to its snake_case property name.
        suggest = None
        if key == "contentType":
            suggest = "content_type"
        elif key == "messageBody":
            suggest = "message_body"
        elif key == "statusCode":
            suggest = "status_code"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRuleFixedResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleFixedResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleFixedResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 content_type: Optional[str] = None,
                 location: Optional[str] = None,
                 message_body: Optional[str] = None,
                 status_code: Optional[int] = None):
        """
        :param str content_type: The value of the HTTP context-type header for this fixed response.
        :param str location: The value of the HTTP location header for this fixed response.
        :param str message_body: The text used as the html body for this fixed response.
        :param int status_code: The HTTP status code used for this fixed response.
        """
        # Only set the keys that were actually provided.
        if content_type is not None:
            pulumi.set(__self__, "content_type", content_type)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if message_body is not None:
            pulumi.set(__self__, "message_body", message_body)
        if status_code is not None:
            pulumi.set(__self__, "status_code", status_code)

    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> Optional[str]:
        """
        The value of the HTTP context-type header for this fixed response.
        """
        return pulumi.get(self, "content_type")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The value of the HTTP location header for this fixed response.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="messageBody")
    def message_body(self) -> Optional[str]:
        """
        The text used as the html body for this fixed response.
        """
        return pulumi.get(self, "message_body")

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[int]:
        """
        The HTTP status code used for this fixed response.
        """
        return pulumi.get(self, "status_code")
@pulumi.output_type
class LoadBalancerRuleOverride(dict):
    # Auto-generated Pulumi output type. Instances are dict-backed;
    # snake_case property getters are the supported access path, and the
    # legacy camelCase dict keys trigger a deprecation warning.

    @staticmethod
    def __key_warning(key: str):
        # Map a legacy camelCase key to its snake_case property name.
        suggest = None
        if key == "defaultPools":
            suggest = "default_pools"
        elif key == "fallbackPool":
            suggest = "fallback_pool"
        elif key == "popPools":
            suggest = "pop_pools"
        elif key == "regionPools":
            suggest = "region_pools"
        elif key == "sessionAffinity":
            suggest = "session_affinity"
        elif key == "sessionAffinityAttributes":
            suggest = "session_affinity_attributes"
        elif key == "sessionAffinityTtl":
            suggest = "session_affinity_ttl"
        elif key == "steeringPolicy":
            suggest = "steering_policy"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRuleOverride. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleOverride.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleOverride.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 default_pools: Optional[Sequence[str]] = None,
                 fallback_pool: Optional[str] = None,
                 pop_pools: Optional[Sequence['outputs.LoadBalancerRuleOverridePopPool']] = None,
                 region_pools: Optional[Sequence['outputs.LoadBalancerRuleOverrideRegionPool']] = None,
                 session_affinity: Optional[str] = None,
                 session_affinity_attributes: Optional[Mapping[str, str]] = None,
                 session_affinity_ttl: Optional[int] = None,
                 steering_policy: Optional[str] = None,
                 ttl: Optional[int] = None):
        """
        :param Sequence[str] default_pools: See default_pool_ids above.
        :param str fallback_pool: See fallback_pool_id above.
        :param Sequence['LoadBalancerRuleOverridePopPoolArgs'] pop_pools: See pop_pools above.
        :param Sequence['LoadBalancerRuleOverrideRegionPoolArgs'] region_pools: See region_pools above.
        :param str session_affinity: See field above.
        :param Mapping[str, str] session_affinity_attributes: See field above.
        :param int session_affinity_ttl: See field above.
        :param str steering_policy: See field above.
        :param int ttl: See field above.
        """
        # Only set the keys that were actually provided.
        if default_pools is not None:
            pulumi.set(__self__, "default_pools", default_pools)
        if fallback_pool is not None:
            pulumi.set(__self__, "fallback_pool", fallback_pool)
        if pop_pools is not None:
            pulumi.set(__self__, "pop_pools", pop_pools)
        if region_pools is not None:
            pulumi.set(__self__, "region_pools", region_pools)
        if session_affinity is not None:
            pulumi.set(__self__, "session_affinity", session_affinity)
        if session_affinity_attributes is not None:
            pulumi.set(__self__, "session_affinity_attributes", session_affinity_attributes)
        if session_affinity_ttl is not None:
            pulumi.set(__self__, "session_affinity_ttl", session_affinity_ttl)
        if steering_policy is not None:
            pulumi.set(__self__, "steering_policy", steering_policy)
        if ttl is not None:
            pulumi.set(__self__, "ttl", ttl)

    @property
    @pulumi.getter(name="defaultPools")
    def default_pools(self) -> Optional[Sequence[str]]:
        """
        See default_pool_ids above.
        """
        return pulumi.get(self, "default_pools")

    @property
    @pulumi.getter(name="fallbackPool")
    def fallback_pool(self) -> Optional[str]:
        """
        See fallback_pool_id above.
        """
        return pulumi.get(self, "fallback_pool")

    @property
    @pulumi.getter(name="popPools")
    def pop_pools(self) -> Optional[Sequence['outputs.LoadBalancerRuleOverridePopPool']]:
        """
        See pop_pools above.
        """
        return pulumi.get(self, "pop_pools")

    @property
    @pulumi.getter(name="regionPools")
    def region_pools(self) -> Optional[Sequence['outputs.LoadBalancerRuleOverrideRegionPool']]:
        """
        See region_pools above.
        """
        return pulumi.get(self, "region_pools")

    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[str]:
        """
        See field above.
        """
        return pulumi.get(self, "session_affinity")

    @property
    @pulumi.getter(name="sessionAffinityAttributes")
    def session_affinity_attributes(self) -> Optional[Mapping[str, str]]:
        """
        See field above.
        """
        return pulumi.get(self, "session_affinity_attributes")

    @property
    @pulumi.getter(name="sessionAffinityTtl")
    def session_affinity_ttl(self) -> Optional[int]:
        """
        See field above.
        """
        return pulumi.get(self, "session_affinity_ttl")

    @property
    @pulumi.getter(name="steeringPolicy")
    def steering_policy(self) -> Optional[str]:
        """
        See field above.
        """
        return pulumi.get(self, "steering_policy")

    @property
    @pulumi.getter
    def ttl(self) -> Optional[int]:
        """
        See field above.
        """
        return pulumi.get(self, "ttl")
@pulumi.output_type
class LoadBalancerRuleOverridePopPool(dict):
    # Auto-generated Pulumi output type. Instances are dict-backed;
    # snake_case property getters are the supported access path, and the
    # legacy camelCase dict keys trigger a deprecation warning.

    @staticmethod
    def __key_warning(key: str):
        # Map a legacy camelCase key to its snake_case property name.
        suggest = None
        if key == "poolIds":
            suggest = "pool_ids"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRuleOverridePopPool. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleOverridePopPool.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access before delegating to the dict.
        LoadBalancerRuleOverridePopPool.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 pool_ids: Sequence[str],
                 pop: str):
        """
        :param Sequence[str] pool_ids: A list of pool IDs in failover priority to use for traffic reaching the given PoP.
        :param str pop: A 3-letter code for the Point-of-Presence. Allowed values can be found in the list of datacenters on the [status page](https://www.cloudflarestatus.com/). Multiple entries should not be specified with the same PoP.
        """
        pulumi.set(__self__, "pool_ids", pool_ids)
        pulumi.set(__self__, "pop", pop)

    @property
    @pulumi.getter(name="poolIds")
    def pool_ids(self) -> Sequence[str]:
        """
        A list of pool IDs in failover priority to use for traffic reaching the given PoP.
        """
        return pulumi.get(self, "pool_ids")

    @property
    @pulumi.getter
    def pop(self) -> str:
        """
        A 3-letter code for the Point-of-Presence. Allowed values can be found in the list of datacenters on the [status page](https://www.cloudflarestatus.com/). Multiple entries should not be specified with the same PoP.
        """
        return pulumi.get(self, "pop")
@pulumi.output_type
class LoadBalancerRuleOverrideRegionPool(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "poolIds":
suggest = "pool_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRuleOverrideRegionPool. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerRuleOverrideRegionPool.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerRuleOverrideRegionPool.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
pool_ids: Sequence[str],
region: str):
"""
:param Sequence[str] pool_ids: A list of pool IDs in failover priority to use for traffic reaching the given PoP.
:param str region: A region code which must be in the list defined [here](https://support.cloudflare.com/hc/en-us/articles/115000540888-Load-Balancing-Geographic-Regions). Multiple entries should not be specified with the same region.
"""
pulumi.set(__self__, "pool_ids", pool_ids)
pulumi.set(__self__, "region", region)
@property
@pulumi.getter(name="poolIds")
def pool_ids(self) | |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A library to generate and store the manifests for cros builders to use.
"""
import cPickle
import fnmatch
import logging
import os
import re
import shutil
import tempfile
from chromite.buildbot import constants, repository
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import gs
from chromite.lib import osutils
# Google Storage bucket holding the generated manifests.
MANIFEST_VERSIONS_URL = 'gs://chromeos-manifest-versions'
# Location under the bucket where per-builder status is stored.
BUILD_STATUS_URL = '%s/builder-status' % MANIFEST_VERSIONS_URL
# Temporary local branch used when committing/pushing manifest changes.
PUSH_BRANCH = 'temp_auto_checkin_branch'
# Retry budget for operations in this module -- usage not visible in this
# chunk; confirm against callers before changing.
NUM_RETRIES = 20
class VersionUpdateException(Exception):
    """Raised when the version file cannot be updated."""
class StatusUpdateException(Exception):
    """Raised when the builder status cannot be updated."""
class GenerateBuildSpecException(Exception):
    """Raised when a buildspec cannot be generated for the build."""
def RefreshManifestCheckout(manifest_dir, manifest_repo):
    """Checks out manifest-versions into the manifest directory.

    If a repository is already present, it will be cleansed of any local
    changes and restored to its pristine state, checking out the origin.

    Args:
      manifest_dir: local directory for the manifest-versions checkout.
      manifest_repo: URL of the remote the checkout must track.
    """
    reinitialize = True
    if os.path.exists(manifest_dir):
        # Only reuse the existing checkout when it tracks the expected
        # remote; error_code_ok tolerates a missing/invalid git config.
        result = cros_build_lib.RunCommand(['git', 'config', 'remote.origin.url'],
                                           cwd=manifest_dir, print_cmd=False,
                                           redirect_stdout=True, error_code_ok=True)
        if (result.returncode == 0 and
                result.output.rstrip() == manifest_repo):
            logging.info('Updating manifest-versions checkout.')
            try:
                # Keep the repo compact, then discard local changes and
                # sync to upstream.
                git.RunGit(manifest_dir, ['gc', '--auto'])
                git.CleanAndCheckoutUpstream(manifest_dir)
            except cros_build_lib.RunCommandError:
                # Fall through to reinitialize: re-clone from scratch.
                logging.warning('Could not update manifest-versions checkout.')
            else:
                reinitialize = False
    else:
        logging.info('No manifest-versions checkout exists at %s', manifest_dir)

    if reinitialize:
        logging.info('Cloning fresh manifest-versions checkout.')
        _RemoveDirs(manifest_dir)
        repository.CloneGitRepo(manifest_dir, manifest_repo)
def _PushGitChanges(git_repo, message, dry_run=True):
    """Push the final commit into the git repo.

    Args:
      git_repo: git repo to push
      message: Commit message
      dry_run: If true, don't actually push changes to the server
    """
    remote, push_branch = git.GetTrackingBranch(
        git_repo, for_checkout=False, for_push=True)
    # Stage everything, including deletions.
    git.RunGit(git_repo, ['add', '-A'])

    # It's possible that while we are running on dry_run, someone has
    # already committed our change.
    try:
        git.RunGit(git_repo, ['commit', '-m', message])
    except cros_build_lib.RunCommandError:
        if dry_run:
            return
        raise

    push_cmd = ['push', remote, '%s:%s' % (PUSH_BRANCH, push_branch)]
    if dry_run:
        # --dry-run keeps the server untouched; --force avoids spurious
        # non-fast-forward failures during the simulation.
        push_cmd.extend(['--dry-run', '--force'])
    git.RunGit(git_repo, push_cmd)
def _RemoveDirs(dir_name):
"""Remove directories recursively, if they exist"""
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
def CreateSymlink(src_file, dest_file):
    """Create a relative symlink at dest_file pointing to src_file.

    Any existing file at dest_file is removed first and the destination
    directory is created if needed, which makes repeated calls safe when
    dest_file is used to track the status of src_file.

    Args:
      src_file: source for the symlink
      dest_file: destination for the symlink
    """
    link_dir = os.path.dirname(dest_file)
    osutils.SafeUnlink(dest_file)
    osutils.SafeMakedirs(link_dir)
    relative_target = os.path.relpath(src_file, link_dir)
    logging.debug('Linking %s to %s', relative_target, dest_file)
    os.symlink(relative_target, dest_file)
class VersionInfo(object):
    """Class to encapsulate the Chrome OS version info scheme.

    You can instantiate this class in three ways.
    1) using a version file, specifically chromeos_version.sh,
       which contains the version information.
    2) passing in a string with the 3 version components.
    3) using a source repo and calling from_repo().

    Args:
      version_string: Optional 3 component version string to parse. Contains:
          build_number: release build number.
          branch_build_number: current build number on a branch.
          patch_number: patch number.
      chrome_branch: If version_string specified, specify chrome_branch i.e. 13.
      incr_type: How we should increment this version - build|branch|patch
      version_file: version file location.
    """

    # Pattern for matching build name format.  Includes chrome branch hack.
    # Bug fix: raw string with escaped dots -- the original's bare '.'
    # matched any character, so e.g. '1a2b3' parsed as a version.
    VER_PATTERN = r'(\d+)\.(\d+)\.(\d+)(?:-R(\d+))*'

    def __init__(self, version_string=None, chrome_branch=None,
                 incr_type='build', version_file=None):
        if version_file:
            self.version_file = version_file
            logging.debug('Using VERSION _FILE = %s', version_file)
            self._LoadFromFile()
        else:
            match = re.search(self.VER_PATTERN, version_string)
            self.build_number = match.group(1)
            self.branch_build_number = match.group(2)
            self.patch_number = match.group(3)
            self.chrome_branch = chrome_branch
            self.version_file = None
        self.incr_type = incr_type

    @classmethod
    def from_repo(cls, source_repo):
        """Alternate constructor: read the version file inside source_repo."""
        return cls(version_file=os.path.join(source_repo, constants.VERSION_FILE))

    def _LoadFromFile(self):
        """Read the version file and set the version components"""
        with open(self.version_file, 'r') as version_fh:
            for line in version_fh:
                if not line.strip():
                    continue

                match = self.FindValue('CHROME_BRANCH', line)
                if match:
                    self.chrome_branch = match
                    logging.debug('Set the Chrome branch number to:%s',
                                  self.chrome_branch)
                    continue

                match = self.FindValue('CHROMEOS_BUILD', line)
                if match:
                    self.build_number = match
                    logging.debug('Set the build version to:%s', self.build_number)
                    continue

                match = self.FindValue('CHROMEOS_BRANCH', line)
                if match:
                    self.branch_build_number = match
                    logging.debug('Set the branch version to:%s',
                                  self.branch_build_number)
                    continue

                match = self.FindValue('CHROMEOS_PATCH', line)
                if match:
                    self.patch_number = match
                    logging.debug('Set the patch version to:%s', self.patch_number)
                    continue

        logging.debug(self.VersionString())

    def FindValue(self, key, line):
        """Given the key find the value from the line, if it finds key = value

        Args:
          key: key to look for
          line: string to search
        Returns:
          None: on a non match
          value: for a matching key
        """
        # Raw string so \s and \d are real regex escapes, not accidental
        # string escapes.
        regex = r'.*(%s)\s*=\s*(\d+)$' % key
        match = re.match(regex, line)
        if match:
            return match.group(2)
        return None

    def IncrementVersion(self, message, dry_run):
        """Updates the version file by incrementing the patch component.

        Args:
          message: Commit message to use when incrementing the version.
          dry_run: Git dry_run.
        Raises:
          VersionUpdateException: when no version_file is associated or the
              incr_type is not one of 'build'/'branch'.
        """
        def IncrementOldValue(line, key, new_value):
            """Change key to new_value if found on line. Returns True if changed."""
            old_value = self.FindValue(key, line)
            if old_value:
                temp_fh.write(line.replace(old_value, new_value, 1))
                return True
            else:
                return False

        if not self.version_file:
            raise VersionUpdateException('Cannot call IncrementVersion without '
                                         'an associated version_file')
        if not self.incr_type or self.incr_type not in ('build', 'branch'):
            raise VersionUpdateException('Need to specify the part of the version to'
                                         ' increment')

        if self.incr_type == 'build':
            self.build_number = str(int(self.build_number) + 1)
            self.branch_build_number = '0'
            self.patch_number = '0'
        elif self.patch_number == '0':
            self.branch_build_number = str(int(self.branch_build_number) + 1)
        else:
            self.patch_number = str(int(self.patch_number) + 1)

        # Bug fix: mkstemp returns (fd, path); the original discarded the
        # open file descriptor, leaking it. Close it and use the path.
        temp_fd, temp_file = tempfile.mkstemp(suffix='mvp', prefix='tmp',
                                              dir=None, text=True)
        os.close(temp_fd)

        # Copy the version file line by line, rewriting the component that
        # was incremented above. The `with` blocks close both handles (the
        # original's explicit close() calls inside `with` were redundant).
        with open(self.version_file, 'r') as source_version_fh:
            with open(temp_file, 'w') as temp_fh:
                for line in source_version_fh:
                    if IncrementOldValue(line, 'CHROMEOS_BUILD', self.build_number):
                        pass
                    elif IncrementOldValue(line, 'CHROMEOS_BRANCH',
                                           self.branch_build_number):
                        pass
                    elif IncrementOldValue(line, 'CHROMEOS_PATCH', self.patch_number):
                        pass
                    else:
                        temp_fh.write(line)

        repo_dir = os.path.dirname(self.version_file)

        try:
            git.CreatePushBranch(PUSH_BRANCH, repo_dir)
            shutil.copyfile(temp_file, self.version_file)
            os.unlink(temp_file)
            _PushGitChanges(repo_dir, message, dry_run=dry_run)
        finally:
            # Update to the remote version that contains our changes. This is
            # needed to ensure that we don't build a release using a local
            # commit.
            git.CleanAndCheckoutUpstream(repo_dir)

        return self.VersionString()

    def VersionString(self):
        """Returns the 'build.branch.patch' version string."""
        return '%s.%s.%s' % (self.build_number, self.branch_build_number,
                             self.patch_number)

    @classmethod
    def VersionCompare(cls, version_string):
        """Useful method to return a comparable version of a LKGM string."""
        info = cls(version_string)
        # list(...) so the result is a concrete list under both Python 2
        # and Python 3 (py2 map() already returned a list).
        return list(map(int, [info.build_number, info.branch_build_number,
                              info.patch_number]))

    def BuildPrefix(self):
        """Returns the build prefix to match the buildspecs in manifest-versions"""
        if self.incr_type == 'branch':
            if self.patch_number == '0':
                return '%s.' % self.build_number
            else:
                return '%s.%s.' % (self.build_number, self.branch_build_number)
        # Default to build incr_type.
        return ''
class BuilderStatus(object):
    """Represents the outcome of a single build."""

    # The states a build can be in.
    STATUS_FAILED = 'fail'
    STATUS_PASSED = 'pass'
    STATUS_INFLIGHT = 'inflight'
    STATUS_COMPLETED = [STATUS_PASSED, STATUS_FAILED]

    def __init__(self, status, message):
        self.status = status
        self.message = message

    # Helper predicates so callers never compare raw strings.
    def Failed(self):
        """True when the builder failed."""
        return self.status == BuilderStatus.STATUS_FAILED

    def Passed(self):
        """True when the builder passed."""
        return self.status == BuilderStatus.STATUS_PASSED

    def Inflight(self):
        """True while the builder is still running."""
        return self.status == BuilderStatus.STATUS_INFLIGHT

    def Completed(self):
        """True once the builder finished, whether it passed or failed."""
        return self.status in BuilderStatus.STATUS_COMPLETED

    @classmethod
    def GetCompletedStatus(cls, success):
        """Map a boolean success flag onto the matching status constant.

        Args:
          success: Whether the build was successful or not.
        """
        return cls.STATUS_PASSED if success else cls.STATUS_FAILED
class BuildSpecsManager(object):
"""A Class to manage buildspecs and their states."""
def __init__(self, source_repo, manifest_repo, build_name, incr_type, force,
             branch, dry_run=True, master=False):
    """Set up a manager for buildspecs and their lifecycle state.

    Args:
      source_repo: Repository object for the source code.
      manifest_repo: Manifest repository for manifest versions / buildspecs.
      build_name: Identifier for the build. Must match cbuildbot_config.
      incr_type: How we should increment this version - build|branch|patch
      force: Create a new manifest even if there are no changes.
      branch: Branch this builder is running on.
      dry_run: Whether we actually commit changes we make or not.
      master: Whether we are the master builder.
    """
    self.cros_source = source_repo
    buildroot = source_repo.directory
    # Internal (Gerrit-int) manifests live in their own checkout directory.
    is_internal = manifest_repo.startswith(constants.GERRIT_INT_SSH_URL)
    subdir = 'manifest-versions-internal' if is_internal else 'manifest-versions'
    self.manifest_dir = os.path.join(buildroot, subdir)
    self.manifest_repo = manifest_repo
    self.build_name = build_name
    self.incr_type = incr_type
    self.force = force
    self.branch = branch
    self.dry_run = dry_run
    self.master = master
    # Directories and specifications are populated once the specs are loaded.
    self.all_specs_dir = None
    self.pass_dir = None
    self.fail_dir = None
    # Path to specs for builder. Requires passing %(builder)s.
    self.specs_for_builder = None
    # Spec bookkeeping.
    self.latest = None
    self._latest_status = None
    self.latest_unprocessed = None
    self.compare_versions_fn = VersionInfo.VersionCompare
    self.current_version = None
    self.rel_working_dir = ''
def _LatestSpecFromList(self, specs):
"""Find the latest spec in a list of specs.
Args:
specs: List | |
not None:
for patch in self.SELECTED_REGIONS: ## TODO only draws on top
self.draw_selected_patch(patch)
self.canvas.draw()
self.customToolbar.limits = [self.aplot.get_xlim(),self.aplot.get_ylim()]
def recalcTPrate(self):  # TODO: added while adapting to interface mode
    """Recompute the true-positive-rate label shown beside the plot.

    Counts matrix cells coded as TP (value 1.) vs FP (value 0.1) in the
    currently displayed data and writes the percentage into the
    ``self.TPrate`` Tk variable, then shows ``self.TP_frame``.
    NOTE(review): divides by TP+FP, so this raises ZeroDivisionError when
    no cell is marked -- confirm callers guarantee at least one marked cell.
    """
    if self.interface_mode.get():
        if self.ds_over.get():
            # Interface overlay: the whole (non-symmetric) matrix counts.
            TPrate = (len(self.data2[self.data2 == 1.]),
                      len(self.data2[self.data2 == 0.1]))  # TP,FP ## TODO what to do with interchain contacts?
            self.TPrate.set("%5.2f%%" % (TPrate[0] * 100. / sum(TPrate)))
            self.TP_frame.grid(column=1, row=0, padx=10)
        else:  # TODO - still no TP rate without native contacts
            pass
    elif self.overlay_var.get():
        # Single-structure overlay: symmetric matrix, count the upper triangle only.
        TPrate = (len(self.data2[np.triu(self.data2) == 1.]),
                  len(self.data2[np.triu(self.data2) == 0.1]))  # TP,FP ## TODO what to do with interchain contacts?
        self.TPrate.set("%5.2f%%" % (TPrate[0] * 100. / sum(TPrate)))
        self.TP_frame.grid(column=1, row=0, padx=10)
    elif (self.recolor_by_trueness_var.get() or self.recolor_by_any_trueness.get()):
        # Trueness recolouring uses the same 1./0.1 coding in self.data2.
        TPrate = (len(self.data2[np.triu(self.data2) == 1.]),
                  len(self.data2[np.triu(self.data2) == 0.1]))  # TP,FP ## TODO what to do with interchain contacts?
        self.TPrate.set("%5.2f%%" % (TPrate[0] * 100. / sum(TPrate)))
        self.TP_frame.grid(column=1, row=0, padx=10)
    else:
        if self.comp_mode.get():
            # Comparison mode: TP = predicted contacts (upper triangle of
            # self.data above the slider threshold) that also appear among
            # native contacts (upper triangle of the transposed matrix);
            # the rate is TP / all-predicted.
            TPrate = 0
            all = 0
            mins = self.slider_min.get()
            TPrate = len(set(zip(*(map(list,np.where(np.triu(np.transpose(self.data))>0.))))) & set(zip(*(map(list,np.where(np.triu(self.data)>mins))))))
            all = len(set(zip(*(map(list,np.where(np.triu(self.data)>mins))))))
            self.TPrate.set("%5.2f%%" % (TPrate*100./all))
            #(TPrate*100./len(self.data[np.triu(self.data)>self.slider_min.get()])))
            self.TP_frame.grid(column=1, row=0, padx=10)
def tick_formatter_x(self, tick_val, tick_pos):
    """Format an x-axis tick: delegate to tick_formatter with x=1."""
    label = self.tick_formatter(int(tick_val), tick_pos, 1)
    return str(label)
def tick_formatter_y(self, tick_val, tick_pos):
    """Format a y-axis tick: delegate to tick_formatter with x=0."""
    label = self.tick_formatter(int(tick_val), tick_pos, 0)
    return str(label)
def tick_formatter(self, tick_val, tick_pos, x=1):
    """Translate an axis tick (matrix index) into a residue label.

    :param tick_val: tick position in matrix coordinates.
    :param tick_pos: tick index (unused; required by mpl FuncFormatter).
    :param x: 1 for the x axis (struct_1), 0 for the y axis (struct_2);
        only meaningful in interface mode.
    :return: label string, or "" when the tick falls outside the structure.
    """
    try:
        if self.interface_mode.get():
            # Interface map: the two axes belong to different chains.
            if x:
                return self.current_structure_obj_var.struct_1.translations.struct2pdb(tick_val)
            else:
                return self.current_structure_obj_var.struct_2.translations.struct2pdb(tick_val)
        elif self.restrict_to_structure_var.get() or self.overlay_var.get():
            return self.current_structure_obj_var.translations.struct2pdb(tick_val)
        else:
            return self.current_structure_obj_var.translations.singleplot_cursor(tick_val)
    except IndexError:
        # Tick outside the mapped residue range -- leave it unlabelled.
        return ""
def makeDSplot(self, overlay=0):
    """Draw the double-structure (two-chain interface) contact map.

    :param overlay: truthy to overlay predicted contacts on the native
        interface map (also flips the ds_normal/ds_over Tk flags).
    Side effects: rebuilds self.aplot inside self.FIGURE, refreshes
    self.data/self.data2 and self.cmapa/self.norm, then redraws the
    selected regions and their PyMOL bonds.
    """
    if overlay:
        self.ds_normal.set(0)
        self.ds_over.set(1)
    self.clear_pymol_bonds()
    # A remembered plot means we are re-entering an existing view; keep
    # the user's selection in that case.
    if self.plot_to_remember is None:
        self.SELECTED_REGIONS = []
    self.FIGURE.clf()
    self.SS_plots = []
    self.aplot = plt.subplot2grid((60, 60), (1, 5), colspan=54,
                                  rowspan=54)  # ,fig=FIGURE)#,fig=0)#FIGURE)# add_subplot(211)
    self.aplot.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(self.tick_formatter_x))
    self.aplot.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(self.tick_formatter_y))
    my_struct = self.current_structure_obj_var
    if overlay:
        # Overlay mode: values < -0.75 mean "no data" and get masked out.
        data2 = my_struct.makeOLarray(self.DATA_BACKUP, distance=self.comp_distance.get(), restricted=True,
                                      nonwc=self.rna_nonwc_pairs.get(), vmin=self.slider_min.get(),
                                      state=self.current_state_var.get())
        cmapa = self.overlay_cmap
        cmapa.set_bad(color="white")
        cmapa.set_over(color="black")  # "red")
        cmapa.set_under(color="white")
        norm = self.overlay_norm
        data2 = np.ma.masked_where(data2 < -0.75, data2)
        self.data2 = data2
        heatmap = self.aplot.pcolorfast(data2, cmap=cmapa, norm=norm, vmin=0.)  # , vmax=vmax)
        # Skip the (expensive) TP-rate recount while the mouse button is held.
        if not self.HELD_LMB.get():
            self.recalcTPrate()
    else:
        self.data = my_struct.makeSSarray(self.DATA_BACKUP, comparison=self.comp_mode.get(), distance=self.comp_distance.get(),
                                          restricted=True, state=self.current_state_var.get(), nonwc=self.rna_nonwc_pairs.get())
        if self.colormap.get() == "BinaryTP":
            cmapa, norm = self.binaryColormap()
            cmapa.set_bad(color="0.75")
            cmapa.set_over(color="black")
            cmapa.set_under(color="white")
            heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, norm=norm, vmin=self.slider_min.get(),
                                            vmax=self.slider_max.get())
        else:
            norm = None
            cmapa = cm.get_cmap(self.colormap.get())
            cmapa.set_bad(color="0.75")
            cmapa.set_over(color="black")
            cmapa.set_under(color="white")
            self.aplot.pcolorfast(self.data, cmap=cmapa, vmin=self.slider_min.get(), vmax=self.slider_max.get())
        # NOTE(review): the colorbar is built from self.cmapa/self.norm,
        # i.e. the values of the *previous* redraw -- they are reassigned
        # only a few lines below.  Looks like a latent bug; confirm before
        # changing, self.cmapa may be seeded elsewhere.
        mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa,
                                  norm=self.norm,
                                  orientation='vertical')
        self.cmap_canvas.draw()
        if not self.HELD_LMB.get():
            self.recalcTPrate()
        self.data2 = None
    self.cmapa = cmapa
    self.norm = norm
    self.SS_plots += my_struct.plotSS(self.FIGURE, self.aplot)
    self.aplot.invert_yaxis()  # set_ylim(self.aplot.get_ylim()[::-1])
    self.aplot.set_xlabel("{} chain {}".format(my_struct.struct_1.objId, my_struct.struct_1.chain_simple))
    self.aplot.set_ylabel("{} chain {}".format(my_struct.struct_2.objId, my_struct.struct_2.chain_simple))
    for patch in self.SELECTED_REGIONS:  ## TODO only draws on top
        self.draw_selected_patch(patch)
        self.bonds_in_patches(patch[0], patch[1], [self.current_structure_obj_var], 0)
    self.canvas.draw()
    self.customToolbar.update()
def makeSSplot(self, *args, **kwargs):
    """Redraw the single-structure contact-map plot.

    Keyword flags (all optional):
      mesh        -- render with pcolormesh instead of pcolorfast (slower
                     but vector-export friendly).
      from_states -- redraw triggered by a state change; forces a TP-rate
                     recount even while the mouse button is held.
      from_overlay / from_restrict / from_native -- which view the user is
                     coming from; controls how the current selection is
                     remapped onto this plot's coordinates.
    Side effects: rebuilds self.aplot, refreshes self.data/self.data2 and
    self.cmapa/self.norm, redraws selections and their PyMOL bonds.
    """
    if self.interface_mode.get():
        # Interface mode shows a two-chain map instead.
        self.makeDSplot(overlay=self.ds_over.get())
        return
    mesh = False
    if 'mesh' in kwargs and kwargs['mesh']:
        mesh = True
    if self.map_structure_mode.get() == self.OPCJE[0]:
        # No structure selected yet -- nothing to draw.
        return
    self.clear_pymol_bonds()
    from_states = False
    if "from_states" in kwargs:
        from_states = 1
    if self.plot_to_remember is None:
        self.SELECTED_REGIONS = []
    elif 'from_overlay' in kwargs:
        if self.restrict_to_structure_var.get():
            pass  # same coordinate system -- selection carries over as-is
        else:
            self.mapSelectionBetweenPlots()
    elif 'from_restrict' in kwargs:
        self.mapSelectionBetweenPlots()
    elif 'from_native' in kwargs:
        if self.comp_mode.get():
            # Mirror each region across the diagonal so both the native
            # (lower) and predicted (upper) triangles stay selected.
            for reg in self.SELECTED_REGIONS:
                nreg = (reg[1], reg[0], reg[2])
                if nreg not in self.SELECTED_REGIONS:
                    self.SELECTED_REGIONS.append(nreg)
        else:
            # Keep only the regions that lie in the upper triangle.
            nregs = []
            for i in self.SELECTED_REGIONS:
                if i[1][0] < i[0][0]:
                    nregs.append(i)
            self.SELECTED_REGIONS = nregs
    self.FIGURE.clf()
    self.SS_plots = []
    restricted = self.restrict_to_structure_var.get()
    self.aplot = plt.subplot2grid((60, 60), (1, 5), colspan=54,
                                  rowspan=54)
    self.aplot.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(self.tick_formatter_x))
    self.aplot.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(self.tick_formatter_y))
    my_struct = self.current_structure_obj_var
    if self.overlay_var.get():
        # Overlay of predictions vs native contacts; < -0.75 means "no data".
        data2 = my_struct.makeOLarray(self.DATA_BACKUP, distance=self.comp_distance.get(), restricted=restricted,
                                      nonwc=self.rna_nonwc_pairs.get(), vmin=self.slider_min.get(),
                                      state=self.current_state_var.get())
        cmapa = self.overlay_cmap
        cmapa.set_bad(color="white")
        cmapa.set_over(color="black")  # "red")
        cmapa.set_under(color="white")
        norm = self.overlay_norm
        data2 = np.ma.masked_where(data2 < -0.75, data2)
        self.data2 = data2
        if mesh:
            heatmap = self.aplot.pcolormesh(data2, cmap=cmapa, norm=norm, vmin=0.)  # , vmax=vmax)
        else:
            heatmap = self.aplot.pcolorfast(data2, cmap=cmapa, norm=norm, vmin=0.)  # , vmax=vmax)
        if not self.HELD_LMB.get() or from_states:
            self.recalcTPrate()
    elif (self.recolor_by_trueness_var.get() or self.recolor_by_any_trueness.get()):  # TODO recalc only on change
        self.data = my_struct.makeSSarray(self.DATA_BACKUP, comparison=self.comp_mode.get(), distance=self.comp_distance.get(),
                                          restricted=restricted, state=self.current_state_var.get(), nonwc=self.rna_nonwc_pairs.get())
        data2 = np.array(self.data)
        data2[data2 < self.slider_min.get()] = -0.1  # below threshold -> treated as no contact
        data2 = my_struct.recolorSSarray(data2, self.slider_min.get(), distance_intra=self.comp_distance.get(),
                                         distance_inter=self.mark_on_similar_just_within_cutoff.get(),
                                         restricted=restricted, comparison=self.comp_mode.get(),
                                         all_combos=(1 + self.all_combos_var.get()) % 2, state=self.current_state_var.get(),
                                         any=self.recolor_by_any_trueness.get(), nonwc=self.rna_nonwc_pairs.get())
        data2[data2 == 3.] = 1.  # TODO for now inter/intra are exclusive
        cmapa = self.TP_cmap  # copy(cm.get_cmap("spring"))
        cmapa.set_bad(color="white")
        cmapa.set_over(color="black")  # "red")
        cmapa.set_under(color="white")
        norm = self.TP_norm  # None
        bad = []
        # Hide the categories the user toggled off
        # (1. = intra TP, 2. = inter TP, 0.1 = false positive).
        if not self.trueness_show_intra.get():
            data2[data2 == 1.] = -1.
        if not self.trueness_show_inter.get():
            data2[data2 == 2.] = -1.
        if not self.trueness_show_false.get():
            data2[data2 == 0.1] = -1.  ## TODO some combos?
        data2 = np.ma.masked_where(data2 < -0.75, data2)
        self.data2 = data2
        if mesh:
            heatmap = self.aplot.pcolormesh(data2, cmap=cmapa, norm=norm, vmin=0.)  # , vmax=vmax)
        else:
            heatmap = self.aplot.pcolorfast(data2, cmap=cmapa, norm=norm, vmin=0.)  # , vmax=vmax)
        if not self.HELD_LMB.get() or from_states:
            self.recalcTPrate()
    else:
        if not self.comp_mode.get():
            self.TP_frame.grid_forget()  # no comparison -> TP rate is meaningless
        self.data = my_struct.makeSSarray(self.DATA_BACKUP, comparison=self.comp_mode.get(), distance=self.comp_distance.get(),
                                          restricted=restricted, state=self.current_state_var.get(), nonwc=self.rna_nonwc_pairs.get())
        if self.colormap.get() == "BinaryTP":
            cmapa, norm = self.binaryColormap()
            cmapa.set_bad(color="0.75")
            cmapa.set_over(color="black")
            cmapa.set_under(color="white")
            if mesh:
                # BUG FIX: this branch previously called pcolorfast with the
                # exact same arguments as the else-branch, making the `mesh`
                # flag a no-op for the BinaryTP colormap.  Every other branch
                # pairs mesh with pcolormesh.
                heatmap = self.aplot.pcolormesh(self.data, cmap=cmapa, norm=norm, vmin=self.slider_min.get(),
                                                vmax=self.slider_max.get())
            else:
                heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, norm=norm, vmin=self.slider_min.get(),
                                                vmax=self.slider_max.get())
        else:
            norm = None
            cmapa = cm.get_cmap(self.colormap.get())
            cmapa.set_bad(color="0.75")
            cmapa.set_over(color="black")
            cmapa.set_under(color="white")
            if mesh:
                heatmap = self.aplot.pcolormesh(self.data, cmap=cmapa, vmin=self.slider_min.get(), vmax=self.slider_max.get())
            else:
                heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, vmin=self.slider_min.get(), vmax=self.slider_max.get())
        # NOTE(review): drawn with self.cmapa/self.norm from the *previous*
        # redraw (they are reassigned only below) -- looks like a latent bug,
        # confirm before changing since self.cmapa may be seeded elsewhere.
        mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa,
                                  norm=self.norm,
                                  orientation='vertical')
        self.cmap_canvas.draw()
        if not self.HELD_LMB.get() or from_states:
            self.recalcTPrate()
        self.data2 = None
    self.cmapa = cmapa
    self.norm = norm
    self.SS_plots += my_struct.plotSS(self.FIGURE, self.aplot, restricted=(restricted or self.overlay_var.get()))
    for patch in self.SELECTED_REGIONS:  ## TODO only draws on top
        self.draw_selected_patch(patch)
        self.bonds_in_patches(patch[0], patch[1], [self.current_structure_obj_var], 0)
    self.aplot.invert_yaxis()
    self.aplot.set_xlabel("{} chain {}".format(my_struct.objId, my_struct.chain_simple))
    self.aplot.set_ylabel("{} chain {}".format(my_struct.objId, my_struct.chain_simple))
    self.canvas.draw()
    self.customToolbar.update()
def lets_do_the_flip(self):
    """Mirror the side flag (L <-> R) of every selected region and redraw.

    Each entry of self.SELECTED_REGIONS ends with a side marker "L"/"R";
    this toggles it, removes the old guide lines, then redraws the patches
    and their bonds.
    """
    def flip(side):
        # Toggle the region orientation marker.
        if side == "L":
            return "R"
        else:
            return "L"
    for i, reg in enumerate(self.SELECTED_REGIONS):
        self.SELECTED_REGIONS[i] = list(reg[:-1]) + [flip(reg[-1])]
    for path in self.AXLINES:
        try:
            path.remove()
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate; artists already detached from the
        # canvas raise here and are simply skipped (best-effort cleanup).
        except Exception:
            pass
    for patch in self.SELECTED_REGIONS:  ## TODO only draws on top
        self.draw_selected_patch(patch)
    self.canvas.draw()
    self.redraw_bonds()
def add_pymol_bond_to_object_intra(self, res1, res2, color, obj=""):
    """Draw a coloured stick "bond" between two residues of the same chain
    in PyMOL, optionally replicated onto similar chains.

    :param res1: first residue index (reference chain numbering).
    :param res2: second residue index (reference chain numbering).
    :param color: RGB(A) sequence in [0, 1]; only the first three
        components are used.
    :param obj: tuple (object_name, chain_list, chain_map, index_offsets)
        describing where to draw, or "" (no target).
    """
    # Proteins anchor bonds at C-alpha, RNA at the backbone phosphate.
    bonded_atom = "name CA and elem C"
    if dm.Structure.isRNA:
        bonded_atom = "name P and elem P"
    tmp_color_name = "tmp_color_%d" % len(self.DRAWN_BONDS)
    # NOTE(review): assumes color[:3] supports tuple-style %-formatting
    # (i.e. `color` is a tuple) -- confirm callers never pass a list.
    cmd.set_color(tmp_color_name, "[ %.3f, %.3f, %.3f ]" % color[:3])
    others = self.mark_on_similar.get()
    # BUG FIX: the fallback tuple had only three elements for four targets,
    # which raised "ValueError: not enough values to unpack" whenever `obj`
    # was falsy.  (A falsy `obj` still cannot be drawn -- clist[0] below is
    # not subscriptable -- but the unpack itself no longer crashes.)
    obj, clist, cmap, idx_ref = obj if obj else (False, False, False, False)
    for c in clist[0]:  ######## TODO Important - changed to differentiate the "mark on similar option"
        # First pass (tmpi == 0) draws on the chain itself; further passes
        # replicate the bond on the chains mapped as "similar".
        for tmpi, chain in enumerate([c] + (cmap.get(c, []) if others else [])):
            r1, r2 = res1, res2
            if others and tmpi:
                # Shift residue numbers into the similar chain's numbering.
                r1 += idx_ref[(c, chain)]
                r2 += idx_ref[(c, chain)]
            cmd.bond("%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r1),
                     "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r2))
            cmd.select("tmp_select", "%s i. %d+%d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r1, r2))
            cmd.show("sticks", "tmp_select")
            cmd.set_bond("stick_color", tmp_color_name,  # str(color[:3]),
                         "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r1),
                         "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r2))
            cmd.deselect()
            # Remember both selection strings so the bond can be undone later.
            self.DRAWN_BONDS.append(("%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r1),
                                     "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, chain) if obj else "", r2), r1, r2, obj))
def add_pymol_bond_to_object_iface(self, r1, r2, color):
    """Draw a coloured distance dash between one residue of each chain of
    the current interface pair in PyMOL.

    :param r1: residue index in struct_1 (first chain).
    :param r2: residue index in struct_2 (second chain).
    :param color: RGB(A) sequence in [0, 1]; only the first three
        components are used.
    """
    # Proteins anchor at C-alpha, RNA at the backbone phosphate.
    bonded_atom = "name CA and elem C"
    if dm.Structure.isRNA:
        bonded_atom = "name P and elem P"
    # Register a throw-away colour unique to this bond.
    tmp_color_name = "tmp_color_%d" % len(self.DRAWN_BONDS)
    cmd.set_color(tmp_color_name, "[ %.3f, %.3f, %.3f ]" % color[:3])
    c1, c2 = self.current_structure_obj_var.struct_1.chain_simple, self.current_structure_obj_var.struct_2.chain_simple
    obj1, obj2 = self.current_structure_obj_var.struct_1.objId, self.current_structure_obj_var.struct_2.objId
    obj_name = "dist_{}".format(len(self.DRAWN_BONDS))
    # Use a distance object (not a chemical bond) since the two residues
    # belong to different chains/objects; labels hidden so only dashes show.
    cmd.distance(obj_name, "{} and c. {} and i. {} and {} and (alt A or alt '')".format(obj1, c1, r1, bonded_atom),
                 "{} and c. {} and i. {} and {} and (alt A or alt '')".format(obj2, c2, r2, bonded_atom))
    cmd.color(tmp_color_name, obj_name)
    cmd.hide("labels", obj_name)
    # Record the PyMOL object name plus residue/chain tags for later cleanup.
    self.DRAWN_BONDS.append([obj_name, "{}{}".format(r1, c1), "{}{}".format(r2, c2)])
def add_pymol_bond_to_object_inter(self,res1, res2, color, obj=""):
bonded_atom = "name CA and elem C"
if dm.Structure.isRNA:
bonded_atom = "name P and elem P"
tmp_color_name = "tmp_color_%d" % len(self.DRAWN_BONDS)
cmd.set_color(tmp_color_name, "[ %.3f, %.3f, %.3f ]" % color[:3])
others = self.mark_on_similar.get()
just_within = self.mark_on_similar_just_within.get()
if just_within:
just_within_cutoff = self.mark_on_similar_just_within_cutoff.get()
obj, clist, cmap, idx_ref = obj if obj else (False, False, False)
my_list = clist #+ cmap[clist[0]]
pairs_to_bond = []
cmd.set('valence', 1)
for tmpi, c in enumerate(my_list):
for chain in my_list[tmpi + 1:]:
r1, r2 = res1, res2
r3, r4 = res1, res2
r2 += idx_ref[(clist[0], chain)]
r3 += idx_ref[(clist[0], chain)]
if c not in clist:
if not others:
continue
r1 += idx_ref[(clist[0], c)]
r4 += idx_ref[(clist[0], c)]
if not just_within:
pairs_to_bond.append((c, chain, r1, r2))
pairs_to_bond.append((chain, c, r3, r4))
else:
a1 = "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % ("{} and c. {} and ".format(obj, c) if obj else "", r1)
a2 = "%s i. %d and (alt A or alt '') and {}".format(bonded_atom) % | |
# -*- coding: utf-8 -*-
import numpy as np
#import matplotlib.colors
#from matplotlib.figure import Figure
#from matplotlib.backends.backend_agg import FigureCanvasAgg
import fractalshades.colors as fscolors
##: A colormap circling through black, blue, white, orange, black
# 12 anchor colours define 11 gradient segments whose interpolation alternates
# between the Lab and Lch colour spaces.  NOTE(review): grad_npts has 12
# entries while kinds/grad_funcs have 11 -- confirm this matches what
# Fractal_colormap expects.
cmap_legacy = fscolors.Fractal_colormap(
    colors=[[0.00784314, 0.01960784, 0.14509804],
            [0.17647059, 0.10980392, 0.10588235],
            [0.48627451, 0.24313725, 0.07058824],
            [0.63921569, 0.39607843, 0.17647059],
            [0.81176471, 0.58039216, 0.33333333],
            [0.97647059, 0.85490196, 0.64313725],
            [0.96470588, 0.98823529, 0.90196078],
            [0.48627451, 0.7254902 , 0.90980392],
            [0.27843137, 0.51764706, 0.74901961],
            [0.12156863, 0.32941176, 0.57254902],
            [0.07058824, 0.25490196, 0.49411765],
            [0.00784314, 0.01960784, 0.14509804]],
    kinds=['Lab', 'Lch', 'Lch', 'Lch', 'Lab', 'Lab', 'Lab', 'Lch', 'Lch', 'Lch', 'Lab'],
    grad_npts=[32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32],
    grad_funcs=['x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x'],
    extent='mirror'
)
def Pchipinterpolate_rgb(rgb_arr, pos_arr, npts=256):
    """Interpolate RGB control points onto a regular grid with PCHIP.

    Utility function used to build a cmap. It needs scipy but should
    not be necessary to the end-user.

    :param rgb_arr: (n, 3) array of RGB control colours in [0, 1].
    :param pos_arr: (n,) strictly increasing positions in [0, 1] for the
        control colours.
    :param npts: number of output samples along [0, 1] (default 256).
    :return: (npts, 3) float64 array of interpolated RGB values.
    """
    # Deliberate local import: scipy is only needed when *building* a
    # colormap, not at end-user runtime.
    from scipy.interpolate import PchipInterpolator
    lin_scale = np.linspace(0., 1., num=npts, dtype=np.float64)
    # One interpolator handles all three channels at once (axis=0) -- this
    # replaces the previous per-channel Python loop with identical results.
    interp = PchipInterpolator(pos_arr, rgb_arr, axis=0)
    return interp(lin_scale)
def create_UF_cmap():
    """Build the classic Ultra-Fractal-style colormap.

    Usage::

        classic_colormap = create_UF_cmap()
    """
    anchors = np.array([0., 0.16, 0.42, 0.6425, 0.8575, 1.], dtype=np.float64)
    control_rgb = np.array([
        [  0.,   7., 100.],
        [ 32., 107., 203.],
        [237., 255., 255.],
        [255., 170.,   0.],
        [  0.,   2.,   0.],
        [  0.,   7., 100.],
    ]) / 255.
    smooth_rgb = Pchipinterpolate_rgb(control_rgb, anchors)
    return fscolors.Fractal_colormap(
        colors=smooth_rgb,
        kinds="Lch",
        grad_npts=3,
        grad_funcs='x',
        extent='mirror'
    )
# Precomputed 256-entry RGB table for the "classic" colormap (Ultra Fractal
# style palette); create_UF_cmap() above generates an equivalent table from
# six control points.  NOTE(review): equivalence assumed from adjacency, not
# verified here.
cmap_classic = fscolors.Fractal_colormap(
    colors=[[0.00000000e+00, 2.74509804e-02, 3.92156863e-01],
            [1.10437384e-04, 3.73834922e-02, 4.04650357e-01],
            [4.38226764e-04, 4.73039863e-02, 4.17125524e-01],
            [9.78083979e-04, 5.72121456e-02, 4.29573705e-01],
            [1.72472487e-03, 6.71076526e-02, 4.41986240e-01],
            [2.67286527e-03, 7.69901900e-02, 4.54354469e-01],
            [3.81722103e-03, 8.68594405e-02, 4.66669733e-01],
            [5.15250798e-03, 9.67150866e-02, 4.78923371e-01],
            [6.67344196e-03, 1.06556811e-01, 4.91106726e-01],
            [8.37473881e-03, 1.16384297e-01, 5.03211136e-01],
            [1.02511144e-02, 1.26197226e-01, 5.15227942e-01],
            [1.22972845e-02, 1.35995282e-01, 5.27148486e-01],
            [1.45079650e-02, 1.45778147e-01, 5.38964106e-01],
            [1.68778717e-02, 1.55545503e-01, 5.50666144e-01],
            [1.94017206e-02, 1.65297034e-01, 5.62245939e-01],
            [2.20742273e-02, 1.75032422e-01, 5.73694833e-01],
            [2.48901077e-02, 1.84751350e-01, 5.85004166e-01],
            [2.78440778e-02, 1.94453500e-01, 5.96165278e-01],
            [3.09308533e-02, 2.04138555e-01, 6.07169509e-01],
            [3.41451500e-02, 2.13806197e-01, 6.18008200e-01],
            [3.74816839e-02, 2.23456110e-01, 6.28672691e-01],
            [4.09351707e-02, 2.33087977e-01, 6.39154323e-01],
            [4.45003263e-02, 2.42701478e-01, 6.49444436e-01],
            [4.81718665e-02, 2.52296298e-01, 6.59534371e-01],
            [5.19445072e-02, 2.61872119e-01, 6.69415467e-01],
            [5.58129642e-02, 2.71428624e-01, 6.79079066e-01],
            [5.97719533e-02, 2.80965495e-01, 6.88516507e-01],
            [6.38161904e-02, 2.90482415e-01, 6.97719131e-01],
            [6.79403913e-02, 2.99979067e-01, 7.06678279e-01],
            [7.21392719e-02, 3.09455133e-01, 7.15385291e-01],
            [7.64075480e-02, 3.18910296e-01, 7.23831507e-01],
            [8.07399355e-02, 3.28344239e-01, 7.32008267e-01],
            [8.51311501e-02, 3.37756644e-01, 7.39906913e-01],
            [8.95759078e-02, 3.47147194e-01, 7.47518784e-01],
            [9.40689243e-02, 3.56515572e-01, 7.54835221e-01],
            [9.86049155e-02, 3.65861460e-01, 7.61847564e-01],
            [1.03178597e-01, 3.75184541e-01, 7.68547154e-01],
            [1.07784685e-01, 3.84484498e-01, 7.74925331e-01],
            [1.12417896e-01, 3.93761012e-01, 7.80973436e-01],
            [1.17072944e-01, 4.03013768e-01, 7.86682808e-01],
            [1.21744546e-01, 4.12242448e-01, 7.92044788e-01],
            [1.26442213e-01, 4.21451959e-01, 7.97057643e-01],
            [1.31644451e-01, 4.30811773e-01, 8.01942937e-01],
            [1.37566242e-01, 4.40395326e-01, 8.06809033e-01],
            [1.44183896e-01, 4.50191275e-01, 8.11654219e-01],
            [1.51473724e-01, 4.60188274e-01, 8.16476784e-01],
            [1.59412036e-01, 4.70374980e-01, 8.21275017e-01],
            [1.67975141e-01, 4.80740047e-01, 8.26047207e-01],
            [1.77139351e-01, 4.91272131e-01, 8.30791641e-01],
            [1.86880974e-01, 5.01959888e-01, 8.35506611e-01],
            [1.97176323e-01, 5.12791973e-01, 8.40190402e-01],
            [2.08001706e-01, 5.23757041e-01, 8.44841306e-01],
            [2.19333434e-01, 5.34843749e-01, 8.49457610e-01],
            [2.31147817e-01, 5.46040752e-01, 8.54037602e-01],
            [2.43421166e-01, 5.57336705e-01, 8.58579573e-01],
            [2.56129790e-01, 5.68720263e-01, 8.63081810e-01],
            [2.69250000e-01, 5.80180083e-01, 8.67542602e-01],
            [2.82758106e-01, 5.91704820e-01, 8.71960239e-01],
            [2.96630418e-01, 6.03283129e-01, 8.76333008e-01],
            [3.10843247e-01, 6.14903666e-01, 8.80659199e-01],
            [3.25372903e-01, 6.26555086e-01, 8.84937100e-01],
            [3.40195695e-01, 6.38226045e-01, 8.89165000e-01],
            [3.55287934e-01, 6.49905198e-01, 8.93341187e-01],
            [3.70625931e-01, 6.61581201e-01, 8.97463951e-01],
            [3.86185996e-01, 6.73242710e-01, 9.01531580e-01],
            [4.01944438e-01, 6.84878379e-01, 9.05542364e-01],
            [4.17877568e-01, 6.96476865e-01, 9.09494590e-01],
            [4.33961696e-01, 7.08026823e-01, 9.13386547e-01],
            [4.50173133e-01, 7.19516908e-01, 9.17216524e-01],
            [4.66488189e-01, 7.30935776e-01, 9.20982811e-01],
            [4.82883173e-01, 7.42272083e-01, 9.24683695e-01],
            [4.99334396e-01, 7.53514484e-01, 9.28317465e-01],
            [5.15818169e-01, 7.64651634e-01, 9.31882411e-01],
            [5.32310801e-01, 7.75672189e-01, 9.35376820e-01],
            [5.48788603e-01, 7.86564805e-01, 9.38798982e-01],
            [5.65227885e-01, 7.97318137e-01, 9.42147185e-01],
            [5.81604957e-01, 8.07920840e-01, 9.45419719e-01],
            [5.97896129e-01, 8.18361570e-01, 9.48614871e-01],
            [6.14077712e-01, 8.28628983e-01, 9.51730931e-01],
            [6.30126016e-01, 8.38711735e-01, 9.54766187e-01],
            [6.46017351e-01, 8.48598479e-01, 9.57718928e-01],
            [6.61728027e-01, 8.58277873e-01, 9.60587443e-01],
            [6.77234354e-01, 8.67738572e-01, 9.63370021e-01],
            [6.92512643e-01, 8.76969230e-01, 9.66064950e-01],
            [7.07539204e-01, 8.85958505e-01, 9.68670518e-01],
            [7.22290347e-01, 8.94695050e-01, 9.71185016e-01],
            [7.36742383e-01, 9.03167523e-01, 9.73606731e-01],
            [7.50871621e-01, 9.11364577e-01, 9.75933952e-01],
            [7.64654371e-01, 9.19274869e-01, 9.78164968e-01],
            [7.78066945e-01, 9.26887055e-01, 9.80298068e-01],
            [7.91085652e-01, 9.34189789e-01, 9.82331541e-01],
            [8.03686802e-01, 9.41171728e-01, 9.84263674e-01],
            [8.15846706e-01, 9.47821526e-01, 9.86092758e-01],
            [8.27541674e-01, 9.54127840e-01, 9.87817080e-01],
            [8.38748016e-01, 9.60079324e-01, 9.89434930e-01],
            [8.49442042e-01, 9.65664635e-01, 9.90944596e-01],
            [8.59600062e-01, 9.70872428e-01, 9.92344366e-01],
            [8.69198387e-01, 9.75691358e-01, 9.93632531e-01],
            [8.78213328e-01, 9.80110081e-01, 9.94807377e-01],
            [8.86621193e-01, 9.84117253e-01, 9.95867195e-01],
            [8.94398294e-01, 9.87701528e-01, 9.96810273e-01],
            [9.01520940e-01, 9.90851563e-01, 9.97634899e-01],
            [9.07965442e-01, 9.93556012e-01, 9.98339363e-01],
            [9.13708110e-01, 9.95803532e-01, 9.98921953e-01],
            [9.18725254e-01, 9.97582779e-01, 9.99380957e-01],
            [9.22993185e-01, 9.98882406e-01, 9.99714665e-01],
            [9.26488212e-01, 9.99691071e-01, 9.99921366e-01],
            [9.29186646e-01, 9.99997428e-01, 9.99999347e-01],
            [9.31390967e-01, 9.99862137e-01, 9.99253124e-01],
            [9.33566587e-01, 9.99389882e-01, 9.96710858e-01],
            [9.35716976e-01, 9.98588684e-01, 9.92429575e-01],
            [9.37841616e-01, 9.97465705e-01, 9.86474975e-01],
            [9.39939988e-01, 9.96028108e-01, 9.78912758e-01],
            [9.42011573e-01, 9.94283054e-01, 9.69808626e-01],
            [9.44055854e-01, 9.92237706e-01, 9.59228278e-01],
            [9.46072311e-01, 9.89899227e-01, 9.47237417e-01],
            [9.48060428e-01, 9.87274778e-01, 9.33901742e-01],
            [9.50019684e-01, 9.84371523e-01, 9.19286955e-01],
            [9.51949561e-01, 9.81196622e-01, 9.03458756e-01],
            [9.53849542e-01, 9.77757239e-01, 8.86482847e-01],
            [9.55719108e-01, 9.74060536e-01, 8.68424927e-01],
            [9.57557740e-01, 9.70113676e-01, 8.49350698e-01],
            [9.59364920e-01, 9.65923819e-01, 8.29325861e-01],
            [9.61140129e-01, 9.61498130e-01, 8.08416116e-01],
            [9.62882850e-01, 9.56843770e-01, 7.86687164e-01],
            [9.64592563e-01, 9.51967901e-01, 7.64204706e-01],
            [9.66268750e-01, 9.46877686e-01, 7.41034443e-01],
            [9.67910894e-01, 9.41580287e-01, 7.17242075e-01],
            [9.69518474e-01, 9.36082866e-01, 6.92893304e-01],
            [9.71090974e-01, 9.30392586e-01, 6.68053830e-01],
            [9.72627874e-01, 9.24516609e-01, 6.42789353e-01],
            [9.74128656e-01, 9.18462098e-01, 6.17165576e-01],
            [9.75592802e-01, 9.12236214e-01, 5.91248198e-01],
            [9.77019794e-01, 9.05846120e-01, 5.65102920e-01],
            [9.78409112e-01, 8.99298978e-01, 5.38795444e-01],
            [9.79760238e-01, 8.92601951e-01, 5.12391470e-01],
            [9.81072655e-01, 8.85762201e-01, 4.85956698e-01],
            [9.82345843e-01, 8.78786890e-01, 4.59556830e-01],
            [9.83579285e-01, 8.71683180e-01, 4.33257566e-01],
            [9.84772461e-01, 8.64458234e-01, 4.07124607e-01],
            [9.85924854e-01, 8.57119214e-01, 3.81223655e-01],
            [9.87035944e-01, 8.49673283e-01, 3.55620409e-01],
            [9.88105214e-01, 8.42127602e-01, 3.30380570e-01],
            [9.89132146e-01, 8.34489335e-01, 3.05569840e-01],
            [9.90116220e-01, 8.26765643e-01, 2.81253919e-01],
            [9.91056918e-01, 8.18963688e-01, 2.57498508e-01],
            [9.91953722e-01, 8.11090634e-01, 2.34369308e-01],
            [9.92806113e-01, 8.03153642e-01, 2.11932020e-01],
            [9.93613573e-01, 7.95159874e-01, 1.90252343e-01],
            [9.94375584e-01, 7.87116493e-01, 1.69395980e-01],
            [9.95091627e-01, 7.79030662e-01, 1.49428631e-01],
            [9.95761184e-01, 7.70909542e-01, 1.30415997e-01],
            [9.96383736e-01, 7.62760296e-01, 1.12423778e-01],
            [9.96958765e-01, 7.54590086e-01, 9.55176752e-02],
            [9.97485753e-01, 7.46406074e-01, 7.97633899e-02],
            [9.97964180e-01, 7.38215424e-01, 6.52266225e-02],
            [9.98393529e-01, 7.30025296e-01, 5.19730738e-02],
            [9.98773282e-01, 7.21842854e-01, 4.00684447e-02],
            [9.99102919e-01, 7.13675259e-01, 2.95784359e-02],
            [9.99381923e-01, 7.05529675e-01, 2.05687483e-02],
            [9.99609774e-01, 6.97413263e-01, 1.31050827e-02],
            [9.99785955e-01, 6.89333185e-01, 7.25313979e-03],
            [9.99909947e-01, 6.81296605e-01, 3.07862044e-03],
            [9.99981232e-01, 6.73310683e-01, 6.47225458e-04],
            [9.99973697e-01, 6.65372012e-01, 0.00000000e+00],
            [9.98670259e-01, 6.56984091e-01, 0.00000000e+00],
            [9.95455321e-01, 6.47895389e-01, 0.00000000e+00],
            [9.90401699e-01, 6.38138096e-01, 0.00000000e+00],
            [9.83582215e-01, 6.27744402e-01, 0.00000000e+00],
            [9.75069687e-01, 6.16746500e-01, 0.00000000e+00],
            [9.64936933e-01, 6.05176579e-01, 0.00000000e+00],
            [9.53256774e-01, 5.93066831e-01, 0.00000000e+00],
            [9.40102027e-01, 5.80449446e-01, 0.00000000e+00],
            [9.25545513e-01, 5.67356615e-01, 0.00000000e+00],
            [9.09660051e-01, 5.53820529e-01, 0.00000000e+00],
            [8.92518458e-01, 5.39873379e-01, 0.00000000e+00],
            [8.74193556e-01, 5.25547356e-01, 0.00000000e+00],
            [8.54758162e-01, 5.10874650e-01, 0.00000000e+00],
            [8.34285096e-01, 4.95887452e-01, 0.00000000e+00],
            [8.12847176e-01, 4.80617953e-01, 0.00000000e+00],
            [7.90517223e-01, 4.65098344e-01, 0.00000000e+00],
            [7.67368055e-01, 4.49360816e-01, 0.00000000e+00],
            [7.43472491e-01, 4.33437559e-01, 0.00000000e+00],
            [7.18903350e-01, 4.17360764e-01, 0.00000000e+00],
            [6.93733452e-01, 4.01162623e-01, 0.00000000e+00],
            [6.68035615e-01, 3.84875326e-01, 0.00000000e+00],
            [6.41882659e-01, 3.68531063e-01, 0.00000000e+00],
            [6.15347402e-01, 3.52162026e-01, 0.00000000e+00],
            [5.88502665e-01, 3.35800405e-01, 0.00000000e+00],
            [5.61421265e-01, 3.19478392e-01, 0.00000000e+00],
            [5.34176022e-01, 3.03228177e-01, 0.00000000e+00],
            [5.06839756e-01, 2.87081950e-01, 0.00000000e+00],
            [4.79485284e-01, 2.71071903e-01, 0.00000000e+00],
            [4.52185427e-01, 2.55230227e-01, 0.00000000e+00],
            [4.25013004e-01, 2.39589112e-01, 0.00000000e+00],
            [3.98040832e-01, 2.24180749e-01, 0.00000000e+00],
            [3.71341733e-01, 2.09037330e-01, 0.00000000e+00],
            [3.44988524e-01, 1.94191044e-01, 0.00000000e+00],
            [3.19054025e-01, 1.79674082e-01, 0.00000000e+00],
            [2.93611055e-01, 1.65518636e-01, 0.00000000e+00],
            [2.68732434e-01, 1.51756896e-01, 0.00000000e+00],
            [2.44490979e-01, 1.38421054e-01, 0.00000000e+00],
            [2.20959510e-01, 1.25543299e-01, 0.00000000e+00],
            [1.98210847e-01, 1.13155823e-01, 0.00000000e+00],
            [1.76317808e-01, 1.01290816e-01, 0.00000000e+00],
            [1.55353213e-01, 8.99804696e-02, 0.00000000e+00],
            [1.35389881e-01, 7.92569745e-02, 0.00000000e+00],
            [1.16500630e-01, 6.91525213e-02, 0.00000000e+00],
            [9.87582805e-02, 5.96993009e-02, 0.00000000e+00],
            [8.22356507e-02, 5.09295041e-02, 0.00000000e+00],
            [6.70055599e-02, 4.28753217e-02, 0.00000000e+00],
            [5.31408274e-02, 3.55689446e-02, 0.00000000e+00],
            [4.07142721e-02, 2.90425636e-02, 0.00000000e+00],
            [2.97987132e-02, 2.33283694e-02, 0.00000000e+00],
            [2.04669698e-02, 1.84585530e-02, 0.00000000e+00],
            [1.27918610e-02, 1.44653050e-02, 0.00000000e+00],
            [6.84620587e-03, 1.13808164e-02, 0.00000000e+00],
            [2.70282357e-03, 9.23727801e-03, 0.00000000e+00],
            [4.34533158e-04, 8.06688056e-03, 0.00000000e+00],
            [0.00000000e+00, 7.84315297e-03, 5.39857630e-05],
            [0.00000000e+00, 7.84411505e-03, 8.39057327e-04],
            [0.00000000e+00, 7.84835664e-03, 2.53589902e-03],
            [0.00000000e+00, 7.85832973e-03, 5.11501856e-03],
            [0.00000000e+00, 7.87648628e-03, 8.54692366e-03],
            [0.00000000e+00, 7.90527827e-03, 1.28021221e-02],
            [0.00000000e+00, 7.94715768e-03, 1.78511215e-02],
            [0.00000000e+00, 8.00457647e-03, 2.36644296e-02],
            [0.00000000e+00, 8.07998662e-03, 3.02125542e-02],
            [0.00000000e+00, 8.17584011e-03, 3.74660029e-02],
            [0.00000000e+00, 8.29458892e-03, 4.53952835e-02],
            [0.00000000e+00, 8.43868501e-03, 5.39709038e-02],
            [0.00000000e+00, 8.61058036e-03, 6.31633713e-02],
            [0.00000000e+00, 8.81272694e-03, 7.29431939e-02],
            [0.00000000e+00, 9.04757673e-03, 8.32808793e-02],
            [0.00000000e+00, 9.31758171e-03, 9.41469352e-02],
            [0.00000000e+00, 9.62519385e-03, 1.05511869e-01],
            [0.00000000e+00, 9.97286511e-03, 1.17346189e-01],
            [0.00000000e+00, 1.03630475e-02, 1.29620403e-01],
            [0.00000000e+00, 1.07981929e-02, 1.42305018e-01],
            [0.00000000e+00, 1.12807535e-02, 1.55370542e-01],
            [0.00000000e+00, 1.18131810e-02, 1.68787483e-01],
            [0.00000000e+00, 1.23979275e-02, 1.82526348e-01],
            [0.00000000e+00, 1.30374451e-02, 1.96557646e-01],
            [0.00000000e+00, 1.37341856e-02, 2.10851884e-01],
            [0.00000000e+00, 1.44906010e-02, 2.25379569e-01],
            [0.00000000e+00, 1.53091433e-02, 2.40111210e-01],
            [0.00000000e+00, 1.61922645e-02, 2.55017314e-01],
            [0.00000000e+00, 1.71424165e-02, 2.70068388e-01],
            [0.00000000e+00, 1.81620514e-02, 2.85234942e-01],
            [0.00000000e+00, 1.92536210e-02, 3.00487481e-01],
            [0.00000000e+00, 2.04195775e-02, 3.15796515e-01],
            [0.00000000e+00, 2.16623727e-02, 3.31132550e-01],
            [0.00000000e+00, 2.29844586e-02, 3.46466095e-01],
            [0.00000000e+00, 2.43882872e-02, 3.61767657e-01],
            [0.00000000e+00, 2.58763105e-02, 3.77007743e-01],
            [0.00000000e+00, 2.74509804e-02, 3.92156863e-01]],
    kinds='Lch',
    grad_npts=3,
    grad_funcs='x',
    extent='repeat'
)
cmap_atoll = fscolors.Fractal_colormap(
colors=[[0.5372549 , 0.89411765, 0.80784314],
[0.55294118, 0.89019608, 0.8 ],
[0.56862745, 0.89019608, 0.8 ],
[0.57647059, 0.89019608, 0.8 ],
[0.59607843, 0.89411765, 0.80784314],
[0.60784314, 0.89803922, 0.80784314],
[0.62745098, 0.90196078, 0.80392157],
[0.58431373, 0.91764706, 0.81960784],
[0.17254902, 0.75686275, 0.72156863],
[0.10588235, 0.68627451, 0.72156863],
[0.11372549, 0.66666667, 0.70588235],
[0.1254902 , 0.63137255, 0.69411765],
[0.10196078, 0.57647059, 0.65882353],
[0.10980392, 0.58039216, 0.6745098 ],
[0.10980392, 0.57647059, 0.65882353],
[0.11764706, 0.56470588, 0.65882353],
[0.1254902 , 0.56078431, 0.66666667],
[0.12156863, 0.57647059, 0.68235294],
[0.13333333, 0.61176471, 0.70196078],
[0.10980392, 0.50588235, 0.6 ],
[0.10196078, 0.34901961, 0.48627451],
[0.12156863, 0.3254902 , 0.47058824],
[0.1254902 , 0.34117647, 0.48627451],
[0.10980392, 0.34509804, 0.47843137],
[0.10980392, 0.3372549 , 0.47058824],
[0.11372549, 0.3254902 , 0.46666667],
[0.10196078, 0.29803922, 0.44313725],
[0.10196078, 0.30980392, 0.45098039],
[0.11372549, 0.30588235, 0.44705882],
[0.12156863, 0.30980392, 0.4627451 ],
[0.12156863, 0.37254902, 0.51372549],
[0.09411765, 0.5254902 , 0.62745098],
[0.11764706, 0.58039216, 0.6745098 ],
[0.11764706, 0.58039216, 0.65882353],
[0.11764706, 0.57647059, 0.65490196],
[0.12156863, 0.58039216, 0.65882353],
[0.12941176, 0.58039216, 0.6627451 ],
[0.10196078, 0.55686275, 0.63921569],
[0.11764706, 0.50196078, 0.59607843],
[0.09803922, 0.4745098 , 0.57254902],
[0.10588235, 0.49411765, 0.58823529],
[0.08627451, 0.4 , 0.52156863],
[0.12156863, 0.48627451, 0.58823529],
[0.11764706, 0.49019608, 0.59607843],
[0.11372549, 0.45490196, 0.56078431],
[0.10588235, 0.42352941, 0.54509804],
[0.11764706, 0.53333333, 0.62745098],
[0.10196078, 0.56470588, 0.65098039],
[0.12156863, 0.55294118, 0.64705882],
[0.11372549, 0.54509804, 0.63529412],
[0.10980392, 0.53333333, 0.62352941],
[0.09803922, 0.54117647, 0.62352941],
[0.10980392, 0.55686275, 0.65098039],
[0.1254902 , 0.56078431, 0.66666667],
[0.11372549, 0.55294118, 0.64705882],
[0.11764706, 0.54901961, 0.64705882],
[0.10196078, 0.54901961, 0.64313725],
[0.10588235, 0.55294118, 0.64705882],
[0.11372549, 0.55294118, 0.64705882],
[0.09411765, 0.54901961, 0.63921569],
[0.10196078, 0.55686275, 0.64705882],
[0.11372549, 0.56078431, 0.65490196],
[0.09019608, 0.54509804, 0.63529412],
[0.09019608, 0.54509804, 0.63529412],
[0.09411765, 0.54509804, 0.64705882],
[0.09411765, 0.54117647, 0.64313725],
[0.10196078, 0.54901961, 0.65098039],
[0.09803922, 0.54901961, 0.63137255],
[0.10196078, 0.54901961, 0.64313725],
[0.10980392, 0.55686275, 0.65098039],
[0.10588235, 0.55294118, 0.65490196],
[0.09803922, 0.54509804, 0.63921569],
[0.10980392, 0.55686275, 0.65098039],
[0.10196078, 0.54901961, 0.64313725],
[0.13333333, 0.58431373, 0.68235294],
[0.09411765, 0.56078431, 0.65490196],
[0.10196078, 0.57254902, 0.66666667],
[0.10196078, 0.58039216, 0.67058824],
[0.09411765, 0.58431373, 0.6627451 ],
[0.10588235, 0.57647059, 0.67058824],
[0.10588235, 0.58431373, 0.6745098 ],
[0.12156863, 0.59215686, 0.68627451],
[0.11372549, 0.59215686, 0.68235294],
[0.1254902 , 0.59607843, 0.69019608],
[0.11764706, 0.59215686, 0.68627451],
[0.10196078, 0.59215686, 0.67058824],
| |
import numpy as np
from typing import List
from functools import lru_cache, reduce
from pyspark.sql import Window
from pyspark.sql.types import (
IntegerType,
LongType,
StructField,
StructType,
Row
)
from pyspark.sql.functions import col, year, month, dayofmonth, row_number, udf, desc, asc
from . import OpheliaUtilitiesException
from ._logger import OpheliaLogger
__all__ = [
'union_all',
'split_date',
'row_index',
'lag_min_max_data',
'regex_expr',
'remove_duplicate_element',
'year_array',
'dates_index',
'sorted_date_list',
'feature_pick',
'binary_search',
'century_from_year',
'simple_average',
'delta_series',
'simple_moving_average',
'average',
'weight_moving_average',
'single_exp_smooth',
'double_exp_smooth',
'initial_seasonal_components',
'triple_exp_smooth',
'row_indexing',
'string_match'
]
logger = OpheliaLogger()
def union_all(dfs: list):
    """
    Concatenate a list of Spark DataFrames into one DataFrame.

    All frames are expected to share the schema of the first one; the
    union is performed at RDD level through the shared SparkContext
    (note: ``_sc`` is a private attribute of the SQL context).

    :param dfs: list of Spark DataFrames to union
    :return: single unioned Spark DataFrame
    """
    try:
        head = dfs[0]
        cached_rdds = [frame.cache().rdd for frame in dfs]
        merged_rdd = head.sql_ctx._sc.union(cached_rdds)
        return head.sql_ctx.createDataFrame(merged_rdd, head.schema)
    except Exception as e:
        raise OpheliaUtilitiesException(f"An error occurred on union_all() method: {e}")
def split_date(df, col_date: str):
    """
    Append year/month/day columns derived from a date column.

    The new columns are named ``<col_date>_year``, ``<col_date>_month``
    and ``<col_date>_day``.

    :param df: Spark DataFrame holding the date field
    :param col_date: str, name of the date column to decompose
    :return: Spark DataFrame with the three extra columns
    """
    try:
        parts = [
            year(col_date).alias(f'{col_date}_year'),
            month(col_date).alias(f'{col_date}_month'),
            dayofmonth(col_date).alias(f'{col_date}_day'),
        ]
        dates_df = df.select('*', *parts)
        logger.info("Split Date In Columns")
        return dates_df
    except Exception as e:
        raise OpheliaUtilitiesException(f"An error occurred on split_date() method: {e}")
def row_index(df, col_order: str):
    """
    Attach a sequential 'row_num' column, numbering rows by descending order
    of a given field.

    :param df: Spark DataFrame to index
    :param col_order: column used to order the numbering
    :return: Spark DataFrame with an extra 'row_num' column
    """
    try:
        ordering = Window().orderBy(col(col_order).desc())
        logger.info("Row Indexing In DataFrame")
        return df.withColumn("row_num", row_number().over(ordering))
    except Exception as e:
        raise OpheliaUtilitiesException(f"An error occurred on row_index() method: {e}")
def lag_min_max_data(df, is_max=True, col_lag: str = "operation_date"):
    """
    Drop the extreme (newest or oldest) date partition and suffix every
    remaining column with '_lag'.

    Rows whose ``col_lag`` value equals the maximum (or minimum) distinct
    date are filtered out; all surviving columns are renamed ``<name>_lag``.

    :param df: Spark DataFrame to lag
    :param is_max: True drops the newest date, False drops the oldest
    :param col_lag: name of the date column used as pivot
    :return: Spark DataFrame
    """
    try:
        distinct_rows = df.select(col_lag).distinct().collect()
        pick_extreme = max if is_max else min
        pivot_date = pick_extreme(distinct_rows)[0]
        renamed_cols = [col(name).alias(f'{name}_lag') for name in df.columns]
        lag_data = df.where(col(col_lag) < pivot_date).select(renamed_cols)
        logger.info("Lag-Over Dates In DataFrame")
        return lag_data
    except Exception as e:
        raise OpheliaUtilitiesException(f"An error occurred on lag_min_max_data() method: {e}")
def regex_expr(regex_name):
    """
    Build '.*'-prefixed regex pattern strings for one or many fragments.

    :param regex_name: str or list(str) fragment(s) to match after any prefix
    :return: list of pattern strings
    """
    try:
        fragments = regex_name if isinstance(regex_name, list) else [regex_name]
        return [f".*{fragment}" for fragment in fragments]
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on regex_expr() method: {ve}")
def remove_duplicate_element(lst: list):
    """
    Drop duplicate entries from a list while preserving first-seen order.

    Relies on dict key uniqueness and insertion-order preservation.

    :param lst: list possibly containing duplicates (elements must be hashable)
    :return: new list with duplicates removed
    """
    try:
        unique_keys = dict.fromkeys(lst)
        return [element for element in unique_keys]
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on remove_duplicate_element() method: {ve}")
def year_array(from_year, to_year):
    """
    Build the inclusive list of years between two bounds.

    :param from_year: first year (int or int-convertible)
    :param to_year: last year, included in the result
    :return: list of ints
    """
    try:
        # Log first (as before), then convert: a bad bound raises ValueError
        # after the log line and is wrapped below.
        logger.info(f"Window Data From Year {from_year} To {to_year}")
        first, last = int(from_year), int(to_year)
        return [yr for yr in range(first, last + 1)]
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on year_array() method: {ve}")
def dates_index(dates_list: list):
    """
    Map each date of a sequence to its positional index and wrap the lookup
    as a Spark UDF.

    :param dates_list: non-empty sequence of date values
    :return: Spark UDF mapping a date to its IntegerType index
    :raises AssertionError: when the sequence is empty
    """
    try:
        if not dates_list:
            raise AssertionError("Empty Parameters Array")
        position_of = {}
        for idx, value in enumerate(dates_list):
            position_of[value] = idx
        logger.info("Set Date Index")
        return udf(lambda x: position_of[x], IntegerType())
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on dates_index() method: {ve}")
@lru_cache(maxsize=60)
def sorted_date_list(df, col_collect: str):
    """
    Collect the distinct values of a date column and return them sorted.

    Bug fix: the collected field now honours ``col_collect`` instead of the
    previously hard-coded ``operation_date`` attribute, so the function works
    for any column name (Row objects support lookup by field name).

    :param df: Spark DataFrame to analyze
    :param col_collect: name of the date column to collect
    :return: sorted list of distinct date values
    """
    try:
        logger.info("Order Date List")
        distinct_rows = df.select(col_collect).distinct().collect()
        return sorted(row[col_collect] for row in distinct_rows)
    except Exception as e:
        raise OpheliaUtilitiesException(f"An error occurred on sorted_date_list() method: {e}")
def feature_pick(df):
    """
    Split the column names of a Spark DataFrame by data type.

    Columns are grouped into 'string', 'int', 'long', 'double', 'float',
    'date' and 'other' buckets according to their Spark dtype string.

    Idiom fix: the previous chain of conditional expressions used purely
    for their ``append`` side effects is replaced by an explicit
    dtype-alias lookup table; the produced buckets are identical.

    :param df: Spark DataFrame (only its ``dtypes`` attribute is used)
    :return: dict mapping bucket name -> list of column names
    """
    try:
        buckets = {'string': [], 'int': [], 'long': [], 'double': [],
                   'float': [], 'date': [], 'other': []}
        # Every accepted dtype alias mapped onto its bucket, once.
        alias = {'str': 'string', 'string': 'string',
                 'int': 'int', 'integer': 'int',
                 'bigint': 'long', 'long': 'long',
                 'double': 'double', 'float': 'float',
                 'date': 'date', 'timestamp': 'date'}
        for name, dtype in df.dtypes:
            buckets[alias.get(dtype, 'other')].append(name)
        return buckets
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on feature_pick() method: {ve}")
def __binary_helper_search(array, target, left_p, right_p):
    # Iterative equivalent of the recursive halving search: shrink the
    # [left_p, right_p] window until the target is found or the window
    # empties.  Same contract as before: returns the match index, raises
    # AssertionError("None binary pointer") when the target is absent
    # (the AssertionError is not a ValueError, so it propagates).
    try:
        while left_p <= right_p:
            middle = (left_p + right_p) // 2
            candidate = array[middle]
            if target == candidate:
                return middle
            if target < candidate:
                right_p = middle - 1
            else:
                left_p = middle + 1
        raise AssertionError("None binary pointer")
    except ValueError as ve:
        raise OpheliaUtilitiesException(f"An error occurred on __binary_helper_search() private method: {ve}")
def binary_search(array: list, target):
    """
    Use a helper binary search for O(log n) lookup in a sorted array.

    :param array: list of elements, assumed sorted in ascending order
    :param target: value to search for
    :return: int index of the target in the array
    """
    # NOTE(review): the helper raises AssertionError("None binary pointer")
    # when the target is absent — callers must be prepared to handle it.
    logger.info("Binary Find")
    return __binary_helper_search(array, target, 0, len(array) - 1)
def century_from_year(yr: int):
    """
    Return the century a given year belongs to (years 1-100 -> century 1).

    :param yr: int year
    :return: int century number
    """
    try:
        # Shift by one so exact multiples of 100 stay in the lower century.
        shifted = yr - 1
        return shifted // 100 + 1
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on century_from_year() method: {ae}")
def simple_average(series: List[float]):
    """
    Compute the arithmetic mean of a series.

    Bug fix: the previous ``@lru_cache`` decorator is removed — the annotated
    ``list`` arguments are unhashable, so every call with a list raised
    ``TypeError: unhashable type: 'list'`` before the body even ran.
    ``reduce`` is replaced by the builtin ``sum``.

    :param series: list (or tuple) of float observations
    :return: float mean
    :raises OpheliaUtilitiesException: on an empty series (division by zero)
    """
    try:
        logger.info("Compute Simple Average")
        return sum(series) / len(series)
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on simple_average() method: {ae}")
def delta_series(series: List[float]):
    """
    Identify the delta variation from a given series
    :param series: list of float observation series
    :return: float
    """
    try:
        # y: observations as ndarray; y_n: sample size
        y, y_n = np.array(series), len(series)
        y_hat = simple_average(series)
        # 2.048 is presumably a Student-t critical value (~95%, ~28 dof) — TODO confirm.
        # NOTE(review): requires y_n > 2 and non-constant y (np.var(y) != 0),
        # otherwise this divides by zero.
        return float(2.048 * np.sqrt((1 / (y_n - 2)) * (sum((y - y_hat)**2) / np.var(y))))
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on delta_series() method: {ae}")
def simple_moving_average(series: List[float], n_moving_day: int):
    """
    Compute the simple moving average (SMA) over the last n observations.

    Bug fix: the ``@lru_cache`` decorator is removed — list arguments are
    unhashable and made every list call fail with ``TypeError`` up front.

    :param series: sequence of float observations
    :param n_moving_day: window size, counted from the end of the series
    :return: float
    """
    try:
        logger.info("SMA")
        # The slice keeps only the trailing window; delegating keeps both
        # average flavours consistent.
        return simple_average(series=series[-n_moving_day:])
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on simple_moving_average() method: {ae}")
def average(series: List[float], n_moving_day=None):
    """
    Dispatch to the plain mean or the SMA depending on the window argument.

    :param series: sequence of float observations
    :param n_moving_day: optional window size; None selects the simple mean
    :return: float
    """
    try:
        windowed = n_moving_day is not None
        if windowed:
            return simple_moving_average(series=series, n_moving_day=n_moving_day)
        return simple_average(series=series)
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on average() method: {ae}")
def weight_moving_average(series: List[float], weights: List[float]):
    """
    Compute the weighted moving average (WMA) of the series tail.

    The last ``len(weights)`` observations are paired with the weights in
    order, i.e. ``weights[-1]`` applies to the most recent observation
    (same pairing as the previous implementation).

    Fixes over the previous version:
    - dropped ``@lru_cache``: list arguments are unhashable, so every call
      with the annotated list types raised ``TypeError`` before running;
    - the caller's ``weights`` list is no longer reversed in place;
    - the "weights sum to 1" check now tolerates float rounding error
      (e.g. [0.1, 0.2, 0.3, 0.4] does not sum to exactly 1.0 in binary
      floating point).

    :param series: sequence of float observations
    :param weights: weights summing (approximately) to 1
    :return: float weighted average
    :raises AssertionError: when the weights do not sum to 1
    """
    try:
        if abs(sum(weights) - 1.0) > 1e-9:
            raise AssertionError("Invalid list, sum of weights must be equal to 1")
        tail = series[-len(weights):]
        return float(sum(obs * w for obs, w in zip(tail, weights)))
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on weight_moving_average() method: {ae}")
def single_exp_smooth(series: List[float], alpha: float = 0.05):
    """
    Compute the single-exponential smoothing of a series.

    Bug fix: ``@lru_cache`` is removed — list arguments are unhashable and
    caused an up-front ``TypeError`` for the annotated input type.

    :param series: non-empty sequence of float observations
    :param alpha: data smoothing factor; 0.05 by default (try 0.5, 0.005, 0.0005)
    :return: dict with the smoothed series and its delta estimate
    """
    try:
        smoothed = [series[0]]
        for idx in range(1, len(series)):
            # Classic recurrence: blend the new observation with the
            # previous smoothed level.
            smoothed.append(alpha * series[idx] + (1 - alpha) * smoothed[idx - 1])
        return {'single_exp_smooth': smoothed, 'delta': delta_series(smoothed)}
    except ArithmeticError as ae:
        raise OpheliaUtilitiesException(f"An error occurred on single_exp_smooth() method: {ae}")
@lru_cache(maxsize=30)
def double_exp_smooth(series: List[float], alpha: float = 0.05, beta: float = 0.005):
"""
Compute double exponential smooth series with alpha data smooth and beta trend smooth coefficients
:param series: array of float observation series
:param alpha: float alpha data smooth factor 0.05 set as default, other options: 0.5, 0.005, 0.0005
:param beta: float beta trend smooth factor 0.005 set as default
:return: Python dict with series decomposition components
"""
try:
result = [series[0]]
level, | |
in kernel:
ii = (i + k) % len(self.cells)
c.add_neighbor(self.cells[ii])
    def get_vertices(self, z):
        """Collect the [x, y, z] location of every cell in this object.

        ``z`` is accepted but unused here; each vertex's z comes from the
        cell's own location.  NOTE(review): ``self.next()`` is invoked once
        per cell while iterating — presumably advancing some per-step
        parameter; confirm this per-vertex stepping is intentional.
        """
        o = []  # accumulated vertex triples
        for i, v in enumerate(self.cells):  # i is unused
            o.append([v.loc.x, v.loc.y, v.loc.z])
            self.next()
        return o
def grow(self):
for v in self.cells:
v.grow()
for v in self.cells:
v.update()
class Tip():
    """A growing tip that extends a branch through space.

    Each growth step the tip steers toward light / away from gravity,
    advances along its direction, lays down a `Slice` of surface cells
    behind it, and every `bifurc_period` steps proposes directions for
    new child tips.

    NOTE(review): although ``dna`` defaults to None, ``dna.get("branch")``
    is read unconditionally below, so a dna mapping is effectively
    required; the ``bifurcation`` tuple parameter is not used in this
    body — confirm both points against callers.
    """

    # Biology notes (auxins): hormones are what make plants grow/bend.
    # Light hitting one side of a stem spends the auxins there, slowing
    # growth on that side and turning shoots/leaves toward the light; in
    # roots auxins inhibit growth, bending them away from the light.  As
    # this tip grows it needs to drop auxins into the cells it lays down;
    # each dropped cell gets the same amount, spends it when lit (each
    # cell would literally raytrace to a light source, slowly depreciating
    # its auxin store), and may pass auxins to neighbors when the transfer
    # function allows.  Since the tip only drops a growing surface cell
    # ring, it is included as a neighbor to the cells dropped by the
    # tip's parent branch.

    def __init__(self, branch, loc, dir=(0.0, 0.0, 1.0), speed=0.3, hormones=None, data=None, bifurcation=(4, 3, 0.5, 0.618, 0.4, 0.8, 0, 0, 10), cell_res=8, start_at_0=True, start_radius=(0.01, 0.01), cell_growth=None, dna=None):
        # Fix: the former `hormones=[]` / `data={}` mutable defaults were
        # shared by every Tip instance; None sentinels give each tip its
        # own fresh containers.
        hormones = [] if hormones is None else hormones
        data = {} if data is None else data
        br = dna.get("branch")
        # Initial configuration (can be changed by factors that affect the tip)
        self.parent = branch
        self.direction = mathutils.Vector(dir)
        self.rq = self.update_q()
        self.bifurc_period = br[0].copy()  # growth steps between bifurcations/branching
        self.bifurcations = br[1].copy()  # how many new tips grow out of this
        self.biphase_offset = br[2].copy()  # radial offset angle (0.0-1.0) of successive bifurcations
        self.bifurc_sr = br[3].copy()  # initial speed ratio for bifurcated child tips
        self.bifurc_inclination = br[4].copy()  # inclination of child tips
        self.bifurc_radius_ratio = br[5].copy()  # ratio of child radius to parent's
        self.bifurc_stop = br[6].copy()  # when to stop bifurcations on this branch
        self.stop_age = br[7].copy()  # if set > 0, the branch stops growing after this age
        self.max_generation = br[8]  # bifurcation generations allowed in the entire organism
        self.speed = Param(speed) if dna is None else br[9].copy()
        self.speed_decay = Param(0.98) if dna is None else br[10].copy()  # growth-speed decay ratio per growth
        self.photolocate_ratio = Param(0.04) if dna is None else br[11].copy()
        self.geolocate_ratio = Param(0.02) if dna is None else br[12].copy()
        # With .copy() this behaves per-tip ("normal"); without it the Param
        # would be shared across tips (linked behavior).
        self.branch_growth_rate = Param(1.0) if dna is None else br[13].copy()
        self.data = data
        self.hormones = hormones  # hormones are dropped as a total of what is available
        # Slices and surface cells
        self.start_radius = start_radius
        self.cell_growth_rate = cell_growth if dna is None else dna.get("cell")[0]
        self.slice_growth_rate = Param(2.0) if dna is None else dna.get("slice")[4]
        self.branch = []  # list of laid-down Slice objects
        self.cell_res = cell_res if dna is None else dna.get("cell")[1]
        self.cur_slice = None
        # Working parameters (counters, history, etc)
        self.dna = dna
        self.cache_vertex = None
        self.loc = mathutils.Vector(loc)
        self.last_loc = self.loc
        self.phase = 0.0  # radial phase used to offset successive bifurcations
        self.generation = 0
        self.age = 0
        self.bifurc_count = 0
        # Unit vectors pointing toward gravity and toward the brightest light
        self.light_axis = mathutils.Vector((0.5, 0.5, 1.0))
        self.gravity_axis = mathutils.Vector((0.0, 0.0, -1.0))
        if start_at_0:
            self.start()

    def start(self):
        """Lay down the very first slice at the spawn location."""
        self.new_slice()

    def new_slice(self):
        """Create a new Slice at the current location and track it.

        Fix: the previous code assigned the return value of
        ``list.append()`` — which is always None — to ``self.cur_slice``;
        the freshly built Slice is now kept.
        """
        fresh = Slice(
            self.cell_res.next(),
            start_radius=self.start_radius,
            center=self.loc,
            normal=self.direction,
            rate_growth_radial=self.cell_growth_rate,
            mult_growth_radial=self.slice_growth_rate.next() * self.branch_growth_rate.value,
            dna=self.dna
        )
        self.branch.append(fresh)
        self.cur_slice = fresh

    def update_q(self):
        """Return the rotation quaternion aligned with the growth direction."""
        rq = rot_q(self.direction)
        return rq

    def can_grow(self):
        """True while no stop age is set or the tip is younger than it."""
        return self.stop_age.value == 0 or self.age < self.stop_age.value - 1

    def can_bifurcate(self):
        """True while both the per-branch and per-organism limits allow branching."""
        counter = self.bifurc_stop.value == 0 or self.bifurc_count < self.bifurc_stop.value
        gen = self.max_generation.value == 0 or self.generation < self.max_generation.value
        return counter and gen

    # Actions
    def photolocate(self):
        """Lean the growth direction toward the light axis; no-op when stopped."""
        if not self.can_grow():
            return False
        self.direction = self.direction.lerp(self.light_axis, self.photolocate_ratio.next())
        return True

    def geolocate(self):
        """Lean the growth direction away from gravity; no-op when stopped."""
        if not self.can_grow():
            return False
        self.direction = self.direction.lerp(-self.gravity_axis, self.geolocate_ratio.next())
        return True

    def grow(self):
        """Run one growth step; return child directions if a bifurcation fired."""
        # Always grow the already-laid branch slices first.
        for laid in self.branch:
            laid.grow()
        if self.can_grow():
            # Direction steering (shortcut that bypasses hormone simulation).
            self.photolocate()
            self.geolocate()
            self.last_loc = self.loc
            self.loc = self.loc + (self.direction * self.speed.next())
            self.rq = self.update_q()
            # Lay down a new slice at the fresh location.
            self.new_slice()
            self.age += 1
        return self.bifurcate()

    def bifurcate(self):
        """Every `bifurc_period` ages, emit child-tip target points (or None)."""
        if self.age % self.bifurc_period.value == 0 and self.can_bifurcate() and self.can_grow():
            vects = self.bifurcate_dir()
            self.bifurc_count += 1
            # Step every evolving parameter once per bifurcation event.
            self.bifurc_period.next()
            self.bifurcations.next()
            self.biphase_offset.next()
            self.bifurc_sr.next()
            self.bifurc_inclination.next()
            self.bifurc_radius_ratio.next()
            self.bifurc_stop.next()
            self.stop_age.next()
            self.max_generation.next()
            self.speed_decay.next()
            self.photolocate_ratio.next()
            self.geolocate_ratio.next()
            return vects
        else:
            return None

    # Util
    def bifurcate_dir(self):
        """Return world-space points that new child tips should grow toward.

        Children are spread evenly around the growth axis, offset by the
        running phase, then tilted toward the parent direction by the
        inclination factor.
        """
        v1 = self.direction
        # Number of bifurcations sets the longitudinal slice width.
        mp2 = math.pi * 2
        r = mp2 / self.bifurcations.value  # angular gap between children
        p = mp2 * self.phase  # current radial phase
        o = []
        for i in range(0, self.bifurcations.value):
            x = math.sin(r * i + p)
            y = math.cos(r * i + p)
            v2 = mathutils.Vector((x, y, 0.0))
            # Rotate v2 so that v1 is the normal of its (x, y) plane,
            # i.e. make v2 orthogonal to the direction of growth.
            v2.rotate(self.rq)
            v2 = v2.lerp(v1, self.bifurc_inclination.value)
            o.append(v2 + self.loc)
        self.phase += self.biphase_offset.value
        return o
class Shoot(Tip):
    """Above-ground growing tip.

    Shoots are positively phototropic (toward the light) and negatively
    geotropic (away from gravity).  Biologically: auxins accumulate in the
    dropped cells on the shaded / gravity side (cells share auxins with
    neighbors depending on the light each receives), and since auxins
    generate growth, those sides grow faster — bending the shoot toward
    the light.
    """

    def __init__(self, branch, loc, dir=(0.0, 0.0, 1.0), speed=0.45, hormones=None, data=None, bifurcation=(4, 2, 0.33, 0.618, 0.4, 0.8, 0, 0, 10), cell_res=8, start_at_0=True, start_radius=(0.01, 0.01), cell_growth=None, dna=None):
        # Fix: None sentinels replace the former shared mutable defaults
        # ([] / {}); fresh containers are passed down explicitly so the
        # parent never falls back to its own defaults.
        super().__init__(branch, loc, dir=dir, speed=speed,
                         hormones=[] if hormones is None else hormones,
                         data={} if data is None else data,
                         bifurcation=bifurcation, cell_res=cell_res, start_at_0=start_at_0,
                         start_radius=start_radius, cell_growth=cell_growth, dna=dna)
class Root(Tip):
    """Below-ground growing tip.

    Roots are negatively phototropic and positively geotropic: in roots,
    auxins inhibit growth, so accumulation bends them away from the light.
    """

    def __init__(self, branch, loc, dir=(0.0, 0.0, 1.0), speed=0.3, hormones=None, data=None, bifurcation=(4, 3, 0.5, 0.618, 0.4, 0.8, 0, 0, 10), cell_res=8, start_at_0=True, start_radius=(0.01, 0.01), cell_growth=None, dna=None):
        # Fix: None sentinels replace the former shared mutable defaults
        # ([] / {}); fresh containers are passed down explicitly so the
        # parent never falls back to its own defaults.
        super().__init__(branch, loc, dir=dir, speed=speed,
                         hormones=[] if hormones is None else hormones,
                         data={} if data is None else data,
                         bifurcation=bifurcation, cell_res=cell_res, start_at_0=start_at_0,
                         start_radius=start_radius, cell_growth=cell_growth, dna=dna)
# Hormonal system
class Hormone():
    """A plant growth substance (phytohormone).

    Controls morphological, physiological and biochemical responses at
    very low concentrations; acts locally, dissolving backward through
    the cells of the shaft.
    """

    # Hormone type identifiers
    AUXINS = 1010
    GIBBERELLINS = 1011
    CYTOKININS = 1012
    ETHYLENE = 1013
    ABSCISIC = 1014  # abscisic acid
    BRASSINO = 1015  # brassinosteroids
    OLIGO = 1016  # oligosaccharides
    POLYAMINES = 1017

    def __init__(self, type, volume, makeup=None):
        self.type = type  # one of the class-level identifiers above
        self.volume = volume  # remaining quantity of the hormone
        self.makeup = makeup  # optional composition details

    def use(self, volume):
        """Consume up to `volume` units; return the amount actually used.

        When the request exceeds the remaining stock, the stock is drained
        to zero and only the available amount is returned.
        """
        available = self.volume
        if available - volume < 0:
            self.volume = available - available  # drain to zero
            return available
        self.volume = available - volume
        return volume
class Auxin(Hormone):
| |
#!/usr/bin/env python
# Author: <NAME> [dkovar <at> gmail [dot] com]
# Name: analyzeMFT.py
#
# Copyright (c) 2010 <NAME>. All rights reserved.
# This software is distributed under the Common Public License 1.0
#
# Date: May 2013
#
import sys
import struct
import mftutils
import binascii
from optparse import OptionParser
import ctypes
import bitparse
def parse_record(raw_record, isDebug):
    """Decode a single raw $MFT record (1024 bytes) into a dict.

    The fixed header is decoded first; then the attribute list is walked
    from 'attr_off' until the 0xffffffff terminator, decoding each known
    attribute type into the returned dict.  Damaged records are flagged
    with 'baad' or 'corrupt' and returned immediately.

    Composite attributes use tuple keys, e.g. record['fn', n] for the
    n-th $FILE_NAME attribute.
    """
    record = {}
    record['filename'] = ''
    record['notes'] = ''
    record['ads'] = 0        # number of named $DATA streams (ADS)
    record['datacnt'] = 0    # number of $DATA attributes seen
    # NOTE(review): record['fncnt'] is read below before being set in this
    # function; presumably decodeMFTHeader() initializes it — confirm.
    decodeMFTHeader(record, raw_record)
    record_number = record['recordnum']
    if isDebug:
        print '-->Record number: %d\n\tMagic: %s Attribute offset: %d Flags: %s Size:%d' % (
            record_number, record['magic'],
            record['attr_off'], hex(int(record['flags'])), record['size'])
    # 0x44414142 == "BAAD": chkdsk marked this record as damaged
    if record['magic'] == 0x44414142:
        if isDebug:
            print "BAAD MFT Record"
        record['baad'] = True
        return record
    # 0x454c4946 == "FILE": any other magic means corruption
    if record['magic'] != 0x454c4946:
        if isDebug:
            print "Corrupt MFT Record"
        record['corrupt'] = True
        return record
    read_ptr = record['attr_off']
    # How should we preserve the multiple attributes? Do we need to preserve them all?
    while (read_ptr < 1024):
        ATRrecord = decodeATRHeader(raw_record[read_ptr:])
        if ATRrecord['type'] == 0xffffffff:  # End of attributes
            break
        # Attribute names are stored as UTF-16; nlen counts characters
        if ATRrecord['nlen'] > 0:
            bytes = raw_record[
                read_ptr + ATRrecord['name_off']:read_ptr + ATRrecord['name_off'] + ATRrecord['nlen'] * 2]
            ATRrecord['name'] = bytes.decode('utf-16').encode('utf-8')
        else:
            ATRrecord['name'] = ''
        if isDebug:
            print "Attribute type: %x Length: %d Res: %x" % (ATRrecord['type'], ATRrecord['len'], ATRrecord['res'])
        if ATRrecord['type'] == 0x10:  # Standard Information
            if isDebug:
                print "Stardard Information:\n++Type: %s Length: %d Resident: %s Name Len:%d Name Offset: %d" % \
                      (hex(int(ATRrecord['type'])), ATRrecord['len'], ATRrecord['res'], ATRrecord['nlen'],
                       ATRrecord['name_off'])
            SIrecord = decodeSIAttribute(raw_record[read_ptr + ATRrecord['soff']:], isDebug)
            record['si'] = SIrecord
            if isDebug:
                print "++CRTime: %s\n++MTime: %s\n++ATime: %s\n++EntryTime: %s" % \
                      (SIrecord['crtime'].dtstr, SIrecord['mtime'].dtstr, SIrecord['atime'].dtstr,
                       SIrecord['ctime'].dtstr)
        elif ATRrecord['type'] == 0x20:  # Attribute list
            if isDebug:
                print "Attribute list"
            if ATRrecord['res'] == 0:
                ALrecord = decodeAttributeList(raw_record[read_ptr + ATRrecord['soff']:], record)
                record['al'] = ALrecord
                if isDebug:
                    print "Name: %s" % (ALrecord['name'])
            else:
                if isDebug:
                    print "Non-resident Attribute List?"
                record['al'] = None
        elif ATRrecord['type'] == 0x30:  # File name
            if isDebug: print "File name record"
            FNrecord = decodeFNAttribute(raw_record[read_ptr + ATRrecord['soff']:], isDebug, record)
            record['fn', record['fncnt']] = FNrecord
            if isDebug: print "Name: %s (%d)" % (FNrecord['name'], record['fncnt'])
            record['fncnt'] = record['fncnt'] + 1
            if FNrecord['crtime'] != 0:
                if isDebug: print "\tCRTime: %s MTime: %s ATime: %s EntryTime: %s" % (FNrecord['crtime'].dtstr,
                                                                                     FNrecord['mtime'].dtstr,
                                                                                     FNrecord['atime'].dtstr,
                                                                                     FNrecord['ctime'].dtstr)
        elif ATRrecord['type'] == 0x40:  # Object ID
            ObjectIDRecord = decodeObjectID(raw_record[read_ptr + ATRrecord['soff']:])
            record['objid'] = ObjectIDRecord
            if isDebug: print "Object ID"
        elif ATRrecord['type'] == 0x50:  # Security descriptor
            record['sd'] = True
            if isDebug: print "Security descriptor"
        elif ATRrecord['type'] == 0x60:  # Volume name
            record['volname'] = True
            if isDebug: print "Volume name"
        elif ATRrecord['type'] == 0x70:  # Volume information
            if isDebug: print "Volume info attribute"
            VolumeInfoRecord = decodeVolumeInfo(raw_record[read_ptr + ATRrecord['soff']:], isDebug)
            record['volinfo'] = VolumeInfoRecord
        elif ATRrecord['type'] == 0x80:  # Data
            # A named $DATA attribute is an alternate data stream (ADS)
            if ATRrecord['name'] != '':
                record['data_name', record['ads']] = ATRrecord['name']
                record['ads'] = record['ads'] + 1
            if ATRrecord['res'] == 0:
                DataAttribute = decodeDataAttribute(raw_record[read_ptr + ATRrecord['soff']:], ATRrecord)
            else:
                # Non-resident data: keep only the data-run bookkeeping
                DataAttribute = {}
                DataAttribute['ndataruns'] = ATRrecord['ndataruns']
                DataAttribute['dataruns'] = ATRrecord['dataruns']
                DataAttribute['drunerror'] = ATRrecord['drunerror']
            record['data', record['datacnt']] = DataAttribute
            record['datacnt'] = record['datacnt'] + 1
            if isDebug: print "Data attribute"
        elif ATRrecord['type'] == 0x90:  # Index root
            record['indexroot'] = True
            if isDebug: print "Index root"
        elif ATRrecord['type'] == 0xA0:  # Index allocation
            record['indexallocation'] = True
            if isDebug: print "Index allocation"
        elif ATRrecord['type'] == 0xB0:  # Bitmap
            record['bitmap'] = True
            if isDebug: print "Bitmap"
        elif ATRrecord['type'] == 0xC0:  # Reparse point
            record['reparsepoint'] = True
            if isDebug: print "Reparse point"
        elif ATRrecord['type'] == 0xD0:  # EA Information
            record['eainfo'] = True
            if isDebug: print "EA Information"
        elif ATRrecord['type'] == 0xE0:  # EA
            record['ea'] = True
            if isDebug: print "EA"
        elif ATRrecord['type'] == 0xF0:  # Property set
            record['propertyset'] = True
            if isDebug: print "Property set"
        elif ATRrecord['type'] == 0x100:  # Logged utility stream
            record['loggedutility'] = True
            if isDebug: print "Logged utility stream"
        else:
            if isDebug: print "Found an unknown attribute"
        # A zero/negative attribute length would loop forever; bail out
        if ATRrecord['len'] > 0:
            read_ptr = read_ptr + ATRrecord['len']
        else:
            if isDebug: print "ATRrecord->len < 0, exiting loop"
            break
    return record
def mft_to_csv(record, ret_header):
    """Render one parsed MFT record as a list of CSV column values.

    :param record: dict produced by parse_record()
    :param ret_header: when True, ignore `record` and return the header row
    :return: list of column values

    Fixes over the previous version:
    - the 'Reparse Point' column now tests the 'reparsepoint' key that
      parse_record() actually sets (it used to test 'reparse', so the
      column was always 'False');
    - with exactly four $FILE_NAME attributes the padding step no longer
      re-appends a stale temporary list;
    - removed unused local buffers.
    """
    if ret_header:
        # Write headers
        csv_string = ['Record Number', 'Good', 'Active', 'Record type',
                      # '$Logfile Seq. Num.',
                      'Sequence Number', 'Parent File Rec. #', 'Parent File Rec. Seq. #',
                      'Filename #1', 'Std Info Creation date', 'Std Info Modification date',
                      'Std Info Access date', 'Std Info Entry date', 'FN Info Creation date',
                      'FN Info Modification date', 'FN Info Access date', 'FN Info Entry date',
                      'Object ID', 'Birth Volume ID', 'Birth Object ID', 'Birth Domain ID',
                      'Filename #2', 'FN Info Creation date', 'FN Info Modify date',
                      'FN Info Access date', 'FN Info Entry date', 'Filename #3', 'FN Info Creation date',
                      'FN Info Modify date', 'FN Info Access date', 'FN Info Entry date', 'Filename #4',
                      'FN Info Creation date', 'FN Info Modify date', 'FN Info Access date',
                      'FN Info Entry date', 'Standard Information', 'Attribute List', 'Filename',
                      'Object ID', 'Volume Name', 'Volume Info', 'Data', 'Index Root',
                      'Index Allocation', 'Bitmap', 'Reparse Point', 'EA Information', 'EA',
                      'Property Set', 'Logged Utility Stream', 'Log/Notes', 'STF FN Shift', 'uSec Zero', 'ADS']
        return csv_string
    if 'baad' in record:
        csv_string = ["%s" % record['recordnum'], "BAAD MFT Record"]
        return csv_string
    csv_string = [record['recordnum'], decodeMFTmagic(record), decodeMFTisactive(record),
                  decodeMFTrecordtype(record)]
    if 'corrupt' in record:
        tmp_string = ["%s" % record['recordnum'], "Corrupt", "Corrupt", "Corrupt MFT Record"]
        csv_string.extend(tmp_string)
        return csv_string
    # tmp_string = ["%d" % record['lsn']]
    # csv_string.extend(tmp_string)
    tmp_string = ["%d" % record['seq']]
    csv_string.extend(tmp_string)
    # Parent record reference (taken from the first $FILE_NAME attribute)
    if record['fncnt'] > 0:
        csv_string.extend([str(record['fn', 0]['par_ref']), str(record['fn', 0]['par_seq'])])
    else:
        csv_string.extend(['NoParent', 'NoParent'])
    # Filename #1 plus the SI and first-FN timestamp columns
    if record['fncnt'] > 0 and 'si' in record:
        filenameBuffer = [record['filename'], str(record['si']['crtime'].dtstr),
                          record['si']['mtime'].dtstr, record['si']['atime'].dtstr, record['si']['ctime'].dtstr,
                          record['fn', 0]['crtime'].dtstr, record['fn', 0]['mtime'].dtstr,
                          record['fn', 0]['atime'].dtstr, record['fn', 0]['ctime'].dtstr]
    elif 'si' in record:
        filenameBuffer = ['NoFNRecord', str(record['si']['crtime'].dtstr),
                          record['si']['mtime'].dtstr, record['si']['atime'].dtstr, record['si']['ctime'].dtstr,
                          'NoFNRecord', 'NoFNRecord', 'NoFNRecord', 'NoFNRecord']
    else:
        filenameBuffer = ['NoFNRecord', 'NoSIRecord', 'NoSIRecord', 'NoSIRecord', 'NoSIRecord',
                          'NoFNRecord', 'NoFNRecord', 'NoFNRecord', 'NoFNRecord']
    csv_string.extend(filenameBuffer)
    # Object ID columns
    if 'objid' in record:
        objidBuffer = [record['objid']['objid'], record['objid']['orig_volid'],
                       record['objid']['orig_objid'], record['objid']['orig_domid']]
    else:
        objidBuffer = ['', '', '', '']
    csv_string.extend(objidBuffer)
    # Filenames #2..#4.  If this goes above four FN attributes, the number
    # of columns will exceed the headers.
    for i in range(1, record['fncnt']):
        filenameBuffer = [record['fn', i]['name'], record['fn', i]['crtime'].dtstr, record['fn', i]['mtime'].dtstr,
                          record['fn', i]['atime'].dtstr, record['fn', i]['ctime'].dtstr]
        csv_string.extend(filenameBuffer)
    # Pad out the remaining FN columns (the header reserves four slots)
    if record['fncnt'] < 2:
        tmp_string = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
    elif record['fncnt'] == 2:
        tmp_string = ['', '', '', '', '', '', '', '', '', '']
    elif record['fncnt'] == 3:
        tmp_string = ['', '', '', '', '']
    else:
        # Fix: four (or more) filenames need no padding; previously
        # tmp_string kept a stale value here and was appended again.
        tmp_string = []
    csv_string.extend(tmp_string)
    # Presence flags for each attribute type
    csv_string.append('True' if 'si' in record else 'False')
    csv_string.append('True' if 'al' in record else 'False')
    csv_string.append('True' if record['fncnt'] > 0 else 'False')
    csv_string.append('True' if 'objid' in record else 'False')
    csv_string.append('True' if 'volname' in record else 'False')
    csv_string.append('True' if 'volinfo' in record else 'False')
    csv_string.append('True' if 'data' in record else 'False')
    csv_string.append('True' if 'indexroot' in record else 'False')
    csv_string.append('True' if 'indexallocation' in record else 'False')
    csv_string.append('True' if 'bitmap' in record else 'False')
    # Fix: parse_record() sets 'reparsepoint', not 'reparse'
    csv_string.append('True' if 'reparsepoint' in record else 'False')
    csv_string.append('True' if 'eainfo' in record else 'False')
    csv_string.append('True' if 'ea' in record else 'False')
    csv_string.append('True' if 'propertyset' in record else 'False')
    csv_string.append('True' if 'loggedutility' in record else 'False')
    if 'notes' in record:  # Log of abnormal activity related to this record
        csv_string.append(record['notes'])
    else:
        csv_string.append('None')
        record['notes'] = ''
    if 'stf-fn-shift' in record:
        csv_string.append('Y')
    else:
        csv_string.append('N')
    if 'usec-zero' in record:
        csv_string.append('Y')
    else:
        csv_string.append('N')
    if record['ads'] > 0:
        csv_string.append('Y')
    else:
        csv_string.append('N')
    return csv_string
# MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime
def mft_to_body(record, full, std):
' Return a MFT record in bodyfile format'
# Add option to use STD_INFO
if record['fncnt'] > 0:
if full == True: # Use full path
name = record['filename']
else:
name = record['fn', 0]['name']
if std == True: # Use STD_INFO
rec_bodyfile = ("%s|%s|%s|%s|%s|%s|%s|%d|%d|%d|%d\n" %
('0', name, '0', '0', '0', '0',
int(record['fn', 0]['real_fsize']),
int(record['si']['atime'].unixtime), # was str ....
int(record['si']['mtime'].unixtime),
int(record['si']['ctime'].unixtime),
int(record['si']['ctime'].unixtime)))
else: # Use FN
rec_bodyfile = ("%s|%s|%s|%s|%s|%s|%s|%d|%d|%d|%d\n" %
('0', name, '0', '0', '0', '0',
int(record['fn', 0]['real_fsize']),
int(record['fn', 0]['atime'].unixtime),
int(record['fn', 0]['mtime'].unixtime),
int(record['fn', 0]['ctime'].unixtime),
int(record['fn', 0]['crtime'].unixtime)))
else:
if 'si' in record:
rec_bodyfile = ("%s|%s|%s|%s|%s|%s|%s|%d|%d|%d|%d\n" %
('0', 'No | |
<gh_stars>0
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
import numbers
import warnings
import numpy as np
from scipy import sparse
from .. import get_config as _get_config
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.fixes import _argmax, _object_dtype_isnan
from ..utils.validation import check_is_fitted
from .base import _transform_selected
from .label import _encode, _encode_check_unknown
range = six.moves.range
__all__ = [
'OneHotEncoder',
'OrdinalEncoder'
]
class _BaseEncoder(BaseEstimator, TransformerMixin):
    """
    Base class for encoders that includes the code to categorize and
    transform the input features.
    """

    def _check_X(self, X):
        """
        Perform custom check_array:
        - convert list of strings to object dtype
        - check for missing values for object dtype data (check_array does
          not do that)

        Returns the validated 2D array of shape (n_samples, n_features).
        """
        X_temp = check_array(X, dtype=None)
        # A plain list of strings comes back from check_array as a
        # fixed-width unicode array; re-validate with object dtype so
        # string categories are kept untruncated.
        if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
            X = check_array(X, dtype=np.object)
        else:
            X = X_temp

        # check_array does not flag NaN in object arrays; do it here
        # unless the user globally opted out via set_config(assume_finite).
        if X.dtype == np.dtype('object'):
            if not _get_config()['assume_finite']:
                if _object_dtype_isnan(X).any():
                    raise ValueError("Input contains NaN")

        return X

    def _fit(self, X, handle_unknown='error'):
        """Determine ``self.categories_`` for each feature of X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        handle_unknown : 'error' or 'ignore'
            With 'error', user-provided categories are validated against
            the data and unknown values raise ValueError.
        """
        X = self._check_X(X)

        n_samples, n_features = X.shape

        if self._categories != 'auto':
            # User-provided numerical categories must be pre-sorted; the
            # encoding step relies on sorted order.
            if X.dtype != object:
                for cats in self._categories:
                    if not np.all(np.sort(cats) == np.array(cats)):
                        raise ValueError("Unsorted categories are not "
                                         "supported for numerical categories")
            if len(self._categories) != n_features:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")

        self.categories_ = []

        for i in range(n_features):
            Xi = X[:, i]
            if self._categories == 'auto':
                cats = _encode(Xi)
            else:
                cats = np.array(self._categories[i], dtype=X.dtype)
                if handle_unknown == 'error':
                    diff = _encode_check_unknown(Xi, cats)
                    if diff:
                        msg = ("Found unknown categories {0} in column {1}"
                               " during fit".format(diff, i))
                        raise ValueError(msg)
            self.categories_.append(cats)

    def _transform(self, X, handle_unknown='error'):
        """Encode X as per-feature integer category codes.

        Returns
        -------
        X_int : int array, shape (n_samples, n_features)
            Index of each value within ``self.categories_[i]``.
        X_mask : bool array, same shape
            False where a value was unknown (only possible when
            ``handle_unknown='ignore'``); callers drop those entries.
        """
        X = self._check_X(X)

        _, n_features = X.shape
        X_int = np.zeros_like(X, dtype=np.int)
        X_mask = np.ones_like(X, dtype=np.bool)

        for i in range(n_features):
            Xi = X[:, i]
            diff, valid_mask = _encode_check_unknown(Xi, self.categories_[i],
                                                     return_mask=True)

            if not np.all(valid_mask):
                if handle_unknown == 'error':
                    msg = ("Found unknown categories {0} in column {1}"
                           " during transform".format(diff, i))
                    raise ValueError(msg)
                else:
                    # Set the problematic rows to an acceptable value and
                    # continue. The rows are marked in `X_mask` and will be
                    # removed later.
                    X_mask[:, i] = valid_mask
                    # Copy before mutating so the caller's array is untouched.
                    Xi = Xi.copy()
                    Xi[~valid_mask] = self.categories_[i][0]
            _, encoded = _encode(Xi, self.categories_[i], encode=True)
            X_int[:, i] = encoded

        return X_int, X_mask
class OneHotEncoder(_BaseEncoder):
"""Encode categorical integer features as a one-hot numeric array.
The input to this transformer should be an array-like of integers or
strings, denoting the values taken on by categorical (discrete) features.
The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
encoding scheme. This creates a binary column for each category and
returns a sparse matrix or dense array.
By default, the encoder derives the categories based on the unique values
in each feature. Alternatively, you can also specify the `categories`
manually.
The OneHotEncoder previously assumed that the input features take on
values in the range [0, max(values)). This behaviour is deprecated.
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
categories : 'auto' or a list of lists/arrays of values, default='auto'.
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories should not mix strings and numeric
values within a single feature, and should be sorted in case of
numeric values.
The used categories can be found in the ``categories_`` attribute.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
dtype : number type, default=np.float
Desired dtype of output.
handle_unknown : 'error' or 'ignore', default='error'.
Whether to raise an error or ignore if an unknown categorical feature
is present during transform (default is to raise). When this parameter
is set to 'ignore' and an unknown category is encountered during
transform, the resulting one-hot encoded columns for this feature
will be all zeros. In the inverse transform, an unknown category
will be denoted as None.
n_values : 'auto', int or array of ints, default='auto'
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
.. deprecated:: 0.20
The `n_values` keyword was deprecated in version 0.20 and will
be removed in 0.22. Use `categories` instead.
categorical_features : 'all' or array of indices or mask, default='all'
Specify what features are treated as categorical.
- 'all': All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
.. deprecated:: 0.20
The `categorical_features` keyword was deprecated in version
0.20 and will be removed in 0.22.
You can use the ``ColumnTransformer`` instead.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting
(in order of the features in X and corresponding with the output
of ``transform``).
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
.. deprecated:: 0.20
The ``active_features_`` attribute was deprecated in version
0.20 and will be removed in 0.22.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by ``active_features_`` afterwards)
.. deprecated:: 0.20
The ``feature_indices_`` attribute was deprecated in version
0.20 and will be removed in 0.22.
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
.. deprecated:: 0.20
The ``n_values_`` attribute was deprecated in version
0.20 and will be removed in 0.22.
Examples
--------
Given a dataset with two features, we let the encoder find the unique
values per feature and transform the data to a binary one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder(handle_unknown='ignore')
>>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
>>> enc.fit(X)
... # doctest: +ELLIPSIS
OneHotEncoder(categorical_features=None, categories=None,
dtype=<... 'numpy.float64'>, handle_unknown='ignore',
n_values=None, sparse=True)
>>> enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
array([[1., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.]])
>>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
array([['Male', 1],
[None, 2]], dtype=object)
>>> enc.get_feature_names()
array(['x0_Female', 'x0_Male', 'x1_1', 'x1_2', 'x1_3'], dtype=object)
See also
--------
sklearn.preprocessing.OrdinalEncoder : performs an ordinal (integer)
encoding of the categorical features.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
"""
def __init__(self, n_values=None, categorical_features=None,
categories=None, sparse=True, dtype=np.float64,
handle_unknown='error'):
self.categories = categories
self.sparse = sparse
self.dtype = dtype
self.handle_unknown = handle_unknown
self.n_values = n_values
self.categorical_features = categorical_features
# Deprecated attributes
@property
@deprecated("The ``active_features_`` attribute was deprecated in version "
"0.20 and will be removed 0.22.")
def active_features_(self):
check_is_fitted(self, 'categories_')
return self._active_features_
@property
@deprecated("The ``feature_indices_`` attribute was deprecated in version "
"0.20 and will be removed 0.22.")
def feature_indices_(self):
check_is_fitted(self, 'categories_')
return self._feature_indices_
@property
@deprecated("The ``n_values_`` attribute was deprecated in version "
"0.20 and will be removed 0.22.")
def n_values_(self):
check_is_fitted(self, 'categories_')
return self._n_values_
def _handle_deprecations(self, X):
# internal version of the attributes to handle deprecations
self._categories = getattr(self, '_categories', None)
self._categorical_features = getattr(self, '_categorical_features',
None)
# user manually set the categories or second fit -> never legacy mode
if self.categories is not None or self._categories is not None:
self._legacy_mode = False
if self.categories | |
upstream for all Jobs currently queued in export_dict
upstream_jobs = []
for export_job in self.export_dict['jobs']:
# get lineage
# TODO: replace with new upstream lineage method that's coming
lineage = export_job.get_lineage()
# loop through nodes and add to set
for lineage_job_dict in lineage['nodes']:
upstream_jobs.append(Job.objects.get(pk=int(lineage_job_dict['id'])))
# update set with upstream
self.export_dict['jobs'].update(upstream_jobs)
# topographically sort all queued export jobs, and write to manifest
self.export_dict['jobs'] = Job._topographic_sort_jobs(self.export_dict['jobs'])
self.export_manifest['jobs'] = [job.id for job in self.export_dict['jobs']]
def _collect_related_components(self):
    '''
    Method to collect related components based on self.export_dict['jobs'],
    and items from self.

    All operate over self.export_dict['jobs'], updating other sections of
    self.export_dict. Side effects: writes Mongo/ElasticSearch dumps to
    sub-directories of self.export_path via external tools (mongoexport,
    elasticdump).

    TODO:
        - these would benefit for more error and existence checking
        - even if dependencies are not found, exports should continue
    '''

    ###################################
    # ORGANIZATIONS and RECORD GROUPS
    ###################################

    # extend self.export_dict['orgs'] and self.export_dict['record_groups']
    for job in self.export_dict['jobs']:
        self.export_dict['orgs'].add(job.record_group.organization)
        self.export_dict['record_groups'].add(job.record_group)

    ############################
    # JOBS: Job Input
    ############################

    # get all related Job Inputs; both ends of each link must already be
    # queued so the relationship can be re-created on import
    job_inputs = JobInput.objects.filter(
        job__in=self.export_dict['jobs'],
        input_job__in=self.export_dict['jobs'])

    # write to serialize set
    self.export_dict['job_inputs'].update(job_inputs)

    ############################
    # JOBS: Job Validation
    ############################

    # get all related Job Validations
    job_validations = JobValidation.objects.filter(job__in=self.export_dict['jobs'])

    # write to serialize set
    self.export_dict['job_validations'].update(job_validations)

    ############################
    # TRANSFORMATION SCENARIOS
    ############################

    # loop through Jobs, looking for Transformation Scenarios
    for job in self.export_dict['jobs']:

        # check job details for transformation used
        if 'transformation' in job.job_details_dict.keys():
            try:
                for trans in job.job_details_dict['transformation']['scenarios']:
                    self.export_dict['transformations'].add(Transformation.objects.get(pk=int(trans['id'])))
            except Exception as err:
                # missing dependencies should not abort the export
                LOGGER.warning('Could not export Transformations for job %s: %s', job, str(err))

    ############################
    # VALIDATION SCENARIOS
    ############################

    # loop through Jobs, looking for Validations applied Scenarios
    for job in self.export_dict['jobs']:

        # check for JobValidation instances
        jvs = JobValidation.objects.filter(job=job)

        # loop through and add to set
        for job_validation in jvs:
            self.export_dict['validations'].add(job_validation.validation_scenario)

    ############################
    # OAI ENDPOINTS
    ############################

    # loop through Jobs, looking for OAI endpoints that need exporting
    for job in self.export_dict['jobs']:

        if job.job_type == 'HarvestOAIJob':
            try:
                # read OAI endpoint from params
                self.export_dict['oai_endpoints'].add(OAIEndpoint.objects.get(pk=job.job_details_dict['oai_params']['id']))
            except Exception as err:
                LOGGER.warning('Could not export OAIEndpoint for job %s: %s', job, str(err))

    ############################
    # RITS
    ############################

    # loop through Jobs, looking for RITS Scenarios
    for job in self.export_dict['jobs']:

        # check job details for rits used
        if 'rits' in job.job_details_dict.keys() and job.job_details_dict['rits'] != None:
            try:
                self.export_dict['rits'].add(RecordIdentifierTransformation.objects.get(pk=(job.job_details_dict['rits'])))
            except Exception as err:
                LOGGER.warning('Could not export Record Identifier Transformation Scenario for job %s: %s', job, str(err))

    ############################
    # DBDD
    ############################

    # loop through Jobs, looking for DBDD used
    for job in self.export_dict['jobs']:

        # check job details for DBDD used
        if 'dbdm' in job.job_details_dict.keys() and job.job_details_dict['dbdm']['dbdd'] != None:

            LOGGER.debug('attempting to export dbdd_id %s for %s', job.job_details_dict['dbdm']['dbdd'], job)

            try:
                # get dbdd
                dbdd = DPLABulkDataDownload.objects.get(pk=(job.job_details_dict['dbdm']['dbdd']))

                # add to export_dict
                self.export_dict['dbdd'].add(dbdd)

                # export DBDD index from ElasticSearch
                # prepare dbdd export dir
                dbdd_export_path = '%s/dbdd' % self.export_path
                if not os.path.isdir(dbdd_export_path):
                    os.mkdir(dbdd_export_path)

                # build command list
                # NOTE(review): command strings interpolate settings/ids
                # unquoted; assumed safe because values are internal —
                # confirm before exposing to user input
                cmd = [
                    "elasticdump",
                    "--input=http://%s:9200/%s" % (settings.ES_HOST, dbdd.es_index),
                    "--output=%(dbdd_export_path)s/dbdd%(dbdd_id)s.json" % {'dbdd_export_path':dbdd_export_path, 'dbdd_id':dbdd.id},
                    "--type=data",
                    "--ignore-errors",
                    "--noRefresh"
                ]

                LOGGER.debug("elasticdump cmd: %s", cmd)

                # run cmd
                os.system(" ".join(cmd))

            except Exception as err:
                LOGGER.debug('could not export DBDD for job %s: %s', job, str(err))

    ############################
    # JOB RECORDS (Mongo)
    ############################

    # prepare records export dir
    record_exports_path = '%s/record_exports' % self.export_path
    os.mkdir(record_exports_path)

    # loop through jobs and export
    for job in self.export_dict['jobs']:

        # prepare command
        cmd = 'mongoexport --host %(mongo_host)s:27017 --db combine --collection record --out %(record_exports_path)s/j%(job_id)s_mongo_records.json --type=json -v --query \'{"job_id":%(job_id)s}\'' % {
            'job_id':job.id,
            'record_exports_path':record_exports_path,
            'mongo_host':settings.MONGO_HOST
        }

        LOGGER.debug("mongoexport cmd: %s", cmd)

        # run
        os.system(cmd)

    ############################
    # JOB VALIDATIONS (Mongo)
    ############################

    # prepare records export dir
    validation_exports_path = '%s/validation_exports' % self.export_path
    os.mkdir(validation_exports_path)

    # loop through jobs and export
    for job in self.export_dict['jobs']:

        # prepare command
        cmd = 'mongoexport --host %(mongo_host)s:27017 --db combine --collection record_validation --out %(validation_exports_path)s/j%(job_id)s_mongo_validations.json --type=json -v --query \'{"job_id":%(job_id)s}\'' % {
            'job_id':job.id,
            'validation_exports_path':validation_exports_path,
            'mongo_host':settings.MONGO_HOST
        }

        LOGGER.debug("mongoexport cmd: %s", cmd)

        # run
        os.system(cmd)

    ############################
    # JOB MAPPED FIELDS (ES)
    ############################
    '''
    Consider: elasticdump
    '''

    # prepare records export dir
    es_export_path = '%s/mapped_fields_exports' % self.export_path
    os.mkdir(es_export_path)

    # loop through jobs and export
    for job in self.export_dict['jobs']:

        # build command list
        cmd = [
            "elasticdump",
            "--input=http://%s:9200/j%s" % (settings.ES_HOST, job.id),
            "--output=%(es_export_path)s/j%(job_id)s_mapped_fields.json" % {'es_export_path':es_export_path, 'job_id':job.id},
            "--type=data",
            "--sourceOnly",
            "--ignore-errors",
            "--noRefresh"
        ]

        LOGGER.debug("elasticdump cmd: %s", cmd)

        # run cmd
        os.system(" ".join(cmd))
def _sort_discrete_config_scenarios(self, config_scenarios):
    '''
    Sort passed configuration scenarios into the matching buckets of
    self.export_dict for eventual serialization.

    Buckets populated:
        'validations', 'transformations', 'oai_endpoints', 'rits',
        'field_mapper_configs', 'dbdd'

    Args:
        config_scenarios (list): model instances, or strings of the form
            "<bucket_prefix>|<pk>" which are resolved to instances here
    '''

    LOGGER.debug('sorting passed discrete configuration scenarios')

    # model class --> export_dict bucket name
    bucket_by_model = {
        ValidationScenario: 'validations',
        Transformation: 'transformations',
        OAIEndpoint: 'oai_endpoints',
        RecordIdentifierTransformation: 'rits',
        FieldMapper: 'field_mapper_configs',
        DPLABulkDataDownload: 'dbdd',
    }

    # inverse mapping: bucket prefix --> model class, for id-string lookups
    model_by_prefix = dict((bucket, model) for model, bucket in bucket_by_model.items())

    for scenario in config_scenarios:

        LOGGER.debug('adding to export_dict for serialization: %s', scenario)

        # prefixed id strings are swapped for the actual model instance
        if isinstance(scenario, str) and '|' in scenario:
            prefix, raw_pk = scenario.split('|')
            scenario = model_by_prefix[prefix].objects.get(pk=int(raw_pk))

        # slot the instance into the bucket for its model type
        self.export_dict[bucket_by_model[type(scenario)]].add(scenario)
def package_export(self):
    '''
    Method to serialize model instances, and combine with serializations
    already on disk.

    Returns:
        (str): final export path — the export directory, or the archive
        file when self.compress is set
    '''

    # serialize Django model instances
    with open('%s/django_objects.json' % self.export_path, 'w') as out_file:

        # combine all model instances, across model types
        to_serialize = []
        for _key, val in self.export_dict.items():
            to_serialize.extend(val)

        # write as single JSON file (Django serializers handle model fields)
        out_file.write(serializers.serialize('json', to_serialize))

    # finalize export_manifest (must run after all export_dict updates)
    self._finalize_export_manifest()

    # write export_manifest
    with open('%s/export_manifest.json' % self.export_path, 'w') as out_file:
        out_file.write(json.dumps(self.export_manifest))

    # if compressing, zip up directory, and remove originals after archive created
    if self.compress:

        LOGGER.debug("compressing exported state at %s", self.export_path)

        # establish output archive file (make_archive appends the extension)
        export_filename = '%s/%s' % (settings.STATEIO_EXPORT_DIR, self.export_manifest['export_id'])

        # use shutil to zip up
        compress_stime = time.time()
        new_export_path = shutil.make_archive(
            export_filename,
            self.compression_format,
            settings.STATEIO_EXPORT_DIR,
            self.export_manifest['export_id']
        )
        LOGGER.debug('archive %s created in %ss', new_export_path, (time.time() - compress_stime))

        # remove originals
        shutil.rmtree(self.export_path)

        # update export path and manifest to point at the archive
        self.export_path = new_export_path
        self.export_manifest['export_path'] = self.export_path

    # return export_path
    return self.export_path
def _finalize_export_manifest(self):
    '''
    Finalize export_manifest before writing it to the export:
    record a human-meaningful {name, id} entry for every exported Django
    instance, collect associated Published Subsets, and store a total count.
    '''

    # export types surfaced in the manifest, in display order
    export_types = [
        'jobs',
        'record_groups',
        'orgs',
        'dbdd',
        'oai_endpoints',
        'rits',
        'transformations',
        'validations',
    ]
    self.export_manifest['exports'] = {export_type: [] for export_type in export_types}

    # walk every queued instance of every type, appending manifest entries
    total_exports = 0
    for export_type in export_types:
        for instance in self.export_dict[export_type]:
            LOGGER.debug('writing %s to export_manifest', instance)
            total_exports += 1
            self.export_manifest['exports'][export_type].append({
                'name': instance.name,
                'id': instance.id
            })

    # write Published Subsets to export manifest
    self._collect_published_subsets()

    # write count to exports
    self.export_manifest['exports']['count'] = total_exports
def _collect_published_subsets(self):
    '''
    Include associated Published Subsets with the export: subsets that
    accept non-set Records (when any queued Job lacks a publish_set_id),
    plus subsets matching each published Job's publish_set_id. The final
    list is deduplicated with Mongo '_id' keys stripped.
    '''

    self.export_manifest['published_subsets'] = []
    collected = self.export_manifest['published_subsets']

    # subsets that include non-set Records apply when at least one queued
    # Job has no publish_set_id
    jobs_without_set = [job for job in self.export_dict['jobs'] if job.publish_set_id == '']
    open_subsets = [subset for subset in PublishedRecords.get_subsets() if subset['include_non_set_records']]
    if jobs_without_set and open_subsets:
        collected.extend(open_subsets)

    # for each published Job with a publish_set_id, pull matching subsets
    for job in self.export_dict['jobs']:
        if job.published and job.publish_set_id != '':
            collected.extend(PublishedRecords.get_subsets(includes_publish_set_id=job.publish_set_id))

    # dedupe while preserving order; drop '_id' first so equality is by content
    deduped = []
    for subset in collected:
        subset.pop('_id')
        if subset not in deduped:
            deduped.append(subset)
    self.export_manifest['published_subsets'] = deduped
def import_state(
self,
export_path,
import_name=None,
load_only=False,
import_records=True,
stateio_id=None
):
'''
Import exported state
Args:
export_path (str): location on disk of unzipped export directory
import_name (str): Human name for import task
load_only (bool): If True, will only parse export but will not import anything
import_records (bool): If True, will import Mongo and ElasticSearch records
stateio_id (int): ID of pre-existing StateIO object
Returns:
'''
#debug
import_stime = time.time()
self.initialize_import_manifest(export_path, import_name)
# init/update associated StateIO instance
update_dict = {
'stateio_type':'import',
'name':self.import_manifest['import_name'],
'import_id':self.import_manifest['import_id'],
'export_path':self.import_manifest['export_path'],
'import_manifest':{k:v for k, v in self.import_manifest.items() if k not in ['pk_hash', 'export_manifest']},
'status':'running'
}
if stateio_id == None:
LOGGER.debug('initializing StateIO object')
self.stateio = StateIO(**update_dict)
else:
LOGGER.debug('retrieving and updating StateIO object')
self.stateio = StateIO.objects.get(id=stateio_id)
self.stateio.update(**update_dict)
self.stateio.reload()
# save
self.stateio.save()
# load state, deserializing and export_manifest
self._load_state(export_path)
# if not load only, continue
if not load_only:
# load configuration dependencies
self._import_config_instances()
# | |
# <reponame>byung-u/JP_news
#!/usr/bin/env python3
# import datetime
import newspaper
import os
import re

from bs4 import BeautifulSoup
from collections import Counter
from datetime import datetime
# from datetime import datetime, timedelta
from itertools import count
from newspaper import Article
from random import choice
from requests import get, codes
from requests.exceptions import RequestException
from selenium import webdriver
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko ) '
'Chrome/19.0.1084.46 Safari/536.5'),
('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/ 19.0.1084.46'
'Safari/536.5'), )
chromedriver_path = os.environ.get('CHROMEDRIVER_PATH')
now = datetime.now()
# now = datetime.now() - timedelta(days=1)
def get_news_article_info(url):
    """Download and NLP-parse *url* with newspaper.

    Returns a (keywords, title, summary) tuple, or None when the article
    cannot be parsed or its text cannot be encoded.
    """
    article = Article(url)
    article.download()
    try:
        article.parse()
    except (newspaper.article.ArticleException, UnicodeEncodeError):
        return None
    article.nlp()
    return article.keywords, article.title, article.summary
def check_valid_string(text):
    """Trim *text* and normalize it for display: drop single and double
    quotes, and turn '·' separators into commas."""
    cleanup = {ord("'"): None, ord('"'): None, ord('·'): ','}
    return text.strip().translate(cleanup)
def match_soup_class(target, mode='class'):
    """Build a BeautifulSoup tag filter.

    The returned predicate is True only when every value in *target*
    appears in the tag's *mode* attribute (default: CSS classes).
    """
    def _predicate(tag):
        present = tag.get(mode, [])
        for wanted in target:
            if wanted not in present:
                return False
        return True
    return _predicate
def request_and_get(url):
    """GET *url* with a random desktop User-Agent.

    Returns:
        requests.Response on success, None on a non-OK status or a
        request failure. (Previously returned False on failure, which
        silently defeated callers' ``if r is None`` checks; None is
        equally falsy and makes those checks work.)
    """
    try:
        r = get(url, headers={'User-Agent': choice(USER_AGENTS)})
        if r.status_code != codes.ok:
            # %-placeholders must be applied with the % operator; they were
            # previously passed as extra print() arguments and never formatted
            print('[%s] request error, code=%d' % (url, r.status_code))
            return None
        return r
    except (TypeError, RequestException):
        # RequestException covers connection/timeout errors that the old
        # TypeError-only clause never actually caught
        print('[%s] connect fail' % url)
        return None
def realestate_molit(keywords_list):
    """Print today's MOLIT (국토교통부) press releases and extend
    *keywords_list* with data extracted from each article.

    Args:
        keywords_list (list): accumulator, mutated in place.
    """
    cnt = 0
    r = request_and_get('http://www.molit.go.kr/USR/NEWS/m_71/lst.jsp')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d-%02d-%02d' % (now.year, now.month, now.day)
    soup = BeautifulSoup(r.text, 'html.parser')
    for tbody in soup.find_all('tbody'):
        for tr in tbody.find_all('tr'):
            # default avoids NameError when a row has fewer than 4 cells
            article_date = ''
            for idx, td in enumerate(tr.find_all('td')):
                if idx == 3:  # 4th cell holds the post date
                    article_date = td.text
                    break
            try:
                tr.a['href']
            except TypeError:
                # row without a link
                continue
            if not article_date.startswith(today):
                continue
            if cnt == 0:
                print('\n📰 국토교통부 보도자료')
            cnt += 1
            href = 'http://www.molit.go.kr/USR/NEWS/m_71/%s' % tr.a['href']
            print(tr.a.text.strip())
            print(href)
            # get_news_article_info returns a tuple or None on parse failure;
            # extending with None raised TypeError before
            keywords = get_news_article_info(href)
            if keywords:
                keywords_list.extend(keywords)
def realestate_gyunghyang(keywords_list):
    """Print today's Kyunghyang (경향신문) real-estate articles and extend
    *keywords_list* with data extracted from each article."""
    cnt = 0
    r = request_and_get('http://biz.khan.co.kr/khan_art_list.html?category=realty')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d. %02d. %02d' % (now.year, now.month, now.day)
    soup = BeautifulSoup(r.content.decode('euc-kr', 'replace'), 'html.parser')
    for news_list in soup.find_all(match_soup_class(['news_list'])):
        for li in news_list.find_all('li'):
            try:
                article_date = li.find('em', attrs={'class': 'letter'}).text
                if not article_date.startswith(today):
                    continue
                if cnt == 0:
                    print('\n📰 경향신문')
                cnt += 1
                title = li.find('strong', attrs={'class': 'hd_title'})
                print(title.text)
                print(title.a['href'])
                # tuple or None on parse failure; guard before extending
                keywords = get_news_article_info(title.a['href'])
                if keywords:
                    keywords_list.extend(keywords)
            except TypeError:
                # list item without the expected <em>/<strong>/<a> structure
                continue
def realestate_kookmin(keywords_list):
    """Print today's Kookmin Ilbo (국민일보) economy articles whose titles
    match real-estate topics, extending *keywords_list*."""
    r = request_and_get('http://news.kmib.co.kr/article/list.asp?sid1=eco')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d-%02d-%02d' % (now.year, now.month, now.day)
    base_url = 'http://news.kmib.co.kr/article'
    soup = BeautifulSoup(r.content.decode('euc-kr', 'replace'), 'html.parser')
    # topics that qualify an economy article as real-estate news
    topics = ('아파트', '국토부', '국토교통부', '전세', '전월세',
              '청약', '분양', '부동산')
    cnt = 0  # was initialized twice before
    for nws_list in soup.find_all(match_soup_class(['nws_list'])):
        for dl in nws_list.find_all('dl'):
            article_date = dl.find('dd', attrs={'class': 'date'}).text
            if not article_date.startswith(today):
                continue
            if dl.text == '등록된 기사가 없습니다.':
                return
            dt = dl.find('dt')
            href = '%s/%s' % (base_url, dt.a['href'])
            title = check_valid_string(dt.a.text)
            # replaces the old chain of title.find(...) != -1 tests
            if any(topic in title for topic in topics):
                if cnt == 0:
                    print('\n📰 국민일보')
                print(title)
                print(href)
                # tuple or None on parse failure; guard before extending
                keywords = get_news_article_info(href)
                if keywords:
                    keywords_list.extend(keywords)
                cnt += 1
def realestate_nocut(keywords_list):
    """Print today's Nocut News (노컷뉴스) real-estate articles and extend
    *keywords_list* with data extracted from each article."""
    cnt = 0
    # NOTE: the query string had been mangled to '204<ype=1' by HTML
    # unescaping ('&lt' -> '<'); restored to '&ltype=1'
    r = request_and_get('http://www.nocutnews.co.kr/news/list?c1=203&c2=204&ltype=1')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d-%02d-%02d' % (now.year, now.month, now.day)
    base_url = 'http://www.nocutnews.co.kr'
    soup = BeautifulSoup(r.content.decode('utf-8', 'replace'), 'html.parser')
    news = soup.find(match_soup_class(['newslist']))
    if news is None:  # page layout changed or empty page
        return
    for dl in news.find_all('dl'):
        dt = dl.find('dt')
        href = '%s%s' % (base_url, dt.a['href'])
        title = check_valid_string(dt.text)
        # last three whitespace-separated tokens form the date string
        temp = (dl.find('dd', attrs={'class': 'txt'}).text).split(' ')
        article_date = ''.join(temp[-3:])
        if not article_date.startswith(today):
            continue
        if cnt == 0:
            print('\n📰 노컷뉴스')
        cnt += 1
        print(title)
        print(href)
        # tuple or None on parse failure; guard before extending
        keywords = get_news_article_info(href)
        if keywords:
            keywords_list.extend(keywords)
    return
def realestate_donga(keywords_list):
    """Print today's Donga Ilbo (동아일보) real-estate articles and extend
    *keywords_list* with data extracted from each article."""
    cnt = 0
    r = request_and_get('http://news.donga.com/List/Economy/RE')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d%02d%02d' % (now.year, now.month, now.day)
    soup = BeautifulSoup(r.text, 'html.parser')
    for alist in soup.find_all(match_soup_class(['articleList'])):
        tit = alist.find('span', attrs={'class': 'tit'})
        title = check_valid_string(tit.text)
        # third-from-last URL path segment carries the YYYYMMDD date
        temp = (alist.a['href']).split('/')
        article_date = temp[-3]
        if not article_date.startswith(today):
            continue
        if cnt == 0:
            print('\n📰 동아일보')
        print(title)
        print(alist.a['href'])
        # tuple or None on parse failure; guard before extending
        keywords = get_news_article_info(alist.a['href'])
        if keywords:
            keywords_list.extend(keywords)
        cnt += 1
def realestate_mbn(keywords_list):
    """Print today's Maeil Business (매일경제) real-estate articles and
    extend *keywords_list* with data extracted from each article."""
    cnt = 0
    r = request_and_get('http://news.mk.co.kr/newsList.php?sc=30000020')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d.%02d.%02d' % (now.year, now.month, now.day)
    soup = BeautifulSoup(r.content.decode('euc-kr', 'replace'), 'html.parser')
    for list_area in soup.find_all(match_soup_class(['list_area'])):
        for dl in list_area.find_all('dl'):
            dt = dl.find('dt')
            href = dt.a['href']
            title = check_valid_string(dt.text)
            article_date = dl.find('span', attrs={'class': 'date'}).text
            if not article_date.startswith(today):
                continue
            if cnt == 0:
                print('\n📰 매일경제')
            print(title)
            print(href)
            cnt += 1
            # explicit None check instead of catching the TypeError that
            # extend(None) used to raise
            keywords = get_news_article_info(href)
            if keywords:
                keywords_list.extend(keywords)
def realestate_yonhapnews(keywords_list):
    """Print today's Yonhap (연합뉴스) real-estate articles and extend
    *keywords_list* with data extracted from each article."""
    cnt = 0
    r = request_and_get('http://www.yonhapnews.co.kr/economy/0304000001.html')
    # request_and_get signals failure with a falsy value, not only None
    if not r:
        return
    today = '%4d/%02d/%02d' % (now.year, now.month, now.day)
    soup = BeautifulSoup(r.content.decode('utf-8', 'replace'), 'html.parser')
    for sect02 in soup.find_all(match_soup_class(['section02'])):
        for div in sect02.find_all('div'):
            # divs without a link used to raise TypeError on div.a['href']
            if div.a is None:
                continue
            href = div.a['href']
            # URL path segments 4-6 encode the YYYY/MM/DD date
            urls = div.a['href'].split('/')
            article_date = '/'.join(urls[4:7])
            if not article_date.startswith(today):
                continue
            if cnt == 0:
                print('\n📰 연합뉴스')
            cnt += 1
            print(div.a.text)
            print(href)
            # tuple or None on parse failure; guard before extending
            keywords = get_news_article_info(href)
            if keywords:
                keywords_list.extend(keywords)
def realestate_cnews(keywords_list):
    """Print today's Construction Economy (건설경제) articles across several
    sections and extend *keywords_list* with extracted article data."""
    base_url = 'http://www.cnews.co.kr/uhtml/read.jsp?idxno='
    today = '%4d%02d%02d' % (now.year, now.month, now.day)
    cnt = 0
    urls = ['http://www.cnews.co.kr/uhtml/autosec/S1N1_S2N12_1.html',  # 분양
            'http://www.cnews.co.kr/uhtml/autosec/S1N1_S2N13_1.html',  # 도시정비
            'http://www.cnews.co.kr/uhtml/autosec/S1N1_S2N14_1.html',  # 개발
            'http://www.cnews.co.kr/uhtml/autosec/S1N1_S2N15_1.html',  # 재태크
            'http://www.cnews.co.kr/uhtml/autosec/S1N1_S2N16_1.html',  # 부동산시장
            ]
    for url in urls:
        r = request_and_get(url)
        # failure check was missing entirely; skip this section on failure
        if not r:
            continue
        soup = BeautifulSoup(r.content.decode('utf-8', 'replace'), 'html.parser')
        for sub_list in soup.find_all(match_soup_class(['sub_main_news_list_2'])):
            for li in sub_list.find_all('li'):
                title = li.find('div', {'class': 'title'})
                # article id (starts with YYYYMMDD) is quoted inside the href
                article_date = li.a['href'].split("'")[1]
                if not article_date.startswith(today):
                    continue
                if cnt == 0:
                    print('\n📰 건설경제')
                cnt += 1
                href = '%s%s' % (base_url, article_date)
                print(title.text)
                print(href)
                # tuple or None on parse failure; guard before extending
                keywords = get_news_article_info(href)
                if keywords:
                    keywords_list.extend(keywords)
def realestate_sedaily(keywords_list):
    """Print today's Seoul Economic Daily (서울경제) real-estate articles
    across all sub-sections and extend *keywords_list*."""
    urls = ['http://www.sedaily.com/NewsList/GB01',  # 정책, 제도
            'http://www.sedaily.com/NewsList/GB02',  # 분양, 청약
            'http://www.sedaily.com/NewsList/GB03',  # 아파트, 주택
            'http://www.sedaily.com/NewsList/GB04',  # 오피스, 상가, 토지
            'http://www.sedaily.com/NewsList/GB05',  # 건설업계
            'http://www.sedaily.com/NewsList/GB06',  # 간접투자
            'http://www.sedaily.com/NewsList/GB07',  # 기획연재
            ]
    base_url = 'http://www.sedaily.com'
    today = '%4d-%02d-%02d' % (now.year, now.month, now.day)
    cnt = 0
    for url in urls:
        r = request_and_get(url)
        # failure check was missing entirely; skip this section on failure
        if not r:
            continue
        soup = BeautifulSoup(r.content.decode('utf-8', 'replace'), 'html.parser')
        for news_list in soup.find_all(match_soup_class(['news_list'])):
            for li in news_list.find_all('li'):
                dt = li.find('dt')
                href = '%s%s' % (base_url, dt.a['href'])
                dd = li.find('dd')
                article_date = dd.find('span', attrs={'class': 'letter'}).text
                if not article_date.startswith(today):
                    continue
                if cnt == 0:
                    print('\n📰 서울경제')
                cnt += 1
                print(dt.text)
                print(href)
                # tuple or None on parse failure; guard before extending
                keywords = get_news_article_info(href)
                if keywords:
                    keywords_list.extend(keywords)
def realestate_moonhwa(keywords_list):
    """Collect today's Munhwa Ilbo (문화일보) real-estate articles.

    Prints each matching article's URL and title, and extends
    *keywords_list* with keywords extracted from the article page.
    """
    resp = request_and_get('http://www.munhwa.com/news/section_list.html?sec=economy&class=5')
    if resp is None:
        return
    today = '%4d.%02d.%02d' % (now.year, now.month, now.day)
    # The site is served as EUC-KR; decode leniently before parsing.
    page = BeautifulSoup(resp.content.decode('euc-kr', 'replace'), 'html.parser')
    header_shown = False
    for cell in page.find_all('td', attrs={'style': 'padding:4 0 0 3'}):
        tokens = cell.text.split()
        # The final token is the date, wrapped in square brackets.
        stamp = tokens[-1].replace(']', '').replace('[', '')
        if not stamp.startswith(today):
            continue
        if not header_shown:
            print('\n📰 문화일보')
            header_shown = True
        link = cell.a['href']
        print(link)
        print(' '.join(tokens[:-1]))
        keywords_list.extend(get_news_article_info(link))
def realestate_segye(keywords_list):
    """Collect today's Segye Ilbo (세계일보) real-estate articles.

    Prints each matching article's title and URL, and extends
    *keywords_list* with keywords extracted from the article page.
    """
    resp = request_and_get('http://www.segye.com/newsList/0101030700000')
    if resp is None:
        return
    today = '%4d%02d%02d' % (now.year, now.month, now.day)
    site_root = 'http://www.segye.com'
    page = BeautifulSoup(resp.content.decode('utf-8', 'replace'), 'html.parser')
    header_shown = False
    for block in page.find_all(match_soup_class(['r_txt'])):
        for headline in block.find_all('dt'):
            link = '%s%s' % (site_root, headline.a['href'])
            # Article ids embed the publication date as a prefix.
            stamp = headline.a['href'].split('/')[-1]
            if not stamp.startswith(today):
                continue
            if not header_shown:
                print('\n📰 세계일보')
                header_shown = True
            print(headline.text)
            print(link)
            keywords_list.extend(get_news_article_info(link))
def realestate_joins(keywords_list):
    """Collect today's JoongAng Ilbo (중앙일보) real-estate articles.

    Prints each matching article's title and URL, and extends
    *keywords_list* with keywords extracted from the article page.
    Entries without a byline date, a thumbnail title, or a link are
    silently skipped.
    """
    resp = request_and_get('http://realestate.joins.com/article/')
    if resp is None:
        return
    today = '%4d.%02d.%02d' % (now.year, now.month, now.day)
    site_root = 'http://realestate.joins.com'
    page = BeautifulSoup(resp.content.decode('utf-8', 'replace'), 'html.parser')
    header_shown = False
    for listing in page.find_all(match_soup_class(['list_basic'])):
        for group in listing.find_all('ul'):
            for item in group.find_all('li'):
                thumb = item.find('span', attrs={'class': 'thumb'})
                # Second <em> of the byline holds the publication date.
                try:
                    byline = item.find('span', attrs={'class': 'byline'})
                    stamps = byline.find_all('em')
                    stamp = stamps[1].text.split()[0]
                    if stamp != today:
                        continue
                except AttributeError:
                    continue
                # The headline lives in the thumbnail image's alt text.
                try:
                    title = thumb.img['alt']
                except AttributeError:
                    continue
                try:
                    rel = item.a['href']
                except KeyError:
                    continue
                link = '%s%s' % (site_root, rel)
                if not header_shown:
                    print('\n📰 중앙일보')
                    header_shown = True
                print(title)
                print(link)
                keywords_list.extend(get_news_article_info(link))
def realestate_chosun(keywords_list):
    """Collect today's Chosun Ilbo (조선일보) real-estate articles.

    Prints each matching article's title and URL, and extends
    *keywords_list* with keywords extracted from the article page.
    """
    resp = request_and_get('http://biz.chosun.com/svc/list_in/list.html?catid=4&gnb_global')
    if resp is None:
        return
    today = '%4d%02d%02d' % (now.year, now.month, now.day)
    site_root = 'http://biz.chosun.com'
    page = BeautifulSoup(resp.content.decode('utf-8', 'replace'), 'html.parser')
    header_shown = False
    for section in page.find_all(match_soup_class(['list_vt'])):
        for item in section.find_all('li'):
            headline = item.find('dt')
            link = '%s%s' % (site_root, item.a['href'])
            title = check_valid_string(headline.a.text)
            # Article URLs end with a date-prefixed id.
            stamp = item.a['href'].split('/')[-1]
            if not stamp.startswith(today):
                continue
            if not header_shown:
                print('\n📰 조선일보')
                header_shown = True
            print(title)
            print(link)
            keywords_list.extend(get_news_article_info(link))
def realestate_hani(keywords_list):
    """Collect today's Hankyoreh (한겨례신문) real-estate articles.

    Prints each matching article's title and URL, and extends
    *keywords_list* with keywords extracted from the article page.
    """
    cnt = 0
    # BUGFIX: the URL previously began with a stray leading space
    # (' http://...'), which several urllib/requests versions reject
    # as an invalid URL.
    r = request_and_get('http://www.hani.co.kr/arti/economy/property/home01.html')
    if r is None:
        return
    today = '%4d-%02d-%02d' % (now.year, now.month, now.day)
    base_url = 'http://www.hani.co.kr'
    soup = BeautifulSoup(r.content.decode('utf-8', 'replace'), 'html.parser')
    for article in soup.find_all(match_soup_class(['article-area'])):
        article_date = article.find('span', attrs={'class': 'date'}).text
        href = '%s%s' % (base_url, article.a['href'])
        # .text flattens the block; the first line is the headline.
        article = article.text.strip().split('\n')
        title = check_valid_string(article[0])
        if not article_date.startswith(today):
            continue
        if cnt == 0:
            print('\n📰 한겨례신문')
        cnt += 1
        print(title)
        print(href)
        keywords = get_news_article_info(href)
        keywords_list.extend(keywords)
    return
def realestate_hankyung(keywords_list):
cnt = 0
r = request_and_get('http://land.hankyung.com/')
if r is None:
return
today = '%4d%02d%02d' % (now.year, now.month, now.day)
soup = BeautifulSoup(r.content.decode('euc-kr', 'replace'), 'html.parser')
sessions = soup.select('div > h2 > a')
for s in | |
that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloud_tpu.ListNodesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListNodesAsyncPager)
assert response.next_page_token == "<PASSWORD>"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_nodes_async_from_dict():
    """Re-run the async list_nodes test with a plain dict as the request."""
    await test_list_nodes_async(request_type=dict)
def test_list_nodes_field_headers():
    """Verify list_nodes sends the routing field header for request.parent."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_tpu.ListNodesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        call.return_value = cloud_tpu.ListNodesResponse()
        client.list_nodes(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_nodes_field_headers_async():
    """Async variant: list_nodes sends the routing field header for parent."""
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_tpu.ListNodesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.ListNodesResponse()
        )
        await client.list_nodes(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_nodes_flattened():
    """Flattened-field call populates the request from keyword arguments."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.ListNodesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_nodes(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_nodes_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request message and individual keyword fields is
    # ambiguous, so the client rejects the combination.
    request = cloud_tpu.ListNodesRequest()
    with pytest.raises(ValueError):
        client.list_nodes(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_nodes_flattened_async():
    """Async flattened-field call populates the request from kwargs."""
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (The earlier plain `call.return_value = cloud_tpu.ListNodesResponse()`
        # was dead code — immediately overwritten — and has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.ListNodesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_nodes(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_nodes_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Supplying both a request message and individual keyword fields is
    # ambiguous, so the client rejects the combination.
    request = cloud_tpu.ListNodesRequest()
    with pytest.raises(ValueError):
        await client.list_nodes(request, parent="parent_value")
def test_list_nodes_pager(transport_name: str = "grpc"):
    """Sync pager walks all pages and carries the routing metadata."""
    # CONSISTENCY FIX: instantiate AnonymousCredentials() like every other
    # test in this file (the bare class object was being passed before).
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(), cloud_tpu.Node(), cloud_tpu.Node(),],
                next_page_token="abc",
            ),
            cloud_tpu.ListNodesResponse(nodes=[], next_page_token="def",),
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(),], next_page_token="ghi",
            ),
            cloud_tpu.ListNodesResponse(nodes=[cloud_tpu.Node(), cloud_tpu.Node(),],),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_nodes(request={})
        assert pager._metadata == metadata
        # Iterating the pager yields every node across all pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, cloud_tpu.Node) for i in results)
def test_list_nodes_pages(transport_name: str = "grpc"):
    """Sync page iterator yields pages with the expected page tokens."""
    # CONSISTENCY FIX: instantiate AnonymousCredentials() like every other
    # test in this file (the bare class object was being passed before).
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_nodes), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(), cloud_tpu.Node(), cloud_tpu.Node(),],
                next_page_token="abc",
            ),
            cloud_tpu.ListNodesResponse(nodes=[], next_page_token="def",),
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(),], next_page_token="ghi",
            ),
            cloud_tpu.ListNodesResponse(nodes=[cloud_tpu.Node(), cloud_tpu.Node(),],),
            RuntimeError,
        )
        pages = list(client.list_nodes(request={}).pages)
        # The final page's token is empty, signalling the end of results.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_nodes_async_pager():
    """Async pager walks all pages via `async for`."""
    # CONSISTENCY FIX: instantiate AnonymousCredentials() like every other
    # test in this file (the bare class object was being passed before).
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_nodes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(), cloud_tpu.Node(), cloud_tpu.Node(),],
                next_page_token="abc",
            ),
            cloud_tpu.ListNodesResponse(nodes=[], next_page_token="def",),
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(),], next_page_token="ghi",
            ),
            cloud_tpu.ListNodesResponse(nodes=[cloud_tpu.Node(), cloud_tpu.Node(),],),
            RuntimeError,
        )
        async_pager = await client.list_nodes(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, cloud_tpu.Node) for i in responses)
@pytest.mark.asyncio
async def test_list_nodes_async_pages():
    """Async page iterator yields pages with the expected page tokens."""
    # CONSISTENCY FIX: instantiate AnonymousCredentials() like every other
    # test in this file (the bare class object was being passed before).
    client = TpuAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_nodes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(), cloud_tpu.Node(), cloud_tpu.Node(),],
                next_page_token="abc",
            ),
            cloud_tpu.ListNodesResponse(nodes=[], next_page_token="def",),
            cloud_tpu.ListNodesResponse(
                nodes=[cloud_tpu.Node(),], next_page_token="ghi",
            ),
            cloud_tpu.ListNodesResponse(nodes=[cloud_tpu.Node(), cloud_tpu.Node(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_nodes(request={})).pages:
            pages.append(page_)
        # The final page's token is empty, signalling the end of results.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [cloud_tpu.GetNodeRequest, dict,])
def test_get_node(request_type, transport: str = "grpc"):
    """get_node forwards the request and maps every Node field through."""
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_node), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_tpu.Node(
            name="name_value",
            description="description_value",
            accelerator_type="accelerator_type_value",
            state=cloud_tpu.Node.State.CREATING,
            health_description="health_description_value",
            runtime_version="runtime_version_value",
            cidr_block="cidr_block_value",
            health=cloud_tpu.Node.Health.HEALTHY,
            tags=["tags_value"],
            id=205,
            api_version=cloud_tpu.Node.ApiVersion.V1_ALPHA1,
        )
        response = client.get_node(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.GetNodeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_tpu.Node)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.accelerator_type == "accelerator_type_value"
    assert response.state == cloud_tpu.Node.State.CREATING
    assert response.health_description == "health_description_value"
    assert response.runtime_version == "runtime_version_value"
    assert response.cidr_block == "cidr_block_value"
    assert response.health == cloud_tpu.Node.Health.HEALTHY
    assert response.tags == ["tags_value"]
    assert response.id == 205
    assert response.api_version == cloud_tpu.Node.ApiVersion.V1_ALPHA1
def test_get_node_empty_call():
    """Calling get_node with no arguments sends a default GetNodeRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = TpuClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_node), "__call__") as call:
        client.get_node()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.GetNodeRequest()
@pytest.mark.asyncio
async def test_get_node_async(
    transport: str = "grpc_asyncio", request_type=cloud_tpu.GetNodeRequest
):
    """Async get_node forwards the request and maps every Node field through."""
    client = TpuAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_node), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_tpu.Node(
                name="name_value",
                description="description_value",
                accelerator_type="accelerator_type_value",
                state=cloud_tpu.Node.State.CREATING,
                health_description="health_description_value",
                runtime_version="runtime_version_value",
                cidr_block="cidr_block_value",
                health=cloud_tpu.Node.Health.HEALTHY,
                tags=["tags_value"],
                id=205,
                api_version=cloud_tpu.Node.ApiVersion.V1_ALPHA1,
            )
        )
        response = await client.get_node(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_tpu.GetNodeRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_tpu.Node)
    assert response.name == "name_value"
    assert response.description == "description_value"
    assert response.accelerator_type == "accelerator_type_value"
    assert response.state == cloud_tpu.Node.State.CREATING
    assert response.health_description == "health_description_value"
    assert response.runtime_version == "runtime_version_value"
    assert response.cidr_block == "cidr_block_value"
    assert response.health == cloud_tpu.Node.Health.HEALTHY
    assert response.tags == ["tags_value"]
    assert response.id == 205
    assert response.api_version == cloud_tpu.Node.ApiVersion.V1_ALPHA1
@pytest.mark.asyncio
async def test_get_node_async_from_dict():
    """Re-run the async get_node test with a plain dict as the request."""
    await test_get_node_async(request_type=dict)
def test_get_node_field_headers():
client = TpuClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloud_tpu.GetNodeRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_node), "__call__") as call:
call.return_value = cloud_tpu.Node()
client.get_node(request)
# Establish that the underlying gRPC stub method was called.
assert | |
in xrange(self._ndataset)]
# else:
# return old[param] != getattr(self,'_'+param)
def _update_(self, analysis_type=None, **kwargs):
    """Initialize, update and check statistical paremeters.
    A value of None is converted to an optimal value.
    Analyses are re-ran if needed by checking dependencies.

    :Params:
        - *analysis_type*: 'pca', 'mssa' or 'svd' when called from an
          analysis method; None for a dry initialization run.
        - *kwargs*: parameter overrides (npca, prepca, nmssa, window, nsvd).

    Returns a dict mapping each parameter name to a changed-flag
    (per-dataset list, except 'nsvd' which is a single bool).
    """
    # Filter parameter list according to analysis_type
    running = isinstance(analysis_type, str)
    if running:
        for param in kwargs.keys():
            if param not in self._params[analysis_type]:
                del kwargs[param]
    req_params = kwargs.keys()
    # Initialize old values and defaults changed to False
    old = {}
    changed = {}
    init_all = [None]*self._ndataset
    for param in self._all_params:
        #print 'check', param
        # if not req_params: continue
        # Get old values , defaults to None and set new value
        # if kwargs.has_key(param):
        if param == 'nsvd': # Single value for all datasets
            changed[param] = False
            old[param] = getattr(self,'_'+param,None)
            setattr(self,'_'+param,SpAn._check_length_(kwargs.pop(param, old[param]),0,None))
        else:
            changed[param] = [False]*self._ndataset
            old[param] = getattr(self,'_'+param,list(init_all))
            setattr(self,'_'+param,SpAn._check_length_(kwargs.pop(param, old[param]),self._ndataset,None))
        # print 'cur', param, getattr(self, '_'+param), changed[param]
    # if not req_params: return changed
    # Dry run: parameters are initialized but no analysis is triggered.
    if not running: return changed

    # Number of PCA modes
    # if 'npca' in req_params:
    for iset in xrange(self._ndataset):
        if self._npca[iset] is None:
            # Guess a value
            if self._prepca[iset] is not None:
                self._npca[iset] = self._prepca[iset]
            elif iset:
                self._npca[iset] = self._npca[iset-1] # From last dataset
            else:
                self._npca[iset] = SpAn._npca_default # Default value
        if self._prepca[iset] is not None:
            self._npca[iset] = max(self._npca[iset], self._prepca[iset]) # Min
        self._npca[iset] = npy.clip(self._npca[iset],1,min(SpAn._npca_max,self._ns[iset],self._nt[iset])) # Max

    # Number of pre-PCA modes before MSSA and SVD
    # if 'prepca' in req_params:
    for iset in xrange(self._ndataset):
        if self._prepca[iset] is None: # Default: pre-PCA needed over max (for MSSA and SVD)
            # NOTE(review): min() of the same value twice — the second
            # argument was presumably meant to be self._nt[iset]; confirm.
            self._prepca[iset] = min(self._ns[iset], self._ns[iset]) > SpAn._npca_max
            if not self._quiet and self._prepca[iset] and analysis_type in ['mssa', 'svd'] :
                print '[mssa/svd] The number of valid points of one of the datasets is greater than %i, so we perform a pre-PCA'%SpAn._npca_max
        if self._prepca[iset] is True: # Defaults to the number of PCA modes
            self._prepca[iset] = self._npca[iset]
        elif self._prepca[iset]: # Max number of prepca modes is number of points
            self._prepca[iset] = min(self._prepca[iset], self._ns[iset], self._nt[iset])
        if self._prepca[iset] == 0:
            self._prepca[iset] = False

    # Dependency rules between prepca and npca
    for iset in xrange(self._ndataset):
        if self._prepca[iset] and self._npca[iset] < self._prepca[iset]:
            if not self._quiet and self._prepca[iset]:
                print 'The number of pre-PCA modes (%i) for dataset #%i is lower than the number of PCA modes (%i), so we adjust the latter.' % (self._prepca[iset],iset,self._npca[iset])
            self._npca[iset] = self._prepca[iset]

    # Window extension of MSSA
    # if 'window' in req_params:
    for iset in xrange(self._ndataset):
        if self._window[iset] is None: # Initialization
            self._window[iset] = int(self._nt[iset]*SpAn._window_default)
        self._window[iset] = npy.clip(self._window[iset],1,max(1,self._nt[iset]))

    # Number of MSSA modes
    for iset in xrange(self._ndataset):
        # if 'nmssa' not in req_params and not changed['prepca'][iset]: continue
        # if not changed['prepca'][iset]: continue
        if self._nmssa[iset] is None: # Initialization
            # Guess a value
            if iset:
                self._nmssa[iset] = self._nmssa[iset-1] # From last dataset
            else:
                self._nmssa[iset] = SpAn._nmssa_default # Default value
        if self._prepca[iset]:
            nchanmax = self._prepca[iset] # Input channels are from pre-PCA
        else:
            nchanmax = self._ns[iset] # Input channels are from real space
        self._nmssa[iset] = npy.clip(self._nmssa[iset],1,
            min(SpAn._nmssa_max,nchanmax*self._window[iset])) # Max

    # Number of SVD modes (special case)
    if self._nsvd is None: # Initialization
        self._nsvd = SpAn._nsvd_default # Default value
    for iset in xrange(self._ndataset): # Check values
        # if 'nsvd' not in req_params and not changed['prepca'][iset]: continue
        if not changed['prepca'][iset]: continue
        if self._prepca[iset]:
            nchanmax = self._prepca[iset] # Input channels are from pre-PCA
        else:
            nchanmax = self._ns[iset] # Input channels are from real space
        self._nsvd = npy.clip(self._nsvd,1, min(SpAn._nsvd_max,nchanmax)) # Max

    # # Check what changed
    # for param in self._all_params:
    #     changed[param] = self._changed_param_(old,param)
    # Re-run analyses when needed
    # if not kwargs: return changed # Just initializations (dry run to prevent not ending loop)
    changed['nsvd'] = old['nsvd'] != self._nsvd
    runsvd = False
    for iset in xrange(self._ndataset):
        # Check what changed
        for param in self._all_params:
            if param != 'nsvd':
                changed[param][iset] = old[param][iset] != getattr(self,'_'+param)[iset]
        # Analyses
        # - PCA
        if (analysis_type == 'pca' or self._prepca[iset]) and \
            (self._pca_raw_eof.has_key(iset) and changed['npca'][iset]):
            print 'Rerunning PCA'
            self.pca(iset=iset)
        # - MSSA
        if analysis_type == 'mssa' and \
            (self._mssa_raw_eof.has_key(iset) and
                (changed['nmssa'][iset] or changed['window'][iset] or
                    (self._prepca[iset] and changed['prepca'][iset]))):
            print 'Rerunning MSSA'
            self.mssa(iset=iset)
        # - SVD
        if not runsvd and analysis_type == 'svd' and (changed['nsvd'] or \
            (self._svd_raw_eof.has_key(iset) and
                (self._prepca[iset] and changed['prepca'][iset]))):
            runsvd = True
    if runsvd:
        #FIXME: MUST NOT RERUN SVD
        # print 'Rerunning SVD'
        self.svd()

    # Inform about which params have been modified for each dataset
    return changed
def _check_isets_(self,iset):
"""Check if an iset is a valid dataset.
It can be a list, and it is returned as a list.
if an iset is invalid, it is removed from the output list.
"""
if iset is None: return range(self._ndataset)
if iset == 'left':
iset = 0
elif iset == 'right':
iset = 1
if iset < 0 or iset >= self._ndataset:
warn('Invalid dataset id: %i. Valid id are < %i'%(iset,self._ndataset))
else:
return [iset]
def _check_shape_(self, inputs, fillvalue):
    """Return *inputs* expanded to the datasets (tree) *shape*."""
    imap = self._input_map
    if isinstance(imap, int):
        # Single dataset: one inner list sized to its input count.
        return [SpAn._check_length_(inputs, max(1, imap), fillvalue)]
    # One entry per dataset, each resized to that dataset's input count.
    out = SpAn._check_length_(inputs, len(imap), fillvalue)
    for idx, nin in enumerate(imap):
        out[idx] = SpAn._check_length_(out[idx], max(1, nin), fillvalue)
    return out
#################################################################
## PCA
#################################################################
@_filldocs_
def pca(self,iset=None,**kwargs):
    """
    Principal Components Analysis (PCA)

    It is called everytime needed by :meth:`pca_eof`, :meth:`pca_pc`, :meth:`pca_ev` and :meth:`pca_rec`.
    Thus, since results are stored in cache, it not necessary call it explicitly.

    :Parameters:
        %(npca)s
        %(iset)s
    """
    # Check on which dataset to operate
    isets = self._check_isets_(iset)
    # Update params
    self._update_('pca', **kwargs)
    # Loop on datasets
    for iset in isets:
        # Check if old results can be used when npca is lower
        if getattr(self,'_pca_raw_pc').has_key(iset) and \
            getattr(self,'_pca_raw_pc')[iset].shape[-1] > self._npca[iset]:
            continue
        # Remove old results
        for att in 'raw_eof','raw_pc','raw_ev','ev_sum':
            dic = getattr(self,'_pca_'+att)
            if dic.has_key(iset): del dic[iset]
        # Compute PCA
        pdata = self._pdata[iset]
        if pdata.ndim == 1: # One single channel, so result is itself
            raw_eof = npy.ones(1,dtype=pdata.dtype)
            raw_pc = pdata
            raw_ev = raw_pc.var()
            # BUGFIX: 'ev' was undefined here (NameError).  For a single
            # channel the sum of eigenvalues is the channel variance.
            ev_sum = raw_ev
        else: # Several channels
            weights = self._stack_info[iset]['weights']
            raw_eof,raw_pc,raw_ev,ev_sum = spanlib_fort.pca(pdata,self._npca[iset],weights,-1)
        # Save results
        self._pca_raw_pc[iset] = raw_pc
        self._pca_raw_eof[iset] = raw_eof
        self._pca_raw_ev[iset] = raw_ev
        self._pca_ev_sum[iset] = ev_sum
        # Delete formmated variables
        for vtype in 'pc', 'eof':
            vfmt = getattr(self, '_pca_fmt_'+vtype)
            if vfmt.has_key(iset): del vfmt[iset]
        gc.collect()
    self._last_analysis_type = 'pca'
@_filldocs_
def pca_eof(self, iset=None, scale=False, raw=False, **kwargs):
    """Get EOFs from PCA analysis

    :Parameters:
        %(scale)s
        %(raw)s
        %(iset)s

    :PCA parameters:
        %(npca)s

    :Returns:
        Arrays with shape ``(npca,...)``
    """
    # Dataset selection
    isets = self._check_isets_(iset)
    # Update params
    changed = self._update_('pca', **kwargs)
    # Of, let's format the variables
    fmt_eof = {}
    for iset in isets:
        # Operate only on selected datasets
        if isets is not None and iset not in isets: continue
        # EOF already available
        if self._pca_fmt_eof.has_key(iset):
            fmt_eof[iset] = self._pca_fmt_eof[iset]
            continue
        # First PCA analysis?
        if not self._pca_raw_eof.has_key(iset): self.pca(iset=iset)
        # Get raw data back to physical space
        #FIXME: add raw for fmt_eof
        self._pca_fmt_eof[iset] = \
            self._unstack_(iset, self._pca_raw_eof[iset][:, :self._npca[iset]],
                self._mode_axis_('pca',iset), cpatts=False, remean=False)
        # Set attributes and scale
        for idata,eof in enumerate(self._pca_fmt_eof[iset]):
            # Attributes
            if cdms2_isVariable(eof):
                if not self._stack_info[iset]['ids'][idata].startswith('variable_'):
                    eof.id = self._stack_info[iset]['ids'][idata]+'_pca_eof'
                else:
                    eof.id = 'pca_eof'
                eof.name = eof.id
                eof.standard_name = 'empirical_orthogonal_functions_of_pca'
                eof.long_name = 'PCA empirical orthogonal functions'
                atts = self._stack_info[iset]['atts'][idata]
                if atts.has_key('long_name'):
                    eof.long_name += ' of '+atts['long_name']
                # BUGFIX: removed leftover debug statement (print 'scale', scale)
                if scale and atts.has_key('units'):
                    eof.units = atts['units']
            # Scaling
            if scale:
                if scale is True: # Std dev of EOF is sqrt(ev)
                    # NOTE(review): 'scale' is rebound to an array here, so
                    # subsequent eofs take the else-branch — confirm intended.
                    scale = npy.sqrt(self._pca_raw_ev[iset]*(self.ns(iset)-1))
                    for imode in xrange(eof.shape[0]):
                        eof[imode] *= scale[imode]
                else:
                    eof *= scale
        fmt_eof[iset] = self._pca_fmt_eof[iset]
    return self._demap_(fmt_eof, grouped=raw)
@_filldocs_
def pca_pc(self, iset=None, scale=False, **kwargs):
    """Get PCs from current PCA decomposition

    :Parameters:
        %(iset)s

    :PCA parameters:
        %(npca)s

    :Returns:
        Arrays with the shape ``(npca,nt)``
    """
    # Check on which dataset to operate
    isets = self._check_isets_(iset)
    # Update params
    self._update_('pca', **kwargs)
    # Of, let's format the variable
    fmt_pc = {}
    for iset in isets:
        # PC already available
        if self._pca_fmt_pc.has_key(iset):
            fmt_pc[iset] = self._pca_fmt_pc[iset]
            continue
        # First PCA analysis?
        if not self._pca_raw_pc.has_key(iset): self.pca(iset=iset)
        # Format the variable
        pc = npy.asarray(self._pca_raw_pc[iset][:,:self._npca[iset]].transpose(),order='C')
        if self._cdat_inside_(iset):
            # BUGFIX: wrap the computed PCs; cdms2.createVariable() was
            # previously called without arguments, discarding the data.
            pc = cdms2.createVariable(pc)
            pc.setAxis(0, self._mode_axis_('pca',iset))
            pc.setAxis(1, self._time_axis_(iset,0))
            pc.id = pc.name = 'pca_pc'
            pc.standard_name = 'principal_components_of_pca'
            pc.long_name = 'PCA principal components of '
            atts = self._stack_info[iset]['atts'][0]
            if self._ndata[iset] == 1 and atts.has_key('long_name'):
                pc.long_name += atts['long_name']
            # else:
            #     pc.long_name += 'dataset %i'%iset
            if scale and (self._ndata[iset] == 1 or npy.allclose(self.norms(iset), 1.)) and atts.has_key('units'):
                pc.units = atts['units']
        fmt_pc[iset] = self._pca_fmt_pc[iset] = pc
        self._check_dataset_tag_('_pca_fmt_pc',iset)
    return self._demap_(fmt_pc, grouped=True)
def pca_ec(self, xeof=None, xpc=None, iset=None, scale=False, **kwargs):
    """Compute PCA expansion coefficients from given EOFs and/or PCs.

    NOTE(review): this method appears unfinished: ``fmt_pc`` is used
    without being initialized (NameError on the cached branch),
    ``raw_eof = self._pca_raw_eof.has_key(iset)`` assigns a bool where an
    array lookup (``[iset]``) was presumably intended, and nothing is
    returned. Flagged for completion rather than rewritten.
    """
    # Check on which dataset to operate
    isets = self._check_isets_(iset)
    # A PCA is only needed when either the EOFs or PCs are not supplied.
    need_pca = xeof is None or xpc is None
    if need_pca:
        self._update_('pca', **kwargs)
    # Remap
    if xeof is not None:
        xeof = self._remap_(xeof)
    if xpc is not None:
        xpc = self._remap_(xpc, grouped=True)
    # Loop on datasets
    for iset in isets:
        # PC already available
        if self._pca_fmt_pc.has_key(iset):
            # NOTE(review): fmt_pc is never initialized in this method.
            fmt_pc[iset] = self._pca_fmt_pc[iset]
            continue
        # First PCA analysis?
        if need_pca and not self._pca_raw_pc.has_key(iset): self.pca(iset=iset)
        if xeof is None:
            # NOTE(review): has_key() returns a bool — probably meant [iset].
            raw_eof = self._pca_raw_eof.has_key(iset)
        else:
            stack = self._core_stack_(xeof, dnorms=self._stack_info[0]['norms'],
                dmasks=self._stack_info[0]['masks'], dorders=self._stack_info[0]['orders'],
                dweights=self._stack_info[0]['weights'])
            raw_eof = stack['pdata']
@_filldocs_
def pca_ev(self,iset=None,relative=False,sum=False,cumsum=False,**kwargs):
    """Get eigen values from current PCA decomposition

    :Parameters:
        %(relative)s
        %(sum)s
        %(cumsum)s
        %(iset)s

    :PCA parameters:
        %(npca)s

    :Returns:
        Arrays with shape ``(npca,)`` or a float
    """
    # Check on which dataset to operate
    isets = self._check_isets_(iset)
    # Update params
    self._update_('pca', **kwargs)
    # Loop on dataset
    res = {}
    for iset in isets:
        # First PCA analysis?
        if not self._pca_raw_eof.has_key(iset): self.pca(iset=iset)
        # We only want the sum
        if sum:
            res[iset] = self._pca_ev_sum[iset]
            continue
        # Data
        ev = self._pca_raw_ev[iset][:self._npca[iset]]
        if cumsum:
            # BUGFIX: 'raw_ev' was undefined here (NameError); accumulate
            # over the already-truncated eigenvalue array 'ev'.
            ev = ev.cumsum()
        if relative:
            ev = 100.*ev/self._pca_ev_sum[iset]
        # Format the variable
        if self._cdat_inside_(iset):
            id = 'pca_ev'
            long_name = []
            if cumsum:
                id += '_cumsum'
                long_name.append('cumulative')
            if relative:
                id += '_rel'
                long_name.append('relative')
            ev = cdms2.createVariable(ev)
            ev.id = ev.name = id
            long_name.append('PCA eigen values')
            ev.long_name = ' '.join(long_name).title()+' of '
            ev.setAxisList([self._mode_axis_('pca',iset)])
            ev.standard_name = 'eigen_values_of_pca'
            atts = self._stack_info[iset]['atts'][0]
            if self._ndata[iset] == 1 and atts.has_key('long_name'):
                ev.long_name += atts['long_name']
            else:
                ev.long_name += 'dataset %i'%iset
            if relative:
                ev.units = '% of total variance'
            elif (self._ndata[iset] == 1 or npy.allclose(self.norms(iset), 1.)) and atts.has_key('units'):
                ev.units = atts['units']
                # Eigenvalues are variances, so units are squared.
                for ss in ['^','**',' ']:
                    if ev.units.find(ss) != -1:
                        ev.units = '(%s)^2' % ev.units
                        break
        res[iset] = ev
    return self._demap_(res, grouped=True)
@_filldocs_
def pca_rec(self, iset=None, modes=None, raw=False, **kwargs):
"""Reconstruct a set of modes from PCA decomposition
:Parameters:
%(modes)s
%(raw)s
%(iset)s
:PCA parameters:
%(npca)s
:Returns:
Arrays with the same shape as input arrays.
"""
# Check on which dataset to operate
isets = self._check_isets_(iset)
# Update params
self._update_('pca', **kwargs)
# Loop on datasets
pca_fmt_rec = {}
for iset in isets:
# First PCA analysis?
if not self._pca_raw_pc.has_key(iset): self.pca(iset=iset)
# Get raw data back to physical space
reof = self._pca_raw_eof[iset][:,:self._npca[iset]]
rpc = self._pca_raw_pc[iset][:,:self._npca[iset]]
raw_rec,smodes = self._project_(reof,rpc,iset,modes)
#FIXME: add raw for pca_rec
pca_fmt_rec[iset] = self._unstack_(iset,raw_rec,self._time_axis_(iset))
del raw_rec
# Set | |
# jumpcutter.py (from repository CommanderStorm/jumpcutter)
import argparse
import glob
import logging
import math
import os
import re
import subprocess
import time
from multiprocessing import Process
from shutil import copyfile, rmtree

import numpy as np
from audiotsm import phasevocoder
from audiotsm.io.wav import WavReader, WavWriter
from pytube import YouTube
from scipy.io import wavfile

from Gui import jumpcutterGui as Gui
# PROJECT_ROOT is the parent of the directory containing this file.
PROJECT_ROOT = os.path.normpath(os.path.join(__file__, '..', '..'))
# Working folders: TEMP receives the re-timed frames (newFrame*.jpg) and the
# rebuilt audio track; TEMP/temp holds the raw extracted frames/audio and is
# deleted while the final mux runs (see process()).
TEMP_FOLDER = os.path.join(PROJECT_ROOT, "TEMP")
TEMP_TEMP_FOLDER = os.path.join(TEMP_FOLDER, "temp")
# _____ __ __
# / | / | / |
# $$$$$ | __ __ _____ ____ ______ _______ __ __ _$$ |_ _$$ |_ ______ ______
# $$ |/ | / |/ \/ \ / \ / |/ | / |/ $$ |/ $$ | / \ / \
# __ $$ |$$ | $$ |$$$$$$ $$$$ |/$$$$$$ |/$$$$$$$/ $$ | $$ |$$$$$$/ $$$$$$/ /$$$$$$ |/$$$$$$ |
# / | $$ |$$ | $$ |$$ | $$ | $$ |$$ | $$ |$$ | $$ | $$ | $$ | __ $$ | __ $$ $$ |$$ | $$/
# $$ \__$$ |$$ \__$$ |$$ | $$ | $$ |$$ |__$$ |$$ \_____ $$ \__$$ | $$ |/ |$$ |/ |$$$$$$$$/ $$ |
# $$ $$/ $$ $$/ $$ | $$ | $$ |$$ $$/ $$ |$$ $$/ $$ $$/ $$ $$/ $$ |$$ |
# $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/ $$$$$$$/ $$$$$$/ $$$$/ $$$$/ $$$$$$$/ $$/
# $$ |
# __ __ ______ $$ | __ __
# / |/ | / \ $$/ / |/ |
# $$ |$$ |/$$$$$$ | ______ ______ ______ $$ |$$ |
# $$ |$$ |$$ | $$/ / \ / \ / \ $$ |$$ |
# $$/ $$/ $$ | /$$$$$$ |/$$$$$$ |/$$$$$$ |$$/ $$/
# $$ | __ $$ | $$ |$$ | $$/ $$ $$ |
# $$ \__/ |$$ \__$$ |$$ | $$$$$$$$/
# $$ $$/ $$ $$/ $$ | $$ |
# $$$$$$/ $$$$$$/ $$/ $$$$$$$/
# ______ __ ______ ______
# / \ / | / \ / \
# /$$$$$$ |_$$ |_ __ __ /$$$$$$ |/$$$$$$ |
# $$ \__$$// $$ | / | / |$$ |_ $$/ $$ |_ $$/
# $$ \$$$$$$/ $$ | $$ |$$ | $$ |
# $$$$$$ | $$ | __ $$ | $$ |$$$$/ $$$$/
# / \__$$ | $$ |/ |$$ \__$$ |$$ | $$ |
# $$ $$/ $$ $$/ $$ $$/ $$ | $$ |
# $$$$$$/ $$$$/ $$$$$$/ $$/ $$/
#
def get_max_volume(s):
    """Return the largest absolute sample value in audio array *s*."""
    # The peak is either the largest positive sample or the (negated)
    # most-negative one, whichever is bigger in magnitude.
    return float(max(np.max(s), -np.min(s)))
def copy_frame(input_frame, output_frame):
    """Copy extracted frame *input_frame* into the output sequence as *output_frame*.

    Returns False when the source frame image does not exist (e.g. an index
    rounded past the end of the input), True after a successful copy.
    Frame files on disk are 1-based, hence the +1 offsets.
    """
    global TEMP_FOLDER, TEMP_TEMP_FOLDER
    source = os.path.join(TEMP_TEMP_FOLDER, "frame{:06d}.jpg".format(input_frame + 1))
    target = os.path.join(TEMP_FOLDER, "newFrame{:06d}.jpg".format(output_frame + 1))
    if not os.path.isfile(source):
        return False
    copyfile(source, target)
    # lightweight progress indicator
    if output_frame % 500 == 0:
        print(str(output_frame) + " time-altered frames saved.")
    return True
def input_to_output_filename(filename):
    """Derive the output filename by inserting "_ALTERED" before the extension.

    Previously a filename without any "." made rfind return -1 and silently
    mangled the name (inserting the suffix before the last character); such
    names now simply get "_ALTERED" appended.
    """
    dot_index = filename.rfind(".")
    if dot_index == -1:
        return filename + "_ALTERED"
    return filename[:dot_index] + "_ALTERED" + filename[dot_index:]
def create_path(file_path):
    """Create directory *file_path*, failing loudly when it cannot be created.

    Raises AssertionError (same type callers saw from the old ``assert
    False``, but no longer strippable by ``python -O``) chained to the
    underlying OSError for diagnosis.
    """
    try:
        os.mkdir(file_path)
    except OSError as err:
        raise AssertionError(
            "Creation of the directory %s failed. (The TEMP folder may already "
            "exist. Delete or rename it, and try again.)" % file_path
        ) from err
def delete_path(file_path):  # Dangerous! Watch out!
    """Recursively delete *file_path*; report (but do not raise) on failure."""
    try:
        rmtree(file_path, ignore_errors=False)
    except OSError as err:
        print("Deletion of the directory %s failed" % file_path)
        # was `print(OSError)`, which printed the exception CLASS, not the
        # actual error that occurred
        print(err)
def download_file(url):
    """Download the first available stream of the YouTube video at *url*.

    Returns the on-disk filename with spaces replaced by underscores
    (ffmpeg commands later in the pipeline are built without quoting).
    """
    original_name = YouTube(url).streams.first().download()
    safe_name = original_name.replace(" ", "_")
    os.rename(original_name, safe_name)
    return safe_name
def count_mp4_files_in_folder(input_path: str):
    """Return how many ``*.mp4`` files sit directly inside *input_path*."""
    # glob.escape keeps any glob metacharacters in the directory name literal,
    # matching the non-globbing dirname semantics of the old glob.glob1.
    pattern = os.path.join(glob.escape(input_path), "*.mp4")
    return sum(1 for _ in glob.iglob(pattern))
def call_subprocess(command: str, shell: bool = False, stdout: str = None):
    """Run *command* via subprocess.call, printing how long it took.

    Parameters
    ----------
    command : str (or argv list)
        Command to execute; interpretation follows subprocess.call semantics.
    shell : bool
        Passed through to subprocess.call.
    stdout : str, optional
        Path of a file that receives the command's stdout; None inherits
        the parent's stdout.

    NOTE: relies on the module-level ``import time`` (which was missing
    from this file's import block originally).
    """
    timer_start = time.time()
    if stdout is None:
        subprocess.call(command, shell=shell)
    else:
        with open(stdout, "w+") as sink:
            subprocess.call(command, shell=shell, stdout=sink)
    elapsed = time.time() - timer_start
    print(f"{elapsed}s: {command}")
def process(output_file: str, silent_threshold: float, new_speed: list, frame_spreadage: int,
            sample_rate: float, frame_rate: float, frame_quality: int, input_file: str):
    """Jump-cut *input_file*: re-time silent stretches, keep loud ones.

    Pipeline: extract frames + audio with ffmpeg, classify each video frame
    as loud/silent, time-stretch every chunk's audio with a phase vocoder
    while copying the matching frames, then mux frames + audio back into
    *output_file*.

    Parameters
    ----------
    output_file : str
        Target path; derived from *input_file* when empty.
    silent_threshold : float
        Fraction of the clip's peak volume below which a frame counts as silent.
    new_speed : list
        Playback speed per chunk state: new_speed[0] for silent chunks,
        new_speed[1] for loud ones.
    frame_spreadage : int
        Number of neighbouring frames kept "loud" around genuinely loud frames.
    sample_rate : float
        Audio sample rate requested from ffmpeg (the WAV's actual rate wins).
    frame_rate : float
        Fallback video frame rate; overridden by the rate ffmpeg reports.
    frame_quality : int
        JPEG quality (ffmpeg -qscale:v) for the extracted frames.
    input_file : str
        Source video; must not be None.
    """
    global TEMP_FOLDER, TEMP_TEMP_FOLDER
    assert input_file is not None, "why u put no input file, that dum"
    if len(output_file) < 1:
        output_file = input_to_output_filename(input_file)
    # smooth out transition's audio by quickly fading in/out (arbitrary magic number whatever)
    audio_fade_envelope_size = 400
    # path creation
    create_path(TEMP_FOLDER)
    create_path(TEMP_TEMP_FOLDER)
    # scratch files live in TEMP/temp, which is deleted as soon as the chunk
    # loop finishes (see deletion_thread below)
    tmp_paramsfile_path = os.path.join(TEMP_TEMP_FOLDER, "params.txt")
    tmp_audiofile_path = os.path.join(TEMP_TEMP_FOLDER, "audio.wav")
    tmp_frame_namingpattern = os.path.join(TEMP_TEMP_FOLDER, "frame%06d.jpg")
    tmp_wav_start_file = os.path.join(TEMP_TEMP_FOLDER, "tempStart.wav")
    tmp_wav_end_file = os.path.join(TEMP_TEMP_FOLDER, "tempEnd.wav")
    # outputs that must survive until the final mux live in TEMP
    tmp_newaudiofile_path = os.path.join(TEMP_FOLDER, 'audioNew.wav')
    # BUGFIX: copy_frame() writes the renamed frames into TEMP_FOLDER, and
    # TEMP_TEMP_FOLDER is deleted concurrently with the final mux; the naming
    # pattern previously (and wrongly) pointed into TEMP_TEMP_FOLDER.
    tmp_newframe_namingpattern = os.path.join(TEMP_FOLDER, "newFrame%06d.jpg")
    # NOTE(review): LOG is not defined anywhere in the visible module --
    # presumably a module-level logger configured elsewhere; verify.
    LOG.critical(f"------------START OF JUMPCUT [{input_file}]--------------")
    # frame extraction runs in a background process while audio is analysed
    picture_seperation_process = generate_picture_separation_process(frame_quality, input_file, tmp_frame_namingpattern)
    LOG.warning("picture_seperation_process was started")
    audio_data, audio_sample_count, max_audio_volume, sample_rate = generate_audioinfo(input_file, sample_rate,
                                                                                       tmp_audiofile_path)
    frame_rate = infer_framerate(frame_rate, tmp_paramsfile_path)
    samples_per_frame = sample_rate / frame_rate
    audio_frame_count: int = int(math.ceil(audio_sample_count / samples_per_frame))
    has_loud_audio = generate_has_loud_audio(audio_frame_count, audio_data, audio_sample_count, max_audio_volume,
                                             samples_per_frame, silent_threshold)
    chunks = generate_chunks(audio_frame_count, frame_spreadage, has_loud_audio)
    output_audio_data = np.zeros((0, audio_data.shape[1]))
    output_pointer = 0
    last_existing_frame = None
    LOG.warning("waiting on picture_seperation_process")
    picture_seperation_process.join()
    LOG.warning("picture_seperation_process joined with main process")
    timer_start = time.time()  # BUGFIX: was never initialised -> NameError after the loop
    for chunk in chunks:
        # time-stretch this chunk's audio at the speed chosen by its loud/silent state
        audio_chunk = audio_data[int(chunk[0] * samples_per_frame):int(chunk[1] * samples_per_frame)]
        wavfile.write(tmp_wav_start_file, sample_rate, audio_chunk)
        with WavReader(tmp_wav_start_file) as reader:
            with WavWriter(tmp_wav_end_file, reader.channels, reader.samplerate) as writer:
                tsm = phasevocoder(reader.channels, speed=new_speed[int(chunk[2])])
                tsm.run(reader, writer)
        _, altered_audio_data = wavfile.read(tmp_wav_end_file)
        leng = altered_audio_data.shape[0]
        end_pointer = output_pointer + leng
        output_audio_data = np.concatenate((output_audio_data, altered_audio_data / max_audio_volume))
        # smooth out transition's audio by quickly fading in/out
        if leng < audio_fade_envelope_size:
            output_audio_data[output_pointer:end_pointer] = 0  # audio is less than 0.01 sec, let's just remove it.
        else:
            premask = np.arange(audio_fade_envelope_size) / audio_fade_envelope_size
            mask = np.repeat(premask[:, np.newaxis], 2, axis=1)  # make the fade-envelope mask stereo
            output_audio_data[output_pointer:output_pointer + audio_fade_envelope_size] *= mask
            output_audio_data[end_pointer - audio_fade_envelope_size:end_pointer] *= 1 - mask
        # copy the video frames that correspond to the stretched audio
        start_output_frame = int(math.ceil(output_pointer / samples_per_frame))
        end_output_frame = int(math.ceil(end_pointer / samples_per_frame))
        for outputFrame in range(start_output_frame, end_output_frame):
            input_frame = int(chunk[0] + new_speed[int(chunk[2])] * (outputFrame - start_output_frame))
            if copy_frame(input_frame, outputFrame):
                last_existing_frame = input_frame
            else:
                # source frame missing (index rounded past the end): repeat the last good one
                copy_frame(last_existing_frame, outputFrame)
        output_pointer = end_pointer
    timer_end = time.time() - timer_start
    print(f"Process chunks took {timer_end} s ")
    timerwav = time.time()
    # same path the mux below reads (was TEMP_FOLDER + "/audioNew.wav")
    wavfile.write(tmp_newaudiofile_path, sample_rate, output_audio_data)
    timer_wav = time.time() - timerwav
    print(f"Process wavfile took {timer_wav} s ")
    # raw frames are no longer needed; delete them while ffmpeg muxes
    deletion_thread = Process(target=delete_path, args=(TEMP_TEMP_FOLDER,))
    deletion_thread.start()
    timer_cogent = time.time()
    # reuse the shared helper instead of duplicating the ffmpeg mux command inline
    combine_video_audio(frame_rate, output_file, tmp_newaudiofile_path, tmp_newframe_namingpattern)
    timer_cogent = time.time() - timer_cogent
    print(f"Process command took {timer_cogent} s ")
    deletion_thread.join()
    delete_path(TEMP_FOLDER)
    LOG.critical(f"end of jumpcut")
def combine_video_audio(frame_rate, output_file, tmp_newaudiofile_path, tmp_newframe_namingpattern):
    """Mux the re-timed frame images and the rebuilt audio track into *output_file* via ffmpeg."""
    mux_command = (
        f"ffmpeg -thread_queue_size {6000} -hide_banner -loglevel warning -stats -y "
        f"-framerate {str(frame_rate)} "
        f"-i {tmp_newframe_namingpattern} -ac 2 -i {tmp_newaudiofile_path} -framerate {str(frame_rate)} "
        f"-c:v libx264 -preset fast -crf 28 -pix_fmt yuvj420p "
        f"{output_file}"
    )
    subprocess.call(mux_command, shell=True)
def generate_audioinfo(input_file, sample_rate, tmp_audiofile_path):
    """Extract the audio track of *input_file* to a WAV file and load it.

    Returns ``(audio_data, sample_count, peak_volume, actual_sample_rate)``;
    the sample rate is re-read from the WAV ffmpeg actually produced.
    """
    # todo if input.mp4 is actually necessarily
    extract_cmd = f'ffmpeg -hide_banner -loglevel warning ' \
                  f'-i "{input_file}" ' \
                  f'-ab 160k -ac 2 -ar {str(sample_rate)} ' \
                  f'-vn "{tmp_audiofile_path}"'
    call_subprocess(extract_cmd, shell=False)
    sample_rate, audio_data = wavfile.read(tmp_audiofile_path)
    return audio_data, audio_data.shape[0], get_max_volume(audio_data), sample_rate
def generate_picture_separation_process(frame_quality, input_file, tmp_frame_namingpattern):
    """Start (and return) a background process extracting every video frame to JPEG."""
    extract_cmd = f'ffmpeg -hide_banner -loglevel warning -stats ' \
                  f'-i "{input_file}" ' \
                  f'-qscale:v {str(frame_quality)} ' \
                  f'"{tmp_frame_namingpattern}"'
    worker = Process(target=call_subprocess, args=(extract_cmd,))
    worker.start()
    return worker
def generate_has_loud_audio(audio_frame_count, audio_data, audio_sample_count, max_audio_volume,
                            samples_per_frame, silent_threshold):
    """Flag every video-frame-sized audio window whose peak exceeds the silence threshold.

    Returns a float array of length *audio_frame_count*: 1.0 = loud, 0.0 = silent.
    """
    has_loud_audio = np.zeros(audio_frame_count)
    for frame_idx in range(audio_frame_count):
        window_start = int(frame_idx * samples_per_frame)
        window_end = min(int((frame_idx + 1) * samples_per_frame), audio_sample_count)
        window = audio_data[window_start:window_end]
        # peak |sample| of this window, normalised by the clip-wide peak
        # (inlined from get_max_volume)
        relative_peak = float(max(np.max(window), -np.min(window))) / max_audio_volume
        if relative_peak >= silent_threshold:
            has_loud_audio[frame_idx] = 1
    return has_loud_audio
def infer_framerate(frame_rate, tmp_paramsfile_path):
    """Detect the input video's frame rate from ffmpeg's stream information.

    Runs ffmpeg on TEMP/input.mp4, captures its output into
    *tmp_paramsfile_path* and greps it for the "NN fps" figure of the first
    video stream; returns *frame_rate* unchanged when none is found.

    Fixes vs. the original:
    - ``2>&1`` was passed as a literal argument because ``shell=False``;
      ffmpeg prints stream info on stderr, so the params file never
      contained it. The command now runs through the shell.
    - ``-loglevel warning`` suppressed the info-level stream dump entirely,
      so the regex could never match; it has been removed.
    - fractional rates such as "29.97 fps" are now accepted, and the search
      stops at the first video stream (resolving the old "break?" TODO).
    """
    global TEMP_FOLDER
    command = f'ffmpeg -hide_banner ' \
              f'-i "{os.path.join(TEMP_FOLDER, "input.mp4")}" ' \
              f'2>&1'
    call_subprocess(command, shell=True, stdout=tmp_paramsfile_path)
    with open(tmp_paramsfile_path, "r") as parameter_file:
        for line in parameter_file:
            m = re.search(r"Stream #.*Video.* ([0-9]+(?:\.[0-9]+)?) fps", line)
            if m is not None:
                frame_rate = float(m.group(1))
                break  # first video stream wins
    return frame_rate
def generate_chunks(audio_frame_count, frame_spreadage, has_loud_audio):
    """Group consecutive frames into ``[start, stop, state]`` chunks.

    A frame is kept when any frame within *frame_spreadage* of it is loud,
    so brief pauses adjacent to speech survive. Returns a list of
    ``[first_frame, one_past_last_frame, should_include]`` triples covering
    the whole clip.
    """
    should_include_frame = np.zeros(audio_frame_count)
    chunks = [[0, 0, 0]]  # sentinel; stripped off before returning
    for frame_idx in range(audio_frame_count):
        window_lo = int(max(0, frame_idx - frame_spreadage))
        window_hi = int(min(audio_frame_count, frame_idx + 1 + frame_spreadage))
        should_include_frame[frame_idx] = np.max(has_loud_audio[window_lo:window_hi])
        flipped = (frame_idx >= 1 and
                   should_include_frame[frame_idx] != should_include_frame[frame_idx - 1])
        if flipped:
            # loud/silent state changed: close the running chunk here
            chunks.append([chunks[-1][1], frame_idx, should_include_frame[frame_idx - 1]])
    # Close the final chunk and drop the sentinel.
    # NOTE(review): the state index is frame_idx - 1 (second-to-last frame),
    # kept verbatim from the original -- if the state flips on the very last
    # frame the final chunk carries the OLD state; possibly an off-by-one.
    chunks.append([chunks[-1][1], audio_frame_count, should_include_frame[frame_idx - 1]])
    return chunks[1:]
def process_folder(output_dir: str, silent_threshold: float, new_speed: list, frame_spreadage: int,
sample_rate: float, frame_rate: float, frame_quality: int, input_path: str):
try:
number_of_files = count_mp4_files_in_folder(input_path)
except IOError:
print("something went wrong when trying to access the '%s' - Folder" % input_path)
return
if number_of_files > 0:
print("\n\nInput-Source is the | |
<reponame>justinbois/eqtk<filename>eqtk/parsers.py
import warnings
import numpy as np
import pandas as pd
from . import constants
def parse_rxns(rxns):
    """
    Parse reactions inputted as multiline strings to a stoichiometric
    matrix.

    Parameters
    ----------
    rxns : str
        String expressing chemical reactions. The syntax is similar to
        that of Cantera (http://cantera.org). Specifically:
        - The chemical equality operator is defined by ``<=>`` or ``⇌``
          and must be preceded and followed by whitespace.
        - The chemical ``+`` operator must be preceded and followed by
          whitespace.
        - Stoichiometric coefficients are followed by a space.
        - Each chemical reaction appears on its own line.

    Returns
    -------
    output : Pandas DataFrame
        DataFrame with column names given by the chemical species.
        Each row of the data frame represents the stoichiometric
        coefficients of a reaction. If equilibrium constants are given
        in the input, a column `"equilibrium constant"` is also included
        in the output, giving the equilibrium constant for the
        respective reaction.

    Examples
    --------
    >>> import eqtk
    >>> rxns = '''
    ... L + A <=> LA ; 1.2
    ... L + B <=> LB ; 3e-7
    ... A + B <=> AB ; 0.005
    ... LB + A <=> LAB ; 2'''
    >>> eqtk.parse_rxns(rxns)
         L    A   LA    B   LB   AB  LAB  equilibrium constant
    0 -1.0 -1.0  1.0  0.0  0.0  0.0  0.0          1.200000e+00
    1 -1.0  0.0  0.0 -1.0  1.0  0.0  0.0          3.000000e-07
    2  0.0 -1.0  0.0 -1.0  0.0  1.0  0.0          5.000000e-03
    3  0.0 -1.0  0.0  0.0 -1.0  0.0  1.0          2.000000e+00

    >>> import eqtk
    >>> rxns = '''
    ... AB <=> A + B ; 0.015
    ... AC <=> A + C ; 0.003
    ... AA <=> 2 A ; 0.02'''
    >>> eqtk.parse_rxns(rxns)
        AB    A    B   AC    C   AA  equilibrium constant
    0 -1.0  1.0  1.0  0.0  0.0  0.0                 0.015
    1  0.0  1.0  0.0 -1.0  1.0  0.0                 0.003
    2  0.0  2.0  0.0  0.0  0.0 -1.0                 0.020
    """
    # Parse every non-blank line into a {species: coefficient} dict and an
    # (optional) equilibrium constant.
    N_dicts = []
    K_vals = []
    for line in rxns.splitlines():
        if not line.strip():
            continue
        N_dict, K = _parse_rxn(line)
        N_dicts.append(N_dict)
        K_vals.append(K)

    # Equilibrium constants must be given for every reaction or for none.
    n_missing = sum(K is None for K in K_vals)
    if n_missing not in (0, len(K_vals)):
        raise ValueError(
            "Either all or none of the equilibrium constants must be specified."
        )

    # Collect chemical species in order of first appearance.
    species = []
    for N_dict in N_dicts:
        for compound in N_dict:
            if compound not in species:
                species.append(compound)

    # Fill the stoichiometric matrix row by row.
    col_of = {name: j for j, name in enumerate(species)}
    N = np.zeros((len(N_dicts), len(species)), dtype=float)
    for r, N_dict in enumerate(N_dicts):
        for compound, coeff in N_dict.items():
            N[r, col_of[compound]] = coeff

    # Package as a DataFrame, appending the K column when K's were supplied.
    N = pd.DataFrame(data=N, columns=species)
    if K_vals[0] is not None:
        N["equilibrium constant"] = np.array(K_vals, dtype=float)
    return N
def parse_input(
c0,
N=None,
K=None,
logK=None,
A=None,
G=None,
names=None,
units=None,
solvent_density=None,
T=293.15,
G_units=None,
):
"""Prepare input for use in low-level interface.
Parameters
----------
c0 : array_like, dict, Series, or DataFrame, shape (n_points, n_compounds) or (n_compounds, )
Each row contains the total "initial" concentration of all
possible chemical species in solution. The equilibrium
concentration of all species is computed for each row in `c0`.
`c0[i, j]` is the initial concentration of compound `j` for
calculation `i`. `c0` may also be passed as a Pandas Series
where the indices contain the name of the chemical species and
each value is the "initial concentration." `c0` may also be
passed as a Pandas DataFrame where each row contains the total
"initial" concentration of all possible compounds in solution
and the column names are the names of the chemical species. If
`c0` is passed as a dict, the dict must be convertible to a
Pandas Series or DataFrame as `pd.Series(c0)` or
`pd.DataFrame(c0)`.
N : array_like or DataFrame, default `None`
Stoichiometic matrix. `N[r, j]` = the stoichiometric coefficient
of compound `j` in chemical reaction `r`. All rows of `N` must
be linearly independent. If entered as a DataFrame, the name of
chemical species `j` is `N.columns[j]`. Optionally, column
`'equilibrium constant'` contains the equilibrium constants for
each reaction in units commensurate with those of `c0`. If `N`
is given, `A` and `G` cannot be given.
K : array_like, shape (n_reactions,), default `None`
`K[r]` is the equilibrium constant for chemical reaction r in
units commensurate with those of `c0`. If `N` is given as a
DataFrame with an `'equilibrium constant'` column, `K` should
not be supplied. If `K`is given, `A` and `G` cannot be given.
logK : array_like, shape (n_reactions,), default `None`
`logK[r]` is the natural logarithm of the equilibrium constant
for chemical reaction r. If `logK` is specified, the
concentrations must all be dimensionless (`units=None`). If `N`
is given as a DataFrame with a `'log equilibrium constant'`
column, `logK` should not be supplied. If `K` is given, `A`,
`G`, and `K` cannot be given.
A : array_like or DataFrame, n_compounds columns
Constraint matrix. If `c` is the output, then `A @ c0 = A @ c`.
All entries must be nonnegative and the rows of `A` must be
linearly independent. If entered as a DataFrame, the name of
chemical species `j` is `A.columns[j]`. If `A` is given, `G`
must be given, and `N` and `K` cannot be given.
G : array_like, shape (n_compounds, ), default `None`
`G[j]` is the free energy of chemical species `j` in units
specified by `G_units`. If `G` is given, `A` must be given, and
`N` and `K` cannot be given.
names : list or tuple of str, default `None`, optional
The names of the chemical species. Names are inferred if `N` or
`A` is given as a DataFrame, in which case `names` is
unnecessary.
units : string or `None`, default `None`
The units of the concentrations inputted as `c0`. The output is
also in these units. Allowable values are {`None`,
'mole fraction', 'molar', 'M', 'millimolar', 'mM', 'micromolar',
'uM', 'µM', 'nanomolar', 'nM', 'picomolar', 'pM'}. If `None`,
concentrations are considered to be dimensionless. The
equilibrium constants given by `K` must have corresponding
units.
G_units : string, default `None`
Units in which free energy is given. If `None` or `'kT'`, the
free energies are specified in units of of the thermal energy
kT. Allowable values are {None, 'kT', kcal/mol', 'J', 'J/mol',
'kJ/mol', 'pN-nm'}.
solvent_density : float, default `None`
The density of the solvent in units commensurate with the
`units` keyword argument. Default (`None`) assumes the solvent
is water, and its density is computed at the temperature
specified by the `T` keyword argument.
T : float, default = 293.15
Temperature, in Kelvin, of the solution. When `N` and `K` are
given, `T` is ignored if `solvent_density` is given or if
`units` is `None`. If `A` and `G` are given, `T` is ignored when
`units` and `G_units` are both `None`.
Returns
-------
x0 : Numpy array, dtype float, shape (n_points, n_compounds)
Initial concentrations in dimensionless units. If `units` is
not `None`, then the `x0` is a mole fraction.
N : Numpy array, dtype float, shape (n_reactions, n_compounds)
The stoichiometric matrix. Returned as `None` if inputted `N` is
`None`.
logK : Numpy array, dtype float, shape (n_reactions,)
The natural logarithm of the dimensionless equilibrium
constants. Returned as `None` if inputted `G` is `None`.
A : Numpy array, dtype float, shape (n_conserv_laws, n_compounds)
The conservation matrix. Returned as `None` if inputted `A` is
`None`.
G : Numpy array, dtype float, shape (n_compounds,)
The free energies of the chemical species. Returned as `None` if
inputted `G` is `None`.
names : list of strings, len n_compounds
Names of chemical species. If inputted `names` is `None` or if
the names of the species cannot be inferred from any other
input, returned as `None`.
solvent_density : float
The density of the solvent.
single_point : bool
True if only one calculation is to be computed (n_points == 1).
False otherwise.
"""
if type(N) == str:
N = parse_rxns(N)
_check_units(units)
_check_NK_AG(N, K, logK, A, G, units)
_check_G_units(G_units, T)
_check_T(T)
_check_solvent_density(solvent_density, units)
_check_names_type(names)
solvent_density = _parse_solvent_density(solvent_density, T, units)
| |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import glob
import ast
import json
import time
import matplotlib.pyplot as plt
import matplotlib
from PIL import Image
import cv2
from ..datasets import GOT10k
from ..utils.metrics import rect_iou
from ..utils.viz import show_frame
from ..utils.ioutils import compress
class ExperimentGOT10k(object):
r"""Experiment pipeline and evaluation toolkit for GOT-10k dataset.
Args:
root_dir (string): Root directory of GOT-10k dataset where
``train``, ``val`` and ``test`` folders exist.
subset (string): Specify ``train``, ``val`` or ``test``
subset of GOT-10k.
list_file (string, optional): If provided, only run experiments on
sequences specified by this file.
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, subset='val', list_file=None,
result_dir='results', report_dir='reports', use_dataset=True, start_idx=0, end_idx=None):
super(ExperimentGOT10k, self).__init__()
assert subset in ['val', 'test']
self.subset = subset
if use_dataset:
self.dataset = GOT10k(
root_dir, subset=subset, list_file=list_file)
self.result_dir = os.path.join(result_dir, 'GOT-10k')
self.report_dir = os.path.join(report_dir, 'GOT-10k')
self.nbins_iou = 101
self.repetitions = 3
self.start_idx = start_idx
self.end_idx = end_idx
    def run(self, tracker, visualize=False, save_video=False):
        """Run *tracker* over the configured GOT-10k sequence window and store results.

        Each sequence is run ``self.repetitions`` times (once for trackers
        flagged deterministic); per-repetition bounding boxes and timings are
        written under ``self.result_dir`` via ``_record``. When *save_video*
        is set, an .avi with predicted (and, if available, ground-truth)
        boxes overlaid is rendered per sequence.
        """
        if self.subset == 'test':
            # test-split groundtruth is withheld; scores require a server submission
            print('\033[93m[WARNING]:\n' \
                  'The groundtruths of GOT-10k\'s test set is withholded.\n' \
                  'You will have to submit your results to\n' \
                  '[http://got-10k.aitestunion.com/]' \
                  '\nto access the performance.\033[0m')
            time.sleep(2)
        print('Running tracker %s on GOT-10k...' % tracker.name)
        self.dataset.return_meta = False
        # resolve the [start_idx, end_idx) window over the sequence list
        end_idx = self.end_idx
        if end_idx is None:
            end_idx = len(self.dataset)
        for s in range(self.start_idx, end_idx):
            img_files, anno = self.dataset[s]
            seq_name = self.dataset.seq_names[s]
            print('--Sequence %d/%d: %s' % (
                s + 1, len(self.dataset), seq_name))
            # run multiple repetitions for each sequence
            for r in range(self.repetitions):
                # check if the tracker is deterministic
                if r > 0 and tracker.is_deterministic:
                    break
                elif r == 3 and self._check_deterministic(
                        tracker.name, seq_name):
                    # NOTE(review): with self.repetitions == 3, r never reaches
                    # 3, so this branch looks unreachable -- verify intent.
                    print(' Detected a deterministic tracker, ' +
                          'skipping remaining trials.')
                    break
                print(' Repetition: %d' % (r + 1))
                # skip if results exist
                record_file = os.path.join(
                    self.result_dir, tracker.name, seq_name,
                    '%s_%03d.txt' % (seq_name, r + 1))
                if os.path.exists(record_file):
                    print(' Found results, skipping', seq_name)
                    continue
                # let trackers that support it know which video they are on
                if hasattr(tracker, 'set_video_name'):
                    tracker.set_video_name(seq_name)
                # tracking loop
                boxes, times = tracker.track(
                    img_files, anno[0, :], visualize=visualize)
                if hasattr(tracker, 'set_video_name'):
                    tracker.set_video_name(None)
                # record results
                self._record(record_file, boxes, times)
            # save videos
            if save_video:
                video_dir = os.path.join(os.path.dirname(os.path.dirname(self.result_dir)),
                                         'videos', 'GOT-10k', tracker.name)
                video_file = os.path.join(video_dir, '%s.avi' % seq_name)
                if not os.path.isdir(video_dir):
                    os.makedirs(video_dir)
                image = Image.open(img_files[0])
                img_W, img_H = image.size
                out_video = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MJPG'), 10, (img_W, img_H))
                for ith, (img_file, pred) in enumerate(zip(img_files, boxes)):
                    image = Image.open(img_file)
                    if not image.mode == 'RGB':
                        image = image.convert('RGB')
                    # RGB -> BGR channel order for OpenCV
                    img = np.array(image)[:, :, ::-1].copy()
                    pred = pred.astype(int)
                    # NOTE(review): self.color is not defined in the visible
                    # class -- presumably set elsewhere (subclass/monkeypatch);
                    # verify before relying on this path.
                    cv2.rectangle(img, (pred[0], pred[1]), (pred[0] + pred[2], pred[1] + pred[3]), self.color['pred'], 2)
                    if ith < anno.shape[0]:
                        gt = anno[ith].astype(int)
                        cv2.rectangle(img, (gt[0], gt[1]), (gt[0] + gt[2], gt[1] + gt[3]), self.color['gt'], 2)
                    out_video.write(img)
                out_video.release()
                print(' Videos saved at', video_file)
    def report(self, tracker_names, plot_curves=True):
        """Evaluate recorded results and write a performance report.

        For the 'test' subset: only zips each tracker's records for server
        submission and returns None. For 'val': computes AO, SR@0.5 and
        speed per sequence and overall, dumps them to performance.json,
        optionally plots success curves, and returns the performance dict.
        """
        assert isinstance(tracker_names, (list, tuple))
        if self.subset == 'test':
            pwd = os.getcwd()
            # generate compressed submission file for each tracker
            for tracker_name in tracker_names:
                # compress all tracking results
                result_dir = os.path.join(self.result_dir, tracker_name)
                os.chdir(result_dir)
                save_file = '../%s' % tracker_name
                compress('.', save_file)
                print('Records saved at', save_file + '.zip')
            # print submission guides
            print('\033[93mLogin and follow instructions on')
            print('http://got-10k.aitestunion.com/submit_instructions')
            print('to upload and evaluate your tracking results\033[0m')
            # switch back to previous working directory
            os.chdir(pwd)
            return None
        elif self.subset == 'val':
            # meta information is useful when evaluation
            self.dataset.return_meta = True
            # assume tracker_names[0] is your tracker
            report_dir = os.path.join(self.report_dir, tracker_names[0])
            if not os.path.exists(report_dir):
                os.makedirs(report_dir)
            report_file = os.path.join(report_dir, 'performance.json')
            # visible ratios of all sequences (frame 0 is the init frame, hence [1:])
            seq_names = self.dataset.seq_names
            covers = {s: self.dataset[s][2]['cover'][1:] for s in seq_names}
            performance = {}
            for name in tracker_names:
                print('Evaluating', name)
                ious = {}
                times = {}
                performance.update({name: {
                    'overall': {},
                    'seq_wise': {}}})
                for s, (_, anno, meta) in enumerate(self.dataset):
                    seq_name = self.dataset.seq_names[s]
                    record_files = glob.glob(os.path.join(
                        self.result_dir, name, seq_name,
                        '%s_[0-9]*.txt' % seq_name))
                    if len(record_files) == 0:
                        raise Exception('Results for sequence %s not found.' % seq_name)
                    # read results of all repetitions
                    boxes = [np.loadtxt(f, delimiter=',') for f in record_files]
                    assert all([b.shape == anno.shape for b in boxes])
                    # calculate and stack all ious
                    bound = ast.literal_eval(meta['resolution'])
                    seq_ious = [rect_iou(b[1:], anno[1:], bound=bound) for b in boxes]
                    # only consider valid frames where targets are visible
                    seq_ious = [t[covers[seq_name] > 0] for t in seq_ious]
                    seq_ious = np.concatenate(seq_ious)
                    ious[seq_name] = seq_ious
                    # stack all tracking times
                    times[seq_name] = []
                    time_file = os.path.join(
                        self.result_dir, name, seq_name,
                        '%s_time.txt' % seq_name)
                    if os.path.exists(time_file):
                        seq_times = np.loadtxt(time_file, delimiter=',')
                        seq_times = seq_times[~np.isnan(seq_times)]
                        seq_times = seq_times[seq_times > 0]
                        if len(seq_times) > 0:
                            times[seq_name] = seq_times
                    # store sequence-wise performance
                    # NOTE(review): seq_times is only assigned inside the
                    # os.path.exists branch above -- if the very first
                    # sequence lacks a time file this raises NameError, and
                    # later sequences would reuse a stale value; verify.
                    ao, sr, speed, _ = self._evaluate(seq_ious, seq_times)
                    performance[name]['seq_wise'].update({seq_name: {
                        'ao': ao,
                        'sr': sr,
                        'speed_fps': speed,
                        'length': len(anno) - 1}})
                ious = np.concatenate(list(ious.values()))
                times = np.concatenate(list(times.values()))
                # store overall performance
                ao, sr, speed, succ_curve = self._evaluate(ious, times)
                performance[name].update({'overall': {
                    'ao': ao,
                    'sr': sr,
                    'speed_fps': speed,
                    'succ_curve': succ_curve.tolist()}})
            # save performance
            with open(report_file, 'w') as f:
                json.dump(performance, f, indent=4)
            # plot success curves
            if plot_curves:
                self.plot_curves([report_file], tracker_names)
            return performance
def show(self, tracker_names, seq_names=None, play_speed=1):
if seq_names is None:
seq_names = self.dataset.seq_names
elif isinstance(seq_names, str):
seq_names = [seq_names]
assert isinstance(tracker_names, (list, tuple))
assert isinstance(seq_names, (list, tuple))
play_speed = int(round(play_speed))
assert play_speed > 0
self.dataset.return_meta = False
for s, seq_name in enumerate(seq_names):
print('[%d/%d] Showing results on %s...' % (
s + 1, len(seq_names), seq_name))
# load all tracking results
records = {}
for name in tracker_names:
record_file = os.path.join(
self.result_dir, name, seq_name,
'%s_001.txt' % seq_name)
records[name] = np.loadtxt(record_file, delimiter=',')
# loop over the sequence and display results
img_files, anno = self.dataset[seq_name]
for f, img_file in enumerate(img_files):
if not f % play_speed == 0:
continue
image = Image.open(img_file)
boxes = [anno[f]] + [
records[name][f] for name in tracker_names]
show_frame(image, boxes,
legends=['GroundTruth'] + tracker_names,
colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
'orange', 'purple', 'brown', 'pink'])
def _record(self, record_file, boxes, times):
# record bounding boxes
record_dir = os.path.dirname(record_file)
if not os.path.isdir(record_dir):
os.makedirs(record_dir)
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
while not os.path.exists(record_file):
print('warning: recording failed, retrying...')
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
print(' Results recorded at', record_file)
# record running times
time_file = record_file[:record_file.rfind('_')] + '_time.txt'
times = times[:, np.newaxis]
if os.path.exists(time_file):
exist_times = np.loadtxt(time_file, delimiter=',')
if exist_times.ndim == 1:
exist_times = exist_times[:, np.newaxis]
times = np.concatenate((exist_times, times), axis=1)
np.savetxt(time_file, times, fmt='%.8f', delimiter=',')
def _check_deterministic(self, tracker_name, seq_name):
record_dir = os.path.join(
self.result_dir, tracker_name, seq_name)
record_files = sorted(glob.glob(os.path.join(
record_dir, '%s_[0-9]*.txt' % seq_name)))
if len(record_files) < 3:
return False
records = []
for record_file in record_files:
with open(record_file, 'r') as f:
records.append(f.read())
return len(set(records)) == 1
def _evaluate(self, ious, times):
# AO, SR and tracking speed
ao = np.mean(ious)
sr = np.mean(ious > 0.5)
if len(times) > 0:
# times has to be an array of positive values
speed_fps = np.mean(1. / times)
else:
speed_fps = -1
# success curve
# thr_iou = np.linspace(0, 1, 101)
thr_iou = np.linspace(0, 1, self.nbins_iou)
bin_iou = np.greater(ious[:, None], thr_iou[None, :])
succ_curve = np.mean(bin_iou, axis=0)
return ao, sr, speed_fps, succ_curve
def plot_curves(self, report_files, tracker_names, extension='.png'):
assert isinstance(report_files, list), \
'Expected "report_files" to be a list, ' \
'but got %s instead' % type(report_files)
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
if not os.path.exists(report_dir):
os.makedirs(report_dir)
performance = {}
for report_file in report_files:
with open(report_file) as f:
performance.update(json.load(f))
succ_file = os.path.join(report_dir, 'success_plot'+extension)
key = 'overall'
# filter performance by tracker_names
performance = {k:v for k,v in performance.items() if k in tracker_names}
# sort trackers by AO
tracker_names = list(performance.keys())
aos = [t[key]['ao'] for t in performance.values()]
inds = np.argsort(aos)[::-1]
tracker_names = [tracker_names[i] for i in inds]
# markers
markers = ['-', '--', '-.']
markers = [c + m for m in markers for c in [''] * 10]
matplotlib.rcParams.update({'font.size': 19})
# plot success curves
thr_iou = np.linspace(0, 1, self.nbins_iou)
fig, ax = plt.subplots()
lines = []
legends = []
for i, name in enumerate(tracker_names):
line, = ax.plot(thr_iou,
performance[name][key]['succ_curve'],
markers[i % len(markers)],
linewidth=4,
zorder=performance[name][key]['ao'])
lines.append(line)
if name == "Siam R-CNN (ours)":
legends.append('$\\bf{Siam}$ $\\bf{R}$$\\bf{-}$$\\bf{CNN}$: [%.3f]' % performance[name][key]['ao'])
else:
legends.append('%s: [%.3f]' % | |
import sys
import os
import gensim
import re
import random
import csv
import logging
import numpy as np
# import theanets
import json
# from sklearn.metrics import classification_report, confusion_matrix
import operator
from random import random
from gensim.models import KeyedVectors
### Main program
# Command-line interface:
#   script.py [folder] [doc2vecmodel] [-t N] [-m N] [-i N] [-black] [-plain]
# Each flag is located by scanning sys.argv, consumed, and spliced out so the
# positional arguments (folder, model) end up at fixed indices afterwards.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
path = ''
# d2vFile = "300dimStreamedChemPhrases/model_streamed_topic.w2v"
d2vFile = "doc2vec/samuel_ngrams_model/model_chemdump_ngrams.w2v"
threshold = 6 #7      # minimum similarity score before a sentence may be picked next
sim_threshold = 0.5 #0.4  # minimum embedding similarity for a tag pair to count
importance = 0 #20    # salience cutoff; 0 disables salience-based tagging
# -t: similarity threshold.  The digit test strips one "." from the argument
# and checks the remainder is numeric, i.e. accepts "7" and "0.4" alike.
if "-t" in sys.argv:
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-t":
            if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
                threshold = float(sys.argv[i+1])
            else:
                input(sys.argv[i+1] + " is not a valid number. " + str(threshold) + " used as default. \nPress Enter to confirm. ")
            sys.argv = sys.argv[:i] + sys.argv[i+2:]
            break
# -m: tag-pair similarity threshold (same parsing scheme as -t).
if "-m" in sys.argv:
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-m":
            if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
                sim_threshold = float(sys.argv[i+1])
            else:
                input(sys.argv[i+1] + " is not a valid number. " + str(sim_threshold) + " used as default. \nPress Enter to confirm. ")
            sys.argv = sys.argv[:i] + sys.argv[i+2:]
            break
# -i: word-importance cutoff (same parsing scheme as -t).
if "-i" in sys.argv:
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-i":
            if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
                importance = float(sys.argv[i+1])
            else:
                input(sys.argv[i+1] + " is not a valid number. " + str(importance) + " used as default. \nPress Enter to confirm. ")
            sys.argv = sys.argv[:i] + sys.argv[i+2:]
            break
# -black: suppress per-document font colours in the HTML output.
black = False
if "-black" in sys.argv:
    black = True
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-black":
            sys.argv = sys.argv[:i] + sys.argv[i+1:]
            break
# -plain: emit plain text with the markup tucked into HTML comments.
plain = False
if "-plain" in sys.argv:
    plain = True
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-plain":
            sys.argv = sys.argv[:i] + sys.argv[i+1:]
            break
# Remaining positionals: argv[1] = data folder, argv[2] = word-vector model.
if len(sys.argv) == 1:
    input("No folder given. Current folder used. \nPress Enter to confirm. ")
elif sys.argv[1] == "-h":
    print("folder [doc2vecmodel] [-t 7] [-m 0.4]")
else:
    path = sys.argv[1]
    if path[-1] != "/":
        path += "/"
    if len(sys.argv) > 2:
        d2vFile = sys.argv[2]
### Main program
# NOTE(review): basicConfig is already called above; this second call is a
# no-op since logging is configured once per process.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Load word vectors
vectors = KeyedVectors.load(d2vFile, mmap='r')
# print(vectors.most_similar("crops"))
# input("->")
# Open the section-sentence files, preferring the most processed variant:
# restructured-syn > restructured > raw, falling back with a notice each time.
thisFileName = path + "section-sentences-restructured-syn-pos.txt"
if not os.path.exists(thisFileName):
    print("no synonym pos file found")
    thisFileName = path + "section-sentences-restructured-pos.txt"
    if not os.path.exists(thisFileName):
        print("no restructured pos file found")
        thisFileName = path + "section-sentences-pos.txt"
f1 = open(thisFileName)
pos = f1.read()
f1.close()
# pos[i] holds the POS tags of sentence i; trailing empty token dropped.
pos = pos.split("\n")[:-1]
for i in range(len(pos)):
    pos[i] = pos[i].split(" ")[:-1]
# Tokenised sentence text, same fallback chain as the POS files.
thisFileName = path + "section-sentences-restructured-syn-tok.txt"
if not os.path.exists(thisFileName):
    print("no synonym tok file found")
    thisFileName = path + "section-sentences-restructured-tok.txt"
    if not os.path.exists(thisFileName):
        print("no restructured tok file found")
        thisFileName = path + "section-sentences-tok.txt"
f1 = open(thisFileName)
raw = f1.read()
f1.close()
raw = raw.split("\n")[:-1]
for i in range(len(raw)):
    raw[i] = raw[i].split(" ")[:-1]
# Sentence id records: ids[i] = [sentID, doc, ..., origSent]; the first line
# (header) and the trailing empty line are dropped.
thisFileName = path + "section-sentences-ids.txt"
if not os.path.exists(thisFileName):
    thisFileName += ".txt"
f1 = open(thisFileName)
ids = f1.read()
f1.close()
ids = ids.split("\n")[1:-1]
for i in range(len(ids)):
    ids[i] = ids[i].split(",")
# Per-sentence metadata (TSV); meta[i][1] is used later as an LCS score.
f1 = open(path + "section-sentences-restructured-meta.tsv")
meta = f1.read()
f1.close()
meta = meta.split("\n")[:-1]
for i in range(len(meta)):
    meta[i] = meta[i].split("\t")
# Open the files with the first (abstract) sentences, using the same
# most-processed-first fallback chain as the section files above.
thisFileName = path + "abstract-sentences-restructured-syn-pos.txt"
if not os.path.exists(thisFileName):
    print("no synonym abstract pos file found")
    thisFileName = path + "abstract-sentences-restructured-pos.txt"
    if not os.path.exists(thisFileName):
        print("no abstract restructured pos file found")
        thisFileName = path + "abstract-sentences-pos.txt"
if os.path.exists(thisFileName):
    f1 = open(thisFileName)
    fst_pos = f1.read()
    f1.close()
    fst_pos = fst_pos.split("\n")[:-1]
    for i in range(len(fst_pos)):
        fst_pos[i] = fst_pos[i].split(" ")[:-1]
    thisFileName = path + "abstract-sentences-restructured-syn-tok.txt"
    if not os.path.exists(thisFileName):
        print("no synonym abstract tok file found")
        thisFileName = path + "abstract-sentences-restructured-tok.txt"
        if not os.path.exists(thisFileName):
            print("no abstract restructured tok file found")
            thisFileName = path + "abstract-sentences-tok.txt"
    f1 = open(thisFileName)
    fst_raw = f1.read()
    f1.close()
    fst_raw = fst_raw.split("\n")[:-1]
    for i in range(len(fst_raw)):
        # NOTE(review): unlike the section files, the trailing token is kept
        # here ([:-1] commented out) — confirm whether that is intentional.
        fst_raw[i] = fst_raw[i].split(" ") #[:-1]
    f1 = open(path + "abstract-sentences-ids.txt")
    fst_ids = f1.read()
    f1.close()
    fst_ids = fst_ids.split("\n")[1:-1]
    for i in range(len(fst_ids)):
        fst_ids[i] = fst_ids[i].split(",")
    f1 = open(path + "abstract-sentences-restructured-meta.tsv")
    fst_meta = f1.read()
    f1.close()
    fst_meta = fst_meta.split("\n")[:-1]
    for i in range(len(fst_meta)):
        fst_meta[i] = fst_meta[i].split("\t")
else:
    print("no abstract found at all")
    fst_pos = []
    fst_raw = []
    fst_ids = []
    fst_meta = []
# Append the abstract sentences to the section sentences; sent_count marks
# the boundary so abstract sentences can be separated again later on
# (indices >= sent_count are abstract sentences).
sent_count = len(raw)
raw += fst_raw
pos += fst_pos
ids += fst_ids
meta += fst_meta
# raw = [["lithium"]]
# Count every token occurrence across all sentences.
allWords = {}
wholeSize = 0
for s in raw:
    for w in s:
        wholeSize += 1
        if w in allWords:
            allWords[w] += 1
        else:
            allWords[w] = 1
# Total token count of the word-vector training corpus.
corpusSize = 0
for w in vectors.wv.vocab:
    corpusSize += vectors.wv.vocab[w].count
# Replace raw counts by a salience ratio: local frequency relative to corpus
# frequency.  Words missing from the corpus keep their raw (int) count —
# later code uses type(...) == int to detect exactly those words.
for w in allWords:
    if w in vectors.wv.vocab:
        # print (w, allWords[w], (allWords[w]/wholeSize)/(vectors.wv.vocab[w].count/corpusSize), sep = "\t")
        allWords[w] = (allWords[w]/wholeSize)/(vectors.wv.vocab[w].count/corpusSize)
# sortedWords = sorted(allWords.items(), key=operator.itemgetter(1))
# for w in sortedWords:
#     if type(w[1]) == int:
#         print(w[0], "\tnot in corpus")
#     else:
#         print(w[0], "\t", w[1])
# tags: tag word -> {sentence index: True}
# sent_tags: sentence index -> {tag word: True}
tags = {}
sent_tags = {}
for i in range(len(raw)):
    sent_tags[i] = {}
imp_sent = range(len(raw))
def addToTag(sentence, word):
    """Register *word* as a tag occurring in *sentence*.

    Maintains the two module-level indexes `tags` (word -> sentences) and
    `sent_tags` (sentence -> words).  Purely numeric or empty words are
    ignored.
    """
    if word.isdigit() or len(word) == 0:
        return
    tags.setdefault(word, {})[sentence] = True
    sent_tags.setdefault(sentence, {})[word] = True
# Extract tags for every sentence.  With an importance cutoff active, tag by
# word salience; otherwise tag nouns, hyphenated words, and words containing
# interior capitals or digits.  Adjacent NN-NN / JJ-NN pairs become bigrams.
for sentence in range(len(pos)):
    for word in range(len(pos[sentence])):
        if importance:
            # Out-of-corpus words kept their raw int count in allWords, so
            # type(...) == int marks them as always important.
            if (allWords[raw[sentence][word]] > importance) or (type(allWords[raw[sentence][word]]) == int):  # 1e-8:
                addToTag(sentence, raw[sentence][word])
        else:
            if pos[sentence][word].startswith("NN"):
                addToTag(sentence, raw[sentence][word])
                continue
            if ("-" in raw[sentence][word]) and not raw[sentence][word].endswith("-"):
                addToTag(sentence, raw[sentence][word])
                continue
            if not raw[sentence][word].endswith("-"):
                # Interior capitals/digits suggest a named entity or formula.
                for x in raw[sentence][word][1:]:
                    if x.isupper() or x.isdigit():
                        addToTag(sentence, raw[sentence][word])
                        break
        # BUG FIX: the bounds guard previously applied only to the NN-NN
        # disjunct ("A and B or C" parses as "(A and B) or C"), so the JJ-NN
        # test could read pos[sentence][word+1] past the end of the sentence
        # and raise IndexError.  The guard now covers both disjuncts.
        if (len(pos[sentence]) - 2 >= word) and (
                ((pos[sentence][word] == "NN") and (pos[sentence][word+1] == "NN"))
                or ((pos[sentence][word] == "JJ") and (pos[sentence][word+1] == "NN"))):
            addToTag(sentence, raw[sentence][word] + "_" + raw[sentence][word+1])
# Initial presentation order: all sentence indices in file order.
ord_sent = list(range(len(raw)))
def getColor(doc):
    """Map a document id to a deterministic six-digit hex colour string.

    Three independent linear congruences derive one channel each; every
    channel is clamped to a multiple of 10 below 256, so the palette stays
    muted and stable across runs.
    """
    seed = int(doc)
    red = ((575 * seed + 313) % 907 % 20 + 2) * 10
    green = ((612 * seed + 741) % 1223 % 20 + 3) * 10
    blue = ((754 * seed + 329) % 761 % 20 + 1) * 10
    return '#{:06x}'.format((red * 256 + green) * 256 + blue)
# neworder accumulates the final sentence ordering; html the rendered lines.
neworder = [ord_sent[0]]
# ord_sent = ord_sent[1:]
html = ["<p>"] #+ " ".join(raw[neworder[-1]])
def format(outputLine, maxi):
    """Render one ordered sentence as an HTML line.

    NOTE: shadows the builtin `format`; kept because callers use this name.
    outputLine: sentence text, possibly containing underline markup.
    maxi: index of the sentence (>= sent_count means abstract sentence).
    """
    output = outputLine
    # while True:
    #     # output = output.replace('` <font style="text-decoration: underline">', "`")
    #     # output = output.replace("</font> </font>", "</font>")
    #     # output = output.replace('<font style="text-decoration: underline"> <font style="text-decoration: underline">', '<font style="text-decoration: underline">')
    #     # output = output.replace("'</font> '", "''")
    #     # print(output)
    #     if output == outputLine:
    #         break
    #     outputLine = output
    # Tidy up the backtick/apostrophe match markers inserted during ordering.
    outputLine = output.replace("` ", "`").replace(" '", "'").replace('underline"> ', 'underline">').replace(" </font>", "</font>")
    html = ""
    # Abstract sentences are emphasised in the rich output.
    if (not plain) and (maxi >= sent_count):
        html += "<b>"
    if not black:
        html += '<font color="' + getColor(ids[maxi][1]) + '">'
    html += outputLine
    # html += untermaxi(maxi)
    if not black:
        html += '''</font> ''' + '<font color="#000000">'
    # In plain mode the provenance info is hidden inside an HTML comment.
    if plain:
        html += "<!-- "
    if maxi < sent_count:
        # print(maxi, ids[maxi])
        html += "(sentID:" + ids[maxi][0] + ',doc:' + ids[maxi][1] + ',origSent:' + ids[maxi][3] + ")"
    else:
        html += '''(doc:''' + str(ids[maxi][1]) + ''',abstract Sentence) '''
    if (not plain) and (maxi >= sent_count):
        html += "</b>"
    # meta[maxi][1] is an LCS score; large values are flagged in the output.
    if int(meta[maxi][1]) < 11:
        html += " -- ok"
    else:
        html += " -- <i> LCS: " + str(meta[maxi][1]) + "</i>"
    if plain:
        html += " -->"
    html += "<br/>"
    return html
outputLast = ""
# Greedy ordering: repeatedly pick the not-yet-placed sentence whose tags are
# most similar to the tags of the last placed sentence, adding underline
# markup around the matching tags as a side effect.
while ord_sent:
    sims = []
    tag_counts = []
    max_sim = 0
    maxi = ord_sent[0]
    outputLine = " ".join(raw[ord_sent[0]])
    # outputLast = html[-1]
    # looking for the most similar sentence to come next
    for i in ord_sent:
        lastLine = outputLast
        thisLine = " ".join(raw[i])
        sim = 0
        # Count tag pairs whose embedding similarity clears sim_threshold.
        for y in sent_tags[i]:  # raw[i]:
            for x in sent_tags[neworder[-1]]:  # raw[neworder[-1]]:
                if (x in vectors) and (y in vectors):
                    this_sim = vectors.similarity(x, y)
                    if this_sim >= sim_threshold:
                        sim += 1  # this_sim #1
                        if (not plain):
                            # Mark the matching tag in the previous line ...
                            if not x in '<font style="text-decoration: underline">':
                                if (x.replace("_", " ") + " '") in lastLine:
                                    lastLine = (" " + lastLine).replace(x.replace("_", " ") + " '", x.replace("_", " ") + " ''")
                                else:
                                    lastLine = (" " + lastLine).replace(" " + x.replace("_", " ") + " ", ' <font style="text-decoration: underline"> ' + x.replace("_", " ") + " '</font> ")
                            # ... and the matching tag in the candidate line.
                            if outputLast and (not y in '<font style="text-decoration: underline">'):
                                if ("` " + y.replace("_", " ")) in thisLine:
                                    thisLine = (" " + thisLine).replace("` " + y.replace("_", " "), '`` ' + y.replace("_", " "))
                                else:
                                    thisLine = (" " + thisLine).replace(" " + y.replace("_", " ") + " ", ' <font style="text-decoration: underline">` ' + y.replace("_", " ") + ' </font> ')
                # else:
                #     print(y)
                elif x == y:
                    # Out-of-vocabulary tags still count when identical.
                    sim += 1
        # Bias towards first (abstract) sentences ...
        if (i > sent_count) and (neworder[-1] <= sent_count):
            sim *= 1.2
        # ... especially when it is from the same document ...
        if ids[i][1] == ids[neworder[-1]][1]:
            sim *= 1.7
        # ... and especially when it is the document's last remaining sentence.
        if len([x for x in ord_sent if ids[x][1] == ids[i][1]]) == 1:
            sim *= 5
        if (sim > max_sim) and (sim > threshold):
            max_sim = sim
            maxi = i
            outputLast = lastLine
            outputLine = thisLine
    while outputLast.startswith(" "):
        outputLast = outputLast[1:]
    if (outputLast):
        # output +=
        html += [format(outputLast, neworder[-1])]
    outputLast = outputLine
    neworder += [maxi]
    ord_sent = [s for s in ord_sent if s != maxi]  # ord_sent[:maxi] + ord_sent[maxi+1:]
# Flush the last selected sentence after the loop finishes.
html += [format(outputLast, neworder[-1])]
# if last_doc != ids[maxi][1]:
#     html += "</p><p>\n"
# print (maxi, ord_sent)
# print(max_sim)
# print(" ".join(raw[neworder[-1]]))
html = "\n".join(html)
ord_sent = neworder
def unterline(line):
    """Render sentence *line* with every known tag underlined.

    Bigram tags ("a_b") consume two tokens; `skip` suppresses the second
    token after a bigram match.
    """
    res = ""
    skip = False
    for i in range(len(raw[line])):
        if skip:
            skip = False
            continue
        if (not plain) and (raw[line][i] in tags):
            res += '<font style="text-decoration: underline">' + raw[line][i] + '</font> '
        # NOTE(review): `i < len(raw[line])-2` looks off by one — a bigram
        # needs only i+1 to be valid (i < len-1), so the last possible bigram
        # is never underlined.  Mirrors the guard used at tag extraction;
        # confirm before changing.
        elif (not plain) and (i < len(raw[line])-2) and ((raw[line][i] + "_" + raw[line][i+1]) in tags):
            res += '<font style="text-decoration: underline">' + raw[line][i] + " " + raw[line][i+1] + '</font> '
            skip = True
        else:
            res += raw[line][i] + ' '
    return res
# html = "<p>\n"
# last_doc = 0
# for line in ord_sent:
# if last_doc != ids[line][1]:
# html += "</p><p>\n"
# if (not plain) and (line | |
as other tables cannot refer to it
for name, table in reversed(cls.__createdTables.items()):
try:
# A table might not use the default connection wrapper
table.__testconnection.execute('DROP TABLE ' + name)
except Exception:
# The exception raised for a missing table is driver dependent
pass
cls.__createdTables.clear()
Variable.clear()
    def reset(self):
        """Forcefully create a new table and add the provided rows.

        Drops any existing table first, then recreates it via ensure().
        """
        try:
            self.drop()
        except Exception:
            # The exception raised for a missing table depends on the driver,
            # so any failure to drop is deliberately ignored here.
            pass
        self.ensure()
    def ensure(self):
        """Create the table if it does not exist, otherwise verify the rows.

        If the table does exist but does not contain the expected set of
        rows an exception is raised to prevent overriding existing data.

        Raises:
        - ValueError: if the drawn table contains unresolved variables, or
          if an existing database table holds different rows.
        """
        if self.__variables:
            raise ValueError(self.name + " contains variables")
        # Use a hack to check if the table is available in a portable manner:
        # probe with a SELECT and treat any failure as "table missing".
        try:
            self.__testconnection.execute('SELECT 1 FROM ' + self.name)
        except Exception:
            # The exception raised for a missing table depends on the driver
            self.create()
            # If the table was drawn without any rows there are none to add
            if self.__rows:
                self.__testconnection.execute(self.getSQLToInsert())
                self.__testconnection.commit()
            return
        # If the table exists and contains the correct rows we use it as is
        if not self.assertEqual(verbose=False):
            raise ValueError(self.name + " contain other rows")
    def update(self, index, line):
        """Create a new instance with the row specified by the index
        updated with the values included in the provided line.

        Arguments:
        - index: the index of the row to be updated.
        - line: an ASCII representation of the new values.

        Raises:
        - ValueError: if index is outside the existing rows.
        """
        if index < len(self.__rows):
            table = self.__copy()
            newRow = table.__row(line, False)
            # Fields left empty in the new line keep their previous value.
            newRow = tuple(map(lambda tp: tp[1] if tp[1] else tp[0],
                               zip(self.__rows[index], newRow)))
            table.__rows[index] = newRow
            table.__additions.add(index)
            return table
        raise ValueError("{} index out of bounds {} > {}".
                         format(self.name, index, len(self.__rows)))
def additions(self, withKey=False):
"""Return all rows added or updated since the original drawn table.
Arguments:
- withKey: if True the primary keys are included in the rows.
"""
if withKey:
return list(map(lambda i: dict(zip(
self.__columns, self.__rows[i])), self.__additions))
else:
return list(map(lambda i: dict(zip(
self.attributes, self.__rows[i][len(self.__keyrefs):])),
self.__additions))
def drop(self):
"""Drop the table in the database without checking the contents."""
if self.name in type(self).__createdTables:
self.__testconnection.execute('DROP TABLE ' + self.name)
del type(self).__createdTables[self.name]
else:
raise ValueError(self.name + " is not created by a Table instance")
# Private Methods
def __header(self, line):
"""Parse the header of the drawn table."""
keyrefs = []
attributes = []
types = []
localConstraints = []
globalConstraints = []
afterKeyrefs = False
for cs in [c.strip() for c in line.split('|') if c]:
column = cs.split(':')
if len(column) != 2:
raise ValueError("Malformed column definition: " + cs)
name = column[0].strip()
primaryKey = False
# Constraints are parsed first so primary keys can be extracted
columnConstraints = []
startOfConstraints = column[1].find('(')
if startOfConstraints > -1:
line = column[1][startOfConstraints + 1: -1]
for constraint in line.split(','):
constraint = constraint.strip().lower()
if constraint == 'pk':
primaryKey = True
elif constraint == 'unique':
columnConstraints.append('UNIQUE')
elif constraint == 'not null':
columnConstraints.append('NOT NULL')
elif constraint.startswith('fk '):
reference = constraint.split(' ', 1)[1]
globalConstraints.append('FOREIGN KEY (' + name +
') REFERENCES ' + reference)
else:
raise ValueError("Unknown constraint in {} for {}: {}"
.format(self.name, name, constraint))
column[1] = column[1][:startOfConstraints]
localConstraints.append(' '.join(columnConstraints))
# Primary keys must be listed in order and before other attributes
if primaryKey:
keyrefs.append(name)
if afterKeyrefs:
raise ValueError("Primary key after other attributes: {}"
+ line)
else:
attributes.append(name)
afterKeyrefs = True
types.append(column[1].strip())
# Formats both types of constraints for use with generated SQL
localConstraints = list(map(lambda c: ' ' + c if c else c,
localConstraints))
if keyrefs:
globalConstraints.insert(0, 'PRIMARY KEY (' +
', '.join(keyrefs) + ')')
globalConstraints = ', ' + ', '.join(globalConstraints) + ')'
return (keyrefs, attributes, types, localConstraints,
globalConstraints)
def __row(self, line, castempty):
""" Parse a row in the drawn table.
Casting of missing values can be toggled so this method
can parse both full rows, new rows, and rows with updates.
"""
result = []
values = line.strip().split('|')[1:-1]
for index, value in enumerate(values):
result.append(self.__parse(index, value.strip(), castempty))
return tuple(result)
    def __parse(self, index, value, castempty):
        """Parse a field in the drawn table.

        Casting of missing values can be toggled so this method
        can be used when parsing full, new, and updated rows.

        Returns either the cast value, None for the NULL substitute,
        or a Variable placeholder to be resolved later.
        """
        if type(value) is Variable:
            self.__variables.append(value)
            return value
        if type(value) is str:
            # Fields starting with the variable prefix become placeholders
            # bound to this table, row and column for later resolution.
            if value.startswith(self.__prefix):
                variable = Variable(value, self.__prefix, self.name, len(
                    self.__rows), index, self.__columns[index])
                self.__variables.append(variable)
                return variable
            # Cast according to the declared column type, e.g. "INT(10)" -> "int".
            baseType = self.__types[index].split('(', 1)[0].lower()
            cast = self.__casts[baseType]
            if value == self.__nullsubst:
                value = None
            elif value or castempty:
                value = cast(value)
        return value
    def __compareAndMaybeRaise(self, why, comparison, selfViolations,
                               dbViolations, shouldRaise, verbose):
        """Compare this table to the table in the database, and raise an
        AssertionError if asked by the caller as it already has all of the
        required information.

        Arguments:
        - why: a short description of why the assertion is violated.
        - comparison: a function that takes the rows in self and the rows in
          the database as input and then computes if the assertion holds.
        - selfViolations: a function that takes the rows in self and the
          database as input and returns those in self violating the assert.
        - dbViolations: a function taking the rows in self and the database
          as input and returns those in the database violating the assert.
        - shouldRaise: if True the function raises an error if the assertion
          does not hold instead of simply returning the value False.
        - verbose: if True an ASCII representation of the rows violating the
          assertion is printed when an error is raised instead of only why.
        """
        # Variables are always resolved to ensure they match the database
        for variable in self.__variables:
            self.__resolve(variable)
        # Sets are used for performance and to simplify having multiple asserts;
        # duplicates would make set comparison unsound, hence the guards.
        rowSet = set(self.__rows)
        if len(self.__rows) != len(rowSet):
            raise ValueError("The '{}' table instance contains duplicate rows"
                             .format(self.name))
        self.__testconnection.execute(
            'SELECT {} FROM {}'.format(', '.join(self.__columns), self.name))
        dbRows = list(self.__testconnection.fetchalltuples())  # Generator
        dbSet = set(dbRows)
        if len(dbRows) != len(dbSet):
            raise ValueError("The '{}' database table contains duplicate rows"
                             .format(self.name))
        success = comparison(rowSet, dbSet)
        if not success and shouldRaise:
            if not verbose:
                raise AssertionError(why)
            else:
                # Build a three-part report: drawn table, database table, and
                # the violating rows from each side separated by an empty row.
                selfStr = self.__table2str(rowSet, False)
                dbStr = self.__table2str(dbSet, False)
                violations = list(selfViolations(rowSet, dbSet)) + \
                    [()] + list(dbViolations(rowSet, dbSet))
                vStr = self.__table2str(violations, True)
                raise AssertionError((why +
                                      "\nDrawn Table:\n{}"
                                      "\n\nDatabase Table:\n{}"
                                      "\n\nViolations:\n{}")
                                     .format(selfStr, dbStr, vStr))
        return success
def __resolve(self, variable):
"""Ensure the value of the variable is resolved."""
row = filter(lambda t: type(t[1]) is not Variable,
zip(self.__columns, self.__rows[variable.row]))
query = "SELECT " + ", ".join(self.__columns) + " FROM " + self.name \
+ " WHERE " + " AND ".join([self.__pair2equals(p) for p in row])
self.__testconnection.execute(query)
dbRows = list(self.__testconnection.fetchalltuples())
if len(dbRows) != 1:
raise ValueError("No unambigiuous value for the variable {} in {}"
.format(variable.name, self.name))
variable.set(dbRows[0][variable.column])
def __pair2equals(self, columnAndValue):
"""Create an SQL string that checks if a column is equal to a value."""
if columnAndValue[1] is None:
return columnAndValue[0] + " IS NULL"
else:
return columnAndValue[0] + " = '{}'".format(columnAndValue[1])
def __table2str(self, rows, violation, indention=2):
"""Format a table as a string."""
# Determine the longest value of each column for formatting with (pk)
header = list(map(lambda tp: tp[0] + ':' + tp[1],
zip(self.__columns, self.__types)))
for i, _ in enumerate(self.__keyrefs):
header[i] += ' (pk)'
widths = list(map(len, header))
for row in rows:
for i, value in enumerate(row):
widths[i] = max(widths[i], len(str(value)))
# Format table with the column width determined by the widest cell
prefix = indention * ' '
fs = ('{{}}' + ('| {{: <{}}} ' * len(widths)) + '|').format(*widths)
header = fs.format(prefix, *header)
delimiter = fs.format(prefix, *map(lambda w: w * '-', widths))
rows = list(map(lambda r: tuple(map(lambda v: 'NULL' if v is None
else v, r)), rows))
# The rows are formatted and a prefix is added to the rows violating
# the assert. Expected rows are marked with an E while rows currently
# in the database are marked with a D. All other rows have no prefix.
violationPrefix | |
# <gh_stars>0  (stray repository-metadata artifact; commented out so the module parses)
import base64
from munch import Munch
import logging
log = logging.getLogger(__name__)
class SchemaInvalid(Exception):
    """Raised when the schema itself is invalid (a programming error in the
    schema definition, not in the value under test)."""

    def __init__(self, help):
        self.help = help
class SchemaValidationFailed(Exception):
    """Raised when a test fails to conform to the given schema.

    Carries an optional human-readable `help`, the `context` (path) where
    validation failed, and a list of nested `errors` for container nodes.
    """

    def __init__(self, help=None, context=None, errors=None):
        self.help = help
        self.context = context
        self.errors = errors

    def __str__(self):
        context_part = self.context if self.context is not None else ''
        errors_part = self.errors if self.errors is not None else ''
        return f"SchemaValidationFailed {self.help} {context_part} {errors_part}"
def schema_check(expr, help=None):
    """Raise SchemaValidationFailed(help) unless *expr* is truthy."""
    if expr:
        return
    raise SchemaValidationFailed(help=help)
# Indices into the 4-tuple every validator factory returns:
# (validator_fn, kws, sub_elems, python_type)
TUP_VALIDATOR = 0  # callable raising SchemaValidationFailed on bad input
TUP_KWS = 1        # keyword options given to the factory (help, required, ...)
TUP_ELEMS = 2      # nested element schema(s) for containers, None for leaves
TUP_TYPE = 3       # the Python type this node validates
class Schema:
"""
A simple schema validator.
Example #1: A single-level dict
import plaster.tools.schema.schema.Schema as s
schema = s(
s.is_kws(
a=s.is_int(),
b=s.is_int(),
)
)
test = dict(a=1, b="not an int")
schema.validate(test)
Example #2: A multi-level structure
schema = s(
s.is_dict(required=True, no_extras=True, ignore_underscored_keys=True, elems=dict(
an_optional_int=s.is_int(),
some_required_int=s.is_int(required=True),
some_other_required_int_that_can_be_none=s.is_int(required=True, noneable=True),
a_key_with_a_bound=s.is_int(min_val=0, max_val=5),
a_simple_optional_list=s.is_list(s.is_int()),
a_bounded_list_of_ints=s.is_list(min_len=0, max_len=3, elems=s.is_int()),
a_deprecated_field=s.is_deprecated(),
a_bool=s.is_bool(),
an_sub_dict_with_minimal_rules=s.is_kws(
a=s.is_int(),
b=s.is_int(),
),
)
Example #3: Including help
schema = s(
s.is_dict(help="Important things", elems=dict(
peace=s.is_int(help="Peace man!"),
and=s.is_int(),
love=s.is_int(help="is all it takes."),
)
schema.help()
Usage if you want to validate the options of a function:
def some_func(n_pres, dyes, **kws):
schema.validate(dict(locals(), **kws))
Validators:
Options applicable to all validators:
required=False
noneable=False
help="A string that documents this field"
is_int(bounds=None)
is_float(bounds=None)
is_number(bounds=None)
is_str()
is_list(min_len=None, max_len=None, elems=sub_element_schema)
is_dict(
ignore_underscored_keys=False,
all_required=False,
no_extras=False,
use_default_on_none=False,
elems=sub_element_schema
)
is_kws(sub_element_schema) # Like dict but uses **kws for sub_elems
is_kws_r(sub_element_schema) # Like dict but uses **kws for sub_elems and asserts all required
TASK: Document defaults
"""
    def __init__(self, schema_tuple):
        """Wrap a validator 4-tuple; validated eagerly so schema bugs
        surface at construction time rather than at validation time."""
        Schema._check_is_schema_tuple(schema_tuple)
        self.schema_tuple = schema_tuple
    def schema(self):
        """Return the sub-element schemas so they can be embedded in a
        larger schema.

        Example:
            schema0 = s(
                s.is_kws_r(
                    var1=s.is_int()
                )
            )
            schema1 = s(
                s.is_kws_r(
                    **schema0.schema(),
                    var2=s.is_str(),
                )
            )
        """
        return self.schema_tuple[TUP_ELEMS]
    @staticmethod
    def _print_error(message):
        """mock-point: routes error output through logging so tests can
        intercept it."""
        log.error(message)
    @staticmethod
    def _print_help(indent, key, help=None):
        """mock-point: prints one help line, colouring the help text yellow
        via ANSI escapes when help is available."""
        yellow = "\u001b[33m"
        reset = "\u001b[0m"
        print(
            f"{reset}{indent}{key}: "
            f"{yellow if help else ''}"
            f"{help if help is not None else 'No help available'}{reset}"
        )
    def help(self):
        """Print an indented help tree for the whole schema, two spaces of
        indent per nesting level, starting at the "root" node."""
        def _recurse_help(schema_tuple, level, context):
            Schema._print_help(
                " " * (level * 2), context, schema_tuple[TUP_KWS].get("help")
            )
            if schema_tuple[TUP_ELEMS] is not None:
                for key, elem_schema_tuple in schema_tuple[TUP_ELEMS].items():
                    _recurse_help(elem_schema_tuple, level + 1, key)
        _recurse_help(self.schema_tuple, 0, "root")
    def validate(
        self, to_test, print_on_error=True, raise_on_error=True, context="root"
    ):
        """Validate *to_test* against this schema.

        Returns True on success.  On failure, optionally prints the nested
        error tree and/or raises the SchemaValidationFailed; returns False
        only when raise_on_error is disabled.
        """
        error = self._recurse(self.schema_tuple, to_test, context)
        def _recurse_print(e, level):
            # Container errors carry a context and nested errors; leaves
            # carry only a help message.
            if e.context is not None:
                Schema._print_error(f"{' ' * (level * 2)}In context of {e.context}:")
                level += 1
            if e.errors is not None:
                [_recurse_print(_e, level) for _e in e.errors]
            else:
                Schema._print_error(f"{' ' * (level * 2)}{e.help}")
        if error is not None:
            if print_on_error:
                _recurse_print(error, 0)
            if raise_on_error:
                raise error
            return False
        return True
    def apply_defaults(self, defaults, apply_to, override_nones=False):
        """Apply defaults to the apply_to dict (and sub-dicts) in place.

        Arguments:
        - defaults: dict of default values, shaped like the schema.
        - apply_to: dict to be filled in; mutated in place, nothing returned.
        - override_nones: when True, existing None (or empty-list) values in
          apply_to are also replaced by non-None defaults.
        """
        def _recurse(schema_tuple, _defaults, _apply_to):
            # Only dict nodes with element schemas can carry defaults downward.
            if schema_tuple[TUP_ELEMS] is not None and schema_tuple[TUP_TYPE] is dict:
                assert isinstance(_defaults, (dict, type(None)))
                assert isinstance(_apply_to, dict)
                elems = schema_tuple[TUP_ELEMS]
                # if elems is None:
                #     elems = {}
                assert isinstance(elems, dict)
                # APPLY default to anything that is in the defaults that *is* in the
                # elems schema and that isn't in apply_to already (or is none if override_nones)
                # NOTE: treats empty lists as equivalent to None if override_nones, but then
                # we must ensure that def_val is not None since schema may require a list.
                for def_key, def_val in _defaults.items():
                    if def_key in elems and (
                        def_key not in _apply_to
                        or (
                            def_val is not None
                            and def_key in _apply_to
                            and _apply_to[def_key] in [None, []]
                            and override_nones
                        )
                    ):
                        _apply_to[def_key] = def_val
                # Recurse into sub-dicts present on both sides.
                for key, elem_schema_tuple in elems.items():
                    if (
                        key in _defaults
                        and _defaults[key] is not None
                        and key in _apply_to
                        and _apply_to[key] is not None
                    ):
                        # _defaults[key] can be None in the situation where there is
                        # a perfectly good dict in a sub key of the apply_to already.
                        _recurse(elem_schema_tuple, _defaults[key], _apply_to[key])
        _recurse(self.schema_tuple, defaults, apply_to)
def top_level_fields(self):
"""
Return all *top-level* fields (Does NOT recurse).
Returns a list of tuples
[
(field_name, field_type, field_help, field_userdata, field_subtype)
]
field_type is converted into a Python type (list, dict, int, float, str)
field_help returns None if help is not given.
field_subtype is only used for lists. It is the type of the list element.
"""
validator_fn, kws, sub_elems, type_ = self.schema_tuple
if sub_elems is None:
return []
fields = []
for name, obj in sub_elems.items():
field_subtype = None
if obj[TUP_TYPE] is list:
field_subtype = obj[TUP_ELEMS][TUP_TYPE]
fields += [
(
name,
obj[TUP_TYPE],
obj[TUP_KWS].get("help"),
obj[TUP_KWS].get("userdata"),
field_subtype,
)
]
return fields
def requirements(self):
"""
Return all *top-level* required fields (Does NOT recurse).
Returns a list of tuples
[
(field_name, field_type, field_help, field_userdata)
]
field_type is converted into a Python type (list, dict, int, float, str)
field_help returns None if help is not given.
"""
validator_fn, kws, sub_elems, type_ = self.schema_tuple
if sub_elems is None:
return []
all_req = kws.get("all_required", False)
required = []
for name, obj in sub_elems.items():
if all_req or (
obj[TUP_ELEMS] is not None and obj[TUP_ELEMS].get("required")
):
required += [
(
name,
obj[TUP_TYPE],
obj[TUP_KWS].get("help"),
obj[TUP_KWS].get("userdata"),
)
]
return required
    @classmethod
    def _recurse(cls, schema_tuple, to_test, context):
        """Run the node's validator on *to_test*; instead of raising, return
        the SchemaValidationFailed annotated with *context*, or None when
        validation succeeds."""
        try:
            schema_tuple[TUP_VALIDATOR](to_test)
        except SchemaValidationFailed as e:
            e.context = context
            return e
        return None
    @classmethod
    def _check(cls, expr, help):
        """Delegate to schema_check: raise SchemaValidationFailed(help)
        unless *expr* is truthy."""
        return schema_check(expr, help)
    @classmethod
    def _check_is_type(cls, arg, types):
        """Leaf-node type check; *types* is a tuple of acceptable types."""
        cls._check(
            isinstance(arg, types),
            f"Must be of type '{','.join([t.__name__ for t in types])}' (was {type(arg).__name__}).",
        )
@classmethod
def _check_errors(cls, errors):
"""
Used by contexts after they have accumulated their list of errors;
raise if there is any error in the list.
"""
errors = [error for error in errors if error is not None]
if len(errors) > 0:
raise SchemaValidationFailed(errors=errors)
    @classmethod
    def _check_noneable(cls, kws, arg):
        """Check the noneable flag; return True if arg is None and allowed,
        raise if arg is None and not allowed, return False otherwise so the
        caller knows whether to continue with type checks."""
        if not kws.get("noneable") and arg is None:
            raise SchemaValidationFailed(help=f"Was None but None is not allowed.")
        return arg is None
@classmethod
def _check_is_schema_tuple(cls, schema_tuple):
"""
Check that the validator itself returns a properly constructed schema tuple
(callable, type name, sub_elems)
"""
if schema_tuple is None:
return
if not isinstance(schema_tuple, tuple) or len(schema_tuple) != 4:
raise SchemaInvalid("a Schema requires a 4 tuple")
if not isinstance(schema_tuple[TUP_TYPE], (type, type(None))):
raise SchemaInvalid("a Schema requires returning a type")
if not isinstance(schema_tuple, tuple):
raise SchemaInvalid("a Schema was required")
if not callable(schema_tuple[TUP_VALIDATOR]):
raise SchemaInvalid(
"a Schema was required (tuple[TUP_VALIDATOR] not callable)"
)
if not isinstance(schema_tuple[TUP_KWS], dict):
raise SchemaInvalid("a Schema was required (tuple[TUP_KWS] not a dict)")
    @classmethod
    def _check_arg_type(cls, arg, arg_name, expected_types):
        """
        Check that a validator-factory argument is of the right type;
        raises SchemaInvalid (schema bug), not SchemaValidationFailed.
        """
        if not isinstance(arg, expected_types):
            raise SchemaInvalid(
                f"a Schema expected argument '{arg_name}' to be of type(s) '{expected_types}' "
                f"(was '{type(arg).__name__}'.)"
            )
    @classmethod
    def _check_bounds_arg(cls, bounds):
        """Helper to check validity of the bounds argument: None, or a
        2-tuple of numbers where either end may be None (open bound)."""
        if bounds is not None:
            cls._check_arg_type(bounds, "bounds", tuple)
            if len(bounds) != 2:
                raise SchemaInvalid(
                    f"bounds parameter should be length 2. (was {len(bounds)})"
                )
            cls._check_arg_type(bounds[0], "bounds[0]", (int, float, type(None)))
            cls._check_arg_type(bounds[1], "bounds[1]", (int, float, type(None)))
@classmethod
def _check_bounds(cls, arg, bounds=None):
"""Helper to check bounds for int, float, number"""
if bounds is not None:
if bounds[0] is not None:
cls._check(arg >= bounds[0], f"Must be >= {bounds[0]} (was {arg}).")
if bounds[1] is not None:
cls._check(arg <= bounds[1], f"Must be <= {bounds[1]} (was {arg}).")
    @classmethod
    def is_int(cls, bounds=None, **kws):
        """Validator factory: integer, optionally bounded by (min, max)."""
        cls._check_bounds_arg(bounds)
        def validator(arg):
            # None passes when noneable=True; otherwise _check_noneable raises.
            if cls._check_noneable(kws, arg):
                return
            cls._check_is_type(arg, (int,))
            cls._check_bounds(arg, bounds)
        return validator, kws, None, int
    @classmethod
    def is_float(cls, bounds=None, **kws):
        """Validator factory: float (ints rejected), optionally bounded."""
        cls._check_bounds_arg(bounds)
        def validator(arg):
            if cls._check_noneable(kws, arg):
                return
            cls._check_is_type(arg, (float,))
            cls._check_bounds(arg, bounds)
        return validator, kws, None, float
    @classmethod
    def is_number(cls, bounds=None, **kws):
        """Validator factory: int or float, optionally bounded.
        Note the node's declared type is float even though ints pass."""
        cls._check_bounds_arg(bounds)
        def validator(arg):
            if cls._check_noneable(kws, arg):
                return
            cls._check_is_type(arg, (int, float))
            cls._check_bounds(arg, bounds)
        return validator, kws, None, float
    @classmethod
    def is_str(cls, **kws):
        """Validator factory: string.
        Options: allow_empty_string (default True) and options (a whitelist
        of allowed values)."""
        allow_empty_string = kws.get("allow_empty_string", True)
        options = kws.get("options", None)
        def validator(arg):
            if cls._check_noneable(kws, arg):
                return
            cls._check_is_type(arg, (str,))
            if not allow_empty_string:
                cls._check(arg != "", f"Empty string not allowed.")
            if options is not None:
                # Schema bug (not a validation failure) if options is malformed.
                assert isinstance(options, (list, tuple))
                cls._check(arg in options, f"String '{arg}' not in allowed options.")
        return validator, kws, None, str
@classmethod
def is_bool(cls, **kws):
def validator(arg):
if cls._check_noneable(kws, arg):
return
cls._check_is_type(arg, (bool,))
return validator, kws, None, bool
@classmethod
def is_list(cls, elems=None, **kws):
min_len = kws.get("min_len")
max_len | |
C u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
# ---------------------------------------------------------------------------
# Auto-generated RMG group-additivity database entries: Cs centres bonded to
# one (or more) sulfur atoms, enumerated over neighbouring carbon types
# (Ct/Cb/Cds and Cdd sub-patterns).  Each entry() registers one adjacency-list
# pattern; index = -1 and thermo = None mark placeholder entries without data.
# NOTE(review): machine-generated content -- do not hand-edit adjacency lists.
# ---------------------------------------------------------------------------
entry(
    index = -1,
    label = "Cs-(Cds-Cds)CbCsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd)CbCsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
6 Cdd u0 {2,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)CbCsSs",
    group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 S2s u0 {1,S}
7 S2d u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-Cd)CbCsSs",
    group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cb u0 {1,S}
5 Cs u0 {1,S}
6 S2s u0 {1,S}
7 C u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-CtCtCsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Ct u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-CbCtCsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Ct u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-CbCbCsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cb u0 {1,S}
4 Cs u0 {1,S}
5 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-CdsCdsCdsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S}
3 Cd u0 {1,S}
4 Cd u0 {1,S}
5 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 C u0 {2,D}
7 C u0 {3,D}
8 C u0 {4,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
8 Cdd u0 {4,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 S2s u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 S2s u0 {1,S}
7 Cd u0 {3,D}
8 Cd u0 {4,D}
9 C u0 {5,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 Cd u0 {4,D}
9 S2d u0 {5,D}
10 C u0 {6,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {7,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 S2s u0 {1,S}
8 Cd u0 {4,D}
9 C u0 {5,D}
10 C u0 {6,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Cd u0 {1,S} {8,D}
5 S2s u0 {1,S}
6 Cdd u0 {2,D}
7 Cdd u0 {3,D}
8 Cdd u0 {4,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 S2s u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 S2d u0 {7,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 S2s u0 {1,S}
9 S2d u0 {5,D}
10 S2d u0 {6,D}
11 C u0 {7,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 S2s u0 {1,S}
9 S2d u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)S2s",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {8,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,S} {6,D}
4 Cd u0 {1,S} {7,D}
5 Cdd u0 {2,D} {9,D}
6 Cdd u0 {3,D} {10,D}
7 Cdd u0 {4,D} {11,D}
8 S2s u0 {1,S}
9 C u0 {5,D}
10 C u0 {6,D}
11 C u0 {7,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-CtCdsCdsSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Cd u0 {1,S}
4 Cd u0 {1,S}
5 S2s u0 {1,S}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cd)(Cds-Cd)CtSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 C u0 {2,D}
7 C u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cds)(Cds-Cds)CtSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 Cd u0 {2,D}
7 Cd u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    index = -1,
    label = "Cs-(Cds-Cdd)(Cds-Cds)CtSs",
    group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cd u0 {1,S} {7,D}
4 Ct u0 {1,S}
5 S2s u0 {1,S}
6 Cdd u0 {2,D}
7 Cd u0 {3,D}
""",
    thermo = None,
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)(Cds-Cds)CtSs",
group =
"""
1 * | |
if not otherOperand is None:
if otherOperand.op == 61: # num
otherOperandRepr = str(otherOperand.n.value(get_int_type_by_width_and_sign(8,1)))
elif otherOperand.op == 65: # var
otherOperandRepr = cfunc.lvars[otherOperand.v.idx].name
elif otherOperand.op == 60: # memptr
getNameForCMemberExpr(cfunc, otherOperand)
else:
otherOperandRepr = otherOperand.opname
represent = "(" + represent + " " + opname + " " + otherOperandRepr + ")"
return represent
def getUsageOfVRefItem(cfunc, vRefItem, usageVarStartPos, isVRefLvarPtr):
    """Walk up the Hex-Rays ctree from a local-variable reference and
    classify how the referenced value is ultimately used.

    Returns a triple:
        (usageInfo, (usageVarStartPos, usageSize, isUsagePtr),
         (opStackOnVRef, reprText))
    where usageInfo[0] is a UsageType (FUNCCALL / ASSIGN / ASSIGNMEM /
    CONDITION / RETURN / UNKNOWN) followed by type-specific fields.

    NOTE(review): the numeric .op constants (48 cast, 51 ptr, 57 call,
    60 memptr, 61 num, 65 var, 73 if, 80 ret, ...) come from the Hex-Rays
    ctype_t enumeration -- confirm against the installed IDA SDK version.
    """
    item = vRefItem
    vRefLvarIdx = vRefItem.v.idx
    vRefLvar = cfunc.lvars[vRefLvarIdx]
    vRefLvarType = vRefLvar.tif
    isUsagePtr = isVRefLvarPtr
    usageSize = vRefLvarType.get_size()
    # Stack of (op, other-operand) seen while climbing to the enclosing stmt.
    opStackOnVRef = []
    parentItem = cfunc.body.find_parent_of(item).to_specific_type
    # Climb expression parents until we reach the enclosing statement node.
    while type(parentItem) != idaapi.cinsn_t:
        if parentItem.op == 48: #cast
            # Fold pending constant pointer arithmetic into the tracked
            # offset before the cast changes the tracked type.
            if isPrevOpOnPtrVarValid(opStackOnVRef, vRefLvarType):
                addedOffset = (opStackOnVRef[0][1].n.value(get_int_type_by_width_and_sign(8,1))) * vRefLvarType.get_ptrarr_objsize()
                usageVarStartPos = usageVarStartPos + addedOffset
                opStackOnVRef = []
            castType = parentItem.type
            vRefLvarType = castType
            if castType.is_ptr():
                isUsagePtr = True
        elif parentItem.op == 51: # ptr
            # Dereference: same offset folding, then track the pointee.
            if isPrevOpOnPtrVarValid(opStackOnVRef, vRefLvarType):
                addedOffset = (opStackOnVRef[0][1].n.value(get_int_type_by_width_and_sign(8,1))) * vRefLvarType.get_ptrarr_objsize()
                usageVarStartPos = usageVarStartPos + addedOffset
                opStackOnVRef = []
            usageSize = vRefLvarType.get_ptrarr_objsize()
            isUsagePtr = False
        elif parentItem.op == 57: # call
            if parentItem.x.op != 68: # helper
                calledFuncExpr = parentItem.x
                calledFuncEA = get_first_dref_from(parentItem.ea)
                if calledFuncEA is None or calledFuncEA == BADADDR:
                    calledFuncEA = getCalledFuncEAOfCallExpr(cfunc, calledFuncExpr)
                # Find which call argument carries our traced item.
                argIndex = len(parentItem.a)-1
                while argIndex >= 0:
                    if parentItem.a[argIndex].to_specific_type == item:
                        break
                    argIndex = argIndex - 1
                if calledFuncEA != BADADDR and argIndex != -1:
                    calledFuncNameDemangled = getDeFuncNameAtEA(calledFuncEA)
                    if calledFuncNameDemangled is None:
                        calledFuncNameDemangled = getName(calledFuncEA)
                    return (UsageType.FUNCCALL, parentItem.ea, calledFuncEA, argIndex, calledFuncNameDemangled), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
                else:
                    return (UsageType.UNKNOWN, parentItem.ea, parentItem, item), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
        elif parentItem.op >= 2 and parentItem.op <= 15: # assign ops
            assignTarget = findAssignTargetOfExprResult(cfunc, item)
            if not assignTarget is None:
                if assignTarget.op == 65: #vref
                    return (UsageType.ASSIGN, parentItem.ea, assignTarget.v.idx), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
                elif assignTarget.op == 60: #memptr
                    member = getMemberForCMemberExpr(assignTarget)
                    return (UsageType.ASSIGNMEM, parentItem.ea, member, get_member_fullname(member.id)), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
                else:
                    return (UsageType.UNKNOWN, parentItem.ea, parentItem, item), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
            # No-op placeholder (original style); falls through to keep climbing.
            None
        else:
            # Any other expression op: remember it (with its other operand)
            # so the caller can reconstruct the expression around the vref.
            opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            if parentItem.op >= 22 and parentItem.op <= 31 : # logical comparisons
                None
            elif parentItem.op == 17 or parentItem.op == 18 or parentItem.op == 49: # logic or, logic and, logic not
                None
            elif parentItem.op == 58: # idx
                None
            #elif parentItem.op == 35: #add
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 36: #sub
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 37: #mul
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 38 or parentItem.op == 39: #div
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 40 or parentItem.op == 41 : #mod
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 21: #band
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 19: #bor
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
            #elif parentItem.op == 20: #xor
            #    opStackOnVRef.append((parentItem.op, getTheOtherOperandOfOp(parentItem, item)))
        item = parentItem
        parentItem = cfunc.body.find_parent_of(item).to_specific_type
    # Reached a statement node: classify by statement kind.
    if parentItem.op == 73: #if
        return (UsageType.CONDITION, parentItem.ea, parentItem, item), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
    elif parentItem.op == 80: #ret
        return (UsageType.RETURN, parentItem.ea, parentItem, item), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
    return (UsageType.UNKNOWN, parentItem.ea, parentItem, item), (usageVarStartPos, usageSize, isUsagePtr), (opStackOnVRef, reprOpStack(cfunc, opStackOnVRef))
def getCFuncArgIdxByCallArgIdx(callArgIdx, argTotalNum):
    """Map a call-site argument index to the decompiled function's
    parameter index: with more than two arguments, the first two call
    arguments are moved behind the following pair (and vice versa)."""
    if argTotalNum <= 2:
        return callArgIdx
    if callArgIdx in (2, 3):
        return callArgIdx - 2
    if callArgIdx in (0, 1):
        shift = (argTotalNum - 2) if argTotalNum <= 4 else 2
        return callArgIdx + shift
    return callArgIdx
def sortUsageListByPosInVar(usageList):
    """Return usageList sorted ascending by each usage's start offset
    inside the traced variable (usage[1][0])."""
    def startPos(usage):
        return usage[1][0]
    return sorted(usageList, key=startPos)
def sortUsageListByUsageEA(usageList):
    """Return usageList sorted ascending by each usage's effective
    address (usage[0][1])."""
    def usageEA(usage):
        return usage[0][1]
    return sorted(usageList, key=usageEA)
def printMsg(msg):
    """Print a single message line.

    Uses print(msg), which behaves identically under Python 2 (a
    parenthesised single argument) and Python 3; the file already uses
    this form elsewhere (wait_for_analysis_to_finish).  The original
    trailing bare `None` statement was a dead no-op and is removed.
    """
    print(msg)
def printUsageList(usageList):
    """Dump one line per usage record: usage-type name, usage EA, start
    offset in the variable, usage size, and the op-stack text.

    Python 2 only (multi-argument print statement).
    Record layout (see getUsageOfVRefItem):
      usage[0] = (usageType, ea, ...), usage[1] = (startPos, size, isPtr),
      usage[2] = (opStack, reprText).
    """
    for usage in usageList:
        print usage[0][0].name, hex(usage[0][1]), usage[1][0], usage[1][1], usage[2][1]
    # No-op placeholder kept from the original style.
    None
def analyzeInputArgUsageInMethod(methodEA, argIndex, startPosInOrigInput, argSize, isArgPtr):
    """Trace how the argIndex-th call argument of the function at methodEA
    is used, following assignments into other locals and recursing into
    callees that receive the traced value.

    Returns a usage list (see getUsageOfVRefItem for record layout),
    sorted by start position within the original input.
    """
    # Maps lvar name -> (startPosInOrigInput, size, isPtr) for every local
    # currently known to carry (part of) the traced input.
    propagatedSet = {}
    usageList = []
    cfunc = decompileFuncInTextAtEA(methodEA)
    if cfunc == None:
        return []
    # Call-site argument order differs from the decompiled parameter order.
    argIndex = getCFuncArgIdxByCallArgIdx(argIndex, len(cfunc.arguments))
    #print argIndex
    argLvar = cfunc.arguments[argIndex]
    argLvarName = argLvar.name
    propagatedSet[argLvarName] = (startPosInOrigInput, argSize, isArgPtr) # argSize -1 stands for not sure of size
    if not cfunc is None:
        cfunc_treeitems = cfunc.treeitems
        for item in cfunc_treeitems:
            item = item.to_specific_type
            if (item.op >= 2 and item.op <= 15): # assign
                assignTarget = findAssignTargetOfExprResult(cfunc, item.x)
                if not assignTarget is None and assignTarget.op == 65 and cfunc.lvars[assignTarget.v.idx].name in propagatedSet: # stop propagation for reassigned lvars
                    propagatedSet.pop(cfunc.lvars[assignTarget.v.idx].name)
            if item.op == 65: #var
                itemLvarIdx = item.v.idx
                itemLvar = cfunc.lvars[itemLvarIdx]
                itemLvarName = itemLvar.name
                if itemLvarName in propagatedSet:
                    propagatedInfo = propagatedSet[itemLvarName]
                    usage = getUsageOfVRefItem(cfunc, item, propagatedInfo[0], propagatedInfo[2])
                    if not None is usage:
                        usageInfo = usage[0]
                        usageVarLoc = usage[1]
                        usageOpStack = usage[2]
                        usageType = usageInfo[0]
                        usageEA = usageInfo[1]
                        if usageType == UsageType.ASSIGN:
                            # Value copied into another lvar: keep tracing it.
                            assignTargetLVarIdx = usageInfo[2]
                            assignTargetLVar = cfunc.lvars[assignTargetLVarIdx]
                            assignTargetLVarName = assignTargetLVar.name
                            propagatedSet[assignTargetLVarName] = usageVarLoc
                        else:
                            usageList.append(usage)
                            if usageType == UsageType.FUNCCALL:
                                # Recurse into the callee with the traced slice.
                                # NOTE(review): no recursion-depth / cycle guard.
                                calledFuncEA = usageInfo[2]
                                callArgIdx = usageInfo[3]
                                #print propagatedSet
                                #print hex(usageEA), hex(calledFuncEA), callArgIdx, usageVarLoc
                                usageList.extend(analyzeInputArgUsageInMethod(calledFuncEA, callArgIdx, usageVarLoc[0], usageVarLoc[1], usageVarLoc[2]))
                            elif usageType == UsageType.CONDITION:
                                None
    #print propagatedSet
    usageList = sortUsageListByPosInVar(usageList)
    #for usage in usageList:
    #    print usage
    return usageList
wait_for_analysis_time = 0
def wait_for_analysis_to_finish():
    """Block until IDA's auto-analysis queue drains, accumulating the
    wall-clock time spent waiting into wait_for_analysis_time."""
    global wait_for_analysis_time
    print("[+] waiting for analysis to finish...")
    began = time.time()
    idaapi.autoWait()
    idc.Wait()
    wait_for_analysis_time += time.time() - began
    print("[+] analysis finished.")
def initHexRaysPlugin():
wait_for_analysis_to_finish()
if not idaapi.init_hexrays_plugin():
print "forcing hexrays to load..."
load_plugin_decompiler()
if not idaapi.init_hexrays_plugin():
raise Exception("hexrays decompiler is not available :(")
#def isBinaryArm64():
# fileTypeName = idaapi.get_file_type_name()
# if "Mach-O file" in fileTypeName and fileTypeName.endswith("ARM64"):
# return True
# return False
def isBinaryArm():
    """True when the loaded file's type name ends in "ARM" (32-bit ARM;
    "ARM64" names do not match)."""
    fileTypeName = idaapi.get_file_type_name()
    return fileTypeName.endswith("ARM")
def isBinaryArm64():
    """True when "ARM64" occurs anywhere in the loaded file's type name."""
    fileTypeName = idaapi.get_file_type_name()
    return "ARM64" in fileTypeName
def isBinaryX86():
    """True when the loaded file's type name ends in "X86" (32-bit;
    "X86_64" names do not match)."""
    fileTypeName = idaapi.get_file_type_name()
    return fileTypeName.endswith("X86")
def isBinaryX86_64():
    """True when the loaded file's type name ends in "X86_64"."""
    fileTypeName = idaapi.get_file_type_name()
    return fileTypeName.endswith("X86_64")
def load_plugin_decompiler():
# load decompiler plugins (32 and 64 bits, just let it fail)
print "[+] trying to load decompiler plugins"
if isBinaryX86_64():
# 64bit plugins
idc.RunPlugin("hexx64", 0)
elif isBinaryX86():
# 32bit plugins
idc.RunPlugin("hexrays", 0)
elif isBinaryX86():
idc.RunPlugin("hexarm", 0)
elif isBinaryArm64():
idc.RunPlugin("hexarm64", 0)
print "[+] decompiler plugins loaded."
def findUsageOfStructMem(structMemFullName):
    """Resolve a structure member by its fully-qualified name.

    NOTE(review): this function computes memId but never uses or returns
    it -- it appears unfinished; confirm the intended behaviour before
    relying on it.
    """
    version = getVersionNumber()
    # Pre-7.0 IDA exposes get_member_by_fullname with an extra out-parameter.
    if version < 7.0:
        member = get_member_by_fullname(structMemFullName, None)
    else:
        member = get_member_by_fullname(structMemFullName)
    memId = member.id
def setFuncTypeWithFuncInfo(funcEA, funcInfo, isFuncPtr=False):
typeToSet = funcInfo.getFuncTypeToSet(isFuncPtr)
#print "[-]", hex(funcEA), typeToSet
ret = setFuncType(funcEA, typeToSet)
if None is ret or ret == False or ret == 0:
print "[!] SetType {} failed at {:016X}".format(typeToSet, funcEA)
return False
return True
def getKernFuncEAByName(funcName):
startEA = 0
if isBinaryArm64():
kernelTextSeg = get_segm_by_name("__TEXT_EXEC:__text")
if None is kernelTextSeg:
kernelTextSeg = get_segm_by_name("__TEXT:__text")
if None is kernelTextSeg:
kernelTextSeg = get_segm_by_name("__text")
if None is kernelTextSeg:
print "[!] Can not find kernel text segment."
else:
startEA = kernelTextSeg.startEA
funcEA = get_name_ea(startEA, funcName)
#print "[-]", funcName, "at", hex(funcEA)
return funcEA
import json
def parseKernelHeadersAndSetType():
    """Parse the kernel SDK headers and apply the recovered prototypes to
    the matching functions in the IDB.

    Side effects: populates kernelClassNameSet and confirmedFuncTypes,
    sets function types, and (per-architecture) fixes up vtable info:
    ARM64 kernels get vtable/vtable-struct handling per class; x86_64
    kernel binaries get imported vtable_<class> struct members typed and
    child implementations' return types adjusted.
    """
    global confirmedFuncTypes
    global kernelClassNameSet
    #phase = "parseKernelHeadersAndSetType"
    #if checkPhaseDone(phase):
    #    return
    print "[+] Parse Kernel Headers And Set Type"
    parseResults = loadKernelHeaders()
    parseResultOfAllClasses = parseResults["classes"]
    for className in parseResultOfAllClasses:
        kernelClassNameSet.add(className)
        parseResultOfClass = parseResultOfAllClasses[className]
        # First pass: type every named (mangled) kernel function we can find.
        for mangledFuncName in parseResultOfClass:
            funcEA = getKernFuncEAByName(mangledFuncName)
            if funcEA != BADADDR:
                funcInfo = parseResultOfClass[mangledFuncName]
                ret = setFuncTypeWithFuncInfo(funcEA, funcInfo, False)
                if ret:
                    keepCon_ItemAndGOTs(funcEA)
                    confirmedFuncTypes[funcEA] = getTinfoOfFuncAtEA(funcEA)
        if isBinaryArm64():
            if (className in classNameToVTableAddrMap):
                keepCon_VTAndVTS_ForClass(className)
        elif isBinaryX86_64():
            # In macOS driver binaries, vtables of kernel classes are imported, we need to parse imported structures
            if not isX64BinaryKernel():
                continue
            if className == "OSMetaClass" or className == "OSObject":
                # Too many
                continue
            vtsName = "vtable_" + className
            vtsStructId = GetStrucIdByName(vtsName)
            if vtsStructId != None and vtsStructId != BADADDR:
                vtsStruct = get_struc(vtsStructId)
                childFuncEASetList = getAllChildFuncEAsForClass(className)
                if len(childFuncEASetList) == 0:
                    continue
                # Second pass: type each virtual member in the imported
                # vtable struct and fix child overrides' return types.
                for mangledFuncName in parseResultOfClass:
                    funcInfo = parseResultOfClass[mangledFuncName]
                    if funcInfo.isVirtual:
                        vfuncMember = get_member_by_name(vtsStruct, mangledFuncName)
                        if not None is vfuncMember:
                            typeToSet = funcInfo.getFuncTypeToSet(True)
                            SetType(vfuncMember.id, typeToSet)
                            # soff/8: byte offset -> vtable slot index (x86_64).
                            childFuncEAs = childFuncEASetList[vfuncMember.soff/8]
                            for ea in childFuncEAs:
                                changeRetTypeOfFuncAtAddr(ea, funcInfo.returnType)
                        else:
                            print "[?] {} is not in class {}".format(mangledFuncName, className)
    wait_for_analysis_to_finish()
    #markPhaseDone(phase)
def parseClientMemForType():
    """Placeholder -- not implemented yet; returns None."""
    pass
def parseClientMemForTypeFunc(clientMemForTypeFuncEA):
    """Stub: inspect the function at the given EA by its demangled name.

    NOTE(review): funcName.endswith() is called with no argument and will
    raise TypeError whenever funcName is not None -- the intended suffix
    string is missing; this function looks unfinished.
    """
    funcName = getDeFuncNameAtEA(clientMemForTypeFuncEA)
    if not funcName is None and funcName.endswith():
        None
    None
def gatherInfoAboutInstructions():
    """Count mnemonic frequencies over every instruction of every
    function, print them sorted by (count, mnemonic), and return the
    mnemonic -> count map.

    Python 2 only: uses dict.iteritems(), a tuple-unpacking lambda
    parameter, and the print statement.
    """
    insnInfoMap = {}
    for funcStartEA in Functions():
        func = idaapi.get_func(funcStartEA)
        funcEndEA = func.endEA
        heads = Heads(funcStartEA, funcEndEA)
        for head in heads:
            # NOTE: local name shadows any module-level `operator` import.
            operator = GetMnem(head)
            if operator not in insnInfoMap:
                insnInfoMap[operator] = 0
            insnInfoMap[operator] += 1
    #for operator in insnInfoMap:
    #    print operator, ": ", insnInfoMap[operator]
    for key, value in sorted(insnInfoMap.iteritems(), key=lambda (k,v): (v,k)):
        print "%s: %s" % (key, value)
    return insnInfoMap
def markPhaseDone(phase):
    """Persist completion of *phase* by creating a marker struct in the IDB."""
    # IDA crashes on struct names containing '.', so sanitise first.
    cleaned = phase.replace(".", "_")
    if checkPhaseDone(cleaned):
        return
    createClassStruct("__PHASE_" + cleaned + "__", 0)
def checkPhaseDone(phase):
    """True when *phase*'s marker struct already exists in the IDB."""
    markerName = "__PHASE_" + phase + "__"
    return GetStrucIdByName(markerName) != BADADDR
AllControlFlowInsnTraceList = {}
AllControlFlowInsnTraceListByBB = {}
def |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.