id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
1677035
|
import json
from hamcrest import assert_that, is_, equal_to
from lxd_image_server.simplestreams.index import Index
class TestIndex(object):
    """Behavioural tests for the simplestreams ``Index`` container."""

    @staticmethod
    def _expected_index(products):
        # Canonical index document for a given (ordered) product list.
        return {
            'format': 'index:1.0',
            'index': {
                'images': {
                    'datatype': 'image-downloads',
                    'path': 'streams/v1/images.json',
                    'format': 'products:1.0',
                    'products': products
                }
            }
        }

    def test_generate_json(self):
        # Two distinct products must both appear in the rendered JSON.
        idx = Index()
        for name in ('product1', 'product2'):
            idx.add(name)
        rendered = json.loads(idx.to_json())
        assert_that(rendered, is_(equal_to(
            self._expected_index(['product1', 'product2']))))

    def test_no_duplicate(self):
        # Re-adding the same product must not create duplicate entries.
        idx = Index()
        for _ in range(3):
            idx.add('product1')
        rendered = json.loads(idx.to_json())
        assert_that(rendered, is_(equal_to(
            self._expected_index(['product1']))))

    def test_update_product(self):
        # Deleting one product leaves the remaining one intact.
        idx = Index()
        idx.add('product1')
        idx.add('iats:xenial:amd64:default')
        idx.delete('iats:xenial:amd64:default')
        rendered = json.loads(idx.to_json())
        assert_that(rendered, is_(equal_to(
            self._expected_index(['product1']))))

    def test_delete_all(self):
        # Deleting the only product yields an empty product list.
        idx = Index()
        idx.add('iats:xenial:amd64:default')
        idx.delete('iats:xenial:amd64:default')
        rendered = json.loads(idx.to_json())
        assert_that(rendered, is_(equal_to(self._expected_index([]))))
|
1677044
|
from magma import array, wire, compile, EndCircuit
from loam.boards.icestick import IceStick
from mantle.lattice.ice40.RAMB import RAMB
# Configure the IceStick board: clock plus seven J1 inputs and two J3 outputs.
icestick = IceStick()
icestick.Clock.on()
icestick.J1[0].rename('I0').input().on()
icestick.J1[1].rename('I1').input().on()
icestick.J1[2].rename('I2').input().on()
icestick.J1[3].rename('I3').input().on()
icestick.J1[4].rename('I4').input().on()
icestick.J1[5].rename('I5').input().on()
icestick.J1[6].rename('I6').input().on()
icestick.J3[0].rename('D0').output().on()
icestick.J3[1].rename('D1').output().on()
main = icestick.main()
# Write data: two live bits from I0/I1, the rest tied to 0 (N = 8 data bits).
WDATA = array([main.I0, main.I1, 0, 0, 0, 0, 0, 0])
# Addresses carry 9 bits because M = 4096 // 8 = 512 words (2**9 = 512);
# only the low two bits are driven from pins here.
WADDR = array([main.I2, main.I3, 0,0,0,0,0,0,0])
RADDR = array([main.I4, main.I5, 0,0,0,0,0,0,0])
WE = main.I6
O = array([main.D0, main.D1])
N = 8
M = 4096//N
# Initial RAM contents: word i holds the low byte of i.
rom = M * [0]
for i in range(M):
    rom[i] = i & 0xff
ramb = RAMB( M, N, rom )
#print(romb.interface)
# Wire the pins to the block RAM; read enable is tied high so RDATA always
# reflects the word at RADDR.
wire( WE, ramb.WE )
wire( WADDR, ramb.WADDR )
wire( WDATA, ramb.WDATA )
wire( 1, ramb.RE )
wire( RADDR, ramb.RADDR )
wire( ramb.RDATA[0:2], O)
EndCircuit()
|
1677048
|
import asyncio
import json
import flask
import pytest
from flask import request
from mitmproxy.addons import asgiapp
from mitmproxy.addons import next_layer
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.test import taddons
# Plain WSGI test app, served through mitmproxy's WSGIApp addon in the tests.
tapp = flask.Flask(__name__)


@tapp.route("/")
def hello():
    print("CALLED")
    return "testapp"


@tapp.route("/parameters")
def request_check():
    # Echo the query-string parameters back as a JSON object.
    args = {}
    for k in request.args.keys():
        args[k] = request.args[k]
    return json.dumps(args)


@tapp.route("/requestbody", methods=["POST"])
def request_body():
    # Echo the raw request body back as JSON.
    return json.dumps({"body": request.data.decode()})


@tapp.route("/error")
def error():
    raise ValueError("An exception...")


# Minimal ASGI apps used to exercise the proxy's error paths.
async def errapp(scope, receive, send):
    raise ValueError("errapp")


async def noresponseapp(scope, receive, send):
    # Returning without sending any response must surface as a 500 upstream.
    return
@pytest.mark.asyncio
async def test_asgi_full():
    """End-to-end check of WSGI/ASGI apps hosted inside the proxy.

    Spins up a real proxy server on an ephemeral port and issues raw
    HTTP/1.1 requests over a socket, asserting on status line and body
    for the happy path, query parameters, request bodies and both ASGI
    failure modes (exception and no-response).
    """
    ps = Proxyserver()
    addons = [
        asgiapp.WSGIApp(tapp, "testapp", 80),
        asgiapp.ASGIApp(errapp, "errapp", 80),
        asgiapp.ASGIApp(noresponseapp, "noresponseapp", 80),
    ]
    with taddons.context(ps, *addons) as tctx:
        tctx.master.addons.add(next_layer.NextLayer())
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        ps.running()
        await tctx.master.await_log("Proxy server listening", level="info")
        proxy_addr = ps.server.sockets[0].getsockname()[:2]

        reader, writer = await asyncio.open_connection(*proxy_addr)
        req = "GET http://testapp:80/ HTTP/1.1\r\n\r\n"
        writer.write(req.encode())
        header = await reader.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await reader.readuntil(b"testapp")
        assert body == b"testapp"

        reader, writer = await asyncio.open_connection(*proxy_addr)
        # BUG FIX: the query separator had been mojibake'd to "¶m2"
        # ("&para" rendered as "¶"); without the literal "&" the second
        # parameter never reaches the app and the assertion below fails.
        req = "GET http://testapp:80/parameters?param1=1&param2=2 HTTP/1.1\r\n\r\n"
        writer.write(req.encode())
        header = await reader.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await reader.readuntil(b"}")
        assert body == b'{"param1": "1", "param2": "2"}'

        reader, writer = await asyncio.open_connection(*proxy_addr)
        req = "POST http://testapp:80/requestbody HTTP/1.1\r\nContent-Length: 6\r\n\r\nHello!"
        writer.write(req.encode())
        header = await reader.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await reader.readuntil(b"}")
        assert body == b'{"body": "Hello!"}'

        reader, writer = await asyncio.open_connection(*proxy_addr)
        req = "GET http://errapp:80/?foo=bar HTTP/1.1\r\n\r\n"
        writer.write(req.encode())
        header = await reader.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 500")
        body = await reader.readuntil(b"ASGI Error")
        assert body == b"ASGI Error"

        reader, writer = await asyncio.open_connection(*proxy_addr)
        req = "GET http://noresponseapp:80/ HTTP/1.1\r\n\r\n"
        writer.write(req.encode())
        header = await reader.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 500")
        body = await reader.readuntil(b"ASGI Error")
        assert body == b"ASGI Error"
|
1677064
|
from .dual_primal_edge_unpool import DualPrimalEdgeUnpooling
__all__ = ['DualPrimalEdgeUnpooling']
|
1677168
|
import time
import numpy as np
import torch as T
import torch.nn.functional as F
from torch.optim.adam import Adam
from ..utils.networks_mlp import Actor, Critic
from ..agent_base import Agent
from ..utils.exploration_strategy import GaussianNoise
class TD3(Agent):
    """Twin Delayed DDPG (TD3) agent built on the project's Agent base class.

    Maintains an actor and two critics with target copies, Gaussian
    exploration noise, target policy smoothing, clipped double-Q targets
    and delayed actor updates.
    """

    def __init__(self, algo_params, env, transition_tuple=None, path=None, seed=-1):
        # environment
        self.env = env
        self.env.seed(seed)
        obs = self.env.reset()
        # Derive network input/output sizes and action bounds from the env.
        algo_params.update({'state_dim': obs.shape[0],
                            'action_dim': self.env.action_space.shape[0],
                            'action_max': self.env.action_space.high,
                            'action_scaling': self.env.action_space.high[0],
                            'init_input_means': None,
                            'init_input_vars': None
                            })
        # training args
        self.training_episodes = algo_params['training_episodes']
        self.testing_gap = algo_params['testing_gap']
        self.testing_episodes = algo_params['testing_episodes']
        self.saving_gap = algo_params['saving_gap']
        super(TD3, self).__init__(algo_params,
                                  transition_tuple=transition_tuple,
                                  goal_conditioned=False,
                                  path=path,
                                  seed=seed)
        # torch networks: actor + twin critics, each with a target copy
        # (the twin critics implement TD3's clipped double-Q learning).
        self.network_dict.update({
            'actor': Actor(self.state_dim, self.action_dim, action_scaling=self.action_scaling).to(self.device),
            'actor_target': Actor(self.state_dim, self.action_dim, action_scaling=self.action_scaling).to(self.device),
            'critic_1': Critic(self.state_dim + self.action_dim, 1).to(self.device),
            'critic_1_target': Critic(self.state_dim + self.action_dim, 1).to(self.device),
            'critic_2': Critic(self.state_dim + self.action_dim, 1).to(self.device),
            'critic_2_target': Critic(self.state_dim + self.action_dim, 1).to(self.device)
        })
        self.network_keys_to_save = ['actor_target', 'critic_1_target']
        self.actor_optimizer = Adam(self.network_dict['actor'].parameters(), lr=self.actor_learning_rate)
        # tau=1 hard-copies the online weights into the fresh target networks.
        self._soft_update(self.network_dict['actor'], self.network_dict['actor_target'], tau=1)
        self.critic_1_optimizer = Adam(self.network_dict['critic_1'].parameters(), lr=self.critic_learning_rate)
        self._soft_update(self.network_dict['critic_1'], self.network_dict['critic_1_target'], tau=1)
        self.critic_2_optimizer = Adam(self.network_dict['critic_2'].parameters(), lr=self.critic_learning_rate)
        self._soft_update(self.network_dict['critic_2'], self.network_dict['critic_2_target'], tau=1)
        # behavioural policy args (exploration)
        self.exploration_strategy = GaussianNoise(self.action_dim, self.action_max, mu=0, sigma=0.1)
        # training args
        self.target_noise = algo_params['target_noise']
        self.noise_clip = algo_params['noise_clip']
        self.warmup_step = algo_params['warmup_step']
        self.actor_update_interval = algo_params['actor_update_interval']
        # statistic dict
        self.statistic_dict.update({
            'episode_return': [],
            'episode_test_return': []
        })

    def run(self, test=False, render=False, load_network_ep=None, sleep=0):
        """Main train/test loop: iterate episodes, periodically evaluate
        (every ``testing_gap`` episodes) and checkpoint (every ``saving_gap``)."""
        if test:
            num_episode = self.testing_episodes
            if load_network_ep is not None:
                print("Loading network parameters...")
                self._load_network(ep=load_network_ep)
            print("Start testing...")
        else:
            num_episode = self.training_episodes
            print("Start training...")
        for ep in range(num_episode):
            ep_return = self._interact(render, test, sleep=sleep)
            self.statistic_dict['episode_return'].append(ep_return)
            print("Episode %i" % ep, "return %0.1f" % ep_return)
            if (ep % self.testing_gap == 0) and (ep != 0) and (not test):
                # Periodic evaluation: average return over testing_episodes.
                ep_test_return = []
                for test_ep in range(self.testing_episodes):
                    ep_test_return.append(self._interact(render, test=True))
                self.statistic_dict['episode_test_return'].append(sum(ep_test_return)/self.testing_episodes)
                print("Episode %i" % ep, "test return %0.1f" % (sum(ep_test_return)/self.testing_episodes))
            if (ep % self.saving_gap == 0) and (ep != 0) and (not test):
                self._save_network(ep=ep)
        if not test:
            print("Finished training")
            print("Saving statistics...")
            self._plot_statistics(save_to_file=True)
        else:
            print("Finished testing")

    def _interact(self, render=False, test=False, sleep=0):
        """Run one episode; store transitions and trigger learning when training.

        Returns the (undiscounted) episode return.
        """
        done = False
        obs = self.env.reset()
        ep_return = 0
        # start a new episode
        while not done:
            if render:
                self.env.render()
            if self.env_step_count < self.warmup_step:
                # Uniform random actions during warm-up to seed the buffer.
                action = self.env.action_space.sample()
            else:
                action = self._select_action(obs, test=test)
            new_obs, reward, done, info = self.env.step(action)
            time.sleep(sleep)
            ep_return += reward
            if not test:
                # Store done as a 0/1 continuation mask (1 = non-terminal).
                self._remember(obs, action, new_obs, reward, 1 - int(done))
                if self.observation_normalization:
                    self.normalizer.store_history(new_obs)
                    self.normalizer.update_mean()
                if (self.env_step_count % self.update_interval == 0) and (self.env_step_count > self.warmup_step):
                    self._learn()
            obs = new_obs
            self.env_step_count += 1
        return ep_return

    def _select_action(self, obs, test=False):
        """Act via the target actor; clip for evaluation, add noise to explore."""
        obs = self.normalizer(obs)
        with T.no_grad():
            inputs = T.as_tensor(obs, dtype=T.float, device=self.device)
            action = self.network_dict['actor_target'](inputs).detach().cpu().numpy()
        if test:
            # evaluate
            return np.clip(action, -self.action_max, self.action_max)
        else:
            # explore
            return self.exploration_strategy(action)

    def _learn(self, steps=None):
        """Perform ``steps`` TD3 optimisation steps from replay samples
        (defaults to ``self.optimizer_steps``). No-op until the buffer
        holds at least one batch."""
        if len(self.buffer) < self.batch_size:
            return
        if steps is None:
            steps = self.optimizer_steps
        for i in range(steps):
            if self.prioritised:
                batch, weights, inds = self.buffer.sample(self.batch_size)
                weights = T.as_tensor(weights, device=self.device).view(self.batch_size, 1)
            else:
                # Uniform replay: unit importance weights.
                batch = self.buffer.sample(self.batch_size)
                weights = T.ones(size=(self.batch_size, 1), device=self.device)
                inds = None
            actor_inputs = self.normalizer(batch.state)
            actor_inputs = T.as_tensor(actor_inputs, dtype=T.float32, device=self.device)
            actions = T.as_tensor(batch.action, dtype=T.float32, device=self.device)
            critic_inputs = T.cat((actor_inputs, actions), dim=1)
            actor_inputs_ = self.normalizer(batch.next_state)
            actor_inputs_ = T.as_tensor(actor_inputs_, dtype=T.float32, device=self.device)
            rewards = T.as_tensor(batch.reward, dtype=T.float32, device=self.device).unsqueeze(1)
            done = T.as_tensor(batch.done, dtype=T.float32, device=self.device).unsqueeze(1)
            if self.discard_time_limit:
                # Force the continuation mask to all-ones, i.e. treat every
                # transition as non-terminal (bootstrap through time limits).
                done = done * 0 + 1
            with T.no_grad():
                actions_ = self.network_dict['actor_target'](actor_inputs_)
                # add noise (target policy smoothing), clipped to noise_clip
                noise = (T.randn_like(actions_, device=self.device) * self.target_noise)
                actions_ += noise.clamp(-self.noise_clip, self.noise_clip)
                actions_ = actions_.clamp(-self.action_max[0], self.action_max[0])
                critic_inputs_ = T.cat((actor_inputs_, actions_), dim=1)
                value_1_ = self.network_dict['critic_1_target'](critic_inputs_)
                value_2_ = self.network_dict['critic_2_target'](critic_inputs_)
                # Clipped double-Q: take the pessimistic (min) target value.
                value_ = T.min(value_1_, value_2_)
                # 'done' is a continuation mask here (1 = non-terminal).
                value_target = rewards + done * self.gamma * value_
            self.critic_1_optimizer.zero_grad()
            value_estimate_1 = self.network_dict['critic_1'](critic_inputs)
            critic_loss_1 = F.mse_loss(value_estimate_1, value_target.detach(), reduction='none')
            (critic_loss_1 * weights).mean().backward()
            self.critic_1_optimizer.step()
            if self.prioritised:
                # Refresh priorities with the new per-sample TD errors.
                assert inds is not None
                self.buffer.update_priority(inds, np.abs(critic_loss_1.cpu().detach().numpy()))
            self.critic_2_optimizer.zero_grad()
            value_estimate_2 = self.network_dict['critic_2'](critic_inputs)
            critic_loss_2 = F.mse_loss(value_estimate_2, value_target.detach(), reduction='none')
            (critic_loss_2 * weights).mean().backward()
            self.critic_2_optimizer.step()
            self.statistic_dict['critic_loss'].append(critic_loss_1.detach().mean())
            if self.optim_step_count % self.actor_update_interval == 0:
                # Delayed policy update: actor and targets move less often
                # than the critics.
                self.actor_optimizer.zero_grad()
                new_actions = self.network_dict['actor'](actor_inputs)
                critic_eval_inputs = T.cat((actor_inputs, new_actions), dim=1)
                actor_loss = -self.network_dict['critic_1'](critic_eval_inputs).mean()
                actor_loss.backward()
                self.actor_optimizer.step()
                self._soft_update(self.network_dict['actor'], self.network_dict['actor_target'])
                self._soft_update(self.network_dict['critic_1'], self.network_dict['critic_1_target'])
                self._soft_update(self.network_dict['critic_2'], self.network_dict['critic_2_target'])
                self.statistic_dict['actor_loss'].append(actor_loss.detach().mean())
            self.optim_step_count += 1
|
1677182
|
import pyb
import stm
# This script sets up a timer to do quadrature decoding
#
# It was tested using a switch similar to https://www.sparkfun.com/products/9117
# with some debounce wired up like this: https://hifiduino.files.wordpress.com/2010/10/analogdeb.jpg
# Note: the debounce is only really required for mechanical switches.
#
# I also tested this with one of these: http://www.lynxmotion.com/p-448-quadrature-motor-encoder-wcable.aspx
# Route X1/X2 to timer 2 channels via alternate function AF1 (quadrature inputs).
pin_a = pyb.Pin('X1', pyb.Pin.AF_PP, pull=pyb.Pin.PULL_NONE, af=pyb.Pin.AF1_TIM2)
pin_b = pyb.Pin('X2', pyb.Pin.AF_PP, pull=pyb.Pin.PULL_NONE, af=pyb.Pin.AF1_TIM2)
# The prescaler needs to be 0. When incrementing, the counter will count up-to
# and including the period value, and then reset to 0.
enc_timer = pyb.Timer(2, prescaler=0, period=100000)
# ENC_AB will increment/decrement on the rising edge of either the A channel or the B
# channel.
enc_channel = enc_timer.channel(1, pyb.Timer.ENC_AB)
# Poll and print the hardware counter forever (5 Hz).
while True:
    print("Counter =", enc_timer.counter());
    pyb.delay(200)
|
1677212
|
from __future__ import unicode_literals
from flask import Flask, render_template_string, Markup
from unittest import TestCase
from textwrap import dedent
try:
from unittest import mock
except ImportError:
import mock
import misaka
from misaka import (EXT_AUTOLINK, EXT_FENCED_CODE, # pyflakes.ignore
EXT_NO_INTRA_EMPHASIS, EXT_SPACE_HEADERS, EXT_STRIKETHROUGH,
EXT_SUPERSCRIPT, EXT_TABLES, HTML_ESCAPE, HTML_HARD_WRAP, HTML_SKIP_HTML,
HTML_USE_XHTML, TABLE_ALIGNMASK, TABLE_HEADER, TABLE_ALIGN_CENTER, TABLE_ALIGN_LEFT,
TABLE_ALIGN_RIGHT, EXT_MATH, EXT_FOOTNOTES, EXT_UNDERLINE, EXT_MATH_EXPLICIT,
EXT_DISABLE_INDENTED_CODE, EXT_HIGHLIGHT, EXT_QUOTE)
from flask_misaka import Misaka, markdown
# Sample text exercising several optional Markdown extensions at once.
TEST_MD = "*This* ~~contains~~ ``some`` mark^(down) extensions: www.markdown.com foo_bar_baz it's"

# App with the default Misaka configuration (no extensions enabled).
app = Flask(__name__)
app.debug = True
Misaka(app)


# templating tests #
@app.route('/a')
def view_render_inline():
    # Strikethrough is NOT enabled on this app, so ~~...~~ must pass through.
    s = "This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=s)


def test_render_inline():
    client = app.test_client()
    resp = client.open('/a')
    assert resp.data == b'<p>This is ~~restructuredtext~~ <em>markdown</em></p>\n'


@app.route('/b')
def view_render_var_block():
    # Markdown filter applied to a template variable inside a filter block.
    s = "This is a *markdown* block"
    tpl = '''{% filter markdown %}{{s}}{% endfilter %}'''
    return render_template_string(tpl, s=s)


def test_render_var_block():
    client = app.test_client()
    resp = client.open('/b')
    assert resp.data == b'<p>This is a <em>markdown</em> block</p>\n'


@app.route('/c')
def view_render_in_block():
    # Markdown filter applied to literal template text.
    tpl = '''{% filter markdown %}This is a *markdown* block{% endfilter %}'''
    return render_template_string(tpl)


def test_render_in_block():
    client = app.test_client()
    resp = client.open('/c')
    assert resp.data == b'<p>This is a <em>markdown</em> block</p>\n'


# markdown extensions in templates
# Second app configured with the strikethrough extension enabled.
extapp = Flask(__name__)
extapp.debug = True
Misaka(extapp, strikethrough=True)


@extapp.route('/d')
def view_render_inline_ext():
    s = "This is ~~restructuredtext~~ *markdown*"
    return render_template_string('{{s|markdown}}', s=s)


def test_render_inline_ext():
    # With strikethrough enabled, ~~...~~ becomes <del>...</del>.
    client = extapp.test_client()
    resp = client.open('/d')
    assert resp.data == b'<p>This is <del>restructuredtext</del> <em>markdown</em></p>\n'
# Note that the Markdown extension tests aren't actually testing that the
# Markdown is rendered correctly; that should be covered by the test suite of
# the misaka module. These tests should test that Flask-Misaka is calling
# the misaka module correctly, and returning the result unmodified
# (aside from being wrapped in a Markup class instance.)
@mock.patch("flask_misaka.misaka.html", side_effect=misaka.html)
class MarkdownExtensionTests(TestCase):
    """Verify that Flask-Misaka translates keyword options into the correct
    misaka extension/render-flag bitmasks.

    Each test receives the patched ``flask_misaka.misaka.html`` mock (which
    still calls through to the real implementation via side_effect) and
    asserts on the exact extensions/render_flags it was called with.
    """

    def test_defaults(self, html):
        # No options: zero extensions, zero render flags.
        ext, flags = 0, 0
        result = markdown(TEST_MD)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_one_ext(self, html):
        ext, flags = EXT_AUTOLINK, 0
        result = markdown(TEST_MD, autolink=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_two_ext(self, html):
        # Multiple extensions must be OR'd into one bitmask.
        ext, flags = EXT_FENCED_CODE | EXT_AUTOLINK, 0
        result = markdown(TEST_MD, fenced_code=True, autolink=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_one_render(self, html):
        ext, flags = 0, HTML_ESCAPE
        result = markdown(TEST_MD, escape=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_two_render(self, html):
        ext, flags = 0, HTML_HARD_WRAP | HTML_ESCAPE
        result = markdown(TEST_MD, wrap=True, escape=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_one_ext_one_render(self, html):
        ext, flags = EXT_NO_INTRA_EMPHASIS, HTML_SKIP_HTML
        result = markdown(TEST_MD, no_intra_emphasis=True, no_html=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_two_ext_two_render(self, html):
        ext = EXT_STRIKETHROUGH | EXT_SUPERSCRIPT
        flags = HTML_HARD_WRAP | HTML_USE_XHTML
        result = markdown(TEST_MD, strikethrough=True, superscript=True,
            hard_wrap=True, use_xhtml=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_inverse_ext(self, html):
        # intra_emphasis=False maps to the NO_INTRA_EMPHASIS extension bit.
        ext, flags = EXT_NO_INTRA_EMPHASIS, 0
        result = markdown(TEST_MD, intra_emphasis=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_inverse_render(self, html):
        ext, flags = 0, HTML_SKIP_HTML
        result = markdown(TEST_MD, no_html=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_undefined_option(self, html):
        # Unknown keyword options must be silently ignored.
        ext, flags = 0, 0
        result = markdown(TEST_MD, fireworks=True)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_defined_and_undefined_options(self, html):
        ext, flags = 0, HTML_HARD_WRAP
        result = markdown(TEST_MD, hard_wrap=True, stupid_hard_wrap=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_set_defaults(self, html):
        # Options passed to the Misaka constructor become render defaults.
        ext, flags = EXT_TABLES, HTML_HARD_WRAP
        md = Misaka(hard_wrap=True, tables=True)
        result = md.render(TEST_MD)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_override_defaults(self, html):
        # A per-call option overrides the constructor default.
        ext, flags = 0, 0
        md = Misaka(autolink=True)
        result = md.render(TEST_MD, autolink=False)
        html.assert_called_with(TEST_MD, extensions=ext, render_flags=flags)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, misaka.html(TEST_MD,
            extensions=ext, render_flags=flags))

    def test_custom_renderer(self, html):
        # A custom HtmlRenderer bypasses misaka.html entirely.
        class CustomRenderer(misaka.HtmlRenderer):
            def image(self, link, title, alt_text):
                return '<div><img src="{0}" alt="{2}" title="{1}"><div>{1}</div></div>'.format(
                    link, title, alt_text)
        # NOTE(review): test_md is empty, yet expected_result contains an
        # image -- the Markdown image literal appears to have been lost;
        # confirm against the upstream test source.
        test_md = ''
        expected_result = '<p><div><img src="/img.jpg" alt="Alt text" title="Title"><div>Title</div></div></p>\n'
        md = Misaka(None, CustomRenderer())
        result = md.render(test_md)
        self.assertFalse(html.called)
        self.assertEqual(str(result), expected_result)

    def test_smartypants(self, html):
        # smartypants post-processing turns ' into a typographic apostrophe.
        text = "Don't call me Shirley"
        expected_result = "<p>Don’t call me Shirley</p>\n"
        md = Misaka(smartypants=True)
        result = md.render(text)
        self.assertIsInstance(result, Markup)
        self.assertEqual(result, expected_result)

    def test_smartypants_table(self, html):
        "smartypants should not interfere with processing tables"
        text = dedent("""
| Left align | Right align | Center align |
|:-----------|------------:|:------------:|
| This | This | This |
| column | column | column |
| will | will | will |
| be | be | be |
| left | right | center |
| aligned | aligned | aligned |
""")
        expected_result = dedent("""
<table>
<thead>
<tr>
<th style="text-align: left">Left align</th>
<th style="text-align: right">Right align</th>
<th style="text-align: center">Center align</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left">This</td>
<td style="text-align: right">This</td>
<td style="text-align: center">This</td>
</tr>
<tr>
<td style="text-align: left">column</td>
<td style="text-align: right">column</td>
<td style="text-align: center">column</td>
</tr>
<tr>
<td style="text-align: left">will</td>
<td style="text-align: right">will</td>
<td style="text-align: center">will</td>
</tr>
<tr>
<td style="text-align: left">be</td>
<td style="text-align: right">be</td>
<td style="text-align: center">be</td>
</tr>
<tr>
<td style="text-align: left">left</td>
<td style="text-align: right">right</td>
<td style="text-align: center">center</td>
</tr>
<tr>
<td style="text-align: left">aligned</td>
<td style="text-align: right">aligned</td>
<td style="text-align: center">aligned</td>
</tr>
</tbody>
</table>
""")
        md = Misaka(tables=True, smartypants=True)
        result = md.render(text).strip()
        self.assertIsInstance(result, Markup)
        self.assertEqual(result.strip(), expected_result.strip())
class FactoryPatternTests(TestCase):
    """Flask application-factory pattern: init_app() registers the filter."""

    def test_init(self):
        md = Misaka()
        app2 = Flask(__name__)
        md.init_app(app2)
        # The 'markdown' Jinja filter must be installed on the late-bound app.
        self.assertIn("markdown", app2.jinja_env.filters)
|
1677218
|
from __future__ import division
import numpy as np
from numpy import linalg as la
import pdb, copy
import utils.utils
class LMPC(object):
    """Learning Model Predictive Controller (LMPC)

    Inputs:
        - ftocp: Finite Time Optimal Control Problem object used to compute
          the predicted trajectory (must expose Q, R, n, solve()).
    Methods:
        - addTrajectory: adds a trajectory to the safe set SS and updates the value function
        - computeCost: computes the cost associated with a feasible trajectory
        - solve: uses ftocp and the stored data to compute the predicted trajectory
    """

    def __init__(self, ftocp, CVX=False):
        # Initialization
        self.ftocp = ftocp
        # self.SS = []
        # self.uSS = []
        # NOTE(review): self.SS and self.uSS are never initialised here, yet
        # solve() and get_safe_set_q_func() read them -- calling either before
        # those attributes are assigned elsewhere raises AttributeError.
        self.Qfun = []            # cost-to-go per stored trajectory
        self.SS_t = []            # time-indexed safe set (states)
        self.uSS_t = []           # time-indexed safe set (inputs)
        self.Qfun_t = []          # time-indexed cost-to-go
        self.Q = ftocp.Q
        self.R = ftocp.R
        self.it = 0               # iteration (trajectory) counter
        self.CVX = CVX
        self.x_cls = []           # closed-loop state trajectories
        self.u_cls = []           # closed-loop input sequences
        self.ss_idxs = []         # per-time-step safe-set index selectors

    def addTrajectory(self, x, u, xf=None):
        """Store a feasible (x, u) pair, recompute its cost-to-go and rebuild
        the time-indexed safe set from self.ss_idxs. Returns the cost array."""
        if xf is None:
            xf = np.zeros((self.ftocp.n))
        n_x = x.shape[0]
        n_u = u.shape[0]
        # Add the feasible trajectory x and the associated input sequence u to the safe set
        self.x_cls.append(copy.copy(x))
        self.u_cls.append(copy.copy(u))
        # Compute and store the cost associated with the feasible trajectory
        cost = np.array(self.computeCost(x, u, xf))
        self.Qfun.append(cost)
        # Rebuild the per-time-step safe set from the configured index ranges.
        self.SS_t = []
        self.uSS_t = []
        self.Qfun_t = []
        for t in range(len(self.ss_idxs) - 1):
            ss = np.empty((n_x, 0))
            uss = np.empty((n_u, 0))
            qfun = np.empty(0)
            for j in self.ss_idxs[t]['it_range']:
                ss = np.append(ss, self.x_cls[j][:, self.ss_idxs[t]['ts_range']], axis=1)
                uss = np.append(uss, self.u_cls[j][:, self.ss_idxs[t]['ts_range']], axis=1)
                qfun = np.append(qfun, self.Qfun[j][self.ss_idxs[t]['ts_range']])
            self.SS_t.append(ss)
            self.uSS_t.append(uss)
            self.Qfun_t.append(qfun)
        self.ftocp.costFTOCP = cost[0] + 0.1
        # Augment iteration counter and print the cost of the trajectories stored in the safe set
        self.it = self.it + 1
        print('Trajectory of length %i added to the Safe Set. Current Iteration: %i' % (x.shape[1], self.it))
        # BUG FIX: this line used the Python 2 print *statement*
        # (print "...", [...]), which is a SyntaxError under Python 3 and was
        # inconsistent with the print() calls used elsewhere in this class.
        print("Performance of stored trajectories: \n", [self.Qfun[i][0] for i in range(self.it)])
        return cost

    def computeCost(self, x, u, xf):
        """Backward DP pass: cost-to-go of every point along (x, u).

        Terminal cost is 0; each earlier step adds u'Ru + 1 unless the state
        has already reached xf (within 1e-7), in which case it costs 0.
        Returns a list ordered forward in time.
        """
        l = x.shape[1]
        # Compute the cost in a DP like strategy: start from the last point x[len(x)-1] and move backwards
        for t in range(l - 1, -1, -1):
            if t == l - 1:  # Terminal cost
                # cost = [la.norm((self.Q**0.5).dot(x[:,t]-xf),ord=2)**2]
                # cost = [10*x[1,t]**2]
                cost = [0]
            else:
                if la.norm(x[:, t].reshape((-1, 1)) - xf, 2) <= 1e-7:
                    cost.append(0)
                else:
                    cost.append(u[:, t].T.dot(self.R).dot(u[:, t]) + 1 + cost[-1])
        # Finally flip the cost to have correct order
        return np.flip(cost).tolist()

    def solve(self, xt, xf=None, abs_t=None, expl_con=None, verbose=True):
        # Solve the FTOCP. Here set terminal constraint = ConvHull(self.SS) and terminal cost = BarycentricInterpolation(self.Qfun)
        # NOTE(review): relies on self.SS being assigned externally (see __init__).
        return self.ftocp.solve(xt, xf=xf, abs_t=abs_t, expl_con=expl_con,
                                SS=self.SS, Qfun=self.Qfun, CVX=self.CVX, verbose=verbose)

    def get_safe_set_q_func(self):
        # NOTE(review): self.SS / self.uSS must be set externally (see __init__).
        return (self.SS, self.uSS, self.Qfun)

    def add_safe_set(self, ss_idxs):
        # Install the per-time-step safe-set selectors used by addTrajectory().
        self.ss_idxs = ss_idxs
|
1677226
|
class TaxJarError(Exception):
    """Root of the TaxJar exception hierarchy; catch this to handle any
    TaxJar-related failure."""
    pass


class TaxJarResponseError(TaxJarError):
    """Raised for HTTP error responses (400, 500)."""
    pass


class TaxJarConnectionError(TaxJarError):
    """Raised when the connection to the API fails."""
    pass


class TaxJarTypeError(TaxJarError):
    """Raised for factory (type construction) errors."""
    pass
|
1677232
|
import logging
import pathlib
import joblib
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from util import INDEX_COLUMNS, init, reduce_mem_usage
def dump(df, name):
    """Shrink *df*'s memory footprint and persist it as a compressed joblib
    file under ../data/01_readcsv/<name>.joblib."""
    df = reduce_mem_usage(df)
    save_dir = pathlib.Path('../data/01_readcsv')
    # exist_ok avoids the TOCTOU race between an exists() check and mkdir().
    save_dir.mkdir(parents=True, exist_ok=True)
    joblib.dump(df, save_dir / f'{name}.joblib', compress=True)
def main(run_name):
    """Read the raw M5 csvs, melt sales to long format, attach release-date
    and calendar/price features, label-encode categoricals and dump the
    intermediate frames."""
    input_dir = pathlib.Path('../input/m5-forecasting-uncertainty')
    calendar = pd.read_csv(input_dir / 'calendar.csv', parse_dates=['date'])
    prices = pd.read_csv(input_dir / 'sell_prices.csv')
    # sales = pd.read_csv(input_dir / 'sales_train_validation.csv')
    sales = pd.read_csv(input_dir / 'sales_train_evaluation.csv')
    items = sales[['item_id', 'dept_id', 'cat_id']].drop_duplicates()
    # First week an item appears in the price table = its release week.
    releases = prices.groupby(['store_id','item_id'])['wm_yr_wk'].min().reset_index()
    releases.columns = ['store_id','item_id','wm_yr_wk']
    # Map each week id to its first calendar date, then keep only the date.
    weekday = calendar.groupby('wm_yr_wk')['date'].min().reset_index()
    releases = releases.merge(weekday)
    releases.columns = ['store_id','item_id','release_week', 'release_date']
    releases.drop('release_week', axis=1, inplace=True)
    # Add missing day columns (forecast horizon) as NA before melting.
    for d in calendar['d']:
        if d not in sales.columns:
            sales[d] = pd.NA
    v_sales = pd.melt(
        sales,
        id_vars=INDEX_COLUMNS,
        var_name='d',
        value_name='sales'
    )
    v_sales['sales'] = v_sales['sales'].astype('Int32')
    v_sales = v_sales.merge(releases)
    v_sales = v_sales.merge(calendar[['d', 'date']])
    # Drop rows from before an item's release date.
    v_sales = v_sales[v_sales['date'] >= v_sales['release_date']].copy().reset_index(drop=True)
    v_sales['release_ago'] = (v_sales['date'] - v_sales['release_date']).dt.days
    dump(v_sales, 'v_sales')
    dump(calendar, 'calendar')
    dump(prices, 'prices')
    dump(items, 'items')
    # Full feature merge (calendar + prices) for the modelling frame.
    v_sales = v_sales.merge(calendar)
    v_sales = v_sales.merge(prices)
    cat_columns = ['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2']
    label_encoders = {}
    for column in tqdm(cat_columns):
        encoder = LabelEncoder()
        v_sales[column] = encoder.fit_transform(v_sales[column].fillna('NA'))
        label_encoders[column] = encoder
    # NOTE(review): label_encoders is built but never persisted or returned --
    # confirm whether the fitted encoders should be dumped alongside 'merge'.
    dump(v_sales, 'merge')
if __name__ == "__main__":
    run_name = init(__file__)
    try:
        main(run_name)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; everything else is logged with its traceback.
        logging.exception('exception')
    finally:
        logging.info('end')
|
1677275
|
from OpenSSL import crypto, SSL
def generate_certificate(
        organization="PrivacyFilter",
        common_name="https://www.url.com",
        country="NL",
        duration=(365 * 24 * 60 * 60),
        keyfilename="key.pem",
        certfilename="cert.pem"):
    """Generate a 4096-bit RSA key and a self-signed X.509 certificate.

    Writes the private key to *keyfilename* and the certificate to
    *certfilename*, both in PEM format. *duration* is the validity window
    in seconds (default one year).
    """
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 4096)
    cert = crypto.X509()
    cert.get_subject().C = country
    cert.get_subject().O = organization
    cert.get_subject().CN = common_name
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(duration)
    # Self-signed: the issuer is the certificate's own subject.
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    cert.sign(k, 'sha512')
    # NOTE(review): no serial number or X.509 version is set on the
    # certificate -- some TLS stacks reject such certs; confirm whether
    # cert.set_serial_number(...) should be added.
    with open(keyfilename, "wt") as keyfile:
        keyfile.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))
    with open(certfilename, "wt") as certfile:
        certfile.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode("utf-8"))


if __name__ == '__main__':
    generate_certificate()
|
1677285
|
import maya.cmds as cmds
import sys
import maya.mel as mel
from . import stereoCameraErrors
import os
import imp
def __call(language, method, rigName, kwords={}, cmd_args=[]):
    """
    Private method to call a MEL or Python callback. Return 'Error' in
    case of error. We avoid None, [], '' because those are more likely
    to be returned by the command we call
    """
    # NOTE(review): mutable default arguments (kwords={}, cmd_args=[]) are
    # shared across calls; harmless while this is a stub, but fix before
    # implementing.
    pass


def reloadScripts():
    # Stub: not yet implemented.
    pass


def runCallbackChecks():
    # Stub: not yet implemented.
    pass


def loadPlugin():
    # Stub: not yet implemented.
    pass


def performReloadChk():
    """
    Scans the current module database and looks for any modules that we
    own. If we find a module that we own, reload it in case any changes
    have been made. In terms of module reloading, this module cannot be
    reloaded because it would imply that the code is changing while it
    is being executed. If you change this module, you must invoke a
    reload in the python shell prior to calling this script.
    """
    pass


def unloadPlugin(*args):
    # Stub: not yet implemented.
    pass
|
1677294
|
import pytest
from mendeley.exception import MendeleyApiException
from test import cassette
from test.resources.documents import *
def test_should_trash_document():
    """Trashing a document must make a subsequent fetch by id raise a 404."""
    session = get_user_session()
    delete_all_documents()
    # Replays the recorded HTTP exchange; the trailing get() inside the
    # context is expected to raise MendeleyApiException.
    with cassette('fixtures/resources/trash/move_to_trash/trash_document.yaml'), \
            pytest.raises(MendeleyApiException) as ex_info:
        doc = create_document(session)
        doc.move_to_trash()
        session.documents.get(doc.id)
    ex = ex_info.value
    assert ex.status == 404
    assert ex.message == 'Document not found'
|
1677306
|
import logging
from typing import List
from hydra.utils import instantiate
from omegaconf import DictConfig
from nuplan.planning.script.builders.utils.utils_type import validate_type
from nuplan.planning.training.modeling.metrics.abstract_training_metric import AbstractTrainingMetric
logger = logging.getLogger(__name__)
def build_training_metrics(cfg: DictConfig) -> List[AbstractTrainingMetric]:
    """
    Build training metrics based on config.
    :param cfg: config with a ``training_metric`` mapping of name -> metric config.
    :return: list of instantiated metrics.
    """
    instantiated_metrics: List[AbstractTrainingMetric] = []
    # Only the metric configs are used; the mapping keys are just labels
    # (the original iterated .items() and ignored the key).
    for cfg_metric in cfg.training_metric.values():
        new_metric: AbstractTrainingMetric = instantiate(cfg_metric)
        # Fail fast if the config instantiated something unexpected.
        validate_type(new_metric, AbstractTrainingMetric)
        instantiated_metrics.append(new_metric)
    return instantiated_metrics
|
1677333
|
from collections import namedtuple
# Immutable experiment configuration record. All fields come from the
# experiment definition except "fold", which is supplied on the command line.
Config = namedtuple("Config", [
    "dataset_path",
    "models_dir",
    "folder",
    "img_rows",
    "img_cols",
    "target_rows",
    "target_cols",
    "num_channels",
    "network",
    "loss",
    "lr",
    "optimizer",
    "batch_size",
    "epoch_size",
    "use_clahe",
    "nb_epoch",
    "cycle_start_epoch",
    "predict_batch_size",
    "use_crop",
    "use_resize",
    "dbg",
    "save_images",
    "test_pad",
    "train_pad",
    "fold"  # pass in argparse
])
|
1677341
|
from ...vendor import click
@click.command(
    help='Exit with a successful status code',
)
@click.help_option('-h', '--help')
def subcommand():
    """No-op command: does nothing and exits 0 (usable as a success probe)."""
    pass
|
1677354
|
import pandas as pd
import matplotlib.pyplot as plt
import textwrap
import yaml
# draw histograms
def draw_hists(columns, test_vectors, width=20, height=5):
    """Draw one bar chart of value counts per column, side by side.

    :param columns: sequence of column names present in ``test_vectors``.
    :param test_vectors: pandas DataFrame holding the data.
    :param width: total figure width in inches.
    :param height: figure height in inches.
    """
    n = len(columns)
    fig, axes = plt.subplots(1, n)
    # plt.subplots(1, 1) returns a bare Axes (not an array); normalize so
    # axes[i] below works for a single column as well.
    if n == 1:
        axes = [axes]
    fig.set_figwidth(width)
    fig.set_figheight(height)
    for i, column in enumerate(columns):
        test_vectors[column].value_counts().plot(kind='bar', rot=0, ax=axes[i])
        # Wrap long column names so the subplot titles stay readable.
        axes[i].set_title(textwrap.TextWrapper(width=25).fill(column))
# load a list of test vectors from a YAML file
def load_test_vectors_from_yaml(file, features):
    """Load a list of test vectors from a YAML file into a pandas DataFrame.

    :param file: path to a YAML file with an ``elements`` list.
    :param features: collection of known feature names; unknown names raise.
    :return: DataFrame with one row per element (alias, feature values,
        expected score range).
    :raises Exception: on an unknown value type or unknown feature name.
    """
    rows = []
    with open(file, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from untrusted input.
        raw_data = yaml.safe_load(f)
    for raw in raw_data['elements']:
        data = {}
        data['alias'] = raw['alias']
        for value in raw['values']:
            feature_type = value['type']
            # Each value type stores its payload under a different key.
            if feature_type == 'ScoreValue':
                feature_value = value['value']
            elif feature_type == 'BooleanValue':
                feature_value = value['flag']
            elif feature_type == 'DateValue':
                feature_value = value['date']
            elif feature_type == 'IntegerValue' or feature_type == 'PositiveIntegerValue':
                feature_value = value['number']
            elif feature_type == 'LgtmGradeValue':
                feature_value = value['value']
            elif feature_type == 'UnknownValue':
                feature_value = 'unknown'
            elif feature_type == 'VulnerabilitiesValue':
                feature_value = '...'
            else:
                raise Exception('Oh no! Unknown type: ' + feature_type)
            # ScoreValue entries name themselves via 'score'; all others via 'feature'.
            if feature_type == 'ScoreValue':
                feature_name = value['score']['name']
            else:
                feature_name = value['feature']['name']
            if feature_name not in features:
                raise Exception('Oh no! Unknown feature: ' + feature_name)
            data[feature_name] = feature_value
        data['score_from'] = raw['expectedScore']['from']
        data['score_to'] = raw['expectedScore']['to']
        rows.append(data)
    return pd.DataFrame(rows)
|
1677373
|
import os
import sys
import time
def progbar(i, iter_per_epoch, message='', bar_length=50, display=True):
    """Render an in-place text progress bar for the current epoch.

    :param i: global iteration counter (0-based).
    :param iter_per_epoch: iterations per epoch.
    :param message: trailing status text.
    :param bar_length: width of the bar in characters.
    :param display: if False, compute the return value without printing.
    :return: tuple ``(end_epoch, epochs_done)`` — True on the last iteration
        of an epoch, and the number of completed epochs after iteration i.
    """
    step = i % iter_per_epoch + 1
    at_epoch_end = step == iter_per_epoch
    if display:
        percent = int(100. * step / iter_per_epoch)
        filled = '=' * (bar_length * percent // 100)
        text = "\r[{:{width}s}] {:3d}%. {:s}".format(
            filled, percent, message, width=bar_length)
        sys.stdout.write(text)
        sys.stdout.flush()
        if at_epoch_end:
            # Blank the line once the epoch completes.
            sys.stdout.write('\r{:100s}\r'.format(''))
            sys.stdout.flush()
    return at_epoch_end, (i + 1) // iter_per_epoch
class FileWriter(object):
    """Write CSV-style experiment logs to a file, optionally echoing to stdout.

    Columns are registered with :meth:`add_var` (plain values or TF-tensor
    backed values), :meth:`initialize` writes the CSV header, and
    :meth:`write` appends one formatted row per call.
    """
    def __init__(self, log_file, args=None,
                 overwrite=False, pipe_to_sys=True):
        """
        :param log_file: path of the log file to create.
        :param args: optional argparse.Namespace whose values are written as
            '#'-prefixed comment lines at the top of the log.
        :param overwrite: allow clobbering an existing log file.
        :param pipe_to_sys: also print each data row (and header) to stdout.
        :raises Exception: if ``log_file`` exists and ``overwrite`` is False.
        """
        self.written = False
        self.log_file = log_file
        self.pipe = pipe_to_sys
        self.args = args
        # non-tensorflow values to be stored
        self.names = []
        self.formats = []
        # tf tensor values
        self.tensor_names = []
        self.tensors = []
        self.tensor_formats = []
        # check file existence, then create file
        if os.path.exists(self.log_file) and not overwrite:
            raise Exception("Overwriting existing log directory is "
                            "not allowed unless overwrite=True")
        log_dir = os.path.dirname(self.log_file)
        # Guard the empty-dirname case (bare filename): makedirs('') raises.
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # BUG FIX: open(path, 'w', 0) (unbuffered) is a ValueError for text
        # files on Python 3; line buffering keeps rows hitting disk promptly.
        self.f = open(self.log_file, 'w', buffering=1)
        if args is not None:
            # write args as commented metadata at the top of the log
            v_dict = vars(self.args)
            string = '# ArgParse Values:'
            self._write(string)
            for k in v_dict:
                string = '# {:s}: {:s}'.format(str(k), str(v_dict[k]))
                self._write(string)
    @staticmethod
    def list_args(args):
        """Print the argparse values without needing a FileWriter instance."""
        v_dict = vars(args)
        print('# ArgParse Values:')
        for k in v_dict:
            print('# {:s}: {:s}'.format(str(k), str(v_dict[k])))
    def initialize(self):
        """Write the CSV header row (tensor columns first, then plain ones)."""
        self.header = ','.join(self.tensor_names + self.names)
        self._write(self.header)
    def add_var(self, name, var_format, tensor=None):
        """Register a column.

        :param name: column name used in the header.
        :param var_format: str.format spec for this column's values.
        :param tensor: if given, register as a tensor-backed column instead.
        """
        if tensor is None:
            self.names += [name]
            self.formats += [var_format]
        else:
            self.tensor_names += [name]
            self.tensors += [tensor]
            self.tensor_formats += [var_format]
    def write(self, tensor_values=None, values=None):
        """Append one data row; values must match the registered column order.

        Defaults are None (not mutable lists) to avoid shared-state bugs.
        """
        row = list(tensor_values or []) + list(values or [])
        string = ','.join(self.tensor_formats + self.formats).format(*row)
        self._write(string, is_summary=True, pipe=True)
    def _write(self, string, is_summary=False, pipe=False):
        """Low-level write + flush; optionally echo (with header) to stdout."""
        self.f.write(string + '\n')
        self.f.flush()
        if self.pipe and pipe:
            if is_summary:
                print(self.header)
            print(string)
# Retaining for backwards compatibility
from .tfutils import TensorDict
|
1677388
|
from taichi._lib import core as _ti_core
from taichi.lang.enums import Layout
from taichi.lang.expr import Expr, make_expr_group
from taichi.lang.util import taichi_scope
class AnyArray:
    """Class for arbitrary arrays in Python AST.
    Args:
        ptr (taichi_core.Expr): A taichi_core.Expr wrapping a taichi_core.ExternalTensorExpression.
        element_shape (Tuple[Int]): () if scalar elements (default), (n) if vector elements, and (n, m) if matrix elements.
        layout (Layout): Memory layout.
    """
    def __init__(self, ptr, element_shape, layout):
        # Only external (ndarray) expressions are valid backing pointers here.
        assert ptr.is_external_var()
        self.ptr = ptr
        self.element_shape = element_shape
        self.layout = layout
    @property
    @taichi_scope
    def shape(self):
        """A list containing sizes for each dimension. Note that element shape will be excluded.
        Returns:
            List[Int]: The result list.
        """
        dim = _ti_core.get_external_tensor_dim(self.ptr)
        ret = [
            Expr(_ti_core.get_external_tensor_shape_along_axis(self.ptr, i))
            for i in range(dim)
        ]
        element_dim = len(self.element_shape)
        if element_dim == 0:
            return ret
        # SOA layouts store the element dims first, AOS layouts last —
        # strip them from the corresponding end.
        return ret[
            element_dim:] if self.layout == Layout.SOA else ret[:-element_dim]
    @taichi_scope
    def loop_range(self):
        """Gets the corresponding taichi_core.Expr to serve as loop range.
        This is not in use now because struct fors on AnyArrays are not supported yet.
        Returns:
            taichi_core.Expr: See above.
        """
        return self.ptr
class AnyArrayAccess:
    """Class for first-level access to AnyArray with Vector/Matrix elements in Python AST.
    Args:
        arr (AnyArray): See above.
        indices_first (Tuple[Int]): Indices of first-level access.
    """
    def __init__(self, arr, indices_first):
        self.arr = arr
        self.indices_first = indices_first
    @taichi_scope
    def subscript(self, i, j):
        """Subscript the element with indices (i,) for vectors or (i, j) for matrices."""
        indices_second = (i, ) if len(self.arr.element_shape) == 1 else (i, j)
        # Index order mirrors the memory layout: element dims come first in SOA.
        if self.arr.layout == Layout.SOA:
            indices = indices_second + self.indices_first
        else:
            indices = self.indices_first + indices_second
        return Expr(_ti_core.subscript(self.arr.ptr,
                                       make_expr_group(*indices)))
|
1677389
|
import os
from flask import Flask, Response
from hanako.server.player_api import player_api
from hanako.server.room_api import room_api
api = Flask(__name__)
api.register_blueprint(player_api)
api.register_blueprint(room_api)
@api.route('/health')
def health_check():
    """Liveness probe endpoint.

    NOTE(review): wrapping ``os.uname()`` in a Flask Response and then
    str()-ing it yields the Response *repr*, not the uname text — confirm
    this is the intended payload.
    """
    return 'Health OK for: ' + str(Response(os.uname()))
@api.route('/api/v1/cqrs', methods=['POST'])
def cqrs():
    """CQRS command endpoint — currently a no-op returning an empty 200 body."""
    return ''
|
1677397
|
import time
import RPi.GPIO as GPIO
# BCM pin numbers of the two push buttons (wired active-low: input reads
# low while the button is pressed).
switch1 = 17
switch2 = 26
GPIO.setmode(GPIO.BCM)  # Use BCM GPIO numbers
GPIO.setup(switch1, GPIO.IN)
GPIO.setup(switch2, GPIO.IN)
# BUG FIX: the original used Python-2 print statements, a SyntaxError on
# Python 3; the parenthesized form below runs identically on both.
print("start")
while True:
    if not GPIO.input(switch1):
        print("Button 1 pressed")
        time.sleep(0.5)  # crude debounce / rate limit
    elif not GPIO.input(switch2):
        print("Button 2 pressed")
        time.sleep(0.5)
    else:
        pass
|
1677448
|
import re
from telethon import TelegramClient
def test_all_methods_present(docs_dir):
    """
    Assert that every public TelegramClient method appears in the
    quick-reference client documentation page.

    Args:
        docs_dir: pathlib.Path pointing at the documentation root.
    """
    with (docs_dir / 'quick-references/client-reference.rst').open(encoding='utf-8') as fd:
        # Method names are listed as 4-space-indented bare words in the RST page.
        present_methods = set(map(str.lstrip, re.findall(r'^ {4}\w+$', fd.read(), re.MULTILINE)))
    assert len(present_methods) > 0
    for name in dir(TelegramClient):
        attr = getattr(TelegramClient, name)
        # Only public callables must be documented; private names are exempt.
        if callable(attr) and not name.startswith('_'):
            assert name in present_methods
|
1677457
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Hardmax(Base):
    """ONNX backend-test case generators for the Hardmax operator."""
    @staticmethod
    def export():  # type: () -> None
        """Basic Hardmax cases, including the tie-breaking (first max wins) case."""
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
        )
        x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(np.float32)
        y = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]).astype(np.float32)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_example')
        # For multiple occurrances of the maximal values, the first occurrence is selected for one-hot output
        x = np.array([[3, 3, 3, 1]]).astype(np.float32)
        y = np.array([[1, 0, 0, 0]]).astype(np.float32)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_one_hot')
    @staticmethod
    def export_hardmax_axis():  # type: () -> None
        """Hardmax cases exercising the ``axis`` attribute (including negative)."""
        def hardmax_2d(x):  # type: (np.ndarray) -> np.ndarray
            # One-hot of the row-wise argmax via an identity-matrix lookup.
            return np.eye(x.shape[1], dtype=x.dtype)[np.argmax(x, axis=1)]
        x = np.random.randn(3, 4, 5).astype(np.float32)
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
            axis=0,
        )
        # axis=k coerces the input to 2-D as [dims before k] x [dims from k on].
        y = hardmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_axis_0')
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
            axis=1,
        )
        y = hardmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_axis_1')
        # default axis is 1
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
        )
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_default_axis')
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
            axis=2,
        )
        y = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_axis_2')
        node = onnx.helper.make_node(
            'Hardmax',
            inputs=['x'],
            outputs=['y'],
            axis=-1,
        )
        y = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
        expect(node, inputs=[x], outputs=[y],
               name='test_hardmax_negative_axis')
|
1677458
|
from quicksort import partition
from animate import Plot
def heapify(unsorted, index, start, end):
    """Sift the element at heap-local ``index`` down within unsorted[start:end+1]
    so the max-heap property holds; indices are relative to ``start``.
    Calls Plot on each swap to drive the sorting animation."""
    heap_size = end + 1 - start
    largest = index
    left_index = 2 * index + 1
    right_index = 2 * index + 2
    if left_index < heap_size and unsorted[start + left_index] > unsorted[start + largest]:
        largest = left_index
    if right_index < heap_size and unsorted[start + right_index] > unsorted[start + largest]:
        largest = right_index
    if largest != index:
        # Swap, continue sifting down, then draw the new state.
        unsorted[start + largest], unsorted[start + index] = unsorted[start + index], unsorted[start + largest]
        heapify(unsorted, largest, start, end)
        Plot(largest,unsorted)
def heapSort(unsorted, start, end):
    """Heap-sort unsorted[start:end+1] in place (ascending); returns the list."""
    n = end + 1 - start
    # Build a max-heap over the subrange (bottom-up).
    for i in range(n // 2 - 1, -1, -1):
        heapify(unsorted, i, start, end)
    # Repeatedly move the current max to the end of the shrinking subrange.
    for i in range(end, start, -1):
        unsorted[start], unsorted[i] = unsorted[i], unsorted[start]
        heapify(unsorted, 0, start, i - 1)
    return unsorted
def introSort(data, start, end, depth):
    """Introsort: quicksort that falls back to heapsort once the recursion
    ``depth`` budget is exhausted, guarding against quicksort's O(n^2) worst case."""
    if start < end:
        if depth == 0:
            # Switch from quicksort to heapsort
            heapSort(data, start, end)
            Plot(max(data[start:end + 1]), data)
            return
        # Return the pivot index
        p = partition(data, start, end)
        # Sort all the elements to the left and to the right of the pivot
        introSort(data, start, p - 1, depth - 1)
        introSort(data, p + 1, end, depth - 1)
|
1677464
|
from rover import rover
import datetime
import numpy as np
import rospy
def thread_log():
    """Logging thread: samples rover state at ~50 Hz and appends CSV rows to a
    timestamped file while ``rover.save_on`` is set; exits when ROS shuts down
    or ``rover.on`` is cleared."""
    print('LOG: thread starting ..')
    freq = 50.0
    t0 = datetime.datetime.now()
    t = datetime.datetime.now()
    t_pre = datetime.datetime.now()
    avg_number = 100  # window size for the running frequency average
    header_written = False
    file_open = False
    file_name = rover.t0.strftime('data_logs/log_%Y%m%d_%H%M%S.txt')
    rate = rospy.Rate(freq)
    while not rospy.is_shutdown() and rover.on:
        t = datetime.datetime.now()
        dt = (t - t_pre).total_seconds()
        if dt < 1e-6:
            # Guard against a zero interval in the frequency estimate below.
            continue
        # Exponential-style running average of the achieved loop rate.
        freq = (freq * (avg_number - 1) + (1 / dt)) / avg_number
        t_pre = t
        rover.freq_log = freq  # publish measured log rate back to shared state
        dt_millis = t - t0
        # NOTE(review): uses .seconds (not total_seconds()), so t_millis wraps
        # after 24 h of runtime — confirm this is acceptable.
        t_millis = int(dt_millis.seconds * 1e3 + dt_millis.microseconds / 1e3)
        if rover.save_on:
            if not header_written:
                # First save tick: create the file and write the CSV header.
                header_written = True
                write_header(file_name)
            else:
                # Subsequent ticks: open once in append mode, then write rows.
                if not file_open:
                    f = open(file_name, 'a')
                    file_open = True
                write_date(f, t_millis)
        rate.sleep()
    if file_open:
        f.close()
    print('LOG: thread closed!')
def write_header(file_name):
    """Create the log file, write its CSV header row, and record the log path
    in data_logs/last_log.txt for easy retrieval."""
    # NOTE: header order and the data order must be of the same order.
    with open(file_name, 'w') as f:
        f.write('time,')
        f.write('t,')
        f.write(string_vector('x'))
        f.write(string_vector('v'))
        f.write(string_vector('a'))
        f.write(string_vector('W'))
        f.write(string_3x3('R'))
        f.write(string_vector('xd'))
        f.write(string_vector('xd_dot'))
        f.write(string_vector('b1d'))
        f.write(string_vector('Wd'))
        f.write(string_3x3('Rd'))
        f.write('\n')
    # Remember the most recent log file so tooling can find it.
    with open('data_logs/last_log.txt', 'w') as f:
        f.write(file_name)
def write_date(f, t_millis):
    """Append one CSV data row of the current rover state to the open file ``f``."""
    # NOTE: header order and the data order must be of the same order.
    write_scalar(f, datetime.datetime.now().strftime('%H%M%S.%f'))
    write_scalar(f, t_millis)
    write_vector(f, rover.x)
    write_vector(f, rover.v)
    write_vector(f, rover.a)
    write_vector(f, rover.W)
    write_3x3(f, rover.R)
    write_vector(f, rover.control.xd)
    write_vector(f, rover.control.xd_dot)
    write_vector(f, rover.control.b1d)
    write_vector(f, rover.control.Wd)
    write_3x3(f, rover.control.Rd)
    f.write('\n')
def string_vector(name, length=3):
    """Return CSV header cells 'name_0,...,name_{length-1},' (with trailing comma)."""
    return ''.join('{}_{},'.format(name, idx) for idx in range(length))
def string_3x3(name):
    """Return CSV header cells 'name_00,...,name_22,' for a 3x3 matrix (row-major)."""
    cells = ['{}_{}{},'.format(name, row, col)
             for row in range(3) for col in range(3)]
    return ''.join(cells)
def write_scalar(f, data):
    """Write a single CSV cell (value plus trailing comma) to ``f``."""
    cell = '{},'.format(data)
    f.write(cell)
def write_vector(f, data, length=3):
    """Write the first ``length`` entries of ``data`` as comma-terminated CSV cells."""
    row = ''.join('{},'.format(data[k]) for k in range(length))
    f.write(row)
def write_3x3(f, data):
    """Write a 3x3 matrix (indexable as data[i, j]) row-major as CSV cells."""
    cells = ['{},'.format(data[row, col])
             for row in range(3) for col in range(3)]
    f.write(''.join(cells))
|
1677479
|
from flask_restplus import Resource
from flask import request, current_app
from sqlalchemy import desc, func, or_
from marshmallow.exceptions import MarshmallowError
from werkzeug.exceptions import BadRequest
from app.extensions import api
from app.api.now_submissions.models.application_start_stop import ApplicationStartStop
from app.api.now_submissions.response_models import APPLICATIONSTARTSTOP
from app.api.utils.access_decorators import requires_role_edit_submissions
from app.api.utils.resources_mixins import UserMixin
class ApplicationStartStopListResource(Resource, UserMixin):
    # REST resource for persisting application start/stop submissions.
    @api.doc(description='Save an application start stop')
    @requires_role_edit_submissions
    @api.expect(APPLICATIONSTARTSTOP)
    def post(self):
        """Create and persist an ApplicationStartStop from the request body;
        returns the new record's messageid with HTTP 201.

        NOTE(review): the parsed JSON body is re-stringified before being
        handed to ``create`` — confirm ApplicationStartStop.create really
        expects a string rather than a dict.
        """
        application_startstop = ApplicationStartStop.create(str(request.json))
        application_startstop.save()
        return application_startstop.messageid, 201
|
1677565
|
from __future__ import print_function
__authors__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "3-clause BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
import warnings
from theano import function
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import host_from_gpu
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano import shared
from theano import tensor as T
from theano.tensor import as_tensor_variable
from theano.tensor.nnet.conv import conv2d
from pylearn2.sandbox.cuda_convnet.img_acts import ImageActs
def test_match_full_conv():
    # Tests that running ImageActs with no padding is the same as running
    # theano's conv2D in full mode after flipping the kernel and tranposing
    # the output and input channels
    # In other words, if convolution computes H=XK, we now compute
    # R=HK^T
    rng = np.random.RandomState([2013, 1, 29])
    batch_size = 2
    rows = 6
    cols = 7
    channels = 3
    filter_rows = 5
    filter_cols = filter_rows
    num_filters = 16
    # hid_acts uses cuda-convnet's (channels, rows, cols, batch) ordering.
    hid_acts = shared(rng.uniform(-1., 1., (num_filters,
                                            rows - filter_rows + 1,
                                            cols - filter_cols + 1,
                                            batch_size)
                                  ).astype('float32'), name='hidacts')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                           filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(hid_acts)
    gpu_filters = gpu_from_host(filters)
    output = ImageActs()(gpu_images, gpu_filters, as_tensor_variable((6, 7)))
    output = host_from_gpu(output)
    # Reference path: theano conv2d wants (batch, channels, rows, cols).
    images_bc01 = hid_acts.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    # need to tranpose the kernel stack to do imgActs rather than filterActs
    filters_bc01 = filters_bc01.dimshuffle(1, 0, 2, 3)
    # In order to do the transpose operation, we must flip the kernels
    # But in theano's conv2d, the kernels get flipped anyway
    # so in this case, we do not flip the kernel
    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode='full')
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    f = function([], [output, output_conv2d])
    output, output_conv2d = f()
    warnings.warn("""test_match_full_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    # On mismatch, print diagnostics before failing the test.
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print('cuda-convnet shape: ',output.shape)
            print('theano shape: ',output_conv2d.shape)
            assert False
        err = np.abs(output - output_conv2d)
        print('absolute error range: ', (err.min(), err.max()))
        print('mean absolute error: ', err.mean())
        print('cuda-convnet value range: ', (output.min(), output.max()))
        print('theano value range: ', (output_conv2d.min(), output_conv2d.max()))
        assert False
def test_match_full_conv_grad():
    # Tests that the gradient of ImageActs with no padding is the same as the
    # gradient of
    # theano's conv2D in full mode after flipping the kernel and tranposing
    # the output and input channels
    rng = np.random.RandomState([2013, 1, 29])
    batch_size = 2
    rows = 6
    cols = 7
    channels = 3
    filter_rows = 5
    filter_cols = filter_rows
    num_filters = 16
    # hid_acts uses cuda-convnet's (channels, rows, cols, batch) ordering.
    hid_acts = shared(rng.uniform(-1., 1., (num_filters,
                                            rows - filter_rows + 1,
                                            cols - filter_cols + 1,
                                            batch_size)
                                  ).astype('float32'), name='hidacts')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                           filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(hid_acts)
    gpu_filters = gpu_from_host(filters)
    output = ImageActs()(gpu_images, gpu_filters, as_tensor_variable((6, 7)))
    output = host_from_gpu(output)
    images_bc01 = hid_acts.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    # need to tranpose the kernel stack to do imgActs rather than filterActs
    filters_bc01 = filters_bc01.dimshuffle(1, 0, 2, 3)
    # In order to do the transpose operation, we must flip the kernels
    # But in theano's conv2d, the kernels get flipped anyway
    # so in this case, we do not flip the kernel
    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode='full')
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    # Compare gradients of a random projection of both outputs — this checks
    # the full Jacobian without materializing it.
    theano_rng = MRG_RandomStreams(5 * 10 * 2013)
    random = theano_rng.normal(size=output_conv2d.shape, dtype=output_conv2d.dtype)
    projected = (output * random).sum()
    projected_conv_2d = (output_conv2d * random).sum()
    grads = T.grad(projected, [hid_acts, filters]) + T.grad(projected_conv_2d, [hid_acts, filters])
    f = function([], grads)
    gi, gf, gi_th, gf_th = f()
    assert gi.shape == gi_th.shape
    diff = np.abs(gi - gi_th).max()
    if diff > 2.9e-6:
        assert False
    diff = np.abs(gf - gf_th).max()
    if diff > 1.5e-6:
        raise AssertionError(diff)
if __name__ == '__main__':
test_match_full_conv()
|
1677572
|
import distrax
import jax.numpy as jnp
from shinrl import Pendulum
def test_step_reset():
    """Discrete-action Pendulum truncates via TimeLimit exactly at `horizon` steps."""
    env = Pendulum()
    env.reset()
    for _ in range(env.config.horizon - 1):
        a = env.action_space.sample()
        obs, rew, done, info = env.step(a)
        assert not done
    # The final step must be flagged as a time-limit truncation.
    obs, rew, done, info = env.step(a)
    assert info["TimeLimit.truncated"]
def test_continuous_step_reset():
    """Same time-limit behavior with the continuous action mode enabled."""
    config = Pendulum.DefaultConfig(act_mode="continuous")
    env = Pendulum(config)
    env.reset()
    for _ in range(env.config.horizon - 1):
        a = env.action_space.sample()
        obs, rew, done, info = env.step(a)
        assert not done
    obs, rew, done, info = env.step(a)
    assert info["TimeLimit.truncated"]
def test_q():
    """Greedy policy from the optimal Q must far outperform a uniform policy."""
    env = Pendulum()
    # Uniform random policy over all state-action pairs.
    pol = jnp.ones((env.mdp.dS, env.mdp.dA)) / env.mdp.dA
    ret = env.calc_return(pol)
    assert ret < -1000
    q = env.calc_optimal_q()
    pol = distrax.Greedy(q).probs
    assert q.max() > -50
    assert env.calc_return(pol) > -50
|
1677574
|
from itertools import chain
from django.contrib.staticfiles.finders import find
import pytest
from codemirror2.widgets import CodeMirrorEditor
@pytest.fixture
def w():
    """
    Construct a CodeMirrorEditor widget with default settings.
    """
    return CodeMirrorEditor()
def test_dont_share_options(settings):
    """Widget instances must not share one mutable options dict; defaults merge in."""
    settings.CODEMIRROR_DEFAULT_OPTIONS = {"default": "option"}
    w1 = CodeMirrorEditor(options={"w1": "opt1"})
    w2 = CodeMirrorEditor(options={"w2": "opt2"})
    assert w1.options != w2.options
    assert w1.options == {"w1": "opt1", "default": "option"}
    assert w2.options == {"w2": "opt2", "default": "option"}
def test_mode_populated_from_options():
    """The 'mode' option alone determines the loaded mode list."""
    w = CodeMirrorEditor(options={"mode": "python"})
    assert w.modes == ["python"]
def test_mode_populated_w_modelist(w):
    # Default widget loads the full built-in mode list (16 modes).
    assert len(w.modes) == 16
def test_pass_modes():
    """An explicit modes= argument overrides mode discovery from options."""
    modes = ["python", "css", "xml", "javascript", "htmlmixed"]
    w = CodeMirrorEditor(options={"mode": "python"},
                         modes=modes)
    assert w.modes == modes
    assert w.options == {"mode": "python"}
def test_default_theme(w):
    # Without configuration, only the built-in default theme is loaded.
    assert w.themes == ["default"]
def test_themes_populated_from_options():
    """The 'theme' option determines the loaded theme list."""
    w = CodeMirrorEditor(options={"theme": "dark"})
    assert w.themes == ["dark"]
def test_themes_populated_from_args():
    """An explicit themes= argument is taken verbatim."""
    themes = ["light", "dark", "default"]
    w = CodeMirrorEditor(themes=themes)
    assert w.themes == themes
def test_render_widget(w):
    """Rendered HTML must carry the field name and the editor bootstrap JS."""
    html = w.render("code", "")
    assert 'name="code"' in html
    assert 'document.getElementById("id_code")' in html
def test_media_w_mode():
    """Media for a single mode includes core + overlay JS, mode JS, and core CSS,
    and every referenced static file must exist."""
    w = CodeMirrorEditor(options={"mode": "python"})
    js, css = w.media._js, w.media._css
    assert js == [
        "codemirror2/lib/codemirror.js",
        "codemirror2/addon/mode/overlay.js",
        "codemirror2/mode/python/python.js",
    ]
    assert css == {
        "screen": [
            "codemirror2/lib/codemirror.css"
        ]
    }
    for f in chain(js, css["screen"]):
        assert find(f)
def test_mode_extra_css():
    """Modes that ship their own stylesheet (e.g. tiki) add it to the media CSS."""
    w = CodeMirrorEditor(options={"mode": "tiki"})
    js, css = w.media._js, w.media._css
    assert "codemirror2/mode/tiki/tiki.css" in css["screen"]
    for f in chain(js, css["screen"]):
        assert find(f)
def test_staticfiles_exist():
    """
    In default configuration (with "all" modes), the static files should exist.
    """
    w = CodeMirrorEditor()
    js, css = w.media._js, w.media._css
    for f in chain(js, css["screen"]):
        assert find(f)
|
1677582
|
import os
import time
from data_util.log import logger
import torch as T
import rouge
from model import Model
from data_util import config, data
from data_util.batcher import Batcher, Example, Batch
from data_util.data import Vocab
from beam_search import beam_search
from train_util import get_enc_data
from rouge import Rouge
import argparse
import jieba
if config.cuda:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_cuda(tensor):
    """Move ``tensor`` to the GPU when CUDA is available; otherwise return it unchanged."""
    return tensor.cuda() if T.cuda.is_available() else tensor
class Evaluate(object):
    """Run beam-search decoding over a dataset and report ROUGE-1/2/L F-scores."""
    def __init__(self, data_path, opt, batch_size=config.batch_size):
        """
        :param data_path: path to the evaluation data shards.
        :param opt: argparse options; must provide ``load_model``.
        :param batch_size: decoding batch size.
        """
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(data_path,
                               self.vocab,
                               mode='eval',
                               batch_size=batch_size,
                               single_pass=True)
        self.opt = opt
        # Give the batcher's background threads time to fill the queue.
        time.sleep(5)
    def setup_valid(self):
        """Build the model and load the checkpoint named by ``opt.load_model``."""
        self.model = Model()
        self.model = get_cuda(self.model)
        if config.cuda:
            checkpoint = T.load(os.path.join(config.demo_model_path, self.opt.load_model))
        else:
            checkpoint = T.load(os.path.join(config.demo_model_path, self.opt.load_model), map_location='cpu')
        self.model.load_state_dict(checkpoint["model_dict"])
    def print_original_predicted(self, decoded_sents, ref_sents, article_sents,
                                 loadfile):
        """Dump (article, reference, decoded) triples to data/test_<model>.txt."""
        filename = "test_" + loadfile.split(".")[0] + ".txt"
        with open(os.path.join("data", filename), "w", encoding='utf-8') as f:
            for i in range(len(decoded_sents)):
                f.write("article: " + article_sents[i] + "\n")
                f.write("ref: " + ref_sents[i] + "\n")
                f.write("dec: " + decoded_sents[i] + "\n\n")
    def evaluate_batch(self, article):
        """Decode the whole dataset and log ROUGE scores.

        :param article: if True, also dump decoded outputs to disk via
            :meth:`print_original_predicted`.
        """
        self.setup_valid()
        batch = self.batcher.next_batch()
        start_id = self.vocab.word2id(data.START_DECODING)
        end_id = self.vocab.word2id(data.STOP_DECODING)
        unk_id = self.vocab.word2id(data.UNKNOWN_TOKEN)
        decoded_sents = []
        ref_sents = []
        article_sents = []
        rouge = Rouge()
        while batch is not None:
            enc_batch, enc_lens, enc_padding_mask, enc_batch_extend_vocab, extra_zeros, ct_e = get_enc_data(
                batch)
            with T.autograd.no_grad():
                enc_batch = self.model.embeds(enc_batch)
                enc_out, enc_hidden = self.model.encoder(enc_batch, enc_lens)
            # -----------------------Summarization-----------------------------
            with T.autograd.no_grad():
                pred_ids = beam_search(enc_hidden, enc_out, enc_padding_mask,
                                       ct_e, extra_zeros,
                                       enc_batch_extend_vocab, self.model,
                                       start_id, end_id, unk_id)
            for i in range(len(pred_ids)):
                decoded_words = data.outputids2words(pred_ids[i], self.vocab,
                                                     batch.art_oovs[i])
                # Degenerate decodes are replaced by a placeholder so ROUGE
                # does not choke on empty hypotheses.
                if len(decoded_words) < 2:
                    decoded_words = "xxx"
                else:
                    decoded_words = " ".join(decoded_words)
                decoded_sents.append(decoded_words)
                # BUG FIX: the original reassigned the ``article`` flag
                # parameter here (article = batch.original_articles[i]), so the
                # ``if article:`` check below tested the last article's text
                # instead of the caller's boolean. Append directly instead.
                ref_sents.append(batch.original_abstracts[i])
                article_sents.append(batch.original_articles[i])
            batch = self.batcher.next_batch()
        load_file = self.opt.load_model
        if article:
            self.print_original_predicted(decoded_sents, ref_sents,
                                          article_sents, load_file)
        scores = rouge.get_scores(decoded_sents, ref_sents)
        rouge_1 = sum([x["rouge-1"]["f"] for x in scores]) / len(scores)
        rouge_2 = sum([x["rouge-2"]["f"] for x in scores]) / len(scores)
        rouge_l = sum([x["rouge-l"]["f"] for x in scores]) / len(scores)
        logger.info(load_file + " rouge_1:" + "%.4f" % rouge_1 + " rouge_2:" + "%.4f" % rouge_2 + " rouge_l:" + "%.4f" % rouge_l)
class Demo(Evaluate):
    """Single-article summarization demo using the demo vocabulary/model."""
    def __init__(self, opt):
        # Deliberately does NOT call Evaluate.__init__: the demo needs no
        # Batcher, only the vocab and the loaded model.
        self.vocab = Vocab(config.demo_vocab_path, config.demo_vocab_size)
        self.opt = opt
        self.setup_valid()
    def evaluate(self, article, ref):
        """Summarize ``article`` and score the result against ``ref``.

        :return: dict with the decoded text and ROUGE-1/2/L F-scores.
        """
        dec = self.abstract(article)
        # BUG FIX: the original called rouge.get_scores on the imported
        # *module* (``import rouge``), which has no get_scores attribute;
        # use a Rouge scorer instance like evaluate_batch does.
        scores = Rouge().get_scores(dec, ref)
        rouge_1 = sum([x["rouge-1"]["f"] for x in scores]) / len(scores)
        rouge_2 = sum([x["rouge-2"]["f"] for x in scores]) / len(scores)
        rouge_l = sum([x["rouge-l"]["f"] for x in scores]) / len(scores)
        return {
            'dec': dec,
            'rouge_1': rouge_1,
            'rouge_2': rouge_2,
            'rouge_l': rouge_l
        }
    def abstract(self, article):
        """Beam-search decode a single (Chinese) article string.

        The input is segmented with jieba and wrapped in a batch of one, so
        the loop below effectively returns the single beam result.
        """
        start_id = self.vocab.word2id(data.START_DECODING)
        end_id = self.vocab.word2id(data.STOP_DECODING)
        unk_id = self.vocab.word2id(data.UNKNOWN_TOKEN)
        example = Example(' '.join(jieba.cut(article)), '', self.vocab)
        batch = Batch([example], self.vocab, 1)
        enc_batch, enc_lens, enc_padding_mask, enc_batch_extend_vocab, extra_zeros, ct_e = get_enc_data(
            batch)
        with T.autograd.no_grad():
            enc_batch = self.model.embeds(enc_batch)
            enc_out, enc_hidden = self.model.encoder(enc_batch, enc_lens)
            pred_ids = beam_search(enc_hidden, enc_out, enc_padding_mask, ct_e,
                                   extra_zeros, enc_batch_extend_vocab,
                                   self.model, start_id, end_id, unk_id)
        for i in range(len(pred_ids)):
            decoded_words = data.outputids2words(pred_ids[i], self.vocab,
                                                 batch.art_oovs[i])
            decoded_words = " ".join(decoded_words)
        return decoded_words
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--task",
                        type=str,
                        default="validate",
                        choices=["validate", "test", "demo"])
    # validate: sweep all checkpoints from --start_from onwards;
    # test/demo: use the single checkpoint given by --load_model.
    parser.add_argument("--start_from", type=str, default="0160000.tar")
    parser.add_argument("--load_model", type=str, default='0195000.tar')
    opt = parser.parse_args()
    if opt.task == "validate":
        saved_models = os.listdir(config.save_model_path)
        saved_models.sort()
        file_idx = saved_models.index(opt.start_from)
        saved_models = saved_models[file_idx:]
        for f in saved_models:
            opt.load_model = f
            eval_processor = Evaluate(config.valid_data_path, opt)
            eval_processor.evaluate_batch(False)
    elif opt.task == "test":
        eval_processor = Evaluate(config.test_data_path, opt)
        eval_processor.evaluate_batch(True)
    else:
        # Demo mode: summarize one hard-coded sample article.
        demo_processor = Demo(opt)
        logger.info(
            demo_processor.abstract(
                '就在对接货币基金的互联网理财产品诞生一周年的时候余额宝们的收益率破5已悄然成常态而数据显示今年截至6月6日市场上654只债券基金AB类份额分开计算平均收益率达451%且有248只债基产品收益率超过5%占比38%'
            ))
|
1677603
|
from monday.resources.base import BaseResource
from monday.query_joins import create_update_query, get_update_query, get_updates_for_item_query
class UpdateResource(BaseResource):
    """Thin wrapper over the monday.com GraphQL 'updates' endpoints."""
    def __init__(self, token):
        super().__init__(token)
    def create_update(self, item_id, update_value):
        """Post a new update (comment) on the given item."""
        query = create_update_query(item_id, update_value)
        return self.client.execute(query)
    def fetch_updates(self, limit, page=None):
        """Fetch updates account-wide, paginated by ``limit``/``page``."""
        query = get_update_query(limit, page)
        return self.client.execute(query)
    def fetch_updates_for_item(self, board_id, item_id, limit=100):
        """Fetch up to ``limit`` updates for a single item on a board."""
        query = get_updates_for_item_query(board=board_id, item=item_id, limit=limit)
        return self.client.execute(query)
|
1677607
|
import numpy as np
from attention_utils import get_activations, get_data
np.random.seed(1337) # for reproducibility
from keras.models import *
from keras.layers import Input, Dense, merge
input_dim = 32
def build_model():
    """Build the toy attention MLP: input -> softmax attention weights ->
    element-wise multiply -> Dense(64) -> sigmoid output.

    NOTE(review): this uses the legacy Keras 1.x API (``merge`` and
    ``Model(input=..., output=...)``), both removed in Keras 2 — confirm the
    pinned Keras version before reusing this code.
    """
    inputs = Input(shape=(input_dim,))
    # ATTENTION PART STARTS HERE
    attention_probs = Dense(input_dim, activation='softmax', name='attention_vec')(inputs)
    attention_mul = merge([inputs, attention_probs], output_shape=32, name='attention_mul', mode='mul')
    # ATTENTION PART FINISHES HERE
    attention_mul = Dense(64)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    model = Model(input=[inputs], output=output)
    return model
if __name__ == '__main__':
    N = 10000
    inputs_1, outputs = get_data(N, input_dim)
    m = build_model()
    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print(m.summary())
    m.fit([inputs_1], outputs, epochs=20, batch_size=64, validation_split=0.5)
    testing_inputs_1, testing_outputs = get_data(1, input_dim)
    # Attention vector corresponds to the second matrix.
    # The first one is the Inputs output.
    attention_vector = get_activations(m, testing_inputs_1,
                                       print_shape_only=True,
                                       layer_name='attention_vec')[0].flatten()
    print('attention =', attention_vector)
    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd
    pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
                                                                   title='Attention Mechanism as '
                                                                         'a function of input'
                                                                         ' dimensions.')
    plt.show()
|
1677612
|
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
from flearn.utils.tf_utils import process_grad, cosine_sim, softmax, norm_grad
from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch
class Server(BaseFedarated):
    """Fair federated meta-learning server (q-FFL reweighting + MAML)."""

    def __init__(self, params, learner, dataset):
        """Create the inner-loop SGD optimizer and delegate setup to the base."""
        print('Using fair fed maml to Train')
        self.inner_opt = tf.train.GradientDescentOptimizer(params['learning_rate'])
        super(Server, self).__init__(params, learner, dataset)

    def train(self):
        """Meta-train on non-held-out clients, then meta-test (fine-tune +
        evaluate) on the held-out clients and save the accuracies to CSV."""
        print('Training with {} workers ---'.format(self.clients_per_round))
        num_clients = len(self.clients)
        # Uniform client-sampling distribution.
        pk = np.ones(num_clients) * 1.0 / num_clients
        # One persistent batch generator per client so batches advance across rounds.
        train_batches = {}
        for c in self.clients:
            train_batches[c] = gen_batch(c.train_data, self.batch_size, self.num_rounds + 2)
        test_batches = {}
        for c in self.clients:
            test_batches[c] = gen_batch(c.test_data, self.batch_size, self.num_rounds + 2)
        print('Have generated training and testing batches for all devices/tasks...')
        for i in trange(self.num_rounds + 1, desc='Round: ', ncols=120):
            # only train on non-held-out clients
            indices, selected_clients = self.select_clients(round=i, pk=pk, held_out=self.held_out, num_clients=self.clients_per_round)
            Deltas = []
            hs = []
            selected_clients = selected_clients.tolist()
            for c in selected_clients:
                # communicate the latest model
                c.set_params(self.latest_model)
                weights_before = c.get_params()
                # solve minimization locally
                batch1 = next(train_batches[c])
                batch2 = next(test_batches[c])
                # NOTE(review): grads2/loss2 below are needed even when
                # with_maml is False -- confirm the second solve_sgd call is
                # meant to run unconditionally (as reconstructed here).
                if self.with_maml:
                    _, grads1, loss1 = c.solve_sgd(batch1)
                _, grads2, loss2 = c.solve_sgd(batch2)
                # q-FFL: weight each client's gradient by loss^q; hs holds the
                # matching normalisation terms used by aggregate2.
                Deltas.append([np.float_power(loss2 + 1e-10, self.q) * grad for grad in grads2[1]])
                hs.append(self.q * np.float_power(loss2+1e-10, (self.q-1)) * norm_grad(grads2[1]) + (1.0/self.learning_rate) * np.float_power(loss2+1e-10, self.q))
            self.latest_model = self.aggregate2(weights_before, Deltas, hs)
        print("###### finish meta-training, start meta-testing ######")
        test_accuracies = []
        initial_accuracies = []
        for c in self.clients[len(self.clients)-self.held_out:]:  # meta-test on the held-out tasks
            # start from the same initial model that is learnt using q-FFL + MAML
            c.set_params(self.latest_model)
            ct, cl, ns = c.test_error_and_loss()
            initial_accuracies.append(ct * 1.0/ns)
            # solve minimization locally
            for iters in range(self.num_fine_tune):  # run k-iterations of sgd
                batch = next(train_batches[c])
                _, grads1, loss1 = c.solve_sgd(batch)
            ct, cl, ns = c.test_error_and_loss()
            test_accuracies.append(ct * 1.0/ns)
        print("initial mean: ", np.mean(np.asarray(initial_accuracies)))
        print("initial variance: ", np.var(np.asarray(initial_accuracies)))
        print(self.output)
        print("personalized mean: ", np.mean(np.asarray(test_accuracies)))
        print("personalized variance: ", np.var(np.asarray(test_accuracies)))
        np.savetxt(self.output+"_"+"test.csv", np.asarray(test_accuracies), delimiter=",")
|
1677631
|
import sys
import re
import traceback
import os
import subprocess

from opsbro.util import lower_dict
from opsbro.collector import Collector

if os.name == 'nt':
    import opsbro.misc.wmi as wmi
class Memory(Collector):
    """Collect physical-memory and swap usage metrics for the local host.

    One branch per platform: Windows (WMI perf counters), Linux
    (/proc/meminfo), FreeBSD (sysinfo/sysctl/vmstat/swapinfo) and macOS
    (top/sysctl).  Returns a dict of metrics, or False on failure.

    NOTE(review): the FreeBSD and macOS branches reference ``pythonVersion``,
    which is not defined in this module -- they would raise NameError if
    reached; confirm where it is supposed to come from (it looks like a
    leftover from the original Datadog agent code this derives from).
    """

    def launch(self):
        logger = self.logger
        # logger.debug('getMemoryUsage: start')
        # ---- Windows: WMI table + perfmon counters --------------------------
        if os.name == 'nt':
            data = {}
            # get physical available memory
            _os = wmi.wmiaccess.get_table_where('Win32_OperatingSystem', {})[0]
            # TotalVisibleMemorySize is reported in KB -> convert to MB.
            data['Memory Total MBytes'] = total_memory = int(_os.TotalVisibleMemorySize) / 1024
            # (label, perfmon query, sampling delay) triples.
            counters = [
                (r'Memory Available MBytes', r'\Memory\Available MBytes', 0),
                (r'swap Input/sec', r'\Memory\Pages Input/sec', 100),
                (r'swap % usage', r'\Paging File(*)\% Usage', 0),
                (r'swap % usage peak', r'\Paging File(*)\% Usage Peak', 0),
            ]
            for c in counters:
                _label = c[0]
                _query = c[1]
                _delay = c[2]
                v = wmi.wmiaccess.get_perf_data(_query, unit='double', delay=_delay)
                data[_label] = v
            data['Memory Usage %'] = 100 * (total_memory - data['Memory Available MBytes']) / total_memory
            return data
        # If Linux like procfs system is present and mounted we use meminfo, else we use "native" mode (vmstat and swapinfo)
        if sys.platform.startswith('linux'):
            # logger.debug('getMemoryUsage: linux2')
            try:
                with open('/proc/meminfo', 'r') as meminfoProc:
                    lines = meminfoProc.readlines()
            except IOError as e:
                logger.error('getMemoryUsage: exception = %s', e)
                return False
            # logger.debug('getMemoryUsage: open success, parsing')
            regexp = re.compile(r'([0-9]+)')  # We run this several times so one-time compile now
            meminfo = {}
            # Loop through and extract the numerical values
            for line in lines:
                values = line.split(':')
                try:
                    # Picks out the key (values[0]) and makes a list with the value as the meminfo value (values[1])
                    # We are only interested in the KB data so regexp that out
                    match = re.search(regexp, values[1])
                    if match is not None:
                        meminfo[str(values[0])] = int(match.group(0))
                except IndexError:
                    break
            # logger.debug('getMemoryUsage: parsing, looped')
            # put all keys in lower case
            meminfo = lower_dict(meminfo)
            # NOTE(review): memData is initialised but never used afterwards;
            # the computed values are written back into meminfo instead.
            memData = {}
            memData['phys_free'] = 0
            memData['phys_used'] = 0
            memData['cached'] = 0
            memData['swap_free'] = 0
            memData['swap_used'] = 0
            # Phys
            try:
                # logger.debug('getMemoryUsage: formatting (phys)')
                physTotal = meminfo['memtotal']
                physFree = meminfo['memfree'] + meminfo['buffers'] + meminfo['cached'] + meminfo['sreclaimable']  # also count io cache and system one (slab)
                physUsed = 100 * (physTotal - float(physFree)) / physTotal
                # Convert to MB
                meminfo['phys_total'] = physTotal
                meminfo['phys_free'] = physFree
                meminfo['phys_used'] = physUsed
            # Stops the agent crashing if one of the meminfo elements isn't set
            except IndexError:
                logger.error('getMemoryUsage: formatting (phys) IndexError - cached, memtotal or memfree not present')
            except KeyError:
                logger.error('getMemoryUsage: formatting (phys) KeyError - cached, memtotal or memfree not present')
            logger.debug('getMemoryUsage: formatted (phys)')
            # Swap
            try:
                # logger.debug('getMemoryUsage: formatting (swap)')
                swapTotal = meminfo['swaptotal']
                swapFree = meminfo['swapfree']
                if swapTotal == 0:
                    swapUsed = 0
                else:
                    swapUsed = 100 * (swapTotal - float(swapFree)) / swapTotal
                meminfo['swap_free'] = swapFree
                meminfo['swap_used'] = swapUsed
            # Stops the agent crashing if one of the meminfo elements isn't set
            except IndexError:
                logger.error('getMemoryUsage: formatting (swap) IndexError - SwapTotal or SwapFree not present')
            except KeyError:
                logger.error('getMemoryUsage: formatting (swap) KeyError - SwapTotal or SwapFree not present')
            logger.debug('getMemoryUsage: formatted (swap), completed, returning')
            return meminfo
        # ---- FreeBSD: prefer sysinfo, fall back to sysctl + vmstat ----------
        elif sys.platform.find('freebsd') != -1:
            logger.debug('getMemoryUsage: freebsd (native)')
            physFree = None
            try:
                try:
                    logger.debug('getMemoryUsage: attempting sysinfo')
                    proc = subprocess.Popen(['sysinfo', '-v', 'mem'], stdout=subprocess.PIPE, close_fds=True)
                    sysinfo = proc.communicate()[0]
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception as e:
                            logger.debug('Process already terminated')
                    sysinfo = sysinfo.split('\n')
                    regexp = re.compile(r'([0-9]+)')  # We run this several times so one-time compile now
                    for line in sysinfo:
                        parts = line.split(' ')
                        if parts[0] == 'Free':
                            logger.debug('getMemoryUsage: parsing free')
                            for part in parts:
                                match = re.search(regexp, part)
                                if match != None:
                                    physFree = match.group(0)
                                    logger.debug('getMemoryUsage: sysinfo: found free %s', physFree)
                        if parts[0] == 'Active':
                            logger.debug('getMemoryUsage: parsing used')
                            for part in parts:
                                match = re.search(regexp, part)
                                if match != None:
                                    physUsed = match.group(0)
                                    logger.debug('getMemoryUsage: sysinfo: found used %s', physUsed)
                        if parts[0] == 'Cached':
                            logger.debug('getMemoryUsage: parsing cached')
                            for part in parts:
                                match = re.search(regexp, part)
                                if match != None:
                                    cached = match.group(0)
                                    logger.debug('getMemoryUsage: sysinfo: found cached %s', cached)
                except OSError as e:
                    logger.debug('getMemoryUsage: sysinfo not available')
                except Exception as e:
                    logger.error('getMemoryUsage: exception = %s', traceback.format_exc())
            finally:
                if int(pythonVersion[1]) >= 6:
                    try:
                        proc.kill()
                    except Exception as e:
                        logger.debug('Process already terminated')
            # Fallback path: sysinfo did not yield a value.
            if physFree == None:
                logger.info(
                    'getMemoryUsage: sysinfo not installed so falling back on sysctl. sysinfo provides more accurate memory info so is recommended. http://www.freshports.org/sysutils/sysinfo')
                try:
                    try:
                        logger.debug('getMemoryUsage: attempting Popen (sysctl)')
                        proc = subprocess.Popen(['sysctl', '-n', 'hw.physmem'], stdout=subprocess.PIPE, close_fds=True)
                        physTotal = proc.communicate()[0]
                        if int(pythonVersion[1]) >= 6:
                            try:
                                proc.kill()
                            except Exception as e:
                                logger.debug('Process already terminated')
                        logger.debug('getMemoryUsage: attempting Popen (vmstat)')
                        proc = subprocess.Popen(['vmstat', '-H'], stdout=subprocess.PIPE, close_fds=True)
                        vmstat = proc.communicate()[0]
                        if int(pythonVersion[1]) >= 6:
                            try:
                                proc.kill()
                            except Exception as e:
                                logger.debug('Process already terminated')
                    except Exception as e:
                        logger.error('getMemoryUsage: exception = %s', traceback.format_exc())
                        return False
                finally:
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception as e:
                            logger.debug('Process already terminated')
                logger.debug('getMemoryUsage: Popen success, parsing')
                # First we parse the information about the real memory
                lines = vmstat.split('\n')
                physParts = lines[2].split(' ')
                physMem = []
                # We need to loop through and capture the numerical values
                # because sometimes there will be strings and spaces
                for k, v in enumerate(physParts):
                    if re.match(r'([0-9]+)', v) != None:
                        physMem.append(v)
                physTotal = int(physTotal.strip()) / 1024  # physFree is returned in B, but we need KB so we convert it
                physFree = int(physMem[4])
                physUsed = int(physTotal - physFree)
                logger.debug('getMemoryUsage: parsed vmstat')
                # Convert everything to MB
                physUsed = int(physUsed) / 1024
                physFree = int(physFree) / 1024
                cached = 'NULL'
            #
            # Swap memory details
            #
            logger.debug('getMemoryUsage: attempting Popen (swapinfo)')
            try:
                try:
                    proc = subprocess.Popen(['swapinfo', '-k'], stdout=subprocess.PIPE, close_fds=True)
                    swapinfo = proc.communicate()[0]
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception as e:
                            logger.debug('Process already terminated')
                except Exception as e:
                    logger.error('getMemoryUsage: exception = %s', traceback.format_exc())
                    return False
            finally:
                if int(pythonVersion[1]) >= 6:
                    try:
                        proc.kill()
                    except Exception as e:
                        logger.debug('Process already terminated')
            lines = swapinfo.split('\n')
            swapUsed = 0
            swapFree = 0
            # Skip the header row; sum used/free across all swap devices.
            for index in range(1, len(lines)):
                swapParts = re.findall(r'(\d+)', lines[index])
                if swapParts != None:
                    try:
                        # (-1) is total, (-2) free and (-3) used (KB -> MB).
                        swapUsed += int(swapParts[len(swapParts) - 3]) / 1024
                        swapFree += int(swapParts[len(swapParts) - 2]) / 1024
                    except IndexError as e:
                        pass
            logger.debug('getMemoryUsage: parsed swapinfo, completed, returning')
            return {'physUsed': physUsed, 'physFree': physFree, 'swapUsed': swapUsed, 'swapFree': swapFree,
                    'cached' : cached}
        # ---- macOS: parse `top` and `sysctl vm.swapusage` -------------------
        elif sys.platform == 'darwin':
            logger.debug('getMemoryUsage: darwin')
            try:
                try:
                    logger.debug('getMemoryUsage: attempting Popen (top)')
                    proc = subprocess.Popen(['top', '-l 1'], stdout=subprocess.PIPE, close_fds=True)
                    top = proc.communicate()[0]
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception as e:
                            logger.debug('Process already terminated')
                    logger.debug('getMemoryUsage: attempting Popen (sysctl)')
                    proc = subprocess.Popen(['sysctl', 'vm.swapusage'], stdout=subprocess.PIPE, close_fds=True)
                    sysctl = proc.communicate()[0]
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception as e:
                            logger.debug('Process already terminated')
                except Exception as e:
                    logger.error('getMemoryUsage: exception = %s', traceback.format_exc())
                    return False
            finally:
                if int(pythonVersion[1]) >= 6:
                    try:
                        proc.kill()
                    except Exception as e:
                        logger.debug('Process already terminated')
            logger.debug('getMemoryUsage: Popen success, parsing')
            # Deal with top
            lines = top.split('\n')
            # NOTE(review): self.topIndex is not set anywhere in this class --
            # presumably provided by the Collector base class or configuration;
            # confirm before relying on this branch.
            physParts = re.findall(r'([0-9]\d+)', lines[self.topIndex])
            logger.debug('getMemoryUsage: parsed top')
            # Deal with sysctl
            swapParts = re.findall(r'([0-9]+\.\d+)', sysctl)
            logger.debug('getMemoryUsage: parsed sysctl, completed, returning')
            return {'physUsed': physParts[3], 'physFree': physParts[4], 'swapUsed': swapParts[1],
                    'swapFree': swapParts[2], 'cached': 'NULL'}
        else:
            self.set_not_eligible('This system is not managed by this collector.')
            return False
|
1677632
|
import PySimpleGUI as sg
import os, re, subprocess
from flask import Flask, render_template, flash, redirect, url_for, request, session
from classes.forms import RegistrationForm
from classes.functions import Main
import datetime, textwrap
from configparser import ConfigParser
from multiprocessing import Process
import webbrowser
import threading
# Load persisted GUI/web/SQL settings from config.ini next to the app.
configPath = "config.ini"
config = ConfigParser()
config.read(configPath)
def validipv4(ip):
    """Return True if *ip* parses as an IPv4 address.

    Accepts dotted decimal (1-255, no leading zeros), hex (0x..) and octal
    (leading 0) octets, partially-dotted forms, and flat 32-bit
    decimal/hex/octal values -- mirroring what inet_aton historically accepts.
    """
    pattern = re.compile(r"""
        ^
        (?:
          # Dotted variants:
          (?:
            # Decimal 1-255 (no leading 0's)
            [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
          |
            0x0*[0-9a-f]{1,2}  # Hexadecimal 0x0 - 0xFF (possible leading 0's)
          |
            0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
          )
          (?:                  # Repeat 0-3 times, separated by a dot
            \.
            (?:
              [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
            |
              0x0*[0-9a-f]{1,2}
            |
              0+[1-3]?[0-7]{0,2}
            )
          ){0,3}
        |
          0x0*[0-9a-f]{1,8}    # Hexadecimal notation, 0x0 - 0xffffffff
        |
          0+[0-3]?[0-7]{0,10}  # Octal notation, 0 - 037777777777
        |
          # Decimal notation, 1-4294967295:
          429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
          42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
          4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
        )
        $
        """, re.VERBOSE | re.IGNORECASE)
    return pattern.match(ip) is not None
# Tooltip text mapping each Windows ODBC driver name to the SQL Server
# releases it supports (shown on the driver drop-down below).
sqlRefference = "Windows Drivers Reference\n" \
                "{SQL Server} - released with SQL Server 2000\n" \
                "{SQL Native Client} - released with SQL Server 2005 (also known as version 9.0)\n" \
                "{SQL Server Native Client 10.0} - released with SQL Server 2008\n" \
                "{SQL Server Native Client 11.0} - released with SQL Server 2012\n" \
                "{ODBC Driver 11 for SQL Server} - supports SQL Server 2005 through 2014\n" \
                "{ODBC Driver 13 for SQL Server} - supports SQL Server 2005 through 2016\n" \
                "{ODBC Driver 13.1 for SQL Server} - supports SQL Server 2008 through 2016\n" \
                "{ODBC Driver 17 for SQL Server} - supports SQL Server 2008 through 2017"
# Layout for the "SQL Settings" tab; defaults are read from config.ini.
sqlConnect = [
    [sg.Text("SQL Driver", size=(10,1)), sg.DropDown(
        enable_events=True,
        readonly=True,
        font=10,
        default_value=config.get("sqlConfig", "sql_driver"),
        size=(24,1),
        tooltip=sqlRefference, pad=(0,5),
        values=["{SQL Server}",
                "{SQL Native Client}",
                "{SQL Server Native Client 10.0}",
                "{SQL Server Native Client 11.0}",
                "{ODBC Driver 11 for SQL Server}",
                "{ODBC Driver 13 for SQL Server}",
                "{ODBC Driver 13.1 for SQL Server}",
                "{ODBC Driver 17 for SQL Server}"])],
    [sg.Text("Instance",size=(10,1),pad=(0,5) ), sg.InputText(default_text=(config.get("sqlConfig", "SQL_SERVER"))), ],
    [sg.Text("Port", size=(10,1) ,pad=(0,5) ),sg.InputText(default_text=(config.get("sqlConfig", "SQL_PORT")))],
    [sg.Text("Username", size=(10,1),pad=(0,5)),sg.InputText( default_text=(config.get("sqlConfig", "SQL_USER")))],
    [sg.Text("Password", size=(10,1),pad=(0,5)), sg.InputText(password_char="*",default_text=(config.get("sqlConfig", "SQL_PASS")))],
    [sg.Text("Database", size=(10,1),pad=(0,5)), sg.InputText(default_text=(config.get("sqlConfig", "SQL_DBASE")))]]
def isPortFree(host, port):
    """Return True when TCP (host, port) can be bound, i.e. the port is free.

    :param host: interface/IP to test the bind on
    :param port: TCP port number
    :return: True if binding succeeded, False if the port is in use/invalid

    Bug fixed: the original returned ``True`` from a ``finally`` block, which
    swallowed both the ``False`` result and any exception, so every port was
    reported as free; the probe socket was also never closed.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))
    except socket.error:
        return False
    finally:
        # Always release the probe socket, whatever bind() did.
        s.close()
    return True
def ExecuteCommandSubprocess(command, wait=False, quiet=True, *args):
    """Run *command* (plus *args) through the shell and return its stdout.

    :param command: command line to execute
    :param wait: when True, block until the process exits and capture output
    :param quiet: when False (and wait=True), echo stdout/stderr to console
    :param args: extra arguments appended to the command
    :return: decoded stdout when wait=True; '' otherwise or on error

    Bug fixed: the original only assigned ``out`` inside the ``wait`` branch
    but unconditionally executed ``return out.decode(...)``, so calling with
    the default wait=False raised UnboundLocalError.
    """
    out = b''
    try:
        sp = subprocess.Popen([command, *args], shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if wait:
            out, err = sp.communicate()
            if not quiet:
                if out:
                    print(out.decode("utf-8"))
                if err:
                    print(err.decode("utf-8"))
    except Exception as e:
        print('Exception encountered running command ', e)
        return ''
    return out.decode('utf-8')
def listThemes():
    """Return the names of every entry in the templates/themes/ directory."""
    return os.listdir('templates/themes/')
def seasons(vars):
    """Map a season label to its item hex-string width (unknown -> 20)."""
    widths = {
        "Season 0-1": 20,
        "Season 2-8": 32,
        "Season 9-13": 64,
    }
    return widths.get(vars, 20)
def season_reverse(value):
    """Map an item hex-string width back to its season label (unknown -> 'Season 0-1')."""
    labels = {
        20: "Season 0-1",
        32: "Season 2-8",
        64: "Season 9-13",
    }
    return labels.get(value, "Season 0-1")
# Layout for the "WEB Settings" tab; defaults are read from config.ini.
webSettings = [
    [sg.Text("Server Name", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "server_name"))), ],
    [sg.Text("Secret Key", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "secret_key")))],
    [sg.Text("Season", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=season_reverse(config.getint("webConfig", "item_hex_len")),
                                                              values=["Season 0-1", "Season 2-8", "Season 9-13"], readonly=True)],
    [sg.Text("Web Debug", size=(10, 1), pad=(0, 5)), sg.Checkbox(text="", default=config.getboolean("webConfig", "web_debug"))],
    [sg.Text("Web IP", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("webConfig", "web_ip")))],
    [sg.Text("Web PORT", size=(10, 1), pad=(0, 5)), sg.InputText(default_text=(config.getint("webConfig", "web_port")))],
    [sg.Text("Web Theme", size=(10, 1), pad=(0, 5)), sg.DropDown(default_value=config.get("webConfig", "web_theme"),values=listThemes(), readonly=True)],
    [sg.Text("Theme Switcher", size=(10, 1), pad=(0, 5)), sg.Checkbox(text="", default=config.getboolean("webConfig", "theme_switcher"))]
]
# Two-tab window with start/stop controls; element keys are auto-numbered in
# layout order, which is what thegui()'s values[0..13] indexing relies on.
layout = [[sg.TabGroup([[sg.Tab('SQL Settings', sqlConnect), sg.Tab('WEB Settings', webSettings)]])],
          [sg.Button('Start Server', disabled=False,auto_size_button=False),
           sg.Button('Stop Server', disabled=True, auto_size_button=False)]
          ]
window = sg.Window('DTpyWeb GUI v2', icon="static/default-images/favicon.ico",
                   auto_size_text=False,
                   default_element_size=(30, 1),
                   return_keyboard_events=True,
                   use_default_focus=False,
                   text_justification="left"
                   ).Layout(layout).Finalize()
def runWeb():
    """Build and run the Flask front-end using settings from config.ini.

    Blocking call (app.run); thegui() launches it on a background thread.
    """
    configPath = "config.ini"
    config = ConfigParser()
    config.read(configPath)
    main = Main()
    app = Flask(__name__)
    app.config['SECRET_KEY'] = config.get("webConfig", "secret_key")
    @app.context_processor
    def _processor():
        # Values injected into every template render.
        # NOTE(review): "%d.m.%Y" contains a literal 'm' -- "%d.%m.%Y" was
        # probably intended; confirm before changing the output format.
        return dict(
            date_now=datetime.datetime.now().strftime("%d.m.%Y %H:%M:%S"),
            author="© 2020 r00tme - DTpyWeb. All rights reserved.",
            theme=main.themes_check()[0],
            theme_switch_form = main.themes_check()[1],
            theme_switch_active = config.getboolean("webConfig", "theme_switcher"),
            top10=main.rankings(" TOP 10 "),
            header="header.html",
            server=config.get("webConfig", "server_name"),
        )
    # NOTE(review): the bare '/userinfo' route maps to a view that requires a
    # `path` argument, so GET /userinfo raises a TypeError -- confirm whether
    # a default (path='') was intended.
    @app.route('/userinfo', methods=['GET'])
    @app.route('/userinfo<path:path>', methods=['GET', 'POST'])
    def users_info(path):
        main.theme_switcher()
        if main.user_exist(path[1:], False):
            item_image = []
            item_info = []
            # 12 item slots; each is a fixed-width hex chunk of the raw item
            # string (width = item_hex_len, which depends on the season).
            for i in range(0, 12):
                user_items = textwrap.wrap(main.return_items(path[1:]), config.getint("webConfig", "item_hex_len"))[i]
                if main.item_info(user_items):
                    item_image.append(main.item_info(user_items)[1])
                    item_info.append(main.item_info(user_items)[0])
                else:
                    item_image.append("")
                    item_info.append("")
            return render_template("modules/userinfo.html", title="Character Information Page",
                                   item_info=item_info, item_image=item_image, character=path[1:])
        else:
            flash(r'This user does not exist', 'error')
            return redirect(url_for('home'))
    @app.route('/', methods=['GET', 'POST'])
    @app.route('/home', methods=['GET', 'POST'])
    def home():
        # TODO news System
        # * This route will be removed after the news system is completed
        main.login()
        main.theme_switcher()
        stripin = main.themes_check()[0].split('/')
        return render_template("%s/%s/home.html" % (stripin[0], stripin[1]), title="News")
    # Static-ish pages share one view; the template is chosen from the URL path.
    @app.route('/download', methods=['GET', 'POST'])
    @app.route('/about', methods=['GET', 'POST'])
    @app.route('/rules', methods=['GET', 'POST'])
    @app.route('/rankings', methods=['GET', 'POST'])
    def main_pages():
        main.login()
        main.theme_switcher()
        var = config.get("dl_links", "dl_links")
        cors = str(var).split("\n")
        return render_template("modules/" + request.path + ".html", title=u"%s" % request.path[1:].capitalize(),
                               download_links=cors)
    # NOTE(review): '/buy-credits' is registered twice below; the duplicate is
    # harmless to Flask but one of them was probably meant to be another page.
    @app.route('/buy-credits', methods=['GET', 'POST'])
    @app.route('/my-auction', methods=['GET', 'POST'])
    @app.route('/buy-credits', methods=['GET', 'POST'])
    @app.route('/my-account', methods=['GET', 'POST'])
    @app.route('/my-characters', methods=['GET', 'POST'])
    @app.route('/vip-modules', methods=['GET', 'POST'])
    @app.route('/my-market', methods=['GET', 'POST'])
    def user_pages():
        # Account pages require a logged-in session.
        main.theme_switcher()
        if 'username' not in session:
            flash(r'You do not have an access to this page', 'error')
            return redirect(url_for('home'))
        else:
            return render_template("modules/user/" + request.path + ".html",
                                   title=u"%s %s Page" % (request.path.split("-")[0][1:].title(),
                                                          request.path.split("-")[1].title()))
    @app.route('/logout')
    def logout():
        session.pop('username', None)
        flash('You were logged out', 'info')
        return redirect('/home')
    @app.route('/register', methods=['GET', 'POST'])
    def register():
        main.theme_switcher()
        form = RegistrationForm()
        if form.validate_on_submit():
            main.register(
                form.username.data,
                form.password.data,
                form.email.data,
                form.question.data,
                form.answer.data)
        return render_template("modules/register.html", title="Register", form=form)
    @app.errorhandler(404)
    def page_not_found(e):
        return render_template("modules/404.html", title="Page does not exist"), 404
    from flask import request
    def shutdown_server():
        # Werkzeug-only shutdown hook, used by the /shutdown endpoint below.
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
    @app.route('/shutdown', methods=['POST'])
    def shutdown():
        shutdown_server()
        return 'Server shutting down...'
    app.run(debug=False, host=config.get("webConfig", "web_ip"),
            port=config.getint("webConfig", "web_port"))
def thegui():
    """Main GUI event loop: persists form values to config.ini on every event
    and starts/stops the embedded Flask web server."""
    while True:
        # timeout=0 -> poll; the loop also runs when nothing happened.
        event, values = window.Read(timeout=0)
        if event is None or event == "Exit":  # always, always give a way out!
            break
        if event is not sg.TIMEOUT_KEY:
            # values is keyed by auto-numbered element indexes (0..13),
            # matching the order the inputs appear in the two tabs.
            config.set("sqlConfig", str("sql_driver"), str(values[0]))
            config.set("sqlConfig", str("sql_server"), str(values[1]))
            if values[2].isdigit():
                config.set("sqlConfig", "sql_port", values[2])
            else:
                sg.Popup("Type a valid and not in use port number")
                # NOTE(review): FindElement expects an element *key*, but it is
                # given the element's current value -- FindElement(2) was
                # probably intended; same pattern below for values[10]/[11].
                window.FindElement(values[2]).Update(values[2][:-1])
            config.set("sqlConfig", str("sql_user"), str(values[3]))
            config.set("sqlConfig", str("sql_pass"), str(values[4]))
            config.set("sqlConfig", str("sql_dbase"), str(values[5]))
            config.set("webConfig", str("server_name"), str(values[6]))
            config.set("webConfig", str("secret_key"), str(values[7]))
            config.set("webConfig", str("item_hex_len"), str(seasons(values[8])))
            config.set("webConfig", str("web_debug"), str(values[9]))
            if validipv4(values[10]):
                config.set("webConfig", str("web_ip"), str(values[10]))
            else:
                sg.Popup("Type a valid IP address")
                window.FindElement(values[10]).Update(values[10][:-1])
            if values[11].isdigit():
                config.set("webConfig", "web_port", values[11])
            else:
                sg.Popup("Type a valid and not in use port number")
                window.FindElement(values[11]).Update(values[11][:-1])
            config.set("webConfig", str("web_theme"), str(values[12]))
            config.set("webConfig", str("theme_switcher"), str(values[13]))
            # Persist every change immediately.
            with open(configPath, "w+") as f:
                config.write(f)
        if event == "Start Server":
            window.Element('Start Server').Update(disabled=True)
            window.Element('Stop Server').Update(disabled=False)
            if isPortFree(values[10], int(values[11])):
                threading.Thread(target=runWeb).start()
                os.startfile("http://" + config.get("webConfig","web_ip") + ":" + config.get("webConfig","web_port"))
            else:
                sg.Popup("Port %s is already in use, \nchange the port or close the program that use it" % values[10])
        if event == "Stop Server":
            # NOTE(review): stopping kills every python.exe on the machine and
            # restarts the packaged exe -- heavy-handed; confirm intended.
            os.system('taskkill /f /im DTpyWeb.exe')
            os.system('taskkill /f /im python.exe')
            os.system('start DTpyWeb.exe')
# Script entry point: run the configuration GUI event loop.
if __name__ == '__main__':
    thegui()
|
1677694
|
from pywavefront import *
from PyVMF import *
def obj_to_solids(filename: str, material_path: str = "", scale=64):
    """
    Turns an .obj file to VMF solids, **BETA** it's very finicky and remember to invert normals
    :param filename: The name of the .obj file with path (ex: "test/wall.obj")
    :param material_path: The path to the .VMT's using same names (from the game materials folder, ex: "custom/ramp/")
    :param scale: The scale applied to the entire .obj
    :yield: one PyVMF Solid per mesh in the .obj

    Bug fixed: the original indexed ``material_path[-1]`` unconditionally,
    which raised IndexError whenever the default empty string was used.
    """
    # Only normalize a non-empty material path with a trailing slash.
    if material_path and not material_path.endswith("/"):
        material_path += "/"
    scene = Wavefront(filename, collect_faces=True)
    for mesh in scene.mesh_list:
        solid = Solid()
        # Every other face is used -- presumably triangulation emits two
        # coplanar triangles per quad and a VMF side needs only one plane;
        # TODO confirm against the exporter's face ordering.
        for face in mesh.faces[::2]:
            side = Side()
            for i, vertex in enumerate(face):
                vs = str(scene.vertices[vertex])
                v = Convert.string_to_vertex(vs)
                v.multiply(scale)
                side.plane[i] = v
            solid.add_sides(side)
        solid.editor = Editor()
        # Strips the last three characters of the material name -- presumably
        # a numeric suffix the exporter appends; confirm.
        solid.set_texture(material_path + mesh.materials[0].name[0:-3])
        solid.rotate_x(Vertex(), 90)
        yield solid
|
1677698
|
from distutils.core import setup
# Use the project README as the long description shown on PyPI.
with open('README.rst') as f:
    long_description = f.read()
setup(
    name='ipython_pytest',
    version='0.0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    py_modules=['ipython_pytest'],
    url='https://github.com/akaihola/ipython_pytest',
    classifiers=['Development Status :: 4 - Beta',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved :: BSD License',
                 'Programming Language :: Python :: 3.5',
                 'Operating System :: OS Independent'],
    # NOTE(review): 'license' normally names the license (e.g. 'BSD'), not a
    # file path; confirm this value was intentional.
    license='README.rst',
    description='IPython extension to run pytest for the current cell.',
    long_description=long_description,
)
|
1677734
|
from jinja2 import Template
import os
from optparse import OptionParser
import logging
import csv
import time
import re
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
from itertools import zip_longest
from livereload import Server
VERSION = '0.1.0'
# Name of the generated HTML file that aggregates all rendered cards.
RENDERED_CARDS_FILE = "index.html"
class CardRenderer:
    """Renders a CSV of card data through Jinja2 templates into one HTML page.

    Expects, inside ``input_path``:
      - ``<prefix>.csv``          card data, one row per card
      - ``<prefix>.html.jinja2``  template for a single card
      - ``<prefix>.header.html``  optional custom header markup
    and writes every rendered card into ``index.html``.
    """

    def __init__(self, input_path, prefix):
        self.prefix = prefix
        self.input_path = input_path
        self.csv_card_path = self.get_path("csv")
        self.custom_header_path = self.get_path("header.html")
        self.single_card_template_path = self.get_path("html.jinja2")
        # The outer page template ships alongside this module, not the assets.
        self.cards_template_path = os.path.join(os.path.dirname(__file__), 'cards.html.jinja2')
        self.all_cards_rendered_path = os.path.join(input_path, RENDERED_CARDS_FILE)

    def get_path(self, extension):
        """Return the asset path '<input_path>/<prefix>.<extension>'."""
        return os.path.join(self.input_path, "{}.{}".format(self.prefix, extension))

    @staticmethod
    def _copies_of(card_data):
        """Number of copies to render for a row; missing/non-numeric -> 1.

        Bug fixed: the original validated 'num_cards' with a digits-only regex
        and then called int(), which crashed on mixed strings like '2x' and
        raised TypeError when the column was absent (None).
        """
        try:
            return int(card_data.get('num_cards'))
        except (TypeError, ValueError):
            return 1

    def render_cards(self):
        """Render every CSV row through the card template into index.html."""
        # I've noticed that when saving the CSV file
        # the server reloads an empty page
        # unless I add a small sleep before attempting to read everything
        time.sleep(0.5)
        # load the csv file
        cards_data = []
        with open(self.csv_card_path, "r", encoding='utf-8-sig') as csvfile:
            reader = csv.DictReader(csvfile, dialect='custom_delimiter')
            for row in reader:
                cards_data.append(row)
        rendered_cards = []
        # load the single card template
        with open(self.single_card_template_path, "r") as template_file:
            template = Template(template_file.read())
        # render the template with card data
        for card_data in cards_data:
            # rows flagged ignore=true are skipped entirely
            if str(card_data.get('ignore', "false")).lower() == "true":
                continue
            rendered = template.render(
                card_data,
                __card_data=card_data,
                __time=str(time.time())
            )
            # 'num_cards' controls how many copies of this card are emitted.
            for _ in range(self._copies_of(card_data)):
                rendered_cards.append(rendered)
        # Load custom header html if it exists
        custom_header = None
        if os.path.exists(self.custom_header_path):
            with open(self.custom_header_path, "r") as f:
                custom_header = f.read()
        # render the cards template with all rendered cards
        with open(self.cards_template_path, "r") as cards_template_file:
            template = Template(cards_template_file.read())
        with open(self.all_cards_rendered_path, "w") as all_cards_rendered_file:
            all_cards_rendered_file.write(
                template.render(
                    rendered_cards=rendered_cards,
                    prefix=self.prefix,
                    custom_header=custom_header
                )
            )
class RenderingEventHandler(FileSystemEventHandler):
    """Watchdog handler that re-renders the cards on any filesystem change,
    ignoring events for the rendered output file itself (those would
    otherwise trigger an endless render loop)."""

    def __init__(self, card_renderer):
        self.card_renderer = card_renderer

    def on_any_event(self, event):
        # Render for every change except the file we write ourselves.
        if event.src_path != self.card_renderer.all_cards_rendered_path:
            self.card_renderer.render_cards()
def parse_options():
    """Define and parse the command-line interface.

    Returns optparse's (options, args) pair; defaults render the current
    working directory with prefix '_card', comma delimiter, on 0.0.0.0:8800.
    """
    parser = OptionParser(
        usage="usage: %prog [options]",
        version="%prog {}".format(VERSION)
    )
    parser.add_option("-p", "--path",
                      help="path to assets",
                      dest="path",
                      default=os.getcwd(),
                      metavar="PATH")
    parser.add_option("-x", "--prefix",
                      help="filename prefix, example _card<.ext>",
                      dest="prefix",
                      default="_card",
                      metavar="PREFIX")
    parser.add_option("-d", "--delimiter",
                      help="delimiter used in the csv file, default: , (comma)",
                      dest="delimiter",
                      default=",",
                      metavar="DELIMITER")
    parser.add_option("--port",
                      help="port to use for live reloaded page",
                      dest="port",
                      type="int",
                      default=8800,
                      metavar="PORT")
    parser.add_option("--address",
                      help="host address to bind to",
                      dest="host_address",
                      default="0.0.0.0",
                      metavar="ADDRESS")
    return parser.parse_args()
def main():
    """Wire up file watching plus the live-reload server, rendering once at start."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    (options, args) = parse_options()
    port = options.port
    assets_path = options.path
    file_prefix = options.prefix
    host_address = options.host_address
    # Register the dialect globally so CardRenderer.render_cards can use it.
    csv.register_dialect('custom_delimiter', delimiter=options.delimiter)
    card_renderer = CardRenderer(assets_path, file_prefix)
    observer = Observer()
    observer.schedule(LoggingEventHandler(), assets_path, recursive=True)
    observer.schedule(RenderingEventHandler(card_renderer), assets_path, recursive=True)
    card_renderer.render_cards()
    observer.start()
    server = Server()
    server.watch(card_renderer.all_cards_rendered_path)
    # serve() blocks until interrupted; the observer is shut down afterwards.
    server.serve(root=assets_path, port=port, host=host_address)
    observer.stop()
    observer.join()
if __name__ == "__main__":
    main()
|
1677749
|
from os.path import join
from os import listdir
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
# Visualise ImageNet VID training annotations: walk every sub-set/video,
# parse each frame's XML and (optionally) draw the track bounding boxes.
# When True, draw each annotated bounding box and show the frame with OpenCV.
visual = True
# One random BGR colour per track id (supports up to 90 tracks per video).
color_bar = np.random.randint(0, 255, (90, 3))
VID_base_path = './ILSVRC2015'
ann_base_path = join(VID_base_path, 'Annotations/VID/train/')
img_base_path = join(VID_base_path, 'Data/VID/train/')
# The VID training annotations are split across sub-folders a-e.
sub_sets = sorted({'a', 'b', 'c', 'd', 'e'})
for sub_set in sub_sets:
    sub_set_base_path = join(ann_base_path, sub_set)
    videos = sorted(listdir(sub_set_base_path))
    for vi, video in enumerate(videos):
        print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos)))
        video_base_path = join(sub_set_base_path, video)
        xmls = sorted(glob.glob(join(video_base_path, '*.xml')))
        for xml in xmls:
            # (an unused `f = dict()` local was removed here)
            xmltree = ET.parse(xml)
            size = xmltree.findall('size')[0]
            frame_sz = [int(it.text) for it in size]
            objects = xmltree.findall('object')
            if visual:
                # The JPEG frame path mirrors the annotation path.
                # NOTE(review): str.replace('xml', 'JPEG') also rewrites any
                # 'xml' substring elsewhere in the path -- confirm the dataset
                # layout never contains one.
                im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
            for object_iter in objects:
                trackid = int(object_iter.find('trackid').text)
                bndbox = object_iter.find('bndbox')
                bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),
                        int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
                if visual:
                    pt1 = (int(bbox[0]), int(bbox[1]))
                    pt2 = (int(bbox[2]), int(bbox[3]))
                    cv2.rectangle(im, pt1, pt2, color_bar[trackid].tolist(), 3)
            if visual:
                cv2.imshow('img', im)
                cv2.waitKey(1)
print('done!')
|
1677771
|
import demistomock as demisto
import json
import pytest
from CommonServerPython import entryTypes
# Extend the demisto entry types with the custom 'warning' type used in tests.
entryTypes['warning'] = 11
# --- Canned identifiers shared by the fixtures and tests below -------------
bot_id: str = '9bi5353b-md6a-4458-8321-e924af433amb'
tenant_id: str = 'pbae9ao6-01ql-249o-5me3-4738p3e1m941'
team_id: str = '19:<EMAIL>'
team_aad_id: str = '7d8efdf8-0c5a-42e3-a489-5ef5c3fc7a2b'
team_name: str = 'The-A-Team'
service_url: str = 'https://smba.trafficmanager.net/emea'
# Channels already mirrored to XSOAR investigations.
mirrored_channels: list = [
    {
        'channel_id': '19:2cb<EMAIL>78c624400ef<EMAIL>750539998<EMAIL>.skype',
        'investigation_id': '1',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': True,
        'channel_name': 'incident-1'
    },
    {
        'channel_id': '19:2<EMAIL>3<EMAIL>',
        'investigation_id': '10',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': True,
        'channel_name': 'incident-10'
    }
]
# Two Teams members used by the mention-entity tests.
team_members: list = [
    {
        'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
        'objectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
        'name': '<NAME>',
        'givenName': 'Bruce',
        'surname': 'Willis',
        'userPrincipalName': '<EMAIL>',
        'tenantId': tenant_id
    },
    {
        'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R',
        'objectId': '2826c1p7-bdb6-4529-b57d-2598me968631',
        'name': '<NAME>',
        'givenName': 'Denzel',
        'surname': 'Washington',
        'email': '<EMAIL>',
        'userPrincipalName': '<EMAIL>',
        'tenantId': tenant_id
    }
]
# Integration context as the integration stores it ('teams' is JSON-encoded).
integration_context: dict = {
    'bot_name': 'DemistoBot',
    'service_url': service_url,
    'tenant_id': tenant_id,
    'teams': json.dumps([{
        'mirrored_channels': mirrored_channels,
        'team_id': team_id,
        'team_aad_id': team_aad_id,
        'team_members': team_members,
        'team_name': team_name
    }])
}
@pytest.fixture(autouse=True)
def get_integration_context(mocker):
    """Serve the canned integration context from the demisto mock for every test."""
    mocker.patch.object(
        demisto, 'getIntegrationContext', return_value=integration_context
    )
@pytest.fixture(autouse=True)
def get_graph_access_token(requests_mock):
    """Mock the Microsoft Graph OAuth2 token endpoint for every test."""
    token_url = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
    requests_mock.post(token_url, json={'access_token': 'token'}, status_code=200)
@pytest.fixture(autouse=True)
def get_bot_access_token(requests_mock):
    """Mock the Bot Framework OAuth2 token endpoint for every test."""
    bot_token_url = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
    requests_mock.post(bot_token_url, json={'access_token': 'token'})
def test_mentioned_users_to_entities():
    """Mentioned display names resolve to Teams mention entities; unknown names raise."""
    from MicrosoftTeams import mentioned_users_to_entities
    expected_entities = [
        {
            'type': 'mention',
            'mentioned': {
                'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
                'name': '<NAME>'
            },
            'text': '<at>@Bruce Willis</at>'
        },
        {
            'type': 'mention',
            'mentioned': {
                'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R',
                'name': '<NAME>'
            },
            'text': '<at>@Denzel Washington</at>'
        }
    ]
    assert mentioned_users_to_entities(['<NAME>', '<NAME>'], integration_context) == expected_entities
    # A name that is not a team member raises a descriptive error.
    with pytest.raises(ValueError, match='Team member demisto was not found'):
        mentioned_users_to_entities(['<NAME>', 'demisto'], integration_context)
def test_process_mentioned_users_in_message():
    """Semicolon-terminated @mentions are wrapped in <at> tags; emails and bare @words are left alone."""
    from MicrosoftTeams import process_mentioned_users_in_message
    raw_message = '@demisto dev; @demisto; <EMAIL>; <EMAIL> hi; @hi @wow;'
    expected_message = '<at>@demisto dev</at> <at>@demisto</at> <EMAIL>; <EMAIL> hi; @hi <at>@wow</at>'
    mentioned_users, parsed_message = process_mentioned_users_in_message(raw_message)
    assert mentioned_users == ['demisto dev', 'demisto', 'wow']
    assert parsed_message == expected_message
def test_message_handler(mocker):
    """A Teams message in a mirrored channel is added as a war-room entry."""
    from MicrosoftTeams import message_handler
    mocker.patch.object(demisto, 'addEntry')
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
            'aadObjectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
            'name': '<NAME>'
        }
    }
    channel_data: dict = {
        'channel': {'id': '19:2cbad0d78c624<EMAIL>ef<EMAIL>053<EMAIL>.skype'},
        'team': {'id': team_id}
    }
    message_handler(integration_context, request_body, channel_data, 'waz up')
    # Exactly one entry is added, attributed to the sending team member.
    assert demisto.addEntry.call_count == 1
    assert demisto.addEntry.call_args[1] == {
        'id': '1',
        'entry': 'waz up',
        'username': 'Bruce Willis',
        'email': '<EMAIL>',
        'footer': '\n**From Microsoft Teams**'
    }
def test_member_added_handler(mocker, requests_mock):
    """Adding the bot to a team populates the integration context with the team data."""
    from MicrosoftTeams import member_added_handler
    # Start from an empty context so the handler must build it from scratch.
    mocker.patch.object(demisto, 'getIntegrationContext', return_value={})
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(demisto, 'params', return_value={'bot_id': bot_id})
    requests_mock.get(f'{service_url}/v3/conversations/{team_id}/members', json=team_members)
    request_body: dict = {
        'recipient': {'id': f'28:{bot_id}', 'name': 'DemistoBot'},
        'membersAdded': [{'id': f'28:{bot_id}'}]
    }
    channel_data: dict = {
        'team': {'id': team_id, 'name': team_name, 'aadGroupId': team_aad_id},
        'eventType': 'teamMemberAdded',
        'tenant': {'id': tenant_id}
    }
    member_added_handler(integration_context, request_body, channel_data)
    expected_integration_context: dict = {
        'bot_name': 'DemistoBot',
        'teams': json.dumps([{
            'mirrored_channels': mirrored_channels,
            'team_id': team_id,
            'team_aad_id': team_aad_id,
            'team_members': team_members,
            'team_name': team_name
        }]),
        'tenant_id': tenant_id,
        'service_url': service_url
    }
    # The context is written twice; the final write holds the fully-populated team.
    assert demisto.setIntegrationContext.call_count == 2
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    assert set_integration_context[0] == expected_integration_context
def test_mirror_investigation(mocker, requests_mock):
    """Cover mirroring a new channel, updating an existing mirror, and custom channel names."""
    from MicrosoftTeams import mirror_investigation
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(demisto, 'params', return_value={'team': 'The-A-Team'})

    # The command must be rejected when run from the playground (investigation type 9).
    mocker.patch.object(demisto, 'investigation', return_value={'type': 9})
    with pytest.raises(ValueError) as e:
        mirror_investigation()
    assert str(e.value) == 'Can not perform this action in playground.'

    # Mirroring a fresh investigation creates a channel and posts a notice to it.
    mocker.patch.object(demisto, 'investigation', return_value={'id': '2'})
    channel_id: str = 'channel-id'
    requests_mock.post(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={'id': channel_id}
    )
    requests_mock.post(f'{service_url}/v3/conversations/{channel_id}/activities', json={})
    mirror_investigation()
    updated_mirrored_channels: list = mirrored_channels[:]
    updated_mirrored_channels.append({
        'channel_id': 'channel-id',
        'investigation_id': '2',
        'mirror_type': 'all',
        'mirror_direction': 'both',
        'auto_close': 'true',
        'mirrored': False,
        'channel_name': 'incident-2'
    })
    expected_integration_context: dict = {
        'bot_name': 'DemistoBot',
        'tenant_id': tenant_id,
        'service_url': service_url,
        'teams': json.dumps([{
            'mirrored_channels': updated_mirrored_channels,
            'team_id': team_id,
            'team_aad_id': team_aad_id,
            'team_members': team_members,
            'team_name': 'The-A-Team'
        }])
    }
    assert requests_mock.request_history[1].json() == {
        'displayName': 'incident-2',
        'description': 'Channel to mirror incident 2'
    }
    assert requests_mock.request_history[3].json() == {
        'text': 'This channel was created to mirror [incident 2](https://test-address:8443#/WarRoom/2) between '
                'Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, you need to'
                ' mention the Demisto Bot in the message.',
        'type': 'message'
    }
    assert demisto.setIntegrationContext.call_count == 3
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    # Token bookkeeping is not under test here - drop it before comparing.
    for transient_key in ('graph_access_token', 'graph_valid_until',
                          'bot_access_token', 'bot_valid_until'):
        set_integration_context[0].pop(transient_key)
    assert set_integration_context[0] == expected_integration_context
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirrored successfully in channel incident-2.'

    # Updating an already-mirrored investigation only rewrites the stored configuration.
    mocker.patch.object(demisto, 'setIntegrationContext')
    mocker.patch.object(
        demisto, 'args',
        return_value={'mirror_type': 'chat', 'direction': 'FromDemisto', 'autoclose': 'false'}
    )
    mocker.patch.object(demisto, 'investigation', return_value={'id': '1'})
    mirror_investigation()
    assert demisto.setIntegrationContext.call_count == 1
    set_integration_context = demisto.setIntegrationContext.call_args[0]
    assert len(set_integration_context) == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirror was updated successfully.'

    # A custom channel name is honoured when creating the mirror channel.
    mocker.patch.object(demisto, 'investigation', return_value={'id': '14'})
    mocker.patch.object(demisto, 'args', return_value={'channel_name': 'booya'})
    mirror_investigation()
    assert requests_mock.request_history[5].json() == {
        'displayName': 'booya',
        'description': 'Channel to mirror incident 14'
    }
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Investigation mirrored successfully in channel booya.'
def test_send_message(mocker, requests_mock):
    """Exercise every send-message path: skips, validation errors, user/channel/card sends."""
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')

    # A message that was itself mirrored from Teams must not be echoed back.
    mocker.patch.object(
        demisto, 'args',
        return_value={
            'messageType': 'mirrorEntry',
            'originalMessage': 'a mirrored message\n**From Microsoft Teams**'
        }
    )
    assert send_message() is None

    # Server notifications below the configured severity threshold are dropped.
    mocker.patch.object(
        demisto, 'params',
        return_value={'min_incident_severity': 'Medium', 'team': 'The-A-Team'}
    )
    mocker.patch.object(demisto, 'args', return_value={'messageType': 'incidentOpened', 'severity': 1})
    assert send_message() is None

    # Neither channel nor team member given -> error.
    mocker.patch.object(demisto, 'args', return_value={})
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'No channel or team member to send message were provided.'

    # Both channel and team member given -> error.
    mocker.patch.object(demisto, 'args', return_value={'channel': 'somechannel', 'team_member': 'someuser'})
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Provide either channel or team member to send message to, not both.'

    # Sending to a team member first opens a personal conversation with the bot.
    mocker.patch.object(demisto, 'params', return_value={'bot_id': bot_id})
    mocker.patch.object(demisto, 'args', return_value={'team_member': '<NAME>', 'message': 'MESSAGE'})
    requests_mock.post(f'{service_url}/v3/conversations', json={'id': 'conversation-id'})
    requests_mock.post(f'{service_url}/v3/conversations/conversation-id/activities', json={})
    expected_create_personal_conversation_data: dict = {
        'bot': {'id': f'28:{bot_id}', 'name': 'DemistoBot'},
        'members': [{
            'id': '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R'
        }],
        'channelData': {'tenant': {'id': tenant_id}}
    }
    send_message()
    assert requests_mock.request_history[0].json() == expected_create_personal_conversation_data
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'

    # Sending to a mirrored channel posts straight to the channel conversation.
    mocker.patch.object(demisto, 'params', return_value={'team': 'The-A-Team'})
    mocker.patch.object(demisto, 'args', return_value={'channel': 'incident-1', 'message': 'MESSAGE'})
    requests_mock.post(
        f"{service_url}/v3/conversations/{mirrored_channels[0]['channel_id']}/activities",
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'

    # An entitlement message is rendered as an adaptive card with one submit action per option.
    message: dict = {
        'message_text': 'is this really working?',
        'options': ['yes', 'no', 'maybe'],
        'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'investigation_id': '72',
        'task_id': '23'
    }
    mocker.patch.object(
        demisto, 'args',
        return_value={'team_member': '<EMAIL>', 'message': json.dumps(message)}
    )
    expected_ask_user_message: dict = {
        'attachments': [{
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'actions': [
                    {
                        'data': {
                            'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
                            'investigation_id': '72',
                            'response': option,
                            'task_id': '23'
                        },
                        'title': option,
                        'type': 'Action.Submit'
                    }
                    for option in ('yes', 'no', 'maybe')
                ],
                'body': [{'text': 'is this really working?', 'type': 'TextBlock'}],
                'type': 'AdaptiveCard',
                'msteams': {'width': 'Full'},
                'version': '1.0'
            },
            'contentType': 'application/vnd.microsoft.card.adaptive'
        }],
        'type': 'message'
    }
    send_message()
    assert requests_mock.request_history[4].json() == expected_ask_user_message
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'

    # A non-JSON adaptive card -> error.
    mocker.patch.object(demisto, 'args', return_value={'channel': 'channel', 'adaptive_card': 'THISisSTRINGnotJSON'})
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Given adaptive card is not in valid JSON format.'

    # Both message and adaptive card -> error.
    mocker.patch.object(
        demisto, 'args',
        return_value={'channel': 'channel', 'message': 'message', 'adaptive_card': '{"a":"b"}'}
    )
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'Provide either message or adaptive to send, not both.'

    # Neither message nor adaptive card -> error.
    mocker.patch.object(demisto, 'args', return_value={'channel': 'channel'})
    with pytest.raises(ValueError) as e:
        send_message()
    assert str(e.value) == 'No message or adaptive card to send were provided.'

    # A well-formed adaptive card is wrapped in a message attachment as-is.
    adaptive_card: dict = {
        "contentType": "application/vnd.microsoft.card.adaptive",
        "content": {
            "$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
            "type": "AdaptiveCard",
            "version": "1.0",
            "body": [{
                "type": "Container",
                "items": [{"type": "TextBlock", "text": "What a pretty adaptive card"}]
            }]
        }
    }
    mocker.patch.object(
        demisto, 'args',
        return_value={'team_member': '<EMAIL>', 'adaptive_card': json.dumps(adaptive_card)}
    )
    send_message()
    assert requests_mock.request_history[6].json() == {'type': 'message', 'attachments': [adaptive_card]}
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_send_message_server_notifications_incident_opened(mocker, requests_mock):
    """
    Given:
     - A server notification of an opened incident at or above the severity threshold.
    When:
     - Sending the notification message.
    Then:
     - The message is sent successfully to the dedicated notifications channel.
    """
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(
        demisto, 'params',
        return_value={
            'team': 'The-A-Team',
            'min_incident_severity': 'Low',
            'incident_notifications_channel': 'General'
        }
    )
    mocker.patch.object(
        demisto, 'args',
        return_value={
            'channel': 'incidentNotificationChannel',
            'message': 'user has reported an incident tadam.\nView it on https://server#/WarRoom/3247',
            'messageType': 'incidentOpened',
            'severity': 1,
            'to': ''
        }
    )
    # The 'General' channel is resolved through the Graph channels listing.
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [{
                'description': 'general channel',
                'displayName': 'General',
                'id': '19:67pd3966e74g45f28d<EMAIL>5f<EMAIL>.skype'
            }]
        }
    )
    requests_mock.post(
        f'{service_url}/v3/conversations/19:67pd3966e74g45f28d0c65f1689132bb@thread.skype/activities',
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_send_message_server_notifications_incident_changed(mocker, requests_mock):
    """
    Given:
     - A server notification of an updated incident.
    When:
     - Sending the notification message.
    Then:
     - The message is sent successfully to the dedicated notifications channel.
    """
    from MicrosoftTeams import send_message
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(
        demisto, 'params',
        return_value={
            'team': 'The-A-Team',
            'min_incident_severity': 'Low',
            'incident_notifications_channel': 'General'
        }
    )
    mocker.patch.object(
        demisto, 'args',
        return_value={
            'channel': 'incidentNotificationChannel',
            'message': 'DBot has updated an incident tadam.\nView it on https://server#/WarRoom/3247',
            'messageType': 'incidentChanged',
            'severity': 1,
            'to': ''
        }
    )
    # The 'General' channel is resolved through the Graph channels listing.
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [{
                'description': 'general channel',
                'displayName': 'General',
                'id': '19:67pd3966e74g<EMAIL>'
            }]
        }
    )
    requests_mock.post(
        f'{service_url}/v3/conversations/19:67pd3966e74g45f28d0c65f1689132bb@thread.skype/activities',
        json={}
    )
    send_message()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Message was sent successfully.'
def test_get_channel_id(requests_mock):
    """Channel IDs resolve from the cached integration context first, then via Graph API."""
    from MicrosoftTeams import get_channel_id
    # get channel which is in the integration context (no HTTP call expected)
    assert get_channel_id('incident-1', team_aad_id) == '19:2cbad0d78c624400ef83a5750539998g@thread.skype'
    # get channel which is not in the integration context - resolved via Graph channels listing
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'channel for incident 1',
                    'displayName': 'incident-1',
                    'id': '19:6<EMAIL>'
                },
                {
                    'description': 'channel for incident 2',
                    'displayName': 'incident-3',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bo@thread.skype'
                }
            ]
        }
    )
    assert get_channel_id('incident-3', team_aad_id) == '19:67pd3967e74g45f28d0c65f1689132<EMAIL>.skype'
    # Try a channel which does not exist - a descriptive ValueError is expected
    with pytest.raises(ValueError) as e:
        get_channel_id('incident-4', team_aad_id)
    assert str(e.value) == 'Could not find channel: incident-4'
def test_close_channel(mocker, requests_mock):
    """Closing a channel deletes it via Graph, both for mirrored and explicitly-named channels."""
    from MicrosoftTeams import close_channel
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:2cbad0d78c624400ef83a5750539998g@thread.skype',
        status_code=204
    )
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:2cbad0d78c624400ef83a575<EMAIL>',
        status_code=204
    )
    mocker.patch.object(demisto, 'results')
    # close channel without given channel name - resolved from the mirror of investigation '1'
    mocker.patch.object(demisto, 'investigation', return_value={'id': '1'})
    mocker.patch.object(demisto, 'getIntegrationContext', return_value=integration_context)
    mocker.patch.object(demisto, 'setIntegrationContext')
    close_channel()
    assert requests_mock.request_history[0].method == 'DELETE'
    # the mirror entry is removed from the stored context
    assert demisto.setIntegrationContext.call_count == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Channel was successfully closed.'
    # try to close channel without given channel name, which does not exist in the integration context
    mocker.patch.object(demisto, 'investigation', return_value={'id': '5'})
    with pytest.raises(ValueError) as e:
        close_channel()
    assert str(e.value) == 'Could not find Microsoft Teams channel to close.'
    # close channel given channel name - resolved via the Graph channels listing instead
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'setIntegrationContext')
    requests_mock.get(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'value': [
                {
                    'description': 'channel for incident 1',
                    'displayName': 'incident-1',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bb@thread.skype'
                },
                {
                    'description': 'channel for incident 6',
                    'displayName': 'incident-6',
                    'id': '19:67pd3967e74g45f28d0c65f1689132bo@thread.skype'
                }
            ]
        }
    )
    requests_mock.delete(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels/19:67pd39<EMAIL>',
        status_code=204
    )
    mocker.patch.object(demisto, 'params', return_value={'team': 'The-A-Team'})
    mocker.patch.object(demisto, 'args', return_value={'channel': 'incident-1'})
    close_channel()
    assert requests_mock.request_history[0].method == 'DELETE'
    # no mirror entry to clean up when closing by explicit name
    assert demisto.setIntegrationContext.call_count == 0
    assert demisto.results.call_count == 1
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0] == 'Channel was successfully closed.'
def test_entitlement_handler(mocker, requests_mock):
    """An adaptive-card response is forwarded to the server as an entitlement answer."""
    from MicrosoftTeams import entitlement_handler
    mocker.patch.object(demisto, 'handleEntitlementForUser')
    conversation_id: str = 'f:3005393407786078157'
    activity_id: str = '1:1vW2mx4iDZf05lk18yskL64Wkfwraa76YTGNgDiIi-_5'
    # The handler updates the original card activity in place.
    requests_mock.put(
        f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}',
        json={'id': 'updateid'}
    )
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg',
            'aadObjectId': '359d2c3c-162b-414c-b2eq-386461e5l050',
            'name': '<NAME>'
        },
        'replyToId': activity_id
    }
    card_value: dict = {
        'response': 'Approve!',
        'entitlement': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'investigation_id': '100',
        'task_id': '4'
    }
    entitlement_handler(integration_context, request_body, card_value, conversation_id)
    assert demisto.handleEntitlementForUser.call_count == 1
    assert demisto.handleEntitlementForUser.call_args[1] == {
        'incidentID': '100',
        'guid': '4404dae8-2d45-46bd-85fa-64779c12abe8',
        'taskID': '4',
        'email': '<EMAIL>',
        'content': 'Approve!'
    }
def test_translate_severity():
    """Known severity names map to numbers; unknown names fall back to 0."""
    from MicrosoftTeams import translate_severity
    for severity_name, expected in (('Low', 1), ('NotRealSeverity', 0)):
        assert translate_severity(severity_name) == expected
def test_is_investigation_mirrored():
    """Mirrored investigations resolve to their channel index; unknown ones to -1."""
    from MicrosoftTeams import is_investigation_mirrored
    # investigation '1' is the first mirrored channel in the fixture data
    assert is_investigation_mirrored('1', mirrored_channels) == 0
    # investigation '2' is not mirrored at all
    assert is_investigation_mirrored('2', mirrored_channels) == -1
def test_urlify_hyperlinks():
    """Bare http(s) URLs are wrapped in markdown link syntax."""
    from MicrosoftTeams import urlify_hyperlinks
    raw = 'Visit https://www.demisto.com and http://www.demisto.com'
    expected = (
        'Visit [https://www.demisto.com](https://www.demisto.com) '
        'and [http://www.demisto.com](http://www.demisto.com)'
    )
    assert urlify_hyperlinks(raw) == expected
def test_get_team_aad_id(mocker, requests_mock):
    """Team AAD IDs resolve from the cached context first, then via the Graph groups query."""
    from MicrosoftTeams import get_team_aad_id
    # verify team ID for team which is in integration context (no HTTP call expected)
    mocker.patch.object(
        demisto,
        'params',
        return_value={
            'team': 'The-A-Team'
        }
    )
    assert get_team_aad_id('The-A-Team') == '7d8efdf8-0c5a-42e3-a489-5ef5c3fc7a2b'
    # verify non existing team raises value error
    requests_mock.get(
        "https://graph.microsoft.com/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')",
        json={
            '@odata.context': 'https://graph.microsoft.com/beta/$metadata#groups',
            'value': [
                {
                    'id': '02bd9fd6-8f93-4758-87c3-1fb73740a315',
                    'displayName': 'MyGreatTeam',
                    'groupTypes': [
                        'Unified'
                    ],
                    'mailEnabled': True,
                    'resourceBehaviorOptions': [],
                    'resourceProvisioningOptions': [
                        'Team'
                    ],
                    'securityEnabled': False,
                    'visibility': 'Private'
                },
                {
                    'id': '8090c93e-ba7c-433e-9f39-08c7ba07c0b3',
                    'displayName': 'WooahTeam',
                    'groupTypes': [
                        'Unified'
                    ],
                    'mailEnabled': True,
                    'mailNickname': 'X1050LaunchTeam',
                    'resourceBehaviorOptions': [],
                    'resourceProvisioningOptions': [
                        'Team'
                    ],
                    'securityEnabled': False,
                    'visibility': 'Private'
                }
            ]
        }
    )
    with pytest.raises(ValueError) as e:
        get_team_aad_id('The-B-Team')
    assert str(e.value) == 'Could not find requested team.'
    # verify team ID for team which is not in integration context (taken from the mocked Graph response)
    assert get_team_aad_id('MyGreatTeam') == '02bd9fd6-8f93-4758-87c3-1fb73740a315'
def test_get_team_member():
    """Looking up a known bot-framework user ID returns username and email; unknown IDs raise."""
    from MicrosoftTeams import get_team_member
    known_user_id = '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg'
    assert get_team_member(integration_context, known_user_id) == {
        'username': '<NAME>',
        'user_email': '<EMAIL>'
    }
    # An unknown user ID raises a descriptive error.
    with pytest.raises(ValueError) as e:
        get_team_member(integration_context, 'NotRealUser')
    assert str(e.value) == 'Team member was not found'
def test_get_team_member_id():
    """A member resolves by display name or email; unknown members raise."""
    from MicrosoftTeams import get_team_member_id
    expected_user_id: str = '29:1pBMMC85IyjM3tr_MCZi7KW4pw4EULxLN4C7R_xoi3Wva_lOn3VTf7xJlCLK-r-pMumrmoz9agZxsSrCf7__u9R'
    # Lookup by display name.
    assert get_team_member_id('<NAME>', integration_context) == expected_user_id
    # Lookup by email address.
    assert get_team_member_id('<EMAIL>', integration_context) == expected_user_id
    # Unknown member raises a descriptive error.
    with pytest.raises(ValueError) as e:
        get_team_member_id('TheRock', integration_context)
    assert str(e.value) == 'Team member TheRock was not found'
def test_create_adaptive_card():
    """create_adaptive_card wraps a body (and optional actions) in the card envelope."""
    from MicrosoftTeams import create_adaptive_card
    card_body: list = [{
        'type': 'TextBlock',
        'size': 'Medium',
        'weight': 'Bolder',
        'text': 'What a beautiful text'
    }]
    expected_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {'width': 'Full'},
            'body': card_body
        }
    }
    assert create_adaptive_card(card_body) == expected_card
    # Supplying actions adds them to the card content unchanged.
    card_actions: list = [{
        'type': 'Action.OpenUrl',
        'title': 'DEMISTO',
        'url': 'https://www.demisto.com'
    }]
    expected_card['content']['actions'] = card_actions
    assert create_adaptive_card(card_body, card_actions) == expected_card
def test_process_tasks_list():
    """A fixed-width tasks table (header, separator, rows) renders as one FactSet card."""
    from MicrosoftTeams import process_tasks_list
    # Column widths follow the '=' separator line; the last row's URL continues on the next literal.
    data_by_line: list = [
        'Task                                     | Incident                       | Due                 | Link ',
        '=========================================|================================|=====================|=====',
        'Manually review the incident             | 21 - nnn                       | 0001-01-01 00:00:00 | '
        'https://demisto.com/#/WorkPlan/21'
    ]
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': [{
                'type': 'FactSet',
                'facts': [
                    {
                        'title': 'Task:',
                        'value': 'Manually review the incident'
                    },
                    {
                        'title': 'Incident:',
                        'value': '21 - nnn'
                    },
                    {
                        'title': 'Due:',
                        'value': '0001-01-01 00:00:00'
                    },
                    {
                        'title': 'Link:',
                        'value': '[https://demisto.com/#/WorkPlan/21](https://demisto.com/#/WorkPlan/21)'
                    }
                ]
            }]
        }
    }
    assert process_tasks_list(data_by_line) == expected_adaptive_card
def test_process_incidents_list():
    """A fixed-width incidents table renders as one FactSet per incident row."""
    from MicrosoftTeams import process_incidents_list
    # Column widths follow the '=' separator line; each row's URL continues on the next literal.
    data_by_line: list = [
        'ID         | Name                 | Status      | Type        | Owner       | Created             | Link ',
        '===========|======================|=============|=============|=============|=====================|=====',
        '257        | w                    | Active      | Unclassifie | god         | 2019-07-28 16:42:40 | '
        'https://demisto.com/#/WarRoom/257',
        '250        | gosa                 | Active      | Unclassifie | mozes       | 2019-07-28 16:16:49 | '
        'https://demisto.com/#/WarRoom/250 '
    ]
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {
                'width': 'Full'
            },
            'body': [
                {
                    'type': 'FactSet',
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '257'
                        },
                        {
                            'title': 'Name:',
                            'value': 'w'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'god'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:42:40'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/257](https://demisto.com/#/WarRoom/257)'
                        }
                    ]
                },
                {
                    'type': 'FactSet',
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '250'
                        },
                        {
                            'title': 'Name:',
                            'value': 'gosa'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'mozes'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:16:49'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/250](https://demisto.com/#/WarRoom/250)'
                        }
                    ]
                }
            ]
        }
    }
    assert process_incidents_list(data_by_line) == expected_adaptive_card
def test_process_mirror_or_unknown_message():
    """Plain bot replies become a wrapped TextBlock card with doubled newlines."""
    from MicrosoftTeams import process_mirror_or_unknown_message
    message: str = 'I can understand the following commands:\nlist incidents [page x]\nlist my incidents [page x]\n' \
                   'list my tasks\nlist closed incidents\nnew incident [details]\nmirror incident-id'
    expected_adaptive_card: dict = {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': {
            '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
            'version': '1.0',
            'type': 'AdaptiveCard',
            'msteams': {'width': 'Full'},
            'body': [{
                'type': 'TextBlock',
                # Newlines are doubled so Teams renders each command on its own line.
                'text': message.replace('\n', '\n\n'),
                'wrap': True
            }]
        }
    }
    assert process_mirror_or_unknown_message(message) == expected_adaptive_card
def test_get_participant_info():
    """The organizer's user ID and display name are extracted from a participants payload."""
    from MicrosoftTeams import get_participant_info
    organizer_identity = {
        'phone': None, 'guest': None, 'encrypted': None, 'onPremises': None,
        'applicationInstance': None, 'application': None, 'device': None,
        'user': {
            'id': 'id_identifier',
            'displayName': 'best_user',
            'tenantId': 'tenantId_identifier',
            'identityProvider': 'AAD'
        }
    }
    participants = {
        'organizer': {'upn': 'mail.com', 'role': 'presenter', 'identity': organizer_identity},
        'attendees': []
    }
    participant_id, participant_display_name = get_participant_info(participants)
    assert participant_id == 'id_identifier'
    assert participant_display_name == 'best_user'
def test_create_channel(requests_mock):
    """Creating a channel returns the newly created channel's ID from the Graph response."""
    from MicrosoftTeams import create_channel
    requests_mock.post(
        f'https://graph.microsoft.com/v1.0/teams/{team_aad_id}/channels',
        json={
            'id': '19:67pd3967e74g45f28d0c65f1689132bb@thread.skype'
        }
    )
    channel_name: str = 'CrazyChannel'
    response = create_channel(team_aad_id, channel_name)
    # NOTE(review): the asserted ID text differs from the mocked one above - this looks
    # like a data-redaction artifact in the literals; confirm against the original suite.
    assert response == '19:<EMAIL>.skype'
def test_create_meeting_command(requests_mock, mocker):
    """Creating a meeting resolves the user, calls onlineMeetings, and reports success."""
    from MicrosoftTeams import create_meeting_command
    mocker.patch.object(demisto, 'args', return_value={"subject": "Best_Meeting", "member": "username"})
    mocker.patch.object(demisto, 'results')
    # The member name is resolved to a Graph user ID first.
    requests_mock.get(
        'https://graph.microsoft.com/v1.0/users',
        json={"value": [{"id": "userid1"}]}
    )
    # Mocked onlineMeetings creation response for that user.
    requests_mock.post(
        'https://graph.microsoft.com/v1.0/users/userid1/onlineMeetings',
        json={
            "chatInfo": {
                "threadId": "19:@thread.skype",
                "messageId": "0",
                "replyChainMessageId": "0"
            },
            "creationDateTime": "2019-07-11T02:17:17.6491364Z",
            "startDateTime": "2019-07-11T02:17:17.6491364Z",
            "endDateTime": "2019-07-11T02:47:17.651138Z",
            "id": "id_12345",
            "joinWebUrl": "https://teams.microsoft.com/l/meetup-join/12345",
            "participants": {
                "organizer": {
                    "identity": {
                        "user": {
                            "id": "user_id_12345",
                            "displayName": "Demisto"
                        }
                    },
                    "upn": "upn-value"
                }
            },
            "subject": "User Token Meeting"
        }
    )
    expected_results = 'The meeting "Best_Meeting" was created successfully'
    create_meeting_command()
    results = demisto.results.call_args[0]
    assert len(results) == 1
    assert results[0]['HumanReadable'] == expected_results
    # The raw Graph response is returned as the command contents.
    assert results[0]['Contents'].get('id') == 'id_12345'
def test_get_team_members(requests_mock):
    """The Bot Framework members endpoint response is returned unchanged."""
    from MicrosoftTeams import get_team_members
    members_url = f'{service_url}/v3/conversations/{team_aad_id}/members'
    requests_mock.get(members_url, json=team_members)
    assert get_team_members(service_url, team_aad_id) == team_members
def test_update_message(requests_mock):
    """update_message PUTs a single-TextBlock adaptive card over the original activity."""
    from MicrosoftTeams import update_message
    activity_id: str = '1:1vW2mx4iDZf05lk18yskL64Wkfwraa76YTGNgDiIi-_5'
    conversation_id: str = 'f:3005393407786078157'
    requests_mock.put(
        f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}',
        json={'id': 'updateid'}
    )
    update_message(service_url, conversation_id, activity_id, 'OMG!')
    sent_request = requests_mock.request_history[0]
    assert sent_request.method == 'PUT'
    assert json.loads(sent_request.body) == {
        'type': 'message',
        'attachments': [{
            'contentType': 'application/vnd.microsoft.card.adaptive',
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'version': '1.0',
                'type': 'AdaptiveCard',
                'msteams': {'width': 'Full'},
                'body': [{'type': 'TextBlock', 'text': 'OMG!'}]
            }
        }]
    }
# def test_create_team(mocker, requests_mock):
# from MicrosoftTeams import create_team
# mocker.patch.object(
# demisto,
# 'args',
# return_value={
# 'display_name': 'OhMyTeam',
# 'mail_nickname': 'No<PASSWORD>names<PASSWORD>',
# 'owner': '<EMAIL>',
# 'mail_enabled': 'true',
# 'security_enabled': 'false'
# }
# )
# requests_mock.get(
# f'https://graph.microsoft.com/v1.0/users',
# json={
# 'value': team_members
# }
# )
# with pytest.raises(ValueError) as e:
# create_team()
# assert str(e.value) == 'Could not find given users to be Team owners.'
# mocker.patch.object(
# demisto,
# 'args',
# return_value={
# 'display_name': 'OhMyTeam',
# 'mail_nickname': 'NoNicknames<PASSWORD>',
# 'owner': '<EMAIL>'
# }
# )
def test_direct_message_handler(mocker, requests_mock):
    """Exercise direct_message_handler end to end: rejected incident creation,
    successful creation, incident listing, and a server-raised error.

    Each handled message POSTs one activity, so the asserts below index
    requests_mock.request_history in call order.
    """
    from MicrosoftTeams import direct_message_handler
    mocker.patch.object(
        demisto,
        'createIncidents',
        return_value={
            'id': '4',
            'name': 'incidentnumberfour'
        }
    )
    requests_mock.post(
        f'{service_url}/v3/conversations/conversation-id/activities',
        json={}
    )
    request_body: dict = {
        'from': {
            'id': '29:1KZccCJRTxlPdHnwcKfxHAtYvPLIyHgkSLhFSnGXLGVFlnltovdZPmZAduPKQP6NrGqOcde7FXAF7uTZ_8FQOqg'
        }
    }
    conversation: dict = {
        'id': 'conversation-id'
    }
    # verify incident creation is rejected for a disallowed external sender
    # (findUser returns None, i.e. the sender is not a known Demisto user)
    message: str = 'create incident name=GoFish type=Phishing'
    mocker.patch.object(demisto, 'findUser', return_value=None)
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[0].json() == {
        'text': 'You are not allowed to create incidents.', 'type': 'message'
    }
    # verify the incident is created successfully once the sender maps to a Demisto user
    mocker.patch.object(demisto, 'findUser', return_value={'id': 'nice-demisto-id'})
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[1].json() == {
        'text': "Successfully created incident incidentnumberfour.\n"
                "View it on: [https://test-address:8443#/WarRoom/4](https://test-address:8443#/WarRoom/4)",
        'type': 'message'
    }
    # verify "list my incidents" renders the server reply as an adaptive-card fact set
    my_incidents: str = "```ID | Name | Status | Type | Owner | Created" \
                        " | Link\n ===========|======================|=============|=============|====" \
                        "=========|=====================|=====\n257 | w | Active | " \
                        "Unclassifie | god | 2019-07-28 16:42:40 | https://demisto.com/#/WarRoom/257```"
    mocker.patch.object(demisto, 'directMessage', return_value=my_incidents)
    message = 'list my incidents'
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[2].json() == {
        'attachments': [{
            'content': {
                '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
                'body': [{
                    'facts': [
                        {
                            'title': 'ID:',
                            'value': '257'
                        },
                        {
                            'title': 'Name:',
                            'value': 'w'
                        },
                        {
                            'title': 'Status:',
                            'value': 'Active'
                        },
                        {
                            'title': 'Type:',
                            'value': 'Unclassifie'
                        },
                        {
                            'title': 'Owner:',
                            'value': 'god'
                        },
                        {
                            'title': 'Created:',
                            'value': '2019-07-28 16:42:40'
                        },
                        {
                            'title': 'Link:',
                            'value': '[https://demisto.com/#/WarRoom/257](https://demisto.com/#/WarRoom/257)'
                        }
                    ],
                    'type': 'FactSet'
                }],
                'type': 'AdaptiveCard',
                'msteams': {
                    'width': 'Full'
                },
                'version': '1.0'
            },
            'contentType': 'application/vnd.microsoft.card.adaptive'
        }],
        'type': 'message'
    }
    # verify an error raised by the Demisto server is sent back as a plain message, as expected
    mocker.patch.object(
        demisto,
        'directMessage',
        side_effect=ValueError(
            'I\'m sorry but I was unable to find you as a Demisto user for email [<EMAIL>]'
        )
    )
    direct_message_handler(integration_context, request_body, conversation, message)
    assert requests_mock.request_history[3].json() == {
        'type': 'message',
        'text': 'I\'m sorry but I was unable to find you as a Demisto user for email [<EMAIL>]'
    }
def test_error_parser():
    """Verify error_parser produces a readable message for both API error formats."""
    from MicrosoftTeams import error_parser

    class _StubResponse:
        """Minimal stand-in for requests.Response."""
        def __init__(self, json_data, status_code, text=''):
            self.json_data = json_data
            self.status_code = status_code
            self.text = text

        def json(self):
            return self.json_data

    # Bot Framework errors: the description field is surfaced verbatim.
    error_description: str = "AADSTS700016: Application with identifier '2bc5202b-ad6a-4458-8821-e104af433bbb' " \
                             "was not found in the directory 'botframework.com'. This can happen if the application " \
                             "has not been installed by the administrator of the tenant or consented to by any user " \
                             "in the tenant. You may have sent your authentication request to the wrong tenant.\r\n" \
                             "Trace ID: 9eaeeec8-7f9e-4fb8-b319-5413581f0a00\r\nCorrelation ID: " \
                             "138cb511-2484-410e-b9c1-14b15accbeba\r\nTimestamp: 2019-08-28 13:18:44Z"
    bot_response = _StubResponse(
        {
            'error': 'unauthorized_client',
            'error_description': error_description,
            'error_codes': [700016],
            'timestamp': '2019-08-28 13:18:44Z',
            'trace_id': '9eaeeec8-7f9e-4fb8-b319-5413581f0a11',
            'correlation_id': '138cb111-2484-410e-b9c1-14b15accbeba',
            'error_uri': 'https://login.microsoftonline.com/error?code=700016'
        },
        400,
    )
    assert error_parser(bot_response, 'bot') == error_description

    # Graph errors: rendered as "<code>: <message>".
    error_code: str = 'InvalidAuthenticationToken'
    error_message: str = 'Access token validation failure.'
    graph_response = _StubResponse(
        {
            'error': {
                'code': error_code,
                'message': error_message,
                'innerError': {
                    'request-id': 'c240ab22-4463-4a1f-82bc-8509d8190a77',
                    'date': '2019-08-28T13:37:14'
                }
            }
        },
        401,
    )
    assert error_parser(graph_response) == f'{error_code}: {error_message}'
def test_integration_health(mocker):
    """Verify integration_health writes the expected human-readable health report."""
    from MicrosoftTeams import integration_health

    mocker.patch.object(demisto, 'results')
    # The markdown below is compared verbatim, so it is intentionally unindented.
    expected_human_readable = """### Microsoft API Health
|Bot Framework API Health|Graph API Health|
|---|---|
| Operational | Operational |
### Microsoft Teams Mirrored Channels
|Channel|Investigation ID|Team|
|---|---|---|
| incident-10 | 10 | The-A-Team |
| incident-2 | 2 | The-A-Team |
| booya | 14 | The-A-Team |
"""
    integration_health()

    call_args = demisto.results.call_args[0]
    assert len(call_args) == 1
    assert call_args[0]['HumanReadable'] == expected_human_readable
|
1677774
|
import os
from flask import Flask, Response, jsonify, abort
from flask_restplus import Api, Resource, fields, reqparse
from flask_cors import CORS, cross_origin
import json
import pandas as pd
from dotenv import load_dotenv
import time
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
import logging
import sqlalchemy
import sys
from flask import request
from werkzeug.exceptions import HTTPException
# get logging level from the environment, default to INFO
logging.basicConfig(level=os.environ.get("LOGLEVEL", logging.INFO))
# Get a logger and keep its name in sync with this filename
logger = logging.getLogger(os.path.basename(__file__))
# load environment variables
load_dotenv()
# The application
app = Flask(__name__)
CORS(app)
logger.info('starting application')
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default to 8080
port = int(os.getenv('PORT', 8080))
# DB Connections and identifier constants
# Fixed: the port component of the URI previously reused MARIADB_HOST
# (copy/paste bug) — it now reads MARIADB_PORT.
SQLALCHEMY_DATABASE_URI = ("mysql+pymysql://" + os.getenv('MARIADB_USERNAME')
                           + ":" + os.getenv("MARIADB_PASSWORD")
                           + "@" + os.getenv("MARIADB_HOST")
                           + ":" + str(os.getenv("MARIADB_PORT"))
                           + "/prometeo")
# create_engine is the supported way to build an engine from a URL;
# the previous MetaData(url).bind form is a deprecated implicit-binding idiom.
DB_ENGINE = sqlalchemy.create_engine(SQLALCHEMY_DATABASE_URI)
ANALYTICS_TABLE = 'meal_status_analytics'
meal_ID_COL = 'meal_id'
TIMESTAMP_COL = 'timestamp_mins'
STATUS_LED_COL = 'analytics_status_LED'
# We initialize the prometeo Analytics engine.
# Calculates Time-Weighted Average exposures and exposure-limit status 'gauges' for all meals for the last minute.
def callCreateMenu():
    # NOTE(review): the scheduler below registers `callMicrobitesAnalytics`, which is
    # not defined in this module — presumably this function was renamed at some point.
    # Confirm the intended job target; as written this function only logs.
    logger.info('Running analytics')
# Start up a scheduled job to run once per minute
ANALYTICS_FREQUENCY_SECONDS = 60
scheduler = BackgroundScheduler()
# Fixed: the job previously referenced `callMicrobitesAnalytics`, which is not
# defined anywhere in this module and would raise NameError at import time.
# `callCreateMenu` is the analytics entry point defined above.
scheduler.add_job(func=callCreateMenu, trigger="interval", seconds=ANALYTICS_FREQUENCY_SECONDS)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
|
1677809
|
import torch
import torch.nn as nn
def ConvBNReLU(in_channels, out_channels, kernel_size, stride, padding=None):
    """Return a Conv2d -> BatchNorm2d -> ReLU6 block.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        kernel_size: square convolution kernel size.
        stride: convolution stride.
        padding: explicit padding; defaults to ``kernel_size // 2`` ("same"-style
            padding for odd kernels).

    Fixed: the original signature accepted ``padding`` but silently ignored it,
    always using ``kernel_size // 2``. ``None`` now selects that default, so all
    existing call sites in this file behave identically while an explicit
    ``padding`` is finally honored.
    """
    if padding is None:
        padding = kernel_size // 2
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                  kernel_size=kernel_size, stride=stride, padding=padding),
        nn.BatchNorm2d(out_channels),
        nn.ReLU6(inplace=True),
    )
class ResidualBlock(nn.Module):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs plus a 1x1 projection shortcut."""

    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        squeezed = out_channels // 2
        # Squeeze, process, then expand back to out_channels.
        self.bottleneck = nn.Sequential(
            ConvBNReLU(in_channels=in_channels, out_channels=squeezed, kernel_size=1, stride=1),
            ConvBNReLU(in_channels=squeezed, out_channels=squeezed, kernel_size=3, stride=1, padding=1),
            ConvBNReLU(in_channels=squeezed, out_channels=out_channels, kernel_size=1, stride=1),
        )
        # 1x1 projection so the skip path matches the output channel count.
        self.shortcut = ConvBNReLU(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1)

    def forward(self, x):
        pooled = self.bottleneck(x)
        return pooled + self.shortcut(x)
class left_pool(torch.autograd.Function):
    """Corner pooling: running maximum scanned right-to-left along the width axis.

    NOTE(review): this uses the legacy (pre-0.4) autograd API — instance methods
    with ``self.save_for_backward`` — rather than static ``forward``/``backward``
    taking a ``ctx``. It will not work via ``Function.apply`` on modern PyTorch;
    confirm the intended torch version.
    """
    def forward(self, input_):
        # Keep a copy of the input; backward re-runs the forward scan to know
        # where the running maximum came from.
        self.save_for_backward(input_.clone())
        output = torch.zeros_like(input_)
        batch = input_.size(0)
        width = input_.size(3)
        # Rightmost column is copied as-is; every other column becomes
        # max(input[col], output[col + 1]) scanning leftwards.
        input_tmp = input_.select(3, width - 1)
        output.select(3, width - 1).copy_(input_tmp)
        for idx in range(1, width):
            input_tmp = input_.select(3, width - idx - 1)
            output_tmp = output.select(3, width - idx)
            # Elementwise max of the current input column and the already-pooled
            # column immediately to its right.
            cmp_tmp = torch.cat((input_tmp.view(batch, 1, -1), output_tmp.view(batch, 1, -1)), 1).max(1)[0]
            output.select(3, width - idx - 1).copy_(cmp_tmp.view_as(input_tmp))
        return output
    def backward(self, grad_output):
        input_, = self.saved_tensors
        output = torch.zeros_like(input_)
        grad_output = grad_output.clone()
        res = torch.zeros_like(grad_output)
        w = input_.size(3)
        batch = input_.size(0)
        # Gradient of the rightmost column flows through unchanged.
        output_tmp = res.select(3, w - 1)
        grad_output_tmp = grad_output.select(3, w - 1)
        output_tmp.copy_(grad_output_tmp)
        input_tmp = input_.select(3, w - 1)
        output.select(3, w - 1).copy_(input_tmp)
        for idx in range(1, w):
            # Recompute the forward running max for this column so the gradient
            # can be routed to the winning position.
            input_tmp = input_.select(3, w - idx - 1)
            output_tmp = output.select(3, w - idx)
            cmp_tmp = torch.cat((input_tmp.view(batch, 1, -1), output_tmp.view(batch, 1, -1)), 1).max(1)[0]
            output.select(3, w - idx - 1).copy_(cmp_tmp.view_as(input_tmp))
            grad_output_tmp = grad_output.select(3, w - idx - 1)
            res_tmp = res.select(3, w - idx)
            # BUG(review): `comp` is not defined anywhere in this module, so calling
            # backward raises NameError. It presumably selects/accumulates gradient
            # based on which side won the max — restore or import the missing helper.
            com_tmp = comp(input_tmp, output_tmp, grad_output_tmp, res_tmp)
            res.select(3, w - idx - 1).copy_(com_tmp)
        return res
class HourglassNetwork(nn.Module):
    """Placeholder for CornerNet's hourglass backbone (not yet implemented).

    Fixed: ``forward`` previously did ``return out`` with ``out`` undefined,
    raising NameError on every call. Until the real network is implemented the
    module acts as an identity pass-through so the pipeline stays runnable.
    """

    def __init__(self):
        super(HourglassNetwork, self).__init__()

    def forward(self, x):
        # TODO: implement the hourglass; identity placeholder for now.
        return x
class PredictionModule(nn.Module):
    """Placeholder for CornerNet's corner-prediction head (not yet implemented).

    Fixed: ``forward`` previously did ``return out`` with ``out`` undefined,
    raising NameError on every call. Identity pass-through keeps callers working
    until the head is implemented.
    """

    def __init__(self):
        super(PredictionModule, self).__init__()

    def forward(self, x):
        # TODO: implement heatmap/embedding/offset prediction; identity placeholder.
        return x
class CornerNet(nn.Module):
    """Placeholder top-level CornerNet model (not yet implemented).

    Fixed: ``forward`` previously did ``return out`` with ``out`` undefined,
    raising NameError on every call — the ``__main__`` smoke test below crashed.
    Identity pass-through keeps the script runnable until the model is wired up.
    """

    def __init__(self):
        super(CornerNet, self).__init__()

    def forward(self, x):
        # TODO: compose HourglassNetwork + PredictionModule; identity placeholder.
        return x
if __name__ == '__main__':
    # Smoke test: build CornerNet and push one random 511x511 RGB image through it.
    # NOTE(review): as written, forward() does `return out` with `out` undefined,
    # so this script raises NameError until the model stubs are implemented.
    model = CornerNet()
    print(model)
    data = torch.randn(1,3,511,511)
    output = model(data)
    print(output.shape)
|
1677872
|
set_name(0x80121A34, "PreGameOnlyTestRoutine__Fv", SN_NOWARN)
set_name(0x80123AF8, "DRLG_PlaceDoor__Fii", SN_NOWARN)
set_name(0x80123FCC, "DRLG_L1Shadows__Fv", SN_NOWARN)
set_name(0x801243E4, "DRLG_PlaceMiniSet__FPCUciiiiiii", SN_NOWARN)
set_name(0x80124850, "DRLG_L1Floor__Fv", SN_NOWARN)
set_name(0x8012493C, "StoreBlock__FPiii", SN_NOWARN)
set_name(0x801249E8, "DRLG_L1Pass3__Fv", SN_NOWARN)
set_name(0x80124B9C, "DRLG_LoadL1SP__Fv", SN_NOWARN)
set_name(0x80124C78, "DRLG_FreeL1SP__Fv", SN_NOWARN)
set_name(0x80124CA8, "DRLG_Init_Globals__Fv", SN_NOWARN)
set_name(0x80124D4C, "set_restore_lighting__Fv", SN_NOWARN)
set_name(0x80124DDC, "DRLG_InitL1Vals__Fv", SN_NOWARN)
set_name(0x80124DE4, "LoadL1Dungeon__FPcii", SN_NOWARN)
set_name(0x80124FB0, "LoadPreL1Dungeon__FPcii", SN_NOWARN)
set_name(0x80125168, "InitL5Dungeon__Fv", SN_NOWARN)
set_name(0x801251C8, "L5ClearFlags__Fv", SN_NOWARN)
set_name(0x80125214, "L5drawRoom__Fiiii", SN_NOWARN)
set_name(0x80125280, "L5checkRoom__Fiiii", SN_NOWARN)
set_name(0x80125314, "L5roomGen__Fiiiii", SN_NOWARN)
set_name(0x80125610, "L5firstRoom__Fv", SN_NOWARN)
set_name(0x801259CC, "L5GetArea__Fv", SN_NOWARN)
set_name(0x80125A2C, "L5makeDungeon__Fv", SN_NOWARN)
set_name(0x80125AB8, "L5makeDmt__Fv", SN_NOWARN)
set_name(0x80125BA0, "L5HWallOk__Fii", SN_NOWARN)
set_name(0x80125CDC, "L5VWallOk__Fii", SN_NOWARN)
set_name(0x80125E28, "L5HorizWall__Fiici", SN_NOWARN)
set_name(0x80126068, "L5VertWall__Fiici", SN_NOWARN)
set_name(0x8012629C, "L5AddWall__Fv", SN_NOWARN)
set_name(0x8012650C, "DRLG_L5GChamber__Fiiiiii", SN_NOWARN)
set_name(0x801267CC, "DRLG_L5GHall__Fiiii", SN_NOWARN)
set_name(0x80126880, "L5tileFix__Fv", SN_NOWARN)
set_name(0x80127144, "DRLG_L5Subs__Fv", SN_NOWARN)
set_name(0x8012733C, "DRLG_L5SetRoom__Fii", SN_NOWARN)
set_name(0x8012743C, "L5FillChambers__Fv", SN_NOWARN)
set_name(0x80127B28, "DRLG_L5FTVR__Fiiiii", SN_NOWARN)
set_name(0x80128078, "DRLG_L5FloodTVal__Fv", SN_NOWARN)
set_name(0x8012817C, "DRLG_L5TransFix__Fv", SN_NOWARN)
set_name(0x8012838C, "DRLG_L5DirtFix__Fv", SN_NOWARN)
set_name(0x801284E8, "DRLG_L5CornerFix__Fv", SN_NOWARN)
set_name(0x801285F8, "DRLG_L5__Fi", SN_NOWARN)
set_name(0x80128B18, "CreateL5Dungeon__FUii", SN_NOWARN)
set_name(0x8012B0BC, "DRLG_L2PlaceMiniSet__FPUciiiiii", SN_NOWARN)
set_name(0x8012B4B0, "DRLG_L2PlaceRndSet__FPUci", SN_NOWARN)
set_name(0x8012B7B0, "DRLG_L2Subs__Fv", SN_NOWARN)
set_name(0x8012B9A4, "DRLG_L2Shadows__Fv", SN_NOWARN)
set_name(0x8012BB68, "InitDungeon__Fv", SN_NOWARN)
set_name(0x8012BBC8, "DRLG_LoadL2SP__Fv", SN_NOWARN)
set_name(0x8012BC68, "DRLG_FreeL2SP__Fv", SN_NOWARN)
set_name(0x8012BC98, "DRLG_L2SetRoom__Fii", SN_NOWARN)
set_name(0x8012BD98, "DefineRoom__Fiiiii", SN_NOWARN)
set_name(0x8012BFA4, "CreateDoorType__Fii", SN_NOWARN)
set_name(0x8012C088, "PlaceHallExt__Fii", SN_NOWARN)
set_name(0x8012C0C0, "AddHall__Fiiiii", SN_NOWARN)
set_name(0x8012C198, "CreateRoom__Fiiiiiiiii", SN_NOWARN)
set_name(0x8012C820, "GetHall__FPiN40", SN_NOWARN)
set_name(0x8012C8B8, "ConnectHall__Fiiiii", SN_NOWARN)
set_name(0x8012CF20, "DoPatternCheck__Fii", SN_NOWARN)
set_name(0x8012D1D4, "L2TileFix__Fv", SN_NOWARN)
set_name(0x8012D2F8, "DL2_Cont__FUcUcUcUc", SN_NOWARN)
set_name(0x8012D378, "DL2_NumNoChar__Fv", SN_NOWARN)
set_name(0x8012D3D4, "DL2_DrawRoom__Fiiii", SN_NOWARN)
set_name(0x8012D4D8, "DL2_KnockWalls__Fiiii", SN_NOWARN)
set_name(0x8012D6A8, "DL2_FillVoids__Fv", SN_NOWARN)
set_name(0x8012E02C, "CreateDungeon__Fv", SN_NOWARN)
set_name(0x8012E338, "DRLG_L2Pass3__Fv", SN_NOWARN)
set_name(0x8012E4D0, "DRLG_L2FTVR__Fiiiii", SN_NOWARN)
set_name(0x8012EA18, "DRLG_L2FloodTVal__Fv", SN_NOWARN)
set_name(0x8012EB1C, "DRLG_L2TransFix__Fv", SN_NOWARN)
set_name(0x8012ED2C, "L2DirtFix__Fv", SN_NOWARN)
set_name(0x8012EE8C, "L2LockoutFix__Fv", SN_NOWARN)
set_name(0x8012F218, "L2DoorFix__Fv", SN_NOWARN)
set_name(0x8012F2C8, "DRLG_L2__Fi", SN_NOWARN)
set_name(0x8012FD14, "DRLG_InitL2Vals__Fv", SN_NOWARN)
set_name(0x8012FD1C, "LoadL2Dungeon__FPcii", SN_NOWARN)
set_name(0x8012FF0C, "LoadPreL2Dungeon__FPcii", SN_NOWARN)
set_name(0x801300F8, "CreateL2Dungeon__FUii", SN_NOWARN)
set_name(0x80130AB0, "InitL3Dungeon__Fv", SN_NOWARN)
set_name(0x80130B38, "DRLG_L3FillRoom__Fiiii", SN_NOWARN)
set_name(0x80130D94, "DRLG_L3CreateBlock__Fiiii", SN_NOWARN)
set_name(0x80131030, "DRLG_L3FloorArea__Fiiii", SN_NOWARN)
set_name(0x80131098, "DRLG_L3FillDiags__Fv", SN_NOWARN)
set_name(0x801311C8, "DRLG_L3FillSingles__Fv", SN_NOWARN)
set_name(0x80131294, "DRLG_L3FillStraights__Fv", SN_NOWARN)
set_name(0x80131658, "DRLG_L3Edges__Fv", SN_NOWARN)
set_name(0x80131698, "DRLG_L3GetFloorArea__Fv", SN_NOWARN)
set_name(0x801316E8, "DRLG_L3MakeMegas__Fv", SN_NOWARN)
set_name(0x8013182C, "DRLG_L3River__Fv", SN_NOWARN)
set_name(0x8013226C, "DRLG_L3SpawnEdge__FiiPi", SN_NOWARN)
set_name(0x801324F8, "DRLG_L3Spawn__FiiPi", SN_NOWARN)
set_name(0x8013270C, "DRLG_L3Pool__Fv", SN_NOWARN)
set_name(0x80132960, "DRLG_L3PoolFix__Fv", SN_NOWARN)
set_name(0x80132A94, "DRLG_L3PlaceMiniSet__FPCUciiiiii", SN_NOWARN)
set_name(0x80132E14, "DRLG_L3PlaceRndSet__FPCUci", SN_NOWARN)
set_name(0x8013315C, "WoodVertU__Fii", SN_NOWARN)
set_name(0x80133208, "WoodVertD__Fii", SN_NOWARN)
set_name(0x801332A4, "WoodHorizL__Fii", SN_NOWARN)
set_name(0x80133338, "WoodHorizR__Fii", SN_NOWARN)
set_name(0x801333BC, "AddFenceDoors__Fv", SN_NOWARN)
set_name(0x801334A0, "FenceDoorFix__Fv", SN_NOWARN)
set_name(0x80133694, "DRLG_L3Wood__Fv", SN_NOWARN)
set_name(0x80133E84, "DRLG_L3Anvil__Fv", SN_NOWARN)
set_name(0x801340E0, "FixL3Warp__Fv", SN_NOWARN)
set_name(0x801341C8, "FixL3HallofHeroes__Fv", SN_NOWARN)
set_name(0x8013431C, "DRLG_L3LockRec__Fii", SN_NOWARN)
set_name(0x801343B8, "DRLG_L3Lockout__Fv", SN_NOWARN)
set_name(0x80134478, "DRLG_L3__Fi", SN_NOWARN)
set_name(0x80134B98, "DRLG_L3Pass3__Fv", SN_NOWARN)
set_name(0x80134D3C, "CreateL3Dungeon__FUii", SN_NOWARN)
set_name(0x80134E50, "LoadL3Dungeon__FPcii", SN_NOWARN)
set_name(0x80135074, "LoadPreL3Dungeon__FPcii", SN_NOWARN)
set_name(0x80136EC0, "DRLG_L4Shadows__Fv", SN_NOWARN)
set_name(0x80136F84, "InitL4Dungeon__Fv", SN_NOWARN)
set_name(0x80137020, "DRLG_LoadL4SP__Fv", SN_NOWARN)
set_name(0x801370C4, "DRLG_FreeL4SP__Fv", SN_NOWARN)
set_name(0x801370EC, "DRLG_L4SetSPRoom__Fii", SN_NOWARN)
set_name(0x801371EC, "L4makeDmt__Fv", SN_NOWARN)
set_name(0x80137290, "L4HWallOk__Fii", SN_NOWARN)
set_name(0x801373E0, "L4VWallOk__Fii", SN_NOWARN)
set_name(0x8013755C, "L4HorizWall__Fiii", SN_NOWARN)
set_name(0x8013772C, "L4VertWall__Fiii", SN_NOWARN)
set_name(0x801378F4, "L4AddWall__Fv", SN_NOWARN)
set_name(0x80137DD4, "L4tileFix__Fv", SN_NOWARN)
set_name(0x80139FBC, "DRLG_L4Subs__Fv", SN_NOWARN)
set_name(0x8013A194, "L4makeDungeon__Fv", SN_NOWARN)
set_name(0x8013A3CC, "uShape__Fv", SN_NOWARN)
set_name(0x8013A670, "GetArea__Fv", SN_NOWARN)
set_name(0x8013A6CC, "L4drawRoom__Fiiii", SN_NOWARN)
set_name(0x8013A734, "L4checkRoom__Fiiii", SN_NOWARN)
set_name(0x8013A7D0, "L4roomGen__Fiiiii", SN_NOWARN)
set_name(0x8013AACC, "L4firstRoom__Fv", SN_NOWARN)
set_name(0x8013ACE8, "L4SaveQuads__Fv", SN_NOWARN)
set_name(0x8013AD88, "DRLG_L4SetRoom__FPUcii", SN_NOWARN)
set_name(0x8013AE5C, "DRLG_LoadDiabQuads__FUc", SN_NOWARN)
set_name(0x8013AFD4, "DRLG_L4PlaceMiniSet__FPCUciiiiii", SN_NOWARN)
set_name(0x8013B3EC, "DRLG_L4FTVR__Fiiiii", SN_NOWARN)
set_name(0x8013B934, "DRLG_L4FloodTVal__Fv", SN_NOWARN)
set_name(0x8013BA38, "IsDURWall__Fc", SN_NOWARN)
set_name(0x8013BA68, "IsDLLWall__Fc", SN_NOWARN)
set_name(0x8013BA98, "DRLG_L4TransFix__Fv", SN_NOWARN)
set_name(0x8013BDF0, "DRLG_L4Corners__Fv", SN_NOWARN)
set_name(0x8013BE84, "L4FixRim__Fv", SN_NOWARN)
set_name(0x8013BEC0, "DRLG_L4GeneralFix__Fv", SN_NOWARN)
set_name(0x8013BF64, "DRLG_L4__Fi", SN_NOWARN)
set_name(0x8013C860, "DRLG_L4Pass3__Fv", SN_NOWARN)
set_name(0x8013CA04, "CreateL4Dungeon__FUii", SN_NOWARN)
set_name(0x8013CAE4, "ObjIndex__Fii", SN_NOWARN)
set_name(0x8013CB98, "AddSKingObjs__Fv", SN_NOWARN)
set_name(0x8013CCC8, "AddSChamObjs__Fv", SN_NOWARN)
set_name(0x8013CD44, "AddVileObjs__Fv", SN_NOWARN)
set_name(0x8013CDF0, "DRLG_SetMapTrans__FPc", SN_NOWARN)
set_name(0x8013CEB4, "LoadSetMap__Fv", SN_NOWARN)
set_name(0x8013D1DC, "CM_QuestToBitPattern__Fi", SN_NOWARN)
set_name(0x8013D2B4, "CM_ShowMonsterList__Fii", SN_NOWARN)
set_name(0x8013D32C, "CM_ChooseMonsterList__FiUl", SN_NOWARN)
set_name(0x8013D3CC, "NoUiListChoose__FiUl", SN_NOWARN)
set_name(0x8013D3D4, "ChooseTask__FP4TASK", SN_NOWARN)
set_name(0x8013D4DC, "ShowTask__FP4TASK", SN_NOWARN)
set_name(0x8013D70C, "GetListsAvailable__FiUlPUc", SN_NOWARN)
set_name(0x8013D830, "GetDown__C4CPad", SN_NOWARN)
set_name(0x8013D858, "AddL1Door__Fiiii", SN_NOWARN)
set_name(0x8013D990, "AddSCambBook__Fi", SN_NOWARN)
set_name(0x8013DA30, "AddChest__Fii", SN_NOWARN)
set_name(0x8013DC10, "AddL2Door__Fiiii", SN_NOWARN)
set_name(0x8013DD5C, "AddL3Door__Fiiii", SN_NOWARN)
set_name(0x8013DDF0, "AddSarc__Fi", SN_NOWARN)
set_name(0x8013DECC, "AddFlameTrap__Fi", SN_NOWARN)
set_name(0x8013DF28, "AddTrap__Fii", SN_NOWARN)
set_name(0x8013E020, "AddArmorStand__Fi", SN_NOWARN)
set_name(0x8013E0A8, "AddObjLight__Fii", SN_NOWARN)
set_name(0x8013E150, "AddBarrel__Fii", SN_NOWARN)
set_name(0x8013E200, "AddShrine__Fi", SN_NOWARN)
set_name(0x8013E350, "AddBookcase__Fi", SN_NOWARN)
set_name(0x8013E3A8, "AddBookstand__Fi", SN_NOWARN)
set_name(0x8013E3F0, "AddBloodFtn__Fi", SN_NOWARN)
set_name(0x8013E438, "AddPurifyingFountain__Fi", SN_NOWARN)
set_name(0x8013E514, "AddGoatShrine__Fi", SN_NOWARN)
set_name(0x8013E55C, "AddCauldron__Fi", SN_NOWARN)
set_name(0x8013E5A4, "AddMurkyFountain__Fi", SN_NOWARN)
set_name(0x8013E680, "AddTearFountain__Fi", SN_NOWARN)
set_name(0x8013E6C8, "AddDecap__Fi", SN_NOWARN)
set_name(0x8013E744, "AddVilebook__Fi", SN_NOWARN)
set_name(0x8013E794, "AddMagicCircle__Fi", SN_NOWARN)
set_name(0x8013E808, "AddBrnCross__Fi", SN_NOWARN)
set_name(0x8013E850, "AddPedistal__Fi", SN_NOWARN)
set_name(0x8013E8C4, "AddStoryBook__Fi", SN_NOWARN)
set_name(0x8013EA94, "AddWeaponRack__Fi", SN_NOWARN)
set_name(0x8013EB1C, "AddTorturedBody__Fi", SN_NOWARN)
set_name(0x8013EB98, "AddFlameLvr__Fi", SN_NOWARN)
set_name(0x8013EBD8, "GetRndObjLoc__FiRiT1", SN_NOWARN)
set_name(0x8013ECE4, "AddMushPatch__Fv", SN_NOWARN)
set_name(0x8013EE08, "AddSlainHero__Fv", SN_NOWARN)
set_name(0x8013EE48, "RndLocOk__Fii", SN_NOWARN)
set_name(0x8013EF2C, "TrapLocOk__Fii", SN_NOWARN)
set_name(0x8013EF94, "RoomLocOk__Fii", SN_NOWARN)
set_name(0x8013F02C, "InitRndLocObj__Fiii", SN_NOWARN)
set_name(0x8013F1D8, "InitRndLocBigObj__Fiii", SN_NOWARN)
set_name(0x8013F3D0, "InitRndLocObj5x5__Fiii", SN_NOWARN)
set_name(0x8013F4F8, "SetMapObjects__FPUcii", SN_NOWARN)
set_name(0x8013F798, "ClrAllObjects__Fv", SN_NOWARN)
set_name(0x8013F888, "AddTortures__Fv", SN_NOWARN)
set_name(0x8013FA14, "AddCandles__Fv", SN_NOWARN)
set_name(0x8013FA9C, "AddTrapLine__Fiiii", SN_NOWARN)
set_name(0x8013FE38, "AddLeverObj__Fiiiiiiii", SN_NOWARN)
set_name(0x8013FE40, "AddBookLever__Fiiiiiiiii", SN_NOWARN)
set_name(0x80140054, "InitRndBarrels__Fv", SN_NOWARN)
set_name(0x801401F0, "AddL1Objs__Fiiii", SN_NOWARN)
set_name(0x80140328, "AddL2Objs__Fiiii", SN_NOWARN)
set_name(0x8014043C, "AddL3Objs__Fiiii", SN_NOWARN)
set_name(0x8014053C, "WallTrapLocOk__Fii", SN_NOWARN)
set_name(0x801405A4, "TorchLocOK__Fii", SN_NOWARN)
set_name(0x801405E4, "AddL2Torches__Fv", SN_NOWARN)
set_name(0x80140798, "AddObjTraps__Fv", SN_NOWARN)
set_name(0x80140B10, "AddChestTraps__Fv", SN_NOWARN)
set_name(0x80140C60, "LoadMapObjects__FPUciiiiiii", SN_NOWARN)
set_name(0x80140DCC, "AddDiabObjs__Fv", SN_NOWARN)
set_name(0x80140F20, "AddStoryBooks__Fv", SN_NOWARN)
set_name(0x80141070, "AddHookedBodies__Fi", SN_NOWARN)
set_name(0x80141268, "AddL4Goodies__Fv", SN_NOWARN)
set_name(0x80141318, "AddLazStand__Fv", SN_NOWARN)
set_name(0x801414AC, "InitObjects__Fv", SN_NOWARN)
set_name(0x80141AF8, "PreObjObjAddSwitch__Fiiii", SN_NOWARN)
set_name(0x80141E00, "FillSolidBlockTbls__Fv", SN_NOWARN)
set_name(0x80141FAC, "SetDungeonMicros__Fv", SN_NOWARN)
set_name(0x80141FB4, "DRLG_InitTrans__Fv", SN_NOWARN)
set_name(0x80142028, "DRLG_RectTrans__Fiiii", SN_NOWARN)
set_name(0x801420A8, "DRLG_CopyTrans__Fiiii", SN_NOWARN)
set_name(0x80142110, "DRLG_ListTrans__FiPUc", SN_NOWARN)
set_name(0x80142184, "DRLG_AreaTrans__FiPUc", SN_NOWARN)
set_name(0x80142214, "DRLG_InitSetPC__Fv", SN_NOWARN)
set_name(0x8014222C, "DRLG_SetPC__Fv", SN_NOWARN)
set_name(0x801422DC, "Make_SetPC__Fiiii", SN_NOWARN)
set_name(0x8014237C, "DRLG_WillThemeRoomFit__FiiiiiPiT5", SN_NOWARN)
set_name(0x80142644, "DRLG_CreateThemeRoom__Fi", SN_NOWARN)
set_name(0x8014364C, "DRLG_PlaceThemeRooms__FiiiiUc", SN_NOWARN)
set_name(0x801438F4, "DRLG_HoldThemeRooms__Fv", SN_NOWARN)
set_name(0x80143AA8, "SkipThemeRoom__Fii", SN_NOWARN)
set_name(0x80143B74, "InitLevels__Fv", SN_NOWARN)
set_name(0x80143C78, "TFit_Shrine__Fi", SN_NOWARN)
set_name(0x80143EE8, "TFit_Obj5__Fi", SN_NOWARN)
set_name(0x801440BC, "TFit_SkelRoom__Fi", SN_NOWARN)
set_name(0x8014416C, "TFit_GoatShrine__Fi", SN_NOWARN)
set_name(0x80144204, "CheckThemeObj3__Fiiii", SN_NOWARN)
set_name(0x80144354, "TFit_Obj3__Fi", SN_NOWARN)
set_name(0x80144414, "CheckThemeReqs__Fi", SN_NOWARN)
set_name(0x801444E0, "SpecialThemeFit__Fii", SN_NOWARN)
set_name(0x801446BC, "CheckThemeRoom__Fi", SN_NOWARN)
set_name(0x80144968, "InitThemes__Fv", SN_NOWARN)
set_name(0x80144CB4, "HoldThemeRooms__Fv", SN_NOWARN)
set_name(0x80144D9C, "PlaceThemeMonsts__Fii", SN_NOWARN)
set_name(0x80144F40, "Theme_Barrel__Fi", SN_NOWARN)
set_name(0x801450B8, "Theme_Shrine__Fi", SN_NOWARN)
set_name(0x801451A0, "Theme_MonstPit__Fi", SN_NOWARN)
set_name(0x801452CC, "Theme_SkelRoom__Fi", SN_NOWARN)
set_name(0x801455D0, "Theme_Treasure__Fi", SN_NOWARN)
set_name(0x80145834, "Theme_Library__Fi", SN_NOWARN)
set_name(0x80145AA4, "Theme_Torture__Fi", SN_NOWARN)
set_name(0x80145C14, "Theme_BloodFountain__Fi", SN_NOWARN)
set_name(0x80145C88, "Theme_Decap__Fi", SN_NOWARN)
set_name(0x80145DF8, "Theme_PurifyingFountain__Fi", SN_NOWARN)
set_name(0x80145E6C, "Theme_ArmorStand__Fi", SN_NOWARN)
set_name(0x80146004, "Theme_GoatShrine__Fi", SN_NOWARN)
set_name(0x80146154, "Theme_Cauldron__Fi", SN_NOWARN)
set_name(0x801461C8, "Theme_MurkyFountain__Fi", SN_NOWARN)
set_name(0x8014623C, "Theme_TearFountain__Fi", SN_NOWARN)
set_name(0x801462B0, "Theme_BrnCross__Fi", SN_NOWARN)
set_name(0x80146428, "Theme_WeaponRack__Fi", SN_NOWARN)
set_name(0x801465C0, "UpdateL4Trans__Fv", SN_NOWARN)
set_name(0x80146620, "CreateThemeRooms__Fv", SN_NOWARN)
set_name(0x80146804, "InitPortals__Fv", SN_NOWARN)
set_name(0x80146864, "InitQuests__Fv", SN_NOWARN)
set_name(0x80146C68, "DrawButcher__Fv", SN_NOWARN)
set_name(0x80146CAC, "DrawSkelKing__Fiii", SN_NOWARN)
set_name(0x80146CE8, "DrawWarLord__Fii", SN_NOWARN)
set_name(0x80146DE4, "DrawSChamber__Fiii", SN_NOWARN)
set_name(0x80146F20, "DrawLTBanner__Fii", SN_NOWARN)
set_name(0x80146FFC, "DrawBlind__Fii", SN_NOWARN)
set_name(0x801470D8, "DrawBlood__Fii", SN_NOWARN)
set_name(0x801471B8, "DRLG_CheckQuests__Fii", SN_NOWARN)
set_name(0x801472F4, "InitInv__Fv", SN_NOWARN)
set_name(0x80147348, "InitAutomap__Fv", SN_NOWARN)
set_name(0x8014750C, "InitAutomapOnce__Fv", SN_NOWARN)
set_name(0x8014751C, "MonstPlace__Fii", SN_NOWARN)
set_name(0x801475D8, "InitMonsterGFX__Fi", SN_NOWARN)
set_name(0x801476B0, "PlaceMonster__Fiiii", SN_NOWARN)
set_name(0x80147750, "AddMonsterType__Fii", SN_NOWARN)
set_name(0x8014784C, "GetMonsterTypes__FUl", SN_NOWARN)
set_name(0x801478FC, "ClrAllMonsters__Fv", SN_NOWARN)
set_name(0x80147A2C, "InitLevelMonsters__Fv", SN_NOWARN)
set_name(0x80147AB0, "GetLevelMTypes__Fv", SN_NOWARN)
set_name(0x80147F18, "PlaceQuestMonsters__Fv", SN_NOWARN)
set_name(0x801482DC, "LoadDiabMonsts__Fv", SN_NOWARN)
set_name(0x801483EC, "PlaceGroup__FiiUci", SN_NOWARN)
set_name(0x8014899C, "SetMapMonsters__FPUcii", SN_NOWARN)
set_name(0x80148BC0, "InitMonsters__Fv", SN_NOWARN)
set_name(0x80148F70, "PlaceUniqueMonst__Fiii", SN_NOWARN)
set_name(0x801496DC, "PlaceUniques__Fv", SN_NOWARN)
set_name(0x8014986C, "PreSpawnSkeleton__Fv", SN_NOWARN)
set_name(0x801499AC, "encode_enemy__Fi", SN_NOWARN)
set_name(0x80149A04, "decode_enemy__Fii", SN_NOWARN)
set_name(0x80149B14, "IsGoat__Fi", SN_NOWARN)
set_name(0x80149B40, "InitMissiles__Fv", SN_NOWARN)
set_name(0x80149D08, "InitNoTriggers__Fv", SN_NOWARN)
set_name(0x80149D2C, "InitTownTriggers__Fv", SN_NOWARN)
set_name(0x8014A074, "InitL1Triggers__Fv", SN_NOWARN)
set_name(0x8014A188, "InitL2Triggers__Fv", SN_NOWARN)
set_name(0x8014A318, "InitL3Triggers__Fv", SN_NOWARN)
set_name(0x8014A474, "InitL4Triggers__Fv", SN_NOWARN)
set_name(0x8014A688, "InitSKingTriggers__Fv", SN_NOWARN)
set_name(0x8014A6D4, "InitSChambTriggers__Fv", SN_NOWARN)
set_name(0x8014A720, "InitPWaterTriggers__Fv", SN_NOWARN)
set_name(0x8014A76C, "InitVPTriggers__Fv", SN_NOWARN)
set_name(0x8014A7B8, "InitStores__Fv", SN_NOWARN)
set_name(0x8014A838, "SetupTownStores__Fv", SN_NOWARN)
set_name(0x8014A9C8, "DeltaLoadLevel__Fv", SN_NOWARN)
set_name(0x8014B2A0, "SmithItemOk__Fi", SN_NOWARN)
set_name(0x8014B304, "RndSmithItem__Fi", SN_NOWARN)
set_name(0x8014B410, "WitchItemOk__Fi", SN_NOWARN)
set_name(0x8014B550, "RndWitchItem__Fi", SN_NOWARN)
set_name(0x8014B650, "BubbleSwapItem__FP10ItemStructT0", SN_NOWARN)
set_name(0x8014B734, "SortWitch__Fv", SN_NOWARN)
set_name(0x8014B854, "RndBoyItem__Fi", SN_NOWARN)
set_name(0x8014B978, "HealerItemOk__Fi", SN_NOWARN)
set_name(0x8014BB2C, "RndHealerItem__Fi", SN_NOWARN)
set_name(0x8014BC2C, "RecreatePremiumItem__Fiiii", SN_NOWARN)
set_name(0x8014BCF4, "RecreateWitchItem__Fiiii", SN_NOWARN)
set_name(0x8014BE4C, "RecreateSmithItem__Fiiii", SN_NOWARN)
set_name(0x8014BEE8, "RecreateHealerItem__Fiiii", SN_NOWARN)
set_name(0x8014BFA8, "RecreateBoyItem__Fiiii", SN_NOWARN)
set_name(0x8014C06C, "RecreateTownItem__FiiUsii", SN_NOWARN)
set_name(0x8014C0F8, "SpawnSmith__Fi", SN_NOWARN)
set_name(0x8014C294, "SpawnWitch__Fi", SN_NOWARN)
set_name(0x8014C600, "SpawnHealer__Fi", SN_NOWARN)
set_name(0x8014C91C, "SpawnBoy__Fi", SN_NOWARN)
set_name(0x8014CA70, "SortSmith__Fv", SN_NOWARN)
set_name(0x8014CB84, "SortHealer__Fv", SN_NOWARN)
set_name(0x8014CCA4, "RecreateItem__FiiUsii", SN_NOWARN)
set_name(0x80121AA0, "themeLoc", SN_NOWARN)
set_name(0x801221E8, "OldBlock", SN_NOWARN)
set_name(0x801221F8, "L5dungeon", SN_NOWARN)
set_name(0x80121E88, "SPATS", SN_NOWARN)
set_name(0x80121F8C, "BSTYPES", SN_NOWARN)
set_name(0x8012205C, "L5BTYPES", SN_NOWARN)
set_name(0x8012212C, "STAIRSUP", SN_NOWARN)
set_name(0x80122150, "L5STAIRSUP", SN_NOWARN)
set_name(0x80122174, "STAIRSDOWN", SN_NOWARN)
set_name(0x80122190, "LAMPS", SN_NOWARN)
set_name(0x8012219C, "PWATERIN", SN_NOWARN)
set_name(0x80121A90, "L5ConvTbl", SN_NOWARN)
set_name(0x8012A428, "RoomList", SN_NOWARN)
set_name(0x8012AA7C, "predungeon", SN_NOWARN)
set_name(0x80128BB8, "Dir_Xadd", SN_NOWARN)
set_name(0x80128BCC, "Dir_Yadd", SN_NOWARN)
set_name(0x80128BE0, "SPATSL2", SN_NOWARN)
set_name(0x80128BF0, "BTYPESL2", SN_NOWARN)
set_name(0x80128C94, "BSTYPESL2", SN_NOWARN)
set_name(0x80128D38, "VARCH1", SN_NOWARN)
set_name(0x80128D4C, "VARCH2", SN_NOWARN)
set_name(0x80128D60, "VARCH3", SN_NOWARN)
set_name(0x80128D74, "VARCH4", SN_NOWARN)
set_name(0x80128D88, "VARCH5", SN_NOWARN)
set_name(0x80128D9C, "VARCH6", SN_NOWARN)
set_name(0x80128DB0, "VARCH7", SN_NOWARN)
set_name(0x80128DC4, "VARCH8", SN_NOWARN)
set_name(0x80128DD8, "VARCH9", SN_NOWARN)
set_name(0x80128DEC, "VARCH10", SN_NOWARN)
set_name(0x80128E00, "VARCH11", SN_NOWARN)
set_name(0x80128E14, "VARCH12", SN_NOWARN)
set_name(0x80128E28, "VARCH13", SN_NOWARN)
set_name(0x80128E3C, "VARCH14", SN_NOWARN)
set_name(0x80128E50, "VARCH15", SN_NOWARN)
set_name(0x80128E64, "VARCH16", SN_NOWARN)
set_name(0x80128E78, "VARCH17", SN_NOWARN)
set_name(0x80128E88, "VARCH18", SN_NOWARN)
set_name(0x80128E98, "VARCH19", SN_NOWARN)
set_name(0x80128EA8, "VARCH20", SN_NOWARN)
set_name(0x80128EB8, "VARCH21", SN_NOWARN)
set_name(0x80128EC8, "VARCH22", SN_NOWARN)
set_name(0x80128ED8, "VARCH23", SN_NOWARN)
set_name(0x80128EE8, "VARCH24", SN_NOWARN)
set_name(0x80128EF8, "VARCH25", SN_NOWARN)
set_name(0x80128F0C, "VARCH26", SN_NOWARN)
set_name(0x80128F20, "VARCH27", SN_NOWARN)
set_name(0x80128F34, "VARCH28", SN_NOWARN)
set_name(0x80128F48, "VARCH29", SN_NOWARN)
set_name(0x80128F5C, "VARCH30", SN_NOWARN)
set_name(0x80128F70, "VARCH31", SN_NOWARN)
set_name(0x80128F84, "VARCH32", SN_NOWARN)
set_name(0x80128F98, "VARCH33", SN_NOWARN)
set_name(0x80128FAC, "VARCH34", SN_NOWARN)
set_name(0x80128FC0, "VARCH35", SN_NOWARN)
set_name(0x80128FD4, "VARCH36", SN_NOWARN)
set_name(0x80128FE8, "VARCH37", SN_NOWARN)
set_name(0x80128FFC, "VARCH38", SN_NOWARN)
set_name(0x80129010, "VARCH39", SN_NOWARN)
set_name(0x80129024, "VARCH40", SN_NOWARN)
set_name(0x80129038, "HARCH1", SN_NOWARN)
set_name(0x80129048, "HARCH2", SN_NOWARN)
set_name(0x80129058, "HARCH3", SN_NOWARN)
set_name(0x80129068, "HARCH4", SN_NOWARN)
set_name(0x80129078, "HARCH5", SN_NOWARN)
set_name(0x80129088, "HARCH6", SN_NOWARN)
set_name(0x80129098, "HARCH7", SN_NOWARN)
set_name(0x801290A8, "HARCH8", SN_NOWARN)
set_name(0x801290B8, "HARCH9", SN_NOWARN)
set_name(0x801290C8, "HARCH10", SN_NOWARN)
set_name(0x801290D8, "HARCH11", SN_NOWARN)
set_name(0x801290E8, "HARCH12", SN_NOWARN)
set_name(0x801290F8, "HARCH13", SN_NOWARN)
set_name(0x80129108, "HARCH14", SN_NOWARN)
set_name(0x80129118, "HARCH15", SN_NOWARN)
set_name(0x80129128, "HARCH16", SN_NOWARN)
set_name(0x80129138, "HARCH17", SN_NOWARN)
set_name(0x80129148, "HARCH18", SN_NOWARN)
set_name(0x80129158, "HARCH19", SN_NOWARN)
set_name(0x80129168, "HARCH20", SN_NOWARN)
set_name(0x80129178, "HARCH21", SN_NOWARN)
set_name(0x80129188, "HARCH22", SN_NOWARN)
set_name(0x80129198, "HARCH23", SN_NOWARN)
set_name(0x801291A8, "HARCH24", SN_NOWARN)
set_name(0x801291B8, "HARCH25", SN_NOWARN)
set_name(0x801291C8, "HARCH26", SN_NOWARN)
set_name(0x801291D8, "HARCH27", SN_NOWARN)
set_name(0x801291E8, "HARCH28", SN_NOWARN)
set_name(0x801291F8, "HARCH29", SN_NOWARN)
set_name(0x80129208, "HARCH30", SN_NOWARN)
set_name(0x80129218, "HARCH31", SN_NOWARN)
set_name(0x80129228, "HARCH32", SN_NOWARN)
set_name(0x80129238, "HARCH33", SN_NOWARN)
set_name(0x80129248, "HARCH34", SN_NOWARN)
set_name(0x80129258, "HARCH35", SN_NOWARN)
set_name(0x80129268, "HARCH36", SN_NOWARN)
set_name(0x80129278, "HARCH37", SN_NOWARN)
set_name(0x80129288, "HARCH38", SN_NOWARN)
set_name(0x80129298, "HARCH39", SN_NOWARN)
set_name(0x801292A8, "HARCH40", SN_NOWARN)
set_name(0x801292B8, "USTAIRS", SN_NOWARN)
set_name(0x801292DC, "DSTAIRS", SN_NOWARN)
set_name(0x80129300, "WARPSTAIRS", SN_NOWARN)
set_name(0x80129324, "CRUSHCOL", SN_NOWARN)
set_name(0x80129338, "BIG1", SN_NOWARN)
set_name(0x80129344, "BIG2", SN_NOWARN)
set_name(0x80129350, "BIG5", SN_NOWARN)
set_name(0x8012935C, "BIG8", SN_NOWARN)
set_name(0x80129368, "BIG9", SN_NOWARN)
set_name(0x80129374, "BIG10", SN_NOWARN)
set_name(0x80129380, "PANCREAS1", SN_NOWARN)
set_name(0x801293A0, "PANCREAS2", SN_NOWARN)
set_name(0x801293C0, "CTRDOOR1", SN_NOWARN)
set_name(0x801293D4, "CTRDOOR2", SN_NOWARN)
set_name(0x801293E8, "CTRDOOR3", SN_NOWARN)
set_name(0x801293FC, "CTRDOOR4", SN_NOWARN)
set_name(0x80129410, "CTRDOOR5", SN_NOWARN)
set_name(0x80129424, "CTRDOOR6", SN_NOWARN)
set_name(0x80129438, "CTRDOOR7", SN_NOWARN)
set_name(0x8012944C, "CTRDOOR8", SN_NOWARN)
set_name(0x80129460, "Patterns", SN_NOWARN)
set_name(0x80130470, "lockout", SN_NOWARN)
set_name(0x801301D0, "L3ConvTbl", SN_NOWARN)
set_name(0x801301E0, "L3UP", SN_NOWARN)
set_name(0x801301F4, "L3DOWN", SN_NOWARN)
set_name(0x80130208, "L3HOLDWARP", SN_NOWARN)
set_name(0x8013021C, "L3TITE1", SN_NOWARN)
set_name(0x80130240, "L3TITE2", SN_NOWARN)
set_name(0x80130264, "L3TITE3", SN_NOWARN)
set_name(0x80130288, "L3TITE6", SN_NOWARN)
set_name(0x801302B4, "L3TITE7", SN_NOWARN)
set_name(0x801302E0, "L3TITE8", SN_NOWARN)
set_name(0x801302F4, "L3TITE9", SN_NOWARN)
set_name(0x80130308, "L3TITE10", SN_NOWARN)
set_name(0x8013031C, "L3TITE11", SN_NOWARN)
set_name(0x80130330, "L3ISLE1", SN_NOWARN)
set_name(0x80130340, "L3ISLE2", SN_NOWARN)
set_name(0x80130350, "L3ISLE3", SN_NOWARN)
set_name(0x80130360, "L3ISLE4", SN_NOWARN)
set_name(0x80130370, "L3ISLE5", SN_NOWARN)
set_name(0x8013037C, "L3ANVIL", SN_NOWARN)
set_name(0x8013528C, "dung", SN_NOWARN)
set_name(0x8013541C, "hallok", SN_NOWARN)
set_name(0x80135430, "L4dungeon", SN_NOWARN)
set_name(0x80136D30, "L4ConvTbl", SN_NOWARN)
set_name(0x80136D40, "L4USTAIRS", SN_NOWARN)
set_name(0x80136D6C, "L4TWARP", SN_NOWARN)
set_name(0x80136D98, "L4DSTAIRS", SN_NOWARN)
set_name(0x80136DCC, "L4PENTA", SN_NOWARN)
set_name(0x80136E00, "L4PENTA2", SN_NOWARN)
set_name(0x80136E34, "L4BTYPES", SN_NOWARN)
|
1677873
|
from planemo.engine import (
engine_context,
)
from planemo.galaxy import galaxy_config
from planemo.galaxy.config import _find_test_data
from planemo.galaxy.test import (
handle_reports_and_summary,
run_in_config,
)
from planemo.runnable import (
for_paths,
RunnableType,
)
def test_runnables(ctx, runnables, original_paths=None, **kwds):
    """Return exit code indicating test or failure."""
    engine_type = kwds["engine"]
    # Runnable types that the classic run_tests.sh path can handle directly.
    directly_testable = {
        RunnableType.galaxy_tool,
        RunnableType.galaxy_datamanager,
        RunnableType.directory,
    }
    # Fall back to a test engine when a non-Galaxy engine was requested or
    # when any runnable is not directly testable by run_tests.sh.
    needs_test_engine = engine_type != "galaxy" or any(
        runnable.type not in directly_testable for runnable in runnables
    )
    if needs_test_engine:
        ctx.vlog("Using test engine type %s" % engine_type)
        with engine_context(ctx, **kwds) as engine:
            test_data = engine.test(runnables)
            ctx.vlog("engine.test returning [%s]" % test_data)
            exit_code = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
    else:
        ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
        kwds["for_tests"] = True
        if kwds.get('update_test_data'):
            # Resolve test-data targets against the original (non-copied) paths.
            non_copied_runnables = for_paths(original_paths)
            kwds['test_data_target_dir'] = _find_test_data(non_copied_runnables, **kwds)
        with galaxy_config(ctx, runnables, **kwds) as config:
            exit_code = run_in_config(ctx, config, **kwds)
    return exit_code
|
1677891
|
import numpy as np
import pandas as pd
import holoviews as hv
import colorcet as cc
from ..backend_transforms import _transfer_opts_cur_backend
from ..util import with_hv_extension
@with_hv_extension
def andrews_curves(data, class_column, samples=200, alpha=0.5,
                   width=600, height=300, cmap=None, colormap=None,
                   **kwds):
    """
    Generate a plot of Andrews curves, for visualising clusters of
    multivariate data.
    Andrews curves have the functional form:
    f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
           x_4 sin(2t) + x_5 cos(2t) + ...
    Where x coefficients correspond to the values of each dimension and t is
    linearly spaced between -pi and +pi. Each row of data then corresponds to
    a single curve.
    Parameters
    ----------
    data: DataFrame
        Data to be plotted, preferably normalized to (0.0, 1.0)
    class_column: str
        Column name containing class names
    samples: int, optional
        Number of samples to draw
    alpha: float, optional
        The transparency of the lines
    cmap/colormap: str or colormap object
        Colormap to use for groups
    Returns
    -------
    obj : HoloViews object
        The HoloViews representation of the plot.
    See Also
    --------
    pandas.plotting.parallel_coordinates : matplotlib version of this routine
    """
    t = np.linspace(-np.pi, np.pi, samples)
    vals = data.drop(class_column, axis=1).values.T
    # f(t) starts from the first coefficient; remaining terms alternate sin/cos
    # with frequency (i + 1) // 2.
    curves = np.outer(vals[0], np.ones_like(t))
    for i in range(1, len(vals)):
        ft = ((i + 1) // 2) * t
        if i % 2 == 1:
            curves += np.outer(vals[i], np.sin(ft))
        else:
            curves += np.outer(vals[i], np.cos(ft))
    # Long-form frame: one row per (curve, sample point).
    df = pd.DataFrame({'t': np.tile(np.arange(samples), curves.shape[0]),
                       'sample': np.repeat(np.arange(curves.shape[0]), curves.shape[1]),
                       'value': curves.ravel(),
                       class_column: np.repeat(data[class_column], samples)})
    labelled = ['x']
    # Fix: kwds was previously passed twice (positionally AND as **kwds),
    # which was redundant and raised TypeError if kwds contained 'labelled'.
    options = {'Overlay': dict(legend_limit=5000),
               'Curve': dict(kwds, labelled=labelled, alpha=alpha,
                             width=width, height=height)}
    dataset = hv.Dataset(df)
    groups = dataset.to(hv.Curve, 't', 'value').overlay('sample').items()
    if cmap and colormap:
        raise TypeError("Only specify one of `cmap` and `colormap`.")
    cmap = cmap or colormap or cc.palette['glasbey_category10']
    colors = hv.plotting.util.process_cmap(cmap, categorical=True, ncolors=len(groups))
    el = hv.Overlay([curve.relabel(k).options('Curve', color=c, backend='bokeh')
                     for c, (k, v) in zip(colors, groups) for curve in v]).options(options, backend='bokeh')
    el = _transfer_opts_cur_backend(el)
    return el
|
1677897
|
class iteminfo:
    """A purchase record: item code, name, unit price, quantity, and the
    quantity-based discount applied to compute a net price.

    Fixes over the original draft (which did not parse): `class` instead of
    `def`, proper `__init__`, Python 3 prints, `None` instead of `null`,
    the reserved word `in` renamed to `item_name`, `show all` renamed to
    `showall`, and `buy` now calls `cal()` before pricing.
    """

    def __init__(self):
        self.icode = 0        # item code
        self.item = None      # item name
        self.price = 0        # unit price
        self.qty = 0          # quantity purchased
        self.discount = 0     # flat discount amount
        self.netprice = 0     # price * qty - discount

    def cal(self):
        """Set the flat discount from quantity bands: 0 for <=10,
        15 for 11-19, 20 for >=20 (the >=20 check runs last and wins)."""
        if self.qty <= 10:
            self.discount = 0
        if self.qty >= 11 and self.qty <= 20:
            self.discount = 15
        if self.qty >= 20:
            self.discount = 20

    def buy(self, ic, item_name, pr, q):
        """Record a purchase, compute the discount and net price, and
        print the net price.

        :param ic: item code
        :param item_name: item name (was `in` — a reserved word)
        :param pr: unit price
        :param q: quantity
        """
        self.icode = ic
        self.item = item_name
        self.price = pr
        self.qty = q
        self.cal()
        self.netprice = self.price * self.qty - self.discount
        print("net price:", self.netprice)

    def showall(self):
        """Print every field of the record."""
        print('icode:', self.icode)
        print('item:', self.item)
        print('price:', self.price)
        print('qty:', self.qty)
        print('discount:', self.discount)
        print('netprice:', self.netprice)

# `x = new` was not valid Python; create the instance the draft intended.
x = iteminfo()
|
1677917
|
from requests import get
import json
class Paginator:
    """
    Paginator for moving through Partial collections.
    It can move forwards, backwards or can jump to specific page
    """
    def __init__(self, response, base_url='http://localhost:8080'):
        # response: parsed hydra PartialCollection JSON (dict). The
        # navigation helpers expect a 'view' key with '@id'/'next'/
        # 'previous'/'first'/'last' page URLs.
        self.response = response
        self.base_url = base_url

    def initialize_forward(self):
        """
        Initializes the paginator to move forwards.
        Yields a dict with the page's 'members' and a 'pages_ahead' flag.
        :returns: Iterator
        """
        view = self.response['view']
        present_page = view['@id']
        yield_page = {"pages_ahead": True}
        while True:
            self.response = get(self.base_url + present_page).json()
            yield_page['members'] = self.response['members']
            # NOTE(review): 'view' here still refers to the previous
            # response, so the 'last' check lags one fetch — preserved as-is.
            if 'last' in view and view['last'] == present_page:
                yield_page['pages_ahead'] = False
                yield yield_page
            view = self.response['view']
            if 'next' in view:
                yield_page['pages_ahead'] = True
                present_page = view['next']
            else:
                yield_page['members'] = self.response['members']
                yield yield_page

    def initialize_backward(self):
        """
        Initializes the pagintor to move backwards.
        Yields a dict with the page's 'members' and a 'pages_behind' flag.
        :returns: Iterator
        """
        view = self.response['view']
        present_page = view['@id']
        yield_page = {"pages_behind": False}
        while True:
            self.response = get(self.base_url + present_page).json()
            yield_page['members'] = self.response['members']
            if 'first' in view and view['first'] == present_page:
                yield_page['pages_behind'] = False
                yield yield_page
            view = self.response['view']
            if 'previous' in view:
                yield_page['pages_behind'] = True
                present_page = view['previous']
            else:
                yield_page['members'] = self.response['members']
                yield yield_page

    def jump_to_page(self, page):
        """
        Jumps to a specified page.
        :params page: Page number to jump
        :returns members: Members of collection of that page.
        """
        try:
            url = self.base_url + self.response['@id'] + '?page=' + str(page)
            response = get(url).json()
            return response['members']
        except KeyError:
            print('No such page exists.')
            raise

    def jump_to_last_page(self):
        """
        Jumps to Last page of Collection
        :returns members of last page of Partial Collection
        """
        try:
            url = self.base_url + self.response['view']['last']
            response = get(url).json()
            return response['members']
        except KeyError:
            print("No last item in view")
            raise

    def total_items(self):
        """returns total number of items of collection
        :return: Total number of items in collection
        """
        try:
            return self.response['totalItems']
        except KeyError:
            print("No totalItem key provided")
            raise

    def total_pages(self):
        """
        Returns total Number of pages in the Collection
        :returns: total number of pages (as a string) in the Partial Collection
        """
        try:
            last_page_url = self.response['view']['last']
            index_page = last_page_url.index("page=")
            # Fix: slice to the end of the URL instead of taking a single
            # character, so collections with 10+ pages report correctly.
            # NOTE(review): str.index raises ValueError (uncaught) when
            # "page=" is absent — unchanged from the original behavior.
            return last_page_url[index_page + 5:]
        except KeyError:
            print("Unable to find last page")
            raise
|
1677925
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
from rlpytorch import ArgsProvider, add_err
from rlpytorch.trainer import topk_accuracy
class MultiplePrediction:
    """Trainer method that supervises a model's multiple policy heads
    against offline actions, weighting later heads down by 1/(i+1).

    NOTE(review): written against a legacy PyTorch API (``Variable``,
    ``tensor.data[0]``) — keep that torch version in mind when modifying.
    """
    def __init__(self):
        # Registers the --multipred_no_backprop flag; when set, update()
        # computes losses/stats but skips the backward pass.
        self.args = ArgsProvider(
            call_from = self,
            define_args = [
                ("multipred_no_backprop", dict(action="store_true")),
            ],
        )
        # NLL expects log-probabilities; update() takes log() of the head
        # outputs, implying "pis" are probabilities.
        self.policy_loss = nn.NLLLoss().cuda()
        # Declared but unused in this class's visible code.
        self.value_loss = nn.MSELoss().cuda()
    def update(self, mi, batch, stats):
        ''' Update given batch '''
        # Current timestep.
        state_curr = mi["model"](batch.hist(0))
        total_loss = None
        # Small epsilon keeps log() finite for zero-probability entries.
        eps = 1e-6
        # Offline (ground-truth) actions; column i targets prediction head i.
        # Assumes shape (batch, num_heads) — TODO confirm.
        targets = batch.hist(0)["offline_a"]
        for i, pred in enumerate(state_curr["pis"]):
            if i == 0:
                # Accuracy is only tracked for the first (primary) head.
                prec1, prec5 = topk_accuracy(pred.data, targets[:, i].contiguous(), topk=(1, 5))
                stats["top1_acc"].feed(prec1[0])
                stats["top5_acc"].feed(prec5[0])
            # backward.
            loss = self.policy_loss((pred + eps).log(), Variable(targets[:, i]))
            stats["loss" + str(i)].feed(loss.data[0])
            # Later heads are down-weighted by 1/(i+1) in the total loss.
            total_loss = add_err(total_loss, loss / (i + 1))
        stats["total_loss"].feed(total_loss.data[0])
        if not self.args.multipred_no_backprop:
            total_loss.backward()
|
1677937
|
import unittest
import numpy as np
import pandas as pd
import os
import sys
from mastml.datasets import LocalDatasets
sys.path.insert(0, os.path.abspath('../../../'))
from mastml.feature_selectors import NoSelect, EnsembleModelFeatureSelector, PearsonSelector, MASTMLFeatureSelector
from sklearn.ensemble import RandomForestRegressor
import mastml
mastml_path = mastml.__path__._path[0]
class TestSelectors(unittest.TestCase):
    """Smoke tests for mastml feature selectors.

    Each test builds a random X/y, runs a selector's evaluate() in the
    current working directory, checks the selected shape, and removes the
    report files the selector writes as a side effect.
    """
    def test_noselect(self):
        # NoSelect is a pass-through: all 10 features should survive.
        X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(50, 10)))
        y = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        selector = NoSelect()
        Xselect = selector.evaluate(X=X, y=y, savepath=os.getcwd())
        self.assertEqual(Xselect.shape, (50, 10))
        return
    def test_ensembleselector(self):
        # Selects the top 5 features by RandomForest importance and writes
        # an importances report plus selected_features.txt to cwd.
        X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(50, 10)))
        y = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
        model = RandomForestRegressor()
        selector = EnsembleModelFeatureSelector(model=model, n_features_to_select=5)
        Xselect = selector.evaluate(X=X, y=y, savepath=os.getcwd())
        self.assertEqual(Xselect.shape, (50, 5))
        self.assertTrue(os.path.exists('EnsembleModelFeatureSelector_feature_importances.xlsx'))
        os.remove('EnsembleModelFeatureSelector_feature_importances.xlsx')
        os.remove('selected_features.txt')
        return
    def test_pearsonselector(self):
        # Correlation-based selection; also exercises the separate
        # evaluate() + transform() path and the four correlation reports.
        X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(10, 10)))
        y = pd.Series(np.random.uniform(low=0.0, high=100, size=(10,)))
        selector = PearsonSelector(threshold_between_features=0.3,
                                   threshold_with_target=0.3,
                                   flag_highly_correlated_features=True,
                                   n_features_to_select=3)
        selector.evaluate(X=X, y=y, savepath=os.getcwd())
        Xselect = selector.transform(X=X)
        self.assertEqual(Xselect.shape, (10, 3))
        self.assertTrue(os.path.exists('PearsonSelector_fullcorrelationmatrix.xlsx'))
        self.assertTrue(os.path.exists('PearsonSelector_highlycorrelatedfeatures.xlsx'))
        self.assertTrue(os.path.exists('PearsonSelector_highlycorrelatedfeaturesflagged.xlsx'))
        self.assertTrue(os.path.exists('PearsonSelector_highlycorrelatedwithtarget.xlsx'))
        os.remove('PearsonSelector_fullcorrelationmatrix.xlsx')
        os.remove('PearsonSelector_highlycorrelatedfeatures.xlsx')
        os.remove('PearsonSelector_highlycorrelatedfeaturesflagged.xlsx')
        os.remove('PearsonSelector_highlycorrelatedwithtarget.xlsx')
        os.remove('selected_features.txt')
        return
    def test_mastmlselector(self):
        # Forward selection with feature index 1 pre-pinned via
        # manually_selected_features; expects 2 features total.
        X = pd.DataFrame(np.random.uniform(low=0.0, high=100, size=(10, 10)))
        y = pd.Series(np.random.uniform(low=0.0, high=100, size=(10,)))
        model = RandomForestRegressor()
        selector = MASTMLFeatureSelector(model=model, n_features_to_select=2, cv=None, manually_selected_features=[1])
        selector.evaluate(X=X, y=y, savepath=os.getcwd())
        Xselect = selector.transform(X=X)
        self.assertEqual(Xselect.shape, (10, 2))
        self.assertTrue(os.path.exists(os.path.join(os.getcwd(), 'MASTMLFeatureSelector_featureselection_data.xlsx')))
        os.remove(os.path.join(os.getcwd(), 'MASTMLFeatureSelector_featureselection_data.xlsx'))
        os.remove('selected_features.txt')
        return
    def test_featureselector_with_random_score(self):
        # Uses the bundled figshare dataset (408 rows — TODO confirm) and
        # adds random dummy features as an importance baseline.
        target = 'E_regression.1'
        extra_columns = ['Material compositions 1', 'Material compositions 2', 'Hop activation barrier', 'E_regression']
        d = LocalDatasets(file_path=os.path.join(mastml_path, 'data/figshare_7418492/All_Model_Data.xlsx'),
                          target=target,
                          extra_columns=extra_columns,
                          group_column='Material compositions 1',
                          testdata_columns=None,
                          as_frame=True)
        data_dict = d.load_data()
        X = data_dict['X']
        y = data_dict['y']
        model = RandomForestRegressor()
        selector = EnsembleModelFeatureSelector(model=model, n_features_to_select=100, n_random_dummy= 100)
        Xselect = selector.evaluate(X=X, y=y, savepath=os.getcwd())
        self.assertEqual(Xselect.shape, (408, 100))
        self.assertTrue(os.path.exists('EnsembleModelFeatureSelector_feature_importances.xlsx'))
        os.remove('EnsembleModelFeatureSelector_feature_importances.xlsx')
        os.remove('selected_features.txt')
        return
    def test_featureselector_with_permutated_score(self):
        # Same as above but also adds permuted-copy dummy features.
        target = 'E_regression.1'
        extra_columns = ['Material compositions 1', 'Material compositions 2', 'Hop activation barrier', 'E_regression']
        d = LocalDatasets(file_path=os.path.join(mastml_path, 'data/figshare_7418492/All_Model_Data.xlsx'),
                          target=target,
                          extra_columns=extra_columns,
                          group_column='Material compositions 1',
                          testdata_columns=None,
                          as_frame=True)
        data_dict = d.load_data()
        X = data_dict['X']
        y = data_dict['y']
        model = RandomForestRegressor()
        selector = EnsembleModelFeatureSelector(model=model, n_features_to_select=100, n_random_dummy= 100, n_permuted_dummy = 200)
        Xselect = selector.evaluate(X=X, y=y, savepath=os.getcwd())
        self.assertEqual(Xselect.shape, (408, 100))
        self.assertTrue(os.path.exists('EnsembleModelFeatureSelector_feature_importances.xlsx'))
        os.remove('EnsembleModelFeatureSelector_feature_importances.xlsx')
        os.remove('selected_features.txt')
        return
if __name__=='__main__':
unittest.main()
|
1677945
|
import unittest
from unittest import mock
from flumine.order.order import OrderStatus, OrderTypes
from betfairlightweight.resources.bettingresources import PriceSize
from flumine import config
from flumine.markets.market import Market
from flumine.markets.markets import Markets
from flumine.order.order import (
BaseOrder,
BetfairOrder,
)
from flumine.order import process
from flumine.strategy.strategy import Strategies
from flumine.utils import create_cheap_hash
class BaseOrderTest(unittest.TestCase):
    """Tests for flumine's current-order processing: matching streamed
    current orders back onto blotter orders and recreating orders from
    stream data after a restart.
    """
    def setUp(self) -> None:
        # Simulated mode is enabled per-test and reset in tearDown so the
        # flag never leaks into other test modules.
        mock_client = mock.Mock(paper_trade=False)
        self.mock_trade = mock.Mock(client=mock_client)
        self.mock_order_type = mock.Mock()
        self.order = BaseOrder(self.mock_trade, "BACK", self.mock_order_type, 1)
        config.simulated = True
    def tearDown(self) -> None:
        config.simulated = False
    def test_process_current_orders_with_default_sep(self):
        # An order already in the market blotter ("123") should be matched
        # by its customer_order_ref (name-hash + default separator + id)
        # and receive the streamed current order as its latest response.
        mock_log_control = mock.Mock()
        mock_add_market = mock.Mock()
        market_book = mock.Mock()
        markets = Markets()
        market = Market(
            flumine=mock.Mock(), market_id="market_id", market_book=market_book
        )
        markets.add_market("market_id", market)
        strategies = Strategies()
        cheap_hash = create_cheap_hash("strategy_name", 13)
        trade = mock.Mock(market_id="market_id")
        trade.strategy.name_hash = cheap_hash
        current_order = mock.Mock(
            customer_order_ref=f"{cheap_hash}I123", market_id="market_id", bet_id=None
        )
        betfair_order = BetfairOrder(trade=trade, side="BACK", order_type=mock.Mock())
        betfair_order.id = "123"
        market.blotter = {"123": betfair_order}
        event = mock.Mock(event=[mock.Mock(orders=[current_order])])
        process.process_current_orders(
            markets=markets,
            strategies=strategies,
            event=event,
            log_control=mock_log_control,
            add_market=mock_add_market,
        )
        self.assertEqual(current_order, betfair_order.responses.current_order)
    def test_process_current_order(self):
        # EXECUTION_COMPLETE on the stream should drive the order's
        # execution_complete transition after the update.
        mock_order = mock.Mock(status=OrderStatus.EXECUTABLE)
        mock_order.current_order.status = "EXECUTION_COMPLETE"
        mock_current_order = mock.Mock()
        mock_log_control = mock.Mock()
        process.process_current_order(mock_order, mock_current_order, mock_log_control)
        mock_order.update_current_order.assert_called_with(mock_current_order)
        mock_order.execution_complete.assert_called()
    @mock.patch("flumine.order.process.OrderEvent")
    def test_process_current_order_async(self, mock_order_event):
        # Async-placed orders get their bet_id from the stream; that should
        # also record a placed response and emit an OrderEvent to logging.
        mock_order = mock.Mock(status=OrderStatus.EXECUTABLE, async_=True, bet_id=None)
        mock_order.current_order.status = "EXECUTION_COMPLETE"
        mock_current_order = mock.Mock(bet_id=1234)
        mock_log_control = mock.Mock()
        process.process_current_order(mock_order, mock_current_order, mock_log_control)
        mock_order.update_current_order.assert_called_with(mock_current_order)
        mock_order.execution_complete.assert_called()
        self.assertEqual(mock_order.bet_id, 1234)
        mock_order.responses.placed.assert_called_with()
        mock_order_event.assert_called_with(mock_order)
        mock_log_control.assert_called_with(mock_order_event())
    def test_create_order_from_current(self):
        # With no matching blotter entry, a new LIMIT order should be
        # rebuilt from the streamed current order's attributes and added
        # to the market blotter under the parsed id ("123").
        mock_add_market = mock.Mock()
        market_book = mock.Mock()
        markets = Markets()
        market = Market(
            flumine=mock.Mock(), market_id="market_id", market_book=market_book
        )
        markets.add_market("market_id", market)
        cheap_hash = create_cheap_hash("strategy_name", 13)
        strategy = mock.Mock(name_hash=cheap_hash)
        strategies = Strategies()
        strategies(strategy=strategy, client=mock.Mock())
        current_order = mock.Mock(
            customer_order_ref=f"{cheap_hash}I123",
            market_id="market_id",
            bet_id=None,
            selection_id="selection_id",
            handicap="handicap",
            order_type="LIMIT",
            price_size=PriceSize(price=10.0, size=2.0),
            persistence_type="LAPSE",
        )
        new_order = process.create_order_from_current(
            markets=markets,
            strategies=strategies,
            current_order=current_order,
            add_market=mock_add_market,
        )
        self.assertEqual(market.blotter["123"], new_order)
        self.assertEqual(new_order.market_id, "market_id")
        self.assertEqual(new_order.selection_id, "selection_id")
        self.assertEqual(new_order.handicap, "handicap")
        self.assertEqual(new_order.order_type.ORDER_TYPE, OrderTypes.LIMIT)
        self.assertEqual(new_order.order_type.size, 2.0)
        self.assertEqual(new_order.order_type.price, 10.0)
|
1677971
|
import sleuth_backend.views.views_utils as utils
from django.test import TestCase
class TestViewsUtils(TestCase):
    '''
    Test views utility functions
    '''
    # NOTE: the deprecated assertEquals alias (removed in Python 3.12) has
    # been replaced with assertEqual throughout.
    def test_build_core_request(self):
        '''
        Test building list of requested cores
        '''
        solr_cores = ['genericPage', 'redditPost', 'courseItem']
        # one requested core
        result = utils.build_core_request('genericPage', solr_cores)
        self.assertEqual(['genericPage'], result)
        # multiple requested cores
        result = utils.build_core_request('genericPage,redditPost', solr_cores)
        self.assertEqual(['genericPage', 'redditPost'], result)
        # some invalid cores
        with self.assertRaises(ValueError):
            utils.build_core_request('wow,redditPost', solr_cores)
        # all cores invalid
        with self.assertRaises(ValueError):
            utils.build_core_request('geasdficPage,redasdf', solr_cores)
        # no given cores
        result = utils.build_core_request('', solr_cores)
        self.assertEqual(solr_cores, result)
    def test_build_return_fields(self):
        '''
        Test building string of return fields
        '''
        # empty input falls back to the default field list
        result = utils.build_return_fields('')
        self.assertEqual('id,updatedAt,name,description', result)
        # unknown fields are rejected, even when mixed with valid ones
        with self.assertRaises(ValueError):
            utils.build_return_fields('asdfasdf')
        with self.assertRaises(ValueError):
            utils.build_return_fields('links,sadf')
|
1678050
|
from xv_leak_tools import tools_root
from xv_leak_tools.log import L
from xv_leak_tools.process import check_subprocess
from xv_leak_tools.test_components.local_component import LocalComponent
from xv_leak_tools.test_device.connector_helper import ConnectorHelper
class Git(LocalComponent):
    """Component that brings a test device's git checkout in line with the
    branch currently checked out on the orchestration machine."""

    @staticmethod
    def _git_branch():
        '''Name of the branch currently checked out on the orchestration
        device (localhost). Used to pin every device to the same revision.'''
        # TODO: Consider making this configurable as well, with the default being this.
        return check_subprocess(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].strip()

    def setup(self):
        # Nothing to do unless the component is configured to sync checkouts.
        if not self._config.get("checkout", False):
            return
        branch = Git._git_branch()
        L.info(
            "Updating git repo to branch {} on device {}".format(branch, self._device.device_id()))
        helper = ConnectorHelper(self._device)
        git_root = self._device.config().get('git_root', tools_root())
        # Single remote shell invocation: cd into the repo, switch branch,
        # pull, and sync submodules.
        # TODO: Potentially should clean as well?
        command = ['cd', git_root, '&&']
        command += ['git', 'checkout', branch, '&&']
        command += ['git', 'pull', '&&']
        command += ['git', 'submodule', 'update', '--init', '--recursive']
        helper.execute_command(command)
|
1678141
|
import time
import simpleaudio as sa
import asyncio
import threading
import random
import os
from simpleaudio._simpleaudio import SimpleaudioError
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'music')
class Player():
    """Background lo-fi music player: loops over a shuffled playlist on a
    worker thread until stop() is requested or playback fails."""
    def __init__(self):
        # Playlist entries pair a .wav path with a display name.
        self.audio_files = [{
            'path': os.path.join(dir_path, 'paradise.wav'),
            'name': 'Purrple Cat - lost paradise'
        },
            {
                'path': os.path.join(dir_path, 'jazzy.wav'),
                'name': 'Homie Cat - riverbed'
            },
            {
                'path': os.path.join(dir_path, 'mondays.wav'),
                'name': 'MusicByAden - Mondays'
            },
            {
                'path': os.path.join(dir_path, 'time.wav'),
                'name': 'KaizanBlue - Time'
            },
            {
                'path': os.path.join(dir_path, 'cruise.wav'),
                'name': '<NAME> - Cruisin Along'
            }
        ]
        # Shuffle once so each Player instance has its own track order.
        random.shuffle(self.audio_files)
        # Handle for the currently-playing simpleaudio object (None until play()).
        self.play_obj = None
        # Cooperative stop flag checked between (and during) tracks.
        self._stop = False
        # Name of the track currently (about to be) playing, for display.
        self.playing_now = self.audio_files[0]['name']
    def _play(self):
        """Worker loop: play the shuffled playlist repeatedly until stopped.
        Runs on the thread started by play()."""
        while True:
            for audio_file in self.audio_files:
                wave_obj = sa.WaveObject.from_wave_file(audio_file['path'])
                self.playing_now = audio_file['name']
                if not self._stop:
                    try:
                        # wait_done() blocks this worker thread for the
                        # duration of the track.
                        self.play_obj = wave_obj.play()
                        self.play_obj.wait_done()
                    except SimpleaudioError:
                        # Audio device unavailable: give up rather than spin.
                        print('cannot play audio')
                        self._stop = True
                if self._stop:
                    break
            if self._stop:
                break
    def play(self):
        """Start playback on a background thread (non-blocking)."""
        self.thread = threading.Thread(target=self._play)
        self.thread.start()
    def stop(self):
        """Request the worker loop to stop and halt the current track.
        AttributeError is swallowed for the case where nothing played yet."""
        self._stop = True
        try:
            self.play_obj.stop()
        except AttributeError:
            pass
|
1678154
|
from typing import Dict
from ruamel.yaml import YAML
import constants
from constants import (
NORMALIZED_INTERFACES,
INTERFACE_NAME_RE,
NEIGHBOR_SPLIT_RE,
CDP_NEIGHBOR_RE,
HOSTS_FILE,
DEVICE_USERNAME,
DEVICE_PASSWORD,
DEVICE_TYPE,
CONNECTION_TIMEOUT,
)
def normalize_interface_type(interface_type: str) -> str:
    """Normalizes interface type
    For example, G is converted to GigabitEthernet, Te is converted to TenGigabitEthernet
    """
    wanted = interface_type.strip().lower()
    # First canonical name whose prefix matches wins; otherwise fall back to
    # the cleaned-up input.
    for candidate in NORMALIZED_INTERFACES:
        if candidate.lower().startswith(wanted):
            return candidate
    return wanted
def normalize_interface_name(interface_name: str) -> str:
    """Normalizes interface name
    For example, Gi0/1 is converted to GigabitEthernet1,
    Te1/1 is converted to TenGigabitEthernet1/1
    """
    match = INTERFACE_NAME_RE.search(interface_name)
    # Guard clause: bail out early on anything that doesn't look like an
    # interface name.
    if not match:
        raise ValueError(f"Does not recognize {interface_name} as an interface name")
    normalized_type = normalize_interface_type(match.group("interface_type"))
    return normalized_type + match.group("interface_num")
def extract_hostname_from_fqdn(fqdn: str) -> str:
    """Extracts hostname from fqdn-like string
    For example, R1.cisco.com -> R1, sw1 -> sw1"
    """
    # partition() returns the whole string unchanged when no dot is present.
    hostname, _, _ = fqdn.partition(".")
    return hostname
def parse_show_cdp_neighbors(cli_output: str) -> Dict[str, Dict[str, str]]:
    """Parses `show cdp neighbors` and returns a dictionary of neighbors and connected interfaces"""
    neighbors: Dict[str, Dict[str, str]] = {}
    for neighbor_block in NEIGHBOR_SPLIT_RE.split(cli_output):
        match = CDP_NEIGHBOR_RE.search(neighbor_block)
        if not match:
            continue
        local_port = normalize_interface_name(match.group("local_interface"))
        neighbors[local_port] = {
            "connected_device": {
                "name": extract_hostname_from_fqdn(match.group("remote_fqdn")),
                "port": normalize_interface_name(match.group("remote_interface")),
            }
        }
    # Return entries sorted by local interface name for stable output.
    return dict(sorted(neighbors.items()))
def get_devices_conn_params() -> Dict[str, Dict[str, str]]:
    """Creates a dictionary of connection parameters for SSH.

    Reads the router inventory from HOSTS_FILE (YAML) and builds one
    netmiko-style connection dict per device.

    :returns: mapping of device name -> connection parameters
    """
    result: Dict[str, Dict[str, str]] = {}
    yaml = YAML()
    with open(HOSTS_FILE, 'r') as f:
        hosts = yaml.load(f)
    for device, device_details in hosts["devices"]["routers"].items():
        device_params = {
            "host": device_details["host"],
            "username": DEVICE_USERNAME,
            # Fix: this line previously contained a '<PASSWORD>' redaction
            # placeholder, which is not valid Python; restore the imported
            # credential constant.
            "password": DEVICE_PASSWORD,
            "device_type": DEVICE_TYPE,
            "timeout": CONNECTION_TIMEOUT,
            "global_delay_factor": constants.NETMIKO_GLOBAL_DELAY_FACTOR,
        }
        result[device] = device_params
    return result
|
1678198
|
import asyncio
import datetime
import json
import asyncpg
import discord
from discord.ext import commands, tasks
from discord.ext.commands.cooldowns import BucketType
class Stats(commands.Cog):
    """Statistics cog: tracks command usage in Postgres, and exposes
    uptime/ping/stats/epinfo/privacy commands."""
    def __init__(self, bot):
        self.bot = bot

        # Track command count
        self.command_count = 0
        self.bot.loop.create_task(self.count_commands())

        # Insert command updates in batches to prevent spam causing excessive inserts
        self._command_batch = []
        # Fix: asyncio.Lock's `loop` kwarg was removed in Python 3.10; the
        # lock binds to the running loop on first acquire, which is the
        # bot's loop here, so behavior is unchanged.
        self._batch_lock = asyncio.Lock()
        self.batch_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
        self.batch_insert_loop.start()

    # Get the uptime of the bot. In a short description format by default.
    def get_uptime(self, full=False):
        """Return the bot's uptime as 'Nd Nh Nm Ns' or, when full=True,
        a long-form sentence."""
        current_time = datetime.datetime.utcnow()
        delta = current_time - self.bot.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if full:
            return f'{days} days, {hours} hours, {minutes} minutes, and {seconds} seconds'
        else:
            return f'{days}d {hours}h {minutes}m {seconds}s'

    # Update the cached successful command count
    async def count_commands(self):
        """Seed the in-memory counter from the command_history table."""
        query = '''SELECT COUNT(*) FROM command_history
                   WHERE failed = false
                '''
        self.command_count = await self.bot.db.fetchval(query)

    # On each command, add attributes of command to batch to be logged by task loop
    @commands.Cog.listener()
    async def on_command_completion(self, ctx):
        # Ensure command recorded
        if ctx.command is None:
            return

        # Get guild id
        guild_id = None
        if ctx.guild is not None:
            guild_id = ctx.guild.id

        # Cache command to batch for insertion
        self._command_batch.append(
            {
                'command': ctx.command.qualified_name,
                'prefix': ctx.prefix,
                'guild_id': guild_id,
                'used_at': ctx.message.created_at.isoformat(),
                'failed': ctx.command_failed
            }
        )

        # Update command count
        if not ctx.command_failed:
            self.command_count += 1

    # Ensure loop ends if cog is unloaded
    def cog_unload(self):
        self.batch_insert_loop.stop()

    # Loops each 10 seconds, inserts all batched command stats into command history table
    @tasks.loop(seconds=10)
    async def batch_insert_loop(self):
        async with self._batch_lock:
            query = '''INSERT INTO command_history (command, prefix, guild_id, used_at, failed)
                       SELECT x.command, x.prefix, x.guild_id, x.used_at, x.failed
                       FROM jsonb_to_recordset($1::jsonb) AS
                       x(command TEXT, prefix TEXT, guild_id BIGINT, used_at TIMESTAMP, failed BOOLEAN)
                    '''
            if self._command_batch:
                await self.bot.db.execute(query, json.dumps(self._command_batch))
                self._command_batch.clear()

    # Posts the bots uptime to the channel
    @commands.command()
    @commands.cooldown(1, 3, BucketType.user)
    async def uptime(self, ctx):
        await ctx.send(f'🔌 Uptime: **{self.get_uptime(True)}**')

    # Check the latency of the bot
    @commands.command()
    @commands.cooldown(1, 3, BucketType.channel)
    async def ping(self, ctx):
        latency = round(self.bot.latency * 1000, 2)
        await ctx.send(f'🏓 Latency: {str(latency)}ms')

    # Get all episode information of the last screencap that was posted in the
    # channel
    @commands.command(aliases=['episodeinfo'])
    @commands.cooldown(1, 3, BucketType.channel)
    @commands.bot_has_permissions(embed_links=True)
    async def epinfo(self, ctx):
        if ctx.channel.id in self.bot.cached_screencaps:
            # Get screencap and its timestamp
            screencap = self.bot.cached_screencaps[ctx.channel.id]
            real_timestamp = screencap.get_real_timestamp()

            # Create embed for episode information, links to wiki of episode
            embed = discord.Embed(title=f'{screencap.api.title}: {screencap.title}', colour=discord.Colour(0x44981e),
                                  url=screencap.wiki_url)

            # Add episode information
            embed.add_field(name='Episode', value=screencap.key, inline=True)
            embed.add_field(name='Air Date', value=screencap.air_date, inline=True)
            embed.add_field(name='Timestamp', value=real_timestamp, inline=True)
            embed.add_field(name='Director(s)', value=screencap.director)
            embed.add_field(name='Writer(s)', value=screencap.writer)
            await ctx.send(embed=embed)

    # Display statistics for the bot
    @commands.command(aliases=['statistics'])
    @commands.cooldown(1, 3, BucketType.channel)
    @commands.bot_has_permissions(embed_links=True)
    async def stats(self, ctx):
        # Count users online in guilds and user average
        total_members = 0
        for guild in self.bot.guilds:
            total_members += guild.member_count
        guild_count = len(self.bot.guilds)

        # Embed statistics output
        embed = discord.Embed(colour=discord.Colour(0x44981e))
        embed.set_thumbnail(url=self.bot.user.avatar_url)
        embed.set_author(name=f'{self.bot.user.name} Statistics', url='https://github.com/FlandersBOT',
                         icon_url=self.bot.user.avatar_url)

        # Round latency to 2 decimal places and get milliseconds
        latency = round(self.bot.latency * 1000, 2)

        # Add all statistics
        embed.add_field(name='Bot Owner', value='Mitch#8293', inline=True)
        embed.add_field(name='Server Count', value=f'{guild_count:,}', inline=True)
        embed.add_field(name='Total Members', value=f'{total_members:,}', inline=True)
        embed.add_field(name='Uptime', value=self.get_uptime(), inline=True)
        embed.add_field(name='Latency', value=f'{latency:,}' + ' ms', inline=True)
        embed.add_field(name='Commands Used', value=f'{self.command_count:,}', inline=True)
        await ctx.send(embed=embed)

    # All privacy related functions, including information regarding the data logged, and options to both delete, and
    # opt out of future data logging
    @commands.command()
    @commands.cooldown(1, 3, BucketType.user)
    async def privacy(self, ctx, *, subcommand: str = None):
        # Display generic privacy info
        if subcommand is None or subcommand == 'info':
            await ctx.send('FlandersBOT stores the user ID (e.g. 221609683562135553) privately and stores the username '
                           '& discriminator (e.g. FlandersBOT#0680) of all trivia participants publicly for use in the '
                           'trivia leaderboards.\nIf you wish to participate in trivia without appearing in the '
                           'leaderboards, use the command: `ned privacy config`\nIf you wish to remove all data '
                           'relating to your account, use the command: `ned privacy remove`.')

        # Adjust privacy settings for user
        elif subcommand.lower() in ['edit', 'update', 'config', 'modify']:
            msg = await ctx.send('By default, all trivia participants are visible to the public in the trivia '
                                 'leaderboard. Alternatively, you may change your privacy settings by reacting to this '
                                 'message.\n**A**: Public\n**B**: Private')
            await msg.add_reaction('🇦')
            await msg.add_reaction('🇧')

            # Check for response of cross/tick
            def is_answer(reaction, user):
                return (not user.bot and str(reaction.emoji) in ['🇦', '🇧', '🇨']
                        and user.id == ctx.author.id)

            # NOTE(review): a timeout here raises asyncio.TimeoutError which
            # propagates to the command error handler — preserved as-is.
            react, user = await self.bot.wait_for('reaction_add',
                                                  check=is_answer, timeout=120)

            # Affirmative reaction, drop all data for that user
            if react.emoji == '🇦':
                # Set privacy setting for profile to public
                query = '''UPDATE leaderboard
                           SET privacy = 0
                           WHERE user_id = $1
                        '''
                await self.bot.db.execute(query, user.id)
                await msg.edit(content='Your leaderboard stats are now public')

            # Affirmative reaction, drop all data for that user
            if react.emoji == '🇧':
                # Set privacy setting for profile to private
                query = '''UPDATE leaderboard
                           SET privacy = 1
                           WHERE user_id = $1
                        '''
                await self.bot.db.execute(query, user.id)
                await msg.edit(content='Your leaderboard stats are now private.')

        # Remove all data logged for user
        elif subcommand.lower() in ['remove', 'delete', 'erase', 'purge']:
            msg = await ctx.send('Would you like to erase all your user data?\nThis will remove you from the trivia '
                                 'leaderboard, and cannot be undone.')
            await msg.add_reaction('❌')
            await msg.add_reaction('✅')

            # Check for response of cross/tick
            def is_answer(reaction, user):
                return not user.bot and str(reaction.emoji) in ['❌', '✅'] and user.id == ctx.author.id

            react, user = await self.bot.wait_for('reaction_add', check=is_answer, timeout=120)

            # Affirmative reaction, drop all data for that user
            if react.emoji == '✅':
                # Delete all records of user from leaderboard
                query = '''DELETE FROM leaderboard
                           WHERE user_id = $1
                        '''
                await self.bot.db.execute(query, user.id)

                # Delete all records of user from answers
                query = '''DELETE FROM answers
                           WHERE user_id = $1
                        '''
                await self.bot.db.execute(query, user.id)

                # Clear all records of user id from vote history, replaces with null
                query = '''UPDATE vote_history
                           SET user_id = NULL
                           WHERE user_id = $1
                        '''
                await self.bot.db.execute(query, user.id)
                await msg.edit(content='All data relating to your account has been deleted.\nIf you wish to '
                                       'participate in trivia without appearing in the leaderboards, use the command: '
                                       '`ned privacy config`')
def setup(bot):
    """discord.py extension entry point: attach the Stats cog to the bot."""
    cog = Stats(bot)
    bot.add_cog(cog)
|
1678213
|
from django.contrib import admin
from .models import (IP)
# Register your models here.
# Expose the IP model in the Django admin with the default ModelAdmin.
admin.site.register(IP)
|
1678242
|
from vega.search_space.networks.pytorch.network import Network
from .backbones import *
from .heads import *
from .blocks import *
from .customs import *
from .super_network import *
from .esrbodys import *
from .detectors import *
from .roi_extractors import *
from .shared_heads import *
from .utils import *
from .necks import *
from .losses import *
from .jddbodys import *
from .cyclesrbodys import *
|
1678272
|
import networkx as nx
import matplotlib.pyplot as plt
from pyvis.network import Network
import pandas as pd
import streamlit as st
def got_func(physics):
    """Build the Game of Thrones co-occurrence graph and write it to
    ``gameofthrones.html``.

    :param physics: when truthy, show the pyvis physics control widget.
    """
    got_net = Network(height="600px", width="100%", font_color="black",heading='Game of Thrones Graph')
    # set the physics layout of the network
    got_net.barnes_hut()
    got_data = pd.read_csv("https://www.macalester.edu/~abeverid/data/stormofswords.csv")
    #got_data = pd.read_csv("stormofswords.csv")
    # Each CSV row is one weighted (source, target) edge between characters.
    for src, dst, w in zip(got_data['Source'], got_data['Target'], got_data['Weight']):
        got_net.add_node(src, src, title=src)
        got_net.add_node(dst, dst, title=dst)
        got_net.add_edge(src, dst, value=w)
    neighbor_map = got_net.get_adj_list()
    # add neighbor data to node hover data
    for node in got_net.nodes:
        node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
        node["value"] = len(neighbor_map[node["id"]])
    if physics:
        got_net.show_buttons(filter_=['physics'])
    got_net.show("gameofthrones.html")
def simple_func(physics):
    """Render a small decorated 10-node cycle graph to ``test.html``.

    :param physics: when truthy, show the pyvis physics control widget.
    """
    graph = nx.cycle_graph(10)
    # Decorate a couple of nodes with hover titles and group colors.
    graph.nodes[1]['title'] = 'Number 1'
    graph.nodes[1]['group'] = 1
    graph.nodes[3]['title'] = 'I belong to a different group!'
    graph.nodes[3]['group'] = 10
    # Add an extra connected pair plus one isolated node.
    graph.add_node(20, size=20, title='couple', group=2)
    graph.add_node(21, size=15, title='couple', group=2)
    graph.add_edge(20, 21, weight=5)
    graph.add_node(25, size=25, label='lonely', title='lonely node', group=3)
    net = Network("500px", "500px",notebook=True,heading='')
    net.from_nx(graph)
    #physics=st.sidebar.checkbox('add physics interactivity?')
    if physics:
        net.show_buttons(filter_=['physics'])
    net.show('test.html')
def karate_func(physics):
    """Render the Zachary Karate Club graph to ``karate.html``.

    :param physics: when truthy, show the pyvis physics control widget.
    """
    club_graph = nx.karate_club_graph()
    net = Network("500px", "500px",notebook=True,heading='Zachary’s Karate Club graph')
    net.from_nx(club_graph)
    #physics=st.sidebar.checkbox('add physics interactivity?')
    if physics:
        net.show_buttons(filter_=['physics'])
    net.show('karate.html')
|
1678284
|
import requests
from requests_oauthlib import OAuth1

# OAuth 1.0a credentials. The literals are Korean placeholders ("the
# <value> you looked up") — fill in real values before running.
consumer_key = '확인한 consumer_key'
consumer_secret = '확인한 onsumer_secret'
access_token = '확인한 access_token'
access_token_secret = '확인한 access_token_secret'
oauth = OAuth1(client_key=consumer_key, client_secret=consumer_secret,
               resource_owner_key=access_token, resource_owner_secret=access_token_secret)
# Fetch the public timeline of the @naver_d2 account via Twitter API v1.1.
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name={0}'.format('naver_d2')
r = requests.get(url=url,auth=oauth)
statuses = r.json()
# Print each tweet's text and creation timestamp.
for status in statuses:
    print (status['text'], status['created_at'])
|
1678297
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import logging
from indra.util import read_unicode_csv
logger = logging.getLogger(__name__)
from protmapper.uniprot_client import *
def _build_uniprot_subcell_loc():
    """Load the UniProt subcellular-location to GO id mapping.

    Reads the bundled ``uniprot_subcell_loc.tsv`` resource and returns a
    dict mapping column 0 (UniProt location id) to column 1 (GO id).
    """
    fname = os.path.dirname(os.path.abspath(__file__)) +\
        '/../resources/uniprot_subcell_loc.tsv'
    csv_rows = read_unicode_csv(fname, delimiter='\t')
    # Skip the header row
    # NOTE(review): no row is actually skipped here — either the TSV has no
    # header or a next(csv_rows) call is missing; confirm against the resource.
    up_to_go = {}
    for row in csv_rows:
        upid = row[0]
        goid = row[1]
        up_to_go[upid] = goid
    return up_to_go
# Module-level cache, built once at import time.
uniprot_subcell_loc = _build_uniprot_subcell_loc()
|
1678311
|
import torch
import torch.nn as nn
from torch.nn import functional as F
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import count_num_param
from dassl.engine import TRAINER_REGISTRY, TrainerXU
from dassl.engine.trainer_tmp import SimpleNet
@TRAINER_REGISTRY.register()
class MCD(TrainerXU):
    """Maximum Classifier Discrepancy.

    https://arxiv.org/abs/1712.02560.

    A shared feature extractor ``F`` feeds two classifier heads ``C1`` and
    ``C2``; each training iteration runs the three MCD steps implemented
    in :meth:`forward_backward`.
    """
    def __init__(self, cfg):
        super().__init__(cfg)
        # Number of generator (F) updates to run in step C per iteration.
        self.n_step_F = cfg.TRAINER.MCD.N_STEP_F
    def build_model(self):
        """Create F, C1 and C2, each with its own optimizer and scheduler,
        and register them so updates can target subsets by name."""
        cfg = self.cfg
        print('Building F')
        # Feature extractor only; classification is done by C1/C2 below
        # (third arg presumably disables SimpleNet's own head — confirm).
        self.F = SimpleNet(cfg, cfg.MODEL, 0)
        self.F.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.F)))
        self.optim_F = build_optimizer(self.F, cfg.OPTIM)
        self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
        self.register_model('F', self.F, self.optim_F, self.sched_F)
        fdim = self.F.fdim
        print('Building C1')
        print("fdim : ",fdim)
        print("num_classes : ",self.num_classes)
        self.C1 = nn.Linear(fdim, self.num_classes)
        self.C1.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.C1)))
        self.optim_C1 = build_optimizer(self.C1, cfg.OPTIM)
        self.sched_C1 = build_lr_scheduler(self.optim_C1, cfg.OPTIM)
        self.register_model('C1', self.C1, self.optim_C1, self.sched_C1)
        print('Building C2')
        self.C2 = nn.Linear(fdim, self.num_classes)
        self.C2.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.C2)))
        self.optim_C2 = build_optimizer(self.C2, cfg.OPTIM)
        self.sched_C2 = build_lr_scheduler(self.optim_C2, cfg.OPTIM)
        self.register_model('C2', self.C2, self.optim_C2, self.sched_C2)
    def forward_backward(self, batch_x, batch_u):
        """One MCD iteration over a labeled (x) and an unlabeled (u) batch.

        Step A: train F, C1 and C2 jointly to classify the labeled data.
        Step B: with F frozen (no_grad on features), train C1/C2 to stay
            accurate on labeled data while *maximizing* their disagreement
            on unlabeled data (hence the ``- loss_dis`` term).
        Step C: with C1/C2 fixed, update F ``n_step_F`` times to *minimize*
            that disagreement.
        """
        parsed = self.parse_batch_train(batch_x, batch_u)
        input_x, label_x, input_u = parsed
        # Step A — joint supervised update of all registered models.
        feat_x = self.F(input_x)
        logit_x1 = self.C1(feat_x)
        logit_x2 = self.C2(feat_x)
        loss_x1 = F.cross_entropy(logit_x1, label_x)
        loss_x2 = F.cross_entropy(logit_x2, label_x)
        loss_step_A = loss_x1 + loss_x2
        self.model_backward_and_update(loss_step_A)
        # Step B — features computed under no_grad so gradients reach only
        # the classifier heads.
        with torch.no_grad():
            feat_x = self.F(input_x)
        logit_x1 = self.C1(feat_x)
        logit_x2 = self.C2(feat_x)
        loss_x1 = F.cross_entropy(logit_x1, label_x)
        loss_x2 = F.cross_entropy(logit_x2, label_x)
        loss_x = loss_x1 + loss_x2
        with torch.no_grad():
            feat_u = self.F(input_u)
        pred_u1 = F.softmax(self.C1(feat_u), 1)
        pred_u2 = F.softmax(self.C2(feat_u), 1)
        loss_dis = self.discrepancy(pred_u1, pred_u2)
        # Minimizing -loss_dis maximizes head disagreement on target data.
        loss_step_B = loss_x - loss_dis
        self.model_backward_and_update(loss_step_B, ['C1', 'C2'])
        # Step C — only F is updated, repeatedly, to minimize disagreement.
        for _ in range(self.n_step_F):
            feat_u = self.F(input_u)
            pred_u1 = F.softmax(self.C1(feat_u), 1)
            pred_u2 = F.softmax(self.C2(feat_u), 1)
            loss_step_C = self.discrepancy(pred_u1, pred_u2)
            self.model_backward_and_update(loss_step_C, 'F')
        loss_summary = {
            'loss_step_A': loss_step_A.item(),
            'loss_step_B': loss_step_B.item(),
            'loss_step_C': loss_step_C.item()
        }
        if (self.batch_idx + 1) == self.num_batches:
            self.update_lr()
        return loss_summary
    def discrepancy(self, y1, y2):
        # Mean absolute difference between the two heads' predictions.
        return (y1 - y2).abs().mean()
    def model_inference(self, input):
        # At test time only the first head (C1) is used for prediction.
        feat = self.F(input)
        return self.C1(feat)
|
1678319
|
import requests
import os
from pathlib import Path
import pickle
from shutil import unpack_archive
# Download sources per dataset family. Each key becomes the directory name
# under dataset/<key>/raw; the UCR "discords" pages host the raw signals.
urls = dict()
urls['ecg']=['http://www.cs.ucr.edu/~eamonn/discords/ECG_data.zip',
             'http://www.cs.ucr.edu/~eamonn/discords/mitdbx_mitdbx_108.txt',
             'http://www.cs.ucr.edu/~eamonn/discords/qtdbsele0606.txt',
             'http://www.cs.ucr.edu/~eamonn/discords/chfdbchf15.txt',
             'http://www.cs.ucr.edu/~eamonn/discords/qtdbsel102.txt']
urls['gesture']=['http://www.cs.ucr.edu/~eamonn/discords/ann_gun_CentroidA']
urls['space_shuttle']=['http://www.cs.ucr.edu/~eamonn/discords/TEK16.txt',
                       'http://www.cs.ucr.edu/~eamonn/discords/TEK17.txt',
                       'http://www.cs.ucr.edu/~eamonn/discords/TEK14.txt']
urls['respiration']=['http://www.cs.ucr.edu/~eamonn/discords/nprs44.txt',
                     'http://www.cs.ucr.edu/~eamonn/discords/nprs43.txt']
urls['power_demand']=['http://www.cs.ucr.edu/~eamonn/discords/power_data.txt']
# Known anomalous windows per file: sample index i is labeled 1.0 when
# lo < i < hi for any (lo, hi) span, 0.0 otherwise. Files absent from this
# table get NO label column appended (matches the original if/elif chain).
ANOMALY_SPANS = {
    'chfdbchf15.txt': [(2250, 2400)],
    'xmitdb_x108_0.txt': [(4020, 4400)],
    'mitdb__100_180.txt': [(1800, 1990)],
    'chfdb_chf01_275.txt': [(2330, 2600)],
    'ltstdb_20221_43.txt': [(650, 780)],
    'ltstdb_20321_240.txt': [(710, 850)],
    'chfdb_chf13_45590.txt': [(2800, 2960)],
    'stdb_308_0.txt': [(2290, 2550)],
    'qtdbsel102.txt': [(4230, 4430)],
    'ann_gun_CentroidA.txt': [(2070, 2810)],
    'TEK16.txt': [(4270, 4370)],
    'TEK17.txt': [(2100, 2145)],
    'TEK14.txt': [(1100, 1200), (1455, 1955)],
    'nprs44.txt': [(16192, 16638), (20457, 20911)],
    'nprs43.txt': [(12929, 13432), (14877, 15086), (15729, 15924)],
    'power_data.txt': [(8254, 8998), (11348, 12143), (33883, 34601)],
}

# Per-file (train, test) slices into the labeled sequence. Files absent
# from this table are saved to 'whole' only (as in the original chain).
TRAIN_TEST_SPLITS = {
    'chfdb_chf13_45590.txt': (slice(None, 2439), slice(2439, 3726)),
    'chfdb_chf01_275.txt': (slice(None, 1833), slice(1833, 3674)),
    'chfdbchf15.txt': (slice(3381, 14244), slice(33, 3381)),
    'qtdbsel102.txt': (slice(10093, 44828), slice(211, 10093)),
    'mitdb__100_180.txt': (slice(2328, 5271), slice(73, 2328)),
    'stdb_308_0.txt': (slice(2986, 5359), slice(265, 2986)),
    'ltstdb_20321_240.txt': (slice(1520, 3531), slice(73, 1520)),
    'xmitdb_x108_0.txt': (slice(424, 3576), slice(3576, 5332)),
    'ltstdb_20221_43.txt': (slice(1121, 3731), slice(0, 1121)),
    'ann_gun_CentroidA.txt': (slice(3000, None), slice(None, 3000)),
    'nprs44.txt': (slice(363, 12955), slice(12955, 24082)),
    'nprs43.txt': (slice(4285, 10498), slice(10498, 17909)),
    'power_data.txt': (slice(15287, 33432), slice(501, 15287)),
    'TEK17.txt': (slice(2469, 4588), slice(1543, 2469)),
    'TEK16.txt': (slice(521, 3588), slice(3588, 4539)),
    'TEK14.txt': (slice(2089, 4098), slice(97, 2089)),
}

for dataname in urls:
    raw_dir = Path('dataset', dataname, 'raw')
    raw_dir.mkdir(parents=True, exist_ok=True)
    # Download every source file; suffix-less files get '.txt', archives
    # are unpacked into the raw directory.
    for url in urls[dataname]:
        filename = raw_dir.joinpath(Path(url).name)
        print('Downloading', url)
        resp = requests.get(url)
        filename.write_bytes(resp.content)
        if filename.suffix == '':
            filename.rename(filename.with_suffix('.txt'))
            print('Saving to', filename.with_suffix('.txt'))
        if filename.suffix == '.zip':
            print('Extracting to', filename)
            unpack_archive(str(filename), extract_dir=str(raw_dir))
    for filepath in raw_dir.glob('*.txt'):
        with open(str(filepath)) as f:
            # Label anomaly points as 1.0 (0.0 otherwise) in the dataset.
            labeled_data = []
            for i, line in enumerate(f):
                tokens = [float(token) for token in line.split()]
                if raw_dir.parent.name == 'ecg':
                    # Remove time-step channel
                    tokens.pop(0)
                spans = ANOMALY_SPANS.get(filepath.name)
                if spans is not None:
                    tokens.append(1.0 if any(lo < i < hi for lo, hi in spans) else 0.0)
                labeled_data.append(tokens)
            # Fill in the point where there is no signal value
            # (linear interpolation from the neighbors).
            if filepath.name == 'ann_gun_CentroidA.txt':
                for i, datapoint in enumerate(labeled_data):
                    for j, channel in enumerate(datapoint[:-1]):
                        if channel == 0.0:
                            labeled_data[i][j] = 0.5 * labeled_data[i - 1][j] + 0.5 * labeled_data[i + 1][j]
            # Save the labeled dataset as .pkl extension
            labeled_whole_dir = raw_dir.parent.joinpath('labeled', 'whole')
            labeled_whole_dir.mkdir(parents=True, exist_ok=True)
            with open(str(labeled_whole_dir.joinpath(filepath.name).with_suffix('.pkl')), 'wb') as pkl:
                pickle.dump(labeled_data, pkl)
            # Divide the labeled dataset into trainset and testset, then save them
            labeled_train_dir = raw_dir.parent.joinpath('labeled', 'train')
            labeled_train_dir.mkdir(parents=True, exist_ok=True)
            labeled_test_dir = raw_dir.parent.joinpath('labeled', 'test')
            labeled_test_dir.mkdir(parents=True, exist_ok=True)
            split = TRAIN_TEST_SPLITS.get(filepath.name)
            if split is not None:
                train_slice, test_slice = split
                with open(str(labeled_train_dir.joinpath(filepath.name).with_suffix('.pkl')), 'wb') as pkl:
                    pickle.dump(labeled_data[train_slice], pkl)
                with open(str(labeled_test_dir.joinpath(filepath.name).with_suffix('.pkl')), 'wb') as pkl:
                    pickle.dump(labeled_data[test_slice], pkl)
# nyc_taxi ships as CSV rows of (timestamp, value); the timestamp column is
# dropped and the known anomalous windows are labeled 1 (0 otherwise).
NYC_TAXI_ANOMALIES = [(150, 250), (5970, 6050), (8500, 8650),
                      (8750, 8890), (10000, 10200), (14700, 14800)]
nyc_taxi_raw_path = Path('dataset/nyc_taxi/raw/nyc_taxi.csv')
labeled_data = []
with open(str(nyc_taxi_raw_path), 'r') as f:
    for i, line in enumerate(f):
        tokens = [float(token) for token in line.strip().split(',')[1:]]
        tokens.append(1 if any(lo < i < hi for lo, hi in NYC_TAXI_ANOMALIES) else 0)
        labeled_data.append(tokens)
# First 13104 rows become the train set, the remainder the test set.
nyc_taxi_train_path = nyc_taxi_raw_path.parent.parent.joinpath('labeled','train',nyc_taxi_raw_path.name).with_suffix('.pkl')
nyc_taxi_train_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(nyc_taxi_train_path), 'wb') as pkl:
    pickle.dump(labeled_data[:13104], pkl)
nyc_taxi_test_path = nyc_taxi_raw_path.parent.parent.joinpath('labeled','test',nyc_taxi_raw_path.name).with_suffix('.pkl')
nyc_taxi_test_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(nyc_taxi_test_path), 'wb') as pkl:
    pickle.dump(labeled_data[13104:], pkl)
|
1678364
|
import requests
from scrapy.selector import Selector
import pymysql
import time
# Module-level MySQL connection/cursor shared by all GetRandomIp methods.
# NOTE(review): credentials are hard-coded; '<PASSWORD>' looks like a
# scrubbed placeholder — supply real credentials via configuration.
conn = pymysql.connect(host="127.0.0.1", user="feson", passwd="<PASSWORD>", db="Spider", charset="utf8")
cursor = conn.cursor()
# Browser-like User-Agent so the proxy-list site serves the normal page.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
}
class GetRandomIp(object):
    """Scrape free proxies from kuaidaili.com, persist the ones that answer
    a probe request into the ``proxy_ip`` MySQL table, and hand out random
    validated proxy URLs."""

    def parse(self, next_url='/inha/1'):
        """
        Parse Ip List From Site, Transfer to parse_detail
        :param next_url: page to fetch (see NOTE below)
        :return: None
        """
        print("Begin Parsing...")
        # NOTE(review): the format string has no placeholder, so ``next_url``
        # is effectively ignored and the same page is fetched each time —
        # confirm the intended URL scheme before changing it.
        response = requests.get(url='https://www.kuaidaili.com/free/intr'.format(next_url), headers=headers)
        response = Selector(text=response.text)
        tr_list = response.xpath('//*[@id="list"]/table/tbody/tr/td')
        if tr_list:
            self.parse_detail(tr_list)
        for i in range(20):
            time.sleep(5)  # throttle between pages
            next_url = 'https://www.kuaidaili.com/free/intr/%d' % i
            if next_url:
                self.parse(next_url)

    def parse_detail(self, tr_list):
        """
        Parse Ip detail from list, transfer to insert into database
        :param tr_list: selector over the proxy table cells
        :return: None
        """
        ip = tr_list.xpath('//td[@data-title="IP"]/text()').extract()
        port = tr_list.xpath('//td[@data-title="PORT"]/text()').extract()
        type = tr_list.xpath('//td[@data-title="类型"]/text()').extract()
        speed = tr_list.xpath('//td[@data-title="响应速度"]/text()').extract()
        for i in range(len(ip)):
            self.insert_sql(ip[i], port[i], type[i])

    def insert_sql(self, ip, port, type):
        """Probe the proxy and, when alive, insert it into ``proxy_ip``."""
        type = type.lower()
        proxy_url = '{0}://{1}:{2}'.format(type, ip, port)
        res = self.check_ip(type, proxy_url)
        print(proxy_url)
        if res:
            # Parameterized query: ip/port/type come from a scraped page
            # (untrusted input) and must not be interpolated into the SQL.
            cursor.execute(
                "insert proxy_ip(ip, port, type) VALUES(%s, %s, %s)",
                (ip, port, type)
            )
            conn.commit()

    def get_ip(self):
        """Return a random stored proxy URL that still answers the probe;
        dead entries are deleted and another candidate is drawn."""
        sql = "select * from proxy_ip ORDER BY RAND() LIMIT 1"
        cursor.execute(sql)
        ip, port, type = cursor.fetchone()
        conn.commit()
        type = type.lower()
        proxy_url = '{0}://{1}:{2}'.format(type, ip, port)
        res = self.check_ip(type, proxy_url)
        if res:
            return proxy_url
        else:
            self.delete_ip(ip)
            return self.get_ip()

    def check_ip(self, type, proxy_url):
        """Probe the proxy against a real page; True iff it answers with
        HTTP 200 or 302 within 5 seconds."""
        request_url = 'http://hf.58.com/ershoufang/0'
        try:
            proxy = {type: proxy_url}
            response = requests.get(url=request_url, proxies=proxy, timeout=5)
        except Exception as e:
            print(e)
            return False
        else:
            code = response.status_code
            if code == 200 or code == 302:
                return True
            else:
                print('invalid ip and port')
                return False

    def delete_ip(self, ip):
        """Remove a dead proxy from ``proxy_ip`` (parameterized query)."""
        cursor.execute("delete from proxy_ip where ip=%s", (ip,))
        conn.commit()
# Module-level instance kept for importers; a fresh one is created when
# the file is executed as a script.
ip = GetRandomIp()
if __name__ == '__main__':
    ip = GetRandomIp()
    ip.parse()
    # print(ip.get_ip())
|
1678382
|
from polyphony import testbench
def f(l1:list, l2:list):
    """Return the sum of the first elements of the two lists."""
    first, second = l1[0], l2[0]
    return first + second
def func11(a:list, b:list):
    """Return f(a, b) + f(b, a) — i.e. twice the sum of the two heads."""
    return f(a, b) + f(b, a)
@testbench
def test():
    # Polyphony hardware testbench: func11([1], [2]) computes
    # (1 + 2) + (2 + 1) == 6.
    a = [1]
    b = [2]
    assert 6 == func11(a, b)
test()
|
1678398
|
from django.contrib import admin
# Register your models here.
from .models import Company, Flight, Comment
# Expose the flight-tracking models in the admin with default ModelAdmin.
admin.site.register(Company)
admin.site.register(Flight)
admin.site.register(Comment)
|
1678417
|
import numpy as np
import pytest
from mirdata.datasets import mtg_jamendo_autotagging_moodtheme
from tests.test_utils import run_track_tests
def test_track():
    """The default fixture track loads with the expected attributes and a
    stereo, 2-second, 44.1 kHz audio clip."""
    data_home = "tests/resources/mir_datasets/mtg_jamendo_autotagging_moodtheme"
    dataset = mtg_jamendo_autotagging_moodtheme.Dataset(data_home)
    track = dataset.track("track_0000948")
    expected_attributes = {
        "audio_path": "tests/resources/mir_datasets/mtg_jamendo_autotagging_moodtheme/audios/48/948.mp3",
        "track_id": "track_0000948",
    }
    expected_property_types = {
        "audio": tuple,
        "artist_id": str,
        "album_id": str,
        "duration": float,
        "tags": str,
    }
    run_track_tests(track, expected_attributes, expected_property_types)
    audio, sr = track.audio
    assert sr == 44100, "sample rate {} is not 44100".format(sr)
    assert audio.shape == (2, 88200), "audio shape {} was not (2, 88200)".format(audio.shape)
def test_track_properties_and_attributes():
    """Metadata fields of the default track match the fixture values."""
    dataset = mtg_jamendo_autotagging_moodtheme.Dataset(
        "tests/resources/mir_datasets/mtg_jamendo_autotagging_moodtheme"
    )
    track = dataset.track("track_0000948")
    expected = {
        "track_id": "track_0000948",
        "artist_id": "artist_000087",
        "album_id": "album_000149",
        "duration": 212.7,
        "tags": "mood/theme---background",
    }
    for field, value in expected.items():
        assert getattr(track, field) == value
def test_to_jams():
    """JAMS serialization carries the track metadata into the sandbox."""
    dataset = mtg_jamendo_autotagging_moodtheme.Dataset(
        "tests/resources/mir_datasets/mtg_jamendo_autotagging_moodtheme"
    )
    jam = dataset.track("track_0000948").to_jams()
    sandbox = jam["sandbox"]
    assert sandbox.track_id == "track_0000948"
    assert sandbox.artist_id == "artist_000087"
    assert sandbox.album_id == "album_000149"
    assert jam.file_metadata.duration == 212.7
    assert sandbox.tags == "mood/theme---background"
def test_get_track_ids_for_split():
    """Each of the five splits exposes exactly one train/validation/test
    track, and an out-of-range split id raises."""
    dataset = mtg_jamendo_autotagging_moodtheme.Dataset(
        "tests/resources/mir_datasets/mtg_jamendo_autotagging_moodtheme"
    )
    # The 15 copy-pasted assertions collapse to one loop over split x subset.
    for split in range(5):
        track_ids = dataset.get_track_ids_for_split(split)
        for subset in ("train", "validation", "test"):
            assert len(track_ids[subset]) == 1
    with pytest.raises(Exception):
        dataset.get_track_ids_for_split(-1)
|
1678421
|
from app.models import *
from app.services.steps import Step_VR_7
def test_step_vr7_is_complete_false(app, db_session, client):
    """Running step VR 7 with an empty payload leaves it incomplete."""
    step = Step_VR_7({})
    outcome = step.run()
    assert outcome == False
    assert step.is_complete == False
    assert step.next_step == None
def test_step_vr7_is_complete_true(app, db_session, client):
    """An affirmation in the payload completes step VR 7 and advances to VR 8."""
    payload = {"affirmation": True}
    step = Step_VR_7(payload)
    outcome = step.run()
    assert outcome == True
    assert step.is_complete == True
    assert step.next_step == 'Step_VR_8'
|
1678444
|
import pytest
from django_test_migrations.exceptions import MigrationNotInPlan
from django_test_migrations.plan import truncate_plan
@pytest.mark.parametrize(('targets', 'index'), [
    ([], 9), # full plan for empty targets
    ([('app1', None)], 0),
    ([('app1', None), ('app3', None)], 7),
    ([('app2', '0002_second')], 6),
    ([('app1', '0002_second'), ('app2', None)], 2),
    ([('app1', '0003_third'), ('app2', None)], 4),
    ([('app1', '0003_third'), ('app1', '0005_fifth')], 7),
    ([('app1', '0003_third'), ('app2', None), ('app3', '0001_initial')], 8),
])
def test_truncate_plan(plan, targets, index):
    """Ensure plan is properly truncated for both types migrations names."""
    # NOTE(review): ``plan`` is a pytest fixture (presumably defined in
    # conftest with 9 entries for app1..app3); ``index`` is the expected
    # cut point within that fixture — confirm against the conftest.
    assert truncate_plan(targets, plan) == plan[:index]
def test_empty_plan():
    """Truncating an empty plan yields an empty (falsy) result."""
    result = truncate_plan([('app1', '0001_initial')], [])
    assert not result
@pytest.mark.parametrize('targets', [
    [('app4', None)],
    [('app1', '0047_magic')],
    [('app1', '0005_fifth'), ('app4', None)],
    [('app1', '0005_fifth'), ('app4', '0047_magic'), ('app3', None)],
])
def test_migration_target_does_not_exist(plan, targets):
    """Ensure ``MigrationNotInPlan`` is raised when target not in plan."""
    # Every target set references an app or migration absent from the
    # ``plan`` fixture, so truncation must fail loudly rather than guess.
    with pytest.raises(MigrationNotInPlan):
        truncate_plan(targets, plan)
|
1678478
|
from rdr_service.api import check_ppi_data_api
from rdr_service.code_constants import FIRST_NAME_QUESTION_CODE
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.model.participant import Participant
from rdr_service.model.utils import to_client_participant_id
from tests.test_data import email_code, first_name_code
from tests.helpers.unittest_base import BaseTestCase
class CheckPpiDataApiTest(BaseTestCase):
    """Tests for the CheckPpiData endpoint and its validation helpers."""

    def setUp(self):
        super().setUp()
        # The validation logic resolves question codes, so the email and
        # first-name codes must exist before any request is made.
        CodeDao().insert(email_code())
        CodeDao().insert(first_name_code())

    def test_empty_request(self):
        # An empty ppi_data mapping yields an empty results mapping.
        response = self.send_post("CheckPpiData", {"ppi_data": {}})
        self.assertEqual({"ppi_results": {}}, response)

    def test_result_to_json(self):
        # errors_count in the JSON is derived from the recorded messages,
        # while tests_count is tracked independently.
        result = check_ppi_data_api._ValidationResult()
        result.add_error("ez")
        result.add_error("ea")
        result.tests_count += 11
        self.assertEqual({"tests_count": 11, "errors_count": 2, "error_messages": ["ez", "ea"]}, result.to_json())

    def test_validation_no_answer(self):
        # A question expected to be "NotAnswered" but missing an answer
        # must produce one test, one error, and a message naming the code.
        self.participant = Participant(participantId=123, biobankId=555)
        ParticipantDao().insert(self.participant)
        self.participant_id = to_client_participant_id(self.participant.participantId)
        summary = ParticipantSummaryDao().insert(self.participant_summary(self.participant))
        result = check_ppi_data_api._get_validation_result(summary.email, {FIRST_NAME_QUESTION_CODE: "NotAnswered"})
        self.assertEqual(1, result.tests_count)
        self.assertEqual(1, result.errors_count)
        self.assertEqual(1, len(result.messages))
        self.assertIn(FIRST_NAME_QUESTION_CODE, result.messages[0])
        # test using phone number as lookup value in API.
        summary.loginPhoneNumber = "5555555555"
        ParticipantSummaryDao().update(summary)
        result = check_ppi_data_api._get_validation_result(
            summary.loginPhoneNumber, {FIRST_NAME_QUESTION_CODE: "NotAnswered"}
        )
        self.assertEqual(1, result.tests_count)
        self.assertEqual(1, result.errors_count)
        self.assertEqual(1, len(result.messages))
        self.assertIn(FIRST_NAME_QUESTION_CODE, result.messages[0])
|
1678479
|
import tensorflow as tf
class Resnet_152_feature(tf.keras.Model):
    """Wrap a ResNet-152 backbone so it accepts images scaled to [-1, 1]."""

    def __init__(self, class_resnet_152):
        super(Resnet_152_feature, self).__init__(name='Resnet_152_feature')
        self.resnet_152 = class_resnet_152
        self.resnet_152_preprocess = tf.keras.applications.resnet.preprocess_input

    def call(self, x):
        # Map [-1, 1] inputs back to [0, 255] pixel values, then apply the
        # standard ResNet preprocessing before running the backbone.
        pixels = ((x + 1) / 2) * 255.0
        return self.resnet_152(self.resnet_152_preprocess(pixels))
class Resnet_152_class(tf.keras.Model):
    """ImageNet ResNet-152 (no top, average-pooled output), frozen by default."""

    def __init__(self, trainable=False):
        super(Resnet_152_class, self).__init__(name='Resnet_152_class')
        backbone = tf.keras.applications.resnet.ResNet152(
            weights='imagenet', include_top=False, pooling='avg')
        if trainable is False:
            backbone.trainable = False
        # Re-wrap input->output so the whole backbone acts as one feature map.
        self.last_feature = tf.keras.Model(inputs=backbone.input, outputs=backbone.output)

    def call(self, x):
        return self.last_feature(x)
|
1678491
|
from pydantic import BaseModel
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.result import Result
from tracardi.service.notation.dot_template import DotTemplate
class Configuration(BaseModel):
    """Validated config for LogAction."""
    # 'warning', 'error' or 'info' — selects the console log level in LogAction.
    type: str
    # Log text; may contain dot-template placeholders rendered at run time.
    message: str
def validate(config: dict) -> Configuration:
    """Parse raw plugin config; raises pydantic validation errors when invalid."""
    return Configuration(**config)
class LogAction(ActionRunner):
    """Render a dot-template message and write it to the flow console log
    at the configured level, passing the payload through unchanged."""

    def __init__(self, **kwargs):
        self.config = validate(kwargs)

    async def run(self, payload):
        dot = self._get_dot_accessor(payload)
        template = DotTemplate()
        # Render into a local: writing the rendered text back into
        # self.config.message would make every subsequent run re-render
        # the already-rendered output instead of the original template.
        message = template.render(self.config.message, dot)
        if self.config.type == 'warning':
            self.console.warning(message)
        elif self.config.type == 'error':
            self.console.error(message)
        elif self.config.type == 'info':
            self.console.log(message)
        return Result(port="payload", value=payload)
def register() -> Plugin:
    """Declare the plugin spec/metadata consumed by the tracardi registry."""
    return Plugin(
        start=False,
        spec=Spec(
            module=__name__,
            className='LogAction',
            inputs=["payload"],
            outputs=['payload'],
            version='0.6.1',
            license="MIT",
            author="<NAME>",
            init={
                "type": "warning",
                "message": "<log-message>"
            },
            manual="log_message_action",
            form=Form(
                groups=[FormGroup(name="Log message plugin", fields=[
                    FormField(
                        id="type",
                        name="Message type",
                        description="Select type of the message that you want to log.",
                        component=FormComponent(type="select", props={"items": {
                            "warning": "Warning",
                            "error": "Error",
                            "info": "Info"
                        }, "initValue": "warning"})
                    ),
                    FormField(
                        id="message",
                        name="Message",
                        description="Provide a message that you want to log. You can use dot template here.",
                        component=FormComponent(type="textarea", props={"label": "Message"})
                    )
                ])]
            )
        ),
        metadata=MetaData(
            name='Log message',
            desc='Logs message to flow log.',
            icon='error',
            group=["Error reporting"],
            documentation=Documentation(
                inputs={
                    "payload": PortDoc(desc="This port takes payload object.")
                },
                outputs={
                    "payload": PortDoc(desc="This port return input payload.")
                }
            )
        )
    )
|
1678498
|
dict={}
dict_all = {}
list = []
with open('rm_overlap_des-jan25.txt','r') as f:
lines = f.readlines()
for i in xrange(len(lines)):
try:
mid = lines[i].split('\t')[0]
if dict.has_key(mid):
# print 'Overlap!'
pass
else:
dict[mid] = i
list.append(i)
except:
print i
print 'Format error'
for x in dict:
if len(x)>10:
print x
# with open('rm_overlap_des-jan25.txt','a') as wrt:
# for j in list:
# wrt.write(lines[j])
count = 0
with open('/Users/jcxu/Desktop/Relation_Extraction/data/FB15k/entity2id.txt','r') as rd:
ls = rd.readlines()
for ll in ls:
id ,idx = ll.split('\t')
dict_all[id] = idx
if dict.has_key(id) is False:
print 'www.freebase.com%s'%(id)
else:
count+=1
print count
|
1678501
|
import os
from .common_config import ffmpeg_bin_dir
# 为 .ts 文件的(1.ts,2.ts,3.ts,...,101.ts,...)这样的文件列表进行从小到大排序
# 为 .ts 文件的(1.ts,2.ts,3.ts,...,101.ts,...)这样的文件列表进行从小到大排序
def bubbleSortTsFile(arr):
    """Sort a list of '<number>.ts' file names numerically, in place.

    '2.ts' must sort before '10.ts', which plain string ordering gets
    wrong. The hand-rolled O(n^2) bubble sort is replaced by list.sort()
    with the same numeric key; the list is mutated in place and also
    returned, matching the original contract.
    """
    arr.sort(key=lambda name: int(str(name).replace('.ts', '')))
    return arr
filelist_name = 'filelist.txt'
def write_tslist_into_txt(filelist_path, ts_file_list):
    """Write an ffmpeg concat list: one "file '<path>'" entry per line.

    Forward slashes are converted to Windows backslashes and no newline is
    written after the last entry. An existing file is overwritten; when
    the list is empty no file is created (as before). The original
    reopened the file in append mode once per entry — it is now built in
    memory and written once.
    """
    if os.path.exists(filelist_path):
        os.remove(filelist_path)
    if not ts_file_list:
        return
    entries = ["file '{0}'".format(ts_path).replace('/', '\\')
               for ts_path in ts_file_list]
    with open(filelist_path, "w", encoding="utf-8") as fp:
        fp.write('\n'.join(entries))
def combine_ts_by_ffmpeg(tsvideoRoot,video_name_dir,ts_file_list, saveFileDir, saveFilePath, log_print):
    """Merge the given .ts segments into one file via `ffmpeg -f concat`.

    Writes the (pre-sorted) segment paths to a temporary concat-list txt,
    shells out to ffmpeg with `-c copy`, then deletes the list file.
    Windows-specific: builds a `X: && cd ...` cmd string and converts
    slashes to backslashes. *log_print* is a logging callback.
    """
    # tsvideoRoot = tsvideoRoot.replace('/','\\')
    # tsFileDir = tsFileDir.replace('/','\\')
    # saveFilePath = saveFilePath.replace('/','\\')
    # txt file that stores the ordered list of .ts segments to concatenate
    filelist_path = '{0}/{1}__{2}'.format(saveFileDir,video_name_dir,filelist_name)
    if os.path.exists(filelist_path):
        os.remove(filelist_path)
    # The drive letter used for output is the one the project lives on.
    drive_name = tsvideoRoot[0]  # first character of the root path, e.g. 'C'
    # Remove any stale output first (otherwise ffmpeg stops and asks
    # interactively whether to overwrite the existing file).
    if not os.path.exists(saveFileDir):
        os.makedirs(saveFileDir)
    if os.path.exists(saveFilePath):
        os.remove(saveFilePath)
    # # Merge using a Windows cmd pipeline (kept for reference):
    # ts_dir_file_list = os.listdir(tsFileDir)
    # ts_file_list = [i for i in ts_dir_file_list if sourceFormat in i]
    # ts_file_list = bubbleSortTsFile(ts_file_list)
    log_print('准备合并的文件列表----------------------------')
    log_print(f'ts_file_list: \n{ts_file_list}')
    # Write the pre-ordered .ts paths into the concat-list txt file.
    # filelist_path = '{0}/{1}__{2}'.format(saveFileDir,video_name_dir,filelist_name)
    log_print('把要合并的所有 .ts 文件都预先排列好的写入到一个 txt 文件中----------------------------')
    log_print(filelist_path)
    write_tslist_into_txt(filelist_path,ts_file_list)
    # Then concatenate via the ffmpeg concat demuxer (stream copy, no re-encode).
    tsvideoDirCmdStr = drive_name + ": && cd " + tsvideoRoot
    cmdStr = f'{tsvideoDirCmdStr} && {ffmpeg_bin_dir}/ffmpeg -f concat -safe 0 -i {filelist_path} -c copy {saveFilePath}'
    log_print("CMD合成指令:{}".format(cmdStr))
    os.system(cmdStr)
    log_print("{}视频合成完成".format(saveFilePath))
    # Remove the temporary filelist.txt used only for this merge.
    os.remove(filelist_path)
|
1678504
|
import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import sys
from tqdm import tqdm
from albumentations import *
from multiprocessing import Process
# Object classes expected in the annotation XML files; list index is used
# as the category id passed to albumentations.
classes = ["0","1","0head","1head"]
imageFolder = "images"
annotationFolder = "annotations"
# Usage: script.py <dataset_dir>; the dataset dir must contain the
# images/ and annotations/ subfolders. Exit silently if missing.
if len(sys.argv) < 2:
    quit()
wd = sys.argv[1]
print(wd)
path_images = "{}/{}".format(wd, imageFolder)
path_annotations = "{}/{}".format(wd, annotationFolder)
# Every regular file in annotations/ is treated as an annotation to process.
files = [f for f in os.listdir(path_annotations) if os.path.isfile(os.path.join(path_annotations, f))]
pbar = tqdm(total=len(files))
def get_aug(aug, min_area=0., min_visibility=0.):
    """Wrap a list of albumentations transforms in a Compose configured
    for pascal_voc-format bounding boxes labelled via 'category_id'."""
    bbox_params = {
        'format': 'pascal_voc',
        'min_area': min_area,
        'min_visibility': min_visibility,
        'label_fields': ['category_id'],
    }
    return Compose(aug, bbox_params=bbox_params)
def write_annotation(path_annotation, annotations):
    """Write an augmented sample back to its (simplified) XML annotation file.

    *annotations* is an albumentations-style dict with keys 'image'
    (H x W [x C] array), 'bboxes' (pascal_voc [xmin, ymin, xmax, ymax]
    floats, truncated to int on output) and 'category_id' (one label per
    bbox). The file is overwritten.
    """
    height, width = annotations['image'].shape[:2]
    #print("{}".format(path_annotation))
    # Use a context manager so the file is closed even if a write fails.
    with open(path_annotation, "w") as annotation_file:
        annotation_file.write("<annotation>\n")
        annotation_file.write("<width>{}</width>\n".format(width))
        annotation_file.write("<height>{}</height>\n".format(height))
        for idx, bbox in enumerate(annotations['bboxes']):
            annotation_file.write("<object>\n")
            annotation_file.write("\t<objName>{}</objName>\n".format(annotations['category_id'][idx]))
            annotation_file.write("\t<xmin>{}</xmin>\n".format(int(bbox[0])))
            annotation_file.write("\t<xmax>{}</xmax>\n".format(int(bbox[2])))
            annotation_file.write("\t<ymin>{}</ymin>\n".format(int(bbox[1])))
            annotation_file.write("\t<ymax>{}</ymax>\n".format(int(bbox[3])))
            annotation_file.write("</object>\n")
            #print("{} {} {} {} {}".format(bbox[0], bbox[1], bbox[2], bbox[3], annotations['category_id'][idx]))
        annotation_file.write("</annotation>\n")
def process_images(annotations):
    """Augment each annotated image in *annotations* in place on disk.

    *annotations* is a list of annotation file names (despite the name;
    the local variable is later re-bound to the albumentations dict).
    For each name: read the matching .jpg from the global `path_images`,
    parse its XML from `path_annotations`, apply the augmentation
    pipeline, and overwrite both image and annotation. Samples whose
    augmentation fails or loses all bboxes are skipped.
    """
    # Photometric/noise pipeline; bboxes below 33% visibility are dropped.
    aug = get_aug([
        OneOf([
            RandomBrightness(),
            RandomContrast(),
            RandomGamma()
        ],p=0.8),
        RGBShift(p=0.1),
        HueSaturationValue(p=0.1),
        OneOf([
            Blur(),
            MedianBlur(),
            MotionBlur(),
            GaussNoise()
        ],p=0.2),
        OneOf([
            CLAHE(),
            IAAAdditiveGaussianNoise(),
            IAASharpen(),
            IAAEmboss()
        ],p=0.15),
        #Normalize(p=0.1),
        #RandomRotate90(p=0.1),
        #JpegCompression(p=0.1, quality_lower=45),
        #OneOf([
        #    RandomCrop(height=900,width=1600),
        #    RandomCrop(height=800,width=800),
        #    RandomSizedCrop(min_max_height=[100,500],height=800,width=1000),
        #    ShiftScaleRotate(p=0.1, scale_limit=0.1),
        #    Cutout(num_holes=512, max_h_size=16, max_w_size=16),
        #    RandomCrop(height=1000, width=600)
        #],p=0.5)
    ],min_visibility=0.33)
    for filu in annotations:
        path_image = "{}/{}".format(path_images, filu)
        path_annotation = "{}/{}".format(path_annotations, filu)
        # Image has the same stem as the annotation, with a .jpg extension.
        path_image = path_image[:-3]
        path_image += "jpg"
        annotation_file = open(path_annotation)
        image = cv2.imread(path_image)
        tree = ET.parse(annotation_file)
        root = tree.getroot()
        bboxes = []
        cat_id = []
        for obj in root.iter('object'):
            clas = obj.find('objName').text
            xmin = int(obj.find('xmin').text)
            xmax = int(obj.find('xmax').text)
            ymin = int(obj.find('ymin').text)
            ymax = int(obj.find('ymax').text)
            bboxes.append([xmin, ymin, xmax, ymax])
            cat_id.append(classes.index(clas))
        annotations = {'image': image, 'bboxes': bboxes, 'category_id': cat_id}
        category_id_to_name = {0: "0", 1: "1"}
        try:
            augmented = aug(**annotations)
        except:
            # Best-effort: skip samples the pipeline cannot augment.
            #print("Some error")
            continue
        if not augmented['bboxes']:
            # All bboxes fell below min_visibility; keep the original files.
            continue
        # NOTE(review): annotation_file is not closed on the `continue`
        # paths above — relies on GC to release the handle.
        annotation_file.close()
        cv2.imwrite(path_image, augmented['image'])
        write_annotation(path_annotation, augmented)
# Split the file list into chunks of 100 and process them with up to 16
# worker processes at a time, updating the progress bar per finished chunk.
chunks = [files[x:x+100] for x in range(0, len(files), 100)]
thread_count = 16
while chunks:
    threads = []  # NOTE(review): these are multiprocessing Processes, not threads
    for i in range(0,thread_count):
        if not chunks:
            break
        # pop() takes from the END, so chunks are processed in reverse order.
        chunk = chunks.pop()
        # args=([chunk]) passes the chunk list as the single positional arg.
        p = Process(target=process_images, args=([chunk]))
        p.start()
        threads.append(p)
        #process_images(aug, chunk)
    for thread in threads:
        thread.join()
        # Each chunk holds up to 100 files (the last one may be smaller).
        pbar.update(100)
pbar.close()
|
1678564
|
import pytest
from iocage_lib.ioc_common import validate_plugin_manifest
# Minimal manifest accepted by validate_plugin_manifest; the tests below
# copy it and break one field at a time.
VALID_MANIFEST = {
    "name": "test_plugin",
    "release": "12.2-RELEASE",
    "pkgs": [],
    "packagesite": "http://pkg.FreeBSD.org/${ABI}/latest",
    "fingerprints": {
        "iocage-plugins": [
            {
                "function": "sha256",
                "fingerprint": "b0170035af3acc5f3f3ae1859dc717101b4e6c1d0a794ad554928ca0cbb2f438"
            }
        ]
    },
    "artifact": "TEST_ARTIFACT",
}
def test_validate_plugin_manifest():
    # The unmodified reference manifest must pass validation.
    validate_plugin_manifest(VALID_MANIFEST, None, None)
@pytest.mark.parametrize(
    "missing_field",
    ["name", "release", "pkgs", "packagesite", "fingerprints", "artifact"]
)
def test_missing_required_fields(missing_field):
    # Dropping any single required field must raise with a matching message.
    manifest = VALID_MANIFEST.copy()
    del manifest[missing_field]
    exp_msg = f"'{missing_field}' is a required property"
    with pytest.raises(RuntimeError, match=exp_msg):
        validate_plugin_manifest(manifest, None, None)
def test_missing_multiple_required_fields():
    """Both missing required fields must be reported, one per line."""
    broken = VALID_MANIFEST.copy()
    for field in ("name", "packagesite"):
        del broken[field]
    expected = ("'name' is a required property\n"
                "'packagesite' is a required property")
    with pytest.raises(RuntimeError, match=expected):
        validate_plugin_manifest(broken, None, None)
def test_devfs_ruleset_not_dict():
    """devfs_ruleset must be a JSON object, not a string."""
    bad = dict(VALID_MANIFEST)
    bad["devfs_ruleset"] = "INVALID_TYPE"
    with pytest.raises(RuntimeError, match="'INVALID_TYPE' is not of type 'object'"):
        validate_plugin_manifest(bad, None, None)
def test_devfs_ruleset_missing_paths():
    # An empty devfs_ruleset object is rejected: 'paths' is mandatory.
    manifest = VALID_MANIFEST.copy()
    manifest["devfs_ruleset"] = {}
    exp_msg = "'paths' is a required property"
    with pytest.raises(RuntimeError, match=exp_msg):
        validate_plugin_manifest(manifest, None, None)
def test_devfs_ruleset_invalid_paths_type():
    # devfs_ruleset.paths must itself be an object, not a string.
    manifest = VALID_MANIFEST.copy()
    manifest["devfs_ruleset"] = {
        "paths": "INVALID_TYPE",
    }
    exp_msg = "'INVALID_TYPE' is not of type 'object'"
    with pytest.raises(RuntimeError, match=exp_msg):
        validate_plugin_manifest(manifest, None, None)
def test_devfs_ruleset_invalid_includes_type():
    """devfs_ruleset.includes must be an array, not a string."""
    bad = dict(VALID_MANIFEST)
    bad["devfs_ruleset"] = {"paths": {}, "includes": "INVALID_TYPE"}
    expected = "'INVALID_TYPE' is not of type 'array'"
    with pytest.raises(RuntimeError, match=expected):
        validate_plugin_manifest(bad, None, None)
def test_valid_devfs_ruleset():
    """A well-formed devfs_ruleset (object paths, array includes) passes."""
    ok = dict(VALID_MANIFEST)
    ok["devfs_ruleset"] = {"paths": {}, "includes": []}
    validate_plugin_manifest(ok, None, None)
|
1678616
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.performance_curves import CurveFanPressureRise
log = logging.getLogger(__name__)
class TestCurveFanPressureRise(unittest.TestCase):
    """Round-trip test: populate every field of a CurveFanPressureRise,
    save it to a temporary IDF file, reload, and check all fields survive."""

    def setUp(self):
        # Temporary IDF file used for the save/load round trip.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_curvefanpressurerise(self):
        pyidf.validation_level = ValidationLevel.error
        obj = CurveFanPressureRise()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # real
        var_coefficient1_c1 = 2.2
        obj.coefficient1_c1 = var_coefficient1_c1
        # real
        var_coefficient2_c2 = 3.3
        obj.coefficient2_c2 = var_coefficient2_c2
        # real
        var_coefficient3_c3 = 4.4
        obj.coefficient3_c3 = var_coefficient3_c3
        # real
        var_coefficient4_c4 = 5.5
        obj.coefficient4_c4 = var_coefficient4_c4
        # real
        var_minimum_value_of_qfan = 6.6
        obj.minimum_value_of_qfan = var_minimum_value_of_qfan
        # real
        var_maximum_value_of_qfan = 7.7
        obj.maximum_value_of_qfan = var_maximum_value_of_qfan
        # real
        var_minimum_value_of_psm = 8.8
        obj.minimum_value_of_psm = var_minimum_value_of_psm
        # real
        var_maximum_value_of_psm = 9.9
        obj.maximum_value_of_psm = var_maximum_value_of_psm
        # real
        var_minimum_curve_output = 10.1
        obj.minimum_curve_output = var_minimum_curve_output
        # real
        var_maximum_curve_output = 11.11
        obj.maximum_curve_output = var_maximum_curve_output
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        # Echo the saved file to the debug log for troubleshooting.
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        # Reload and verify every field round-tripped.
        idf2 = IDF(self.path)
        self.assertEqual(idf2.curvefanpressurerises[0].name, var_name)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].coefficient1_c1, var_coefficient1_c1)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].coefficient2_c2, var_coefficient2_c2)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].coefficient3_c3, var_coefficient3_c3)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].coefficient4_c4, var_coefficient4_c4)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].minimum_value_of_qfan, var_minimum_value_of_qfan)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].maximum_value_of_qfan, var_maximum_value_of_qfan)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].minimum_value_of_psm, var_minimum_value_of_psm)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].maximum_value_of_psm, var_maximum_value_of_psm)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].minimum_curve_output, var_minimum_curve_output)
        self.assertAlmostEqual(idf2.curvefanpressurerises[0].maximum_curve_output, var_maximum_curve_output)
|
1678621
|
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.pardir, os.pardir)))
from TaskKit.Scheduler import Scheduler
class TaskKitTest(unittest.TestCase):
    """Smoke test: a Scheduler can be constructed, started and stopped."""

    def setUp(self):
        self._scheduler = Scheduler()

    def checkBasics(self):
        # 'check' prefix matches the makeSuite() selector below.
        sched = self._scheduler
        sched.start()

    def tearDown(self):
        # Stop the scheduler started in checkBasics and drop the reference.
        self._scheduler.stop()
        self._scheduler = None
def makeTestSuite():
    """Build a TestSuite from the 'check*' methods of TaskKitTest."""
    checks = unittest.makeSuite(TaskKitTest, 'check')
    return unittest.TestSuite((checks,))
if __name__ == '__main__':
    # Stream results to stdout (default is stderr) and run makeTestSuite.
    runner = unittest.TextTestRunner(stream=sys.stdout)
    unittest.main(defaultTest='makeTestSuite', testRunner=runner)
|
1678623
|
import pytest
@pytest.fixture
def default_internal_request():
    """Baseline payload for an internal transfer request."""
    return {
        'concepto_pago': 'PRUEBA',
        'institucion_ordenante': '646',
        'cuenta_beneficiario': '072691004495711499',
        'institucion_beneficiaria': '072',
        'monto': 1020,
        'nombre_beneficiario': '<NAME>',
        'nombre_ordenante': 'BANCO',
        'cuenta_ordenante': '646180157000000004',
        'rfc_curp_ordenante': 'ND',
        'version': 2,
    }
@pytest.fixture(scope='module')
def vcr_config():
    """VCR settings that scrub auth headers from recorded cassettes."""
    return {
        'filter_headers': [
            ('Authorization', 'DUMMY'),
            ('access-token', 'DUMMY'),
        ],
    }
|
1678635
|
# Fragment of a regression demo; `lsd_and_math` (a DataFrame), `fmin`,
# `sum_of_squares` and `sum_of_absval` are defined earlier in the notebook.
# Presumably compares least-squares vs least-absolute-deviation fits — TODO confirm.
lsd_and_math.loc[7] = [6, 70]
x, y = lsd_and_math.T.values
# Fit intercept/slope by minimizing squared and absolute residuals respectively.
b0, b1 = fmin(sum_of_squares, [0,1], args=(x,y))
b0_abs, b1_abs = fmin(sum_of_absval, [0,0], args=(x,y))
print('\nintercept: {0:.2}, slope: {1:.2}'.format(b0,b1))
# Scatter the data and overlay both fitted lines over x in [0, 10].
ax = lsd_and_math.plot(x='Drugs', y='Score', style='ro', legend=False, xlim=(0,8))
ax.plot([0,10], [b0, b0+b1*10], 'k:')
ax.plot([0,10], [b0_abs, b0_abs+b1_abs*10]);
|
1678674
|
import numpy as np
def get_src_indices_by_row(row_idxs, shape, flat=True):
    """
    Provide the src_indices when connecting a vectorized variable from an output to an input.

    Indices are selected by choosing the first indices to be passed, corresponding to node
    index in Dymos.

    Parameters
    ----------
    row_idxs : array_like
        The rows/node indices to be connected from the source to the target.
    shape : tuple
        The shape of the variable at each node (ignores the first dimension).
    flat : bool
        If True, return the source indices in flat source indices form.

    Returns
    -------
    array_like
        If flat, a numpy array of shape `(row_idxs,) + shape` where each element is the index
        of the source of that element in the source array, in C-order.
    """
    if not flat:
        raise NotImplementedError('Currently get_src_indices_by_row only returns '
                                  'flat source indices.')
    # Source has one row per node up to the largest requested row index.
    n_src_rows = np.max(row_idxs) + 1
    full_shape = (n_src_rows,) + shape
    # Enumerate every source element in C-order, then pick the requested rows.
    flat_indices = np.arange(np.prod(full_shape), dtype=int).reshape(full_shape)
    grid = np.ix_(row_idxs, *(np.arange(dim, dtype=int) for dim in shape))
    return flat_indices[grid]
|
1678691
|
from .common import *
def process_get_test(options):
    """Download each requested test number, then persist the session."""
    if not load_session_with_options(options):
        fatal('No session known. Use init first.')
    for number in options.numbers:
        global_vars.problem.download_test(number)
    save_session()
def process_get_all_tests(options):
    """Download every test of the current problem, then persist the session."""
    if not load_session_with_options(options):
        fatal('No session known. Use init first.')
    global_vars.problem.download_all_tests()
    save_session()
def add_parser(subparsers):
    """Register the 'gettest' and 'getalltests' subcommands on *subparsers*."""
    parser_get_test = subparsers.add_parser(
        'gettest',
        help="Downloads test with given numbers"
    )
    parser_get_test.add_argument('numbers', nargs='+', help='Tests to download')
    parser_get_test.set_defaults(func=process_get_test)
    parser_get_all_tests = subparsers.add_parser(
        'getalltests',
        # Typo fix: was "Downloads alls tests".
        help="Downloads all tests"
    )
    parser_get_all_tests.set_defaults(func=process_get_all_tests)
|
1678715
|
import argparse
import os
import sys
# for linux env.
sys.path.insert(0,'..')
from distutils.util import strtobool
import pickle
import torch
import numpy as np
from data.data_loader import NumpyTupleDataset
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Draw
import torch
import torch.nn as nn
import torch.nn.functional as F
from data import transform_qm9, transform_zinc250k
from data.transform_zinc250k import zinc250_atomic_num_list, transform_fn_zinc250k
# from mflow.generate import generate_mols_along_axis
from mflow.models.hyperparams import Hyperparameters
from mflow.models.utils import check_validity, construct_mol, adj_to_smiles
from mflow.utils.model_utils import load_model, get_latent_vec, smiles_to_adj
from mflow.utils.molecular_metrics import MolecularMetrics
from mflow.models.model import MoFlow, rescale_adj
from mflow.utils.timereport import TimeReport
import mflow.utils.environment as env
from sklearn.linear_model import LinearRegression
import time
import functools
# Make every print() flush immediately (useful when stdout is piped/logged).
print = functools.partial(print, flush=True)
class MoFlowProp(nn.Module):
    """A frozen MoFlow encoder with a small MLP head that predicts one
    scalar property from the concatenated flat latent [h, adj_h]."""

    def __init__(self, model:MoFlow, hidden_size):
        super(MoFlowProp, self).__init__()
        self.model = model
        # Latent dimension = flattened bond part + flattened atom part.
        self.latent_size = model.b_size + model.a_size
        self.hidden_size = hidden_size
        # Layer widths: latent -> hidden_size... -> 1 (scalar property).
        vh = (self.latent_size,) + tuple(hidden_size) + (1,)
        modules = []
        for i in range(len(vh)-1):
            modules.append(nn.Linear(vh[i], vh[i+1]))
            if i < len(vh) - 2:
                # Tanh between hidden layers; no activation on the output.
                modules.append(nn.Tanh())
                # modules.append(nn.ReLU())
        self.propNN = nn.Sequential(*modules)

    def encode(self, adj, x):
        """Encode (adj, x) to the flat latent h; MoFlow runs in eval/no-grad."""
        with torch.no_grad():
            self.model.eval()
            adj_normalized = rescale_adj(adj).to(adj)
            z, sum_log_det_jacs = self.model(adj, x, adj_normalized)  # z = [h, adj_h]
            h = torch.cat([z[0].reshape(z[0].shape[0], -1), z[1].reshape(z[1].shape[0], -1)], dim=1)
        return h, sum_log_det_jacs

    def reverse(self, z):
        """Decode a flat latent back to (adj, x) via MoFlow's reverse pass."""
        with torch.no_grad():
            self.model.eval()
            adj, x = self.model.reverse(z, true_adj=None)
        return adj, x

    def forward(self, adj, x):
        h, sum_log_det_jacs = self.encode(adj, x)
        output = self.propNN(h)  # do I need to add nll of the unsupervised part? or just keep few epoch? see the results
        return output, h, sum_log_det_jacs
def fit_model(model, atomic_num_list, data, data_prop, device, property_name='qed',
              max_epochs=10, learning_rate=1e-3, weight_decay=1e-5):
    """Train the property head of a MoFlowProp *model* with MSE loss.

    *data* is a DataLoader yielding (x, adj) batches; *data_prop* holds one
    (qed, plogp, ...) tuple per dataset item in the same order. The column
    selected by *property_name* is regressed. Returns the trained model.
    Raises ValueError for an unknown *property_name*.
    """
    start = time.time()
    print("Start at Time: {}".format(time.ctime()))
    model = model.to(device)
    model.train()
    # Loss and optimizer
    metrics = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    N = len(data.dataset)
    assert len(data_prop) == N
    iter_per_epoch = len(data)
    log_step = 20
    # batch_size = data.batch_size
    tr = TimeReport(total_iter = max_epochs * iter_per_epoch)
    if property_name == 'qed':
        col = 0  # qed lies in [0,1]
    elif property_name == 'plogp':
        col = 1  # unbounded, normalized later???
    else:
        raise ValueError("Wrong property_name{}".format(property_name))
    for epoch in range(max_epochs):
        print("In epoch {}, Time: {}".format(epoch + 1, time.ctime()))
        for i, batch in enumerate(data):
            x = batch[0].to(device)    # (bs,9,5)
            adj = batch[1].to(device)  # (bs,4,9,9)
            bs = x.shape[0]
            # Slice the matching property tuples for this batch.
            ps = i * bs
            pe = min((i+1)*bs, N)
            true_y = [[tt[col]] for tt in data_prop[ps:pe]]  # [[propf(mol)] for mol in true_mols]
            # BUG FIX: was .cuda(), which crashed on CPU-only runs and
            # ignored the `device` argument.
            true_y = torch.tensor(true_y).float().to(device)
            # model and loss
            optimizer.zero_grad()
            y, z, sum_log_det_jacs = model(adj, x)
            loss = metrics(y, true_y)
            loss.backward()
            optimizer.step()
            tr.update()
            # Print log info
            if (i + 1) % log_step == 0:
                # BUG FIX: was `args.max_epochs` — `args` is not defined in
                # this function; use the parameter instead.
                print('Epoch [{}/{}], Iter [{}/{}], loss: {:.5f}, {:.2f} sec/iter, {:.2f} iters/sec: '.
                      format(epoch + 1, max_epochs, i + 1, iter_per_epoch,
                             loss.item(),
                             tr.get_avg_time_per_iter(), tr.get_avg_iter_per_sec()))
                tr.print_summary()
        # How to validate??? set aside validation data and cal and print the loss
    tr.print_summary()
    tr.end()
    print("[fit_model Ends], Start at {}, End at {}, Total {}".
          format(time.ctime(start), time.ctime(), time.time()-start))
    return model
def optimize_mol(model:MoFlow, property_model:MoFlowProp, smiles, device, sim_cutoff, lr=2.0, num_iter=20,
                 data_name='qm9', atomic_num_list=[6, 7, 8, 9, 0], property_name='qed', debug=True, random=False):
    """Hill-climb in latent space to improve *property_name* of *smiles*.

    Encodes the molecule, takes *num_iter* gradient (or random, if
    *random*) steps on the property head, decodes every visited latent,
    and keeps valid, novel molecules with Tanimoto similarity to the
    start >= *sim_cutoff*. Returns (results, start) where results is a
    list of (smile, property, similarity, original_smile) sorted by
    property descending.
    NOTE: atomic_num_list is a mutable default argument — callers must
    not mutate it.
    """
    if property_name == 'qed':
        propf = env.qed  # [0,1]
    elif property_name == 'plogp':
        propf = env.penalized_logp  # unbounded, normalized later???
    else:
        raise ValueError("Wrong property_name{}".format(property_name))
    model.eval()
    property_model.eval()
    with torch.no_grad():
        bond, atoms = smiles_to_adj(smiles, data_name)
        bond = bond.to(device)
        atoms = atoms.to(device)
        mol_vec, sum_log_det_jacs = property_model.encode(bond, atoms)
        if debug:
            # Round-trip sanity check: encode -> decode should reproduce the input.
            adj_rev, x_rev = property_model.reverse(mol_vec)
            reverse_smiles = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list)
            print(smiles, reverse_smiles)
            adj_normalized = rescale_adj(bond).to(device)
            z, sum_log_det_jacs = model(bond, atoms, adj_normalized)
            z0 = z[0].reshape(z[0].shape[0], -1)
            z1 = z[1].reshape(z[1].shape[0], -1)
            adj_rev, x_rev = model.reverse(torch.cat([z0, z1], dim=1))
            # val_res = check_validity(adj_rev, x_rev, atomic_num_list)
            reverse_smiles2 = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list)
            train_smiles2 = adj_to_smiles(bond.cpu(), atoms.cpu(), atomic_num_list)
            print(train_smiles2, reverse_smiles2)
    mol = Chem.MolFromSmiles(smiles)
    fp1 = AllChem.GetMorganFingerprint(mol, 2)
    start = (smiles, propf(mol), None)  # , mol)
    cur_vec = mol_vec.clone().detach().requires_grad_(True).to(device)  # torch.tensor(mol_vec, requires_grad=True).to(mol_vec)
    start_vec = mol_vec.clone().detach().requires_grad_(True).to(device)
    visited = []
    for step in range(num_iter):
        prop_val = property_model.propNN(cur_vec).squeeze()
        grad = torch.autograd.grad(prop_val, cur_vec)[0]
        # cur_vec = cur_vec.data + lr * grad.data
        if random:
            # Random direction of fixed per-element magnitude lr from the start point.
            rad = torch.randn_like(cur_vec.data)
            cur_vec = start_vec.data + lr * rad / torch.sqrt(rad * rad)
        else:
            # Elementwise sign-of-gradient ascent step (x/sqrt(x*x) == sign(x)).
            cur_vec = cur_vec.data + lr * grad.data / torch.sqrt(grad.data * grad.data)
        cur_vec = cur_vec.clone().detach().requires_grad_(True).to(device)  # torch.tensor(cur_vec, requires_grad=True).to(mol_vec)
        visited.append(cur_vec)
    hidden_z = torch.cat(visited, dim=0).to(device)
    adj, x = property_model.reverse(hidden_z)
    val_res = check_validity(adj, x, atomic_num_list, debug=debug)
    valid_mols = val_res['valid_mols']
    valid_smiles = val_res['valid_smiles']
    results = []
    sm_set = set()
    sm_set.add(smiles)
    for m, s in zip(valid_mols, valid_smiles):
        if s in sm_set:
            continue
        sm_set.add(s)
        p = propf(m)
        fp2 = AllChem.GetMorganFingerprint(m, 2)
        sim = DataStructs.TanimotoSimilarity(fp1, fp2)
        if sim >= sim_cutoff:
            results.append((s, p, sim, smiles))
    # smile, property, similarity, mol
    results.sort(key=lambda tup: tup[1], reverse=True)
    return results, start
def smile_cvs_to_property(data_name='zinc250k'):
    """Recompute qed/plogp for every SMILES in the dataset CSV.

    Writes three CSVs: <data_name>_property.csv in input order, plus
    copies sorted by qed and by plogp. Invalid SMILES get qed=-1,
    plogp=-999. Raises ValueError for an unknown *data_name*.
    """
    if data_name == 'qm9':
        # Total: 133885 Invalid: 0 bad_plogp: 0 bad_qed: 0
        atomic_num_list = [6, 7, 8, 9, 0]
        filename = '../data/qm9.csv'
        colname = 'SMILES1'
    elif data_name == 'zinc250k':
        # Total: 249455 Invalid: 0 bad_plogp: 0 bad_qed: 0
        atomic_num_list = zinc250_atomic_num_list
        filename = '../data/zinc250k.csv'
        colname = 'smiles'
    else:
        # BUG FIX: previously fell through with filename/colname unbound.
        raise ValueError('Unknown data_name: {}'.format(data_name))
    df = pd.read_csv(filename)
    smiles = df[colname].tolist()
    n = len(smiles)
    # for index, row in df.iterrows():
    #     sm = row[colname]
    #     smiles.append(sm)
    f = open(data_name+'_property.csv', "w")
    f.write('qed,plogp,smile\n')
    results = []
    total = 0
    bad_qed = 0
    bad_plogp = 0
    invalid = 0
    for i, smile in enumerate(smiles):
        if i % 10000 == 0:
            print('In {}/{} line'.format(i, n))
        total += 1
        mol = Chem.MolFromSmiles(smile)
        if mol is None:
            # BUG FIX: the None check now precedes MolToSmiles — previously
            # Chem.MolToSmiles(None, ...) raised before this branch could run.
            print(i, smile)
            invalid += 1
            qed = -1
            plogp = -999
            smile2 = 'N/A'
            results.append((qed, plogp, smile, smile2))
            f.write('{},{},{}\n'.format(qed, plogp, smile))
            continue
        smile2 = Chem.MolToSmiles(mol, isomericSmiles=True)
        try:
            qed = env.qed(mol)
        except ValueError as e:
            bad_qed += 1
            qed = -1
            print(i + 1, Chem.MolToSmiles(mol, isomericSmiles=True), ' error in qed')
        try:
            plogp = env.penalized_logp(mol)
        except RuntimeError as e:
            bad_plogp += 1
            plogp = -999
            print(i + 1, Chem.MolToSmiles(mol, isomericSmiles=True), ' error in penalized_log')
        results.append((qed, plogp, smile, smile2))
        f.write('{},{},{}\n'.format(qed, plogp, smile))
    f.flush()
    f.close()
    # Dump a qed-sorted copy.
    results.sort(key=lambda tup: tup[0], reverse=True)
    f = open(data_name+'_property_sorted_qed.csv', "w")
    f.write('qed,plogp,smile\n')
    for r in results:
        qed, plogp, smile, smile2 = r
        f.write('{},{},{}\n'.format(qed, plogp, smile))
    f.flush()
    f.close()
    # Dump a plogp-sorted copy.
    results.sort(key=lambda tup: tup[1], reverse=True)
    f = open(data_name+'_property_sorted_plogp.csv', "w")
    f.write('qed,plogp,smile\n')
    for r in results:
        qed, plogp, smile, smile2 = r
        f.write('{},{},{}\n'.format(qed, plogp, smile))
    f.flush()
    f.close()
    print('Dump done!')
    print('Total: {}\t Invalid: {}\t bad_plogp: {} \t bad_qed: {}\n'.format(total, invalid, bad_plogp, bad_qed))
def load_property_csv(data_name, normalize=True):
    """Load (qed, plogp, smile) tuples for *data_name* ('qm9' or 'zinc250k').

    We use qed and plogp in zinc250k_property.csv which are recalculated by rdkit;
    the recalculated qed results are in tiny inconsistent with qed in zinc250k.csv.
    e.g.
        zinc250k_property.csv: qed=0.7319008436872337 for
        CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1, matching
        rdkit.Chem.QED.qed / mflow.utils.environment.penalized_logp,
        whereas the original zinc250k.csv lists qed=0.702012232801 and has
        no plogp column at all.

    When *normalize* is True, plogp is clipped to [-10, 5] and rescaled to
    roughly [0, 1]. Raises ValueError for an unknown *data_name*.
    """
    if data_name == 'qm9':
        # Total: 133885 Invalid: 0 bad_plogp: 0 bad_qed: 0
        filename = '../data/qm9_property.csv'
    elif data_name == 'zinc250k':
        # Total: 249455 Invalid: 0 bad_plogp: 0 bad_qed: 0
        filename = '../data/zinc250k_property.csv'
    else:
        # BUG FIX: previously fell through with `filename` unbound (NameError).
        raise ValueError('Unknown data_name: {}'.format(data_name))
    df = pd.read_csv(filename)  # qed, plogp, smile
    if normalize:
        # plogp raw range is roughly [-62.52, 4.52].
        m = df['plogp'].mean()   # ~0.00026
        std = df['plogp'].std()  # ~2.05
        mn = df['plogp'].min()
        mx = df['plogp'].max()
        # df['plogp'] = 0.5 * (np.tanh(0.01 * ((df['plogp'] - m) / std)) + 1)  # [0.35, 0.51]
        # df['plogp'] = (df['plogp'] - m) / std
        lower = -10  # -5 # -10
        df['plogp'] = df['plogp'].clip(lower=lower, upper=5)
        df['plogp'] = (df['plogp'] - lower) / (mx-lower)
    tuples = [tuple(x) for x in df.values]
    print('Load {} done, length: {}'.format(filename, len(tuples)))
    return tuples
def write_similes(filename, data, atomic_num_list):
    """
    Compute qed/plogp for every (x, adj, label) tensor triple in *data* and
    dump them to *filename* plus qed-sorted and plogp-sorted copies.

    QM9: Total: 133885 bad_plogp: 133885 bad_qed: 142 — plogp is not applicable to the QM9 dataset
    zinc250k:
    :param filename: output CSV path (rows: qed,plogp,smile,canonical_smile)
    :param data: iterable of (x, adj, label) dataset entries
    :param atomic_num_list: atom-type decoding table for construct_mol
    :return: None (writes three CSV files)
    """
    f = open(filename, "w")  # append mode
    results = []
    total = 0
    bad_qed = 0
    bad_plogp = 0
    invalid = 0
    for i, r in enumerate(data):
        total += 1
        x, adj, label = r
        # Decode tensors to an RDKit mol, then round-trip through SMILES to
        # canonicalize (and to detect undecodable molecules).
        mol0 = construct_mol(x, adj, atomic_num_list)
        smile = Chem.MolToSmiles(mol0, isomericSmiles=True)  # 'CC(C)(C)C1=CC=C2OC=C(CC(=O)NC3=CC=CC=C3F)C2=C1'
        mol = Chem.MolFromSmiles(smile)
        if mol == None:
            print(i, smile)
            invalid += 1
            qed = -1
            plogp = -999
            smile2 = 'N/A'
            results.append((qed, plogp, smile, smile2))
            f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
            continue
        smile2 = Chem.MolToSmiles(mol, isomericSmiles=True)  # 'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1'
        try:
            qed = env.qed(mol)
        except ValueError as e:
            bad_qed += 1
            qed = -1
            print(i+1, Chem.MolToSmiles(mol, isomericSmiles=True), ' error in qed')
        try:
            plogp = env.penalized_logp(mol)
        except RuntimeError as e:
            bad_plogp += 1
            plogp = -999
            print(i+1, Chem.MolToSmiles(mol, isomericSmiles=True), ' error in penalized_log')
        results.append((qed, plogp, smile, smile2))
        f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
    f.flush()
    f.close()
    # qed-sorted copy.
    results.sort(key=lambda tup: tup[0], reverse=True)
    fv = filename.split('.')
    f = open(fv[0]+'_sortedByQED.'+fv[1], "w")  # append mode
    for r in results:
        qed, plogp, smile, smile2 = r
        f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
    f.flush()
    f.close()
    # plogp-sorted copy.
    results.sort(key=lambda tup: tup[1], reverse=True)
    fv = filename.split('.')
    f = open(fv[0] + '_sortedByPlogp.' + fv[1], "w")  # append mode
    for r in results:
        qed, plogp, smile, smile2 = r
        f.write('{},{},{},{}\n'.format(qed, plogp, smile, smile2))
    f.flush()
    f.close()
    print('Dump done!')
    print('Total: {}\t Invalid: {}\t bad_plogp: {} \t bad_qed: {}\n'.format(total, invalid, bad_plogp, bad_qed))
def test_smiles_to_tensor():
    """Manual check that kekulized / aromatic / isomeric SMILES variants of
    the same molecule map to identical (bond, atom) tensors."""
    mol_smiles = 'CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1'  # 'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1' # 'CCOC1=C(N)OC=C1' #'CCC1COC(C)CO1' # 'CCC1=NNN=C1CC' #'CCCC1=C(O)C=CO1' # 'CCCCC1=NC=NN1' # 'CCCNC1=COC=C1' # or 'CCCNc1ccoc1' same results for smile # 'CCCNC1=COC=C1' #'CCCNC1=CC=CO1' #'CC1=C2C(=O)N(C)C12'
    mm = Chem.MolFromSmiles(mol_smiles)
    Chem.Kekulize(mm, clearAromaticFlags=True)  # use this after mol from simles
    print(Chem.MolToSmiles(mm))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print('Chem.AddHs(mm)')
    # NOTE(review): Chem.AddHs returns a NEW mol; the result is discarded here.
    Chem.AddHs(mm)
    Chem.SanitizeMol(mm, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
    print(Chem.MolToSmiles(mm))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # atoms == atoms2 == atoms3
    bond, atoms = smiles_to_adj('CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1', 'zinc250k')
    print(atoms.max(2)[1])
    bond2, atoms2 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1', 'zinc250k')
    print(atoms2.max(2)[1], (bond == bond2).all(), (atoms == atoms2).all())
    bond3, atoms3 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1', 'zinc250k')
    print(atoms3.max(2)[1], (bond == bond3).all(), (atoms == atoms3).all())
def test_property_of_smile_vs_tensor(data_name, atomic_num_list):
    """Manual check that qed/plogp are stable across the SMILES -> tensor ->
    SMILES round trip and across kekulization / H-addition."""
    mol_smiles = 'COC1=CC=C(C2=CC(C3=CC=CC=C3)=CC(C3=CC=C(Br)C=C3)=[O+]2)C=C1'  # 'CC(=O)c1ccc(S(=O)(=O)N2CCCC[C@H]2C)cc1' #'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1' # 'CCOC1=C(N)OC=C1' #'CCC1COC(C)CO1' # 'CCC1=NNN=C1CC' #'CCCC1=C(O)C=CO1' # 'CCCCC1=NC=NN1' # 'CCCNC1=COC=C1' # or 'CCCNc1ccoc1' same results for smile # 'CCCNC1=COC=C1' #'CCCNC1=CC=CO1' #'CC1=C2C(=O)N(C)C12'
    mm = Chem.MolFromSmiles(mol_smiles)
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('{}: plogp: {}\tqed: {}'.format(mol_smiles, plogp, qed))
    # Round trip through the tensor representation and recompute properties.
    adj, x = smiles_to_adj(mol_smiles, data_name=data_name)
    rev_mol_smiles = adj_to_smiles(adj, x, atomic_num_list)
    mm2 = Chem.MolFromSmiles(rev_mol_smiles[0])
    plogp = env.penalized_logp(mm2)
    qed = env.qed(mm2)
    print('{}: plogp: {}\tqed: {}'.format(rev_mol_smiles[0], plogp, qed))
    # Properties after kekulizing the original mol in place.
    Chem.Kekulize(mm)  # , clearAromaticFlags=True) # use this after mol from simles
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    mm3 = Chem.MolFromSmiles(Chem.MolToSmiles(mm))
    plogp = env.penalized_logp(mm3)
    qed = env.qed(mm3)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    print(Chem.MolToSmiles(mm))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    print('Chem.AddHs(mm)')
    # NOTE(review): Chem.AddHs returns a NEW mol; the result is discarded here.
    Chem.AddHs(mm)
    # Chem.SanitizeMol(mm, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
    plogp = env.penalized_logp(mm)
    qed = env.qed(mm)
    print('plogp: {}\tqed: {}'.format(plogp, qed))
    print(Chem.MolToSmiles(mm))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CC(=O)C1C=CC(=CC=1)S(=O)(=O)N1CCCCC1C
    print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1
    print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CC(=O)C1=CC=C(S(=O)(=O)N2CCCCC2C)C=C1
    # atoms == atoms2 == atoms3
    bond, atoms = smiles_to_adj('COC1=CC=C(C2=CC(C3=CC=CC=C3)=CC(C3=CC=C(Br)C=C3)=[O+]2)C=C1', 'zinc250k')
    print(atoms.max(2)[1])
    bond2, atoms2 = smiles_to_adj('COC1C=CC(=CC=1)C1=CC(=CC(=[O+]1)C1C=CC(Br)=CC=1)C1C=CC=CC=1', 'zinc250k')
    print(atoms2.max(2)[1], (bond == bond2).all(), (atoms == atoms2).all())
    # bond3, atoms3 = smiles_to_adj('CC(=O)C1=CC=C(S(=O)(=O)N2CCCC[C@H]2C)C=C1', 'zinc250k')
    # print(atoms3.max(2)[1], (bond==bond3).all(), (atoms==atoms3).all())
def find_top_score_smiles(model, device, data_name, property_name, train_prop, topk, atomic_num_list, debug):
    """Optimize the top-k training molecules for a property and dump novel hits.

    Args:
        model: Trained MoFlow model forwarded to ``optimize_mol``.
        device: Torch device used for optimization.
        data_name: Dataset name ('qm9' or 'zinc250k'), forwarded to ``optimize_mol``.
        property_name: 'qed' or 'plogp'; selects the column of ``train_prop`` to rank by.
        train_prop: Iterable of (qed, plogp, smile) tuples for the training set.
        topk: Number of top-ranked seed molecules to optimize.
        atomic_num_list: Atom-type list for the dataset.
        debug: Verbosity flag forwarded to ``optimize_mol``.

    Side effects:
        Writes '<property_name>_discovered_sorted.csv' (score, smile, sim, seed smile),
        sorted by score descending, keeping only molecules not seen in training.
        NOTE(review): relies on the module-level ``property_model`` — confirm it is set
        before calling.
    """
    start_time = time.time()
    if property_name == 'qed':
        col = 0
    elif property_name == 'plogp':
        col = 1
    else:
        # Fail fast: previously an unknown property_name left `col` unbound and
        # raised a confusing UnboundLocalError at the sort below.
        raise ValueError('Unsupported property_name: {}'.format(property_name))
    print('Finding top {} score'.format(property_name))
    train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col], reverse=True)  # qed, plogp, smile
    result_list = []
    for i, r in enumerate(train_prop_sorted):
        if i >= topk:
            break
        if i % 50 == 0:
            print('Optimization {}/{}, time: {:.2f} seconds'.format(i, topk, time.time() - start_time))
        qed, plogp, smile = r
        results, ori = optimize_mol(model, property_model, smile, device, sim_cutoff=0, lr=.005, num_iter=100,
                                    data_name=data_name, atomic_num_list=atomic_num_list,
                                    property_name=property_name, random=False, debug=debug)
        result_list.extend(results)  # results: [(smile2, property, sim, smile1), ...]
    result_list.sort(key=lambda tup: tup[1], reverse=True)
    # Novelty check: collect both the raw and the canonicalized training SMILES,
    # since optimized molecules come back in canonical form.
    train_smile = set()
    for i, r in enumerate(train_prop_sorted):
        qed, plogp, smile = r
        train_smile.add(smile)
        mol = Chem.MolFromSmiles(smile)
        smile2 = Chem.MolToSmiles(mol, isomericSmiles=True)
        train_smile.add(smile2)
    result_list_novel = []
    for i, r in enumerate(result_list):
        smile, score, sim, smile_original = r
        if smile not in train_smile:
            result_list_novel.append(r)
    # Dump results; `with` guarantees the handle is closed even if a write fails.
    with open(property_name + '_discovered_sorted.csv', "w") as f:
        for r in result_list_novel:
            smile, score, sim, smile_original = r
            f.write('{},{},{},{}\n'.format(score, smile, sim, smile_original))
        f.flush()
    print('Dump done!')
def constrain_optimization_smiles(model, device, data_name, property_name, train_prop, topk,
                                  atomic_num_list, debug, sim_cutoff=0.0):
    """Constrained optimization: improve the k lowest-property molecules under a similarity cutoff.

    Args:
        model: Trained MoFlow model forwarded to ``optimize_mol``.
        device: Torch device used for optimization.
        data_name: Dataset name ('qm9' or 'zinc250k').
        property_name: 'qed' or 'plogp'; selects the ranking column of ``train_prop``.
        train_prop: Iterable of (qed, plogp, smile) tuples for the training set.
        topk: Number of worst-ranked seed molecules to optimize.
        atomic_num_list: Atom-type list for the dataset.
        debug: Verbosity flag forwarded to ``optimize_mol``.
        sim_cutoff: Minimum Tanimoto similarity the optimized molecule must keep to its seed.

    Side effects:
        Writes '<property_name>_constrain_optimization.csv' and prints the success rate.
        NOTE(review): relies on the module-level ``property_model`` — confirm it is set
        before calling. An improvement counts as success only if plogp does not decrease.
    """
    start_time = time.time()
    if property_name == 'qed':
        col = 0
    elif property_name == 'plogp':
        col = 1
    else:
        # Fail fast: previously an unknown property_name left `col` unbound and
        # raised a confusing UnboundLocalError at the sort below.
        raise ValueError('Unsupported property_name: {}'.format(property_name))
    print('Constrained optimization of {} score'.format(property_name))
    # Ascending sort: start from the molecules with the *lowest* property values.
    train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col])  # , reverse=True) # qed, plogp, smile
    result_list = []
    nfail = 0
    for i, r in enumerate(train_prop_sorted):
        if i >= topk:
            break
        if i % 50 == 0:
            print('Optimization {}/{}, time: {:.2f} seconds'.format(i, topk, time.time() - start_time))
        qed, plogp, smile = r
        results, ori = optimize_mol(model, property_model, smile, device, sim_cutoff=sim_cutoff, lr=.005, num_iter=100,
                                    data_name=data_name, atomic_num_list=atomic_num_list,
                                    property_name=property_name, random=False, debug=debug)
        if len(results) > 0:
            smile2, property2, sim, _ = results[0]
            plogp_delta = property2 - plogp
            if plogp_delta >= 0:
                result_list.append((smile2, property2, sim, smile, qed, plogp, plogp_delta))
            else:
                nfail += 1
                print('Failure:{}:{}'.format(i, smile))
        else:
            nfail += 1
            print('Failure:{}:{}'.format(i, smile))
    df = pd.DataFrame(result_list,
                      columns=['smile_new', 'prop_new', 'sim', 'smile_old', 'qed_old', 'plogp_old', 'plogp_delta'])
    print(df.describe())
    df.to_csv(property_name + '_constrain_optimization.csv', index=False)
    print('Dump done!')
    print('nfail:{} in total:{}'.format(nfail, topk))
    print('success rate: {}'.format((topk - nfail) * 1.0 / topk))
def plot_top_qed_mol():
    """Render the 25 best discovered QED molecules as a 5x5 grid (PDF + PNG)."""
    import cairosvg
    csv_path = 'qed_discovered_sorted_bytop2k.csv'
    frame = pd.read_csv(csv_path)
    mols = []
    labels = []
    # Rows are (score, smile, sim, smile_old); label each cell with its score.
    for _, row in frame.head(n=25).iterrows():
        score, smile, sim, smile_old = row
        mols.append(Chem.MolFromSmiles(smile))
        labels.append('{:.3f}'.format(score))
    grid_svg = Draw.MolsToGridImage(mols, legends=labels, molsPerRow=5,
                                    subImgSize=(120, 120), useSVG=True)
    # Rasterize the SVG grid into both output formats.
    cairosvg.svg2pdf(bytestring=grid_svg.encode('utf-8'), write_to="top_qed2.pdf")
    cairosvg.svg2png(bytestring=grid_svg.encode('utf-8'), write_to="top_qed2.png")
def plot_mol_constraint_opt():
    """Draw a before/after molecule pair from constrained optimization, labeled with penalized logP."""
    import cairosvg
    pair_smiles = ['O=C(NCc1ccc2c3c(cccc13)C(=O)N2)c1ccc(F)cc1',
                   'O=C(NCC1=Cc2c[nH]c(=O)c3cccc1c23)c1ccc(F)cc1']
    pair_mols = [Chem.MolFromSmiles(s) for s in pair_smiles]
    legends = ['{:.2f}'.format(env.penalized_logp(mol)) for mol in pair_mols]
    grid_svg = Draw.MolsToGridImage(pair_mols, legends=legends, molsPerRow=2,
                                    subImgSize=(250, 100), useSVG=True)
    # Rasterize the SVG grid into both output formats.
    cairosvg.svg2pdf(bytestring=grid_svg.encode('utf-8'), write_to="copt2.pdf")
    cairosvg.svg2png(bytestring=grid_svg.encode('utf-8'), write_to="copt2.png")
def plot_mol_matrix():
    """Visualize one molecule plus its one-hot atom matrix and bond adjacency tensors as heatmaps.

    Writes mol.pdf/mol.png (the molecule drawing), atom.pdf/atom.png (atom matrix),
    and bond{i}.pdf/bond{i}.png for each bond-type channel i.
    """
    import cairosvg
    import seaborn as sns
    import matplotlib.pyplot as plt
    smiles = 'CN(C)C(=N)NC(=N)N' #'CC(C)NC1=CC=CO1' #'CC1=C(SC(=C1)C(=O)NCC2=NOC=C2)Br'
    # smiles_to_adj returns batched tensors; take the first (only) sample.
    bond, atoms = smiles_to_adj(smiles, 'qm9')
    bond = bond[0]
    atoms = atoms[0]
    # def save_mol_png(mol, filepath, size=(100, 100)):
    #     Draw.MolToFile(mol, filepath, size=size)
    Draw.MolToImageFile(Chem.MolFromSmiles(smiles), 'mol.pdf')
    # save_mol_png(Chem.MolFromSmiles(smiles), 'mol.png')
    # mol.pdf above is immediately overwritten by the SVG-rendered version below.
    svg = Draw.MolsToGridImage([Chem.MolFromSmiles(smiles)], legends=[], molsPerRow=1,
                               subImgSize=(250, 250), useSVG=True)
    # highlightAtoms=vhighlight) # , useSVG=True
    cairosvg.svg2pdf(bytestring=svg.encode('utf-8'), write_to="mol.pdf")
    cairosvg.svg2png(bytestring=svg.encode('utf-8'), write_to="mol.png")
    # sns.set()
    # ax = sns.heatmap(1-atoms)
    # with sns.axes_style("white"):
    # Atom matrix heatmap (no axis labels; diverging colormap centered on 0).
    fig, ax = plt.subplots(figsize=(2, 3.4))
    # sns.palplot(sns.diverging_palette(240, 10, n=9))
    ax = sns.heatmap(atoms, linewidths=.5, ax=ax, annot_kws={"size": 18}, cbar=False,
                     xticklabels=False, yticklabels=False, square=True, cmap="vlag", vmin=-1, vmax=1, linecolor='black')
    # ,cmap=sns.diverging_palette(240, 10, n=9)) #"YlGnBu" , square=True
    plt.show()
    fig.savefig('atom.pdf')
    fig.savefig('atom.png')
    # One heatmap per bond-type channel of the adjacency tensor.
    for i, x in enumerate(bond):
        fig, ax = plt.subplots(figsize=(5, 5))
        # sns.palplot(sns.diverging_palette(240, 10, n=9))
        ax = sns.heatmap(x, linewidths=.5, ax=ax, annot_kws={"size": 18}, cbar=False,
                         xticklabels=False, yticklabels=False, square=True, cmap="vlag", vmin=-1, vmax=1, linecolor='black')
        # ,cmap=sns.diverging_palette(240, 10, n=9)) #"YlGnBu" , square=True
        plt.show()
        fig.savefig('bond{}.pdf'.format(i))
        fig.savefig('bond{}.png'.format(i))
if __name__ == '__main__':
    # Entry point: trains (or loads) a property-regression head on top of a
    # pre-trained MoFlow model, then optionally runs top-score search and/or
    # constrained optimization over the training molecules.
    # plot_mol()
    # plot_mol_constraint_opt()
    # plot_mol_matrix()
    # plot_top_qed_mol()
    # exit(-1)
    start = time.time()
    print("Start at Time: {}".format(time.ctime()))
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default='./results', required=True)
    parser.add_argument("--data_dir", type=str, default='../data')
    parser.add_argument('--data_name', type=str, default='qm9', choices=['qm9', 'zinc250k'],
                        help='dataset name')
    parser.add_argument("--snapshot_path", "-snapshot", type=str, required=True)
    parser.add_argument("--hyperparams_path", type=str, default='moflow-params.json', required=True)
    parser.add_argument("--property_model_path", type=str, default=None)
    # parser.add_argument('--molecule_file', type=str, default='qm9_relgcn_kekulized_ggnp.npz',
    #                     help='path to molecule dataset')
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument('-l', '--learning_rate', type=float, default=0.001, help='Base learning rate')
    parser.add_argument('-e', '--lr_decay', type=float, default=0.999995,
                        help='Learning rate decay, applied every step of the optimization')
    parser.add_argument('-w', '--weight_decay', type=float, default=1e-5,
                        help='L2 norm for the parameters')
    parser.add_argument('--hidden', type=str, default="",
                        help='Hidden dimension list for output regression')
    parser.add_argument('-x', '--max_epochs', type=int, default=5, help='How many epochs to run in total?')
    parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU Id to use')
    parser.add_argument("--delta", type=float, default=0.01)
    parser.add_argument("--img_format", type=str, default='svg')
    parser.add_argument("--property_name", type=str, default='qed', choices=['qed', 'plogp'])
    parser.add_argument('--additive_transformations', type=strtobool, default=False,
                        help='apply only additive coupling layers')
    parser.add_argument('--temperature', type=float, default=1.0,
                        help='temperature of the gaussian distributions')
    parser.add_argument('--topk', type=int, default=500, help='Top k smiles as seeds')
    parser.add_argument('--debug', type=strtobool, default='true', help='To run optimization with more information')
    parser.add_argument("--sim_cutoff", type=float, default=0.00)
    #
    parser.add_argument('--topscore', action='store_true', default=False, help='To find top score')
    parser.add_argument('--consopt', action='store_true', default=False, help='To do constrained optimization')
    args = parser.parse_args()
    # Device configuration: fall back to CPU when no GPU is requested/available.
    device = -1
    if args.gpu >= 0:
        # device = args.gpu
        device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')
    property_name = args.property_name.lower()
    # chainer.config.train = False
    # Load the pre-trained MoFlow generative model and its hyperparameters.
    snapshot_path = os.path.join(args.model_dir, args.snapshot_path)
    hyperparams_path = os.path.join(args.model_dir, args.hyperparams_path)
    model_params = Hyperparameters(path=hyperparams_path)
    model = load_model(snapshot_path, model_params, debug=True)  # Load moflow model
    # Parse the comma-separated hidden-layer sizes for the regression head.
    if args.hidden in ('', ','):
        hidden = []
    else:
        hidden = [int(d) for d in args.hidden.strip(',').split(',')]
    print('Hidden dim for output regression: ', hidden)
    property_model = MoFlowProp(model, hidden)
    # model.eval()  # Set model for evaluation
    # Select the dataset-specific atom list, transforms, and validation split.
    if args.data_name == 'qm9':
        atomic_num_list = [6, 7, 8, 9, 0]
        transform_fn = transform_qm9.transform_fn
        valid_idx = transform_qm9.get_val_ids()
        molecule_file = 'qm9_relgcn_kekulized_ggnp.npz'
        # smile_cvs_to_property('qm9')
    elif args.data_name == 'zinc250k':
        atomic_num_list = zinc250_atomic_num_list
        transform_fn = transform_zinc250k.transform_fn_zinc250k
        valid_idx = transform_zinc250k.get_val_ids()
        molecule_file = 'zinc250k_relgcn_kekulized_ggnp.npz'
        # smile_cvs_to_property('zinc250k')
    else:
        raise ValueError("Wrong data_name{}".format(args.data_name))
    # dataset = NumpyTupleDataset(os.path.join(args.data_dir, molecule_file), transform=transform_fn)  # 133885
    dataset = NumpyTupleDataset.load(os.path.join(args.data_dir, molecule_file), transform=transform_fn)
    print('Load {} done, length: {}'.format(os.path.join(args.data_dir, molecule_file), len(dataset)))
    assert len(valid_idx) > 0
    # Train/validation split by index; validation molecules are excluded from training.
    train_idx = [t for t in range(len(dataset)) if t not in valid_idx]  # 224568 = 249455 - 24887
    n_train = len(train_idx)  # 120803   zinc: 224568
    train = torch.utils.data.Subset(dataset, train_idx)  # 120803
    test = torch.utils.data.Subset(dataset, valid_idx)  # 13082  not used for generation
    train_dataloader = torch.utils.data.DataLoader(train, batch_size=args.batch_size)
    # print("loading hyperparamaters from {}".format(hyperparams_path))
    if args.property_model_path is None:
        # No saved regression head: train one on the (normalized) property values.
        print("Training regression model over molecular embedding:")
        prop_list = load_property_csv(args.data_name, normalize=True)
        train_prop = [prop_list[i] for i in train_idx]
        test_prop = [prop_list[i] for i in valid_idx]
        print('Prepare data done! Time {:.2f} seconds'.format(time.time() - start))
        property_model_path = os.path.join(args.model_dir, '{}_model.pt'.format(property_name))
        property_model = fit_model(property_model, atomic_num_list, train_dataloader, train_prop, device,
                                   property_name=property_name, max_epochs=args.max_epochs,
                                   learning_rate=args.learning_rate, weight_decay=args.weight_decay)
        print("saving {} regression model to: {}".format(property_name, property_model_path))
        torch.save(property_model, property_model_path)
        print('Train and save model done! Time {:.2f} seconds'.format(time.time() - start))
    else:
        # Reuse a saved regression head; note properties are *not* normalized here.
        print("Loading trained regression model for optimization")
        prop_list = load_property_csv(args.data_name, normalize=False)
        train_prop = [prop_list[i] for i in train_idx]
        test_prop = [prop_list[i] for i in valid_idx]
        print('Prepare data done! Time {:.2f} seconds'.format(time.time() - start))
        property_model_path = os.path.join(args.model_dir, args.property_model_path)
        print("loading {} regression model from: {}".format(property_name, property_model_path))
        # Optimization is run on CPU regardless of the --gpu flag.
        device = torch.device('cpu')
        property_model = torch.load(property_model_path, map_location=device)
        print('Load model done! Time {:.2f} seconds'.format(time.time() - start))
        property_model.to(device)
        property_model.eval()
        model.to(device)
        model.eval()
        #  mol_smiles = r'C1=CC=C(C=C1)CCCCCCCCCCCCCCCCCCCCCCCCCCCCC'
        # #'CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1'  #'CC(C)N1N=CC2=N[C@H](c3ccc(-c4ccccn4)cc3)N[C@@H]21'
        # # print(adj_to_smiles(
        # #     np.expand_dims(dataset[33233][1], axis=0), np.expand_dims(dataset[33233][0], axis=0),
        # #     atomic_num_list))  # ['CCCNC1=COC=C1']
        # # mol_smiles ='N=C(N)C(=NO)C1CN1' #'CCOC1=C(N)OC=C1' #'CCC1COC(C)CO1' # 'CCC1=NNN=C1CC' #'CCCC1=C(O)C=CO1' # 'CCCCC1=NC=NN1' # 'CCCNC1=COC=C1' # or 'CCCNc1ccoc1' same results for smile # 'CCCNC1=COC=C1' #'CCCNC1=CC=CO1' #'CC1=C2C(=O)N(C)C12'
        # # mm = Chem.MolFromSmiles(mol_smiles)
        # # Chem.Kekulize(mm, clearAromaticFlags=True)  # use this after mol from simles
        # # print(Chem.MolToSmiles(mm))  # CCCNc1ccoc1
        # # print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=True))  # CCCNc1ccoc1
        # # print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=True))  # CCCNc1ccoc1
        # # print(Chem.MolToSmiles(mm, isomericSmiles=True, canonical=False))  # CCCNc1ccoc1
        # # print(Chem.MolToSmiles(mm, isomericSmiles=False, canonical=False))  # CCCNc1ccoc1
        #
        # results, start = optimize_mol(model, property_model, mol_smiles, device, sim_cutoff=0, lr=0.01, num_iter=100,
        #                        data_name=args.data_name, atomic_num_list=atomic_num_list,
        #                        property_name=property_name, random=False)
        #
        # print(start)
        # print(results)
        if args.topscore:
            print('Finding top score:')
            find_top_score_smiles(model, device, args.data_name, property_name, train_prop, args.topk, atomic_num_list, args.debug)
        if args.consopt:
            print('Constrained optimization:')
            constrain_optimization_smiles(model, device, args.data_name, property_name, train_prop, args.topk,   # train_prop
                                          atomic_num_list, args.debug, sim_cutoff=args.sim_cutoff)
        print('Total Time {:.2f} seconds'.format(time.time() - start))
|
1678758
|
import torch
import torch.nn as nn
import torch.nn.functional as F
affine_par = True
class Separable_transpose_convolution(nn.Module):
    """Depthwise-separable transposed convolution.

    Upsamples each channel independently with a grouped ConvTranspose2d
    (groups == in_channels), normalizes and activates it, then mixes channels
    with a 1x1 pointwise convolution followed by BatchNorm (no final ReLU).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=2,
                 padding=1, output_padding=0, bias=False, dilation=1):
        super(Separable_transpose_convolution, self).__init__()
        # Depthwise stage: one filter per input channel.
        self.conv1 = nn.ConvTranspose2d(in_channels, in_channels, kernel_size,
                                        stride, padding, output_padding,
                                        groups=in_channels, bias=bias,
                                        dilation=dilation)
        self.bn1 = nn.BatchNorm2d(in_channels, affine=affine_par)
        # Pointwise stage: 1x1 conv mixes information across channels.
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=1, padding=0, dilation=1, groups=1,
                               bias=bias)
        self.bn2 = nn.BatchNorm2d(out_channels, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, output_size):
        # output_size resolves the inherent shape ambiguity of transposed convs.
        depthwise = self.relu(self.bn1(self.conv1(x, output_size)))
        return self.bn2(self.conv2(depthwise))
class Separable_convolution(nn.Module):
    """Depthwise-separable convolution.

    Applies a grouped depthwise convolution (groups == in_channels) with
    BatchNorm + ReLU, then a 1x1 pointwise convolution with BatchNorm
    (no final ReLU) to mix channels.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, bias=False, dilation=1):
        super(Separable_convolution, self).__init__()
        # Depthwise stage: one filter per input channel.
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, groups=in_channels, bias=bias,
                               dilation=dilation)
        self.bn1 = nn.BatchNorm2d(in_channels, affine=affine_par)
        # Pointwise stage: 1x1 conv mixes information across channels.
        self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=1, padding=0, dilation=1, groups=1,
                               bias=bias)
        self.bn2 = nn.BatchNorm2d(out_channels, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        depthwise = self.relu(self.bn1(self.conv1(x)))
        return self.bn2(self.conv2(depthwise))
|
1678908
|
import LibCall
from .module import Module
from .. import functional as F
class Linear(Module):
    """Affine layer y = x W^T + b mirroring torch.nn.Linear's interface.

    Tensors are produced via LibCall.torch.callTensor; presumably this is a
    stub used for shape/type analysis rather than real training — confirm.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # weight has shape (out_features, in_features); bias is optional.
        self.weight = LibCall.torch.callTensor(out_features, in_features)
        self.bias = LibCall.torch.callTensor(out_features) if bias else None

    def forward(self, input):
        return F.linear(input, self.weight, self.bias)
|
1678920
|
import os
import time
from fds.analyticsapi.engines import ApiException
from fds.analyticsapi.engines.api_client import ApiClient
from fds.analyticsapi.engines.api.pub_calculations_api import PubCalculationsApi
from fds.analyticsapi.engines.configuration import Configuration
from fds.analyticsapi.engines.model.pub_calculation_parameters import PubCalculationParameters
from fds.analyticsapi.engines.model.pub_calculation_parameters_root import PubCalculationParametersRoot
from fds.analyticsapi.engines.model.pub_identifier import PubIdentifier
from fds.analyticsapi.engines.model.pub_date_parameters import PubDateParameters
from urllib3 import Retry
from pathlib import Path
host = "https://api.factset.com"
username = "<username-serial>"
password = "<<PASSWORD>>"
def main():
    """Submit two Publisher calculations to the FactSet API, poll until done, and save results.

    Builds an authenticated client, posts a batch of two identical PubCalculationParameters,
    polls the calculation status (honoring the server's cache-control max-age as the poll
    interval), and writes each successful unit's PDF result to disk.
    """
    config = Configuration()
    config.host = host
    config.username = username
    config.password = password
    config.discard_unknown_keys = True
    # add proxy and/or disable ssl verification according to your development environment
    # config.proxy = "<proxyUrl>"
    # NOTE(review): SSL verification is disabled — acceptable only for local development.
    config.verify_ssl = False
    # Setting configuration to retry api calls on http status codes of 429 and 503.
    config.retries = Retry(total=3, status=3, status_forcelist=frozenset([429, 503]), backoff_factor=2,
                           raise_on_status=False)
    api_client = ApiClient(config)
    try:
        pub_document_name = "Super_client:/publisher/Equity Snapshot.PUB_BRIDGE_PDF"
        pub_account_id = "BENCH:SP50"
        startdate = "-1M"
        enddate = "0M"
        # uncomment the below code line to setup cache control; max-stale=0 will be a fresh adhoc run and the max-stale value is in seconds.
        # Results are by default cached for 12 hours; Setting max-stale=300 will fetch a cached result which is 5 minutes older.
        # cache_control = "max-stale=0"
        pub_account_identifier = PubIdentifier(pub_account_id)
        pub_dates = PubDateParameters(enddate, startdate=startdate)
        # Two identical calculation units, keyed "1" and "2".
        pub_calculation_parameters = {
            "1": PubCalculationParameters(pub_document_name, pub_account_identifier, pub_dates),
            "2": PubCalculationParameters(pub_document_name, pub_account_identifier, pub_dates)
        }
        pub_calculation_parameters_root = PubCalculationParametersRoot(
            data=pub_calculation_parameters)
        pub_calculations_api = PubCalculationsApi(api_client)
        post_and_calculate_response = pub_calculations_api.post_and_calculate(
            pub_calculation_parameters_root=pub_calculation_parameters_root)
        # comment the above line and uncomment the below line to run the request with the cache_control header defined earlier
        # post_and_calculate_response = pub_calculations_api.post_and_calculate(pub_calculation_parameters_root=pub_calculation_parameters_root, cache_control=cache_control)
        # 202 = accepted (poll for completion); 200 = already complete.
        if post_and_calculate_response[1] == 202 or post_and_calculate_response[1] == 200:
            calculation_id = post_and_calculate_response[0].data.calculationid
            print("Calculation Id: " + calculation_id)
            status_response = pub_calculations_api.get_calculation_status_by_id(id=calculation_id)
            # Poll while still queued/executing; sleep for the server-suggested
            # cache max-age (default 5 seconds if the header is absent).
            while status_response[1] == 202 and (status_response[0].data.status in ("Queued", "Executing")):
                max_age = '5'
                age_value = status_response[2].get("cache-control")
                if age_value is not None:
                    max_age = age_value.replace("max-age=", "")
                print('Sleeping: ' + max_age)
                time.sleep(int(max_age))
                status_response = pub_calculations_api.get_calculation_status_by_id(calculation_id)
            # Fetch and persist the result of each unit; report failures.
            for (calculation_unit_id, calculation_unit) in status_response[0].data.units.items():
                if calculation_unit.status == "Success":
                    print("Calculation Unit Id: " +
                          calculation_unit_id + " Succeeded!!!")
                    result_response = pub_calculations_api.get_calculation_unit_result_by_id(id=calculation_id,
                                                                                             unit_id=calculation_unit_id)
                    output_calculation_result(
                        calculation_unit_id, (result_response[0].read()))
                else:
                    print("Calculation Unit Id:" +
                          calculation_unit_id + " Failed!!!")
                    print("Error message : " + str(calculation_unit.errors))
        else:
            print("Calculation creation failed")
            print("Error status : " + str(post_and_calculate_response[1]))
            print("Error message : " + str(post_and_calculate_response[0]))
    except ApiException as e:
        print("Api exception Encountered")
        print(e)
        # Terminates the interpreter on API error (sample-script behavior).
        exit()
def output_calculation_result(output_prefix, result):
    """Write a calculation unit's raw result bytes to '<output_prefix>-Output.pdf'.

    Args:
        output_prefix: Prefix (typically the calculation unit id) for the output filename.
        result: Raw bytes of the result document.
    """
    filename = Path(f'{output_prefix}-Output.pdf')
    # Bug fix: the message previously read 'Writing output to (unknown)' and
    # never interpolated the actual target path.
    print(f'Writing output to {filename}')
    filename.write_bytes(result)
if __name__ == '__main__':
main()
|
1678941
|
import tensorflow as tf
import numpy as np
def linear(input_, output_size, scope_name="linear"):
    """Flatten all non-batch dimensions of `input_`, then apply a dense layer.

    Returns a tensor of shape [batch, output_size].
    """
    with tf.variable_scope(scope_name):
        # Collapse everything after the batch axis into a single dimension.
        flat_dim = np.prod(input_.get_shape().as_list()[1:])
        flattened = tf.reshape(input_, [-1, flat_dim])
        return tf.layers.dense(flattened, output_size)
def flatten(input_, scope_name="flatten"):
    """Collapse every dimension after the batch axis into one.

    Returns a tensor of shape [batch, prod(other dims)].
    """
    with tf.variable_scope(scope_name):
        trailing = np.prod(input_.get_shape().as_list()[1:])
        return tf.reshape(input_, [-1, trailing])
class batch_norm(object):
    """Callable batch-normalization wrapper around tf.contrib.layers.batch_norm.

    Stores the hyperparameters at construction time; the actual variables are
    created (and reused via `scope=self.name`) on the first call.
    """
    # Code from:
    # https://github.com/carpedm20/DCGAN-tensorflow
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon  # numerical-stability constant added to variance
            self.momentum = momentum  # decay rate for the moving statistics
            self.name = name  # variable scope used on every call
    def __call__(self, x, train=True):
        # train=True updates moving statistics; train=False uses the stored ones.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name)
class layer_norm(object):
    """Callable layer-normalization wrapper with a fixed variable-scope name."""
    def __init__(self, name="layer_norm"):
        self.name = name  # scope used to create/reuse the norm variables
    def __call__(self, x):
        return tf.contrib.layers.layer_norm(x, scope=self.name)
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d"):
    """Transposed 2-D convolution with bias, producing `output_shape`.

    Args:
        input_: 4-D input tensor (NHWC).
        output_shape: Full output shape [batch, height, width, channels].
        k_h, k_w: Kernel height/width.
        d_h, d_w: Stride height/width.
        stddev: Stddev of the random-normal weight initializer.
        name: Variable scope for the weights and biases.
    """
    # Code from:
    # https://github.com/carpedm20/DCGAN-tensorflow
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable(
            'w',
            [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        try:
            deconv = tf.nn.conv2d_transpose(
                input_,
                w,
                output_shape=output_shape,
                strides=[1, d_h, d_w, 1])
        # Support for verisons of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(
                input_,
                w,
                output_shape=output_shape,
                strides=[1, d_h, d_w, 1])
        biases = tf.get_variable(
            'biases',
            [output_shape[-1]],
            initializer=tf.constant_initializer(0.0))
        # Reshape after bias_add to restore the static output shape.
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
        return deconv
def conv2d(input_, output_dim,
           k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """Strided 2-D convolution with SAME padding and bias.

    Args:
        input_: 4-D input tensor (NHWC).
        output_dim: Number of output channels.
        k_h, k_w: Kernel height/width.
        d_h, d_w: Stride height/width.
        stddev: Stddev of the truncated-normal weight initializer.
        name: Variable scope for the weights and biases.
    """
    # Code from:
    # https://github.com/carpedm20/DCGAN-tensorflow
    with tf.variable_scope(name):
        w = tf.get_variable(
            'w',
            [k_h, k_w, input_.get_shape()[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(
            input_,
            w,
            strides=[1, d_h, d_w, 1],
            padding='SAME')
        biases = tf.get_variable(
            'biases', [output_dim], initializer=tf.constant_initializer(0.0))
        # Reshape after bias_add to keep the static spatial dimensions.
        conv = tf.reshape(
            tf.nn.bias_add(conv, biases),
            [-1] + conv.get_shape().as_list()[1:])
        return conv
def lrelu(x, leak=0.2, name="lrelu"):
    """Leaky ReLU: identity for positive x, slope `leak` for negative x.

    `name` is accepted for API symmetry with the other ops but unused.
    """
    # Code from:
    # https://github.com/carpedm20/DCGAN-tensorflow
    # For 0 <= leak <= 1, max(leak*x, x) is exactly the leaky ReLU.
    return tf.maximum(leak * x, x)
|
1678945
|
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
# Load the pre-scaled training data; '销售总额' (total sales) is the target column.
training_data_df = pd.read_csv("dataset/sales_data_training_scaled.csv")
X = training_data_df.drop('销售总额', axis=1).values
Y = training_data_df[['销售总额']].values
# Define the model: a fully-connected regression network (9 inputs -> 1 output).
model = Sequential()
model.add(Dense(50, input_dim=9, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam')
# Train the model.
model.fit(
    X,
    Y,
    epochs=50,
    shuffle=True,
    verbose=2
)
# Load the test dataset and evaluate; evaluate() returns the MSE loss.
test_data_df = pd.read_csv("dataset/sales_data_testing_scaled.csv")
X_test = test_data_df.drop('销售总额', axis=1).values
Y_test = test_data_df[['销售总额']].values
test_error_rate = model.evaluate(X_test, Y_test, verbose=0)
print("The mean squared error (MSE) for the test data set is: {}".format(test_error_rate))
# Save the trained model to disk in HDF5 format.
# Save the model to disk
model.save("./models/trained_model.h5")
print("Model saved to disk.")
|
1679001
|
import numpy as np
def dbMoriWen(z, us, umf, d_bed, l_or, dist_type):
    """Equivalent gas-bubble diameter at height ``z`` via the Mori/Wen correlation.

    Treats all bubble volume at a given height as one spherical bubble. Follows
    Kunii & Levenspiel, *Fluidization Engineering*, eqs. 5.15, 5.19, and 6.5.

    Parameters
    ----------
    z : float
        Height along the vertical axis of the bed [m].
    us : float
        Superficial gas velocity [m/s].
    umf : float
        Minimum fluidization velocity [m/s].
    d_bed : float
        Bed diameter [m].
    l_or : float
        Orifice spacing [m] (used only for perforated-plate distributors).
    dist_type : str
        Distributor type: 'perf_sq' (perforated, square grid), 'perf_tri'
        (perforated, triangular grid), or 'porous'.

    Returns
    -------
    float
        Equivalent bubble diameter at height ``z`` [m], capped at 80% of the
        bed diameter.

    Raises
    ------
    NotImplementedError
        If ``dist_type`` is not one of the supported options.
    """
    g_cgs = 981.0  # gravity, cm/s^2

    # The correlation is published in CGS units, so convert the MKS inputs.
    z_cgs = z * 100.0
    us_cgs = us * 100.0
    umf_cgs = float(umf) * 100.0
    d_bed_cgs = d_bed * 100.0
    l_or_cgs = l_or * 100.0

    # Maximum bubble diameter [cm] (limit as z -> infinity).
    db_max = 0.65 * (np.pi / 4 * d_bed_cgs**2 * (us_cgs - umf_cgs))**0.4

    # Minimum bubble diameter [cm] at the distributor for high flow / large
    # bubbles; this form also applies to porous distributors.
    db_min_high = 2.78 / g_cgs * (us_cgs - umf_cgs)**2

    if dist_type == 'perf_sq':
        # Low-flow/small-bubble minimum for a square orifice grid.
        db_min_low = 1.3 / (g_cgs**0.2) * \
            ((us_cgs - umf_cgs) * l_or_cgs**2)**0.4
        # The low-flow form is valid only while bubbles stay smaller than the
        # orifice spacing; otherwise fall back to the high-flow form.
        db_min = db_min_low if db_min_low <= l_or_cgs else db_min_high
    elif dist_type == 'perf_tri':
        # Low-flow/small-bubble minimum for a triangular orifice grid
        # (sqrt(3)/2 accounts for the triangular cell area).
        db_min_low = 1.3 / (g_cgs**0.2) * ((us_cgs - umf_cgs) *
                                           l_or_cgs**2 * np.sqrt(3)/2)**0.4
        db_min = db_min_low if db_min_low <= l_or_cgs else db_min_high
    elif dist_type == 'porous':
        # A porous plate behaves like tightly-spaced tiny orifices.
        db_min = db_min_high
    else:
        raise NotImplementedError(f"Unknown distributor type {dist_type}" +
                                  " in Mori/Wen bubble diameter calculation.")

    # Growth from db_min toward db_max with height (K/L eq. 5.15) [cm].
    db = db_max - (db_max - db_min) * np.exp(-0.3 * z_cgs / d_bed_cgs)

    # Physically constrain the bubble to 80% of the column diameter.
    db = min(0.8*d_bed*100.0, db)

    # Convert back to meters.
    return db / 100.0
|
1679028
|
import ee
import geemap
# Create a map centered at (lat, lon).
# Create an interactive map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Composite 6 months of Landsat 8.
# Note that the input to simpleComposite is raw data.
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
# The asFloat parameter gives floating-point TOA output instead of
# the UINT8 outputs of the default simpleComposite().
composite = ee.Algorithms.Landsat.simpleComposite(**{
    'collection': l8.filterDate('2015-1-1', '2015-7-1'),
    'asFloat': True
})
# Pick a spot with lots of clouds.
Map.setCenter(-47.6735, -0.6344, 12)
# Display a composite with a band combination chosen from:
# https:#landsat.usgs.gov/how-do-landsat-8-band-combinations-differ-landsat-7-or-landsat-5-satellite-data
# B6/B5/B4 is a SWIR/NIR/Red false-color combination.
Map.addLayer(composite, {'bands': ['B6', 'B5', 'B4'], 'max': [0.3, 0.4, 0.3]})
# Display the map (the bare expression renders the widget in a notebook).
Map
|
1679053
|
from typing import Any, Dict
# Although allennlp_models is never referenced directly, importing it is required: it registers the models and downloads supporting artifacts.
import allennlp_models # noqa: F401
from allennlp.predictors.predictor import Predictor
from app.pipelines import Pipeline
class QuestionAnsweringPipeline(Pipeline):
    """Extractive question-answering pipeline backed by an AllenNLP predictor."""

    def __init__(
        self,
        model_id: str,
    ):
        # Loads the predictor from the Hugging Face hub via AllenNLP's hf:// scheme.
        self.predictor = Predictor.from_path("hf://" + model_id)

    def __call__(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing two keys, 'question' being the question being asked and 'context' being some text containing the answer.
        Return:
            A :obj:`dict`:. The object return should be like {"answer": "XXX", "start": 3, "end": 6, "score": 0.82} containing :
                - "answer": the extracted answer from the `context`.
                - "start": the offset within `context` leading to `answer`. context[start:stop] == answer
                - "end": the ending offset within `context` leading to `answer`. context[start:stop] == answer
                - "score": A score between 0 and 1 describing how confident the model is for this answer.
        """
        # AllenNLP's reading-comprehension models expect 'passage', not 'context'.
        allenlp_input = {"passage": inputs["context"], "question": inputs["question"]}
        predictions = self.predictor.predict_json(allenlp_input)

        # best_span is a pair of token indices; map them to character offsets
        # via token_offsets (start of first token, end of last token).
        start_token_idx, end_token_idx = predictions["best_span"]
        start = predictions["token_offsets"][start_token_idx][0]
        end = predictions["token_offsets"][end_token_idx][1]
        # Joint confidence: product of the independent start and end probabilities.
        score = (
            predictions["span_end_probs"][end_token_idx]
            * predictions["span_start_probs"][start_token_idx]
        )
        return {
            "answer": predictions["best_span_str"],
            "start": start,
            "end": end,
            "score": score,
        }
|
1679070
|
import unittest
from pathlib import Path
from bridgebots.board_record import BidMetadata
from bridgebots.deal_enums import Direction, Rank, Suit
from bridgebots.pbn import _build_record_dict, _parse_bidding_record, _sort_play_record, parse_pbn
class TestParsePbnFile(unittest.TestCase):
    """End-to-end parsing of the bundled sample.pbn fixture."""

    def test_parse_file(self):
        """Parse sample.pbn and spot-check deals, bidding, play, and metadata."""
        sample_pbn_path = Path(__file__).parent / "resources" / "sample.pbn"
        records = parse_pbn(sample_pbn_path)
        self.assertEqual(3, len(records))
        deal_1, board_record_1 = records[0].deal, records[0].board_records[0]
        # Board 1: vulnerability, dealer, and one hand's spade holding.
        self.assertEqual(True, deal_1.ns_vulnerable)
        self.assertEqual(True, deal_1.ew_vulnerable)
        self.assertEqual(Direction.EAST, deal_1.dealer)
        self.assertEqual(
            [Rank.KING, Rank.QUEEN, Rank.JACK, Rank.SEVEN], deal_1.hands[Direction.EAST].suits[Suit.SPADES]
        )
        # Raw bidding keeps annotation tokens (=n= references, ! alerts, AP).
        self.assertEqual(
            [
                "1H",
                "Pass",
                "1S",
                "=1=",
                "Pass",
                "2C",
                "!",
                "Pass",
                "2H",
                "=2=",
                "Pass",
                "2S",
                "=3=",
                "Pass",
                "3NT",
                "AP",
            ],
            board_record_1.raw_bidding_record,
        )
        # Normalized bidding strips annotations and expands AP into three passes.
        self.assertEqual(
            ["1H", "PASS", "1S", "PASS", "2C", "PASS", "2H", "PASS", "2S", "PASS", "3NT", "PASS", "PASS", "PASS"],
            board_record_1.bidding_record,
        )
        # Annotations are preserved separately as BidMetadata entries.
        self.assertEqual(
            [
                BidMetadata(2, "1S", False, "0-4 !ss"),
                BidMetadata(4, "2C", True, None),
                BidMetadata(6, "2H", False, "less than 8 points"),
                BidMetadata(8, "2S", False, "17+ with 4 !S"),
            ],
            board_record_1.bidding_metadata,
        )
        # Play record in play order (29 cards: declarer claimed mid-hand).
        self.assertEqual(
            [
                "CQ",
                "CA",
                "C8",
                "C3",
                "H4",
                "HT",
                "HK",
                "H6",
                "H3",
                "H2",
                "HQ",
                "HA",
                "C5",
                "C6",
                "CK",
                "CT",
                "D4",
                "DJ",
                "DQ",
                "DK",
                "CJ",
                "C2",
                "S7",
                "C7",
                "C9",
                "C4",
                "H5",
                "S4",
                "S6",
            ],
            [str(card) for card in board_record_1.play_record],
        )
        # Result and event metadata for board 1.
        self.assertEqual(Direction.WEST, board_record_1.declarer)
        self.assertEqual("3NT", board_record_1.contract)
        self.assertEqual(9, board_record_1.tricks)
        self.assertEqual("IMP;Cross", board_record_1.scoring)
        self.assertEqual("2004.05.05", board_record_1.date)
        self.assertEqual("Cavendish Pairs Day 2", board_record_1.event)
        # Board 2: multi-line commentary is joined into one string, and player
        # names are attached per direction.
        deal_2, board_record_2 = records[1].deal, records[1].board_records[0]
        self.assertEqual(
            (
                "{ Indonesians <NAME> and <NAME>, recent winners of the IOC Grand Prix in Lausanne and "
                "Rhodes Olympiad finalists in 1996, are mainstays of their consistent national team. True gentlemen at"
                " and away from the table, they focus on partnership and error-free bridge. 51 VP out of first, they "
                "need a win here to stay in the hunt. Their opponents, <NAME> and <NAME> Holland, "
                "Bermuda Bowl champs in 1993, are a further 24 behind . Our first deal, a partscore, is played "
                "successfully in diamonds at every table but one (Auken/von Arnim fail in 3C). Karwur/Sacul achieve par"
                " [datum is N/S minus 110] at 3D. However, getting there is half the fun, as the old Greyhound Bus "
                "adverts used to boast. Muller\\\\'s (probably necessary) balanced takeout double is hardly a classic, "
                "and de Boer shows no mercy, bidding both his majors on very slender values. Sacul (1D showed 2+ cards)"
                " saves the day by converting 3C to 3D, expecting 2/3 in dummy on the auction. Muller leads a "
                "challenging low heart but Sacul puts up the king: plus 110. {Your host for this match, John "
                "Carruthers} }"
            ),
            board_record_2.commentary,
        )
        self.assertEqual(board_record_2.names[Direction.NORTH], "<NAME>")
        self.assertEqual(board_record_2.names[Direction.SOUTH], "<NAME>")
        self.assertEqual(board_record_2.names[Direction.EAST], "<NAME>")
        self.assertEqual(board_record_2.names[Direction.WEST], "<NAME>")
class TestPbnRecordDict(unittest.TestCase):
    """Tests for _build_record_dict, which folds raw PBN file lines into a tag dictionary."""

    def test_ignore_non_key_lines(self):
        # Version pragmas and free-standing text are not tag pairs and must be dropped.
        pbn_strings = [
            "% PBN 1.0",
            "Random String",
            '[Contract "3NT"]',
        ]
        record_dict = _build_record_dict(pbn_strings)
        self.assertEqual({"Contract": "3NT"}, record_dict)

    def test_auction(self):
        pbn_strings = ['[Auction "S"]', "1D X 2S Pass", "Pass 2NT Pass 3NT", "Pass Pass Pass"]
        record_dict = _build_record_dict(pbn_strings)
        expected = {
            "Auction": "S",
            "bidding_record": ["1D", "X", "2S", "Pass", "Pass", "2NT", "Pass", "3NT", "Pass", "Pass", "Pass"],
        }
        self.assertEqual(expected, record_dict)

    def test_auction_with_alerts(self):
        # Alert ("!") and note ("=n=") annotations stay inline in the raw bidding record.
        pbn_strings = ['[Auction "N"]', "1D =1= X ! AP"]
        record_dict = _build_record_dict(pbn_strings)
        expected = {"Auction": "N", "bidding_record": ["1D", "=1=", "X", "!", "AP"]}
        self.assertEqual(expected, record_dict)

    def test_play(self):
        pbn_strings = ['[Play "N"]', "S6 S3 ST SQ", "C9 C2 C5 CQ", "CT C3 CA CK"]
        record_dict = _build_record_dict(pbn_strings)
        expected = {
            "Play": "N",
            "play_record": [["S6", "S3", "ST", "SQ"], ["C9", "C2", "C5", "CQ"], ["CT", "C3", "CA", "CK"]],
        }
        self.assertEqual(expected, record_dict)

    def test_partial_play(self):
        # "*" terminates the play section; tags that follow are parsed normally.
        pbn_strings = ['[Play "N"]', "S6 S3 ST SQ", "C9 C2 C5 CQ", "CT C3 CA CK", "*", '[NextTag "Foo"]']
        record_dict = _build_record_dict(pbn_strings)
        expected = {
            "Play": "N",
            "play_record": [["S6", "S3", "ST", "SQ"], ["C9", "C2", "C5", "CQ"], ["CT", "C3", "CA", "CK"]],
            "NextTag": "Foo",
        }
        self.assertEqual(expected, record_dict)

    def test_commentary_at_end(self):
        pbn_strings = [
            '[Contract "3NT"]',
            "{Brad Moss of USA and <NAME> of Canada won the Cavendish",
            "Teams earlier this week, and with one 27-board session to go,",
            "they are in front again.",
            "}",
        ]
        record_dict = _build_record_dict(pbn_strings)
        self.assertEqual(
            {
                "Contract": "3NT",
                "Commentary": (
                    "{Brad Moss of USA and <NAME> of Canada won the Cavendish Teams earlier this week, and with "
                    "one 27-board session to go, they are in front again. }"
                ),
            },
            record_dict,
        )

    def test_commentary_in_middle(self):
        pbn_strings = [
            '[Contract "3NT"]',
            "{Brad Moss of USA and <NAME> of Canada won the Cavendish",
            "Teams earlier this week, and with one 27-board session to go,",
            "they are in front again.",
            "}",
            '[Declarer "W"]',
        ]
        record_dict = _build_record_dict(pbn_strings)
        self.assertEqual(
            {
                "Contract": "3NT",
                # Expected text is the input lines joined with single spaces; the
                # previous literal ("{<NAME>ss of USA ...") could not match the input.
                "Commentary": (
                    "{Brad Moss of USA and <NAME> of Canada won the Cavendish Teams earlier this week, and with "
                    "one 27-board session to go, they are in front again. }"
                ),
                "Declarer": "W",
            },
            record_dict,
        )
class TestPbnPlayRecord(unittest.TestCase):
    """Tests for _sort_play_record, which reorders the trick list into play order."""

    def test_sort_suit_play_record(self):
        trick_records = [["H6", "HK", "HQ", "H5"], ["CT", "C4", "D9", "CK"], ["H2", "H3", "DJ", "H8"]]
        hearts_play_record = _sort_play_record(trick_records, "3H")
        hearts_play_strings = [str(card) for card in hearts_play_record]
        self.assertEqual(["H6", "HK", "HQ", "H5", "C4", "D9", "CK", "CT", "H8", "H2", "H3", "DJ"], hearts_play_strings)
        # The same tricks sort differently when diamonds are trumps.
        diamonds_play_record = _sort_play_record(trick_records, "2D")
        diamonds_play_strings = [str(card) for card in diamonds_play_record]
        self.assertEqual(
            ["H6", "HK", "HQ", "H5", "C4", "D9", "CK", "CT", "DJ", "H8", "H2", "H3"], diamonds_play_strings
        )

    def test_sort_notrump_play_record(self):
        trick_records = [["H6", "HK", "HQ", "C3"], ["CT", "C4", "D9", "CK"], ["H2", "H3", "DJ", "S8"]]
        nt_play_record = _sort_play_record(trick_records, "2NT")
        nt_play_strings = [str(card) for card in nt_play_record]
        self.assertEqual(["H6", "HK", "HQ", "C3", "C4", "D9", "CK", "CT", "S8", "H2", "H3", "DJ"], nt_play_strings)

    def test_placeholder_symbols(self):
        # "-" and "--" stand for unknown cards and are dropped from the sorted record.
        trick_records = [["H6", "HK", "HQ", "C3"], ["CT", "C4", "D9", "CK"], ["H2", "-", "--", "S8"]]
        nt_play_record = _sort_play_record(trick_records, "2NT")
        nt_play_strings = [str(card) for card in nt_play_record]
        self.assertEqual(["H6", "HK", "HQ", "C3", "C4", "D9", "CK", "CT", "S8", "H2"], nt_play_strings)

    def test_sort_pass_out(self):
        # A passed-out deal has no play, regardless of the contract string's case.
        self.assertEqual([], _sort_play_record([], "Pass"))
        self.assertEqual([], _sort_play_record([], "PASS"))

    def test_handle_malformed_play_record(self):
        self.assertEqual([], _sort_play_record([], "junk"))
        self.assertEqual([], _sort_play_record([["H3"]], "2NT"))
class TestPbnBiddingRecord(unittest.TestCase):
    """Tests for _parse_bidding_record: normalising calls and extracting annotations."""

    def test_bidding_record_without_annotations(self):
        calls = ["Pass", "1D", "X", "XX", "1S", "X", "Pass", "2C", "2H", "Pass", "Pass", "Pass"]
        record, metadata = _parse_bidding_record(calls, {})
        expected = ["PASS", "1D", "X", "XX", "1S", "X", "PASS", "2C", "2H", "PASS", "PASS", "PASS"]
        self.assertEqual(expected, record)
        self.assertEqual([], metadata)

    def test_bidding_metadata_without_notes(self):
        calls = ["Pass", "1D", "X", "=0=", "Pass", "Pass", "Pass"]
        record, metadata = _parse_bidding_record(calls, {})
        self.assertEqual(["PASS", "1D", "X", "PASS", "PASS", "PASS"], record)
        self.assertEqual([BidMetadata(2, "X", False, "=0=")], metadata)

    def test_bidding_metadata_with_notes(self):
        calls = ["Pass", "1D", "=1=", "X", "=2=", "Pass", "Pass", "Pass"]
        notes = {"Note_1": "2+", "Note_2": "Majors"}
        record, metadata = _parse_bidding_record(calls, notes)
        self.assertEqual(["PASS", "1D", "X", "PASS", "PASS", "PASS"], record)
        self.assertEqual([BidMetadata(1, "1D", False, "2+"), BidMetadata(2, "X", False, "Majors")], metadata)

    def test_alert(self):
        calls = ["Pass", "1D", "!", "X", "!", "Pass", "Pass", "Pass"]
        record, metadata = _parse_bidding_record(calls, {})
        self.assertEqual(["PASS", "1D", "X", "PASS", "PASS", "PASS"], record)
        self.assertEqual([BidMetadata(1, "1D", True, None), BidMetadata(2, "X", True, None)], metadata)

    def test_bidding_metadata_with_duplicate_notes(self):
        calls = ["1C", "2NT", "=0=", "!", "3H", "=0=", "=1=", "pass", "3S", "pass", "3NT", "pass", "pass", "pass"]
        record, metadata = _parse_bidding_record(calls, {"Note_1": "Spades"})
        self.assertEqual(["1C", "2NT", "3H", "PASS", "3S", "PASS", "3NT", "PASS", "PASS", "PASS"], record)
        self.assertEqual(
            [BidMetadata(1, "2NT", True, "=0="), BidMetadata(2, "3H", False, "=0= | Spades")], metadata
        )

    def test_bidding_record_with_all_pass(self):
        # "AP" expands to three closing passes.
        calls = ["1C", "1S", "AP"]
        record, metadata = _parse_bidding_record(calls, {})
        self.assertEqual(["1C", "1S", "PASS", "PASS", "PASS"], record)
        self.assertEqual([], metadata)
|
1679082
|
import os
from unittest.case import TestCase
from checkov.cloudformation.graph_builder.graph_components.block_types import BlockType
from checkov.cloudformation.graph_manager import CloudformationGraphManager
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestRenderer(TestCase):
    """Tests for CloudFormation graph variable rendering.

    Each test builds a graph from a YAML and a JSON fixture directory and
    checks both the rendered attribute values and the rendering breadcrumbs
    (the provenance chain recorded for each substituted value).
    """

    def setUp(self) -> None:
        # Force deterministic, synchronous rendering for the tests.
        os.environ['UNIQUE_TAG'] = ''
        os.environ['RENDER_ASYNC_MAX_WORKERS'] = '50'
        os.environ['RENDER_VARIABLES_ASYNC'] = 'False'

    def test_render_ref(self):
        relative_path = './resources/variable_rendering/render_ref/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_ref(yaml_test_dir, 'yaml')
        self.validate_render_ref(json_test_dir, 'json')

    def validate_render_ref(self, test_dir: str, file_ext: str):
        """Check attribute values and breadcrumbs after rendering Ref intrinsics."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        db_name_default_value = "db1"
        kms_master_key_id_expected_attributes = {'Default': None}
        db_name_expected_attributes = {'Default': db_name_default_value}
        # A Ref to a parameter without a default stays unresolved.
        my_source_queue_expected_attributes = {'KmsMasterKeyId.Ref': 'KmsMasterKeyId'}
        my_db_expected_attributes = {'DBName': db_name_default_value}
        my_db_instance_name_expected_attributes = {'Value.Ref': 'MyDB'}
        self.compare_vertex_attributes(local_graph, kms_master_key_id_expected_attributes, BlockType.PARAMETERS, 'KmsMasterKeyId')
        self.compare_vertex_attributes(local_graph, db_name_expected_attributes, BlockType.PARAMETERS, 'DBName')
        self.compare_vertex_attributes(local_graph, my_source_queue_expected_attributes, BlockType.RESOURCE, 'AWS::SQS::Queue.MySourceQueue')
        self.compare_vertex_attributes(local_graph, my_db_expected_attributes, BlockType.RESOURCE, 'AWS::RDS::DBInstance.MyDB')
        self.compare_vertex_attributes(local_graph, my_db_instance_name_expected_attributes, BlockType.OUTPUTS, 'MyDBInstanceName')
        kms_master_key_id_expected_breadcrumbs = {}
        db_name_expected_breadcrumbs = {}
        my_source_queue_expected_breadcrumbs = {}
        my_db_expected_breadcrumbs = {'DBName': [{'type': BlockType.PARAMETERS, 'name': 'DBName', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}]}
        my_db_instance_name_expected_breadcrumbs = {}
        self.compare_vertex_breadcrumbs(local_graph, kms_master_key_id_expected_breadcrumbs, BlockType.PARAMETERS, 'KmsMasterKeyId')
        self.compare_vertex_breadcrumbs(local_graph, db_name_expected_breadcrumbs, BlockType.PARAMETERS, 'DBName')
        self.compare_vertex_breadcrumbs(local_graph, my_source_queue_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::SQS::Queue.MySourceQueue')
        self.compare_vertex_breadcrumbs(local_graph, my_db_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::RDS::DBInstance.MyDB')
        self.compare_vertex_breadcrumbs(local_graph, my_db_instance_name_expected_breadcrumbs, BlockType.OUTPUTS, 'MyDBInstanceName')

    def test_render_findinmap(self):
        relative_path = './resources/variable_rendering/render_findinmap/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_findinmap(yaml_test_dir, 'yaml')
        self.validate_render_findinmap(json_test_dir, 'json')

    def validate_render_findinmap(self, test_dir: str, file_ext: str):
        """Check attribute values and breadcrumbs after rendering Fn::FindInMap."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        region_map_expected_ami_value = "ami-0ff8a91507f77f867"
        region_map_expected_attributes = {'us-east-1.AMI': region_map_expected_ami_value}
        ec2instance_expected_attributes = {'ImageId': region_map_expected_ami_value}
        self.compare_vertex_attributes(local_graph, region_map_expected_attributes, BlockType.MAPPINGS, 'RegionMap')
        self.compare_vertex_attributes(local_graph, ec2instance_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::Instance.EC2Instance')
        region_map_expected_breadcrumbs = {}
        ec2instance_expected_breadcrumbs = {'ImageId': [{'type': BlockType.MAPPINGS, 'name': 'RegionMap', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'us-east-1.AMI'}]}
        self.compare_vertex_breadcrumbs(local_graph, region_map_expected_breadcrumbs, BlockType.MAPPINGS, 'RegionMap')
        self.compare_vertex_breadcrumbs(local_graph, ec2instance_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::Instance.EC2Instance')

    def test_render_getatt(self):
        relative_path = './resources/variable_rendering/render_getatt/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_getatt(yaml_test_dir, 'yaml')
        self.validate_render_getatt(json_test_dir, 'json')

    def validate_render_getatt(self, test_dir: str, file_ext: str):
        """Check attribute values and breadcrumbs after rendering Fn::GetAtt."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        web_vpc_expected_cidr_block = "172.16.0.0/16"
        web_vpc_expected_attributes = {'CidrBlock': web_vpc_expected_cidr_block}
        my_sg_expected_attributes = {'SecurityGroupIngress.CidrIp': web_vpc_expected_cidr_block}
        # GetAtt of a runtime-only attribute (DefaultSecurityGroup) stays unresolved.
        web_vpc_default_sg_expected_attributes = {'Value.Fn::GetAtt': ['WebVPC', 'DefaultSecurityGroup']}
        self.compare_vertex_attributes(local_graph, web_vpc_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_attributes(local_graph, my_sg_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::SecurityGroup.MySG')
        self.compare_vertex_attributes(local_graph, web_vpc_default_sg_expected_attributes, BlockType.OUTPUTS, 'WebVPCDefaultSg')
        web_vpc_expected_breadcrumbs = {}
        my_sg_expected_breadcrumbs = {'SecurityGroupIngress.CidrIp': [{'type': BlockType.RESOURCE, 'name': 'AWS::EC2::VPC.WebVPC', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'CidrBlock'}]}
        web_vpc_default_sg_expected_breadcrumbs = {}
        self.compare_vertex_breadcrumbs(local_graph, web_vpc_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_breadcrumbs(local_graph, my_sg_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::SecurityGroup.MySG')
        self.compare_vertex_breadcrumbs(local_graph, web_vpc_default_sg_expected_breadcrumbs, BlockType.OUTPUTS, 'WebVPCDefaultSg')

    def test_render_sub(self):
        relative_path = './resources/variable_rendering/render_sub/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_sub(yaml_test_dir, 'yaml')
        self.validate_render_sub(json_test_dir, 'json')

    def validate_render_sub(self, test_dir: str, file_ext: str):
        """Check attribute values and breadcrumbs after rendering Fn::Sub."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        company_name_expected_value = "acme"
        web_vpc_expected_cidr_block = "172.16.0.0/16"
        # Parameters
        company_name_expected_attributes = {'Default': company_name_expected_value}
        environment_expected_attributes = {'Default': None}
        # Resources: a Sub referencing pseudo parameters stays unresolved.
        web_vpc_expected_attributes = {'CidrBlock': web_vpc_expected_cidr_block}
        default_db_expected_attributes = {'DBName': {'Fn::Sub': 'rds-${AWS::AccountId}-${CompanyName}-${Environment}'}}
        # Outputs
        db_endpoint_sg_expected_attributes = {'Value.Fn::Sub': "${DefaultDB.Endpoint.Address}:${DefaultDB.Endpoint.Port}"}
        web_vpc_cidr_block_expected_attributes = {'Value': web_vpc_expected_cidr_block}
        cidr_block_associations_expected_attributes = {'Value.Fn::Sub': "${WebVPC.CidrBlockAssociations}"}
        default_db_name_expected_attributes = {'Value': {'Fn::Sub': 'rds-${AWS::AccountId}-${CompanyName}-${Environment}'}}
        self.compare_vertex_attributes(local_graph, company_name_expected_attributes, BlockType.PARAMETERS, 'CompanyName')
        self.compare_vertex_attributes(local_graph, environment_expected_attributes, BlockType.PARAMETERS, 'Environment')
        self.compare_vertex_attributes(local_graph, web_vpc_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_attributes(local_graph, default_db_expected_attributes, BlockType.RESOURCE, 'AWS::RDS::DBInstance.DefaultDB')
        self.compare_vertex_attributes(local_graph, db_endpoint_sg_expected_attributes, BlockType.OUTPUTS, 'DBEndpoint')
        self.compare_vertex_attributes(local_graph, web_vpc_cidr_block_expected_attributes, BlockType.OUTPUTS, 'WebVPCCidrBlock')
        self.compare_vertex_attributes(local_graph, cidr_block_associations_expected_attributes, BlockType.OUTPUTS, 'CidrBlockAssociations')
        self.compare_vertex_attributes(local_graph, default_db_name_expected_attributes, BlockType.OUTPUTS, 'DefaultDBName')
        company_name_expected_breadcrumbs = {}
        environment_expected_breadcrumbs = {}
        web_vpc_expected_breadcrumbs = {}
        default_db_expected_breadcrumbs = {}
        db_endpoint_sg_expected_breadcrumbs = {}
        web_vpc_cidr_block_expected_breadcrumbs = {'Value': [{'type': BlockType.RESOURCE, 'name': 'AWS::EC2::VPC.WebVPC', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'CidrBlock'}]}
        cidr_block_associations_expected_breadcrumbs = {}
        default_db_name_expected_breadcrumbs = {}
        self.compare_vertex_breadcrumbs(local_graph, company_name_expected_breadcrumbs, BlockType.PARAMETERS, 'CompanyName')
        self.compare_vertex_breadcrumbs(local_graph, environment_expected_breadcrumbs, BlockType.PARAMETERS, 'Environment')
        self.compare_vertex_breadcrumbs(local_graph, web_vpc_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_breadcrumbs(local_graph, default_db_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::RDS::DBInstance.DefaultDB')
        self.compare_vertex_breadcrumbs(local_graph, db_endpoint_sg_expected_breadcrumbs, BlockType.OUTPUTS, 'DBEndpoint')
        self.compare_vertex_breadcrumbs(local_graph, web_vpc_cidr_block_expected_breadcrumbs, BlockType.OUTPUTS, 'WebVPCCidrBlock')
        self.compare_vertex_breadcrumbs(local_graph, cidr_block_associations_expected_breadcrumbs, BlockType.OUTPUTS, 'CidrBlockAssociations')
        self.compare_vertex_breadcrumbs(local_graph, default_db_name_expected_breadcrumbs, BlockType.OUTPUTS, 'DefaultDBName')

    def test_render_subsequent_evals(self):
        relative_path = './resources/variable_rendering/render_subsequent_evals/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_subsequent_evals(yaml_test_dir, 'yaml')
        self.validate_render_subsequent_evals(json_test_dir, 'json')

    def validate_render_subsequent_evals(self, test_dir: str, file_ext: str):
        """Check that a value propagated through two hops records both breadcrumb steps."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        cidr_block_expected_expected_value = "172.16.0.0/16"
        cidr_block_expected_attributes = {'Default': cidr_block_expected_expected_value}
        web_vpc_expected_attributes = {'CidrBlock': cidr_block_expected_expected_value}
        my_sg_expected_attributes = {'SecurityGroupIngress.CidrIp': cidr_block_expected_expected_value}
        self.compare_vertex_attributes(local_graph, cidr_block_expected_attributes, BlockType.PARAMETERS, 'CidrBlock')
        self.compare_vertex_attributes(local_graph, web_vpc_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_attributes(local_graph, my_sg_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::SecurityGroup.MySG')
        cidr_block_expected_breadcrumbs = {}
        web_vpc_expected_breadcrumbs = {'CidrBlock': [{'type': BlockType.PARAMETERS, 'name': 'CidrBlock', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}, {'type': BlockType.RESOURCE, 'name': 'AWS::EC2::VPC.WebVPC', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'CidrBlock'}]}
        my_sg_expected_breadcrumbs = {'SecurityGroupIngress.CidrIp': [{'type': BlockType.PARAMETERS, 'name': 'CidrBlock', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}, {'type': BlockType.RESOURCE, 'name': 'AWS::EC2::VPC.WebVPC', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'CidrBlock'}]}
        self.compare_vertex_breadcrumbs(local_graph, cidr_block_expected_breadcrumbs, BlockType.PARAMETERS, 'CidrBlock')
        self.compare_vertex_breadcrumbs(local_graph, web_vpc_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::VPC.WebVPC')
        self.compare_vertex_breadcrumbs(local_graph, my_sg_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::SecurityGroup.MySG')

    def test_render_select(self):
        relative_path = './resources/variable_rendering/render_select/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_select(yaml_test_dir, 'yaml')
        self.validate_render_select(json_test_dir, 'json')

    def validate_render_select(self, test_dir: str, file_ext: str):
        """Check Fn::Select rendering, including an out-of-bounds index left unresolved."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        subnet0_expected_attributes = {'CidrBlock': '10.0.48.0/24'}
        grapes_select_expected_attributes = {'Value': 'grapes'}
        out_of_bound_select_expected_attributes = {'Value.Fn::Select': ['7', ['apples', 'grapes', 'oranges', 'mangoes']]}
        self.compare_vertex_attributes(local_graph, subnet0_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::Subnet.Subnet0')
        self.compare_vertex_attributes(local_graph, grapes_select_expected_attributes, BlockType.OUTPUTS, 'GrapesSelect')
        self.compare_vertex_attributes(local_graph, out_of_bound_select_expected_attributes, BlockType.OUTPUTS, 'OutOfBoundSelect')
        subnet0_expected_breadcrumbs = {'CidrBlock.Fn::Select.1': [{'type': BlockType.PARAMETERS, 'name': 'DbSubnetIpBlocks', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}], 'CidrBlock.Fn::Select': [{'type': BlockType.PARAMETERS, 'name': 'DbSubnetIpBlocks', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}]}
        grapes_select_expected_breadcrumbs = {}
        out_of_bound_select_expected_breadcrumbs = {}
        self.compare_vertex_breadcrumbs(local_graph, subnet0_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::Subnet.Subnet0')
        self.compare_vertex_breadcrumbs(local_graph, grapes_select_expected_breadcrumbs, BlockType.OUTPUTS, 'GrapesSelect')
        self.compare_vertex_breadcrumbs(local_graph, out_of_bound_select_expected_breadcrumbs, BlockType.OUTPUTS, 'OutOfBoundSelect')

    def test_render_join(self):
        relative_path = './resources/variable_rendering/render_join/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        self.validate_render_join(yaml_test_dir, 'yaml')
        self.validate_render_join(json_test_dir, 'json')

    def validate_render_join(self, test_dir: str, file_ext: str):
        """Check Fn::Join rendering with literal members and a Ref member."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        s3bucket1_expected_attributes = {'BucketName': 'a:b:c'}
        s3bucket2_expected_attributes = {'BucketName': 'my_bucket_name_test'}
        self.compare_vertex_attributes(local_graph, s3bucket1_expected_attributes, BlockType.RESOURCE, 'AWS::S3::Bucket.S3Bucket1')
        self.compare_vertex_attributes(local_graph, s3bucket2_expected_attributes, BlockType.RESOURCE, 'AWS::S3::Bucket.S3Bucket2')
        s3bucket1_expected_breadcrumbs = {}
        s3bucket2_expected_breadcrumbs = {'BucketName.Fn::Join.1.0': [{'type': BlockType.PARAMETERS, 'name': 'BucketName', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}], 'BucketName.Fn::Join.1': [{'type': BlockType.PARAMETERS, 'name': 'BucketName', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}], 'BucketName.Fn::Join': [{'type': BlockType.PARAMETERS, 'name': 'BucketName', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}]}
        self.compare_vertex_breadcrumbs(local_graph, s3bucket1_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::S3::Bucket.S3Bucket1')
        self.compare_vertex_breadcrumbs(local_graph, s3bucket2_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::S3::Bucket.S3Bucket2')

    def test_render_if(self):
        relative_path = './resources/variable_rendering/render_if/'
        yaml_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'yaml'))
        json_test_dir = os.path.realpath(os.path.join(TEST_DIRNAME, relative_path, 'json'))
        # NOTE: method was previously misspelled "valiate_render_if".
        self.validate_render_if(yaml_test_dir, 'yaml')
        self.validate_render_if(json_test_dir, 'json')

    def validate_render_if(self, test_dir: str, file_ext: str):
        """Check Fn::If rendering, including nested Fn::If conditions."""
        graph_manager = CloudformationGraphManager(db_connector=NetworkxConnector())
        local_graph, _ = graph_manager.build_graph_from_source_directory(test_dir, render_variables=True)
        ec2instance_expected_attributes = {'InstanceType': 'm1.large'}
        s3bucketsuspended_expected_attributes = {'VersioningConfiguration.Status': 'Suspended'}
        s3bucketenabled_expected_attributes = {'VersioningConfiguration.Status': 'Enabled'}
        self.compare_vertex_attributes(local_graph, ec2instance_expected_attributes, BlockType.RESOURCE, 'AWS::EC2::Instance.EC2Instance')
        self.compare_vertex_attributes(local_graph, s3bucketsuspended_expected_attributes, BlockType.RESOURCE, 'AWS::S3::Bucket.S3BucketSuspended')
        self.compare_vertex_attributes(local_graph, s3bucketenabled_expected_attributes, BlockType.RESOURCE, 'AWS::S3::Bucket.S3BucketEnabled')
        instancesize_breadcrumb = {'type': BlockType.PARAMETERS, 'name': 'InstanceSize', 'path': os.path.join(test_dir, f'test.{file_ext}'), 'attribute_key': 'Default'}
        ec2instance_expected_breadcrumbs = {
            'InstanceType.Fn::If.2.Fn::If.1': [instancesize_breadcrumb],
            'InstanceType.Fn::If.2.Fn::If': [instancesize_breadcrumb],
            'InstanceType.Fn::If.2': [instancesize_breadcrumb],
            'InstanceType.Fn::If': [instancesize_breadcrumb],
            'InstanceType': [instancesize_breadcrumb],
        }
        s3bucketsuspended_expected_breadcrumbs = {}
        s3bucketenabled_expected_breadcrumbs = {}
        self.compare_vertex_breadcrumbs(local_graph, ec2instance_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::EC2::Instance.EC2Instance')
        self.compare_vertex_breadcrumbs(local_graph, s3bucketsuspended_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::S3::Bucket.S3BucketSuspended')
        self.compare_vertex_breadcrumbs(local_graph, s3bucketenabled_expected_breadcrumbs, BlockType.RESOURCE, 'AWS::S3::Bucket.S3BucketEnabled')

    def compare_vertex_attributes(self, local_graph, expected_attributes, block_type, block_name):
        """Assert that the named vertex carries each expected attribute value."""
        vertex = local_graph.vertices[local_graph.vertices_block_name_map[block_type][block_name][0]]
        vertex_attributes = vertex.get_attribute_dict()
        for attribute_key, expected_value in expected_attributes.items():
            actual_value = vertex_attributes.get(attribute_key)
            if not isinstance(expected_value, dict):
                self.assertEqual(expected_value, actual_value, f'error during comparing {block_type} in attribute key: {attribute_key}')
            else:
                # Unresolved intrinsics are compared per CloudFormation function key.
                for cfn_func, evaluated_value in expected_value.items():
                    self.assertIn(cfn_func, actual_value, f'error during comparing {block_type} in attribute key: {attribute_key}')
                    self.assertIn(actual_value[cfn_func], evaluated_value, f'error during comparing {block_type} in attribute key: {attribute_key}')

    def compare_vertex_breadcrumbs(self, local_graph, expected_breadcrumbs, block_type, block_name):
        """Assert that the named vertex recorded exactly the expected rendering breadcrumbs."""
        vertex = local_graph.vertices[local_graph.vertices_block_name_map[block_type][block_name][0]]
        vertex_breadcrumbs = vertex.breadcrumbs
        self.assertEqual(len(vertex_breadcrumbs), len(expected_breadcrumbs))
        # Iterating an empty expected dict is a no-op, so no emptiness guard is needed.
        for vertex_id, expected_value in expected_breadcrumbs.items():
            actual_value = vertex_breadcrumbs.get(vertex_id)
            self.assertEqual(expected_value, actual_value, f'actual breadcrumbs of vertex {vertex.id} different from'
                                                           f' expected. expected = {expected_breadcrumbs}'
                                                           f' and actual = {actual_value}')
|
1679092
|
from manimlib.scene.scene import Scene
class ThreeDScene(Scene):
CONFIG = {
"camera_config": {
"samples": 4,
"anti_alias_width": 0,
}
}
def begin_ambient_camera_rotation(self, rate=0.02):
pass # TODO
def stop_ambient_camera_rotation(self):
pass # TODO
def move_camera(self,
phi=None,
theta=None,
distance=None,
gamma=None,
frame_center=None,
**kwargs):
pass # TODO
|
1679145
|
import os
import yaml
import pkg_resources
class _ConfigurationItem(object):
def __init__(self, val):
self._val = val
def __getitem__(self, key):
val = self._val[key]
if isinstance(val, dict):
return _ConfigurationItem(val)
else:
return val
def __setitem__(self, key, value):
self._val[key] = value
def __contains__(self, key):
return key in self._val
def __getattr__(self, name):
if name == '_val':
return getattr(self, name)
else:
return self.__getitem__(name)
def __setattr__(self, name, value):
if name == '_val':
super().__setattr__(name, value)
else:
self.__setitem__(name, value)
def __str__(self):
return str(self._val)
def clear(self):
self._val.clear()
def update(self, b):
for key in b.keys():
if key in self:
if isinstance(b[key], dict) and isinstance(self._val[key], dict):
self[key].update(b[key])
else:
self[key] = b[key]
else:
self[key] = b[key]
class Configuration(object):
    '''A configuration object that describes the current configuration status of the package.

    All instances share one underlying configuration stored on the class, so
    mutating any instance is visible through every other instance.
    '''

    # Shared storage for all instances; populated lazily on first construction.
    _config = None

    def __init__(self):
        if Configuration._config is None:
            self.reset()

    def __getitem__(self, key):
        '''Get the value for the key.

        Parameters
        ----------
        key : string
            The configuration key.
        '''
        return Configuration._config[key]

    def __setitem__(self, key, value):
        '''Set the value for the key.

        Parameters
        ----------
        key : string
            The configuration key.
        value : anything
            The value to set this to.
        '''
        Configuration._config[key] = value

    # Attribute access is an alias for item access: cfg.foo == cfg['foo'].
    __getattr__ = __getitem__
    __setattr__ = __setitem__

    def __str__(self):
        return str(Configuration._config)

    def reset(self):
        '''Reset the configuration to the default configuration.

        This default configuration consists of the default parameters in `hcipy/data/default_config.yaml`, which
        can be overridden by a configuration file in `~/.hcipy/hcipy_config.yaml`. This can in turn be overridden
        by a configuration file named `hcipy_config.yaml` located in the current working directory.
        '''
        Configuration._config = _ConfigurationItem({})

        # Later sources override earlier ones; the first entry is a file-like
        # stream, the rest are filesystem paths that may not exist.
        sources = [
            pkg_resources.resource_stream('hcipy', 'data/default_config.yaml'),
            os.path.expanduser('~/.hcipy/hcipy_config.yaml'),
            './hcipy_config.yaml',
        ]

        for source in sources:
            if hasattr(source, 'read'):
                contents = source.read()
            else:
                try:
                    with open(source) as f:
                        contents = f.read()
                except IOError:
                    # Optional override files are allowed to be absent.
                    continue
            self.update(yaml.safe_load(contents))

    def update(self, b):
        '''Update the configuration with the configuration `b`, as a dictionary.

        Parameters
        ----------
        b : dict
            A dictionary containing the values to update in the configuration.
        '''
        Configuration._config.update(b)
|
1679171
|
from insights.parsers.tmpfilesd import TmpFilesD
from insights.tests import context_wrap
# Sample /etc/tmpfiles.d/sap.conf payload exercised by the tests below.
SAP_CONF = """
# systemd tmpfiles exclude file for SAP
# SAP software stores some important files
# in /tmp which should not be deleted
# Exclude SAP socket and lock files
x /tmp/.sap*
# Exclude HANA lock file
x /tmp/.hdb*lock
""".strip()
def test_tmpfilesd():
    """TmpFilesD parses the SAP exclude file into file paths and full rules."""
    data = TmpFilesD(context_wrap(SAP_CONF, path='/etc/tmpfiles.d/sap.conf'))

    assert len(data.files) == 2
    assert data.files == ['/tmp/.sap*', '/tmp/.hdb*lock']

    # Fields absent from an "x" (exclude) line parse as None.
    unset = {'mode': None, 'uid': None, 'gid': None, 'age': None, 'argument': None}
    assert data.rules == [
        dict(type='x', path='/tmp/.sap*', **unset),
        dict(type='x', path='/tmp/.hdb*lock', **unset),
    ]

    assert data.file_path == '/etc/tmpfiles.d/sap.conf'
    assert data.file_name == 'sap.conf'
def test_find_file():
    """find_file returns the matching rules, or an empty list when nothing matches."""
    data = TmpFilesD(context_wrap(SAP_CONF, path='/etc/tmpfiles.d/sap.conf'))

    unset = {'mode': None, 'uid': None, 'gid': None, 'age': None, 'argument': None}
    assert data.find_file('.sap*') == [dict(path='/tmp/.sap*', type='x', **unset)]
    assert data.find_file('.hdb*lock') == [dict(path='/tmp/.hdb*lock', type='x', **unset)]
    assert data.find_file('bar') == []
|
1679178
|
from __future__ import unicode_literals
import frappe
from frappe.model.db_query import DatabaseQuery
@frappe.whitelist()
def get_data(rfm=None, rfp=None, por=None,
    start=0, sort_by='', sort_order='desc'):
    '''Return data to render the item dashboard.

    For each Request for Material (RFM) matching the filters, walk the
    downstream procurement chain (Request for Purchase -> Quotation
    Comparison Sheet -> Purchase Order -> Purchase Receipt -> Purchase
    Invoice) and attach each document's name/docstatus plus a progress
    percentage and colour for the dashboard.

    :param rfm: optional Request for Material name to filter on
    :param rfp: optional Request for Purchase name (see NOTE below)
    :param por: unused, kept for backward compatibility with callers
    :param start: pagination offset for the RFM list
    :param sort_by: unused, kept for backward compatibility
    :param sort_order: unused, kept for backward compatibility
    :return: list of frappe dicts, one per RFM, or [] when the user has no
             warehouse access
    '''
    filters_rfm = []
    # FIX: filters_rfp was appended to without ever being defined, which
    # raised NameError whenever `rfp` was passed.
    # NOTE(review): filters_rfp is built but never applied to any query --
    # confirm whether filtering by RFP was actually intended.
    filters_rfp = []
    if rfm:
        filters_rfm.append(['name', '=', rfm])
    if rfp:
        filters_rfp.append(['name', '=', rfp])
    try:
        # check if user has any restrictions based on user permissions on warehouse
        if DatabaseQuery('Warehouse', user=frappe.session.user).build_match_conditions():
            pass  # filters.append(['warehouse', 'in', [w.name for w in frappe.get_list('Warehouse')]])
    except frappe.PermissionError:
        # user does not have access on warehouse
        return []
    filters_rfm.append(['docstatus', '<', 2])
    items = frappe.db.get_all('Request for Material', fields=['name'],
        filters=filters_rfm,
        limit_start=start,
        limit_page_length=21)  # FIX: was the string '21'
    # Hoisted loop-invariant SQL. Receipts/invoices are linked through their
    # child item tables, hence the joins.
    pr_query = """
        select
            distinct pr.name, pr.docstatus
        from
            `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pri
        where
            pri.parent=pr.name and pri.purchase_order=%s
    """
    pi_query = """
        select
            distinct pi.name, pi.docstatus
        from
            `tabPurchase Invoice` pi, `tabPurchase Invoice Item` pii
        where
            pii.parent=pi.name and pii.purchase_receipt=%s
    """
    for item in items:
        # Defaults: every row starts at 20% progress with empty downstream docs.
        item.update({
            'rfp': '', 'qcs': '', 'po': '', 'po_workflow': '',
            'pr': '', 'pi': '', 'pi_status': '',
            'progress': 20,
            'progress_bgc': '00FF00;',
            'rfm_status': frappe.db.get_value('Request for Material', item.name, 'docstatus'),
            'rfp_status': '', 'po_status': '', 'pr_status': '', 'qcs_status': '',
        })
        exists_rfp = frappe.db.exists('Request for Purchase', {'request_for_material': item.name})
        if not exists_rfp:
            continue
        item.update({'rfp': exists_rfp, 'progress': 40,
            'rfp_status': frappe.db.get_value('Request for Purchase', exists_rfp, 'docstatus')})
        exists_qcs = frappe.db.exists('Quotation Comparison Sheet', {'request_for_purchase': exists_rfp})
        if exists_qcs:
            item.update({'qcs': exists_qcs,
                'qcs_status': frappe.db.get_value('Quotation Comparison Sheet', exists_qcs, 'docstatus')})
        exists_po = frappe.db.exists('Purchase Order', {'one_fm_request_for_purchase': exists_rfp})
        if not exists_po:
            continue
        item.update({'progress': 60, 'po': exists_po,
            'po_status': frappe.db.get_value('Purchase Order', exists_po, 'docstatus'),
            'po_workflow': frappe.db.get_value('Purchase Order', exists_po, 'workflow_state')})
        # FIX: SQL parameters are now passed as proper tuples.
        pr_list = frappe.db.sql(pr_query, (exists_po,), as_dict=True)
        if not pr_list:
            continue
        item.update({'progress': 80})
        # Only the first receipt found is surfaced on the dashboard.
        item.update({'pr': pr_list[0].name, 'pr_status': pr_list[0].docstatus})
        for pr in pr_list:
            pi_list = frappe.db.sql(pi_query, (pr.name,), as_dict=True)
            if pi_list:
                # As in the original logic, a later receipt's first invoice
                # overwrites an earlier one.
                item.update({'progress': 100, 'progress_bgc': 'blue;',
                    'pi': pi_list[0].name, 'pi_status': pi_list[0].docstatus})
    return items
|
1679185
|
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from summariser.rouge.rouge import Rouge
import summariser.utils.data_helpers as util
import numpy as np
import operator as op
import functools
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from resources import *
#from resources import BASE_DIR,ROUGE_DIR
class State:
    """Mutable state of a draft summary being built sentence-by-sentence.

    Tracks the sentences selected so far, the remaining candidate actions,
    and produces the fixed-length feature vector (top n-gram coverage bits
    plus coverage/redundancy/length/position statistics) consumed by the
    learner. Action ids are sentence index + 1; action 0 terminates.
    """
    def __init__(self, sum_token_length, base_length, sent_num, block_num, language):
        # hyper parameters
        self.reward_lambda = 0.9
        # learning arguments
        # None means "no length budget"; a very large cap is used instead.
        if sum_token_length != None:
            self.sum_token_length = sum_token_length
        else:
            self.sum_token_length = 99999
        self.state_length_computer = StateLengthComputer(block_num,base_length,sent_num)
        self.vec_length = self.state_length_computer.getTotalLength()
        self.summary_vector_length = self.state_length_computer.getStatesLength(block_num)
        self.language = language
        # stemmers and stop words list
        self.stemmer = SnowballStemmer(self.language)
        self.stoplist = set(stopwords.words(self.language))
        # class variables
        #self.draft_summary = ''
        self.draft_summary_list = []
        self.historical_actions = []
        # action 0 is the "terminate" action; 1..sent_num select sentences
        self.available_sents = [i for i in range(0,sent_num+1)]
        self.terminal_state = 0 # 0 stands for non-terminal, and 1 stands for terminal
        self.draft_summary_length = 0
        #some flags/options
        self.newReward = False
    def getSelfVector(self, top_ngrams, sentences):
        """Feature vector of the current draft (see getStateVector)."""
        return self.getStateVector(self.draft_summary_list,self.historical_actions,
                                   top_ngrams, sentences)
    def getStateVector(self, draft_list, draft_index_list, top_ngrams, sentences):
        '''
        Represent the current draft summary using a vector
        :param draft_list: a list of sentences, included in the current draft summary
        :param draft_index_list: the indices of the included sentences
        :param top_ngrams: top n-grams for all the original documents
        :param sentences: all sentences information, used to find positions information
        :param tfidf: decides to use the Japan version (state_type==True) or the REAPER version
        :return: an numpy array, the vector representation of the state
        '''
        # for empty or over-length draft, return a full-zero vector
        draft_length = 0
        for sent in draft_list:
            draft_length += len(sent.split(' '))
        # NOTE(review): the `draft_list == None` test is unreachable -- a None
        # draft_list would already have raised in the loop above, and len() is
        # evaluated first. Callers appear to always pass a list.
        if len(draft_list) == 0 or draft_list == None: #or draft_length>self.sum_token_length:
            return np.zeros(self.vec_length)
        vector = [0] * self.vec_length
        coverage_num = 0
        redundant_count = 0
        sent_num = len(draft_index_list)
        # starting offset (minus one; incremented before each write) of the
        # vector segment reserved for drafts of this sentence count
        index = -1 + self.state_length_computer.getIndexUntilSentNum(sent_num)
        draft_ngrams = util.extract_ngrams_count(draft_list,self.stemmer,self.language,self.stoplist)
        # first part: one presence bit per top n-gram; the segment keeps 5
        # trailing slots for the summary statistics written below
        num = self.state_length_computer.getStatesLength(sent_num)-5
        for i in range(num):
            index += 1
            if top_ngrams[i] in draft_ngrams:
                vector[index] = 1
                coverage_num += 1
                if draft_ngrams[top_ngrams[i]] >= 2:
                    redundant_count += 1 # draft_ngrams[top_ngrams[i]]-1
        #this is needed, because the above loop does not perform the last add
        index += 1
        #second part: coverage ratio
        vector[index] = coverage_num*1.0/len(top_ngrams)
        index += 1
        #third part: redundant ratio;
        vector[index] = redundant_count*1.0/len(top_ngrams)
        index += 1
        #fourth part: length ratio
        vector[index] = draft_length*1.0/self.sum_token_length
        index += 1
        # fifth part: position statistic of the selected sentences
        vector[index] = self.getPositionInfo(sentences,draft_index_list)
        index += 1
        #sixth part: length violation bit
        if draft_length <= self.sum_token_length:
            vector[index] = 1
        # sanity check: we wrote exactly up to this segment's boundary
        if sent_num >= self.state_length_computer.block_num:
            assert index == -1 + self.state_length_computer.getTotalLength()
        else:
            assert index == -1 + self.state_length_computer.getIndexUntilSentNum(sent_num+1)
        return np.array(vector)
    def getPositionInfo(self, sentences, draft_index_list):
        """Sum of reciprocal document positions of the selected sentences."""
        position_index = 0
        for idx in draft_index_list:
            pos = sentences[idx].position
            position_index += 1.0/pos
        return position_index
    def noCommonTokens(self, token_list1, token_list2, word_num_limit=float("inf")):
        """True iff both lists are within the length limit and disjoint."""
        # we do not check long sentences
        if len(token_list1) <= word_num_limit and len(token_list2) <= word_num_limit:
            if set(token_list1).isdisjoint(token_list2):
                return True
            else:
                return False
        else:
            return False
    #the Japan version
    def getSimilarity(self, tokens1, sentences2, fullDoc=False):
        """TF-IDF cosine similarity between two token strings.

        Returns 0 when the texts share no tokens or vectorisation fails,
        and 1 when the two strings are identical.
        """
        # tokens1 is a string of tokens
        # sentences2 is a list of sentences, and each sentences is a list of tokens
        token_list1 = tokens1.split(' ')
        token_str1 = tokens1
        if fullDoc:
            token_str2 = ' '.join(sentences2)
            token_list2 = token_str2.split(' ')
        else:
            token_list2 = sentences2.split(' ')
            token_str2 = sentences2
        tfidf_vectorizer = TfidfVectorizer(min_df=0)
        #print('token_list 1: '+' '.join(token_list1))
        #print('token_list 2: '+' '.join(token_list2))
        if self.noCommonTokens(token_list1,token_list2):
            return 0
        if token_str2 == token_str1:
            return 1
        try:
            tfidf_matrix_train = tfidf_vectorizer.fit_transform([token_str1, token_str2])
        except ValueError:
            # e.g. empty vocabulary after preprocessing
            return 0
        return cosine_similarity(tfidf_matrix_train[0], tfidf_matrix_train[1])[0][0]
    def getNewStateVec(self, new_sent_id, top_ngrams, sentences):
        """Vector for the hypothetical state after adding sentence new_sent_id."""
        temp_draft_summary_list = self.draft_summary_list+[sentences[new_sent_id].untokenized_form]
        draft_index_list = self.historical_actions+[new_sent_id]
        return self.getStateVector(temp_draft_summary_list,draft_index_list,top_ngrams,sentences)
    def removeOverlengthSents(self, sents, production):
        """Drop candidate actions whose sentence no longer fits the budget.

        Action 0 (terminate) is always kept. In production mode the budget
        is relaxed to 120% of the token limit.
        """
        new_avai_acts = [0]
        for sent_id in self.available_sents:
            if sent_id == 0:
                continue
            if not production and len(sents[sent_id-1].untokenized_form.split(' ')) > self.sum_token_length-self.draft_summary_length:
                continue
            elif production and len(sents[sent_id-1].untokenized_form.split(' ')) > int(1.2*self.sum_token_length)-self.draft_summary_length:
                continue
            else:
                new_avai_acts.append(sent_id)
        self.available_sents = new_avai_acts[:]
        del new_avai_acts
    def updateState(self, new_sent_id, sents, read=False, production=False):
        """Apply action new_sent_id: append the sentence, refresh actions.

        Returns -1 (and enters the terminal state) if the draft exceeded the
        token budget, otherwise 0.
        """
        self.draft_summary_list.append(sents[new_sent_id].untokenized_form)
        self.historical_actions.append(new_sent_id)
        self.draft_summary_length += len(sents[new_sent_id].untokenized_form.split(' '))
        if not read:
            # action ids are sentence index + 1 (0 is the terminate action)
            self.available_sents.remove(new_sent_id+1)
        self.removeOverlengthSents(sents,production)
        if not production and self.draft_summary_length > self.sum_token_length:
            self.available_sents = [0]
            self.terminal_state = 1
            print('overlength! should not happen')
            return -1
        return 0
    def getOptimalTerminalRougeScores(self, model):
        """ROUGE scores of the draft against the reference summary *model*.

        NOTE(review): returns the float 0. for an empty draft but a list of
        six scores otherwise -- callers must handle both shapes.
        """
        if len(self.draft_summary_list) == 0:
            return 0.
        rouge = Rouge(ROUGE_DIR, BASE_DIR, True, True)
        R1, R2, R3, R4, RL, RSU = rouge(' '.join(self.draft_summary_list), [model], self.sum_token_length)
        rouge.clean()
        return [R1, R2, R3, R4, RL, RSU]
    def getTerminalReward(self, sentences, sentences_stemmed_aggreate, sent2tokens, sim_scores):
        """Heuristic terminal reward: relatedness minus weighted redundancy.

        ``sim_scores`` is a memoisation cache keyed by sentence-index pairs
        ((idx, -1) meaning similarity to the full document); it is mutated
        in place so later calls reuse earlier similarity computations.
        """
        # assert self.draft_summary_length <= self.sum_token_length
        # print('summary: \n'+' ||| '.join(self.draft_summary_list))
        relatedness_score = 0
        redundant_score = 0
        for i in range(len(self.historical_actions)):
            sent_idx = self.historical_actions[i]
            # compute relatedness scores
            # the original version used in the japan version
            # -1 stands for full docs
            if (sent_idx, -1) in sim_scores:
                relatedness_score += sim_scores[(sent_idx, -1)]
            elif (-1, sent_idx) in sim_scores:
                relatedness_score += sim_scores[(-1, sent_idx)]
            else:
                sim_score = self.getSimilarity(' '.join(sent2tokens(self.draft_summary_list[i])),
                                               sentences_stemmed_aggreate, True) + 1.0 / sentences[sent_idx].position
                relatedness_score += sim_score
                sim_scores[(sent_idx, -1)] = sim_score
            # compute redundancy scores
            for j in range(i):
                idx2 = self.historical_actions[j]
                if (sent_idx, idx2) in sim_scores:
                    redundant_score += sim_scores[(sent_idx, idx2)]
                elif (idx2, sent_idx) in sim_scores:
                    redundant_score += sim_scores[(idx2, sent_idx)]
                else:
                    red_score = self.getSimilarity(' '.join(sent2tokens(self.draft_summary_list[j])),
                                                   ' '.join(sent2tokens(self.draft_summary_list[i])))
                    redundant_score += red_score
                    sim_scores[(sent_idx, idx2)] = red_score
        return relatedness_score*self.reward_lambda-(1-self.reward_lambda)*redundant_score
class StateLengthComputer():
    """Computes the per-block state-vector segment lengths used by ``State``.

    Segment k (1-based) is sized proportionally to log10(C(sent_num, k)),
    normalised by log10(C(sent_num, 1)) and scaled by ``base_length``, with
    5 extra slots reserved for summary statistics.
    """
    def __init__(self, block_num, base_length, sent_num):
        self.block_num = block_num
        denominator = np.log10(self.ncr(sent_num, 1))
        self.lengths = [
            int(base_length * np.log10(self.ncr(sent_num, k + 1)) * 1.0 / denominator) + 5
            for k in range(block_num)
        ]
    def getStatesLength(self, sent_num):
        """Segment length for a draft of *sent_num* sentences (clamped)."""
        if sent_num >= self.block_num:
            return self.lengths[-1]
        return self.lengths[sent_num - 1]
    def getIndexUntilSentNum(self, n):
        """Starting offset of the segment for sentence count *n*."""
        capped = min(n, self.block_num)
        return sum(self.getStatesLength(k) for k in range(1, capped))
    def getTotalLength(self):
        """Total length of the concatenated state vector."""
        return sum(self.lengths)
    def ncr(self, n, r):
        """Binomial coefficient C(n, r), computed with exact integers."""
        r = min(r, n - r)
        result = 1
        for offset in range(r):
            # Exact at every step: result*(n-offset) is divisible by offset+1.
            result = result * (n - offset) // (offset + 1)
        return result
if __name__ == '__main__':
    # Manual smoke test for StateLengthComputer.
    block_num = 6
    base_num = 100
    sent_num = 400
    print('block num: {}; sentence num: {}; '
          'the summary of length 1 will have {}-dimension states.'.format(block_num, sent_num, base_num))
    computer = StateLengthComputer(block_num, base_num, sent_num)
    print('each state length:')
    for count in range(1, 9):
        print(computer.getStatesLength(count))
    print('starting index:')
    for count in range(1, 9):
        print(computer.getIndexUntilSentNum(count))
    print('total length:{}'.format(computer.getTotalLength()))
|
1679191
|
import time
from collections import OrderedDict
import torch
import torch.nn as nn
import MinkowskiEngine as ME
__all__ = ['MinkUNet']
class BasicConvolutionBlock(nn.Module):
    """Sparse Conv -> BatchNorm -> ReLU block."""

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
        super().__init__()
        conv = ME.MinkowskiConvolution(inc,
                                       outc,
                                       kernel_size=ks,
                                       dilation=dilation,
                                       stride=stride,
                                       dimension=D)
        self.net = nn.Sequential(conv,
                                 ME.MinkowskiBatchNorm(outc),
                                 ME.MinkowskiReLU(inplace=True))

    def forward(self, x):
        return self.net(x)
class BasicDeconvolutionBlock(nn.Module):
    """Sparse transposed Conv -> BatchNorm -> ReLU upsampling block."""

    def __init__(self, inc, outc, ks=3, stride=1, D=3):
        super().__init__()
        layers = [
            ME.MinkowskiConvolutionTranspose(inc,
                                             outc,
                                             kernel_size=ks,
                                             stride=stride,
                                             dimension=D),
            ME.MinkowskiBatchNorm(outc),
            ME.MinkowskiReLU(inplace=True),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class ResidualBlock(nn.Module):
    """Two-convolution sparse residual block with optional projection shortcut.

    The shortcut is the identity when the channel count and stride allow it;
    otherwise a 1x1 strided convolution + batch norm projects the input.
    """

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1, D=3):
        super().__init__()
        self.net = nn.Sequential(
            ME.MinkowskiConvolution(inc, outc, kernel_size=ks,
                                    dilation=dilation, stride=stride,
                                    dimension=D),
            ME.MinkowskiBatchNorm(outc),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiConvolution(outc, outc, kernel_size=ks,
                                    dilation=dilation, stride=1,
                                    dimension=D),
            ME.MinkowskiBatchNorm(outc)
        )
        if inc == outc and stride == 1:
            self.downsample = nn.Sequential()
        else:
            self.downsample = nn.Sequential(
                ME.MinkowskiConvolution(inc, outc, kernel_size=1, dilation=1,
                                        stride=stride, dimension=D),
                ME.MinkowskiBatchNorm(outc)
            )
        self.relu = ME.MinkowskiReLU(inplace=True)

    def forward(self, x):
        shortcut = self.downsample(x)
        return self.relu(self.net(x) + shortcut)
class MinkUNet(nn.Module):
    """Sparse 3-D U-Net built from MinkowskiEngine blocks.

    Four strided encoder stages progressively downsample; four
    transposed-convolution decoder stages upsample and concatenate the
    matching encoder feature map (U-Net skip connections) before two
    residual blocks.

    Keyword args:
        cr: channel multiplier applied to the base widths (default 1.0).
        in_channels: number of input feature channels (default 3).
        run_up: stored flag, not read in forward() here (default True).
        D: spatial dimension of the sparse tensors (default 3).
    """
    def __init__(self, **kwargs):
        super().__init__()
        cr = kwargs.get('cr', 1.0)
        in_channels = kwargs.get('in_channels', 3)
        # Base channel widths: stem, 4 encoder stages, 4 decoder stages.
        cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
        cs = [int(cr * x) for x in cs]
        self.run_up = kwargs.get('run_up', True)
        self.D = kwargs.get('D', 3)
        # Stem: two stride-1 convs at full resolution.
        self.stem = nn.Sequential(
            ME.MinkowskiConvolution(in_channels, cs[0], kernel_size=3, stride=1, dimension=self.D),
            ME.MinkowskiBatchNorm(cs[0]),
            ME.MinkowskiReLU(True),
            ME.MinkowskiConvolution(cs[0], cs[0], kernel_size=3, stride=1, dimension=self.D),
            ME.MinkowskiBatchNorm(cs[0]),
            ME.MinkowskiReLU(inplace=True)
        )
        # Encoder: each stage starts with a stride-2 conv (downsample),
        # followed by two residual blocks.
        self.stage1 = nn.Sequential(
            BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1, D=self.D),
            ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1, D=self.D),
            ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1, D=self.D),
        )
        self.stage2 = nn.Sequential(
            BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1, D=self.D),
            ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1, D=self.D),
            ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1, D=self.D)
        )
        self.stage3 = nn.Sequential(
            BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1, D=self.D),
            ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1, D=self.D),
            ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1, D=self.D),
        )
        self.stage4 = nn.Sequential(
            BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1, D=self.D),
            ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1, D=self.D),
            ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1, D=self.D),
        )
        # Decoder: each "up" is [deconv, residual blocks]; the residual input
        # width includes the concatenated encoder skip channels.
        self.up1 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2, D=self.D),
            nn.Sequential(
                ResidualBlock(cs[5] + cs[3], cs[5], ks=3, stride=1,
                              dilation=1, D=self.D),
                ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1, D=self.D),
            )
        ])
        self.up2 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2, D=self.D),
            nn.Sequential(
                ResidualBlock(cs[6] + cs[2], cs[6], ks=3, stride=1,
                              dilation=1, D=self.D),
                ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1, D=self.D),
            )
        ])
        self.up3 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2, D=self.D),
            nn.Sequential(
                ResidualBlock(cs[7] + cs[1], cs[7], ks=3, stride=1,
                              dilation=1, D=self.D),
                ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1, D=self.D),
            )
        ])
        self.up4 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2, D=self.D),
            nn.Sequential(
                ResidualBlock(cs[8] + cs[0], cs[8], ks=3, stride=1,
                              dilation=1, D=self.D),
                ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1, D=self.D),
            )
        ])
        self.weight_initialization()
        # NOTE(review): defined but never applied in forward() -- confirm
        # whether callers use it externally before removing.
        self.dropout = nn.Dropout(0.3, True)
    def weight_initialization(self):
        # Sets BN affine params to identity. NOTE(review): this matches
        # nn.BatchNorm1d instances found by modules() recursion -- presumably
        # the ones nested inside ME.MinkowskiBatchNorm; confirm against the
        # installed MinkowskiEngine version.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # Encoder path (x0 is full resolution, x4 is the bottleneck).
        x0 = self.stem(x)
        x1 = self.stage1(x0)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)
        # Decoder path: upsample, concatenate the skip, then refine.
        y1 = self.up1[0](x4)
        y1 = ME.cat(y1, x3)
        y1 = self.up1[1](y1)
        y2 = self.up2[0](y1)
        y2 = ME.cat(y2, x2)
        y2 = self.up2[1](y2)
        y3 = self.up3[0](y2)
        y3 = ME.cat(y3, x1)
        y3 = self.up3[1](y3)
        y4 = self.up4[0](y3)
        y4 = ME.cat(y4, x0)
        y4 = self.up4[1](y4)
        return y4
|
1679212
|
import json
import typing as t
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from uuid import UUID
from piccolo.columns.base import Column
from piccolo.table import Table
from piccolo.testing.random_builder import RandomBuilder
from piccolo.utils.sync import run_sync
class ModelBuilder:
    """Builds ``Table`` instances populated with random data, for tests.

    Column values come from ``RandomBuilder`` generators keyed by the
    column's Python value type; foreign keys are satisfied by recursively
    building and persisting the referenced table.
    """

    # Maps a column's Python value type to the generator used for it.
    __DEFAULT_MAPPER: t.Dict[t.Type, t.Callable] = {
        bool: RandomBuilder.next_bool,
        bytes: RandomBuilder.next_bytes,
        date: RandomBuilder.next_date,
        datetime: RandomBuilder.next_datetime,
        float: RandomBuilder.next_float,
        int: RandomBuilder.next_int,
        str: RandomBuilder.next_str,
        time: RandomBuilder.next_time,
        timedelta: RandomBuilder.next_timedelta,
        UUID: RandomBuilder.next_uuid,
    }

    @classmethod
    async def build(
        cls,
        table_class: t.Type[Table],
        defaults: t.Optional[t.Dict[t.Union[Column, str], t.Any]] = None,
        persist: bool = True,
        minimal: bool = False,
    ) -> Table:
        """
        Build Table instance with random data and save async.
        This can build relationships, supported data types and parameters.

        :param table_class:
            Table class to randomize.
        :param defaults:
            Column values to use instead of random data, keyed by Column
            or column name.
        :param persist:
            Whether to save the built instance to the database.
        :param minimal:
            Whether to skip nullable columns entirely.

        Examples:
            manager = await ModelBuilder.build(Manager)
            manager = await ModelBuilder.build(Manager, name='Guido')
            manager = await ModelBuilder(persist=False).build(Manager)
            manager = await ModelBuilder(minimal=True).build(Manager)
            band = await ModelBuilder.build(Band, manager=manager)
        """
        return await cls._build(
            table_class=table_class,
            defaults=defaults,
            persist=persist,
            minimal=minimal,
        )

    @classmethod
    def build_sync(
        cls,
        table_class: t.Type[Table],
        defaults: t.Optional[t.Dict[t.Union[Column, str], t.Any]] = None,
        persist: bool = True,
        minimal: bool = False,
    ) -> Table:
        """
        Build Table instance with random data and save sync.
        This can build relationships, supported data types and parameters.

        :param table_class:
            Table class to randomize.

        Examples:
            manager = ModelBuilder.build_sync(Manager)
            manager = ModelBuilder.build_sync(Manager, name='Guido')
            manager = ModelBuilder(persist=False).build_sync(Manager)
            manager = ModelBuilder(minimal=True).build_sync(Manager)
            band = ModelBuilder.build_sync(Band, manager=manager)
        """
        return run_sync(
            cls.build(
                table_class=table_class,
                defaults=defaults,
                persist=persist,
                minimal=minimal,
            )
        )

    @classmethod
    async def _build(
        cls,
        table_class: t.Type[Table],
        defaults: t.Optional[t.Dict[t.Union[Column, str], t.Any]] = None,
        minimal: bool = False,
        persist: bool = True,
    ) -> Table:
        """Internal worker shared by build/build_sync."""
        model = table_class()
        defaults = {} if not defaults else defaults

        # Apply caller-supplied values first; string keys are resolved to
        # Column objects via the table metadata.
        for column, value in defaults.items():
            if isinstance(column, str):
                column = model._meta.get_column_by_name(column)
            setattr(model, column._meta.name, value)

        for column in model._meta.columns:
            if column._meta.null and minimal:
                continue
            if column._meta.name in defaults:
                continue  # Column value exists
            if "references" in column._meta.params and persist:
                # Foreign key: recursively build and persist the referenced
                # row, then use its primary key as this column's value.
                reference_model = await cls._build(
                    column._meta.params["references"],
                    persist=True,
                )
                random_value = getattr(
                    reference_model,
                    reference_model._meta.primary_key._meta.name,
                )
            else:
                random_value = cls._randomize_attribute(column)
            setattr(model, column._meta.name, random_value)

        if persist:
            await model.save().run()
        return model

    @classmethod
    def _randomize_attribute(cls, column: Column) -> t.Any:
        """
        Generate a random value for a column and apply formatting.

        :param column:
            Column class to randomize.
        """
        if column.value_type == Decimal:
            # Respect the column's declared precision/scale.
            precision, scale = column._meta.params["digits"]
            random_value = RandomBuilder.next_float(
                maximum=10 ** (precision - scale), scale=scale
            )
        elif column._meta.choices:
            random_value = RandomBuilder.next_enum(column._meta.choices)
        else:
            random_value = cls.__DEFAULT_MAPPER[column.value_type]()

        if "length" in column._meta.params and isinstance(random_value, str):
            # Truncate to the column's maximum length.
            return random_value[: column._meta.params["length"]]
        elif column.column_type in ["JSON", "JSONB"]:
            return json.dumps(random_value)
        return random_value
|
1679214
|
import Exalt.view as vu
import Exalt.messages as messages
import Exalt.encodings as encodings
from lxml import etree
import Exalt.impl.parsetools as parsetools
from io import BytesIO
def format_markup(markup, view, **kwargs):
    """Serialize *markup* pretty-printed and decoded with its own encoding."""
    encoding = markup.docinfo.encoding
    # lxml only indents HTML if method == "xml", but then it will self-close
    # any <script> element, which is not OK.
    #
    # This hack adds a single space into any empty <script> elements, which
    # forces lxml to add the closing tag.
    if vu.is_html(view):
        for script in markup.xpath("//script[@src][not(normalize-space(.))]"):
            script.text = " "
    serialized = etree.tostring(markup,
                                pretty_print=True,
                                encoding=encoding,
                                **kwargs)
    return serialized.decode(encoding)
def format_region(view, region, **kwargs):
    """Pretty-print the markup in *region*.

    Returns None when the view is not eligible or the markup is not
    well-formed (in which case a status message is flashed on the view).
    """
    if not vu.is_eligible(view):
        return None
    try:
        parser = parsetools.get_parser(view,
                                       encoding=encodings.UTF8,
                                       remove_blank_text=True,
                                       recover=True)
        markup = parsetools.parse_string(view, parser, view.substr(region))
        return format_markup(markup, view, **kwargs)
    except etree.XMLSyntaxError:
        vu.set_status(view, messages.NOT_WELL_FORMED_XML)
        vu.reset_status(view)
def canonicalize_document(view, region):
    """Return the C14N canonical form of the XML in *region* as text."""
    parser = parsetools.get_parser(view,
                                   encoding=encodings.UTF8,
                                   remove_blank_text=True)
    xml = parsetools.parse_string(view, parser, view.substr(region))
    buffer = BytesIO()
    xml.write_c14n(buffer)
    return buffer.getvalue().decode(xml.docinfo.encoding)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.